diff --git a/.cirrus.yml b/.cirrus.yml
deleted file mode 100644
index 4895987da4..0000000000
--- a/.cirrus.yml
+++ /dev/null
@@ -1,109 +0,0 @@
-env:
-  CIRRUS_CLONE_DEPTH: 1
-
-windows_msys2_task:
-  timeout_in: 90m
-  windows_container:
-    image: cirrusci/windowsservercore:2019
-    os_version: 2019
-    cpu: 8
-    memory: 8G
-  env:
-    CIRRUS_SHELL: powershell
-    MSYS: winsymlinks:native
-    MSYSTEM: MINGW64
-    MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe
-    MSYS2_FINGERPRINT: 0
-    MSYS2_PACKAGES: "
-      diffutils git grep make pkg-config sed
-      mingw-w64-x86_64-python
-      mingw-w64-x86_64-python-sphinx
-      mingw-w64-x86_64-toolchain
-      mingw-w64-x86_64-SDL2
-      mingw-w64-x86_64-SDL2_image
-      mingw-w64-x86_64-gtk3
-      mingw-w64-x86_64-glib2
-      mingw-w64-x86_64-ninja
-      mingw-w64-x86_64-jemalloc
-      mingw-w64-x86_64-lzo2
-      mingw-w64-x86_64-zstd
-      mingw-w64-x86_64-libjpeg-turbo
-      mingw-w64-x86_64-pixman
-      mingw-w64-x86_64-libgcrypt
-      mingw-w64-x86_64-libpng
-      mingw-w64-x86_64-libssh
-      mingw-w64-x86_64-snappy
-      mingw-w64-x86_64-libusb
-      mingw-w64-x86_64-usbredir
-      mingw-w64-x86_64-libtasn1
-      mingw-w64-x86_64-nettle
-      mingw-w64-x86_64-cyrus-sasl
-      mingw-w64-x86_64-curl
-      mingw-w64-x86_64-gnutls
-      mingw-w64-x86_64-libnfs
-    "
-    CHERE_INVOKING: 1
-  msys2_cache:
-    folder: C:\tools\archive
-    reupload_on_changes: false
-    # These env variables are used to generate fingerprint to trigger the cache procedure
-    # If wanna to force re-populate msys2, increase MSYS2_FINGERPRINT
-    fingerprint_script:
-      - |
-        echo $env:CIRRUS_TASK_NAME
-        echo $env:MSYS2_URL
-        echo $env:MSYS2_FINGERPRINT
-        echo $env:MSYS2_PACKAGES
-    populate_script:
-      - |
-        md -Force C:\tools\archive\pkg
-        $start_time = Get-Date
-        bitsadmin /transfer msys_download /dynamic /download /priority FOREGROUND $env:MSYS2_URL C:\tools\archive\base.exe
-        Write-Output "Download time taken: $((Get-Date).Subtract($start_time))"
-        cd C:\tools
-        C:\tools\archive\base.exe -y
-        del -Force C:\tools\archive\base.exe
-        Write-Output "Base install time taken: $((Get-Date).Subtract($start_time))"
-        $start_time = Get-Date
-
-        ((Get-Content -path C:\tools\msys64\etc\\post-install\\07-pacman-key.post -Raw) -replace '--refresh-keys', '--version') | Set-Content -Path C:\tools\msys64\etc\\post-install\\07-pacman-key.post
-        C:\tools\msys64\usr\bin\bash.exe -lc "sed -i 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf"
-        C:\tools\msys64\usr\bin\bash.exe -lc "export"
-        C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Sy
-        echo Y | C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Suu --overwrite=*
-        taskkill /F /FI "MODULES eq msys-2.0.dll"
-        tasklist
-        C:\tools\msys64\usr\bin\bash.exe -lc "mv -f /etc/pacman.conf.pacnew /etc/pacman.conf || true"
-        C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -Syuu --overwrite=*"
-        Write-Output "Core install time taken: $((Get-Date).Subtract($start_time))"
-        $start_time = Get-Date
-
-        C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -S --needed $env:MSYS2_PACKAGES"
-        Write-Output "Package install time taken: $((Get-Date).Subtract($start_time))"
-        $start_time = Get-Date
-
-        del -Force -ErrorAction SilentlyContinue C:\tools\msys64\etc\mtab
-        del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\fd
-        del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stderr
-        del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdin
-        del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdout
-        del -Force -Recurse -ErrorAction SilentlyContinue C:\tools\msys64\var\cache\pacman\pkg
-        tar cf C:\tools\archive\msys64.tar -C C:\tools\ msys64
-
-        Write-Output "Package archive time taken: $((Get-Date).Subtract($start_time))"
-        del -Force -Recurse -ErrorAction SilentlyContinue c:\tools\msys64
-    install_script:
-      - |
-        $start_time = Get-Date
-        cd C:\tools
-        ls C:\tools\archive\msys64.tar
-        tar xf C:\tools\archive\msys64.tar
-        Write-Output "Extract msys2 time taken: $((Get-Date).Subtract($start_time))"
-  script:
-    - C:\tools\msys64\usr\bin\bash.exe -lc "mkdir build"
-    - C:\tools\msys64\usr\bin\bash.exe -lc "cd build && ../configure --python=python3"
-    - C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make -j8"
-    - exit $LastExitCode
-  test_script:
-    - C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make V=1 check"
-    - exit $LastExitCode
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000000..93718ef425
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,21 @@
+#
+# List of code-formatting clean ups the git blame can ignore
+#
+#  git blame --ignore-revs-file .git-blame-ignore-revs
+#
+# or
+#
+#  git config blame.ignoreRevsFile .git-blame-ignore-revs
+#
+
+# gdbstub: clean-up indents
+ad9e4585b3c7425759d3eea697afbca71d2c2082
+
+# e1000e: fix code style
+0eadd56bf53ab196a16d492d7dd31c62e1c24c32
+
+# target/riscv: coding style fixes
+8c7feddddd9218b407792120bcfda0347ed16205
+
+# replace TABs with spaces
+48805df9c22a0700fba4b3b548fafaa21726ca68
diff --git a/.gitignore b/.gitignore
index 18346a6a7d..caa0549a15 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,3 +22,5 @@ GTAGS
 *.swp
 *.patch
 *.gcov
+
+/subprojects/slirp
diff --git a/.gitlab-ci.d/base.yml b/.gitlab-ci.d/base.yml
index 0274228de8..2fbb58d2a3 100644
--- a/.gitlab-ci.d/base.yml
+++ b/.gitlab-ci.d/base.yml
@@ -75,5 +75,5 @@
     - if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
       when: manual

-    # Jobs can run if any jobs they depend on were successfull
+    # Jobs can run if any jobs they depend on were successful
     - when: on_success
diff --git a/.gitlab-ci.d/buildtest-template.yml b/.gitlab-ci.d/buildtest-template.yml
index cb96b55c3f..c9f2e737c0 100644
--- a/.gitlab-ci.d/buildtest-template.yml
+++ b/.gitlab-ci.d/buildtest-template.yml
@@ -5,19 +5,15 @@
   before_script:
     - JOBS=$(expr $(nproc) + 1)
   script:
-    - if test -n "$LD_JOBS";
-      then
-        scripts/git-submodule.sh update meson ;
-      fi
     - mkdir build
    - cd build
    - ../configure --enable-werror --disable-docs --enable-fdt=system
-      ${LD_JOBS:+--meson=git} ${TARGETS:+--target-list="$TARGETS"}
+      ${TARGETS:+--target-list="$TARGETS"}
      $CONFIGURE_ARGS || { cat config.log meson-logs/meson-log.txt && exit 1; }
    - if test -n "$LD_JOBS";
      then
-        ../meson/meson.py configure . -Dbackend_max_links="$LD_JOBS" ;
+        pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ;
      fi || exit 1;
    - make -j"$JOBS"
    - if test -n "$MAKE_CHECK_ARGS";
@@ -25,6 +21,22 @@
        make -j"$JOBS" $MAKE_CHECK_ARGS ;
      fi

+# We jump some hoops in common_test_job_template to avoid
+# rebuilding all the object files we skip in the artifacts
+.native_build_artifact_template:
+  artifacts:
+    expire_in: 2 days
+    paths:
+      - build
+      - .git-submodule-status
+    exclude:
+      - build/**/*.p
+      - build/**/*.a.p
+      - build/**/*.fa.p
+      - build/**/*.c.o
+      - build/**/*.c.o.d
+      - build/**/*.fa
+
 .common_test_job_template:
   extends: .base_job_template
   stage: test
diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml
index d903c42798..0f1be14cb6 100644
--- a/.gitlab-ci.d/buildtest.yml
+++ b/.gitlab-ci.d/buildtest.yml
@@ -2,20 +2,16 @@ include:
   - local: '/.gitlab-ci.d/buildtest-template.yml'

 build-system-alpine:
-  extends: .native_build_job_template
+  extends:
+    - .native_build_job_template
+    - .native_build_artifact_template
   needs:
     - job: amd64-alpine-container
   variables:
     IMAGE: alpine
-    TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
-      microblazeel-softmmu mips64el-softmmu
+    TARGETS: avr-softmmu loongarch64-softmmu mips64-softmmu mipsel-softmmu
     MAKE_CHECK_ARGS: check-build
     CONFIGURE_ARGS: --enable-docs --enable-trace-backends=log,simple,syslog
-  artifacts:
-    expire_in: 2 days
-    paths:
-      - .git-submodule-status
-      - build

 check-system-alpine:
   extends: .native_test_job_template
@@ -36,19 +32,17 @@ avocado-system-alpine:
     MAKE_CHECK_ARGS: check-avocado

 build-system-ubuntu:
-  extends: .native_build_job_template
+  extends:
+    - .native_build_job_template
+    - .native_build_artifact_template
   needs:
-    job: amd64-ubuntu2004-container
+    job: amd64-ubuntu2204-container
   variables:
-    IMAGE: ubuntu2004
+    IMAGE: ubuntu2204
     CONFIGURE_ARGS: --enable-docs
     TARGETS: alpha-softmmu cris-softmmu hppa-softmmu
       microblazeel-softmmu mips64el-softmmu
     MAKE_CHECK_ARGS: check-build
-  artifacts:
-    expire_in: 2 days
-    paths:
-      - build

 check-system-ubuntu:
   extends: .native_test_job_template
@@ -56,7 +50,7 @@ check-system-ubuntu:
     - job: build-system-ubuntu
       artifacts: true
   variables:
-    IMAGE: ubuntu2004
+    IMAGE: ubuntu2204
     MAKE_CHECK_ARGS: check

 avocado-system-ubuntu:
@@ -65,23 +59,21 @@ avocado-system-ubuntu:
     - job: build-system-ubuntu
       artifacts: true
   variables:
-    IMAGE: ubuntu2004
+    IMAGE: ubuntu2204
     MAKE_CHECK_ARGS: check-avocado

 build-system-debian:
-  extends: .native_build_job_template
+  extends:
+    - .native_build_job_template
+    - .native_build_artifact_template
   needs:
     job: amd64-debian-container
   variables:
     IMAGE: debian-amd64
     CONFIGURE_ARGS: --with-coroutine=sigaltstack
-    TARGETS: arm-softmmu avr-softmmu i386-softmmu mipsel-softmmu
-      riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensaeb-softmmu
+    TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu
+      sparc-softmmu xtensaeb-softmmu
     MAKE_CHECK_ARGS: check-build
-  artifacts:
-    expire_in: 2 days
-    paths:
-      - build

 check-system-debian:
   extends: .native_test_job_template
@@ -110,11 +102,13 @@ crash-test-debian:
     IMAGE: debian-amd64
   script:
     - cd build
-    - make check-venv
-    - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-i386
+    - make NINJA=":" check-venv
+    - pyvenv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386

 build-system-fedora:
-  extends: .native_build_job_template
+  extends:
+    - .native_build_job_template
+    - .native_build_artifact_template
   needs:
     job: amd64-fedora-container
   variables:
@@ -123,10 +117,6 @@ build-system-fedora:
     TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
       xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
     MAKE_CHECK_ARGS: check-build
-  artifacts:
-    expire_in: 2 days
-    paths:
-      - build

 check-system-fedora:
   extends: .native_test_job_template
@@ -155,12 +145,14 @@ crash-test-fedora:
     IMAGE: fedora
   script:
     - cd build
-    - make check-venv
-    - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
-    - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
+    - make NINJA=":" check-venv
+    - pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
+    - pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32

 build-system-centos:
-  extends: .native_build_job_template
+  extends:
+    - .native_build_job_template
+    - .native_build_artifact_template
   needs:
     job: amd64-centos8-container
   variables:
@@ -170,10 +162,6 @@ build-system-centos:
     TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu x86_64-softmmu
       rx-softmmu sh4-softmmu nios2-softmmu
     MAKE_CHECK_ARGS: check-build
-  artifacts:
-    expire_in: 2 days
-    paths:
-      - build

 check-system-centos:
   extends: .native_test_job_template
@@ -194,17 +182,15 @@ avocado-system-centos:
     MAKE_CHECK_ARGS: check-avocado

 build-system-opensuse:
-  extends: .native_build_job_template
+  extends:
+    - .native_build_job_template
+    - .native_build_artifact_template
   needs:
     job: amd64-opensuse-leap-container
   variables:
     IMAGE: opensuse-leap
     TARGETS: s390x-softmmu x86_64-softmmu aarch64-softmmu
     MAKE_CHECK_ARGS: check-build
-  artifacts:
-    expire_in: 2 days
-    paths:
-      - build

 check-system-opensuse:
   extends: .native_test_job_template
@@ -339,7 +325,9 @@ clang-user:
 # Split in three sets of build/check/avocado to limit the execution time of each
 # job
 build-cfi-aarch64:
-  extends: .native_build_job_template
+  extends:
+    - .native_build_job_template
+    - .native_build_artifact_template
   needs:
     - job: amd64-fedora-container
   variables:
@@ -355,10 +343,6 @@ build-cfi-aarch64:
     # skipped until the situation has been solved.
     QEMU_JOB_SKIPPED: 1
   timeout: 90m
-  artifacts:
-    expire_in: 2 days
-    paths:
-      - build

 check-cfi-aarch64:
   extends: .native_test_job_template
@@ -379,7 +363,9 @@ avocado-cfi-aarch64:
     MAKE_CHECK_ARGS: check-avocado

 build-cfi-ppc64-s390x:
-  extends: .native_build_job_template
+  extends:
+    - .native_build_job_template
+    - .native_build_artifact_template
   needs:
     - job: amd64-fedora-container
   variables:
@@ -395,10 +381,6 @@ build-cfi-ppc64-s390x:
     # skipped until the situation has been solved.
     QEMU_JOB_SKIPPED: 1
   timeout: 80m
-  artifacts:
-    expire_in: 2 days
-    paths:
-      - build

 check-cfi-ppc64-s390x:
   extends: .native_test_job_template
@@ -419,7 +401,9 @@ avocado-cfi-ppc64-s390x:
     MAKE_CHECK_ARGS: check-avocado

 build-cfi-x86_64:
-  extends: .native_build_job_template
+  extends:
+    - .native_build_job_template
+    - .native_build_artifact_template
   needs:
     - job: amd64-fedora-container
   variables:
@@ -431,10 +415,6 @@ build-cfi-x86_64:
     TARGETS: x86_64-softmmu
     MAKE_CHECK_ARGS: check-build
   timeout: 70m
-  artifacts:
-    expire_in: 2 days
-    paths:
-      - build

 check-cfi-x86_64:
   extends: .native_test_job_template
@@ -457,22 +437,21 @@ avocado-cfi-x86_64:
 tsan-build:
   extends: .native_build_job_template
   needs:
-    job: amd64-ubuntu2004-container
+    job: amd64-ubuntu2204-container
   variables:
-    IMAGE: ubuntu2004
-    CONFIGURE_ARGS: --enable-tsan --cc=clang-10 --cxx=clang++-10
+    IMAGE: ubuntu2204
+    CONFIGURE_ARGS: --enable-tsan --cc=clang --cxx=clang++
       --enable-trace-backends=ust --disable-slirp
     TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
-    MAKE_CHECK_ARGS: bench V=1

 # gcov is a GCC features
 gcov:
   extends: .native_build_job_template
   needs:
-    job: amd64-ubuntu2004-container
+    job: amd64-ubuntu2204-container
   timeout: 80m
   variables:
-    IMAGE: ubuntu2004
+    IMAGE: ubuntu2204
     CONFIGURE_ARGS: --enable-gcov
     TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu
     MAKE_CHECK_ARGS: check
@@ -548,7 +527,7 @@ build-without-defaults:
       --disable-strip
     TARGETS: avr-softmmu mips64-softmmu s390x-softmmu sh4-softmmu
       sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user
-    MAKE_CHECK_ARGS: check-unit check-qtest-avr check-qtest-mips64
+    MAKE_CHECK_ARGS: check

 build-libvhost-user:
   extends: .base_job_template
@@ -565,7 +544,9 @@ build-libvhost-user:
 # No targets are built here, just tools, docs, and unit tests. This
 # also feeds into the eventual documentation deployment steps later
 build-tools-and-docs-debian:
-  extends: .native_build_job_template
+  extends:
+    - .native_build_job_template
+    - .native_build_artifact_template
   needs:
     job: amd64-debian-container
     # when running on 'master' we use pre-existing container
@@ -575,10 +556,6 @@ build-tools-and-docs-debian:
     MAKE_CHECK_ARGS: check-unit ctags TAGS cscope
     CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools
     QEMU_JOB_PUBLISH: 1
-  artifacts:
-    expire_in: 2 days
-    paths:
-      - build

 # Prepare for GitLab pages deployment. Anything copied into the
 # "public" directory will be deployed to $USER.gitlab.io/$PROJECT
diff --git a/.gitlab-ci.d/cirrus.yml b/.gitlab-ci.d/cirrus.yml
index 502dfd612c..1507c928e5 100644
--- a/.gitlab-ci.d/cirrus.yml
+++ b/.gitlab-ci.d/cirrus.yml
@@ -44,19 +44,6 @@
   variables:
     QEMU_JOB_CIRRUS: 1

-x64-freebsd-12-build:
-  extends: .cirrus_build_job
-  variables:
-    NAME: freebsd-12
-    CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
-    CIRRUS_VM_IMAGE_SELECTOR: image_family
-    CIRRUS_VM_IMAGE_NAME: freebsd-12-4
-    CIRRUS_VM_CPUS: 8
-    CIRRUS_VM_RAM: 8G
-    UPDATE_COMMAND: pkg update; pkg upgrade -y
-    INSTALL_COMMAND: pkg install -y
-    TEST_TARGETS: check
-
 x64-freebsd-13-build:
   extends: .cirrus_build_job
   variables:
diff --git a/.gitlab-ci.d/cirrus/build.yml b/.gitlab-ci.d/cirrus/build.yml
index 7ef6af8d33..a9444902ec 100644
--- a/.gitlab-ci.d/cirrus/build.yml
+++ b/.gitlab-ci.d/cirrus/build.yml
@@ -32,6 +32,9 @@ build_task:
     - $MAKE -j$(sysctl -n hw.ncpu)
     - for TARGET in $TEST_TARGETS ;
       do
-        $MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1
-        || { cat meson-logs/testlog.txt; exit 1; } ;
+        $MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1 ;
       done
+  always:
+    build_result_artifacts:
+      path: build/meson-logs/*log.txt
+      type: text/plain
diff --git a/.gitlab-ci.d/cirrus/freebsd-12.vars b/.gitlab-ci.d/cirrus/freebsd-12.vars
deleted file mode 100644
index 8934e5d57f..0000000000
--- a/.gitlab-ci.d/cirrus/freebsd-12.vars
+++ /dev/null
@@ -1,16 +0,0 @@
-# THIS FILE WAS AUTO-GENERATED
-#
-#  $ lcitool variables freebsd-12 qemu
-#
-# https://gitlab.com/libvirt/libvirt-ci
-
-CCACHE='/usr/local/bin/ccache'
-CPAN_PKGS=''
-CROSS_PKGS=''
-MAKE='/usr/local/bin/gmake'
-NINJA='/usr/local/bin/ninja'
-PACKAGING_COMMAND='pkg'
-PIP3='/usr/local/bin/pip-3.8'
-PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract usbredir virglrenderer vte3 zstd'
-PYPI_PKGS=''
-PYTHON='/usr/local/bin/python3'
diff --git a/.gitlab-ci.d/cirrus/freebsd-13.vars b/.gitlab-ci.d/cirrus/freebsd-13.vars
index 65ce456c48..facb649f5b 100644
--- a/.gitlab-ci.d/cirrus/freebsd-13.vars
+++ b/.gitlab-ci.d/cirrus/freebsd-13.vars
@@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
 NINJA='/usr/local/bin/ninja'
 PACKAGING_COMMAND='pkg'
 PIP3='/usr/local/bin/pip-3.8'
-PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract usbredir virglrenderer vte3 zstd'
+PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd'
 PYPI_PKGS=''
 PYTHON='/usr/local/bin/python3'
diff --git a/.gitlab-ci.d/cirrus/macos-12.vars b/.gitlab-ci.d/cirrus/macos-12.vars
index 65b78fa08f..ceb294e153 100644
--- a/.gitlab-ci.d/cirrus/macos-12.vars
+++ b/.gitlab-ci.d/cirrus/macos-12.vars
@@ -11,6 +11,6 @@ MAKE='/opt/homebrew/bin/gmake'
 NINJA='/opt/homebrew/bin/ninja'
 PACKAGING_COMMAND='brew'
 PIP3='/opt/homebrew/bin/pip3'
-PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract usbredir vde vte3 zlib zstd'
+PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol tesseract usbredir vde vte3 xorriso zlib zstd'
 PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme'
 PYTHON='/opt/homebrew/bin/python3'
diff --git a/.gitlab-ci.d/container-template.yml b/.gitlab-ci.d/container-template.yml
index c434b9c8f3..8c1370b8f0 100644
--- a/.gitlab-ci.d/container-template.yml
+++ b/.gitlab-ci.d/container-template.yml
@@ -1,22 +1,21 @@
 .container_job_template:
   extends: .base_job_template
-  image: docker:stable
+  image: docker:latest
   stage: containers
   services:
     - docker:dind
   before_script:
     - export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
-    - export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/$NAME:latest"
+    - export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/qemu/$NAME:latest"
     - apk add python3
-    - docker info
     - docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
+    - until docker info; do sleep 1; done
   script:
     - echo "TAG:$TAG"
     - echo "COMMON_TAG:$COMMON_TAG"
-    - ./tests/docker/docker.py --engine docker build
-      -t "qemu/$NAME" -f "tests/docker/dockerfiles/$NAME.docker"
-      -r $CI_REGISTRY/qemu-project/qemu
-    - docker tag "qemu/$NAME" "$TAG"
+    - docker build --tag "$TAG" --cache-from "$TAG" --cache-from "$COMMON_TAG"
+      --build-arg BUILDKIT_INLINE_CACHE=1
+      -f "tests/docker/dockerfiles/$NAME.docker" "."
     - docker push "$TAG"
   after_script:
     - docker logout
diff --git a/.gitlab-ci.d/containers.yml b/.gitlab-ci.d/containers.yml
index 96d2a3b58b..8637a13d86 100644
--- a/.gitlab-ci.d/containers.yml
+++ b/.gitlab-ci.d/containers.yml
@@ -13,10 +13,10 @@ amd64-debian-container:
   variables:
     NAME: debian-amd64

-amd64-ubuntu2004-container:
+amd64-ubuntu2204-container:
   extends: .container_job_template
   variables:
-    NAME: ubuntu2004
+    NAME: ubuntu2204

 amd64-opensuse-leap-container:
   extends: .container_job_template
diff --git a/.gitlab-ci.d/crossbuild-template.yml b/.gitlab-ci.d/crossbuild-template.yml
index d07989e3b0..4f93b9e4e5 100644
--- a/.gitlab-ci.d/crossbuild-template.yml
+++ b/.gitlab-ci.d/crossbuild-template.yml
@@ -49,3 +49,14 @@
         nios2-linux-user or1k-linux-user ppc-linux-user sparc-linux-user
         xtensa-linux-user $CROSS_SKIP_TARGETS"
     - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
+
+# We can still run some tests on some of our cross build jobs. They can add this
+# template to their extends to save the build logs and test results
+.cross_test_artifacts:
+  artifacts:
+    name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
+    expire_in: 7 days
+    paths:
+      - build/meson-logs/testlog.txt
+    reports:
+      junit: build/meson-logs/testlog.junit.xml
diff --git a/.gitlab-ci.d/crossbuilds.yml b/.gitlab-ci.d/crossbuilds.yml
index 101416080c..61b8ac86ee 100644
--- a/.gitlab-ci.d/crossbuilds.yml
+++ b/.gitlab-ci.d/crossbuilds.yml
@@ -1,13 +1,6 @@
 include:
   - local: '/.gitlab-ci.d/crossbuild-template.yml'

-cross-armel-system:
-  extends: .cross_system_build_job
-  needs:
-    job: armel-debian-cross-container
-  variables:
-    IMAGE: debian-armel-cross
-
 cross-armel-user:
   extends: .cross_user_build_job
   needs:
@@ -15,13 +8,6 @@ cross-armel-user:
   variables:
     IMAGE: debian-armel-cross

-cross-armhf-system:
-  extends: .cross_system_build_job
-  needs:
-    job: armhf-debian-cross-container
-  variables:
-    IMAGE: debian-armhf-cross
-
 cross-armhf-user:
   extends: .cross_user_build_job
   needs:
@@ -43,16 +29,10 @@ cross-arm64-user:
   variables:
     IMAGE: debian-arm64-cross

-cross-i386-system:
-  extends: .cross_system_build_job
-  needs:
-    job: i386-fedora-cross-container
-  variables:
-    IMAGE: fedora-i386-cross
-    MAKE_CHECK_ARGS: check-qtest
-
 cross-i386-user:
-  extends: .cross_user_build_job
+  extends:
+    - .cross_user_build_job
+    - .cross_test_artifacts
   needs:
     job: i386-fedora-cross-container
   variables:
@@ -60,7 +40,9 @@ cross-i386-user:
     MAKE_CHECK_ARGS: check

 cross-i386-tci:
-  extends: .cross_accel_build_job
+  extends:
+    - .cross_accel_build_job
+    - .cross_test_artifacts
   timeout: 60m
   needs:
     job: i386-fedora-cross-container
diff --git a/.gitlab-ci.d/custom-runners.yml b/.gitlab-ci.d/custom-runners.yml
index 9fdc476c48..8e5b9500f4 100644
--- a/.gitlab-ci.d/custom-runners.yml
+++ b/.gitlab-ci.d/custom-runners.yml
@@ -15,12 +15,15 @@ variables:

 # All custom runners can extend this template to upload the testlog
 # data as an artifact and also feed the junit report
-.custom_artifacts_template:
+.custom_runner_template:
+  extends: .base_job_template
   artifacts:
     name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
     expire_in: 7 days
+    when: always
     paths:
-      - build/meson-logs/testlog.txt
+      - build/build.ninja
+      - build/meson-logs
     reports:
       junit: build/meson-logs/testlog.junit.xml
diff --git a/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml b/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml
index 068b0c4335..367424db78 100644
--- a/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml
+++ b/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml
@@ -1,4 +1,9 @@
+# All centos-stream-8 jobs should run successfully in an environment
+# setup by the scripts/ci/setup/stream/8/build-environment.yml task
+# "Installation of extra packages to build QEMU"
+
 centos-stream-8-x86_64:
+  extends: .custom_runner_template
   allow_failure: true
   needs: []
   stage: build
@@ -8,15 +13,6 @@ centos-stream-8-x86_64:
   rules:
     - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
     - if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE"
-  artifacts:
-    name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
-    when: on_failure
-    expire_in: 7 days
-    paths:
-      - build/tests/results/latest/results.xml
-      - build/tests/results/latest/test-results
-    reports:
-      junit: build/tests/results/latest/results.xml
   before_script:
     - JOBS=$(expr $(nproc) + 1)
   script:
@@ -25,6 +21,4 @@ centos-stream-8-x86_64:
     - ../scripts/ci/org.centos/stream/8/x86_64/configure
       || { cat config.log meson-logs/meson-log.txt; exit 1; }
     - make -j"$JOBS"
-    - make NINJA=":" check
-      || { cat meson-logs/testlog.txt; exit 1; } ;
-    - ../scripts/ci/org.centos/stream/8/x86_64/test-avocado
+    - make NINJA=":" check check-avocado
diff --git a/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml b/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml
index f512eaeaa3..cdae6c5212 100644
--- a/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml
+++ b/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml
@@ -3,7 +3,7 @@
 # "Install basic packages to build QEMU on Ubuntu 20.04/20.04"

 ubuntu-20.04-s390x-all-linux-static:
-  extends: .custom_artifacts_template
+  extends: .custom_runner_template
   needs: []
   stage: build
   tags:
@@ -24,7 +24,7 @@ ubuntu-20.04-s390x-all-linux-static:
     - make --output-sync -j`nproc` check

 ubuntu-20.04-s390x-all:
-  extends: .custom_artifacts_template
+  extends: .custom_runner_template
   needs: []
   stage: build
   tags:
@@ -43,7 +43,7 @@ ubuntu-20.04-s390x-all:
     - make --output-sync -j`nproc` check

 ubuntu-20.04-s390x-alldbg:
-  extends: .custom_artifacts_template
+  extends: .custom_runner_template
   needs: []
   stage: build
   tags:
@@ -66,7 +66,7 @@ ubuntu-20.04-s390x-alldbg:
     - make --output-sync -j`nproc` check

 ubuntu-20.04-s390x-clang:
-  extends: .custom_artifacts_template
+  extends: .custom_runner_template
   needs: []
   stage: build
   tags:
@@ -108,7 +108,7 @@ ubuntu-20.04-s390x-tci:
     - make --output-sync -j`nproc`

 ubuntu-20.04-s390x-notcg:
-  extends: .custom_artifacts_template
+  extends: .custom_runner_template
   needs: []
   stage: build
   tags:
diff --git a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml
index 42137aaf2a..b8a0d75162 100644
--- a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml
+++ b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml
@@ -1,9 +1,9 @@
 # All ubuntu-22.04 jobs should run successfully in an environment
 # setup by the scripts/ci/setup/qemu/build-environment.yml task
-# "Install basic packages to build QEMU on Ubuntu 20.04"
+# "Install basic packages to build QEMU on Ubuntu 22.04"

 ubuntu-22.04-aarch32-all:
-  extends: .custom_artifacts_template
+  extends: .custom_runner_template
   needs: []
   stage: build
   tags:
diff --git a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml
index 8ba85be440..374b0956c3 100644
--- a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml
+++ b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml
@@ -1,9 +1,9 @@
-# All ubuntu-20.04 jobs should run successfully in an environment
+# All ubuntu-22.04 jobs should run successfully in an environment
 # setup by the scripts/ci/setup/qemu/build-environment.yml task
-# "Install basic packages to build QEMU on Ubuntu 20.04"
+# "Install basic packages to build QEMU on Ubuntu 22.04"

 ubuntu-22.04-aarch64-all-linux-static:
-  extends: .custom_artifacts_template
+  extends: .custom_runner_template
   needs: []
   stage: build
   tags:
@@ -24,7 +24,7 @@ ubuntu-22.04-aarch64-all-linux-static:
     - make --output-sync -j`nproc --ignore=40` check

 ubuntu-22.04-aarch64-all:
-  extends: .custom_artifacts_template
+  extends: .custom_runner_template
   needs: []
   stage: build
   tags:
@@ -45,8 +45,30 @@ ubuntu-22.04-aarch64-all:
     - make --output-sync -j`nproc --ignore=40`
     - make --output-sync -j`nproc --ignore=40` check

+ubuntu-22.04-aarch64-without-defaults:
+  extends: .custom_runner_template
+  needs: []
+  stage: build
+  tags:
+    - ubuntu_22.04
+    - aarch64
+  rules:
+    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
+      when: manual
+      allow_failure: true
+    - if: "$AARCH64_RUNNER_AVAILABLE"
"$AARCH64_RUNNER_AVAILABLE" + when: manual + allow_failure: true + script: + - mkdir build + - cd build + - ../configure --disable-user --without-default-devices --without-default-features + || { cat config.log meson-logs/meson-log.txt; exit 1; } + - make --output-sync -j`nproc --ignore=40` + - make --output-sync -j`nproc --ignore=40` check + ubuntu-22.04-aarch64-alldbg: - extends: .custom_artifacts_template + extends: .custom_runner_template needs: [] stage: build tags: @@ -65,7 +87,7 @@ ubuntu-22.04-aarch64-alldbg: - make --output-sync -j`nproc --ignore=40` check ubuntu-22.04-aarch64-clang: - extends: .custom_artifacts_template + extends: .custom_runner_template needs: [] stage: build tags: @@ -107,7 +129,7 @@ ubuntu-22.04-aarch64-tci: - make --output-sync -j`nproc --ignore=40` ubuntu-22.04-aarch64-notcg: - extends: .custom_artifacts_template + extends: .custom_runner_template needs: [] stage: build tags: @@ -123,7 +145,7 @@ ubuntu-22.04-aarch64-notcg: script: - mkdir build - cd build - - ../configure --disable-tcg + - ../configure --disable-tcg --with-devices-aarch64=minimal || { cat config.log meson-logs/meson-log.txt; exit 1; } - make --output-sync -j`nproc --ignore=40` - make --output-sync -j`nproc --ignore=40` check diff --git a/.gitlab-ci.d/edk2.yml b/.gitlab-ci.d/edk2.yml deleted file mode 100644 index 314e101745..0000000000 --- a/.gitlab-ci.d/edk2.yml +++ /dev/null @@ -1,85 +0,0 @@ -# All jobs needing docker-edk2 must use the same rules it uses. -.edk2_job_rules: - rules: - # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set - - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"' - when: never - - # In forks, if QEMU_CI=1 is set, then create manual job - # if any of the files affecting the build are touched - - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"' - changes: - - .gitlab-ci.d/edk2.yml - - .gitlab-ci.d/edk2/Dockerfile - - roms/edk2/* - when: manual - - # In forks, if QEMU_CI=1 is set, then create manual job - # if the branch/tag starts with 'edk2' - - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^edk2/' - when: manual - - # In forks, if QEMU_CI=1 is set, then create manual job - # if last commit msg contains 'EDK2' (case insensitive) - - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /edk2/i' - when: manual - - # Run if any files affecting the build output are touched - - changes: - - .gitlab-ci.d/edk2.yml - - .gitlab-ci.d/edk2/Dockerfile - - roms/edk2/* - when: on_success - - # Run if the branch/tag starts with 'edk2' - - if: '$CI_COMMIT_REF_NAME =~ /^edk2/' - when: on_success - - # Run if last commit msg contains 'EDK2' (case insensitive) - - if: '$CI_COMMIT_MESSAGE =~ /edk2/i' - when: on_success - -docker-edk2: - extends: .edk2_job_rules - stage: containers - image: docker:19.03.1 - services: - - docker:19.03.1-dind - variables: - GIT_DEPTH: 3 - IMAGE_TAG: $CI_REGISTRY_IMAGE:edk2-cross-build - # We don't use TLS - DOCKER_HOST: tcp://docker:2375 - DOCKER_TLS_CERTDIR: "" - before_script: - - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY - script: - - docker pull $IMAGE_TAG || true - - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA - --tag $IMAGE_TAG .gitlab-ci.d/edk2 - - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA - - docker push $IMAGE_TAG - -build-edk2: - extends: .edk2_job_rules - stage: build - needs: ['docker-edk2'] - artifacts: - paths: # 'artifacts.zip' will 
-      - pc-bios/edk2*bz2
-      - pc-bios/edk2-licenses.txt
-      - edk2-stdout.log
-      - edk2-stderr.log
-  image: $CI_REGISTRY_IMAGE:edk2-cross-build
-  variables:
-    GIT_DEPTH: 3
-  script: # Clone the required submodules and build EDK2
-    - git submodule update --init roms/edk2
-    - git -C roms/edk2 submodule update --init --
-      ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
-      BaseTools/Source/C/BrotliCompress/brotli
-      CryptoPkg/Library/OpensslLib/openssl
-      MdeModulePkg/Library/BrotliCustomDecompressLib/brotli
-    - export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1))
-    - echo "=== Using ${JOBS} simultaneous jobs ==="
-    - make -j${JOBS} -C roms efi 2>&1 1>edk2-stdout.log | tee -a edk2-stderr.log >&2
diff --git a/.gitlab-ci.d/edk2/Dockerfile b/.gitlab-ci.d/edk2/Dockerfile
deleted file mode 100644
index bbe50ff832..0000000000
--- a/.gitlab-ci.d/edk2/Dockerfile
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Docker image to cross-compile EDK2 firmware binaries
-#
-FROM ubuntu:18.04
-
-MAINTAINER Philippe Mathieu-Daudé
-
-# Install packages required to build EDK2
-RUN apt update \
-    && \
-    \
-    DEBIAN_FRONTEND=noninteractive \
-    apt install --assume-yes --no-install-recommends \
-        build-essential \
-        ca-certificates \
-        dos2unix \
-        gcc-aarch64-linux-gnu \
-        gcc-arm-linux-gnueabi \
-        git \
-        iasl \
-        make \
-        nasm \
-        python3 \
-        uuid-dev \
-    && \
-    \
-    rm -rf /var/lib/apt/lists/*
diff --git a/.gitlab-ci.d/opensbi.yml b/.gitlab-ci.d/opensbi.yml
index 04ed5a3ea1..b4d7eef688 100644
--- a/.gitlab-ci.d/opensbi.yml
+++ b/.gitlab-ci.d/opensbi.yml
@@ -42,17 +42,15 @@
 docker-opensbi:
   extends: .opensbi_job_rules
   stage: containers
-  image: docker:19.03.1
+  image: docker:latest
   services:
-    - docker:19.03.1-dind
+    - docker:dind
   variables:
     GIT_DEPTH: 3
     IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build
-    # We don't use TLS
-    DOCKER_HOST: tcp://docker:2375
-    DOCKER_TLS_CERTDIR: ""
   before_script:
     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
+    - until docker info; do sleep 1; done
   script:
     - docker pull $IMAGE_TAG || true
    - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
diff --git a/.gitlab-ci.d/opensbi/Dockerfile b/.gitlab-ci.d/opensbi/Dockerfile
index 4ba8a4de86..5ccf4151f4 100644
--- a/.gitlab-ci.d/opensbi/Dockerfile
+++ b/.gitlab-ci.d/opensbi/Dockerfile
@@ -15,6 +15,7 @@ RUN apt update \
         ca-certificates \
         git \
         make \
+        python3 \
         wget \
     && \
     \
diff --git a/.gitlab-ci.d/qemu-project.yml b/.gitlab-ci.d/qemu-project.yml
index 691d9bf5dc..4d914c4897 100644
--- a/.gitlab-ci.d/qemu-project.yml
+++ b/.gitlab-ci.d/qemu-project.yml
@@ -1,10 +1,16 @@
 # This file contains the set of jobs run by the QEMU project:
 # https://gitlab.com/qemu-project/qemu/-/pipelines

+variables:
+  RUNNER_TAG: ""
+
+default:
+  tags:
+    - $RUNNER_TAG
+
 include:
   - local: '/.gitlab-ci.d/base.yml'
   - local: '/.gitlab-ci.d/stages.yml'
-  - local: '/.gitlab-ci.d/edk2.yml'
   - local: '/.gitlab-ci.d/opensbi.yml'
   - local: '/.gitlab-ci.d/containers.yml'
   - local: '/.gitlab-ci.d/crossbuilds.yml'
diff --git a/.gitlab-ci.d/windows.yml b/.gitlab-ci.d/windows.yml
index 87235e43b4..472bacd2e2 100644
--- a/.gitlab-ci.d/windows.yml
+++ b/.gitlab-ci.d/windows.yml
@@ -59,6 +59,7 @@ msys2-64bit:
       mingw-w64-x86_64-SDL2
       mingw-w64-x86_64-SDL2_image
       mingw-w64-x86_64-snappy
+      mingw-w64-x86_64-spice
       mingw-w64-x86_64-usbredir
       mingw-w64-x86_64-zstd "
   - $env:CHERE_INVOKING = 'yes'  # Preserve the current working directory
@@ -108,6 +109,7 @@ msys2-32bit:
       mingw-w64-i686-SDL2
       mingw-w64-i686-SDL2_image
      mingw-w64-i686-snappy
+      mingw-w64-i686-spice
       mingw-w64-i686-usbredir
       mingw-w64-i686-zstd "
   - $env:CHERE_INVOKING = 'yes'  # Preserve the current working directory
diff --git a/.gitmodules b/.gitmodules
index 6ce5bf49c5..f8b2ddf387 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -13,8 +13,8 @@
 [submodule "roms/qemu-palcode"]
 	path = roms/qemu-palcode
 	url = https://gitlab.com/qemu-project/qemu-palcode.git
-[submodule "dtc"]
-	path = dtc
+[submodule "subprojects/dtc"]
+	path = subprojects/dtc
 	url = https://gitlab.com/qemu-project/dtc.git
 [submodule "roms/u-boot"]
 	path = roms/u-boot
@@ -25,8 +25,8 @@
 [submodule "roms/QemuMacDrivers"]
 	path = roms/QemuMacDrivers
 	url = https://gitlab.com/qemu-project/QemuMacDrivers.git
-[submodule "ui/keycodemapdb"]
-	path = ui/keycodemapdb
+[submodule "subprojects/keycodemapdb"]
+	path = subprojects/keycodemapdb
 	url = https://gitlab.com/qemu-project/keycodemapdb.git
 [submodule "roms/seabios-hppa"]
 	path = roms/seabios-hppa
@@ -49,9 +49,6 @@
 [submodule "roms/qboot"]
 	path = roms/qboot
 	url = https://gitlab.com/qemu-project/qboot.git
-[submodule "meson"]
-	path = meson
-	url = https://gitlab.com/qemu-project/meson.git
 [submodule "roms/vbootrom"]
 	path = roms/vbootrom
 	url = https://gitlab.com/qemu-project/vbootrom.git
diff --git a/.mailmap b/.mailmap
index fad2aff5aa..b57da4827e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -54,8 +54,10 @@ Aleksandar Markovic
 Aleksandar Rikalo
 Aleksandar Rikalo
 Alexander Graf
+Ani Sinha
 Anthony Liguori
 Anthony Liguori
 Christian Borntraeger
+Damien Hedde
 Filip Bozuta
 Frederic Konrad
 Frederic Konrad
@@ -76,6 +78,7 @@ Philippe Mathieu-Daudé
 Philippe Mathieu-Daudé
 Stefan Brankovic
 Yongbok Kim
+Taylor Simpson

 # Also list preferred name forms where people have changed their
 # git author config, or had utf8/latin1 encoding issues.
diff --git a/.travis.yml b/.travis.yml
index cf088ba4cf..b958eca5de 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -237,13 +237,15 @@ jobs:
           - libglib2.0-dev
           - libgnutls28-dev
          - ninja-build
+          - flex
+          - bison
       env:
         - CONFIG="--disable-containers --disable-system"

     - name: "[s390x] Clang (disable-tcg)"
       arch: s390x
       dist: focal
-      compiler: clang
+      compiler: clang-10
       addons:
         apt_packages:
           - libaio-dev
@@ -269,6 +271,7 @@
           - libvdeplug-dev
           - libvte-2.91-dev
           - ninja-build
+          - clang-10
       env:
         - TEST_CMD="make check-unit"
         - CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools
diff --git a/MAINTAINERS b/MAINTAINERS
index c6e6549f06..4b025a7b63 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -64,6 +64,21 @@
 L: qemu-devel@nongnu.org
 F: *
 F: */

+Project policy and developer guides
+R: Alex Bennée
+R: Daniel P. Berrangé
+R: Thomas Huth
+R: Markus Armbruster
+R: Philippe Mathieu-Daudé
+R: Juan Quintela
+W: https://www.qemu.org/docs/master/devel/index.html
+S: Odd Fixes
+F: docs/devel/style.rst
+F: docs/devel/code-of-conduct.rst
+F: docs/devel/conflict-resolution.rst
+F: docs/devel/submitting-a-patch.rst
+F: docs/devel/submitting-a-pull-request.rst
+
 Responsible Disclosure, Reporting Security Issues
 -------------------------------------------------
 W: https://wiki.qemu.org/SecurityProcess
@@ -123,6 +138,7 @@ M: Richard Henderson
 R: Paolo Bonzini
 S: Maintained
 F: softmmu/cpus.c
+F: softmmu/watchpoint.c
 F: cpus-common.c
 F: page-vary.c
 F: page-vary-common.c
@@ -135,10 +151,15 @@
 F: docs/devel/decodetree.rst
 F: docs/devel/tcg*
 F: include/exec/cpu*.h
 F: include/exec/exec-all.h
+F: include/exec/tb-flush.h
+F: include/exec/target_long.h
 F: include/exec/helper*.h
 F: include/sysemu/cpus.h
 F: include/sysemu/tcg.h
 F: include/hw/core/tcg-cpu-ops.h
+F: host/include/*/host/cpuinfo.h
+F: util/cpuinfo-*.c
+F: include/tcg/

 FPU emulation
 M: Aurelien Jarno
@@ -197,7 +218,7 @@
 F: tests/tcg/cris/
 F: disas/cris.c

 Hexagon TCG CPUs
-M: Taylor Simpson
+M: Brian Cain
 S: Supported
 F: target/hexagon/
 X: target/hexagon/idef-parser/
@@ -207,6 +228,7 @@
 F: tests/tcg/hexagon/
 F: disas/hexagon.c
 F: configs/targets/hexagon-linux-user/default.mak
 F: docker/dockerfiles/debian-hexagon-cross.docker
+F: gdb-xml/hexagon*.xml

 Hexagon idef-parser
 M: Alessandro Di Federico
@@ -227,6 +249,7 @@ M: Xiaojuan Yang
 S: Maintained
 F: target/loongarch/
 F: tests/tcg/loongarch64/
+F: tests/avocado/machine_loongarch.py

 M68K TCG CPUs
 M: Laurent Vivier
@@ -254,9 +277,9 @@
 F: docs/system/cpu-models-mips.rst.inc
 F: tests/tcg/mips/

 NiosII TCG CPUs
-M: Chris Wulff
-M: Marek Vasut
-S: Maintained
+R: Chris Wulff
+R: Marek Vasut
+S: Orphan
 F: target/nios2/
 F: hw/nios2/
 F: disas/nios2.c
@@ -310,7 +333,7 @@ F: target/riscv/xthead*.decode

 RISC-V XVentanaCondOps extension
 M: Philipp Tomsich
 L: qemu-riscv@nongnu.org
-S: Supported
+S: Maintained
 F: target/riscv/XVentanaCondOps.decode
 F: target/riscv/insn_trans/trans_xventanacondops.c.inc
@@ -368,6 +391,7 @@
 S: Maintained
 F: target/xtensa/
 F: hw/xtensa/
 F: tests/tcg/xtensa/
+F: tests/tcg/xtensaeb/
 F: disas/xtensa.c
 F: include/hw/xtensa/xtensa-isa.h
 F: configs/devices/xtensa*/default.mak
@@ -442,6 +466,15 @@
 F: target/i386/kvm/
 F: target/i386/sev*
 F: scripts/kvm/vmxcap

+Xen emulation on X86 KVM CPUs
+M: David Woodhouse
+M: Paul Durrant
+S: Supported
+F: include/sysemu/kvm_xen.h
+F: target/i386/kvm/xen*
+F: hw/i386/kvm/xen*
+F: tests/avocado/xen_guest.py
+
 Guest CPU Cores (other accelerators)
 ------------------------------------
 Overall
@@ -911,10 +944,12 @@ SBSA-REF
 M: Radoslaw Biernacki
 M: Peter Maydell
 R: Leif Lindholm
+R: Marcin Juszkiewicz
 L: qemu-arm@nongnu.org
 S: Maintained
 F: hw/arm/sbsa-ref.c
 F: docs/system/arm/sbsa.rst
+F: tests/avocado/machine_aarch64_sbsaref.py

 Sharp SL-5500 (Collie) PDA
 M: Peter Maydell
@@ -998,12 +1033,6 @@
 S: Maintained
 F: hw/ssi/xlnx-versal-ospi.c
 F: include/hw/ssi/xlnx-versal-ospi.h

-ARM ACPI Subsystem
-M: Shannon Zhao
-L: qemu-arm@nongnu.org
-S: Maintained
-F: hw/arm/virt-acpi-build.c
-
 STM32F100
 M: Alexandre Iooss
 L: qemu-arm@nongnu.org
@@ -1090,7 +1119,7 @@
 F: include/hw/misc/pca9552*.h
 F: hw/net/ftgmac100.c
 F: include/hw/net/ftgmac100.h
 F: docs/system/arm/aspeed.rst
-F: tests/qtest/*aspeed*
+F: tests/*/*aspeed*
 F: hw/arm/fby35.c

 NRF51
@@ -1399,6 +1428,7 @@ M: Daniel Henrique Barboza
 R: Cédric Le Goater
 R: David Gibson
 R: Greg Kurz
+R: Harsh Prateek Bora
 L: qemu-ppc@nongnu.org
 S: Odd Fixes
 F: hw/*/spapr*
@@ -1679,6 +1709,7 @@
 F: hw/i2c/smbus_ich9.c
 F: hw/acpi/piix4.c
 F: hw/acpi/ich9*.c
 F: include/hw/acpi/ich9*.h
+F: include/hw/southbridge/ich9.h
 F: include/hw/southbridge/piix.h
 F: hw/isa/apm.c
 F: include/hw/isa/apm.h
@@ -1711,6 +1742,7 @@
 F: include/hw/char/parallel.h
 F: include/hw/dma/i8257.h
 F: include/hw/i2c/pm_smbus.h
 F: include/hw/input/i8042.h
+F: include/hw/intc/ioapic*
 F: include/hw/isa/i8259_internal.h
 F: include/hw/isa/superio.h
 F: include/hw/timer/hpet.h
@@ -1795,7 +1827,7 @@ F: hw/misc/edu.c
 IDE
 M: John Snow
 L: qemu-block@nongnu.org
-S: Supported
+S: Odd Fixes
 F: include/hw/ide.h
 F: include/hw/ide/
 F: hw/ide/
@@ -1820,7 +1852,7 @@ T: git https://github.com/cminyard/qemu.git master-ipmi-rebase
 Floppy
 M: John Snow
 L: qemu-block@nongnu.org
-S: Supported
+S: Odd Fixes
 F: hw/block/fdc.c
 F: hw/block/fdc-internal.h
 F: hw/block/fdc-isa.c
@@ -1871,7 +1903,7 @@ F: hw/pci/pcie_doe.c
 ACPI/SMBIOS
 M: Michael S. Tsirkin
 M: Igor Mammedov
-R: Ani Sinha
+R: Ani Sinha
 S: Supported
 F: include/hw/acpi/*
 F: include/hw/firmware/smbios.h
@@ -1889,6 +1921,18 @@
 F: docs/specs/acpi_nvdimm.rst
 F: docs/specs/acpi_pci_hotplug.rst
 F: docs/specs/acpi_hw_reduced_hotplug.rst

+ARM ACPI Subsystem
+M: Shannon Zhao
+L: qemu-arm@nongnu.org
+S: Maintained
+F: hw/arm/virt-acpi-build.c
+
+RISC-V ACPI Subsystem
+M: Sunil V L
+L: qemu-riscv@nongnu.org
+S: Maintained
+F: hw/riscv/virt-acpi-build.c
+
 ACPI/VIOT
 M: Jean-Philippe Brucker
 S: Supported
@@ -1896,7 +1940,7 @@ F: hw/acpi/viot.c
 F: hw/acpi/viot.h

 ACPI/AVOCADO/BIOSBITS
-M: Ani Sinha
+M: Ani Sinha
 M: Michael S. Tsirkin
 S: Supported
 F: tests/avocado/acpi-bits/*
@@ -2040,6 +2084,10 @@
 F: backends/vhost-user.c
 F: include/sysemu/vhost-user-backend.h
 F: subprojects/libvhost-user/

+vhost-shadow-virtqueue
+R: Eugenio Pérez
+F: hw/virtio/vhost-shadow-virtqueue.*
+
 virtio
 M: Michael S. Tsirkin
 S: Supported
@@ -2098,7 +2146,6 @@ T: git https://github.com/borntraeger/qemu.git s390-next
 L: qemu-s390x@nongnu.org

 virtiofs
-M: Dr. David Alan Gilbert
 M: Stefan Hajnoczi
 S: Supported
 F: hw/virtio/vhost-user-fs*
@@ -2193,6 +2240,7 @@ F: tests/qtest/fuzz-megasas-test.c

 Network packet abstractions
 M: Dmitry Fleytman
+R: Akihiko Odaki
 S: Maintained
 F: include/net/eth.h
 F: net/eth.c
@@ -2216,14 +2264,28 @@ F: docs/specs/rocker.txt

 e1000x
 M: Dmitry Fleytman
+R: Akihiko Odaki
 S: Maintained
 F: hw/net/e1000x*

 e1000e
 M: Dmitry Fleytman
+R: Akihiko Odaki
 S: Maintained
 F: hw/net/e1000e*
 F: tests/qtest/fuzz-e1000e-test.c
+F: tests/qtest/e1000e-test.c
+F: tests/qtest/libqos/e1000e.*
+
+igb
+M: Akihiko Odaki
+R: Sriram Yagnaraman
+S: Maintained
+F: docs/system/devices/igb.rst
+F: hw/net/igb*
+F: tests/avocado/netdev-ethtool.py
+F: tests/qtest/igb-test.c
+F: tests/qtest/libqos/igb.c

 eepro100
 M: Stefan Weil
@@ -2487,6 +2549,7 @@ Subsystems
 ----------
 Overall Audio backends
 M: Gerd Hoffmann
+M: Marc-André Lureau
 S: Odd Fixes
 F: audio/
 X: audio/alsaaudio.c
@@ -2510,7 +2573,7 @@ Core Audio framework backend
 M: Gerd Hoffmann
 M: Philippe Mathieu-Daudé
 R: Christian Schoenebeck
-R: Akihiko Odaki
+R: Akihiko Odaki
 S: Odd Fixes
 F: audio/coreaudio.c
@@ -2632,8 +2695,8 @@ T: git https://gitlab.com/jsnow/qemu.git jobs
 T: git https://gitlab.com/vsementsov/qemu.git block

 Compute Express Link
-M: Ben Widawsky
 M: Jonathan Cameron
+R: Fan Ni
 S: Supported
 F: hw/cxl/
 F: hw/mem/cxl_type3.c
@@ -2731,9 +2794,11 @@
 S: Maintained
 F: docs/system/gdb.rst
 F: gdbstub/*
 F: include/exec/gdbstub.h
+F: include/gdbstub/*
 F: gdb-xml/
 F: tests/tcg/multiarch/gdbstub/
 F: scripts/feature_to_c.sh
+F: scripts/probe-gdb-support.py

 Memory API
 M: Paolo Bonzini
@@ -2781,6 +2846,7 @@ F: docs/spice-port-fqdn.txt

 Graphics
 M: Gerd Hoffmann
+M: Marc-André Lureau
 S: Odd Fixes
 F: ui/
 F: include/ui/
@@ -2791,7 +2857,7 @@ F: docs/devel/ui.rst
 Cocoa graphics
 M: Peter Maydell
 M: Philippe Mathieu-Daudé
-R: Akihiko Odaki
+R: Akihiko Odaki
 S: Odd Fixes
 F: ui/cocoa.m
@@ -2816,13 +2882,15 @@ F: qapi/run-state.json

 Read, Copy, Update (RCU)
 M: Paolo Bonzini
 S: Maintained
+F: docs/devel/lockcnt.txt
+F: docs/devel/rcu.txt
 F: include/qemu/rcu*.h
 F: tests/unit/rcutorture.c
 F: tests/unit/test-rcu-*.c
 F: util/rcu.c

 Human Monitor (HMP)
-M: Dr. David Alan Gilbert
+M: Dr. David Alan Gilbert
 S: Maintained
 F: monitor/monitor-internal.h
 F: monitor/misc.c
@@ -2862,9 +2930,11 @@ T: git https://gitlab.com/ehabkost/qemu.git machine-next
 Cryptodev Backends
 M: Gonglei
+M: zhenwei pi
 S: Maintained
 F: include/sysemu/cryptodev*.h
 F: backends/cryptodev*.c
+F: qapi/cryptodev.json

 Python library
 M: John Snow
@@ -3093,7 +3163,8 @@ F: scripts/checkpatch.pl

 Migration
 M: Juan Quintela
-M: Dr. David Alan Gilbert
+R: Peter Xu
+R: Leonardo Bras
 S: Maintained
 F: hw/core/vmstate-if.c
 F: include/hw/vmstate-if.h
@@ -3237,6 +3308,7 @@
 S: Supported
 F: replay/*
 F: block/blkreplay.c
 F: net/filter-replay.c
+F: include/exec/replay-core.h
 F: include/sysemu/replay.h
 F: docs/devel/replay.rst
 F: docs/system/replay.rst
@@ -3301,8 +3373,6 @@
 F: roms/edk2
 F: roms/edk2-*
 F: tests/data/uefi-boot-images/
 F: tests/uefi-test-tools/
-F: .gitlab-ci.d/edk2.yml
-F: .gitlab-ci.d/edk2/

 VT-d Emulation
 M: Michael S. Tsirkin
@@ -3313,6 +3383,10 @@
 F: hw/i386/intel_iommu.c
 F: hw/i386/intel_iommu_internal.h
 F: include/hw/i386/intel_iommu.h

+AMD-Vi Emulation
+S: Orphan
+F: hw/i386/amd_iommu.?
+
 OpenSBI Firmware
 M: Bin Meng
 S: Supported
@@ -3322,7 +3396,7 @@ F: .gitlab-ci.d/opensbi/

 Clock framework
 M: Luc Michel
-R: Damien Hedde
+R: Damien Hedde
 S: Maintained
 F: include/hw/clock.h
 F: include/hw/qdev-clock.h
@@ -3760,6 +3834,7 @@
 F: scripts/ci/
 F: tests/docker/
 F: tests/vm/
 F: tests/lcitool/
+F: tests/avocado/tuxrun_baselines.py
 F: scripts/archive-source.sh
 F: docs/devel/testing.rst
 W: https://gitlab.com/qemu-project/qemu/pipelines
@@ -3776,8 +3851,7 @@ W: https://cirrus-ci.com/github/qemu/qemu
 Windows Hosted Continuous Integration
 M: Yonggang Luo
 S: Maintained
-F: .cirrus.yml
-W: https://cirrus-ci.com/github/qemu/qemu
+F: .gitlab-ci.d/windows.yml

 Guest Test Compilation Support
 M: Alex Bennée
@@ -3843,6 +3917,16 @@
 F: configure
 F: scripts/mtest2make.py
 F: tests/Makefile.include

+Kconfig
+M: Paolo Bonzini
+S: Maintained
+F: scripts/minikconf.py
+F: docs/devel/kconfig.rst
+F: Kconfig*
+F: */Kconfig*
+F: hw/*/Kconfig*
+F: target/*/Kconfig*
+
 GIT submodules
 M: Daniel P. Berrange
 S: Odd Fixes
@@ -3866,3 +3950,8 @@
 Performance Tools and Tests
 M: Ahmed Karaman
 S: Maintained
 F: scripts/performance/
+
+Code Coverage Tools
+M: Alex Bennée
+S: Odd Fixes
+F: scripts/coverage/
diff --git a/Makefile b/Makefile
index ce2f83a684..08fb6a3b05 100644
--- a/Makefile
+++ b/Makefile
@@ -26,7 +26,7 @@ quiet-command-run = $(if $(V),,$(if $2,printf "  %-7s %s\n" $2 $3 && ))$1
 quiet-@ = $(if $(V),,@)
 quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3)

-UNCHECKED_GOALS := %clean TAGS cscope ctags dist \
+UNCHECKED_GOALS := TAGS gtags cscope ctags dist \
     help check-help print-% \
     docker docker-% vm-help vm-test vm-build-%
@@ -115,15 +115,15 @@ Makefile.ninja: build.ninja
 	  $(NINJA) -t query build.ninja | sed -n '1,/^  input:/d; /^  outputs:/q; s/$$/ \\/p'; \
 	} > $@.tmp && mv $@.tmp $@
 -include Makefile.ninja
+endif

+ifneq ($(MESON),)
 # A separate rule is needed for Makefile dependencies to avoid -n
 build.ninja: build.ninja.stamp
 $(build-files):
 build.ninja.stamp: meson.stamp $(build-files)
-	$(NINJA) $(if $V,-v,) build.ninja && touch $@
-endif
+	$(MESON) setup --reconfigure $(SRC_PATH) && touch $@

-ifneq ($(MESON),)
 Makefile.mtest: build.ninja scripts/mtest2make.py
 	$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
 -include Makefile.mtest
@@ -176,10 +176,8 @@ plugins:
 endif # $(CONFIG_PLUGIN)

 else # config-host.mak does not exist
-config-host.mak:
 ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
-	@echo "Please call configure before running make!"
-	@exit 1
+$(error Please call configure before running make)
 endif
 endif # config-host.mak does not exist
@@ -220,7 +218,7 @@ qemu-%.tar.bz2:

 distclean: clean recurse-distclean
 	-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || :
-	rm -f config-host.mak Makefile.prereqs qemu-bundle
+	rm -f config-host.mak Makefile.prereqs
 	rm -f tests/tcg/*/config-target.mak tests/tcg/config-host.mak
 	rm -f config.status
 	rm -f roms/seabios/config.mak
@@ -230,7 +228,7 @@ distclean: clean recurse-distclean
 	rm -f Makefile.ninja Makefile.mtest build.ninja.stamp meson.stamp
 	rm -f config.log
 	rm -f linux-headers/asm
-	rm -Rf .sdk
+	rm -Rf .sdk qemu-bundle

 find-src-path = find "$(SRC_PATH)" -path "$(SRC_PATH)/meson" -prune -o \
     -type l -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \) \
diff --git a/VERSION b/VERSION
index d182ea1650..6f6578c5fa 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-7.2.50
+8.0.50
diff --git a/accel/accel-softmmu.c b/accel/accel-softmmu.c
index f9cdafb148..9c804ba9e3 100644
--- a/accel/accel-softmmu.c
+++ b/accel/accel-softmmu.c
@@ -27,7 +27,7 @@
 #include "qemu/accel.h"
 #include "hw/boards.h"
 #include "sysemu/cpus.h"
-
+#include "qemu/error-report.h"
 #include "accel-softmmu.h"

 int accel_init_machine(AccelState *accel, MachineState *ms)
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index fbf4fe3497..457eafa380 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -86,6 +86,13 @@ static bool kvm_cpus_are_resettable(void)
     return !kvm_enabled() || kvm_cpu_check_are_resettable();
 }

+#ifdef KVM_CAP_SET_GUEST_DEBUG
+static int kvm_update_guest_debug_ops(CPUState *cpu)
+{
+    return kvm_update_guest_debug(cpu, 0);
+}
+#endif
+
 static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
 {
     AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
@@ -99,6 +106,7 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
     ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;

 #ifdef KVM_CAP_SET_GUEST_DEBUG
+    ops->update_guest_debug = kvm_update_guest_debug_ops;
     ops->supports_guest_debug = kvm_supports_guest_debug;
     ops->insert_breakpoint = kvm_insert_breakpoint;
     ops->remove_breakpoint = kvm_remove_breakpoint;
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 9b26582655..7679f397ae 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -685,6 +685,15 @@ static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
     uint32_t ring_size = s->kvm_dirty_ring_size;
     uint32_t count = 0, fetch = cpu->kvm_fetch_index;

+    /*
+     * It's possible that we race with vcpu creation code where the vcpu is
+     * put onto the vcpus list but not yet initialized the dirty ring
+     * structures. If so, skip it.
+     */
+    if (!cpu->created) {
+        return 0;
+    }
+
     assert(dirty_gfns && ring_size);
     trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);
@@ -1352,6 +1361,10 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
          */
         if (kvm_state->kvm_dirty_ring_size) {
             kvm_dirty_ring_reap_locked(kvm_state, NULL);
+            if (kvm_state->kvm_dirty_ring_with_bitmap) {
+                kvm_slot_sync_dirty_pages(mem);
+                kvm_slot_get_dirty_log(kvm_state, mem);
+            }
         } else {
             kvm_slot_get_dirty_log(kvm_state, mem);
         }
@@ -1449,6 +1462,69 @@ static int kvm_dirty_ring_reaper_init(KVMState *s)
     return 0;
 }

+static int kvm_dirty_ring_init(KVMState *s)
+{
+    uint32_t ring_size = s->kvm_dirty_ring_size;
+    uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
+    unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
+    int ret;
+
+    s->kvm_dirty_ring_size = 0;
+    s->kvm_dirty_ring_bytes = 0;
+
+    /* Bail if the dirty ring size isn't specified */
+    if (!ring_size) {
+        return 0;
+    }
+
+    /*
+     * Read the max supported pages. Fall back to dirty logging mode
+     * if the dirty ring isn't supported.
+     */
+    ret = kvm_vm_check_extension(s, capability);
+    if (ret <= 0) {
+        capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
+        ret = kvm_vm_check_extension(s, capability);
+    }
+
+    if (ret <= 0) {
+        warn_report("KVM dirty ring not available, using bitmap method");
+        return 0;
+    }
+
+    if (ring_bytes > ret) {
+        error_report("KVM dirty ring size %" PRIu32 " too big "
+                     "(maximum is %ld).  Please use a smaller value.",
+                     ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
+        return -EINVAL;
+    }
+
+    ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
+    if (ret) {
+        error_report("Enabling of KVM dirty ring failed: %s. "
+                     "Suggested minimum value is 1024.", strerror(-ret));
+        return -EIO;
+    }
+
+    /* Enable the backup bitmap if it is supported */
+    ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
+    if (ret > 0) {
+        ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
+        if (ret) {
+            error_report("Enabling of KVM dirty ring's backup bitmap failed: "
+                         "%s. ", strerror(-ret));
+            return -EIO;
+        }
+
+        s->kvm_dirty_ring_with_bitmap = true;
+    }
+
+    s->kvm_dirty_ring_size = ring_size;
+    s->kvm_dirty_ring_bytes = ring_bytes;
+
+    return 0;
+}
+
 static void kvm_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
 {
@@ -1554,7 +1630,7 @@ static void kvm_log_sync(MemoryListener *listener,
     kvm_slots_unlock();
 }

-static void kvm_log_sync_global(MemoryListener *l)
+static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
 {
     KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
     KVMState *s = kvm_state;
@@ -1573,6 +1649,12 @@ static void kvm_log_sync_global(MemoryListener *l)
         mem = &kml->slots[i];
         if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
             kvm_slot_sync_dirty_pages(mem);
+
+            if (s->kvm_dirty_ring_with_bitmap && last_stage &&
+                kvm_slot_get_dirty_log(s, mem)) {
+                kvm_slot_sync_dirty_pages(mem);
+            }
+
             /*
              * This is not needed by KVM_GET_DIRTY_LOG because the
              * ioctl will unconditionally overwrite the whole region.
@@ -2361,13 +2443,13 @@ static int kvm_init(MachineState *ms)
     static const char upgrade_note[] =
         "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
         "(see http://sourceforge.net/projects/kvm).\n";
-    struct {
+    const struct {
         const char *name;
         int num;
     } num_cpus[] = {
         { "SMP",          ms->smp.cpus },
         { "hotpluggable", ms->smp.max_cpus },
-        { NULL, }
+        { /* end of list */ }
     }, *nc = num_cpus;
     int soft_vcpus_limit, hard_vcpus_limit;
     KVMState *s;
@@ -2512,35 +2594,9 @@ static int kvm_init(MachineState *ms)
      * Enable KVM dirty ring if supported, otherwise fall back to
      * dirty logging mode
      */
-    if (s->kvm_dirty_ring_size > 0) {
-        uint64_t ring_bytes;
-
-        ring_bytes = s->kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
-
-        /* Read the max supported pages */
-        ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING);
-        if (ret > 0) {
-            if (ring_bytes > ret) {
-                error_report("KVM dirty ring size %" PRIu32 " too big "
-                             "(maximum is %ld).  Please use a smaller value.",
-                             s->kvm_dirty_ring_size,
-                             (long)ret / sizeof(struct kvm_dirty_gfn));
-                ret = -EINVAL;
-                goto err;
-            }
-
-            ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
-            if (ret) {
-                error_report("Enabling of KVM dirty ring failed: %s. "
-                             "Suggested minimum value is 1024.", strerror(-ret));
-                goto err;
-            }
-
-            s->kvm_dirty_ring_bytes = ring_bytes;
-        } else {
-            warn_report("KVM dirty ring not available, using bitmap method");
-            s->kvm_dirty_ring_size = 0;
-        }
+    ret = kvm_dirty_ring_init(s);
+    if (ret < 0) {
+        goto err;
     }

 /*
@@ -3305,7 +3361,7 @@ bool kvm_supports_guest_debug(void)
     return kvm_has_guest_debug;
 }

-int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
+int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
 {
     struct kvm_sw_breakpoint *bp;
     int err;
@@ -3343,7 +3399,7 @@ int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
     return 0;
 }

-int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len)
+int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
 {
     struct kvm_sw_breakpoint *bp;
     int err;
@@ -3701,8 +3757,12 @@ static void kvm_accel_instance_init(Object *obj)
     s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
     /* KVM dirty ring is by default off */
     s->kvm_dirty_ring_size = 0;
+    s->kvm_dirty_ring_with_bitmap = false;
     s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
     s->notify_window = 0;
+    s->xen_version = 0;
+    s->xen_gnttab_max_frames = 64;
+    s->xen_evtchn_max_pirq = 256;
 }

 /**
diff --git a/accel/kvm/kvm-cpus.h b/accel/kvm/kvm-cpus.h
index fd63fe6a59..ca40add32c 100644
--- a/accel/kvm/kvm-cpus.h
+++ b/accel/kvm/kvm-cpus.h
@@ -19,8 +19,8 @@ void kvm_cpu_synchronize_post_reset(CPUState *cpu);
 void kvm_cpu_synchronize_post_init(CPUState *cpu);
 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu);
 bool kvm_supports_guest_debug(void);
-int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len);
-int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len);
+int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
+int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
 void kvm_remove_all_breakpoints(CPUState *cpu);

 #endif /* KVM_CPUS_H */
diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c
index c1b05767c0..813695b402 100644
--- a/accel/stubs/tcg-stub.c
+++ b/accel/stubs/tcg-stub.c
@@ -11,6 +11,7 @@
  */

 #include "qemu/osdep.h"
+#include "exec/tb-flush.h"
 #include "exec/exec-all.h"

 void tb_flush(CPUState *cpu)
@@ -25,7 +26,7 @@ void tcg_flush_jmp_cache(CPUState *cpu)
tcg_flush_jmp_cache(CPUState *cpu) { } -int probe_access_flags(CPUArchState *env, target_ulong addr, +int probe_access_flags(CPUArchState *env, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, uintptr_t retaddr) { diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc index 8f2ce43ee6..ee222fd7e7 100644 --- a/accel/tcg/atomic_common.c.inc +++ b/accel/tcg/atomic_common.c.inc @@ -13,26 +13,12 @@ * See the COPYING file in the top-level directory. */ -static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, +static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr, MemOpIdx oi) { qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW); } -#if HAVE_ATOMIC128 -static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, - MemOpIdx oi) -{ - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); -} - -static void atomic_trace_st_post(CPUArchState *env, target_ulong addr, - MemOpIdx oi) -{ - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); -} -#endif - /* * Atomic helpers callable from TCG. * These have a common interface and all defer to cpu_atomic_* @@ -40,7 +26,7 @@ static void atomic_trace_st_post(CPUArchState *env, target_ulong addr, */ #define CMPXCHG_HELPER(OP, TYPE) \ - TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \ + TYPE HELPER(atomic_##OP)(CPUArchState *env, uint64_t addr, \ TYPE oldv, TYPE newv, uint32_t oi) \ { return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); } @@ -62,36 +48,16 @@ CMPXCHG_HELPER(cmpxchgo_le, Int128) #undef CMPXCHG_HELPER -Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr, - Int128 cmpv, Int128 newv, uint32_t oi) +Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr, + Int128 cmpv, Int128 newv, uint32_t oi) { #if TCG_TARGET_REG_BITS == 32 uintptr_t ra = GETPC(); Int128 oldv; - oldv = cpu_ld16_be_mmu(env, addr, oi, ra); + oldv = cpu_ld16_mmu(env, addr, oi, ra); if (int128_eq(oldv, cmpv)) { - cpu_st16_be_mmu(env, addr, newv, oi, ra); - } else { - /* Even with comparison failure, still need a write cycle. */ - probe_write(env, addr, 16, get_mmuidx(oi), ra); - } - return oldv; -#else - g_assert_not_reached(); -#endif -} - -Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr, - Int128 cmpv, Int128 newv, uint32_t oi) -{ -#if TCG_TARGET_REG_BITS == 32 - uintptr_t ra = GETPC(); - Int128 oldv; - - oldv = cpu_ld16_le_mmu(env, addr, oi, ra); - if (int128_eq(oldv, cmpv)) { - cpu_st16_le_mmu(env, addr, newv, oi, ra); + cpu_st16_mmu(env, addr, newv, oi, ra); } else { /* Even with comparison failure, still need a write cycle. 
*/ probe_write(env, addr, 16, get_mmuidx(oi), ra); @@ -103,7 +69,7 @@ Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr, } #define ATOMIC_HELPER(OP, TYPE) \ - TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \ + TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, uint64_t addr, \ TYPE val, uint32_t oi) \ { return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); } diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h index 404a530f7c..e312acd16d 100644 --- a/accel/tcg/atomic_template.h +++ b/accel/tcg/atomic_template.h @@ -73,8 +73,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, ABI_TYPE cmpv, ABI_TYPE newv, MemOpIdx oi, uintptr_t retaddr) { - DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, - PAGE_READ | PAGE_WRITE, retaddr); + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); DATA_TYPE ret; #if DATA_SIZE == 16 @@ -87,38 +86,11 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, return ret; } -#if DATA_SIZE >= 16 -#if HAVE_ATOMIC128 -ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, - PAGE_READ, retaddr); - DATA_TYPE val; - - val = atomic16_read(haddr); - ATOMIC_MMU_CLEANUP; - atomic_trace_ld_post(env, addr, oi); - return val; -} - -void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val, - MemOpIdx oi, uintptr_t retaddr) -{ - DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, - PAGE_WRITE, retaddr); - - atomic16_set(haddr, val); - ATOMIC_MMU_CLEANUP; - atomic_trace_st_post(env, addr, oi); -} -#endif -#else +#if DATA_SIZE < 16 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) { - DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, - PAGE_READ | PAGE_WRITE, retaddr); + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); DATA_TYPE ret; ret = qatomic_xchg__nocheck(haddr, val); @@ -131,9 +103,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ { \ - DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ - PAGE_READ | PAGE_WRITE, retaddr); \ - DATA_TYPE ret; \ + DATA_TYPE *haddr, ret; \ + haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \ ret = qatomic_##X(haddr, val); \ ATOMIC_MMU_CLEANUP; \ atomic_trace_rmw_post(env, addr, oi); \ @@ -163,9 +134,8 @@ GEN_ATOMIC_HELPER(xor_fetch) ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ { \ - XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ - PAGE_READ | PAGE_WRITE, retaddr); \ - XDATA_TYPE cmp, old, new, val = xval; \ + XDATA_TYPE *haddr, cmp, old, new, val = xval; \ + haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \ smp_mb(); \ cmp = qatomic_read__nocheck(haddr); \ do { \ @@ -188,7 +158,7 @@ GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new) GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new) #undef GEN_ATOMIC_HELPER_FN -#endif /* DATA SIZE >= 16 */ +#endif /* DATA SIZE < 16 */ #undef END @@ -206,8 +176,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, ABI_TYPE cmpv, ABI_TYPE newv, MemOpIdx oi, uintptr_t retaddr) { - DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, - 
PAGE_READ | PAGE_WRITE, retaddr); + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); DATA_TYPE ret; #if DATA_SIZE == 16 @@ -220,39 +189,11 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, return BSWAP(ret); } -#if DATA_SIZE >= 16 -#if HAVE_ATOMIC128 -ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, - PAGE_READ, retaddr); - DATA_TYPE val; - - val = atomic16_read(haddr); - ATOMIC_MMU_CLEANUP; - atomic_trace_ld_post(env, addr, oi); - return BSWAP(val); -} - -void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val, - MemOpIdx oi, uintptr_t retaddr) -{ - DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, - PAGE_WRITE, retaddr); - - val = BSWAP(val); - atomic16_set(haddr, val); - ATOMIC_MMU_CLEANUP; - atomic_trace_st_post(env, addr, oi); -} -#endif -#else +#if DATA_SIZE < 16 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) { - DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, - PAGE_READ | PAGE_WRITE, retaddr); + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); ABI_TYPE ret; ret = qatomic_xchg__nocheck(haddr, BSWAP(val)); @@ -265,9 +206,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ { \ - DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ - PAGE_READ | PAGE_WRITE, retaddr); \ - DATA_TYPE ret; \ + DATA_TYPE *haddr, ret; \ + haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \ ret = qatomic_##X(haddr, BSWAP(val)); \ ATOMIC_MMU_CLEANUP; \ atomic_trace_rmw_post(env, addr, oi); \ @@ -294,9 +234,8 @@ GEN_ATOMIC_HELPER(xor_fetch) ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ { \ - XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ - PAGE_READ | PAGE_WRITE, retaddr); \ - XDATA_TYPE ldo, ldn, old, new, val = xval; \ + XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \ + haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \ smp_mb(); \ ldn = qatomic_read__nocheck(haddr); \ do { \ @@ -326,7 +265,7 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new) #undef ADD #undef GEN_ATOMIC_HELPER_FN -#endif /* DATA_SIZE >= 16 */ +#endif /* DATA_SIZE < 16 */ #undef END #endif /* DATA_SIZE > 1 */ diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c index c7bc8c6efa..9a5fabf625 100644 --- a/accel/tcg/cpu-exec-common.c +++ b/accel/tcg/cpu-exec-common.c @@ -21,6 +21,8 @@ #include "sysemu/cpus.h" #include "sysemu/tcg.h" #include "exec/exec-all.h" +#include "qemu/plugin.h" +#include "internal.h" bool tcg_allowed; @@ -65,6 +67,8 @@ void cpu_loop_exit(CPUState *cpu) { /* Undo the setting in cpu_tb_exec. */ cpu->can_do_io = 1; + /* Undo any setting in generated code. */ + qemu_plugin_disable_mem_helpers(cpu); siglongjmp(cpu->jmp_env, 1); } @@ -78,6 +82,8 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc) void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc) { + /* Prevent looping if already executing in a serial context. 
*/ + g_assert(!cpu_in_serial_context(cpu)); cpu->exception_index = EXCP_ATOMIC; cpu_loop_exit_restore(cpu, pc); } diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index fe0e24f3c5..4a7db82a71 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -20,7 +20,6 @@ #include "qemu/osdep.h" #include "qemu/qemu-print.h" #include "qapi/error.h" -#include "qapi/qapi-commands-machine.h" #include "qapi/type-helpers.h" #include "hw/core/tcg-cpu-ops.h" #include "trace.h" @@ -28,7 +27,6 @@ #include "exec/exec-all.h" #include "tcg/tcg.h" #include "qemu/atomic.h" -#include "qemu/timer.h" #include "qemu/rcu.h" #include "exec/log.h" #include "qemu/main-loop.h" @@ -38,7 +36,7 @@ #include "sysemu/cpus.h" #include "exec/cpu-all.h" #include "sysemu/cpu-timers.h" -#include "sysemu/replay.h" +#include "exec/replay-core.h" #include "sysemu/tcg.h" #include "exec/helper-proto.h" #include "tb-jmp-cache.h" @@ -64,8 +62,8 @@ typedef struct SyncClocks { #define MAX_DELAY_PRINT_RATE 2000000000LL #define MAX_NB_PRINTS 100 -static int64_t max_delay; -static int64_t max_advance; +int64_t max_delay; +int64_t max_advance; static void align_clocks(SyncClocks *sc, CPUState *cpu) { @@ -161,7 +159,7 @@ uint32_t curr_cflags(CPUState *cpu) */ if (unlikely(cpu->singlestep_enabled)) { cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1; - } else if (singlestep) { + } else if (qatomic_read(&one_insn_per_tb)) { cflags |= CF_NO_GOTO_TB | 1; } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { cflags |= CF_NO_GOTO_TB; @@ -185,7 +183,7 @@ static bool tb_lookup_cmp(const void *p, const void *d) const TranslationBlock *tb = p; const struct tb_desc *desc = d; - if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) && + if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) && tb_page_addr0(tb) == desc->page_addr0 && tb->cs_base == desc->cs_base && tb->flags == desc->flags && @@ -237,7 +235,7 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, return NULL; } desc.page_addr0 = phys_pc; - h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc), + h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc), flags, cflags, *cpu->trace_dstate); return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp); } @@ -256,21 +254,46 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, hash = tb_jmp_cache_hash_func(pc); jc = cpu->tb_jmp_cache; - tb = tb_jmp_cache_get_tb(jc, hash); - if (likely(tb && - tb_jmp_cache_get_pc(jc, hash, tb) == pc && - tb->cs_base == cs_base && - tb->flags == flags && - tb->trace_vcpu_dstate == *cpu->trace_dstate && - tb_cflags(tb) == cflags)) { - return tb; + if (cflags & CF_PCREL) { + /* Use acquire to ensure current load of pc from jc. */ + tb = qatomic_load_acquire(&jc->array[hash].tb); + + if (likely(tb && + jc->array[hash].pc == pc && + tb->cs_base == cs_base && + tb->flags == flags && + tb->trace_vcpu_dstate == *cpu->trace_dstate && + tb_cflags(tb) == cflags)) { + return tb; + } + tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); + if (tb == NULL) { + return NULL; + } + jc->array[hash].pc = pc; + /* Ensure pc is written first. */ + qatomic_store_release(&jc->array[hash].tb, tb); + } else { + /* Use rcu_read to ensure current load of pc from *tb. 
*/ + tb = qatomic_rcu_read(&jc->array[hash].tb); + + if (likely(tb && + tb->pc == pc && + tb->cs_base == cs_base && + tb->flags == flags && + tb->trace_vcpu_dstate == *cpu->trace_dstate && + tb_cflags(tb) == cflags)) { + return tb; + } + tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); + if (tb == NULL) { + return NULL; + } + /* Use the pc value already stored in tb->pc. */ + qatomic_set(&jc->array[hash].tb, tb); } - tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); - if (tb == NULL) { - return NULL; - } - tb_jmp_cache_set(jc, hash, tb, pc); + return tb; } @@ -284,7 +307,6 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu, cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc, tb->flags, tb->cflags, lookup_symbol(pc)); -#if defined(DEBUG_DISAS) if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) { FILE *logfile = qemu_log_trylock(); if (logfile) { @@ -300,7 +322,6 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu, qemu_log_unlock(logfile); } } -#endif /* DEBUG_DISAS */ } } @@ -436,6 +457,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) qemu_thread_jit_execute(); ret = tcg_qemu_tb_exec(env, tb_ptr); cpu->can_do_io = 1; + qemu_plugin_disable_mem_helpers(cpu); /* * TODO: Delay swapping back to the read-write region of the TB * until we actually need to modify the TB. The read-only copy, @@ -459,9 +481,9 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) if (cc->tcg_ops->synchronize_from_tb) { cc->tcg_ops->synchronize_from_tb(cpu, last_tb); } else { - assert(!TARGET_TB_PCREL); + tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL)); assert(cc->set_pc); - cc->set_pc(cpu, tb_pc(last_tb)); + cc->set_pc(cpu, last_tb->pc); } if (qemu_loglevel_mask(CPU_LOG_EXEC)) { target_ulong pc = log_pc(cpu, last_tb); @@ -503,7 +525,6 @@ static void cpu_exec_exit(CPUState *cpu) if (cc->tcg_ops->cpu_exec_exit) { cc->tcg_ops->cpu_exec_exit(cpu); } - QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL); } void cpu_exec_step_atomic(CPUState *cpu) @@ -557,7 +578,6 @@ void cpu_exec_step_atomic(CPUState *cpu) qemu_mutex_unlock_iothread(); } assert_no_pages_locked(); - qemu_plugin_disable_mem_helpers(cpu); } /* @@ -979,17 +999,27 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc) tb = tb_lookup(cpu, pc, cs_base, flags, cflags); if (tb == NULL) { + CPUJumpCache *jc; uint32_t h; mmap_lock(); tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); mmap_unlock(); + /* * We add the TB in the virtual pc hash table * for the fast lookup */ h = tb_jmp_cache_hash_func(pc); - tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc); + jc = cpu->tb_jmp_cache; + if (cflags & CF_PCREL) { + jc->array[h].pc = pc; + /* Ensure pc is written first. */ + qatomic_store_release(&jc->array[h].tb, tb); + } else { + /* Use the pc value already stored in tb->pc. 
*/ + qatomic_set(&jc->array[h].tb, tb); + } } #ifndef CONFIG_USER_ONLY @@ -1012,7 +1042,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc) #if !TARGET_TB_PCREL if (last_tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) { mmap_lock(); - TranslationBlock *edge = libafl_gen_edge(cpu, tb_pc(last_tb), tb_pc(tb), + TranslationBlock *edge = libafl_gen_edge(cpu, last_tb->pc, tb->pc, tb_exit, cs_base, flags, cflags); mmap_unlock(); @@ -1035,7 +1065,6 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc) cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit); - QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL); /* Try to align the host and virtual clocks if the guest is in advance */ align_clocks(sc, cpu); @@ -1060,7 +1089,6 @@ static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc) if (qemu_mutex_iothread_locked()) { qemu_mutex_unlock_iothread(); } - qemu_plugin_disable_mem_helpers(cpu); assert_no_pages_locked(); } @@ -1133,86 +1161,3 @@ void tcg_exec_unrealizefn(CPUState *cpu) tlb_destroy(cpu); g_free_rcu(cpu->tb_jmp_cache, rcu); } - -#ifndef CONFIG_USER_ONLY - -static void dump_drift_info(GString *buf) -{ - if (!icount_enabled()) { - return; - } - - g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n", - (cpu_get_clock() - icount_get()) / SCALE_MS); - if (icount_align_option) { - g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n", - -max_delay / SCALE_MS); - g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n", - max_advance / SCALE_MS); - } else { - g_string_append_printf(buf, "Max guest delay NA\n"); - g_string_append_printf(buf, "Max guest advance NA\n"); - } -} - -HumanReadableText *qmp_x_query_jit(Error **errp) -{ - g_autoptr(GString) buf = g_string_new(""); - - if (!tcg_enabled()) { - error_setg(errp, "JIT information is only available with accel=tcg"); - return NULL; - } - - dump_exec_info(buf); - dump_drift_info(buf); - - return human_readable_text_from_str(buf); -} - -HumanReadableText *qmp_x_query_opcount(Error **errp) -{ - g_autoptr(GString) buf = g_string_new(""); - - if (!tcg_enabled()) { - error_setg(errp, "Opcode count information is only available with accel=tcg"); - return NULL; - } - - tcg_dump_op_count(buf); - - return human_readable_text_from_str(buf); -} - -#ifdef CONFIG_PROFILER - -int64_t dev_time; - -HumanReadableText *qmp_x_query_profile(Error **errp) -{ - g_autoptr(GString) buf = g_string_new(""); - static int64_t last_cpu_exec_time; - int64_t cpu_exec_time; - int64_t delta; - - cpu_exec_time = tcg_cpu_exec_time(); - delta = cpu_exec_time - last_cpu_exec_time; - - g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n", - dev_time, dev_time / (double)NANOSECONDS_PER_SECOND); - g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n", - delta, delta / (double)NANOSECONDS_PER_SECOND); - last_cpu_exec_time = cpu_exec_time; - dev_time = 0; - - return human_readable_text_from_str(buf); -} -#else -HumanReadableText *qmp_x_query_profile(Error **errp) -{ - error_setg(errp, "Internal profiler not compiled"); - return NULL; -} -#endif - -#endif /* !CONFIG_USER_ONLY */ diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 7692ee8fc6..79f1807057 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -40,6 +40,7 @@ #include "qemu/plugin-memory.h" #endif #include "tcg/tcg-ldst.h" +#include "exec/helper-proto.h" /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ /* #define DEBUG_TLB */ @@ -81,6 +82,13 @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); #define 
ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) +//// --- Begin LibAFL code --- + +// void syx_snapshot_dirty_list_add(hwaddr paddr); +void syx_snapshot_dirty_list_add_hostaddr(void* host_addr); + +//// --- End LibAFL code --- + static inline size_t tlb_n_entries(CPUTLBDescFast *fast) { return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1; @@ -1250,7 +1258,6 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, desc->fulltlb[index] = *full; desc->fulltlb[index].xlat_section = iotlb - vaddr_page; desc->fulltlb[index].phys_addr = paddr_page; - desc->fulltlb[index].prot = prot; /* Now calculate the new entry */ tn.addend = addend - vaddr_page; @@ -1442,34 +1449,17 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full, } } -static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) -{ -#if TCG_OVERSIZED_GUEST - return *(target_ulong *)((uintptr_t)entry + ofs); -#else - /* ofs might correspond to .addr_write, so use qatomic_read */ - return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); -#endif -} - /* Return true if ADDR is present in the victim tlb, and has been copied back to the main tlb. */ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, - size_t elt_ofs, target_ulong page) + MMUAccessType access_type, target_ulong page) { size_t vidx; assert_cpu_is_self(env_cpu(env)); for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; - target_ulong cmp; - - /* elt_ofs might correspond to .addr_write, so use qatomic_read */ -#if TCG_OVERSIZED_GUEST - cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); -#else - cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); -#endif + target_ulong cmp = tlb_read_idx(vtlb, access_type); if (cmp == page) { /* Found entry in victim tlb, swap tlb and iotlb. */ @@ -1491,11 +1481,6 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, return false; } -/* Macro to call the above, with local variables from the use context. 
*/ -#define VICTIM_TLB_HIT(TY, ADDR) \ - victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ - (ADDR) & TARGET_PAGE_MASK) - static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, CPUTLBEntryFull *full, uintptr_t retaddr) { @@ -1528,29 +1513,12 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, { uintptr_t index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); - target_ulong tlb_addr, page_addr; - size_t elt_ofs; - int flags; + target_ulong tlb_addr = tlb_read_idx(entry, access_type); + target_ulong page_addr = addr & TARGET_PAGE_MASK; + int flags = TLB_FLAGS_MASK; - switch (access_type) { - case MMU_DATA_LOAD: - elt_ofs = offsetof(CPUTLBEntry, addr_read); - break; - case MMU_DATA_STORE: - elt_ofs = offsetof(CPUTLBEntry, addr_write); - break; - case MMU_INST_FETCH: - elt_ofs = offsetof(CPUTLBEntry, addr_code); - break; - default: - g_assert_not_reached(); - } - tlb_addr = tlb_read_ofs(entry, elt_ofs); - - flags = TLB_FLAGS_MASK; - page_addr = addr & TARGET_PAGE_MASK; if (!tlb_hit_page(tlb_addr, page_addr)) { - if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { + if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) { CPUState *cs = env_cpu(env); if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, @@ -1572,7 +1540,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, */ flags &= ~TLB_INVALID_MASK; } - tlb_addr = tlb_read_ofs(entry, elt_ofs); + tlb_addr = tlb_read_idx(entry, access_type); } flags &= tlb_addr; @@ -1589,12 +1557,12 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, return flags; } -int probe_access_full(CPUArchState *env, target_ulong addr, +int probe_access_full(CPUArchState *env, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, CPUTLBEntryFull **pfull, uintptr_t retaddr) { - int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, + int flags = probe_access_internal(env, addr, size, access_type, mmu_idx, nonfault, phost, pfull, retaddr); /* Handle clean RAM pages. */ @@ -1606,14 +1574,25 @@ int probe_access_full(CPUArchState *env, target_ulong addr, return flags; } -int probe_access_flags(CPUArchState *env, target_ulong addr, +int probe_access_flags(CPUArchState *env, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, uintptr_t retaddr) { CPUTLBEntryFull *full; + int flags; - return probe_access_full(env, addr, access_type, mmu_idx, - nonfault, phost, &full, retaddr); + g_assert(-(addr | TARGET_PAGE_MASK) >= size); + + flags = probe_access_internal(env, addr, size, access_type, mmu_idx, + nonfault, phost, &full, retaddr); + + /* Handle clean RAM pages. */ + if (unlikely(flags & TLB_NOTDIRTY)) { + notdirty_write(env_cpu(env), addr, 1, full, retaddr); + flags &= ~TLB_NOTDIRTY; + } + + return flags; } void *probe_access(CPUArchState *env, target_ulong addr, int size, @@ -1686,12 +1665,20 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, if (p == NULL) { return -1; } + + if (full->lg_page_size < TARGET_PAGE_BITS) { + return -1; + } + if (hostp) { *hostp = p; } return qemu_ram_addr_from_host_nofail(p); } +/* Load/store with atomicity primitives. */ +#include "ldst_atomicity.c.inc" + #ifdef CONFIG_PLUGIN /* * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 
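//// --- Begin editor sketch (illustrative only, not part of the patch) ---
/*
 * The refactoring above replaces the byte-offset comparator reads
 * (tlb_read_ofs() plus the VICTIM_TLB_HIT macro) with lookups keyed by
 * MMUAccessType. A hedged sketch of what such an accessor looks like;
 * the real tlb_read_idx() is introduced in a header elsewhere in this
 * series and additionally uses an atomic read for the addr_write
 * comparator. The _sketch suffix marks this as a hypothetical name.
 */
static inline target_ulong tlb_read_idx_sketch(const CPUTLBEntry *entry,
                                               MMUAccessType access_type)
{
    switch (access_type) {
    case MMU_DATA_LOAD:
        return entry->addr_read;
    case MMU_DATA_STORE:
        return tlb_addr_write(entry); /* atomic read on most hosts */
    case MMU_INST_FETCH:
        return entry->addr_code;
    default:
        g_assert_not_reached();
    }
}
//// --- End editor sketch ---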
@@ -1740,15 +1727,203 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, #endif +/* + * Probe for a load/store operation. + * Return the host address in @haddr and the TLB flags in @flags. + */ + +typedef struct MMULookupPageData { + CPUTLBEntryFull *full; + void *haddr; + target_ulong addr; + int flags; + int size; +} MMULookupPageData; + +typedef struct MMULookupLocals { + MMULookupPageData page[2]; + MemOp memop; + int mmu_idx; +} MMULookupLocals; + +/** + * mmu_lookup1: translate one page + * @env: cpu context + * @data: lookup parameters + * @mmu_idx: virtual address context + * @access_type: load/store/code + * @ra: return address into tcg generated code, or 0 + * + * Resolve the translation for the one page at @data.addr, filling in + * the rest of @data with the results. If the translation fails, + * tlb_fill will longjmp out. Return true if the softmmu tlb for + * @mmu_idx may have been resized. + */ +static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data, + int mmu_idx, MMUAccessType access_type, uintptr_t ra) +{ + target_ulong addr = data->addr; + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr = tlb_read_idx(entry, access_type); + bool maybe_resized = false; + + /* If the TLB entry is for a different page, reload and try again. */ + if (!tlb_hit(tlb_addr, addr)) { + if (!victim_tlb_hit(env, mmu_idx, index, access_type, + addr & TARGET_PAGE_MASK)) { + tlb_fill(env_cpu(env), addr, data->size, access_type, mmu_idx, ra); + maybe_resized = true; + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK; + } + + data->flags = tlb_addr & TLB_FLAGS_MASK; + data->full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; + /* Compute haddr speculatively; depending on flags it might be invalid. */ + data->haddr = (void *)((uintptr_t)addr + entry->addend); + + return maybe_resized; +} + +/** + * mmu_watch_or_dirty + * @env: cpu context + * @data: lookup parameters + * @access_type: load/store/code + * @ra: return address into tcg generated code, or 0 + * + * Trigger watchpoints for @data.addr:@data.size; + * record writes to protected clean pages. + */ +static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data, + MMUAccessType access_type, uintptr_t ra) +{ + CPUTLBEntryFull *full = data->full; + target_ulong addr = data->addr; + int flags = data->flags; + int size = data->size; + + /* On watchpoint hit, this will longjmp out. */ + if (flags & TLB_WATCHPOINT) { + int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ; + cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra); + flags &= ~TLB_WATCHPOINT; + } + + /* Note that notdirty is only set for writes. */ + if (flags & TLB_NOTDIRTY) { + notdirty_write(env_cpu(env), addr, size, full, ra); + flags &= ~TLB_NOTDIRTY; + } + data->flags = flags; +} + +/** + * mmu_lookup: translate page(s) + * @env: cpu context + * @addr: virtual address + * @oi: combined mmu_idx and MemOp + * @ra: return address into tcg generated code, or 0 + * @access_type: load/store/code + * @l: output result + * + * Resolve the translation for the page(s) beginning at @addr, for MemOp.size + * bytes. Return true if the lookup crosses a page boundary.
+ */ +static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi, + uintptr_t ra, MMUAccessType type, MMULookupLocals *l) +{ + unsigned a_bits; + bool crosspage; + int flags; + + l->memop = get_memop(oi); + l->mmu_idx = get_mmuidx(oi); + + tcg_debug_assert(l->mmu_idx < NB_MMU_MODES); + + /* Handle CPU specific unaligned behaviour */ + a_bits = get_alignment_bits(l->memop); + if (addr & ((1 << a_bits) - 1)) { + cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra); + } + + l->page[0].addr = addr; + l->page[0].size = memop_size(l->memop); + l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK; + l->page[1].size = 0; + crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK; + + if (likely(!crosspage)) { + mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra); + + flags = l->page[0].flags; + if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { + mmu_watch_or_dirty(env, &l->page[0], type, ra); + } + if (unlikely(flags & TLB_BSWAP)) { + l->memop ^= MO_BSWAP; + } + + //// --- Begin LibAFL code --- + + if (type == MMU_DATA_STORE) { + syx_snapshot_dirty_list_add_hostaddr(l->page[0].haddr); + } + + //// --- End LibAFL code --- + + } else { + /* Finish compute of page crossing. */ + int size0 = l->page[1].addr - addr; + l->page[1].size = l->page[0].size - size0; + l->page[0].size = size0; + + /* + * Lookup both pages, recognizing exceptions from either. If the + * second lookup potentially resized, refresh first CPUTLBEntryFull. + */ + mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra); + if (mmu_lookup1(env, &l->page[1], l->mmu_idx, type, ra)) { + uintptr_t index = tlb_index(env, l->mmu_idx, addr); + l->page[0].full = &env_tlb(env)->d[l->mmu_idx].fulltlb[index]; + } + + flags = l->page[0].flags | l->page[1].flags; + if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { + mmu_watch_or_dirty(env, &l->page[0], type, ra); + mmu_watch_or_dirty(env, &l->page[1], type, ra); + } + + //// --- Begin LibAFL code --- + + if (type == MMU_DATA_STORE) { + syx_snapshot_dirty_list_add_hostaddr(l->page[0].haddr); + syx_snapshot_dirty_list_add_hostaddr(l->page[1].haddr); + } + + //// --- End LibAFL code --- + + /* + * Since target/sparc is the only user of TLB_BSWAP, and all + * Sparc accesses are aligned, any treatment across two pages + * would be arbitrary. Refuse it until there's a use. + */ + tcg_debug_assert((flags & TLB_BSWAP) == 0); + } + + return crosspage; +} + /* * Probe for an atomic operation. Do not allow unaligned operations, * or io operations to proceed. Return the host address. - * - * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. */ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, - MemOpIdx oi, int size, int prot, - uintptr_t retaddr) + MemOpIdx oi, int size, uintptr_t retaddr) { uintptr_t mmu_idx = get_mmuidx(oi); MemOp mop = get_memop(oi); @@ -1757,6 +1932,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, CPUTLBEntry *tlbe; target_ulong tlb_addr; void *hostaddr; + CPUTLBEntryFull *full; tcg_debug_assert(mmu_idx < NB_MMU_MODES); @@ -1783,55 +1959,60 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, tlbe = tlb_entry(env, mmu_idx, addr); /* Check TLB entry and enforce page permissions. 
*/ - if (prot & PAGE_WRITE) { - tlb_addr = tlb_addr_write(tlbe); - if (!tlb_hit(tlb_addr, addr)) { - if (!VICTIM_TLB_HIT(addr_write, addr)) { - tlb_fill(env_cpu(env), addr, size, - MMU_DATA_STORE, mmu_idx, retaddr); - index = tlb_index(env, mmu_idx, addr); - tlbe = tlb_entry(env, mmu_idx, addr); - } - tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; - } - - /* Let the guest notice RMW on a write-only page. */ - if ((prot & PAGE_READ) && - unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { + tlb_addr = tlb_addr_write(tlbe); + if (!tlb_hit(tlb_addr, addr)) { + if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE, + addr & TARGET_PAGE_MASK)) { tlb_fill(env_cpu(env), addr, size, - MMU_DATA_LOAD, mmu_idx, retaddr); - /* - * Since we don't support reads and writes to different addresses, - * and we do have the proper page loaded for write, this shouldn't - * ever return. But just in case, handle via stop-the-world. - */ - goto stop_the_world; - } - } else /* if (prot & PAGE_READ) */ { - tlb_addr = tlbe->addr_read; - if (!tlb_hit(tlb_addr, addr)) { - if (!VICTIM_TLB_HIT(addr_write, addr)) { - tlb_fill(env_cpu(env), addr, size, - MMU_DATA_LOAD, mmu_idx, retaddr); - index = tlb_index(env, mmu_idx, addr); - tlbe = tlb_entry(env, mmu_idx, addr); - } - tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK; + MMU_DATA_STORE, mmu_idx, retaddr); + index = tlb_index(env, mmu_idx, addr); + tlbe = tlb_entry(env, mmu_idx, addr); } + tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; } + /* + * Let the guest notice RMW on a write-only page. + * We have just verified that the page is writable. + * Subpage lookups may have left TLB_INVALID_MASK set, + * but addr_read will only be -1 if PAGE_READ was unset. + */ + if (unlikely(tlbe->addr_read == -1)) { + tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); + /* + * Since we don't support reads and writes to different + * addresses, and we do have the proper page loaded for + * write, this shouldn't ever return. But just in case, + * handle via stop-the-world. + */ + goto stop_the_world; + } + /* Collect TLB_WATCHPOINT for read. */ + tlb_addr |= tlbe->addr_read; + /* Notice an IO access or a needs-MMU-lookup access */ - if (unlikely(tlb_addr & TLB_MMIO)) { + if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) { /* There's really nothing that can be done to support this apart from stop-the-world. */ goto stop_the_world; } hostaddr = (void *)((uintptr_t)addr + tlbe->addend); + full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; + + //// --- Begin LibAFL code --- + + syx_snapshot_dirty_list_add_hostaddr(hostaddr); + + //// --- End LibAFL code --- if (unlikely(tlb_addr & TLB_NOTDIRTY)) { - notdirty_write(env_cpu(env), addr, size, - &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr); + notdirty_write(env_cpu(env), addr, size, full, retaddr); + } + + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { + cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, + BP_MEM_READ | BP_MEM_WRITE, retaddr); } return hostaddr; @@ -1840,25 +2021,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, cpu_loop_exit_atomic(env_cpu(env), retaddr); } -/* - * Verify that we have passed the correct MemOp to the correct function. - * - * In the case of the helper_*_mmu functions, we will have done this by - * using the MemOp to look up the helper during code generation. - * - * In the case of the cpu_*_mmu functions, this is up to the caller. 
- * We could present one function to target code, and dispatch based on - * the MemOp, but so far we have worked hard to avoid an indirect function - * call along the memory path. - */ -static void validate_memop(MemOpIdx oi, MemOp expected) -{ -#ifdef CONFIG_DEBUG_TCG - MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); - assert(have == expected); -#endif -} - /* * Load Helpers * @@ -1866,146 +2028,7 @@ static void validate_memop(MemOpIdx oi, MemOp expected) * specifically for reading instructions from system memory. It is * called by the translation loop and in some helpers where the code * is disassembled. It shouldn't be called directly by guest code. - */ - -typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); - -static inline uint64_t QEMU_ALWAYS_INLINE -load_memop(const void *haddr, MemOp op) -{ - switch (op) { - case MO_UB: - return ldub_p(haddr); - case MO_BEUW: - return lduw_be_p(haddr); - case MO_LEUW: - return lduw_le_p(haddr); - case MO_BEUL: - return (uint32_t)ldl_be_p(haddr); - case MO_LEUL: - return (uint32_t)ldl_le_p(haddr); - case MO_BEUQ: - return ldq_be_p(haddr); - case MO_LEUQ: - return ldq_le_p(haddr); - default: - qemu_build_not_reached(); - } -} - -static inline uint64_t QEMU_ALWAYS_INLINE -load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi, - uintptr_t retaddr, MemOp op, bool code_read, - FullLoadHelper *full_load) -{ - const size_t tlb_off = code_read ? - offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); - const MMUAccessType access_type = - code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; - const unsigned a_bits = get_alignment_bits(get_memop(oi)); - const size_t size = memop_size(op); - uintptr_t mmu_idx = get_mmuidx(oi); - uintptr_t index; - CPUTLBEntry *entry; - target_ulong tlb_addr; - void *haddr; - uint64_t res; - - tcg_debug_assert(mmu_idx < NB_MMU_MODES); - - /* Handle CPU specific unaligned behaviour */ - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(env_cpu(env), addr, access_type, - mmu_idx, retaddr); - } - - index = tlb_index(env, mmu_idx, addr); - entry = tlb_entry(env, mmu_idx, addr); - tlb_addr = code_read ? entry->addr_code : entry->addr_read; - - /* If the TLB entry is for a different page, reload and try again. */ - if (!tlb_hit(tlb_addr, addr)) { - if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, - addr & TARGET_PAGE_MASK)) { - tlb_fill(env_cpu(env), addr, size, - access_type, mmu_idx, retaddr); - index = tlb_index(env, mmu_idx, addr); - entry = tlb_entry(env, mmu_idx, addr); - } - tlb_addr = code_read ? entry->addr_code : entry->addr_read; - tlb_addr &= ~TLB_INVALID_MASK; - } - - /* Handle anything that isn't just a straight memory access. */ - if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - CPUTLBEntryFull *full; - bool need_swap; - - /* For anything that is unaligned, recurse through full_load. */ - if ((addr & (size - 1)) != 0) { - goto do_unaligned_access; - } - - full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; - - /* Handle watchpoints. */ - if (unlikely(tlb_addr & TLB_WATCHPOINT)) { - /* On watchpoint hit, this will longjmp out. */ - cpu_check_watchpoint(env_cpu(env), addr, size, - full->attrs, BP_MEM_READ, retaddr); - } - - need_swap = size > 1 && (tlb_addr & TLB_BSWAP); - - /* Handle I/O access. 
*/ - if (likely(tlb_addr & TLB_MMIO)) { - return io_readx(env, full, mmu_idx, addr, retaddr, - access_type, op ^ (need_swap * MO_BSWAP)); - } - - haddr = (void *)((uintptr_t)addr + entry->addend); - - /* - * Keep these two load_memop separate to ensure that the compiler - * is able to fold the entire function to a single instruction. - * There is a build-time assert inside to remind you of this. ;-) - */ - if (unlikely(need_swap)) { - return load_memop(haddr, op ^ MO_BSWAP); - } - return load_memop(haddr, op); - } - - /* Handle slow unaligned access (it spans two pages or IO). */ - if (size > 1 - && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 - >= TARGET_PAGE_SIZE)) { - target_ulong addr1, addr2; - uint64_t r1, r2; - unsigned shift; - do_unaligned_access: - addr1 = addr & ~((target_ulong)size - 1); - addr2 = addr1 + size; - r1 = full_load(env, addr1, oi, retaddr); - r2 = full_load(env, addr2, oi, retaddr); - shift = (addr & (size - 1)) * 8; - - if (memop_big_endian(op)) { - /* Big-endian combine. */ - res = (r1 << shift) | (r2 >> ((size * 8) - shift)); - } else { - /* Little-endian combine. */ - res = (r1 >> shift) | (r2 << ((size * 8) - shift)); - } - return res & MAKE_64BIT_MASK(0, size * 8); - } - - haddr = (void *)((uintptr_t)addr + entry->addend); - return load_memop(haddr, op); -} - -/* + * * For the benefit of TCG generated code, we want to avoid the * complication of ABI-specific return type promotion and always * return a value extended to the register size of the host. This is * @@ -2015,89 +2038,433 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi, * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. */ -static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +/** + * do_ld_mmio_beN: + * @env: cpu context + * @p: translation parameters + * @ret_be: accumulated data + * @mmu_idx: virtual address context + * @ra: return address into tcg generated code, or 0 + * + * Load @p->size bytes from @p->addr, which is memory-mapped i/o. + * The bytes are concatenated in big-endian order with @ret_be. + */ +static uint64_t do_ld_mmio_beN(CPUArchState *env, MMULookupPageData *p, + uint64_t ret_be, int mmu_idx, + MMUAccessType type, uintptr_t ra) { - validate_memop(oi, MO_UB); - return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); + CPUTLBEntryFull *full = p->full; + target_ulong addr = p->addr; + int i, size = p->size; + + QEMU_IOTHREAD_LOCK_GUARD(); + for (i = 0; i < size; i++) { + uint8_t x = io_readx(env, full, mmu_idx, addr + i, ra, type, MO_UB); + ret_be = (ret_be << 8) | x; + } + return ret_be; } -tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +/** + * do_ld_bytes_beN + * @p: translation parameters + * @ret_be: accumulated data + * + * Load @p->size bytes from @p->haddr, which is RAM. + * The bytes are concatenated in big-endian order with @ret_be. + */ +static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be) { - return full_ldub_mmu(env, addr, oi, retaddr); + uint8_t *haddr = p->haddr; + int i, size = p->size; + + for (i = 0; i < size; i++) { + ret_be = (ret_be << 8) | haddr[i]; + } + return ret_be; } -static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, +/** + * do_ld_parts_beN + * @p: translation parameters + * @ret_be: accumulated data + * + * As do_ld_bytes_beN, but atomically on each aligned part.
+ */ +static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be) +{ + void *haddr = p->haddr; + int size = p->size; + + do { + uint64_t x; + int n; + + /* + * Find minimum of alignment and size. + * This is slightly stronger than required by MO_ATOM_SUBALIGN, which + * would have only checked the low bits of addr|size once at the start, + * but is just as easy. + */ + switch (((uintptr_t)haddr | size) & 7) { + case 4: + x = cpu_to_be32(load_atomic4(haddr)); + ret_be = (ret_be << 32) | x; + n = 4; + break; + case 2: + case 6: + x = cpu_to_be16(load_atomic2(haddr)); + ret_be = (ret_be << 16) | x; + n = 2; + break; + default: + x = *(uint8_t *)haddr; + ret_be = (ret_be << 8) | x; + n = 1; + break; + case 0: + g_assert_not_reached(); + } + haddr += n; + size -= n; + } while (size != 0); + return ret_be; +} + +/** + * do_ld_whole_be4 + * @p: translation parameters + * @ret_be: accumulated data + * + * As do_ld_bytes_beN, but with one atomic load. + * Four aligned bytes are guaranteed to cover the load. + */ +static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be) +{ + int o = p->addr & 3; + uint32_t x = load_atomic4(p->haddr - o); + + x = cpu_to_be32(x); + x <<= o * 8; + x >>= (4 - p->size) * 8; + return (ret_be << (p->size * 8)) | x; +} + +/** + * do_ld_whole_be8 + * @p: translation parameters + * @ret_be: accumulated data + * + * As do_ld_bytes_beN, but with one atomic load. + * Eight aligned bytes are guaranteed to cover the load. + */ +static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra, + MMULookupPageData *p, uint64_t ret_be) +{ + int o = p->addr & 7; + uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o); + + x = cpu_to_be64(x); + x <<= o * 8; + x >>= (8 - p->size) * 8; + return (ret_be << (p->size * 8)) | x; +} + +/** + * do_ld_whole_be16 + * @p: translation parameters + * @ret_be: accumulated data + * + * As do_ld_bytes_beN, but with one atomic load. + * 16 aligned bytes are guaranteed to cover the load. + */ +static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra, + MMULookupPageData *p, uint64_t ret_be) +{ + int o = p->addr & 15; + Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o); + int size = p->size; + + if (!HOST_BIG_ENDIAN) { + y = bswap128(y); + } + y = int128_lshift(y, o * 8); + y = int128_urshift(y, (16 - size) * 8); + x = int128_make64(ret_be); + x = int128_lshift(x, size * 8); + return int128_or(x, y); +} + +/* + * Wrapper for the above. + */ +static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p, + uint64_t ret_be, int mmu_idx, MMUAccessType type, + MemOp mop, uintptr_t ra) +{ + MemOp atom; + unsigned tmp, half_size; + + if (unlikely(p->flags & TLB_MMIO)) { + return do_ld_mmio_beN(env, p, ret_be, mmu_idx, type, ra); + } + + /* + * It is a given that we cross a page and therefore there is no + * atomicity for the load as a whole, but subobjects may need attention. + */ + atom = mop & MO_ATOM_MASK; + switch (atom) { + case MO_ATOM_SUBALIGN: + return do_ld_parts_beN(p, ret_be); + + case MO_ATOM_IFALIGN_PAIR: + case MO_ATOM_WITHIN16_PAIR: + tmp = mop & MO_SIZE; + tmp = tmp ? tmp - 1 : 0; + half_size = 1 << tmp; + if (atom == MO_ATOM_IFALIGN_PAIR + ?
p->size == half_size + : p->size >= half_size) { + if (!HAVE_al8_fast && p->size < 4) { + return do_ld_whole_be4(p, ret_be); + } else { + return do_ld_whole_be8(env, ra, p, ret_be); + } + } + /* fall through */ + + case MO_ATOM_IFALIGN: + case MO_ATOM_WITHIN16: + case MO_ATOM_NONE: + return do_ld_bytes_beN(p, ret_be); + + default: + g_assert_not_reached(); + } +} + +/* + * Wrapper for the above, for 8 < size < 16. + */ +static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p, + uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra) +{ + int size = p->size; + uint64_t b; + MemOp atom; + + if (unlikely(p->flags & TLB_MMIO)) { + p->size = size - 8; + a = do_ld_mmio_beN(env, p, a, mmu_idx, MMU_DATA_LOAD, ra); + p->addr += p->size; + p->size = 8; + b = do_ld_mmio_beN(env, p, 0, mmu_idx, MMU_DATA_LOAD, ra); + return int128_make128(b, a); + } + + /* + * It is a given that we cross a page and therefore there is no + * atomicity for the load as a whole, but subobjects may need attention. + */ + atom = mop & MO_ATOM_MASK; + switch (atom) { + case MO_ATOM_SUBALIGN: + p->size = size - 8; + a = do_ld_parts_beN(p, a); + p->haddr += size - 8; + p->size = 8; + b = do_ld_parts_beN(p, 0); + break; + + case MO_ATOM_WITHIN16_PAIR: + /* Since size > 8, this is the half that must be atomic. */ + return do_ld_whole_be16(env, ra, p, a); + + case MO_ATOM_IFALIGN_PAIR: + /* + * Since size > 8, both halves are misaligned, + * and so neither is atomic. + */ + case MO_ATOM_IFALIGN: + case MO_ATOM_WITHIN16: + case MO_ATOM_NONE: + p->size = size - 8; + a = do_ld_bytes_beN(p, a); + b = ldq_be_p(p->haddr + size - 8); + break; + + default: + g_assert_not_reached(); + } + + return int128_make128(b, a); +} + +static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx, + MMUAccessType type, uintptr_t ra) +{ + if (unlikely(p->flags & TLB_MMIO)) { + return io_readx(env, p->full, mmu_idx, p->addr, ra, type, MO_UB); + } else { + return *(uint8_t *)p->haddr; + } +} + +static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx, + MMUAccessType type, MemOp memop, uintptr_t ra) +{ + uint64_t ret; + + if (unlikely(p->flags & TLB_MMIO)) { + return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop); + } + + /* Perform the load host endian, then swap if necessary. */ + ret = load_atom_2(env, ra, p->haddr, memop); + if (memop & MO_BSWAP) { + ret = bswap16(ret); + } + return ret; +} + +static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx, + MMUAccessType type, MemOp memop, uintptr_t ra) +{ + uint32_t ret; + + if (unlikely(p->flags & TLB_MMIO)) { + return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop); + } + + /* Perform the load host endian. */ + ret = load_atom_4(env, ra, p->haddr, memop); + if (memop & MO_BSWAP) { + ret = bswap32(ret); + } + return ret; +} + +static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx, + MMUAccessType type, MemOp memop, uintptr_t ra) +{ + uint64_t ret; + + if (unlikely(p->flags & TLB_MMIO)) { + return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop); + } + + /* Perform the load host endian. 
*/ + ret = load_atom_8(env, ra, p->haddr, memop); + if (memop & MO_BSWAP) { + ret = bswap64(ret); + } + return ret; +} + +static uint8_t do_ld1_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, + uintptr_t ra, MMUAccessType access_type) +{ + MMULookupLocals l; + bool crosspage; + + crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); + tcg_debug_assert(!crosspage); + + return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra); +} + +tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr) { - validate_memop(oi, MO_LEUW); - return load_helper(env, addr, oi, retaddr, MO_LEUW, false, - full_le_lduw_mmu); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); + return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); } -tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +static uint16_t do_ld2_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, + uintptr_t ra, MMUAccessType access_type) { - return full_le_lduw_mmu(env, addr, oi, retaddr); + MMULookupLocals l; + bool crosspage; + uint16_t ret; + uint8_t a, b; + + crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); + if (likely(!crosspage)) { + return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); + } + + a = do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra); + b = do_ld_1(env, &l.page[1], l.mmu_idx, access_type, ra); + + if ((l.memop & MO_BSWAP) == MO_LE) { + ret = a | (b << 8); + } else { + ret = b | (a << 8); + } + return ret; } -static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr) { - validate_memop(oi, MO_BEUW); - return load_helper(env, addr, oi, retaddr, MO_BEUW, false, - full_be_lduw_mmu); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); + return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); } -tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +static uint32_t do_ld4_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, + uintptr_t ra, MMUAccessType access_type) { - return full_be_lduw_mmu(env, addr, oi, retaddr); + MMULookupLocals l; + bool crosspage; + uint32_t ret; + + crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); + if (likely(!crosspage)) { + return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); + } + + ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); + ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); + if ((l.memop & MO_BSWAP) == MO_LE) { + ret = bswap32(ret); + } + return ret; } -static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr) { - validate_memop(oi, MO_LEUL); - return load_helper(env, addr, oi, retaddr, MO_LEUL, false, - full_le_ldul_mmu); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); + return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); } -tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +static uint64_t do_ld8_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, + uintptr_t ra, MMUAccessType access_type) { - return full_le_ldul_mmu(env, addr, oi, retaddr); + MMULookupLocals l; + bool crosspage; + uint64_t ret; + + crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); + if (likely(!crosspage)) { + return do_ld_8(env, 
&l.page[0], l.mmu_idx, access_type, l.memop, ra); + } + + ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); + ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); + if ((l.memop & MO_BSWAP) == MO_LE) { + ret = bswap64(ret); + } + return ret; } -static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t retaddr) { - validate_memop(oi, MO_BEUL); - return load_helper(env, addr, oi, retaddr, MO_BEUL, false, - full_be_ldul_mmu); -} - -tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - return full_be_ldul_mmu(env, addr, oi, retaddr); -} - -uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_LEUQ); - return load_helper(env, addr, oi, retaddr, MO_LEUQ, false, - helper_le_ldq_mmu); -} - -uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_BEUQ); - return load_helper(env, addr, oi, retaddr, MO_BEUQ, false, - helper_be_ldq_mmu); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); + return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); } /* @@ -2105,576 +2472,574 @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, * avoid this for 64-bit data, or for 32-bit data on 32-bit host. */ - -tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t retaddr) { - return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); + return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr); } -tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t retaddr) { - return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); + return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr); } -tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t retaddr) { - return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); + return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr); } -tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +static Int128 do_ld16_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra) { - return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); + MMULookupLocals l; + bool crosspage; + uint64_t a, b; + Int128 ret; + int first; + + crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l); + if (likely(!crosspage)) { + /* Perform the load host endian. */ + if (unlikely(l.page[0].flags & TLB_MMIO)) { + QEMU_IOTHREAD_LOCK_GUARD(); + a = io_readx(env, l.page[0].full, l.mmu_idx, addr, + ra, MMU_DATA_LOAD, MO_64); + b = io_readx(env, l.page[0].full, l.mmu_idx, addr + 8, + ra, MMU_DATA_LOAD, MO_64); + ret = int128_make128(HOST_BIG_ENDIAN ? b : a, + HOST_BIG_ENDIAN ? 
a : b); + } else { + ret = load_atom_16(env, ra, l.page[0].haddr, l.memop); + } + if (l.memop & MO_BSWAP) { + ret = bswap128(ret); + } + return ret; + } + + first = l.page[0].size; + if (first == 8) { + MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64; + + a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); + b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); + if ((mop8 & MO_BSWAP) == MO_LE) { + ret = int128_make128(a, b); + } else { + ret = int128_make128(b, a); + } + return ret; + } + + if (first < 8) { + a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, + MMU_DATA_LOAD, l.memop, ra); + ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra); + } else { + ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra); + b = int128_getlo(ret); + ret = int128_lshift(ret, l.page[1].size * 8); + a = int128_gethi(ret); + b = do_ld_beN(env, &l.page[1], b, l.mmu_idx, + MMU_DATA_LOAD, l.memop, ra); + ret = int128_make128(b, a); + } + + if ((l.memop & MO_BSWAP) == MO_LE) { + ret = bswap128(ret); + } + return ret; } -tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, + uint32_t oi, uintptr_t retaddr) { - return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); + return do_ld16_mmu(env, addr, oi, retaddr); +} + +Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi) +{ + return helper_ld16_mmu(env, addr, oi, GETPC()); } /* * Load helpers for cpu_ldst.h. */ -static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t retaddr, - FullLoadHelper *full_load) +static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) { - uint64_t ret; - - ret = full_load(env, addr, oi, retaddr); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return ret; } uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu); + uint8_t ret; + + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB); + ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD); + plugin_load_cb(env, addr, oi); + return ret; } -uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) +uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu); + uint16_t ret; + + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); + ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD); + plugin_load_cb(env, addr, oi); + return ret; } -uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) +uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu); + uint32_t ret; + + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); + ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD); + plugin_load_cb(env, addr, oi); + return ret; } -uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) +uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu); + uint64_t ret; + + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); + ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD); + plugin_load_cb(env, addr, oi); + return ret; } -uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, 
- MemOpIdx oi, uintptr_t ra) +Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu); -} + Int128 ret; -uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) -{ - return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu); -} - -uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) -{ - return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu); -} - -Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) -{ - MemOp mop = get_memop(oi); - int mmu_idx = get_mmuidx(oi); - MemOpIdx new_oi; - unsigned a_bits; - uint64_t h, l; - - tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128)); - a_bits = get_alignment_bits(mop); - - /* Handle CPU specific unaligned behaviour */ - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD, - mmu_idx, ra); - } - - /* Construct an unaligned 64-bit replacement MemOpIdx. */ - mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; - new_oi = make_memop_idx(mop, mmu_idx); - - h = helper_be_ldq_mmu(env, addr, new_oi, ra); - l = helper_be_ldq_mmu(env, addr + 8, new_oi, ra); - - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return int128_make128(l, h); -} - -Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) -{ - MemOp mop = get_memop(oi); - int mmu_idx = get_mmuidx(oi); - MemOpIdx new_oi; - unsigned a_bits; - uint64_t h, l; - - tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128)); - a_bits = get_alignment_bits(mop); - - /* Handle CPU specific unaligned behaviour */ - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD, - mmu_idx, ra); - } - - /* Construct an unaligned 64-bit replacement MemOpIdx. */ - mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; - new_oi = make_memop_idx(mop, mmu_idx); - - l = helper_le_ldq_mmu(env, addr, new_oi, ra); - h = helper_le_ldq_mmu(env, addr + 8, new_oi, ra); - - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return int128_make128(l, h); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); + ret = do_ld16_mmu(env, addr, oi, ra); + plugin_load_cb(env, addr, oi); + return ret; } /* * Store Helpers */ -static inline void QEMU_ALWAYS_INLINE -store_memop(void *haddr, uint64_t val, MemOp op) +/** + * do_st_mmio_leN: + * @env: cpu context + * @p: translation parameters + * @val_le: data to store + * @mmu_idx: virtual address context + * @ra: return address into tcg generated code, or 0 + * + * Store @p->size bytes at @p->addr, which is memory-mapped i/o. + * The bytes to store are extracted in little-endian order from @val_le; + * return the bytes of @val_le beyond @p->size that have not been stored. 
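+ * E.g. (an illustrative sketch of the contract, derived from the loop below) + * with @p->size == 3 and @val_le == 0x0807060504030201, the bytes 0x01, 0x02 + * and 0x03 are stored at @p->addr, @p->addr + 1 and @p->addr + 2, and + * 0x0000000807060504 is returned.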
+ */ +static uint64_t do_st_mmio_leN(CPUArchState *env, MMULookupPageData *p, + uint64_t val_le, int mmu_idx, uintptr_t ra) { - switch (op) { - case MO_UB: - stb_p(haddr, val); - break; - case MO_BEUW: - stw_be_p(haddr, val); - break; - case MO_LEUW: - stw_le_p(haddr, val); - break; - case MO_BEUL: - stl_be_p(haddr, val); - break; - case MO_LEUL: - stl_le_p(haddr, val); - break; - case MO_BEUQ: - stq_be_p(haddr, val); - break; - case MO_LEUQ: - stq_le_p(haddr, val); - break; + CPUTLBEntryFull *full = p->full; + target_ulong addr = p->addr; + int i, size = p->size; + + QEMU_IOTHREAD_LOCK_GUARD(); + for (i = 0; i < size; i++, val_le >>= 8) { + io_writex(env, full, mmu_idx, val_le, addr + i, ra, MO_UB); + } + return val_le; +} + +/* + * Wrapper for the above. + */ +static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p, + uint64_t val_le, int mmu_idx, + MemOp mop, uintptr_t ra) +{ + MemOp atom; + unsigned tmp, half_size; + + if (unlikely(p->flags & TLB_MMIO)) { + return do_st_mmio_leN(env, p, val_le, mmu_idx, ra); + } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { + return val_le >> (p->size * 8); + } + + /* + * It is a given that we cross a page and therefore there is no atomicity + * for the store as a whole, but subobjects may need attention. + */ + atom = mop & MO_ATOM_MASK; + switch (atom) { + case MO_ATOM_SUBALIGN: + return store_parts_leN(p->haddr, p->size, val_le); + + case MO_ATOM_IFALIGN_PAIR: + case MO_ATOM_WITHIN16_PAIR: + tmp = mop & MO_SIZE; + tmp = tmp ? tmp - 1 : 0; + half_size = 1 << tmp; + if (atom == MO_ATOM_IFALIGN_PAIR + ? p->size == half_size + : p->size >= half_size) { + if (!HAVE_al8_fast && p->size <= 4) { + return store_whole_le4(p->haddr, p->size, val_le); + } else if (HAVE_al8) { + return store_whole_le8(p->haddr, p->size, val_le); + } else { + cpu_loop_exit_atomic(env_cpu(env), ra); + } + } + /* fall through */ + + case MO_ATOM_IFALIGN: + case MO_ATOM_WITHIN16: + case MO_ATOM_NONE: + return store_bytes_leN(p->haddr, p->size, val_le); + default: - qemu_build_not_reached(); + g_assert_not_reached(); } } -static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr); - -static void __attribute__((noinline)) -store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, - uintptr_t retaddr, size_t size, uintptr_t mmu_idx, - bool big_endian) +/* + * Wrapper for the above, for 8 < size < 16. + */ +static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p, + Int128 val_le, int mmu_idx, + MemOp mop, uintptr_t ra) { - const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); - uintptr_t index, index2; - CPUTLBEntry *entry, *entry2; - target_ulong page1, page2, tlb_addr, tlb_addr2; - MemOpIdx oi; - size_t size2; - int i; + int size = p->size; + MemOp atom; - /* - * Ensure the second page is in the TLB. Note that the first page - * is already guaranteed to be filled, and that the second page - * cannot evict the first. An exception to this rule is PAGE_WRITE_INV - * handling: the first page could have evicted itself. 
- */ - page1 = addr & TARGET_PAGE_MASK; - page2 = (addr + size) & TARGET_PAGE_MASK; - size2 = (addr + size) & ~TARGET_PAGE_MASK; - index2 = tlb_index(env, mmu_idx, page2); - entry2 = tlb_entry(env, mmu_idx, page2); - - tlb_addr2 = tlb_addr_write(entry2); - if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) { - if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { - tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, - mmu_idx, retaddr); - index2 = tlb_index(env, mmu_idx, page2); - entry2 = tlb_entry(env, mmu_idx, page2); - } - tlb_addr2 = tlb_addr_write(entry2); - } - - index = tlb_index(env, mmu_idx, addr); - entry = tlb_entry(env, mmu_idx, addr); - tlb_addr = tlb_addr_write(entry); - - /* - * Handle watchpoints. Since this may trap, all checks - * must happen before any store. - */ - if (unlikely(tlb_addr & TLB_WATCHPOINT)) { - cpu_check_watchpoint(env_cpu(env), addr, size - size2, - env_tlb(env)->d[mmu_idx].fulltlb[index].attrs, - BP_MEM_WRITE, retaddr); - } - if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { - cpu_check_watchpoint(env_cpu(env), page2, size2, - env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs, - BP_MEM_WRITE, retaddr); + if (unlikely(p->flags & TLB_MMIO)) { + p->size = 8; + do_st_mmio_leN(env, p, int128_getlo(val_le), mmu_idx, ra); + p->size = size - 8; + p->addr += 8; + return do_st_mmio_leN(env, p, int128_gethi(val_le), mmu_idx, ra); + } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { + return int128_gethi(val_le) >> ((size - 8) * 8); } /* - * XXX: not efficient, but simple. - * This loop must go in the forward direction to avoid issues - * with self-modifying code in Windows 64-bit. + * It is a given that we cross a page and therefore there is no atomicity + * for the store as a whole, but subobjects may need attention. */ - oi = make_memop_idx(MO_UB, mmu_idx); - if (big_endian) { - for (i = 0; i < size; ++i) { - /* Big-endian extract. */ - uint8_t val8 = val >> (((size - 1) * 8) - (i * 8)); - full_stb_mmu(env, addr + i, val8, oi, retaddr); + atom = mop & MO_ATOM_MASK; + switch (atom) { + case MO_ATOM_SUBALIGN: + store_parts_leN(p->haddr, 8, int128_getlo(val_le)); + return store_parts_leN(p->haddr + 8, p->size - 8, + int128_gethi(val_le)); + + case MO_ATOM_WITHIN16_PAIR: + /* Since size > 8, this is the half that must be atomic. */ + if (!HAVE_ATOMIC128_RW) { + cpu_loop_exit_atomic(env_cpu(env), ra); } - } else { - for (i = 0; i < size; ++i) { - /* Little-endian extract. */ - uint8_t val8 = val >> (i * 8); - full_stb_mmu(env, addr + i, val8, oi, retaddr); - } - } -} - -//// --- Begin LibAFL code --- - -void syx_snapshot_dirty_list_add_hostaddr(void* host_addr); - -//// --- End LibAFL code --- - -static inline void QEMU_ALWAYS_INLINE -store_helper(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr, MemOp op) -{ - const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); - const unsigned a_bits = get_alignment_bits(get_memop(oi)); - const size_t size = memop_size(op); - uintptr_t mmu_idx = get_mmuidx(oi); - uintptr_t index; - CPUTLBEntry *entry; - target_ulong tlb_addr; - void *haddr; - - tcg_debug_assert(mmu_idx < NB_MMU_MODES); - - /* Handle CPU specific unaligned behaviour */ - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, - mmu_idx, retaddr); - } - - index = tlb_index(env, mmu_idx, addr); - entry = tlb_entry(env, mmu_idx, addr); - tlb_addr = tlb_addr_write(entry); - - /* If the TLB entry is for a different page, reload and try again. 
*/ - if (!tlb_hit(tlb_addr, addr)) { - if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, - addr & TARGET_PAGE_MASK)) { - tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, - mmu_idx, retaddr); - index = tlb_index(env, mmu_idx, addr); - entry = tlb_entry(env, mmu_idx, addr); - } - tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; - } - - /* Handle anything that isn't just a straight memory access. */ - if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - CPUTLBEntryFull *full; - bool need_swap; - - /* For anything that is unaligned, recurse through byte stores. */ - if ((addr & (size - 1)) != 0) { - goto do_unaligned_access; - } - - full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; - - /* Handle watchpoints. */ - if (unlikely(tlb_addr & TLB_WATCHPOINT)) { - /* On watchpoint hit, this will longjmp out. */ - cpu_check_watchpoint(env_cpu(env), addr, size, - full->attrs, BP_MEM_WRITE, retaddr); - } - - need_swap = size > 1 && (tlb_addr & TLB_BSWAP); - - /* Handle I/O access. */ - if (tlb_addr & TLB_MMIO) { - io_writex(env, full, mmu_idx, val, addr, retaddr, - op ^ (need_swap * MO_BSWAP)); - return; - } - - /* Ignore writes to ROM. */ - if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) { - return; - } - - /* Handle clean RAM pages. */ - if (tlb_addr & TLB_NOTDIRTY) { - notdirty_write(env_cpu(env), addr, size, full, retaddr); - } - - haddr = (void *)((uintptr_t)addr + entry->addend); - -//// --- Begin LibAFL code --- - - syx_snapshot_dirty_list_add_hostaddr(haddr); - -//// --- End LibAFL code --- + return store_whole_le16(p->haddr, p->size, val_le); + case MO_ATOM_IFALIGN_PAIR: /* - * Keep these two store_memop separate to ensure that the compiler - * is able to fold the entire function to a single instruction. - * There is a build-time assert inside to remind you of this. ;-) + * Since size > 8, both halves are misaligned, + * and so neither is atomic. */ - if (unlikely(need_swap)) { - store_memop(haddr, val, op ^ MO_BSWAP); + case MO_ATOM_IFALIGN: + case MO_ATOM_NONE: + stq_le_p(p->haddr, int128_getlo(val_le)); + return store_bytes_leN(p->haddr + 8, p->size - 8, + int128_gethi(val_le)); + + default: + g_assert_not_reached(); + } +} + +static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val, + int mmu_idx, uintptr_t ra) +{ + if (unlikely(p->flags & TLB_MMIO)) { + io_writex(env, p->full, mmu_idx, val, p->addr, ra, MO_UB); + } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { + /* nothing */ + } else { + *(uint8_t *)p->haddr = val; + } +} + +static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val, + int mmu_idx, MemOp memop, uintptr_t ra) +{ + if (unlikely(p->flags & TLB_MMIO)) { + io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop); + } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { + /* nothing */ + } else { + /* Swap to host endian if necessary, then store. */ + if (memop & MO_BSWAP) { + val = bswap16(val); + } + store_atom_2(env, ra, p->haddr, memop, val); + } +} + +static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val, + int mmu_idx, MemOp memop, uintptr_t ra) +{ + if (unlikely(p->flags & TLB_MMIO)) { + io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop); + } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { + /* nothing */ + } else { + /* Swap to host endian if necessary, then store. 
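MO_BSWAP is set + in @memop when the access byte order differs from the host byte order.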
*/ + if (memop & MO_BSWAP) { + val = bswap32(val); + } + store_atom_4(env, ra, p->haddr, memop, val); + } +} + +static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val, + int mmu_idx, MemOp memop, uintptr_t ra) +{ + if (unlikely(p->flags & TLB_MMIO)) { + io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop); + } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { + /* nothing */ + } else { + /* Swap to host endian if necessary, then store. */ + if (memop & MO_BSWAP) { + val = bswap64(val); + } + store_atom_8(env, ra, p->haddr, memop, val); + } +} + +void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) +{ + MMULookupLocals l; + bool crosspage; + + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); + crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); + tcg_debug_assert(!crosspage); + + do_st_1(env, &l.page[0], val, l.mmu_idx, ra); +} + +static void do_st2_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t ra) +{ + MMULookupLocals l; + bool crosspage; + uint8_t a, b; + + crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); + if (likely(!crosspage)) { + do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra); + return; + } + + if ((l.memop & MO_BSWAP) == MO_LE) { + a = val, b = val >> 8; + } else { + b = val, a = val >> 8; + } + do_st_1(env, &l.page[0], a, l.mmu_idx, ra); + do_st_1(env, &l.page[1], b, l.mmu_idx, ra); +} + +void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); + do_st2_mmu(env, addr, val, oi, retaddr); +} + +static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) +{ + MMULookupLocals l; + bool crosspage; + + crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); + if (likely(!crosspage)) { + do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra); + return; + } + + /* Swap to little endian for simplicity, then store by bytes. */ + if ((l.memop & MO_BSWAP) != MO_LE) { + val = bswap32(val); + } + val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra); + (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); +} + +void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); + do_st4_mmu(env, addr, val, oi, retaddr); +} + +static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t ra) +{ + MMULookupLocals l; + bool crosspage; + + crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); + if (likely(!crosspage)) { + do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra); + return; + } + + /* Swap to little endian for simplicity, then store by bytes. 
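Each page half + then consumes the low-order bytes of @val first via do_st_leN.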
*/ + if ((l.memop & MO_BSWAP) != MO_LE) { + val = bswap64(val); + } + val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra); + (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); +} + +void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); + do_st8_mmu(env, addr, val, oi, retaddr); +} + +static void do_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi, uintptr_t ra) +{ + MMULookupLocals l; + bool crosspage; + uint64_t a, b; + int first; + + crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); + if (likely(!crosspage)) { + /* Swap to host endian if necessary, then store. */ + if (l.memop & MO_BSWAP) { + val = bswap128(val); + } + if (unlikely(l.page[0].flags & TLB_MMIO)) { + QEMU_IOTHREAD_LOCK_GUARD(); + if (HOST_BIG_ENDIAN) { + b = int128_getlo(val), a = int128_gethi(val); + } else { + a = int128_getlo(val), b = int128_gethi(val); + } + io_writex(env, l.page[0].full, l.mmu_idx, a, addr, ra, MO_64); + io_writex(env, l.page[0].full, l.mmu_idx, b, addr + 8, ra, MO_64); + } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) { + /* nothing */ } else { - store_memop(haddr, val, op); + store_atom_16(env, ra, l.page[0].haddr, l.memop, val); } return; } - /* Handle slow unaligned access (it spans two pages or IO). */ - if (size > 1 - && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 - >= TARGET_PAGE_SIZE)) { - do_unaligned_access: - store_helper_unaligned(env, addr, val, retaddr, size, - mmu_idx, memop_big_endian(op)); + first = l.page[0].size; + if (first == 8) { + MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64; + + if (l.memop & MO_BSWAP) { + val = bswap128(val); + } + if (HOST_BIG_ENDIAN) { + b = int128_getlo(val), a = int128_gethi(val); + } else { + a = int128_getlo(val), b = int128_gethi(val); + } + do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra); + do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra); return; } - haddr = (void *)((uintptr_t)addr + entry->addend); - -//// --- Begin LibAFL code --- - - syx_snapshot_dirty_list_add_hostaddr(haddr); - -//// --- End LibAFL code --- - - store_memop(haddr, val, op); + if ((l.memop & MO_BSWAP) != MO_LE) { + val = bswap128(val); + } + if (first < 8) { + do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra); + val = int128_urshift(val, first * 8); + do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); + } else { + b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra); + do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra); + } } -static void __attribute__((noinline)) -full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr) +void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, + MemOpIdx oi, uintptr_t retaddr) { - validate_memop(oi, MO_UB); - store_helper(env, addr, val, oi, retaddr, MO_UB); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); + do_st16_mmu(env, addr, val, oi, retaddr); } -void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, - MemOpIdx oi, uintptr_t retaddr) +void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi) { - full_stb_mmu(env, addr, val, oi, retaddr); -} - -static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_LEUW); - store_helper(env, addr, val, oi, retaddr, MO_LEUW); -} - -void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, 
uint16_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - full_le_stw_mmu(env, addr, val, oi, retaddr); -} - -static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_BEUW); - store_helper(env, addr, val, oi, retaddr, MO_BEUW); -} - -void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - full_be_stw_mmu(env, addr, val, oi, retaddr); -} - -static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_LEUL); - store_helper(env, addr, val, oi, retaddr, MO_LEUL); -} - -void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - full_le_stl_mmu(env, addr, val, oi, retaddr); -} - -static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_BEUL); - store_helper(env, addr, val, oi, retaddr, MO_BEUL); -} - -void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - full_be_stl_mmu(env, addr, val, oi, retaddr); -} - -void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_LEUQ); - store_helper(env, addr, val, oi, retaddr, MO_LEUQ); -} - -void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_BEUQ); - store_helper(env, addr, val, oi, retaddr, MO_BEUQ); + helper_st16_mmu(env, addr, val, oi, GETPC()); } /* * Store Helpers for cpu_ldst.h */ -typedef void FullStoreHelper(CPUArchState *env, target_ulong addr, - uint64_t val, MemOpIdx oi, uintptr_t retaddr); - -static inline void cpu_store_helper(CPUArchState *env, target_ulong addr, - uint64_t val, MemOpIdx oi, uintptr_t ra, - FullStoreHelper *full_store) +static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) { - full_store(env, addr, val, oi, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu); + helper_stb_mmu(env, addr, val, oi, retaddr); + plugin_store_cb(env, addr, oi); } -void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val, +void cpu_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); + do_st2_mmu(env, addr, val, oi, retaddr); + plugin_store_cb(env, addr, oi); +} + +void cpu_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); + do_st4_mmu(env, addr, val, oi, retaddr); + plugin_store_cb(env, addr, oi); } -void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr) +void cpu_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); + do_st8_mmu(env, addr, val, oi, retaddr); + plugin_store_cb(env, addr, oi); } -void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t 
retaddr) +void cpu_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu); -} - -void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu); -} - -void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu); -} - -void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu); -} - -void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val, - MemOpIdx oi, uintptr_t ra) -{ - MemOp mop = get_memop(oi); - int mmu_idx = get_mmuidx(oi); - MemOpIdx new_oi; - unsigned a_bits; - - tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128)); - a_bits = get_alignment_bits(mop); - - /* Handle CPU specific unaligned behaviour */ - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, - mmu_idx, ra); - } - - /* Construct an unaligned 64-bit replacement MemOpIdx. */ - mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; - new_oi = make_memop_idx(mop, mmu_idx); - - helper_be_stq_mmu(env, addr, int128_gethi(val), new_oi, ra); - helper_be_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra); - - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); -} - -void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val, - MemOpIdx oi, uintptr_t ra) -{ - MemOp mop = get_memop(oi); - int mmu_idx = get_mmuidx(oi); - MemOpIdx new_oi; - unsigned a_bits; - - tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128)); - a_bits = get_alignment_bits(mop); - - /* Handle CPU specific unaligned behaviour */ - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, - mmu_idx, ra); - } - - /* Construct an unaligned 64-bit replacement MemOpIdx. */ - mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; - new_oi = make_memop_idx(mop, mmu_idx); - - helper_le_stq_mmu(env, addr, int128_getlo(val), new_oi, ra); - helper_le_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra); - - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); + do_st16_mmu(env, addr, val, oi, retaddr); + plugin_store_cb(env, addr, oi); } #include "ldst_common.c.inc" @@ -2705,57 +3070,57 @@ void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val, #include "atomic_template.h" #endif -#if HAVE_CMPXCHG128 || HAVE_ATOMIC128 +#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128) #define DATA_SIZE 16 #include "atomic_template.h" #endif /* Code access functions. 
*/ -static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code); -} - uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) { MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); - return full_ldub_code(env, addr, oi, 0); -} - -static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code); + return do_ld1_mmu(env, addr, oi, 0, MMU_INST_FETCH); } uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) { MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); - return full_lduw_code(env, addr, oi, 0); -} - -static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code); + return do_ld2_mmu(env, addr, oi, 0, MMU_INST_FETCH); } uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) { MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); - return full_ldl_code(env, addr, oi, 0); -} - -static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code); + return do_ld4_mmu(env, addr, oi, 0, MMU_INST_FETCH); } uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) { MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true)); - return full_ldq_code(env, addr, oi, 0); + return do_ld8_mmu(env, addr, oi, 0, MMU_INST_FETCH); +} + +uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return do_ld1_mmu(env, addr, oi, retaddr, MMU_INST_FETCH); +} + +uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return do_ld2_mmu(env, addr, oi, retaddr, MMU_INST_FETCH); +} + +uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return do_ld4_mmu(env, addr, oi, retaddr, MMU_INST_FETCH); +} + +uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return do_ld8_mmu(env, addr, oi, retaddr, MMU_INST_FETCH); } diff --git a/accel/tcg/hmp.c b/accel/tcg/hmp.c deleted file mode 100644 index bb67941420..0000000000 --- a/accel/tcg/hmp.c +++ /dev/null @@ -1,14 +0,0 @@ -#include "qemu/osdep.h" -#include "qemu/error-report.h" -#include "qapi/error.h" -#include "qapi/qapi-commands-machine.h" -#include "exec/exec-all.h" -#include "monitor/monitor.h" - -static void hmp_tcg_register(void) -{ - monitor_register_hmp_info_hrt("jit", qmp_x_query_jit); - monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount); -} - -type_init(hmp_tcg_register); diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h index 6edff16fb0..24f225cac7 100644 --- a/accel/tcg/internal.h +++ b/accel/tcg/internal.h @@ -57,11 +57,25 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, /* Return the current PC from CPU, which may be cached in TB. */ static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb) { -#if TARGET_TB_PCREL - return cpu->cc->get_pc(cpu); -#else - return tb_pc(tb); -#endif + if (tb_cflags(tb) & CF_PCREL) { + return cpu->cc->get_pc(cpu); + } else { + return tb->pc; + } } +/* + * Return true if CS is not running in parallel with other cpus, either + * because there are no other cpus or we are within an exclusive context. 
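+ * When true, a torn access cannot be observed by another vcpu, so no extra + * host atomicity is needed; required_atomicity in ldst_atomicity.c.inc uses + * this to reduce its answer to MO_8.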
+ */ +static inline bool cpu_in_serial_context(CPUState *cs) +{ + return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs); +} + +extern int64_t max_delay; +extern int64_t max_advance; + +extern bool one_insn_per_tb; + #endif /* ACCEL_TCG_INTERNAL_H */ diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc new file mode 100644 index 0000000000..2514899408 --- /dev/null +++ b/accel/tcg/ldst_atomicity.c.inc @@ -0,0 +1,1103 @@ +/* + * Routines common to user and system emulation of load/store. + * + * Copyright (c) 2022 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "host/load-extract-al16-al8.h" +#include "host/store-insert-al16.h" + +#ifdef CONFIG_ATOMIC64 +# define HAVE_al8 true +#else +# define HAVE_al8 false +#endif +#define HAVE_al8_fast (ATOMIC_REG_SIZE >= 8) + +/** + * required_atomicity: + * + * Return the lg2 bytes of atomicity required by @memop for @p. + * If the operation must be split into two operations to be + * examined separately for atomicity, return -lg2. + */ +static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop) +{ + MemOp atom = memop & MO_ATOM_MASK; + MemOp size = memop & MO_SIZE; + MemOp half = size ? size - 1 : 0; + unsigned tmp; + int atmax; + + switch (atom) { + case MO_ATOM_NONE: + atmax = MO_8; + break; + + case MO_ATOM_IFALIGN_PAIR: + size = half; + /* fall through */ + + case MO_ATOM_IFALIGN: + tmp = (1 << size) - 1; + atmax = p & tmp ? MO_8 : size; + break; + + case MO_ATOM_WITHIN16: + tmp = p & 15; + atmax = (tmp + (1 << size) <= 16 ? size : MO_8); + break; + + case MO_ATOM_WITHIN16_PAIR: + tmp = p & 15; + if (tmp + (1 << size) <= 16) { + atmax = size; + } else if (tmp + (1 << half) == 16) { + /* + * The pair exactly straddles the boundary. + * Both halves are naturally aligned and atomic. + */ + atmax = half; + } else { + /* + * One of the pair crosses the boundary, and is non-atomic. + * The other of the pair does not cross, and is atomic. + */ + atmax = -half; + } + break; + + case MO_ATOM_SUBALIGN: + /* + * Examine the alignment of p to determine if there are subobjects + * that must be aligned. Note that we only really need ctz4() -- + * any more significant bits are discarded by the immediately + * following comparison. + */ + tmp = ctz32(p); + atmax = MIN(size, tmp); + break; + + default: + g_assert_not_reached(); + } + + /* + * Here we have the architectural atomicity of the operation. + * However, when executing in a serial context, we need no extra + * host atomicity in order to avoid racing. This reduction + * avoids looping with cpu_loop_exit_atomic. + */ + if (cpu_in_serial_context(env_cpu(env))) { + return MO_8; + } + return atmax; +} + +/** + * load_atomic2: + * @pv: host address + * + * Atomically load 2 aligned bytes from @pv. + */ +static inline uint16_t load_atomic2(void *pv) +{ + uint16_t *p = __builtin_assume_aligned(pv, 2); + return qatomic_read(p); +} + +/** + * load_atomic4: + * @pv: host address + * + * Atomically load 4 aligned bytes from @pv. + */ +static inline uint32_t load_atomic4(void *pv) +{ + uint32_t *p = __builtin_assume_aligned(pv, 4); + return qatomic_read(p); +} + +/** + * load_atomic8: + * @pv: host address + * + * Atomically load 8 aligned bytes from @pv. 
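+ * Requires HAVE_al8, which is enforced by the qemu_build_assert below.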
+ */ +static inline uint64_t load_atomic8(void *pv) +{ + uint64_t *p = __builtin_assume_aligned(pv, 8); + + qemu_build_assert(HAVE_al8); + return qatomic_read__nocheck(p); +} + +/** + * load_atomic8_or_exit: + * @env: cpu context + * @ra: host unwind address + * @pv: host address + * + * Atomically load 8 aligned bytes from @pv. + * If this is not possible, longjmp out to restart serially. + */ +static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv) +{ + if (HAVE_al8) { + return load_atomic8(pv); + } + +#ifdef CONFIG_USER_ONLY + /* + * If the page is not writable, then assume the value is immutable + * and requires no locking. This ignores the case of MAP_SHARED with + * another process, because the fallback start_exclusive solution + * provides no protection across processes. + */ + if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) { + uint64_t *p = __builtin_assume_aligned(pv, 8); + return *p; + } +#endif + + /* Ultimate fallback: re-execute in serial context. */ + cpu_loop_exit_atomic(env_cpu(env), ra); +} + +/** + * load_atomic16_or_exit: + * @env: cpu context + * @ra: host unwind address + * @pv: host address + * + * Atomically load 16 aligned bytes from @pv. + * If this is not possible, longjmp out to restart serially. + */ +static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv) +{ + Int128 *p = __builtin_assume_aligned(pv, 16); + + if (HAVE_ATOMIC128_RO) { + return atomic16_read_ro(p); + } + +#ifdef CONFIG_USER_ONLY + /* + * We can only use cmpxchg to emulate a load if the page is writable. + * If the page is not writable, then assume the value is immutable + * and requires no locking. This ignores the case of MAP_SHARED with + * another process, because the fallback start_exclusive solution + * provides no protection across processes. + */ + if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) { + return *p; + } +#endif + + /* + * In system mode all guest pages are writable, and for user-only + * we have just checked writability. Try cmpxchg. + */ + if (HAVE_ATOMIC128_RW) { + return atomic16_read_rw(p); + } + + /* Ultimate fallback: re-execute in serial context. */ + cpu_loop_exit_atomic(env_cpu(env), ra); +} + +/** + * load_atom_extract_al4x2: + * @pv: host address + * + * Load 4 bytes from @p, from two sequential atomic 4-byte loads. + */ +static uint32_t load_atom_extract_al4x2(void *pv) +{ + uintptr_t pi = (uintptr_t)pv; + int sh = (pi & 3) * 8; + uint32_t a, b; + + pv = (void *)(pi & ~3); + a = load_atomic4(pv); + b = load_atomic4(pv + 4); + + if (HOST_BIG_ENDIAN) { + return (a << sh) | (b >> (-sh & 31)); + } else { + return (a >> sh) | (b << (-sh & 31)); + } +} + +/** + * load_atom_extract_al8x2: + * @pv: host address + * + * Load 8 bytes from @p, from two sequential atomic 8-byte loads. + */ +static uint64_t load_atom_extract_al8x2(void *pv) +{ + uintptr_t pi = (uintptr_t)pv; + int sh = (pi & 7) * 8; + uint64_t a, b; + + pv = (void *)(pi & ~7); + a = load_atomic8(pv); + b = load_atomic8(pv + 8); + + if (HOST_BIG_ENDIAN) { + return (a << sh) | (b >> (-sh & 63)); + } else { + return (a >> sh) | (b << (-sh & 63)); + } +} + +/** + * load_atom_extract_al8_or_exit: + * @env: cpu context + * @ra: host unwind address + * @pv: host address + * @s: object size in bytes, @s <= 4. + * + * Atomically load @s bytes from @p, when p % s != 0, and [p, p+s-1] does + * not cross an 8-byte boundary. This means that we can perform an atomic + * 8-byte load and extract. + * The value is returned in the low bits of a uint32_t. 
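+ * E.g. (illustrative numbers, derived from the code below) on a + * little-endian host, @s = 2 at p % 8 == 3 gives o = 3 and shr = 24: + * bytes 3 and 4 of the aligned 8-byte load land in the low 16 bits + * of the result.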
+ */ +static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra, + void *pv, int s) +{ + uintptr_t pi = (uintptr_t)pv; + int o = pi & 7; + int shr = (HOST_BIG_ENDIAN ? 8 - s - o : o) * 8; + + pv = (void *)(pi & ~7); + return load_atomic8_or_exit(env, ra, pv) >> shr; +} + +/** + * load_atom_extract_al16_or_exit: + * @env: cpu context + * @ra: host unwind address + * @p: host address + * @s: object size in bytes, @s <= 8. + * + * Atomically load @s bytes from @p, when p % 16 < 8 + * and p % 16 + s > 8. I.e. does not cross a 16-byte + * boundary, but *does* cross an 8-byte boundary. + * This is the slow version, so we must have eliminated + * any faster load_atom_extract_al8_or_exit case. + * + * If this is not possible, longjmp out to restart serially. + */ +static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra, + void *pv, int s) +{ + uintptr_t pi = (uintptr_t)pv; + int o = pi & 7; + int shr = (HOST_BIG_ENDIAN ? 16 - s - o : o) * 8; + Int128 r; + + /* + * Note constraints above: p & 8 must be clear. + * Provoke SIGBUS if possible otherwise. + */ + pv = (void *)(pi & ~7); + r = load_atomic16_or_exit(env, ra, pv); + + r = int128_urshift(r, shr); + return int128_getlo(r); +} + +/** + * load_atom_4_by_2: + * @pv: host address + * + * Load 4 bytes from @pv, with two 2-byte atomic loads. + */ +static inline uint32_t load_atom_4_by_2(void *pv) +{ + uint32_t a = load_atomic2(pv); + uint32_t b = load_atomic2(pv + 2); + + if (HOST_BIG_ENDIAN) { + return (a << 16) | b; + } else { + return (b << 16) | a; + } +} + +/** + * load_atom_8_by_2: + * @pv: host address + * + * Load 8 bytes from @pv, with four 2-byte atomic loads. + */ +static inline uint64_t load_atom_8_by_2(void *pv) +{ + uint32_t a = load_atom_4_by_2(pv); + uint32_t b = load_atom_4_by_2(pv + 4); + + if (HOST_BIG_ENDIAN) { + return ((uint64_t)a << 32) | b; + } else { + return ((uint64_t)b << 32) | a; + } +} + +/** + * load_atom_8_by_4: + * @pv: host address + * + * Load 8 bytes from @pv, with two 4-byte atomic loads. + */ +static inline uint64_t load_atom_8_by_4(void *pv) +{ + uint32_t a = load_atomic4(pv); + uint32_t b = load_atomic4(pv + 4); + + if (HOST_BIG_ENDIAN) { + return ((uint64_t)a << 32) | b; + } else { + return ((uint64_t)b << 32) | a; + } +} + +/** + * load_atom_8_by_8_or_4: + * @pv: host address + * + * Load 8 bytes from aligned @pv, with at least 4-byte atomicity. + */ +static inline uint64_t load_atom_8_by_8_or_4(void *pv) +{ + if (HAVE_al8_fast) { + return load_atomic8(pv); + } else { + return load_atom_8_by_4(pv); + } +} + +/** + * load_atom_2: + * @p: host address + * @memop: the full memory op + * + * Load 2 bytes from @p, honoring the atomicity of @memop. + */ +static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + + if (likely((pi & 1) == 0)) { + return load_atomic2(pv); + } + if (HAVE_ATOMIC128_RO) { + return load_atom_extract_al16_or_al8(pv, 2); + } + + atmax = required_atomicity(env, pi, memop); + switch (atmax) { + case MO_8: + return lduw_he_p(pv); + case MO_16: + /* The only case remaining is MO_ATOM_WITHIN16. */ + if (!HAVE_al8_fast && (pi & 3) == 1) { + /* Big or little endian, we want the middle two bytes. 
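Shifting the aligned + word loaded at pv - 1 right by 8 and truncating to 16 bits yields + them in host order.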
*/ + return load_atomic4(pv - 1) >> 8; + } + if ((pi & 15) != 7) { + return load_atom_extract_al8_or_exit(env, ra, pv, 2); + } + return load_atom_extract_al16_or_exit(env, ra, pv, 2); + default: + g_assert_not_reached(); + } +} + +/** + * load_atom_4: + * @p: host address + * @memop: the full memory op + * + * Load 4 bytes from @p, honoring the atomicity of @memop. + */ +static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + + if (likely((pi & 3) == 0)) { + return load_atomic4(pv); + } + if (HAVE_ATOMIC128_RO) { + return load_atom_extract_al16_or_al8(pv, 4); + } + + atmax = required_atomicity(env, pi, memop); + switch (atmax) { + case MO_8: + case MO_16: + case -MO_16: + /* + * For MO_ATOM_IFALIGN, this is more atomicity than required, + * but it's trivially supported on all hosts, better than 4 + * individual byte loads (when the host requires alignment), + * and overlaps with the MO_ATOM_SUBALIGN case of p % 2 == 0. + */ + return load_atom_extract_al4x2(pv); + case MO_32: + if (!(pi & 4)) { + return load_atom_extract_al8_or_exit(env, ra, pv, 4); + } + return load_atom_extract_al16_or_exit(env, ra, pv, 4); + default: + g_assert_not_reached(); + } +} + +/** + * load_atom_8: + * @p: host address + * @memop: the full memory op + * + * Load 8 bytes from @p, honoring the atomicity of @memop. + */ +static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + + /* + * If the host does not support 8-byte atomics, wait until we have + * examined the atomicity parameters below. + */ + if (HAVE_al8 && likely((pi & 7) == 0)) { + return load_atomic8(pv); + } + if (HAVE_ATOMIC128_RO) { + return load_atom_extract_al16_or_al8(pv, 8); + } + + atmax = required_atomicity(env, pi, memop); + if (atmax == MO_64) { + if (!HAVE_al8 && (pi & 7) == 0) { + return load_atomic8_or_exit(env, ra, pv); + } + return load_atom_extract_al16_or_exit(env, ra, pv, 8); + } + if (HAVE_al8_fast) { + return load_atom_extract_al8x2(pv); + } + switch (atmax) { + case MO_8: + return ldq_he_p(pv); + case MO_16: + return load_atom_8_by_2(pv); + case MO_32: + return load_atom_8_by_4(pv); + case -MO_32: + if (HAVE_al8) { + return load_atom_extract_al8x2(pv); + } + cpu_loop_exit_atomic(env_cpu(env), ra); + default: + g_assert_not_reached(); + } +} + +/** + * load_atom_16: + * @p: host address + * @memop: the full memory op + * + * Load 16 bytes from @p, honoring the atomicity of @memop. + */ +static Int128 load_atom_16(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + Int128 r; + uint64_t a, b; + + /* + * If the host does not support 16-byte atomics, wait until we have + * examined the atomicity parameters below. 
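+ * (In a serial context required_atomicity returns MO_8 and the plain + * memcpy case below is sufficient.)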
+ */ + if (HAVE_ATOMIC128_RO && likely((pi & 15) == 0)) { + return atomic16_read_ro(pv); + } + + atmax = required_atomicity(env, pi, memop); + switch (atmax) { + case MO_8: + memcpy(&r, pv, 16); + return r; + case MO_16: + a = load_atom_8_by_2(pv); + b = load_atom_8_by_2(pv + 8); + break; + case MO_32: + a = load_atom_8_by_4(pv); + b = load_atom_8_by_4(pv + 8); + break; + case MO_64: + if (!HAVE_al8) { + cpu_loop_exit_atomic(env_cpu(env), ra); + } + a = load_atomic8(pv); + b = load_atomic8(pv + 8); + break; + case -MO_64: + if (!HAVE_al8) { + cpu_loop_exit_atomic(env_cpu(env), ra); + } + a = load_atom_extract_al8x2(pv); + b = load_atom_extract_al8x2(pv + 8); + break; + case MO_128: + return load_atomic16_or_exit(env, ra, pv); + default: + g_assert_not_reached(); + } + return int128_make128(HOST_BIG_ENDIAN ? b : a, HOST_BIG_ENDIAN ? a : b); +} + +/** + * store_atomic2: + * @pv: host address + * @val: value to store + * + * Atomically store 2 aligned bytes to @pv. + */ +static inline void store_atomic2(void *pv, uint16_t val) +{ + uint16_t *p = __builtin_assume_aligned(pv, 2); + qatomic_set(p, val); +} + +/** + * store_atomic4: + * @pv: host address + * @val: value to store + * + * Atomically store 4 aligned bytes to @pv. + */ +static inline void store_atomic4(void *pv, uint32_t val) +{ + uint32_t *p = __builtin_assume_aligned(pv, 4); + qatomic_set(p, val); +} + +/** + * store_atomic8: + * @pv: host address + * @val: value to store + * + * Atomically store 8 aligned bytes to @pv. + */ +static inline void store_atomic8(void *pv, uint64_t val) +{ + uint64_t *p = __builtin_assume_aligned(pv, 8); + + qemu_build_assert(HAVE_al8); + qatomic_set__nocheck(p, val); +} + +/** + * store_atom_4_by_2 + */ +static inline void store_atom_4_by_2(void *pv, uint32_t val) +{ + store_atomic2(pv, val >> (HOST_BIG_ENDIAN ? 16 : 0)); + store_atomic2(pv + 2, val >> (HOST_BIG_ENDIAN ? 0 : 16)); +} + +/** + * store_atom_8_by_2 + */ +static inline void store_atom_8_by_2(void *pv, uint64_t val) +{ + store_atom_4_by_2(pv, val >> (HOST_BIG_ENDIAN ? 32 : 0)); + store_atom_4_by_2(pv + 4, val >> (HOST_BIG_ENDIAN ? 0 : 32)); +} + +/** + * store_atom_8_by_4 + */ +static inline void store_atom_8_by_4(void *pv, uint64_t val) +{ + store_atomic4(pv, val >> (HOST_BIG_ENDIAN ? 32 : 0)); + store_atomic4(pv + 4, val >> (HOST_BIG_ENDIAN ? 0 : 32)); +} + +/** + * store_atom_insert_al4: + * @p: host address + * @val: shifted value to store + * @msk: mask for value to store + * + * Atomically store @val to @p, masked by @msk. + */ +static void store_atom_insert_al4(uint32_t *p, uint32_t val, uint32_t msk) +{ + uint32_t old, new; + + p = __builtin_assume_aligned(p, 4); + old = qatomic_read(p); + do { + new = (old & ~msk) | val; + } while (!__atomic_compare_exchange_n(p, &old, new, true, + __ATOMIC_RELAXED, __ATOMIC_RELAXED)); +} + +/** + * store_atom_insert_al8: + * @p: host address + * @val: shifted value to store + * @msk: mask for value to store + * + * Atomically store @val to @p masked by @msk. + */ +static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk) +{ + uint64_t old, new; + + qemu_build_assert(HAVE_al8); + p = __builtin_assume_aligned(p, 8); + old = qatomic_read__nocheck(p); + do { + new = (old & ~msk) | val; + } while (!__atomic_compare_exchange_n(p, &old, new, true, + __ATOMIC_RELAXED, __ATOMIC_RELAXED)); +} + +/** + * store_bytes_leN: + * @pv: host address + * @size: number of bytes to store + * @val_le: data to store + * + * Store @size bytes at @p. 
The bytes to store are extracted in little-endian order + * from @val_le; return the bytes of @val_le beyond @size that have not been stored. + */ +static uint64_t store_bytes_leN(void *pv, int size, uint64_t val_le) +{ + uint8_t *p = pv; + for (int i = 0; i < size; i++, val_le >>= 8) { + p[i] = val_le; + } + return val_le; +} + +/** + * store_parts_leN + * @pv: host address + * @size: number of bytes to store + * @val_le: data to store + * + * As store_bytes_leN, but atomically on each aligned part. + */ +G_GNUC_UNUSED +static uint64_t store_parts_leN(void *pv, int size, uint64_t val_le) +{ + do { + int n; + + /* Find minimum of alignment and size */ + switch (((uintptr_t)pv | size) & 7) { + case 4: + store_atomic4(pv, le32_to_cpu(val_le)); + val_le >>= 32; + n = 4; + break; + case 2: + case 6: + store_atomic2(pv, le16_to_cpu(val_le)); + val_le >>= 16; + n = 2; + break; + default: + *(uint8_t *)pv = val_le; + val_le >>= 8; + n = 1; + break; + case 0: + g_assert_not_reached(); + } + pv += n; + size -= n; + } while (size != 0); + + return val_le; +} + +/** + * store_whole_le4 + * @pv: host address + * @size: number of bytes to store + * @val_le: data to store + * + * As store_bytes_leN, but atomically as a whole. + * Four aligned bytes are guaranteed to cover the store. + */ +static uint64_t store_whole_le4(void *pv, int size, uint64_t val_le) +{ + int sz = size * 8; + int o = (uintptr_t)pv & 3; + int sh = o * 8; + uint32_t m = MAKE_64BIT_MASK(0, sz); + uint32_t v; + + if (HOST_BIG_ENDIAN) { + v = bswap32(val_le) >> sh; + m = bswap32(m) >> sh; + } else { + v = val_le << sh; + m <<= sh; + } + store_atom_insert_al4(pv - o, v, m); + return val_le >> sz; +} + +/** + * store_whole_le8 + * @pv: host address + * @size: number of bytes to store + * @val_le: data to store + * + * As store_bytes_leN, but atomically as a whole. + * Eight aligned bytes are guaranteed to cover the store. + */ +static uint64_t store_whole_le8(void *pv, int size, uint64_t val_le) +{ + int sz = size * 8; + int o = (uintptr_t)pv & 7; + int sh = o * 8; + uint64_t m = MAKE_64BIT_MASK(0, sz); + uint64_t v; + + qemu_build_assert(HAVE_al8); + if (HOST_BIG_ENDIAN) { + v = bswap64(val_le) >> sh; + m = bswap64(m) >> sh; + } else { + v = val_le << sh; + m <<= sh; + } + store_atom_insert_al8(pv - o, v, m); + return val_le >> sz; +} + +/** + * store_whole_le16 + * @pv: host address + * @size: number of bytes to store + * @val_le: data to store + * + * As store_bytes_leN, but atomically as a whole. + * 16 aligned bytes are guaranteed to cover the store. + */ +static uint64_t store_whole_le16(void *pv, int size, Int128 val_le) +{ + int sz = size * 8; + int o = (uintptr_t)pv & 15; + int sh = o * 8; + Int128 m, v; + + qemu_build_assert(HAVE_ATOMIC128_RW); + + /* Like MAKE_64BIT_MASK(0, sz), but larger. */ + if (sz <= 64) { + m = int128_make64(MAKE_64BIT_MASK(0, sz)); + } else { + m = int128_make128(-1, MAKE_64BIT_MASK(0, sz - 64)); + } + + if (HOST_BIG_ENDIAN) { + v = int128_urshift(bswap128(val_le), sh); + m = int128_urshift(bswap128(m), sh); + } else { + v = int128_lshift(val_le, sh); + m = int128_lshift(m, sh); + } + store_atom_insert_al16(pv - o, v, m); + + /* Unused if sz <= 64. */ + return int128_gethi(val_le) >> (sz - 64); +} + +/** + * store_atom_2: + * @p: host address + * @val: the value to store + * @memop: the full memory op + * + * Store 2 bytes to @p, honoring the atomicity of @memop. 
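+ * A misaligned store that must remain atomic is widened to an atomic + * read-modify-write of the 4, 8 or 16 aligned bytes covering it; see + * the (pi & 3), (pi & 7) and (pi & 15) cases below.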
+ */ +static void store_atom_2(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop, uint16_t val) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + + if (likely((pi & 1) == 0)) { + store_atomic2(pv, val); + return; + } + + atmax = required_atomicity(env, pi, memop); + if (atmax == MO_8) { + stw_he_p(pv, val); + return; + } + + /* + * The only case remaining is MO_ATOM_WITHIN16. + * Big or little endian, we want the middle two bytes in each test. + */ + if ((pi & 3) == 1) { + store_atom_insert_al4(pv - 1, (uint32_t)val << 8, MAKE_64BIT_MASK(8, 16)); + return; + } else if ((pi & 7) == 3) { + if (HAVE_al8) { + store_atom_insert_al8(pv - 3, (uint64_t)val << 24, MAKE_64BIT_MASK(24, 16)); + return; + } + } else if ((pi & 15) == 7) { + if (HAVE_ATOMIC128_RW) { + Int128 v = int128_lshift(int128_make64(val), 56); + Int128 m = int128_lshift(int128_make64(0xffff), 56); + store_atom_insert_al16(pv - 7, v, m); + return; + } + } else { + g_assert_not_reached(); + } + + cpu_loop_exit_atomic(env_cpu(env), ra); +} + +/** + * store_atom_4: + * @p: host address + * @val: the value to store + * @memop: the full memory op + * + * Store 4 bytes to @p, honoring the atomicity of @memop. + */ +static void store_atom_4(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop, uint32_t val) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + + if (likely((pi & 3) == 0)) { + store_atomic4(pv, val); + return; + } + + atmax = required_atomicity(env, pi, memop); + switch (atmax) { + case MO_8: + stl_he_p(pv, val); + return; + case MO_16: + store_atom_4_by_2(pv, val); + return; + case -MO_16: + { + uint32_t val_le = cpu_to_le32(val); + int s2 = pi & 3; + int s1 = 4 - s2; + + switch (s2) { + case 1: + val_le = store_whole_le4(pv, s1, val_le); + *(uint8_t *)(pv + 3) = val_le; + break; + case 3: + *(uint8_t *)pv = val_le; + store_whole_le4(pv + 1, s2, val_le >> 8); + break; + case 0: /* aligned */ + case 2: /* atmax MO_16 */ + default: + g_assert_not_reached(); + } + } + return; + case MO_32: + if ((pi & 7) < 4) { + if (HAVE_al8) { + store_whole_le8(pv, 4, cpu_to_le32(val)); + return; + } + } else { + if (HAVE_ATOMIC128_RW) { + store_whole_le16(pv, 4, int128_make64(cpu_to_le32(val))); + return; + } + } + cpu_loop_exit_atomic(env_cpu(env), ra); + default: + g_assert_not_reached(); + } +} + +/** + * store_atom_8: + * @p: host address + * @val: the value to store + * @memop: the full memory op + * + * Store 8 bytes to @p, honoring the atomicity of @memop. + */ +static void store_atom_8(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop, uint64_t val) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + + if (HAVE_al8 && likely((pi & 7) == 0)) { + store_atomic8(pv, val); + return; + } + + atmax = required_atomicity(env, pi, memop); + switch (atmax) { + case MO_8: + stq_he_p(pv, val); + return; + case MO_16: + store_atom_8_by_2(pv, val); + return; + case MO_32: + store_atom_8_by_4(pv, val); + return; + case -MO_32: + if (HAVE_al8) { + uint64_t val_le = cpu_to_le64(val); + int s2 = pi & 7; + int s1 = 8 - s2; + + switch (s2) { + case 1 ... 3: + val_le = store_whole_le8(pv, s1, val_le); + store_bytes_leN(pv + s1, s2, val_le); + break; + case 5 ... 
7: + val_le = store_bytes_leN(pv, s1, val_le); + store_whole_le8(pv + s1, s2, val_le); + break; + case 0: /* aligned */ + case 4: /* atmax MO_32 */ + default: + g_assert_not_reached(); + } + return; + } + break; + case MO_64: + if (HAVE_ATOMIC128_RW) { + store_whole_le16(pv, 8, int128_make64(cpu_to_le64(val))); + return; + } + break; + default: + g_assert_not_reached(); + } + cpu_loop_exit_atomic(env_cpu(env), ra); +} + +/** + * store_atom_16: + * @p: host address + * @val: the value to store + * @memop: the full memory op + * + * Store 16 bytes to @p, honoring the atomicity of @memop. + */ +static void store_atom_16(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop, Int128 val) +{ + uintptr_t pi = (uintptr_t)pv; + uint64_t a, b; + int atmax; + + if (HAVE_ATOMIC128_RW && likely((pi & 15) == 0)) { + atomic16_set(pv, val); + return; + } + + atmax = required_atomicity(env, pi, memop); + + a = HOST_BIG_ENDIAN ? int128_gethi(val) : int128_getlo(val); + b = HOST_BIG_ENDIAN ? int128_getlo(val) : int128_gethi(val); + switch (atmax) { + case MO_8: + memcpy(pv, &val, 16); + return; + case MO_16: + store_atom_8_by_2(pv, a); + store_atom_8_by_2(pv + 8, b); + return; + case MO_32: + store_atom_8_by_4(pv, a); + store_atom_8_by_4(pv + 8, b); + return; + case MO_64: + if (HAVE_al8) { + store_atomic8(pv, a); + store_atomic8(pv + 8, b); + return; + } + break; + case -MO_64: + if (HAVE_ATOMIC128_RW) { + uint64_t val_le; + int s2 = pi & 15; + int s1 = 16 - s2; + + if (HOST_BIG_ENDIAN) { + val = bswap128(val); + } + switch (s2) { + case 1 ... 7: + val_le = store_whole_le16(pv, s1, val); + store_bytes_leN(pv + s1, s2, val_le); + break; + case 9 ... 15: + store_bytes_leN(pv, s1, int128_getlo(val)); + val = int128_urshift(val, s1 * 8); + store_whole_le16(pv + s1, s2, val); + break; + case 0: /* aligned */ + case 8: /* atmax MO_64 */ + default: + g_assert_not_reached(); + } + return; + } + break; + case MO_128: + if (HAVE_ATOMIC128_RW) { + atomic16_set(pv, val); + return; + } + break; + default: + g_assert_not_reached(); + } + cpu_loop_exit_atomic(env_cpu(env), ra); +} diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc index 6ac8d871a3..5f8144b33a 100644 --- a/accel/tcg/ldst_common.c.inc +++ b/accel/tcg/ldst_common.c.inc @@ -26,7 +26,7 @@ uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) { MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); - return cpu_ldw_be_mmu(env, addr, oi, ra); + return cpu_ldw_mmu(env, addr, oi, ra); } int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, @@ -39,21 +39,21 @@ uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) { MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); - return cpu_ldl_be_mmu(env, addr, oi, ra); + return cpu_ldl_mmu(env, addr, oi, ra); } uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) { MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx); - return cpu_ldq_be_mmu(env, addr, oi, ra); + return cpu_ldq_mmu(env, addr, oi, ra); } uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) { MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); - return cpu_ldw_le_mmu(env, addr, oi, ra); + return cpu_ldw_mmu(env, addr, oi, ra); } int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, @@ -66,14 +66,14 @@ uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) { MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); - return 
cpu_ldl_le_mmu(env, addr, oi, ra); + return cpu_ldl_mmu(env, addr, oi, ra); } uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) { MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx); - return cpu_ldq_le_mmu(env, addr, oi, ra); + return cpu_ldq_mmu(env, addr, oi, ra); } void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, @@ -87,42 +87,42 @@ void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, int mmu_idx, uintptr_t ra) { MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); - cpu_stw_be_mmu(env, addr, val, oi, ra); + cpu_stw_mmu(env, addr, val, oi, ra); } void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, int mmu_idx, uintptr_t ra) { MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); - cpu_stl_be_mmu(env, addr, val, oi, ra); + cpu_stl_mmu(env, addr, val, oi, ra); } void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, int mmu_idx, uintptr_t ra) { MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx); - cpu_stq_be_mmu(env, addr, val, oi, ra); + cpu_stq_mmu(env, addr, val, oi, ra); } void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, int mmu_idx, uintptr_t ra) { MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); - cpu_stw_le_mmu(env, addr, val, oi, ra); + cpu_stw_mmu(env, addr, val, oi, ra); } void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, int mmu_idx, uintptr_t ra) { MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); - cpu_stl_le_mmu(env, addr, val, oi, ra); + cpu_stl_mmu(env, addr, val, oi, ra); } void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, int mmu_idx, uintptr_t ra) { MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx); - cpu_stq_le_mmu(env, addr, val, oi, ra); + cpu_stq_mmu(env, addr, val, oi, ra); } /*--------------------------*/ diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build index 77740b1a0d..aeb20a6ef0 100644 --- a/accel/tcg/meson.build +++ b/accel/tcg/meson.build @@ -18,7 +18,7 @@ specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss) specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files( 'cputlb.c', - 'hmp.c', + 'monitor.c', )) tcg_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files( diff --git a/accel/tcg/monitor.c b/accel/tcg/monitor.c new file mode 100644 index 0000000000..92fce580f1 --- /dev/null +++ b/accel/tcg/monitor.c @@ -0,0 +1,120 @@ +/* + * SPDX-License-Identifier: LGPL-2.1-or-later + * + * QEMU TCG monitor + * + * Copyright (c) 2003-2005 Fabrice Bellard + */ + +#include "qemu/osdep.h" +#include "qemu/accel.h" +#include "qapi/error.h" +#include "qapi/type-helpers.h" +#include "qapi/qapi-commands-machine.h" +#include "monitor/monitor.h" +#include "sysemu/cpus.h" +#include "sysemu/cpu-timers.h" +#include "sysemu/tcg.h" +#include "internal.h" + + +static void dump_drift_info(GString *buf) +{ + if (!icount_enabled()) { + return; + } + + g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n", + (cpu_get_clock() - icount_get()) / SCALE_MS); + if (icount_align_option) { + g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n", + -max_delay / SCALE_MS); + g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n", + max_advance / SCALE_MS); + } else { + g_string_append_printf(buf, "Max guest delay NA\n"); + g_string_append_printf(buf, "Max guest advance NA\n"); + } +} + +static void dump_accel_info(GString *buf) +{ + AccelState *accel = current_accel(); + bool one_insn_per_tb = 
object_property_get_bool(OBJECT(accel), + "one-insn-per-tb", + &error_fatal); + + g_string_append_printf(buf, "Accelerator settings:\n"); + g_string_append_printf(buf, "one-insn-per-tb: %s\n\n", + one_insn_per_tb ? "on" : "off"); +} + +HumanReadableText *qmp_x_query_jit(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + if (!tcg_enabled()) { + error_setg(errp, "JIT information is only available with accel=tcg"); + return NULL; + } + + dump_accel_info(buf); + dump_exec_info(buf); + dump_drift_info(buf); + + return human_readable_text_from_str(buf); +} + +HumanReadableText *qmp_x_query_opcount(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + if (!tcg_enabled()) { + error_setg(errp, + "Opcode count information is only available with accel=tcg"); + return NULL; + } + + tcg_dump_op_count(buf); + + return human_readable_text_from_str(buf); +} + +#ifdef CONFIG_PROFILER + +int64_t dev_time; + +HumanReadableText *qmp_x_query_profile(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + static int64_t last_cpu_exec_time; + int64_t cpu_exec_time; + int64_t delta; + + cpu_exec_time = tcg_cpu_exec_time(); + delta = cpu_exec_time - last_cpu_exec_time; + + g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n", + dev_time, dev_time / (double)NANOSECONDS_PER_SECOND); + g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n", + delta, delta / (double)NANOSECONDS_PER_SECOND); + last_cpu_exec_time = cpu_exec_time; + dev_time = 0; + + return human_readable_text_from_str(buf); +} +#else +HumanReadableText *qmp_x_query_profile(Error **errp) +{ + error_setg(errp, "Internal profiler not compiled"); + return NULL; +} +#endif + +static void hmp_tcg_register(void) +{ + monitor_register_hmp_info_hrt("jit", qmp_x_query_jit); + monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount); +} + +type_init(hmp_tcg_register); diff --git a/accel/tcg/perf.c b/accel/tcg/perf.c index ae19f6e28f..65e35ea3b9 100644 --- a/accel/tcg/perf.c +++ b/accel/tcg/perf.c @@ -328,7 +328,7 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb, for (insn = 0; insn < tb->icount; insn++) { /* FIXME: This replicates the restore_state_to_opc() logic. 
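The pattern in the new monitor.c is worth noting: a QMP handler that returns HumanReadableText can be reused directly as an HMP "info" command through monitor_register_hmp_info_hrt(), which is how "jit" and "opcount" are registered above. A minimal sketch of the same pattern for a hypothetical "info foo" command; the command name and the reported value are illustrative only, not part of this patch:

    static HumanReadableText *qmp_x_query_foo(Error **errp)
    {
        g_autoptr(GString) buf = g_string_new("");

        g_string_append_printf(buf, "foo counter: %d\n", 42);
        return human_readable_text_from_str(buf);
    }

    static void foo_register(void)
    {
        /* Exposes the handler as "info foo" in the HMP monitor. */
        monitor_register_hmp_info_hrt("foo", qmp_x_query_foo);
    }
    type_init(foo_register);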
*/ q[insn].address = tcg_ctx->gen_insn_data[insn][0]; - if (TARGET_TB_PCREL) { + if (tb_cflags(tb) & CF_PCREL) { q[insn].address |= (guest_pc & TARGET_PAGE_MASK); } else { #if defined(TARGET_I386) diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c index 17a686bd9e..5b73a39ce5 100644 --- a/accel/tcg/plugin-gen.c +++ b/accel/tcg/plugin-gen.c @@ -44,6 +44,7 @@ */ #include "qemu/osdep.h" #include "tcg/tcg.h" +#include "tcg/tcg-temp-internal.h" #include "tcg/tcg-op.h" #include "exec/exec-all.h" #include "exec/plugin-gen.h" @@ -91,30 +92,12 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index, void *userdata) { } -static void do_gen_mem_cb(TCGv vaddr, uint32_t info) -{ - TCGv_i32 cpu_index = tcg_temp_new_i32(); - TCGv_i32 meminfo = tcg_const_i32(info); - TCGv_i64 vaddr64 = tcg_temp_new_i64(); - TCGv_ptr udata = tcg_const_ptr(NULL); - - tcg_gen_ld_i32(cpu_index, cpu_env, - -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); - tcg_gen_extu_tl_i64(vaddr64, vaddr); - - gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata); - - tcg_temp_free_ptr(udata); - tcg_temp_free_i64(vaddr64); - tcg_temp_free_i32(meminfo); - tcg_temp_free_i32(cpu_index); -} - static void gen_empty_udata_cb(void) { - TCGv_i32 cpu_index = tcg_temp_new_i32(); - TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */ + TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); + TCGv_ptr udata = tcg_temp_ebb_new_ptr(); + tcg_gen_movi_ptr(udata, 0); tcg_gen_ld_i32(cpu_index, cpu_env, -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); gen_helper_plugin_vcpu_udata_cb(cpu_index, udata); @@ -129,9 +112,10 @@ static void gen_empty_udata_cb(void) */ static void gen_empty_inline_cb(void) { - TCGv_i64 val = tcg_temp_new_i64(); - TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */ + TCGv_i64 val = tcg_temp_ebb_new_i64(); + TCGv_ptr ptr = tcg_temp_ebb_new_ptr(); + tcg_gen_movi_ptr(ptr, 0); tcg_gen_ld_i64(val, ptr, 0); /* pass an immediate != 0 so that it doesn't get optimized away */ tcg_gen_addi_i64(val, val, 0xdeadface); @@ -140,9 +124,22 @@ static void gen_empty_inline_cb(void) tcg_temp_free_i64(val); } -static void gen_empty_mem_cb(TCGv addr, uint32_t info) +static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info) { - do_gen_mem_cb(addr, info); + TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); + TCGv_i32 meminfo = tcg_temp_ebb_new_i32(); + TCGv_ptr udata = tcg_temp_ebb_new_ptr(); + + tcg_gen_movi_i32(meminfo, info); + tcg_gen_movi_ptr(udata, 0); + tcg_gen_ld_i32(cpu_index, cpu_env, + -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); + + gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata); + + tcg_temp_free_ptr(udata); + tcg_temp_free_i32(meminfo); + tcg_temp_free_i32(cpu_index); } /* @@ -151,9 +148,9 @@ static void gen_empty_mem_cb(TCGv addr, uint32_t info) */ static void gen_empty_mem_helper(void) { - TCGv_ptr ptr; + TCGv_ptr ptr = tcg_temp_ebb_new_ptr(); - ptr = tcg_const_ptr(NULL); + tcg_gen_movi_ptr(ptr, 0); tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env)); tcg_temp_free_ptr(ptr); @@ -197,35 +194,17 @@ static void plugin_gen_empty_callback(enum plugin_gen_from from) } } -union mem_gen_fn { - void (*mem_fn)(TCGv, uint32_t); - void (*inline_fn)(void); -}; - -static void gen_mem_wrapped(enum plugin_gen_cb type, - const union mem_gen_fn *f, TCGv addr, - uint32_t info, bool is_mem) +void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info) { enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); - 
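The recurring change in plugin-gen.c below replaces the removed tcg_const_*() helpers with an EBB-scoped temporary plus an explicit move; the separate tcg_gen_movi_*() op gives the later injection pass a concrete instruction to locate and overwrite with the real callback data. The two idioms side by side, as used in this file:

    /* Old idiom (removed): allocate and initialize in one call. */
    TCGv_ptr udata = tcg_const_ptr(NULL);

    /* New idiom: EBB-lifetime temp, then an explicit movi that the
     * callback injection can later find and patch. */
    TCGv_ptr udata = tcg_temp_ebb_new_ptr();
    tcg_gen_movi_ptr(udata, 0);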
gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw); - if (is_mem) { - f->mem_fn(addr, info); - } else { - f->inline_fn(); - } + gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw); + gen_empty_mem_cb(addr, info); tcg_gen_plugin_cb_end(); -} -void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) -{ - union mem_gen_fn fn; - - fn.mem_fn = gen_empty_mem_cb; - gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true); - - fn.inline_fn = gen_empty_inline_cb; - gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false); + gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw); + gen_empty_inline_cb(); + tcg_gen_plugin_cb_end(); } static TCGOp *find_op(TCGOp *op, TCGOpcode opc) @@ -275,33 +254,6 @@ static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc) return op; } -static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op) -{ - if (TCG_TARGET_REG_BITS == 32) { - /* mov_i32 */ - op = copy_op(begin_op, op, INDEX_op_mov_i32); - /* mov_i32 w/ $0 */ - op = copy_op(begin_op, op, INDEX_op_mov_i32); - } else { - /* extu_i32_i64 */ - op = copy_op(begin_op, op, INDEX_op_extu_i32_i64); - } - return op; -} - -static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op) -{ - if (TCG_TARGET_REG_BITS == 32) { - /* 2x mov_i32 */ - op = copy_op(begin_op, op, INDEX_op_mov_i32); - op = copy_op(begin_op, op, INDEX_op_mov_i32); - } else { - /* mov_i64 */ - op = copy_op(begin_op, op, INDEX_op_mov_i64); - } - return op; -} - static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr) { if (UINTPTR_MAX == UINT32_MAX) { @@ -316,18 +268,6 @@ static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr) return op; } -static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op) -{ - if (TARGET_LONG_BITS == 32) { - /* extu_i32_i64 */ - op = copy_extu_i32_i64(begin_op, op); - } else { - /* mov_i64 */ - op = copy_mov_i64(begin_op, op); - } - return op; -} - static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op) { if (TCG_TARGET_REG_BITS == 32) { @@ -472,9 +412,6 @@ static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb, tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32); } - /* extu_tl_i64 */ - op = copy_extu_tl_i64(&begin_op, op); - if (type == PLUGIN_GEN_CB_MEM) { /* call */ op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb), @@ -626,8 +563,6 @@ static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn, /* called before finishing a TB with exit_tb, goto_tb or goto_ptr */ void plugin_gen_disable_mem_helpers(void) { - TCGv_ptr ptr; - /* * We could emit the clearing unconditionally and be done. However, this can * be wasteful if for instance plugins don't track memory accesses, or if @@ -640,10 +575,8 @@ void plugin_gen_disable_mem_helpers(void) if (!tcg_ctx->plugin_tb->mem_helper) { return; } - ptr = tcg_const_ptr(NULL); - tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) - - offsetof(ArchCPU, env)); - tcg_temp_free_ptr(ptr); + tcg_gen_st_ptr(tcg_constant_ptr(NULL), cpu_env, + offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env)); } static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb, diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h index b3f6e78835..bee87eb840 100644 --- a/accel/tcg/tb-jmp-cache.h +++ b/accel/tcg/tb-jmp-cache.h @@ -14,53 +14,15 @@ /* * Accessed in parallel; all accesses to 'tb' must be atomic. - * For TARGET_TB_PCREL, accesses to 'pc' must be protected by - * a load_acquire/store_release to 'tb'. 
+ * For CF_PCREL, accesses to 'pc' must be protected by a + * load_acquire/store_release to 'tb'. */ struct CPUJumpCache { struct rcu_head rcu; struct { TranslationBlock *tb; -#if TARGET_TB_PCREL target_ulong pc; -#endif } array[TB_JMP_CACHE_SIZE]; }; -static inline TranslationBlock * -tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash) -{ -#if TARGET_TB_PCREL - /* Use acquire to ensure current load of pc from jc. */ - return qatomic_load_acquire(&jc->array[hash].tb); -#else - /* Use rcu_read to ensure current load of pc from *tb. */ - return qatomic_rcu_read(&jc->array[hash].tb); -#endif -} - -static inline target_ulong -tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb) -{ -#if TARGET_TB_PCREL - return jc->array[hash].pc; -#else - return tb_pc(tb); -#endif -} - -static inline void -tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash, - TranslationBlock *tb, target_ulong pc) -{ -#if TARGET_TB_PCREL - jc->array[hash].pc = pc; - /* Use store_release on tb to ensure pc is written first. */ - qatomic_store_release(&jc->array[hash].tb, tb); -#else - /* Use the pc value already stored in tb->pc. */ - qatomic_set(&jc->array[hash].tb, tb); -#endif -} - #endif /* ACCEL_TCG_TB_JMP_CACHE_H */ diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c index b3d6529ae2..991746f80f 100644 --- a/accel/tcg/tb-maint.c +++ b/accel/tcg/tb-maint.c @@ -19,9 +19,11 @@ #include "qemu/osdep.h" #include "qemu/interval-tree.h" +#include "qemu/qtree.h" #include "exec/cputlb.h" #include "exec/log.h" #include "exec/exec-all.h" +#include "exec/tb-flush.h" #include "exec/translate-all.h" #include "sysemu/tcg.h" #include "tcg/tcg.h" @@ -44,7 +46,7 @@ static bool tb_cmp(const void *ap, const void *bp) const TranslationBlock *a = ap; const TranslationBlock *b = bp; - return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) && + return ((tb_cflags(a) & CF_PCREL || a->pc == b->pc) && a->cs_base == b->cs_base && a->flags == b->flags && (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && @@ -125,29 +127,29 @@ static void tb_remove(TranslationBlock *tb) } /* TODO: For now, still shared with translate-all.c for system mode. */ -#define PAGE_FOR_EACH_TB(start, end, pagedesc, T, N) \ - for (T = foreach_tb_first(start, end), \ - N = foreach_tb_next(T, start, end); \ +#define PAGE_FOR_EACH_TB(start, last, pagedesc, T, N) \ + for (T = foreach_tb_first(start, last), \ + N = foreach_tb_next(T, start, last); \ T != NULL; \ - T = N, N = foreach_tb_next(N, start, end)) + T = N, N = foreach_tb_next(N, start, last)) typedef TranslationBlock *PageForEachNext; static PageForEachNext foreach_tb_first(tb_page_addr_t start, - tb_page_addr_t end) + tb_page_addr_t last) { - IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, end - 1); + IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, last); return n ? container_of(n, TranslationBlock, itree) : NULL; } static PageForEachNext foreach_tb_next(PageForEachNext tb, tb_page_addr_t start, - tb_page_addr_t end) + tb_page_addr_t last) { IntervalTreeNode *n; if (tb) { - n = interval_tree_iter_next(&tb->itree, start, end - 1); + n = interval_tree_iter_next(&tb->itree, start, last); if (n) { return container_of(n, TranslationBlock, itree); } @@ -313,12 +315,12 @@ struct page_entry { * See also: page_collection_lock(). 
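Under CF_PCREL a TranslationBlock no longer records an absolute pc, so the jump cache keeps a (tb, pc) pair per slot, and the comment above states the ordering contract: the writer publishes pc before releasing tb, so a reader that observes the new tb through an acquire load is guaranteed to read the matching pc. The pairing, as implemented by the inline helpers this hunk removes from the header:

    /* Writer: pc must be visible before tb. */
    jc->array[hash].pc = pc;
    qatomic_store_release(&jc->array[hash].tb, tb);

    /* Reader: the acquire on tb orders the following pc load. */
    TranslationBlock *tb = qatomic_load_acquire(&jc->array[hash].tb);
    target_ulong pc = jc->array[hash].pc;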
*/ struct page_collection { - GTree *tree; + QTree *tree; struct page_entry *max; }; typedef int PageForEachNext; -#define PAGE_FOR_EACH_TB(start, end, pagedesc, tb, n) \ +#define PAGE_FOR_EACH_TB(start, last, pagedesc, tb, n) \ TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) #ifdef CONFIG_DEBUG_TCG @@ -466,7 +468,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr) struct page_entry *pe; PageDesc *pd; - pe = g_tree_lookup(set->tree, &index); + pe = q_tree_lookup(set->tree, &index); if (pe) { return false; } @@ -477,7 +479,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr) } pe = page_entry_new(pd, index); - g_tree_insert(set->tree, &pe->index, pe); + q_tree_insert(set->tree, &pe->index, pe); /* * If this is either (1) the first insertion or (2) a page whose index @@ -509,30 +511,30 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata) } /* - * Lock a range of pages ([@start,@end[) as well as the pages of all + * Lock a range of pages ([@start,@last]) as well as the pages of all * intersecting TBs. * Locking order: acquire locks in ascending order of page index. */ static struct page_collection *page_collection_lock(tb_page_addr_t start, - tb_page_addr_t end) + tb_page_addr_t last) { struct page_collection *set = g_malloc(sizeof(*set)); tb_page_addr_t index; PageDesc *pd; start >>= TARGET_PAGE_BITS; - end >>= TARGET_PAGE_BITS; - g_assert(start <= end); + last >>= TARGET_PAGE_BITS; + g_assert(start <= last); - set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL, + set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL, page_entry_destroy); set->max = NULL; assert_no_pages_locked(); retry: - g_tree_foreach(set->tree, page_entry_lock, NULL); + q_tree_foreach(set->tree, page_entry_lock, NULL); - for (index = start; index <= end; index++) { + for (index = start; index <= last; index++) { TranslationBlock *tb; PageForEachNext n; @@ -541,7 +543,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start, continue; } if (page_trylock_add(set, index << TARGET_PAGE_BITS)) { - g_tree_foreach(set->tree, page_entry_unlock, NULL); + q_tree_foreach(set->tree, page_entry_unlock, NULL); goto retry; } assert_page_locked(pd); @@ -550,7 +552,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start, (tb_page_addr1(tb) != -1 && page_trylock_add(set, tb_page_addr1(tb)))) { /* drop all locks, and reacquire in order */ - g_tree_foreach(set->tree, page_entry_unlock, NULL); + q_tree_foreach(set->tree, page_entry_unlock, NULL); goto retry; } } @@ -561,7 +563,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start, static void page_collection_unlock(struct page_collection *set) { /* entries are unlocked and freed via page_entry_destroy */ - g_tree_destroy(set->tree); + q_tree_destroy(set->tree); g_free(set); } @@ -744,7 +746,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) tcg_region_reset_all(); /* XXX: flush processor icache at this point if cache flush is expensive */ - qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); + qatomic_inc(&tb_ctx.tb_flush_count); done: mmap_unlock(); @@ -756,9 +758,9 @@ done: void tb_flush(CPUState *cpu) { if (tcg_enabled()) { - unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count); + unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count); - if (cpu_in_exclusive_context(cpu)) { + if (cpu_in_serial_context(cpu)) { do_tb_flush(cpu, 
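A theme running through tb-maint.c here is the switch from half-open [start, end) ranges to inclusive [start, last] ranges, which matches the interval tree's inclusive API and cannot overflow for a range ending at the top of the address space. The inclusive last byte of a page is formed by setting the offset bits rather than adding the page size; a worked example assuming 4 KiB pages (TARGET_PAGE_BITS == 12):

    target_ulong addr = 0x12345;
    start = addr & TARGET_PAGE_MASK;    /* 0x12000 */
    last  = addr | ~TARGET_PAGE_MASK;   /* 0x12fff, inclusive */
    /* The old end = start + TARGET_PAGE_SIZE wraps to 0 for the final
     * page of the address space; the inclusive form cannot. */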
RUN_ON_CPU_HOST_INT(tb_flush_count)); } else { async_safe_run_on_cpu(cpu, do_tb_flush, @@ -847,13 +849,13 @@ static void tb_jmp_cache_inval_tb(TranslationBlock *tb) { CPUState *cpu; - if (TARGET_TB_PCREL) { + if (tb_cflags(tb) & CF_PCREL) { /* A TB may be at any virtual address */ CPU_FOREACH(cpu) { tcg_flush_jmp_cache(cpu); } } else { - uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb)); + uint32_t h = tb_jmp_cache_hash_func(tb->pc); CPU_FOREACH(cpu) { CPUJumpCache *jc = cpu->tb_jmp_cache; @@ -885,7 +887,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) /* remove the TB from the hash list */ phys_pc = tb_page_addr0(tb); - h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)), + h = tb_hash_func(phys_pc, (orig_cflags & CF_PCREL ? 0 : tb->pc), tb->flags, orig_cflags, tb->trace_vcpu_dstate); if (!qht_remove(&tb_ctx.htable, tb, h)) { return; @@ -966,7 +968,7 @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, tb_record(tb, p, p2); /* add in the hash table */ - h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)), + h = tb_hash_func(phys_pc, (tb->cflags & CF_PCREL ? 0 : tb->pc), tb->flags, tb->cflags, tb->trace_vcpu_dstate); qht_insert(&tb_ctx.htable, tb, h, &existing_tb); @@ -989,14 +991,14 @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, * Called with mmap_lock held for user-mode emulation. * NOTE: this function must not be called while a TB is running. */ -void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) { TranslationBlock *tb; PageForEachNext n; assert_memory_lock(); - PAGE_FOR_EACH_TB(start, end, unused, tb, n) { + PAGE_FOR_EACH_TB(start, last, unused, tb, n) { tb_phys_invalidate__locked(tb); } } @@ -1008,11 +1010,11 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) */ void tb_invalidate_phys_page(tb_page_addr_t addr) { - tb_page_addr_t start, end; + tb_page_addr_t start, last; start = addr & TARGET_PAGE_MASK; - end = start + TARGET_PAGE_SIZE; - tb_invalidate_phys_range(start, end); + last = addr | ~TARGET_PAGE_MASK; + tb_invalidate_phys_range(start, last); } /* @@ -1028,6 +1030,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) bool current_tb_modified; TranslationBlock *tb; PageForEachNext n; + tb_page_addr_t last; /* * Without precise smc semantics, or when outside of a TB, @@ -1044,10 +1047,11 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) assert_memory_lock(); current_tb = tcg_tb_lookup(pc); + last = addr | ~TARGET_PAGE_MASK; addr &= TARGET_PAGE_MASK; current_tb_modified = false; - PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) { + PAGE_FOR_EACH_TB(addr, last, unused, tb, n) { if (current_tb == tb && (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { /* @@ -1079,11 +1083,10 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) static void tb_invalidate_phys_page_range__locked(struct page_collection *pages, PageDesc *p, tb_page_addr_t start, - tb_page_addr_t end, + tb_page_addr_t last, uintptr_t retaddr) { TranslationBlock *tb; - tb_page_addr_t tb_start, tb_end; PageForEachNext n; #ifdef TARGET_HAS_PRECISE_SMC bool current_tb_modified = false; @@ -1091,22 +1094,22 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, #endif /* TARGET_HAS_PRECISE_SMC */ /* - * We remove all the TBs in the range [start, end[. + * We remove all the TBs in the range [start, last]. 
* XXX: see if in some cases it could be faster to invalidate all the code */ - PAGE_FOR_EACH_TB(start, end, p, tb, n) { + PAGE_FOR_EACH_TB(start, last, p, tb, n) { + tb_page_addr_t tb_start, tb_last; + /* NOTE: this is subtle as a TB may span two physical pages */ + tb_start = tb_page_addr0(tb); + tb_last = tb_start + tb->size - 1; if (n == 0) { - /* NOTE: tb_end may be after the end of the page, but - it is not a problem */ - tb_start = tb_page_addr0(tb); - tb_end = tb_start + tb->size; + tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK); } else { tb_start = tb_page_addr1(tb); - tb_end = tb_start + ((tb_page_addr0(tb) + tb->size) - & ~TARGET_PAGE_MASK); + tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK); } - if (!(tb_end <= start || tb_start >= end)) { + if (!(tb_last < start || tb_start > last)) { #ifdef TARGET_HAS_PRECISE_SMC if (current_tb == tb && (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { @@ -1148,7 +1151,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, void tb_invalidate_phys_page(tb_page_addr_t addr) { struct page_collection *pages; - tb_page_addr_t start, end; + tb_page_addr_t start, last; PageDesc *p; p = page_find(addr >> TARGET_PAGE_BITS); @@ -1157,35 +1160,37 @@ void tb_invalidate_phys_page(tb_page_addr_t addr) } start = addr & TARGET_PAGE_MASK; - end = start + TARGET_PAGE_SIZE; - pages = page_collection_lock(start, end); - tb_invalidate_phys_page_range__locked(pages, p, start, end, 0); + last = addr | ~TARGET_PAGE_MASK; + pages = page_collection_lock(start, last); + tb_invalidate_phys_page_range__locked(pages, p, start, last, 0); page_collection_unlock(pages); } /* * Invalidate all TBs which intersect with the target physical address range - * [start;end[. NOTE: start and end may refer to *different* physical pages. + * [start;last]. NOTE: start and end may refer to *different* physical pages. * 'is_cpu_write_access' should be true if called from a real cpu write * access: the virtual CPU will exit the current TB if code is modified inside * this TB. 
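The rewritten loop below computes per-subpage bounds for a TB that may straddle a page boundary: for n == 0 the span is clamped to the end of the first page, and for n == 1 it starts at the second physical page and covers the remaining bytes. A worked example assuming 4 KiB pages and a TB at tb_page_addr0(tb) == 0x1ff0 with tb->size == 0x20, spilling into tb_page_addr1(tb) == 0x2000:

    /* tb_last starts as 0x1ff0 + 0x20 - 1 == 0x200f for both subpages. */
    /* n == 0: tb_start = 0x1ff0, tb_last = MIN(0x200f, 0x1fff) = 0x1fff */
    /* n == 1: tb_start = 0x2000, tb_last = 0x2000 + (0x200f & 0xfff)
     *                                    = 0x200f                       */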
*/ -void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) { struct page_collection *pages; - tb_page_addr_t next; + tb_page_addr_t index, index_last; - pages = page_collection_lock(start, end); - for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; - start < end; - start = next, next += TARGET_PAGE_SIZE) { - PageDesc *pd = page_find(start >> TARGET_PAGE_BITS); - tb_page_addr_t bound = MIN(next, end); + pages = page_collection_lock(start, last); + + index_last = last >> TARGET_PAGE_BITS; + for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) { + PageDesc *pd = page_find(index); + tb_page_addr_t bound; if (pd == NULL) { continue; } assert_page_locked(pd); + bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK; + bound = MIN(bound, last); tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0); } page_collection_unlock(pages); @@ -1206,7 +1211,7 @@ static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages, } assert_page_locked(p); - tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra); + tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra); } /* @@ -1220,7 +1225,7 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr, { struct page_collection *pages; - pages = page_collection_lock(ram_addr, ram_addr + size); + pages = page_collection_lock(ram_addr, ram_addr + size - 1); tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr); page_collection_unlock(pages); } diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c index 84cc7421be..3d2cfbbc97 100644 --- a/accel/tcg/tcg-accel-ops-icount.c +++ b/accel/tcg/tcg-accel-ops-icount.c @@ -89,7 +89,20 @@ void icount_handle_deadline(void) } } -void icount_prepare_for_run(CPUState *cpu) +/* Distribute the budget evenly across all CPUs */ +int64_t icount_percpu_budget(int cpu_count) +{ + int64_t limit = icount_get_limit(); + int64_t timeslice = limit / cpu_count; + + if (timeslice == 0) { + timeslice = limit; + } + + return timeslice; +} + +void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget) { int insns_left; @@ -101,13 +114,13 @@ void icount_prepare_for_run(CPUState *cpu) g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0); g_assert(cpu->icount_extra == 0); - cpu->icount_budget = icount_get_limit(); + replay_mutex_lock(); + + cpu->icount_budget = MIN(icount_get_limit(), cpu_budget); insns_left = MIN(0xffff, cpu->icount_budget); cpu_neg(cpu)->icount_decr.u16.low = insns_left; cpu->icount_extra = cpu->icount_budget - insns_left; - replay_mutex_lock(); - if (cpu->icount_budget == 0) { /* * We're called without the iothread lock, so must take it while diff --git a/accel/tcg/tcg-accel-ops-icount.h b/accel/tcg/tcg-accel-ops-icount.h index 1b6fd9c607..16a301b6dc 100644 --- a/accel/tcg/tcg-accel-ops-icount.h +++ b/accel/tcg/tcg-accel-ops-icount.h @@ -11,7 +11,8 @@ #define TCG_ACCEL_OPS_ICOUNT_H void icount_handle_deadline(void); -void icount_prepare_for_run(CPUState *cpu); +void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget); +int64_t icount_percpu_budget(int cpu_count); void icount_process_data(CPUState *cpu); void icount_handle_interrupt(CPUState *cpu, int mask); diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c index 290833a37f..b6d10fa9a2 100644 --- a/accel/tcg/tcg-accel-ops-rr.c +++ b/accel/tcg/tcg-accel-ops-rr.c @@ -24,6 +24,7 @@ */ #include "qemu/osdep.h" +#include "qemu/lockable.h" 
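With icount enabled, the round-robin scheduler now divides the global instruction budget evenly across vCPUs, so one runnable CPU can no longer consume the whole timeslice and starve the others; when there are more CPUs than budgeted instructions, the integer division yields zero and the code falls back to the full limit instead of handing out an empty slice. A quick numeric sketch of icount_percpu_budget() as defined above:

    /* icount_get_limit() == 100000 with 4 vCPUs: 25000 insns per CPU. */
    /* icount_get_limit() == 3 with 4 vCPUs: 3 / 4 == 0, so each CPU
     * falls back to the full limit of 3.                              */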
#include "sysemu/tcg.h" #include "sysemu/replay.h" #include "sysemu/cpu-timers.h" @@ -71,11 +72,13 @@ static void rr_kick_next_cpu(void) { CPUState *cpu; do { - cpu = qatomic_mb_read(&rr_current_cpu); + cpu = qatomic_read(&rr_current_cpu); if (cpu) { cpu_exit(cpu); } - } while (cpu != qatomic_mb_read(&rr_current_cpu)); + /* Finish kicking this cpu before reading again. */ + smp_mb(); + } while (cpu != qatomic_read(&rr_current_cpu)); } static void rr_kick_thread(void *opaque) @@ -139,6 +142,33 @@ static void rr_force_rcu(Notifier *notify, void *data) rr_kick_next_cpu(); } +/* + * Calculate the number of CPUs that we will process in a single iteration of + * the main CPU thread loop so that we can fairly distribute the instruction + * count across CPUs. + * + * The CPU count is cached based on the CPU list generation ID to avoid + * iterating the list every time. + */ +static int rr_cpu_count(void) +{ + static unsigned int last_gen_id = ~0; + static int cpu_count; + CPUState *cpu; + + QEMU_LOCK_GUARD(&qemu_cpu_list_lock); + + if (cpu_list_generation_id_get() != last_gen_id) { + cpu_count = 0; + CPU_FOREACH(cpu) { + ++cpu_count; + } + last_gen_id = cpu_list_generation_id_get(); + } + + return cpu_count; +} + /* * In the single-threaded case each vCPU is simulated in turn. If * there is more than a single vCPU we create a simple timer to kick @@ -185,11 +215,16 @@ static void *rr_cpu_thread_fn(void *arg) cpu->exit_request = 1; while (1) { + /* Only used for icount_enabled() */ + int64_t cpu_budget = 0; + qemu_mutex_unlock_iothread(); replay_mutex_lock(); qemu_mutex_lock_iothread(); if (icount_enabled()) { + int cpu_count = rr_cpu_count(); + /* Account partial waits to QEMU_CLOCK_VIRTUAL. */ icount_account_warp_timer(); /* @@ -197,6 +232,8 @@ static void *rr_cpu_thread_fn(void *arg) * waking up the I/O thread and waiting for completion. */ icount_handle_deadline(); + + cpu_budget = icount_percpu_budget(cpu_count); } replay_mutex_unlock(); @@ -206,8 +243,9 @@ static void *rr_cpu_thread_fn(void *arg) } while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) { - + /* Store rr_current_cpu before evaluating cpu_can_run(). */ qatomic_mb_set(&rr_current_cpu, cpu); + current_cpu = cpu; qemu_clock_enable(QEMU_CLOCK_VIRTUAL, @@ -218,7 +256,7 @@ static void *rr_cpu_thread_fn(void *arg) qemu_mutex_unlock_iothread(); if (icount_enabled()) { - icount_prepare_for_run(cpu); + icount_prepare_for_run(cpu, cpu_budget); } r = tcg_cpus_exec(cpu); if (icount_enabled()) { @@ -245,7 +283,7 @@ static void *rr_cpu_thread_fn(void *arg) cpu = CPU_NEXT(cpu); } /* while (cpu && !cpu->exit_request).. */ - /* Does not need qatomic_mb_set because a spurious wakeup is okay. */ + /* Does not need a memory barrier because a spurious wakeup is okay. */ qatomic_set(&rr_current_cpu, NULL); if (cpu && cpu->exit_request) { diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c index 19cbf1db3a..58c8e64096 100644 --- a/accel/tcg/tcg-accel-ops.c +++ b/accel/tcg/tcg-accel-ops.c @@ -31,6 +31,7 @@ #include "sysemu/cpu-timers.h" #include "qemu/main-loop.h" #include "qemu/guest-random.h" +#include "qemu/timer.h" #include "exec/exec-all.h" #include "exec/hwaddr.h" #include "exec/gdbstub.h" @@ -44,10 +45,21 @@ void tcg_cpu_init_cflags(CPUState *cpu, bool parallel) { - uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT; + uint32_t cflags; + + /* + * Include the cluster number in the hash we use to look up TBs. 
+ * This is important because a TB that is valid for one cluster at + * a given physical address and set of CPU flags is not necessarily + * valid for another: + * the two clusters may have different views of physical memory, or + * may have different CPU features (eg FPU present or absent). + */ + cflags = cpu->cluster_index << CF_CLUSTER_SHIFT; + cflags |= parallel ? CF_PARALLEL : 0; cflags |= icount_enabled() ? CF_USE_ICOUNT : 0; - cpu->tcg_cflags = cflags; + cpu->tcg_cflags |= cflags; } void tcg_cpus_destroy(CPUState *cpu) @@ -116,7 +128,7 @@ static inline int xlat_gdb_type(CPUState *cpu, int gdbtype) return cputype; } -static int tcg_insert_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len) +static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len) { CPUState *cpu; int err = 0; @@ -147,7 +159,7 @@ static int tcg_insert_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len } } -static int tcg_remove_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len) +static int tcg_remove_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len) { CPUState *cpu; int err = 0; diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c index 4cdf93b378..35e07e1c39 100644 --- a/accel/tcg/tcg-all.c +++ b/accel/tcg/tcg-all.c @@ -25,12 +25,13 @@ #include "qemu/osdep.h" #include "sysemu/tcg.h" -#include "sysemu/replay.h" +#include "exec/replay-core.h" #include "sysemu/cpu-timers.h" #include "tcg/tcg.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/accel.h" +#include "qemu/atomic.h" #include "qapi/qapi-builtin-visit.h" #include "qemu/units.h" #if !defined(CONFIG_USER_ONLY) @@ -42,6 +43,7 @@ struct TCGState { AccelState parent_obj; bool mttcg_enabled; + bool one_insn_per_tb; int splitwx_enabled; unsigned long tb_size; }; @@ -117,6 +119,7 @@ static void tcg_accel_instance_init(Object *obj) } bool mttcg_enabled; +bool one_insn_per_tb; static int tcg_init_machine(MachineState *ms) { @@ -216,6 +219,20 @@ static void tcg_set_splitwx(Object *obj, bool value, Error **errp) s->splitwx_enabled = value; } +static bool tcg_get_one_insn_per_tb(Object *obj, Error **errp) +{ + TCGState *s = TCG_STATE(obj); + return s->one_insn_per_tb; +} + +static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp) +{ + TCGState *s = TCG_STATE(obj); + s->one_insn_per_tb = value; + /* Set the global also: this changes the behaviour */ + qatomic_set(&one_insn_per_tb, value); +} + static int tcg_gdbstub_supported_sstep_flags(void) { /* @@ -253,6 +270,12 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data) tcg_get_splitwx, tcg_set_splitwx); object_class_property_set_description(oc, "split-wx", "Map jit pages into separate RW and RX regions"); + + object_class_property_add_bool(oc, "one-insn-per-tb", + tcg_get_one_insn_per_tb, + tcg_set_one_insn_per_tb); + object_class_property_set_description(oc, "one-insn-per-tb", + "Only put one guest insn in each translation block"); } static const TypeInfo tcg_accel_type = { diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c index ac7d28c251..97399493d5 100644 --- a/accel/tcg/tcg-runtime-gvec.c +++ b/accel/tcg/tcg-runtime-gvec.c @@ -550,6 +550,17 @@ void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc) clear_high(d, oprsz, desc); } +void HELPER(gvec_andcs)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & ~b; + } + clear_high(d, oprsz, 
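    /* gvec_andcs is the scalar-operand counterpart of the existing
     * gvec_ands/gvec_xors/gvec_ors helpers: each 64-bit lane of d becomes
     * the matching lane of a and-ed with the complement of the
     * already-replicated scalar b.  Lane-level example:
     *     a == 0xffff0000ffff0000, b == 0x00ffffff00ffffff
     *     a & ~b == 0xff000000ff000000
     * clear_high() then zeroes any tail beyond the operation size. */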
desc); +} + void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc) { intptr_t oprsz = simd_oprsz(desc); diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h index a79abc2994..d35bab48de 100644 --- a/accel/tcg/tcg-runtime.h +++ b/accel/tcg/tcg-runtime.h @@ -39,62 +39,63 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env) DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr) #endif /* IN_HELPER_PROTO */ +DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, i64, i32) +DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, i64, i128, i32) + DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) + i32, env, i64, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) + i32, env, i64, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) + i32, env, i64, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) + i32, env, i64, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) + i32, env, i64, i32, i32, i32) #ifdef CONFIG_ATOMIC64 DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG, - i64, env, tl, i64, i64, i32) + i64, env, i64, i64, i64, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG, - i64, env, tl, i64, i64, i32) + i64, env, i64, i64, i64, i32) #endif #ifdef CONFIG_CMPXCHG128 DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG, - i128, env, tl, i128, i128, i32) + i128, env, i64, i128, i128, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG, - i128, env, tl, i128, i128, i32) + i128, env, i64, i128, i128, i32) #endif -DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_be, TCG_CALL_NO_WG, - i128, env, tl, i128, i128, i32) -DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_le, TCG_CALL_NO_WG, - i128, env, tl, i128, i128, i32) +DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo, TCG_CALL_NO_WG, + i128, env, i64, i128, i128, i32) #ifdef CONFIG_ATOMIC64 #define GEN_ATOMIC_HELPERS(NAME) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \ - TCG_CALL_NO_WG, i64, env, tl, i64, i32) \ + TCG_CALL_NO_WG, i64, env, i64, i64, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \ - TCG_CALL_NO_WG, i64, env, tl, i64, i32) + TCG_CALL_NO_WG, i64, env, i64, i64, i32) #else #define GEN_ATOMIC_HELPERS(NAME) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ 
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) + TCG_CALL_NO_WG, i32, env, i64, i32, i32) #endif /* CONFIG_ATOMIC64 */ GEN_ATOMIC_HELPERS(fetch_add) @@ -217,6 +218,7 @@ DEF_HELPER_FLAGS_4(gvec_nor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_andcs, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index e64f262a46..e4e2195a8c 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -47,11 +47,12 @@ #include "exec/cputlb.h" #include "exec/translate-all.h" #include "exec/translator.h" +#include "exec/tb-flush.h" #include "qemu/bitmap.h" #include "qemu/qemu-print.h" -#include "qemu/timer.h" #include "qemu/main-loop.h" #include "qemu/cacheinfo.h" +#include "qemu/timer.h" #include "exec/log.h" #include "sysemu/cpus.h" #include "sysemu/cpu-timers.h" @@ -70,6 +71,31 @@ #include "tcg/tcg-internal.h" #include "exec/helper-head.h" +#include "tcg/tcg-temp-internal.h" + +// reintroduce this in QEMU +static TCGv_i64 tcg_const_i64(int64_t val) +{ + TCGv_i64 t0; + t0 = tcg_temp_new_i64(); + tcg_gen_movi_i64(t0, val); + return t0; +} + +#if TARGET_LONG_BITS == 32 +static TCGv_i32 tcg_const_i32(int32_t val) +{ + TCGv_i32 t0; + t0 = tcg_temp_new_i32(); + tcg_gen_movi_i32(t0, val); + return t0; +} + +#define tcg_const_tl tcg_const_i32 +#else +#define tcg_const_tl tcg_const_i64 +#endif + target_ulong libafl_gen_cur_pc; void libafl_helper_table_add(TCGHelperInfo* info); @@ -77,8 +103,8 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block, target_ulong dst_block, int exit_n, target_ulong cs_base, uint32_t flags, int cflags); -void libafl_gen_read(TCGv addr, MemOpIdx oi); -void libafl_gen_write(TCGv addr, MemOpIdx oi); +void libafl_gen_read(TCGTemp *addr, MemOpIdx oi); +void libafl_gen_write(TCGTemp *addr, MemOpIdx oi); void libafl_gen_cmp(target_ulong pc, TCGv op0, TCGv op1, MemOp ot); void libafl_gen_backdoor(target_ulong pc); @@ -298,7 +324,7 @@ void libafl_add_read_hook(uint64_t (*gen)(target_ulong pc, MemOpIdx oi, uint64_t } } -void libafl_gen_read(TCGv addr, MemOpIdx oi) +void libafl_gen_read(TCGTemp *addr, MemOpIdx oi) { size_t size = 1 << (oi & MO_SIZE); @@ -317,11 +343,7 @@ void libafl_gen_read(TCGv addr, MemOpIdx oi) TCGv_i64 tmp0 = tcg_const_i64(cur_id); TCGv_i64 tmp1 = tcg_const_i64(hook->data); TCGTemp *tmp2[3] = { tcgv_i64_temp(tmp0), -#if TARGET_LONG_BITS == 32 - tcgv_i32_temp(addr), -#else - tcgv_i64_temp(addr), -#endif + addr, tcgv_i64_temp(tmp1) }; tcg_gen_callN(func, NULL, 3, tmp2); tcg_temp_free_i64(tmp0); @@ -331,11 +353,10 @@ void libafl_gen_read(TCGv addr, MemOpIdx oi) TCGv tmp1 = tcg_const_tl(size); TCGv_i64 tmp2 = tcg_const_i64(hook->data); TCGTemp *tmp3[4] = { tcgv_i64_temp(tmp0), + addr, #if TARGET_LONG_BITS == 32 - tcgv_i32_temp(addr), tcgv_i32_temp(tmp1), #else - tcgv_i64_temp(addr), tcgv_i64_temp(tmp1), #endif tcgv_i64_temp(tmp2) }; @@ -413,7 +434,7 @@ void libafl_add_write_hook(uint64_t (*gen)(target_ulong pc, MemOpIdx oi, uint64_ } } -void libafl_gen_write(TCGv addr, MemOpIdx oi) +void 
libafl_gen_write(TCGTemp *addr, MemOpIdx oi) { size_t size = 1 << (oi & MO_SIZE); @@ -432,11 +453,7 @@ void libafl_gen_write(TCGv addr, MemOpIdx oi) TCGv_i64 tmp0 = tcg_const_i64(cur_id); TCGv_i64 tmp1 = tcg_const_i64(hook->data); TCGTemp *tmp2[3] = { tcgv_i64_temp(tmp0), -#if TARGET_LONG_BITS == 32 - tcgv_i32_temp(addr), -#else - tcgv_i64_temp(addr), -#endif + addr, tcgv_i64_temp(tmp1) }; tcg_gen_callN(func, NULL, 3, tmp2); tcg_temp_free_i64(tmp0); @@ -446,11 +463,10 @@ void libafl_gen_write(TCGv addr, MemOpIdx oi) TCGv tmp1 = tcg_const_tl(size); TCGv_i64 tmp2 = tcg_const_i64(hook->data); TCGTemp *tmp3[4] = { tcgv_i64_temp(tmp0), + addr, #if TARGET_LONG_BITS == 32 - tcgv_i32_temp(addr), tcgv_i32_temp(tmp1), #else - tcgv_i64_temp(addr), tcgv_i64_temp(tmp1), #endif tcgv_i64_temp(tmp2) }; @@ -648,9 +664,11 @@ QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > TBContext tb_ctx; -/* Encode VAL as a signed leb128 sequence at P. - Return P incremented past the encoded value. */ -static uint8_t *encode_sleb128(uint8_t *p, target_long val) +/* + * Encode VAL as a signed leb128 sequence at P. + * Return P incremented past the encoded value. + */ +static uint8_t *encode_sleb128(uint8_t *p, int64_t val) { int more, byte; @@ -668,21 +686,23 @@ static uint8_t *encode_sleb128(uint8_t *p, target_long val) return p; } -/* Decode a signed leb128 sequence at *PP; increment *PP past the - decoded value. Return the decoded value. */ -static target_long decode_sleb128(const uint8_t **pp) +/* + * Decode a signed leb128 sequence at *PP; increment *PP past the + * decoded value. Return the decoded value. + */ +static int64_t decode_sleb128(const uint8_t **pp) { const uint8_t *p = *pp; - target_long val = 0; + int64_t val = 0; int byte, shift = 0; do { byte = *p++; - val |= (target_ulong)(byte & 0x7f) << shift; + val |= (int64_t)(byte & 0x7f) << shift; shift += 7; } while (byte & 0x80); if (shift < TARGET_LONG_BITS && (byte & 0x40)) { - val |= -(target_ulong)1 << shift; + val |= -(int64_t)1 << shift; } *pp = p; @@ -708,11 +728,11 @@ static int encode_search(TranslationBlock *tb, uint8_t *block) int i, j, n; for (i = 0, n = tb->icount; i < n; ++i) { - target_ulong prev; + uint64_t prev; for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { if (i == 0) { - prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0); + prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? 
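encode_sleb128()/decode_sleb128() above now operate on fixed int64_t instead of target_long, so 64-bit instruction-start values round-trip even on 32-bit targets. A worked example of the format for readers unfamiliar with signed LEB128:

    /*  300 == 0b100101100  ->  bytes 0xac 0x02
     *    byte 0: 0x2c | 0x80   (low 7 bits, continuation bit set)
     *    byte 1: 0x02          (bit 6 clear -> positive, stop)
     *  Decoding: 0x2c | (0x02 << 7) == 300.
     *
     *   -2  ->  single byte 0x7e
     *    bit 6 set -> sign-extend: 0x7e | (-1 << 7) == -2.          */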
tb->pc : 0); } else { prev = tcg_ctx->gen_insn_data[i - 1][j]; } @@ -747,8 +767,8 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc, } memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS); - if (!TARGET_TB_PCREL) { - data[0] = tb_pc(tb); + if (!(tb_cflags(tb) & CF_PCREL)) { + data[0] = tb->pc; } /* @@ -879,7 +899,7 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb, //// --- End LibAFL code --- - gen_intermediate_code(env_cpu(env), tb, *max_insns, pc, host_pc); + gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc); assert(tb->size != 0); tcg_ctx->cpu = NULL; *max_insns = tb->icount; @@ -1122,9 +1142,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu, gen_code_buf = tcg_ctx->code_gen_ptr; tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf); -#if !TARGET_TB_PCREL - tb->pc = pc; -#endif + if (!(cflags & CF_PCREL)) { + tb->pc = pc; + } tb->cs_base = cs_base; tb->flags = flags; tb->cflags = cflags; @@ -1132,6 +1152,13 @@ TranslationBlock *tb_gen_code(CPUState *cpu, tb_set_page_addr0(tb, phys_pc); tb_set_page_addr1(tb, -1); tcg_ctx->gen_tb = tb; + tcg_ctx->addr_type = TCG_TYPE_TL; +#ifdef CONFIG_SOFTMMU + tcg_ctx->page_bits = TARGET_PAGE_BITS; + tcg_ctx->page_mask = TARGET_PAGE_MASK; + tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS; +#endif + tb_overflow: #ifdef CONFIG_PROFILER @@ -1197,8 +1224,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu, } /* - * For TARGET_TB_PCREL, attribute all executions of the generated - * code to its first mapping. + * For CF_PCREL, attribute all executions of the generated code + * to its first mapping. */ perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf)); @@ -1209,7 +1236,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu, qatomic_set(&prof->search_out_len, prof->search_out_len + search_size); #endif -#ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && qemu_log_in_addr_range(pc)) { FILE *logfile = qemu_log_trylock(); @@ -1232,7 +1258,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, /* Dump header and the first instruction */ fprintf(logfile, "OUT: [size=%d]\n", gen_code_size); fprintf(logfile, - " -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n", + " -- guest addr 0x%016" PRIx64 " + tb prologue\n", tcg_ctx->gen_insn_data[insn][0]); chunk_start = tcg_ctx->gen_insn_end_off[insn]; disas(logfile, tb->tc.ptr, chunk_start); @@ -1245,7 +1271,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, while (insn < tb->icount) { size_t chunk_end = tcg_ctx->gen_insn_end_off[insn]; if (chunk_end > chunk_start) { - fprintf(logfile, " -- guest addr 0x" TARGET_FMT_lx "\n", + fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n", tcg_ctx->gen_insn_data[insn][0]); disas(logfile, tb->tc.ptr + chunk_start, chunk_end - chunk_start); @@ -1282,7 +1308,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu, qemu_log_unlock(logfile); } } -#endif qatomic_set(&tcg_ctx->code_gen_ptr, (void *) ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, @@ -1360,7 +1385,7 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr) cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); addr = get_page_addr_code(env, pc); if (addr != -1) { - tb_invalidate_phys_range(addr, addr + 1); + tb_invalidate_phys_range(addr, addr); } } } diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c index 3992499c3d..52255a30a3 100644 --- a/accel/tcg/translator.c +++ b/accel/tcg/translator.c @@ -16,11 +16,35 @@ #include "exec/log.h" #include "exec/translator.h" #include "exec/plugin-gen.h" -#include "sysemu/replay.h" +#include 
"exec/replay-core.h" //// --- Begin LibAFL code --- #include "tcg/tcg-internal.h" +#include "tcg/tcg-temp-internal.h" + +// reintroduce this in QEMU +static TCGv_i64 tcg_const_i64(int64_t val) +{ + TCGv_i64 t0; + t0 = tcg_temp_new_i64(); + tcg_gen_movi_i64(t0, val); + return t0; +} + +#if TARGET_LONG_BITS == 32 +static TCGv_i32 tcg_const_i32(int32_t val) +{ + TCGv_i32 t0; + t0 = tcg_temp_new_i32(); + tcg_gen_movi_i32(t0, val); + return t0; +} + +#define tcg_const_tl tcg_const_i32 +#else +#define tcg_const_tl tcg_const_i64 +#endif extern target_ulong libafl_gen_cur_pc; @@ -54,19 +78,6 @@ extern struct libafl_backdoor_hook* libafl_backdoor_hooks; //// --- End LibAFL code --- -/* Pairs with tcg_clear_temp_count. - To be called by #TranslatorOps.{translate_insn,tb_stop} if - (1) the target is sufficiently clean to support reporting, - (2) as and when all temporaries are known to be consumed. - For most targets, (2) is at the end of translate_insn. */ -void translator_loop_temp_check(DisasContextBase *db) -{ - if (tcg_check_temp_count()) { - qemu_log("warning: TCG temporary leaks before " - TARGET_FMT_lx "\n", db->pc_next); - } -} - bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest) { /* Suppress goto_tb if requested. */ @@ -78,7 +89,7 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest) return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0; } -void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns, +void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc, const TranslatorOps *ops, DisasContextBase *db) { @@ -91,7 +102,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns, db->pc_next = pc; db->is_jmp = DISAS_NEXT; db->num_insns = 0; - db->max_insns = max_insns; + db->max_insns = *max_insns; db->singlestep_enabled = cflags & CF_SINGLE_STEP; db->host_addr[0] = host_pc; db->host_addr[1] = NULL; @@ -103,9 +114,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns, ops->init_disas_context(db, cpu); tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ - /* Reset the temp count so that we can identify leaks */ - tcg_clear_temp_count(); - /* Start translating. 
*/ gen_tb_start(db->tb); ops->tb_start(db, cpu); @@ -114,7 +122,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns, plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY); while (true) { - db->num_insns++; + *max_insns = ++db->num_insns; ops->insn_start(db, cpu); tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ @@ -248,7 +256,6 @@ post_translate_insn: tb->size = db->pc_next - db->pc_first; tb->icount = db->num_insns; -#ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) && qemu_log_in_addr_range(db->pc_first)) { FILE *logfile = qemu_log_trylock(); @@ -259,7 +266,6 @@ post_translate_insn: qemu_log_unlock(logfile); } } -#endif } static void *translator_access(CPUArchState *env, DisasContextBase *db, diff --git a/accel/tcg/user-exec-stub.c b/accel/tcg/user-exec-stub.c index 968cd3ca60..874e1f1a20 100644 --- a/accel/tcg/user-exec-stub.c +++ b/accel/tcg/user-exec-stub.c @@ -1,6 +1,6 @@ #include "qemu/osdep.h" #include "hw/core/cpu.h" -#include "sysemu/replay.h" +#include "exec/replay-core.h" bool enable_cpu_pm = false; diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index ae67d84638..dc8d6b5d40 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -480,24 +480,22 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last, * The flag PAGE_WRITE_ORG is positioned automatically depending * on PAGE_WRITE. The mmap_lock should already be held. */ -void page_set_flags(target_ulong start, target_ulong end, int flags) +void page_set_flags(target_ulong start, target_ulong last, int flags) { - target_ulong last; bool reset = false; bool inval_tb = false; /* This function should never be called with addresses outside the guest address space. If this assert fires, it probably indicates a missing call to h2g_valid. */ - assert(start < end); - assert(end - 1 <= GUEST_ADDR_MAX); + assert(start <= last); + assert(last <= GUEST_ADDR_MAX); /* Only set PAGE_ANON with new mappings. */ assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET)); assert_memory_lock(); - start = start & TARGET_PAGE_MASK; - end = TARGET_PAGE_ALIGN(end); - last = end - 1; + start &= TARGET_PAGE_MASK; + last |= ~TARGET_PAGE_MASK; if (!(flags & PAGE_VALID)) { flags = 0; @@ -510,7 +508,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags) } if (!flags || reset) { - page_reset_target_data(start, end); + page_reset_target_data(start, last); inval_tb |= pageflags_unset(start, last); } if (flags) { @@ -518,7 +516,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags) ~(reset ? 0 : PAGE_STICKY)); } if (inval_tb) { - tb_invalidate_phys_range(start, end); + tb_invalidate_phys_range(start, last); } } @@ -761,13 +759,14 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra); } -int probe_access_flags(CPUArchState *env, target_ulong addr, +int probe_access_flags(CPUArchState *env, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, uintptr_t ra) { int flags; - flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra); + g_assert(-(addr | TARGET_PAGE_MASK) >= size); + flags = probe_access_internal(env, addr, size, access_type, nonfault, ra); *phost = flags ? 
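The new size argument to probe_access_flags() comes with an assertion that the probed range does not cross a page boundary: since TARGET_PAGE_MASK is a sign-extended mask of the high bits, -(addr | TARGET_PAGE_MASK) is exactly the number of bytes from addr to the end of its page. Assuming 4 KiB pages and an address with in-page offset 0xff0:

    /*   addr | TARGET_PAGE_MASK  == 0x...fffffff0   (i.e. -16)
     * -(addr | TARGET_PAGE_MASK) == 0x10            (16 bytes left)
     * so size <= 16 passes, and a 32-byte probe trips the assert.   */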
NULL : g2h(env_cpu(env), addr); return flags; } @@ -815,15 +814,14 @@ typedef struct TargetPageDataNode { static IntervalTreeRoot targetdata_root; -void page_reset_target_data(target_ulong start, target_ulong end) +void page_reset_target_data(target_ulong start, target_ulong last) { IntervalTreeNode *n, *next; - target_ulong last; assert_memory_lock(); - start = start & TARGET_PAGE_MASK; - last = TARGET_PAGE_ALIGN(end) - 1; + start &= TARGET_PAGE_MASK; + last |= ~TARGET_PAGE_MASK; for (n = interval_tree_iter_first(&targetdata_root, start, last), next = n ? interval_tree_iter_next(n, start, last) : NULL; @@ -886,40 +884,14 @@ void *page_get_target_data(target_ulong address) return t->data[(page - region) >> TARGET_PAGE_BITS]; } #else -void page_reset_target_data(target_ulong start, target_ulong end) { } +void page_reset_target_data(target_ulong start, target_ulong last) { } #endif /* TARGET_PAGE_DATA_SIZE */ /* The softmmu versions of these helpers are in cputlb.c. */ -/* - * Verify that we have passed the correct MemOp to the correct function. - * - * We could present one function to target code, and dispatch based on - * the MemOp, but so far we have worked hard to avoid an indirect function - * call along the memory path. - */ -static void validate_memop(MemOpIdx oi, MemOp expected) +static void *cpu_mmu_lookup(CPUArchState *env, abi_ptr addr, + MemOp mop, uintptr_t ra, MMUAccessType type) { -#ifdef CONFIG_DEBUG_TCG - MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); - assert(have == expected); -#endif -} - -void helper_unaligned_ld(CPUArchState *env, target_ulong addr) -{ - cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC()); -} - -void helper_unaligned_st(CPUArchState *env, target_ulong addr) -{ - cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC()); -} - -static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t ra, MMUAccessType type) -{ - MemOp mop = get_memop(oi); int a_bits = get_alignment_bits(mop); void *ret; @@ -933,251 +905,320 @@ static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr, return ret; } -uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) +#include "ldst_atomicity.c.inc" + +static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr, + MemOp mop, uintptr_t ra) { void *haddr; uint8_t ret; - validate_memop(oi, MO_UB); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + tcg_debug_assert((mop & MO_SIZE) == MO_8); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); ret = ldub_p(haddr); clear_helper_retaddr(); + return ret; +} + +tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t ra) +{ + return do_ld1_mmu(env, addr, get_memop(oi), ra); +} + +tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t ra) +{ + return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra); +} + +uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) +static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr, + MemOp mop, uintptr_t ra) { void *haddr; uint16_t ret; - validate_memop(oi, MO_BEUW); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = lduw_be_p(haddr); + tcg_debug_assert((mop & MO_SIZE) == MO_16); + haddr = cpu_mmu_lookup(env, 
addr, mop, ra, MMU_DATA_LOAD); + ret = load_atom_2(env, ra, haddr, mop); clear_helper_retaddr(); + + if (mop & MO_BSWAP) { + ret = bswap16(ret); + } + return ret; +} + +tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t ra) +{ + return do_ld2_mmu(env, addr, get_memop(oi), ra); +} + +tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t ra) +{ + return (int16_t)do_ld2_mmu(env, addr, get_memop(oi), ra); +} + +uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + uint16_t ret = do_ld2_mmu(env, addr, get_memop(oi), ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) +static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr, + MemOp mop, uintptr_t ra) { void *haddr; uint32_t ret; - validate_memop(oi, MO_BEUL); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = ldl_be_p(haddr); + tcg_debug_assert((mop & MO_SIZE) == MO_32); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); + ret = load_atom_4(env, ra, haddr, mop); clear_helper_retaddr(); + + if (mop & MO_BSWAP) { + ret = bswap32(ret); + } + return ret; +} + +tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t ra) +{ + return do_ld4_mmu(env, addr, get_memop(oi), ra); +} + +tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t ra) +{ + return (int32_t)do_ld4_mmu(env, addr, get_memop(oi), ra); +} + +uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + uint32_t ret = do_ld4_mmu(env, addr, get_memop(oi), ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) +static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr, + MemOp mop, uintptr_t ra) { void *haddr; uint64_t ret; - validate_memop(oi, MO_BEUQ); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = ldq_be_p(haddr); + tcg_debug_assert((mop & MO_SIZE) == MO_64); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); + ret = load_atom_8(env, ra, haddr, mop); clear_helper_retaddr(); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + + if (mop & MO_BSWAP) { + ret = bswap64(ret); + } return ret; } -uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, +uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t ra) { - void *haddr; - uint16_t ret; - - validate_memop(oi, MO_LEUW); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = lduw_le_p(haddr); - clear_helper_retaddr(); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return ret; + return do_ld8_mmu(env, addr, get_memop(oi), ra); } -uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) +uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - void *haddr; - uint32_t ret; - - validate_memop(oi, MO_LEUL); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = ldl_le_p(haddr); - clear_helper_retaddr(); + uint64_t ret = do_ld8_mmu(env, addr, get_memop(oi), ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) -{ - void *haddr; - uint64_t ret; - - validate_memop(oi, 
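This is the shape of the whole user-exec rework: the separate _be/_le load and store entry points collapse into one do_ld*_mmu()/do_st*_mmu() per size, which performs a host-order atomic access via load_atom_*/store_atom_* and byte-swaps only when MO_BSWAP is set in the MemOp. Callers select endianness through the MemOpIdx, as the mmuidx_ra wrappers at the top of this patch already do:

    /* Little-endian 32-bit load: endianness travels in the MemOpIdx
     * rather than in the function name. */
    MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
    uint32_t val = cpu_ldl_mmu(env, addr, oi, ra);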
MO_LEUQ); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = ldq_le_p(haddr); - clear_helper_retaddr(); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return ret; -} - -Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) +static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr, + MemOp mop, uintptr_t ra) { void *haddr; Int128 ret; - validate_memop(oi, MO_128 | MO_BE); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - memcpy(&ret, haddr, 16); + tcg_debug_assert((mop & MO_SIZE) == MO_128); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); + ret = load_atom_16(env, ra, haddr, mop); clear_helper_retaddr(); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - if (!HOST_BIG_ENDIAN) { + if (mop & MO_BSWAP) { ret = bswap128(ret); } return ret; } -Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, +Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t ra) { - void *haddr; - Int128 ret; + return do_ld16_mmu(env, addr, get_memop(oi), ra); +} - validate_memop(oi, MO_128 | MO_LE); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - memcpy(&ret, haddr, 16); - clear_helper_retaddr(); +Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi) +{ + return helper_ld16_mmu(env, addr, oi, GETPC()); +} + +Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + Int128 ret = do_ld16_mmu(env, addr, get_memop(oi), ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - - if (HOST_BIG_ENDIAN) { - ret = bswap128(ret); - } return ret; } +static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, + MemOp mop, uintptr_t ra) +{ + void *haddr; + + tcg_debug_assert((mop & MO_SIZE) == MO_8); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); + stb_p(haddr, val); + clear_helper_retaddr(); +} + +void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) +{ + do_st1_mmu(env, addr, val, get_memop(oi), ra); +} + void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, MemOpIdx oi, uintptr_t ra) { - void *haddr; - - validate_memop(oi, MO_UB); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stb_p(haddr, val); - clear_helper_retaddr(); + do_st1_mmu(env, addr, val, get_memop(oi), ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, +static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, + MemOp mop, uintptr_t ra) +{ + void *haddr; + + tcg_debug_assert((mop & MO_SIZE) == MO_16); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); + + if (mop & MO_BSWAP) { + val = bswap16(val); + } + store_atom_2(env, ra, haddr, mop, val); + clear_helper_retaddr(); +} + +void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val, MemOpIdx oi, uintptr_t ra) { - void *haddr; - - validate_memop(oi, MO_BEUW); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stw_be_p(haddr, val); - clear_helper_retaddr(); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); + do_st2_mmu(env, addr, val, get_memop(oi), ra); } -void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, +void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, MemOpIdx oi, uintptr_t ra) { - void *haddr; - - validate_memop(oi, MO_BEUL); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stl_be_p(haddr, val); - 
clear_helper_retaddr(); + do_st2_mmu(env, addr, val, get_memop(oi), ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, +static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, + MemOp mop, uintptr_t ra) +{ + void *haddr; + + tcg_debug_assert((mop & MO_SIZE) == MO_32); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); + + if (mop & MO_BSWAP) { + val = bswap32(val); + } + store_atom_4(env, ra, haddr, mop, val); + clear_helper_retaddr(); +} + +void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val, MemOpIdx oi, uintptr_t ra) { - void *haddr; + do_st4_mmu(env, addr, val, get_memop(oi), ra); +} - validate_memop(oi, MO_BEUQ); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stq_be_p(haddr, val); - clear_helper_retaddr(); +void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) +{ + do_st4_mmu(env, addr, val, get_memop(oi), ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, +static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, + MemOp mop, uintptr_t ra) +{ + void *haddr; + + tcg_debug_assert((mop & MO_SIZE) == MO_64); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); + + if (mop & MO_BSWAP) { + val = bswap64(val); + } + store_atom_8(env, ra, haddr, mop, val); + clear_helper_retaddr(); +} + +void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val, MemOpIdx oi, uintptr_t ra) { - void *haddr; - - validate_memop(oi, MO_LEUW); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stw_le_p(haddr, val); - clear_helper_retaddr(); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); + do_st8_mmu(env, addr, val, get_memop(oi), ra); } -void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, +void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, MemOpIdx oi, uintptr_t ra) { - void *haddr; - - validate_memop(oi, MO_LEUL); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stl_le_p(haddr, val); - clear_helper_retaddr(); + do_st8_mmu(env, addr, val, get_memop(oi), ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, - MemOpIdx oi, uintptr_t ra) +static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val, + MemOp mop, uintptr_t ra) { void *haddr; - validate_memop(oi, MO_LEUQ); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stq_le_p(haddr, val); - clear_helper_retaddr(); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); -} + tcg_debug_assert((mop & MO_SIZE) == MO_128); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); -void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, - Int128 val, MemOpIdx oi, uintptr_t ra) -{ - void *haddr; - - validate_memop(oi, MO_128 | MO_BE); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - if (!HOST_BIG_ENDIAN) { + if (mop & MO_BSWAP) { val = bswap128(val); } - memcpy(haddr, &val, 16); + store_atom_16(env, ra, haddr, mop, val); clear_helper_retaddr(); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, - Int128 val, MemOpIdx oi, uintptr_t ra) +void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, + MemOpIdx oi, uintptr_t ra) { - void *haddr; + do_st16_mmu(env, addr, val, 
get_memop(oi), ra); +} + +void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi) +{ + helper_st16_mmu(env, addr, val, oi, GETPC()); +} + +void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, + Int128 val, MemOpIdx oi, uintptr_t ra) +{ + do_st16_mmu(env, addr, val, get_memop(oi), ra); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } @@ -1221,16 +1262,70 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr) return ret; } +uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint8_t ret; + + haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH); + ret = ldub_p(haddr); + clear_helper_retaddr(); + return ret; +} + +uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint16_t ret; + + haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH); + ret = lduw_p(haddr); + clear_helper_retaddr(); + if (get_memop(oi) & MO_BSWAP) { + ret = bswap16(ret); + } + return ret; +} + +uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint32_t ret; + + haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH); + ret = ldl_p(haddr); + clear_helper_retaddr(); + if (get_memop(oi) & MO_BSWAP) { + ret = bswap32(ret); + } + return ret; +} + +uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint64_t ret; + + haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH); + ret = ldq_p(haddr); + clear_helper_retaddr(); + if (get_memop(oi) & MO_BSWAP) { + ret = bswap64(ret); + } + return ret; +} + #include "ldst_common.c.inc" /* * Do not allow unaligned operations to proceed. Return the host address. - * - * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. */ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, - MemOpIdx oi, int size, int prot, - uintptr_t retaddr) + MemOpIdx oi, int size, uintptr_t retaddr) { MemOp mop = get_memop(oi); int a_bits = get_alignment_bits(mop); @@ -1238,8 +1333,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, /* Enforce guest required alignment. */ if (unlikely(addr & ((1 << a_bits) - 1))) { - MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE; - cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr); + cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr); } /* Enforce qemu required alignment. */ @@ -1277,7 +1371,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, #include "atomic_template.h" #endif -#if HAVE_ATOMIC128 || HAVE_CMPXCHG128 +#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128) #define DATA_SIZE 16 #include "atomic_template.h" #endif diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c index 69aa7d018b..5ff0cb8bd9 100644 --- a/accel/xen/xen-all.c +++ b/accel/xen/xen-all.c @@ -12,6 +12,7 @@ #include "qemu/error-report.h" #include "qemu/module.h" #include "qapi/error.h" +#include "hw/xen/xen_native.h" #include "hw/xen/xen-legacy-backend.h" #include "hw/xen/xen_pt.h" #include "chardev/char.h" @@ -23,99 +24,18 @@ #include "migration/global_state.h" #include "hw/boards.h" -//#define DEBUG_XEN - -#ifdef DEBUG_XEN -#define DPRINTF(fmt, ...)
\ - do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0) -#else -#define DPRINTF(fmt, ...) \ - do { } while (0) -#endif - bool xen_allowed; xc_interface *xen_xc; xenforeignmemory_handle *xen_fmem; xendevicemodel_handle *xen_dmod; -static int store_dev_info(int domid, Chardev *cs, const char *string) -{ - struct xs_handle *xs = NULL; - char *path = NULL; - char *newpath = NULL; - char *pts = NULL; - int ret = -1; - - /* Only continue if we're talking to a pty. */ - if (!CHARDEV_IS_PTY(cs)) { - return 0; - } - pts = cs->filename + 4; - - /* We now have everything we need to set the xenstore entry. */ - xs = xs_open(0); - if (xs == NULL) { - fprintf(stderr, "Could not contact XenStore\n"); - goto out; - } - - path = xs_get_domain_path(xs, domid); - if (path == NULL) { - fprintf(stderr, "xs_get_domain_path() error\n"); - goto out; - } - newpath = realloc(path, (strlen(path) + strlen(string) + - strlen("/tty") + 1)); - if (newpath == NULL) { - fprintf(stderr, "realloc error\n"); - goto out; - } - path = newpath; - - strcat(path, string); - strcat(path, "/tty"); - if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) { - fprintf(stderr, "xs_write for '%s' fail", string); - goto out; - } - ret = 0; - -out: - free(path); - xs_close(xs); - - return ret; -} - -void xenstore_store_pv_console_info(int i, Chardev *chr) -{ - if (i == 0) { - store_dev_info(xen_domid, chr, "/console"); - } else { - char buf[32]; - snprintf(buf, sizeof(buf), "/device/console/%d", i); - store_dev_info(xen_domid, chr, buf); - } -} - - -static void xenstore_record_dm_state(struct xs_handle *xs, const char *state) +static void xenstore_record_dm_state(const char *state) { char path[50]; - if (xs == NULL) { - error_report("xenstore connection not initialized"); - exit(1); - } - snprintf(path, sizeof (path), "device-model/%u/state", xen_domid); - /* - * This call may fail when running restricted so don't make it fatal in - * that case. Toolstacks should instead use QMP to listen for state changes. - */ - if (!xs_write(xs, XBT_NULL, path, state, strlen(state)) && - !xen_domid_restrict) { + if (!qemu_xen_xs_write(xenstore, XBT_NULL, path, state, strlen(state))) { error_report("error recording dm state"); exit(1); } @@ -127,7 +47,7 @@ static void xen_change_state_handler(void *opaque, bool running, { if (running) { /* record state running */ - xenstore_record_dm_state(xenstore, "running"); + xenstore_record_dm_state("running"); } } @@ -176,11 +96,21 @@ static int xen_init(MachineState *ms) xc_interface_close(xen_xc); return -1; } - qemu_add_vm_change_state_handler(xen_change_state_handler, NULL); + + /* + * The XenStore write would fail when running restricted so don't attempt + * it in that case. Toolstacks should instead use QMP to listen for state + * changes. 
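
The rewritten xenstore_record_dm_state() above shrinks the old libxenstore-specific code to a single qemu_xen_xs_write() call, and xen_init() now registers the change-state handler only when that write can succeed. For reference, a minimal sketch of the handler pattern; my_state_cb is an illustrative name, only the callback signature and the guard come from this patch:

    /* Sketch: a run-state hook in the style of xen_change_state_handler().
     * QEMU calls it with running=true when the VM starts and running=false
     * when it stops or pauses; RunState gives the precise reason. */
    static void my_state_cb(void *opaque, bool running, RunState state)
    {
        if (running) {
            /* e.g. record "running" under device-model/<domid>/state */
            xenstore_record_dm_state("running");
        }
    }

    /* registered from xen_init(), guarded by !xen_domid_restrict */
    qemu_add_vm_change_state_handler(my_state_cb, NULL);
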
+ */ + if (!xen_domid_restrict) { + qemu_add_vm_change_state_handler(xen_change_state_handler, NULL); + } /* * opt out of system RAM being allocated by generic code */ mc->default_ram_id = NULL; + + xen_mode = XEN_ATTACH; return 0; } diff --git a/audio/alsaaudio.c b/audio/alsaaudio.c index 714bfb6453..057571dd1e 100644 --- a/audio/alsaaudio.c +++ b/audio/alsaaudio.c @@ -222,11 +222,7 @@ static int alsa_poll_helper (snd_pcm_t *handle, struct pollhlp *hlp, int mask) return -1; } - pfds = audio_calloc ("alsa_poll_helper", count, sizeof (*pfds)); - if (!pfds) { - dolog ("Could not initialize poll mode\n"); - return -1; - } + pfds = g_new0(struct pollfd, count); err = snd_pcm_poll_descriptors (handle, pfds, count); if (err < 0) { @@ -917,28 +913,23 @@ static void *alsa_audio_init(Audiodev *dev) alsa_init_per_direction(aopts->in); alsa_init_per_direction(aopts->out); - /* - * need to define them, as otherwise alsa produces no sound - * doesn't set has_* so alsa_open can identify it wasn't set by the user - */ + /* don't set has_* so alsa_open can identify it wasn't set by the user */ if (!dev->u.alsa.out->has_period_length) { - /* 1024 frames assuming 44100Hz */ - dev->u.alsa.out->period_length = 1024 * 1000000 / 44100; + /* 256 frames assuming 44100Hz */ + dev->u.alsa.out->period_length = 5805; } if (!dev->u.alsa.out->has_buffer_length) { /* 4096 frames assuming 44100Hz */ - dev->u.alsa.out->buffer_length = 4096ll * 1000000 / 44100; + dev->u.alsa.out->buffer_length = 92880; } - /* - * OptsVisitor sets unspecified optional fields to zero, but do not depend - * on it... - */ if (!dev->u.alsa.in->has_period_length) { - dev->u.alsa.in->period_length = 0; + /* 256 frames assuming 44100Hz */ + dev->u.alsa.in->period_length = 5805; } if (!dev->u.alsa.in->has_buffer_length) { - dev->u.alsa.in->buffer_length = 0; + /* 4096 frames assuming 44100Hz */ + dev->u.alsa.in->buffer_length = 92880; } return dev; diff --git a/audio/audio.c b/audio/audio.c index 4290309d18..90c7c49d11 100644 --- a/audio/audio.c +++ b/audio/audio.c @@ -33,6 +33,7 @@ #include "qapi/qapi-visit-audio.h" #include "qapi/qapi-commands-audio.h" #include "qemu/cutils.h" +#include "qemu/log.h" #include "qemu/module.h" #include "qemu/help_option.h" #include "sysemu/sysemu.h" @@ -148,26 +149,6 @@ static inline int audio_bits_to_index (int bits) } } -void *audio_calloc (const char *funcname, int nmemb, size_t size) -{ - int cond; - size_t len; - - len = nmemb * size; - cond = !nmemb || !size; - cond |= nmemb < 0; - cond |= len < size; - - if (audio_bug ("audio_calloc", cond)) { - AUD_log (NULL, "%s passed invalid arguments to audio_calloc\n", - funcname); - AUD_log (NULL, "nmemb=%d size=%zu (len=%zu)\n", nmemb, size, len); - return NULL; - } - - return g_malloc0 (len); -} - void AUD_vlog (const char *cap, const char *fmt, va_list ap) { if (cap) { @@ -400,13 +381,6 @@ void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len) /* * Capture */ -static void noop_conv (struct st_sample *dst, const void *src, int samples) -{ - (void) src; - (void) dst; - (void) samples; -} - static CaptureVoiceOut *audio_pcm_capture_find_specific(AudioState *s, struct audsettings *as) { @@ -504,15 +478,8 @@ static int audio_attach_capture (HWVoiceOut *hw) sw->info = hw->info; sw->empty = 1; sw->active = hw->enabled; - sw->conv = noop_conv; - sw->ratio = ((int64_t) hw_cap->info.freq << 32) / sw->info.freq; sw->vol = nominal_volume; sw->rate = st_rate_start (sw->info.freq, hw_cap->info.freq); - if (!sw->rate) { - dolog ("Could not start rate 
conversion for `%s'\n", SW_NAME (sw)); - g_free (sw); - return -1; - } QLIST_INSERT_HEAD (&hw_cap->sw_head, sw, entries); QLIST_INSERT_HEAD (&hw->cap_head, sc, entries); #ifdef DEBUG_CAPTURE @@ -547,8 +514,8 @@ static size_t audio_pcm_hw_find_min_in (HWVoiceIn *hw) static size_t audio_pcm_hw_get_live_in(HWVoiceIn *hw) { size_t live = hw->total_samples_captured - audio_pcm_hw_find_min_in (hw); - if (audio_bug(__func__, live > hw->conv_buf->size)) { - dolog("live=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size); + if (audio_bug(__func__, live > hw->conv_buf.size)) { + dolog("live=%zu hw->conv_buf.size=%zu\n", live, hw->conv_buf.size); return 0; } return live; @@ -557,13 +524,13 @@ static size_t audio_pcm_hw_get_live_in(HWVoiceIn *hw) static size_t audio_pcm_hw_conv_in(HWVoiceIn *hw, void *pcm_buf, size_t samples) { size_t conv = 0; - STSampleBuffer *conv_buf = hw->conv_buf; + STSampleBuffer *conv_buf = &hw->conv_buf; while (samples) { uint8_t *src = advance(pcm_buf, conv * hw->info.bytes_per_frame); size_t proc = MIN(samples, conv_buf->size - conv_buf->pos); - hw->conv(conv_buf->samples + conv_buf->pos, src, proc); + hw->conv(conv_buf->buffer + conv_buf->pos, src, proc); conv_buf->pos = (conv_buf->pos + proc) % conv_buf->size; samples -= proc; conv += proc; @@ -575,56 +542,65 @@ static size_t audio_pcm_hw_conv_in(HWVoiceIn *hw, void *pcm_buf, size_t samples) /* * Soft voice (capture) */ -static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t size) +static void audio_pcm_sw_resample_in(SWVoiceIn *sw, + size_t frames_in_max, size_t frames_out_max, + size_t *total_in, size_t *total_out) { HWVoiceIn *hw = sw->hw; - size_t samples, live, ret = 0, swlim, isamp, osamp, rpos, total = 0; - struct st_sample *src, *dst = sw->buf; + struct st_sample *src, *dst; + size_t live, rpos, frames_in, frames_out; + + live = hw->total_samples_captured - sw->total_hw_samples_acquired; + rpos = audio_ring_posb(hw->conv_buf.pos, live, hw->conv_buf.size); + + /* resample conv_buf from rpos to end of buffer */ + src = hw->conv_buf.buffer + rpos; + frames_in = MIN(frames_in_max, hw->conv_buf.size - rpos); + dst = sw->resample_buf.buffer; + frames_out = frames_out_max; + st_rate_flow(sw->rate, src, dst, &frames_in, &frames_out); + rpos += frames_in; + *total_in = frames_in; + *total_out = frames_out; + + /* resample conv_buf from start of buffer if there are input frames left */ + if (frames_in_max - frames_in && rpos == hw->conv_buf.size) { + src = hw->conv_buf.buffer; + frames_in = frames_in_max - frames_in; + dst += frames_out; + frames_out = frames_out_max - frames_out; + st_rate_flow(sw->rate, src, dst, &frames_in, &frames_out); + *total_in += frames_in; + *total_out += frames_out; + } +} + +static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t buf_len) +{ + HWVoiceIn *hw = sw->hw; + size_t live, frames_out_max, total_in, total_out; live = hw->total_samples_captured - sw->total_hw_samples_acquired; if (!live) { return 0; } - if (audio_bug(__func__, live > hw->conv_buf->size)) { - dolog("live_in=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size); + if (audio_bug(__func__, live > hw->conv_buf.size)) { + dolog("live_in=%zu hw->conv_buf.size=%zu\n", live, hw->conv_buf.size); return 0; } - rpos = audio_ring_posb(hw->conv_buf->pos, live, hw->conv_buf->size); + frames_out_max = MIN(buf_len / sw->info.bytes_per_frame, + sw->resample_buf.size); - samples = size / sw->info.bytes_per_frame; - - swlim = (live * sw->ratio) >> 32; - swlim = MIN (swlim, samples); - - while (swlim) { - src = 
hw->conv_buf->samples + rpos; - if (hw->conv_buf->pos > rpos) { - isamp = hw->conv_buf->pos - rpos; - } else { - isamp = hw->conv_buf->size - rpos; - } - - if (!isamp) { - break; - } - osamp = swlim; - - st_rate_flow (sw->rate, src, dst, &isamp, &osamp); - swlim -= osamp; - rpos = (rpos + isamp) % hw->conv_buf->size; - dst += osamp; - ret += osamp; - total += isamp; - } + audio_pcm_sw_resample_in(sw, live, frames_out_max, &total_in, &total_out); if (!hw->pcm_ops->volume_in) { - mixeng_volume (sw->buf, ret, &sw->vol); + mixeng_volume(sw->resample_buf.buffer, total_out, &sw->vol); } + sw->clip(buf, sw->resample_buf.buffer, total_out); - sw->clip (buf, sw->buf, ret); - sw->total_hw_samples_acquired += total; - return ret * sw->info.bytes_per_frame; + sw->total_hw_samples_acquired += total_in; + return total_out * sw->info.bytes_per_frame; } /* @@ -660,8 +636,8 @@ static size_t audio_pcm_hw_get_live_out (HWVoiceOut *hw, int *nb_live) if (nb_live1) { size_t live = smin; - if (audio_bug(__func__, live > hw->mix_buf->size)) { - dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size); + if (audio_bug(__func__, live > hw->mix_buf.size)) { + dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size); return 0; } return live; @@ -678,17 +654,17 @@ static size_t audio_pcm_hw_get_free(HWVoiceOut *hw) static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len) { size_t clipped = 0; - size_t pos = hw->mix_buf->pos; + size_t pos = hw->mix_buf.pos; while (len) { - st_sample *src = hw->mix_buf->samples + pos; + st_sample *src = hw->mix_buf.buffer + pos; uint8_t *dst = advance(pcm_buf, clipped * hw->info.bytes_per_frame); - size_t samples_till_end_of_buf = hw->mix_buf->size - pos; + size_t samples_till_end_of_buf = hw->mix_buf.size - pos; size_t samples_to_clip = MIN(len, samples_till_end_of_buf); hw->clip(dst, src, samples_to_clip); - pos = (pos + samples_to_clip) % hw->mix_buf->size; + pos = (pos + samples_to_clip) % hw->mix_buf.size; len -= samples_to_clip; clipped += samples_to_clip; } @@ -697,84 +673,113 @@ static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len) /* * Soft voice (playback) */ -static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size) +static void audio_pcm_sw_resample_out(SWVoiceOut *sw, + size_t frames_in_max, size_t frames_out_max, + size_t *total_in, size_t *total_out) { - size_t hwsamples, samples, isamp, osamp, wpos, live, dead, left, blck; - size_t hw_free; - size_t ret = 0, pos = 0, total = 0; - - if (!sw) { - return size; - } - - hwsamples = sw->hw->mix_buf->size; + HWVoiceOut *hw = sw->hw; + struct st_sample *src, *dst; + size_t live, wpos, frames_in, frames_out; live = sw->total_hw_samples_mixed; - if (audio_bug(__func__, live > hwsamples)) { - dolog("live=%zu hw->mix_buf->size=%zu\n", live, hwsamples); + wpos = (hw->mix_buf.pos + live) % hw->mix_buf.size; + + /* write to mix_buf from wpos to end of buffer */ + src = sw->resample_buf.buffer; + frames_in = frames_in_max; + dst = hw->mix_buf.buffer + wpos; + frames_out = MIN(frames_out_max, hw->mix_buf.size - wpos); + st_rate_flow_mix(sw->rate, src, dst, &frames_in, &frames_out); + wpos += frames_out; + *total_in = frames_in; + *total_out = frames_out; + + /* write to mix_buf from start of buffer if there are input frames left */ + if (frames_in_max - frames_in > 0 && wpos == hw->mix_buf.size) { + src += frames_in; + frames_in = frames_in_max - frames_in; + dst = hw->mix_buf.buffer; + frames_out = frames_out_max - frames_out; + st_rate_flow_mix(sw->rate, src, 
dst, &frames_in, &frames_out); + *total_in += frames_in; + *total_out += frames_out; + } +} + +static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t buf_len) +{ + HWVoiceOut *hw = sw->hw; + size_t live, dead, hw_free, sw_max, fe_max; + size_t frames_in_max, frames_out_max, total_in, total_out; + + live = sw->total_hw_samples_mixed; + if (audio_bug(__func__, live > hw->mix_buf.size)) { + dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size); return 0; } - if (live == hwsamples) { + if (live == hw->mix_buf.size) { #ifdef DEBUG_OUT dolog ("%s is full %zu\n", sw->name, live); #endif return 0; } - wpos = (sw->hw->mix_buf->pos + live) % hwsamples; - - dead = hwsamples - live; - hw_free = audio_pcm_hw_get_free(sw->hw); + dead = hw->mix_buf.size - live; + hw_free = audio_pcm_hw_get_free(hw); hw_free = hw_free > live ? hw_free - live : 0; - samples = ((int64_t)MIN(dead, hw_free) << 32) / sw->ratio; - samples = MIN(samples, size / sw->info.bytes_per_frame); - if (samples) { - sw->conv(sw->buf, buf, samples); + frames_out_max = MIN(dead, hw_free); + sw_max = st_rate_frames_in(sw->rate, frames_out_max); + fe_max = MIN(buf_len / sw->info.bytes_per_frame + sw->resample_buf.pos, + sw->resample_buf.size); + frames_in_max = MIN(sw_max, fe_max); + if (!frames_in_max) { + return 0; + } + + if (frames_in_max > sw->resample_buf.pos) { + sw->conv(sw->resample_buf.buffer + sw->resample_buf.pos, + buf, frames_in_max - sw->resample_buf.pos); if (!sw->hw->pcm_ops->volume_out) { - mixeng_volume(sw->buf, samples, &sw->vol); + mixeng_volume(sw->resample_buf.buffer + sw->resample_buf.pos, + frames_in_max - sw->resample_buf.pos, &sw->vol); } } - while (samples) { - dead = hwsamples - live; - left = hwsamples - wpos; - blck = MIN (dead, left); - if (!blck) { - break; - } - isamp = samples; - osamp = blck; - st_rate_flow_mix ( - sw->rate, - sw->buf + pos, - sw->hw->mix_buf->samples + wpos, - &isamp, - &osamp - ); - ret += isamp; - samples -= isamp; - pos += isamp; - live += osamp; - wpos = (wpos + osamp) % hwsamples; - total += osamp; - } + audio_pcm_sw_resample_out(sw, frames_in_max, frames_out_max, + &total_in, &total_out); - sw->total_hw_samples_mixed += total; + sw->total_hw_samples_mixed += total_out; sw->empty = sw->total_hw_samples_mixed == 0; + /* + * Upsampling may leave one audio frame in the resample buffer. Decrement + * total_in by one if there was a leftover frame from the previous resample + * pass in the resample buffer. Increment total_in by one if the current + * resample pass left one frame in the resample buffer. 
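
audio_pcm_sw_resample_out() above writes into the circular mix_buf in at most two contiguous spans, wrapping once at the end of the buffer; audio_pcm_sw_resample_in() reads with the same split. A self-contained sketch of that wrap-around pattern, with memcpy standing in for st_rate_flow_mix(); ring_write and the float element type are illustrative:

    #include <stddef.h>
    #include <string.h>

    /* Write n frames (n <= size) into ring[size] starting at *wpos,
     * wrapping at most once -- the two-span pattern used by the
     * resample helpers above. */
    static void ring_write(float *ring, size_t size, size_t *wpos,
                           const float *src, size_t n)
    {
        size_t first = n < size - *wpos ? n : size - *wpos;

        memcpy(ring + *wpos, src, first * sizeof(*ring));       /* to end */
        memcpy(ring, src + first, (n - first) * sizeof(*ring)); /* wrapped */
        *wpos = (*wpos + n) % size;
    }
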
+ */ + if (frames_in_max - total_in == 1) { + /* copy one leftover audio frame to the beginning of the buffer */ + *sw->resample_buf.buffer = *(sw->resample_buf.buffer + total_in); + total_in += 1 - sw->resample_buf.pos; + sw->resample_buf.pos = 1; + } else if (total_in >= sw->resample_buf.pos) { + total_in -= sw->resample_buf.pos; + sw->resample_buf.pos = 0; + } + #ifdef DEBUG_OUT dolog ( - "%s: write size %zu ret %zu total sw %zu\n", - SW_NAME (sw), - size / sw->info.bytes_per_frame, - ret, + "%s: write size %zu written %zu total mixed %zu\n", + SW_NAME(sw), + buf_len / sw->info.bytes_per_frame, + total_in, sw->total_hw_samples_mixed ); #endif - return ret * sw->info.bytes_per_frame; + return total_in * sw->info.bytes_per_frame; } #ifdef DEBUG_AUDIO @@ -992,18 +997,6 @@ void AUD_set_active_in (SWVoiceIn *sw, int on) } } -/** - * audio_frontend_frames_in() - returns the number of frames the resampling - * code generates from frames_in frames - * - * @sw: audio recording frontend - * @frames_in: number of frames - */ -static size_t audio_frontend_frames_in(SWVoiceIn *sw, size_t frames_in) -{ - return (int64_t)frames_in * sw->ratio >> 32; -} - static size_t audio_get_avail (SWVoiceIn *sw) { size_t live; @@ -1013,33 +1006,21 @@ static size_t audio_get_avail (SWVoiceIn *sw) } live = sw->hw->total_samples_captured - sw->total_hw_samples_acquired; - if (audio_bug(__func__, live > sw->hw->conv_buf->size)) { - dolog("live=%zu sw->hw->conv_buf->size=%zu\n", live, - sw->hw->conv_buf->size); + if (audio_bug(__func__, live > sw->hw->conv_buf.size)) { + dolog("live=%zu sw->hw->conv_buf.size=%zu\n", live, + sw->hw->conv_buf.size); return 0; } ldebug ( - "%s: get_avail live %zu frontend frames %zu\n", + "%s: get_avail live %zu frontend frames %u\n", SW_NAME (sw), - live, audio_frontend_frames_in(sw, live) + live, st_rate_frames_out(sw->rate, live) ); return live; } -/** - * audio_frontend_frames_out() - returns the number of frames needed to - * get frames_out frames after resampling - * - * @sw: audio playback frontend - * @frames_out: number of frames - */ -static size_t audio_frontend_frames_out(SWVoiceOut *sw, size_t frames_out) -{ - return ((int64_t)frames_out << 32) / sw->ratio; -} - static size_t audio_get_free(SWVoiceOut *sw) { size_t live, dead; @@ -1050,17 +1031,17 @@ static size_t audio_get_free(SWVoiceOut *sw) live = sw->total_hw_samples_mixed; - if (audio_bug(__func__, live > sw->hw->mix_buf->size)) { - dolog("live=%zu sw->hw->mix_buf->size=%zu\n", live, - sw->hw->mix_buf->size); + if (audio_bug(__func__, live > sw->hw->mix_buf.size)) { + dolog("live=%zu sw->hw->mix_buf.size=%zu\n", live, + sw->hw->mix_buf.size); return 0; } - dead = sw->hw->mix_buf->size - live; + dead = sw->hw->mix_buf.size - live; #ifdef DEBUG_OUT - dolog("%s: get_free live %zu dead %zu frontend frames %zu\n", - SW_NAME(sw), live, dead, audio_frontend_frames_out(sw, dead)); + dolog("%s: get_free live %zu dead %zu frontend frames %u\n", + SW_NAME(sw), live, dead, st_rate_frames_in(sw->rate, dead)); #endif return dead; @@ -1076,32 +1057,40 @@ static void audio_capture_mix_and_clear(HWVoiceOut *hw, size_t rpos, for (sc = hw->cap_head.lh_first; sc; sc = sc->entries.le_next) { SWVoiceOut *sw = &sc->sw; - int rpos2 = rpos; + size_t rpos2 = rpos; n = samples; while (n) { - size_t till_end_of_hw = hw->mix_buf->size - rpos2; - size_t to_write = MIN(till_end_of_hw, n); - size_t bytes = to_write * hw->info.bytes_per_frame; - size_t written; + size_t till_end_of_hw = hw->mix_buf.size - rpos2; + size_t to_read = MIN(till_end_of_hw, 
n); + size_t live, frames_in, frames_out; - sw->buf = hw->mix_buf->samples + rpos2; - written = audio_pcm_sw_write (sw, NULL, bytes); - if (written - bytes) { - dolog("Could not mix %zu bytes into a capture " + sw->resample_buf.buffer = hw->mix_buf.buffer + rpos2; + sw->resample_buf.size = to_read; + live = sw->total_hw_samples_mixed; + + audio_pcm_sw_resample_out(sw, + to_read, sw->hw->mix_buf.size - live, + &frames_in, &frames_out); + + sw->total_hw_samples_mixed += frames_out; + sw->empty = sw->total_hw_samples_mixed == 0; + + if (to_read - frames_in) { + dolog("Could not mix %zu frames into a capture " "buffer, mixed %zu\n", - bytes, written); + to_read, frames_in); break; } - n -= to_write; - rpos2 = (rpos2 + to_write) % hw->mix_buf->size; + n -= to_read; + rpos2 = (rpos2 + to_read) % hw->mix_buf.size; } } } - n = MIN(samples, hw->mix_buf->size - rpos); - mixeng_clear(hw->mix_buf->samples + rpos, n); - mixeng_clear(hw->mix_buf->samples, samples - n); + n = MIN(samples, hw->mix_buf.size - rpos); + mixeng_clear(hw->mix_buf.buffer + rpos, n); + mixeng_clear(hw->mix_buf.buffer, samples - n); } static size_t audio_pcm_hw_run_out(HWVoiceOut *hw, size_t live) @@ -1127,7 +1116,7 @@ static size_t audio_pcm_hw_run_out(HWVoiceOut *hw, size_t live) live -= proc; clipped += proc; - hw->mix_buf->pos = (hw->mix_buf->pos + proc) % hw->mix_buf->size; + hw->mix_buf.pos = (hw->mix_buf.pos + proc) % hw->mix_buf.size; if (proc == 0 || proc < decr) { break; @@ -1181,12 +1170,14 @@ static void audio_run_out (AudioState *s) size_t free; if (hw_free > sw->total_hw_samples_mixed) { - free = audio_frontend_frames_out(sw, + free = st_rate_frames_in(sw->rate, MIN(sw_free, hw_free - sw->total_hw_samples_mixed)); } else { free = 0; } - if (free > 0) { + if (free > sw->resample_buf.pos) { + free = MIN(free, sw->resample_buf.size) + - sw->resample_buf.pos; sw->callback.fn(sw->callback.opaque, free * sw->info.bytes_per_frame); } @@ -1198,8 +1189,8 @@ static void audio_run_out (AudioState *s) live = 0; } - if (audio_bug(__func__, live > hw->mix_buf->size)) { - dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size); + if (audio_bug(__func__, live > hw->mix_buf.size)) { + dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size); continue; } @@ -1227,13 +1218,13 @@ static void audio_run_out (AudioState *s) continue; } - prev_rpos = hw->mix_buf->pos; + prev_rpos = hw->mix_buf.pos; played = audio_pcm_hw_run_out(hw, live); replay_audio_out(&played); - if (audio_bug(__func__, hw->mix_buf->pos >= hw->mix_buf->size)) { - dolog("hw->mix_buf->pos=%zu hw->mix_buf->size=%zu played=%zu\n", - hw->mix_buf->pos, hw->mix_buf->size, played); - hw->mix_buf->pos = 0; + if (audio_bug(__func__, hw->mix_buf.pos >= hw->mix_buf.size)) { + dolog("hw->mix_buf.pos=%zu hw->mix_buf.size=%zu played=%zu\n", + hw->mix_buf.pos, hw->mix_buf.size, played); + hw->mix_buf.pos = 0; } #ifdef DEBUG_OUT @@ -1314,10 +1305,10 @@ static void audio_run_in (AudioState *s) if (replay_mode != REPLAY_MODE_PLAY) { captured = audio_pcm_hw_run_in( - hw, hw->conv_buf->size - audio_pcm_hw_get_live_in(hw)); + hw, hw->conv_buf.size - audio_pcm_hw_get_live_in(hw)); } - replay_audio_in(&captured, hw->conv_buf->samples, &hw->conv_buf->pos, - hw->conv_buf->size); + replay_audio_in(&captured, hw->conv_buf.buffer, &hw->conv_buf.pos, + hw->conv_buf.size); min = audio_pcm_hw_find_min_in (hw); hw->total_samples_captured += captured - min; @@ -1330,8 +1321,9 @@ static void audio_run_in (AudioState *s) size_t sw_avail = audio_get_avail(sw); size_t avail; - avail = 
audio_frontend_frames_in(sw, sw_avail); + avail = st_rate_frames_out(sw->rate, sw_avail); if (avail > 0) { + avail = MIN(avail, sw->resample_buf.size); sw->callback.fn(sw->callback.opaque, avail * sw->info.bytes_per_frame); } @@ -1350,14 +1342,14 @@ static void audio_run_capture (AudioState *s) SWVoiceOut *sw; captured = live = audio_pcm_hw_get_live_out (hw, NULL); - rpos = hw->mix_buf->pos; + rpos = hw->mix_buf.pos; while (live) { - size_t left = hw->mix_buf->size - rpos; + size_t left = hw->mix_buf.size - rpos; size_t to_capture = MIN(live, left); struct st_sample *src; struct capture_callback *cb; - src = hw->mix_buf->samples + rpos; + src = hw->mix_buf.buffer + rpos; hw->clip (cap->buf, src, to_capture); mixeng_clear (src, to_capture); @@ -1365,10 +1357,10 @@ static void audio_run_capture (AudioState *s) cb->ops.capture (cb->opaque, cap->buf, to_capture * hw->info.bytes_per_frame); } - rpos = (rpos + to_capture) % hw->mix_buf->size; + rpos = (rpos + to_capture) % hw->mix_buf.size; live -= to_capture; } - hw->mix_buf->pos = rpos; + hw->mix_buf.pos = rpos; for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) { if (!sw->active && sw->empty) { @@ -1927,7 +1919,7 @@ CaptureVoiceOut *AUD_add_capture( audio_pcm_init_info (&hw->info, as); - cap->buf = g_malloc0_n(hw->mix_buf->size, hw->info.bytes_per_frame); + cap->buf = g_malloc0_n(hw->mix_buf.size, hw->info.bytes_per_frame); if (hw->info.is_float) { hw->clip = mixeng_clip_float[hw->info.nchannels == 2]; @@ -1979,7 +1971,7 @@ void AUD_del_capture (CaptureVoiceOut *cap, void *cb_opaque) sw = sw1; } QLIST_REMOVE (cap, entries); - g_free (cap->hw.mix_buf); + g_free(cap->hw.mix_buf.buffer); g_free (cap->buf); g_free (cap); } @@ -2069,6 +2061,9 @@ void audio_create_pdos(Audiodev *dev) #ifdef CONFIG_AUDIO_PA CASE(PA, pa, Pa); #endif +#ifdef CONFIG_AUDIO_PIPEWIRE + CASE(PIPEWIRE, pipewire, Pipewire); +#endif #ifdef CONFIG_AUDIO_SDL CASE(SDL, sdl, Sdl); #endif diff --git a/audio/audio_int.h b/audio/audio_int.h index e87ce014a0..e57ff50155 100644 --- a/audio/audio_int.h +++ b/audio/audio_int.h @@ -58,7 +58,7 @@ typedef struct SWVoiceCap SWVoiceCap; typedef struct STSampleBuffer { size_t pos, size; - st_sample samples[]; + st_sample *buffer; } STSampleBuffer; typedef struct HWVoiceOut { @@ -71,7 +71,7 @@ typedef struct HWVoiceOut { f_sample *clip; uint64_t ts_helper; - STSampleBuffer *mix_buf; + STSampleBuffer mix_buf; void *buf_emul; size_t pos_emul, pending_emul, size_emul; @@ -93,7 +93,7 @@ typedef struct HWVoiceIn { size_t total_samples_captured; uint64_t ts_helper; - STSampleBuffer *conv_buf; + STSampleBuffer conv_buf; void *buf_emul; size_t pos_emul, pending_emul, size_emul; @@ -108,8 +108,7 @@ struct SWVoiceOut { AudioState *s; struct audio_pcm_info info; t_sample *conv; - int64_t ratio; - struct st_sample *buf; + STSampleBuffer resample_buf; void *rate; size_t total_hw_samples_mixed; int active; @@ -126,10 +125,9 @@ struct SWVoiceIn { AudioState *s; int active; struct audio_pcm_info info; - int64_t ratio; void *rate; size_t total_hw_samples_acquired; - struct st_sample *buf; + STSampleBuffer resample_buf; f_sample *clip; HWVoiceIn *hw; char *name; @@ -145,14 +143,14 @@ struct audio_driver { void *(*init) (Audiodev *); void (*fini) (void *); #ifdef CONFIG_GIO - void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager); + void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager, bool p2p); #endif struct audio_pcm_ops *pcm_ops; int can_be_default; int max_voices_out; int max_voices_in; - int 
voice_size_out; - int voice_size_in; + size_t voice_size_out; + size_t voice_size_in; QLIST_ENTRY(audio_driver) next; }; @@ -251,7 +249,6 @@ void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as); void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len); int audio_bug (const char *funcname, int cond); -void *audio_calloc (const char *funcname, int nmemb, size_t size); void audio_run(AudioState *s, const char *msg); @@ -294,9 +291,6 @@ static inline size_t audio_ring_posb(size_t pos, size_t dist, size_t len) #define ldebug(fmt, ...) (void)0 #endif -#define AUDIO_STRINGIFY_(n) #n -#define AUDIO_STRINGIFY(n) AUDIO_STRINGIFY_(n) - typedef struct AudiodevListEntry { Audiodev *dev; QSIMPLEQ_ENTRY(AudiodevListEntry) next; diff --git a/audio/audio_template.h b/audio/audio_template.h index 42b4712acb..dc0c74aa74 100644 --- a/audio/audio_template.h +++ b/audio/audio_template.h @@ -40,7 +40,7 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s, struct audio_driver *drv) { int max_voices = glue (drv->max_voices_, TYPE); - int voice_size = glue (drv->voice_size_, TYPE); + size_t voice_size = glue(drv->voice_size_, TYPE); if (glue (s->nb_hw_voices_, TYPE) > max_voices) { if (!max_voices) { @@ -63,16 +63,17 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s, } if (audio_bug(__func__, voice_size && !max_voices)) { - dolog ("drv=`%s' voice_size=%d max_voices=0\n", - drv->name, voice_size); + dolog("drv=`%s' voice_size=%zu max_voices=0\n", + drv->name, voice_size); } } static void glue (audio_pcm_hw_free_resources_, TYPE) (HW *hw) { g_free(hw->buf_emul); - g_free (HWBUF); - HWBUF = NULL; + g_free(HWBUF.buffer); + HWBUF.buffer = NULL; + HWBUF.size = 0; } static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw) @@ -83,56 +84,67 @@ static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw) dolog("Attempted to allocate empty buffer\n"); } - HWBUF = g_malloc0(sizeof(STSampleBuffer) + sizeof(st_sample) * samples); - HWBUF->size = samples; + HWBUF.buffer = g_new0(st_sample, samples); + HWBUF.size = samples; + HWBUF.pos = 0; } else { - HWBUF = NULL; + HWBUF.buffer = NULL; + HWBUF.size = 0; } } static void glue (audio_pcm_sw_free_resources_, TYPE) (SW *sw) { - g_free (sw->buf); + g_free(sw->resample_buf.buffer); + sw->resample_buf.buffer = NULL; + sw->resample_buf.size = 0; if (sw->rate) { st_rate_stop (sw->rate); } - - sw->buf = NULL; sw->rate = NULL; } static int glue (audio_pcm_sw_alloc_resources_, TYPE) (SW *sw) { - int samples; + HW *hw = sw->hw; + uint64_t samples; if (!glue(audio_get_pdo_, TYPE)(sw->s->dev)->mixing_engine) { return 0; } -#ifdef DAC - samples = ((int64_t) sw->HWBUF->size << 32) / sw->ratio; -#else - samples = (int64_t)sw->HWBUF->size * sw->ratio >> 32; -#endif + samples = muldiv64(HWBUF.size, sw->info.freq, hw->info.freq); + if (samples == 0) { + uint64_t f_fe_min; + uint64_t f_be = (uint32_t)hw->info.freq; - sw->buf = audio_calloc(__func__, samples, sizeof(struct st_sample)); - if (!sw->buf) { - dolog ("Could not allocate buffer for `%s' (%d samples)\n", - SW_NAME (sw), samples); + /* f_fe_min = ceil(1 [frames] * f_be [Hz] / size_be [frames]) */ + f_fe_min = (f_be + HWBUF.size - 1) / HWBUF.size; + qemu_log_mask(LOG_UNIMP, + AUDIO_CAP ": The guest selected a " NAME " sample rate" + " of %d Hz for %s. 
Only sample rates >= %" PRIu64 " Hz" + " are supported.\n", + sw->info.freq, sw->name, f_fe_min); return -1; } + /* + * Allocate one additional audio frame that is needed for upsampling + * if the resample buffer size is small. For large buffer sizes take + * care of overflows and truncation. + */ + samples = samples < SIZE_MAX ? samples + 1 : SIZE_MAX; + sw->resample_buf.buffer = g_new0(st_sample, samples); + sw->resample_buf.size = samples; + sw->resample_buf.pos = 0; + #ifdef DAC - sw->rate = st_rate_start (sw->info.freq, sw->hw->info.freq); + sw->rate = st_rate_start(sw->info.freq, hw->info.freq); #else - sw->rate = st_rate_start (sw->hw->info.freq, sw->info.freq); + sw->rate = st_rate_start(hw->info.freq, sw->info.freq); #endif - if (!sw->rate) { - g_free (sw->buf); - sw->buf = NULL; - return -1; - } + return 0; } @@ -149,11 +161,8 @@ static int glue (audio_pcm_sw_init_, TYPE) ( sw->hw = hw; sw->active = 0; #ifdef DAC - sw->ratio = ((int64_t) sw->hw->info.freq << 32) / sw->info.freq; sw->total_hw_samples_mixed = 0; sw->empty = 1; -#else - sw->ratio = ((int64_t) sw->info.freq << 32) / sw->hw->info.freq; #endif if (sw->info.is_float) { @@ -264,13 +273,11 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s, return NULL; } - hw = audio_calloc(__func__, 1, glue(drv->voice_size_, TYPE)); - if (!hw) { - dolog ("Can not allocate voice `%s' size %d\n", - drv->name, glue (drv->voice_size_, TYPE)); - return NULL; - } - + /* + * Since glue(s->nb_hw_voices_, TYPE) is != 0, glue(drv->voice_size_, TYPE) + * is guaranteed to be != 0. See the audio_init_nb_voices_* functions. + */ + hw = g_malloc0(glue(drv->voice_size_, TYPE)); hw->s = s; hw->pcm_ops = drv->pcm_ops; @@ -355,6 +362,10 @@ AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev) case AUDIODEV_DRIVER_PA: return qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.TYPE); #endif +#ifdef CONFIG_AUDIO_PIPEWIRE + case AUDIODEV_DRIVER_PIPEWIRE: + return qapi_AudiodevPipewirePerDirectionOptions_base(dev->u.pipewire.TYPE); +#endif #ifdef CONFIG_AUDIO_SDL case AUDIODEV_DRIVER_SDL: return qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.TYPE); @@ -418,33 +429,28 @@ static SW *glue(audio_pcm_create_voice_pair_, TYPE)( hw_as = *as; } - sw = audio_calloc(__func__, 1, sizeof(*sw)); - if (!sw) { - dolog ("Could not allocate soft voice `%s' (%zu bytes)\n", - sw_name ? 
sw_name : "unknown", sizeof (*sw)); - goto err1; - } + sw = g_new0(SW, 1); sw->s = s; hw = glue(audio_pcm_hw_add_, TYPE)(s, &hw_as); if (!hw) { - goto err2; + dolog("Could not create a backend for voice `%s'\n", sw_name); + goto err1; } glue (audio_pcm_hw_add_sw_, TYPE) (hw, sw); if (glue (audio_pcm_sw_init_, TYPE) (sw, hw, sw_name, as)) { - goto err3; + goto err2; } return sw; -err3: +err2: glue (audio_pcm_hw_del_sw_, TYPE) (sw); glue (audio_pcm_hw_gc_, TYPE) (&hw); -err2: - g_free (sw); err1: + g_free(sw); return NULL; } @@ -515,8 +521,8 @@ SW *glue (AUD_open_, TYPE) ( HW *hw = sw->hw; if (!hw) { - dolog ("Internal logic error voice `%s' has no hardware store\n", - SW_NAME (sw)); + dolog("Internal logic error: voice `%s' has no backend\n", + SW_NAME(sw)); goto fail; } @@ -527,7 +533,6 @@ SW *glue (AUD_open_, TYPE) ( } else { sw = glue(audio_pcm_create_voice_pair_, TYPE)(s, name, as); if (!sw) { - dolog ("Failed to create voice `%s'\n", name); return NULL; } } diff --git a/audio/dbusaudio.c b/audio/dbusaudio.c index 722df0355e..fece74f78c 100644 --- a/audio/dbusaudio.c +++ b/audio/dbusaudio.c @@ -43,6 +43,7 @@ typedef struct DBusAudio { GDBusObjectManagerServer *server; + bool p2p; GDBusObjectSkeleton *audio; QemuDBusDisplay1Audio *iface; GHashTable *out_listeners; @@ -448,7 +449,8 @@ dbus_audio_register_listener(AudioState *s, bool out) { DBusAudio *da = s->drv_opaque; - const char *sender = g_dbus_method_invocation_get_sender(invocation); + const char *sender = + da->p2p ? "p2p" : g_dbus_method_invocation_get_sender(invocation); g_autoptr(GDBusConnection) listener_conn = NULL; g_autoptr(GError) err = NULL; g_autoptr(GSocket) socket = NULL; @@ -591,7 +593,7 @@ dbus_audio_register_in_listener(AudioState *s, } static void -dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server) +dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server, bool p2p) { DBusAudio *da = s->drv_opaque; @@ -599,6 +601,7 @@ dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server) g_assert(!da->server); da->server = g_object_ref(server); + da->p2p = p2p; da->audio = g_dbus_object_skeleton_new(DBUS_DISPLAY1_AUDIO_PATH); da->iface = qemu_dbus_display1_audio_skeleton_new(); diff --git a/audio/meson.build b/audio/meson.build index 0722224ba9..65a49c1a10 100644 --- a/audio/meson.build +++ b/audio/meson.build @@ -19,6 +19,7 @@ foreach m : [ ['sdl', sdl, files('sdlaudio.c')], ['jack', jack, files('jackaudio.c')], ['sndio', sndio, files('sndioaudio.c')], + ['pipewire', pipewire, files('pwaudio.c')], ['spice', spice, files('spiceaudio.c')] ] if m[1].found() diff --git a/audio/mixeng.c b/audio/mixeng.c index 100a306d6f..69f6549224 100644 --- a/audio/mixeng.c +++ b/audio/mixeng.c @@ -414,12 +414,7 @@ struct rate { */ void *st_rate_start (int inrate, int outrate) { - struct rate *rate = audio_calloc(__func__, 1, sizeof(*rate)); - - if (!rate) { - dolog ("Could not allocate resampler (%zu bytes)\n", sizeof (*rate)); - return NULL; - } + struct rate *rate = g_new0(struct rate, 1); rate->opos = 0; @@ -445,6 +440,86 @@ void st_rate_stop (void *opaque) g_free (opaque); } +/** + * st_rate_frames_out() - returns the number of frames the resampling code + * generates from frames_in frames + * + * @opaque: pointer to struct rate + * @frames_in: number of frames + * + * When upsampling, there may be more than one correct result. In this case, + * the function returns the maximum number of output frames the resampling + * code can generate. 
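
The st_rate_frames_out()/st_rate_frames_in() helpers below reason in the converter's 32.32 fixed-point domain: st_rate_start() stores the input advance per output frame as opos_inc = ((uint64_t)inrate << 32) / outrate, so opos_inc == 1ULL << 32 means no resampling. A small worked example of that arithmetic, ignoring the edge rounding the real functions account for; the rates and frame count are illustrative:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* 48 kHz stream resampled to 44.1 kHz */
        uint64_t opos_inc = ((uint64_t)48000 << 32) / 44100;
        uint32_t frames_in = 480;

        /* roughly frames_in * outrate / inrate */
        uint64_t frames_out = ((uint64_t)frames_in << 32) / opos_inc;
        printf("%" PRIu32 " in -> ~%" PRIu64 " out\n", frames_in, frames_out);
        /* prints: 480 in -> ~441 out */
        return 0;
    }
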
+ */ +uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in) +{ + struct rate *rate = opaque; + uint64_t opos_end, opos_delta; + uint32_t ipos_end; + uint32_t frames_out; + + if (rate->opos_inc == 1ULL << 32) { + return frames_in; + } + + /* no output frame without at least one input frame */ + if (!frames_in) { + return 0; + } + + /* last frame read was at rate->ipos - 1 */ + ipos_end = rate->ipos - 1 + frames_in; + opos_end = (uint64_t)ipos_end << 32; + + /* last frame written was at rate->opos - rate->opos_inc */ + if (opos_end + rate->opos_inc <= rate->opos) { + return 0; + } + opos_delta = opos_end - rate->opos + rate->opos_inc; + frames_out = opos_delta / rate->opos_inc; + + return opos_delta % rate->opos_inc ? frames_out : frames_out - 1; +} + +/** + * st_rate_frames_in() - returns the number of frames needed to + * get frames_out frames after resampling + * + * @opaque: pointer to struct rate + * @frames_out: number of frames + * + * When downsampling, there may be more than one correct result. In this + * case, the function returns the maximum number of input frames needed. + */ +uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out) +{ + struct rate *rate = opaque; + uint64_t opos_start, opos_end; + uint32_t ipos_start, ipos_end; + + if (rate->opos_inc == 1ULL << 32) { + return frames_out; + } + + if (frames_out) { + opos_start = rate->opos; + ipos_start = rate->ipos; + } else { + uint64_t offset; + + /* add offset = ceil(opos_inc) to opos and ipos to avoid an underflow */ + offset = (rate->opos_inc + (1ULL << 32) - 1) & ~((1ULL << 32) - 1); + opos_start = rate->opos + offset; + ipos_start = rate->ipos + (offset >> 32); + } + /* last frame written was at opos_start - rate->opos_inc */ + opos_end = opos_start - rate->opos_inc + rate->opos_inc * frames_out; + ipos_end = (opos_end >> 32) + 1; + + /* last frame read was at ipos_start - 1 */ + return ipos_end + 1 > ipos_start ? ipos_end + 1 - ipos_start : 0; +} + void mixeng_clear (struct st_sample *buf, int len) { memset (buf, 0, len * sizeof (struct st_sample)); diff --git a/audio/mixeng.h b/audio/mixeng.h index 2dcd6df245..f9de7cffeb 100644 --- a/audio/mixeng.h +++ b/audio/mixeng.h @@ -52,6 +52,8 @@ void st_rate_flow(void *opaque, st_sample *ibuf, st_sample *obuf, void st_rate_flow_mix(void *opaque, st_sample *ibuf, st_sample *obuf, size_t *isamp, size_t *osamp); void st_rate_stop (void *opaque); +uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in); +uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out); void mixeng_clear (struct st_sample *buf, int len); void mixeng_volume (struct st_sample *buf, int len, struct mixeng_volume *vol); diff --git a/audio/pwaudio.c b/audio/pwaudio.c new file mode 100644 index 0000000000..1d108bdebb --- /dev/null +++ b/audio/pwaudio.c @@ -0,0 +1,915 @@ +/* + * QEMU Pipewire audio driver + * + * Copyright (c) 2023 Red Hat Inc. 
+ * + * Author: Dorinda Bassey + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "audio.h" +#include <errno.h> +#include "qemu/error-report.h" +#include <spa/param/audio/format-utils.h> +#include <spa/utils/ringbuffer.h> +#include <spa/utils/result.h> +#include <spa/param/props.h> + +#include <pipewire/pipewire.h> +#include "trace.h" + +#define AUDIO_CAP "pipewire" +#define RINGBUFFER_SIZE (1u << 22) +#define RINGBUFFER_MASK (RINGBUFFER_SIZE - 1) + +#include "audio_int.h" + +typedef struct pwvolume { + uint32_t channels; + float values[SPA_AUDIO_MAX_CHANNELS]; +} pwvolume; + +typedef struct pwaudio { + Audiodev *dev; + struct pw_thread_loop *thread_loop; + struct pw_context *context; + + struct pw_core *core; + struct spa_hook core_listener; + int last_seq, pending_seq, error; +} pwaudio; + +typedef struct PWVoice { + pwaudio *g; + struct pw_stream *stream; + struct spa_hook stream_listener; + struct spa_audio_info_raw info; + uint32_t highwater_mark; + uint32_t frame_size, req; + struct spa_ringbuffer ring; + uint8_t buffer[RINGBUFFER_SIZE]; + + pwvolume volume; + bool muted; +} PWVoice; + +typedef struct PWVoiceOut { + HWVoiceOut hw; + PWVoice v; +} PWVoiceOut; + +typedef struct PWVoiceIn { + HWVoiceIn hw; + PWVoice v; +} PWVoiceIn; + +static void +stream_destroy(void *data) +{ + PWVoice *v = (PWVoice *) data; + spa_hook_remove(&v->stream_listener); + v->stream = NULL; +} + +/* output data processing function: read playback data from the ring buffer */ +static void +playback_on_process(void *data) +{ + PWVoice *v = data; + void *p; + struct pw_buffer *b; + struct spa_buffer *buf; + uint32_t req, index, n_bytes; + int32_t avail; + + assert(v->stream); + + /* obtain a buffer to read from */ + b = pw_stream_dequeue_buffer(v->stream); + if (b == NULL) { + error_report("out of buffers: %s", strerror(errno)); + return; + } + + buf = b->buffer; + p = buf->datas[0].data; + if (p == NULL) { + return; + } + /* calculate the total number of bytes to read from the buffer */ + req = b->requested * v->frame_size; + if (req == 0) { + req = v->req; + } + n_bytes = SPA_MIN(req, buf->datas[0].maxsize); + + /* get the number of bytes available for reading from the ring buffer */ + avail = spa_ringbuffer_get_read_index(&v->ring, &index); + + if (avail <= 0) { + PWVoiceOut *vo = container_of(data, PWVoiceOut, v); + audio_pcm_info_clear_buf(&vo->hw.info, p, n_bytes / v->frame_size); + } else { + if ((uint32_t) avail < n_bytes) { + /* + * PipeWire immediately calls this callback again if we provide + * less than n_bytes. Then audio_pcm_info_clear_buf() fills the + * rest of the buffer with silence. + */
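
The avail/index handling here, and again in qpw_read() and qpw_write() below, relies on SPA's free-running ring-buffer indices. A minimal sketch of the idiom, independent of the SPA API; rb_fill and rb_offset are illustrative names:

    #include <stdint.h>

    #define RB_SIZE (1u << 22)           /* must be a power of two */
    #define RB_MASK (RB_SIZE - 1)

    /* Indices grow without bound; unsigned subtraction still yields the
     * fill level after the uint32_t counters wrap. */
    static inline uint32_t rb_fill(uint32_t write_idx, uint32_t read_idx)
    {
        return write_idx - read_idx;
    }

    /* An index is reduced to a buffer offset only on access. */
    static inline uint32_t rb_offset(uint32_t idx)
    {
        return idx & RB_MASK;
    }
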
+ n_bytes = avail; + } + + spa_ringbuffer_read_data(&v->ring, + v->buffer, RINGBUFFER_SIZE, + index & RINGBUFFER_MASK, p, n_bytes); + + index += n_bytes; + spa_ringbuffer_read_update(&v->ring, index); + + } + buf->datas[0].chunk->offset = 0; + buf->datas[0].chunk->stride = v->frame_size; + buf->datas[0].chunk->size = n_bytes; + + /* queue the buffer for playback */ + pw_stream_queue_buffer(v->stream, b); +} + +/* input data processing function: write captured data into the ring buffer */ +static void +capture_on_process(void *data) +{ + PWVoice *v = (PWVoice *) data; + void *p; + struct pw_buffer *b; + struct spa_buffer *buf; + int32_t filled; + uint32_t index, offs, n_bytes; + + assert(v->stream); + + /* obtain a buffer */ + b = pw_stream_dequeue_buffer(v->stream); + if (b == NULL) { + error_report("out of buffers: %s", strerror(errno)); + return; + } + + /* Write data into buffer */ + buf = b->buffer; + p = buf->datas[0].data; + if (p == NULL) { + return; + } + offs = SPA_MIN(buf->datas[0].chunk->offset, buf->datas[0].maxsize); + n_bytes = SPA_MIN(buf->datas[0].chunk->size, buf->datas[0].maxsize - offs); + + filled = spa_ringbuffer_get_write_index(&v->ring, &index); + + if (filled < 0) { + error_report("%p: underrun write:%u filled:%d", p, index, filled); + } else { + if ((uint32_t) filled + n_bytes > RINGBUFFER_SIZE) { + error_report("%p: overrun write:%u filled:%d + size:%u > max:%u", + p, index, filled, n_bytes, RINGBUFFER_SIZE); + } + } + spa_ringbuffer_write_data(&v->ring, + v->buffer, RINGBUFFER_SIZE, + index & RINGBUFFER_MASK, + SPA_PTROFF(p, offs, void), n_bytes); + index += n_bytes; + spa_ringbuffer_write_update(&v->ring, index); + + /* return the buffer to PipeWire */ + pw_stream_queue_buffer(v->stream, b); +} + +static void +on_stream_state_changed(void *data, enum pw_stream_state old, + enum pw_stream_state state, const char *error) +{ + PWVoice *v = (PWVoice *) data; + + trace_pw_state_changed(pw_stream_get_node_id(v->stream), + pw_stream_state_as_string(state)); + + switch (state) { + case PW_STREAM_STATE_ERROR: + case PW_STREAM_STATE_UNCONNECTED: + break; + case PW_STREAM_STATE_PAUSED: + case PW_STREAM_STATE_CONNECTING: + case PW_STREAM_STATE_STREAMING: + break; + } +} + +static const struct pw_stream_events capture_stream_events = { + PW_VERSION_STREAM_EVENTS, + .destroy = stream_destroy, + .state_changed = on_stream_state_changed, + .process = capture_on_process +}; + +static const struct pw_stream_events playback_stream_events = { + PW_VERSION_STREAM_EVENTS, + .destroy = stream_destroy, + .state_changed = on_stream_state_changed, + .process = playback_on_process +}; + +static size_t +qpw_read(HWVoiceIn *hw, void *data, size_t len) +{ + PWVoiceIn *pw = (PWVoiceIn *) hw; + PWVoice *v = &pw->v; + pwaudio *c = v->g; + const char *error = NULL; + size_t l; + int32_t avail; + uint32_t index; + + pw_thread_loop_lock(c->thread_loop); + if (pw_stream_get_state(v->stream, &error) != PW_STREAM_STATE_STREAMING) { + /* wait for stream to become ready */ + l = 0; + goto done_unlock; + } + /* get the number of bytes available for reading from the ring buffer */ + avail = spa_ringbuffer_get_read_index(&v->ring, &index); + + trace_pw_read(avail, index, len); + + if (avail < (int32_t) len) { + len = avail; + } + + spa_ringbuffer_read_data(&v->ring, + v->buffer, RINGBUFFER_SIZE, + index & RINGBUFFER_MASK, data, len); + index += len; + spa_ringbuffer_read_update(&v->ring, index); + l = len; + +done_unlock: + pw_thread_loop_unlock(c->thread_loop); + return l; +} + +static size_t
qpw_buffer_get_free(HWVoiceOut *hw) +{ + PWVoiceOut *pw = (PWVoiceOut *)hw; + PWVoice *v = &pw->v; + pwaudio *c = v->g; + const char *error = NULL; + int32_t filled, avail; + uint32_t index; + + pw_thread_loop_lock(c->thread_loop); + if (pw_stream_get_state(v->stream, &error) != PW_STREAM_STATE_STREAMING) { + /* wait for stream to become ready */ + avail = 0; + goto done_unlock; + } + + filled = spa_ringbuffer_get_write_index(&v->ring, &index); + avail = v->highwater_mark - filled; + +done_unlock: + pw_thread_loop_unlock(c->thread_loop); + return avail; +} + +static size_t +qpw_write(HWVoiceOut *hw, void *data, size_t len) +{ + PWVoiceOut *pw = (PWVoiceOut *) hw; + PWVoice *v = &pw->v; + pwaudio *c = v->g; + const char *error = NULL; + int32_t filled, avail; + uint32_t index; + + pw_thread_loop_lock(c->thread_loop); + if (pw_stream_get_state(v->stream, &error) != PW_STREAM_STATE_STREAMING) { + /* wait for stream to become ready */ + len = 0; + goto done_unlock; + } + filled = spa_ringbuffer_get_write_index(&v->ring, &index); + avail = v->highwater_mark - filled; + + trace_pw_write(filled, avail, index, len); + + if (len > avail) { + len = avail; + } + + if (filled < 0) { + error_report("%p: underrun write:%u filled:%d", pw, index, filled); + } else { + if ((uint32_t) filled + len > RINGBUFFER_SIZE) { + error_report("%p: overrun write:%u filled:%d + size:%zu > max:%u", + pw, index, filled, len, RINGBUFFER_SIZE); + } + } + + spa_ringbuffer_write_data(&v->ring, + v->buffer, RINGBUFFER_SIZE, + index & RINGBUFFER_MASK, data, len); + index += len; + spa_ringbuffer_write_update(&v->ring, index); + +done_unlock: + pw_thread_loop_unlock(c->thread_loop); + return len; +} + +static int +audfmt_to_pw(AudioFormat fmt, int endianness) +{ + int format; + + switch (fmt) { + case AUDIO_FORMAT_S8: + format = SPA_AUDIO_FORMAT_S8; + break; + case AUDIO_FORMAT_U8: + format = SPA_AUDIO_FORMAT_U8; + break; + case AUDIO_FORMAT_S16: + format = endianness ? SPA_AUDIO_FORMAT_S16_BE : SPA_AUDIO_FORMAT_S16_LE; + break; + case AUDIO_FORMAT_U16: + format = endianness ? SPA_AUDIO_FORMAT_U16_BE : SPA_AUDIO_FORMAT_U16_LE; + break; + case AUDIO_FORMAT_S32: + format = endianness ? SPA_AUDIO_FORMAT_S32_BE : SPA_AUDIO_FORMAT_S32_LE; + break; + case AUDIO_FORMAT_U32: + format = endianness ? SPA_AUDIO_FORMAT_U32_BE : SPA_AUDIO_FORMAT_U32_LE; + break; + case AUDIO_FORMAT_F32: + format = endianness ? 
SPA_AUDIO_FORMAT_F32_BE : SPA_AUDIO_FORMAT_F32_LE; + break; + default: + dolog("Internal logic error: Bad audio format %d\n", fmt); + format = SPA_AUDIO_FORMAT_U8; + break; + } + return format; +} + +static AudioFormat +pw_to_audfmt(enum spa_audio_format fmt, int *endianness, + uint32_t *sample_size) +{ + switch (fmt) { + case SPA_AUDIO_FORMAT_S8: + *sample_size = 1; + return AUDIO_FORMAT_S8; + case SPA_AUDIO_FORMAT_U8: + *sample_size = 1; + return AUDIO_FORMAT_U8; + case SPA_AUDIO_FORMAT_S16_BE: + *sample_size = 2; + *endianness = 1; + return AUDIO_FORMAT_S16; + case SPA_AUDIO_FORMAT_S16_LE: + *sample_size = 2; + *endianness = 0; + return AUDIO_FORMAT_S16; + case SPA_AUDIO_FORMAT_U16_BE: + *sample_size = 2; + *endianness = 1; + return AUDIO_FORMAT_U16; + case SPA_AUDIO_FORMAT_U16_LE: + *sample_size = 2; + *endianness = 0; + return AUDIO_FORMAT_U16; + case SPA_AUDIO_FORMAT_S32_BE: + *sample_size = 4; + *endianness = 1; + return AUDIO_FORMAT_S32; + case SPA_AUDIO_FORMAT_S32_LE: + *sample_size = 4; + *endianness = 0; + return AUDIO_FORMAT_S32; + case SPA_AUDIO_FORMAT_U32_BE: + *sample_size = 4; + *endianness = 1; + return AUDIO_FORMAT_U32; + case SPA_AUDIO_FORMAT_U32_LE: + *sample_size = 4; + *endianness = 0; + return AUDIO_FORMAT_U32; + case SPA_AUDIO_FORMAT_F32_BE: + *sample_size = 4; + *endianness = 1; + return AUDIO_FORMAT_F32; + case SPA_AUDIO_FORMAT_F32_LE: + *sample_size = 4; + *endianness = 0; + return AUDIO_FORMAT_F32; + default: + *sample_size = 1; + dolog("Internal logic error: Bad spa_audio_format %d\n", fmt); + return AUDIO_FORMAT_U8; + } +} + +static int +create_stream(pwaudio *c, PWVoice *v, const char *stream_name, + const char *name, enum spa_direction dir) +{ + int res; + uint32_t n_params; + const struct spa_pod *params[2]; + uint8_t buffer[1024]; + struct spa_pod_builder b; + uint64_t buf_samples; + struct pw_properties *props; + + props = pw_properties_new(NULL, NULL); + + /* 75% of the timer period for faster updates */ + buf_samples = (uint64_t)v->g->dev->timer_period * v->info.rate + * 3 / 4 / 1000000; + pw_properties_setf(props, PW_KEY_NODE_LATENCY, "%" PRIu64 "/%u", + buf_samples, v->info.rate); + + trace_pw_period(buf_samples, v->info.rate); + if (name) { + pw_properties_set(props, PW_KEY_TARGET_OBJECT, name); + } + v->stream = pw_stream_new(c->core, stream_name, props); + + if (v->stream == NULL) { + return -1; + } + + if (dir == SPA_DIRECTION_INPUT) { + pw_stream_add_listener(v->stream, + &v->stream_listener, &capture_stream_events, v); + } else { + pw_stream_add_listener(v->stream, + &v->stream_listener, &playback_stream_events, v); + } + + n_params = 0; + spa_pod_builder_init(&b, buffer, sizeof(buffer)); + params[n_params++] = spa_format_audio_raw_build(&b, + SPA_PARAM_EnumFormat, + &v->info); + + /* connect the stream to a sink or source */ + res = pw_stream_connect(v->stream, + dir == + SPA_DIRECTION_INPUT ? 
PW_DIRECTION_INPUT : + PW_DIRECTION_OUTPUT, PW_ID_ANY, + PW_STREAM_FLAG_AUTOCONNECT | + PW_STREAM_FLAG_INACTIVE | + PW_STREAM_FLAG_MAP_BUFFERS | + PW_STREAM_FLAG_RT_PROCESS, params, n_params); + if (res < 0) { + pw_stream_destroy(v->stream); + return -1; + } + + return 0; +} + +static int +qpw_stream_new(pwaudio *c, PWVoice *v, const char *stream_name, + const char *name, enum spa_direction dir) +{ + int r; + + switch (v->info.channels) { + case 8: + v->info.position[0] = SPA_AUDIO_CHANNEL_FL; + v->info.position[1] = SPA_AUDIO_CHANNEL_FR; + v->info.position[2] = SPA_AUDIO_CHANNEL_FC; + v->info.position[3] = SPA_AUDIO_CHANNEL_LFE; + v->info.position[4] = SPA_AUDIO_CHANNEL_RL; + v->info.position[5] = SPA_AUDIO_CHANNEL_RR; + v->info.position[6] = SPA_AUDIO_CHANNEL_SL; + v->info.position[7] = SPA_AUDIO_CHANNEL_SR; + break; + case 6: + v->info.position[0] = SPA_AUDIO_CHANNEL_FL; + v->info.position[1] = SPA_AUDIO_CHANNEL_FR; + v->info.position[2] = SPA_AUDIO_CHANNEL_FC; + v->info.position[3] = SPA_AUDIO_CHANNEL_LFE; + v->info.position[4] = SPA_AUDIO_CHANNEL_RL; + v->info.position[5] = SPA_AUDIO_CHANNEL_RR; + break; + case 5: + v->info.position[0] = SPA_AUDIO_CHANNEL_FL; + v->info.position[1] = SPA_AUDIO_CHANNEL_FR; + v->info.position[2] = SPA_AUDIO_CHANNEL_FC; + v->info.position[3] = SPA_AUDIO_CHANNEL_LFE; + v->info.position[4] = SPA_AUDIO_CHANNEL_RC; + break; + case 4: + v->info.position[0] = SPA_AUDIO_CHANNEL_FL; + v->info.position[1] = SPA_AUDIO_CHANNEL_FR; + v->info.position[2] = SPA_AUDIO_CHANNEL_FC; + v->info.position[3] = SPA_AUDIO_CHANNEL_RC; + break; + case 3: + v->info.position[0] = SPA_AUDIO_CHANNEL_FL; + v->info.position[1] = SPA_AUDIO_CHANNEL_FR; + v->info.position[2] = SPA_AUDIO_CHANNEL_LFE; + break; + case 2: + v->info.position[0] = SPA_AUDIO_CHANNEL_FL; + v->info.position[1] = SPA_AUDIO_CHANNEL_FR; + break; + case 1: + v->info.position[0] = SPA_AUDIO_CHANNEL_MONO; + break; + default: + for (size_t i = 0; i < v->info.channels; i++) { + v->info.position[i] = SPA_AUDIO_CHANNEL_UNKNOWN; + } + break; + } + + /* create a new unconnected pwstream */ + r = create_stream(c, v, stream_name, name, dir); + if (r < 0) { + AUD_log(AUDIO_CAP, "Failed to create stream."); + return -1; + } + + return r; +} + +static int +qpw_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque) +{ + PWVoiceOut *pw = (PWVoiceOut *) hw; + PWVoice *v = &pw->v; + struct audsettings obt_as = *as; + pwaudio *c = v->g = drv_opaque; + AudiodevPipewireOptions *popts = &c->dev->u.pipewire; + AudiodevPipewirePerDirectionOptions *ppdo = popts->out; + int r; + + pw_thread_loop_lock(c->thread_loop); + + v->info.format = audfmt_to_pw(as->fmt, as->endianness); + v->info.channels = as->nchannels; + v->info.rate = as->freq; + + obt_as.fmt = + pw_to_audfmt(v->info.format, &obt_as.endianness, &v->frame_size); + v->frame_size *= as->nchannels; + + v->req = (uint64_t)c->dev->timer_period * v->info.rate + * 1 / 2 / 1000000 * v->frame_size; + + /* call the function that creates a new stream for playback */ + r = qpw_stream_new(c, v, ppdo->stream_name ? 
: c->dev->id, + ppdo->name, SPA_DIRECTION_OUTPUT); + if (r < 0) { + error_report("qpw_stream_new for playback failed"); + pw_thread_loop_unlock(c->thread_loop); + return -1; + } + + /* report the audio format we support */ + audio_pcm_init_info(&hw->info, &obt_as); + + /* report the buffer size to qemu */ + hw->samples = audio_buffer_frames( + qapi_AudiodevPipewirePerDirectionOptions_base(ppdo), &obt_as, 46440); + v->highwater_mark = MIN(RINGBUFFER_SIZE, + (ppdo->has_latency ? ppdo->latency : 46440) + * (uint64_t)v->info.rate / 1000000 * v->frame_size); + + pw_thread_loop_unlock(c->thread_loop); + return 0; +} + +static int +qpw_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque) +{ + PWVoiceIn *pw = (PWVoiceIn *) hw; + PWVoice *v = &pw->v; + struct audsettings obt_as = *as; + pwaudio *c = v->g = drv_opaque; + AudiodevPipewireOptions *popts = &c->dev->u.pipewire; + AudiodevPipewirePerDirectionOptions *ppdo = popts->in; + int r; + + pw_thread_loop_lock(c->thread_loop); + + v->info.format = audfmt_to_pw(as->fmt, as->endianness); + v->info.channels = as->nchannels; + v->info.rate = as->freq; + + obt_as.fmt = + pw_to_audfmt(v->info.format, &obt_as.endianness, &v->frame_size); + v->frame_size *= as->nchannels; + + /* call the function that creates a new stream for recording */ + r = qpw_stream_new(c, v, ppdo->stream_name ? : c->dev->id, + ppdo->name, SPA_DIRECTION_INPUT); + if (r < 0) { + error_report("qpw_stream_new for recording failed"); + pw_thread_loop_unlock(c->thread_loop); + return -1; + } + + /* report the audio format we support */ + audio_pcm_init_info(&hw->info, &obt_as); + + /* report the buffer size to qemu */ + hw->samples = audio_buffer_frames( + qapi_AudiodevPipewirePerDirectionOptions_base(ppdo), &obt_as, 46440); + + pw_thread_loop_unlock(c->thread_loop); + return 0; +} + +static void +qpw_fini_out(HWVoiceOut *hw) +{ + PWVoiceOut *pw = (PWVoiceOut *) hw; + PWVoice *v = &pw->v; + + if (v->stream) { + pwaudio *c = v->g; + pw_thread_loop_lock(c->thread_loop); + pw_stream_destroy(v->stream); + v->stream = NULL; + pw_thread_loop_unlock(c->thread_loop); + } +} + +static void +qpw_fini_in(HWVoiceIn *hw) +{ + PWVoiceIn *pw = (PWVoiceIn *) hw; + PWVoice *v = &pw->v; + + if (v->stream) { + pwaudio *c = v->g; + pw_thread_loop_lock(c->thread_loop); + pw_stream_destroy(v->stream); + v->stream = NULL; + pw_thread_loop_unlock(c->thread_loop); + } +} + +static void +qpw_enable_out(HWVoiceOut *hw, bool enable) +{ + PWVoiceOut *po = (PWVoiceOut *) hw; + PWVoice *v = &po->v; + pwaudio *c = v->g; + pw_thread_loop_lock(c->thread_loop); + pw_stream_set_active(v->stream, enable); + pw_thread_loop_unlock(c->thread_loop); +} + +static void +qpw_enable_in(HWVoiceIn *hw, bool enable) +{ + PWVoiceIn *pi = (PWVoiceIn *) hw; + PWVoice *v = &pi->v; + pwaudio *c = v->g; + pw_thread_loop_lock(c->thread_loop); + pw_stream_set_active(v->stream, enable); + pw_thread_loop_unlock(c->thread_loop); +} + +static void +qpw_volume_out(HWVoiceOut *hw, Volume *vol) +{ + PWVoiceOut *pw = (PWVoiceOut *) hw; + PWVoice *v = &pw->v; + pwaudio *c = v->g; + int i, ret; + + pw_thread_loop_lock(c->thread_loop); + v->volume.channels = vol->channels; + + for (i = 0; i < vol->channels; ++i) { + v->volume.values[i] = (float)vol->vol[i] / 255; + } + + ret = pw_stream_set_control(v->stream, + SPA_PROP_channelVolumes, v->volume.channels, v->volume.values, 0); + trace_pw_vol(ret == 0 ? "success" : "failed"); + + v->muted = vol->mute; + float val = v->muted ? 
1.f : 0.f;
+    ret = pw_stream_set_control(v->stream, SPA_PROP_mute, 1, &val, 0);
+    pw_thread_loop_unlock(c->thread_loop);
+}
+
+static void
+qpw_volume_in(HWVoiceIn *hw, Volume *vol)
+{
+    PWVoiceIn *pw = (PWVoiceIn *) hw;
+    PWVoice *v = &pw->v;
+    pwaudio *c = v->g;
+    int i, ret;
+
+    pw_thread_loop_lock(c->thread_loop);
+    v->volume.channels = vol->channels;
+
+    for (i = 0; i < vol->channels; ++i) {
+        v->volume.values[i] = (float)vol->vol[i] / 255;
+    }
+
+    ret = pw_stream_set_control(v->stream,
+        SPA_PROP_channelVolumes, v->volume.channels, v->volume.values, 0);
+    trace_pw_vol(ret == 0 ? "success" : "failed");
+
+    v->muted = vol->mute;
+    float val = v->muted ? 1.f : 0.f;
+    ret = pw_stream_set_control(v->stream, SPA_PROP_mute, 1, &val, 0);
+    pw_thread_loop_unlock(c->thread_loop);
+}
+
+static int wait_resync(pwaudio *pw)
+{
+    int res;
+    pw->pending_seq = pw_core_sync(pw->core, PW_ID_CORE, pw->pending_seq);
+
+    while (true) {
+        pw_thread_loop_wait(pw->thread_loop);
+
+        res = pw->error;
+        if (res < 0) {
+            pw->error = 0;
+            return res;
+        }
+        if (pw->pending_seq == pw->last_seq) {
+            break;
+        }
+    }
+    return 0;
+}
+
+static void
+on_core_error(void *data, uint32_t id, int seq, int res, const char *message)
+{
+    pwaudio *pw = data;
+
+    error_report("error id:%u seq:%d res:%d (%s): %s",
+                 id, seq, res, spa_strerror(res), message);
+
+    /* stop and exit the thread loop */
+    pw_thread_loop_signal(pw->thread_loop, FALSE);
+}
+
+static void
+on_core_done(void *data, uint32_t id, int seq)
+{
+    pwaudio *pw = data;
+    assert(id == PW_ID_CORE);
+    pw->last_seq = seq;
+    if (pw->pending_seq == seq) {
+        /* stop and exit the thread loop */
+        pw_thread_loop_signal(pw->thread_loop, FALSE);
+    }
+}
+
+static const struct pw_core_events core_events = {
+    PW_VERSION_CORE_EVENTS,
+    .done = on_core_done,
+    .error = on_core_error,
+};
+
+static void *
+qpw_audio_init(Audiodev *dev)
+{
+    g_autofree pwaudio *pw = g_new0(pwaudio, 1);
+    pw_init(NULL, NULL);
+
+    trace_pw_audio_init();
+    assert(dev->driver == AUDIODEV_DRIVER_PIPEWIRE);
+
+    pw->dev = dev;
+    pw->thread_loop = pw_thread_loop_new("Pipewire thread loop", NULL);
+    if (pw->thread_loop == NULL) {
+        error_report("Could not create Pipewire loop");
+        goto fail;
+    }
+
+    pw->context =
+        pw_context_new(pw_thread_loop_get_loop(pw->thread_loop), NULL, 0);
+    if (pw->context == NULL) {
+        error_report("Could not create Pipewire context");
+        goto fail;
+    }
+
+    if (pw_thread_loop_start(pw->thread_loop) < 0) {
+        error_report("Could not start Pipewire loop");
+        goto fail;
+    }
+
+    pw_thread_loop_lock(pw->thread_loop);
+
+    pw->core = pw_context_connect(pw->context, NULL, 0);
+    if (pw->core == NULL) {
+        pw_thread_loop_unlock(pw->thread_loop);
+        goto fail;
+    }
+
+    if (pw_core_add_listener(pw->core, &pw->core_listener,
+                             &core_events, pw) < 0) {
+        pw_thread_loop_unlock(pw->thread_loop);
+        goto fail;
+    }
+    /* a failed roundtrip must not be reported as success */
+    if (wait_resync(pw) < 0) {
+        pw_thread_loop_unlock(pw->thread_loop);
+        goto fail;
+    }
+
+    pw_thread_loop_unlock(pw->thread_loop);
+
+    return g_steal_pointer(&pw);
+
+fail:
+    AUD_log(AUDIO_CAP, "Failed to initialize PW context");
+    if (pw->thread_loop) {
+        pw_thread_loop_stop(pw->thread_loop);
+    }
+    g_clear_pointer(&pw->context, pw_context_destroy);
+    g_clear_pointer(&pw->thread_loop, pw_thread_loop_destroy);
+    return NULL;
+}
+
+static void
+qpw_audio_fini(void *opaque)
+{
+    pwaudio *pw = opaque;
+
+    if (pw->thread_loop) {
+        pw_thread_loop_stop(pw->thread_loop);
+    }
+
+    if (pw->core) {
+        spa_hook_remove(&pw->core_listener);
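+        /* spa_zero() clears the hook storage after the listener is removed */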
spa_zero(pw->core_listener); + pw_core_disconnect(pw->core); + } + + if (pw->context) { + pw_context_destroy(pw->context); + } + pw_thread_loop_destroy(pw->thread_loop); + + g_free(pw); +} + +static struct audio_pcm_ops qpw_pcm_ops = { + .init_out = qpw_init_out, + .fini_out = qpw_fini_out, + .write = qpw_write, + .buffer_get_free = qpw_buffer_get_free, + .run_buffer_out = audio_generic_run_buffer_out, + .enable_out = qpw_enable_out, + .volume_out = qpw_volume_out, + .volume_in = qpw_volume_in, + + .init_in = qpw_init_in, + .fini_in = qpw_fini_in, + .read = qpw_read, + .run_buffer_in = audio_generic_run_buffer_in, + .enable_in = qpw_enable_in +}; + +static struct audio_driver pw_audio_driver = { + .name = "pipewire", + .descr = "http://www.pipewire.org/", + .init = qpw_audio_init, + .fini = qpw_audio_fini, + .pcm_ops = &qpw_pcm_ops, + .can_be_default = 1, + .max_voices_out = INT_MAX, + .max_voices_in = INT_MAX, + .voice_size_out = sizeof(PWVoiceOut), + .voice_size_in = sizeof(PWVoiceIn), +}; + +static void +register_audio_pw(void) +{ + audio_driver_register(&pw_audio_driver); +} + +type_init(register_audio_pw); diff --git a/audio/rate_template.h b/audio/rate_template.h index b432719ebb..6648f0d2e5 100644 --- a/audio/rate_template.h +++ b/audio/rate_template.h @@ -40,8 +40,6 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf, int64_t t; #endif - ilast = rate->ilast; - istart = ibuf; iend = ibuf + *isamp; @@ -59,15 +57,17 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf, return; } - while (obuf < oend) { + /* without input samples, there's nothing to do */ + if (ibuf >= iend) { + *osamp = 0; + return; + } - /* Safety catch to make sure we have input samples. */ - if (ibuf >= iend) { - break; - } + ilast = rate->ilast; + + while (true) { /* read as many input samples so that ipos > opos */ - while (rate->ipos <= (rate->opos >> 32)) { ilast = *ibuf++; rate->ipos++; @@ -78,6 +78,11 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf, } } + /* make sure that the next output sample can be written */ + if (obuf >= oend) { + break; + } + icur = *ibuf; /* wrap ipos and opos around long before they overflow */ diff --git a/audio/trace-events b/audio/trace-events index e1ab643add..85dbb506b2 100644 --- a/audio/trace-events +++ b/audio/trace-events @@ -18,6 +18,14 @@ dbus_audio_register(const char *s, const char *dir) "sender = %s, dir = %s" dbus_audio_put_buffer_out(size_t len) "len = %zu" dbus_audio_read(size_t len) "len = %zu" +# pwaudio.c +pw_state_changed(int nodeid, const char *s) "node id: %d stream state: %s" +pw_read(int32_t avail, uint32_t index, size_t len) "avail=%d index=%u len=%zu" +pw_write(int32_t filled, int32_t avail, uint32_t index, size_t len) "filled=%d avail=%d index=%u len=%zu" +pw_vol(const char *ret) "set volume: %s" +pw_period(uint64_t quantum, uint32_t rate) "period =%" PRIu64 "/%u" +pw_audio_init(void) "Initialize Pipewire context" + # audio.c audio_timer_start(int interval) "interval %d ms" audio_timer_stop(void) "" diff --git a/backends/cryptodev-builtin.c b/backends/cryptodev-builtin.c index cda6ca3b71..39d0455280 100644 --- a/backends/cryptodev-builtin.c +++ b/backends/cryptodev-builtin.c @@ -59,6 +59,19 @@ struct CryptoDevBackendBuiltin { CryptoDevBackendBuiltinSession *sessions[MAX_NUM_SESSIONS]; }; +static void cryptodev_builtin_init_akcipher(CryptoDevBackend *backend) +{ + QCryptoAkCipherOptions opts; + + opts.alg = QCRYPTO_AKCIPHER_ALG_RSA; + opts.u.rsa.padding_alg = 
QCRYPTO_RSA_PADDING_ALG_RAW; + if (qcrypto_akcipher_supports(&opts)) { + backend->conf.crypto_services |= + (1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER); + backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA; + } +} + static void cryptodev_builtin_init( CryptoDevBackend *backend, Error **errp) { @@ -72,21 +85,18 @@ static void cryptodev_builtin_init( return; } - cc = cryptodev_backend_new_client( - "cryptodev-builtin", NULL); + cc = cryptodev_backend_new_client(); cc->info_str = g_strdup_printf("cryptodev-builtin0"); cc->queue_index = 0; - cc->type = CRYPTODEV_BACKEND_TYPE_BUILTIN; + cc->type = QCRYPTODEV_BACKEND_TYPE_BUILTIN; backend->conf.peers.ccs[0] = cc; backend->conf.crypto_services = - 1u << VIRTIO_CRYPTO_SERVICE_CIPHER | - 1u << VIRTIO_CRYPTO_SERVICE_HASH | - 1u << VIRTIO_CRYPTO_SERVICE_MAC | - 1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER; + 1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER | + 1u << QCRYPTODEV_BACKEND_SERVICE_HASH | + 1u << QCRYPTODEV_BACKEND_SERVICE_MAC; backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC; backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1; - backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA; /* * Set the Maximum length of crypto request. * Why this value? Just avoid to overflow when @@ -95,6 +105,7 @@ static void cryptodev_builtin_init( backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendOpInfo); backend->conf.max_cipher_key_len = CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN; backend->conf.max_auth_key_len = CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN; + cryptodev_builtin_init_akcipher(backend); cryptodev_backend_set_ready(backend, true); } @@ -528,17 +539,14 @@ static int cryptodev_builtin_asym_operation( static int cryptodev_builtin_operation( CryptoDevBackend *backend, - CryptoDevBackendOpInfo *op_info, - uint32_t queue_index, - CryptoDevCompletionFunc cb, - void *opaque) + CryptoDevBackendOpInfo *op_info) { CryptoDevBackendBuiltin *builtin = CRYPTODEV_BACKEND_BUILTIN(backend); CryptoDevBackendBuiltinSession *sess; CryptoDevBackendSymOpInfo *sym_op_info; CryptoDevBackendAsymOpInfo *asym_op_info; - enum CryptoDevBackendAlgType algtype = op_info->algtype; + QCryptodevBackendAlgType algtype = op_info->algtype; int status = -VIRTIO_CRYPTO_ERR; Error *local_error = NULL; @@ -550,11 +558,11 @@ static int cryptodev_builtin_operation( } sess = builtin->sessions[op_info->session_id]; - if (algtype == CRYPTODEV_BACKEND_ALG_SYM) { + if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) { sym_op_info = op_info->u.sym_op_info; status = cryptodev_builtin_sym_operation(sess, sym_op_info, &local_error); - } else if (algtype == CRYPTODEV_BACKEND_ALG_ASYM) { + } else if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) { asym_op_info = op_info->u.asym_op_info; status = cryptodev_builtin_asym_operation(sess, op_info->op_code, asym_op_info, &local_error); @@ -563,8 +571,8 @@ static int cryptodev_builtin_operation( if (local_error) { error_report_err(local_error); } - if (cb) { - cb(opaque, status); + if (op_info->cb) { + op_info->cb(op_info->opaque, status); } return 0; } diff --git a/backends/cryptodev-hmp-cmds.c b/backends/cryptodev-hmp-cmds.c new file mode 100644 index 0000000000..4f7220bb13 --- /dev/null +++ b/backends/cryptodev-hmp-cmds.c @@ -0,0 +1,54 @@ +/* + * HMP commands related to cryptodev + * + * Copyright (c) 2023 Bytedance.Inc + * + * Authors: + * zhenwei pi + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. 
+ */
+
+#include "qemu/osdep.h"
+#include "monitor/hmp.h"
+#include "monitor/monitor.h"
+#include "qapi/qapi-commands-cryptodev.h"
+#include "qapi/qmp/qdict.h"
+
+void hmp_info_cryptodev(Monitor *mon, const QDict *qdict)
+{
+    QCryptodevInfoList *info_list = qmp_query_cryptodev(NULL);
+    QCryptodevInfoList *il;
+    QCryptodevBackendServiceTypeList *sl;
+    QCryptodevBackendClientList *cl;
+
+    for (il = info_list; il; il = il->next) {
+        g_autofree char *services = NULL;
+        QCryptodevInfo *info = il->value;
+        char *tmp_services;
+
+        /* build a string like 'service=[akcipher|mac|hash|cipher]' */
+        for (sl = info->service; sl; sl = sl->next) {
+            const char *service = QCryptodevBackendServiceType_str(sl->value);
+
+            if (!services) {
+                services = g_strdup(service);
+            } else {
+                tmp_services = g_strjoin("|", services, service, NULL);
+                g_free(services);
+                services = tmp_services;
+            }
+        }
+        monitor_printf(mon, "%s: service=[%s]\n", info->id, services);
+
+        for (cl = info->client; cl; cl = cl->next) {
+            QCryptodevBackendClient *client = cl->value;
+            monitor_printf(mon, "  queue %" PRIu32 ": type=%s\n",
+                           client->queue,
+                           QCryptodevBackendType_str(client->type));
+        }
+    }
+
+    /* free the list head; the loop cursor is NULL at this point */
+    qapi_free_QCryptodevInfoList(info_list);
+}
diff --git a/backends/cryptodev-lkcf.c b/backends/cryptodev-lkcf.c
index 133bd706a4..45aba1ff67 100644
--- a/backends/cryptodev-lkcf.c
+++ b/backends/cryptodev-lkcf.c
@@ -223,14 +223,14 @@ static void cryptodev_lkcf_init(CryptoDevBackend *backend, Error **errp)
         return;
     }
 
-    cc = cryptodev_backend_new_client("cryptodev-lkcf", NULL);
+    cc = cryptodev_backend_new_client();
     cc->info_str = g_strdup_printf("cryptodev-lkcf0");
     cc->queue_index = 0;
-    cc->type = CRYPTODEV_BACKEND_TYPE_LKCF;
+    cc->type = QCRYPTODEV_BACKEND_TYPE_LKCF;
     backend->conf.peers.ccs[0] = cc;
 
     backend->conf.crypto_services =
-        1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER;
+        1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER;
     backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
     lkcf->running = true;
 
@@ -469,15 +469,12 @@ static void *cryptodev_lkcf_worker(void *arg)
 
 static int cryptodev_lkcf_operation(
                  CryptoDevBackend *backend,
-                 CryptoDevBackendOpInfo *op_info,
-                 uint32_t queue_index,
-                 CryptoDevCompletionFunc cb,
-                 void *opaque)
+                 CryptoDevBackendOpInfo *op_info)
 {
     CryptoDevBackendLKCF *lkcf = CRYPTODEV_BACKEND_LKCF(backend);
     CryptoDevBackendLKCFSession *sess;
-    enum CryptoDevBackendAlgType algtype = op_info->algtype;
+    QCryptodevBackendAlgType algtype = op_info->algtype;
     CryptoDevLKCFTask *task;
 
     if (op_info->session_id >= MAX_SESSIONS ||
@@ -488,15 +485,15 @@ static int cryptodev_lkcf_operation(
     }
 
     sess = lkcf->sess[op_info->session_id];
-    if (algtype != CRYPTODEV_BACKEND_ALG_ASYM) {
+    if (algtype != QCRYPTODEV_BACKEND_ALG_ASYM) {
         error_report("algtype not supported: %u", algtype);
         return -VIRTIO_CRYPTO_NOTSUPP;
     }
 
     task = g_new0(CryptoDevLKCFTask, 1);
     task->op_info = op_info;
-    task->cb = cb;
-    task->opaque = opaque;
+    task->cb = op_info->cb;
+    task->opaque = op_info->opaque;
     task->sess = sess;
     task->lkcf = lkcf;
     task->status = -VIRTIO_CRYPTO_ERR;
diff --git a/backends/cryptodev-vhost-user.c b/backends/cryptodev-vhost-user.c
index ab3028e045..b1d9eb735f 100644
--- a/backends/cryptodev-vhost-user.c
+++ b/backends/cryptodev-vhost-user.c
@@ -67,7 +67,7 @@ cryptodev_vhost_user_get_vhost(
 {
     CryptoDevBackendVhostUser *s = CRYPTODEV_BACKEND_VHOST_USER(b);
 
-    assert(cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER);
+    assert(cc->type == QCRYPTODEV_BACKEND_TYPE_VHOST_USER);
     assert(queue < MAX_CRYPTO_QUEUE_NUM);
 
     return s->vhost_crypto[queue];
@@ -198,12 +198,11 @@ static void 
cryptodev_vhost_user_init( s->opened = true; for (i = 0; i < queues; i++) { - cc = cryptodev_backend_new_client( - "cryptodev-vhost-user", NULL); + cc = cryptodev_backend_new_client(); cc->info_str = g_strdup_printf("cryptodev-vhost-user%zu to %s ", i, chr->label); cc->queue_index = i; - cc->type = CRYPTODEV_BACKEND_TYPE_VHOST_USER; + cc->type = QCRYPTODEV_BACKEND_TYPE_VHOST_USER; backend->conf.peers.ccs[i] = cc; @@ -222,9 +221,9 @@ static void cryptodev_vhost_user_init( cryptodev_vhost_user_event, NULL, s, NULL, true); backend->conf.crypto_services = - 1u << VIRTIO_CRYPTO_SERVICE_CIPHER | - 1u << VIRTIO_CRYPTO_SERVICE_HASH | - 1u << VIRTIO_CRYPTO_SERVICE_MAC; + 1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER | + 1u << QCRYPTODEV_BACKEND_SERVICE_HASH | + 1u << QCRYPTODEV_BACKEND_SERVICE_MAC; backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC; backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1; diff --git a/backends/cryptodev-vhost.c b/backends/cryptodev-vhost.c index 74ea0ad63d..93523732f3 100644 --- a/backends/cryptodev-vhost.c +++ b/backends/cryptodev-vhost.c @@ -127,7 +127,7 @@ cryptodev_get_vhost(CryptoDevBackendClient *cc, switch (cc->type) { #if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX) - case CRYPTODEV_BACKEND_TYPE_VHOST_USER: + case QCRYPTODEV_BACKEND_TYPE_VHOST_USER: vhost_crypto = cryptodev_vhost_user_get_vhost(cc, b, queue); break; #endif @@ -195,7 +195,7 @@ int cryptodev_vhost_start(VirtIODevice *dev, int total_queues) * because vhost user doesn't interrupt masking/unmasking * properly. */ - if (cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER) { + if (cc->type == QCRYPTODEV_BACKEND_TYPE_VHOST_USER) { dev->use_guest_notifier_mask = false; } } diff --git a/backends/cryptodev.c b/backends/cryptodev.c index 54ee8c81f5..94ca393cee 100644 --- a/backends/cryptodev.c +++ b/backends/cryptodev.c @@ -23,29 +23,92 @@ #include "qemu/osdep.h" #include "sysemu/cryptodev.h" +#include "sysemu/stats.h" #include "qapi/error.h" +#include "qapi/qapi-commands-cryptodev.h" +#include "qapi/qapi-types-stats.h" #include "qapi/visitor.h" #include "qemu/config-file.h" #include "qemu/error-report.h" +#include "qemu/main-loop.h" #include "qom/object_interfaces.h" #include "hw/virtio/virtio-crypto.h" +#define SYM_ENCRYPT_OPS_STR "sym-encrypt-ops" +#define SYM_DECRYPT_OPS_STR "sym-decrypt-ops" +#define SYM_ENCRYPT_BYTES_STR "sym-encrypt-bytes" +#define SYM_DECRYPT_BYTES_STR "sym-decrypt-bytes" + +#define ASYM_ENCRYPT_OPS_STR "asym-encrypt-ops" +#define ASYM_DECRYPT_OPS_STR "asym-decrypt-ops" +#define ASYM_SIGN_OPS_STR "asym-sign-ops" +#define ASYM_VERIFY_OPS_STR "asym-verify-ops" +#define ASYM_ENCRYPT_BYTES_STR "asym-encrypt-bytes" +#define ASYM_DECRYPT_BYTES_STR "asym-decrypt-bytes" +#define ASYM_SIGN_BYTES_STR "asym-sign-bytes" +#define ASYM_VERIFY_BYTES_STR "asym-verify-bytes" + +typedef struct StatsArgs { + union StatsResultsType { + StatsResultList **stats; + StatsSchemaList **schema; + } result; + strList *names; + Error **errp; +} StatsArgs; static QTAILQ_HEAD(, CryptoDevBackendClient) crypto_clients; +static int qmp_query_cryptodev_foreach(Object *obj, void *data) +{ + CryptoDevBackend *backend; + QCryptodevInfoList **infolist = data; + uint32_t services, i; -CryptoDevBackendClient * -cryptodev_backend_new_client(const char *model, - const char *name) + if (!object_dynamic_cast(obj, TYPE_CRYPTODEV_BACKEND)) { + return 0; + } + + QCryptodevInfo *info = g_new0(QCryptodevInfo, 1); + info->id = g_strdup(object_get_canonical_path_component(obj)); + + backend = CRYPTODEV_BACKEND(obj); + 
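+    /* expand the crypto_services bitmask into the QAPI service-type list */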
services = backend->conf.crypto_services; + for (i = 0; i < QCRYPTODEV_BACKEND_SERVICE__MAX; i++) { + if (services & (1 << i)) { + QAPI_LIST_PREPEND(info->service, i); + } + } + + for (i = 0; i < backend->conf.peers.queues; i++) { + CryptoDevBackendClient *cc = backend->conf.peers.ccs[i]; + QCryptodevBackendClient *client = g_new0(QCryptodevBackendClient, 1); + + client->queue = cc->queue_index; + client->type = cc->type; + QAPI_LIST_PREPEND(info->client, client); + } + + QAPI_LIST_PREPEND(*infolist, info); + + return 0; +} + +QCryptodevInfoList *qmp_query_cryptodev(Error **errp) +{ + QCryptodevInfoList *list = NULL; + Object *objs = container_get(object_get_root(), "/objects"); + + object_child_foreach(objs, qmp_query_cryptodev_foreach, &list); + + return list; +} + +CryptoDevBackendClient *cryptodev_backend_new_client(void) { CryptoDevBackendClient *cc; cc = g_new0(CryptoDevBackendClient, 1); - cc->model = g_strdup(model); - if (name) { - cc->name = g_strdup(name); - } - QTAILQ_INSERT_TAIL(&crypto_clients, cc, next); return cc; @@ -55,8 +118,6 @@ void cryptodev_backend_free_client( CryptoDevBackendClient *cc) { QTAILQ_REMOVE(&crypto_clients, cc, next); - g_free(cc->name); - g_free(cc->model); g_free(cc->info_str); g_free(cc); } @@ -71,6 +132,9 @@ void cryptodev_backend_cleanup( if (bc->cleanup) { bc->cleanup(backend, errp); } + + g_free(backend->sym_stat); + g_free(backend->asym_stat); } int cryptodev_backend_create_session( @@ -107,38 +171,111 @@ int cryptodev_backend_close_session( static int cryptodev_backend_operation( CryptoDevBackend *backend, - CryptoDevBackendOpInfo *op_info, - uint32_t queue_index, - CryptoDevCompletionFunc cb, - void *opaque) + CryptoDevBackendOpInfo *op_info) { CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_GET_CLASS(backend); if (bc->do_op) { - return bc->do_op(backend, op_info, queue_index, cb, opaque); + return bc->do_op(backend, op_info); } return -VIRTIO_CRYPTO_NOTSUPP; } -int cryptodev_backend_crypto_operation( - CryptoDevBackend *backend, - void *opaque1, - uint32_t queue_index, - CryptoDevCompletionFunc cb, void *opaque2) +static int cryptodev_backend_account(CryptoDevBackend *backend, + CryptoDevBackendOpInfo *op_info) { - VirtIOCryptoReq *req = opaque1; - CryptoDevBackendOpInfo *op_info = &req->op_info; - enum CryptoDevBackendAlgType algtype = req->flags; + enum QCryptodevBackendAlgType algtype = op_info->algtype; + int len; - if ((algtype != CRYPTODEV_BACKEND_ALG_SYM) - && (algtype != CRYPTODEV_BACKEND_ALG_ASYM)) { + if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) { + CryptoDevBackendAsymOpInfo *asym_op_info = op_info->u.asym_op_info; + len = asym_op_info->src_len; + switch (op_info->op_code) { + case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT: + CryptodevAsymStatIncEncrypt(backend, len); + break; + case VIRTIO_CRYPTO_AKCIPHER_DECRYPT: + CryptodevAsymStatIncDecrypt(backend, len); + break; + case VIRTIO_CRYPTO_AKCIPHER_SIGN: + CryptodevAsymStatIncSign(backend, len); + break; + case VIRTIO_CRYPTO_AKCIPHER_VERIFY: + CryptodevAsymStatIncVerify(backend, len); + break; + default: + return -VIRTIO_CRYPTO_NOTSUPP; + } + } else if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) { + CryptoDevBackendSymOpInfo *sym_op_info = op_info->u.sym_op_info; + len = sym_op_info->src_len; + switch (op_info->op_code) { + case VIRTIO_CRYPTO_CIPHER_ENCRYPT: + CryptodevSymStatIncEncrypt(backend, len); + break; + case VIRTIO_CRYPTO_CIPHER_DECRYPT: + CryptodevSymStatIncDecrypt(backend, len); + break; + default: + return -VIRTIO_CRYPTO_NOTSUPP; + } + } else { error_report("Unsupported cryptodev alg 
type: %" PRIu32 "", algtype); return -VIRTIO_CRYPTO_NOTSUPP; } - return cryptodev_backend_operation(backend, op_info, queue_index, - cb, opaque2); + return len; +} + +static void cryptodev_backend_throttle_timer_cb(void *opaque) +{ + CryptoDevBackend *backend = (CryptoDevBackend *)opaque; + CryptoDevBackendOpInfo *op_info, *tmpop; + int ret; + + QTAILQ_FOREACH_SAFE(op_info, &backend->opinfos, next, tmpop) { + QTAILQ_REMOVE(&backend->opinfos, op_info, next); + ret = cryptodev_backend_account(backend, op_info); + if (ret < 0) { + op_info->cb(op_info->opaque, ret); + continue; + } + + throttle_account(&backend->ts, true, ret); + cryptodev_backend_operation(backend, op_info); + if (throttle_enabled(&backend->tc) && + throttle_schedule_timer(&backend->ts, &backend->tt, true)) { + break; + } + } +} + +int cryptodev_backend_crypto_operation( + CryptoDevBackend *backend, + CryptoDevBackendOpInfo *op_info) +{ + int ret; + + if (!throttle_enabled(&backend->tc)) { + goto do_account; + } + + if (throttle_schedule_timer(&backend->ts, &backend->tt, true) || + !QTAILQ_EMPTY(&backend->opinfos)) { + QTAILQ_INSERT_TAIL(&backend->opinfos, op_info, next); + return 0; + } + +do_account: + ret = cryptodev_backend_account(backend, op_info); + if (ret < 0) { + return ret; + } + + throttle_account(&backend->ts, true, ret); + + return cryptodev_backend_operation(backend, op_info); } static void @@ -169,15 +306,111 @@ cryptodev_backend_set_queues(Object *obj, Visitor *v, const char *name, backend->conf.peers.queues = value; } +static void cryptodev_backend_set_throttle(CryptoDevBackend *backend, int field, + uint64_t value, Error **errp) +{ + uint64_t orig = backend->tc.buckets[field].avg; + bool enabled = throttle_enabled(&backend->tc); + + if (orig == value) { + return; + } + + backend->tc.buckets[field].avg = value; + if (!throttle_enabled(&backend->tc)) { + throttle_timers_destroy(&backend->tt); + cryptodev_backend_throttle_timer_cb(backend); /* drain opinfos */ + return; + } + + if (!throttle_is_valid(&backend->tc, errp)) { + backend->tc.buckets[field].avg = orig; /* revert change */ + return; + } + + if (!enabled) { + throttle_init(&backend->ts); + throttle_timers_init(&backend->tt, qemu_get_aio_context(), + QEMU_CLOCK_REALTIME, + cryptodev_backend_throttle_timer_cb, /* FIXME */ + cryptodev_backend_throttle_timer_cb, backend); + } + + throttle_config(&backend->ts, QEMU_CLOCK_REALTIME, &backend->tc); +} + +static void cryptodev_backend_get_bps(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj); + uint64_t value = backend->tc.buckets[THROTTLE_BPS_TOTAL].avg; + + visit_type_uint64(v, name, &value, errp); +} + +static void cryptodev_backend_set_bps(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj); + uint64_t value; + + if (!visit_type_uint64(v, name, &value, errp)) { + return; + } + + cryptodev_backend_set_throttle(backend, THROTTLE_BPS_TOTAL, value, errp); +} + +static void cryptodev_backend_get_ops(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj); + uint64_t value = backend->tc.buckets[THROTTLE_OPS_TOTAL].avg; + + visit_type_uint64(v, name, &value, errp); +} + +static void cryptodev_backend_set_ops(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj); + uint64_t value; + + if (!visit_type_uint64(v, 
name, &value, errp)) { + return; + } + + cryptodev_backend_set_throttle(backend, THROTTLE_OPS_TOTAL, value, errp); +} + static void cryptodev_backend_complete(UserCreatable *uc, Error **errp) { CryptoDevBackend *backend = CRYPTODEV_BACKEND(uc); CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_GET_CLASS(uc); + uint32_t services; + uint64_t value; + + QTAILQ_INIT(&backend->opinfos); + value = backend->tc.buckets[THROTTLE_OPS_TOTAL].avg; + cryptodev_backend_set_throttle(backend, THROTTLE_OPS_TOTAL, value, errp); + value = backend->tc.buckets[THROTTLE_BPS_TOTAL].avg; + cryptodev_backend_set_throttle(backend, THROTTLE_BPS_TOTAL, value, errp); if (bc->init) { bc->init(backend, errp); } + + services = backend->conf.crypto_services; + if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_CIPHER)) { + backend->sym_stat = g_new0(CryptodevBackendSymStat, 1); + } + + if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER)) { + backend->asym_stat = g_new0(CryptodevBackendAsymStat, 1); + } } void cryptodev_backend_set_used(CryptoDevBackend *backend, bool used) @@ -208,8 +441,12 @@ cryptodev_backend_can_be_deleted(UserCreatable *uc) static void cryptodev_backend_instance_init(Object *obj) { + CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj); + /* Initialize devices' queues property to 1 */ object_property_set_int(obj, "queues", 1, NULL); + + throttle_config_init(&backend->tc); } static void cryptodev_backend_finalize(Object *obj) @@ -217,6 +454,137 @@ static void cryptodev_backend_finalize(Object *obj) CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj); cryptodev_backend_cleanup(backend, NULL); + if (throttle_enabled(&backend->tc)) { + throttle_timers_destroy(&backend->tt); + } +} + +static StatsList *cryptodev_backend_stats_add(const char *name, int64_t *val, + StatsList *stats_list) +{ + Stats *stats = g_new0(Stats, 1); + + stats->name = g_strdup(name); + stats->value = g_new0(StatsValue, 1); + stats->value->type = QTYPE_QNUM; + stats->value->u.scalar = *val; + + QAPI_LIST_PREPEND(stats_list, stats); + return stats_list; +} + +static int cryptodev_backend_stats_query(Object *obj, void *data) +{ + StatsArgs *stats_args = data; + StatsResultList **stats_results = stats_args->result.stats; + StatsList *stats_list = NULL; + StatsResult *entry; + CryptoDevBackend *backend; + CryptodevBackendSymStat *sym_stat; + CryptodevBackendAsymStat *asym_stat; + + if (!object_dynamic_cast(obj, TYPE_CRYPTODEV_BACKEND)) { + return 0; + } + + backend = CRYPTODEV_BACKEND(obj); + sym_stat = backend->sym_stat; + if (sym_stat) { + stats_list = cryptodev_backend_stats_add(SYM_ENCRYPT_OPS_STR, + &sym_stat->encrypt_ops, stats_list); + stats_list = cryptodev_backend_stats_add(SYM_DECRYPT_OPS_STR, + &sym_stat->decrypt_ops, stats_list); + stats_list = cryptodev_backend_stats_add(SYM_ENCRYPT_BYTES_STR, + &sym_stat->encrypt_bytes, stats_list); + stats_list = cryptodev_backend_stats_add(SYM_DECRYPT_BYTES_STR, + &sym_stat->decrypt_bytes, stats_list); + } + + asym_stat = backend->asym_stat; + if (asym_stat) { + stats_list = cryptodev_backend_stats_add(ASYM_ENCRYPT_OPS_STR, + &asym_stat->encrypt_ops, stats_list); + stats_list = cryptodev_backend_stats_add(ASYM_DECRYPT_OPS_STR, + &asym_stat->decrypt_ops, stats_list); + stats_list = cryptodev_backend_stats_add(ASYM_SIGN_OPS_STR, + &asym_stat->sign_ops, stats_list); + stats_list = cryptodev_backend_stats_add(ASYM_VERIFY_OPS_STR, + &asym_stat->verify_ops, stats_list); + stats_list = cryptodev_backend_stats_add(ASYM_ENCRYPT_BYTES_STR, + &asym_stat->encrypt_bytes, stats_list); + stats_list = 
cryptodev_backend_stats_add(ASYM_DECRYPT_BYTES_STR, + &asym_stat->decrypt_bytes, stats_list); + stats_list = cryptodev_backend_stats_add(ASYM_SIGN_BYTES_STR, + &asym_stat->sign_bytes, stats_list); + stats_list = cryptodev_backend_stats_add(ASYM_VERIFY_BYTES_STR, + &asym_stat->verify_bytes, stats_list); + } + + entry = g_new0(StatsResult, 1); + entry->provider = STATS_PROVIDER_CRYPTODEV; + entry->qom_path = g_strdup(object_get_canonical_path(obj)); + entry->stats = stats_list; + QAPI_LIST_PREPEND(*stats_results, entry); + + return 0; +} + +static void cryptodev_backend_stats_cb(StatsResultList **result, + StatsTarget target, + strList *names, strList *targets, + Error **errp) +{ + switch (target) { + case STATS_TARGET_CRYPTODEV: + { + Object *objs = container_get(object_get_root(), "/objects"); + StatsArgs stats_args; + stats_args.result.stats = result; + stats_args.names = names; + stats_args.errp = errp; + + object_child_foreach(objs, cryptodev_backend_stats_query, &stats_args); + break; + } + default: + break; + } +} + +static StatsSchemaValueList *cryptodev_backend_schemas_add(const char *name, + StatsSchemaValueList *list) +{ + StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1); + + schema_entry->value = g_new0(StatsSchemaValue, 1); + schema_entry->value->type = STATS_TYPE_CUMULATIVE; + schema_entry->value->name = g_strdup(name); + schema_entry->next = list; + + return schema_entry; +} + +static void cryptodev_backend_schemas_cb(StatsSchemaList **result, + Error **errp) +{ + StatsSchemaValueList *stats_list = NULL; + const char *sym_stats[] = { SYM_ENCRYPT_OPS_STR, SYM_DECRYPT_OPS_STR, + SYM_ENCRYPT_BYTES_STR, SYM_DECRYPT_BYTES_STR }; + const char *asym_stats[] = { ASYM_ENCRYPT_OPS_STR, ASYM_DECRYPT_OPS_STR, + ASYM_SIGN_OPS_STR, ASYM_VERIFY_OPS_STR, + ASYM_ENCRYPT_BYTES_STR, ASYM_DECRYPT_BYTES_STR, + ASYM_SIGN_BYTES_STR, ASYM_VERIFY_BYTES_STR }; + + for (int i = 0; i < ARRAY_SIZE(sym_stats); i++) { + stats_list = cryptodev_backend_schemas_add(sym_stats[i], stats_list); + } + + for (int i = 0; i < ARRAY_SIZE(asym_stats); i++) { + stats_list = cryptodev_backend_schemas_add(asym_stats[i], stats_list); + } + + add_stats_schema(result, STATS_PROVIDER_CRYPTODEV, STATS_TARGET_CRYPTODEV, + stats_list); } static void @@ -232,6 +600,17 @@ cryptodev_backend_class_init(ObjectClass *oc, void *data) cryptodev_backend_get_queues, cryptodev_backend_set_queues, NULL, NULL); + object_class_property_add(oc, "throttle-bps", "uint64", + cryptodev_backend_get_bps, + cryptodev_backend_set_bps, + NULL, NULL); + object_class_property_add(oc, "throttle-ops", "uint64", + cryptodev_backend_get_ops, + cryptodev_backend_set_ops, + NULL, NULL); + + add_stats_callbacks(STATS_PROVIDER_CRYPTODEV, cryptodev_backend_stats_cb, + cryptodev_backend_schemas_cb); } static const TypeInfo cryptodev_backend_info = { diff --git a/backends/hostmem-file.c b/backends/hostmem-file.c index 25141283c4..38ea65bec5 100644 --- a/backends/hostmem-file.c +++ b/backends/hostmem-file.c @@ -27,6 +27,7 @@ struct HostMemoryBackendFile { char *mem_path; uint64_t align; + uint64_t offset; bool discard_data; bool is_pmem; bool readonly; @@ -58,7 +59,8 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) ram_flags |= fb->is_pmem ? 
RAM_PMEM : 0; memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name, backend->size, fb->align, ram_flags, - fb->mem_path, fb->readonly, errp); + fb->mem_path, fb->offset, fb->readonly, + errp); g_free(name); #endif } @@ -125,6 +127,36 @@ static void file_memory_backend_set_align(Object *o, Visitor *v, fb->align = val; } +static void file_memory_backend_get_offset(Object *o, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o); + uint64_t val = fb->offset; + + visit_type_size(v, name, &val, errp); +} + +static void file_memory_backend_set_offset(Object *o, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + HostMemoryBackend *backend = MEMORY_BACKEND(o); + HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o); + uint64_t val; + + if (host_memory_backend_mr_inited(backend)) { + error_setg(errp, "cannot change property '%s' of %s", name, + object_get_typename(o)); + return; + } + + if (!visit_type_size(v, name, &val, errp)) { + return; + } + fb->offset = val; +} + #ifdef CONFIG_LIBPMEM static bool file_memory_backend_get_pmem(Object *o, Error **errp) { @@ -197,6 +229,12 @@ file_backend_class_init(ObjectClass *oc, void *data) file_memory_backend_get_align, file_memory_backend_set_align, NULL, NULL); + object_class_property_add(oc, "offset", "int", + file_memory_backend_get_offset, + file_memory_backend_set_offset, + NULL, NULL); + object_class_property_set_description(oc, "offset", + "Offset into the target file (ex: 1G)"); #ifdef CONFIG_LIBPMEM object_class_property_add_bool(oc, "pmem", file_memory_backend_get_pmem, file_memory_backend_set_pmem); diff --git a/backends/meson.build b/backends/meson.build index 954e658b25..b369e0a9d0 100644 --- a/backends/meson.build +++ b/backends/meson.build @@ -1,5 +1,6 @@ softmmu_ss.add([files( 'cryptodev-builtin.c', + 'cryptodev-hmp-cmds.c', 'cryptodev.c', 'hostmem-ram.c', 'hostmem.c', diff --git a/backends/tpm/tpm_backend.c b/backends/tpm/tpm_backend.c index 375587e743..485a20b9e0 100644 --- a/backends/tpm/tpm_backend.c +++ b/backends/tpm/tpm_backend.c @@ -100,8 +100,6 @@ bool tpm_backend_had_startup_error(TPMBackend *s) void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd) { - ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context()); - if (s->cmd != NULL) { error_report("There is a TPM request pending"); return; @@ -109,7 +107,7 @@ void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd) s->cmd = cmd; object_ref(OBJECT(s)); - thread_pool_submit_aio(pool, tpm_backend_worker_thread, s, + thread_pool_submit_aio(tpm_backend_worker_thread, s, tpm_backend_request_completed, s); } diff --git a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c index d18144b92e..402a2d6312 100644 --- a/backends/tpm/tpm_emulator.c +++ b/backends/tpm/tpm_emulator.c @@ -573,13 +573,13 @@ static int tpm_emulator_prepare_data_fd(TPMEmulator *tpm_emu) goto err_exit; } - closesocket(fds[1]); + close(fds[1]); return 0; err_exit: - closesocket(fds[0]); - closesocket(fds[1]); + close(fds[0]); + close(fds[1]); return -1; } diff --git a/backends/vhost-user.c b/backends/vhost-user.c index 0596223ac4..94c6a82d52 100644 --- a/backends/vhost-user.c +++ b/backends/vhost-user.c @@ -20,12 +20,6 @@ #include "io/channel-command.h" #include "hw/virtio/virtio-bus.h" -static bool -ioeventfd_enabled(void) -{ - return kvm_enabled() && kvm_eventfds_enabled(); -} - int vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev, unsigned nvqs, Error **errp) @@ -34,11 
+28,6 @@ vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev, assert(!b->vdev && vdev); - if (!ioeventfd_enabled()) { - error_setg(errp, "vhost initialization failed: requires kvm"); - return -1; - } - if (!vhost_user_init(&b->vhost_user, &b->chr, errp)) { return -1; } diff --git a/block.c b/block.c index 0dd604d0f6..dae629075c 100644 --- a/block.c +++ b/block.c @@ -533,7 +533,6 @@ int coroutine_fn bdrv_co_create(BlockDriver *drv, const char *filename, int ret; GLOBAL_STATE_CODE(); ERRP_GUARD(); - assert_bdrv_graph_readable(); if (!drv->bdrv_co_create_opts) { error_setg(errp, "Driver '%s' does not support image creation", @@ -680,7 +679,7 @@ int coroutine_fn bdrv_co_create_opts_simple(BlockDriver *drv, ret = 0; out: - blk_unref(blk); + blk_co_unref(blk); return ret; } @@ -1610,10 +1609,11 @@ out: * bdrv_refresh_total_sectors() which polls when called from non-coroutine * context. */ -static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, - const char *node_name, QDict *options, - int open_flags, Error **errp) +static int no_coroutine_fn GRAPH_UNLOCKED +bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name, + QDict *options, int open_flags, Error **errp) { + AioContext *ctx; Error *local_err = NULL; int i, ret; GLOBAL_STATE_CODE(); @@ -1661,13 +1661,22 @@ static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, bs->supported_read_flags |= BDRV_REQ_REGISTERED_BUF; bs->supported_write_flags |= BDRV_REQ_REGISTERED_BUF; + /* Get the context after .bdrv_open, it can change the context */ + ctx = bdrv_get_aio_context(bs); + aio_context_acquire(ctx); + ret = bdrv_refresh_total_sectors(bs, bs->total_sectors); if (ret < 0) { error_setg_errno(errp, -ret, "Could not refresh total sector count"); + aio_context_release(ctx); return ret; } + bdrv_graph_rdlock_main_loop(); bdrv_refresh_limits(bs, NULL, &local_err); + bdrv_graph_rdunlock_main_loop(); + aio_context_release(ctx); + if (local_err) { error_propagate(errp, local_err); return -EINVAL; @@ -3419,7 +3428,9 @@ static int bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs, } out: + bdrv_graph_rdlock_main_loop(); bdrv_refresh_limits(parent_bs, tran, NULL); + bdrv_graph_rdunlock_main_loop(); return 0; } @@ -3474,6 +3485,8 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, * itself, all options starting with "${bdref_key}." are considered part of the * BlockdevRef. * + * The caller must hold the main AioContext lock. + * * TODO Can this be unified with bdrv_open_image()? */ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, @@ -3640,6 +3653,9 @@ done: * BlockdevRef. * * The BlockdevRef will be removed from the options QDict. + * + * @parent can move to a different AioContext in this function. Callers must + * make sure that their AioContext locking is still correct after this. */ BdrvChild *bdrv_open_child(const char *filename, QDict *options, const char *bdref_key, @@ -3664,6 +3680,9 @@ BdrvChild *bdrv_open_child(const char *filename, /* * Wrapper on bdrv_open_child() for most popular case: open primary child of bs. + * + * @parent can move to a different AioContext in this function. Callers must + * make sure that their AioContext locking is still correct after this. */ int bdrv_open_file_child(const char *filename, QDict *options, const char *bdref_key, @@ -3806,9 +3825,7 @@ out: * should be opened. If specified, neither options nor a filename may be given, * nor can an existing BDS be reused (that is, *pbs has to be NULL). 
* - * The caller must always hold @filename AioContext lock, because this - * function eventually calls bdrv_refresh_total_sectors() which polls - * when called from non-coroutine context. + * The caller must always hold the main AioContext lock. */ static BlockDriverState * no_coroutine_fn bdrv_open_inherit(const char *filename, const char *reference, QDict *options, @@ -4096,11 +4113,7 @@ close_and_fail: return NULL; } -/* - * The caller must always hold @filename AioContext lock, because this - * function eventually calls bdrv_refresh_total_sectors() which polls - * when called from non-coroutine context. - */ +/* The caller must always hold the main AioContext lock. */ BlockDriverState *bdrv_open(const char *filename, const char *reference, QDict *options, int flags, Error **errp) { @@ -4917,7 +4930,10 @@ static void bdrv_reopen_commit(BDRVReopenState *reopen_state) qdict_del(bs->explicit_options, "backing"); qdict_del(bs->options, "backing"); + bdrv_graph_rdlock_main_loop(); bdrv_refresh_limits(bs, NULL, NULL); + bdrv_graph_rdunlock_main_loop(); + bdrv_refresh_total_sectors(bs, bs->total_sectors); } /* @@ -5315,7 +5331,9 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, out: tran_finalize(tran, ret); + bdrv_graph_rdlock_main_loop(); bdrv_refresh_limits(bs_top, NULL, NULL); + bdrv_graph_rdunlock_main_loop(); if (new_context && old_context != new_context) { aio_context_release(new_context); @@ -5381,12 +5399,17 @@ static void bdrv_delete(BlockDriverState *bs) * empty set of options. The reference to the QDict belongs to the block layer * after the call (even on failure), so if the caller intends to reuse the * dictionary, it needs to use qobject_ref() before calling bdrv_open. + * + * The caller holds the AioContext lock for @bs. It must make sure that @bs + * stays in the same AioContext, i.e. @options must not refer to nodes in a + * different AioContext. */ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options, int flags, Error **errp) { ERRP_GUARD(); int ret; + AioContext *ctx = bdrv_get_aio_context(bs); BlockDriverState *new_node_bs = NULL; const char *drvname, *node_name; BlockDriver *drv; @@ -5407,8 +5430,14 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options, GLOBAL_STATE_CODE(); + aio_context_release(ctx); + aio_context_acquire(qemu_get_aio_context()); new_node_bs = bdrv_new_open_driver_opts(drv, node_name, options, flags, errp); + aio_context_release(qemu_get_aio_context()); + aio_context_acquire(ctx); + assert(bdrv_get_aio_context(bs) == ctx); + options = NULL; /* bdrv_new_open_driver() eats options */ if (!new_node_bs) { error_prepend(errp, "Could not create node: "); @@ -5749,7 +5778,8 @@ exit: * sums the size of all data-bearing children. (This excludes backing * children.) 
*/ -static int64_t bdrv_sum_allocated_file_size(BlockDriverState *bs) +static int64_t coroutine_fn GRAPH_RDLOCK +bdrv_sum_allocated_file_size(BlockDriverState *bs) { BdrvChild *child; int64_t child_size, sum = 0; @@ -5777,6 +5807,7 @@ int64_t coroutine_fn bdrv_co_get_allocated_file_size(BlockDriverState *bs) { BlockDriver *drv = bs->drv; IO_CODE(); + assert_bdrv_graph_readable(); if (!drv) { return -ENOMEDIUM; @@ -5849,7 +5880,7 @@ int64_t coroutine_fn bdrv_co_nb_sectors(BlockDriverState *bs) if (!drv) return -ENOMEDIUM; - if (drv->has_variable_length) { + if (bs->bl.has_variable_length) { int ret = bdrv_co_refresh_total_sectors(bs, bs->total_sectors); if (ret < 0) { return ret; @@ -5858,6 +5889,28 @@ int64_t coroutine_fn bdrv_co_nb_sectors(BlockDriverState *bs) return bs->total_sectors; } +/* + * This wrapper is written by hand because this function is in the hot I/O path, + * via blk_get_geometry. + */ +int64_t coroutine_mixed_fn bdrv_nb_sectors(BlockDriverState *bs) +{ + BlockDriver *drv = bs->drv; + IO_CODE(); + + if (!drv) + return -ENOMEDIUM; + + if (bs->bl.has_variable_length) { + int ret = bdrv_refresh_total_sectors(bs, bs->total_sectors); + if (ret < 0) { + return ret; + } + } + + return bs->total_sectors; +} + /** * Return length in bytes on success, -errno on error. * The length is always a multiple of BDRV_SECTOR_SIZE. @@ -5878,15 +5931,6 @@ int64_t coroutine_fn bdrv_co_getlength(BlockDriverState *bs) return ret * BDRV_SECTOR_SIZE; } -/* return 0 as number of sectors if no device present or error */ -void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr) -{ - int64_t nb_sectors = bdrv_nb_sectors(bs); - IO_CODE(); - - *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors; -} - bool bdrv_is_sg(BlockDriverState *bs) { IO_CODE(); @@ -6333,6 +6377,8 @@ int coroutine_fn bdrv_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) int ret; BlockDriver *drv = bs->drv; IO_CODE(); + assert_bdrv_graph_readable(); + /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ if (!drv) { return -ENOMEDIUM; @@ -6381,6 +6427,8 @@ BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs) void coroutine_fn bdrv_co_debug_event(BlockDriverState *bs, BlkdebugEvent event) { IO_CODE(); + assert_bdrv_graph_readable(); + if (!bs || !bs->drv || !bs->drv->bdrv_co_debug_event) { return; } @@ -7015,6 +7063,8 @@ void bdrv_img_create(const char *filename, const char *fmt, return; } + aio_context_acquire(qemu_get_aio_context()); + /* Create parameter list */ create_opts = qemu_opts_append(create_opts, drv->create_opts); create_opts = qemu_opts_append(create_opts, proto_drv->create_opts); @@ -7164,6 +7214,7 @@ out: qemu_opts_del(opts); qemu_opts_free(create_opts); error_propagate(errp, local_err); + aio_context_release(qemu_get_aio_context()); } AioContext *bdrv_get_aio_context(BlockDriverState *bs) @@ -7254,9 +7305,6 @@ static void bdrv_detach_aio_context(BlockDriverState *bs) bs->drv->bdrv_detach_aio_context(bs); } - if (bs->quiesce_counter) { - aio_enable_external(bs->aio_context); - } bs->aio_context = NULL; } @@ -7266,10 +7314,6 @@ static void bdrv_attach_aio_context(BlockDriverState *bs, BdrvAioNotifier *ban, *ban_tmp; GLOBAL_STATE_CODE(); - if (bs->quiesce_counter) { - aio_disable_external(new_context); - } - bs->aio_context = new_context; if (bs->drv && bs->drv->bdrv_attach_aio_context) { @@ -7953,6 +7997,25 @@ void bdrv_add_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, return; } + /* + * Non-zoned block drivers do not follow zoned storage constraints + * 
(i.e. sequential writes to zones). Refuse mixing zoned and non-zoned
+     * drivers in a graph.
+     */
+    if (!parent_bs->drv->supports_zoned_children &&
+        child_bs->bl.zoned == BLK_Z_HM) {
+        /*
+         * The host-aware model allows zoned storage constraints and random
+         * writes, so mixing host-aware and non-zoned drivers is allowed:
+         * a host-aware device can be used as a regular device.
+         */
+        error_setg(errp, "Cannot add a %s child to a parent that %s "
+                   "zoned children",
+                   child_bs->bl.zoned == BLK_Z_HM ? "zoned" : "non-zoned",
+                   parent_bs->drv->supports_zoned_children ?
+                   "supports" : "does not support");
+        return;
+    }
+
     if (!QLIST_EMPTY(&child_bs->parents)) {
         error_setg(errp, "The node %s already has a parent",
                    child_bs->node_name);
diff --git a/block/amend.c b/block/amend.c
index bc4bb7b416..53a410247c 100644
--- a/block/amend.c
+++ b/block/amend.c
@@ -46,6 +46,7 @@ static int coroutine_fn blockdev_amend_run(Job *job, Error **errp)
 {
     BlockdevAmendJob *s = container_of(job, BlockdevAmendJob, common);
     int ret;
+    GRAPH_RDLOCK_GUARD();
 
     job_progress_set_remaining(&s->common, 1);
     ret = s->bs->drv->bdrv_co_amend(s->bs, s->opts, s->force, errp);
@@ -54,7 +55,8 @@ static int coroutine_fn blockdev_amend_run(Job *job, Error **errp)
     return ret;
 }
 
-static int blockdev_amend_pre_run(BlockdevAmendJob *s, Error **errp)
+static int GRAPH_RDLOCK
+blockdev_amend_pre_run(BlockdevAmendJob *s, Error **errp)
 {
     if (s->bs->drv->bdrv_amend_pre_run) {
         return s->bs->drv->bdrv_amend_pre_run(s->bs, errp);
@@ -67,9 +69,11 @@ static void blockdev_amend_free(Job *job)
 {
     BlockdevAmendJob *s = container_of(job, BlockdevAmendJob, common);
 
+    bdrv_graph_rdlock_main_loop();
     if (s->bs->drv->bdrv_amend_clean) {
         s->bs->drv->bdrv_amend_clean(s->bs);
     }
+    bdrv_graph_rdunlock_main_loop();
 
     bdrv_unref(s->bs);
 }
@@ -93,6 +97,8 @@ void qmp_x_blockdev_amend(const char *job_id,
     BlockDriver *drv = bdrv_find_format(fmt);
     BlockDriverState *bs;
 
+    GRAPH_RDLOCK_GUARD_MAINLOOP();
+
     bs = bdrv_lookup_bs(NULL, node_name, errp);
     if (!bs) {
         return;
diff --git a/block/blkdebug.c b/block/blkdebug.c
index 978c8cff9e..addad914b3 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -583,8 +583,8 @@ out:
     return ret;
 }
 
-static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-                      BlkdebugIOType iotype)
+static int coroutine_fn rule_check(BlockDriverState *bs, uint64_t offset,
+                                   uint64_t bytes, BlkdebugIOType iotype)
 {
     BDRVBlkdebugState *s = bs->opaque;
     BlkdebugRule *rule = NULL;
diff --git a/block/blkio.c b/block/blkio.c
index 0cdc99a729..72117fa005 100644
--- a/block/blkio.c
+++ b/block/blkio.c
@@ -306,23 +306,18 @@ static void blkio_attach_aio_context(BlockDriverState *bs,
 {
     BDRVBlkioState *s = bs->opaque;
 
-    aio_set_fd_handler(new_context,
-                       s->completion_fd,
-                       false,
-                       blkio_completion_fd_read,
-                       NULL,
+    aio_set_fd_handler(new_context, s->completion_fd,
+                       blkio_completion_fd_read, NULL,
                        blkio_completion_fd_poll,
-                       blkio_completion_fd_poll_ready,
-                       bs);
+                       blkio_completion_fd_poll_ready, bs);
 }
 
 static void blkio_detach_aio_context(BlockDriverState *bs)
 {
     BDRVBlkioState *s = bs->opaque;
 
-    aio_set_fd_handler(bdrv_get_aio_context(bs),
-                       s->completion_fd,
-                       false, NULL, NULL, NULL, NULL, NULL);
+    aio_set_fd_handler(bdrv_get_aio_context(bs), s->completion_fd, NULL, NULL,
+                       NULL, NULL, NULL);
 }
 
 /* Call with s->blkio_lock held to submit I/O after enqueuing a new request */
diff --git a/block/blkverify.c b/block/blkverify.c
index 1c16f86b2e..7326461f30 100644
--- a/block/blkverify.c
+++ b/block/blkverify.c
@@ -265,8 +265,9 @@ static int coroutine_fn GRAPH_RDLOCK
blkverify_co_flush(BlockDriverState *bs) return bdrv_co_flush(s->test_file->bs); } -static bool blkverify_recurse_can_replace(BlockDriverState *bs, - BlockDriverState *to_replace) +static bool GRAPH_RDLOCK +blkverify_recurse_can_replace(BlockDriverState *bs, + BlockDriverState *to_replace) { BDRVBlkverifyState *s = bs->opaque; diff --git a/block/block-backend.c b/block/block-backend.c index 278b04ce69..241f643507 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -80,9 +80,10 @@ struct BlockBackend { NotifierList remove_bs_notifiers, insert_bs_notifiers; QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers; - int quiesce_counter; + int quiesce_counter; /* atomic: written under BQL, read by other threads */ + QemuMutex queued_requests_lock; /* protects queued_requests */ CoQueue queued_requests; - bool disable_request_queuing; + bool disable_request_queuing; /* atomic */ VMChangeStateEntry *vmsh; bool force_allow_inactivate; @@ -368,6 +369,7 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm) block_acct_init(&blk->stats); + qemu_mutex_init(&blk->queued_requests_lock); qemu_co_queue_init(&blk->queued_requests); notifier_list_init(&blk->remove_bs_notifiers); notifier_list_init(&blk->insert_bs_notifiers); @@ -387,6 +389,8 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm) * Both sets of permissions can be changed later using blk_set_perm(). * * Return the new BlockBackend on success, null on failure. + * + * Callers must hold the AioContext lock of @bs. */ BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm, uint64_t shared_perm, Error **errp) @@ -404,11 +408,15 @@ BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm, /* * Creates a new BlockBackend, opens a new BlockDriverState, and connects both. - * The new BlockBackend is in the main AioContext. + * By default, the new BlockBackend is in the main AioContext, but if the + * parameters connect it with any existing node in a different AioContext, it + * may end up there instead. * * Just as with bdrv_open(), after having called this function the reference to * @options belongs to the block layer (even on failure). * + * Called without holding an AioContext lock. + * * TODO: Remove @filename and @flags; it should be possible to specify a whole * BDS tree just by specifying the @options QDict (or @reference, * alternatively). 
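+ * + * A minimal sketch of the current calling convention (illustrative only; + * the image path, driver name, and flags below are hypothetical and error + * handling is omitted): + * + * QDict *opts = qdict_new(); + * qdict_put_str(opts, "driver", "qcow2"); + * blk = blk_new_open("/path/to/image.qcow2", NULL, opts, BDRV_O_RDWR, errp); + *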
At the time of adding this function, this is not possible, @@ -420,6 +428,7 @@ BlockBackend *blk_new_open(const char *filename, const char *reference, { BlockBackend *blk; BlockDriverState *bs; + AioContext *ctx; uint64_t perm = 0; uint64_t shared = BLK_PERM_ALL; @@ -449,16 +458,24 @@ BlockBackend *blk_new_open(const char *filename, const char *reference, shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED; } - blk = blk_new(qemu_get_aio_context(), perm, shared); + aio_context_acquire(qemu_get_aio_context()); bs = bdrv_open(filename, reference, options, flags, errp); + aio_context_release(qemu_get_aio_context()); if (!bs) { - blk_unref(blk); return NULL; } - blk->root = bdrv_root_attach_child(bs, "root", &child_root, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - perm, shared, blk, errp); + /* bdrv_open() could have moved bs to a different AioContext */ + ctx = bdrv_get_aio_context(bs); + blk = blk_new(bdrv_get_aio_context(bs), perm, shared); + blk->perm = perm; + blk->shared_perm = shared; + + aio_context_acquire(ctx); + blk_insert_bs(blk, bs, errp); + bdrv_unref(bs); + aio_context_release(ctx); + if (!blk->root) { blk_unref(blk); return NULL; @@ -485,6 +502,8 @@ static void blk_delete(BlockBackend *blk) assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers)); assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers)); assert(QLIST_EMPTY(&blk->aio_notifiers)); + assert(qemu_co_queue_empty(&blk->queued_requests)); + qemu_mutex_destroy(&blk->queued_requests_lock); QTAILQ_REMOVE(&block_backends, blk, link); drive_info_del(blk->legacy_dinfo); block_acct_cleanup(&blk->stats); @@ -897,6 +916,8 @@ void blk_remove_bs(BlockBackend *blk) /* * Associates a new BlockDriverState with @blk. + * + * Callers must hold the AioContext lock of @bs. */ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) { @@ -1057,7 +1078,7 @@ void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, blk->dev_opaque = opaque; /* Are we currently quiesced? Should we enforce this right now? */ - if (blk->quiesce_counter && ops && ops->drained_begin) { + if (qatomic_read(&blk->quiesce_counter) && ops && ops->drained_begin) { ops->drained_begin(opaque); } } @@ -1232,7 +1253,7 @@ void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow) void blk_set_disable_request_queuing(BlockBackend *blk, bool disable) { IO_CODE(); - blk->disable_request_queuing = disable; + qatomic_set(&blk->disable_request_queuing, disable); } static int coroutine_fn GRAPH_RDLOCK @@ -1266,15 +1287,30 @@ blk_check_byte_request(BlockBackend *blk, int64_t offset, int64_t bytes) return 0; } +/* Are we currently in a drained section? */ +bool blk_in_drain(BlockBackend *blk) +{ + GLOBAL_STATE_CODE(); /* change to IO_OR_GS_CODE(), if necessary */ + return qatomic_read(&blk->quiesce_counter); +} + /* To be called between exactly one pair of blk_inc/dec_in_flight() */ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk) { assert(blk->in_flight > 0); - if (blk->quiesce_counter && !blk->disable_request_queuing) { + if (qatomic_read(&blk->quiesce_counter) && + !qatomic_read(&blk->disable_request_queuing)) { + /* + * Take lock before decrementing in flight counter so main loop thread + * waits for us to enqueue ourselves before it can leave the drained + * section. 
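+ * Otherwise the resume loop in blk_root_drained_end() could run after + * blk_dec_in_flight() but before this coroutine is actually queued, and + * the wakeup would be missed.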
+ */ + qemu_mutex_lock(&blk->queued_requests_lock); blk_dec_in_flight(blk); - qemu_co_queue_wait(&blk->queued_requests, NULL); + qemu_co_queue_wait(&blk->queued_requests, &blk->queued_requests_lock); blk_inc_in_flight(blk); + qemu_mutex_unlock(&blk->queued_requests_lock); } } @@ -1615,26 +1651,53 @@ int64_t coroutine_fn blk_co_getlength(BlockBackend *blk) return bdrv_co_getlength(blk_bs(blk)); } -void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr) -{ - IO_CODE(); - if (!blk_bs(blk)) { - *nb_sectors_ptr = 0; - } else { - bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr); - } -} - int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk) { + BlockDriverState *bs = blk_bs(blk); + IO_CODE(); GRAPH_RDLOCK_GUARD(); - if (!blk_co_is_available(blk)) { + if (!bs) { return -ENOMEDIUM; + } else { + return bdrv_co_nb_sectors(bs); } +} - return bdrv_co_nb_sectors(blk_bs(blk)); +/* + * This wrapper is written by hand because this function is in the hot I/O path, + * via blk_get_geometry. + */ +int64_t coroutine_mixed_fn blk_nb_sectors(BlockBackend *blk) +{ + BlockDriverState *bs = blk_bs(blk); + + IO_CODE(); + + if (!bs) { + return -ENOMEDIUM; + } else { + return bdrv_nb_sectors(bs); + } +} + +/* return 0 as number of sectors if no device present or error */ +void coroutine_fn blk_co_get_geometry(BlockBackend *blk, + uint64_t *nb_sectors_ptr) +{ + int64_t ret = blk_co_nb_sectors(blk); + *nb_sectors_ptr = ret < 0 ? 0 : ret; +} + +/* + * This wrapper is written by hand because this function is in the hot I/O path. + */ +void coroutine_mixed_fn blk_get_geometry(BlockBackend *blk, + uint64_t *nb_sectors_ptr) +{ + int64_t ret = blk_nb_sectors(blk); + *nb_sectors_ptr = ret < 0 ? 0 : ret; } BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset, @@ -1806,6 +1869,204 @@ int coroutine_fn blk_co_flush(BlockBackend *blk) return ret; } +static void coroutine_fn blk_aio_zone_report_entry(void *opaque) +{ + BlkAioEmAIOCB *acb = opaque; + BlkRwCo *rwco = &acb->rwco; + + rwco->ret = blk_co_zone_report(rwco->blk, rwco->offset, + (unsigned int*)(uintptr_t)acb->bytes, + rwco->iobuf); + blk_aio_complete(acb); +} + +BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset, + unsigned int *nr_zones, + BlockZoneDescriptor *zones, + BlockCompletionFunc *cb, void *opaque) +{ + BlkAioEmAIOCB *acb; + Coroutine *co; + IO_CODE(); + + blk_inc_in_flight(blk); + acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); + acb->rwco = (BlkRwCo) { + .blk = blk, + .offset = offset, + .iobuf = zones, + .ret = NOT_DONE, + }; + acb->bytes = (int64_t)(uintptr_t)nr_zones, + acb->has_returned = false; + + co = qemu_coroutine_create(blk_aio_zone_report_entry, acb); + aio_co_enter(blk_get_aio_context(blk), co); + + acb->has_returned = true; + if (acb->rwco.ret != NOT_DONE) { + replay_bh_schedule_oneshot_event(blk_get_aio_context(blk), + blk_aio_complete_bh, acb); + } + + return &acb->common; +} + +static void coroutine_fn blk_aio_zone_mgmt_entry(void *opaque) +{ + BlkAioEmAIOCB *acb = opaque; + BlkRwCo *rwco = &acb->rwco; + + rwco->ret = blk_co_zone_mgmt(rwco->blk, + (BlockZoneOp)(uintptr_t)rwco->iobuf, + rwco->offset, acb->bytes); + blk_aio_complete(acb); +} + +BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op, + int64_t offset, int64_t len, + BlockCompletionFunc *cb, void *opaque) { + BlkAioEmAIOCB *acb; + Coroutine *co; + IO_CODE(); + + blk_inc_in_flight(blk); + acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); + acb->rwco = (BlkRwCo) { + .blk = blk, + .offset = offset, + .iobuf = (void 
*)(uintptr_t)op, + .ret = NOT_DONE, + }; + acb->bytes = len; + acb->has_returned = false; + + co = qemu_coroutine_create(blk_aio_zone_mgmt_entry, acb); + aio_co_enter(blk_get_aio_context(blk), co); + + acb->has_returned = true; + if (acb->rwco.ret != NOT_DONE) { + replay_bh_schedule_oneshot_event(blk_get_aio_context(blk), + blk_aio_complete_bh, acb); + } + + return &acb->common; +} + +static void coroutine_fn blk_aio_zone_append_entry(void *opaque) +{ + BlkAioEmAIOCB *acb = opaque; + BlkRwCo *rwco = &acb->rwco; + + rwco->ret = blk_co_zone_append(rwco->blk, (int64_t *)(uintptr_t)acb->bytes, + rwco->iobuf, rwco->flags); + blk_aio_complete(acb); +} + +BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset, + QEMUIOVector *qiov, BdrvRequestFlags flags, + BlockCompletionFunc *cb, void *opaque) { + BlkAioEmAIOCB *acb; + Coroutine *co; + IO_CODE(); + + blk_inc_in_flight(blk); + acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); + acb->rwco = (BlkRwCo) { + .blk = blk, + .ret = NOT_DONE, + .flags = flags, + .iobuf = qiov, + }; + acb->bytes = (int64_t)(uintptr_t)offset; + acb->has_returned = false; + + co = qemu_coroutine_create(blk_aio_zone_append_entry, acb); + aio_co_enter(blk_get_aio_context(blk), co); + acb->has_returned = true; + if (acb->rwco.ret != NOT_DONE) { + replay_bh_schedule_oneshot_event(blk_get_aio_context(blk), + blk_aio_complete_bh, acb); + } + + return &acb->common; +} + +/* + * Send a zone_report command. + * offset is a byte offset from the start of the device. No alignment + * required for offset. + * nr_zones represents IN maximum and OUT actual. + */ +int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset, + unsigned int *nr_zones, + BlockZoneDescriptor *zones) +{ + int ret; + IO_CODE(); + + blk_inc_in_flight(blk); /* increase before waiting */ + blk_wait_while_drained(blk); + GRAPH_RDLOCK_GUARD(); + if (!blk_is_available(blk)) { + blk_dec_in_flight(blk); + return -ENOMEDIUM; + } + ret = bdrv_co_zone_report(blk_bs(blk), offset, nr_zones, zones); + blk_dec_in_flight(blk); + return ret; +} + +/* + * Send a zone_management command. + * op is the zone operation; + * offset is the byte offset from the start of the zoned device; + * len is the maximum number of bytes the command should operate on. It + * should be aligned with the device zone size. + */ +int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op, + int64_t offset, int64_t len) +{ + int ret; + IO_CODE(); + + blk_inc_in_flight(blk); + blk_wait_while_drained(blk); + GRAPH_RDLOCK_GUARD(); + + ret = blk_check_byte_request(blk, offset, len); + if (ret < 0) { + blk_dec_in_flight(blk); + return ret; + } + + ret = bdrv_co_zone_mgmt(blk_bs(blk), op, offset, len); + blk_dec_in_flight(blk); + return ret; +} + +/* + * Send a zone_append command. 
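+ * offset points to the expected write position inside the zone; on success + * it is updated to the offset at which the data was actually written, since + * a zone append lets the device place the data at the zone's write pointer.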
+ */ +int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset, + QEMUIOVector *qiov, BdrvRequestFlags flags) +{ + int ret; + IO_CODE(); + + blk_inc_in_flight(blk); + blk_wait_while_drained(blk); + GRAPH_RDLOCK_GUARD(); + if (!blk_is_available(blk)) { + blk_dec_in_flight(blk); + return -ENOMEDIUM; + } + + ret = bdrv_co_zone_append(blk_bs(blk), offset, qiov, flags); + blk_dec_in_flight(blk); + return ret; +} + void blk_drain(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); @@ -1818,7 +2079,7 @@ void blk_drain(BlockBackend *blk) /* We may have -ENOMEDIUM completions in flight */ AIO_WAIT_WHILE(blk_get_aio_context(blk), - qatomic_mb_read(&blk->in_flight) > 0); + qatomic_read(&blk->in_flight) > 0); if (bs) { bdrv_drained_end(bs); @@ -1835,14 +2096,8 @@ void blk_drain_all(void) bdrv_drain_all_begin(); while ((blk = blk_all_next(blk)) != NULL) { - AioContext *ctx = blk_get_aio_context(blk); - - aio_context_acquire(ctx); - /* We may have -ENOMEDIUM completions in flight */ - AIO_WAIT_WHILE(ctx, qatomic_mb_read(&blk->in_flight) > 0); - - aio_context_release(ctx); + AIO_WAIT_WHILE_UNLOCKED(NULL, qatomic_read(&blk->in_flight) > 0); } bdrv_drain_all_end(); @@ -1991,7 +2246,15 @@ void blk_activate(BlockBackend *blk, Error **errp) return; } - bdrv_activate(bs, errp); + /* + * Migration code can call this function in coroutine context, so leave + * coroutine context if necessary. + */ + if (qemu_in_coroutine()) { + bdrv_co_activate(bs, errp); + } else { + bdrv_activate(bs, errp); + } } bool coroutine_fn blk_co_is_inserted(BlockBackend *blk) @@ -2155,9 +2418,14 @@ void blk_op_unblock_all(BlockBackend *blk, Error *reason) AioContext *blk_get_aio_context(BlockBackend *blk) { - BlockDriverState *bs = blk_bs(blk); + BlockDriverState *bs; IO_CODE(); + if (!blk) { + return qemu_get_aio_context(); + } + + bs = blk_bs(blk); if (bs) { AioContext *ctx = bdrv_get_aio_context(blk_bs(blk)); assert(ctx == blk->ctx); @@ -2172,52 +2440,31 @@ static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb) return blk_get_aio_context(blk_acb->blk); } -static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context, - bool update_root_node, Error **errp) -{ - BlockDriverState *bs = blk_bs(blk); - ThrottleGroupMember *tgm = &blk->public.throttle_group_member; - int ret; - - if (bs) { - bdrv_ref(bs); - - if (update_root_node) { - /* - * update_root_node MUST be false for blk_root_set_aio_ctx_commit(), - * as we are already in the commit function of a transaction. 
- */ - ret = bdrv_try_change_aio_context(bs, new_context, blk->root, errp); - if (ret < 0) { - bdrv_unref(bs); - return ret; - } - } - /* - * Make blk->ctx consistent with the root node before we invoke any - * other operations like drain that might inquire blk->ctx - */ - blk->ctx = new_context; - if (tgm->throttle_state) { - bdrv_drained_begin(bs); - throttle_group_detach_aio_context(tgm); - throttle_group_attach_aio_context(tgm, new_context); - bdrv_drained_end(bs); - } - - bdrv_unref(bs); - } else { - blk->ctx = new_context; - } - - return 0; -} - int blk_set_aio_context(BlockBackend *blk, AioContext *new_context, Error **errp) { + bool old_allow_change; + BlockDriverState *bs = blk_bs(blk); + int ret; + GLOBAL_STATE_CODE(); - return blk_do_set_aio_context(blk, new_context, true, errp); + + if (!bs) { + blk->ctx = new_context; + return 0; + } + + bdrv_ref(bs); + + old_allow_change = blk->allow_aio_context_change; + blk->allow_aio_context_change = true; + + ret = bdrv_try_change_aio_context(bs, new_context, NULL, errp); + + blk->allow_aio_context_change = old_allow_change; + + bdrv_unref(bs); + return ret; } typedef struct BdrvStateBlkRootContext { @@ -2229,8 +2476,14 @@ static void blk_root_set_aio_ctx_commit(void *opaque) { BdrvStateBlkRootContext *s = opaque; BlockBackend *blk = s->blk; + AioContext *new_context = s->new_ctx; + ThrottleGroupMember *tgm = &blk->public.throttle_group_member; - blk_do_set_aio_context(blk, s->new_ctx, false, &error_abort); + blk->ctx = new_context; + if (tgm->throttle_state) { + throttle_group_detach_aio_context(tgm); + throttle_group_attach_aio_context(tgm, new_context); + } } static TransactionActionDrv set_blk_root_context = { @@ -2568,7 +2821,7 @@ static void blk_root_drained_begin(BdrvChild *child) BlockBackend *blk = child->opaque; ThrottleGroupMember *tgm = &blk->public.throttle_group_member; - if (++blk->quiesce_counter == 1) { + if (qatomic_fetch_inc(&blk->quiesce_counter) == 0) { if (blk->dev_ops && blk->dev_ops->drained_begin) { blk->dev_ops->drained_begin(blk->dev_opaque); } @@ -2586,7 +2839,7 @@ static bool blk_root_drained_poll(BdrvChild *child) { BlockBackend *blk = child->opaque; bool busy = false; - assert(blk->quiesce_counter); + assert(qatomic_read(&blk->quiesce_counter)); if (blk->dev_ops && blk->dev_ops->drained_poll) { busy = blk->dev_ops->drained_poll(blk->dev_opaque); @@ -2597,18 +2850,21 @@ static bool blk_root_drained_poll(BdrvChild *child) static void blk_root_drained_end(BdrvChild *child) { BlockBackend *blk = child->opaque; - assert(blk->quiesce_counter); + assert(qatomic_read(&blk->quiesce_counter)); assert(blk->public.throttle_group_member.io_limits_disabled); qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled); - if (--blk->quiesce_counter == 0) { + if (qatomic_fetch_dec(&blk->quiesce_counter) == 1) { if (blk->dev_ops && blk->dev_ops->drained_end) { blk->dev_ops->drained_end(blk->dev_opaque); } - while (qemu_co_enter_next(&blk->queued_requests, NULL)) { + qemu_mutex_lock(&blk->queued_requests_lock); + while (qemu_co_enter_next(&blk->queued_requests, + &blk->queued_requests_lock)) { /* Resume all queued requests */ } + qemu_mutex_unlock(&blk->queued_requests_lock); } } diff --git a/block/commit.c b/block/commit.c index 2b20fd0fd4..aa45beb0f0 100644 --- a/block/commit.c +++ b/block/commit.c @@ -116,7 +116,6 @@ static int coroutine_fn commit_run(Job *job, Error **errp) { CommitBlockJob *s = container_of(job, CommitBlockJob, common.job); int64_t offset; - uint64_t delay_ns = 0; int ret = 0; int64_t n = 0; /* 
bytes */ QEMU_AUTO_VFREE void *buf = NULL; @@ -149,7 +148,7 @@ static int coroutine_fn commit_run(Job *job, Error **errp) /* Note that even when no rate limit is applied we need to yield * with no pending I/O here so that bdrv_drain_all() returns. */ - job_sleep_ns(&s->common.job, delay_ns); + block_job_ratelimit_sleep(&s->common); if (job_is_cancelled(&s->common.job)) { break; } @@ -184,9 +183,7 @@ static int coroutine_fn commit_run(Job *job, Error **errp) job_progress_update(&s->common.job, n); if (copy) { - delay_ns = block_job_ratelimit_get_delay(&s->common, n); - } else { - delay_ns = 0; + block_job_ratelimit_processed_bytes(&s->common, n); } } diff --git a/block/copy-before-write.c b/block/copy-before-write.c index 646d8227a4..b866e42271 100644 --- a/block/copy-before-write.c +++ b/block/copy-before-write.c @@ -412,6 +412,7 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, int64_t cluster_size; g_autoptr(BlockdevOptions) full_opts = NULL; BlockdevOptionsCbw *opts; + AioContext *ctx; int ret; full_opts = cbw_parse_options(options, errp); @@ -432,11 +433,15 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, return -EINVAL; } + ctx = bdrv_get_aio_context(bs); + aio_context_acquire(ctx); + if (opts->bitmap) { bitmap = block_dirty_bitmap_lookup(opts->bitmap->node, opts->bitmap->name, NULL, errp); if (!bitmap) { - return -EINVAL; + ret = -EINVAL; + goto out; } } s->on_cbw_error = opts->has_on_cbw_error ? opts->on_cbw_error : @@ -454,21 +459,24 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, s->bcs = block_copy_state_new(bs->file, s->target, bitmap, errp); if (!s->bcs) { error_prepend(errp, "Cannot create block-copy-state: "); - return -EINVAL; + ret = -EINVAL; + goto out; } cluster_size = block_copy_cluster_size(s->bcs); s->done_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp); if (!s->done_bitmap) { - return -EINVAL; + ret = -EINVAL; + goto out; } bdrv_disable_dirty_bitmap(s->done_bitmap); /* s->access_bitmap starts equal to bcs bitmap */ s->access_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp); if (!s->access_bitmap) { - return -EINVAL; + ret = -EINVAL; + goto out; } bdrv_disable_dirty_bitmap(s->access_bitmap); bdrv_dirty_bitmap_merge_internal(s->access_bitmap, @@ -478,7 +486,10 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, qemu_co_mutex_init(&s->lock); QLIST_INIT(&s->frozen_read_reqs); - return 0; + ret = 0; +out: + aio_context_release(ctx); + return ret; } static void cbw_close(BlockDriverState *bs) diff --git a/block/copy-on-read.c b/block/copy-on-read.c index cc0f848b0f..b4d6b7efc3 100644 --- a/block/copy-on-read.c +++ b/block/copy-on-read.c @@ -259,7 +259,6 @@ static BlockDriver bdrv_copy_on_read = { .bdrv_co_eject = cor_co_eject, .bdrv_co_lock_medium = cor_co_lock_medium, - .has_variable_length = true, .is_filter = true, }; diff --git a/block/coroutines.h b/block/coroutines.h index dd9f3d449b..f3226682d6 100644 --- a/block/coroutines.h +++ b/block/coroutines.h @@ -61,7 +61,7 @@ bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); int coroutine_fn GRAPH_RDLOCK bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); -int coroutine_fn +int coroutine_fn GRAPH_RDLOCK nbd_co_do_establish_connection(BlockDriverState *bs, bool blocking, Error **errp); @@ -85,7 +85,8 @@ bdrv_common_block_status_above(BlockDriverState *bs, int64_t *map, BlockDriverState **file, int *depth); -int co_wrapper_mixed + +int 
co_wrapper_mixed_bdrv_rdlock nbd_do_establish_connection(BlockDriverState *bs, bool blocking, Error **errp); #endif /* BLOCK_COROUTINES_H */ diff --git a/block/create.c b/block/create.c index bf67b9947c..6b23a21675 100644 --- a/block/create.c +++ b/block/create.c @@ -43,7 +43,6 @@ static int coroutine_fn blockdev_create_run(Job *job, Error **errp) int ret; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD(); job_progress_set_remaining(&s->common, 1); ret = s->drv->bdrv_co_create(s->opts, errp); diff --git a/block/crypto.c b/block/crypto.c index ca67289187..6ee8d46d30 100644 --- a/block/crypto.c +++ b/block/crypto.c @@ -99,12 +99,10 @@ struct BlockCryptoCreateData { }; -static int block_crypto_create_write_func(QCryptoBlock *block, - size_t offset, - const uint8_t *buf, - size_t buflen, - void *opaque, - Error **errp) +static int coroutine_fn GRAPH_UNLOCKED +block_crypto_create_write_func(QCryptoBlock *block, size_t offset, + const uint8_t *buf, size_t buflen, void *opaque, + Error **errp) { struct BlockCryptoCreateData *data = opaque; ssize_t ret; @@ -117,10 +115,9 @@ static int block_crypto_create_write_func(QCryptoBlock *block, return 0; } -static int block_crypto_create_init_func(QCryptoBlock *block, - size_t headerlen, - void *opaque, - Error **errp) +static int coroutine_fn GRAPH_UNLOCKED +block_crypto_create_init_func(QCryptoBlock *block, size_t headerlen, + void *opaque, Error **errp) { struct BlockCryptoCreateData *data = opaque; Error *local_error = NULL; @@ -314,7 +311,7 @@ static int block_crypto_open_generic(QCryptoBlockFormat format, } -static int coroutine_fn +static int coroutine_fn GRAPH_UNLOCKED block_crypto_co_create_generic(BlockDriverState *bs, int64_t size, QCryptoBlockCreateOptions *opts, PreallocMode prealloc, Error **errp) @@ -355,7 +352,7 @@ block_crypto_co_create_generic(BlockDriverState *bs, int64_t size, ret = 0; cleanup: qcrypto_block_free(crypto); - blk_unref(blk); + blk_co_unref(blk); return ret; } @@ -627,7 +624,7 @@ static int block_crypto_open_luks(BlockDriverState *bs, bs, options, flags, errp); } -static int coroutine_fn +static int coroutine_fn GRAPH_UNLOCKED block_crypto_co_create_luks(BlockdevCreateOptions *create_options, Error **errp) { BlockdevCreateOptionsLUKS *luks_opts; @@ -661,11 +658,11 @@ block_crypto_co_create_luks(BlockdevCreateOptions *create_options, Error **errp) ret = 0; fail: - bdrv_unref(bs); + bdrv_co_unref(bs); return ret; } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED block_crypto_co_create_opts_luks(BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp) { @@ -727,16 +724,18 @@ fail: * beforehand, it has been truncated and corrupted in the process. 
*/ if (ret) { + bdrv_graph_co_rdlock(); bdrv_co_delete_file_noerr(bs); + bdrv_graph_co_rdunlock(); } - bdrv_unref(bs); + bdrv_co_unref(bs); qapi_free_QCryptoBlockCreateOptions(create_opts); qobject_unref(cryptoopts); return ret; } -static int coroutine_fn +static int coroutine_fn GRAPH_RDLOCK block_crypto_co_get_info_luks(BlockDriverState *bs, BlockDriverInfo *bdi) { BlockDriverInfo subbdi; diff --git a/block/curl.c b/block/curl.c index 8bb39a134e..0fc42d03d7 100644 --- a/block/curl.c +++ b/block/curl.c @@ -132,7 +132,7 @@ static gboolean curl_drop_socket(void *key, void *value, void *opaque) CURLSocket *socket = value; BDRVCURLState *s = socket->s; - aio_set_fd_handler(s->aio_context, socket->fd, false, + aio_set_fd_handler(s->aio_context, socket->fd, NULL, NULL, NULL, NULL, NULL); return true; } @@ -180,20 +180,20 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action, trace_curl_sock_cb(action, (int)fd); switch (action) { case CURL_POLL_IN: - aio_set_fd_handler(s->aio_context, fd, false, + aio_set_fd_handler(s->aio_context, fd, curl_multi_do, NULL, NULL, NULL, socket); break; case CURL_POLL_OUT: - aio_set_fd_handler(s->aio_context, fd, false, + aio_set_fd_handler(s->aio_context, fd, NULL, curl_multi_do, NULL, NULL, socket); break; case CURL_POLL_INOUT: - aio_set_fd_handler(s->aio_context, fd, false, + aio_set_fd_handler(s->aio_context, fd, curl_multi_do, curl_multi_do, NULL, NULL, socket); break; case CURL_POLL_REMOVE: - aio_set_fd_handler(s->aio_context, fd, false, + aio_set_fd_handler(s->aio_context, fd, NULL, NULL, NULL, NULL, NULL); break; } diff --git a/block/dmg-lzfse.c b/block/dmg-lzfse.c index 6798cf4fbf..4ea0b9b20d 100644 --- a/block/dmg-lzfse.c +++ b/block/dmg-lzfse.c @@ -23,7 +23,12 @@ */ #include "qemu/osdep.h" #include "dmg.h" + +/* Work around a -Wstrict-prototypes warning in LZFSE headers */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-prototypes" #include <lzfse.h> +#pragma GCC diagnostic pop static int dmg_uncompress_lzfse_do(char *next_in, unsigned int avail_in, char *next_out, unsigned int avail_out) diff --git a/block/dmg.c b/block/dmg.c index e10b9a2ba5..2769900359 100644 --- a/block/dmg.c +++ b/block/dmg.c @@ -31,11 +31,8 @@ #include "qemu/memalign.h" #include "dmg.h" -int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in, - char *next_out, unsigned int avail_out); - -int (*dmg_uncompress_lzfse)(char *next_in, unsigned int avail_in, - char *next_out, unsigned int avail_out); +BdrvDmgUncompressFunc *dmg_uncompress_bz2; +BdrvDmgUncompressFunc *dmg_uncompress_lzfse; enum { /* Limit chunk sizes to prevent unreasonable amounts of memory being used diff --git a/block/dmg.h b/block/dmg.h index e488601b62..dcd6165e63 100644 --- a/block/dmg.h +++ b/block/dmg.h @@ -51,10 +51,10 @@ typedef struct BDRVDMGState { z_stream zstream; } BDRVDMGState; -extern int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in, - char *next_out, unsigned int avail_out); +typedef int BdrvDmgUncompressFunc(char *next_in, unsigned int avail_in, + char *next_out, unsigned int avail_out); -extern int (*dmg_uncompress_lzfse)(char *next_in, unsigned int avail_in, - char *next_out, unsigned int avail_out); +extern BdrvDmgUncompressFunc *dmg_uncompress_bz2; +extern BdrvDmgUncompressFunc *dmg_uncompress_lzfse; #endif diff --git a/block/export/export.c b/block/export/export.c index 28a91c9c42..10316b43c5 100644 --- a/block/export/export.c +++ b/block/export/export.c @@ -192,7 +192,10 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) return
exp; fail: - blk_unref(blk); + if (blk) { + blk_set_dev_ops(blk, NULL, NULL); + blk_unref(blk); + } aio_context_release(ctx); if (exp) { g_free(exp->id); @@ -201,11 +204,10 @@ fail: return NULL; } -/* Callers must hold exp->ctx lock */ void blk_exp_ref(BlockExport *exp) { - assert(exp->refcount > 0); - exp->refcount++; + assert(qatomic_read(&exp->refcount) > 0); + qatomic_inc(&exp->refcount); } /* Runs in the main thread */ @@ -219,6 +221,7 @@ static void blk_exp_delete_bh(void *opaque) assert(exp->refcount == 0); QLIST_REMOVE(exp, next); exp->drv->delete(exp); + blk_set_dev_ops(exp->blk, NULL, NULL); blk_unref(exp->blk); qapi_event_send_block_export_deleted(exp->id); g_free(exp->id); @@ -227,11 +230,10 @@ static void blk_exp_delete_bh(void *opaque) aio_context_release(aio_context); } -/* Callers must hold exp->ctx lock */ void blk_exp_unref(BlockExport *exp) { - assert(exp->refcount > 0); - if (--exp->refcount == 0) { + assert(qatomic_read(&exp->refcount) > 0); + if (qatomic_fetch_dec(&exp->refcount) == 1) { /* Touch the block_exports list only in the main thread */ aio_bh_schedule_oneshot(qemu_get_aio_context(), blk_exp_delete_bh, exp); @@ -306,7 +308,7 @@ void blk_exp_close_all_type(BlockExportType type) blk_exp_request_shutdown(exp); } - AIO_WAIT_WHILE(NULL, blk_exp_has_type(type)); + AIO_WAIT_WHILE_UNLOCKED(NULL, blk_exp_has_type(type)); } void blk_exp_close_all(void) @@ -339,7 +341,8 @@ void qmp_block_export_del(const char *id, if (!has_mode) { mode = BLOCK_EXPORT_REMOVE_MODE_SAFE; } - if (mode == BLOCK_EXPORT_REMOVE_MODE_SAFE && exp->refcount > 1) { + if (mode == BLOCK_EXPORT_REMOVE_MODE_SAFE && + qatomic_read(&exp->refcount) > 1) { error_setg(errp, "export '%s' still in use", exp->id); error_append_hint(errp, "Use mode='hard' to force client " "disconnect\n"); diff --git a/block/export/fuse.c b/block/export/fuse.c index e5fc4af165..3307b64089 100644 --- a/block/export/fuse.c +++ b/block/export/fuse.c @@ -50,6 +50,7 @@ typedef struct FuseExport { struct fuse_session *fuse_session; struct fuse_buf fuse_buf; + unsigned int in_flight; /* atomic */ bool mounted, fd_handler_set_up; char *mountpoint; @@ -78,6 +79,42 @@ static void read_from_fuse_export(void *opaque); static bool is_regular_file(const char *path, Error **errp); +static void fuse_export_drained_begin(void *opaque) +{ + FuseExport *exp = opaque; + + aio_set_fd_handler(exp->common.ctx, + fuse_session_fd(exp->fuse_session), + NULL, NULL, NULL, NULL, NULL); + exp->fd_handler_set_up = false; +} + +static void fuse_export_drained_end(void *opaque) +{ + FuseExport *exp = opaque; + + /* Refresh AioContext in case it changed */ + exp->common.ctx = blk_get_aio_context(exp->common.blk); + + aio_set_fd_handler(exp->common.ctx, + fuse_session_fd(exp->fuse_session), + read_from_fuse_export, NULL, NULL, NULL, exp); + exp->fd_handler_set_up = true; +} + +static bool fuse_export_drained_poll(void *opaque) +{ + FuseExport *exp = opaque; + + return qatomic_read(&exp->in_flight) > 0; +} + +static const BlockDevOps fuse_export_blk_dev_ops = { + .drained_begin = fuse_export_drained_begin, + .drained_end = fuse_export_drained_end, + .drained_poll = fuse_export_drained_poll, +}; + static int fuse_export_create(BlockExport *blk_exp, BlockExportOptions *blk_exp_args, Error **errp) @@ -101,6 +138,15 @@ static int fuse_export_create(BlockExport *blk_exp, } } + blk_set_dev_ops(exp->common.blk, &fuse_export_blk_dev_ops, exp); + + /* + * We handle draining ourselves using an in-flight counter and by disabling + * the FUSE fd handler. 
Do not queue BlockBackend requests, they need to + * complete so the in-flight counter reaches zero. + */ + blk_set_disable_request_queuing(exp->common.blk, true); + init_exports_table(); /* @@ -224,7 +270,7 @@ static int setup_fuse_export(FuseExport *exp, const char *mountpoint, g_hash_table_insert(exports, g_strdup(mountpoint), NULL); aio_set_fd_handler(exp->common.ctx, - fuse_session_fd(exp->fuse_session), true, + fuse_session_fd(exp->fuse_session), read_from_fuse_export, NULL, NULL, NULL, exp); exp->fd_handler_set_up = true; @@ -246,6 +292,8 @@ static void read_from_fuse_export(void *opaque) blk_exp_ref(&exp->common); + qatomic_inc(&exp->in_flight); + do { ret = fuse_session_receive_buf(exp->fuse_session, &exp->fuse_buf); } while (ret == -EINTR); @@ -256,6 +304,10 @@ static void read_from_fuse_export(void *opaque) fuse_session_process_buf(exp->fuse_session, &exp->fuse_buf); out: + if (qatomic_fetch_dec(&exp->in_flight) == 1) { + aio_wait_kick(); /* wake AIO_WAIT_WHILE() */ + } + blk_exp_unref(&exp->common); } @@ -268,7 +320,7 @@ static void fuse_export_shutdown(BlockExport *blk_exp) if (exp->fd_handler_set_up) { aio_set_fd_handler(exp->common.ctx, - fuse_session_fd(exp->fuse_session), true, + fuse_session_fd(exp->fuse_session), NULL, NULL, NULL, NULL, NULL); exp->fd_handler_set_up = false; } @@ -673,7 +725,16 @@ static void fuse_fallocate(fuse_req_t req, fuse_ino_t inode, int mode, do { int size = MIN(length, BDRV_REQUEST_MAX_BYTES); - ret = blk_pdiscard(exp->common.blk, offset, size); + ret = blk_pwrite_zeroes(exp->common.blk, offset, size, + BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK); + if (ret == -ENOTSUP) { + /* + * fallocate() specifies to return EOPNOTSUPP for unsupported + * operations + */ + ret = -EOPNOTSUPP; + } + offset += size; length -= size; } while (ret == 0 && length > 0); diff --git a/block/export/vduse-blk.c b/block/export/vduse-blk.c index f7ae44e3ce..83b05548e7 100644 --- a/block/export/vduse-blk.c +++ b/block/export/vduse-blk.c @@ -31,7 +31,8 @@ typedef struct VduseBlkExport { VduseDev *dev; uint16_t num_queues; char *recon_file; - unsigned int inflight; + unsigned int inflight; /* atomic */ + bool vqs_started; } VduseBlkExport; typedef struct VduseBlkReq { @@ -41,13 +42,20 @@ typedef struct VduseBlkReq { static void vduse_blk_inflight_inc(VduseBlkExport *vblk_exp) { - vblk_exp->inflight++; + if (qatomic_fetch_inc(&vblk_exp->inflight) == 0) { + /* Prevent export from being deleted */ + blk_exp_ref(&vblk_exp->export); + } } static void vduse_blk_inflight_dec(VduseBlkExport *vblk_exp) { - if (--vblk_exp->inflight == 0) { + if (qatomic_fetch_dec(&vblk_exp->inflight) == 1) { + /* Wake AIO_WAIT_WHILE() */ aio_wait_kick(); + + /* Now the export can be deleted */ + blk_exp_unref(&vblk_exp->export); } } @@ -124,8 +132,12 @@ static void vduse_blk_enable_queue(VduseDev *dev, VduseVirtq *vq) { VduseBlkExport *vblk_exp = vduse_dev_get_priv(dev); + if (!vblk_exp->vqs_started) { + return; /* vduse_blk_drained_end() will start vqs later */ + } + aio_set_fd_handler(vblk_exp->export.ctx, vduse_queue_get_fd(vq), - true, on_vduse_vq_kick, NULL, NULL, NULL, vq); + on_vduse_vq_kick, NULL, NULL, NULL, vq); /* Make sure we don't miss any kick afer reconnecting */ eventfd_write(vduse_queue_get_fd(vq), 1); } @@ -133,9 +145,14 @@ static void vduse_blk_enable_queue(VduseDev *dev, VduseVirtq *vq) static void vduse_blk_disable_queue(VduseDev *dev, VduseVirtq *vq) { VduseBlkExport *vblk_exp = vduse_dev_get_priv(dev); + int fd = vduse_queue_get_fd(vq); - aio_set_fd_handler(vblk_exp->export.ctx, 
vduse_queue_get_fd(vq), - true, NULL, NULL, NULL, NULL, NULL); + if (fd < 0) { + return; + } + + aio_set_fd_handler(vblk_exp->export.ctx, fd, + NULL, NULL, NULL, NULL, NULL); } static const VduseOps vduse_blk_ops = { @@ -152,42 +169,19 @@ static void on_vduse_dev_kick(void *opaque) static void vduse_blk_attach_ctx(VduseBlkExport *vblk_exp, AioContext *ctx) { - int i; - aio_set_fd_handler(vblk_exp->export.ctx, vduse_dev_get_fd(vblk_exp->dev), - true, on_vduse_dev_kick, NULL, NULL, NULL, + on_vduse_dev_kick, NULL, NULL, NULL, vblk_exp->dev); - for (i = 0; i < vblk_exp->num_queues; i++) { - VduseVirtq *vq = vduse_dev_get_queue(vblk_exp->dev, i); - int fd = vduse_queue_get_fd(vq); - - if (fd < 0) { - continue; - } - aio_set_fd_handler(vblk_exp->export.ctx, fd, true, - on_vduse_vq_kick, NULL, NULL, NULL, vq); - } + /* Virtqueues are handled by vduse_blk_drained_end() */ } static void vduse_blk_detach_ctx(VduseBlkExport *vblk_exp) { - int i; - - for (i = 0; i < vblk_exp->num_queues; i++) { - VduseVirtq *vq = vduse_dev_get_queue(vblk_exp->dev, i); - int fd = vduse_queue_get_fd(vq); - - if (fd < 0) { - continue; - } - aio_set_fd_handler(vblk_exp->export.ctx, fd, - true, NULL, NULL, NULL, NULL, NULL); - } aio_set_fd_handler(vblk_exp->export.ctx, vduse_dev_get_fd(vblk_exp->dev), - true, NULL, NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL); - AIO_WAIT_WHILE(vblk_exp->export.ctx, vblk_exp->inflight > 0); + /* Virtqueues are handled by vduse_blk_drained_begin() */ } @@ -220,8 +214,55 @@ static void vduse_blk_resize(void *opaque) (char *)&config.capacity); } +static void vduse_blk_stop_virtqueues(VduseBlkExport *vblk_exp) +{ + for (uint16_t i = 0; i < vblk_exp->num_queues; i++) { + VduseVirtq *vq = vduse_dev_get_queue(vblk_exp->dev, i); + vduse_blk_disable_queue(vblk_exp->dev, vq); + } + + vblk_exp->vqs_started = false; +} + +static void vduse_blk_start_virtqueues(VduseBlkExport *vblk_exp) +{ + vblk_exp->vqs_started = true; + + for (uint16_t i = 0; i < vblk_exp->num_queues; i++) { + VduseVirtq *vq = vduse_dev_get_queue(vblk_exp->dev, i); + vduse_blk_enable_queue(vblk_exp->dev, vq); + } +} + +static void vduse_blk_drained_begin(void *opaque) +{ + BlockExport *exp = opaque; + VduseBlkExport *vblk_exp = container_of(exp, VduseBlkExport, export); + + vduse_blk_stop_virtqueues(vblk_exp); +} + +static void vduse_blk_drained_end(void *opaque) +{ + BlockExport *exp = opaque; + VduseBlkExport *vblk_exp = container_of(exp, VduseBlkExport, export); + + vduse_blk_start_virtqueues(vblk_exp); +} + +static bool vduse_blk_drained_poll(void *opaque) +{ + BlockExport *exp = opaque; + VduseBlkExport *vblk_exp = container_of(exp, VduseBlkExport, export); + + return qatomic_read(&vblk_exp->inflight) > 0; +} + static const BlockDevOps vduse_block_ops = { - .resize_cb = vduse_blk_resize, + .resize_cb = vduse_blk_resize, + .drained_begin = vduse_blk_drained_begin, + .drained_end = vduse_blk_drained_end, + .drained_poll = vduse_blk_drained_poll, }; static int vduse_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, @@ -268,6 +309,7 @@ static int vduse_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, vblk_exp->handler.serial = g_strdup(vblk_opts->serial ?: ""); vblk_exp->handler.logical_block_size = logical_block_size; vblk_exp->handler.writable = opts->writable; + vblk_exp->vqs_started = true; config.capacity = cpu_to_le64(blk_getlength(exp->blk) >> VIRTIO_BLK_SECTOR_BITS); @@ -322,14 +364,20 @@ static int vduse_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, 
vduse_dev_setup_queue(vblk_exp->dev, i, queue_size); } - aio_set_fd_handler(exp->ctx, vduse_dev_get_fd(vblk_exp->dev), true, + aio_set_fd_handler(exp->ctx, vduse_dev_get_fd(vblk_exp->dev), on_vduse_dev_kick, NULL, NULL, NULL, vblk_exp->dev); blk_add_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach, vblk_exp); - blk_set_dev_ops(exp->blk, &vduse_block_ops, exp); + /* + * We handle draining ourselves using an in-flight counter and by disabling + * virtqueue fd handlers. Do not queue BlockBackend requests, they need to + * complete so the in-flight counter reaches zero. + */ + blk_set_disable_request_queuing(exp->blk, true); + return 0; err: vduse_dev_destroy(vblk_exp->dev); @@ -344,9 +392,11 @@ static void vduse_blk_exp_delete(BlockExport *exp) VduseBlkExport *vblk_exp = container_of(exp, VduseBlkExport, export); int ret; + assert(qatomic_read(&vblk_exp->inflight) == 0); + + vduse_blk_detach_ctx(vblk_exp); blk_remove_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach, vblk_exp); - blk_set_dev_ops(exp->blk, NULL, NULL); ret = vduse_dev_destroy(vblk_exp->dev); if (ret != -EBUSY) { unlink(vblk_exp->recon_file); @@ -355,13 +405,12 @@ static void vduse_blk_exp_delete(BlockExport *exp) g_free(vblk_exp->handler.serial); } +/* Called with exp->ctx acquired */ static void vduse_blk_exp_request_shutdown(BlockExport *exp) { VduseBlkExport *vblk_exp = container_of(exp, VduseBlkExport, export); - aio_context_acquire(vblk_exp->export.ctx); - vduse_blk_detach_ctx(vblk_exp); - aio_context_acquire(vblk_exp->export.ctx); + vduse_blk_stop_virtqueues(vblk_exp); } const BlockExportDriver blk_exp_vduse_blk = { diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c index 3409d9e02e..81b59761e3 100644 --- a/block/export/vhost-user-blk-server.c +++ b/block/export/vhost-user-blk-server.c @@ -10,6 +10,7 @@ * later. See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" +#include "qemu/error-report.h" #include "block/block.h" #include "subprojects/libvhost-user/libvhost-user.h" /* only for the type definitions */ #include "standard-headers/linux/virtio_blk.h" @@ -49,7 +50,10 @@ static void vu_blk_req_complete(VuBlkReq *req, size_t in_len) free(req); } -/* Called with server refcount increased, must decrease before returning */ +/* + * Called with server in_flight counter increased, must decrease before + * returning. + */ static void coroutine_fn vu_blk_virtio_process_req(void *opaque) { VuBlkReq *req = opaque; @@ -67,12 +71,12 @@ static void coroutine_fn vu_blk_virtio_process_req(void *opaque) in_num, out_num); if (in_len < 0) { free(req); - vhost_user_server_unref(server); + vhost_user_server_dec_in_flight(server); return; } vu_blk_req_complete(req, in_len); - vhost_user_server_unref(server); + vhost_user_server_dec_in_flight(server); } static void vu_blk_process_vq(VuDev *vu_dev, int idx) @@ -94,7 +98,7 @@ static void vu_blk_process_vq(VuDev *vu_dev, int idx) Coroutine *co = qemu_coroutine_create(vu_blk_virtio_process_req, req); - vhost_user_server_ref(server); + vhost_user_server_inc_in_flight(server); qemu_coroutine_enter(co); } } @@ -208,15 +212,21 @@ static void blk_aio_attached(AioContext *ctx, void *opaque) { VuBlkExport *vexp = opaque; + /* + * The actual attach will happen in vu_blk_drained_end() and we just + * restore ctx here. 
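+ * + * This relies on the AioContext switch always happening inside a drained + * section: vu_blk_drained_begin() has already detached the server, and + * vu_blk_drained_end() will re-attach it to the context stored here.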
+ */ vexp->export.ctx = ctx; - vhost_user_server_attach_aio_context(&vexp->vu_server, ctx); } static void blk_aio_detach(void *opaque) { VuBlkExport *vexp = opaque; - vhost_user_server_detach_aio_context(&vexp->vu_server); + /* + * The actual detach already happened in vu_blk_drained_begin() but from + * this point on we must not access ctx anymore. + */ vexp->export.ctx = NULL; } @@ -251,6 +261,58 @@ static void vu_blk_exp_request_shutdown(BlockExport *exp) vhost_user_server_stop(&vexp->vu_server); } +static void vu_blk_exp_resize(void *opaque) +{ + VuBlkExport *vexp = opaque; + BlockDriverState *bs = blk_bs(vexp->handler.blk); + int64_t new_size = bdrv_getlength(bs); + + if (new_size < 0) { + error_printf("Failed to get length of block node '%s'", + bdrv_get_node_name(bs)); + return; + } + + vexp->blkcfg.capacity = cpu_to_le64(new_size >> VIRTIO_BLK_SECTOR_BITS); + + vu_config_change_msg(&vexp->vu_server.vu_dev); +} + +/* Called with vexp->export.ctx acquired */ +static void vu_blk_drained_begin(void *opaque) +{ + VuBlkExport *vexp = opaque; + + vhost_user_server_detach_aio_context(&vexp->vu_server); +} + +/* Called with vexp->export.blk AioContext acquired */ +static void vu_blk_drained_end(void *opaque) +{ + VuBlkExport *vexp = opaque; + + vhost_user_server_attach_aio_context(&vexp->vu_server, vexp->export.ctx); +} + +/* + * Ensures that bdrv_drained_begin() waits until in-flight requests complete. + * + * Called with vexp->export.ctx acquired. + */ +static bool vu_blk_drained_poll(void *opaque) +{ + VuBlkExport *vexp = opaque; + + return vhost_user_server_has_in_flight(&vexp->vu_server); +} + +static const BlockDevOps vu_blk_dev_ops = { + .drained_begin = vu_blk_drained_begin, + .drained_end = vu_blk_drained_end, + .drained_poll = vu_blk_drained_poll, + .resize_cb = vu_blk_exp_resize, +}; + static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, Error **errp) { @@ -292,6 +354,8 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, blk_add_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach, vexp); + blk_set_dev_ops(exp->blk, &vu_blk_dev_ops, vexp); + if (!vhost_user_server_start(&vexp->vu_server, vu_opts->addr, exp->ctx, num_queues, &vu_blk_iface, errp)) { blk_remove_aio_context_notifier(exp->blk, blk_aio_attached, diff --git a/block/export/virtio-blk-handler.c b/block/export/virtio-blk-handler.c index 313666e8ab..bc1cec6757 100644 --- a/block/export/virtio-blk-handler.c +++ b/block/export/virtio-blk-handler.c @@ -22,8 +22,9 @@ struct virtio_blk_inhdr { unsigned char status; }; -static bool virtio_blk_sect_range_ok(BlockBackend *blk, uint32_t block_size, - uint64_t sector, size_t size) +static bool coroutine_fn +virtio_blk_sect_range_ok(BlockBackend *blk, uint32_t block_size, + uint64_t sector, size_t size) { uint64_t nb_sectors; uint64_t total_sectors; @@ -41,7 +42,7 @@ static bool virtio_blk_sect_range_ok(BlockBackend *blk, uint32_t block_size, if ((sector << VIRTIO_BLK_SECTOR_BITS) % block_size) { return false; } - blk_get_geometry(blk, &total_sectors); + blk_co_get_geometry(blk, &total_sectors); if (sector > total_sectors || nb_sectors > total_sectors - sector) { return false; } diff --git a/block/file-posix.c b/block/file-posix.c index 5760cf22d1..0ab158efba 100644 --- a/block/file-posix.c +++ b/block/file-posix.c @@ -68,6 +68,9 @@ #include <sys/ioctl.h> #include <sys/param.h> #include <sys/syscall.h> +#if defined(CONFIG_BLKZONED) +#include <linux/blkzoned.h> +#endif #include <linux/cdrom.h> #include <linux/fd.h> #include <linux/fs.h> @@ -157,6 +160,7 @@ typedef struct BDRVRawState { bool has_write_zeroes:1; bool use_linux_aio:1;
bool use_linux_io_uring:1; + int64_t *offset; /* offset of zone append operation */ int page_cache_inconsistent; /* errno from fdatasync failure */ bool has_fallocate; bool needs_alignment; @@ -216,6 +220,13 @@ typedef struct RawPosixAIOData { PreallocMode prealloc; Error **errp; } truncate; + struct { + unsigned int *nr_zones; + BlockZoneDescriptor *zones; + } zone_report; + struct { + unsigned long op; + } zone_mgmt; }; } RawPosixAIOData; @@ -766,6 +777,18 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, goto fail; } } +#ifdef CONFIG_BLKZONED + /* + * The kernel page cache does not reliably work for writes to SWR zones + * of a zoned block device because it cannot guarantee the order of writes. + */ + if ((bs->bl.zoned != BLK_Z_NONE) && + (!(s->open_flags & O_DIRECT))) { + error_setg(errp, "The driver supports zoned devices, but it requires " + "cache.direct=on, which was not specified"); + return -EINVAL; /* No host kernel page cache */ + } +#endif if (S_ISBLK(st.st_mode)) { #ifdef __linux__ @@ -1202,15 +1225,91 @@ static int hdev_get_max_hw_transfer(int fd, struct stat *st) #endif } +/* + * Get a sysfs attribute value as character string. + */ +#ifdef CONFIG_LINUX +static int get_sysfs_str_val(struct stat *st, const char *attribute, + char **val) { + g_autofree char *sysfspath = NULL; + int ret; + size_t len; + + if (!S_ISBLK(st->st_mode)) { + return -ENOTSUP; + } + + sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/%s", + major(st->st_rdev), minor(st->st_rdev), + attribute); + ret = g_file_get_contents(sysfspath, val, &len, NULL); + if (ret == -1) { + return -ENOENT; + } + + /* The file ends with '\n'; strip it */ + char *p; + p = *val; + if (*(p + len - 1) == '\n') { + *(p + len - 1) = '\0'; + } + return ret; } +#endif + +#if defined(CONFIG_BLKZONED) +static int get_sysfs_zoned_model(struct stat *st, BlockZoneModel *zoned) +{ + g_autofree char *val = NULL; + int ret; + + ret = get_sysfs_str_val(st, "zoned", &val); + if (ret < 0) { + return ret; + } + + if (strcmp(val, "host-managed") == 0) { + *zoned = BLK_Z_HM; + } else if (strcmp(val, "host-aware") == 0) { + *zoned = BLK_Z_HA; + } else if (strcmp(val, "none") == 0) { + *zoned = BLK_Z_NONE; + } else { + return -ENOTSUP; + } + return 0; } +#endif /* defined(CONFIG_BLKZONED) */ + +/* + * Get a sysfs attribute value as a long integer. + */ +#ifdef CONFIG_LINUX +static long get_sysfs_long_val(struct stat *st, const char *attribute) +{ + g_autofree char *str = NULL; + const char *end; + long val; + int ret; + + ret = get_sysfs_str_val(st, attribute, &str); + if (ret < 0) { + return ret; + } + + /* Pass 'end' to verify that the whole string (trailing '\n' already stripped) was parsed.
*/ + ret = qemu_strtol(str, &end, 10, &val); + if (ret == 0 && end && *end == '\0') { + ret = val; + } + return ret; +} +#endif + static int hdev_get_max_segments(int fd, struct stat *st) { #ifdef CONFIG_LINUX - char buf[32]; - const char *end; - char *sysfspath = NULL; int ret; - int sysfd = -1; - long max_segments; if (S_ISCHR(st->st_mode)) { if (ioctl(fd, SG_GET_SG_TABLESIZE, &ret) == 0) { @@ -1218,44 +1317,176 @@ static int hdev_get_max_segments(int fd, struct stat *st) return ret; } return -ENOTSUP; } - - if (!S_ISBLK(st->st_mode)) { - return -ENOTSUP; - } - - sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/max_segments", - major(st->st_rdev), minor(st->st_rdev)); - sysfd = open(sysfspath, O_RDONLY); - if (sysfd == -1) { - ret = -errno; - goto out; - } - ret = RETRY_ON_EINTR(read(sysfd, buf, sizeof(buf) - 1)); - if (ret < 0) { - ret = -errno; - goto out; - } else if (ret == 0) { - ret = -EIO; - goto out; - } - buf[ret] = 0; - /* The file is ended with '\n', pass 'end' to accept that. */ - ret = qemu_strtol(buf, &end, 10, &max_segments); - if (ret == 0 && end && *end == '\n') { - ret = max_segments; - } - -out: - if (sysfd != -1) { - close(sysfd); - } - g_free(sysfspath); - return ret; + return get_sysfs_long_val(st, "max_segments"); #else return -ENOTSUP; #endif } +#if defined(CONFIG_BLKZONED) +/* + * If the reset_all flag is true, reset the write pointer of every zone whose + * state is not read-only or offline to the zone's start sector. Otherwise, + * take the real write pointer reported by the device. + */ +static int get_zones_wp(BlockDriverState *bs, int fd, int64_t offset, + unsigned int nrz, bool reset_all) +{ + struct blk_zone *blkz; + size_t rep_size; + uint64_t sector = offset >> BDRV_SECTOR_BITS; + BlockZoneWps *wps = bs->wps; + unsigned int j = offset / bs->bl.zone_size; + unsigned int n = 0, i = 0; + int ret; + rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone); + g_autofree struct blk_zone_report *rep = NULL; + + rep = g_malloc(rep_size); + blkz = (struct blk_zone *)(rep + 1); + while (n < nrz) { + memset(rep, 0, rep_size); + rep->sector = sector; + rep->nr_zones = nrz - n; + + do { + ret = ioctl(fd, BLKREPORTZONE, rep); + } while (ret != 0 && errno == EINTR); + if (ret != 0) { + error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d", + fd, offset, errno); + return -errno; + } + + if (!rep->nr_zones) { + break; + } + + for (i = 0; i < rep->nr_zones; ++i, ++n, ++j) { + /* + * The wp tracking cares only about sequential writes required and + * sequential write preferred zones so that the wp can advance to + * the right location. + * Use the most significant bit of the wp location to indicate the + * zone type: 0 for SWR/SWP zones and 1 for conventional zones.
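+ * The BDRV_ZT_IS_CONV() macro in the I/O path tests exactly this bit, + * so conventional zones are skipped when write pointers are advanced.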
+ */ + if (blkz[i].type == BLK_ZONE_TYPE_CONVENTIONAL) { + wps->wp[j] |= 1ULL << 63; + } else { + switch(blkz[i].cond) { + case BLK_ZONE_COND_FULL: + case BLK_ZONE_COND_READONLY: + /* Zone not writable */ + wps->wp[j] = (blkz[i].start + blkz[i].len) << BDRV_SECTOR_BITS; + break; + case BLK_ZONE_COND_OFFLINE: + /* Zone not writable nor readable */ + wps->wp[j] = (blkz[i].start) << BDRV_SECTOR_BITS; + break; + default: + if (reset_all) { + wps->wp[j] = blkz[i].start << BDRV_SECTOR_BITS; + } else { + wps->wp[j] = blkz[i].wp << BDRV_SECTOR_BITS; + } + break; + } + } + } + sector = blkz[i - 1].start + blkz[i - 1].len; + } + + return 0; +} + +static void update_zones_wp(BlockDriverState *bs, int fd, int64_t offset, + unsigned int nrz) +{ + if (get_zones_wp(bs, fd, offset, nrz, 0) < 0) { + error_report("update zone wp failed"); + } +} + +static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st, + Error **errp) +{ + BDRVRawState *s = bs->opaque; + BlockZoneModel zoned; + int ret; + + bs->bl.zoned = BLK_Z_NONE; + + ret = get_sysfs_zoned_model(st, &zoned); + if (ret < 0 || zoned == BLK_Z_NONE) { + return; + } + bs->bl.zoned = zoned; + + ret = get_sysfs_long_val(st, "max_open_zones"); + if (ret >= 0) { + bs->bl.max_open_zones = ret; + } + + ret = get_sysfs_long_val(st, "max_active_zones"); + if (ret >= 0) { + bs->bl.max_active_zones = ret; + } + + /* + * The zoned device must at least have zone size and nr_zones fields. + */ + ret = get_sysfs_long_val(st, "chunk_sectors"); + if (ret < 0) { + error_setg_errno(errp, -ret, "Unable to read chunk_sectors " + "sysfs attribute"); + return; + } else if (!ret) { + error_setg(errp, "Read 0 from chunk_sectors sysfs attribute"); + return; + } + bs->bl.zone_size = ret << BDRV_SECTOR_BITS; + + ret = get_sysfs_long_val(st, "nr_zones"); + if (ret < 0) { + error_setg_errno(errp, -ret, "Unable to read nr_zones " + "sysfs attribute"); + return; + } else if (!ret) { + error_setg(errp, "Read 0 from nr_zones sysfs attribute"); + return; + } + bs->bl.nr_zones = ret; + + ret = get_sysfs_long_val(st, "zone_append_max_bytes"); + if (ret > 0) { + bs->bl.max_append_sectors = ret >> BDRV_SECTOR_BITS; + } + + ret = get_sysfs_long_val(st, "physical_block_size"); + if (ret >= 0) { + bs->bl.write_granularity = ret; + } + + /* The refresh_limits() function can be called multiple times. 
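+ * Free any write-pointer array left over from an earlier call before + * allocating a new one, so repeated calls do not leak it.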
*/ + g_free(bs->wps); + bs->wps = g_malloc(sizeof(BlockZoneWps) + + sizeof(int64_t) * bs->bl.nr_zones); + ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, 0); + if (ret < 0) { + error_setg_errno(errp, -ret, "report wps failed"); + bs->wps = NULL; + return; + } + qemu_co_mutex_init(&bs->wps->colock); +} +#else /* !defined(CONFIG_BLKZONED) */ +static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st, + Error **errp) +{ + bs->bl.zoned = BLK_Z_NONE; +} +#endif /* !defined(CONFIG_BLKZONED) */ + static void raw_refresh_limits(BlockDriverState *bs, Error **errp) { BDRVRawState *s = bs->opaque; @@ -1297,6 +1528,8 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp) bs->bl.max_hw_iov = ret; } } + + raw_refresh_zoned_limits(bs, &st, errp); } static int check_for_dasd(int fd) @@ -1320,9 +1553,12 @@ static int hdev_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz) BDRVRawState *s = bs->opaque; int ret; - /* If DASD, get blocksizes */ + /* If DASD or zoned devices, get blocksizes */ if (check_for_dasd(s->fd) < 0) { - return -ENOTSUP; + /* zoned devices are not DASD */ + if (bs->bl.zoned == BLK_Z_NONE) { + return -ENOTSUP; + } } ret = probe_logical_blocksize(s->fd, &bsz->log); if (ret < 0) { @@ -1463,7 +1699,7 @@ static ssize_t handle_aiocb_rw_vector(RawPosixAIOData *aiocb) ssize_t len; len = RETRY_ON_EINTR( - (aiocb->aio_type & QEMU_AIO_WRITE) ? + (aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) ? qemu_pwritev(aiocb->aio_fildes, aiocb->io.iov, aiocb->io.niov, @@ -1492,7 +1728,7 @@ static ssize_t handle_aiocb_rw_linear(RawPosixAIOData *aiocb, char *buf) ssize_t len; while (offset < aiocb->aio_nbytes) { - if (aiocb->aio_type & QEMU_AIO_WRITE) { + if (aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) { len = pwrite(aiocb->aio_fildes, (const char *)buf + offset, aiocb->aio_nbytes - offset, @@ -1585,7 +1821,7 @@ static int handle_aiocb_rw(void *opaque) } nbytes = handle_aiocb_rw_linear(aiocb, buf); - if (!(aiocb->aio_type & QEMU_AIO_WRITE)) { + if (!(aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND))) { char *p = buf; size_t count = aiocb->aio_nbytes, copy; int i; @@ -1790,6 +2026,147 @@ static off_t copy_file_range(int in_fd, off_t *in_off, int out_fd, } #endif +/* + * parse_zone - Fill a zone descriptor + */ +#if defined(CONFIG_BLKZONED) +static inline int parse_zone(struct BlockZoneDescriptor *zone, + const struct blk_zone *blkz) { + zone->start = blkz->start << BDRV_SECTOR_BITS; + zone->length = blkz->len << BDRV_SECTOR_BITS; + zone->wp = blkz->wp << BDRV_SECTOR_BITS; + +#ifdef HAVE_BLK_ZONE_REP_CAPACITY + zone->cap = blkz->capacity << BDRV_SECTOR_BITS; +#else + zone->cap = blkz->len << BDRV_SECTOR_BITS; +#endif + + switch (blkz->type) { + case BLK_ZONE_TYPE_SEQWRITE_REQ: + zone->type = BLK_ZT_SWR; + break; + case BLK_ZONE_TYPE_SEQWRITE_PREF: + zone->type = BLK_ZT_SWP; + break; + case BLK_ZONE_TYPE_CONVENTIONAL: + zone->type = BLK_ZT_CONV; + break; + default: + error_report("Unsupported zone type: 0x%x", blkz->type); + return -ENOTSUP; + } + + switch (blkz->cond) { + case BLK_ZONE_COND_NOT_WP: + zone->state = BLK_ZS_NOT_WP; + break; + case BLK_ZONE_COND_EMPTY: + zone->state = BLK_ZS_EMPTY; + break; + case BLK_ZONE_COND_IMP_OPEN: + zone->state = BLK_ZS_IOPEN; + break; + case BLK_ZONE_COND_EXP_OPEN: + zone->state = BLK_ZS_EOPEN; + break; + case BLK_ZONE_COND_CLOSED: + zone->state = BLK_ZS_CLOSED; + break; + case BLK_ZONE_COND_READONLY: + zone->state = BLK_ZS_RDONLY; + break; + case BLK_ZONE_COND_FULL: + zone->state = BLK_ZS_FULL; + 
break; + case BLK_ZONE_COND_OFFLINE: + zone->state = BLK_ZS_OFFLINE; + break; + default: + error_report("Unsupported zone state: 0x%x", blkz->cond); + return -ENOTSUP; + } + return 0; +} +#endif + +#if defined(CONFIG_BLKZONED) +static int handle_aiocb_zone_report(void *opaque) +{ + RawPosixAIOData *aiocb = opaque; + int fd = aiocb->aio_fildes; + unsigned int *nr_zones = aiocb->zone_report.nr_zones; + BlockZoneDescriptor *zones = aiocb->zone_report.zones; + /* zoned block devices use 512-byte sectors */ + uint64_t sector = aiocb->aio_offset / 512; + + struct blk_zone *blkz; + size_t rep_size; + unsigned int nrz; + int ret; + unsigned int n = 0, i = 0; + + nrz = *nr_zones; + rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone); + g_autofree struct blk_zone_report *rep = NULL; + rep = g_malloc(rep_size); + + blkz = (struct blk_zone *)(rep + 1); + while (n < nrz) { + memset(rep, 0, rep_size); + rep->sector = sector; + rep->nr_zones = nrz - n; + + do { + ret = ioctl(fd, BLKREPORTZONE, rep); + } while (ret != 0 && errno == EINTR); + if (ret != 0) { + error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d", + fd, sector, errno); + return -errno; + } + + if (!rep->nr_zones) { + break; + } + + for (i = 0; i < rep->nr_zones; i++, n++) { + ret = parse_zone(&zones[n], &blkz[i]); + if (ret != 0) { + return ret; + } + + /* The next report should start after the last zone reported */ + sector = blkz[i].start + blkz[i].len; + } + } + + *nr_zones = n; + return 0; +} +#endif + +#if defined(CONFIG_BLKZONED) +static int handle_aiocb_zone_mgmt(void *opaque) +{ + RawPosixAIOData *aiocb = opaque; + int fd = aiocb->aio_fildes; + uint64_t sector = aiocb->aio_offset / 512; + int64_t nr_sectors = aiocb->aio_nbytes / 512; + struct blk_zone_range range; + int ret; + + /* Execute the operation */ + range.sector = sector; + range.nr_sectors = nr_sectors; + do { + ret = ioctl(fd, aiocb->zone_mgmt.op, &range); + } while (ret != 0 && errno == EINTR); + + return ret < 0 ? 
-errno : ret; +} +#endif + static int handle_aiocb_copy_range(void *opaque) { RawPosixAIOData *aiocb = opaque; @@ -2040,12 +2417,9 @@ out: return result; } -static int coroutine_fn raw_thread_pool_submit(BlockDriverState *bs, - ThreadPoolFunc func, void *arg) +static int coroutine_fn raw_thread_pool_submit(ThreadPoolFunc func, void *arg) { - /* @bs can be NULL, bdrv_get_aio_context() returns the main context then */ - ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs)); - return thread_pool_submit_co(pool, func, arg); + return thread_pool_submit_co(func, arg); } /* @@ -2075,9 +2449,19 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, { BDRVRawState *s = bs->opaque; RawPosixAIOData acb; + int ret; if (fd_open(bs) < 0) return -EIO; +#if defined(CONFIG_BLKZONED) + if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) && bs->wps) { + qemu_co_mutex_lock(&bs->wps->colock); + if (type & QEMU_AIO_ZONE_APPEND && bs->bl.zone_size) { + int index = offset / bs->bl.zone_size; + offset = bs->wps->wp[index]; + } + } +#endif /* * When using O_DIRECT, the request must be aligned to be able to use @@ -2089,16 +2473,16 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, type |= QEMU_AIO_MISALIGNED; #ifdef CONFIG_LINUX_IO_URING } else if (s->use_linux_io_uring) { - LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); assert(qiov->size == bytes); - return luring_co_submit(bs, aio, s->fd, offset, qiov, type); + ret = luring_co_submit(bs, s->fd, offset, qiov, type); + goto out; #endif #ifdef CONFIG_LINUX_AIO } else if (s->use_linux_aio) { - LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); assert(qiov->size == bytes); - return laio_co_submit(bs, aio, s->fd, offset, qiov, type, + ret = laio_co_submit(s->fd, offset, qiov, type, s->aio_max_batch); + goto out; #endif } @@ -2115,7 +2499,41 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, }; assert(qiov->size == bytes); - return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb); + ret = raw_thread_pool_submit(handle_aiocb_rw, &acb); + goto out; /* Avoid the compiler err of unused label */ + +out: +#if defined(CONFIG_BLKZONED) +{ + BlockZoneWps *wps = bs->wps; + if (ret == 0) { + if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) + && wps && bs->bl.zone_size) { + uint64_t *wp = &wps->wp[offset / bs->bl.zone_size]; + if (!BDRV_ZT_IS_CONV(*wp)) { + if (type & QEMU_AIO_ZONE_APPEND) { + *s->offset = *wp; + trace_zbd_zone_append_complete(bs, *s->offset + >> BDRV_SECTOR_BITS); + } + /* Advance the wp if needed */ + if (offset + bytes > *wp) { + *wp = offset + bytes; + } + } + } + } else { + if (type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) { + update_zones_wp(bs, s->fd, 0, 1); + } + } + + if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) && wps) { + qemu_co_mutex_unlock(&wps->colock); + } +} +#endif + return ret; } static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset, @@ -2137,14 +2555,12 @@ static void coroutine_fn raw_co_io_plug(BlockDriverState *bs) BDRVRawState __attribute__((unused)) *s = bs->opaque; #ifdef CONFIG_LINUX_AIO if (s->use_linux_aio) { - LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); - laio_io_plug(bs, aio); + laio_io_plug(); } #endif #ifdef CONFIG_LINUX_IO_URING if (s->use_linux_io_uring) { - LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); - luring_io_plug(bs, aio); + luring_io_plug(); } #endif } @@ -2154,14 +2570,12 @@ static void coroutine_fn 
raw_co_io_unplug(BlockDriverState *bs)
     BDRVRawState __attribute__((unused)) *s = bs->opaque;
 #ifdef CONFIG_LINUX_AIO
     if (s->use_linux_aio) {
-        LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
-        laio_io_unplug(bs, aio, s->aio_max_batch);
+        laio_io_unplug(s->aio_max_batch);
     }
 #endif
 #ifdef CONFIG_LINUX_IO_URING
     if (s->use_linux_io_uring) {
-        LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
-        luring_io_unplug(bs, aio);
+        luring_io_unplug();
     }
 #endif
 }
@@ -2185,11 +2599,10 @@ static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
 
 #ifdef CONFIG_LINUX_IO_URING
     if (s->use_linux_io_uring) {
-        LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
-        return luring_co_submit(bs, aio, s->fd, 0, NULL, QEMU_AIO_FLUSH);
+        return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH);
     }
 #endif
-    return raw_thread_pool_submit(bs, handle_aiocb_flush, &acb);
+    return raw_thread_pool_submit(handle_aiocb_flush, &acb);
 }
 
 static void raw_aio_attach_aio_context(BlockDriverState *bs,
@@ -2223,6 +2636,9 @@ static void raw_close(BlockDriverState *bs)
     BDRVRawState *s = bs->opaque;
 
     if (s->fd >= 0) {
+#if defined(CONFIG_BLKZONED)
+        g_free(bs->wps);
+#endif
         qemu_close(s->fd);
         s->fd = -1;
     }
@@ -2251,7 +2667,7 @@ raw_regular_truncate(BlockDriverState *bs, int fd, int64_t offset,
         },
     };
 
-    return raw_thread_pool_submit(bs, handle_aiocb_truncate, &acb);
+    return raw_thread_pool_submit(handle_aiocb_truncate, &acb);
 }
 
 static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
@@ -2980,6 +3396,171 @@ static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret)
     }
 }
 
+/*
+ * zone report - Get a zone block device's information in the form
+ * of an array of zone descriptors.
+ * zones is an array of zone descriptors to hold zone information on reply;
+ * offset can be any byte within the entire size of the device;
+ * nr_zones is the maximum number of zones the command should operate on.
+ */
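For readers who have not used the Linux zoned-block UAPI that this driver sits on: BLKREPORTZONE fills a caller-allocated struct blk_zone_report (a header followed by a flexible array of struct blk_zone, the same layout handle_aiocb_zone_report walks), and the management ioctls such as BLKRESETZONE take a struct blk_zone_range. The standalone sketch below exercises both directly; it assumes a Linux host with <linux/blkzoned.h> and a zoned test device at the hypothetical path /dev/nullb0, with error handling abbreviated. It is an illustration of the kernel interface only, not QEMU code.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
    /* Hypothetical zoned device; null_blk with zoned=1 is handy for tests. */
    int fd = open("/dev/nullb0", O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Report up to 16 zones starting at sector 0 (512-byte sectors). */
    unsigned int nrz = 16;
    struct blk_zone_report *rep =
        calloc(1, sizeof(*rep) + nrz * sizeof(struct blk_zone));
    rep->sector = 0;
    rep->nr_zones = nrz;
    if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
        perror("BLKREPORTZONE");
        return 1;
    }
    for (unsigned int i = 0; i < rep->nr_zones; i++) {
        struct blk_zone *z = &rep->zones[i];
        printf("zone %u: start %llu len %llu wp %llu type %u cond %u\n", i,
               (unsigned long long)z->start, (unsigned long long)z->len,
               (unsigned long long)z->wp, (unsigned)z->type, (unsigned)z->cond);
    }

    /* Rewind the write pointer of the first reported zone. */
    struct blk_zone_range range = {
        .sector = rep->zones[0].start,
        .nr_sectors = rep->zones[0].len,
    };
    if (ioctl(fd, BLKRESETZONE, &range) < 0) {
        perror("BLKRESETZONE");
    }

    free(rep);
    close(fd);
    return 0;
}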
+#if defined(CONFIG_BLKZONED)
+static int coroutine_fn raw_co_zone_report(BlockDriverState *bs, int64_t offset,
+                                           unsigned int *nr_zones,
+                                           BlockZoneDescriptor *zones) {
+    BDRVRawState *s = bs->opaque;
+    RawPosixAIOData acb = (RawPosixAIOData) {
+        .bs         = bs,
+        .aio_fildes = s->fd,
+        .aio_type   = QEMU_AIO_ZONE_REPORT,
+        .aio_offset = offset,
+        .zone_report = {
+            .nr_zones = nr_zones,
+            .zones    = zones,
+        },
+    };
+
+    trace_zbd_zone_report(bs, *nr_zones, offset >> BDRV_SECTOR_BITS);
+    return raw_thread_pool_submit(handle_aiocb_zone_report, &acb);
+}
+#endif
+
+/*
+ * zone management operations - Execute an operation on a zone
+ */
+#if defined(CONFIG_BLKZONED)
+static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
+                                         int64_t offset, int64_t len) {
+    BDRVRawState *s = bs->opaque;
+    RawPosixAIOData acb;
+    int64_t zone_size, zone_size_mask;
+    const char *op_name;
+    unsigned long zo;
+    int ret;
+    BlockZoneWps *wps = bs->wps;
+    int64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
+
+    zone_size = bs->bl.zone_size;
+    zone_size_mask = zone_size - 1;
+    if (offset & zone_size_mask) {
+        error_report("sector offset %" PRId64 " is not aligned to zone size "
+                     "%" PRId64 "", offset / 512, zone_size / 512);
+        return -EINVAL;
+    }
+
+    if (((offset + len) < capacity && len & zone_size_mask) ||
+        offset + len > capacity) {
+        error_report("number of sectors %" PRId64 " is not aligned to zone size"
+                     " %" PRId64 "", len / 512, zone_size / 512);
+        return -EINVAL;
+    }
+
+    uint32_t i = offset / bs->bl.zone_size;
+    uint32_t nrz = len / bs->bl.zone_size;
+    uint64_t *wp = &wps->wp[i];
+    if (BDRV_ZT_IS_CONV(*wp) && len != capacity) {
+        error_report("zone mgmt operations are not allowed for conventional zones");
+        return -EIO;
+    }
+
+    switch (op) {
+    case BLK_ZO_OPEN:
+        op_name = "BLKOPENZONE";
+        zo = BLKOPENZONE;
+        break;
+    case BLK_ZO_CLOSE:
+        op_name = "BLKCLOSEZONE";
+        zo = BLKCLOSEZONE;
+        break;
+    case BLK_ZO_FINISH:
+        op_name = "BLKFINISHZONE";
+        zo = BLKFINISHZONE;
+        break;
+    case BLK_ZO_RESET:
+        op_name = "BLKRESETZONE";
+        zo = BLKRESETZONE;
+        break;
+    default:
+        error_report("Unsupported zone op: 0x%x", op);
+        return -ENOTSUP;
+    }
+
+    acb = (RawPosixAIOData) {
+        .bs         = bs,
+        .aio_fildes = s->fd,
+        .aio_type   = QEMU_AIO_ZONE_MGMT,
+        .aio_offset = offset,
+        .aio_nbytes = len,
+        .zone_mgmt  = {
+            .op = zo,
+        },
+    };
+
+    trace_zbd_zone_mgmt(bs, op_name, offset >> BDRV_SECTOR_BITS,
+                        len >> BDRV_SECTOR_BITS);
+    ret = raw_thread_pool_submit(handle_aiocb_zone_mgmt, &acb);
+    if (ret != 0) {
+        update_zones_wp(bs, s->fd, offset, i);
+        error_report("ioctl %s failed %d", op_name, ret);
+        return ret;
+    }
+
+    if (zo == BLKRESETZONE && len == capacity) {
+        ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, 1);
+        if (ret < 0) {
+            error_report("reporting single wp failed");
+            return ret;
+        }
+    } else if (zo == BLKRESETZONE) {
+        for (unsigned int j = 0; j < nrz; ++j) {
+            wp[j] = offset + j * zone_size;
+        }
+    } else if (zo == BLKFINISHZONE) {
+        for (unsigned int j = 0; j < nrz; ++j) {
+            /* The zoned device allows the last zone smaller than the
+             * zone size.
*/ + wp[j] = MIN(offset + (j + 1) * zone_size, offset + len); + } + } + + return ret; +} +#endif + +#if defined(CONFIG_BLKZONED) +static int coroutine_fn raw_co_zone_append(BlockDriverState *bs, + int64_t *offset, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { + assert(flags == 0); + int64_t zone_size_mask = bs->bl.zone_size - 1; + int64_t iov_len = 0; + int64_t len = 0; + BDRVRawState *s = bs->opaque; + s->offset = offset; + + if (*offset & zone_size_mask) { + error_report("sector offset %" PRId64 " is not aligned to zone size " + "%" PRId32 "", *offset / 512, bs->bl.zone_size / 512); + return -EINVAL; + } + + int64_t wg = bs->bl.write_granularity; + int64_t wg_mask = wg - 1; + for (int i = 0; i < qiov->niov; i++) { + iov_len = qiov->iov[i].iov_len; + if (iov_len & wg_mask) { + error_report("len of IOVector[%d] %" PRId64 " is not aligned to " + "block size %" PRId64 "", i, iov_len, wg); + return -EINVAL; + } + len += iov_len; + } + + trace_zbd_zone_append(bs, *offset >> BDRV_SECTOR_BITS); + return raw_co_prw(bs, *offset, len, qiov, QEMU_AIO_ZONE_APPEND); +} +#endif + static coroutine_fn int raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes, bool blkdev) @@ -3000,7 +3581,7 @@ raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes, acb.aio_type |= QEMU_AIO_BLKDEV; } - ret = raw_thread_pool_submit(bs, handle_aiocb_discard, &acb); + ret = raw_thread_pool_submit(handle_aiocb_discard, &acb); raw_account_discard(s, bytes, ret); return ret; } @@ -3075,7 +3656,7 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, handler = handle_aiocb_write_zeroes; } - return raw_thread_pool_submit(bs, handler, &acb); + return raw_thread_pool_submit(handler, &acb); } static int coroutine_fn raw_co_pwrite_zeroes( @@ -3313,7 +3894,7 @@ raw_co_copy_range_to(BlockDriverState *bs, }, }; - return raw_thread_pool_submit(bs, handle_aiocb_copy_range, &acb); + return raw_thread_pool_submit(handle_aiocb_copy_range, &acb); } BlockDriver bdrv_file = { @@ -3643,7 +4224,7 @@ hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) struct sg_io_hdr *io_hdr = buf; if (io_hdr->cmdp[0] == PERSISTENT_RESERVE_OUT || io_hdr->cmdp[0] == PERSISTENT_RESERVE_IN) { - return pr_manager_execute(s->pr_mgr, bdrv_get_aio_context(bs), + return pr_manager_execute(s->pr_mgr, qemu_get_current_aio_context(), s->fd, io_hdr); } } @@ -3659,7 +4240,7 @@ hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) }, }; - return raw_thread_pool_submit(bs, handle_aiocb_ioctl, &acb); + return raw_thread_pool_submit(handle_aiocb_ioctl, &acb); } #endif /* linux */ @@ -3735,6 +4316,14 @@ static BlockDriver bdrv_host_device = { #ifdef __linux__ .bdrv_co_ioctl = hdev_co_ioctl, #endif + + /* zoned device */ +#if defined(CONFIG_BLKZONED) + /* zone management operations */ + .bdrv_co_zone_report = raw_co_zone_report, + .bdrv_co_zone_mgmt = raw_co_zone_mgmt, + .bdrv_co_zone_append = raw_co_zone_append, +#endif }; #if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) @@ -3743,6 +4332,12 @@ static void cdrom_parse_filename(const char *filename, QDict *options, { bdrv_parse_filename_strip_prefix(filename, "host_cdrom:", options); } + +static void cdrom_refresh_limits(BlockDriverState *bs, Error **errp) +{ + bs->bl.has_variable_length = true; + raw_refresh_limits(bs, errp); +} #endif #ifdef __linux__ @@ -3838,14 +4433,13 @@ static BlockDriver bdrv_host_cdrom = { .bdrv_co_preadv = raw_co_preadv, .bdrv_co_pwritev = raw_co_pwritev, .bdrv_co_flush_to_disk = 
raw_co_flush_to_disk, - .bdrv_refresh_limits = raw_refresh_limits, + .bdrv_refresh_limits = cdrom_refresh_limits, .bdrv_co_io_plug = raw_co_io_plug, .bdrv_co_io_unplug = raw_co_io_unplug, .bdrv_attach_aio_context = raw_aio_attach_aio_context, .bdrv_co_truncate = raw_co_truncate, .bdrv_co_getlength = raw_co_getlength, - .has_variable_length = true, .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size, /* removable device support */ @@ -3967,14 +4561,13 @@ static BlockDriver bdrv_host_cdrom = { .bdrv_co_preadv = raw_co_preadv, .bdrv_co_pwritev = raw_co_pwritev, .bdrv_co_flush_to_disk = raw_co_flush_to_disk, - .bdrv_refresh_limits = raw_refresh_limits, + .bdrv_refresh_limits = cdrom_refresh_limits, .bdrv_co_io_plug = raw_co_io_plug, .bdrv_co_io_unplug = raw_co_io_unplug, .bdrv_attach_aio_context = raw_aio_attach_aio_context, .bdrv_co_truncate = raw_co_truncate, .bdrv_co_getlength = raw_co_getlength, - .has_variable_length = true, .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size, /* removable device support */ diff --git a/block/file-win32.c b/block/file-win32.c index c7d0b85306..48b790d917 100644 --- a/block/file-win32.c +++ b/block/file-win32.c @@ -153,7 +153,6 @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile, BlockCompletionFunc *cb, void *opaque, int type) { RawWin32AIOData *acb = g_new(RawWin32AIOData, 1); - ThreadPool *pool; acb->bs = bs; acb->hfile = hfile; @@ -168,8 +167,7 @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile, acb->aio_offset = offset; trace_file_paio_submit(acb, opaque, offset, count, type); - pool = aio_get_thread_pool(bdrv_get_aio_context(bs)); - return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque); + return thread_pool_submit_aio(aio_worker, acb, cb, opaque); } int qemu_ftruncate64(int fd, int64_t length) @@ -838,6 +836,7 @@ static void hdev_refresh_limits(BlockDriverState *bs, Error **errp) { /* XXX Does Windows support AIO on less than 512-byte alignment? */ bs->bl.request_alignment = 512; + bs->bl.has_variable_length = true; } static int hdev_open(BlockDriverState *bs, QDict *options, int flags, @@ -933,7 +932,6 @@ static BlockDriver bdrv_host_device = { .bdrv_attach_aio_context = raw_attach_aio_context, .bdrv_co_getlength = raw_co_getlength, - .has_variable_length = true, .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size, }; diff --git a/block/filter-compress.c b/block/filter-compress.c index ac285f4b66..320d9576fa 100644 --- a/block/filter-compress.c +++ b/block/filter-compress.c @@ -146,7 +146,6 @@ static BlockDriver bdrv_compress = { .bdrv_co_eject = compress_co_eject, .bdrv_co_lock_medium = compress_co_lock_medium, - .has_variable_length = true, .is_filter = true, }; diff --git a/block/graph-lock.c b/block/graph-lock.c index 454c31e691..a92c6ae219 100644 --- a/block/graph-lock.c +++ b/block/graph-lock.c @@ -30,8 +30,10 @@ BdrvGraphLock graph_lock; /* Protects the list of aiocontext and orphaned_reader_count */ static QemuMutex aio_context_list_lock; +#if 0 /* Written and read with atomic operations. */ static int has_writer; +#endif /* * A reader coroutine could move from an AioContext to another. 
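The locking scheme being compiled out in the hunks below is essentially a reader-preferring read/write lock: readers bump a per-AioContext counter, while a writer sets has_writer and waits until the summed reader count drains to zero. Stripped of coroutines, per-context counters, and the wait queue, the protocol can be modelled with C11 atomics roughly as follows; this is a toy sketch for orientation only, not the QEMU implementation.

#include <stdatomic.h>
#include <sched.h>

static atomic_int has_writer;
static atomic_int reader_count;   /* QEMU keeps one count per AioContext */

static void rdlock(void)
{
    for (;;) {
        atomic_fetch_add(&reader_count, 1);
        if (!atomic_load(&has_writer)) {
            return;               /* fast path: no writer active */
        }
        /* A writer is (or may be) active: back off and retry. */
        atomic_fetch_sub(&reader_count, 1);
        while (atomic_load(&has_writer)) {
            sched_yield();        /* QEMU parks the coroutine in reader_queue */
        }
    }
}

static void rdunlock(void)
{
    atomic_fetch_sub(&reader_count, 1);
}

static void wrlock(void)
{
    atomic_store(&has_writer, 1); /* stop new readers from entering */
    while (atomic_load(&reader_count) > 0) {
        sched_yield();            /* QEMU uses AIO_WAIT_WHILE here */
    }
}

static void wrunlock(void)
{
    atomic_store(&has_writer, 0); /* wake readers (QEMU: qemu_co_enter_all) */
}

int main(void)
{
    rdlock();  rdunlock();        /* uncontended reader */
    wrlock();  wrunlock();        /* uncontended writer */
    return 0;
}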
@@ -88,6 +90,7 @@ void unregister_aiocontext(AioContext *ctx) g_free(ctx->bdrv_graph); } +#if 0 static uint32_t reader_count(void) { BdrvGraphRWlock *brdv_graph; @@ -105,10 +108,17 @@ static uint32_t reader_count(void) assert((int32_t)rd >= 0); return rd; } +#endif void bdrv_graph_wrlock(void) { GLOBAL_STATE_CODE(); + /* + * TODO Some callers hold an AioContext lock when this is called, which + * causes deadlocks. Reenable once the AioContext locking is cleaned up (or + * AioContext locks are gone). + */ +#if 0 assert(!qatomic_read(&has_writer)); /* Make sure that constantly arriving new I/O doesn't cause starvation */ @@ -127,7 +137,7 @@ void bdrv_graph_wrlock(void) * reader lock. */ qatomic_set(&has_writer, 0); - AIO_WAIT_WHILE(qemu_get_aio_context(), reader_count() >= 1); + AIO_WAIT_WHILE_UNLOCKED(NULL, reader_count() >= 1); qatomic_set(&has_writer, 1); /* @@ -139,11 +149,13 @@ void bdrv_graph_wrlock(void) } while (reader_count() >= 1); bdrv_drain_all_end(); +#endif } void bdrv_graph_wrunlock(void) { GLOBAL_STATE_CODE(); +#if 0 QEMU_LOCK_GUARD(&aio_context_list_lock); assert(qatomic_read(&has_writer)); @@ -155,18 +167,16 @@ void bdrv_graph_wrunlock(void) /* Wake up all coroutine that are waiting to read the graph */ qemu_co_enter_all(&reader_queue, &aio_context_list_lock); +#endif } void coroutine_fn bdrv_graph_co_rdlock(void) { + /* TODO Reenable when wrlock is reenabled */ +#if 0 BdrvGraphRWlock *bdrv_graph; bdrv_graph = qemu_get_current_aio_context()->bdrv_graph; - /* Do not lock if in main thread */ - if (qemu_in_main_thread()) { - return; - } - for (;;) { qatomic_set(&bdrv_graph->reader_count, bdrv_graph->reader_count + 1); @@ -223,18 +233,15 @@ void coroutine_fn bdrv_graph_co_rdlock(void) qemu_co_queue_wait(&reader_queue, &aio_context_list_lock); } } +#endif } void coroutine_fn bdrv_graph_co_rdunlock(void) { +#if 0 BdrvGraphRWlock *bdrv_graph; bdrv_graph = qemu_get_current_aio_context()->bdrv_graph; - /* Do not lock if in main thread */ - if (qemu_in_main_thread()) { - return; - } - qatomic_store_release(&bdrv_graph->reader_count, bdrv_graph->reader_count - 1); /* make sure writer sees reader_count before we check has_writer */ @@ -249,6 +256,7 @@ void coroutine_fn bdrv_graph_co_rdunlock(void) if (qatomic_read(&has_writer)) { aio_wait_kick(); } +#endif } void bdrv_graph_rdlock_main_loop(void) @@ -265,11 +273,20 @@ void bdrv_graph_rdunlock_main_loop(void) void assert_bdrv_graph_readable(void) { + /* reader_count() is slow due to aio_context_list_lock lock contention */ + /* TODO Reenable when wrlock is reenabled */ +#if 0 +#ifdef CONFIG_DEBUG_GRAPH_LOCK assert(qemu_in_main_thread() || reader_count()); +#endif +#endif } void assert_bdrv_graph_writable(void) { assert(qemu_in_main_thread()); + /* TODO Reenable when wrlock is reenabled */ +#if 0 assert(qatomic_read(&has_writer)); +#endif } diff --git a/block/io.c b/block/io.c index 8974d46941..540bf8d26d 100644 --- a/block/io.c +++ b/block/io.c @@ -60,7 +60,7 @@ static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore) void bdrv_parent_drained_end_single(BdrvChild *c) { - IO_OR_GS_CODE(); + GLOBAL_STATE_CODE(); assert(c->quiesced_parent); c->quiesced_parent = false; @@ -108,7 +108,7 @@ static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore, void bdrv_parent_drained_begin_single(BdrvChild *c) { - IO_OR_GS_CODE(); + GLOBAL_STATE_CODE(); assert(!c->quiesced_parent); c->quiesced_parent = true; @@ -160,7 +160,6 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp) bool 
have_limits; GLOBAL_STATE_CODE(); - assume_graph_lock(); /* FIXME */ if (tran) { BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1); @@ -190,6 +189,10 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp) bdrv_merge_limits(&bs->bl, &c->bs->bl); have_limits = true; } + + if (c->role & BDRV_CHILD_FILTERED) { + bs->bl.has_variable_length |= c->bs->bl.has_variable_length; + } } if (!have_limits) { @@ -244,7 +247,7 @@ typedef struct { bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent, bool ignore_bds_parents) { - IO_OR_GS_CODE(); + GLOBAL_STATE_CODE(); if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) { return true; @@ -331,7 +334,8 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, if (ctx != co_ctx) { aio_context_release(ctx); } - replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data); + replay_bh_schedule_oneshot_event(qemu_get_aio_context(), + bdrv_co_drain_bh_cb, &data); qemu_coroutine_yield(); /* If we are resumed from some other event (such as an aio completion or a @@ -354,9 +358,10 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent, return; } + GLOBAL_STATE_CODE(); + /* Stop things in parent-to-child order */ if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) { - aio_disable_external(bdrv_get_aio_context(bs)); bdrv_parent_drained_begin(bs, parent); if (bs->drv && bs->drv->bdrv_drain_begin) { bs->drv->bdrv_drain_begin(bs); @@ -396,11 +401,14 @@ static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent) { int old_quiesce_counter; + IO_OR_GS_CODE(); + if (qemu_in_coroutine()) { bdrv_co_yield_to_drain(bs, false, parent, false); return; } assert(bs->quiesce_counter > 0); + GLOBAL_STATE_CODE(); /* Re-enable things in child-to-parent order */ old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter); @@ -409,7 +417,6 @@ static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent) bs->drv->bdrv_drain_end(bs); } bdrv_parent_drained_end(bs, parent); - aio_enable_external(bdrv_get_aio_context(bs)); } } @@ -520,7 +527,7 @@ void bdrv_drain_all_begin(void) bdrv_drain_all_begin_nopoll(); /* Now poll the in-flight requests */ - AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll()); + AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll()); while ((bs = bdrv_next_all_states(bs))) { bdrv_drain_assert_idle(bs); @@ -723,10 +730,9 @@ BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs) /** * Round a region to cluster boundaries */ -void coroutine_fn bdrv_round_to_clusters(BlockDriverState *bs, - int64_t offset, int64_t bytes, - int64_t *cluster_offset, - int64_t *cluster_bytes) +void coroutine_fn GRAPH_RDLOCK +bdrv_round_to_clusters(BlockDriverState *bs, int64_t offset, int64_t bytes, + int64_t *cluster_offset, int64_t *cluster_bytes) { BlockDriverInfo bdi; IO_CODE(); @@ -740,7 +746,7 @@ void coroutine_fn bdrv_round_to_clusters(BlockDriverState *bs, } } -static coroutine_fn int bdrv_get_cluster_size(BlockDriverState *bs) +static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs) { BlockDriverInfo bdi; int ret; @@ -1796,7 +1802,7 @@ fail: return ret; } -static inline int coroutine_fn +static inline int coroutine_fn GRAPH_RDLOCK bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes, BdrvTrackedRequest *req, int flags) { @@ -3111,6 +3117,74 @@ out: return co.ret; } +int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset, + unsigned int *nr_zones, + BlockZoneDescriptor 
*zones) +{ + BlockDriver *drv = bs->drv; + CoroutineIOCompletion co = { + .coroutine = qemu_coroutine_self(), + }; + IO_CODE(); + + bdrv_inc_in_flight(bs); + if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) { + co.ret = -ENOTSUP; + goto out; + } + co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones); +out: + bdrv_dec_in_flight(bs); + return co.ret; +} + +int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, + int64_t offset, int64_t len) +{ + BlockDriver *drv = bs->drv; + CoroutineIOCompletion co = { + .coroutine = qemu_coroutine_self(), + }; + IO_CODE(); + + bdrv_inc_in_flight(bs); + if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) { + co.ret = -ENOTSUP; + goto out; + } + co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len); +out: + bdrv_dec_in_flight(bs); + return co.ret; +} + +int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset, + QEMUIOVector *qiov, + BdrvRequestFlags flags) +{ + int ret; + BlockDriver *drv = bs->drv; + CoroutineIOCompletion co = { + .coroutine = qemu_coroutine_self(), + }; + IO_CODE(); + + ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL); + if (ret < 0) { + return ret; + } + + bdrv_inc_in_flight(bs); + if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) { + co.ret = -ENOTSUP; + goto out; + } + co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags); +out: + bdrv_dec_in_flight(bs); + return co.ret; +} + void *qemu_blockalign(BlockDriverState *bs, size_t size) { IO_CODE(); diff --git a/block/io_uring.c b/block/io_uring.c index 973e15d876..3a77480e16 100644 --- a/block/io_uring.c +++ b/block/io_uring.c @@ -18,6 +18,9 @@ #include "qapi/error.h" #include "trace.h" +/* Only used for assertions. */ +#include "qemu/coroutine_int.h" + /* io_uring ring size */ #define MAX_ENTRIES 128 @@ -50,10 +53,9 @@ typedef struct LuringState { struct io_uring ring; - /* io queue for submit at batch. Protected by AioContext lock. */ + /* No locking required, only accessed from AioContext home thread */ LuringQueue io_q; - /* I/O completion processing. Only runs in I/O thread. */ QEMUBH *completion_bh; } LuringState; @@ -209,6 +211,7 @@ end: * eventually runs later. Coroutines cannot be entered recursively * so avoid doing that! 
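Stepping back from the driver internals for a moment: luring_do_submit() and luring_process_completions() below are QEMU's batched version of the standard liburing prepare/submit/reap cycle. In isolation, with a throwaway file standing in for a guest disk, that cycle looks roughly like the following minimal sketch; it assumes liburing is installed, the path is a placeholder, and QEMU of course drives completions through fd handlers and a BH rather than blocking in io_uring_wait_cqe().

#include <liburing.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
    struct io_uring ring;
    /* Small 8-entry queue; QEMU sizes its ring with MAX_ENTRIES (128). */
    if (io_uring_queue_init(8, &ring, 0) < 0) {
        return 1;
    }

    int fd = open("/tmp/io-uring-demo", O_CREAT | O_WRONLY | O_TRUNC, 0644);
    char buf[] = "hello";
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

    /* Prepare: grab an SQE and describe the request (cf. luring_do_submit). */
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_writev(sqe, fd, &iov, 1, 0);

    /* Submit, then block for one completion. */
    io_uring_submit(&ring);
    struct io_uring_cqe *cqe;
    if (io_uring_wait_cqe(&ring, &cqe) == 0) {
        printf("res=%d\n", cqe->res);   /* bytes written, or -errno */
        io_uring_cqe_seen(&ring, cqe);  /* mark the CQE consumed */
    }

    io_uring_queue_exit(&ring);
    close(fd);
    return 0;
}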
*/ + assert(luringcb->co->ctx == s->aio_context); if (!qemu_coroutine_entered(luringcb->co)) { aio_co_wake(luringcb->co); } @@ -262,13 +265,11 @@ static int ioq_submit(LuringState *s) static void luring_process_completions_and_submit(LuringState *s) { - aio_context_acquire(s->aio_context); luring_process_completions(s); if (!s->io_q.plugged && s->io_q.in_queue > 0) { ioq_submit(s); } - aio_context_release(s->aio_context); } static void qemu_luring_completion_bh(void *opaque) @@ -306,14 +307,18 @@ static void ioq_init(LuringQueue *io_q) io_q->blocked = false; } -void luring_io_plug(BlockDriverState *bs, LuringState *s) +void luring_io_plug(void) { + AioContext *ctx = qemu_get_current_aio_context(); + LuringState *s = aio_get_linux_io_uring(ctx); trace_luring_io_plug(s); s->io_q.plugged++; } -void luring_io_unplug(BlockDriverState *bs, LuringState *s) +void luring_io_unplug(void) { + AioContext *ctx = qemu_get_current_aio_context(); + LuringState *s = aio_get_linux_io_uring(ctx); assert(s->io_q.plugged); trace_luring_io_unplug(s, s->io_q.blocked, s->io_q.plugged, s->io_q.in_queue, s->io_q.in_flight); @@ -345,6 +350,10 @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s, io_uring_prep_writev(sqes, fd, luringcb->qiov->iov, luringcb->qiov->niov, offset); break; + case QEMU_AIO_ZONE_APPEND: + io_uring_prep_writev(sqes, fd, luringcb->qiov->iov, + luringcb->qiov->niov, offset); + break; case QEMU_AIO_READ: io_uring_prep_readv(sqes, fd, luringcb->qiov->iov, luringcb->qiov->niov, offset); @@ -373,10 +382,12 @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s, return 0; } -int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd, - uint64_t offset, QEMUIOVector *qiov, int type) +int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset, + QEMUIOVector *qiov, int type) { int ret; + AioContext *ctx = qemu_get_current_aio_context(); + LuringState *s = aio_get_linux_io_uring(ctx); LuringAIOCB luringcb = { .co = qemu_coroutine_self(), .ret = -EINPROGRESS, @@ -399,7 +410,7 @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd, void luring_detach_aio_context(LuringState *s, AioContext *old_context) { - aio_set_fd_handler(old_context, s->ring.ring_fd, false, + aio_set_fd_handler(old_context, s->ring.ring_fd, NULL, NULL, NULL, NULL, s); qemu_bh_delete(s->completion_bh); s->aio_context = NULL; @@ -409,7 +420,7 @@ void luring_attach_aio_context(LuringState *s, AioContext *new_context) { s->aio_context = new_context; s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s); - aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false, + aio_set_fd_handler(s->aio_context, s->ring.ring_fd, qemu_luring_completion_cb, NULL, qemu_luring_poll_cb, qemu_luring_poll_ready, s); } diff --git a/block/iscsi.c b/block/iscsi.c index 9fc0bed90b..34f97ab646 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -363,7 +363,6 @@ iscsi_set_events(IscsiLun *iscsilun) if (ev != iscsilun->events) { aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsi), - false, (ev & POLLIN) ? iscsi_process_read : NULL, (ev & POLLOUT) ? 
iscsi_process_write : NULL, NULL, NULL, @@ -1540,7 +1539,7 @@ static void iscsi_detach_aio_context(BlockDriverState *bs) IscsiLun *iscsilun = bs->opaque; aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi), - false, NULL, NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL); iscsilun->events = 0; if (iscsilun->nop_timer) { diff --git a/block/linux-aio.c b/block/linux-aio.c index d2cfb7f523..916f001e32 100644 --- a/block/linux-aio.c +++ b/block/linux-aio.c @@ -16,6 +16,9 @@ #include "qemu/coroutine.h" #include "qapi/error.h" +/* Only used for assertions. */ +#include "qemu/coroutine_int.h" + #include /* @@ -56,10 +59,8 @@ struct LinuxAioState { io_context_t ctx; EventNotifier e; - /* io queue for submit at batch. Protected by AioContext lock. */ + /* No locking required, only accessed from AioContext home thread */ LaioQueue io_q; - - /* I/O completion processing. Only runs in I/O thread. */ QEMUBH *completion_bh; int event_idx; int event_max; @@ -102,6 +103,7 @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb) * later. Coroutines cannot be entered recursively so avoid doing * that! */ + assert(laiocb->co->ctx == laiocb->ctx->aio_context); if (!qemu_coroutine_entered(laiocb->co)) { aio_co_wake(laiocb->co); } @@ -232,13 +234,11 @@ static void qemu_laio_process_completions(LinuxAioState *s) static void qemu_laio_process_completions_and_submit(LinuxAioState *s) { - aio_context_acquire(s->aio_context); qemu_laio_process_completions(s); if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) { ioq_submit(s); } - aio_context_release(s->aio_context); } static void qemu_laio_completion_bh(void *opaque) @@ -354,14 +354,19 @@ static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch) return max_batch; } -void laio_io_plug(BlockDriverState *bs, LinuxAioState *s) +void laio_io_plug(void) { + AioContext *ctx = qemu_get_current_aio_context(); + LinuxAioState *s = aio_get_linux_aio(ctx); + s->io_q.plugged++; } -void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s, - uint64_t dev_max_batch) +void laio_io_unplug(uint64_t dev_max_batch) { + AioContext *ctx = qemu_get_current_aio_context(); + LinuxAioState *s = aio_get_linux_aio(ctx); + assert(s->io_q.plugged); s->io_q.plugged--; @@ -389,6 +394,9 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, case QEMU_AIO_WRITE: io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset); break; + case QEMU_AIO_ZONE_APPEND: + io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset); + break; case QEMU_AIO_READ: io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset); break; @@ -411,15 +419,15 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, return 0; } -int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd, - uint64_t offset, QEMUIOVector *qiov, int type, - uint64_t dev_max_batch) +int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov, + int type, uint64_t dev_max_batch) { int ret; + AioContext *ctx = qemu_get_current_aio_context(); struct qemu_laiocb laiocb = { .co = qemu_coroutine_self(), .nbytes = qiov->size, - .ctx = s, + .ctx = aio_get_linux_aio(ctx), .ret = -EINPROGRESS, .is_read = (type == QEMU_AIO_READ), .qiov = qiov, @@ -438,7 +446,7 @@ int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd, void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context) { - aio_set_event_notifier(old_context, &s->e, false, NULL, NULL, NULL); + 
aio_set_event_notifier(old_context, &s->e, NULL, NULL, NULL); qemu_bh_delete(s->completion_bh); s->aio_context = NULL; } @@ -447,7 +455,7 @@ void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context) { s->aio_context = new_context; s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s); - aio_set_event_notifier(new_context, &s->e, false, + aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb, qemu_laio_poll_cb, qemu_laio_poll_ready); diff --git a/block/meson.build b/block/meson.build index 382bec0e7d..486dda8b85 100644 --- a/block/meson.build +++ b/block/meson.build @@ -38,11 +38,6 @@ block_ss.add(files( 'snapshot-access.c', 'throttle-groups.c', 'throttle.c', - 'vhdx-endian.c', - 'vhdx-log.c', - 'vhdx.c', - 'vmdk.c', - 'vpc.c', 'write-threshold.c', ), zstd, zlib, gnutls) @@ -55,6 +50,19 @@ endif if get_option('vdi').allowed() block_ss.add(files('vdi.c')) endif +if get_option('vhdx').allowed() + block_ss.add(files( + 'vhdx-endian.c', + 'vhdx-log.c', + 'vhdx.c' + )) +endif +if get_option('vmdk').allowed() + block_ss.add(files('vmdk.c')) +endif +if get_option('vpc').allowed() + block_ss.add(files('vpc.c')) +endif if get_option('cloop').allowed() block_ss.add(files('cloop.c')) endif @@ -84,7 +92,7 @@ block_ss.add(when: 'CONFIG_WIN32', if_true: files('file-win32.c', 'win32-aio.c') block_ss.add(when: 'CONFIG_POSIX', if_true: [files('file-posix.c'), coref, iokit]) block_ss.add(when: libiscsi, if_true: files('iscsi-opts.c')) block_ss.add(when: 'CONFIG_LINUX', if_true: files('nvme.c')) -if not get_option('replication').disabled() +if get_option('replication').allowed() block_ss.add(files('replication.c')) endif block_ss.add(when: libaio, if_true: files('linux-aio.c')) diff --git a/block/mirror.c b/block/mirror.c index 663e2b7002..d3cacd1708 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -270,8 +270,8 @@ static inline int64_t mirror_clip_bytes(MirrorBlockJob *s, /* Round offset and/or bytes to target cluster if COW is needed, and * return the offset of the adjusted tail against original. */ -static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset, - uint64_t *bytes) +static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset, + uint64_t *bytes) { bool need_cow; int ret = 0; @@ -471,12 +471,11 @@ static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset, return bytes_handled; } -static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) +static void coroutine_fn mirror_iteration(MirrorBlockJob *s) { BlockDriverState *source = s->mirror_top_bs->backing->bs; MirrorOp *pseudo_op; int64_t offset; - uint64_t delay_ns = 0, ret = 0; /* At least the first dirty chunk is mirrored in one iteration. */ int nb_chunks = 1; bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target)); @@ -576,8 +575,10 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) { int64_t target_offset; int64_t target_bytes; - bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes, - &target_offset, &target_bytes); + WITH_GRAPH_RDLOCK_GUARD() { + bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes, + &target_offset, &target_bytes); + } if (target_offset == offset && target_bytes == io_bytes) { mirror_method = ret & BDRV_BLOCK_ZERO ? 
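The mirror hunk above takes the graph rdlock around bdrv_round_to_clusters(), which widens a byte range to whole clusters so that copy-on-write always happens in cluster-sized units. The underlying arithmetic is plain align-down/align-up; the following self-contained sketch with a worked example uses illustrative names, not the actual QEMU helpers (which build on BlockDriverInfo and the QEMU_ALIGN_* macros).

#include <stdint.h>
#include <stdio.h>

/* Widen [offset, offset + bytes) to whole clusters of size c. Plain
 * division is used so c need not be a power of two. */
static void round_to_clusters(int64_t offset, int64_t bytes, int64_t c,
                              int64_t *cl_offset, int64_t *cl_bytes)
{
    int64_t start = (offset / c) * c;                  /* align down */
    int64_t end   = ((offset + bytes + c - 1) / c) * c; /* align up */
    *cl_offset = start;
    *cl_bytes  = end - start;
}

int main(void)
{
    int64_t cl_offset, cl_bytes;
    /* A 4 KiB request at byte offset 70000 with 64 KiB clusters... */
    round_to_clusters(70000, 4096, 65536, &cl_offset, &cl_bytes);
    /* ...covers exactly one cluster: offset 65536, length 65536. */
    printf("%lld %lld\n", (long long)cl_offset, (long long)cl_bytes);
    return 0;
}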
@@ -606,16 +607,13 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) assert(io_bytes); offset += io_bytes; nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity); - delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct); + block_job_ratelimit_processed_bytes(&s->common, io_bytes_acct); } - ret = delay_ns; fail: QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next); qemu_co_queue_restart_all(&pseudo_op->waiting_requests); g_free(pseudo_op); - - return ret; } static void mirror_free_init(MirrorBlockJob *s) @@ -664,11 +662,15 @@ static int mirror_exit_common(Job *job) bool abort = job->ret < 0; int ret = 0; + GLOBAL_STATE_CODE(); + if (s->prepared) { return 0; } s->prepared = true; + aio_context_acquire(qemu_get_aio_context()); + mirror_top_bs = s->mirror_top_bs; bs_opaque = mirror_top_bs->opaque; src = mirror_top_bs->backing->bs; @@ -745,7 +747,10 @@ static int mirror_exit_common(Job *job) * Cannot use check_to_replace_node() here, because that would * check for an op blocker on @to_replace, and we have our own * there. + * + * TODO Pull out the writer lock from bdrv_replace_node() to here */ + bdrv_graph_rdlock_main_loop(); if (bdrv_recurse_can_replace(src, to_replace)) { bdrv_replace_node(to_replace, target_bs, &local_err); } else { @@ -754,6 +759,7 @@ static int mirror_exit_common(Job *job) "would not lead to an abrupt change of visible data", to_replace->node_name, target_bs->node_name); } + bdrv_graph_rdunlock_main_loop(); bdrv_drained_end(target_bs); if (local_err) { error_report_err(local_err); @@ -787,6 +793,8 @@ static int mirror_exit_common(Job *job) bdrv_unref(mirror_top_bs); bdrv_unref(src); + aio_context_release(qemu_get_aio_context()); + return ret; } @@ -886,9 +894,9 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s) /* Called when going out of the streaming phase to flush the bulk of the * data to the medium, or just before completing. */ -static int mirror_flush(MirrorBlockJob *s) +static int coroutine_fn mirror_flush(MirrorBlockJob *s) { - int ret = blk_flush(s->target); + int ret = blk_co_flush(s->target); if (ret < 0) { if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) { s->ret = ret; @@ -966,11 +974,13 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) */ bdrv_get_backing_filename(target_bs, backing_filename, sizeof(backing_filename)); + bdrv_graph_co_rdlock(); if (!bdrv_co_get_info(target_bs, &bdi) && bdi.cluster_size) { s->target_cluster_size = bdi.cluster_size; } else { s->target_cluster_size = BDRV_SECTOR_SIZE; } + bdrv_graph_co_rdunlock(); if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) && s->granularity < s->target_cluster_size) { s->buf_size = MAX(s->buf_size, s->target_cluster_size); @@ -1003,7 +1013,6 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) assert(!s->dbi); s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap); for (;;) { - uint64_t delay_ns = 0; int64_t cnt, delta; bool should_complete; @@ -1043,7 +1052,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) mirror_wait_for_free_in_flight_slot(s); continue; } else if (cnt != 0) { - delay_ns = mirror_iteration(s); + mirror_iteration(s); } } @@ -1106,12 +1115,14 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) } if (job_is_ready(&s->common.job) && !should_complete) { - delay_ns = (s->in_flight == 0 && - cnt == 0 ? 
BLOCK_JOB_SLICE_TIME : 0); + if (s->in_flight == 0 && cnt == 0) { + trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job), + BLOCK_JOB_SLICE_TIME); + job_sleep_ns(&s->common.job, BLOCK_JOB_SLICE_TIME); + } + } else { + block_job_ratelimit_sleep(&s->common); } - trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job), - delay_ns); - job_sleep_ns(&s->common.job, delay_ns); s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); } @@ -1416,7 +1427,7 @@ static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s, return op; } -static void coroutine_fn active_write_settle(MirrorOp *op) +static void coroutine_fn GRAPH_RDLOCK active_write_settle(MirrorOp *op) { uint64_t start_chunk = op->offset / op->s->granularity; uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes, diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c index 6aa5f1be0c..ca2599de44 100644 --- a/block/monitor/block-hmp-cmds.c +++ b/block/monitor/block-hmp-cmds.c @@ -48,6 +48,7 @@ #include "qemu/option.h" #include "qemu/sockets.h" #include "qemu/cutils.h" +#include "qemu/error-report.h" #include "sysemu/sysemu.h" #include "monitor/monitor.h" #include "monitor/hmp.h" @@ -213,15 +214,17 @@ void hmp_commit(Monitor *mon, const QDict *qdict) error_report("Device '%s' not found", device); return; } - if (!blk_is_available(blk)) { - error_report("Device '%s' has no medium", device); - return; - } bs = bdrv_skip_implicit_filters(blk_bs(blk)); aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); + if (!blk_is_available(blk)) { + error_report("Device '%s' has no medium", device); + aio_context_release(aio_context); + return; + } + ret = bdrv_commit(bs); aio_context_release(aio_context); diff --git a/block/nbd.c b/block/nbd.c index bf2894ad5c..a3f8f8a9d5 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ -322,6 +322,7 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs, int ret; IO_CODE(); + assert_bdrv_graph_readable(); assert(!s->ioc); s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp); @@ -369,7 +370,7 @@ static bool nbd_client_connecting(BDRVNBDState *s) } /* Called with s->requests_lock taken. */ -static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s) +static void coroutine_fn GRAPH_RDLOCK nbd_reconnect_attempt(BDRVNBDState *s) { int ret; bool blocking = s->state == NBD_CLIENT_CONNECTING_WAIT; @@ -480,9 +481,9 @@ static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle) } } -static int coroutine_fn nbd_co_send_request(BlockDriverState *bs, - NBDRequest *request, - QEMUIOVector *qiov) +static int coroutine_fn GRAPH_RDLOCK +nbd_co_send_request(BlockDriverState *bs, NBDRequest *request, + QEMUIOVector *qiov) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; int rc, i = -1; @@ -1171,8 +1172,9 @@ static int coroutine_fn nbd_co_receive_blockstatus_reply(BDRVNBDState *s, return iter.ret; } -static int coroutine_fn nbd_co_request(BlockDriverState *bs, NBDRequest *request, - QEMUIOVector *write_qiov) +static int coroutine_fn GRAPH_RDLOCK +nbd_co_request(BlockDriverState *bs, NBDRequest *request, + QEMUIOVector *write_qiov) { int ret, request_ret; Error *local_err = NULL; @@ -1208,9 +1210,9 @@ static int coroutine_fn nbd_co_request(BlockDriverState *bs, NBDRequest *request return ret ? 
ret : request_ret; } -static int coroutine_fn nbd_client_co_preadv(BlockDriverState *bs, int64_t offset, - int64_t bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags) +static int coroutine_fn GRAPH_RDLOCK +nbd_client_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { int ret, request_ret; Error *local_err = NULL; @@ -1266,9 +1268,9 @@ static int coroutine_fn nbd_client_co_preadv(BlockDriverState *bs, int64_t offse return ret ? ret : request_ret; } -static int coroutine_fn nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset, - int64_t bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags) +static int coroutine_fn GRAPH_RDLOCK +nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; NBDRequest request = { @@ -1291,8 +1293,9 @@ static int coroutine_fn nbd_client_co_pwritev(BlockDriverState *bs, int64_t offs return nbd_co_request(bs, &request, qiov); } -static int coroutine_fn nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int64_t bytes, BdrvRequestFlags flags) +static int coroutine_fn GRAPH_RDLOCK +nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, + BdrvRequestFlags flags) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; NBDRequest request = { @@ -1326,7 +1329,7 @@ static int coroutine_fn nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_ return nbd_co_request(bs, &request, NULL); } -static int coroutine_fn nbd_client_co_flush(BlockDriverState *bs) +static int coroutine_fn GRAPH_RDLOCK nbd_client_co_flush(BlockDriverState *bs) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; NBDRequest request = { .type = NBD_CMD_FLUSH }; @@ -1341,8 +1344,8 @@ static int coroutine_fn nbd_client_co_flush(BlockDriverState *bs) return nbd_co_request(bs, &request, NULL); } -static int coroutine_fn nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, - int64_t bytes) +static int coroutine_fn GRAPH_RDLOCK +nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; NBDRequest request = { @@ -1361,7 +1364,7 @@ static int coroutine_fn nbd_client_co_pdiscard(BlockDriverState *bs, int64_t off return nbd_co_request(bs, &request, NULL); } -static int coroutine_fn nbd_client_co_block_status( +static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status( BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) { @@ -1920,11 +1923,6 @@ fail: return ret; } -static int coroutine_fn nbd_co_flush(BlockDriverState *bs) -{ - return nbd_client_co_flush(bs); -} - static void nbd_refresh_limits(BlockDriverState *bs, Error **errp) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; @@ -2120,7 +2118,7 @@ static BlockDriver bdrv_nbd = { .bdrv_co_pwritev = nbd_client_co_pwritev, .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes, .bdrv_close = nbd_close, - .bdrv_co_flush_to_os = nbd_co_flush, + .bdrv_co_flush_to_os = nbd_client_co_flush, .bdrv_co_pdiscard = nbd_client_co_pdiscard, .bdrv_refresh_limits = nbd_refresh_limits, .bdrv_co_truncate = nbd_co_truncate, @@ -2148,7 +2146,7 @@ static BlockDriver bdrv_nbd_tcp = { .bdrv_co_pwritev = nbd_client_co_pwritev, .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes, .bdrv_close = nbd_close, - .bdrv_co_flush_to_os = nbd_co_flush, + .bdrv_co_flush_to_os = nbd_client_co_flush, .bdrv_co_pdiscard = nbd_client_co_pdiscard, 
.bdrv_refresh_limits = nbd_refresh_limits, .bdrv_co_truncate = nbd_co_truncate, @@ -2176,7 +2174,7 @@ static BlockDriver bdrv_nbd_unix = { .bdrv_co_pwritev = nbd_client_co_pwritev, .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes, .bdrv_close = nbd_close, - .bdrv_co_flush_to_os = nbd_co_flush, + .bdrv_co_flush_to_os = nbd_client_co_flush, .bdrv_co_pdiscard = nbd_client_co_pdiscard, .bdrv_refresh_limits = nbd_refresh_limits, .bdrv_co_truncate = nbd_co_truncate, diff --git a/block/nfs.c b/block/nfs.c index 351dc6ec8d..8f89ece69f 100644 --- a/block/nfs.c +++ b/block/nfs.c @@ -195,7 +195,6 @@ static void nfs_set_events(NFSClient *client) int ev = nfs_which_events(client->context); if (ev != client->events) { aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context), - false, (ev & POLLIN) ? nfs_process_read : NULL, (ev & POLLOUT) ? nfs_process_write : NULL, NULL, NULL, client); @@ -373,7 +372,7 @@ static void nfs_detach_aio_context(BlockDriverState *bs) NFSClient *client = bs->opaque; aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context), - false, NULL, NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL); client->events = 0; } @@ -391,7 +390,7 @@ static void nfs_client_close(NFSClient *client) if (client->context) { qemu_mutex_lock(&client->mutex); aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context), - false, NULL, NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL); qemu_mutex_unlock(&client->mutex); if (client->fh) { nfs_close(client->context, client->fh); @@ -726,10 +725,8 @@ nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data, if (task->ret < 0) { error_report("NFS Error: %s", nfs_get_error(nfs)); } - - /* Set task->complete before reading bs->wakeup. */ - qatomic_mb_set(&task->complete, 1); - bdrv_wakeup(task->bs); + replay_bh_schedule_oneshot_event(task->client->aio_context, + nfs_co_generic_bh_cb, task); } static int64_t coroutine_fn nfs_co_get_allocated_file_size(BlockDriverState *bs) @@ -743,15 +740,19 @@ static int64_t coroutine_fn nfs_co_get_allocated_file_size(BlockDriverState *bs) return client->st_blocks * 512; } - task.bs = bs; + nfs_co_init_task(bs, &task); task.st = &st; - if (nfs_fstat_async(client->context, client->fh, nfs_get_allocated_file_size_cb, - &task) != 0) { - return -ENOMEM; - } + WITH_QEMU_LOCK_GUARD(&client->mutex) { + if (nfs_fstat_async(client->context, client->fh, nfs_get_allocated_file_size_cb, + &task) != 0) { + return -ENOMEM; + } - nfs_set_events(client); - BDRV_POLL_WHILE(bs, !task.complete); + nfs_set_events(client); + } + while (!task.complete) { + qemu_coroutine_yield(); + } return (task.ret < 0 ? 
task.ret : st.st_blocks * 512); } diff --git a/block/nvme.c b/block/nvme.c index 5b744c2bda..17937d398d 100644 --- a/block/nvme.c +++ b/block/nvme.c @@ -862,7 +862,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, } aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier[MSIX_SHARED_IRQ_IDX], - false, nvme_handle_event, nvme_poll_cb, + nvme_handle_event, nvme_poll_cb, nvme_poll_ready); if (!nvme_identify(bs, namespace, errp)) { @@ -948,7 +948,7 @@ static void nvme_close(BlockDriverState *bs) g_free(s->queues); aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier[MSIX_SHARED_IRQ_IDX], - false, NULL, NULL, NULL); + NULL, NULL, NULL); event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]); qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map, 0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE); @@ -1546,7 +1546,7 @@ static void nvme_detach_aio_context(BlockDriverState *bs) aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier[MSIX_SHARED_IRQ_IDX], - false, NULL, NULL, NULL); + NULL, NULL, NULL); } static void nvme_attach_aio_context(BlockDriverState *bs, @@ -1556,7 +1556,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs, s->aio_context = new_context; aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX], - false, nvme_handle_event, nvme_poll_cb, + nvme_handle_event, nvme_poll_cb, nvme_poll_ready); for (unsigned i = 0; i < s->queue_count; i++) { diff --git a/block/parallels.c b/block/parallels.c index 013684801a..d8a3f13e24 100644 --- a/block/parallels.c +++ b/block/parallels.c @@ -522,8 +522,8 @@ out: } -static int coroutine_fn parallels_co_create(BlockdevCreateOptions* opts, - Error **errp) +static int coroutine_fn GRAPH_UNLOCKED +parallels_co_create(BlockdevCreateOptions* opts, Error **errp) { BlockdevCreateOptionsParallels *parallels_opts; BlockDriverState *bs; @@ -613,8 +613,8 @@ static int coroutine_fn parallels_co_create(BlockdevCreateOptions* opts, ret = 0; out: - blk_unref(blk); - bdrv_unref(bs); + blk_co_unref(blk); + bdrv_co_unref(bs); return ret; exit: @@ -622,7 +622,7 @@ exit: goto out; } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED parallels_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp) { @@ -691,7 +691,7 @@ parallels_co_create_opts(BlockDriver *drv, const char *filename, done: qobject_unref(qdict); - bdrv_unref(bs); + bdrv_co_unref(bs); qapi_free_BlockdevCreateOptions(create_options); return ret; } diff --git a/block/preallocate.c b/block/preallocate.c index 71c3601809..4d82125036 100644 --- a/block/preallocate.c +++ b/block/preallocate.c @@ -558,7 +558,6 @@ BlockDriver bdrv_preallocate_filter = { .bdrv_set_perm = preallocate_set_perm, .bdrv_child_perm = preallocate_child_perm, - .has_variable_length = true, .is_filter = true, }; diff --git a/block/qapi-sysemu.c b/block/qapi-sysemu.c index 7bd7554150..ef07151892 100644 --- a/block/qapi-sysemu.c +++ b/block/qapi-sysemu.c @@ -362,7 +362,10 @@ void qmp_blockdev_change_medium(const char *device, qdict_put_str(options, "driver", format); } + aio_context_acquire(qemu_get_aio_context()); medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp); + aio_context_release(qemu_get_aio_context()); + if (!medium_bs) { goto fail; } @@ -517,6 +520,7 @@ void qmp_block_latency_histogram_set( bool has_boundaries, uint64List *boundaries, bool has_boundaries_read, uint64List *boundaries_read, bool has_boundaries_write, uint64List *boundaries_write, + bool has_boundaries_append, 
uint64List *boundaries_append, bool has_boundaries_flush, uint64List *boundaries_flush, Error **errp) { @@ -557,6 +561,16 @@ void qmp_block_latency_histogram_set( } } + if (has_boundaries || has_boundaries_append) { + ret = block_latency_histogram_set( + stats, BLOCK_ACCT_ZONE_APPEND, + has_boundaries_append ? boundaries_append : boundaries); + if (ret) { + error_setg(errp, "Device '%s' set append write boundaries fail", id); + return; + } + } + if (has_boundaries || has_boundaries_flush) { ret = block_latency_histogram_set( stats, BLOCK_ACCT_FLUSH, diff --git a/block/qapi.c b/block/qapi.c index c84147849d..f34f95e0ef 100644 --- a/block/qapi.c +++ b/block/qapi.c @@ -533,27 +533,36 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk) ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ]; ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE]; + ds->zone_append_bytes = stats->nr_bytes[BLOCK_ACCT_ZONE_APPEND]; ds->unmap_bytes = stats->nr_bytes[BLOCK_ACCT_UNMAP]; ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ]; ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE]; + ds->zone_append_operations = stats->nr_ops[BLOCK_ACCT_ZONE_APPEND]; ds->unmap_operations = stats->nr_ops[BLOCK_ACCT_UNMAP]; ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ]; ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE]; + ds->failed_zone_append_operations = + stats->failed_ops[BLOCK_ACCT_ZONE_APPEND]; ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH]; ds->failed_unmap_operations = stats->failed_ops[BLOCK_ACCT_UNMAP]; ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ]; ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE]; + ds->invalid_zone_append_operations = + stats->invalid_ops[BLOCK_ACCT_ZONE_APPEND]; ds->invalid_flush_operations = stats->invalid_ops[BLOCK_ACCT_FLUSH]; ds->invalid_unmap_operations = stats->invalid_ops[BLOCK_ACCT_UNMAP]; ds->rd_merged = stats->merged[BLOCK_ACCT_READ]; ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE]; + ds->zone_append_merged = stats->merged[BLOCK_ACCT_ZONE_APPEND]; ds->unmap_merged = stats->merged[BLOCK_ACCT_UNMAP]; ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH]; ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE]; + ds->zone_append_total_time_ns = + stats->total_time_ns[BLOCK_ACCT_ZONE_APPEND]; ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ]; ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH]; ds->unmap_total_time_ns = stats->total_time_ns[BLOCK_ACCT_UNMAP]; @@ -571,6 +580,7 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk) TimedAverage *rd = &ts->latency[BLOCK_ACCT_READ]; TimedAverage *wr = &ts->latency[BLOCK_ACCT_WRITE]; + TimedAverage *zap = &ts->latency[BLOCK_ACCT_ZONE_APPEND]; TimedAverage *fl = &ts->latency[BLOCK_ACCT_FLUSH]; dev_stats->interval_length = ts->interval_length; @@ -583,6 +593,10 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk) dev_stats->max_wr_latency_ns = timed_average_max(wr); dev_stats->avg_wr_latency_ns = timed_average_avg(wr); + dev_stats->min_zone_append_latency_ns = timed_average_min(zap); + dev_stats->max_zone_append_latency_ns = timed_average_max(zap); + dev_stats->avg_zone_append_latency_ns = timed_average_avg(zap); + dev_stats->min_flush_latency_ns = timed_average_min(fl); dev_stats->max_flush_latency_ns = timed_average_max(fl); dev_stats->avg_flush_latency_ns = timed_average_avg(fl); @@ -591,6 +605,8 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk) 
block_acct_queue_depth(ts, BLOCK_ACCT_READ); dev_stats->avg_wr_queue_depth = block_acct_queue_depth(ts, BLOCK_ACCT_WRITE); + dev_stats->avg_zone_append_queue_depth = + block_acct_queue_depth(ts, BLOCK_ACCT_ZONE_APPEND); QAPI_LIST_PREPEND(ds->timed_stats, dev_stats); } @@ -600,12 +616,14 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk) = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_READ]); ds->wr_latency_histogram = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_WRITE]); + ds->zone_append_latency_histogram + = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_ZONE_APPEND]); ds->flush_latency_histogram = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_FLUSH]); } -static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs, - bool blk_level) +static BlockStats * GRAPH_RDLOCK +bdrv_query_bds_stats(BlockDriverState *bs, bool blk_level) { BdrvChild *parent_child; BlockDriverState *filter_or_cow_bs; @@ -713,6 +731,8 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes, BlockBackend *blk; BlockDriverState *bs; + GRAPH_RDLOCK_GUARD_MAINLOOP(); + /* Just to be safe if query_nodes is not always initialized */ if (has_query_nodes && query_nodes) { for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) { diff --git a/block/qcow.c b/block/qcow.c index 490e4f819e..3644bbf5cb 100644 --- a/block/qcow.c +++ b/block/qcow.c @@ -800,8 +800,8 @@ static void qcow_close(BlockDriverState *bs) error_free(s->migration_blocker); } -static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts, - Error **errp) +static int coroutine_fn GRAPH_UNLOCKED +qcow_co_create(BlockdevCreateOptions *opts, Error **errp) { BlockdevCreateOptionsQcow *qcow_opts; int header_size, backing_filename_len, l1_size, shift, i; @@ -915,13 +915,13 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts, g_free(tmp); ret = 0; exit: - blk_unref(qcow_blk); - bdrv_unref(bs); + blk_co_unref(qcow_blk); + bdrv_co_unref(bs); qcrypto_block_free(crypto); return ret; } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED qcow_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp) { @@ -1015,7 +1015,7 @@ qcow_co_create_opts(BlockDriver *drv, const char *filename, fail: g_free(backing_fmt); qobject_unref(qdict); - bdrv_unref(bs); + bdrv_co_unref(bs); qapi_free_BlockdevCreateOptions(create_options); return ret; } diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c index 5f456a2785..a952fd58d8 100644 --- a/block/qcow2-bitmap.c +++ b/block/qcow2-bitmap.c @@ -1221,7 +1221,7 @@ out: } /* Checks to see if it's safe to resize bitmaps */ -int qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp) +int coroutine_fn qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp) { BDRVQcow2State *s = bs->opaque; Qcow2BitmapList *bm_list; diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c index a9e6622fe3..39cda7f907 100644 --- a/block/qcow2-cluster.c +++ b/block/qcow2-cluster.c @@ -1126,7 +1126,7 @@ err: * Frees the allocated clusters because the request failed and they won't * actually be linked. */ -void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m) +void coroutine_fn qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m) { BDRVQcow2State *s = bs->opaque; if (!has_data_file(bs) && !m->keep_old_clusters) { @@ -1156,9 +1156,11 @@ void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m) * * Returns 0 on success, -errno on failure. 
*/ -static int calculate_l2_meta(BlockDriverState *bs, uint64_t host_cluster_offset, - uint64_t guest_offset, unsigned bytes, - uint64_t *l2_slice, QCowL2Meta **m, bool keep_old) +static int coroutine_fn calculate_l2_meta(BlockDriverState *bs, + uint64_t host_cluster_offset, + uint64_t guest_offset, unsigned bytes, + uint64_t *l2_slice, QCowL2Meta **m, + bool keep_old) { BDRVQcow2State *s = bs->opaque; int sc_index, l2_index = offset_to_l2_slice_index(s, guest_offset); @@ -1599,8 +1601,10 @@ out: * function has been waiting for another request and the allocation must be * restarted, but the whole request should not be failed. */ -static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset, - uint64_t *host_offset, uint64_t *nb_clusters) +static int coroutine_fn do_alloc_cluster_offset(BlockDriverState *bs, + uint64_t guest_offset, + uint64_t *host_offset, + uint64_t *nb_clusters) { BDRVQcow2State *s = bs->opaque; @@ -2065,8 +2069,9 @@ static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset, return nb_clusters; } -static int zero_l2_subclusters(BlockDriverState *bs, uint64_t offset, - unsigned nb_subclusters) +static int coroutine_fn +zero_l2_subclusters(BlockDriverState *bs, uint64_t offset, + unsigned nb_subclusters) { BDRVQcow2State *s = bs->opaque; uint64_t *l2_slice; diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c index b092f89da9..4cf91bd955 100644 --- a/block/qcow2-refcount.c +++ b/block/qcow2-refcount.c @@ -1030,8 +1030,8 @@ int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size) return offset; } -int64_t qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset, - int64_t nb_clusters) +int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset, + int64_t nb_clusters) { BDRVQcow2State *s = bs->opaque; uint64_t cluster_index, refcount; @@ -1069,7 +1069,7 @@ int64_t qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset, /* only used to allocate compressed sectors. We try to allocate contiguous sectors. size must be <= cluster_size */ -int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size) +int64_t coroutine_fn qcow2_alloc_bytes(BlockDriverState *bs, int size) { BDRVQcow2State *s = bs->opaque; int64_t offset; @@ -3685,7 +3685,7 @@ out: return ret; } -int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size) +int64_t coroutine_fn qcow2_get_last_cluster(BlockDriverState *bs, int64_t size) { BDRVQcow2State *s = bs->opaque; int64_t i; @@ -3715,7 +3715,7 @@ int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs) qemu_co_mutex_assert_locked(&s->lock); - file_length = bdrv_getlength(bs->file->bs); + file_length = bdrv_co_getlength(bs->file->bs); if (file_length < 0) { return file_length; } diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c index 62e8a0335d..92e47978bf 100644 --- a/block/qcow2-snapshot.c +++ b/block/qcow2-snapshot.c @@ -77,10 +77,11 @@ void qcow2_free_snapshots(BlockDriverState *bs) * qcow2_check_refcounts() does not do anything with snapshots' * extra data.) 
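The qcow2-cluster.c and qcow2-refcount.c hunks are almost entirely annotation work: helpers that are only ever reached from coroutine context gain coroutine_fn on both definition and prototype, and inside such functions blocking wrappers give way to their coroutine twins (bdrv_getlength() becomes bdrv_co_getlength() in qcow2_detect_metadata_preallocation()). A small fragment of the resulting shape (hypothetical function name, QEMU tree assumed):

/* Annotated coroutine_fn so static checking can verify the callers;
 * mixed APIs are then called through their _co_ variants. */
static int64_t coroutine_fn example_file_length(BlockDriverState *bs)
{
    return bdrv_co_getlength(bs->file->bs);
}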
*/ -static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair, - int *nb_clusters_reduced, - int *extra_data_dropped, - Error **errp) +static coroutine_fn GRAPH_RDLOCK +int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair, + int *nb_clusters_reduced, + int *extra_data_dropped, + Error **errp) { BDRVQcow2State *s = bs->opaque; QCowSnapshotHeader h; @@ -108,7 +109,7 @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair, /* Read statically sized part of the snapshot header */ offset = ROUND_UP(offset, 8); - ret = bdrv_pread(bs->file, offset, sizeof(h), &h, 0); + ret = bdrv_co_pread(bs->file, offset, sizeof(h), &h, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read snapshot table"); goto fail; @@ -146,8 +147,8 @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair, } /* Read known extra data */ - ret = bdrv_pread(bs->file, offset, - MIN(sizeof(extra), sn->extra_data_size), &extra, 0); + ret = bdrv_co_pread(bs->file, offset, + MIN(sizeof(extra), sn->extra_data_size), &extra, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read snapshot table"); goto fail; @@ -184,8 +185,8 @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair, /* Store unknown extra data */ unknown_extra_data_size = sn->extra_data_size - sizeof(extra); sn->unknown_extra_data = g_malloc(unknown_extra_data_size); - ret = bdrv_pread(bs->file, offset, unknown_extra_data_size, - sn->unknown_extra_data, 0); + ret = bdrv_co_pread(bs->file, offset, unknown_extra_data_size, + sn->unknown_extra_data, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read snapshot table"); @@ -196,7 +197,7 @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair, /* Read snapshot ID */ sn->id_str = g_malloc(id_str_size + 1); - ret = bdrv_pread(bs->file, offset, id_str_size, sn->id_str, 0); + ret = bdrv_co_pread(bs->file, offset, id_str_size, sn->id_str, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read snapshot table"); goto fail; @@ -206,7 +207,7 @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair, /* Read snapshot name */ sn->name = g_malloc(name_size + 1); - ret = bdrv_pread(bs->file, offset, name_size, sn->name, 0); + ret = bdrv_co_pread(bs->file, offset, name_size, sn->name, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read snapshot table"); goto fail; @@ -261,7 +262,7 @@ fail: return ret; } -int qcow2_read_snapshots(BlockDriverState *bs, Error **errp) +int coroutine_fn qcow2_read_snapshots(BlockDriverState *bs, Error **errp) { return qcow2_do_read_snapshots(bs, false, NULL, NULL, errp); } diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c index 953bbe6df8..d6071a1eae 100644 --- a/block/qcow2-threads.c +++ b/block/qcow2-threads.c @@ -43,7 +43,6 @@ qcow2_co_process(BlockDriverState *bs, ThreadPoolFunc *func, void *arg) { int ret; BDRVQcow2State *s = bs->opaque; - ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs)); qemu_co_mutex_lock(&s->lock); while (s->nb_threads >= QCOW2_MAX_THREADS) { @@ -52,7 +51,7 @@ qcow2_co_process(BlockDriverState *bs, ThreadPoolFunc *func, void *arg) s->nb_threads++; qemu_co_mutex_unlock(&s->lock); - ret = thread_pool_submit_co(pool, func, arg); + ret = thread_pool_submit_co(func, arg); qemu_co_mutex_lock(&s->lock); s->nb_threads--; diff --git a/block/qcow2.c b/block/qcow2.c index 30fd53fa64..7f3948360d 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -118,8 +118,9 @@ static int qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t 
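The qcow2-threads.c hunk above is a behaviourally interesting one-liner: thread_pool_submit_co() loses its ThreadPool argument and the aio_get_thread_pool() lookup disappears. Judging by this call site alone (the prototype change itself is elsewhere in the series), the pool is now derived implicitly from the submitting thread's AioContext; treat that reading as an assumption. A fragment of the new submission shape:

/* Offload a blocking helper to the thread pool from a coroutine and
 * yield until it completes; the pool is no longer passed explicitly. */
static int coroutine_fn example_offload(ThreadPoolFunc *func, void *arg)
{
    return thread_pool_submit_co(func, arg);
}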
offset, } -static int qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, - void *opaque, Error **errp) +static int coroutine_fn GRAPH_RDLOCK +qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, void *opaque, + Error **errp) { BlockDriverState *bs = opaque; BDRVQcow2State *s = bs->opaque; @@ -144,9 +145,7 @@ static int qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, */ clusterlen = size_to_clusters(s, headerlen) * s->cluster_size; assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen, false) == 0); - ret = bdrv_pwrite_zeroes(bs->file, - ret, - clusterlen, 0); + ret = bdrv_co_pwrite_zeroes(bs->file, ret, clusterlen, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not zero fill encryption header"); return -1; @@ -156,9 +155,11 @@ static int qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, } -static int qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset, - const uint8_t *buf, size_t buflen, - void *opaque, Error **errp) +/* The graph lock must be held when called in coroutine context */ +static int coroutine_mixed_fn +qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset, + const uint8_t *buf, size_t buflen, + void *opaque, Error **errp) { BlockDriverState *bs = opaque; BDRVQcow2State *s = bs->opaque; @@ -199,10 +200,10 @@ qcow2_extract_crypto_opts(QemuOpts *opts, const char *fmt, Error **errp) * unknown magic is skipped (future extension this version knows nothing about) * return 0 upon success, non-0 otherwise */ -static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, - uint64_t end_offset, void **p_feature_table, - int flags, bool *need_update_header, - Error **errp) +static int coroutine_fn GRAPH_RDLOCK +qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, + uint64_t end_offset, void **p_feature_table, + int flags, bool *need_update_header, Error **errp) { BDRVQcow2State *s = bs->opaque; QCowExtension ext; @@ -228,7 +229,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, printf("attempting to read extended header in offset %lu\n", offset); #endif - ret = bdrv_pread(bs->file, offset, sizeof(ext), &ext, 0); + ret = bdrv_co_pread(bs->file, offset, sizeof(ext), &ext, 0); if (ret < 0) { error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: " "pread fail from offset %" PRIu64, offset); @@ -256,7 +257,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, sizeof(bs->backing_format)); return 2; } - ret = bdrv_pread(bs->file, offset, ext.len, bs->backing_format, 0); + ret = bdrv_co_pread(bs->file, offset, ext.len, bs->backing_format, 0); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: ext_backing_format: " "Could not read format name"); @@ -272,7 +273,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, case QCOW2_EXT_MAGIC_FEATURE_TABLE: if (p_feature_table != NULL) { void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); - ret = bdrv_pread(bs->file, offset, ext.len, feature_table, 0); + ret = bdrv_co_pread(bs->file, offset, ext.len, feature_table, 0); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: ext_feature_table: " "Could not read table"); @@ -298,7 +299,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, return -EINVAL; } - ret = bdrv_pread(bs->file, offset, ext.len, &s->crypto_header, 0); + ret = bdrv_co_pread(bs->file, offset, ext.len, &s->crypto_header, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Unable to 
read CRYPTO header extension"); @@ -354,7 +355,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, break; } - ret = bdrv_pread(bs->file, offset, ext.len, &bitmaps_ext, 0); + ret = bdrv_co_pread(bs->file, offset, ext.len, &bitmaps_ext, 0); if (ret < 0) { error_setg_errno(errp, -ret, "bitmaps_ext: " "Could not read ext header"); @@ -418,7 +419,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, case QCOW2_EXT_MAGIC_DATA_FILE: { s->image_data_file = g_malloc0(ext.len + 1); - ret = bdrv_pread(bs->file, offset, ext.len, s->image_data_file, 0); + ret = bdrv_co_pread(bs->file, offset, ext.len, s->image_data_file, 0); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: Could not read data file name"); @@ -442,7 +443,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, uext->len = ext.len; QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); - ret = bdrv_pread(bs->file, offset, uext->len, uext->data, 0); + ret = bdrv_co_pread(bs->file, offset, uext->len, uext->data, 0); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: unknown extension: " "Could not read data"); @@ -1241,8 +1242,9 @@ static void qcow2_update_options_abort(BlockDriverState *bs, qapi_free_QCryptoBlockOpenOptions(r->crypto_opts); } -static int qcow2_update_options(BlockDriverState *bs, QDict *options, - int flags, Error **errp) +static int coroutine_fn +qcow2_update_options(BlockDriverState *bs, QDict *options, int flags, + Error **errp) { Qcow2ReopenState r = {}; int ret; @@ -1617,9 +1619,11 @@ qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, if (open_data_file) { /* Open external data file */ + bdrv_graph_co_rdunlock(); s->data_file = bdrv_co_open_child(NULL, options, "data-file", bs, &child_of_bds, BDRV_CHILD_DATA, true, errp); + bdrv_graph_co_rdlock(); if (*errp) { ret = -EINVAL; goto fail; @@ -1627,10 +1631,12 @@ qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) { if (!s->data_file && s->image_data_file) { + bdrv_graph_co_rdunlock(); s->data_file = bdrv_co_open_child(s->image_data_file, options, "data-file", bs, &child_of_bds, BDRV_CHILD_DATA, false, errp); + bdrv_graph_co_rdlock(); if (!s->data_file) { ret = -EINVAL; goto fail; @@ -1855,7 +1861,9 @@ qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, fail: g_free(s->image_data_file); if (open_data_file && has_data_file(bs)) { + bdrv_graph_co_rdunlock(); bdrv_unref_child(bs, s->data_file); + bdrv_graph_co_rdlock(); s->data_file = NULL; } g_free(s->unknown_header_fields); @@ -1890,12 +1898,14 @@ static void coroutine_fn qcow2_open_entry(void *opaque) QCow2OpenCo *qoc = opaque; BDRVQcow2State *s = qoc->bs->opaque; - assume_graph_lock(); /* FIXME */ + GRAPH_RDLOCK_GUARD(); qemu_co_mutex_lock(&s->lock); qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, true, qoc->errp); qemu_co_mutex_unlock(&s->lock); + + aio_wait_kick(); } static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, @@ -1919,14 +1929,13 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, /* Initialise locks */ qemu_co_mutex_init(&s->lock); - if (qemu_in_coroutine()) { - /* From bdrv_co_create. 
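The qcow2_do_open() hunks above show the bracketing idiom this series leans on: the function now runs under the graph read lock, but bdrv_co_open_child() attaches a new child and so needs the lock dropped around it. A condensed fragment of the idiom (wrapper name hypothetical; the bracketed call matches the one used above):

static BdrvChild * coroutine_fn
example_attach_data_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    BdrvChild *child;

    /* Caller holds the graph read lock; release it across the graph
     * change and take it back before touching bs again. */
    bdrv_graph_co_rdunlock();
    child = bdrv_co_open_child(NULL, options, "data-file", bs,
                               &child_of_bds, BDRV_CHILD_DATA, true, errp);
    bdrv_graph_co_rdlock();

    return child;
}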
*/ - qcow2_open_entry(&qoc); - } else { - assert(qemu_get_current_aio_context() == qemu_get_aio_context()); - qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc)); - BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS); - } + assert(!qemu_in_coroutine()); + assert(qemu_get_current_aio_context() == qemu_get_aio_context()); + + aio_co_enter(bdrv_get_aio_context(bs), + qemu_coroutine_create(qcow2_open_entry, &qoc)); + AIO_WAIT_WHILE_UNLOCKED(NULL, qoc.ret == -EINPROGRESS); + return qoc.ret; } @@ -2088,11 +2097,10 @@ static void qcow2_join_options(QDict *options, QDict *old_options) } } -static int coroutine_fn qcow2_co_block_status(BlockDriverState *bs, - bool want_zero, - int64_t offset, int64_t count, - int64_t *pnum, int64_t *map, - BlockDriverState **file) +static int coroutine_fn GRAPH_RDLOCK +qcow2_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset, + int64_t count, int64_t *pnum, int64_t *map, + BlockDriverState **file) { BDRVQcow2State *s = bs->opaque; uint64_t host_offset; @@ -3140,9 +3148,10 @@ static int qcow2_change_backing_file(BlockDriverState *bs, return qcow2_update_header(bs); } -static int qcow2_set_up_encryption(BlockDriverState *bs, - QCryptoBlockCreateOptions *cryptoopts, - Error **errp) +static int coroutine_fn GRAPH_RDLOCK +qcow2_set_up_encryption(BlockDriverState *bs, + QCryptoBlockCreateOptions *cryptoopts, + Error **errp) { BDRVQcow2State *s = bs->opaque; QCryptoBlock *crypto = NULL; @@ -3234,7 +3243,7 @@ preallocate_co(BlockDriverState *bs, uint64_t offset, uint64_t new_length, * all of the allocated clusters (otherwise we get failing reads after * EOF). Extend the image to the last allocated sector. */ - file_length = bdrv_getlength(s->data_file->bs); + file_length = bdrv_co_getlength(s->data_file->bs); if (file_length < 0) { error_setg_errno(errp, -file_length, "Could not get file size"); ret = file_length; @@ -3429,7 +3438,7 @@ static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, return refcount_bits; } -static int coroutine_fn +static int coroutine_fn GRAPH_UNLOCKED qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) { BlockdevCreateOptionsQcow2 *qcow2_opts; @@ -3705,7 +3714,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) goto out; } - blk_unref(blk); + blk_co_unref(blk); blk = NULL; /* @@ -3727,8 +3736,10 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) goto out; } + bdrv_graph_co_rdlock(); ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); if (ret < 0) { + bdrv_graph_co_rdunlock(); error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " "header and refcount table"); goto out; @@ -3746,6 +3757,8 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) /* Create a full header (including things like feature table) */ ret = qcow2_update_header(blk_bs(blk)); + bdrv_graph_co_rdunlock(); + if (ret < 0) { error_setg_errno(errp, -ret, "Could not update qcow2 header"); goto out; @@ -3779,13 +3792,16 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) /* Want encryption? There you go. */ if (qcow2_opts->encrypt) { + bdrv_graph_co_rdlock(); ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp); + bdrv_graph_co_rdunlock(); + if (ret < 0) { goto out; } } - blk_unref(blk); + blk_co_unref(blk); blk = NULL; /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 
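qcow2_open() above drops the "am I already in a coroutine?" special case: .bdrv_open is now always reached outside coroutine context, so the function unconditionally spawns the entry coroutine and polls. Together with the GRAPH_RDLOCK_GUARD() and aio_wait_kick() added to qcow2_open_entry(), the pattern reads roughly as follows (sketch with hypothetical names, mirroring the calls shown above; QEMU tree assumed):

typedef struct ExampleOpenCo {
    BlockDriverState *bs;
    int ret;                        /* -EINPROGRESS until the entry ran */
} ExampleOpenCo;

static void coroutine_fn example_open_entry(void *opaque)
{
    ExampleOpenCo *oc = opaque;

    GRAPH_RDLOCK_GUARD();           /* the real work runs under the rdlock */
    oc->ret = 0;                    /* driver-specific open work goes here */
    aio_wait_kick();                /* wake up AIO_WAIT_WHILE_UNLOCKED() */
}

static int example_open(BlockDriverState *bs)
{
    ExampleOpenCo oc = { .bs = bs, .ret = -EINPROGRESS };

    assert(!qemu_in_coroutine());
    aio_co_enter(bdrv_get_aio_context(bs),
                 qemu_coroutine_create(example_open_entry, &oc));
    AIO_WAIT_WHILE_UNLOCKED(NULL, oc.ret == -EINPROGRESS);
    return oc.ret;
}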
@@ -3810,13 +3826,13 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) ret = 0; out: - blk_unref(blk); - bdrv_unref(bs); - bdrv_unref(data_bs); + blk_co_unref(blk); + bdrv_co_unref(bs); + bdrv_co_unref(data_bs); return ret; } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED qcow2_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp) { @@ -3936,15 +3952,17 @@ qcow2_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, ret = qcow2_co_create(create_options, errp); finish: if (ret < 0) { + bdrv_graph_co_rdlock(); bdrv_co_delete_file_noerr(bs); bdrv_co_delete_file_noerr(data_bs); + bdrv_graph_co_rdunlock(); } else { ret = 0; } qobject_unref(qdict); - bdrv_unref(bs); - bdrv_unref(data_bs); + bdrv_co_unref(bs); + bdrv_co_unref(data_bs); qapi_free_BlockdevCreateOptions(create_options); return ret; } @@ -4097,7 +4115,7 @@ qcow2_co_copy_range_from(BlockDriverState *bs, case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN: case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: if (bs->backing && bs->backing->bs) { - int64_t backing_length = bdrv_getlength(bs->backing->bs); + int64_t backing_length = bdrv_co_getlength(bs->backing->bs); if (src_offset >= backing_length) { cur_write_flags |= BDRV_REQ_ZERO_WRITE; } else { @@ -4292,7 +4310,7 @@ qcow2_co_truncate(BlockDriverState *bs, int64_t offset, bool exact, goto fail; } - old_file_size = bdrv_getlength(bs->file->bs); + old_file_size = bdrv_co_getlength(bs->file->bs); if (old_file_size < 0) { error_setg_errno(errp, -old_file_size, "Failed to inquire current file length"); @@ -4385,7 +4403,7 @@ qcow2_co_truncate(BlockDriverState *bs, int64_t offset, bool exact, break; } - old_file_size = bdrv_getlength(bs->file->bs); + old_file_size = bdrv_co_getlength(bs->file->bs); if (old_file_size < 0) { error_setg_errno(errp, -old_file_size, "Failed to inquire current file length"); @@ -4693,7 +4711,7 @@ qcow2_co_pwritev_compressed_part(BlockDriverState *bs, * align end of file to a sector boundary to ease reading with * sector based I/Os */ - int64_t len = bdrv_getlength(bs->file->bs); + int64_t len = bdrv_co_getlength(bs->file->bs); if (len < 0) { return len; } diff --git a/block/qcow2.h b/block/qcow2.h index c59e33c01c..4f67eb912a 100644 --- a/block/qcow2.h +++ b/block/qcow2.h @@ -862,9 +862,9 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t offset, uint64_t new_refblock_offset); int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size); -int64_t qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset, - int64_t nb_clusters); -int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size); +int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset, + int64_t nb_clusters); +int64_t coroutine_fn qcow2_alloc_bytes(BlockDriverState *bs, int size); void qcow2_free_clusters(BlockDriverState *bs, int64_t offset, int64_t size, enum qcow2_discard_type type); @@ -894,8 +894,10 @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order, BlockDriverAmendStatusCB *status_cb, void *cb_opaque, Error **errp); int coroutine_fn GRAPH_RDLOCK qcow2_shrink_reftable(BlockDriverState *bs); -int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size); -int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs); +int64_t coroutine_fn qcow2_get_last_cluster(BlockDriverState *bs, int64_t size); + +int coroutine_fn GRAPH_RDLOCK +qcow2_detect_metadata_preallocation(BlockDriverState *bs); /* qcow2-cluster.c functions */ int 
qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, @@ -924,7 +926,7 @@ void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry, int coroutine_fn GRAPH_RDLOCK qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m); -void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m); +void coroutine_fn qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m); int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, uint64_t bytes, enum qcow2_discard_type type, bool full_discard); @@ -951,7 +953,8 @@ int qcow2_snapshot_load_tmp(BlockDriverState *bs, Error **errp); void qcow2_free_snapshots(BlockDriverState *bs); -int qcow2_read_snapshots(BlockDriverState *bs, Error **errp); +int coroutine_fn GRAPH_RDLOCK +qcow2_read_snapshots(BlockDriverState *bs, Error **errp); int qcow2_write_snapshots(BlockDriverState *bs); int coroutine_fn GRAPH_RDLOCK @@ -994,7 +997,7 @@ bool coroutine_fn qcow2_load_dirty_bitmaps(BlockDriverState *bs, bool qcow2_get_bitmap_info_list(BlockDriverState *bs, Qcow2BitmapInfoList **info_list, Error **errp); int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp); -int qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp); +int coroutine_fn qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp); bool qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs, bool release_stored, Error **errp); int qcow2_reopen_bitmaps_ro(BlockDriverState *bs, Error **errp); diff --git a/block/qed.c b/block/qed.c index ed94bb61ca..9a0350b534 100644 --- a/block/qed.c +++ b/block/qed.c @@ -557,11 +557,13 @@ typedef struct QEDOpenCo { int ret; } QEDOpenCo; -static void coroutine_fn GRAPH_RDLOCK bdrv_qed_open_entry(void *opaque) +static void coroutine_fn bdrv_qed_open_entry(void *opaque) { QEDOpenCo *qoc = opaque; BDRVQEDState *s = qoc->bs->opaque; + GRAPH_RDLOCK_GUARD(); + qemu_co_mutex_lock(&s->table_lock); qoc->ret = bdrv_qed_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp); qemu_co_mutex_unlock(&s->table_lock); @@ -579,22 +581,17 @@ static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags, }; int ret; - assume_graph_lock(); /* FIXME */ - ret = bdrv_open_file_child(NULL, options, "file", bs, errp); if (ret < 0) { return ret; } bdrv_qed_init_state(bs); - if (qemu_in_coroutine()) { - bdrv_qed_open_entry(&qoc); - } else { - assert(qemu_get_current_aio_context() == qemu_get_aio_context()); - qemu_coroutine_enter(qemu_coroutine_create(bdrv_qed_open_entry, &qoc)); - BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS); - } + assert(!qemu_in_coroutine()); + assert(qemu_get_current_aio_context() == qemu_get_aio_context()); + qemu_coroutine_enter(qemu_coroutine_create(bdrv_qed_open_entry, &qoc)); BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS); + return qoc.ret; } @@ -633,8 +630,8 @@ static void bdrv_qed_close(BlockDriverState *bs) qemu_vfree(s->l1_table); } -static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts, - Error **errp) +static int coroutine_fn GRAPH_UNLOCKED +bdrv_qed_co_create(BlockdevCreateOptions *opts, Error **errp) { BlockdevCreateOptionsQed *qed_opts; BlockBackend *blk = NULL; @@ -749,12 +746,12 @@ static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts, ret = 0; /* success */ out: g_free(l1_table); - blk_unref(blk); - bdrv_unref(bs); + blk_co_unref(blk); + bdrv_co_unref(bs); return ret; } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED bdrv_qed_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp) { @@ 
-820,7 +817,7 @@ bdrv_qed_co_create_opts(BlockDriver *drv, const char *filename, fail: qobject_unref(qdict); - bdrv_unref(bs); + bdrv_co_unref(bs); qapi_free_BlockdevCreateOptions(create_options); return ret; } diff --git a/block/quorum.c b/block/quorum.c index ff5a0a2da3..f28758cf2b 100644 --- a/block/quorum.c +++ b/block/quorum.c @@ -825,8 +825,8 @@ static coroutine_fn GRAPH_RDLOCK int quorum_co_flush(BlockDriverState *bs) return result; } -static bool quorum_recurse_can_replace(BlockDriverState *bs, - BlockDriverState *to_replace) +static bool GRAPH_RDLOCK +quorum_recurse_can_replace(BlockDriverState *bs, BlockDriverState *to_replace) { BDRVQuorumState *s = bs->opaque; int i; diff --git a/block/raw-format.c b/block/raw-format.c index 66783ed8e7..e4f35268e6 100644 --- a/block/raw-format.c +++ b/block/raw-format.c @@ -317,6 +317,28 @@ raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) return bdrv_co_pdiscard(bs->file, offset, bytes); } +static int coroutine_fn GRAPH_RDLOCK +raw_co_zone_report(BlockDriverState *bs, int64_t offset, + unsigned int *nr_zones, + BlockZoneDescriptor *zones) +{ + return bdrv_co_zone_report(bs->file->bs, offset, nr_zones, zones); +} + +static int coroutine_fn GRAPH_RDLOCK +raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, + int64_t offset, int64_t len) +{ + return bdrv_co_zone_mgmt(bs->file->bs, op, offset, len); +} + +static int coroutine_fn GRAPH_RDLOCK +raw_co_zone_append(BlockDriverState *bs, int64_t *offset, QEMUIOVector *qiov, + BdrvRequestFlags flags) +{ + return bdrv_co_zone_append(bs->file->bs, offset, qiov, flags); +} + static int64_t coroutine_fn GRAPH_RDLOCK raw_co_getlength(BlockDriverState *bs) { @@ -369,7 +391,7 @@ static BlockMeasureInfo *raw_measure(QemuOpts *opts, BlockDriverState *in_bs, return info; } -static int coroutine_fn +static int coroutine_fn GRAPH_RDLOCK raw_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) { return bdrv_co_get_info(bs->file->bs, bdi); @@ -377,6 +399,8 @@ raw_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) static void raw_refresh_limits(BlockDriverState *bs, Error **errp) { + bs->bl.has_variable_length = bs->file->bs->bl.has_variable_length; + if (bs->probed) { /* To make it easier to protect the first sector, any probed * image is restricted to read-modify-write on sub-sector @@ -433,7 +457,7 @@ static int raw_has_zero_init(BlockDriverState *bs) return bdrv_has_zero_init(bs->file->bs); } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED raw_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp) { @@ -444,6 +468,7 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVRawState *s = bs->opaque; + AioContext *ctx; bool has_size; uint64_t offset, size; BdrvChildRole file_role; @@ -491,7 +516,11 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags, bs->file->bs->filename); } + ctx = bdrv_get_aio_context(bs); + aio_context_acquire(ctx); ret = raw_apply_options(bs, s, offset, has_size, size, errp); + aio_context_release(ctx); + if (ret < 0) { return ret; } @@ -606,6 +635,7 @@ static void raw_child_perm(BlockDriverState *bs, BdrvChild *c, BlockDriver bdrv_raw = { .format_name = "raw", .instance_size = sizeof(BDRVRawState), + .supports_zoned_children = true, .bdrv_probe = &raw_probe, .bdrv_reopen_prepare = &raw_reopen_prepare, .bdrv_reopen_commit = &raw_reopen_commit, @@ -617,13 +647,15 @@ BlockDriver bdrv_raw = { .bdrv_co_preadv = &raw_co_preadv, .bdrv_co_pwritev = &raw_co_pwritev, .bdrv_co_pwrite_zeroes =
&raw_co_pwrite_zeroes, .bdrv_co_pdiscard = &raw_co_pdiscard, + .bdrv_co_zone_report = &raw_co_zone_report, + .bdrv_co_zone_mgmt = &raw_co_zone_mgmt, + .bdrv_co_zone_append = &raw_co_zone_append, .bdrv_co_block_status = &raw_co_block_status, .bdrv_co_copy_range_from = &raw_co_copy_range_from, .bdrv_co_copy_range_to = &raw_co_copy_range_to, .bdrv_co_truncate = &raw_co_truncate, .bdrv_co_getlength = &raw_co_getlength, .is_format = true, - .has_variable_length = true, .bdrv_measure = &raw_measure, .bdrv_co_get_info = &raw_co_get_info, .bdrv_refresh_limits = &raw_refresh_limits, diff --git a/block/replication.c b/block/replication.c index de01f96184..ea4bf1aa80 100644 --- a/block/replication.c +++ b/block/replication.c @@ -762,7 +762,6 @@ static BlockDriver bdrv_replication = { .is_filter = true, - .has_variable_length = true, .strong_runtime_opts = replication_strong_runtime_opts, }; diff --git a/block/ssh.c b/block/ssh.c index b3b3352075..2748253d4a 100644 --- a/block/ssh.c +++ b/block/ssh.c @@ -1019,7 +1019,7 @@ static void restart_coroutine(void *opaque) AioContext *ctx = bdrv_get_aio_context(bs); trace_ssh_restart_coroutine(restart->co); - aio_set_fd_handler(ctx, s->sock, false, NULL, NULL, NULL, NULL, NULL); + aio_set_fd_handler(ctx, s->sock, NULL, NULL, NULL, NULL, NULL); aio_co_wake(restart->co); } @@ -1049,7 +1049,7 @@ static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs) trace_ssh_co_yield(s->sock, rd_handler, wr_handler); aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock, - false, rd_handler, wr_handler, NULL, NULL, &restart); + rd_handler, wr_handler, NULL, NULL, &restart); qemu_coroutine_yield(); trace_ssh_co_yield_back(s->sock); } diff --git a/block/stream.c b/block/stream.c index 7f9e1ecdbb..e522bbdec5 100644 --- a/block/stream.c +++ b/block/stream.c @@ -131,7 +131,6 @@ static int coroutine_fn stream_run(Job *job, Error **errp) BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs); int64_t len; int64_t offset = 0; - uint64_t delay_ns = 0; int error = 0; int64_t n = 0; /* bytes */ @@ -155,7 +154,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp) /* Note that even when no rate limit is applied we need to yield * with no pending I/O here so that bdrv_drain_all() returns. 
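Two things happen in the raw-format hunks above. First, the raw driver becomes a transparent zoned-device passthrough: raw_co_zone_report/mgmt/append simply forward to the file child, and supports_zoned_children is set on the driver. Second, has_variable_length stops being a static BlockDriver flag (note its removal from bdrv_raw and bdrv_replication) and becomes a per-node limit propagated in .bdrv_refresh_limits. A condensed sketch of both (driver name hypothetical, QEMU tree assumed):

static int coroutine_fn GRAPH_RDLOCK
example_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
                     int64_t offset, int64_t len)
{
    /* pure passthrough: the file child does the real work */
    return bdrv_co_zone_mgmt(bs->file->bs, op, offset, len);
}

static void example_refresh_limits(BlockDriverState *bs, Error **errp)
{
    /* inherit variable-length behaviour from the child at refresh
     * time instead of declaring it statically on the BlockDriver */
    bs->bl.has_variable_length = bs->file->bs->bl.has_variable_length;
}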
*/ - job_sleep_ns(&s->common.job, delay_ns); + block_job_ratelimit_sleep(&s->common); if (job_is_cancelled(&s->common.job)) { break; } @@ -204,9 +203,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp) /* Publish progress */ job_progress_update(&s->common.job, n); if (copy) { - delay_ns = block_job_ratelimit_get_delay(&s->common, n); - } else { - delay_ns = 0; + block_job_ratelimit_processed_bytes(&s->common, n); } } diff --git a/block/trace-events b/block/trace-events index 48dbf10c66..32665158d6 100644 --- a/block/trace-events +++ b/block/trace-events @@ -209,6 +209,10 @@ file_FindEjectableOpticalMedia(const char *media) "Matching using %s" file_setup_cdrom(const char *partition) "Using %s as optical disc" file_hdev_is_sg(int type, int version) "SG device found: type=%d, version=%d" file_flush_fdatasync_failed(int err) "errno %d" +zbd_zone_report(void *bs, unsigned int nr_zones, int64_t sector) "bs %p report %d zones starting at sector offset 0x%" PRIx64 "" +zbd_zone_mgmt(void *bs, const char *op_name, int64_t sector, int64_t len) "bs %p %s starts at sector offset 0x%" PRIx64 " over a range of 0x%" PRIx64 " sectors" +zbd_zone_append(void *bs, int64_t sector) "bs %p append at sector offset 0x%" PRIx64 "" +zbd_zone_append_complete(void *bs, int64_t sector) "bs %p returns append sector 0x%" PRIx64 "" # ssh.c sftp_error(const char *op, const char *ssh_err, int ssh_err_code, int sftp_err_code) "%s failed: %s (libssh error code: %d, sftp error code: %d)" diff --git a/block/vdi.c b/block/vdi.c index f2434d6153..6c35309e04 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -734,8 +734,9 @@ nonallocating_write: return ret; } -static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options, - size_t block_size, Error **errp) +static int coroutine_fn GRAPH_UNLOCKED +vdi_co_do_create(BlockdevCreateOptions *create_options, size_t block_size, + Error **errp) { BlockdevCreateOptionsVdi *vdi_opts; int ret = 0; @@ -886,19 +887,19 @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options, ret = 0; exit: - blk_unref(blk); - bdrv_unref(bs_file); + blk_co_unref(blk); + bdrv_co_unref(bs_file); g_free(bmap); return ret; } -static int coroutine_fn vdi_co_create(BlockdevCreateOptions *create_options, - Error **errp) +static int coroutine_fn GRAPH_UNLOCKED +vdi_co_create(BlockdevCreateOptions *create_options, Error **errp) { return vdi_co_do_create(create_options, DEFAULT_CLUSTER_SIZE, errp); } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED vdi_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp) { @@ -975,7 +976,7 @@ vdi_co_create_opts(BlockDriver *drv, const char *filename, done: qobject_unref(qdict); qapi_free_BlockdevCreateOptions(create_options); - bdrv_unref(bs_file); + bdrv_co_unref(bs_file); return ret; } diff --git a/block/vhdx-log.c b/block/vhdx-log.c index c48cf65d62..38148f107a 100644 --- a/block/vhdx-log.c +++ b/block/vhdx-log.c @@ -981,7 +981,7 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s, sector_write = merged_sector; } else if (i == sectors - 1 && trailing_length) { /* partial sector at the end of the buffer */ - ret = bdrv_pread(bs->file, file_offset, + ret = bdrv_pread(bs->file, file_offset + trailing_length, VHDX_LOG_SECTOR_SIZE - trailing_length, merged_sector + trailing_length, 0); if (ret < 0) { diff --git a/block/vhdx.c b/block/vhdx.c index 81420722a1..89913cba87 100644 --- a/block/vhdx.c +++ b/block/vhdx.c @@ -1506,14 +1506,17 @@ exit: * There are 2 headers, 
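The stream.c hunk just above shows the job ratelimit refactor: the per-job delay_ns bookkeeping is deleted, the job calls block_job_ratelimit_sleep() at the top of each iteration and reports completed work with block_job_ratelimit_processed_bytes(), and the common block-job code decides whether and how long to sleep. The resulting loop shape, as a fragment (s and do_one_chunk() are hypothetical; the block-job calls match the ones used above):

for ( ; ; ) {
    int64_t n;

    block_job_ratelimit_sleep(&s->common);   /* sleeps only when over rate */
    if (job_is_cancelled(&s->common.job)) {
        break;
    }

    n = do_one_chunk();                      /* hypothetical copy step */
    job_progress_update(&s->common.job, n);
    block_job_ratelimit_processed_bytes(&s->common, n);
}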
and the highest sequence number will represent * the active header */ -static int vhdx_create_new_headers(BlockBackend *blk, uint64_t image_size, - uint32_t log_size) +static int coroutine_fn GRAPH_UNLOCKED +vhdx_create_new_headers(BlockBackend *blk, uint64_t image_size, + uint32_t log_size) { BlockDriverState *bs = blk_bs(blk); BdrvChild *child; int ret = 0; VHDXHeader *hdr = NULL; + GRAPH_RDLOCK_GUARD(); + hdr = g_new0(VHDXHeader, 1); hdr->signature = VHDX_HEADER_SIGNATURE; @@ -1897,8 +1900,8 @@ exit: * .---- ~ ----------- ~ ------------ ~ ---------------- ~ -----------. * 1MB */ -static int coroutine_fn vhdx_co_create(BlockdevCreateOptions *opts, - Error **errp) +static int coroutine_fn GRAPH_UNLOCKED +vhdx_co_create(BlockdevCreateOptions *opts, Error **errp) { BlockdevCreateOptionsVhdx *vhdx_opts; BlockBackend *blk = NULL; @@ -2053,13 +2056,13 @@ static int coroutine_fn vhdx_co_create(BlockdevCreateOptions *opts, ret = 0; delete_and_exit: - blk_unref(blk); - bdrv_unref(bs); + blk_co_unref(blk); + bdrv_co_unref(bs); g_free(creator); return ret; } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED vhdx_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp) { @@ -2144,7 +2147,7 @@ vhdx_co_create_opts(BlockDriver *drv, const char *filename, fail: qobject_unref(qdict); - bdrv_unref(bs); + bdrv_co_unref(bs); qapi_free_BlockdevCreateOptions(create_options); return ret; } diff --git a/block/vmdk.c b/block/vmdk.c index f5f49018fe..e3e86608ec 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -376,7 +376,7 @@ out: return ret; } -static int vmdk_is_cid_valid(BlockDriverState *bs) +static int coroutine_fn vmdk_is_cid_valid(BlockDriverState *bs) { BDRVVmdkState *s = bs->opaque; uint32_t cur_pcid; @@ -2165,10 +2165,9 @@ vmdk_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, return ret; } -static int vmdk_init_extent(BlockBackend *blk, - int64_t filesize, bool flat, - bool compress, bool zeroed_grain, - Error **errp) +static int GRAPH_UNLOCKED +vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress, + bool zeroed_grain, Error **errp) { int ret, i; VMDK4Header header; @@ -2277,7 +2276,7 @@ exit: return ret; } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED vmdk_create_extent(const char *filename, int64_t filesize, bool flat, bool compress, bool zeroed_grain, BlockBackend **pbb, QemuOpts *opts, Error **errp) @@ -2306,7 +2305,7 @@ exit: if (pbb) { *pbb = blk; } else { - blk_unref(blk); + blk_co_unref(blk); blk = NULL; } } @@ -2358,7 +2357,7 @@ static int filename_decompose(const char *filename, char *path, char *prefix, * non-split format. * idx >= 1: get the n-th extent if in a split subformat */ -typedef BlockBackend * coroutine_fn /* GRAPH_RDLOCK */ +typedef BlockBackend * coroutine_fn GRAPH_UNLOCKED_PTR (*vmdk_create_extent_fn)(int64_t size, int idx, bool flat, bool split, bool compress, bool zeroed_grain, void *opaque, Error **errp); @@ -2374,7 +2373,7 @@ static void vmdk_desc_add_extent(GString *desc, g_free(basename); } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED vmdk_co_do_create(int64_t size, BlockdevVmdkSubformat subformat, BlockdevVmdkAdapterType adapter_type, @@ -2516,12 +2515,12 @@ vmdk_co_do_create(int64_t size, if (strcmp(blk_bs(backing)->drv->format_name, "vmdk")) { error_setg(errp, "Invalid backing file format: %s. 
Must be vmdk", blk_bs(backing)->drv->format_name); - blk_unref(backing); + blk_co_unref(backing); ret = -EINVAL; goto exit; } ret = vmdk_read_cid(blk_bs(backing), 0, &parent_cid); - blk_unref(backing); + blk_co_unref(backing); if (ret) { error_setg(errp, "Failed to read parent CID"); goto exit; @@ -2542,14 +2541,14 @@ vmdk_co_do_create(int64_t size, blk_bs(extent_blk)->filename); created_size += cur_size; extent_idx++; - blk_unref(extent_blk); + blk_co_unref(extent_blk); } /* Check whether we got excess extents */ extent_blk = extent_fn(-1, extent_idx, flat, split, compress, zeroed_grain, opaque, NULL); if (extent_blk) { - blk_unref(extent_blk); + blk_co_unref(extent_blk); error_setg(errp, "List of extents contains unused extents"); ret = -EINVAL; goto exit; @@ -2590,7 +2589,7 @@ vmdk_co_do_create(int64_t size, ret = 0; exit: if (blk) { - blk_unref(blk); + blk_co_unref(blk); } g_free(desc); g_free(parent_desc_line); @@ -2605,7 +2604,7 @@ typedef struct { QemuOpts *opts; } VMDKCreateOptsData; -static BlockBackend * coroutine_fn GRAPH_RDLOCK +static BlockBackend * coroutine_fn GRAPH_UNLOCKED vmdk_co_create_opts_cb(int64_t size, int idx, bool flat, bool split, bool compress, bool zeroed_grain, void *opaque, Error **errp) @@ -2641,13 +2640,13 @@ vmdk_co_create_opts_cb(int64_t size, int idx, bool flat, bool split, errp)) { goto exit; } - bdrv_unref(bs); + bdrv_co_unref(bs); exit: g_free(ext_filename); return blk; } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED vmdk_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp) { @@ -2756,11 +2755,9 @@ exit: return ret; } -static BlockBackend * coroutine_fn vmdk_co_create_cb(int64_t size, int idx, - bool flat, bool split, - bool compress, - bool zeroed_grain, - void *opaque, Error **errp) +static BlockBackend * coroutine_fn GRAPH_UNLOCKED +vmdk_co_create_cb(int64_t size, int idx, bool flat, bool split, bool compress, + bool zeroed_grain, void *opaque, Error **errp) { int ret; BlockDriverState *bs; @@ -2797,19 +2794,19 @@ static BlockBackend * coroutine_fn vmdk_co_create_cb(int64_t size, int idx, return NULL; } blk_set_allow_write_beyond_eof(blk, true); - bdrv_unref(bs); + bdrv_co_unref(bs); if (size != -1) { ret = vmdk_init_extent(blk, size, flat, compress, zeroed_grain, errp); if (ret) { - blk_unref(blk); + blk_co_unref(blk); blk = NULL; } } return blk; } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED vmdk_co_create(BlockdevCreateOptions *create_options, Error **errp) { BlockdevCreateOptionsVmdk *opts; @@ -2845,7 +2842,7 @@ static void vmdk_close(BlockDriverState *bs) error_free(s->migration_blocker); } -static int64_t coroutine_fn +static int64_t coroutine_fn GRAPH_RDLOCK vmdk_co_get_allocated_file_size(BlockDriverState *bs) { int i; diff --git a/block/vpc.c b/block/vpc.c index b89b0ff8e2..7ee7c7b4e0 100644 --- a/block/vpc.c +++ b/block/vpc.c @@ -967,8 +967,8 @@ static int calculate_rounded_image_size(BlockdevCreateOptionsVpc *vpc_opts, return 0; } -static int coroutine_fn vpc_co_create(BlockdevCreateOptions *opts, - Error **errp) +static int coroutine_fn GRAPH_UNLOCKED +vpc_co_create(BlockdevCreateOptions *opts, Error **errp) { BlockdevCreateOptionsVpc *vpc_opts; BlockBackend *blk = NULL; @@ -1082,12 +1082,12 @@ static int coroutine_fn vpc_co_create(BlockdevCreateOptions *opts, } out: - blk_unref(blk); - bdrv_unref(bs); + blk_co_unref(blk); + bdrv_co_unref(bs); return ret; } -static int coroutine_fn GRAPH_RDLOCK +static int coroutine_fn GRAPH_UNLOCKED 
vpc_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp) { @@ -1162,7 +1162,7 @@ vpc_co_create_opts(BlockDriver *drv, const char *filename, fail: qobject_unref(qdict); - bdrv_unref(bs); + bdrv_co_unref(bs); qapi_free_BlockdevCreateOptions(create_options); return ret; } diff --git a/block/vvfat.c b/block/vvfat.c index d7d775bd2c..0ddc91fc09 100644 --- a/block/vvfat.c +++ b/block/vvfat.c @@ -1053,7 +1053,7 @@ static BDRVVVFATState *vvv = NULL; #endif static int enable_write_target(BlockDriverState *bs, Error **errp); -static int is_consistent(BDRVVVFATState *s); +static int coroutine_fn is_consistent(BDRVVVFATState *s); static QemuOptsList runtime_opts = { .name = "vvfat", @@ -1469,8 +1469,8 @@ static void print_mapping(const mapping_t* mapping) } #endif -static int vvfat_read(BlockDriverState *bs, int64_t sector_num, - uint8_t *buf, int nb_sectors) +static int coroutine_fn GRAPH_RDLOCK +vvfat_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors) { BDRVVVFATState *s = bs->opaque; int i; @@ -1490,8 +1490,8 @@ static int vvfat_read(BlockDriverState *bs, int64_t sector_num, DLOG(fprintf(stderr, "sectors %" PRId64 "+%" PRId64 " allocated\n", sector_num, n >> BDRV_SECTOR_BITS)); - if (bdrv_pread(s->qcow, sector_num * BDRV_SECTOR_SIZE, n, - buf + i * 0x200, 0) < 0) { + if (bdrv_co_pread(s->qcow, sector_num * BDRV_SECTOR_SIZE, n, + buf + i * 0x200, 0) < 0) { return -1; } i += (n >> BDRV_SECTOR_BITS) - 1; @@ -1532,7 +1532,7 @@ static int vvfat_read(BlockDriverState *bs, int64_t sector_num, return 0; } -static int coroutine_fn +static int coroutine_fn GRAPH_RDLOCK vvfat_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { @@ -1796,8 +1796,8 @@ static inline uint32_t modified_fat_get(BDRVVVFATState* s, } } -static inline bool cluster_was_modified(BDRVVVFATState *s, - uint32_t cluster_num) +static inline bool coroutine_fn GRAPH_RDLOCK +cluster_was_modified(BDRVVVFATState *s, uint32_t cluster_num) { int was_modified = 0; int i; @@ -1852,8 +1852,8 @@ typedef enum { * Further, the files/directories handled by this function are * assumed to be *not* deleted (and *only* those). */ -static uint32_t get_cluster_count_for_direntry(BDRVVVFATState* s, - direntry_t* direntry, const char* path) +static uint32_t coroutine_fn GRAPH_RDLOCK +get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const char* path) { /* * This is a little bit tricky: @@ -1979,9 +1979,9 @@ static uint32_t get_cluster_count_for_direntry(BDRVVVFATState* s, if (res) { return -1; } - res = bdrv_pwrite(s->qcow, offset * BDRV_SECTOR_SIZE, - BDRV_SECTOR_SIZE, s->cluster_buffer, - 0); + res = bdrv_co_pwrite(s->qcow, offset * BDRV_SECTOR_SIZE, + BDRV_SECTOR_SIZE, s->cluster_buffer, + 0); if (res < 0) { return -2; } @@ -2011,8 +2011,8 @@ static uint32_t get_cluster_count_for_direntry(BDRVVVFATState* s, * It returns 0 upon inconsistency or error, and the number of clusters * used by the directory, its subdirectories and their files. 
*/ -static int check_directory_consistency(BDRVVVFATState *s, - int cluster_num, const char* path) +static int coroutine_fn GRAPH_RDLOCK +check_directory_consistency(BDRVVVFATState *s, int cluster_num, const char* path) { int ret = 0; unsigned char* cluster = g_malloc(s->cluster_size); @@ -2138,7 +2138,8 @@ DLOG(fprintf(stderr, "check direntry %d:\n", i); print_direntry(direntries + i)) } /* returns 1 on success */ -static int is_consistent(BDRVVVFATState* s) +static int coroutine_fn GRAPH_RDLOCK +is_consistent(BDRVVVFATState* s) { int i, check; int used_clusters_count = 0; @@ -2414,8 +2415,8 @@ static int commit_mappings(BDRVVVFATState* s, return 0; } -static int commit_direntries(BDRVVVFATState* s, - int dir_index, int parent_mapping_index) +static int coroutine_fn GRAPH_RDLOCK +commit_direntries(BDRVVVFATState* s, int dir_index, int parent_mapping_index) { direntry_t* direntry = array_get(&(s->directory), dir_index); uint32_t first_cluster = dir_index == 0 ? 0 : begin_of_direntry(direntry); @@ -2504,8 +2505,8 @@ static int commit_direntries(BDRVVVFATState* s, /* commit one file (adjust contents, adjust mapping), return first_mapping_index */ -static int commit_one_file(BDRVVVFATState* s, - int dir_index, uint32_t offset) +static int coroutine_fn GRAPH_RDLOCK +commit_one_file(BDRVVVFATState* s, int dir_index, uint32_t offset) { direntry_t* direntry = array_get(&(s->directory), dir_index); uint32_t c = begin_of_direntry(direntry); @@ -2770,7 +2771,7 @@ static int handle_renames_and_mkdirs(BDRVVVFATState* s) /* * TODO: make sure that the short name is not matching *another* file */ -static int handle_commits(BDRVVVFATState* s) +static int coroutine_fn GRAPH_RDLOCK handle_commits(BDRVVVFATState* s) { int i, fail = 0; @@ -2784,13 +2785,10 @@ static int handle_commits(BDRVVVFATState* s) fail = -2; break; case ACTION_WRITEOUT: { -#ifndef NDEBUG - /* these variables are only used by assert() below */ direntry_t* entry = array_get(&(s->directory), commit->param.writeout.dir_index); uint32_t begin = begin_of_direntry(entry); mapping_t* mapping = find_mapping_for_cluster(s, begin); -#endif assert(mapping); assert(mapping->begin == begin); @@ -2916,7 +2914,7 @@ static int handle_deletes(BDRVVVFATState* s) * - recurse direntries from root (using bs->bdrv_pread) * - delete files corresponding to mappings marked as deleted */ -static int do_commit(BDRVVVFATState* s) +static int coroutine_fn GRAPH_RDLOCK do_commit(BDRVVVFATState* s) { int ret = 0; @@ -2966,7 +2964,7 @@ DLOG(checkpoint()); return 0; } -static int try_commit(BDRVVVFATState* s) +static int coroutine_fn GRAPH_RDLOCK try_commit(BDRVVVFATState* s) { vvfat_close_current_file(s); DLOG(checkpoint()); @@ -2975,8 +2973,9 @@ DLOG(checkpoint()); return do_commit(s); } -static int vvfat_write(BlockDriverState *bs, int64_t sector_num, - const uint8_t *buf, int nb_sectors) +static int coroutine_fn GRAPH_RDLOCK +vvfat_write(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors) { BDRVVVFATState *s = bs->opaque; int i, ret; @@ -3085,8 +3084,8 @@ DLOG(checkpoint()); * Use qcow backend. Commit later. 
*/ DLOG(fprintf(stderr, "Write to qcow backend: %d + %d\n", (int)sector_num, nb_sectors)); - ret = bdrv_pwrite(s->qcow, sector_num * BDRV_SECTOR_SIZE, - nb_sectors * BDRV_SECTOR_SIZE, buf, 0); + ret = bdrv_co_pwrite(s->qcow, sector_num * BDRV_SECTOR_SIZE, + nb_sectors * BDRV_SECTOR_SIZE, buf, 0); if (ret < 0) { fprintf(stderr, "Error writing to qcow backend\n"); return ret; @@ -3106,7 +3105,7 @@ DLOG(checkpoint()); return 0; } -static int coroutine_fn +static int coroutine_fn GRAPH_RDLOCK vvfat_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { diff --git a/block/win32-aio.c b/block/win32-aio.c index ee87d6048f..6327861e1d 100644 --- a/block/win32-aio.c +++ b/block/win32-aio.c @@ -174,7 +174,7 @@ int win32_aio_attach(QEMUWin32AIOState *aio, HANDLE hfile) void win32_aio_detach_aio_context(QEMUWin32AIOState *aio, AioContext *old_context) { - aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL, NULL); + aio_set_event_notifier(old_context, &aio->e, NULL, NULL, NULL); aio->aio_ctx = NULL; } @@ -182,8 +182,8 @@ void win32_aio_attach_aio_context(QEMUWin32AIOState *aio, AioContext *new_context) { aio->aio_ctx = new_context; - aio_set_event_notifier(new_context, &aio->e, false, - win32_aio_completion_cb, NULL, NULL); + aio_set_event_notifier(new_context, &aio->e, win32_aio_completion_cb, + NULL, NULL); } QEMUWin32AIOState *win32_aio_init(void) diff --git a/blockdev.c b/blockdev.c index d7b5c18f0a..db2725fe74 100644 --- a/blockdev.c +++ b/blockdev.c @@ -153,12 +153,22 @@ void blockdev_mark_auto_del(BlockBackend *blk) JOB_LOCK_GUARD(); - for (job = block_job_next_locked(NULL); job; - job = block_job_next_locked(job)) { - if (block_job_has_bdrv(job, blk_bs(blk))) { + do { + job = block_job_next_locked(NULL); + while (job && (job->job.cancelled || + job->job.deferred_to_main_loop || + !block_job_has_bdrv(job, blk_bs(blk)))) + { + job = block_job_next_locked(job); + } + if (job) { + /* + * This drops the job lock temporarily and polls, so we need to + * restart processing the list from the start after this. + */ job_cancel_locked(&job->job, false); } - } + } while (job); dinfo->auto_del = 1; } @@ -652,6 +662,7 @@ err_no_opts: /* Takes the ownership of bs_opts */ BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp) { + BlockDriverState *bs; int bdrv_flags = 0; GLOBAL_STATE_CODE(); @@ -666,7 +677,11 @@ BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp) bdrv_flags |= BDRV_O_INACTIVE; } - return bdrv_open(NULL, NULL, bs_opts, bdrv_flags, errp); + aio_context_acquire(qemu_get_aio_context()); + bs = bdrv_open(NULL, NULL, bs_opts, bdrv_flags, errp); + aio_context_release(qemu_get_aio_context()); + + return bs; } void blockdev_close_all_bdrv_states(void) @@ -1178,79 +1193,22 @@ out_aio_context: return NULL; } -/* New and old BlockDriverState structs for atomic group operations */ - -typedef struct BlkActionState BlkActionState; - -/** - * BlkActionOps: - * Table of operations that define an Action. - * - * @instance_size: Size of state struct, in bytes. - * @prepare: Prepare the work, must NOT be NULL. - * @commit: Commit the changes, can be NULL. - * @abort: Abort the changes on fail, can be NULL. - * @clean: Clean up resources after all transaction actions have called - * commit() or abort(). Can be NULL. - * - * Only prepare() may fail. In a single transaction, only one of commit() or - * abort() will be called. clean() will always be called if it is present. - * - * Always run under BQL. 
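The blockdev_mark_auto_del() rework above replaces a plain for-loop with a restart-from-the-head scan, because, as its new comment says, job_cancel_locked() can temporarily drop the job lock and poll, invalidating the iteration cursor. The control flow in isolation, as a self-contained toy (all names hypothetical):

#include <stdbool.h>

typedef struct Node { struct Node *next; bool wanted; } Node;

/* In blockdev.c the cancel step may drop the lock and poll, so the list
 * can change under us; each round therefore rescans from the head. */
static void cancel_node(Node *n) { n->wanted = false; }

static void cancel_all_wanted(Node *head)
{
    Node *n;

    do {
        n = head;
        while (n && !n->wanted) {
            n = n->next;
        }
        if (n) {
            cancel_node(n);    /* list may have changed; restart the scan */
        }
    } while (n);
}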
- */ -typedef struct BlkActionOps { - size_t instance_size; - void (*prepare)(BlkActionState *common, Error **errp); - void (*commit)(BlkActionState *common); - void (*abort)(BlkActionState *common); - void (*clean)(BlkActionState *common); -} BlkActionOps; - -/** - * BlkActionState: - * Describes one Action's state within a Transaction. - * - * @action: QAPI-defined enum identifying which Action to perform. - * @ops: Table of ActionOps this Action can perform. - * @block_job_txn: Transaction which this action belongs to. - * @entry: List membership for all Actions in this Transaction. - * - * This structure must be arranged as first member in a subclassed type, - * assuming that the compiler will also arrange it to the same offsets as the - * base class. - */ -struct BlkActionState { - TransactionAction *action; - const BlkActionOps *ops; - JobTxn *block_job_txn; - TransactionProperties *txn_props; - QTAILQ_ENTRY(BlkActionState) entry; -}; - /* internal snapshot private data */ typedef struct InternalSnapshotState { - BlkActionState common; BlockDriverState *bs; QEMUSnapshotInfo sn; bool created; } InternalSnapshotState; +static void internal_snapshot_abort(void *opaque); +static void internal_snapshot_clean(void *opaque); +TransactionActionDrv internal_snapshot_drv = { + .abort = internal_snapshot_abort, + .clean = internal_snapshot_clean, +}; -static int action_check_completion_mode(BlkActionState *s, Error **errp) -{ - if (s->txn_props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) { - error_setg(errp, - "Action '%s' does not support Transaction property " - "completion-mode = %s", - TransactionActionKind_str(s->action->type), - ActionCompletionMode_str(s->txn_props->completion_mode)); - return -1; - } - return 0; -} - -static void internal_snapshot_prepare(BlkActionState *common, - Error **errp) +static void internal_snapshot_action(BlockdevSnapshotInternal *internal, + Transaction *tran, Error **errp) { Error *local_err = NULL; const char *device; @@ -1259,25 +1217,15 @@ static void internal_snapshot_prepare(BlkActionState *common, QEMUSnapshotInfo old_sn, *sn; bool ret; int64_t rt; - BlockdevSnapshotInternal *internal; - InternalSnapshotState *state; + InternalSnapshotState *state = g_new0(InternalSnapshotState, 1); AioContext *aio_context; int ret1; - g_assert(common->action->type == - TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC); - internal = common->action->u.blockdev_snapshot_internal_sync.data; - state = DO_UPCAST(InternalSnapshotState, common, common); + tran_add(tran, &internal_snapshot_drv, state); - /* 1. parse input */ device = internal->device; name = internal->name; - /* 2. 
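This is the heart of the blockdev.c refactor: the hand-rolled BlkActionState/BlkActionOps machinery deleted above gives way to the generic Transaction API. Each action allocates its own state struct, registers a TransactionActionDrv with tran_add(), and the transaction core later invokes .commit/.abort/.clean with that state as the opaque pointer, so the DO_UPCAST() base-class gymnastics disappear. A minimal sketch of an action in the new style (example names hypothetical; TransactionActionDrv, tran_add() and the .clean = g_free trick are the real interfaces used in this file):

typedef struct ExampleState {
    bool prepared;
} ExampleState;

static void example_abort(void *opaque)
{
    ExampleState *state = opaque;

    if (state->prepared) {
        /* undo whatever example_action() managed to do */
    }
}

static TransactionActionDrv example_drv = {
    .abort = example_abort,
    .clean = g_free,               /* state was allocated with g_new0() */
};

static void example_action(Transaction *tran, Error **errp)
{
    ExampleState *state = g_new0(ExampleState, 1);

    /* register first: .abort/.clean now run even if we fail below */
    tran_add(tran, &example_drv, state);

    /* ... do the preparing work; on failure, error_setg() and return,
     * and the registered callbacks unwind via the transaction core */
    state->prepared = true;
}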
check for validation */ - if (action_check_completion_mode(common, errp) < 0) { - return; - } - bs = qmp_get_root_bs(device, errp); if (!bs) { return; @@ -1353,10 +1301,9 @@ out: aio_context_release(aio_context); } -static void internal_snapshot_abort(BlkActionState *common) +static void internal_snapshot_abort(void *opaque) { - InternalSnapshotState *state = - DO_UPCAST(InternalSnapshotState, common, common); + InternalSnapshotState *state = opaque; BlockDriverState *bs = state->bs; QEMUSnapshotInfo *sn = &state->sn; AioContext *aio_context; @@ -1380,10 +1327,9 @@ static void internal_snapshot_abort(BlkActionState *common) aio_context_release(aio_context); } -static void internal_snapshot_clean(BlkActionState *common) +static void internal_snapshot_clean(void *opaque) { - InternalSnapshotState *state = DO_UPCAST(InternalSnapshotState, - common, common); + g_autofree InternalSnapshotState *state = opaque; AioContext *aio_context; if (!state->bs) { @@ -1400,14 +1346,22 @@ static void internal_snapshot_clean(BlkActionState *common) /* external snapshot private data */ typedef struct ExternalSnapshotState { - BlkActionState common; BlockDriverState *old_bs; BlockDriverState *new_bs; bool overlay_appended; } ExternalSnapshotState; -static void external_snapshot_prepare(BlkActionState *common, - Error **errp) +static void external_snapshot_commit(void *opaque); +static void external_snapshot_abort(void *opaque); +static void external_snapshot_clean(void *opaque); +TransactionActionDrv external_snapshot_drv = { + .commit = external_snapshot_commit, + .abort = external_snapshot_abort, + .clean = external_snapshot_clean, +}; + +static void external_snapshot_action(TransactionAction *action, + Transaction *tran, Error **errp) { int ret; int flags = 0; @@ -1420,12 +1374,12 @@ static void external_snapshot_prepare(BlkActionState *common, const char *snapshot_ref; /* File name of the new image (for 'blockdev-snapshot-sync') */ const char *new_image_file; - ExternalSnapshotState *state = - DO_UPCAST(ExternalSnapshotState, common, common); - TransactionAction *action = common->action; + ExternalSnapshotState *state = g_new0(ExternalSnapshotState, 1); AioContext *aio_context; uint64_t perm, shared; + tran_add(tran, &external_snapshot_drv, state); + /* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar * purpose but a different set of parameters */ switch (action->type) { @@ -1452,9 +1406,6 @@ static void external_snapshot_prepare(BlkActionState *common, } /* start processing */ - if (action_check_completion_mode(common, errp) < 0) { - return; - } state->old_bs = bdrv_lookup_bs(device, node_name, errp); if (!state->old_bs) { @@ -1534,14 +1485,20 @@ static void external_snapshot_prepare(BlkActionState *common, } qdict_put_str(options, "driver", format); } + aio_context_release(aio_context); + aio_context_acquire(qemu_get_aio_context()); state->new_bs = bdrv_open(new_image_file, snapshot_ref, options, flags, errp); + aio_context_release(qemu_get_aio_context()); + /* We will manually add the backing_hd field to the bs later */ if (!state->new_bs) { - goto out; + return; } + aio_context_acquire(aio_context); + /* * Allow attaching a backing file to an overlay that's already in use only * if the parents don't assume that they are already seeing a valid image. 
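The other recurring change in these actions is the AioContext dance around bdrv_open(): the node's context is released, the main-loop context is taken just for the open, then released again, with the node's context re-acquired where the code continues. This matches the bds_tree_init() change earlier in blockdev.c and suggests bdrv_open() is now expected to run under the main AioContext; take that reading as an inference from the pattern. A hedged fragment of the shape used above:

/* assume: aio_context is the node's context and is currently held */
aio_context_release(aio_context);

aio_context_acquire(qemu_get_aio_context());
new_bs = bdrv_open(filename, NULL, options, flags, errp);
aio_context_release(qemu_get_aio_context());

if (new_bs) {
    aio_context_acquire(aio_context);  /* continue under the node's context */
}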
@@ -1578,10 +1535,9 @@ out: aio_context_release(aio_context); } -static void external_snapshot_commit(BlkActionState *common) +static void external_snapshot_commit(void *opaque) { - ExternalSnapshotState *state = - DO_UPCAST(ExternalSnapshotState, common, common); + ExternalSnapshotState *state = opaque; AioContext *aio_context; aio_context = bdrv_get_aio_context(state->old_bs); @@ -1597,10 +1553,9 @@ static void external_snapshot_commit(BlkActionState *common) aio_context_release(aio_context); } -static void external_snapshot_abort(BlkActionState *common) +static void external_snapshot_abort(void *opaque) { - ExternalSnapshotState *state = - DO_UPCAST(ExternalSnapshotState, common, common); + ExternalSnapshotState *state = opaque; if (state->new_bs) { if (state->overlay_appended) { AioContext *aio_context; @@ -1640,10 +1595,9 @@ static void external_snapshot_abort(BlkActionState *common) } } -static void external_snapshot_clean(BlkActionState *common) +static void external_snapshot_clean(void *opaque) { - ExternalSnapshotState *state = - DO_UPCAST(ExternalSnapshotState, common, common); + g_autofree ExternalSnapshotState *state = opaque; AioContext *aio_context; if (!state->old_bs) { @@ -1660,7 +1614,6 @@ static void external_snapshot_clean(BlkActionState *common) } typedef struct DriveBackupState { - BlkActionState common; BlockDriverState *bs; BlockJob *job; } DriveBackupState; @@ -1671,10 +1624,20 @@ static BlockJob *do_backup_common(BackupCommon *backup, AioContext *aio_context, JobTxn *txn, Error **errp); -static void drive_backup_prepare(BlkActionState *common, Error **errp) +static void drive_backup_commit(void *opaque); +static void drive_backup_abort(void *opaque); +static void drive_backup_clean(void *opaque); +TransactionActionDrv drive_backup_drv = { + .commit = drive_backup_commit, + .abort = drive_backup_abort, + .clean = drive_backup_clean, +}; + +static void drive_backup_action(DriveBackup *backup, + JobTxn *block_job_txn, + Transaction *tran, Error **errp) { - DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); - DriveBackup *backup; + DriveBackupState *state = g_new0(DriveBackupState, 1); BlockDriverState *bs; BlockDriverState *target_bs; BlockDriverState *source = NULL; @@ -1688,8 +1651,7 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp) bool set_backing_hd = false; int ret; - assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP); - backup = common->action->u.drive_backup.data; + tran_add(tran, &drive_backup_drv, state); if (!backup->has_mode) { backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; @@ -1781,15 +1743,18 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp) if (format) { qdict_put_str(options, "driver", format); } + aio_context_release(aio_context); + aio_context_acquire(qemu_get_aio_context()); target_bs = bdrv_open(backup->target, NULL, options, flags, errp); + aio_context_release(qemu_get_aio_context()); + if (!target_bs) { - goto out; + return; } /* Honor bdrv_try_change_aio_context() context acquisition requirements. 
*/ old_context = bdrv_get_aio_context(target_bs); - aio_context_release(aio_context); aio_context_acquire(old_context); ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); @@ -1810,7 +1775,7 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp) state->job = do_backup_common(qapi_DriveBackup_base(backup), bs, target_bs, aio_context, - common->block_job_txn, errp); + block_job_txn, errp); unref: bdrv_unref(target_bs); @@ -1818,9 +1783,9 @@ out: aio_context_release(aio_context); } -static void drive_backup_commit(BlkActionState *common) +static void drive_backup_commit(void *opaque) { - DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); + DriveBackupState *state = opaque; AioContext *aio_context; aio_context = bdrv_get_aio_context(state->bs); @@ -1832,18 +1797,18 @@ static void drive_backup_commit(BlkActionState *common) aio_context_release(aio_context); } -static void drive_backup_abort(BlkActionState *common) +static void drive_backup_abort(void *opaque) { - DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); + DriveBackupState *state = opaque; if (state->job) { job_cancel_sync(&state->job->job, true); } } -static void drive_backup_clean(BlkActionState *common) +static void drive_backup_clean(void *opaque) { - DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); + g_autofree DriveBackupState *state = opaque; AioContext *aio_context; if (!state->bs) { @@ -1859,23 +1824,31 @@ static void drive_backup_clean(BlkActionState *common) } typedef struct BlockdevBackupState { - BlkActionState common; BlockDriverState *bs; BlockJob *job; } BlockdevBackupState; -static void blockdev_backup_prepare(BlkActionState *common, Error **errp) +static void blockdev_backup_commit(void *opaque); +static void blockdev_backup_abort(void *opaque); +static void blockdev_backup_clean(void *opaque); +TransactionActionDrv blockdev_backup_drv = { + .commit = blockdev_backup_commit, + .abort = blockdev_backup_abort, + .clean = blockdev_backup_clean, +}; + +static void blockdev_backup_action(BlockdevBackup *backup, + JobTxn *block_job_txn, + Transaction *tran, Error **errp) { - BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common); - BlockdevBackup *backup; + BlockdevBackupState *state = g_new0(BlockdevBackupState, 1); BlockDriverState *bs; BlockDriverState *target_bs; AioContext *aio_context; AioContext *old_context; int ret; - assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP); - backup = common->action->u.blockdev_backup.data; + tran_add(tran, &blockdev_backup_drv, state); bs = bdrv_lookup_bs(backup->device, backup->device, errp); if (!bs) { @@ -1907,14 +1880,14 @@ static void blockdev_backup_prepare(BlkActionState *common, Error **errp) state->job = do_backup_common(qapi_BlockdevBackup_base(backup), bs, target_bs, aio_context, - common->block_job_txn, errp); + block_job_txn, errp); aio_context_release(aio_context); } -static void blockdev_backup_commit(BlkActionState *common) +static void blockdev_backup_commit(void *opaque) { - BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common); + BlockdevBackupState *state = opaque; AioContext *aio_context; aio_context = bdrv_get_aio_context(state->bs); @@ -1926,18 +1899,18 @@ static void blockdev_backup_commit(BlkActionState *common) aio_context_release(aio_context); } -static void blockdev_backup_abort(BlkActionState *common) +static void blockdev_backup_abort(void *opaque) { - BlockdevBackupState *state = 
DO_UPCAST(BlockdevBackupState, common, common); + BlockdevBackupState *state = opaque; if (state->job) { job_cancel_sync(&state->job->job, true); } } -static void blockdev_backup_clean(BlkActionState *common) +static void blockdev_backup_clean(void *opaque) { - BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common); + g_autofree BlockdevBackupState *state = opaque; AioContext *aio_context; if (!state->bs) { @@ -1953,27 +1926,26 @@ static void blockdev_backup_clean(BlkActionState *common) } typedef struct BlockDirtyBitmapState { - BlkActionState common; BdrvDirtyBitmap *bitmap; BlockDriverState *bs; HBitmap *backup; - bool prepared; bool was_enabled; } BlockDirtyBitmapState; -static void block_dirty_bitmap_add_prepare(BlkActionState *common, - Error **errp) +static void block_dirty_bitmap_add_abort(void *opaque); +TransactionActionDrv block_dirty_bitmap_add_drv = { + .abort = block_dirty_bitmap_add_abort, + .clean = g_free, +}; + +static void block_dirty_bitmap_add_action(BlockDirtyBitmapAdd *action, + Transaction *tran, Error **errp) { Error *local_err = NULL; - BlockDirtyBitmapAdd *action; - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); + BlockDirtyBitmapState *state = g_new0(BlockDirtyBitmapState, 1); - if (action_check_completion_mode(common, errp) < 0) { - return; - } + tran_add(tran, &block_dirty_bitmap_add_drv, state); - action = common->action->u.block_dirty_bitmap_add.data; /* AIO context taken and released within qmp_block_dirty_bitmap_add */ qmp_block_dirty_bitmap_add(action->node, action->name, action->has_granularity, action->granularity, @@ -1982,39 +1954,37 @@ static void block_dirty_bitmap_add_prepare(BlkActionState *common, &local_err); if (!local_err) { - state->prepared = true; + state->bitmap = block_dirty_bitmap_lookup(action->node, action->name, + NULL, &error_abort); } else { error_propagate(errp, local_err); } } -static void block_dirty_bitmap_add_abort(BlkActionState *common) +static void block_dirty_bitmap_add_abort(void *opaque) { - BlockDirtyBitmapAdd *action; - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); + BlockDirtyBitmapState *state = opaque; - action = common->action->u.block_dirty_bitmap_add.data; - /* Should not be able to fail: IF the bitmap was added via .prepare(), - * then the node reference and bitmap name must have been valid. 
- */ - if (state->prepared) { - qmp_block_dirty_bitmap_remove(action->node, action->name, &error_abort); + if (state->bitmap) { + bdrv_release_dirty_bitmap(state->bitmap); } } -static void block_dirty_bitmap_clear_prepare(BlkActionState *common, - Error **errp) +static void block_dirty_bitmap_restore(void *opaque); +static void block_dirty_bitmap_free_backup(void *opaque); +TransactionActionDrv block_dirty_bitmap_clear_drv = { + .abort = block_dirty_bitmap_restore, + .commit = block_dirty_bitmap_free_backup, + .clean = g_free, +}; + +static void block_dirty_bitmap_clear_action(BlockDirtyBitmap *action, + Transaction *tran, Error **errp) { - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); - BlockDirtyBitmap *action; + BlockDirtyBitmapState *state = g_new0(BlockDirtyBitmapState, 1); - if (action_check_completion_mode(common, errp) < 0) { - return; - } + tran_add(tran, &block_dirty_bitmap_clear_drv, state); - action = common->action->u.block_dirty_bitmap_clear.data; state->bitmap = block_dirty_bitmap_lookup(action->node, action->name, &state->bs, @@ -2030,36 +2000,35 @@ static void block_dirty_bitmap_clear_prepare(BlkActionState *common, bdrv_clear_dirty_bitmap(state->bitmap, &state->backup); } -static void block_dirty_bitmap_restore(BlkActionState *common) +static void block_dirty_bitmap_restore(void *opaque) { - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); + BlockDirtyBitmapState *state = opaque; if (state->backup) { bdrv_restore_dirty_bitmap(state->bitmap, state->backup); } } -static void block_dirty_bitmap_free_backup(BlkActionState *common) +static void block_dirty_bitmap_free_backup(void *opaque) { - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); + BlockDirtyBitmapState *state = opaque; hbitmap_free(state->backup); } -static void block_dirty_bitmap_enable_prepare(BlkActionState *common, - Error **errp) +static void block_dirty_bitmap_enable_abort(void *opaque); +TransactionActionDrv block_dirty_bitmap_enable_drv = { + .abort = block_dirty_bitmap_enable_abort, + .clean = g_free, +}; + +static void block_dirty_bitmap_enable_action(BlockDirtyBitmap *action, + Transaction *tran, Error **errp) { - BlockDirtyBitmap *action; - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); + BlockDirtyBitmapState *state = g_new0(BlockDirtyBitmapState, 1); - if (action_check_completion_mode(common, errp) < 0) { - return; - } + tran_add(tran, &block_dirty_bitmap_enable_drv, state); - action = common->action->u.block_dirty_bitmap_enable.data; state->bitmap = block_dirty_bitmap_lookup(action->node, action->name, NULL, @@ -2076,28 +2045,28 @@ static void block_dirty_bitmap_enable_prepare(BlkActionState *common, bdrv_enable_dirty_bitmap(state->bitmap); } -static void block_dirty_bitmap_enable_abort(BlkActionState *common) +static void block_dirty_bitmap_enable_abort(void *opaque) { - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); + BlockDirtyBitmapState *state = opaque; if (!state->was_enabled) { bdrv_disable_dirty_bitmap(state->bitmap); } } -static void block_dirty_bitmap_disable_prepare(BlkActionState *common, - Error **errp) +static void block_dirty_bitmap_disable_abort(void *opaque); +TransactionActionDrv block_dirty_bitmap_disable_drv = { + .abort = block_dirty_bitmap_disable_abort, + .clean = g_free, +}; + +static void block_dirty_bitmap_disable_action(BlockDirtyBitmap *action, + Transaction *tran, Error **errp) { - BlockDirtyBitmap 
*action; - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); + BlockDirtyBitmapState *state = g_new0(BlockDirtyBitmapState, 1); - if (action_check_completion_mode(common, errp) < 0) { - return; - } + tran_add(tran, &block_dirty_bitmap_disable_drv, state); - action = common->action->u.block_dirty_bitmap_disable.data; state->bitmap = block_dirty_bitmap_lookup(action->node, action->name, NULL, @@ -2114,46 +2083,48 @@ static void block_dirty_bitmap_disable_prepare(BlkActionState *common, bdrv_disable_dirty_bitmap(state->bitmap); } -static void block_dirty_bitmap_disable_abort(BlkActionState *common) +static void block_dirty_bitmap_disable_abort(void *opaque) { - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); + BlockDirtyBitmapState *state = opaque; if (state->was_enabled) { bdrv_enable_dirty_bitmap(state->bitmap); } } -static void block_dirty_bitmap_merge_prepare(BlkActionState *common, - Error **errp) +TransactionActionDrv block_dirty_bitmap_merge_drv = { + .commit = block_dirty_bitmap_free_backup, + .abort = block_dirty_bitmap_restore, + .clean = g_free, +}; + +static void block_dirty_bitmap_merge_action(BlockDirtyBitmapMerge *action, + Transaction *tran, Error **errp) { - BlockDirtyBitmapMerge *action; - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); + BlockDirtyBitmapState *state = g_new0(BlockDirtyBitmapState, 1); - if (action_check_completion_mode(common, errp) < 0) { - return; - } - - action = common->action->u.block_dirty_bitmap_merge.data; + tran_add(tran, &block_dirty_bitmap_merge_drv, state); state->bitmap = block_dirty_bitmap_merge(action->node, action->target, action->bitmaps, &state->backup, errp); } -static void block_dirty_bitmap_remove_prepare(BlkActionState *common, - Error **errp) +static void block_dirty_bitmap_remove_commit(void *opaque); +static void block_dirty_bitmap_remove_abort(void *opaque); +TransactionActionDrv block_dirty_bitmap_remove_drv = { + .commit = block_dirty_bitmap_remove_commit, + .abort = block_dirty_bitmap_remove_abort, + .clean = g_free, +}; + +static void block_dirty_bitmap_remove_action(BlockDirtyBitmap *action, + Transaction *tran, Error **errp) { - BlockDirtyBitmap *action; - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); + BlockDirtyBitmapState *state = g_new0(BlockDirtyBitmapState, 1); - if (action_check_completion_mode(common, errp) < 0) { - return; - } + tran_add(tran, &block_dirty_bitmap_remove_drv, state); - action = common->action->u.block_dirty_bitmap_remove.data; state->bitmap = block_dirty_bitmap_remove(action->node, action->name, false, &state->bs, errp); @@ -2163,10 +2134,9 @@ static void block_dirty_bitmap_remove_prepare(BlkActionState *common, } } -static void block_dirty_bitmap_remove_abort(BlkActionState *common) +static void block_dirty_bitmap_remove_abort(void *opaque) { - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); + BlockDirtyBitmapState *state = opaque; if (state->bitmap) { bdrv_dirty_bitmap_skip_store(state->bitmap, false); @@ -2174,210 +2144,156 @@ static void block_dirty_bitmap_remove_abort(BlkActionState *common) } } -static void block_dirty_bitmap_remove_commit(BlkActionState *common) +static void block_dirty_bitmap_remove_commit(void *opaque) { - BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, - common, common); + BlockDirtyBitmapState *state = opaque; bdrv_dirty_bitmap_set_busy(state->bitmap, false); 
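/* The bitmap was marked busy by the remove action's prepare step, so nothing else could have modified or freed it before this commit runs. */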
bdrv_release_dirty_bitmap(state->bitmap); } -static void abort_prepare(BlkActionState *common, Error **errp) +static void abort_commit(void *opaque); +TransactionActionDrv abort_drv = { + .commit = abort_commit, +}; + +static void abort_action(Transaction *tran, Error **errp) { + tran_add(tran, &abort_drv, NULL); error_setg(errp, "Transaction aborted using Abort action"); } -static void abort_commit(BlkActionState *common) +static void abort_commit(void *opaque) { g_assert_not_reached(); /* this action never succeeds */ } -static const BlkActionOps actions[] = { - [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT] = { - .instance_size = sizeof(ExternalSnapshotState), - .prepare = external_snapshot_prepare, - .commit = external_snapshot_commit, - .abort = external_snapshot_abort, - .clean = external_snapshot_clean, - }, - [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC] = { - .instance_size = sizeof(ExternalSnapshotState), - .prepare = external_snapshot_prepare, - .commit = external_snapshot_commit, - .abort = external_snapshot_abort, - .clean = external_snapshot_clean, - }, - [TRANSACTION_ACTION_KIND_DRIVE_BACKUP] = { - .instance_size = sizeof(DriveBackupState), - .prepare = drive_backup_prepare, - .commit = drive_backup_commit, - .abort = drive_backup_abort, - .clean = drive_backup_clean, - }, - [TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP] = { - .instance_size = sizeof(BlockdevBackupState), - .prepare = blockdev_backup_prepare, - .commit = blockdev_backup_commit, - .abort = blockdev_backup_abort, - .clean = blockdev_backup_clean, - }, - [TRANSACTION_ACTION_KIND_ABORT] = { - .instance_size = sizeof(BlkActionState), - .prepare = abort_prepare, - .commit = abort_commit, - }, - [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC] = { - .instance_size = sizeof(InternalSnapshotState), - .prepare = internal_snapshot_prepare, - .abort = internal_snapshot_abort, - .clean = internal_snapshot_clean, - }, - [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ADD] = { - .instance_size = sizeof(BlockDirtyBitmapState), - .prepare = block_dirty_bitmap_add_prepare, - .abort = block_dirty_bitmap_add_abort, - }, - [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_CLEAR] = { - .instance_size = sizeof(BlockDirtyBitmapState), - .prepare = block_dirty_bitmap_clear_prepare, - .commit = block_dirty_bitmap_free_backup, - .abort = block_dirty_bitmap_restore, - }, - [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ENABLE] = { - .instance_size = sizeof(BlockDirtyBitmapState), - .prepare = block_dirty_bitmap_enable_prepare, - .abort = block_dirty_bitmap_enable_abort, - }, - [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_DISABLE] = { - .instance_size = sizeof(BlockDirtyBitmapState), - .prepare = block_dirty_bitmap_disable_prepare, - .abort = block_dirty_bitmap_disable_abort, - }, - [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_MERGE] = { - .instance_size = sizeof(BlockDirtyBitmapState), - .prepare = block_dirty_bitmap_merge_prepare, - .commit = block_dirty_bitmap_free_backup, - .abort = block_dirty_bitmap_restore, - }, - [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_REMOVE] = { - .instance_size = sizeof(BlockDirtyBitmapState), - .prepare = block_dirty_bitmap_remove_prepare, - .commit = block_dirty_bitmap_remove_commit, - .abort = block_dirty_bitmap_remove_abort, - }, - /* Where are transactions for MIRROR, COMMIT and STREAM? 
+static void transaction_action(TransactionAction *act, JobTxn *block_job_txn, + Transaction *tran, Error **errp) +{ + switch (act->type) { + case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT: + case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC: + external_snapshot_action(act, tran, errp); + return; + case TRANSACTION_ACTION_KIND_DRIVE_BACKUP: + drive_backup_action(act->u.drive_backup.data, + block_job_txn, tran, errp); + return; + case TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP: + blockdev_backup_action(act->u.blockdev_backup.data, + block_job_txn, tran, errp); + return; + case TRANSACTION_ACTION_KIND_ABORT: + abort_action(tran, errp); + return; + case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC: + internal_snapshot_action(act->u.blockdev_snapshot_internal_sync.data, + tran, errp); + return; + case TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ADD: + block_dirty_bitmap_add_action(act->u.block_dirty_bitmap_add.data, + tran, errp); + return; + case TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_CLEAR: + block_dirty_bitmap_clear_action(act->u.block_dirty_bitmap_clear.data, + tran, errp); + return; + case TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ENABLE: + block_dirty_bitmap_enable_action(act->u.block_dirty_bitmap_enable.data, + tran, errp); + return; + case TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_DISABLE: + block_dirty_bitmap_disable_action( + act->u.block_dirty_bitmap_disable.data, tran, errp); + return; + case TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_MERGE: + block_dirty_bitmap_merge_action(act->u.block_dirty_bitmap_merge.data, + tran, errp); + return; + case TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_REMOVE: + block_dirty_bitmap_remove_action(act->u.block_dirty_bitmap_remove.data, + tran, errp); + return; + /* + * Where are transactions for MIRROR, COMMIT and STREAM? * Although these blockjobs use transaction callbacks like the backup job, * these jobs do not necessarily adhere to transaction semantics. * These jobs may not fully undo all of their actions on abort, nor do they * necessarily work in transactions with more than one job in them. */ -}; - -/** - * Allocate a TransactionProperties structure if necessary, and fill - * that structure with desired defaults if they are unset. - */ -static TransactionProperties *get_transaction_properties( - TransactionProperties *props) -{ - if (!props) { - props = g_new0(TransactionProperties, 1); - } - - if (!props->has_completion_mode) { - props->has_completion_mode = true; - props->completion_mode = ACTION_COMPLETION_MODE_INDIVIDUAL; - } - - return props; + case TRANSACTION_ACTION_KIND__MAX: + default: + g_assert_not_reached(); + }; } + /* * 'Atomic' group operations. The operations are performed as a set, and if * any fail then we roll back all operations in the group. * * Always run under BQL. */ -void qmp_transaction(TransactionActionList *dev_list, - struct TransactionProperties *props, +void qmp_transaction(TransactionActionList *actions, + struct TransactionProperties *properties, Error **errp) { - TransactionActionList *dev_entry = dev_list; - bool has_props = !!props; + TransactionActionList *act; JobTxn *block_job_txn = NULL; - BlkActionState *state, *next; Error *local_err = NULL; + Transaction *tran; + ActionCompletionMode comp_mode = + properties ? properties->completion_mode : + ACTION_COMPLETION_MODE_INDIVIDUAL; GLOBAL_STATE_CODE(); - QTAILQ_HEAD(, BlkActionState) snap_bdrv_states; - QTAILQ_INIT(&snap_bdrv_states); - /* Does this transaction get canceled as a group on failure? 
* If not, we don't really need to make a JobTxn. */ - props = get_transaction_properties(props); - if (props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) { + if (comp_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) { + for (act = actions; act; act = act->next) { + TransactionActionKind type = act->value->type; + + if (type != TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP && + type != TRANSACTION_ACTION_KIND_DRIVE_BACKUP) + { + error_setg(errp, + "Action '%s' does not support transaction property " + "completion-mode = %s", + TransactionActionKind_str(type), + ActionCompletionMode_str(comp_mode)); + return; + } + } + block_job_txn = job_txn_new(); } /* drain all i/o before any operations */ bdrv_drain_all(); + tran = tran_new(); + /* We don't do anything in this loop that commits us to the operations */ - while (NULL != dev_entry) { - TransactionAction *dev_info = NULL; - const BlkActionOps *ops; - - dev_info = dev_entry->value; - dev_entry = dev_entry->next; - - assert(dev_info->type < ARRAY_SIZE(actions)); - - ops = &actions[dev_info->type]; - assert(ops->instance_size > 0); - - state = g_malloc0(ops->instance_size); - state->ops = ops; - state->action = dev_info; - state->block_job_txn = block_job_txn; - state->txn_props = props; - QTAILQ_INSERT_TAIL(&snap_bdrv_states, state, entry); - - state->ops->prepare(state, &local_err); + for (act = actions; act; act = act->next) { + transaction_action(act->value, block_job_txn, tran, &local_err); if (local_err) { error_propagate(errp, local_err); goto delete_and_fail; } } - QTAILQ_FOREACH(state, &snap_bdrv_states, entry) { - if (state->ops->commit) { - state->ops->commit(state); - } - } + tran_commit(tran); /* success */ goto exit; delete_and_fail: /* failure, and it is all-or-none; roll back all operations */ - QTAILQ_FOREACH_REVERSE(state, &snap_bdrv_states, entry) { - if (state->ops->abort) { - state->ops->abort(state); - } - } + tran_abort(tran); exit: - QTAILQ_FOREACH_SAFE(state, &snap_bdrv_states, entry, next) { - if (state->ops->clean) { - state->ops->clean(state); - } - g_free(state); - } - if (!has_props) { - qapi_free_TransactionProperties(props); - } job_txn_unref(block_job_txn); } @@ -2430,7 +2346,7 @@ void coroutine_fn qmp_block_resize(const char *device, const char *node_name, return; } - blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL, errp); + blk = blk_co_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL, errp); if (!blk) { return; } @@ -2445,7 +2361,7 @@ void coroutine_fn qmp_block_resize(const char *device, const char *node_name, bdrv_co_lock(bs); bdrv_drained_end(bs); - blk_unref(blk); + blk_co_unref(blk); bdrv_co_unlock(bs); } @@ -2951,6 +2867,9 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, BlockDriverState *unfiltered_bs; int job_flags = JOB_DEFAULT; + GLOBAL_STATE_CODE(); + GRAPH_RDLOCK_GUARD_MAINLOOP(); + if (!has_speed) { speed = 0; } @@ -3161,13 +3080,17 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) if (format) { qdict_put_str(options, "driver", format); } + aio_context_release(aio_context); /* Mirroring takes care of copy-on-write using the source's backing * file. */ + aio_context_acquire(qemu_get_aio_context()); target_bs = bdrv_open(arg->target, NULL, options, flags, errp); + aio_context_release(qemu_get_aio_context()); + if (!target_bs) { - goto out; + return; } zero_target = (arg->sync == MIRROR_SYNC_MODE_FULL && @@ -3177,7 +3100,6 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) /* Honor bdrv_try_change_aio_context() context acquisition requirements. 
*/ old_context = bdrv_get_aio_context(target_bs); - aio_context_release(aio_context); aio_context_acquire(old_context); ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); diff --git a/blockjob.c b/blockjob.c index 659c3cb9de..913da3cbf7 100644 --- a/blockjob.c +++ b/blockjob.c @@ -319,10 +319,28 @@ static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) return block_job_set_speed_locked(job, speed, errp); } -int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n) +void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n) { IO_CODE(); - return ratelimit_calculate_delay(&job->limit, n); + ratelimit_calculate_delay(&job->limit, n); +} + +void block_job_ratelimit_sleep(BlockJob *job) +{ + uint64_t delay_ns; + + /* + * Sleep at least once. If the job is reentered early, keep waiting until + * we've waited for the full time that is necessary to keep the job at the + * right speed. + * + * Make sure to recalculate the delay after each (possibly interrupted) + * sleep because the speed can change while the job has yielded. + */ + do { + delay_ns = ratelimit_calculate_delay(&job->limit, 0); + job_sleep_ns(&job->job, delay_ns); + } while (delay_ns && !job_is_cancelled(&job->job)); } BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp) diff --git a/bsd-user/elfload.c b/bsd-user/elfload.c index fbcdc94b96..1f650bdde8 100644 --- a/bsd-user/elfload.c +++ b/bsd-user/elfload.c @@ -352,9 +352,10 @@ static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex, static int symfind(const void *s0, const void *s1) { - target_ulong addr = *(target_ulong *)s0; struct elf_sym *sym = (struct elf_sym *)s1; + __typeof(sym->st_value) addr = *(uint64_t *)s0; int result = 0; + if (addr < sym->st_value) { result = -1; } else if (addr >= sym->st_value + sym->st_size) { @@ -363,7 +364,7 @@ static int symfind(const void *s0, const void *s1) return result; } -static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr) +static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr) { #if ELF_CLASS == ELFCLASS32 struct elf_sym *syms = s->disas_symtab.elf32; diff --git a/bsd-user/freebsd/os-sys.c b/bsd-user/freebsd/os-sys.c index 1676ec10f8..df31706558 100644 --- a/bsd-user/freebsd/os-sys.c +++ b/bsd-user/freebsd/os-sys.c @@ -21,6 +21,577 @@ #include "qemu.h" #include "target_arch_sysarch.h" +#include <sys/sysctl.h> + +/* + * Length for the fixed length types. + * 0 means variable length for strings and structures. + * Compare with sys/kern_sysctl.c ctl_size. + * Note: Not all types appear to be used in-tree.
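 + * Types that are absent from these tables stay 0 and are treated as + * variable-length data (see the hlen == 0 case in h2g_old_sysctl() below).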
+ */ +static const int guest_ctl_size[CTLTYPE + 1] = { + [CTLTYPE_INT] = sizeof(abi_int), + [CTLTYPE_UINT] = sizeof(abi_uint), + [CTLTYPE_LONG] = sizeof(abi_long), + [CTLTYPE_ULONG] = sizeof(abi_ulong), + [CTLTYPE_S8] = sizeof(int8_t), + [CTLTYPE_S16] = sizeof(int16_t), + [CTLTYPE_S32] = sizeof(int32_t), + [CTLTYPE_S64] = sizeof(int64_t), + [CTLTYPE_U8] = sizeof(uint8_t), + [CTLTYPE_U16] = sizeof(uint16_t), + [CTLTYPE_U32] = sizeof(uint32_t), + [CTLTYPE_U64] = sizeof(uint64_t), +}; + +static const int host_ctl_size[CTLTYPE + 1] = { + [CTLTYPE_INT] = sizeof(int), + [CTLTYPE_UINT] = sizeof(u_int), + [CTLTYPE_LONG] = sizeof(long), + [CTLTYPE_ULONG] = sizeof(u_long), + [CTLTYPE_S8] = sizeof(int8_t), + [CTLTYPE_S16] = sizeof(int16_t), + [CTLTYPE_S32] = sizeof(int32_t), + [CTLTYPE_S64] = sizeof(int64_t), + [CTLTYPE_U8] = sizeof(uint8_t), + [CTLTYPE_U16] = sizeof(uint16_t), + [CTLTYPE_U32] = sizeof(uint32_t), + [CTLTYPE_U64] = sizeof(uint64_t), +}; + +#ifdef TARGET_ABI32 +/* + * Limit the amount of available memory to be most of the 32-bit address + * space. 0x100c000 was arrived at through trial and error as a good + * definition of 'most'. + */ +static const abi_ulong guest_max_mem = UINT32_MAX - 0x100c000 + 1; + +static abi_ulong cap_memory(uint64_t mem) +{ + return MIN(guest_max_mem, mem); +} +#endif + +static abi_ulong scale_to_guest_pages(uint64_t pages) +{ + /* Scale pages from host to guest */ + pages = muldiv64(pages, qemu_real_host_page_size(), TARGET_PAGE_SIZE); +#ifdef TARGET_ABI32 + /* cap pages if need be */ + pages = MIN(pages, guest_max_mem / (abi_ulong)TARGET_PAGE_SIZE); +#endif + return pages; +} + +#ifdef TARGET_ABI32 +/* Used only for TARGET_ABI32 */ +static abi_long h2g_long_sat(long l) +{ + if (l > INT32_MAX) { + l = INT32_MAX; + } else if (l < INT32_MIN) { + l = INT32_MIN; + } + return l; +} + +static abi_ulong h2g_ulong_sat(u_long ul) +{ + return MIN(ul, UINT32_MAX); +} +#endif + +/* + * placeholder until bsd-user downstream upstreams this with its thread support + */ +#define bsd_get_ncpu() 1 + +/* + * This uses the undocumented oidfmt interface to find the kind of a requested + * sysctl, see /sys/kern/kern_sysctl.c:sysctl_sysctl_oidfmt() (compare to + * src/sbin/sysctl/sysctl.c) + */ +static int oidfmt(int *oid, int len, char *fmt, uint32_t *kind) +{ + int qoid[CTL_MAXNAME + 2]; + uint8_t buf[BUFSIZ]; + int i; + size_t j; + + qoid[0] = CTL_SYSCTL; + qoid[1] = CTL_SYSCTL_OIDFMT; + memcpy(qoid + 2, oid, len * sizeof(int)); + + j = sizeof(buf); + i = sysctl(qoid, len + 2, buf, &j, 0, 0); + if (i) { + return i; + } + + if (kind) { + *kind = *(uint32_t *)buf; + } + + if (fmt) { + strcpy(fmt, (char *)(buf + sizeof(uint32_t))); + } + return 0; +} + +/* + * Convert the old value from host to guest. + * + * For LONG and ULONG on ABI32, we need to 'down convert' the 8 byte quantities + * to 4 bytes. The caller sets up a buffer in host memory to get this data from + * the kernel and passes it to us. We do the down conversion and adjust the + * length so the caller knows what to write as the returned length into the + * target when it copies the down-converted values into the target. + * + * For normal integral types, we just need to byte swap. No size changes. + * + * For strings and node data, there's no conversion needed. + * + * For opaque data, per-sysctl-OID converters take care of it. + */ +static void h2g_old_sysctl(void *holdp, size_t *holdlen, uint32_t kind) +{ + size_t len; + int hlen, glen; + uint8_t *hp, *gp; + + /* + * Although rare, we can have arrays of sysctl.
Both sysctl_old_ddb in + * kern_sysctl.c and show_var in sbin/sysctl/sysctl.c have code that loops + * this way. *holdlen has been set by the kernel to the host's length. + * Only LONG and ULONG on ABI32 have different sizes: see below. + */ + gp = hp = (uint8_t *)holdp; + len = 0; + hlen = host_ctl_size[kind & CTLTYPE]; + glen = guest_ctl_size[kind & CTLTYPE]; + + /* + * hlen == 0 for CTLTYPE_STRING and CTLTYPE_NODE, which need no conversion + * as well as CTLTYPE_OPAQUE, which needs special converters. + */ + if (hlen == 0) { + return; + } + + while (len < *holdlen) { + if (hlen == glen) { + switch (hlen) { + case 1: + /* Nothing needed: no byteswapping and assigning in place */ + break; + case 2: + *(uint16_t *)gp = tswap16(*(uint16_t *)hp); + break; + case 4: + *(uint32_t *)gp = tswap32(*(uint32_t *)hp); + break; + case 8: + *(uint64_t *)gp = tswap64(*(uint64_t *)hp); + break; + default: + g_assert_not_reached(); + } + } else { +#ifdef TARGET_ABI32 + /* + * Saturating assignment for the only two types that differ between + * 32-bit and 64-bit machines. All other integral types have the + * same, fixed size and will be converted w/o loss of precision + * in the above switch. + */ + switch (kind & CTLTYPE) { + case CTLTYPE_LONG: + *(abi_long *)gp = tswap32(h2g_long_sat(*(long *)hp)); + break; + case CTLTYPE_ULONG: + *(abi_ulong *)gp = tswap32(h2g_ulong_sat(*(u_long *)hp)); + break; + default: + g_assert_not_reached(); + } +#else + g_assert_not_reached(); +#endif + } + gp += glen; + hp += hlen; + len += hlen; + } +#ifdef TARGET_ABI32 + if (hlen != glen) { + *holdlen = (*holdlen / hlen) * glen; + } +#endif +} + +/* + * Convert the undocumented name2oid sysctl data for the target. + */ +static inline void sysctl_name2oid(uint32_t *holdp, size_t holdlen) +{ + size_t i, num = holdlen / sizeof(uint32_t); + + for (i = 0; i < num; i++) { + holdp[i] = tswap32(holdp[i]); + } +} + +static inline void sysctl_oidfmt(uint32_t *holdp) +{ + /* byte swap the kind */ + holdp[0] = tswap32(holdp[0]); +} + +static abi_long do_freebsd_sysctl_oid(CPUArchState *env, int32_t *snamep, + int32_t namelen, void *holdp, size_t *holdlenp, void *hnewp, + size_t newlen) +{ + uint32_t kind = 0; + abi_long ret; + size_t holdlen, oldlen; +#ifdef TARGET_ABI32 + void *old_holdp; +#endif + + holdlen = oldlen = *holdlenp; + oidfmt(snamep, namelen, NULL, &kind); + + /* Handle some arch/emulator dependent sysctl()'s here.
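 + * They must reflect the guest, not the host: e.g. KERN_USRSTACK returns + * TARGET_USRSTACK and HW_MACHINE returns TARGET_HW_MACHINE below.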
*/ + switch (snamep[0]) { + case CTL_KERN: + switch (snamep[1]) { + case KERN_USRSTACK: + if (oldlen) { + (*(abi_ulong *)holdp) = tswapal(TARGET_USRSTACK); + } + holdlen = sizeof(abi_ulong); + ret = 0; + goto out; + + case KERN_PS_STRINGS: + if (oldlen) { + (*(abi_ulong *)holdp) = tswapal(TARGET_PS_STRINGS); + } + holdlen = sizeof(abi_ulong); + ret = 0; + goto out; + + default: + break; + } + break; + + case CTL_HW: + switch (snamep[1]) { + case HW_MACHINE: + holdlen = sizeof(TARGET_HW_MACHINE); + if (holdp) { + strlcpy(holdp, TARGET_HW_MACHINE, oldlen); + } + ret = 0; + goto out; + + case HW_MACHINE_ARCH: + { + holdlen = sizeof(TARGET_HW_MACHINE_ARCH); + if (holdp) { + strlcpy(holdp, TARGET_HW_MACHINE_ARCH, oldlen); + } + ret = 0; + goto out; + } + case HW_NCPU: + if (oldlen) { + (*(abi_int *)holdp) = tswap32(bsd_get_ncpu()); + } + holdlen = sizeof(int32_t); + ret = 0; + goto out; +#if defined(TARGET_ARM) + case HW_FLOATINGPT: + if (oldlen) { + ARMCPU *cpu = env_archcpu(env); + *(abi_int *)holdp = cpu_isar_feature(aa32_vfp, cpu); + } + holdlen = sizeof(abi_int); + ret = 0; + goto out; +#endif + + +#ifdef TARGET_ABI32 + case HW_PHYSMEM: + case HW_USERMEM: + case HW_REALMEM: + holdlen = sizeof(abi_ulong); + ret = 0; + + if (oldlen) { + int mib[2] = {snamep[0], snamep[1]}; + unsigned long lvalue; + size_t len = sizeof(lvalue); + + if (sysctl(mib, 2, &lvalue, &len, NULL, 0) == -1) { + ret = -1; + } else { + lvalue = cap_memory(lvalue); + (*(abi_ulong *)holdp) = tswapal((abi_ulong)lvalue); + } + } + goto out; +#endif + + default: + { + static int oid_hw_availpages; + static int oid_hw_pagesizes; + + if (!oid_hw_availpages) { + int real_oid[CTL_MAXNAME + 2]; + size_t len = sizeof(real_oid) / sizeof(int); + + if (sysctlnametomib("hw.availpages", real_oid, &len) >= 0) { + oid_hw_availpages = real_oid[1]; + } + } + if (!oid_hw_pagesizes) { + int real_oid[CTL_MAXNAME + 2]; + size_t len = sizeof(real_oid) / sizeof(int); + + if (sysctlnametomib("hw.pagesizes", real_oid, &len) >= 0) { + oid_hw_pagesizes = real_oid[1]; + } + } + + if (oid_hw_availpages && snamep[1] == oid_hw_availpages) { + long lvalue; + size_t len = sizeof(lvalue); + + if (sysctlbyname("hw.availpages", &lvalue, &len, NULL, 0) == -1) { + ret = -1; + } else { + if (oldlen) { + lvalue = scale_to_guest_pages(lvalue); + (*(abi_ulong *)holdp) = tswapal((abi_ulong)lvalue); + } + holdlen = sizeof(abi_ulong); + ret = 0; + } + goto out; + } + + if (oid_hw_pagesizes && snamep[1] == oid_hw_pagesizes) { + if (oldlen) { + (*(abi_ulong *)holdp) = tswapal((abi_ulong)TARGET_PAGE_SIZE); + ((abi_ulong *)holdp)[1] = 0; + } + holdlen = sizeof(abi_ulong) * 2; + ret = 0; + goto out; + } + break; + } + } + break; + + default: + break; + } + +#ifdef TARGET_ABI32 + /* + * For long and ulong with a 64-bit host and a 32-bit target we have to do + * special things. holdlen here is the length provided by the target to the + * system call. So we allocate a buffer twice as large because longs are + * twice as big on the host which will be writing them. In h2g_old_sysctl + * we'll adjust them and adjust the length. 
+ */ + if (kind == CTLTYPE_LONG || kind == CTLTYPE_ULONG) { + old_holdp = holdp; + holdlen = holdlen * 2; + holdp = g_malloc(holdlen); + } +#endif + + ret = get_errno(sysctl(snamep, namelen, holdp, &holdlen, hnewp, newlen)); + if (!ret && (holdp != 0)) { + + if (snamep[0] == CTL_SYSCTL) { + switch (snamep[1]) { + case CTL_SYSCTL_NEXT: + case CTL_SYSCTL_NAME2OID: + case CTL_SYSCTL_NEXTNOSKIP: + /* + * All of these return an OID array, so we need to convert to + * target. + */ + sysctl_name2oid(holdp, holdlen); + break; + + case CTL_SYSCTL_OIDFMT: + /* Handle oidfmt */ + sysctl_oidfmt(holdp); + break; + case CTL_SYSCTL_OIDDESCR: + case CTL_SYSCTL_OIDLABEL: + default: + /* Handle it based on the type */ + h2g_old_sysctl(holdp, &holdlen, kind); + /* NB: None of these are LONG or ULONG */ + break; + } + } else { + /* + * Need to convert from host to target. All the weird special cases + * are handled above. + */ + h2g_old_sysctl(holdp, &holdlen, kind); +#ifdef TARGET_ABI32 + /* + * For the 32-bit on 64-bit case, for longs we need to copy the + * now-converted buffer to the target and free the buffer. + */ + if (kind == CTLTYPE_LONG || kind == CTLTYPE_ULONG) { + memcpy(old_holdp, holdp, holdlen); + g_free(holdp); + holdp = old_holdp; + } +#endif + } + } + +out: + *holdlenp = holdlen; + return ret; +} + +/* + * This syscall was created to make sysctlbyname(3) more efficient, but we can't + * really provide it in bsd-user. Notably, we must always translate the names + * independently since some sysctl values have to be faked for the target + * environment, so it still has to break down to two syscalls for the underlying + * implementation. + */ +abi_long do_freebsd_sysctlbyname(CPUArchState *env, abi_ulong namep, + int32_t namelen, abi_ulong oldp, abi_ulong oldlenp, abi_ulong newp, + abi_ulong newlen) +{ + abi_long ret = -TARGET_EFAULT; + void *holdp = NULL, *hnewp = NULL; + char *snamep = NULL; + int oid[CTL_MAXNAME + 2]; + size_t holdlen, oidplen; + abi_ulong oldlen = 0; + + /* oldlenp is read/write, pre-check here for write */ + if (oldlenp) { + if (!access_ok(VERIFY_WRITE, oldlenp, sizeof(abi_ulong)) || + get_user_ual(oldlen, oldlenp)) { + goto out; + } + } + snamep = lock_user_string(namep); + if (snamep == NULL) { + goto out; + } + if (newp) { + hnewp = lock_user(VERIFY_READ, newp, newlen, 1); + if (hnewp == NULL) { + goto out; + } + } + if (oldp) { + holdp = lock_user(VERIFY_WRITE, oldp, oldlen, 0); + if (holdp == NULL) { + goto out; + } + } + holdlen = oldlen; + + oidplen = ARRAY_SIZE(oid); + if (sysctlnametomib(snamep, oid, &oidplen) != 0) { + ret = -TARGET_EINVAL; + goto out; + } + + ret = do_freebsd_sysctl_oid(env, oid, oidplen, holdp, &holdlen, hnewp, + newlen); + + /* + * writeability pre-checked above. __sysctl(2) returns ENOMEM and updates + * oldlenp for the proper size to use. + */ + if (oldlenp && (ret == 0 || ret == -TARGET_ENOMEM)) { + put_user_ual(holdlen, oldlenp); + } +out: + unlock_user(snamep, namep, 0); + unlock_user(holdp, oldp, ret == 0 ? 
holdlen : 0); + unlock_user(hnewp, newp, 0); + + return ret; +} + +abi_long do_freebsd_sysctl(CPUArchState *env, abi_ulong namep, int32_t namelen, + abi_ulong oldp, abi_ulong oldlenp, abi_ulong newp, abi_ulong newlen) +{ + abi_long ret = -TARGET_EFAULT; + void *hnamep, *holdp = NULL, *hnewp = NULL; + size_t holdlen; + abi_ulong oldlen = 0; + int32_t *snamep = g_malloc(sizeof(int32_t) * namelen), *p, *q, i; + + /* oldlenp is read/write, pre-check here for write */ + if (oldlenp) { + if (!access_ok(VERIFY_WRITE, oldlenp, sizeof(abi_ulong)) || + get_user_ual(oldlen, oldlenp)) { + goto out; + } + } + hnamep = lock_user(VERIFY_READ, namep, namelen, 1); + if (hnamep == NULL) { + goto out; + } + if (newp) { + hnewp = lock_user(VERIFY_READ, newp, newlen, 1); + if (hnewp == NULL) { + goto out; + } + } + if (oldp) { + holdp = lock_user(VERIFY_WRITE, oldp, oldlen, 0); + if (holdp == NULL) { + goto out; + } + } + holdlen = oldlen; + for (p = hnamep, q = snamep, i = 0; i < namelen; p++, i++, q++) { + *q = tswap32(*p); + } + + ret = do_freebsd_sysctl_oid(env, snamep, namelen, holdp, &holdlen, hnewp, + newlen); + + /* + * writeability pre-checked above. __sysctl(2) returns ENOMEM and updates + * oldlenp for the proper size to use. + */ + if (oldlenp && (ret == 0 || ret == -TARGET_ENOMEM)) { + put_user_ual(holdlen, oldlenp); + } + unlock_user(hnamep, namep, 0); + unlock_user(holdp, oldp, ret == 0 ? holdlen : 0); +out: + g_free(snamep); + return ret; +} + /* sysarch() is architecture dependent. */ abi_long do_freebsd_sysarch(void *cpu_env, abi_long arg1, abi_long arg2) { diff --git a/bsd-user/freebsd/os-syscall.c b/bsd-user/freebsd/os-syscall.c index 57996cad8a..c8f998ecec 100644 --- a/bsd-user/freebsd/os-syscall.c +++ b/bsd-user/freebsd/os-syscall.c @@ -38,6 +38,8 @@ #include #include +#include "include/gdbstub/syscalls.h" + #include "qemu.h" #include "signal-common.h" #include "user/syscall-trace.h" @@ -491,6 +493,21 @@ static abi_long freebsd_syscall(void *cpu_env, int num, abi_long arg1, ret = do_bsd_undelete(arg1); break; + /* + * sys{ctl, arch, call} + */ + case TARGET_FREEBSD_NR___sysctl: /* sysctl(3) */ + ret = do_freebsd_sysctl(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); + break; + + case TARGET_FREEBSD_NR___sysctlbyname: /* sysctlbyname(2) */ + ret = do_freebsd_sysctlbyname(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); + break; + + case TARGET_FREEBSD_NR_sysarch: /* sysarch(2) */ + ret = do_freebsd_sysarch(cpu_env, arg1, arg2); + break; + default: qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num); ret = -TARGET_ENOSYS; @@ -512,7 +529,7 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1, abi_long arg8) { CPUState *cpu = env_cpu(cpu_env); - int ret; + abi_long ret; trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); if (do_strace) { diff --git a/bsd-user/main.c b/bsd-user/main.c index 41290e16f9..cd8b2a670f 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -44,11 +44,12 @@ #include "trace/control.h" #include "crypto/init.h" #include "qemu/guest-random.h" +#include "gdbstub/user.h" #include "host-os.h" #include "target_arch_cpu.h" -int singlestep; +static bool opt_one_insn_per_tb; uintptr_t guest_base; bool have_guest_base; /* @@ -67,13 +68,9 @@ bool have_guest_base; # if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS # if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \ (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32)) -/* - * There are a number of places where we assign reserved_va to a variable - * of type abi_ulong and expect it to fit. 
Avoid the last page. - */ -# define MAX_RESERVED_VA (0xfffffffful & TARGET_PAGE_MASK) +# define MAX_RESERVED_VA 0xfffffffful # else -# define MAX_RESERVED_VA (1ul << TARGET_VIRT_ADDR_SPACE_BITS) +# define MAX_RESERVED_VA ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1) # endif # else # define MAX_RESERVED_VA 0 @@ -165,7 +162,8 @@ static void usage(void) "-d item1[,...] enable logging of specified items\n" " (use '-d help' for a list of log items)\n" "-D logfile write logs to 'logfile' (default stderr)\n" - "-singlestep always run in singlestep mode\n" + "-one-insn-per-tb run with one guest instruction per emulated TB\n" + "-singlestep deprecated synonym for -one-insn-per-tb\n" "-strace log system calls\n" "-trace [[enable=]][,events=][,file=]\n" " specify tracing options\n" @@ -388,8 +386,8 @@ int main(int argc, char **argv) (void) envlist_unsetenv(envlist, "LD_PRELOAD"); } else if (!strcmp(r, "seed")) { seed_optarg = optarg; - } else if (!strcmp(r, "singlestep")) { - singlestep = 1; + } else if (!strcmp(r, "singlestep") || !strcmp(r, "one-insn-per-tb")) { + opt_one_insn_per_tb = true; } else if (!strcmp(r, "strace")) { do_strace = 1; } else if (!strcmp(r, "trace")) { @@ -447,9 +445,12 @@ int main(int argc, char **argv) /* init tcg before creating CPUs and to get qemu_host_page_size */ { - AccelClass *ac = ACCEL_GET_CLASS(current_accel()); + AccelState *accel = current_accel(); + AccelClass *ac = ACCEL_GET_CLASS(accel); accel_init_interfaces(ac); + object_property_set_bool(OBJECT(accel), "one-insn-per-tb", + opt_one_insn_per_tb, &error_abort); ac->init_machine(NULL); } cpu = cpu_create(cpu_type); @@ -465,7 +466,7 @@ int main(int argc, char **argv) envlist_free(envlist); if (reserved_va) { - mmap_next_start = reserved_va; + mmap_next_start = reserved_va + 1; } { diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c index d6c5a344c9..565b9f97ed 100644 --- a/bsd-user/mmap.c +++ b/bsd-user/mmap.c @@ -118,7 +118,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot) if (ret != 0) goto error; } - page_set_flags(start, start + len, prot | PAGE_VALID); + page_set_flags(start, start + len - 1, prot | PAGE_VALID); mmap_unlock(); return 0; error: @@ -234,7 +234,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size, size = HOST_PAGE_ALIGN(size) + alignment; end_addr = start + size; if (end_addr > reserved_va) { - end_addr = reserved_va; + end_addr = reserved_va + 1; } addr = end_addr - qemu_host_page_size; @@ -243,7 +243,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size, if (looped) { return (abi_ulong)-1; } - end_addr = reserved_va; + end_addr = reserved_va + 1; addr = end_addr - qemu_host_page_size; looped = 1; continue; @@ -656,7 +656,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, } } the_end1: - page_set_flags(start, start + len, prot | PAGE_VALID); + page_set_flags(start, start + len - 1, prot | PAGE_VALID); the_end: #ifdef DEBUG_MMAP printf("ret=0x" TARGET_ABI_FMT_lx "\n", start); @@ -767,7 +767,7 @@ int target_munmap(abi_ulong start, abi_ulong len) } if (ret == 0) { - page_set_flags(start, start + len, 0); + page_set_flags(start, start + len - 1, 0); } mmap_unlock(); return ret; diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h index 4e7b8b1c06..41d84e0b81 100644 --- a/bsd-user/qemu.h +++ b/bsd-user/qemu.h @@ -253,6 +253,11 @@ bool is_error(abi_long ret); int host_to_target_errno(int err); /* os-sys.c */ +abi_long do_freebsd_sysctl(CPUArchState *env, abi_ulong namep, int32_t namelen, + abi_ulong oldp, abi_ulong oldlenp, 
abi_ulong newp, abi_ulong newlen); +abi_long do_freebsd_sysctlbyname(CPUArchState *env, abi_ulong namep, + int32_t namelen, abi_ulong oldp, abi_ulong oldlenp, abi_ulong newp, + abi_ulong newlen); abi_long do_freebsd_sysarch(void *cpu_env, abi_long arg1, abi_long arg2); /* user access */ diff --git a/bsd-user/signal.c b/bsd-user/signal.c index 58a5386395..f4e078ee1d 100644 --- a/bsd-user/signal.c +++ b/bsd-user/signal.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "qemu.h" +#include "gdbstub/user.h" #include "signal-common.h" #include "trace.h" #include "hw/core/tcg-cpu-ops.h" diff --git a/chardev/baum.c b/chardev/baum.c index 0a0d12661a..a1d9784d92 100644 --- a/chardev/baum.c +++ b/chardev/baum.c @@ -44,39 +44,39 @@ #define ESC 0x1B -#define BAUM_REQ_DisplayData 0x01 -#define BAUM_REQ_GetVersionNumber 0x05 -#define BAUM_REQ_GetKeys 0x08 -#define BAUM_REQ_SetMode 0x12 -#define BAUM_REQ_SetProtocol 0x15 -#define BAUM_REQ_GetDeviceIdentity 0x84 -#define BAUM_REQ_GetSerialNumber 0x8A +#define BAUM_REQ_DisplayData 0x01 +#define BAUM_REQ_GetVersionNumber 0x05 +#define BAUM_REQ_GetKeys 0x08 +#define BAUM_REQ_SetMode 0x12 +#define BAUM_REQ_SetProtocol 0x15 +#define BAUM_REQ_GetDeviceIdentity 0x84 +#define BAUM_REQ_GetSerialNumber 0x8A -#define BAUM_RSP_CellCount 0x01 -#define BAUM_RSP_VersionNumber 0x05 -#define BAUM_RSP_ModeSetting 0x11 -#define BAUM_RSP_CommunicationChannel 0x16 -#define BAUM_RSP_PowerdownSignal 0x17 -#define BAUM_RSP_HorizontalSensors 0x20 -#define BAUM_RSP_VerticalSensors 0x21 -#define BAUM_RSP_RoutingKeys 0x22 -#define BAUM_RSP_Switches 0x23 -#define BAUM_RSP_TopKeys 0x24 -#define BAUM_RSP_HorizontalSensor 0x25 -#define BAUM_RSP_VerticalSensor 0x26 -#define BAUM_RSP_RoutingKey 0x27 -#define BAUM_RSP_FrontKeys6 0x28 -#define BAUM_RSP_BackKeys6 0x29 -#define BAUM_RSP_CommandKeys 0x2B -#define BAUM_RSP_FrontKeys10 0x2C -#define BAUM_RSP_BackKeys10 0x2D -#define BAUM_RSP_EntryKeys 0x33 -#define BAUM_RSP_JoyStick 0x34 -#define BAUM_RSP_ErrorCode 0x40 -#define BAUM_RSP_InfoBlock 0x42 -#define BAUM_RSP_DeviceIdentity 0x84 -#define BAUM_RSP_SerialNumber 0x8A -#define BAUM_RSP_BluetoothName 0x8C +#define BAUM_RSP_CellCount 0x01 +#define BAUM_RSP_VersionNumber 0x05 +#define BAUM_RSP_ModeSetting 0x11 +#define BAUM_RSP_CommunicationChannel 0x16 +#define BAUM_RSP_PowerdownSignal 0x17 +#define BAUM_RSP_HorizontalSensors 0x20 +#define BAUM_RSP_VerticalSensors 0x21 +#define BAUM_RSP_RoutingKeys 0x22 +#define BAUM_RSP_Switches 0x23 +#define BAUM_RSP_TopKeys 0x24 +#define BAUM_RSP_HorizontalSensor 0x25 +#define BAUM_RSP_VerticalSensor 0x26 +#define BAUM_RSP_RoutingKey 0x27 +#define BAUM_RSP_FrontKeys6 0x28 +#define BAUM_RSP_BackKeys6 0x29 +#define BAUM_RSP_CommandKeys 0x2B +#define BAUM_RSP_FrontKeys10 0x2C +#define BAUM_RSP_BackKeys10 0x2D +#define BAUM_RSP_EntryKeys 0x33 +#define BAUM_RSP_JoyStick 0x34 +#define BAUM_RSP_ErrorCode 0x40 +#define BAUM_RSP_InfoBlock 0x42 +#define BAUM_RSP_DeviceIdentity 0x84 +#define BAUM_RSP_SerialNumber 0x8A +#define BAUM_RSP_BluetoothName 0x8C #define BAUM_TL1 0x01 #define BAUM_TL2 0x02 diff --git a/chardev/char-file.c b/chardev/char-file.c index 3a7b9caf6f..263e6da563 100644 --- a/chardev/char-file.c +++ b/chardev/char-file.c @@ -100,6 +100,7 @@ static void qemu_chr_parse_file_out(QemuOpts *opts, ChardevBackend *backend, Error **errp) { const char *path = qemu_opt_get(opts, "path"); + const char *inpath = qemu_opt_get(opts, "input-path"); ChardevFile *file; backend->type = CHARDEV_BACKEND_KIND_FILE; @@ -107,9 +108,16 @@ static 
void qemu_chr_parse_file_out(QemuOpts *opts, ChardevBackend *backend, error_setg(errp, "chardev: file: no filename given"); return; } +#ifdef _WIN32 + if (inpath) { + error_setg(errp, "chardev: file: input-path not supported on Windows"); + return; + } +#endif file = backend->u.file.data = g_new0(ChardevFile, 1); qemu_chr_parse_common(opts, qapi_ChardevFile_base(file)); file->out = g_strdup(path); + file->in = g_strdup(inpath); file->has_append = true; file->append = qemu_opt_get_bool(opts, "append", false); diff --git a/chardev/char-socket.c b/chardev/char-socket.c index c2265436ac..8c58532171 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -1065,6 +1065,7 @@ static void char_socket_finalize(Object *obj) qio_net_listener_set_client_func_full(s->listener, NULL, NULL, NULL, chr->gcontext); object_unref(OBJECT(s->listener)); + s->listener = NULL; } if (s->tls_creds) { object_unref(OBJECT(s->tls_creds)); diff --git a/chardev/char.c b/chardev/char.c index 11eab7764c..661ad8176a 100644 --- a/chardev/char.c +++ b/chardev/char.c @@ -805,6 +805,9 @@ QemuOptsList qemu_chardev_opts = { },{ .name = "path", .type = QEMU_OPT_STRING, + },{ + .name = "input-path", + .type = QEMU_OPT_STRING, },{ .name = "host", .type = QEMU_OPT_STRING, @@ -1175,12 +1178,10 @@ bool qmp_add_client_char(int fd, bool has_skipauth, bool skipauth, if (!s) { error_setg(errp, "protocol '%s' is invalid", protocol); - close(fd); return false; } if (qemu_chr_add_client(s, fd) < 0) { error_setg(errp, "failed to add client"); - close(fd); return false; } return true; diff --git a/configs/devices/aarch64-softmmu/default.mak b/configs/devices/aarch64-softmmu/default.mak index cf43ac8da1..f82a04c27d 100644 --- a/configs/devices/aarch64-softmmu/default.mak +++ b/configs/devices/aarch64-softmmu/default.mak @@ -3,6 +3,8 @@ # We support all the 32 bit boards so need all their config include ../arm-softmmu/default.mak -CONFIG_XLNX_ZYNQMP_ARM=y -CONFIG_XLNX_VERSAL=y -CONFIG_SBSA_REF=y +# These are selected by default when TCG is enabled, uncomment them to +# keep out of the build. +# CONFIG_XLNX_ZYNQMP_ARM=n +# CONFIG_XLNX_VERSAL=n +# CONFIG_SBSA_REF=n diff --git a/configs/devices/arm-softmmu/default.mak b/configs/devices/arm-softmmu/default.mak index 1b49a7830c..980c48a7d9 100644 --- a/configs/devices/arm-softmmu/default.mak +++ b/configs/devices/arm-softmmu/default.mak @@ -4,42 +4,43 @@ # CONFIG_TEST_DEVICES=n CONFIG_ARM_VIRT=y -CONFIG_CUBIEBOARD=y -CONFIG_EXYNOS4=y -CONFIG_HIGHBANK=y -CONFIG_INTEGRATOR=y -CONFIG_FSL_IMX31=y -CONFIG_MUSICPAL=y -CONFIG_MUSCA=y -CONFIG_CHEETAH=y -CONFIG_SX1=y -CONFIG_NSERIES=y -CONFIG_STELLARIS=y -CONFIG_STM32VLDISCOVERY=y -CONFIG_REALVIEW=y -CONFIG_VERSATILE=y -CONFIG_VEXPRESS=y -CONFIG_ZYNQ=y -CONFIG_MAINSTONE=y -CONFIG_GUMSTIX=y -CONFIG_SPITZ=y -CONFIG_TOSA=y -CONFIG_Z2=y -CONFIG_NPCM7XX=y -CONFIG_COLLIE=y -CONFIG_ASPEED_SOC=y -CONFIG_NETDUINO2=y -CONFIG_NETDUINOPLUS2=y -CONFIG_OLIMEX_STM32_H405=y -CONFIG_MPS2=y -CONFIG_RASPI=y -CONFIG_DIGIC=y -CONFIG_SABRELITE=y -CONFIG_EMCRAFT_SF2=y -CONFIG_MICROBIT=y -CONFIG_FSL_IMX25=y -CONFIG_FSL_IMX7=y -CONFIG_FSL_IMX6UL=y -CONFIG_SEMIHOSTING=y -CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y -CONFIG_ALLWINNER_H3=y + +# These are selected by default when TCG is enabled, uncomment them to +# keep out of the build. 
+# CONFIG_CUBIEBOARD=n +# CONFIG_EXYNOS4=n +# CONFIG_HIGHBANK=n +# CONFIG_INTEGRATOR=n +# CONFIG_FSL_IMX31=n +# CONFIG_MUSICPAL=n +# CONFIG_MUSCA=n +# CONFIG_CHEETAH=n +# CONFIG_SX1=n +# CONFIG_NSERIES=n +# CONFIG_STELLARIS=n +# CONFIG_STM32VLDISCOVERY=n +# CONFIG_REALVIEW=n +# CONFIG_VERSATILE=n +# CONFIG_VEXPRESS=n +# CONFIG_ZYNQ=n +# CONFIG_MAINSTONE=n +# CONFIG_GUMSTIX=n +# CONFIG_SPITZ=n +# CONFIG_TOSA=n +# CONFIG_Z2=n +# CONFIG_NPCM7XX=n +# CONFIG_COLLIE=n +# CONFIG_ASPEED_SOC=n +# CONFIG_NETDUINO2=n +# CONFIG_NETDUINOPLUS2=n +# CONFIG_OLIMEX_STM32_H405=n +# CONFIG_MPS2=n +# CONFIG_RASPI=n +# CONFIG_DIGIC=n +# CONFIG_SABRELITE=n +# CONFIG_EMCRAFT_SF2=n +# CONFIG_MICROBIT=n +# CONFIG_FSL_IMX25=n +# CONFIG_FSL_IMX7=n +# CONFIG_FSL_IMX6UL=n +# CONFIG_ALLWINNER_H3=n diff --git a/configs/targets/aarch64-linux-user.mak b/configs/targets/aarch64-linux-user.mak index db552f1839..ba8bc5fe3f 100644 --- a/configs/targets/aarch64-linux-user.mak +++ b/configs/targets/aarch64-linux-user.mak @@ -1,6 +1,6 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml TARGET_HAS_BFLT=y CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/aarch64-softmmu.mak b/configs/targets/aarch64-softmmu.mak index d489e6da83..b4338e9568 100644 --- a/configs/targets/aarch64-softmmu.mak +++ b/configs/targets/aarch64-softmmu.mak @@ -1,5 +1,5 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm TARGET_SUPPORTS_MTTCG=y -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml gdb-xml/aarch64-pauth.xml TARGET_NEED_FDT=y diff --git a/configs/targets/aarch64_be-linux-user.mak b/configs/targets/aarch64_be-linux-user.mak index dc78044fb1..acb5620cdb 100644 --- a/configs/targets/aarch64_be-linux-user.mak +++ b/configs/targets/aarch64_be-linux-user.mak @@ -1,7 +1,7 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm TARGET_BIG_ENDIAN=y -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml TARGET_HAS_BFLT=y CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/alpha-linux-user.mak b/configs/targets/alpha-linux-user.mak index 7e62fd796a..f7d3fb4afa 100644 --- a/configs/targets/alpha-linux-user.mak +++ b/configs/targets/alpha-linux-user.mak @@ -1,4 +1,3 @@ TARGET_ARCH=alpha TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl -TARGET_ALIGNED_ONLY=y diff --git a/configs/targets/alpha-softmmu.mak b/configs/targets/alpha-softmmu.mak index e4b874a19e..9dbe160740 100644 --- a/configs/targets/alpha-softmmu.mak +++ b/configs/targets/alpha-softmmu.mak @@ -1,3 +1,2 @@ TARGET_ARCH=alpha -TARGET_ALIGNED_ONLY=y TARGET_SUPPORTS_MTTCG=y diff --git a/configs/targets/hexagon-linux-user.mak b/configs/targets/hexagon-linux-user.mak index 003ed0a408..2765a4c563 100644 --- a/configs/targets/hexagon-linux-user.mak +++ b/configs/targets/hexagon-linux-user.mak @@ -1 +1,2 @@ TARGET_ARCH=hexagon +TARGET_XML_FILES=gdb-xml/hexagon-core.xml 
gdb-xml/hexagon-hvx.xml diff --git a/configs/targets/hppa-linux-user.mak b/configs/targets/hppa-linux-user.mak index db873a8796..361ea39d71 100644 --- a/configs/targets/hppa-linux-user.mak +++ b/configs/targets/hppa-linux-user.mak @@ -1,5 +1,4 @@ TARGET_ARCH=hppa TARGET_SYSTBL_ABI=common,32 TARGET_SYSTBL=syscall.tbl -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y diff --git a/configs/targets/hppa-softmmu.mak b/configs/targets/hppa-softmmu.mak index 44f07b0332..a41662aa99 100644 --- a/configs/targets/hppa-softmmu.mak +++ b/configs/targets/hppa-softmmu.mak @@ -1,4 +1,3 @@ TARGET_ARCH=hppa -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y TARGET_SUPPORTS_MTTCG=y diff --git a/configs/targets/mips-linux-user.mak b/configs/targets/mips-linux-user.mak index 71fa77d464..b4569a9893 100644 --- a/configs/targets/mips-linux-user.mak +++ b/configs/targets/mips-linux-user.mak @@ -2,5 +2,4 @@ TARGET_ARCH=mips TARGET_ABI_MIPSO32=y TARGET_SYSTBL_ABI=o32 TARGET_SYSTBL=syscall_o32.tbl -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y diff --git a/configs/targets/mips-softmmu.mak b/configs/targets/mips-softmmu.mak index 7787a4d94c..d34b4083fc 100644 --- a/configs/targets/mips-softmmu.mak +++ b/configs/targets/mips-softmmu.mak @@ -1,4 +1,3 @@ TARGET_ARCH=mips -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y TARGET_SUPPORTS_MTTCG=y diff --git a/configs/targets/mips64-linux-user.mak b/configs/targets/mips64-linux-user.mak index 5a4771f22d..d2ff509a11 100644 --- a/configs/targets/mips64-linux-user.mak +++ b/configs/targets/mips64-linux-user.mak @@ -3,5 +3,4 @@ TARGET_ABI_MIPSN64=y TARGET_BASE_ARCH=mips TARGET_SYSTBL_ABI=n64 TARGET_SYSTBL=syscall_n64.tbl -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y diff --git a/configs/targets/mips64-softmmu.mak b/configs/targets/mips64-softmmu.mak index 568d66650c..12d9483bf0 100644 --- a/configs/targets/mips64-softmmu.mak +++ b/configs/targets/mips64-softmmu.mak @@ -1,4 +1,3 @@ TARGET_ARCH=mips64 TARGET_BASE_ARCH=mips -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y diff --git a/configs/targets/mips64el-linux-user.mak b/configs/targets/mips64el-linux-user.mak index f348f35997..f9efeec8ea 100644 --- a/configs/targets/mips64el-linux-user.mak +++ b/configs/targets/mips64el-linux-user.mak @@ -3,4 +3,3 @@ TARGET_ABI_MIPSN64=y TARGET_BASE_ARCH=mips TARGET_SYSTBL_ABI=n64 TARGET_SYSTBL=syscall_n64.tbl -TARGET_ALIGNED_ONLY=y diff --git a/configs/targets/mips64el-softmmu.mak b/configs/targets/mips64el-softmmu.mak index 5a52aa4b64..8d9ab3ddc4 100644 --- a/configs/targets/mips64el-softmmu.mak +++ b/configs/targets/mips64el-softmmu.mak @@ -1,4 +1,3 @@ TARGET_ARCH=mips64 TARGET_BASE_ARCH=mips -TARGET_ALIGNED_ONLY=y TARGET_NEED_FDT=y diff --git a/configs/targets/mipsel-linux-user.mak b/configs/targets/mipsel-linux-user.mak index e23793070c..e8d7241d31 100644 --- a/configs/targets/mipsel-linux-user.mak +++ b/configs/targets/mipsel-linux-user.mak @@ -2,4 +2,3 @@ TARGET_ARCH=mips TARGET_ABI_MIPSO32=y TARGET_SYSTBL_ABI=o32 TARGET_SYSTBL=syscall_o32.tbl -TARGET_ALIGNED_ONLY=y diff --git a/configs/targets/mipsel-softmmu.mak b/configs/targets/mipsel-softmmu.mak index c7c41f4fb7..0829659fc2 100644 --- a/configs/targets/mipsel-softmmu.mak +++ b/configs/targets/mipsel-softmmu.mak @@ -1,3 +1,2 @@ TARGET_ARCH=mips -TARGET_ALIGNED_ONLY=y TARGET_SUPPORTS_MTTCG=y diff --git a/configs/targets/mipsn32-linux-user.mak b/configs/targets/mipsn32-linux-user.mak index 1e80b302fc..206095da64 100644 --- a/configs/targets/mipsn32-linux-user.mak +++ b/configs/targets/mipsn32-linux-user.mak @@ -4,5 +4,4 @@ TARGET_ABI32=y TARGET_BASE_ARCH=mips 
TARGET_SYSTBL_ABI=n32 TARGET_SYSTBL=syscall_n32.tbl -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y diff --git a/configs/targets/mipsn32el-linux-user.mak b/configs/targets/mipsn32el-linux-user.mak index f31a9c394b..ca2a3ed753 100644 --- a/configs/targets/mipsn32el-linux-user.mak +++ b/configs/targets/mipsn32el-linux-user.mak @@ -4,4 +4,3 @@ TARGET_ABI32=y TARGET_BASE_ARCH=mips TARGET_SYSTBL_ABI=n32 TARGET_SYSTBL=syscall_n32.tbl -TARGET_ALIGNED_ONLY=y diff --git a/configs/targets/nios2-softmmu.mak b/configs/targets/nios2-softmmu.mak index 5823fc02c8..c99ae3777e 100644 --- a/configs/targets/nios2-softmmu.mak +++ b/configs/targets/nios2-softmmu.mak @@ -1,3 +1,2 @@ TARGET_ARCH=nios2 -TARGET_ALIGNED_ONLY=y TARGET_NEED_FDT=y diff --git a/configs/targets/s390x-linux-user.mak b/configs/targets/s390x-linux-user.mak index e2978248ed..24c04c8589 100644 --- a/configs/targets/s390x-linux-user.mak +++ b/configs/targets/s390x-linux-user.mak @@ -2,4 +2,4 @@ TARGET_ARCH=s390x TARGET_SYSTBL_ABI=common,64 TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y -TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-gs.xml +TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-virt-kvm.xml gdb-xml/s390-gs.xml diff --git a/configs/targets/s390x-softmmu.mak b/configs/targets/s390x-softmmu.mak index 258b4cf358..70d2f9f0ba 100644 --- a/configs/targets/s390x-softmmu.mak +++ b/configs/targets/s390x-softmmu.mak @@ -1,4 +1,4 @@ TARGET_ARCH=s390x TARGET_BIG_ENDIAN=y TARGET_SUPPORTS_MTTCG=y -TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-gs.xml +TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-virt-kvm.xml gdb-xml/s390-gs.xml diff --git a/configs/targets/sh4-linux-user.mak b/configs/targets/sh4-linux-user.mak index 0152d6621e..9908887566 100644 --- a/configs/targets/sh4-linux-user.mak +++ b/configs/targets/sh4-linux-user.mak @@ -1,5 +1,4 @@ TARGET_ARCH=sh4 TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl -TARGET_ALIGNED_ONLY=y TARGET_HAS_BFLT=y diff --git a/configs/targets/sh4-softmmu.mak b/configs/targets/sh4-softmmu.mak index 95896376c4..f9d62d91e4 100644 --- a/configs/targets/sh4-softmmu.mak +++ b/configs/targets/sh4-softmmu.mak @@ -1,2 +1 @@ TARGET_ARCH=sh4 -TARGET_ALIGNED_ONLY=y diff --git a/configs/targets/sh4eb-linux-user.mak b/configs/targets/sh4eb-linux-user.mak index 6724165efe..9db6b3609c 100644 --- a/configs/targets/sh4eb-linux-user.mak +++ b/configs/targets/sh4eb-linux-user.mak @@ -1,6 +1,5 @@ TARGET_ARCH=sh4 TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y TARGET_HAS_BFLT=y diff --git a/configs/targets/sh4eb-softmmu.mak b/configs/targets/sh4eb-softmmu.mak index dc8b30bf7a..226b1fc698 100644 --- a/configs/targets/sh4eb-softmmu.mak +++ b/configs/targets/sh4eb-softmmu.mak @@ -1,3 +1,2 @@ TARGET_ARCH=sh4 -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y diff --git a/configs/targets/sparc-linux-user.mak b/configs/targets/sparc-linux-user.mak index 00e7bc1f07..abcfb8fc62 100644 --- a/configs/targets/sparc-linux-user.mak +++ b/configs/targets/sparc-linux-user.mak @@ -1,5 +1,4 @@ TARGET_ARCH=sparc TARGET_SYSTBL_ABI=common,32 TARGET_SYSTBL=syscall.tbl 
-TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y diff --git a/configs/targets/sparc-softmmu.mak b/configs/targets/sparc-softmmu.mak index a849190f01..454eb35499 100644 --- a/configs/targets/sparc-softmmu.mak +++ b/configs/targets/sparc-softmmu.mak @@ -1,3 +1,2 @@ TARGET_ARCH=sparc -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y diff --git a/configs/targets/sparc32plus-linux-user.mak b/configs/targets/sparc32plus-linux-user.mak index a65c0951a1..6cc8fa516b 100644 --- a/configs/targets/sparc32plus-linux-user.mak +++ b/configs/targets/sparc32plus-linux-user.mak @@ -4,5 +4,4 @@ TARGET_BASE_ARCH=sparc TARGET_ABI_DIR=sparc TARGET_SYSTBL_ABI=common,32 TARGET_SYSTBL=syscall.tbl -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y diff --git a/configs/targets/sparc64-linux-user.mak b/configs/targets/sparc64-linux-user.mak index 20fcb93fa4..52f05ec000 100644 --- a/configs/targets/sparc64-linux-user.mak +++ b/configs/targets/sparc64-linux-user.mak @@ -3,5 +3,4 @@ TARGET_BASE_ARCH=sparc TARGET_ABI_DIR=sparc TARGET_SYSTBL_ABI=common,64 TARGET_SYSTBL=syscall.tbl -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y diff --git a/configs/targets/sparc64-softmmu.mak b/configs/targets/sparc64-softmmu.mak index c626ac3eae..d3f8a3b710 100644 --- a/configs/targets/sparc64-softmmu.mak +++ b/configs/targets/sparc64-softmmu.mak @@ -1,4 +1,3 @@ TARGET_ARCH=sparc64 TARGET_BASE_ARCH=sparc -TARGET_ALIGNED_ONLY=y TARGET_BIG_ENDIAN=y diff --git a/configure b/configure index 440851e4e4..09ae5ac141 100755 --- a/configure +++ b/configure @@ -4,9 +4,8 @@ # # Unset some variables known to interfere with behavior of common tools, -# just as autoconf does. -CLICOLOR_FORCE= GREP_OPTIONS= -unset CLICOLOR_FORCE GREP_OPTIONS +# just as autoconf does. Unlike autoconf, we assume that unset exists. +unset CLICOLOR_FORCE GREP_OPTIONS BASH_ENV ENV MAIL MAILPATH CDPATH # Don't allow CCACHE, if present, to use cached results of compile tests! export CCACHE_RECACHE=yes @@ -79,7 +78,6 @@ fi TMPB="qemu-conf" TMPC="${TMPDIR1}/${TMPB}.c" TMPO="${TMPDIR1}/${TMPB}.o" -TMPM="${TMPDIR1}/${TMPB}.m" TMPE="${TMPDIR1}/${TMPB}.exe" rm -f config.log @@ -125,62 +123,20 @@ lines: ${BASH_LINENO[*]}" $compiler "$@" >> config.log 2>&1 || return $? } -do_compiler_werror() { - # Run the compiler, capturing its output to the log. First argument - # is compiler binary to execute. - compiler="$1" - shift - if test -n "$BASH_VERSION"; then eval ' - echo >>config.log " -funcs: ${FUNCNAME[*]} -lines: ${BASH_LINENO[*]}" - '; fi - echo $compiler "$@" >> config.log - $compiler "$@" >> config.log 2>&1 || return $? - # Test passed. If this is an --enable-werror build, rerun - # the test with -Werror and bail out if it fails. This - # makes warning-generating-errors in configure test code - # obvious to developers. - if test "$werror" != "yes"; then - return 0 - fi - # Don't bother rerunning the compile if we were already using -Werror - case "$*" in - *-Werror*) - return 0 - ;; - esac - echo $compiler -Werror "$@" >> config.log - $compiler -Werror "$@" >> config.log 2>&1 && return $? - error_exit "configure test passed without -Werror but failed with -Werror." \ - "This is probably a bug in the configure script. The failing command" \ - "will be at the bottom of config.log." \ - "You can run configure with --disable-werror to bypass this check." 
-} - do_cc() { - do_compiler_werror "$cc" $CPU_CFLAGS "$@" -} - -do_objc() { - do_compiler_werror "$objcc" $CPU_CFLAGS "$@" -} - -# Append $2 to the variable named $1, with space separation -add_to() { - eval $1=\${$1:+\"\$$1 \"}\$2 + do_compiler "$cc" $CPU_CFLAGS "$@" } compile_object() { local_cflags="$1" - do_cc $CFLAGS $EXTRA_CFLAGS $CONFIGURE_CFLAGS $QEMU_CFLAGS $local_cflags -c -o $TMPO $TMPC + do_cc $CFLAGS $EXTRA_CFLAGS $local_cflags -c -o $TMPO $TMPC } compile_prog() { local_cflags="$1" local_ldflags="$2" - do_cc $CFLAGS $EXTRA_CFLAGS $CONFIGURE_CFLAGS $QEMU_CFLAGS $local_cflags -o $TMPE $TMPC \ - $LDFLAGS $EXTRA_LDFLAGS $CONFIGURE_LDFLAGS $QEMU_LDFLAGS $local_ldflags + do_cc $CFLAGS $EXTRA_CFLAGS $local_cflags -o $TMPE $TMPC \ + $LDFLAGS $EXTRA_LDFLAGS $local_ldflags } # symbolically link $1 to $2. Portable version of "ln -sf". @@ -220,30 +176,18 @@ then error_exit "main directory cannot contain spaces nor colons" fi +# parse CC options first; some compiler tests are used to establish +# some defaults, based on the host environment + # default parameters cpu="" -static="no" cross_compile="no" cross_prefix="" host_cc="cc" -stack_protector="" -safe_stack="" -use_containers="yes" -gdb_bin=$(command -v "gdb-multiarch" || command -v "gdb") - -if test -e "$source_path/.git" -then - git_submodules_action="update" -else - git_submodules_action="ignore" -fi - -git_submodules="ui/keycodemapdb" -git="git" - -# Don't accept a target_list environment variable. -unset target_list -unset target_list_exclude +EXTRA_CFLAGS="" +EXTRA_CXXFLAGS="" +EXTRA_OBJCFLAGS="" +EXTRA_LDFLAGS="" # Default value for a variable defining feature "foo". # * foo="no" feature will only be used if --enable-foo arg is given @@ -256,54 +200,8 @@ unset target_list_exclude # Always add --enable-foo and --disable-foo command line args. # Distributions want to ensure that several features are compiled in, and it # is impossible without a --enable-foo that exits if a feature is not found. - default_feature="" -# parse CC options second -for opt do - optarg=$(expr "x$opt" : 'x[^=]*=\(.*\)') - case "$opt" in - --without-default-features) - default_feature="no" - ;; - esac -done -EXTRA_CFLAGS="" -EXTRA_CXXFLAGS="" -EXTRA_OBJCFLAGS="" -EXTRA_LDFLAGS="" - -debug_tcg="no" -sanitizers="no" -tsan="no" -fortify_source="yes" -EXESUF="" -modules="no" -prefix="/usr/local" -qemu_suffix="qemu" -softmmu="yes" -linux_user="" -bsd_user="" -pie="" -coroutine="" -plugins="$default_feature" -meson="" -ninja="" -bindir="bin" -skip_meson=no -vfio_user_server="disabled" - -# The following Meson options are handled manually (still they -# are included in the automatically generated help message) - -# 1. Track which submodules are needed -fdt="auto" - -# 2. 
Automatically enable/disable other options -tcg="auto" -cfi="false" - -# parse CC options second for opt do optarg=$(expr "x$opt" : 'x[^=]*=\(.*\)') case "$opt" in @@ -314,6 +212,8 @@ for opt do ;; --cxx=*) CXX="$optarg" ;; + --objcc=*) objcc="$optarg" + ;; --cpu=*) cpu="$optarg" ;; --extra-cflags=*) @@ -340,9 +240,60 @@ for opt do --cross-prefix-*) cc_arch=${opt#--cross-prefix-}; cc_arch=${cc_arch%%=*} eval "cross_prefix_${cc_arch}=\$optarg" ;; + --without-default-features) default_feature="no" + ;; esac done + +if test -e "$source_path/.git" +then + git_submodules_action="update" +else + git_submodules_action="ignore" +fi + +git_submodules="subprojects/keycodemapdb" +git="git" +debug_tcg="no" +docs="auto" +EXESUF="" +prefix="/usr/local" +qemu_suffix="qemu" +softmmu="yes" +linux_user="" +bsd_user="" +plugins="$default_feature" +ninja="" +python= +pypi="enabled" +bindir="bin" +skip_meson=no +vfio_user_server="disabled" +use_containers="yes" +gdb_bin=$(command -v "gdb-multiarch" || command -v "gdb") +gdb_arches="" +werror="" + +# Don't accept a target_list environment variable. +unset target_list +unset target_list_exclude + +# The following Meson options are handled manually (still they +# are included in the automatically generated help message) + +# 1. Track which submodules are needed +fdt="auto" + +# 2. Automatically enable/disable other options +tcg="auto" +cfi="false" + +# 3. Need to check for -static-pie before Meson runs. Also, +# Meson has PIE as a boolean rather than enabled/disabled/auto. +pie="" +static="no" + # Preferred compiler: # ${CC} (if set) # ${cross_prefix}gcc (if cross-prefix specified) @@ -359,6 +310,21 @@ else cxx="${CXX-${cross_prefix}g++}" fi +# Preferred ObjC compiler: +# $objcc (if set, i.e. via --objcc option) +# ${cross_prefix}clang (if cross-prefix specified) +# clang (if available) +# $cc +if test -z "${objcc}${cross_prefix}"; then + if has clang; then + objcc=clang + else + objcc="$cc" + fi +else + objcc="${objcc-${cross_prefix}clang}" +fi + ar="${AR-${cross_prefix}ar}" as="${AS-${cross_prefix}as}" ccas="${CCAS-$cc}" @@ -371,28 +337,9 @@ strip="${STRIP-${cross_prefix}strip}" widl="${WIDL-${cross_prefix}widl}" windres="${WINDRES-${cross_prefix}windres}" windmc="${WINDMC-${cross_prefix}windmc}" -pkg_config_exe="${PKG_CONFIG-${cross_prefix}pkg-config}" -query_pkg_config() { - "${pkg_config_exe}" ${QEMU_PKG_CONFIG_FLAGS} "$@" -} -pkg_config=query_pkg_config +pkg_config="${PKG_CONFIG-${cross_prefix}pkg-config}" sdl2_config="${SDL2_CONFIG-${cross_prefix}sdl2-config}" -# default flags for all hosts -# We use -fwrapv to tell the compiler that we require a C dialect where -# left shift of signed integers is well defined and has the expected -# 2s-complement style results. (Both clang and gcc agree that it -# provides these semantics.) -QEMU_CFLAGS="-fno-strict-aliasing -fno-common -fwrapv" -QEMU_CFLAGS="-D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE $QEMU_CFLAGS" - -QEMU_LDFLAGS= - -# Flags that are needed during configure but later taken care of by Meson -CONFIGURE_CFLAGS="-std=gnu11 -Wall" -CONFIGURE_LDFLAGS= - - check_define() { cat > $TMPC < $TMPC < -int main(void) { return 0; } -EOF - compile_object -} - write_c_skeleton() { cat > $TMPC <= 3.7. + # NB: a True python conditional creates a non-zero return code (Failure) + "$1" -c 'import sys; sys.exit(sys.version_info < (3,7))' +} -# We prefer python 3.x. 
A bare 'python' is traditionally -# python 2.x, but some distros have it as python 3.x, so -# we check that too -python= -explicit_python=no -for binary in "${PYTHON-python3}" python -do - if has "$binary" - then - python=$(command -v "$binary") - break +first_python= +if test -z "${PYTHON}"; then + # A bare 'python' is traditionally python 2.x, but some distros + # have it as python 3.x, so check in both places. + for binary in python3 python python3.11 python3.10 python3.9 python3.8 python3.7; do + if has "$binary"; then + python=$(command -v "$binary") + if check_py_version "$python"; then + # This one is good. + first_python= + break + else + first_python=$python + fi + fi + done +else + # Same as above, but only check the environment variable. + has "${PYTHON}" || error_exit "The PYTHON environment variable does not point to an executable" + python=$(command -v "$PYTHON") + if check_py_version "$python"; then + # This one is good. + first_python= + else + first_python=$first_python fi -done - +fi # Check for ancillary tools used in testing genisoimage= @@ -622,41 +565,16 @@ do fi done -# Default objcc to clang if available, otherwise use CC -if has clang; then - objcc=clang -else - objcc="$cc" -fi - if test "$mingw32" = "yes" ; then EXESUF=".exe" - # MinGW needs -mthreads for TLS and macro _MT. - CONFIGURE_CFLAGS="-mthreads $CONFIGURE_CFLAGS" prefix="/qemu" bindir="" qemu_suffix="" fi -werror="" as_shared_lib="no" as_static_lib="no" -meson_option_build_array() { - printf '[' - (if test "$targetos" == windows; then - IFS=\; - else - IFS=: - fi - for e in $1; do - e=${e/'\'/'\\'} - e=${e/\"/'\"'} - printf '"""%s""",' "$e" - done) - printf ']\n' -} - meson_option_build_array() { printf '[' (if test "$targetos" = windows; then @@ -705,18 +623,16 @@ for opt do ;; --cxx=*) ;; - --objcc=*) objcc="$optarg" + --objcc=*) ;; - --make=*) make="$optarg" + --make=*) ;; --install=*) ;; - --python=*) python="$optarg" ; explicit_python=yes + --python=*) python="$optarg" ;; --skip-meson) skip_meson=yes ;; - --meson=*) meson="$optarg" - ;; --ninja=*) ninja="$optarg" ;; --smbd=*) smbd="$optarg" @@ -733,15 +649,9 @@ for opt do ;; --cross-prefix-*) ;; - --enable-debug-info) meson_option_add -Ddebug=true + --enable-docs) docs=enabled ;; - --disable-debug-info) meson_option_add -Ddebug=false - ;; - --enable-modules) - modules="yes" - ;; - --disable-modules) - modules="no" + --disable-docs) docs=disabled ;; --cpu=*) ;; @@ -773,9 +683,7 @@ for opt do ;; --without-default-features) # processed above ;; - --static) - static="yes" - QEMU_PKG_CONFIG_FLAGS="--static $QEMU_PKG_CONFIG_FLAGS" + --static) static="yes" ;; --bindir=*) bindir="$optarg" ;; @@ -798,17 +706,9 @@ for opt do --enable-debug) # Enable debugging options that aren't excessively noisy debug_tcg="yes" + meson_option_parse --enable-debug-graph-lock "" meson_option_parse --enable-debug-mutex "" meson_option_add -Doptimization=0 - fortify_source="no" - ;; - --enable-sanitizers) sanitizers="yes" - ;; - --disable-sanitizers) sanitizers="no" - ;; - --enable-tsan) tsan="yes" - ;; - --disable-tsan) tsan="no" ;; --disable-tcg) tcg="disabled" plugins="no" @@ -840,14 +740,6 @@ for opt do ;; --disable-werror) werror="no" ;; - --enable-stack-protector) stack_protector="yes" - ;; - --disable-stack-protector) stack_protector="no" - ;; - --enable-safe-stack) safe_stack="yes" - ;; - --disable-safe-stack) safe_stack="no" - ;; --enable-cfi) cfi="true"; meson_option_add -Db_lto=true @@ -862,13 +754,15 @@ for opt do ;; --enable-fdt=*) fdt="$optarg" ;; - 
--with-coroutine=*) coroutine="$optarg" - ;; --with-git=*) git="$optarg" ;; --with-git-submodules=*) git_submodules_action="$optarg" ;; + --disable-pypi) pypi="disabled" + ;; + --enable-pypi) pypi="enabled" + ;; --enable-plugins) if test "$mingw32" = "yes"; then error_exit "TCG plugins not currently supported on Windows platforms" else @@ -916,7 +810,7 @@ case $git_submodules_action in fi ;; ignore) - if ! test -f "$source_path/ui/keycodemapdb/README" + if ! test -f "$source_path/subprojects/keycodemapdb/README" then echo echo "ERROR: missing GIT submodules" @@ -1008,9 +902,7 @@ Advanced options (experts only): --cross-cc-ARCH=CC use compiler when building ARCH guest test cases --cross-cc-cflags-ARCH= use compiler flags when building ARCH guest tests --cross-prefix-ARCH=PREFIX cross compiler prefix when building ARCH guest test cases - --make=MAKE use specified make [$make] --python=PYTHON use specified python [$python] - --meson=MESON use specified meson [$meson] --ninja=NINJA use specified ninja [$ninja] --smbd=SMBD use specified smbd [$smbd] --with-git=GIT use specified git [$git] @@ -1026,13 +918,8 @@ Advanced options (experts only): desired devices in configs/devices/) --with-devices-ARCH=NAME override default configs/devices --enable-debug enable common debug build options - --enable-sanitizers enable default sanitizers - --enable-tsan enable thread sanitizer --disable-werror disable compilation abort on warning - --disable-stack-protector disable compiler-provided stack protection --cpu=CPU Build for host CPU [$cpu] - --with-coroutine=BACKEND coroutine backend. Supported options: - ucontext, sigaltstack, windows --enable-plugins enable plugins via shared library loading --disable-containers don't use containers for cross-building @@ -1045,11 +932,7 @@ cat << EOF linux-user all linux usermode emulation targets bsd-user all BSD usermode emulation targets pie Position Independent Executables - modules modules support (non-Windows) debug-tcg TCG debugging (default is disabled) - debug-info debugging information - safe-stack SafeStack Stack Smash Protection. Depends on - clang/llvm and requires coroutine backend ucontext. NOTE: The object files are built at the place where configure is launched EOF @@ -1061,63 +944,98 @@ rm -f ./*/config-devices.mak.d if test -z "$python" then - error_exit "Python not found. Use --python=/path/to/python" -fi -if ! has "$make" -then - error_exit "GNU make ($make) not found" + # If first_python is set, there was a binary somewhere even though + # it was not suitable. Use it for the error message. + if test -n "$first_python"; then + error_exit "Cannot use '$first_python', Python >= 3.7 is required." \ + "Use --python=/path/to/python to specify a supported Python." + else + error_exit "Python not found. Use --python=/path/to/python" + fi fi -# Note that if the Python conditional here evaluates True we will exit -# with status 1 which is a shell 'false' value. -if ! $python -c 'import sys; sys.exit(sys.version_info < (3,6))'; then - error_exit "Cannot use '$python', Python >= 3.6 is required." \ - "Use --python=/path/to/python to specify a supported Python." +if ! check_py_version "$python"; then + error_exit "Cannot use '$python', Python >= 3.7 is required." \ + "Use --python=/path/to/python to specify a supported Python." \ + "Maybe try:" \ + " openSUSE Leap 15.3+: zypper install python39" \ + " CentOS 8: dnf install python38" +fi + +# Resolve PATH +python="$(command -v "$python")" + +# Create a Python virtual environment using our configured python. 
+# The stdout of this script will be the location of a symlink that +# points to the configured Python. +# Entry point scripts for pip, meson, and sphinx are generated if those +# packages are present. + +# Defaults assumed for now: +# - venv is cleared if it exists already; +# - venv is allowed to use system packages; +# - all setup can be performed offline; +# - missing packages may be fetched from PyPI, +# unless --disable-pypi is passed. +# - pip is not installed into the venv when possible, +# but ensurepip is called as a fallback when necessary. + +echo "python determined to be '$python'" +echo "python version: $($python --version)" + +python="$($python -B "${source_path}/python/scripts/mkvenv.py" create pyvenv)" +if test "$?" -ne 0 ; then + error_exit "python venv creation failed" fi # Suppress writing compiled files python="$python -B" +mkvenv="$python ${source_path}/python/scripts/mkvenv.py" -if test -z "$meson"; then - if test "$explicit_python" = no && has meson && version_ge "$(meson --version)" 0.61.5; then - meson=meson - elif test "$git_submodules_action" != 'ignore' ; then - meson=git - elif test -e "${source_path}/meson/meson.py" ; then - meson=internal - else - if test "$explicit_python" = yes; then - error_exit "--python requires using QEMU's embedded Meson distribution, but it was not found." - else - error_exit "Meson not found. Use --meson=/path/to/meson" +mkvenv_flags="" +if test "$pypi" = "enabled" ; then + mkvenv_flags="--online" +fi + +if ! $mkvenv ensure \ + $mkvenv_flags \ + --dir "${source_path}/python/wheels" \ + --diagnose "meson" \ + "meson>=0.63.0" ; +then + exit 1 +fi + +# At this point, we expect Meson to be installed and available. +# We expect mkvenv or pip to have created pyvenv/bin/meson for us. +# We ignore PATH completely here: we want to use the venv's Meson +# *exclusively*. + +meson="$(cd pyvenv/bin; pwd)/meson" + +# Conditionally ensure Sphinx is installed. + +mkvenv_flags="" +if test "$pypi" = "enabled" -a "$docs" = "enabled" ; then + mkvenv_flags="--online" +fi + +if test "$docs" != "disabled" ; then + if ! $mkvenv ensure \ + $mkvenv_flags \ + --diagnose "sphinx-build" \ + "sphinx>=1.6.0" "sphinx-rtd-theme>=0.5.0"; + then + if test "$docs" = "enabled" ; then + exit 1 fi - fi -else - # Meson uses its own Python interpreter to invoke other Python scripts, - # but the user wants to use the one they specified with --python. - # - # We do not want to override the distro Python interpreter (and sometimes - # cannot: for example in Homebrew /usr/bin/meson is a bash script), so - # just require --meson=git|internal together with --python. - if test "$explicit_python" = yes; then - case "$meson" in - git | internal) ;; - *) error_exit "--python requires using QEMU's embedded Meson distribution." ;; - esac + echo "Sphinx not found/usable, disabling docs." + docs=disabled + else + docs=enabled fi fi -if test "$meson" = git; then - git_submodules="${git_submodules} meson" -fi - -case "$meson" in - git | internal) - meson="$python ${source_path}/meson/meson.py" - ;; - *) meson=$(command -v "$meson") ;; -esac - # Probe for ninja if test -z "$ninja"; then @@ -1132,20 +1050,6 @@ if test -z "$ninja"; then fi fi -# Check that the C compiler works. Doing this here before testing -# the host CPU ensures that we had a valid CC to autodetect the -# $cpu var (and we should bail right here if that's not the case). -# It also allows the help message to be printed without a CC. 
-write_c_skeleton; -if compile_object ; then - : C compiler works ok -else - error_exit "\"$cc\" either does not exist or does not work" -fi -if ! compile_prog ; then - error_exit "\"$cc\" cannot build an executable (is your linker broken?)" -fi - # Consult white-list to determine whether to enable werror # by default. Only enable by default for git builds if test -z "$werror" ; then @@ -1165,144 +1069,7 @@ if test "$targetos" = "bogus"; then error_exit "Unrecognized host OS (uname -s reports '$(uname -s)')" fi -# Check whether the compiler matches our minimum requirements: -cat > $TMPC << EOF -#if defined(__clang_major__) && defined(__clang_minor__) -# ifdef __apple_build_version__ -# if __clang_major__ < 12 || (__clang_major__ == 12 && __clang_minor__ < 0) -# error You need at least XCode Clang v12.0 to compile QEMU -# endif -# else -# if __clang_major__ < 10 || (__clang_major__ == 10 && __clang_minor__ < 0) -# error You need at least Clang v10.0 to compile QEMU -# endif -# endif -#elif defined(__GNUC__) && defined(__GNUC_MINOR__) -# if __GNUC__ < 7 || (__GNUC__ == 7 && __GNUC_MINOR__ < 4) -# error You need at least GCC v7.4.0 to compile QEMU -# endif -#else -# error You either need GCC or Clang to compiler QEMU -#endif -int main (void) { return 0; } -EOF -if ! compile_prog "" "" ; then - error_exit "You need at least GCC v7.4 or Clang v10.0 (or XCode Clang v12.0)" -fi - -# Accumulate -Wfoo and -Wno-bar separately. -# We will list all of the enable flags first, and the disable flags second. -# Note that we do not add -Werror, because that would enable it for all -# configure tests. If a configure test failed due to -Werror this would -# just silently disable some features, so it's too error prone. - -warn_flags= -add_to warn_flags -Wundef -add_to warn_flags -Wwrite-strings -add_to warn_flags -Wmissing-prototypes -add_to warn_flags -Wstrict-prototypes -add_to warn_flags -Wredundant-decls -add_to warn_flags -Wold-style-declaration -add_to warn_flags -Wold-style-definition -add_to warn_flags -Wtype-limits -add_to warn_flags -Wformat-security -add_to warn_flags -Wformat-y2k -add_to warn_flags -Winit-self -add_to warn_flags -Wignored-qualifiers -add_to warn_flags -Wempty-body -add_to warn_flags -Wnested-externs -add_to warn_flags -Wendif-labels -add_to warn_flags -Wexpansion-to-defined -add_to warn_flags -Wimplicit-fallthrough=2 -add_to warn_flags -Wmissing-format-attribute -add_to warn_flags -Wthread-safety - -nowarn_flags= -add_to nowarn_flags -Wno-initializer-overrides -add_to nowarn_flags -Wno-missing-include-dirs -add_to nowarn_flags -Wno-shift-negative-value -add_to nowarn_flags -Wno-string-plus-int -add_to nowarn_flags -Wno-typedef-redefinition -add_to nowarn_flags -Wno-tautological-type-limit-compare -add_to nowarn_flags -Wno-psabi -add_to nowarn_flags -Wno-gnu-variable-sized-type-not-at-end - -gcc_flags="$warn_flags $nowarn_flags" - -cc_has_warning_flag() { - write_c_skeleton; - - # Use the positive sense of the flag when testing for -Wno-wombat - # support (gcc will happily accept the -Wno- form of unknown - # warning options). 
- optflag="$(echo $1 | sed -e 's/^-Wno-/-W/')" - compile_prog "-Werror $optflag" "" -} - -objcc_has_warning_flag() { - cat > $TMPM < $TMPC << EOF -int main(int argc, char *argv[]) -{ - char arr[64], *p = arr, *c = argv[argc - 1]; - while (*c) { - *p++ = *c++; - } - return 0; -} -EOF - gcc_flags="-fstack-protector-strong -fstack-protector-all" - sp_on=0 - for flag in $gcc_flags; do - # We need to check both a compile and a link, since some compiler - # setups fail only on a .c->.o compile and some only at link time - if compile_object "-Werror $flag" && - compile_prog "-Werror $flag" ""; then - QEMU_CFLAGS="$QEMU_CFLAGS $flag" - QEMU_LDFLAGS="$QEMU_LDFLAGS $flag" - sp_on=1 - break - fi - done - if test "$stack_protector" = yes; then - if test $sp_on = 0; then - error_exit "Stack protector not supported" - fi - fi -fi - -# Our module code doesn't support Windows -if test "$modules" = "yes" && test "$mingw32" = "yes" ; then - error_exit "Modules are not available for Windows" -fi - -# Static linking is not possible with plugins, modules or PIE if test "$static" = "yes" ; then - if test "$modules" = "yes" ; then - error_exit "static and modules are mutually incompatible" - fi if test "$plugins" = "yes"; then error_exit "static and plugins are mutually incompatible" else @@ -1322,59 +1089,26 @@ static THREAD int tls_var; int main(void) { return tls_var; } EOF -# Meson currently only handles pie as a boolean for now so if we have -# explicitly disabled PIE we need to extend our cflags because it wont. if test "$static" = "yes"; then if test "$pie" != "no" && compile_prog "-Werror -fPIE -DPIE" "-static-pie"; then - CONFIGURE_CFLAGS="-fPIE -DPIE $CONFIGURE_CFLAGS" pie="yes" elif test "$pie" = "yes"; then error_exit "-static-pie not available due to missing toolchain support" else pie="no" - QEMU_CFLAGS="-fno-pie -no-pie $QEMU_CFLAGS" fi -elif test "$pie" = "no"; then - if compile_prog "-Werror -fno-pie" "-no-pie"; then - CONFIGURE_CFLAGS="-fno-pie $CONFIGURE_CFLAGS" - CONFIGURE_LDFLAGS="-no-pie $CONFIGURE_LDFLAGS" - QEMU_CFLAGS="-fno-pie -no-pie $QEMU_CFLAGS" +elif test "$pie" != "no"; then + if compile_prog "-Werror -fPIE -DPIE" "-pie"; then + pie="yes" + elif test "$pie" = "yes"; then + error_exit "PIE not available due to missing toolchain support" + else + echo "Disabling PIE due to missing toolchain support" + pie="no" fi -elif compile_prog "-Werror -fPIE -DPIE" "-pie"; then - CONFIGURE_CFLAGS="-fPIE -DPIE $CONFIGURE_CFLAGS" - CONFIGURE_LDFLAGS="-pie $CONFIGURE_LDFLAGS" - pie="yes" -elif test "$pie" = "yes"; then - error_exit "PIE not available due to missing toolchain support" -else - echo "Disabling PIE due to missing toolchain support" - pie="no" fi ########################################## -# __sync_fetch_and_and requires at least -march=i486. Many toolchains -# use i686 as default anyway, but for those that don't, an explicit -# specification is necessary - -if test "$cpu" = "i386"; then - cat > $TMPC << EOF -static int sfaa(int *ptr) -{ - return __sync_fetch_and_and(ptr, 0); -} - -int main(void) -{ - int val = 42; - val = __sync_val_compare_and_swap(&val, 0, 1); - sfaa(&val); - return val; -} -EOF - if ! compile_prog "" "" ; then - QEMU_CFLAGS="-march=i486 $QEMU_CFLAGS" - fi -fi if test -z "${target_list+xxx}" ; then default_targets=yes @@ -1446,218 +1180,16 @@ EOF fi fi -########################################## -# pkg-config probe - -if ! 
has "$pkg_config_exe"; then - error_exit "pkg-config binary '$pkg_config_exe' not found" -fi - -########################################## -# glib support probe - -# When bumping glib_req_ver, please check also whether we should increase -# the _WIN32_WINNT setting in osdep.h according to the value from glib -glib_req_ver=2.56 -glib_modules=gthread-2.0 -if test "$modules" = yes; then - glib_modules="$glib_modules gmodule-export-2.0" -elif test "$plugins" = "yes"; then - glib_modules="$glib_modules gmodule-no-export-2.0" -fi - -for i in $glib_modules; do - if $pkg_config --atleast-version=$glib_req_ver $i; then - glib_cflags=$($pkg_config --cflags $i) - glib_libs=$($pkg_config --libs $i) - else - error_exit "glib-$glib_req_ver $i is required to compile QEMU" - fi -done - -glib_bindir="$($pkg_config --variable=bindir glib-2.0)" -if test -z "$glib_bindir" ; then - glib_bindir="$($pkg_config --variable=prefix glib-2.0)"/bin -fi - -# This workaround is required due to a bug in pkg-config file for glib as it -# doesn't define GLIB_STATIC_COMPILATION for pkg-config --static - -if test "$static" = yes && test "$mingw32" = yes; then - glib_cflags="-DGLIB_STATIC_COMPILATION $glib_cflags" -fi - -# Sanity check that the current size_t matches the -# size that glib thinks it should be. This catches -# problems on multi-arch where people try to build -# 32-bit QEMU while pointing at 64-bit glib headers -cat > $TMPC < -#include - -#define QEMU_BUILD_BUG_ON(x) \ - typedef char qemu_build_bug_on[(x)?-1:1] __attribute__((unused)); - -int main(void) { - QEMU_BUILD_BUG_ON(sizeof(size_t) != GLIB_SIZEOF_SIZE_T); - return 0; -} -EOF - -if ! compile_prog "$glib_cflags" "$glib_libs" ; then - error_exit "sizeof(size_t) doesn't match GLIB_SIZEOF_SIZE_T."\ - "You probably need to set PKG_CONFIG_LIBDIR"\ - "to point to the right pkg-config files for your"\ - "build target" -fi - -# Silence clang warnings triggered by glib < 2.57.2 -cat > $TMPC << EOF -#include -typedef struct Foo { - int i; -} Foo; -static void foo_free(Foo *f) -{ - g_free(f); -} -G_DEFINE_AUTOPTR_CLEANUP_FUNC(Foo, foo_free) -int main(void) { return 0; } -EOF -if ! compile_prog "$glib_cflags -Werror" "$glib_libs" ; then - if cc_has_warning_flag "-Wno-unused-function"; then - glib_cflags="$glib_cflags -Wno-unused-function" - CONFIGURE_CFLAGS="$CONFIGURE_CFLAGS -Wno-unused-function" - fi -fi - ########################################## # fdt probe case "$fdt" in auto | enabled | internal) # Simpler to always update submodule, even if not needed. - git_submodules="${git_submodules} dtc" + git_submodules="${git_submodules} subprojects/dtc" ;; esac -########################################## -# check and set a backend for coroutine - -# We prefer ucontext, but it's not always possible. The fallback -# is sigcontext. On Windows the only valid backend is the Windows -# specific one. 
- -ucontext_works=no -if test "$darwin" != "yes"; then - cat > $TMPC << EOF -#include -#ifdef __stub_makecontext -#error Ignoring glibc stub makecontext which will always fail -#endif -int main(void) { makecontext(0, 0, 0); return 0; } -EOF - if compile_prog "" "" ; then - ucontext_works=yes - fi -fi - -if test "$coroutine" = ""; then - if test "$mingw32" = "yes"; then - coroutine=win32 - elif test "$ucontext_works" = "yes"; then - coroutine=ucontext - else - coroutine=sigaltstack - fi -else - case $coroutine in - windows) - if test "$mingw32" != "yes"; then - error_exit "'windows' coroutine backend only valid for Windows" - fi - # Unfortunately the user visible backend name doesn't match the - # coroutine-*.c filename for this case, so we have to adjust it here. - coroutine=win32 - ;; - ucontext) - if test "$ucontext_works" != "yes"; then - error_exit "'ucontext' backend requested but makecontext not available" - fi - ;; - sigaltstack) - if test "$mingw32" = "yes"; then - error_exit "only the 'windows' coroutine backend is valid for Windows" - fi - ;; - *) - error_exit "unknown coroutine backend $coroutine" - ;; - esac -fi - -################################################## -# SafeStack - - -if test "$safe_stack" = "yes"; then -cat > $TMPC << EOF -int main(void) -{ -#if ! __has_feature(safe_stack) -#error SafeStack Disabled -#endif - return 0; -} -EOF - flag="-fsanitize=safe-stack" - # Check that safe-stack is supported and enabled. - if compile_prog "-Werror $flag" "$flag"; then - # Flag needed both at compilation and at linking - QEMU_CFLAGS="$QEMU_CFLAGS $flag" - QEMU_LDFLAGS="$QEMU_LDFLAGS $flag" - else - error_exit "SafeStack not supported by your compiler" - fi - if test "$coroutine" != "ucontext"; then - error_exit "SafeStack is only supported by the coroutine backend ucontext" - fi -else -cat > $TMPC << EOF -int main(void) -{ -#if defined(__has_feature) -#if __has_feature(safe_stack) -#error SafeStack Enabled -#endif -#endif - return 0; -} -EOF -if test "$safe_stack" = "no"; then - # Make sure that safe-stack is disabled - if ! compile_prog "-Werror" ""; then - # SafeStack was already enabled, try to explicitly remove the feature - flag="-fno-sanitize=safe-stack" - if ! compile_prog "-Werror $flag" "$flag"; then - error_exit "Configure cannot disable SafeStack" - fi - QEMU_CFLAGS="$QEMU_CFLAGS $flag" - QEMU_LDFLAGS="$QEMU_LDFLAGS $flag" - fi -else # "$safe_stack" = "" - # Set safe_stack to yes or no based on pre-existing flags - if compile_prog "-Werror" ""; then - safe_stack="no" - else - safe_stack="yes" - if test "$coroutine" != "ucontext"; then - error_exit "SafeStack is only supported by the coroutine backend ucontext" - fi - fi -fi -fi - ######################################## # check if ccache is interfering with # semantic analysis of macros @@ -1686,95 +1218,11 @@ if ! compile_object "-Werror"; then ccache_cpp2=yes fi -################################################# -# clang does not support glibc + FORTIFY_SOURCE. 
- -if test "$fortify_source" != "no"; then - if echo | $cc -dM -E - | grep __clang__ > /dev/null 2>&1 ; then - fortify_source="no"; - elif test -n "$cxx" && has $cxx && - echo | $cxx -dM -E - | grep __clang__ >/dev/null 2>&1 ; then - fortify_source="no"; - else - fortify_source="yes" - fi -fi - -########################################## -# checks for sanitizers - -have_asan=no -have_ubsan=no -have_asan_iface_h=no -have_asan_iface_fiber=no - -if test "$sanitizers" = "yes" ; then - write_c_skeleton - if compile_prog "$CPU_CFLAGS -Werror -fsanitize=address" ""; then - have_asan=yes - fi - - # we could use a simple skeleton for flags checks, but this also - # detect the static linking issue of ubsan, see also: - # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84285 - cat > $TMPC << EOF -#include -int main(void) { - void *tmp = malloc(10); - if (tmp != NULL) { - return *(int *)(tmp + 2); - } - return 1; -} -EOF - if compile_prog "$CPU_CFLAGS -Werror -fsanitize=undefined" ""; then - have_ubsan=yes - fi - - if check_include "sanitizer/asan_interface.h" ; then - have_asan_iface_h=yes - fi - - cat > $TMPC << EOF -#include -int main(void) { - __sanitizer_start_switch_fiber(0, 0, 0); - return 0; -} -EOF - if compile_prog "$CPU_CFLAGS -Werror -fsanitize=address" "" ; then - have_asan_iface_fiber=yes - fi -fi - -# Thread sanitizer is, for now, much noisier than the other sanitizers; -# keep it separate until that is not the case. -if test "$tsan" = "yes" && test "$sanitizers" = "yes"; then - error_exit "TSAN is not supported with other sanitiziers." -fi -have_tsan=no -have_tsan_iface_fiber=no -if test "$tsan" = "yes" ; then - write_c_skeleton - if compile_prog "$CPU_CFLAGS -Werror -fsanitize=thread" "" ; then - have_tsan=yes - fi - cat > $TMPC << EOF -#include -int main(void) { - __tsan_create_fiber(0); - return 0; -} -EOF - if compile_prog "$CPU_CFLAGS -Werror -fsanitize=thread" "" ; then - have_tsan_iface_fiber=yes - fi -fi - ########################################## # functions to probe cross compilers container="no" +runc="" if test $use_containers = "yes" && (has "docker" || has "podman"); then case $($python "$source_path"/tests/docker/docker.py probe) in *docker) container=docker ;; @@ -1783,6 +1231,7 @@ if test $use_containers = "yes" && (has "docker" || has "podman"); then esac if test "$container" != "no"; then docker_py="$python $source_path/tests/docker/docker.py --engine $container" + runc=$($python "$source_path"/tests/docker/docker.py probe) fi fi @@ -1818,7 +1267,7 @@ fi : ${cross_cc_armeb="$cross_cc_arm"} : ${cross_cc_cflags_armeb="-mbig-endian"} : ${cross_cc_hexagon="hexagon-unknown-linux-musl-clang"} -: ${cross_cc_cflags_hexagon="-mv67 -O2 -static"} +: ${cross_cc_cflags_hexagon="-mv73 -O2 -static"} : ${cross_cc_cflags_i386="-m32"} : ${cross_cc_cflags_ppc="-m32 -mbig-endian"} : ${cross_cc_cflags_ppc64="-m64 -mbig-endian"} @@ -1873,13 +1322,6 @@ probe_target_compiler() { container_cross_ranlib= container_cross_strip= - # We shall skip configuring the target compiler if the user didn't - # bother enabling an appropriate guest. This avoids building - # extraneous firmware images and tests. 
- if test "${target_list#*$1}" = "$1"; then - return 1 - fi - target_arch=${1%%-*} case $target_arch in aarch64) container_hosts="x86_64 aarch64" ;; @@ -2194,42 +1636,6 @@ case "$vfio_user_server" in ;; esac -########################################## -# End of CC checks -# After here, no more $cc or $ld runs - -write_c_skeleton - -if test "$fortify_source" = "yes" ; then - QEMU_CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $QEMU_CFLAGS" -fi - -if test "$have_asan" = "yes"; then - QEMU_CFLAGS="-fsanitize=address $QEMU_CFLAGS" - QEMU_LDFLAGS="-fsanitize=address $QEMU_LDFLAGS" - if test "$have_asan_iface_h" = "no" ; then - echo "ASAN build enabled, but ASAN header missing." \ - "Without code annotation, the report may be inferior." - elif test "$have_asan_iface_fiber" = "no" ; then - echo "ASAN build enabled, but ASAN header is too old." \ - "Without code annotation, the report may be inferior." - fi -fi -if test "$have_tsan" = "yes" ; then - if test "$have_tsan_iface_fiber" = "yes" ; then - QEMU_CFLAGS="-fsanitize=thread $QEMU_CFLAGS" - QEMU_LDFLAGS="-fsanitize=thread $QEMU_LDFLAGS" - else - error_exit "Cannot enable TSAN due to missing fiber annotation interface." - fi -elif test "$tsan" = "yes" ; then - error_exit "Cannot enable TSAN due to missing sanitize thread interface." -fi -if test "$have_ubsan" = "yes"; then - QEMU_CFLAGS="-fsanitize=undefined $QEMU_CFLAGS" - QEMU_LDFLAGS="-fsanitize=undefined $QEMU_LDFLAGS" -fi - ####################################### # cross-compiled firmware targets @@ -2242,6 +1648,7 @@ fi # tests might fail. Prefer to keep the relevant files in their own # directory and symlink the directory instead. LINKS="Makefile" +LINKS="$LINKS docs/config" LINKS="$LINKS pc-bios/optionrom/Makefile" LINKS="$LINKS pc-bios/s390-ccw/Makefile" LINKS="$LINKS pc-bios/vof/Makefile" @@ -2252,7 +1659,6 @@ LINKS="$LINKS python" LINKS="$LINKS contrib/plugins/Makefile " for f in $LINKS ; do if [ -e "$source_path/$f" ]; then - mkdir -p "$(dirname ./"$f")" symlink "$source_path/$f" "$f" fi done @@ -2343,30 +1749,14 @@ fi if test "$solaris" = "yes" ; then echo "CONFIG_SOLARIS=y" >> $config_host_mak fi -if test "$static" = "yes" ; then - echo "CONFIG_STATIC=y" >> $config_host_mak -fi echo "SRC_PATH=$source_path" >> $config_host_mak echo "TARGET_DIRS=$target_list" >> $config_host_mak -if test "$modules" = "yes"; then - echo "CONFIG_MODULES=y" >> $config_host_mak -fi # XXX: suppress that if [ "$bsd" = "yes" ] ; then echo "CONFIG_BSD=y" >> $config_host_mak fi -echo "CONFIG_COROUTINE_BACKEND=$coroutine" >> $config_host_mak - -if test "$have_asan_iface_fiber" = "yes" ; then - echo "CONFIG_ASAN_IFACE_FIBER=y" >> $config_host_mak -fi - -if test "$have_tsan" = "yes" && test "$have_tsan_iface_fiber" = "yes" ; then - echo "CONFIG_TSAN=y" >> $config_host_mak -fi - if test "$plugins" = "yes" ; then echo "CONFIG_PLUGIN=y" >> $config_host_mak fi @@ -2375,6 +1765,7 @@ if test -n "$gdb_bin"; then gdb_version=$($gdb_bin --version | head -n 1) if version_ge ${gdb_version##* } 9.1; then echo "HAVE_GDB_BIN=$gdb_bin" >> $config_host_mak + gdb_arches=$($python "$source_path/scripts/probe-gdb-support.py" $gdb_bin) else gdb_bin="" fi @@ -2382,6 +1773,7 @@ fi if test "$container" != no; then echo "ENGINE=$container" >> $config_host_mak + echo "RUNC=$runc" >> $config_host_mak fi if test "$as_shared_lib" = "yes" ; then @@ -2392,19 +1784,12 @@ if test "$as_static_lib" = "yes" ; then fi echo "ROMS=$roms" >> $config_host_mak -echo "MAKE=$make" >> $config_host_mak echo "PYTHON=$python" >> $config_host_mak echo 
"GENISOIMAGE=$genisoimage" >> $config_host_mak echo "MESON=$meson" >> $config_host_mak echo "NINJA=$ninja" >> $config_host_mak +echo "PKG_CONFIG=${pkg_config}" >> $config_host_mak echo "CC=$cc" >> $config_host_mak -echo "QEMU_CFLAGS=$QEMU_CFLAGS" >> $config_host_mak -echo "QEMU_OBJCFLAGS=$QEMU_OBJCFLAGS" >> $config_host_mak -echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak -echo "GLIB_LIBS=$glib_libs" >> $config_host_mak -echo "GLIB_BINDIR=$glib_bindir" >> $config_host_mak -echo "GLIB_VERSION=$($pkg_config --modversion glib-2.0)" >> $config_host_mak -echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak echo "EXESUF=$EXESUF" >> $config_host_mak # use included Linux headers @@ -2458,10 +1843,6 @@ if test "$ccache_cpp2" = "yes"; then echo "export CCACHE_CPP2=y" >> $config_host_mak fi -if test "$safe_stack" = "yes"; then - echo "CONFIG_SAFESTACK=y" >> $config_host_mak -fi - # tests/tcg configuration (config_host_mak=tests/tcg/config-host.mak mkdir -p tests/tcg @@ -2506,6 +1887,12 @@ for target in $target_list; do write_target_makefile "build-tcg-tests-$target" >> "$config_target_mak" echo "BUILD_STATIC=$build_static" >> "$config_target_mak" echo "QEMU=$PWD/$qemu" >> "$config_target_mak" + + # will GDB work with these binaries? + if test "${gdb_arches#*$arch}" != "$gdb_arches"; then + echo "HOST_GDB_SUPPORTS_ARCH=y" >> "$config_target_mak" + fi + echo "run-tcg-tests-$target: $qemu\$(EXESUF)" >> Makefile.prereqs tcg_tests_targets="$tcg_tests_targets $target" fi @@ -2544,7 +1931,7 @@ if test "$skip_meson" = no; then test -n "$objcc" && echo "objc = [$(meson_quote $objcc $CPU_CFLAGS)]" >> $cross echo "ar = [$(meson_quote $ar)]" >> $cross echo "nm = [$(meson_quote $nm)]" >> $cross - echo "pkgconfig = [$(meson_quote $pkg_config_exe)]" >> $cross + echo "pkgconfig = [$(meson_quote $pkg_config)]" >> $cross echo "ranlib = [$(meson_quote $ranlib)]" >> $cross if has $sdl2_config; then echo "sdl2-config = [$(meson_quote $sdl2_config)]" >> $cross @@ -2578,14 +1965,20 @@ if test "$skip_meson" = no; then rm -rf meson-private meson-info meson-logs + # Prevent meson from automatically downloading wrapped subprojects when missing. + # You can use 'meson subprojects download' before running configure. + meson_option_add "--wrap-mode=nodownload" + # Built-in options test "$bindir" != "bin" && meson_option_add "-Dbindir=$bindir" test "$default_feature" = no && meson_option_add -Dauto_features=disabled + test "$static" = yes && meson_option_add -Dprefer_static=true test "$pie" = no && meson_option_add -Db_pie=false test "$werror" = yes && meson_option_add -Dwerror=true # QEMU options test "$cfi" != false && meson_option_add "-Dcfi=$cfi" + test "$docs" != auto && meson_option_add "-Ddocs=$docs" test "$fdt" != auto && meson_option_add "-Dfdt=$fdt" test -n "${LIB_FUZZING_ENGINE+xxx}" && meson_option_add "-Dfuzzing_engine=$LIB_FUZZING_ENGINE" test "$qemu_suffix" != qemu && meson_option_add "-Dqemu_suffix=$qemu_suffix" @@ -2599,6 +1992,14 @@ if test "$skip_meson" = no; then if test "$?" -ne 0 ; then error_exit "meson setup failed" fi +else + if test -f meson-private/cmd_line.txt; then + # Adjust old command line options that were removed + # sed -i is not portable + perl -i -ne ' + /^sphinx_build/ && next; + print;' meson-private/cmd_line.txt + fi fi # Save the configure command line for later reuse. 
@@ -2635,7 +2036,6 @@ preserve_env CXXFLAGS preserve_env LD preserve_env LDFLAGS preserve_env LD_LIBRARY_PATH -preserve_env MAKE preserve_env NM preserve_env OBJCFLAGS preserve_env OBJCOPY diff --git a/contrib/elf2dmp/addrspace.c b/contrib/elf2dmp/addrspace.c index 53ded17061..0b04cba00e 100644 --- a/contrib/elf2dmp/addrspace.c +++ b/contrib/elf2dmp/addrspace.c @@ -11,6 +11,7 @@ static struct pa_block *pa_space_find_block(struct pa_space *ps, uint64_t pa) { size_t i; + for (i = 0; i < ps->block_nr; i++) { if (ps->block[i].paddr <= pa && pa <= ps->block[i].paddr + ps->block[i].size) { diff --git a/contrib/elf2dmp/main.c b/contrib/elf2dmp/main.c index d77b8f98f7..89f0c69ab0 100644 --- a/contrib/elf2dmp/main.c +++ b/contrib/elf2dmp/main.c @@ -17,6 +17,7 @@ #define SYM_URL_BASE "https://msdl.microsoft.com/download/symbols/" #define PDB_NAME "ntkrnlmp.pdb" +#define PE_NAME "ntoskrnl.exe" #define INITIAL_MXCSR 0x1f80 @@ -282,14 +283,16 @@ static int fill_header(WinDumpHeader64 *hdr, struct pa_space *ps, }; for (i = 0; i < ps->block_nr; i++) { - h.PhysicalMemoryBlock.NumberOfPages += ps->block[i].size / ELF2DMP_PAGE_SIZE; + h.PhysicalMemoryBlock.NumberOfPages += + ps->block[i].size / ELF2DMP_PAGE_SIZE; h.PhysicalMemoryBlock.Run[i] = (WinDumpPhyMemRun64) { .BasePage = ps->block[i].paddr / ELF2DMP_PAGE_SIZE, .PageCount = ps->block[i].size / ELF2DMP_PAGE_SIZE, }; } - h.RequiredDumpSpace += h.PhysicalMemoryBlock.NumberOfPages << ELF2DMP_PAGE_BITS; + h.RequiredDumpSpace += + h.PhysicalMemoryBlock.NumberOfPages << ELF2DMP_PAGE_BITS; *hdr = h; @@ -299,7 +302,8 @@ static int fill_header(WinDumpHeader64 *hdr, struct pa_space *ps, static int fill_context(KDDEBUGGER_DATA64 *kdbg, struct va_space *vs, QEMU_Elf *qe) { - int i; + int i; + for (i = 0; i < qe->state_nr; i++) { uint64_t Prcb; uint64_t Context; @@ -330,6 +334,45 @@ static int fill_context(KDDEBUGGER_DATA64 *kdbg, return 0; } +static int pe_get_data_dir_entry(uint64_t base, void *start_addr, int idx, + void *entry, size_t size, struct va_space *vs) +{ + const char e_magic[2] = "MZ"; + const char Signature[4] = "PE\0\0"; + IMAGE_DOS_HEADER *dos_hdr = start_addr; + IMAGE_NT_HEADERS64 nt_hdrs; + IMAGE_FILE_HEADER *file_hdr = &nt_hdrs.FileHeader; + IMAGE_OPTIONAL_HEADER64 *opt_hdr = &nt_hdrs.OptionalHeader; + IMAGE_DATA_DIRECTORY *data_dir = nt_hdrs.OptionalHeader.DataDirectory; + + QEMU_BUILD_BUG_ON(sizeof(*dos_hdr) >= ELF2DMP_PAGE_SIZE); + + if (memcmp(&dos_hdr->e_magic, e_magic, sizeof(e_magic))) { + return 1; + } + + if (va_space_rw(vs, base + dos_hdr->e_lfanew, + &nt_hdrs, sizeof(nt_hdrs), 0)) { + return 1; + } + + if (memcmp(&nt_hdrs.Signature, Signature, sizeof(Signature)) || + file_hdr->Machine != 0x8664 || opt_hdr->Magic != 0x020b) { + return 1; + } + + if (va_space_rw(vs, + base + data_dir[idx].VirtualAddress, + entry, size, 0)) { + return 1; + } + + printf("Data directory entry #%d: RVA = 0x%08"PRIx32"\n", idx, + (uint32_t)data_dir[idx].VirtualAddress); + + return 0; +} + static int write_dump(struct pa_space *ps, WinDumpHeader64 *hdr, const char *name) { @@ -363,45 +406,38 @@ static int write_dump(struct pa_space *ps, return fclose(dmp_file); } +static bool pe_check_export_name(uint64_t base, void *start_addr, + struct va_space *vs) +{ + IMAGE_EXPORT_DIRECTORY export_dir; + const char *pe_name; + + if (pe_get_data_dir_entry(base, start_addr, IMAGE_FILE_EXPORT_DIRECTORY, + &export_dir, sizeof(export_dir), vs)) { + return false; + } + + pe_name = va_space_resolve(vs, base + export_dir.Name); + if (!pe_name) { + return false; + } + + return 
!strcmp(pe_name, PE_NAME); +} + static int pe_get_pdb_symstore_hash(uint64_t base, void *start_addr, char *hash, struct va_space *vs) { - const char e_magic[2] = "MZ"; - const char Signature[4] = "PE\0\0"; const char sign_rsds[4] = "RSDS"; - IMAGE_DOS_HEADER *dos_hdr = start_addr; - IMAGE_NT_HEADERS64 nt_hdrs; - IMAGE_FILE_HEADER *file_hdr = &nt_hdrs.FileHeader; - IMAGE_OPTIONAL_HEADER64 *opt_hdr = &nt_hdrs.OptionalHeader; - IMAGE_DATA_DIRECTORY *data_dir = nt_hdrs.OptionalHeader.DataDirectory; IMAGE_DEBUG_DIRECTORY debug_dir; OMFSignatureRSDS rsds; char *pdb_name; size_t pdb_name_sz; size_t i; - QEMU_BUILD_BUG_ON(sizeof(*dos_hdr) >= ELF2DMP_PAGE_SIZE); - - if (memcmp(&dos_hdr->e_magic, e_magic, sizeof(e_magic))) { - return 1; - } - - if (va_space_rw(vs, base + dos_hdr->e_lfanew, - &nt_hdrs, sizeof(nt_hdrs), 0)) { - return 1; - } - - if (memcmp(&nt_hdrs.Signature, Signature, sizeof(Signature)) || - file_hdr->Machine != 0x8664 || opt_hdr->Magic != 0x020b) { - return 1; - } - - printf("Debug Directory RVA = 0x%08"PRIx32"\n", - (uint32_t)data_dir[IMAGE_FILE_DEBUG_DIRECTORY].VirtualAddress); - - if (va_space_rw(vs, - base + data_dir[IMAGE_FILE_DEBUG_DIRECTORY].VirtualAddress, - &debug_dir, sizeof(debug_dir), 0)) { + if (pe_get_data_dir_entry(base, start_addr, IMAGE_FILE_DEBUG_DIRECTORY, + &debug_dir, sizeof(debug_dir), vs)) { + eprintf("Failed to get Debug Directory\n"); return 1; } @@ -473,6 +509,7 @@ int main(int argc, char *argv[]) uint64_t KdDebuggerDataBlock; KDDEBUGGER_DATA64 *kdbg; uint64_t KdVersionBlock; + bool kernel_found = false; if (argc != 3) { eprintf("usage:\n\t%s elf_file dmp_file\n", argv[0]); @@ -520,11 +557,14 @@ int main(int argc, char *argv[]) } if (*(uint16_t *)nt_start_addr == 0x5a4d) { /* MZ */ - break; + if (pe_check_export_name(KernBase, nt_start_addr, &vs)) { + kernel_found = true; + break; + } } } - if (!nt_start_addr) { + if (!kernel_found) { eprintf("Failed to find NT kernel image\n"); err = 1; goto out_ps; diff --git a/contrib/elf2dmp/pe.h b/contrib/elf2dmp/pe.h index c2a4a6ba7c..71126af1ac 100644 --- a/contrib/elf2dmp/pe.h +++ b/contrib/elf2dmp/pe.h @@ -33,75 +33,90 @@ typedef struct IMAGE_DOS_HEADER { } __attribute__ ((packed)) IMAGE_DOS_HEADER; typedef struct IMAGE_FILE_HEADER { - uint16_t Machine; - uint16_t NumberOfSections; - uint32_t TimeDateStamp; - uint32_t PointerToSymbolTable; - uint32_t NumberOfSymbols; - uint16_t SizeOfOptionalHeader; - uint16_t Characteristics; + uint16_t Machine; + uint16_t NumberOfSections; + uint32_t TimeDateStamp; + uint32_t PointerToSymbolTable; + uint32_t NumberOfSymbols; + uint16_t SizeOfOptionalHeader; + uint16_t Characteristics; } __attribute__ ((packed)) IMAGE_FILE_HEADER; typedef struct IMAGE_DATA_DIRECTORY { - uint32_t VirtualAddress; - uint32_t Size; + uint32_t VirtualAddress; + uint32_t Size; } __attribute__ ((packed)) IMAGE_DATA_DIRECTORY; #define IMAGE_NUMBEROF_DIRECTORY_ENTRIES 16 typedef struct IMAGE_OPTIONAL_HEADER64 { - uint16_t Magic; /* 0x20b */ - uint8_t MajorLinkerVersion; - uint8_t MinorLinkerVersion; - uint32_t SizeOfCode; - uint32_t SizeOfInitializedData; - uint32_t SizeOfUninitializedData; - uint32_t AddressOfEntryPoint; - uint32_t BaseOfCode; - uint64_t ImageBase; - uint32_t SectionAlignment; - uint32_t FileAlignment; - uint16_t MajorOperatingSystemVersion; - uint16_t MinorOperatingSystemVersion; - uint16_t MajorImageVersion; - uint16_t MinorImageVersion; - uint16_t MajorSubsystemVersion; - uint16_t MinorSubsystemVersion; - uint32_t Win32VersionValue; - uint32_t SizeOfImage; - uint32_t SizeOfHeaders; - 
uint32_t CheckSum; - uint16_t Subsystem; - uint16_t DllCharacteristics; - uint64_t SizeOfStackReserve; - uint64_t SizeOfStackCommit; - uint64_t SizeOfHeapReserve; - uint64_t SizeOfHeapCommit; - uint32_t LoaderFlags; - uint32_t NumberOfRvaAndSizes; - IMAGE_DATA_DIRECTORY DataDirectory[IMAGE_NUMBEROF_DIRECTORY_ENTRIES]; + uint16_t Magic; /* 0x20b */ + uint8_t MajorLinkerVersion; + uint8_t MinorLinkerVersion; + uint32_t SizeOfCode; + uint32_t SizeOfInitializedData; + uint32_t SizeOfUninitializedData; + uint32_t AddressOfEntryPoint; + uint32_t BaseOfCode; + uint64_t ImageBase; + uint32_t SectionAlignment; + uint32_t FileAlignment; + uint16_t MajorOperatingSystemVersion; + uint16_t MinorOperatingSystemVersion; + uint16_t MajorImageVersion; + uint16_t MinorImageVersion; + uint16_t MajorSubsystemVersion; + uint16_t MinorSubsystemVersion; + uint32_t Win32VersionValue; + uint32_t SizeOfImage; + uint32_t SizeOfHeaders; + uint32_t CheckSum; + uint16_t Subsystem; + uint16_t DllCharacteristics; + uint64_t SizeOfStackReserve; + uint64_t SizeOfStackCommit; + uint64_t SizeOfHeapReserve; + uint64_t SizeOfHeapCommit; + uint32_t LoaderFlags; + uint32_t NumberOfRvaAndSizes; + IMAGE_DATA_DIRECTORY DataDirectory[IMAGE_NUMBEROF_DIRECTORY_ENTRIES]; } __attribute__ ((packed)) IMAGE_OPTIONAL_HEADER64; typedef struct IMAGE_NT_HEADERS64 { - uint32_t Signature; - IMAGE_FILE_HEADER FileHeader; - IMAGE_OPTIONAL_HEADER64 OptionalHeader; + uint32_t Signature; + IMAGE_FILE_HEADER FileHeader; + IMAGE_OPTIONAL_HEADER64 OptionalHeader; } __attribute__ ((packed)) IMAGE_NT_HEADERS64; +typedef struct IMAGE_EXPORT_DIRECTORY { + uint32_t Characteristics; + uint32_t TimeDateStamp; + uint16_t MajorVersion; + uint16_t MinorVersion; + uint32_t Name; + uint32_t Base; + uint32_t NumberOfFunctions; + uint32_t NumberOfNames; + uint32_t AddressOfFunctions; + uint32_t AddressOfNames; + uint32_t AddressOfNameOrdinals; +} __attribute__ ((packed)) IMAGE_EXPORT_DIRECTORY; + typedef struct IMAGE_DEBUG_DIRECTORY { - uint32_t Characteristics; - uint32_t TimeDateStamp; - uint16_t MajorVersion; - uint16_t MinorVersion; - uint32_t Type; - uint32_t SizeOfData; - uint32_t AddressOfRawData; - uint32_t PointerToRawData; + uint32_t Characteristics; + uint32_t TimeDateStamp; + uint16_t MajorVersion; + uint16_t MinorVersion; + uint32_t Type; + uint32_t SizeOfData; + uint32_t AddressOfRawData; + uint32_t PointerToRawData; } __attribute__ ((packed)) IMAGE_DEBUG_DIRECTORY; #define IMAGE_DEBUG_TYPE_CODEVIEW 2 #endif +#define IMAGE_FILE_EXPORT_DIRECTORY 0 #define IMAGE_FILE_DEBUG_DIRECTORY 6 typedef struct guid_t { diff --git a/contrib/gitdm/domain-map b/contrib/gitdm/domain-map index 3727918641..3e31a06245 100644 --- a/contrib/gitdm/domain-map +++ b/contrib/gitdm/domain-map @@ -4,7 +4,12 @@ # This maps email domains to nice easy to read company names # +linux.alibaba.com Alibaba +amazon.com Amazon +amazon.co.uk Amazon +amazon.de Amazon amd.com AMD +aspeedtech.com ASPEED Technology Inc. 
baidu.com Baidu bytedance.com ByteDance cmss.chinamobile.com China Mobile @@ -12,6 +17,7 @@ citrix.com Citrix crudebyte.com Crudebyte chinatelecom.cn China Telecom eldorado.org.br Instituto de Pesquisas Eldorado +fb.com Facebook fujitsu.com Fujitsu google.com Google greensocs.com GreenSocs @@ -31,15 +37,18 @@ oracle.com Oracle proxmox.com Proxmox quicinc.com Qualcomm Innovation Center redhat.com Red Hat +rev.ng rev.ng Labs rt-rk.com RT-RK samsung.com Samsung siemens.com Siemens sifive.com SiFive suse.com SUSE suse.de SUSE +syrmia.com SYRMIA +ventanamicro.com Ventana Micro Systems virtuozzo.com Virtuozzo +vrull.eu VRULL wdc.com Western Digital windriver.com Wind River -xilinx.com Xilinx yadro.com YADRO yandex-team.ru Yandex diff --git a/contrib/gitdm/group-map-alibaba b/contrib/gitdm/group-map-alibaba new file mode 100644 index 0000000000..4c34446d34 --- /dev/null +++ b/contrib/gitdm/group-map-alibaba @@ -0,0 +1,7 @@ +# +# Alibaba contributors including its subsidiaries +# + +# c-sky.com, now part of T-Head, wholly-owned entity of Alibaba Group +ren_guo@c-sky.com +zhiwei_liu@c-sky.com diff --git a/contrib/gitdm/group-map-amd b/contrib/gitdm/group-map-amd new file mode 100644 index 0000000000..bda4239a8a --- /dev/null +++ b/contrib/gitdm/group-map-amd @@ -0,0 +1,8 @@ +# AMD acquired Xilinx and contributors have been slowly updating emails + +edgar.iglesias@xilinx.com +fnu.vikram@xilinx.com +francisco.iglesias@xilinx.com +sai.pavan.boddu@xilinx.com +stefano.stabellini@xilinx.com +tong.ho@xilinx.com diff --git a/contrib/gitdm/group-map-facebook b/contrib/gitdm/group-map-facebook new file mode 100644 index 0000000000..38589f8fb9 --- /dev/null +++ b/contrib/gitdm/group-map-facebook @@ -0,0 +1,5 @@ +# +# Some Facebook contributors also occasionally use personal email addresses. +# + +peter@pjd.dev diff --git a/contrib/gitdm/group-map-ibm b/contrib/gitdm/group-map-ibm index da62fa3f44..24d8dc1b86 100644 --- a/contrib/gitdm/group-map-ibm +++ b/contrib/gitdm/group-map-ibm @@ -12,3 +12,4 @@ jcfaracco@gmail.com joel@jms.id.au sjitindarsingh@gmail.com tommusta@gmail.com +idan.horowitz@gmail.com diff --git a/contrib/gitdm/group-map-individuals b/contrib/gitdm/group-map-individuals index 53883cc526..d7116f5444 100644 --- a/contrib/gitdm/group-map-individuals +++ b/contrib/gitdm/group-map-individuals @@ -37,3 +37,8 @@ akihiko.odaki@gmail.com paul@nowt.org git@xen0n.name simon@simonsafar.com +research_trasio@irq.a4lg.com +shentey@gmail.com +bmeng@tinylab.org +strahinja.p.jankovic@gmail.com +Jason@zx2c4.com diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile index 23e0396687..b2b9db9f51 100644 --- a/contrib/plugins/Makefile +++ b/contrib/plugins/Makefile @@ -3,7 +3,7 @@ # This Makefile example is fairly independent from the main makefile # so users can take and adapt it for their build. We only really # include config-host.mak so we don't have to repeat probing for -# cflags that the main configure has already done for us. +# programs that the main configure has already done for us. # BUILD_DIR := $(CURDIR)/../.. @@ -26,9 +26,8 @@ SONAMES := $(addsuffix .so,$(addprefix lib,$(NAMES))) # The main QEMU uses Glib extensively so it's perfectly fine to use it # in plugins (which many examples do). 
-CFLAGS = $(GLIB_CFLAGS) -CFLAGS += -fPIC -Wall $(filter -W%, $(QEMU_CFLAGS)) -CFLAGS += $(if $(findstring no-psabi,$(QEMU_CFLAGS)),-Wpsabi) +CFLAGS := $(shell $(PKG_CONFIG) --cflags glib-2.0) +CFLAGS += -fPIC -Wall CFLAGS += $(if $(CONFIG_DEBUG_TCG), -ggdb -O0) CFLAGS += -I$(SRC_PATH)/include/qemu diff --git a/cpu.c b/cpu.c index c34e6f7c24..348c23b3f6 100644 --- a/cpu.c +++ b/cpu.c @@ -31,16 +31,18 @@ #include "hw/core/sysemu-cpu-ops.h" #include "exec/address-spaces.h" #endif +#include "sysemu/cpus.h" #include "sysemu/tcg.h" -#include "sysemu/kvm.h" -#include "sysemu/replay.h" +#include "exec/replay-core.h" #include "exec/cpu-common.h" #include "exec/exec-all.h" +#include "exec/tb-flush.h" #include "exec/translate-all.h" #include "exec/log.h" #include "hw/core/accel-cpu.h" #include "trace/trace-root.h" #include "qemu/accel.h" +#include "qemu/plugin.h" //// --- Begin LibAFL code --- @@ -580,7 +582,7 @@ const char *parse_cpu_option(const char *cpu_option) return cpu_type; } -void list_cpus(const char *optarg) +void list_cpus(void) { /* XXX: implement xxx_cpu_list for targets that still miss it */ #if defined(cpu_list) @@ -636,86 +638,20 @@ void libafl_breakpoint_invalidate(CPUState *cpu, target_ulong pc) //// --- End LibAFL code --- #endif -/* Add a breakpoint. */ -int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, - CPUBreakpoint **breakpoint) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - CPUBreakpoint *bp; - - if (cc->gdb_adjust_breakpoint) { - pc = cc->gdb_adjust_breakpoint(cpu, pc); - } - - bp = g_malloc(sizeof(*bp)); - - bp->pc = pc; - bp->flags = flags; - - /* keep all GDB-injected breakpoints in front */ - if (flags & BP_GDB) { - QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry); - } else { - QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry); - } - - if (breakpoint) { - *breakpoint = bp; - } - - trace_breakpoint_insert(cpu->cpu_index, pc, flags); - return 0; -} - -/* Remove a specific breakpoint. */ -int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - CPUBreakpoint *bp; - - if (cc->gdb_adjust_breakpoint) { - pc = cc->gdb_adjust_breakpoint(cpu, pc); - } - - QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { - if (bp->pc == pc && bp->flags == flags) { - cpu_breakpoint_remove_by_ref(cpu, bp); - return 0; - } - } - return -ENOENT; -} - -/* Remove a specific breakpoint by reference. */ -void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp) -{ - QTAILQ_REMOVE(&cpu->breakpoints, bp, entry); - - trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags); - g_free(bp); -} - -/* Remove all matching breakpoints. */ -void cpu_breakpoint_remove_all(CPUState *cpu, int mask) -{ - CPUBreakpoint *bp, *next; - - QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) { - if (bp->flags & mask) { - cpu_breakpoint_remove_by_ref(cpu, bp); - } - } -} - /* enable or disable single step mode. 
EXCP_DEBUG is returned by the CPU loop after each instruction */ void cpu_single_step(CPUState *cpu, int enabled) { if (cpu->singlestep_enabled != enabled) { cpu->singlestep_enabled = enabled; - if (kvm_enabled()) { - kvm_update_guest_debug(cpu, 0); + +#if !defined(CONFIG_USER_ONLY) + const AccelOpsClass *ops = cpus_get_accel(); + if (ops->update_guest_debug) { + ops->update_guest_debug(cpu); } +#endif + trace_breakpoint_singlestep(cpu->cpu_index, enabled); } } @@ -808,6 +744,11 @@ bool target_words_bigendian(void) #endif } +const char *target_name(void) +{ + return TARGET_NAME; +} + void page_size_init(void) { /* NOTE: we can always suppose that qemu_host_page_size >= diff --git a/cpus-common.c b/cpus-common.c index 39f355de98..45c745ecf6 100644 --- a/cpus-common.c +++ b/cpus-common.c @@ -23,8 +23,9 @@ #include "hw/core/cpu.h" #include "sysemu/cpus.h" #include "qemu/lockable.h" +#include "trace/trace-root.h" -static QemuMutex qemu_cpu_list_lock; +QemuMutex qemu_cpu_list_lock; static QemuCond exclusive_cond; static QemuCond exclusive_resume; static QemuCond qemu_work_cond; @@ -156,7 +157,7 @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data, wi.exclusive = false; queue_work_on_cpu(cpu, &wi); - while (!qatomic_mb_read(&wi.done)) { + while (!qatomic_load_acquire(&wi.done)) { CPUState *self_cpu = current_cpu; qemu_cond_wait(&qemu_work_cond, mutex); @@ -362,9 +363,80 @@ void process_queued_cpu_work(CPUState *cpu) if (wi->free) { g_free(wi); } else { - qatomic_mb_set(&wi->done, true); + qatomic_store_release(&wi->done, true); } } qemu_mutex_unlock(&cpu->work_mutex); qemu_cond_broadcast(&qemu_work_cond); } + +/* Add a breakpoint. */ +int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, + CPUBreakpoint **breakpoint) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + CPUBreakpoint *bp; + + if (cc->gdb_adjust_breakpoint) { + pc = cc->gdb_adjust_breakpoint(cpu, pc); + } + + bp = g_malloc(sizeof(*bp)); + + bp->pc = pc; + bp->flags = flags; + + /* keep all GDB-injected breakpoints in front */ + if (flags & BP_GDB) { + QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry); + } else { + QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry); + } + + if (breakpoint) { + *breakpoint = bp; + } + + trace_breakpoint_insert(cpu->cpu_index, pc, flags); + return 0; +} + +/* Remove a specific breakpoint. */ +int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + CPUBreakpoint *bp; + + if (cc->gdb_adjust_breakpoint) { + pc = cc->gdb_adjust_breakpoint(cpu, pc); + } + + QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { + if (bp->pc == pc && bp->flags == flags) { + cpu_breakpoint_remove_by_ref(cpu, bp); + return 0; + } + } + return -ENOENT; +} + +/* Remove a specific breakpoint by reference. */ +void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp) +{ + QTAILQ_REMOVE(&cpu->breakpoints, bp, entry); + + trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags); + g_free(bp); +} + +/* Remove all matching breakpoints. 
*/ +void cpu_breakpoint_remove_all(CPUState *cpu, int mask) +{ + CPUBreakpoint *bp, *next; + + QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) { + if (bp->flags & mask) { + cpu_breakpoint_remove_by_ref(cpu, bp); + } + } +} diff --git a/crypto/afalg.c b/crypto/afalg.c index 10046bb0ae..348301e703 100644 --- a/crypto/afalg.c +++ b/crypto/afalg.c @@ -59,7 +59,7 @@ qcrypto_afalg_socket_bind(const char *type, const char *name, if (bind(sbind, (const struct sockaddr *)&salg, sizeof(salg)) != 0) { error_setg_errno(errp, errno, "Failed to bind socket"); - closesocket(sbind); + close(sbind); return -1; } @@ -105,11 +105,11 @@ void qcrypto_afalg_comm_free(QCryptoAFAlg *afalg) } if (afalg->tfmfd != -1) { - closesocket(afalg->tfmfd); + close(afalg->tfmfd); } if (afalg->opfd != -1) { - closesocket(afalg->opfd); + close(afalg->opfd); } g_free(afalg); diff --git a/disas/disas-internal.h b/disas/disas-internal.h new file mode 100644 index 0000000000..84a01f126f --- /dev/null +++ b/disas/disas-internal.h @@ -0,0 +1,21 @@ +/* + * Definitions used internally in the disassembly code + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef DISAS_INTERNAL_H +#define DISAS_INTERNAL_H + +#include "disas/dis-asm.h" + +typedef struct CPUDebug { + struct disassemble_info info; + CPUState *cpu; +} CPUDebug; + +void disas_initialize_debug_target(CPUDebug *s, CPUState *cpu); +int disas_gstring_printf(FILE *stream, const char *fmt, ...) + G_GNUC_PRINTF(2, 3); + +#endif diff --git a/disas/disas-mon.c b/disas/disas-mon.c new file mode 100644 index 0000000000..48ac492c6c --- /dev/null +++ b/disas/disas-mon.c @@ -0,0 +1,65 @@ +/* + * Functions related to disassembly from the monitor + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "disas-internal.h" +#include "disas/disas.h" +#include "exec/memory.h" +#include "hw/core/cpu.h" +#include "monitor/monitor.h" + +static int +physical_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length, + struct disassemble_info *info) +{ + CPUDebug *s = container_of(info, CPUDebug, info); + MemTxResult res; + + res = address_space_read(s->cpu->as, memaddr, MEMTXATTRS_UNSPECIFIED, + myaddr, length); + return res == MEMTX_OK ? 0 : EIO; +} + +/* Disassembler for the monitor. */ +void monitor_disas(Monitor *mon, CPUState *cpu, uint64_t pc, + int nb_insn, bool is_physical) +{ + int count, i; + CPUDebug s; + g_autoptr(GString) ds = g_string_new(""); + + disas_initialize_debug_target(&s, cpu); + s.info.fprintf_func = disas_gstring_printf; + s.info.stream = (FILE *)ds; /* abuse this slot */ + + if (is_physical) { + s.info.read_memory_func = physical_read_memory; + } + s.info.buffer_vma = pc; + + if (s.info.cap_arch >= 0 && cap_disas_monitor(&s.info, pc, nb_insn)) { + monitor_puts(mon, ds->str); + return; + } + + if (!s.info.print_insn) { + monitor_printf(mon, "0x%08" PRIx64 + ": Asm output not supported on this arch\n", pc); + return; + } + + for (i = 0; i < nb_insn; i++) { + g_string_append_printf(ds, "0x%08" PRIx64 ": ", pc); + count = s.info.print_insn(pc, &s.info); + g_string_append_c(ds, '\n'); + if (count < 0) { + break; + } + pc += count; + } + + monitor_puts(mon, ds->str); +} diff --git a/disas.c b/disas/disas.c similarity index 78% rename from disas.c rename to disas/disas.c index b087c12c47..0d2d06c2ec 100644 --- a/disas.c +++ b/disas/disas.c @@ -1,16 +1,12 @@ /* General "disassemble this chunk" code. Used for debugging. 
*/ #include "qemu/osdep.h" -#include "disas/dis-asm.h" +#include "disas/disas-internal.h" #include "elf.h" #include "qemu/qemu-print.h" - #include "disas/disas.h" #include "disas/capstone.h" - -typedef struct CPUDebug { - struct disassemble_info info; - CPUState *cpu; -} CPUDebug; +#include "hw/core/cpu.h" +#include "exec/memory.h" /* Filled in by elfload.c. Simplistic, but will do for now. */ struct syminfo *syminfos = NULL; @@ -119,18 +115,18 @@ static void initialize_debug(CPUDebug *s) s->info.symbol_at_address_func = symbol_at_address; } -static void initialize_debug_target(CPUDebug *s, CPUState *cpu) +void disas_initialize_debug_target(CPUDebug *s, CPUState *cpu) { initialize_debug(s); s->cpu = cpu; s->info.read_memory_func = target_read_memory; s->info.print_address_func = print_address; -#if TARGET_BIG_ENDIAN - s->info.endian = BFD_ENDIAN_BIG; -#else - s->info.endian = BFD_ENDIAN_LITTLE; -#endif + if (target_words_bigendian()) { + s->info.endian = BFD_ENDIAN_BIG; + } else { + s->info.endian = BFD_ENDIAN_LITTLE; + } CPUClass *cc = CPU_GET_CLASS(cpu); if (cc->disas_set_info) { @@ -168,7 +164,7 @@ static void initialize_debug_host(CPUDebug *s) # ifdef _ARCH_PPC64 s->info.cap_mode = CS_MODE_64; # endif -#elif defined(__riscv) && defined(CONFIG_RISCV_DIS) +#elif defined(__riscv) #if defined(_ILP32) || (__riscv_xlen == 32) s->info.print_insn = print_insn_riscv32; #elif defined(_LP64) @@ -204,14 +200,13 @@ static void initialize_debug_host(CPUDebug *s) } /* Disassemble this for me please... (debugging). */ -void target_disas(FILE *out, CPUState *cpu, target_ulong code, - target_ulong size) +void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size) { - target_ulong pc; + uint64_t pc; int count; CPUDebug s; - initialize_debug_target(&s, cpu); + disas_initialize_debug_target(&s, cpu); s.info.fprintf_func = fprintf; s.info.stream = out; s.info.buffer_vma = code; @@ -226,11 +221,12 @@ void target_disas(FILE *out, CPUState *cpu, target_ulong code, } for (pc = code; size > 0; pc += count, size -= count) { - fprintf(out, "0x" TARGET_FMT_lx ": ", pc); - count = s.info.print_insn(pc, &s.info); - fprintf(out, "\n"); - if (count < 0) - break; + fprintf(out, "0x%08" PRIx64 ": ", pc); + count = s.info.print_insn(pc, &s.info); + fprintf(out, "\n"); + if (count < 0) { + break; + } if (size < count) { fprintf(out, "Disassembler disagrees with translator over instruction " @@ -241,8 +237,7 @@ void target_disas(FILE *out, CPUState *cpu, target_ulong code, } } -static int G_GNUC_PRINTF(2, 3) -gstring_printf(FILE *stream, const char *fmt, ...) +int disas_gstring_printf(FILE *stream, const char *fmt, ...) { /* We abuse the FILE parameter to pass a GString. */ GString *s = (GString *)stream; @@ -272,8 +267,8 @@ char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size) CPUDebug s; GString *ds = g_string_new(NULL); - initialize_debug_target(&s, cpu); - s.info.fprintf_func = gstring_printf; + disas_initialize_debug_target(&s, cpu); + s.info.fprintf_func = disas_gstring_printf; s.info.stream = (FILE *)ds; /* abuse this slot */ s.info.buffer_vma = addr; s.info.buffer_length = size; @@ -292,7 +287,7 @@ char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size) } /* Disassemble this for me please... (debugging). */ -void disas(FILE *out, const void *code, unsigned long size) +void disas(FILE *out, const void *code, size_t size) { uintptr_t pc; int count; @@ -324,7 +319,7 @@ void disas(FILE *out, const void *code, unsigned long size) } /* Look up symbol for debugging purpose. Returns "" if unknown. 
*/ -const char *lookup_symbol(target_ulong orig_addr) +const char *lookup_symbol(uint64_t orig_addr) { const char *symbol = ""; struct syminfo *s; @@ -338,61 +333,3 @@ const char *lookup_symbol(target_ulong orig_addr) return symbol; } - -#if !defined(CONFIG_USER_ONLY) - -#include "monitor/monitor.h" - -static int -physical_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length, - struct disassemble_info *info) -{ - CPUDebug *s = container_of(info, CPUDebug, info); - MemTxResult res; - - res = address_space_read(s->cpu->as, memaddr, MEMTXATTRS_UNSPECIFIED, - myaddr, length); - return res == MEMTX_OK ? 0 : EIO; -} - -/* Disassembler for the monitor. */ -void monitor_disas(Monitor *mon, CPUState *cpu, - target_ulong pc, int nb_insn, int is_physical) -{ - int count, i; - CPUDebug s; - g_autoptr(GString) ds = g_string_new(""); - - initialize_debug_target(&s, cpu); - s.info.fprintf_func = gstring_printf; - s.info.stream = (FILE *)ds; /* abuse this slot */ - - if (is_physical) { - s.info.read_memory_func = physical_read_memory; - } - s.info.buffer_vma = pc; - - if (s.info.cap_arch >= 0 && cap_disas_monitor(&s.info, pc, nb_insn)) { - monitor_puts(mon, ds->str); - return; - } - - if (!s.info.print_insn) { - monitor_printf(mon, "0x" TARGET_FMT_lx - ": Asm output not supported on this arch\n", pc); - return; - } - - for (i = 0; i < nb_insn; i++) { - g_string_append_printf(ds, "0x" TARGET_FMT_lx ": ", pc); - count = s.info.print_insn(pc, &s.info); - g_string_append_c(ds, '\n'); - if (count < 0) { - break; - } - pc += count; - } - - monitor_puts(mon, ds->str); -} -#endif diff --git a/disas/meson.build b/disas/meson.build index c865bdd882..832727e4b3 100644 --- a/disas/meson.build +++ b/disas/meson.build @@ -10,4 +10,8 @@ common_ss.add(when: 'CONFIG_RISCV_DIS', if_true: files('riscv.c')) common_ss.add(when: 'CONFIG_SH4_DIS', if_true: files('sh4.c')) common_ss.add(when: 'CONFIG_SPARC_DIS', if_true: files('sparc.c')) common_ss.add(when: 'CONFIG_XTENSA_DIS', if_true: files('xtensa.c')) -common_ss.add(when: capstone, if_true: files('capstone.c')) +common_ss.add(when: capstone, if_true: [files('capstone.c'), capstone]) +common_ss.add(files('disas.c')) + +softmmu_ss.add(files('disas-mon.c')) +specific_ss.add(capstone) diff --git a/disas/riscv.c b/disas/riscv.c index ddda687c13..d597161d46 100644 --- a/disas/riscv.c +++ b/disas/riscv.c @@ -163,6 +163,13 @@ typedef enum { rv_codec_v_i, rv_codec_vsetvli, rv_codec_vsetivli, + rv_codec_zcb_ext, + rv_codec_zcb_mul, + rv_codec_zcb_lb, + rv_codec_zcb_lh, + rv_codec_zcmp_cm_pushpop, + rv_codec_zcmp_cm_mv, + rv_codec_zcmt_jt, } rv_codec; typedef enum { @@ -935,6 +942,28 @@ typedef enum { rv_op_vsetvli = 766, rv_op_vsetivli = 767, rv_op_vsetvl = 768, + rv_op_c_zext_b = 769, + rv_op_c_sext_b = 770, + rv_op_c_zext_h = 771, + rv_op_c_sext_h = 772, + rv_op_c_zext_w = 773, + rv_op_c_not = 774, + rv_op_c_mul = 775, + rv_op_c_lbu = 776, + rv_op_c_lhu = 777, + rv_op_c_lh = 778, + rv_op_c_sb = 779, + rv_op_c_sh = 780, + rv_op_cm_push = 781, + rv_op_cm_pop = 782, + rv_op_cm_popret = 783, + rv_op_cm_popretz = 784, + rv_op_cm_mva01s = 785, + rv_op_cm_mvsa01 = 786, + rv_op_cm_jt = 787, + rv_op_cm_jalt = 788, + rv_op_czero_eqz = 789, + rv_op_czero_nez = 790, } rv_op; /* structures */ @@ -958,6 +987,7 @@ typedef struct { uint8_t rnum; uint8_t vm; uint32_t vzimm; + uint8_t rlist; } rv_decode; typedef struct { @@ -1014,6 +1044,7 @@ static const char rv_vreg_name_sym[32][4] = { #define rv_fmt_rd_offset "O\t0,o" #define rv_fmt_rd_rs1_rs2 "O\t0,1,2" #define rv_fmt_frd_rs1 "O\t3,1" 
+#define rv_fmt_frd_frs1 "O\t3,4" #define rv_fmt_rd_frs1 "O\t0,4" #define rv_fmt_rd_frs1_frs2 "O\t0,4,5" #define rv_fmt_frd_frs1_frs2 "O\t3,4,5" @@ -1070,6 +1101,10 @@ static const char rv_vreg_name_sym[32][4] = { #define rv_fmt_vd_vm "O\tDm" #define rv_fmt_vsetvli "O\t0,1,v" #define rv_fmt_vsetivli "O\t0,u,v" +#define rv_fmt_rs1_rs2_zce_ldst "O\t2,i(1)" +#define rv_fmt_push_rlist "O\tx,-i" +#define rv_fmt_pop_rlist "O\tx,i" +#define rv_fmt_zcmt_index "O\ti" /* pseudo-instruction constraints */ @@ -1580,15 +1615,15 @@ const rv_opcode_data opcode_data[] = { { "snez", rv_codec_r, rv_fmt_rd_rs2, NULL, 0, 0, 0 }, { "sltz", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, { "sgtz", rv_codec_r, rv_fmt_rd_rs2, NULL, 0, 0, 0 }, - { "fmv.s", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, - { "fabs.s", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, - { "fneg.s", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, - { "fmv.d", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, - { "fabs.d", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, - { "fneg.d", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, - { "fmv.q", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, - { "fabs.q", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, - { "fneg.q", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "fmv.s", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 }, + { "fabs.s", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 }, + { "fneg.s", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 }, + { "fmv.d", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 }, + { "fabs.d", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 }, + { "fneg.d", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 }, + { "fmv.q", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 }, + { "fabs.q", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 }, + { "fneg.q", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 }, { "beqz", rv_codec_sb, rv_fmt_rs1_offset, NULL, 0, 0, 0 }, { "bnez", rv_codec_sb, rv_fmt_rs1_offset, NULL, 0, 0, 0 }, { "blez", rv_codec_sb, rv_fmt_rs2_offset, NULL, 0, 0, 0 }, @@ -1645,9 +1680,9 @@ const rv_opcode_data opcode_data[] = { { "max", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, { "maxu", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, { "clzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, - { "clzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "ctzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, { "cpopw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, - { "slli.uw", rv_codec_i_sh5, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "slli.uw", rv_codec_i_sh6, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, { "add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, { "rolw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, { "rorw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, @@ -2065,7 +2100,29 @@ const rv_opcode_data opcode_data[] = { { "vsext.vf8", rv_codec_v_r, rv_fmt_vd_vs2_vm, NULL, rv_op_vsext_vf8, rv_op_vsext_vf8, 0 }, { "vsetvli", rv_codec_vsetvli, rv_fmt_vsetvli, NULL, rv_op_vsetvli, rv_op_vsetvli, 0 }, { "vsetivli", rv_codec_vsetivli, rv_fmt_vsetivli, NULL, rv_op_vsetivli, rv_op_vsetivli, 0 }, - { "vsetvl", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, rv_op_vsetvl, rv_op_vsetvl, 0 } + { "vsetvl", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, rv_op_vsetvl, rv_op_vsetvl, 0 }, + { "c.zext.b", rv_codec_zcb_ext, rv_fmt_rd, NULL, 0 }, + { "c.sext.b", rv_codec_zcb_ext, rv_fmt_rd, NULL, 0 }, + { "c.zext.h", rv_codec_zcb_ext, rv_fmt_rd, NULL, 0 }, + { "c.sext.h", rv_codec_zcb_ext, rv_fmt_rd, NULL, 0 }, + { "c.zext.w", rv_codec_zcb_ext, rv_fmt_rd, NULL, 0 }, + { "c.not", rv_codec_zcb_ext, rv_fmt_rd, NULL, 0 }, + { "c.mul", rv_codec_zcb_mul, rv_fmt_rd_rs2, NULL, 0, 
0 }, + { "c.lbu", rv_codec_zcb_lb, rv_fmt_rs1_rs2_zce_ldst, NULL, 0, 0, 0 }, + { "c.lhu", rv_codec_zcb_lh, rv_fmt_rs1_rs2_zce_ldst, NULL, 0, 0, 0 }, + { "c.lh", rv_codec_zcb_lh, rv_fmt_rs1_rs2_zce_ldst, NULL, 0, 0, 0 }, + { "c.sb", rv_codec_zcb_lb, rv_fmt_rs1_rs2_zce_ldst, NULL, 0, 0, 0 }, + { "c.sh", rv_codec_zcb_lh, rv_fmt_rs1_rs2_zce_ldst, NULL, 0, 0, 0 }, + { "cm.push", rv_codec_zcmp_cm_pushpop, rv_fmt_push_rlist, NULL, 0, 0 }, + { "cm.pop", rv_codec_zcmp_cm_pushpop, rv_fmt_pop_rlist, NULL, 0, 0 }, + { "cm.popret", rv_codec_zcmp_cm_pushpop, rv_fmt_pop_rlist, NULL, 0, 0, 0 }, + { "cm.popretz", rv_codec_zcmp_cm_pushpop, rv_fmt_pop_rlist, NULL, 0, 0 }, + { "cm.mva01s", rv_codec_zcmp_cm_mv, rv_fmt_rd_rs2, NULL, 0, 0, 0 }, + { "cm.mvsa01", rv_codec_zcmp_cm_mv, rv_fmt_rd_rs2, NULL, 0, 0, 0 }, + { "cm.jt", rv_codec_zcmt_jt, rv_fmt_zcmt_index, NULL, 0 }, + { "cm.jalt", rv_codec_zcmt_jt, rv_fmt_zcmt_index, NULL, 0 }, + { "czero.eqz", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "czero.nez", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, }; /* CSR names */ @@ -2084,6 +2141,7 @@ static const char *csr_name(int csrno) case 0x000a: return "vxrm"; case 0x000f: return "vcsr"; case 0x0015: return "seed"; + case 0x0017: return "jvt"; case 0x0040: return "uscratch"; case 0x0041: return "uepc"; case 0x0042: return "ucause"; @@ -2306,6 +2364,24 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) op = rv_op_c_ld; } break; + case 4: + switch ((inst >> 10) & 0b111) { + case 0: op = rv_op_c_lbu; break; + case 1: + if (((inst >> 6) & 1) == 0) { + op = rv_op_c_lhu; + } else { + op = rv_op_c_lh; + } + break; + case 2: op = rv_op_c_sb; break; + case 3: + if (((inst >> 6) & 1) == 0) { + op = rv_op_c_sh; + } + break; + } + break; case 5: if (isa == rv128) { op = rv_op_c_sq; @@ -2362,6 +2438,17 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 3: op = rv_op_c_and; break; case 4: op = rv_op_c_subw; break; case 5: op = rv_op_c_addw; break; + case 6: op = rv_op_c_mul; break; + case 7: + switch ((inst >> 2) & 0b111) { + case 0: op = rv_op_c_zext_b; break; + case 1: op = rv_op_c_sext_b; break; + case 2: op = rv_op_c_zext_h; break; + case 3: op = rv_op_c_sext_h; break; + case 4: op = rv_op_c_zext_w; break; + case 5: op = rv_op_c_not; break; + } + break; } break; } @@ -2417,6 +2504,46 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) op = rv_op_c_sqsp; } else { op = rv_op_c_fsdsp; + if (((inst >> 12) & 0b01)) { + switch ((inst >> 8) & 0b01111) { + case 8: + if (((inst >> 4) & 0b01111) >= 4) { + op = rv_op_cm_push; + } + break; + case 10: + if (((inst >> 4) & 0b01111) >= 4) { + op = rv_op_cm_pop; + } + break; + case 12: + if (((inst >> 4) & 0b01111) >= 4) { + op = rv_op_cm_popretz; + } + break; + case 14: + if (((inst >> 4) & 0b01111) >= 4) { + op = rv_op_cm_popret; + } + break; + } + } else { + switch ((inst >> 10) & 0b011) { + case 0: + if (((inst >> 2) & 0xFF) >= 32) { + op = rv_op_cm_jalt; + } else { + op = rv_op_cm_jt; + } + break; + case 3: + switch ((inst >> 5) & 0b011) { + case 1: op = rv_op_cm_mvsa01; break; + case 3: op = rv_op_cm_mva01s; break; + } + break; + } + } } break; case 6: op = rv_op_c_swsp; break; @@ -2617,10 +2744,10 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) switch (((inst >> 12) & 0b111)) { case 0: op = rv_op_addiw; break; case 1: - switch (((inst >> 25) & 0b1111111)) { + switch (((inst >> 26) & 0b111111)) { case 0: op = rv_op_slliw; break; - case 4: op = rv_op_slli_uw; break; - case 48: + case 2: op = rv_op_slli_uw; break; + case 24: 
switch ((inst >> 20) & 0b11111) { case 0b00000: op = rv_op_clzw; break; case 0b00001: op = rv_op_ctzw; break; @@ -2791,6 +2918,8 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 45: op = rv_op_minu; break; case 46: op = rv_op_max; break; case 47: op = rv_op_maxu; break; + case 075: op = rv_op_czero_eqz; break; + case 077: op = rv_op_czero_nez; break; case 130: op = rv_op_sh1add; break; case 132: op = rv_op_sh2add; break; case 134: op = rv_op_sh3add; break; @@ -3661,6 +3790,21 @@ static uint32_t operand_crs2q(rv_inst inst) return (inst << 59) >> 61; } +static uint32_t calculate_xreg(uint32_t sreg) +{ + return sreg < 2 ? sreg + 8 : sreg + 16; +} + +static uint32_t operand_sreg1(rv_inst inst) +{ + return calculate_xreg((inst << 54) >> 61); +} + +static uint32_t operand_sreg2(rv_inst inst) +{ + return calculate_xreg((inst << 59) >> 61); +} + static uint32_t operand_crd(rv_inst inst) { return (inst << 52) >> 59; @@ -3883,6 +4027,46 @@ static uint32_t operand_vm(rv_inst inst) return (inst << 38) >> 63; } +static uint32_t operand_uimm_c_lb(rv_inst inst) +{ + return (((inst << 58) >> 63) << 1) | + ((inst << 57) >> 63); +} + +static uint32_t operand_uimm_c_lh(rv_inst inst) +{ + return (((inst << 58) >> 63) << 1); +} + +static uint32_t operand_zcmp_spimm(rv_inst inst) +{ + return ((inst << 60) >> 62) << 4; +} + +static uint32_t operand_zcmp_rlist(rv_inst inst) +{ + return ((inst << 56) >> 60); +} + +static uint32_t calculate_stack_adj(rv_isa isa, uint32_t rlist, uint32_t spimm) +{ + int xlen_bytes_log2 = isa == rv64 ? 3 : 2; + int regs = rlist == 15 ? 13 : rlist - 3; + uint32_t stack_adj_base = ROUND_UP(regs << xlen_bytes_log2, 16); + return stack_adj_base + spimm; +} + +static uint32_t operand_zcmp_stack_adj(rv_inst inst, rv_isa isa) +{ + return calculate_stack_adj(isa, operand_zcmp_rlist(inst), + operand_zcmp_spimm(inst)); +} + +static uint32_t operand_tbl_index(rv_inst inst) +{ + return ((inst << 54) >> 56); +} + /* decode operands */ static void decode_inst_operands(rv_decode *dec, rv_isa isa) @@ -4199,6 +4383,34 @@ static void decode_inst_operands(rv_decode *dec, rv_isa isa) dec->imm = operand_vimm(inst); dec->vzimm = operand_vzimm10(inst); break; + case rv_codec_zcb_lb: + dec->rs1 = operand_crs1q(inst) + 8; + dec->rs2 = operand_crs2q(inst) + 8; + dec->imm = operand_uimm_c_lb(inst); + break; + case rv_codec_zcb_lh: + dec->rs1 = operand_crs1q(inst) + 8; + dec->rs2 = operand_crs2q(inst) + 8; + dec->imm = operand_uimm_c_lh(inst); + break; + case rv_codec_zcb_ext: + dec->rd = operand_crs1q(inst) + 8; + break; + case rv_codec_zcb_mul: + dec->rd = operand_crs1rdq(inst) + 8; + dec->rs2 = operand_crs2q(inst) + 8; + break; + case rv_codec_zcmp_cm_pushpop: + dec->imm = operand_zcmp_stack_adj(inst, isa); + dec->rlist = operand_zcmp_rlist(inst); + break; + case rv_codec_zcmp_cm_mv: + dec->rd = operand_sreg1(inst); + dec->rs2 = operand_sreg2(inst); + break; + case rv_codec_zcmt_jt: + dec->imm = operand_tbl_index(inst); + break; }; } @@ -4358,6 +4570,9 @@ static void format_inst(char *buf, size_t buflen, size_t tab, rv_decode *dec) case ')': append(buf, ")", buflen); break; + case '-': + append(buf, "-", buflen); + break; case 'b': snprintf(tmp, sizeof(tmp), "%d", dec->bs); append(buf, tmp, buflen); @@ -4541,6 +4756,24 @@ static void format_inst(char *buf, size_t buflen, size_t tab, rv_decode *dec) append(buf, vma, buflen); break; } + case 'x': { + switch (dec->rlist) { + case 4: + snprintf(tmp, sizeof(tmp), "{ra}"); + break; + case 5: + snprintf(tmp, sizeof(tmp), "{ra, s0}"); + break; + 
case 15: + snprintf(tmp, sizeof(tmp), "{ra, s0-s11}"); + break; + default: + snprintf(tmp, sizeof(tmp), "{ra, s0-s%d}", dec->rlist - 5); + break; + } + append(buf, tmp, buflen); + break; + } default: break; } diff --git a/docs/COLO-FT.txt b/docs/COLO-FT.txt index 8ec653f81c..2e760a4aee 100644 --- a/docs/COLO-FT.txt +++ b/docs/COLO-FT.txt @@ -210,6 +210,7 @@ children.0=childs0 \ 3. On Secondary VM's QEMU monitor, issue command {"execute":"qmp_capabilities"} +{"execute": "migrate-set-capabilities", "arguments": {"capabilities": [ {"capability": "x-colo", "state": true } ] } } {"execute": "nbd-server-start", "arguments": {"addr": {"type": "inet", "data": {"host": "0.0.0.0", "port": "9999"} } } } {"execute": "nbd-server-add", "arguments": {"device": "parent0", "writable": true } } diff --git a/docs/about/build-platforms.rst b/docs/about/build-platforms.rst index 20b97c3310..0e2cb9e770 100644 --- a/docs/about/build-platforms.rst +++ b/docs/about/build-platforms.rst @@ -67,7 +67,8 @@ Non-supported architectures may be removed in the future following the Linux OS, macOS, FreeBSD, NetBSD, OpenBSD ----------------------------------------- -The project aims to support the most recent major version at all times. Support +The project aims to support the most recent major version at all times for +up to five years after its initial release. Support for the previous major version will be dropped 2 years after the new major version is released or when the vendor itself drops support, whichever comes first. In this context, third-party efforts to extend the lifetime of a distro @@ -97,7 +98,7 @@ Python runtime option of the ``configure`` script to point QEMU to a supported version of the Python runtime. - As of QEMU |version|, the minimum supported version of Python is 3.6. + As of QEMU |version|, the minimum supported version of Python is 3.7. Python build dependencies Some of QEMU's build dependencies are written in Python. Usually these diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index 15084f7bea..e934e0a13a 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -111,6 +111,27 @@ Use ``-machine acpi=off`` instead. The HAXM project has been retired (see https://github.com/intel/haxm#status). Use "whpx" (on Windows) or "hvf" (on macOS) instead. +``-async-teardown`` (since 8.1) +''''''''''''''''''''''''''''''' + +Use ``-run-with async-teardown=on`` instead. + +``-singlestep`` (since 8.1) +''''''''''''''''''''''''''' + +The ``-singlestep`` option has been turned into an accelerator property, +and given a name that better reflects what it actually does. +Use ``-accel tcg,one-insn-per-tb=on`` instead. + +User-mode emulator command line arguments +----------------------------------------- + +``-singlestep`` (since 8.1) +''''''''''''''''''''''''''' + +The ``-singlestep`` option has been given a name that better reflects +what it actually does. For both linux-user and bsd-user, use the +new ``-one-insn-per-tb`` option instead. QEMU Machine Protocol (QMP) commands ------------------------------------ @@ -183,6 +204,29 @@ accepted incorrect commands will return an error. Users should make sure that all arguments passed to ``device_add`` are consistent with the documented property types. +``StatusInfo`` member ``singlestep`` (since 8.1) +'''''''''''''''''''''''''''''''''''''''''''''''' + +The ``singlestep`` member of the ``StatusInfo`` returned from the +``query-status`` command is deprecated. 
This member has a confusing +name and it never did what the documentation claimed or what its name +suggests. We do not believe that anybody is actually using the +information provided in this member. + +The information it reports is whether the TCG JIT is in "one +instruction per translated block" mode (which can be set on the +command line or via the HMP, but not via QMP). The information remains +available via the HMP 'info jit' command. + +Human Monitor Protocol (HMP) commands +------------------------------------- + +``singlestep`` (since 8.1) +'''''''''''''''''''''''''' + +The ``singlestep`` command has been replaced by the ``one-insn-per-tb`` +command, which has the same behaviour but a less misleading name. + Host Architectures ------------------ @@ -196,6 +240,17 @@ CI coverage support may bitrot away before the deprecation process completes. The little endian variants of MIPS (both 32 and 64 bit) are still a supported host architecture. +System emulation on 32-bit x86 hosts (since 8.0) +'''''''''''''''''''''''''''''''''''''''''''''''' + +Support for 32-bit x86 host deployments is increasingly uncommon in mainstream +OS distributions given the widespread availability of 64-bit x86 hardware. +The QEMU project no longer considers 32-bit x86 support for system emulation to +be an effective use of its limited resources, and thus intends to discontinue +it. Since all recent x86 hardware from the past >10 years is capable of the +64-bit x86 extensions, a corresponding 64-bit OS should be used instead. + + QEMU API (QAPI) events ---------------------- @@ -208,8 +263,8 @@ Use the more generic event ``DEVICE_UNPLUG_GUEST_ERROR`` instead. System emulator machines ------------------------ -Arm ``virt`` machine ``dtb-kaslr-seed`` property -'''''''''''''''''''''''''''''''''''''''''''''''' +Arm ``virt`` machine ``dtb-kaslr-seed`` property (since 7.1) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' The ``dtb-kaslr-seed`` property on the ``virt`` board has been deprecated; use the new name ``dtb-randomness`` instead. The new name @@ -273,6 +328,14 @@ from Intel that was not properly allocated. Since version 5.2, the controller has used a properly allocated identifier. Deprecate the ``use-intel-id`` machine compatibility parameter. +``-device cxl-type3,memdev=xxxx`` (since 8.0) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``cxl-type3`` device initially only used a single memory backend. With +the addition of volatile memory support, it is now necessary to distinguish +between persistent and volatile memory backends. As such, memdev is deprecated +in favor of persistent-memdev. + Block device options '''''''''''''''''''' diff --git a/docs/about/emulation.rst b/docs/about/emulation.rst index b510a54418..0ad0b86f0d 100644 --- a/docs/about/emulation.rst +++ b/docs/about/emulation.rst @@ -99,7 +99,7 @@ depending on the guest architecture. - Yes - A configurable 32 bit soft core now owned by Cadence -A number of features are are only available when running under +A number of features are only available when running under emulation including :ref:`Record/Replay` and :ref:`TCG Plugins`. .. _Semihosting: diff --git a/docs/conf.py b/docs/conf.py index 00767b0e24..e84a95e71c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -32,15 +32,6 @@ import sphinx from distutils.version import LooseVersion from sphinx.errors import ConfigError -# Make Sphinx fail cleanly if using an old Python, rather than obscurely -# failing because some code in one of our extensions doesn't work there. 
-# In newer versions of Sphinx this will display nicely; in older versions -# Sphinx will also produce a Python backtrace but at least the information -# gets printed... -if sys.version_info < (3,6): - raise ConfigError( - "QEMU requires a Sphinx that uses Python 3.6 or better\n") - # The per-manual conf.py will set qemu_docdir for a single-manual build; # otherwise set it here if this is an entire-manual-set build. # This is always the absolute path of the docs/ directory in the source tree. @@ -98,7 +89,7 @@ default_role = 'any' # General information about the project. project = u'QEMU' -copyright = u'2022, The QEMU Project Developers' +copyright = u'2023, The QEMU Project Developers' author = u'The QEMU Project Developers' # The version info for the project you're documenting, acts as replacement for diff --git a/docs/config/mach-virt-graphical.cfg b/docs/config/mach-virt-graphical.cfg index d6d31b17f5..eba76eb198 100644 --- a/docs/config/mach-virt-graphical.cfg +++ b/docs/config/mach-virt-graphical.cfg @@ -56,9 +56,11 @@ [machine] type = "virt" - accel = "kvm" gic-version = "host" +[accel] + accel = "kvm" + [memory] size = "1024" diff --git a/docs/config/mach-virt-serial.cfg b/docs/config/mach-virt-serial.cfg index 18a7c83731..324b0542ff 100644 --- a/docs/config/mach-virt-serial.cfg +++ b/docs/config/mach-virt-serial.cfg @@ -62,9 +62,11 @@ [machine] type = "virt" - accel = "kvm" gic-version = "host" +[accel] + accel = "kvm" + [memory] size = "1024" diff --git a/docs/config/q35-emulated.cfg b/docs/config/q35-emulated.cfg index 99ac918e78..c8806e6d36 100644 --- a/docs/config/q35-emulated.cfg +++ b/docs/config/q35-emulated.cfg @@ -61,6 +61,8 @@ [machine] type = "q35" + +[accel] accel = "kvm" [memory] diff --git a/docs/config/q35-virtio-graphical.cfg b/docs/config/q35-virtio-graphical.cfg index 4207f11e4f..148b5d2c5e 100644 --- a/docs/config/q35-virtio-graphical.cfg +++ b/docs/config/q35-virtio-graphical.cfg @@ -55,6 +55,8 @@ [machine] type = "q35" + +[accel] accel = "kvm" [memory] diff --git a/docs/config/q35-virtio-serial.cfg b/docs/config/q35-virtio-serial.cfg index d2830aec5e..023291390e 100644 --- a/docs/config/q35-virtio-serial.cfg +++ b/docs/config/q35-virtio-serial.cfg @@ -60,6 +60,8 @@ [machine] type = "q35" + +[accel] accel = "kvm" [memory] diff --git a/docs/devel/acpi-bits.rst b/docs/devel/acpi-bits.rst index 9eb4b9e666..9677b0098f 100644 --- a/docs/devel/acpi-bits.rst +++ b/docs/devel/acpi-bits.rst @@ -61,19 +61,19 @@ Under ``tests/avocado/`` as the root we have: :: $ make check-venv (needed only the first time to create the venv) - $ ./tests/venv/bin/avocado run -t acpi tests/avocado + $ ./pyvenv/bin/avocado run -t acpi tests/avocado The above will run all acpi avocado tests including this one. In order to run the individual tests, perform the following: :: - $ ./tests/venv/bin/avocado run tests/avocado/acpi-bits.py --tap - + $ ./pyvenv/bin/avocado run tests/avocado/acpi-bits.py --tap - The above will produce output in tap format. You can omit "--tap -" in the end and it will produce output like the following: :: - $ ./tests/venv/bin/avocado run tests/avocado/acpi-bits.py + $ ./pyvenv/bin/avocado run tests/avocado/acpi-bits.py Fetching asset from tests/avocado/acpi-bits.py:AcpiBitsTest.test_acpi_smbios_bits JOB ID : eab225724da7b64c012c65705dc2fa14ab1defef JOB LOG : /home/anisinha/avocado/job-results/job-2022-10-10T17.58-eab2257/job.log @@ -135,7 +135,7 @@ Under ``tests/avocado/`` as the root we have: (c) They need not be loaded by avocado framework when running tests. 
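To give a feel for the Avocado tests referenced above, here is a minimal sketch of one. The ``QemuSystemTest`` base class and the ``wait_for_console_pattern`` helper come from ``tests/avocado/avocado_qemu``; the tag, kernel path and console pattern below are illustrative placeholders rather than anything taken from ``acpi-bits.py``::

    from avocado_qemu import QemuSystemTest
    from avocado_qemu import wait_for_console_pattern

    class SketchBootTest(QemuSystemTest):
        """
        Hypothetical example test; the tag and paths are placeholders.

        :avocado: tags=arch:x86_64
        """
        def test_console_message(self):
            self.vm.set_console()
            # Placeholder kernel image; a real test would fetch_asset() one.
            self.vm.add_args('-kernel', '/path/to/kernel')
            self.vm.launch()
            wait_for_console_pattern(self, 'Booting Linux')

A file like this dropped under ``tests/avocado/`` would be picked up by the same ``./pyvenv/bin/avocado run`` invocations shown above.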
-Author: Ani Sinha +Author: Ani Sinha References: ----------- diff --git a/docs/devel/atomics.rst b/docs/devel/atomics.rst index 7957310071..248076375b 100644 --- a/docs/devel/atomics.rst +++ b/docs/devel/atomics.rst @@ -27,7 +27,8 @@ provides macros that fall in three camps: - weak atomic access and manual memory barriers: ``qatomic_read()``, ``qatomic_set()``, ``smp_rmb()``, ``smp_wmb()``, ``smp_mb()``, - ``smp_mb_acquire()``, ``smp_mb_release()``, ``smp_read_barrier_depends()``; + ``smp_mb_acquire()``, ``smp_mb_release()``, ``smp_read_barrier_depends()``, + ``smp_mb__before_rmw()``, ``smp_mb__after_rmw()``; - sequentially consistent atomic access: everything else. @@ -219,10 +220,9 @@ They come in six kinds: retrieves the address to which the second load will be directed), the processor will guarantee that the first LOAD will appear to happen before the second with respect to the other components of the system. - However, this is not always true---for example, it was not true on - Alpha processors. Whenever this kind of access happens to shared - memory (that is not protected by a lock), a read barrier is needed, - and ``smp_read_barrier_depends()`` can be used instead of ``smp_rmb()``. + Therefore, unlike ``smp_rmb()`` or ``qatomic_load_acquire()``, + ``smp_read_barrier_depends()`` can be just a compiler barrier on + weakly-ordered architectures such as Arm or PPC[#]_. Note that the first load really has to have a _data_ dependency and not a control dependency. If the address for the second load is dependent @@ -230,6 +230,10 @@ They come in six kinds: than actually loading the address itself, then it's a _control_ dependency and a full read barrier or better is required. +.. [#] The DEC Alpha is an exception, because ``smp_read_barrier_depends()`` + needs a processor barrier. On strongly-ordered architectures such + as x86 or s390, ``smp_rmb()`` and ``qatomic_load_acquire()`` can + also be compiler barriers only. Memory barriers and ``qatomic_load_acquire``/``qatomic_store_release`` are mostly used when a data structure has one thread that is always a writer @@ -468,13 +472,19 @@ and memory barriers, and the equivalents in QEMU: In QEMU, the second kind is named ``atomic_OP_fetch``. - different atomic read-modify-write operations in Linux imply - a different set of memory barriers; in QEMU, all of them enforce - sequential consistency. + a different set of memory barriers. In QEMU, all of them enforce + sequential consistency: there is a single order in which the + program sees them happen. -- in QEMU, ``qatomic_read()`` and ``qatomic_set()`` do not participate in - the total ordering enforced by sequentially-consistent operations. - This is because QEMU uses the C11 memory model. The following example - is correct in Linux but not in QEMU: +- however, according to the C11 memory model that QEMU uses, this order + does not propagate to other memory accesses on either side of the + read-modify-write operation. As far as those are concerned, the + operation consists of just a load-acquire followed by a store-release. + Stores that precede the RMW operation, and loads that follow it, can + still be reordered and will happen *in the middle* of the read-modify-write + operation!
+ + Therefore, the following example is correct in Linux but not in QEMU: +----------------------------------+--------------------------------+ | Linux (correct) | QEMU (incorrect) | @@ -488,9 +498,24 @@ and memory barriers, and the equivalents in QEMU: because the read of ``y`` can be moved (by either the processor or the compiler) before the write of ``x``. - Fixing this requires an ``smp_mb()`` memory barrier between the write - of ``x`` and the read of ``y``. In the common case where only one thread - writes ``x``, it is also possible to write it like this: + Fixing this requires a full memory barrier between the write of ``x`` and + the read of ``y``. QEMU provides ``smp_mb__before_rmw()`` and + ``smp_mb__after_rmw()``; they act both as an optimization, + avoiding the memory barrier on processors where it is unnecessary, + and as a clarification of this corner case of the C11 memory model: + + +--------------------------------+ + | QEMU (correct) | + +================================+ + | :: | + | | + | a = qatomic_fetch_add(&x, 2);| + | smp_mb__after_rmw(); | + | b = qatomic_read(&y); | + +--------------------------------+ + + In the common case where only one thread writes ``x``, it is also possible + to write it like this: +--------------------------------+ | QEMU (correct) | diff --git a/docs/devel/build-system.rst b/docs/devel/build-system.rst index 1894721743..551c5a5ac0 100644 --- a/docs/devel/build-system.rst +++ b/docs/devel/build-system.rst @@ -4,30 +4,14 @@ The QEMU build system architecture This document aims to help developers understand the architecture of the QEMU build system. As with projects using GNU autotools, the QEMU build -system has two stages, first the developer runs the "configure" script +system has two stages; first the developer runs the "configure" script to determine the local build environment characteristics, then they run -"make" to build the project. There is about where the similarities with +"make" to build the project. This is about where the similarities with GNU autotools end, so try to forget what you know about them. +The two general ways to perform a build are as follows: -Stage 1: configure -================== - -The QEMU configure script is written directly in shell, and should be -compatible with any POSIX shell, hence it uses #!/bin/sh. An important -implication of this is that it is important to avoid using bash-isms on -development platforms where bash is the primary host. - -In contrast to autoconf scripts, QEMU's configure is expected to be -silent while it is checking for features. It will only display output -when an error occurs, or to show the final feature enablement summary -on completion. - -Because QEMU uses the Meson build system under the hood, only VPATH -builds are supported. There are two general ways to invoke configure & -perform a build: - - - VPATH, build artifacts outside of QEMU source tree entirely:: + - build artifacts outside of QEMU source tree entirely:: cd ../ mkdir build @@ -35,88 +19,122 @@ perform a build: ../qemu/configure make - - VPATH, build artifacts in a subdir of QEMU source tree:: + - build artifacts in a subdir of QEMU source tree:: mkdir build cd build ../configure make -The configure script automatically recognizes -command line options for which a same-named Meson option exists; -dashes in the command line are replaced with underscores. +Most of the actual build process uses Meson under the hood, therefore +build artifacts cannot be placed in the source tree itself. 
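One practical consequence, sketched below for scripted builds: the build directory has to be created first and ``configure`` run from inside it. The checkout location, build path and target list are assumptions chosen purely for illustration::

    #!/usr/bin/env python3
    # Sketch: drive an out-of-tree QEMU build.
    import pathlib
    import subprocess

    src = pathlib.Path('qemu').resolve()   # assumed source checkout
    build = src.parent / 'build'           # sibling build directory
    build.mkdir(exist_ok=True)

    # configure and make always run from the build directory,
    # never from the source tree itself.
    subprocess.run([str(src / 'configure'),
                    '--target-list=x86_64-softmmu'],
                   cwd=build, check=True)
    subprocess.run(['make', '-j8'], cwd=build, check=True)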
-Many checks on the compilation environment are still found in configure -rather than ``meson.build``, but new checks should be added directly to -``meson.build``. -Patches are also welcome to move existing checks from the configure -phase to ``meson.build``. When doing so, ensure that ``meson.build`` does -not use anymore the keys that you have removed from ``config-host.mak``. -Typically these will be replaced in ``meson.build`` by boolean variables, -``get_option('optname')`` invocations, or ``dep.found()`` expressions. -In general, the remaining checks have little or no interdependencies, -so they can be moved one by one. +Stage 1: configure +================== -Helper functions ----------------- +The configure script has five tasks: -The configure script provides a variety of helper functions to assist -developers in checking for system features: + - detect the host architecture -``do_cc $ARGS...`` - Attempt to run the system C compiler passing it $ARGS... + - list the targets for which to build emulators; the list of + targets also affects which firmware binaries and tests to build -``do_cxx $ARGS...`` - Attempt to run the system C++ compiler passing it $ARGS... + - find the compilers (native and cross) used to build executables, + firmware and tests. The results are written as either Makefile + fragments (``config-host.mak``) or a Meson machine file + (``config-meson.cross``) -``compile_object $CFLAGS`` - Attempt to compile a test program with the system C compiler using - $CFLAGS. The test program must have been previously written to a file - called $TMPC. The replacement in Meson is the compiler object ``cc``, - which has methods such as ``cc.compiles()``, - ``cc.check_header()``, ``cc.has_function()``. + - create a virtual environment in which all Python code runs during + the build, and possibly install packages into it from PyPI -``compile_prog $CFLAGS $LDFLAGS`` - Attempt to compile a test program with the system C compiler using - $CFLAGS and link it with the system linker using $LDFLAGS. The test - program must have been previously written to a file called $TMPC. - The replacement in Meson is ``cc.find_library()`` and ``cc.links()``. + - invoke Meson in the virtual environment, to perform the actual + configuration step for the emulator build + +The configure script automatically recognizes command line options for +which a same-named Meson option exists; dashes in the command line are +replaced with underscores. + +Almost all QEMU developers that need to modify the build system will +only be concerned with Meson, and therefore can skip the rest of this +section. + + +Modifying ``configure`` +----------------------- + +``configure`` is a shell script; it uses ``#!/bin/sh`` and therefore +should be compatible with any POSIX shell. It is important to avoid +using bash-isms to avoid breaking development platforms where bash is +the primary host. + +The configure script provides a variety of functions to help writing +portable shell code and providing consistent behavior across architectures +and operating systems: + +``error_exit $MESSAGE $MORE...`` + Print $MESSAGE to stderr, followed by $MORE... and then exit from the + configure script with non-zero status. ``has $COMMAND`` Determine if $COMMAND exists in the current environment, either as a shell builtin, or executable binary, returning 0 on success. The replacement in Meson is ``find_program()``. 
-``check_define $NAME`` - Determine if the macro $NAME is defined by the system C compiler +``probe_target_compiler $TARGET`` + Detect a cross compiler and cross tools for the QEMU target $TARGET (e.g., + ``$CPU-softmmu``, ``$CPU-linux-user``, ``$CPU-bsd-user``). If a working + compiler is present, return success and set variables ``$target_cc``, + ``$target_ar``, etc. to non-empty values. -``check_include $NAME`` - Determine if the include $NAME file is available to the system C - compiler. The replacement in Meson is ``cc.has_header()``. +``write_target_makefile`` + Write a Makefile fragment to stdout, exposing the result of the most recent + ``probe_target_compiler`` call as the usual Make variables (``CC``, + ``AR``, ``LD``, etc.). + + +Configure does not generally perform tests for compiler options beyond +basic checks to detect the host platform and ensure the compiler is +functioning. These are performed using a few more helper functions: + +``compile_object $CFLAGS`` + Attempt to compile a test program with the system C compiler using + $CFLAGS. The test program must have been previously written to a file + called $TMPC. + +``compile_prog $CFLAGS $LDFLAGS`` + Attempt to compile a test program with the system C compiler using + $CFLAGS and link it with the system linker using $LDFLAGS. The test + program must have been previously written to a file called $TMPC. + +``check_define $NAME`` + Determine if the macro $NAME is defined by the system C compiler. + +``do_compiler $CC $ARGS...`` + Attempt to run the C compiler $CC, passing it $ARGS... This function + does not use flags passed via options such as ``--extra-cflags``, and + therefore can be used to check for cross compilers. However, most + such checks are done at ``make`` time instead (see for example the + ``cc-option`` macro in ``pc-bios/option-rom/Makefile``). ``write_c_skeleton`` Write a minimal C program main() function to the temporary file - indicated by $TMPC + indicated by $TMPC. -``error_exit $MESSAGE $MORE...`` - Print $MESSAGE to stderr, followed by $MORE... and then exit from the - configure script with non-zero status -``query_pkg_config $ARGS...`` - Run pkg-config passing it $ARGS. If QEMU is doing a static build, - then --static will be automatically added to $ARGS +Python virtual environments and the QEMU build system +----------------------------------------------------- +TBD Stage 2: Meson ============== -The Meson build system is currently used to describe the build -process for: +The Meson build system describes the build and install process for: 1) executables, which include: - - Tools - ``qemu-img``, ``qemu-nbd``, ``qga`` (guest agent), etc + - Tools - ``qemu-img``, ``qemu-nbd``, ``qemu-ga`` (guest agent), etc - System emulators - ``qemu-system-$ARCH`` @@ -126,7 +144,8 @@ process for: 2) documentation -3) ROMs, which can be either installed as binary blobs or compiled +3) ROMs, whether provided as binary blobs in the QEMU distributions + or cross compiled under the direction of the configure script 4) other data files, such as icons or desktop files @@ -281,8 +300,7 @@ system/userspace emulation target Adding checks ------------- -New checks should be added to Meson.
Compiler checks can be as simple as -the following:: +Compiler checks can be as simple as the following:: config_host_data.set('HAVE_BTRFS_H', cc.has_header('linux/btrfs.h')) @@ -311,8 +329,7 @@ dependency will be used:: sdl_image = not_found if not get_option('sdl_image').auto() or have_system sdl_image = dependency('SDL2_image', required: get_option('sdl_image'), - method: 'pkg-config', - static: enable_static) + method: 'pkg-config') endif This avoids warnings on static builds of user-mode emulators, for example. @@ -360,22 +377,30 @@ script, which may point to something other than the first python3 binary on the path. -Stage 3: makefiles -================== +Stage 3: Make +============= -The use of GNU make is required with the QEMU build system. +The next step in building QEMU is to invoke make. GNU Make is required +to build QEMU, and may be installed as ``gmake`` on some hosts. -The output of Meson is a build.ninja file, which is used with the Ninja -build system. QEMU uses a different approach, where Makefile rules are -synthesized from the build.ninja file. The main Makefile includes these -rules and wraps them so that e.g. submodules are built before QEMU. -The resulting build system is largely non-recursive in nature, in -contrast to common practices seen with automake. +The output of Meson is a ``build.ninja`` file, which is used with the +Ninja build tool. However, QEMU's build comprises other components than +just the emulators (namely firmware and the tests in ``tests/tcg``) which +need different cross compilers. The QEMU Makefile wraps both Ninja and +the smaller build systems for firmware and tests; it also takes care of +running ``configure`` again when the script changes. Apart from invoking +these sub-Makefiles, the resulting build is largely non-recursive. -Tests are also ran by the Makefile with the traditional ``make check`` -phony target, while benchmarks are run with ``make bench``. Meson test -suites such as ``unit`` can be ran with ``make check-unit`` too. It is also -possible to run tests defined in meson.build with ``meson test``. +Tests, whether defined in ``meson.build`` or not, are also run by the +Makefile with the traditional ``make check`` phony target, while benchmarks +are run with ``make bench``. Meson test suites such as ``unit`` can be run +with ``make check-unit``, and ``make check-tcg`` builds and runs "non-Meson" +tests for all targets. + +If desired, it is also possible to use ``ninja`` and ``meson test``, +respectively to build emulators and run tests defined in meson.build. +The main difference is that ``make`` needs the ``-jN`` flag in order to +enable parallel builds or tests. Useful make targets ------------------- @@ -387,6 +412,7 @@ Useful make targets Print the value of the variable VAR. Useful for debugging the build system. + Important files for the build system ==================================== @@ -400,8 +426,7 @@ number of dynamically created files listed later. ``Makefile`` The main entry point used when invoking make to build all the components of QEMU. The default 'all' target will naturally result in the build of - every component. Makefile takes care of recursively building submodules - directly via a non-recursive set of rules. + every component. ``*/meson.build`` The meson.build file in the root directory is the main entry point for the @@ -410,59 +435,92 @@ number of dynamically created files listed later. other meson.build files spread throughout the QEMU source tree.
``tests/Makefile.include`` - Rules for external test harnesses. These include the TCG tests, - ``qemu-iotests`` and the Avocado-based integration tests. + Rules for external test harnesses. These include the TCG tests + and the Avocado-based integration tests. ``tests/docker/Makefile.include`` - Rules for Docker tests. Like tests/Makefile, this file is included - directly by the top level Makefile, anything defined in this file will - influence the entire build system. + Rules for Docker tests. Like ``tests/Makefile.include``, this file is + included directly by the top level Makefile, anything defined in this + file will influence the entire build system. ``tests/vm/Makefile.include`` - Rules for VM-based tests. Like tests/Makefile, this file is included - directly by the top level Makefile, anything defined in this file will - influence the entire build system. + Rules for VM-based tests. Like ``tests/Makefile.include``, this file is + included directly by the top level Makefile, anything defined in this + file will influence the entire build system. Dynamically created files ------------------------- -The following files are generated dynamically by configure in order to -control the behaviour of the statically defined makefiles. This avoids -the need for QEMU makefiles to go through any pre-processing as seen -with autotools, where Makefile.am generates Makefile.in which generates -Makefile. +The following files are generated at run-time in order to control the +behaviour of the Makefiles. This avoids the need for QEMU makefiles to +go through any pre-processing as seen with autotools, where configure +generates ``Makefile`` from ``Makefile.in``. Built by configure: ``config-host.mak`` When configure has determined the characteristics of the build host it - will write a long list of variables to config-host.mak file. This - provides the various install directories, compiler / linker flags and a + will write them to this file for use in ``Makefile`` and to a smaller + extent ``meson.build``. These include the paths to various tools and a variety of ``CONFIG_*`` variables related to optionally enabled features. - This is imported by the top level Makefile and meson.build in order to - tailor the build output. - config-host.mak is also used as a dependency checking mechanism. If make + ``config-host.mak`` is also used as a dependency checking mechanism. If make sees that the modification timestamp on configure is newer than that on - config-host.mak, then configure will be re-run. + ``config-host.mak``, then configure will be re-run. - The variables defined here are those which are applicable to all QEMU - build outputs. Variables which are potentially different for each - emulator target are defined by the next file... + The variables defined here apply to all QEMU + build outputs. +``config-meson.cross`` + + A Meson "cross file" (or native file) used to communicate the paths to + the toolchain and other configuration options. + +``config.status`` + + A small shell script that will invoke configure again with the same + environment variables that were set during the first run. It's used to + rerun configure after changes to the source code, but it can also be + inspected manually to check the contents of the environment. + +``Makefile.prereqs`` + + A set of Makefile dependencies that order the build and execution of + firmware and tests after the container images and emulators that they + need. 
+ +``pc-bios/*/config.mak``, ``tests/tcg/config-host.mak``, ``tests/tcg/*/config-target.mak`` + + Configuration variables used to build the firmware and TCG tests, + including paths to cross compilation toolchains. + +``pyvenv`` + + A Python virtual environment that is used for all Python code running + during the build. Using a virtual environment ensures that even code + that is run via ``sphinx-build``, ``meson``, etc. uses the same interpreter + and packages. Built by Meson: -``${TARGET-NAME}-config-devices.mak`` - TARGET-NAME is again the name of a system or userspace emulator. The - config-devices.mak file is automatically generated by make using the - scripts/make_device_config.sh program, feeding it the - default-configs/$TARGET-NAME file as input. +``config-host.h`` + Used by C code to determine the properties of the build environment + and the set of enabled features for the entire build. -``config-host.h``, ``$TARGET_NAME-config-target.h``, ``$TARGET_NAME-config-devices.h`` - These files are used by source code to determine what features are - enabled. They are generated from the contents of the corresponding - ``*.mak`` files using Meson's ``configure_file()`` function. +``${TARGET-NAME}-config-devices.mak`` + TARGET-NAME is the name of a system emulator. The file is + generated by Meson using files under ``configs/devices`` as input. + +``${TARGET-NAME}-config-target.mak`` + TARGET-NAME is the name of a system or usermode emulator. The file is + generated by Meson using files under ``configs/targets`` as input. + +``$TARGET_NAME-config-target.h``, ``$TARGET_NAME-config-devices.h`` + Used by C code to determine the properties and enabled + features for each target. They are generated from + the contents of the corresponding ``*.mak`` files using Meson's + ``configure_file()`` function; each target can include them using + the ``CONFIG_TARGET`` and ``CONFIG_DEVICES`` macros, respectively. ``build.ninja`` The build rules. diff --git a/docs/devel/decodetree.rst b/docs/devel/decodetree.rst index 49ea50c2a7..e3392aa705 100644 --- a/docs/devel/decodetree.rst +++ b/docs/devel/decodetree.rst @@ -23,22 +23,42 @@ Fields Syntax:: - field_def := '%' identifier ( unnamed_field )* ( !function=identifier )? + field_def := '%' identifier ( field )* ( !function=identifier )? + field := unnamed_field | named_field unnamed_field := number ':' ( 's' ) number + named_field := identifier ':' ( 's' ) number For *unnamed_field*, the first number is the least-significant bit position of the field and the second number is the length of the field. If the 's' is -present, the field is considered signed. If multiple ``unnamed_fields`` are -present, they are concatenated. In this way one can define disjoint fields. +present, the field is considered signed. + +A *named_field* refers to some other field in the instruction pattern +or format. Regardless of the length of the other field where it is +defined, it will be inserted into this field with the specified +signedness and bit width. + +Field definitions that involve loops (i.e. where a field is defined +directly or indirectly in terms of itself) are errors. + +A format can include fields that refer to named fields that are +defined in the instruction pattern(s) that use the format. +Conversely, an instruction pattern can include fields that refer to +named fields that are defined in the format it uses. However, you +cannot currently do both at once (i.e.
pattern P uses format F; F has +a field A that refers to a named field B that is defined in P, and P +has a field C that refers to a named field D that is defined in F). + +If multiple ``fields`` are present, they are concatenated. +In this way one can define disjoint fields. If ``!function`` is specified, the concatenated result is passed through the named function, taking and returning an integral value. -One may use ``!function`` with zero ``unnamed_fields``. This case is called +One may use ``!function`` with zero ``fields``. This case is called a *parameter*, and the named function is only passed the ``DisasContext`` and returns an integral value extracted from there. -A field with no ``unnamed_fields`` and no ``!function`` is in error. +A field with no ``fields`` and no ``!function`` is in error. Field examples: @@ -56,6 +76,9 @@ Field examples: | %shimm8 5:s8 13:1 | expand_shimm8(sextract(i, 5, 8) << 1 | | | !function=expand_shimm8 | extract(i, 13, 1)) | +---------------------------+---------------------------------------------+ +| %sz_imm 10:2 sz:3 | expand_sz_imm(extract(i, 10, 2) << 3 | | +| !function=expand_sz_imm | extract(a->sz, 0, 3)) | ++---------------------------+---------------------------------------------+ Argument Sets ============= diff --git a/docs/devel/index-api.rst b/docs/devel/index-api.rst index 60c0d7459d..7108821746 100644 --- a/docs/devel/index-api.rst +++ b/docs/devel/index-api.rst @@ -12,3 +12,4 @@ generated from in-code annotations to function prototypes. memory modules ui + zoned-storage diff --git a/docs/devel/kconfig.rst b/docs/devel/kconfig.rst index cc1a456edf..e3a544e463 100644 --- a/docs/devel/kconfig.rst +++ b/docs/devel/kconfig.rst @@ -274,7 +274,7 @@ or commenting out lines in the second group. It is also possible to run QEMU's configure script with the ``--without-default-devices`` option. When this is done, everything defaults -to ``n`` unless it is ``select``ed or explicitly switched on in the +to ``n`` unless it is ``select``\ ed or explicitly switched on in the ``.mak`` files. In other words, ``default`` and ``imply`` directives are disabled. When QEMU is built with this option, the user will probably want to change some lines in the first group, for example like this:: @@ -282,9 +282,19 @@ want to change some lines in the first group, for example like this:: CONFIG_PCI_DEVICES=y #CONFIG_TEST_DEVICES=n -and/or pick a subset of the devices in those device groups. Right now -there is no single place that lists all the optional devices for -``CONFIG_PCI_DEVICES`` and ``CONFIG_TEST_DEVICES``. In the future, +and/or pick a subset of the devices in those device groups. Without +further modifications to ``configs/devices/``, a system emulator built +without default devices might not do much more than start an empty +machine, and even then only if ``--nodefaults`` is specified on the +command line. Starting a VM *without* ``--nodefaults`` is allowed to +fail, but should never abort. Failures in ``make check`` with +``--without-default-devices`` are considered bugs in the test code: +the tests should use ``--nodefaults``, and should be skipped +if a necessary device is not present in the build. Such failures +should not be worked around with ``select`` directives. + +Right now there is no single place that lists all the optional devices +for ``CONFIG_PCI_DEVICES`` and ``CONFIG_TEST_DEVICES``.
In the future, we expect that ``.mak`` files will be automatically generated, so that they will include all these symbols and some help text on what they do. diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst index ad5dfe133e..d2cefc77a2 100644 --- a/docs/devel/loads-stores.rst +++ b/docs/devel/loads-stores.rst @@ -297,31 +297,20 @@ swap: ``translator_ld{sign}{size}_swap(env, ptr, swap)`` Regexes for git grep - ``\`` -``helper_*_{ld,st}*_mmu`` +``helper_{ld,st}*_mmu`` ~~~~~~~~~~~~~~~~~~~~~~~~~ These functions are intended primarily to be called by the code -generated by the TCG backend. They may also be called by target -CPU helper function code. Like the ``cpu_{ld,st}_mmuidx_ra`` functions -they perform accesses by guest virtual address, with a given ``mmuidx``. +generated by the TCG backend. Like the ``cpu_{ld,st}_mmu`` functions +they perform accesses by guest virtual address, with a given ``MemOpIdx``. -These functions specify an ``opindex`` parameter which encodes -(among other things) the mmu index to use for the access. This parameter -should be created by calling ``make_memop_idx()``. +They differ from ``cpu_{ld,st}_mmu`` in that they take the endianness +of the operation only from the MemOpIdx, and loads extend the return +value to the size of a host general register (``tcg_target_ulong``). -The ``retaddr`` parameter should be the result of GETPC() called directly -from the top level HELPER(foo) function (or 0 if no guest CPU state -unwinding is required). +load: ``helper_ld{sign}{size}_mmu(env, addr, opindex, retaddr)`` -**TODO** The names of these functions are a bit odd for historical -reasons because they were originally expected to be called only from -within generated code. We should rename them to bring them more in -line with the other memory access functions. The explicit endianness -is the only feature they have beyond ``*_mmuidx_ra``. 
- -load: ``helper_{endian}_ld{sign}{size}_mmu(env, addr, opindex, retaddr)`` - -store: ``helper_{endian}_st{size}_mmu(env, addr, val, opindex, retaddr)`` +store: ``helper_st{size}_mmu(env, addr, val, opindex, retaddr)`` ``sign`` - (empty) : for 32 or 64 bit sizes @@ -334,14 +323,9 @@ store: ``helper_{endian}_st{size}_mmu(env, addr, val, opindex, retaddr)`` - ``l`` : 32 bits - ``q`` : 64 bits -``endian`` - - ``le`` : little endian - - ``be`` : big endian - - ``ret`` : target endianness - Regexes for git grep - - ``\`` - - ``\`` + - ``\`` + - ``\`` ``address_space_*`` ~~~~~~~~~~~~~~~~~~~ diff --git a/docs/devel/multiple-iothreads.txt b/docs/devel/multiple-iothreads.txt index 343120f2ef..a3e949f6b3 100644 --- a/docs/devel/multiple-iothreads.txt +++ b/docs/devel/multiple-iothreads.txt @@ -61,6 +61,7 @@ There are several old APIs that use the main loop AioContext: * LEGACY qemu_aio_set_event_notifier() - monitor an event notifier * LEGACY timer_new_ms() - create a timer * LEGACY qemu_bh_new() - create a BH + * LEGACY qemu_bh_new_guarded() - create a BH with a device re-entrancy guard * LEGACY qemu_aio_wait() - run an event loop iteration Since they implicitly work on the main loop they cannot be used in code that @@ -72,8 +73,14 @@ Instead, use the AioContext functions directly (see include/block/aio.h): * aio_set_event_notifier() - monitor an event notifier * aio_timer_new() - create a timer * aio_bh_new() - create a BH + * aio_bh_new_guarded() - create a BH with a device re-entrancy guard * aio_poll() - run an event loop iteration +The qemu_bh_new_guarded/aio_bh_new_guarded APIs accept a "MemReentrancyGuard" +argument, which is used to check for and prevent re-entrancy problems. For +BHs associated with devices, the reentrancy-guard is contained in the +corresponding DeviceState and named "mem_reentrancy_guard". + The AioContext can be obtained from the IOThread using iothread_get_aio_context() or for the main loop using qemu_get_aio_context(). Code that takes an AioContext argument works both in IOThreads or the main diff --git a/docs/devel/qapi-code-gen.rst b/docs/devel/qapi-code-gen.rst index 23e7f2fb1c..7f78183cd4 100644 --- a/docs/devel/qapi-code-gen.rst +++ b/docs/devel/qapi-code-gen.rst @@ -545,7 +545,8 @@ Member 'allow-oob' declares whether the command supports out-of-band { 'command': 'migrate_recover', 'data': { 'uri': 'str' }, 'allow-oob': true } -See qmp-spec.txt for out-of-band execution syntax and semantics. +See the :doc:`/interop/qmp-spec` for out-of-band execution syntax +and semantics. Commands supporting out-of-band execution can still be executed in-band. @@ -805,9 +806,8 @@ gets its generated code guarded like this:: ... generated code ... #endif /* defined(HAVE_BAR) && defined(CONFIG_FOO) */ -Individual members of complex types, commands arguments, and -event-specific data can also be made conditional. This requires the -longhand form of MEMBER. +Individual members of complex types can also be made conditional. +This requires the longhand form of MEMBER. Example: a struct type with unconditional member 'foo' and conditional member 'bar' :: @@ -925,14 +925,17 @@ first character of the first line. The usual ****strong****, *\*emphasized\** and ````literal```` markup should be used. If you need a single literal ``*``, you will need to -backslash-escape it. As an extension beyond the usual rST syntax, you -can also use ``@foo`` to reference a name in the schema; this is rendered -the same way as ````foo````. +backslash-escape it.
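+For example, the following comment line (an illustrative snippet, not
+taken from the real schema) renders a literal asterisk next to
+emphasized text::
+
+    # A literal \* beside *emphasized* text.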
+ +Use ``@foo`` to reference a name in the schema. This is an rST +extension. It is rendered the same way as ````foo````, but carries +additional meaning. Example:: ## # Some text foo with **bold** and *emphasis* + # # 1. with a list # 2. like that # @@ -945,6 +948,11 @@ Example:: # <- get that ## +For legibility, wrap text paragraphs so every line is at most 70 +characters long. + +Separate sentences with two spaces. + Definition documentation ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -961,57 +969,46 @@ commands and events), member (for structs and unions), branch (for alternates), or value (for enums), a description of each feature (if any), and finally optional tagged sections. -The description of an argument or feature 'name' starts with -'\@name:'. The description text can start on the line following the -'\@name:', in which case it must not be indented at all. It can also -start on the same line as the '\@name:'. In this case if it spans -multiple lines then second and subsequent lines must be indented to -line up with the first character of the first line of the -description:: +Descriptions start with '\@name:'. The description text should be +indented like this:: - # @argone: - # This is a two line description - # in the first style. - # - # @argtwo: This is a two line description - # in the second style. + # @name: Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed + # do eiusmod tempor incididunt ut labore et dolore magna aliqua. -The number of spaces between the ':' and the text is not significant. +.. FIXME The parser accepts these things in almost any order. -.. admonition:: FIXME - - The parser accepts these things in almost any order. - -.. admonition:: FIXME - - union branches should be described, too. +.. FIXME union branches should be described, too. Extensions added after the definition was first released carry a -'(since x.y.z)' comment. +"(since x.y.z)" comment. The feature descriptions must be preceded by a line "Features:", like this:: # Features: + # # @feature: Description text A tagged section starts with one of the following words: "Note:"/"Notes:", "Since:", "Example"/"Examples", "Returns:", "TODO:". The section ends with the start of a new section. -The text of a section can start on a new line, in -which case it must not be indented at all. It can also start -on the same line as the 'Note:', 'Returns:', etc tag. In this -case if it spans multiple lines then second and subsequent -lines must be indented to match the first, in the same way as -multiline argument descriptions. +The second and subsequent lines of sections other than +"Example"/"Examples" should be indented like this:: -A 'Since: x.y.z' tagged section lists the release that introduced the + # Note: Ut enim ad minim veniam, quis nostrud exercitation ullamco + # laboris nisi ut aliquip ex ea commodo consequat. + # + # Duis aute irure dolor in reprehenderit in voluptate velit esse + # cillum dolore eu fugiat nulla pariatur. + +A "Since: x.y.z" tagged section lists the release that introduced the definition. -An 'Example' or 'Examples' section is automatically rendered -entirely as literal fixed-width text. In other sections, -the text is formatted, and rST markup can be used. +An "Example" or "Examples" section is rendered entirely +as literal fixed-width text. "TODO" sections are not rendered at all +(they are for developers, not users of QMP). In other sections, the +text is formatted, and rST markup can be used. 
For example:: @@ -1021,7 +1018,7 @@ For example:: # Statistics of a virtual block device or a block backing device. # # @device: If the stats are for a virtual block device, the name - # corresponding to the virtual block device. + # corresponding to the virtual block device. # # @node-name: The node name of the device. (since 2.3) # @@ -1038,8 +1035,8 @@ For example:: # # Query the @BlockStats for all virtual block devices. # - # @query-nodes: If true, the command will query all the - # block nodes ... explain, explain ... (since 2.3) + # @query-nodes: If true, the command will query all the block nodes + # ... explain, explain ... (since 2.3) # # Returns: A list of @BlockStats for each virtual block devices. # @@ -1058,6 +1055,63 @@ For example:: 'returns': ['BlockStats'] } +Markup pitfalls +~~~~~~~~~~~~~~~ + +A blank line is required between list items and paragraphs. Without +it, the list may not be recognized, resulting in garbled output. Good +example:: + + # An event's state is modified if: + # + # - its name matches the @name pattern, and + # - if @vcpu is given, the event has the "vcpu" property. + +Without the blank line this would be a single paragraph. + +Indentation matters. Bad example:: + + # @none: None (no memory side cache in this proximity domain, + # or cache associativity unknown) + # (since 5.0) + +The last line's de-indent is wrong. The second and subsequent lines +need to line up with each other, like this:: + + # @none: None (no memory side cache in this proximity domain, + # or cache associativity unknown) + # (since 5.0) + +Section tags are case-sensitive and end with a colon. Good example:: + + # Since: 7.1 + +Bad examples (all ordinary paragraphs):: + + # since: 7.1 + + # Since 7.1 + + # Since : 7.1 + +Likewise, member descriptions require a colon. Good example:: + + # @interface-id: Interface ID + +Bad examples (all ordinary paragraphs):: + + # @interface-id Interface ID + + # @interface-id : Interface ID + +Undocumented members are not flagged, yet. Instead, the generated +documentation describes them as "Not documented". Think twice before +adding more undocumented members. + +When you change documentation comments, please check the generated +documentation comes out as intended! + + Client JSON Protocol introspection ================================== diff --git a/docs/devel/qom.rst b/docs/devel/qom.rst index 3e34b07c98..c9237950d0 100644 --- a/docs/devel/qom.rst +++ b/docs/devel/qom.rst @@ -1,3 +1,5 @@ +.. _qom: + =========================== The QEMU Object Model (QOM) =========================== diff --git a/docs/devel/style.rst b/docs/devel/style.rst index 68aa776930..aa5e083ff8 100644 --- a/docs/devel/style.rst +++ b/docs/devel/style.rst @@ -300,6 +300,20 @@ putting those into qemu/typedefs.h instead of including the header. Cyclic inclusion is forbidden. +Generative Includes +------------------- + +QEMU makes fairly extensive use of the macro pre-processor to +instantiate multiple similar functions. While such abuse of the macro +processor isn't discouraged, it can make debugging and code navigation +harder. You should consider carefully whether the same effect can be +achieved by making it easy for the compiler to constant fold, or by +using Python scripting to generate grep-friendly code. + +If you do use template header files, they should be named with the +``.c.inc`` or ``.h.inc`` suffix to make it clear they are being +included for expansion.
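+A minimal sketch of the pattern (the file, macro and function names
+here are made up for illustration; ``glue()`` is QEMU's token-pasting
+helper from ``qemu/compiler.h``):
+
+.. code-block:: c
+
+    /* load-template.c.inc: expects DATA_TYPE and SUFFIX from the includer */
+    static inline DATA_TYPE glue(load_, SUFFIX)(const void *ptr)
+    {
+        DATA_TYPE val;
+
+        memcpy(&val, ptr, sizeof(val));
+        return val;
+    }
+
+and, in the including ``.c`` file:
+
+.. code-block:: c
+
+    #define DATA_TYPE uint32_t
+    #define SUFFIX    l
+    #include "load-template.c.inc"   /* expands to load_l() */
+    #undef SUFFIX
+    #undef DATA_TYPE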
+ C types ======= @@ -614,6 +628,97 @@ are still some caveats to beware of QEMU Specific Idioms ******************** +QEMU Object Model Declarations +============================== + +The QEMU Object Model (QOM) provides a framework for handling objects +in the base C language. The first declaration of a storage or class +structure should always be the parent, with a visual space between +that declaration and the new code. It is also useful to separate +backing for properties (options driven by the user) and internal state +to make navigation easier. + +For a storage structure the first declaration should always be called +"parent_obj" and for a class structure the first member should always +be called "parent_class" as below: + +.. code-block:: c + + struct MyDeviceState { + DeviceState parent_obj; + + /* Properties */ + int prop_a; + char *prop_b; + /* Other stuff */ + int internal_state; + }; + + struct MyDeviceClass { + DeviceClass parent_class; + + void (*new_fn1)(void); + bool (*new_fn2)(CPUState *); + }; + +Note that there is no need to provide typedefs for QOM structures +since these are generated automatically by the QOM declaration macros. +See :ref:`qom` for more details. + +QEMU GUARD macros +================= + +QEMU provides a number of ``_GUARD`` macros intended to make the +handling of multiple exit paths easier. For example, using +``QEMU_LOCK_GUARD`` to take a lock will ensure the lock is released on +exit from the function. + +.. code-block:: c + + static int my_critical_function(SomeState *s, void *data) + { + QEMU_LOCK_GUARD(&s->lock); + do_thing1(data); + if (check_state2(data)) { + return -1; + } + do_thing3(data); + return 0; + } + +will ensure s->lock is released however the function is exited. The +equivalent code without the _GUARD macro requires us to carefully place +qemu_mutex_unlock() calls on all exit points: + +.. code-block:: c + + static int my_critical_function(SomeState *s, void *data) + { + qemu_mutex_lock(&s->lock); + do_thing1(data); + if (check_state2(data)) { + qemu_mutex_unlock(&s->lock); + return -1; + } + do_thing3(data); + qemu_mutex_unlock(&s->lock); + return 0; + } + +There are often ``WITH_`` forms of macros which more easily wrap +around a block inside a function. + +.. code-block:: c + + WITH_RCU_READ_LOCK_GUARD() { + QTAILQ_FOREACH_RCU(kid, &bus->children, sibling) { + err = do_the_thing(kid->child); + if (err < 0) { + return err; + } + } + } + Error handling and reporting ============================ diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst index 9adc0c9b6c..6a166c5665 100644 --- a/docs/devel/tcg-ops.rst +++ b/docs/devel/tcg-ops.rst @@ -7,67 +7,51 @@ TCG Intermediate Representation Introduction ============ -TCG (Tiny Code Generator) began as a generic backend for a C -compiler. It was simplified to be used in QEMU. It also has its roots -in the QOP code generator written by Paul Brook. +TCG (Tiny Code Generator) began as a generic backend for a C compiler. +It was simplified to be used in QEMU. It also has its roots in the +QOP code generator written by Paul Brook. Definitions =========== -TCG receives RISC-like *TCG ops* and performs some optimizations on them, -including liveness analysis and trivial constant expression -evaluation. TCG ops are then implemented in the host CPU back end, -also known as the TCG target. - -The TCG *target* is the architecture for which we generate the -code. It is of course not the same as the "target" of QEMU which is -the emulated architecture.
As TCG started as a generic C backend used -for cross compiling, it is assumed that the TCG target is different -from the host, although it is never the case for QEMU. +The TCG *target* is the architecture for which we generate the code. +It is of course not the same as the "target" of QEMU which is the +emulated architecture. As TCG started as a generic C backend used +for cross compiling, the assumption was that the TCG target might be +different from the host, although this is never the case for QEMU. In this document, we use *guest* to specify what architecture we are emulating; *target* always means the TCG target, the machine on which we are running QEMU. -A TCG *function* corresponds to a QEMU Translated Block (TB). - -A TCG *temporary* is a variable only live in a basic block. Temporaries are allocated explicitly in each function. - -A TCG *local temporary* is a variable only live in a function. Local temporaries are allocated explicitly in each function. - -A TCG *global* is a variable which is live in all the functions -(equivalent of a C global variable). They are defined before the -functions defined. A TCG global can be a memory location (e.g. a QEMU -CPU register), a fixed host register (e.g. the QEMU CPU state pointer) -or a memory location which is stored in a register outside QEMU TBs -(not implemented yet). - -A TCG *basic block* corresponds to a list of instructions terminated -by a branch instruction. - An operation with *undefined behavior* may result in a crash. An operation with *unspecified behavior* shall not crash. However, the result may be one of several possibilities so may be considered an *undefined result*. -Intermediate representation -=========================== +Basic Blocks +============ -Introduction ------------- +A TCG *basic block* is a single entry, multiple exit region which +corresponds to a list of instructions terminated by a label, or +any branch instruction. -TCG instructions operate on variables which are temporaries, local -temporaries or globals. TCG instructions and variables are strongly -typed. Two types are supported: 32 bit integers and 64 bit -integers. Pointers are defined as an alias to 32 bit or 64 bit -integers depending on the TCG target word size. +A TCG *extended basic block* is a single entry, multiple exit region +which corresponds to a list of instructions terminated by a label or +an unconditional branch. Specifically, an extended basic block is +a sequence of basic blocks connected by the fall-through paths of +zero or more conditional branch instructions. -Each instruction has a fixed number of output variable operands, input -variable operands and always constant operands. +Operations +========== -The notable exception is the call instruction which has a variable -number of outputs and inputs. +TCG instructions or *ops* operate on TCG *variables*, both of which +are strongly typed. Each instruction has a fixed number of output +variable operands, input variable operands and constant operands. +Vector instructions have a field specifying the element size within +the vector. The notable exception is the call instruction which has +a variable number of outputs and inputs. In the textual form, output operands usually come first, followed by input operands, followed by constant operands. The output type is @@ -77,68 +61,127 @@ included in the instruction name. Constants are prefixed with a '$'.
add_i32 t0, t1, t2 /* (t0 <- t1 + t2) */ +Variables +========= -Assumptions ------------ +* ``TEMP_FIXED`` -Basic blocks -^^^^^^^^^^^^ + There is one TCG *fixed global* variable, ``cpu_env``, which is + live in all translation blocks, and holds a pointer to ``CPUArchState``. + This variable is held in a host cpu register at all times in all + translation blocks. -* Basic blocks end after branches (e.g. brcond_i32 instruction), - goto_tb and exit_tb instructions. +* ``TEMP_GLOBAL`` -* Basic blocks start after the end of a previous basic block, or at a - set_label instruction. + A TCG *global* is a variable which is live in all translation blocks, + and corresponds to a memory location that is within ``CPUArchState``. + These may be specified as an offset from ``cpu_env``, in which case + they are called *direct globals*, or may be specified as an offset + from a direct global, in which case they are called *indirect globals*. + Even indirect globals should still reference memory within + ``CPUArchState``. All TCG globals are defined during + ``TCGCPUOps.initialize``, before any translation blocks are generated. -After the end of a basic block, the content of temporaries is -destroyed, but local temporaries and globals are preserved. +* ``TEMP_CONST`` -Floating point types -^^^^^^^^^^^^^^^^^^^^ + A TCG *constant* is a variable which is live throughout the entire + translation block, and contains a constant value. These variables + are allocated on demand during translation and are hashed so that + there is exactly one variable holding a given value. -* Floating point types are not supported yet +* ``TEMP_TB`` -Pointers -^^^^^^^^ + A TCG *translation block temporary* is a variable which is live + throughout the entire translation block, but dies on any exit. + These temporaries are allocated explicitly during translation. -* Depending on the TCG target, pointer size is 32 bit or 64 - bit. The type ``TCG_TYPE_PTR`` is an alias to ``TCG_TYPE_I32`` or - ``TCG_TYPE_I64``. +* ``TEMP_EBB`` + + A TCG *extended basic block temporary* is a variable which is live + throughout an extended basic block, but dies on any exit. + These temporaries are allocated explicitly during translation. + +Types +===== + +* ``TCG_TYPE_I32`` + + A 32-bit integer. + +* ``TCG_TYPE_I64`` + + A 64-bit integer. For 32-bit hosts, such variables are split into a pair + of variables with ``type=TCG_TYPE_I32`` and ``base_type=TCG_TYPE_I64``. + The ``temp_subindex`` for each indicates where it falls within the + host-endian representation. + +* ``TCG_TYPE_PTR`` + + An alias for ``TCG_TYPE_I32`` or ``TCG_TYPE_I64``, depending on the size + of a pointer for the host. + +* ``TCG_TYPE_REG`` + + An alias for ``TCG_TYPE_I32`` or ``TCG_TYPE_I64``, depending on the size + of the integer registers for the host. This may be larger + than ``TCG_TYPE_PTR`` depending on the host ABI. + +* ``TCG_TYPE_I128`` + + A 128-bit integer. For all hosts, such variables are split into a number + of variables with ``type=TCG_TYPE_REG`` and ``base_type=TCG_TYPE_I128``. + The ``temp_subindex`` for each indicates where it falls within the + host-endian representation. + +* ``TCG_TYPE_V64`` + + A 64-bit vector. This type is valid only if the TCG target + sets ``TCG_TARGET_HAS_v64``. + +* ``TCG_TYPE_V128`` + + A 128-bit vector. This type is valid only if the TCG target + sets ``TCG_TARGET_HAS_v128``. + +* ``TCG_TYPE_V256`` + + A 256-bit vector. This type is valid only if the TCG target + sets ``TCG_TARGET_HAS_v256``.
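+For illustration, a translator might create such variables as follows
+(a sketch only; the exact allocation helpers are declared in the TCG
+headers, and their names are those in use at the time of writing):
+
+.. code-block:: c
+
+    TCGv_i64 t = tcg_temp_new_i64();      /* a TEMP_TB temporary */
+    TCGv_i32 e = tcg_temp_ebb_new_i32();  /* a TEMP_EBB temporary */
+    TCGv_i32 c = tcg_constant_i32(0xff);  /* a hashed TEMP_CONST */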
Helpers -^^^^^^^ +======= -* Using the tcg_gen_helper_x_y it is possible to call any function - taking i32, i64 or pointer types. By default, before calling a helper, - all globals are stored at their canonical location and it is assumed - that the function can modify them. By default, the helper is allowed to - modify the CPU state or raise an exception. +Helpers are registered in a guest-specific ``helper.h``, +which is processed to generate ``tcg_gen_helper_*`` functions. +With these functions it is possible to call a function taking +i32, i64, i128 or pointer types. - This can be overridden using the following function modifiers: +By default, before calling a helper, all globals are stored at their +canonical location. By default, the helper is allowed to modify the +CPU state (including the state represented by tcg globals) +or may raise an exception. This default can be overridden using the +following function modifiers: - - ``TCG_CALL_NO_READ_GLOBALS`` means that the helper does not read globals, - either directly or via an exception. They will not be saved to their - canonical locations before calling the helper. +* ``TCG_CALL_NO_WRITE_GLOBALS`` - - ``TCG_CALL_NO_WRITE_GLOBALS`` means that the helper does not modify any globals. - They will only be saved to their canonical location before calling helpers, - but they won't be reloaded afterwards. + The helper does not modify any globals, but may read them. + Globals will be saved to their canonical location before calling helpers, + but need not be reloaded afterwards. - - ``TCG_CALL_NO_SIDE_EFFECTS`` means that the call to the function is removed if - the return value is not used. +* ``TCG_CALL_NO_READ_GLOBALS`` - Note that ``TCG_CALL_NO_READ_GLOBALS`` implies ``TCG_CALL_NO_WRITE_GLOBALS``. + The helper does not read globals, either directly or via an exception. + They will not be saved to their canonical locations before calling + the helper. This implies ``TCG_CALL_NO_WRITE_GLOBALS``. - On some TCG targets (e.g. x86), several calling conventions are - supported. +* ``TCG_CALL_NO_SIDE_EFFECTS`` -Branches -^^^^^^^^ - -* Use the instruction 'br' to jump to a label. + The call to the helper function may be removed if the return value is + not used. This means that it may not modify any CPU state nor may it + raise an exception. Code Optimizations ------------------- +================== When generating instructions, you can count on at least the following optimizations: @@ -629,19 +672,20 @@ QEMU specific operations | This operation is optional. If the TCG backend does not implement the goto_ptr opcode, emitting this op is equivalent to emitting exit_tb(0). - * - qemu_ld_i32/i64 *t0*, *t1*, *flags*, *memidx* + * - qemu_ld_i32/i64/i128 *t0*, *t1*, *flags*, *memidx* - qemu_st_i32/i64 *t0*, *t1*, *flags*, *memidx* + qemu_st_i32/i64/i128 *t0*, *t1*, *flags*, *memidx* qemu_st8_i32 *t0*, *t1*, *flags*, *memidx* - | Load data at the guest address *t1* into *t0*, or store data in *t0* at guest - address *t1*. The _i32/_i64 size applies to the size of the input/output + address *t1*. The _i32/_i64/_i128 size applies to the size of the input/output register *t0* only. The address *t1* is always sized according to the guest, and the width of the memory operation is controlled by *flags*. | | Both *t0* and *t1* may be split into little-endian ordered pairs of registers - if dealing with 64-bit quantities on a 32-bit host. + if dealing with 64-bit quantities on a 32-bit host, or 128-bit quantities on + a 64-bit host. 
| | The *memidx* selects the qemu tlb index to use (e.g. user or kernel access). The flags are the MemOp bits, selecting the sign, width, and endianness @@ -650,6 +694,8 @@ QEMU specific operations | For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a 64-bit memory access specified in *flags*. | + | qemu_ld/st_i128 is only supported on a 64-bit host. + | | For i386, qemu_st8_i32 is exactly like qemu_st_i32, except the size of the memory operation is known to be 8-bit. This allows the backend to provide a different set of register constraints. @@ -908,21 +954,6 @@ Recommended coding rules for best performance often modified, e.g. the integer registers and the condition codes. TCG will be able to use host registers to store them. -- Avoid globals stored in fixed registers. They must be used only to - store the pointer to the CPU state and possibly to store a pointer - to a register window. - -- Use temporaries. Use local temporaries only when really needed, - e.g. when you need to use a value after a jump. Local temporaries - introduce a performance hit in the current TCG implementation: their - content is saved to memory at end of each basic block. - -- Free temporaries and local temporaries when they are no longer used - (tcg_temp_free). Since tcg_const_x() also creates a temporary, you - should free it after it is used. Freeing temporaries does not yield - a better generated code, but it reduces the memory usage of TCG and - the speed of the translation. - - Don't hesitate to use helpers for complicated or seldom used guest instructions. There is little performance advantage in using TCG to implement guest instructions taking more than about twenty TCG @@ -932,10 +963,6 @@ Recommended coding rules for best performance the instruction is mostly doing loads and stores, and in those cases inline TCG may still be faster for longer sequences. -- The hard limit on the number of TCG instructions you can generate - per guest instruction is set by ``MAX_OP_PER_INSTR`` in ``exec-all.h`` -- - you cannot exceed this without risking a buffer overrun. - - Use the 'discard' instruction if you know that TCG won't be able to prove that a given global is "dead" at a given program point. The x86 guest uses it to improve the condition codes optimisation. diff --git a/docs/devel/testing.rst b/docs/devel/testing.rst index e10c47b5a7..2cafec4178 100644 --- a/docs/devel/testing.rst +++ b/docs/devel/testing.rst @@ -429,49 +429,67 @@ using the ``lcitool`` program provided by the ``libvirt-ci`` project: https://gitlab.com/libvirt/libvirt-ci -In that project, there is a ``mappings.yml`` file defining the distro native -package names for a wide variety of third party projects. This is processed -in combination with a project defined list of build pre-requisites to determine -the list of native packages to install on each distribution. This can be used -to generate dockerfiles, VM package lists and Cirrus CI variables needed to -setup build environments across OS distributions with a consistent set of -packages present. - -When preparing a patch series that adds a new build pre-requisite to QEMU, -updates to various lcitool data files may be required. +``libvirt-ci`` contains an ``lcitool`` program as well as a list of +mappings to distribution package names for a wide variety of third +party projects.
``lcitool`` applies the mappings to a list of build +pre-requisites in ``tests/lcitool/projects/qemu.yml``, determines the +list of native packages to install on each distribution, and uses them +to generate build environments (dockerfiles and Cirrus CI variable files) +that are consistent across OS distributions. Adding new build pre-requisites ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +When preparing a patch series that adds a new build +pre-requisite to QEMU, the prerequisites should be added to +``tests/lcitool/projects/qemu.yml`` in order to make the dependency +available in the CI build environments. + In the simple case where the pre-requisite is already known to ``libvirt-ci`` -the following steps are needed +the following steps are needed: * Edit ``tests/lcitool/projects/qemu.yml`` and add the pre-requisite * Run ``make lcitool-refresh`` to re-generate all relevant build environment manifests -In some cases ``libvirt-ci`` will not know about the build pre-requisite and -thus some extra preparation steps will be required first +It may be that ``libvirt-ci`` does not know about the new pre-requisite. +If that is the case, some extra preparation steps will be required +first to contribute the mapping to the ``libvirt-ci`` project: * Fork the ``libvirt-ci`` project on gitlab - * Edit the ``mappings.yml`` change to add an entry for the new build - prerequisite, listing its native package name on as many OS distros - as practical. + * Add an entry for the new build prerequisite to + ``lcitool/facts/mappings.yml``, listing its native package name on as + many OS distros as practical. Run ``python -m pytest --regenerate-output`` + and check that the changes are correct. - * Commit the ``mappings.yml`` change and submit a merge request to - the ``libvirt-ci`` project, noting in the description that this - is a new build pre-requisite desired for use with QEMU + * Commit the ``mappings.yml`` change together with the regenerated test + files, and submit a merge request to the ``libvirt-ci`` project. + Please note in the description that this is a new build pre-requisite + desired for use with QEMU. * CI pipeline will run to validate that the changes to ``mappings.yml`` are correct, by attempting to install the newly listed package on all OS distributions supported by ``libvirt-ci``. * Once the merge request is accepted, go back to QEMU and update - the ``libvirt-ci`` submodule to point to a commit that contains - the ``mappings.yml`` update. + the ``tests/lcitool/libvirt-ci`` submodule to point to a commit that + contains the ``mappings.yml`` update. Then add the prerequisite and + run ``make lcitool-refresh``. + + * Please also trigger gitlab container generation pipelines on your change + for as many OS distros as practical to make sure that there are no + obvious breakages when adding the new pre-requisite. Please see + `CI `__ documentation + page on how to trigger gitlab CI pipelines on your change. + +For enterprise distros that default to old, end-of-life versions of the +Python runtime, QEMU uses a separate set of mappings that work with more +recent versions. These can be found in ``tests/lcitool/mappings.yml``. +Modifying this file should not be necessary unless the new pre-requisite +is a Python library or tool. Adding new OS distros @@ -498,18 +516,20 @@ Assuming there is agreement to add a new OS distro then * Fork the ``libvirt-ci`` project on gitlab - * Add metadata under ``guests/lcitool/lcitool/ansible/group_vars/`` - for the new OS distro.
There might be code changes required if - the OS distro uses a package format not currently known. The - ``libvirt-ci`` maintainers can advise on this when the issue - is file. + * Add metadata under ``lcitool/facts/targets/`` for the new OS + distro. There might be code changes required if the OS distro + uses a package format not currently known. The ``libvirt-ci`` + maintainers can advise on this when the issue is filed. - * Edit the ``mappings.yml`` change to update all the existing package - entries, providing details of the new OS distro + * Edit ``lcitool/facts/mappings.yml`` to add entries for + the new OS, listing the native package names for as many packages + as practical. Run ``python -m pytest --regenerate-output`` and + check that the changes are correct. - * Commit the ``mappings.yml`` change and submit a merge request to - the ``libvirt-ci`` project, noting in the description that this - is a new build pre-requisite desired for use with QEMU + * Commit the changes to ``lcitool/facts`` and the regenerated test + files, and submit a merge request to the ``libvirt-ci`` project. + Please note in the description that this is a new OS distro + desired for use with QEMU. * CI pipeline will run to validate that the changes to ``mappings.yml`` are correct, by attempting to install the newly listed package on @@ -574,13 +594,13 @@ https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual Thread Sanitizer in Docker ~~~~~~~~~~~~~~~~~~~~~~~~~~ -TSan is currently supported in the ubuntu2004 docker. +TSan is currently supported in the ubuntu2204 docker. The test-tsan test will build using TSan and then run make check. .. code:: - make docker-test-tsan@ubuntu2004 + make docker-test-tsan@ubuntu2204 TSan warnings under docker are placed in files located at build/tsan/. @@ -868,9 +888,9 @@ You can run the avocado tests simply by executing: make check-avocado -This involves the automatic creation of Python virtual environment -within the build tree (at ``tests/venv``) which will have all the -right dependencies, and will save tests results also within the +This involves the automatic installation, from PyPI, of all the +necessary avocado-framework dependencies into the QEMU venv within the +build tree (at ``./pyvenv``). Test results are also saved within the build tree (at ``tests/results``). Note: the build environment must be using a Python 3 stack, and have @@ -927,7 +947,7 @@ may be invoked by running: .. code:: - tests/venv/bin/avocado run $OPTION1 $OPTION2 tests/avocado/ + pyvenv/bin/avocado run $OPTION1 $OPTION2 tests/avocado/ Note that if ``make check-avocado`` was not executed before, it is possible to create the Python virtual environment with the dependencies @@ -942,20 +962,20 @@ a test file. To run tests from a single file within the build tree, use: .. code:: - tests/venv/bin/avocado run tests/avocado/$TESTFILE + pyvenv/bin/avocado run tests/avocado/$TESTFILE To run a single test within a test file, use: .. code:: - tests/venv/bin/avocado run tests/avocado/$TESTFILE:$TESTCLASS.$TESTNAME + pyvenv/bin/avocado run tests/avocado/$TESTFILE:$TESTCLASS.$TESTNAME Valid test names are visible in the output from any previous execution of Avocado or ``make check-avocado``, and can also be queried using: ..
code:: - tests/venv/bin/avocado list tests/avocado + pyvenv/bin/avocado list tests/avocado Manual Installation ~~~~~~~~~~~~~~~~~~~ diff --git a/docs/devel/vfio-migration.rst b/docs/devel/vfio-migration.rst index c214c73e28..1b68ccf115 100644 --- a/docs/devel/vfio-migration.rst +++ b/docs/devel/vfio-migration.rst @@ -59,22 +59,37 @@ System memory dirty pages tracking ---------------------------------- A ``log_global_start`` and ``log_global_stop`` memory listener callback informs -the VFIO IOMMU module to start and stop dirty page tracking. A ``log_sync`` -memory listener callback marks those system memory pages as dirty which are -used for DMA by the VFIO device. The dirty pages bitmap is queried per -container. All pages pinned by the vendor driver through external APIs have to -be marked as dirty during migration. When there are CPU writes, CPU dirty page -tracking can identify dirtied pages, but any page pinned by the vendor driver -can also be written by the device. There is currently no device or IOMMU -support for dirty page tracking in hardware. +the VFIO dirty tracking module to start and stop dirty page tracking. A +``log_sync`` memory listener callback queries the dirty page bitmap from the +dirty tracking module and marks system memory pages which were DMA-ed by the +VFIO device as dirty. The dirty page bitmap is queried per container. + +Currently there are two ways dirty page tracking can be done: +(1) Device dirty tracking: +In this method the device is responsible for logging and reporting its DMAs. +This method can be used only if the device is capable of tracking its DMAs. +Discovering device capability, starting and stopping dirty tracking, and +syncing the dirty bitmaps from the device are done using the DMA logging uAPI. +More info about the uAPI can be found in the comments of the +``vfio_device_feature_dma_logging_control`` and +``vfio_device_feature_dma_logging_report`` structures in the header file +linux-headers/linux/vfio.h. + +(2) VFIO IOMMU module: +In this method dirty tracking is done by the IOMMU. However, there is +currently no IOMMU support for dirty page tracking. For this reason, all pages +are perpetually marked dirty, unless the device driver pins pages through +external APIs in which case only those pinned pages are perpetually marked +dirty. + +If the above two methods are not supported, all pages are perpetually marked +dirty by QEMU. By default, dirty pages are tracked during pre-copy as well as stop-and-copy -phase. So, a page pinned by the vendor driver will be copied to the destination -in both phases. Copying dirty pages in pre-copy phase helps QEMU to predict if -it can achieve its downtime tolerances. If QEMU during pre-copy phase keeps -finding dirty pages continuously, then it understands that even in stop-and-copy -phase, it is likely to find dirty pages and can predict the downtime -accordingly. +phase. So, a page marked as dirty will be copied to the destination in both +phases. Copying dirty pages in pre-copy phase helps QEMU to predict if it can +achieve its downtime tolerances. If QEMU during pre-copy phase keeps finding +dirty pages continuously, then it understands that even in stop-and-copy phase, +it is likely to find dirty pages and can predict the downtime accordingly. QEMU also provides a per device opt-out option ``pre-copy-dirty-page-tracking`` which disables querying the dirty bitmap during pre-copy phase. If it is set to @@ -89,7 +104,8 @@ phase of migration.
In that case, the unmap ioctl returns any dirty pages in that range and QEMU reports corresponding guest physical pages dirty. During stop-and-copy phase, an IOMMU notifier is used to get a callback for mapped pages and then dirty pages bitmap is fetched from VFIO IOMMU modules for those -mapped ranges. +mapped ranges. If device dirty tracking is enabled with vIOMMU, live migration +will be blocked. Flow of state changes during Live migration =========================================== diff --git a/docs/devel/zoned-storage.rst b/docs/devel/zoned-storage.rst new file mode 100644 index 0000000000..30296d3c85 --- /dev/null +++ b/docs/devel/zoned-storage.rst @@ -0,0 +1,62 @@ +============= +zoned-storage +============= + +Zoned Block Devices (ZBDs) divide the LBA space into block regions called zones +that are larger than the LBA size. Zones can only be written sequentially, which +can reduce write amplification in SSDs, and potentially lead to higher +throughput and increased capacity. More details about ZBDs can be found at: + +https://zonedstorage.io/docs/introduction/zoned-storage + +1. Block layer APIs for zoned storage +------------------------------------- +The QEMU block layer supports three zoned storage models: +- BLK_Z_HM: The host-managed zoned model only allows sequential write access +to zones. It supports ZBD-specific I/O commands that can be used by a host to +manage the zones of a device. +- BLK_Z_HA: The host-aware zoned model allows random write operations in +zones, making it backward compatible with regular block devices. +- BLK_Z_NONE: The non-zoned model has no zone support. It includes both +regular and drive-managed ZBD devices. ZBD-specific I/O commands are not +supported. + +The block device information resides inside BlockDriverState. QEMU uses the +BlockLimits struct (BlockDriverState::bl) that is continuously accessed by the +block layer while processing I/O requests. A BlockBackend has a root pointer to +a BlockDriverState graph (for example, raw format on top of file-posix). The +zoned storage information can be propagated from the leaf BlockDriverState all +the way up to the BlockBackend. If the zoned storage model in file-posix is +set to BLK_Z_HM, then block drivers will declare support for zoned host +devices. + +The block layer APIs support commands needed for zoned storage devices, +including report zones, four zone operations, and zone append. + +2. Emulating zoned storage controllers +-------------------------------------- +When the BlockBackend's BlockLimits model reports a zoned storage device, users +like the virtio-blk emulation or the qemu-io-cmds.c utility can use block layer +APIs for zoned storage emulation or testing. + +For example, to test zone_report on a null_blk device using qemu-io:: + + $ path/to/qemu-io --image-opts -n driver=host_device,filename=/dev/nullb0 -c "zrp offset nr_zones" + +To expose the host's zoned block device through virtio-blk, the command line +can be (includes the -device parameter):: + + -blockdev node-name=drive0,driver=host_device,filename=/dev/nullb0,cache.direct=on \ + -device virtio-blk-pci,drive=drive0 + +Or only use the -drive parameter:: + + -drive driver=host_device,file=/dev/nullb0,if=virtio,cache.direct=on + +Additionally, QEMU has several ways of supporting zoned storage, including: +(1) Using virtio-scsi: --device scsi-block allows for the passing through of +SCSI ZBC devices, enabling the attachment of ZBC or ZAC HDDs to QEMU.
+(2) PCI device pass-through: While NVMe ZNS emulation is available for testing +purposes, it cannot yet pass through a zoned device from the host. To pass an +NVMe ZNS device to the guest, use VFIO PCI to pass the entire NVMe PCI adapter +through to the guest. Likewise, an HDD HBA can be passed through to QEMU, with +all HDDs attached to the HBA. diff --git a/docs/interop/firmware.json b/docs/interop/firmware.json index 56814f02b3..cc8f869186 100644 --- a/docs/interop/firmware.json +++ b/docs/interop/firmware.json @@ -258,7 +258,7 @@ # # @mode: Describes how the firmware build handles code versus variable # storage. If not present, it must be treated as if it was -# configured with value ``split``. Since: 7.0.0 +# configured with value @split. Since: 7.0.0 # # @executable: Identifies the firmware executable. The @mode # indicates whether there will be an associated @@ -267,13 +267,13 @@ # -drive if=none,id=pflash0,readonly=on,file=@executable.@filename,format=@executable.@format # -machine pflash0=pflash0 # or equivalent -blockdev instead of -drive. When -# @mode is ``combined`` the executable must be +# @mode is @combined the executable must be # cloned before use and configured with readonly=off. # With QEMU versions older than 4.0, you have to use # -drive if=pflash,unit=0,readonly=on,file=@executable.@filename,format=@executable.@format # # @nvram-template: Identifies the NVRAM template compatible with -# @executable, when @mode is set to ``split``, +# @executable, when @mode is set to @split, # otherwise it should not be present. # Management software instantiates an # individual copy -- a specific NVRAM file -- from diff --git a/docs/interop/index.rst b/docs/interop/index.rst index 6351ff9ba6..ed65395bfb 100644 --- a/docs/interop/index.rst +++ b/docs/interop/index.rst @@ -15,6 +15,7 @@ are useful for making QEMU interoperate with other software. dbus-display live-block-operations pr-helper + qmp-spec qemu-ga qemu-ga-ref qemu-qmp-ref diff --git a/docs/interop/qcow2.txt b/docs/interop/qcow2.txt index f7dc304ff6..e7f036c286 100644 --- a/docs/interop/qcow2.txt +++ b/docs/interop/qcow2.txt @@ -214,14 +214,18 @@ version 2. type. If the incompatible bit "Compression type" is set: the field - must be present and non-zero (which means non-zlib + must be present and non-zero (which means non-deflate compression type). Otherwise, this field must not be present - or must be zero (which means zlib). + or must be zero (which means deflate). Available compression type values: - 0: zlib + 0: deflate 1: zstd + The deflate compression type is called "zlib" + in QEMU. However, clusters with the + deflate compression type do not have zlib headers. + === Header padding === diff --git a/docs/interop/qmp-intro.txt b/docs/interop/qmp-intro.txt deleted file mode 100644 index 1c745a7af0..0000000000 --- a/docs/interop/qmp-intro.txt +++ /dev/null @@ -1,88 +0,0 @@ - QEMU Machine Protocol - ===================== - -Introduction ------------- - -The QEMU Machine Protocol (QMP) allows applications to operate a -QEMU instance. - -QMP is JSON[1] based and features the following: - -- Lightweight, text-based, easy to parse data format -- Asynchronous messages support (ie. events) -- Capabilities Negotiation - -For detailed information on QMP's usage, please, refer to the following files: - -o qmp-spec.txt QEMU Machine Protocol current specification -o qemu-qmp-ref.html QEMU QMP commands and events (auto-generated at build-time) - -[1] https://www.json.org - -Usage ------ - -You can use the -qmp option to enable QMP.
For example, the following -makes QMP available on localhost port 4444: - -$ qemu [...] -qmp tcp:localhost:4444,server=on,wait=off - -However, for more flexibility and to make use of more options, the -mon -command-line option should be used. For instance, the following example -creates one HMP instance (human monitor) on stdio and one QMP instance -on localhost port 4444: - -$ qemu [...] -chardev stdio,id=mon0 -mon chardev=mon0,mode=readline \ - -chardev socket,id=mon1,host=localhost,port=4444,server=on,wait=off \ - -mon chardev=mon1,mode=control,pretty=on - -Please, refer to QEMU's manpage for more information. - -Simple Testing --------------- - -To manually test QMP one can connect with telnet and issue commands by hand: - -$ telnet localhost 4444 -Trying 127.0.0.1... -Connected to localhost. -Escape character is '^]'. -{ - "QMP": { - "version": { - "qemu": { - "micro": 0, - "minor": 0, - "major": 3 - }, - "package": "v3.0.0" - }, - "capabilities": [ - "oob" - ] - } -} - -{ "execute": "qmp_capabilities" } -{ - "return": { - } -} - -{ "execute": "query-status" } -{ - "return": { - "status": "prelaunch", - "singlestep": false, - "running": false - } -} - -Please refer to docs/interop/qemu-qmp-ref.* for a complete command -reference, generated from qapi/qapi-schema.json. - -QMP wiki page -------------- - -https://wiki.qemu.org/QMP diff --git a/docs/interop/qmp-spec.txt b/docs/interop/qmp-spec.rst similarity index 56% rename from docs/interop/qmp-spec.txt rename to docs/interop/qmp-spec.rst index b0e8351d5b..563344160e 100644 --- a/docs/interop/qmp-spec.txt +++ b/docs/interop/qmp-spec.rst @@ -1,24 +1,26 @@ - QEMU Machine Protocol Specification +.. + Copyright (C) 2009-2016 Red Hat, Inc. -0. About This Document -====================== + This work is licensed under the terms of the GNU GPL, version 2 or + later. See the COPYING file in the top-level directory. -Copyright (C) 2009-2016 Red Hat, Inc. -This work is licensed under the terms of the GNU GPL, version 2 or -later. See the COPYING file in the top-level directory. +=================================== +QEMU Machine Protocol Specification +=================================== -1. Introduction -=============== - -This document specifies the QEMU Machine Protocol (QMP), a JSON-based +The QEMU Machine Protocol (QMP) is a JSON-based protocol which is available for applications to operate QEMU at the machine-level. It is also in use by the QEMU Guest Agent (QGA), which is available for host applications to interact with the guest -operating system. +operating system. This page specifies the general format of +the protocol; details of the commands and data structures can +be found in the :doc:`qemu-qmp-ref` and the :doc:`qemu-ga-ref`. -2. Protocol Specification -========================= +.. contents:: + +Protocol Specification +====================== This section details the protocol format. For the purpose of this document, "Server" is either QEMU or the QEMU Guest Agent, and @@ -30,9 +32,7 @@ following format: json-DATA-STRUCTURE-NAME Where DATA-STRUCTURE-NAME is any valid JSON data structure, as defined -by the JSON standard: - -http://www.ietf.org/rfc/rfc8259.txt +by the `JSON standard `_. The server expects its input to be encoded in UTF-8, and sends its output encoded in ASCII. @@ -45,83 +45,89 @@ important unless specifically documented otherwise. Repeating a key within a json-object gives unpredictable results. 
Also for convenience, the server will accept an extension of -'single-quoted' strings in place of the usual "double-quoted" +``'single-quoted'`` strings in place of the usual ``"double-quoted"`` json-string, and both input forms of strings understand an additional -escape sequence of "\'" for a single quote. The server will only use +escape sequence of ``\'`` for a single quote. The server will only use double quoting on output. -2.1 General Definitions ------------------------ - -2.1.1 All interactions transmitted by the Server are json-objects, always - terminating with CRLF - -2.1.2 All json-objects members are mandatory when not specified otherwise - -2.2 Server Greeting +General Definitions ------------------- +All interactions transmitted by the Server are json-objects, always +terminating with CRLF. + +All json-objects members are mandatory when not specified otherwise. + +Server Greeting +--------------- + Right when connected the Server will issue a greeting message, which signals that the connection has been successfully established and that the Server is ready for capabilities negotiation (for more information refer to section -'4. Capabilities Negotiation'). +`Capabilities Negotiation`_). The greeting message format is: -{ "QMP": { "version": json-object, "capabilities": json-array } } +:: - Where, + { "QMP": { "version": json-object, "capabilities": json-array } } -- The "version" member contains the Server's version information (the format - is the same of the query-version command) -- The "capabilities" member specify the availability of features beyond the +Where: + +- The ``version`` member contains the Server's version information (the format + is the same as for the query-version command). +- The ``capabilities`` member specifies the availability of features beyond the baseline specification; the order of elements in this array has no particular significance. -2.2.1 Capabilities ------------------- +Capabilities +------------ Currently supported capabilities are: -- "oob": the QMP server supports "out-of-band" (OOB) command - execution, as described in section "2.3.1 Out-of-band execution". +``oob`` + the QMP server supports "out-of-band" (OOB) command + execution, as described in section `Out-of-band execution`_. -2.3 Issuing Commands --------------------- +Issuing Commands +---------------- The format for command execution is: -{ "execute": json-string, "arguments": json-object, "id": json-value } +:: + + { "execute": json-string, "arguments": json-object, "id": json-value } or -{ "exec-oob": json-string, "arguments": json-object, "id": json-value } +:: - Where, + { "exec-oob": json-string, "arguments": json-object, "id": json-value } -- The "execute" or "exec-oob" member identifies the command to be +Where: + +- The ``execute`` or ``exec-oob`` member identifies the command to be executed by the server. The latter requests out-of-band execution. -- The "arguments" member is used to pass any arguments required for the +- The ``arguments`` member is used to pass any arguments required for the execution of the command, it is optional when no arguments are required. Each command documents what contents will be considered - valid when handling the json-argument -- The "id" member is a transaction identification associated with the + valid when handling the json-argument. +- The ``id`` member is a transaction identification associated with the command execution, it is optional and will be part of the response - if provided. The "id" member can be any json-value. 
A json-number + if provided. The ``id`` member can be any json-value. A json-number incremented for each successive command works fine. -The actual commands are documented in the QEMU QMP reference manual -docs/interop/qemu-qmp-ref.{7,html,info,pdf,txt}. +The actual commands are documented in the :doc:`qemu-qmp-ref`. -2.3.1 Out-of-band execution ---------------------------- +Out-of-band execution +--------------------- The server normally reads, executes and responds to one command after the other. The client therefore receives command responses in issue order. -With out-of-band execution enabled via capability negotiation (section -4.), the server reads and queues commands as they arrive. It executes +With out-of-band execution enabled via `capabilities negotiation`_, +the server reads and queues commands as they arrive. It executes commands from the queue one after the other. Commands executed out-of-band jump the queue: the command get executed right away, possibly overtaking prior in-band commands. The client may therefore @@ -129,8 +135,8 @@ receive such a command's response before responses from prior in-band commands. To be able to match responses back to their commands, the client needs -to pass "id" with out-of-band commands. Passing it with all commands -is recommended for clients that accept capability "oob". +to pass ``id`` with out-of-band commands. Passing it with all commands +is recommended for clients that accept capability ``oob``. If the client sends in-band commands faster than the server can execute them, the server will stop reading requests until the request @@ -140,57 +146,61 @@ To ensure commands to be executed out-of-band get read and executed, the client should have at most eight in-band commands in flight. Only a few commands support out-of-band execution. The ones that do -have "allow-oob": true in output of query-qmp-schema. +have ``"allow-oob": true`` in the output of ``query-qmp-schema``. -2.4 Commands Responses ----------------------- +Commands Responses +------------------ There are two possible responses which the Server will issue as the result of a command execution: success or error. -As long as the commands were issued with a proper "id" field, then the -same "id" field will be attached in the corresponding response message +As long as the commands were issued with a proper ``id`` field, then the +same ``id`` field will be attached in the corresponding response message so that requests and responses can match. Clients should drop all the -responses that have an unknown "id" field. +responses that have an unknown ``id`` field. -2.4.1 success -------------- +Success +------- The format of a success response is: -{ "return": json-value, "id": json-value } +:: - Where, + { "return": json-value, "id": json-value } -- The "return" member contains the data returned by the command, which +Where: + +- The ``return`` member contains the data returned by the command, which is defined on a per-command basis (usually a json-object or json-array of json-objects, but sometimes a json-number, json-string, or json-array of json-strings); it is an empty json-object if the - command does not return data -- The "id" member contains the transaction identification associated - with the command execution if issued by the Client + command does not return data. +- The ``id`` member contains the transaction identification associated + with the command execution if issued by the Client. 
-2.4.2 error ------------ +Error +----- The format of an error response is: -{ "error": { "class": json-string, "desc": json-string }, "id": json-value } +:: - Where, + { "error": { "class": json-string, "desc": json-string }, "id": json-value } -- The "class" member contains the error class name (eg. "GenericError") -- The "desc" member is a human-readable error message. Clients should +Where: + +- The ``class`` member contains the error class name (eg. ``"GenericError"``). +- The ``desc`` member is a human-readable error message. Clients should not attempt to parse this message. -- The "id" member contains the transaction identification associated with - the command execution if issued by the Client +- The ``id`` member contains the transaction identification associated with + the command execution if issued by the Client. -NOTE: Some errors can occur before the Server is able to read the "id" member, -in these cases the "id" member will not be part of the error response, even +NOTE: Some errors can occur before the Server is able to read the ``id`` member; +in these cases the ``id`` member will not be part of the error response, even if provided by the client. -2.5 Asynchronous events ------------------------ +Asynchronous events +------------------- As a result of state changes, the Server may send messages unilaterally to the Client at any time, when not in the middle of any other @@ -198,44 +208,45 @@ response. They are called "asynchronous events". The format of asynchronous events is: -{ "event": json-string, "data": json-object, - "timestamp": { "seconds": json-number, "microseconds": json-number } } +:: - Where, + { "event": json-string, "data": json-object, + "timestamp": { "seconds": json-number, "microseconds": json-number } } -- The "event" member contains the event's name -- The "data" member contains event specific data, which is defined in a - per-event basis, it is optional -- The "timestamp" member contains the exact time of when the event +Where: + +- The ``event`` member contains the event's name. +- The ``data`` member contains event specific data, which is defined in a + per-event basis. It is optional. +- The ``timestamp`` member contains the exact time of when the event occurred in the Server. It is a fixed json-object with time in seconds and microseconds relative to the Unix Epoch (1 Jan 1970); if there is a failure to retrieve host time, both members of the timestamp will be set to -1. -The actual asynchronous events are documented in the QEMU QMP -reference manual docs/interop/qemu-qmp-ref.{7,html,info,pdf,txt}. +The actual asynchronous events are documented in the :doc:`qemu-qmp-ref`. Some events are rate-limited to at most one per second. If additional "similar" events arrive within one second, all but the last one are dropped, and the last one is delayed. "Similar" normally means same event type. -2.6 Forcing the JSON parser into known-good state -------------------------------------------------- +Forcing the JSON parser into known-good state +--------------------------------------------- Incomplete or invalid input can leave the server's JSON parser in a state where it can't parse additional commands. To get it back into known-good state, the client should provoke a lexical error. The cleanest way to do that is sending an ASCII control character -other than '\t' (horizontal tab), '\r' (carriage return), or '\n' (new -line). +other than ``\t`` (horizontal tab), ``\r`` (carriage return), or +``\n`` (new line). 
Sadly, older versions of QEMU can fail to flag this as an error. If a client needs to deal with them, it should send a 0xFF byte. -2.7 QGA Synchronization ------------------------ +QGA Synchronization +------------------- When a client connects to QGA over a transport lacking proper connection semantics such as virtio-serial, QGA may have read partial @@ -243,86 +254,106 @@ input from a previous client. The client needs to force QGA's parser into known-good state using the previous section's technique. Moreover, the client may receive output a previous client didn't read. To help with skipping that output, QGA provides the -'guest-sync-delimited' command. Refer to its documentation for +``guest-sync-delimited`` command. Refer to its documentation for details. -3. QMP Examples -=============== +QMP Examples +============ This section provides some examples of real QMP usage, in all of them -"C" stands for "Client" and "S" stands for "Server". +``->`` marks text sent by the Client and ``<-`` marks replies by the Server. -3.1 Server greeting -------------------- +.. admonition:: Example -S: { "QMP": {"version": {"qemu": {"micro": 0, "minor": 0, "major": 3}, - "package": "v3.0.0"}, "capabilities": ["oob"] } } + Server greeting -3.2 Capabilities negotiation ----------------------------- + .. code-block:: QMP -C: { "execute": "qmp_capabilities", "arguments": { "enable": ["oob"] } } -S: { "return": {}} + <- { "QMP": {"version": {"qemu": {"micro": 0, "minor": 0, "major": 3}, + "package": "v3.0.0"}, "capabilities": ["oob"] } } -3.3 Simple 'stop' execution ---------------------------- +.. admonition:: Example -C: { "execute": "stop" } -S: { "return": {} } + Capabilities negotiation -3.4 KVM information -------------------- + .. code-block:: QMP -C: { "execute": "query-kvm", "id": "example" } -S: { "return": { "enabled": true, "present": true }, "id": "example"} + -> { "execute": "qmp_capabilities", "arguments": { "enable": ["oob"] } } + <- { "return": {}} -3.5 Parsing error ------------------- +.. admonition:: Example -C: { "execute": } -S: { "error": { "class": "GenericError", "desc": "Invalid JSON syntax" } } + Simple 'stop' execution -3.6 Powerdown event -------------------- + .. code-block:: QMP -S: { "timestamp": { "seconds": 1258551470, "microseconds": 802384 }, - "event": "POWERDOWN" } + -> { "execute": "stop" } + <- { "return": {} } -3.7 Out-of-band execution -------------------------- +.. admonition:: Example -C: { "exec-oob": "migrate-pause", "id": 42 } -S: { "id": 42, - "error": { "class": "GenericError", - "desc": "migrate-pause is currently only supported during postcopy-active state" } } + KVM information + + .. code-block:: QMP + + -> { "execute": "query-kvm", "id": "example" } + <- { "return": { "enabled": true, "present": true }, "id": "example"} + +.. admonition:: Example + + Parsing error + + .. code-block:: QMP + + -> { "execute": } + <- { "error": { "class": "GenericError", "desc": "JSON parse error, expecting value" } } + +.. admonition:: Example + + Powerdown event + + .. code-block:: QMP + + <- { "timestamp": { "seconds": 1258551470, "microseconds": 802384 }, + "event": "POWERDOWN" } + +.. admonition:: Example + + Out-of-band execution + + .. code-block:: QMP + + -> { "exec-oob": "migrate-pause", "id": 42 } + <- { "id": 42, + "error": { "class": "GenericError", + "desc": "migrate-pause is currently only supported during postcopy-active state" } } -4. 
Capabilities Negotiation -=========================== +Capabilities Negotiation +======================== When a Client successfully establishes a connection, the Server is in Capabilities Negotiation mode. -In this mode only the qmp_capabilities command is allowed to run, all -other commands will return the CommandNotFound error. Asynchronous +In this mode only the ``qmp_capabilities`` command is allowed to run; all +other commands will return the ``CommandNotFound`` error. Asynchronous messages are not delivered either. -Clients should use the qmp_capabilities command to enable capabilities -advertised in the Server's greeting (section '2.2 Server Greeting') they -support. +Clients should use the ``qmp_capabilities`` command to enable capabilities +advertised in the `Server Greeting`_ which they support. -When the qmp_capabilities command is issued, and if it does not return an -error, the Server enters in Command mode where capabilities changes take -effect, all commands (except qmp_capabilities) are allowed and asynchronous +When the ``qmp_capabilities`` command is issued, and if it does not return an +error, the Server enters Command mode where capabilities changes take +effect, all commands (except ``qmp_capabilities``) are allowed and asynchronous messages are delivered. -5 Compatibility Considerations -============================== +Compatibility Considerations +============================ All protocol changes or new features which modify the protocol format in an incompatible way are disabled by default and will be advertised by the -capabilities array (section '2.2 Server Greeting'). Thus, Clients can check +capabilities array (in the `Server Greeting`_). Thus, Clients can check that array and enable the capabilities they support. The QMP Server performs a type check on the arguments to a command. It @@ -337,12 +368,12 @@ However, Clients must not assume any particular: - Length of json-arrays - Size of json-objects; in particular, future versions of QEMU may add - new keys and Clients should be able to ignore them. + new keys and Clients should be able to ignore them - Order of json-object members or json-array elements - Amount of errors generated by a command, that is, new errors can be added to any existing command in newer versions of the Server -Any command or member name beginning with "x-" is deemed experimental, +Any command or member name beginning with ``x-`` is deemed experimental, and may be withdrawn or changed in an incompatible manner in a future release. @@ -350,8 +381,8 @@ Of course, the Server does guarantee to send valid JSON. But apart from this, a Client should be "conservative in what they send, and liberal in what they accept". -6. Downstream extension of QMP -============================== +Downstream extension of QMP +=========================== We recommend that downstream consumers of QEMU do *not* modify QMP. Management tools should be able to support both upstream and downstream @@ -363,23 +394,25 @@ avoid modifying QMP. Both upstream and downstream need to take care to preserve long-term compatibility and interoperability. To help with that, QMP reserves JSON object member names beginning with -'__' (double underscore) for downstream use ("downstream names"). This +``__`` (double underscore) for downstream use ("downstream names"). This means upstream will never use any downstream names for its commands, arguments, errors, asynchronous events, and so forth. -Any new names downstream wishes to add must begin with '__'. 
To +Any new names downstream wishes to add must begin with ``__``. To ensure compatibility with other downstreams, it is strongly -recommended that you prefix your downstream names with '__RFQDN_' where +recommended that you prefix your downstream names with ``__RFQDN_`` where RFQDN is a valid, reverse fully qualified domain name which you control. For example, a qemu-kvm specific monitor command would be: +:: + (qemu) __org.linux-kvm_enable_irqchip -Downstream must not change the server greeting (section 2.2) other than +Downstream must not change the `server greeting`_ other than to offer additional capabilities. But see below for why even that is discouraged. -Section '5 Compatibility Considerations' applies to downstream as well +The section `Compatibility Considerations`_ applies to downstream as well as to upstream, obviously. It follows that downstream must behave exactly like upstream for any input not containing members with downstream names ("downstream members"), except it may add members diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst index 3f18ab424e..5a070adbc1 100644 --- a/docs/interop/vhost-user.rst +++ b/docs/interop/vhost-user.rst @@ -130,18 +130,8 @@ A vring address description Note that a ring address is an IOVA if ``VIRTIO_F_IOMMU_PLATFORM`` has been negotiated. Otherwise it is a user address. -Memory regions description -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -+-------------+---------+---------+-----+---------+ -| num regions | padding | region0 | ... | region7 | -+-------------+---------+---------+-----+---------+ - -:num regions: a 32-bit number of regions - -:padding: 32-bit - -A region is: +Memory region description +^^^^^^^^^^^^^^^^^^^^^^^^^ +---------------+------+--------------+-------------+ | guest address | size | user address | mmap offset | @@ -155,22 +145,49 @@ A region is: :mmap offset: 64-bit offset where region starts in the mapped memory +When the ``VHOST_USER_PROTOCOL_F_XEN_MMAP`` protocol feature has been +successfully negotiated, the memory region description contains two extra +fields at the end. + ++---------------+------+--------------+-------------+----------------+-------+ +| guest address | size | user address | mmap offset | xen mmap flags | domid | ++---------------+------+--------------+-------------+----------------+-------+ + +:xen mmap flags: 32-bit bit field + +- Bit 0 is set for Xen foreign memory mapping. +- Bit 1 is set for Xen grant memory mapping. +- Bit 8 is set if the memory region can not be mapped in advance, and memory + areas within this region must be mapped / unmapped only when required by the + back-end. The back-end shouldn't try to map the entire region at once, as the + front-end may not allow it. The back-end should rather map only the required + amount of memory at once and unmap it after it is used. + +:domid: a 32-bit Xen hypervisor specific domain id. + Single memory region description ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -+---------+---------------+------+--------------+-------------+ -| padding | guest address | size | user address | mmap offset | -+---------+---------------+------+--------------+-------------+ ++---------+--------+ +| padding | region | ++---------+--------+ :padding: 64-bit -:guest address: a 64-bit guest address of the region +A region is represented by Memory region description. 
-:size: a 64-bit size +Multiple Memory regions description +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:user address: a 64-bit user address ++-------------+---------+---------+-----+---------+ +| num regions | padding | region0 | ... | region7 | ++-------------+---------+---------+-----+---------+ -:mmap offset: 64-bit offset where region starts in the mapped memory +:num regions: a 32-bit number of regions + +:padding: 32-bit + +A region is represented by Memory region description. Log description ^^^^^^^^^^^^^^^ @@ -315,7 +332,7 @@ in the ancillary data: * ``VHOST_USER_SET_VRING_KICK`` * ``VHOST_USER_SET_VRING_CALL`` * ``VHOST_USER_SET_VRING_ERR`` -* ``VHOST_USER_SET_SLAVE_REQ_FD`` +* ``VHOST_USER_SET_BACKEND_REQ_FD`` (previous name ``VHOST_USER_SET_SLAVE_REQ_FD``) * ``VHOST_USER_SET_INFLIGHT_FD`` (if ``VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD``) If *front-end* is unable to send the full message or receives a wrong @@ -516,7 +533,7 @@ expected to reply with a zero payload, non-zero otherwise. The back-end relies on the back-end communication channel (see :ref:`Back-end communication ` section below) to send IOTLB miss -and access failure events, by sending ``VHOST_USER_SLAVE_IOTLB_MSG`` +and access failure events, by sending ``VHOST_USER_BACKEND_IOTLB_MSG`` requests to the front-end with a ``struct vhost_iotlb_msg`` as payload. For miss events, the iotlb payload has to be filled with the miss message type (1), the I/O virtual address and the permissions @@ -540,15 +557,15 @@ Back-end communication ---------------------- An optional communication channel is provided if the back-end declares -``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` protocol feature, to allow the +``VHOST_USER_PROTOCOL_F_BACKEND_REQ`` protocol feature, to allow the back-end to make requests to the front-end. -The fd is provided via ``VHOST_USER_SET_SLAVE_REQ_FD`` ancillary data. +The fd is provided via ``VHOST_USER_SET_BACKEND_REQ_FD`` ancillary data. -A back-end may then send ``VHOST_USER_SLAVE_*`` messages to the front-end +A back-end may then send ``VHOST_USER_BACKEND_*`` messages to the front-end using this fd communication channel. -If ``VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD`` protocol feature is +If ``VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD`` protocol feature is negotiated, back-end can send file descriptors (at most 8 descriptors in each message) to front-end via ancillary data using this fd communication channel. @@ -835,7 +852,7 @@ Note that due to the fact that too many messages on the sockets can cause the sending application(s) to block, it is not advised to use this feature unless absolutely necessary. 
It is also considered an error to negotiate this feature without also negotiating -``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` and ``VHOST_USER_PROTOCOL_F_REPLY_ACK``, +``VHOST_USER_PROTOCOL_F_BACKEND_REQ`` and ``VHOST_USER_PROTOCOL_F_REPLY_ACK``, the former is necessary for getting a message channel from the back-end to the front-end, while the latter needs to be used with the in-band notification messages to block until they are processed, both to avoid @@ -855,18 +872,19 @@ Protocol features #define VHOST_USER_PROTOCOL_F_RARP 2 #define VHOST_USER_PROTOCOL_F_REPLY_ACK 3 #define VHOST_USER_PROTOCOL_F_MTU 4 - #define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5 + #define VHOST_USER_PROTOCOL_F_BACKEND_REQ 5 #define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN 6 #define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7 #define VHOST_USER_PROTOCOL_F_PAGEFAULT 8 #define VHOST_USER_PROTOCOL_F_CONFIG 9 - #define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10 + #define VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD 10 #define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11 #define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12 #define VHOST_USER_PROTOCOL_F_RESET_DEVICE 13 #define VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS 14 #define VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS 15 #define VHOST_USER_PROTOCOL_F_STATUS 16 + #define VHOST_USER_PROTOCOL_F_XEN_MMAP 17 Front-end message types ----------------------- @@ -952,8 +970,8 @@ Front-end message types ``VHOST_USER_SET_MEM_TABLE`` :id: 5 :equivalent ioctl: ``VHOST_SET_MEM_TABLE`` - :request payload: memory regions description - :reply payload: (postcopy only) memory regions description + :request payload: multiple memory regions description + :reply payload: (postcopy only) multiple memory regions description Sets the memory map regions on the back-end so it can translate the vring addresses. In the ancillary data there is an array of file @@ -1059,8 +1077,8 @@ Front-end message types in the ancillary data. This signals that polling will be used instead of waiting for the call. Note that if the protocol features ``VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS`` and - ``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` have been negotiated this message - isn't necessary as the ``VHOST_USER_SLAVE_VRING_CALL`` message can be + ``VHOST_USER_PROTOCOL_F_BACKEND_REQ`` have been negotiated this message + isn't necessary as the ``VHOST_USER_BACKEND_VRING_CALL`` message can be used, it may however still be used to set an event file descriptor or to enable polling. @@ -1077,8 +1095,8 @@ Front-end message types invalid FD flag. This flag is set when there is no file descriptor in the ancillary data. Note that if the protocol features ``VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS`` and - ``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` have been negotiated this message - isn't necessary as the ``VHOST_USER_SLAVE_VRING_ERR`` message can be + ``VHOST_USER_PROTOCOL_F_BACKEND_REQ`` have been negotiated this message + isn't necessary as the ``VHOST_USER_BACKEND_VRING_ERR`` message can be used, it may however still be used to set an event file descriptor (which will be preferred over the message). @@ -1139,7 +1157,7 @@ Front-end message types respond with zero in case the specified MTU is valid, or non-zero otherwise. 
-``VHOST_USER_SET_SLAVE_REQ_FD`` +``VHOST_USER_SET_BACKEND_REQ_FD`` (previous name ``VHOST_USER_SET_SLAVE_REQ_FD``) :id: 21 :equivalent ioctl: N/A :request payload: N/A @@ -1150,7 +1168,7 @@ Front-end message types This request should be sent only when ``VHOST_USER_F_PROTOCOL_FEATURES`` has been negotiated, and protocol - feature bit ``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` bit is present in + feature bit ``VHOST_USER_PROTOCOL_F_BACKEND_REQ`` bit is present in ``VHOST_USER_GET_PROTOCOL_FEATURES``. If ``VHOST_USER_PROTOCOL_F_REPLY_ACK`` is negotiated, the back-end must respond with zero for success, non-zero otherwise. @@ -1429,7 +1447,7 @@ Back-end message types For this type of message, the request is sent by the back-end and the reply is sent by the front-end. -``VHOST_USER_SLAVE_IOTLB_MSG`` +``VHOST_USER_BACKEND_IOTLB_MSG`` (previous name ``VHOST_USER_SLAVE_IOTLB_MSG``) :id: 1 :equivalent ioctl: N/A (equivalent to ``VHOST_IOTLB_MSG`` message type) :request payload: ``struct vhost_iotlb_msg`` @@ -1444,7 +1462,7 @@ is sent by the front-end. ``VIRTIO_F_IOMMU_PLATFORM`` feature has been successfully negotiated. -``VHOST_USER_SLAVE_CONFIG_CHANGE_MSG`` +``VHOST_USER_BACKEND_CONFIG_CHANGE_MSG`` (previous name ``VHOST_USER_SLAVE_CONFIG_CHANGE_MSG``) :id: 2 :equivalent ioctl: N/A :request payload: N/A @@ -1459,7 +1477,7 @@ is sent by the front-end. ``VHOST_USER_NEED_REPLY`` flag, the front-end must respond with zero when operation is successfully completed, or non-zero otherwise. -``VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG`` +``VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG`` (previous name ``VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG``) :id: 3 :equivalent ioctl: N/A :request payload: vring area description @@ -1482,7 +1500,7 @@ is sent by the front-end. ``VHOST_USER_PROTOCOL_F_HOST_NOTIFIER`` protocol feature has been successfully negotiated. -``VHOST_USER_SLAVE_VRING_CALL`` +``VHOST_USER_BACKEND_VRING_CALL`` (previous name ``VHOST_USER_SLAVE_VRING_CALL``) :id: 4 :equivalent ioctl: N/A :request payload: vring state description @@ -1496,7 +1514,7 @@ is sent by the front-end. The state.num field is currently reserved and must be set to 0. -``VHOST_USER_SLAVE_VRING_ERR`` +``VHOST_USER_BACKEND_VRING_ERR`` (previous name ``VHOST_USER_SLAVE_VRING_ERR``) :id: 5 :equivalent ioctl: N/A :request payload: vring state description diff --git a/docs/meson.build b/docs/meson.build index bbcdccce68..9040f860ae 100644 --- a/docs/meson.build +++ b/docs/meson.build @@ -1,10 +1,5 @@ -if get_option('sphinx_build') == '' - sphinx_build = find_program(['sphinx-build-3', 'sphinx-build'], - required: get_option('docs')) -else - sphinx_build = find_program(get_option('sphinx_build'), - required: get_option('docs')) -endif +sphinx_build = find_program(fs.parent(python.full_path()) / 'sphinx-build', + required: get_option('docs')) # Check if tools are available to build documentation. 
build_docs = false @@ -12,7 +7,19 @@ if sphinx_build.found() SPHINX_ARGS = ['env', 'CONFDIR=' + qemu_confdir, sphinx_build, '-q'] # If we're making warnings fatal, apply this to Sphinx runs as well if get_option('werror') - SPHINX_ARGS += [ '-W' ] + SPHINX_ARGS += [ '-W', '-Dkerneldoc_werror=1' ] + endif + + sphinx_version = run_command(SPHINX_ARGS + ['--version'], + check: true).stdout().split()[1] + if sphinx_version.version_compare('>=1.7.0') + SPHINX_ARGS += ['-j', 'auto'] + else + nproc = find_program('nproc') + if nproc.found() + jobs = run_command(nproc, check: true).stdout() + SPHINX_ARGS += ['-j', jobs] + endif endif # This is a bit awkward but works: create a trivial document and diff --git a/docs/pcie_sriov.txt b/docs/pcie_sriov.txt index 11158dbf88..7eff7f2703 100644 --- a/docs/pcie_sriov.txt +++ b/docs/pcie_sriov.txt @@ -9,10 +9,7 @@ virtual functions (VFs) for the main purpose of eliminating software overhead in I/O from virtual machines. QEMU now implements the basic common functionality to enable an emulated device -to support SR/IOV. Yet no fully implemented devices exists in QEMU, but a -proof-of-concept hack of the Intel igb can be found here: - -git://github.com/knuto/qemu.git sriov_patches_v5 +to support SR/IOV. Implementation ============== diff --git a/docs/specs/index.rst b/docs/specs/index.rst index a58d9311cb..e58be38c41 100644 --- a/docs/specs/index.rst +++ b/docs/specs/index.rst @@ -8,6 +8,9 @@ guest hardware that is specific to QEMU. .. toctree:: :maxdepth: 2 + pci-ids + pci-serial + pci-testdev ppc-xive ppc-spapr-xive ppc-spapr-numa diff --git a/docs/specs/pci-ids.rst b/docs/specs/pci-ids.rst new file mode 100644 index 0000000000..e302bea484 --- /dev/null +++ b/docs/specs/pci-ids.rst @@ -0,0 +1,98 @@ +================ +PCI IDs for QEMU +================ + +Red Hat, Inc. donates a part of its device ID range to QEMU, to be used for +virtual devices. The vendor IDs are 1af4 (formerly Qumranet ID) and 1b36. + +Contact Gerd Hoffmann to get a device ID assigned +for your devices. + +1af4 vendor ID +-------------- + +The 1000 -> 10ff device ID range is used as follows for virtio-pci devices. +Note that this allocation is separate from the virtio device IDs, which are +maintained as part of the virtio specification. + +1af4:1000 + network device (legacy) +1af4:1001 + block device (legacy) +1af4:1002 + balloon device (legacy) +1af4:1003 + console device (legacy) +1af4:1004 + SCSI host bus adapter device (legacy) +1af4:1005 + entropy generator device (legacy) +1af4:1009 + 9p filesystem device (legacy) +1af4:1012 + vsock device (bug compatibility) + +1af4:1040 to 1af4:10ef + ID range for modern virtio devices. The PCI device + ID is calculated from the virtio device ID by adding the + 0x1040 offset. The virtio IDs are defined in the virtio + specification. The Linux kernel has a header file with + defines for all virtio IDs (``linux/virtio_ids.h``); QEMU has a + copy in ``include/standard-headers/``. + +1af4:10f0 to 1a4f:10ff + Available for experimental usage without registration. Must get + official ID when the code leaves the test lab (i.e. when seeking + upstream merge or shipping a distro/product) to avoid conflicts. + +1af4:1100 + Used as PCI Subsystem ID for existing hardware devices emulated + by QEMU. + +1af4:1110 + ivshmem device (shared memory, ``docs/specs/ivshmem-spec.txt``) + +All other device IDs are reserved. 
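The 0x1040 offset rule above is mechanical; a short sketch (not part of this patch; the two virtio device IDs quoted are from ``linux/virtio_ids.h``):

.. code-block:: python

    # Modern virtio-pci device IDs are the virtio device ID plus 0x1040,
    # constrained to the 1af4:1040 .. 1af4:10ef range described above.
    def virtio_pci_device_id(virtio_id):
        assert 0 <= virtio_id <= 0xaf, "outside the modern virtio ID range"
        return 0x1040 + virtio_id

    assert virtio_pci_device_id(1) == 0x1041  # virtio-net, modern
    assert virtio_pci_device_id(2) == 0x1042  # virtio-block, modern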
+ +1b36 vendor ID +-------------- + +The 0000 -> 00ff device ID range is used as follows for QEMU-specific +PCI devices (other than virtio): + +1b36:0001 + PCI-PCI bridge +1b36:0002 + PCI serial port (16550A) adapter (:doc:`pci-serial`) +1b36:0003 + PCI Dual-port 16550A adapter (:doc:`pci-serial`) +1b36:0004 + PCI Quad-port 16550A adapter (:doc:`pci-serial`) +1b36:0005 + PCI test device (:doc:`pci-testdev`) +1b36:0006 + PCI Rocker Ethernet switch device +1b36:0007 + PCI SD Card Host Controller Interface (SDHCI) +1b36:0008 + PCIe host bridge +1b36:0009 + PCI Expander Bridge (-device pxb) +1b36:000a + PCI-PCI bridge (multiseat) +1b36:000b + PCIe Expander Bridge (-device pxb-pcie) +1b36:000d + PCI xhci usb host adapter +1b36:000f + mdpy (mdev sample device), ``linux/samples/vfio-mdev/mdpy.c`` +1b36:0010 + PCIe NVMe device (``-device nvme``) +1b36:0011 + PCI PVPanic device (``-device pvpanic-pci``) +1b36:0012 + PCI ACPI ERST device (``-device acpi-erst``) + +All these devices are documented in :doc:`index`. + +The 0100 device ID is used for the QXL video card device. diff --git a/docs/specs/pci-ids.txt b/docs/specs/pci-ids.txt deleted file mode 100644 index e463c4cb3a..0000000000 --- a/docs/specs/pci-ids.txt +++ /dev/null @@ -1,70 +0,0 @@ - -PCI IDs for qemu -================ - -Red Hat, Inc. donates a part of its device ID range to qemu, to be used for -virtual devices. The vendor IDs are 1af4 (formerly Qumranet ID) and 1b36. - -Contact Gerd Hoffmann to get a device ID assigned -for your devices. - -1af4 vendor ID --------------- - -The 1000 -> 10ff device ID range is used as follows for virtio-pci devices. -Note that this allocation separate from the virtio device IDs, which are -maintained as part of the virtio specification. - -1af4:1000 network device (legacy) -1af4:1001 block device (legacy) -1af4:1002 balloon device (legacy) -1af4:1003 console device (legacy) -1af4:1004 SCSI host bus adapter device (legacy) -1af4:1005 entropy generator device (legacy) -1af4:1009 9p filesystem device (legacy) -1af4:1012 vsock device (bug compatibility) - -1af4:1040 Start of ID range for modern virtio devices. The PCI device - to ID is calculated from the virtio device ID by adding the -1af4:10ef 0x1040 offset. The virtio IDs are defined in the virtio - specification. The Linux kernel has a header file with - defines for all virtio IDs (linux/virtio_ids.h), qemu has a - copy in include/standard-headers/. - -1af4:10f0 Available for experimental usage without registration. Must get - to official ID when the code leaves the test lab (i.e. when seeking -1af4:10ff upstream merge or shipping a distro/product) to avoid conflicts. - -1af4:1100 Used as PCI Subsystem ID for existing hardware devices emulated - by qemu. - -1af4:1110 ivshmem device (shared memory, docs/specs/ivshmem-spec.txt) - -All other device IDs are reserved. 
- -1b36 vendor ID -------------- - -The 0000 -> 00ff device ID range is used as follows for QEMU-specific -PCI devices (other than virtio): - -1b36:0001 PCI-PCI bridge -1b36:0002 PCI serial port (16550A) adapter (docs/specs/pci-serial.txt) -1b36:0003 PCI Dual-port 16550A adapter (docs/specs/pci-serial.txt) -1b36:0004 PCI Quad-port 16550A adapter (docs/specs/pci-serial.txt) -1b36:0005 PCI test device (docs/specs/pci-testdev.txt) -1b36:0006 PCI Rocker Ethernet switch device -1b36:0007 PCI SD Card Host Controller Interface (SDHCI) -1b36:0008 PCIe host bridge -1b36:0009 PCI Expander Bridge (-device pxb) -1b36:000a PCI-PCI bridge (multiseat) -1b36:000b PCIe Expander Bridge (-device pxb-pcie) -1b36:000d PCI xhci usb host adapter -1b36:000f mdpy (mdev sample device), linux/samples/vfio-mdev/mdpy.c -1b36:0010 PCIe NVMe device (-device nvme) -1b36:0011 PCI PVPanic device (-device pvpanic-pci) -1b36:0012 PCI ACPI ERST device (-device acpi-erst) - -All these devices are documented in docs/specs. - -The 0100 device ID is used for the QXL video card device.
diff --git a/docs/specs/pci-serial.rst b/docs/specs/pci-serial.rst new file mode 100644 index 0000000000..8d916a3669 --- /dev/null +++ b/docs/specs/pci-serial.rst @@ -0,0 +1,37 @@ +======================= +QEMU PCI serial devices +======================= + +QEMU implements some PCI serial devices which are simple PCI +wrappers around one or more 16550 UARTs. + +There is one single-port variant and two multiport variants. Linux +guests work out of the box with all cards. There is a Windows inf file +(``docs/qemupciserial.inf``) to set up the cards in Windows guests. + + +Single-port card +---------------- + +Name: + ``pci-serial`` +PCI ID: + 1b36:0002 +PCI Region 0: + IO bar, 8 bytes long, with the 16550 UART mapped to it. +Interrupt: + Wired to pin A. + + +Multiport cards +--------------- + +Name: + ``pci-serial-2x``, ``pci-serial-4x`` +PCI ID: + 1b36:0003 (``-2x``) and 1b36:0004 (``-4x``) +PCI Region 0: + IO bar, with two or four 16550 UARTs mapped after each other. + The first is at offset 0, the second at offset 8, and so on. +Interrupt: + Wired to pin A.
diff --git a/docs/specs/pci-serial.txt b/docs/specs/pci-serial.txt deleted file mode 100644 index 66c761f2b4..0000000000 --- a/docs/specs/pci-serial.txt +++ /dev/null @@ -1,34 +0,0 @@ - -QEMU pci serial devices -======================= - -There is one single-port variant and two muliport-variants. Linux -guests out-of-the box with all cards. There is a Windows inf file -(docs/qemupciserial.inf) to setup the single-port card in Windows -guests. - - -single-port card ----------------- - -Name: pci-serial -PCI ID: 1b36:0002 - -PCI Region 0: - IO bar, 8 bytes long, with the 16550 uart mapped to it. - Interrupt is wired to pin A. - - -multiport cards ---------------- - -Name: pci-serial-2x -PCI ID: 1b36:0003 - -Name: pci-serial-4x -PCI ID: 1b36:0004 - -PCI Region 0: - IO bar, with two/four 16550 uart mapped after each other. - The first is at offset 0, second at offset 8, ... - Interrupt is wired to pin A.
diff --git a/docs/specs/pci-testdev.rst b/docs/specs/pci-testdev.rst new file mode 100644 index 0000000000..4b6d36543b --- /dev/null +++ b/docs/specs/pci-testdev.rst @@ -0,0 +1,39 @@ +==================== +QEMU PCI test device +==================== + +``pci-testdev`` is a device used for testing low level IO. + +The device implements up to three BARs: BAR0, BAR1 and BAR2. +Each of BAR 0+1 can be memory or IO. Guests must detect +BAR types and act accordingly.
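Detecting the BAR type is standard PCI behaviour rather than anything specific to this device; as a reminder (a sketch of the PCI rule, not code from this patch), bit 0 of the raw BAR value distinguishes I/O from memory space:

.. code-block:: python

    # Per the PCI specification: BAR bit 0 is 1 for I/O space, 0 for
    # memory space; for memory BARs, bits 2:1 == 0b10 means 64-bit.
    def bar_is_io(raw_bar):
        return bool(raw_bar & 0x1)

    def bar_is_64bit_memory(raw_bar):
        return not bar_is_io(raw_bar) and ((raw_bar >> 1) & 0x3) == 0x2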
+ +BAR 0+1 size is up to 4K bytes each. +BAR 0+1 starts with the following header: + +.. code-block:: c + + typedef struct PCITestDevHdr { + uint8_t test; /* write-only, starts a given test number */ + uint8_t width_type; /* + * read-only, type and width of access for a given test. + * 1,2,4 for byte,word or long write. + * any other value if test not supported on this BAR + */ + uint8_t pad0[2]; + uint32_t offset; /* read-only, offset in this BAR for a given test */ + uint32_t data; /* read-only, data to use for a given test */ + uint32_t count; /* for debugging. number of writes detected. */ + uint8_t name[]; /* for debugging. 0-terminated ASCII string. */ + } PCITestDevHdr; + +All registers are little endian. + +The device is expected to always implement tests 0 to N on each BAR, and to add new +tests with higher numbers. In this way a guest can scan test numbers until it +detects an access type that it does not support on this BAR, then stop. + +BAR2 is a 64bit memory BAR, without backing storage. It is disabled +by default and can be enabled using the ``membar=`` property. This +can be used to test whether guests handle PCI BARs of a specific +(possibly quite large) size correctly.
diff --git a/docs/specs/pci-testdev.txt b/docs/specs/pci-testdev.txt deleted file mode 100644 index 4280a1e73c..0000000000 --- a/docs/specs/pci-testdev.txt +++ /dev/null @@ -1,31 +0,0 @@ -pci-test is a device used for testing low level IO - -device implements up to three BARs: BAR0, BAR1 and BAR2. -Each of BAR 0+1 can be memory or IO. Guests must detect -BAR types and act accordingly. - -BAR 0+1 size is up to 4K bytes each. -BAR 0+1 starts with the following header: - -typedef struct PCITestDevHdr { - uint8_t test; <- write-only, starts a given test number - uint8_t width_type; <- read-only, type and width of access for a given test. - 1,2,4 for byte,word or long write. - any other value if test not supported on this BAR - uint8_t pad0[2]; - uint32_t offset; <- read-only, offset in this BAR for a given test - uint32_t data; <- read-only, data to use for a given test - uint32_t count; <- for debugging. number of writes detected. - uint8_t name[]; <- for debugging. 0-terminated ASCII string. -} PCITestDevHdr; - -All registers are little endian. - -device is expected to always implement tests 0 to N on each BAR, and to add new -tests with higher numbers. In this way a guest can scan test numbers until it -detects an access type that it does not support on this BAR, then stop. - -BAR2 is a 64bit memory bar, without backing storage. It is disabled -by default and can be enabled using the membar= property. This -can be used to test whether guests handle pci bars of a specific -(possibly quite large) size correctly.
diff --git a/docs/specs/tpm.rst b/docs/specs/tpm.rst index 535912a92b..efe124a148 100644 --- a/docs/specs/tpm.rst +++ b/docs/specs/tpm.rst @@ -21,12 +21,16 @@ QEMU files related to TPM TIS interface: - ``hw/tpm/tpm_tis_common.c`` - ``hw/tpm/tpm_tis_isa.c`` - ``hw/tpm/tpm_tis_sysbus.c`` + - ``hw/tpm/tpm_tis_i2c.c`` - ``hw/tpm/tpm_tis.h`` Both an ISA device and a sysbus device are available. The former is used with pc/q35 machine while the latter can be instantiated in the Arm virt machine. +I2C device support is also provided; it can be instantiated in Arm-based +emulation machines. This device only supports the TPM 2 protocol.
+ CRB interface ------------- @@ -348,6 +352,23 @@ In case an Arm virt machine is emulated, use the following command line: -drive if=pflash,format=raw,file=flash0.img,readonly=on \ -drive if=pflash,format=raw,file=flash1.img +In case a ast2600-evb bmc machine is emulated and you want to use a TPM device +attached to I2C bus, use the following command line: + +.. code-block:: console + + qemu-system-arm -M ast2600-evb -nographic \ + -kernel arch/arm/boot/zImage \ + -dtb arch/arm/boot/dts/aspeed-ast2600-evb.dtb \ + -initrd rootfs.cpio \ + -chardev socket,id=chrtpm,path=/tmp/mytpm1/swtpm-sock \ + -tpmdev emulator,id=tpm0,chardev=chrtpm \ + -device tpm-tis-i2c,tpmdev=tpm0,bus=aspeed.i2c.bus.12,address=0x2e + + For testing, use this command to load the driver to the correct address + + echo tpm_tis_i2c 0x2e > /sys/bus/i2c/devices/i2c-12/new_device + In case SeaBIOS is used as firmware, it should show the TPM menu item after entering the menu with 'ESC'. diff --git a/docs/sphinx/dbusdomain.py b/docs/sphinx/dbusdomain.py index 2ea95af623..9872fd5bf6 100644 --- a/docs/sphinx/dbusdomain.py +++ b/docs/sphinx/dbusdomain.py @@ -400,6 +400,10 @@ class DBusDomain(Domain): for refname, obj in self.objects.items(): yield (refname, refname, obj.objtype, obj.docname, obj.node_id, 1) + def merge_domaindata(self, docnames, otherdata): + for name, obj in otherdata['objects'].items(): + if obj.docname in docnames: + self.data['objects'][name] = obj def setup(app): app.add_domain(DBusDomain) diff --git a/docs/sphinx/fakedbusdoc.py b/docs/sphinx/fakedbusdoc.py index d2c5079046..2d2e6ef640 100644 --- a/docs/sphinx/fakedbusdoc.py +++ b/docs/sphinx/fakedbusdoc.py @@ -23,3 +23,8 @@ class FakeDBusDocDirective(Directive): def setup(app: Sphinx) -> Dict[str, Any]: """Register a fake dbus-doc directive with Sphinx""" app.add_directive("dbus-doc", FakeDBusDocDirective) + + return dict( + parallel_read_safe = True, + parallel_write_safe = True + ) diff --git a/docs/sphinx/kerneldoc.py b/docs/sphinx/kerneldoc.py index bf44215016..72c403a737 100644 --- a/docs/sphinx/kerneldoc.py +++ b/docs/sphinx/kerneldoc.py @@ -74,6 +74,10 @@ class KernelDocDirective(Directive): # Sphinx versions cmd += ['-sphinx-version', sphinx.__version__] + # Pass through the warnings-as-errors flag + if env.config.kerneldoc_werror: + cmd += ['-Werror'] + filename = env.config.kerneldoc_srctree + '/' + self.arguments[0] export_file_patterns = [] @@ -167,6 +171,7 @@ def setup(app): app.add_config_value('kerneldoc_bin', None, 'env') app.add_config_value('kerneldoc_srctree', None, 'env') app.add_config_value('kerneldoc_verbosity', 1, 'env') + app.add_config_value('kerneldoc_werror', 0, 'env') app.add_directive('kernel-doc', KernelDocDirective) diff --git a/docs/sphinx/qapidoc.py b/docs/sphinx/qapidoc.py index d791b59492..8f3b9997a1 100644 --- a/docs/sphinx/qapidoc.py +++ b/docs/sphinx/qapidoc.py @@ -268,6 +268,9 @@ class QAPISchemaGenRSTVisitor(QAPISchemaVisitor): """Return list of doctree nodes for additional sections""" nodelist = [] for section in doc.sections: + if section.name and section.name == 'TODO': + # Hide TODO: sections + continue snode = self._make_section(section.name) if section.name and section.name.startswith('Example'): snode += self._nodes_for_example(section.text) diff --git a/docs/sphinx/qmp_lexer.py b/docs/sphinx/qmp_lexer.py index f7e4c0e198..a59de8a079 100644 --- a/docs/sphinx/qmp_lexer.py +++ b/docs/sphinx/qmp_lexer.py @@ -41,3 +41,8 @@ def setup(sphinx): sphinx.add_lexer('QMP', QMPExampleLexer) except 
errors.VersionRequirementError: sphinx.add_lexer('QMP', QMPExampleLexer()) + + return dict( + parallel_read_safe = True, + parallel_write_safe = True + ) diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst index 6c5b05128e..d4e293e7f9 100644 --- a/docs/system/arm/aspeed.rst +++ b/docs/system/arm/aspeed.rst @@ -24,6 +24,8 @@ AST2500 SoC based machines : - ``sonorapass-bmc`` OCP SonoraPass BMC - ``fp5280g2-bmc`` Inspur FP5280G2 BMC - ``g220a-bmc`` Bytedance G220A BMC +- ``yosemitev2-bmc`` Facebook YosemiteV2 BMC +- ``tiogapass-bmc`` Facebook Tiogapass BMC AST2600 SoC based machines : diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst index 00c444042f..f4524b6d3e 100644 --- a/docs/system/arm/cpu-features.rst +++ b/docs/system/arm/cpu-features.rst @@ -177,39 +177,32 @@ are named with the prefix "kvm-". KVM VCPU features may be probed, enabled, and disabled in the same way as other CPU features. Below is the list of KVM VCPU features and their descriptions. - kvm-no-adjvtime By default kvm-no-adjvtime is disabled. This - means that by default the virtual time - adjustment is enabled (vtime is not *not* - adjusted). +``kvm-no-adjvtime`` + By default kvm-no-adjvtime is disabled. This means that by default + the virtual time adjustment is enabled (vtime is not *not* adjusted). - When virtual time adjustment is enabled each - time the VM transitions back to running state - the VCPU's virtual counter is updated to ensure - stopped time is not counted. This avoids time - jumps surprising guest OSes and applications, - as long as they use the virtual counter for - timekeeping. However it has the side effect of - the virtual and physical counters diverging. - All timekeeping based on the virtual counter - will appear to lag behind any timekeeping that - does not subtract VM stopped time. The guest - may resynchronize its virtual counter with - other time sources as needed. + When virtual time adjustment is enabled each time the VM transitions + back to running state the VCPU's virtual counter is updated to + ensure stopped time is not counted. This avoids time jumps + surprising guest OSes and applications, as long as they use the + virtual counter for timekeeping. However it has the side effect of + the virtual and physical counters diverging. All timekeeping based + on the virtual counter will appear to lag behind any timekeeping + that does not subtract VM stopped time. The guest may resynchronize + its virtual counter with other time sources as needed. - Enable kvm-no-adjvtime to disable virtual time - adjustment, also restoring the legacy (pre-5.0) - behavior. + Enable kvm-no-adjvtime to disable virtual time adjustment, also + restoring the legacy (pre-5.0) behavior. - kvm-steal-time Since v5.2, kvm-steal-time is enabled by - default when KVM is enabled, the feature is - supported, and the guest is 64-bit. +``kvm-steal-time`` + Since v5.2, kvm-steal-time is enabled by default when KVM is + enabled, the feature is supported, and the guest is 64-bit. - When kvm-steal-time is enabled a 64-bit guest - can account for time its CPUs were not running - due to the host not scheduling the corresponding - VCPU threads. The accounting statistics may - influence the guest scheduler behavior and/or be - exposed to the guest userspace. + When kvm-steal-time is enabled a 64-bit guest can account for time + its CPUs were not running due to the host not scheduling the + corresponding VCPU threads. 
The accounting statistics may influence + the guest scheduler behavior and/or be exposed to the guest + userspace. TCG VCPU Features ================= @@ -217,16 +210,15 @@ TCG VCPU Features TCG VCPU features are CPU features that are specific to TCG. Below is the list of TCG VCPU features and their descriptions. - pauth-impdef When ``FEAT_Pauth`` is enabled, either the - *impdef* (Implementation Defined) algorithm - is enabled or the *architected* QARMA algorithm - is enabled. By default the impdef algorithm - is disabled, and QARMA is enabled. +``pauth-impdef`` + When ``FEAT_Pauth`` is enabled, either the *impdef* (Implementation + Defined) algorithm is enabled or the *architected* QARMA algorithm + is enabled. By default the impdef algorithm is disabled, and QARMA + is enabled. - The architected QARMA algorithm has good - cryptographic properties, but can be quite slow - to emulate. The impdef algorithm used by QEMU - is non-cryptographic but significantly faster. + The architected QARMA algorithm has good cryptographic properties, + but can be quite slow to emulate. The impdef algorithm used by QEMU + is non-cryptographic but significantly faster. SVE CPU Properties ================== diff --git a/docs/system/arm/cubieboard.rst b/docs/system/arm/cubieboard.rst index 8d485f5435..58c4a2d3ea 100644 --- a/docs/system/arm/cubieboard.rst +++ b/docs/system/arm/cubieboard.rst @@ -15,3 +15,4 @@ Emulated devices: - USB controller - SATA controller - TWI (I2C) controller +- Watchdog timer diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst index 2062d71261..7338987875 100644 --- a/docs/system/arm/emulation.rst +++ b/docs/system/arm/emulation.rst @@ -56,6 +56,7 @@ the following architecture extensions: - FEAT_MTE3 (MTE Asymmetric Fault Handling) - FEAT_PAN (Privileged access never) - FEAT_PAN2 (AT S1E1R and AT S1E1W instruction variants affected by PSTATE.PAN) +- FEAT_PAN3 (Support for SCTLR_ELx.EPAN) - FEAT_PAuth (Pointer authentication) - FEAT_PMULL (PMULL, PMULL2 instructions) - FEAT_PMUv3p1 (PMU Extensions v3.1) diff --git a/docs/system/arm/orangepi.rst b/docs/system/arm/orangepi.rst index e5973600a1..9afa54213b 100644 --- a/docs/system/arm/orangepi.rst +++ b/docs/system/arm/orangepi.rst @@ -26,6 +26,7 @@ The Orange Pi PC machine supports the following devices: * System Control module * Security Identifier device * TWI (I2C) + * Watchdog timer Limitations """"""""""" diff --git a/docs/system/arm/sbsa.rst b/docs/system/arm/sbsa.rst index b499d7e927..016776aed8 100644 --- a/docs/system/arm/sbsa.rst +++ b/docs/system/arm/sbsa.rst @@ -27,6 +27,6 @@ The sbsa-ref board supports: - System bus EHCI controller - CDROM and hard disc on AHCI bus - E1000E ethernet card on PCIe bus - - VGA display adaptor on PCIe bus + - Bochs display adapter on PCIe bus - A generic SBSA watchdog device diff --git a/docs/system/device-emulation.rst b/docs/system/device-emulation.rst index 0506006056..8d4a1821fa 100644 --- a/docs/system/device-emulation.rst +++ b/docs/system/device-emulation.rst @@ -93,3 +93,5 @@ Emulated Devices devices/virtio-pmem.rst devices/vhost-user-rng.rst devices/canokey.rst + devices/usb-u2f.rst + devices/igb.rst diff --git a/docs/system/devices/cxl.rst b/docs/system/devices/cxl.rst index f25783a4ec..f12011e230 100644 --- a/docs/system/devices/cxl.rst +++ b/docs/system/devices/cxl.rst @@ -111,7 +111,7 @@ Interfaces provided include: CXL Root Ports (CXL RP) ~~~~~~~~~~~~~~~~~~~~~~~ -A CXL Root Port servers te same purpose as a PCIe Root Port. 
+A CXL Root Port serves the same purpose as a PCIe Root Port. There are a number of CXL specific Designated Vendor Specific Extended Capabilities (DVSEC) in PCIe Configuration Space and associated component register access via PCI bars. @@ -162,7 +162,7 @@ Example system Topology. x marks the match in each decoder level:: |<------------------SYSTEM PHYSICAL ADDRESS MAP (1)----------------->| | __________ __________________________________ __________ | | | | | | | | | - | | CFMW 0 | | CXL Fixed Memory Window 1 | | CFMW 1 | | + | | CFMW 0 | | CXL Fixed Memory Window 1 | | CFMW 2 | | | | HB0 only | | Configured to interleave memory | | HB1 only | | | | | | memory accesses across HB0/HB1 | | | | | |__________| |_____x____________________________| |__________| | @@ -208,8 +208,8 @@ Notes: (1) **3 CXL Fixed Memory Windows (CFMW)** corresponding to different ranges of the system physical address map. Each CFMW has particular interleave setup across the CXL Host Bridges (HB) - CFMW0 provides uninterleaved access to HB0, CFW2 provides - uninterleaved access to HB1. CFW1 provides interleaved memory access + CFMW0 provides uninterleaved access to HB0, CFMW2 provides + uninterleaved access to HB1. CFMW1 provides interleaved memory access across HB0 and HB1. (2) **Two CXL Host Bridges**. Each of these has 2 CXL Root Ports and @@ -247,7 +247,7 @@ Example topology involving a switch:: |<------------------SYSTEM PHYSICAL ADDRESS MAP (1)----------------->| | __________ __________________________________ __________ | | | | | | | | | - | | CFMW 0 | | CXL Fixed Memory Window 1 | | CFMW 1 | | + | | CFMW 0 | | CXL Fixed Memory Window 1 | | CFMW 2 | | | | HB0 only | | Configured to interleave memory | | HB1 only | | | | | | memory accesses across HB0/HB1 | | | | | |____x_____| |__________________________________| |__________| | @@ -300,22 +300,43 @@ Example topology involving a switch:: Example command lines --------------------- -A very simple setup with just one directly attached CXL Type 3 device:: +A very simple setup with just one directly attached CXL Type 3 Persistent Memory device:: - qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \ + qemu-system-x86_64 -M q35,cxl=on -m 4G,maxmem=8G,slots=8 -smp 4 \ ... -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest.raw,size=256M \ -object memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa.raw,size=256M \ -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \ - -device cxl-type3,bus=root_port13,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \ + -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \ + -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G + +A very simple setup with just one directly attached CXL Type 3 Volatile Memory device:: + + qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \ + ... + -object memory-backend-ram,id=vmem0,share=on,size=256M \ + -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ + -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \ + -device cxl-type3,bus=root_port13,volatile-memdev=vmem0,id=cxl-vmem0 \ + -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G + +The same volatile setup may optionally include an LSA region:: + + qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \ + ... 
+ -object memory-backend-ram,id=vmem0,share=on,size=256M \ + -object memory-backend-file,id=cxl-lsa0,share=on,mem-path=/tmp/lsa.raw,size=256M \ + -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ + -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \ + -device cxl-type3,bus=root_port13,volatile-memdev=vmem0,lsa=cxl-lsa0,id=cxl-vmem0 \ -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G A setup suitable for 4 way interleave. Only one fixed window provided, to enable 2 way interleave across 2 CXL host bridges. Each host bridge has 2 CXL Root Ports, with the CXL Type3 device directly attached (no switches).:: - qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \ + qemu-system-x86_64 -M q35,cxl=on -m 4G,maxmem=8G,slots=8 -smp 4 \ ... -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest.raw,size=256M \ -object memory-backend-file,id=cxl-mem2,share=on,mem-path=/tmp/cxltest2.raw,size=256M \ @@ -328,18 +349,18 @@ the CXL Type3 device directly attached (no switches).:: -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ -device pxb-cxl,bus_nr=222,bus=pcie.0,id=cxl.2 \ -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \ - -device cxl-type3,bus=root_port13,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \ + -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \ -device cxl-rp,port=1,bus=cxl.1,id=root_port14,chassis=0,slot=3 \ - -device cxl-type3,bus=root_port14,memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem1 \ + -device cxl-type3,bus=root_port14,persistent-memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem1 \ -device cxl-rp,port=0,bus=cxl.2,id=root_port15,chassis=0,slot=5 \ - -device cxl-type3,bus=root_port15,memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem2 \ + -device cxl-type3,bus=root_port15,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem2 \ -device cxl-rp,port=1,bus=cxl.2,id=root_port16,chassis=0,slot=6 \ - -device cxl-type3,bus=root_port16,memdev=cxl-mem4,lsa=cxl-lsa4,id=cxl-pmem3 \ + -device cxl-type3,bus=root_port16,persistent-memdev=cxl-mem4,lsa=cxl-lsa4,id=cxl-pmem3 \ -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.targets.1=cxl.2,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=8k An example of 4 devices below a switch suitable for 1, 2 or 4 way interleave:: - qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \ + qemu-system-x86_64 -M q35,cxl=on -m 4G,maxmem=8G,slots=8 -smp 4 \ ... 
 -object memory-backend-file,id=cxl-mem0,share=on,mem-path=/tmp/cxltest.raw,size=256M \
 -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest1.raw,size=256M \
@@ -354,15 +375,23 @@ An example of 4 devices below a switch suitable for 1, 2 or 4 way interleave::
 -device cxl-rp,port=1,bus=cxl.1,id=root_port1,chassis=0,slot=1 \
 -device cxl-upstream,bus=root_port0,id=us0 \
 -device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4 \
- -device cxl-type3,bus=swport0,memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0,size=256M \
+ -device cxl-type3,bus=swport0,persistent-memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0 \
 -device cxl-downstream,port=1,bus=us0,id=swport1,chassis=0,slot=5 \
- -device cxl-type3,bus=swport1,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1,size=256M \
+ -device cxl-type3,bus=swport1,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1 \
 -device cxl-downstream,port=2,bus=us0,id=swport2,chassis=0,slot=6 \
- -device cxl-type3,bus=swport2,memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2,size=256M \
+ -device cxl-type3,bus=swport2,persistent-memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2 \
 -device cxl-downstream,port=3,bus=us0,id=swport3,chassis=0,slot=7 \
- -device cxl-type3,bus=swport3,memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3,size=256M \
+ -device cxl-type3,bus=swport3,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3 \
 -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=4k
 
+Deprecations
+------------
+
+The Type 3 device [memdev] attribute has been deprecated in favor of the
+[persistent-memdev] attribute. [memdev] will default to a persistent memory
+device for backward compatibility and cannot be used in combination
+with [persistent-memdev].
+
 Kernel Configuration Options
 ----------------------------
 
diff --git a/docs/system/devices/igb.rst b/docs/system/devices/igb.rst
new file mode 100644
index 0000000000..04e79dfe54
--- /dev/null
+++ b/docs/system/devices/igb.rst
@@ -0,0 +1,73 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+.. _igb:
+
+igb
+---
+
+igb is a family of Intel's gigabit ethernet controllers. QEMU implements
+the 82576 variant in particular. Its datasheet is available at [1]_.
+
+This implementation is expected to be useful to test SR-IOV networking without
+requiring physical hardware.
+
+Limitations
+===========
+
+This igb implementation was tested with Linux Test Project [2]_ and Windows HLK
+[3]_ during the initial development. Later it was also tested with DPDK Test
+Suite [4]_. The command used when testing with LTP is:
+
+.. code-block:: shell
+
+  network.sh -6mta
+
+Be aware that this implementation lacks many functionalities available with the
+actual hardware, and you may experience various failures if you try to use it
+with an operating system other than DPDK, Linux, and Windows, or if you
+try functionalities not covered by the tests.
+
+Using igb
+=========
+
+Using igb should be no different from using any other network device. See
+:ref:`Network_emulation` in general.
+
+However, you may also need to perform additional steps to activate the SR-IOV
+feature on your guest. For Linux, refer to [5]_; a sketch of the idea follows.
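+The following is an illustrative sketch only, not part of the tested
+configurations above: the QEMU arguments attach an igb instance (the
+SR-IOV-capable physical function) to a user-mode netdev, and the guest-side
+command uses the generic Linux sysfs interface from [5]_. The guest
+interface name ``enp0s2`` is a placeholder that will differ per system.
+
+.. code-block:: shell
+
+  # Host: attach an igb NIC to the guest
+  qemu-system-x86_64 -netdev user,id=net0 -device igb,netdev=net0 ...
+
+  # Guest (Linux): create two virtual functions on the igb PF
+  echo 2 > /sys/class/net/enp0s2/device/sriov_numvfs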
+
+Developing igb
+==============
+
+igb is the successor of e1000e, and e1000e is the successor of e1000 in turn.
+As these devices are very similar, if you make a change for igb and the same
+change can be applied to e1000e and e1000, please do so.
+
+Please do not forget to run tests before submitting a change. As the tests
+included in QEMU are very minimal, also run some application which is likely
+to be affected by the change to confirm it works in an integrated system.
+
+Testing igb
+===========
+
+A qtest of the basic functionality is available. Run the following in the
+build directory:
+
+.. code-block:: shell
+
+  meson test qtest-x86_64/qos-test
+
+ethtool can test register accesses, interrupts, etc. It is automated as an
+Avocado test and can be run with the following command:
+
+.. code:: shell
+
+  make check-avocado AVOCADO_TESTS=tests/avocado/netdev-ethtool.py
+
+References
+==========
+
+.. [1] https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eb-gigabit-ethernet-controller-datasheet.pdf
+.. [2] https://github.com/linux-test-project/ltp
+.. [3] https://learn.microsoft.com/en-us/windows-hardware/test/hlk/
+.. [4] https://doc.dpdk.org/dts/gsg/
+.. [5] https://docs.kernel.org/PCI/pci-iov-howto.html
diff --git a/docs/system/devices/ivshmem.rst b/docs/system/devices/ivshmem.rst
index b03a48afa3..e7aaf34c20 100644
--- a/docs/system/devices/ivshmem.rst
+++ b/docs/system/devices/ivshmem.rst
@@ -1,5 +1,3 @@
-.. _pcsys_005fivshmem:
-
 Inter-VM Shared Memory device
 -----------------------------
 
diff --git a/docs/system/devices/net.rst b/docs/system/devices/net.rst
index 4b2640c448..2ab516d4b0 100644
--- a/docs/system/devices/net.rst
+++ b/docs/system/devices/net.rst
@@ -1,4 +1,4 @@
-.. _pcsys_005fnetwork:
+.. _Network_Emulation:
 
 Network emulation
 -----------------
diff --git a/docs/system/devices/usb-u2f.rst b/docs/system/devices/usb-u2f.rst
new file mode 100644
index 0000000000..4f57d5c8c3
--- /dev/null
+++ b/docs/system/devices/usb-u2f.rst
@@ -0,0 +1,93 @@
+Universal Second Factor (U2F) USB Key Device
+============================================
+
+U2F is an open authentication standard that enables relying parties
+exposed to the internet to offer a strong second factor option for end
+user authentication.
+
+The second factor is provided by a device implementing the U2F
+protocol. In the case of a USB U2F security key, it is a USB HID device
+that implements the U2F protocol.
+
+QEMU supports both pass-through of a host U2F key device to a VM,
+and software emulation of a U2F key.
+
+``u2f-passthru``
+----------------
+
+The ``u2f-passthru`` device allows you to connect a real hardware
+U2F key on your host to a guest VM. All requests made from the guest
+are passed through to the physical security key connected to the
+host machine and vice versa.
+
+In addition, the dedicated pass-through allows you to share a single
+U2F security key with several guest VMs, which is not possible with a
+simple host device assignment pass-through.
+
+You can specify the host U2F key to use with the ``hidraw``
+option, which takes the host path to a Linux ``/dev/hidrawN`` device:
+
+.. parsed-literal::
+    |qemu_system| -usb -device u2f-passthru,hidraw=/dev/hidraw0
+
+If you don't specify the device, the ``u2f-passthru`` device will
+autoscan to take the first U2F device it finds on the host (this
+requires a working libudev):
+
+.. parsed-literal::
+    |qemu_system| -usb -device u2f-passthru
+
+``u2f-emulated``
+----------------
+
+``u2f-emulated`` is a completely software-emulated U2F device.
+It uses `libu2f-emu <https://github.com/MattGorko/libu2f-emu>`__
+for the U2F key emulation. libu2f-emu
+implements the device side of the U2F protocol for all transports
+specified by the FIDO Alliance.
+
+To work, an emulated U2F device must have four elements:
+
+ * ec x509 certificate
+ * ec private key
+ * counter (four-byte value)
+ * 48 bytes of entropy (random bits)
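+
+One way to create these elements on the host is sketched below. This is an
+illustrative recipe rather than one documented by libu2f-emu; in particular
+the exact on-disk format expected for the ``counter`` file is an assumption
+here, so consult the libu2f-emu documentation for the authoritative formats.
+
+.. code-block:: shell
+
+  # EC P-256 private key and a self-signed x509 certificate
+  openssl ecparam -genkey -name prime256v1 -out private-key.pem
+  openssl req -new -x509 -key private-key.pem -out certificate.pem \
+      -days 365 -subj "/CN=u2f-emulated"
+
+  # 48 bytes of entropy from the host RNG
+  head -c 48 /dev/urandom > entropy
+
+  # Initial counter value (format assumed)
+  echo 0 > counter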
+
+To use this type of device, it has to be configured, and these
+four elements must be provided one way or another.
+
+Assuming that you have a working libu2f-emu installed on the host,
+there are three possible ways to configure the ``u2f-emulated`` device:
+
+ * ephemeral
+ * setup directory
+ * manual
+
+Ephemeral is the simplest way to configure; it lets the device generate
+all the elements it needs for a single use over the lifetime of the device.
+It is the default if you do not pass any other options to the device.
+
+.. parsed-literal::
+    |qemu_system| -usb -device u2f-emulated
+
+You can pass the device the path of a setup directory on the host
+using the ``dir`` option; the directory must contain these four files:
+
+ * ``certificate.pem``: ec x509 certificate
+ * ``private-key.pem``: ec private key
+ * ``counter``: counter value
+ * ``entropy``: 48 bytes of entropy
+
+.. parsed-literal::
+    |qemu_system| -usb -device u2f-emulated,dir=$dir
+
+You can also manually pass the device the paths to each of these files,
+if you don't want them all to be in the same directory, using the options
+
+ * ``cert``
+ * ``priv``
+ * ``counter``
+ * ``entropy``
+
+.. parsed-literal::
+    |qemu_system| -usb -device u2f-emulated,cert=$DIR1/$FILE1,priv=$DIR2/$FILE2,counter=$DIR3/$FILE3,entropy=$DIR4/$FILE4
diff --git a/docs/system/devices/usb.rst b/docs/system/devices/usb.rst
index 37cb9b33ae..a6ca7b0c37 100644
--- a/docs/system/devices/usb.rst
+++ b/docs/system/devices/usb.rst
@@ -1,5 +1,3 @@
-.. _pcsys_005fusb:
-
 USB emulation
 -------------
 
@@ -209,7 +207,7 @@ option or the ``device_add`` monitor command. Available devices are:
    USB audio device
 
 ``u2f-{emulated,passthru}``
-   Universal Second Factor device
+   :doc:`usb-u2f`
 
 ``canokey``
    An Open-source Secure Key implementing FIDO2, OpenPGP, PIV and more.
diff --git a/docs/system/gdb.rst b/docs/system/gdb.rst
index 453eb73f6c..7d3718deef 100644
--- a/docs/system/gdb.rst
+++ b/docs/system/gdb.rst
@@ -46,6 +46,28 @@ Here are some useful tips in order to use gdb on system code:
 3. Use ``set architecture i8086`` to dump 16 bit code. Then use
    ``x/10i $cs*16+$eip`` to dump the code at the PC position.
 
+Breakpoint and Watchpoint support
+=================================
+
+While GDB can always fall back to inserting breakpoints into memory
+(if writable), other features are very much dependent on support from
+the accelerator. For TCG system emulation we advertise an infinite
+number of hardware-assisted breakpoints and watchpoints. For other
+accelerators it will depend on whether support has been added (see
+supports_guest_debug and related hooks in AccelOpsClass).
+
+As TCG cannot track all memory accesses in user-mode, there is no
+support for watchpoints.
+
+Relocating code
+---------------
+
+On modern kernels, confusion can be caused by code being relocated by
+features such as address space layout randomisation (ASLR). To avoid
+confusion when debugging such things, you either need to update gdb's
+view of where things are in memory or, perhaps more simply, disable
+ASLR when booting the system, as in the sketch below.
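+
+A minimal sketch of that workflow, assuming an x86-64 Linux guest, a
+``vmlinux`` with debug info, and direct kernel boot (the file names and
+the ``nokaslr`` kernel parameter are illustrative)::
+
+  # Boot with the gdbstub on TCP port 1234 (-s), frozen at startup (-S),
+  # and kernel ASLR disabled so symbols match gdb's view of memory
+  qemu-system-x86_64 -s -S -kernel bzImage -append "nokaslr console=ttyS0"
+
+  # In another terminal, attach gdb and break at a kernel symbol
+  gdb vmlinux
+  (gdb) target remote :1234
+  (gdb) hbreak start_kernel
+  (gdb) continue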
+
 Debugging multicore machines
 ============================
 
diff --git a/docs/system/guest-loader.rst b/docs/system/guest-loader.rst
index 9ef9776bf0..304ee5d531 100644
--- a/docs/system/guest-loader.rst
+++ b/docs/system/guest-loader.rst
@@ -14,7 +14,7 @@ The guest loader does two things:
 - load blobs (kernels and initial ram disks) into memory
 - sets platform FDT data so hypervisors can find and boot them
 
-This is what is typically done by a boot-loader like grub using it's
+This is what is typically done by a boot-loader like grub using its
 multi-boot capability. A typical example would look like:
 
 .. parsed-literal::
@@ -25,9 +25,9 @@ multi-boot capability. A typical example would look like:
   -device guest-loader,addr=0x47000000,initrd=rootfs.cpio
 
 In the above example the Xen hypervisor is loaded by the -kernel
-parameter and passed it's boot arguments via -append. The Dom0 guest
+parameter and passed its boot arguments via -append. The Dom0 guest
 is loaded into the areas of memory. Each blob will get
-``/chosen/module@`` entry in the FDT to indicate it's location and
+``/chosen/module@`` entry in the FDT to indicate its location and
 size. Additional information can be passed with by using additional
 arguments.
 
diff --git a/docs/system/i386/xen.rst b/docs/system/i386/xen.rst
new file mode 100644
index 0000000000..f06765e88c
--- /dev/null
+++ b/docs/system/i386/xen.rst
@@ -0,0 +1,92 @@
+Xen HVM guest support
+=====================
+
+
+Description
+-----------
+
+KVM has support for hosting Xen guests, intercepting Xen hypercalls and event
+channel (Xen PV interrupt) delivery. This allows guests which expect to be
+run under Xen to be hosted in QEMU under Linux/KVM instead.
+
+Using the split irqchip is mandatory for Xen support.
+
+Setup
+-----
+
+Xen mode is enabled by setting the ``xen-version`` property of the KVM
+accelerator, for example for Xen 4.10:
+
+.. parsed-literal::
+
+  |qemu_system| --accel kvm,xen-version=0x4000a,kernel-irqchip=split
+
+Additionally, virtual APIC support can be advertised to the guest through the
+``xen-vapic`` CPU flag:
+
+.. parsed-literal::
+
+  |qemu_system| --accel kvm,xen-version=0x4000a,kernel-irqchip=split --cpu host,+xen_vapic
+
+When Xen support is enabled, QEMU changes hypervisor identification (CPUID
+0x40000000..0x4000000A) to Xen. The KVM identification and features are not
+advertised to a Xen guest. If Hyper-V is also enabled, the Xen identification
+moves to leaves 0x40000100..0x4000010A.
+
+The Xen platform device is enabled automatically for a Xen guest. This allows
+a guest to unplug all emulated devices, in order to use Xen PV block and network
+drivers instead. Under Xen, the boot disk is typically available both via IDE
+emulation, and as a PV block device. Guest bootloaders typically use IDE to load
+the guest kernel, which then unplugs the IDE and continues with the Xen PV block
+device.
+
+This configuration can be achieved as follows:
+
+.. parsed-literal::
+
+  |qemu_system| -M pc --accel kvm,xen-version=0x4000a,kernel-irqchip=split \\
+       -drive file=${GUEST_IMAGE},if=none,id=disk,file.locking=off -device xen-disk,drive=disk,vdev=xvda \\
+       -drive file=${GUEST_IMAGE},index=2,media=disk,file.locking=off,if=ide
+
+It is necessary to use the pc machine type, as the q35 machine uses AHCI instead
+of legacy IDE, and AHCI disks are not unplugged through the Xen PV unplug
+mechanism.
+
+VirtIO devices can also be used; Linux guests may need to be dissuaded from
+unplugging them by adding 'xen_emul_unplug=never' on their command line. A
+sketch of such a setup follows.
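+
+The following sketch is illustrative and untested: it combines the Xen setup
+above with an ordinary virtio-blk disk. The 'xen_emul_unplug=never' parameter
+belongs on the guest kernel command line, set via the guest's bootloader (or
+via ``-append`` when direct-booting a kernel with ``-kernel``):
+
+.. parsed-literal::
+
+  |qemu_system| -M pc --accel kvm,xen-version=0x4000a,kernel-irqchip=split \\
+       -drive file=${GUEST_IMAGE},if=none,id=disk -device virtio-blk-pci,drive=disk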
+ +Properties +---------- + +The following properties exist on the KVM accelerator object: + +``xen-version`` + This property contains the Xen version in ``XENVER_version`` form, with the + major version in the top 16 bits and the minor version in the low 16 bits. + Setting this property enables the Xen guest support. + +``xen-evtchn-max-pirq`` + Xen PIRQs represent an emulated physical interrupt, either GSI or MSI, which + can be routed to an event channel instead of to the emulated I/O or local + APIC. By default, QEMU permits only 256 PIRQs because this allows maximum + compatibility with 32-bit MSI where the higher bits of the PIRQ# would need + to be in the upper 64 bits of the MSI message. For guests with large numbers + of PCI devices (and none which are limited to 32-bit addressing) it may be + desirable to increase this value. + +``xen-gnttab-max-frames`` + Xen grant tables are the means by which a Xen guest grants access to its + memory for PV back ends (disk, network, etc.). Since QEMU only supports v1 + grant tables which are 8 bytes in size, each page (each frame) of the grant + table can reference 512 pages of guest memory. The default number of frames + is 64, allowing for 32768 pages of guest memory to be accessed by PV backends + through simultaneous grants. For guests with large numbers of PV devices and + high throughput, it may be desirable to increase this value. + +OS requirements +--------------- + +The minimal Xen support in the KVM accelerator requires the host to be running +Linux v5.12 or newer. Later versions add optimisations: Linux v5.17 added +acceleration of interrupt delivery via the Xen PIRQ mechanism, and Linux v5.19 +accelerated Xen PV timers and inter-processor interrupts (IPIs). diff --git a/docs/system/introduction.rst b/docs/system/introduction.rst index c8a9fe6c1d..3e256f8326 100644 --- a/docs/system/introduction.rst +++ b/docs/system/introduction.rst @@ -27,7 +27,7 @@ Tiny Code Generator (TCG) capable of emulating many CPUs. * - Hypervisor Framework (hvf) - MacOS - x86 (64 bit only), Arm (64 bit only) - * - Windows Hypervisor Platform (wphx) + * - Windows Hypervisor Platform (whpx) - Windows - x86 * - NetBSD Virtual Machine Monitor (nvmm) diff --git a/docs/system/keys.rst b/docs/system/keys.rst index e596ae6c4e..0fc17b994d 100644 --- a/docs/system/keys.rst +++ b/docs/system/keys.rst @@ -1,4 +1,4 @@ -.. _pcsys_005fkeys: +.. _GUI_keys: Keys in the graphical frontends ------------------------------- diff --git a/docs/system/linuxboot.rst b/docs/system/linuxboot.rst index 228650abc5..5db2e560dc 100644 --- a/docs/system/linuxboot.rst +++ b/docs/system/linuxboot.rst @@ -27,4 +27,4 @@ virtual serial port and the QEMU monitor to the console with the -append "root=/dev/hda console=ttyS0" -nographic Use Ctrl-a c to switch between the serial console and the monitor (see -:ref:`pcsys_005fkeys`). +:ref:`GUI_keys`). diff --git a/docs/system/loongarch/loongson3.rst b/docs/system/loongarch/virt.rst similarity index 51% rename from docs/system/loongarch/loongson3.rst rename to docs/system/loongarch/virt.rst index 489ea20f8f..c37268b404 100644 --- a/docs/system/loongarch/loongson3.rst +++ b/docs/system/loongarch/virt.rst @@ -19,14 +19,14 @@ The ``virt`` machine supports: - Fw_cfg device - PCI/PCIe devices - Memory device -- CPU device. Type: la464-loongarch-cpu. +- CPU device. Type: la464. CPU and machine Type -------------------- The ``qemu-system-loongarch64`` provides emulation for virt machine. 
You can specify the machine type ``virt`` and
-cpu type ``la464-loongarch-cpu``.
+cpu type ``la464``.
 
 Boot options
 ------------
 
@@ -35,95 +35,74 @@ We can boot the LoongArch virt machine by specifying the uefi bios,
 initrd, and linux kernel. And those source codes and binary files
 can be accessed by following steps.
 
-(1) booting command:
+(1) Build qemu-system-loongarch64:
 
 .. code-block:: bash
 
-  $ qemu-system-loongarch64 -machine virt -m 4G -cpu la464-loongarch-cpu \
-  -smp 1 -bios QEMU_EFI.fd -kernel vmlinuz.efi -initrd initrd.img \
-  -append "root=/dev/ram rdinit=/sbin/init console=ttyS0,115200" \
-  --nographic
-
-Note: The running speed may be a little slow, as the performance of our
-qemu and uefi bios is not perfect, and it is being fixed.
-
-(2) cross compiler tools:
-
-.. code-block:: bash
-
-  wget https://github.com/loongson/build-tools/releases/download/ \
-  2022.05.29/loongarch64-clfs-5.0-cross-tools-gcc-full.tar.xz
-
-  tar -vxf loongarch64-clfs-5.0-cross-tools-gcc-full.tar.xz
-
-(3) qemu compile configure option:
-
-.. code-block:: bash
-
-  ./configure --disable-rdma --disable-pvrdma --prefix=usr \
+  ./configure --disable-rdma --disable-pvrdma --prefix=/usr \
    --target-list="loongarch64-softmmu" \
    --disable-libiscsi --disable-libnfs --disable-libpmem \
    --disable-glusterfs --enable-libusb --enable-usb-redir \
    --disable-opengl --disable-xen --enable-spice \
    --enable-debug --disable-capstone --disable-kvm \
    --enable-profiler
-  make
+  make -j8
 
-(4) uefi bios source code and compile method:
+(2) Set up the cross tools:
 
 .. code-block:: bash
 
-  git clone https://github.com/loongson/edk2-LoongarchVirt.git
+  wget https://github.com/loongson/build-tools/releases/download/2022.09.06/loongarch64-clfs-6.3-cross-tools-gcc-glibc.tar.xz
 
-  cd edk2-LoongarchVirt
+  tar -vxf loongarch64-clfs-6.3-cross-tools-gcc-glibc.tar.xz -C /opt
 
-  git submodule update --init
+  export PATH=/opt/cross-tools/bin:$PATH
+  export LD_LIBRARY_PATH=/opt/cross-tools/lib:$LD_LIBRARY_PATH
+  export LD_LIBRARY_PATH=/opt/cross-tools/loongarch64-unknown-linux-gnu/lib/:$LD_LIBRARY_PATH
 
- export PATH=$YOUR_COMPILER_PATH/bin:$PATH
+Note: You need to get the latest cross-tools from https://github.com/loongson/build-tools
 
- export WORKSPACE=`pwd`
+(3) Build the BIOS:
 
- export PACKAGES_PATH=$WORKSPACE/edk2-LoongarchVirt
+    See: https://github.com/tianocore/edk2-platforms/tree/master/Platform/Loongson/LoongArchQemuPkg#readme
 
- export GCC5_LOONGARCH64_PREFIX=loongarch64-unknown-linux-gnu-
+Note: To build the release version of the bios, set --buildtarget=RELEASE;
+      the bios file path is Build/LoongArchQemu/RELEASE_GCC5/FV/QEMU_EFI.fd
 
- edk2-LoongarchVirt/edksetup.sh
-
- make -C edk2-LoongarchVirt/BaseTools
-
- build --buildtarget=DEBUG --tagname=GCC5 --arch=LOONGARCH64 --platform=OvmfPkg/LoongArchQemu/Loongson.dsc
-
- build --buildtarget=RELEASE --tagname=GCC5 --arch=LOONGARCH64 --platform=OvmfPkg/LoongArchQemu/Loongson.dsc
-
-The efi binary file path:
-
- Build/LoongArchQemu/DEBUG_GCC5/FV/QEMU_EFI.fd
-
- Build/LoongArchQemu/RELEASE_GCC5/FV/QEMU_EFI.fd
-
-(5) linux kernel source code and compile method:
+(4) Build the kernel:
 
 .. code-block:: bash
 
  git clone https://github.com/loongson/linux.git
 
- export PATH=$YOUR_COMPILER_PATH/bin:$PATH
+  cd linux
 
- export LD_LIBRARY_PATH=$YOUR_COMPILER_PATH/lib:$LD_LIBRARY_PATH
-
- export LD_LIBRARY_PATH=$YOUR_COMPILER_PATH/loongarch64-unknown-linux-gnu/lib/:$LD_LIBRARY_PATH
+  git checkout loongarch-next
 
  make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- loongson3_defconfig
 
- make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu-
-
- make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- install
-
- make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- modules_install
+  make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- -j32
 
 Note: The branch of linux source code is loongarch-next.
+      The kernel file is arch/loongarch/boot/vmlinuz.efi
 
-(6) initrd file:
+(5) Get the initrd:
 
 You can use busybox tool and the linux modules to make a initrd file. Or
 you can access the binary files: https://github.com/yangxiaojuan-loongson/qemu-binary
+
+.. code-block:: bash
+
+  git clone https://github.com/yangxiaojuan-loongson/qemu-binary
+
+Note: the initrd file is named ramdisk
+
+(6) Booting LoongArch:
+
+.. code-block:: bash
+
+  $ ./build/qemu-system-loongarch64 -machine virt -m 4G -cpu la464 \
+  -smp 1 -bios QEMU_EFI.fd -kernel vmlinuz.efi -initrd ramdisk \
+  -serial stdio -monitor telnet:localhost:4495,server,nowait \
+  -append "root=/dev/ram rdinit=/sbin/init console=ttyS0,115200" \
+  --nographic
diff --git a/docs/system/qemu-block-drivers.rst.inc b/docs/system/qemu-block-drivers.rst.inc
index dfe5d2293d..105cb9679c 100644
--- a/docs/system/qemu-block-drivers.rst.inc
+++ b/docs/system/qemu-block-drivers.rst.inc
@@ -430,6 +430,12 @@ Hard disks
   you may corrupt your host data (use the ``-snapshot`` command
   line option or modify the device permissions accordingly).
 
+Zoned block devices
+  Zoned block devices can be passed through to the guest if the emulated storage
+  controller supports zoned storage. Use
+  ``--blockdev host_device,node-name=drive0,filename=/dev/nullb0,cache.direct=on``
+  to pass through ``/dev/nullb0`` as ``drive0``.
+
 Windows
 ^^^^^^^
 
diff --git a/docs/system/target-i386.rst b/docs/system/target-i386.rst
index e64c013077..1b8a1f248a 100644
--- a/docs/system/target-i386.rst
+++ b/docs/system/target-i386.rst
@@ -3,8 +3,6 @@
 x86 System emulator
 -------------------
 
-.. _pcsys_005fdevices:
-
 Board-specific documentation
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -27,12 +25,11 @@ Architectural features
 
    i386/cpu
    i386/hyperv
+   i386/xen
    i386/kvm-pv
    i386/sgx
    i386/amd-memory-encryption
 
-.. _pcsys_005freq:
-
 OS requirements
 ~~~~~~~~~~~~~~~
 
diff --git a/docs/system/target-mips.rst b/docs/system/target-mips.rst
index 138441bdec..83239fb9df 100644
--- a/docs/system/target-mips.rst
+++ b/docs/system/target-mips.rst
@@ -8,8 +8,6 @@ endian options, ``qemu-system-mips``, ``qemu-system-mipsel``
-``qemu-system-mips64`` and ``qemu-system-mips64el``. Five different
-machine types are emulated:
+``qemu-system-mips64`` and ``qemu-system-mips64el``. Four different
+machine types are emulated:
 
-- A generic ISA PC-like machine \"mips\"
-
 - The MIPS Malta prototype board \"malta\"
 
 - An ACER Pica \"pica61\". This machine needs the 64-bit emulator.
 
@@ -19,18 +17,6 @@ machine types are emulated:
 - A MIPS Magnum R4000 machine \"magnum\". This machine needs the
   64-bit emulator.
 
-The generic emulation is supported by Debian 'Etch' and is able to
-install Debian into a virtual disk image.
The following devices are -emulated: - -- A range of MIPS CPUs, default is the 24Kf - -- PC style serial port - -- PC style IDE disk - -- NE2000 network card - The Malta emulation supports the following devices: - Core board with MIPS 24Kf CPU and Galileo system controller diff --git a/docs/u2f.txt b/docs/u2f.txt deleted file mode 100644 index 7f5813a0b7..0000000000 --- a/docs/u2f.txt +++ /dev/null @@ -1,110 +0,0 @@ -QEMU U2F Key Device Documentation. - -Contents -1. USB U2F key device -2. Building -3. Using u2f-emulated -4. Using u2f-passthru -5. Libu2f-emu - -1. USB U2F key device - -U2F is an open authentication standard that enables relying parties -exposed to the internet to offer a strong second factor option for end -user authentication. - -The standard brings many advantages to both parties, client and server, -allowing to reduce over-reliance on passwords, it increases authentication -security and simplifies passwords. - -The second factor is materialized by a device implementing the U2F -protocol. In case of a USB U2F security key, it is a USB HID device -that implements the U2F protocol. - -In QEMU, the USB U2F key device offers a dedicated support of U2F, allowing -guest USB FIDO/U2F security keys operating in two possible modes: -pass-through and emulated. - -The pass-through mode consists of passing all requests made from the guest -to the physical security key connected to the host machine and vice versa. -In addition, the dedicated pass-through allows to have a U2F security key -shared on several guests which is not possible with a simple host device -assignment pass-through. - -The emulated mode consists of completely emulating the behavior of an -U2F device through software part. Libu2f-emu is used for that. - - -2. Building - -To ensure the build of the u2f-emulated device variant which depends -on libu2f-emu: configuring and building: - - ./configure --enable-u2f && make - -The pass-through mode is built by default on Linux. To take advantage -of the autoscan option it provides, make sure you have a working libudev -installed on the host. - - -3. Using u2f-emulated - -To work, an emulated U2F device must have four elements: - * ec x509 certificate - * ec private key - * counter (four bytes value) - * 48 bytes of entropy (random bits) - -To use this type of device, this one has to be configured, and these -four elements must be passed one way or another. - -Assuming that you have a working libu2f-emu installed on the host. -There are three possible ways of configurations: - * ephemeral - * setup directory - * manual - -Ephemeral is the simplest way to configure, it lets the device generate -all the elements it needs for a single use of the lifetime of the device. - - qemu -usb -device u2f-emulated - -Setup directory allows to configure the device from a directory containing -four files: - * certificate.pem: ec x509 certificate - * private-key.pem: ec private key - * counter: counter value - * entropy: 48 bytes of entropy - - qemu -usb -device u2f-emulated,dir=$dir - -Manual allows to configure the device more finely by specifying each -of the elements necessary for the device: - * cert - * priv - * counter - * entropy - - qemu -usb -device u2f-emulated,cert=$DIR1/$FILE1,priv=$DIR2/$FILE2,counter=$DIR3/$FILE3,entropy=$DIR4/$FILE4 - - -4. 
Using u2f-passthru - -On the host specify the u2f-passthru device with a suitable hidraw: - - qemu -usb -device u2f-passthru,hidraw=/dev/hidraw0 - -Alternately, the u2f-passthru device can autoscan to take the first -U2F device it finds on the host (this requires a working libudev): - - qemu -usb -device u2f-passthru - - -5. Libu2f-emu - -The u2f-emulated device uses libu2f-emu for the U2F key emulation. Libu2f-emu -implements completely the U2F protocol device part for all specified -transport given by the FIDO Alliance. - -For more information about libu2f-emu see this page: -https://github.com/MattGorko/libu2f-emu. diff --git a/docs/user/main.rst b/docs/user/main.rst index 6f2ffa080f..f478635396 100644 --- a/docs/user/main.rst +++ b/docs/user/main.rst @@ -93,8 +93,13 @@ Debug options: ``-g port`` Wait gdb connection to port +``-one-insn-per-tb`` + Run the emulation with one guest instruction per translation block. + This slows down emulation a lot, but can be useful in some situations, + such as when trying to analyse the logs produced by the ``-d`` option. + ``-singlestep`` - Run the emulation in single step mode. + This is a deprecated synonym for the ``-one-insn-per-tb`` option. Environment variables: @@ -242,5 +247,10 @@ Debug options: ``-p pagesize`` Act as if the host page size was 'pagesize' bytes +``-one-insn-per-tb`` + Run the emulation with one guest instruction per translation block. + This slows down emulation a lot, but can be useful in some situations, + such as when trying to analyse the logs produced by the ``-d`` option. + ``-singlestep`` - Run the emulation in single step mode. + This is a deprecated synonym for the ``-one-insn-per-tb`` option. diff --git a/dump/dump-hmp-cmds.c b/dump/dump-hmp-cmds.c index e5053b04cd..b038785fee 100644 --- a/dump/dump-hmp-cmds.c +++ b/dump/dump-hmp-cmds.c @@ -1,5 +1,5 @@ /* - * Human Monitor Interface commands + * Windows crashdump (Human Monitor Interface commands) * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. 
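For illustration, the ``-one-insn-per-tb`` option documented in
docs/user/main.rst above might be combined with ``-d`` logging as follows;
the guest binary ``./a.out`` is a placeholder::

  # One guest instruction per translation block, logging translated guest
  # assembly and each block execution to qemu.log for later analysis
  qemu-x86_64 -one-insn-per-tb -d in_asm,exec -D qemu.log ./a.out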
diff --git a/dump/dump.c b/dump/dump.c index 1362810991..1f1a6edcab 100644 --- a/dump/dump.c +++ b/dump/dump.c @@ -14,11 +14,10 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" #include "elf.h" -#include "exec/hwaddr.h" +#include "qemu/bswap.h" +#include "exec/target_page.h" #include "monitor/monitor.h" -#include "sysemu/kvm.h" #include "sysemu/dump.h" -#include "sysemu/memory_mapping.h" #include "sysemu/runstate.h" #include "sysemu/cpus.h" #include "qapi/error.h" @@ -29,10 +28,8 @@ #include "qemu/main-loop.h" #include "hw/misc/vmcoreinfo.h" #include "migration/blocker.h" - -#ifdef TARGET_X86_64 +#include "hw/core/cpu.h" #include "win_dump.h" -#endif #include #ifdef CONFIG_LZO @@ -907,13 +904,13 @@ static void get_note_sizes(DumpState *s, const void *note, if (dump_is_64bit(s)) { const Elf64_Nhdr *hdr = note; note_head_sz = sizeof(Elf64_Nhdr); - name_sz = tswap64(hdr->n_namesz); - desc_sz = tswap64(hdr->n_descsz); + name_sz = cpu_to_dump64(s, hdr->n_namesz); + desc_sz = cpu_to_dump64(s, hdr->n_descsz); } else { const Elf32_Nhdr *hdr = note; note_head_sz = sizeof(Elf32_Nhdr); - name_sz = tswap32(hdr->n_namesz); - desc_sz = tswap32(hdr->n_descsz); + name_sz = cpu_to_dump32(s, hdr->n_namesz); + desc_sz = cpu_to_dump32(s, hdr->n_descsz); } if (note_head_size) { @@ -1860,7 +1857,7 @@ static void dump_init(DumpState *s, int fd, bool has_format, } if (!s->dump_info.page_size) { - s->dump_info.page_size = TARGET_PAGE_SIZE; + s->dump_info.page_size = qemu_target_page_size(); } s->note_size = cpu_get_note_size(s->dump_info.d_class, @@ -2022,9 +2019,7 @@ static void dump_process(DumpState *s, Error **errp) DumpQueryResult *result = NULL; if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) { -#ifdef TARGET_X86_64 create_win_dump(s, errp); -#endif } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) { create_kdump_vmcore(s, errp); } else { @@ -2127,12 +2122,10 @@ void qmp_dump_guest_memory(bool paging, const char *file, } #endif -#ifndef TARGET_X86_64 - if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) { - error_setg(errp, "Windows dump is only available for x86-64"); + if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP + && !win_dump_available(errp)) { return; } -#endif #if !defined(WIN32) if (strstart(file, "fd:", &p)) { @@ -2214,10 +2207,9 @@ DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp) QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY); #endif - /* Windows dump is available only if target is x86_64 */ -#ifdef TARGET_X86_64 - QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_WIN_DMP); -#endif + if (win_dump_available(NULL)) { + QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_WIN_DMP); + } return cap; } diff --git a/dump/meson.build b/dump/meson.build index 2eff29c3ea..df52ee4268 100644 --- a/dump/meson.build +++ b/dump/meson.build @@ -1,4 +1,2 @@ -softmmu_ss.add(files('dump-hmp-cmds.c')) - -specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: [files('dump.c'), snappy, lzo]) -specific_ss.add(when: ['CONFIG_SOFTMMU', 'TARGET_X86_64'], if_true: files('win_dump.c')) +softmmu_ss.add([files('dump.c', 'dump-hmp-cmds.c'), snappy, lzo]) +specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: files('win_dump.c')) diff --git a/dump/win_dump.c b/dump/win_dump.c index f20b6051b6..b7bfaff379 100644 --- a/dump/win_dump.c +++ b/dump/win_dump.c @@ -1,5 +1,5 @@ /* - * Windows crashdump + * Windows crashdump (target specific implementations) * * Copyright (c) 2018 Virtuozzo International GmbH * @@ -9,19 +9,22 @@ */ #include 
"qemu/osdep.h" -#include "qemu/cutils.h" -#include "elf.h" -#include "exec/hwaddr.h" -#include "monitor/monitor.h" -#include "sysemu/kvm.h" #include "sysemu/dump.h" -#include "sysemu/memory_mapping.h" -#include "sysemu/cpus.h" #include "qapi/error.h" -#include "qapi/qmp/qerror.h" #include "qemu/error-report.h" -#include "hw/misc/vmcoreinfo.h" +#include "qapi/qmp/qerror.h" +#include "exec/cpu-defs.h" +#include "hw/core/cpu.h" +#include "qemu/win_dump_defs.h" #include "win_dump.h" +#include "cpu.h" + +#if defined(TARGET_X86_64) + +bool win_dump_available(Error **errp) +{ + return true; +} static size_t win_dump_ptr_size(bool x64) { @@ -475,3 +478,19 @@ out_cr3: return; } + +#else /* !TARGET_X86_64 */ + +bool win_dump_available(Error **errp) +{ + error_setg(errp, "Windows dump is only available for x86-64"); + + return false; +} + +void create_win_dump(DumpState *s, Error **errp) +{ + win_dump_available(errp); +} + +#endif diff --git a/dump/win_dump.h b/dump/win_dump.h index b8c25348f4..c9b49f87dc 100644 --- a/dump/win_dump.h +++ b/dump/win_dump.h @@ -11,7 +11,10 @@ #ifndef WIN_DUMP_H #define WIN_DUMP_H -#include "qemu/win_dump_defs.h" +#include "sysemu/dump.h" + +/* Check Windows dump availability for the current target */ +bool win_dump_available(Error **errp); void create_win_dump(DumpState *s, Error **errp); diff --git a/ebpf/rss.bpf.skeleton.h b/ebpf/rss.bpf.skeleton.h index 126683eb87..18eb2adb12 100644 --- a/ebpf/rss.bpf.skeleton.h +++ b/ebpf/rss.bpf.skeleton.h @@ -1,9 +1,10 @@ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ -/* THIS FILE IS AUTOGENERATED! */ +/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ #ifndef __RSS_BPF_SKEL_H__ #define __RSS_BPF_SKEL_H__ +#include #include #include @@ -12,8 +13,8 @@ struct rss_bpf { struct bpf_object *obj; struct { struct bpf_map *tap_rss_map_configurations; - struct bpf_map *tap_rss_map_indirection_table; struct bpf_map *tap_rss_map_toeplitz_key; + struct bpf_map *tap_rss_map_indirection_table; } maps; struct { struct bpf_program *tun_rss_steering_prog; @@ -21,6 +22,16 @@ struct rss_bpf { struct { struct bpf_link *tun_rss_steering_prog; } links; + +#ifdef __cplusplus + static inline struct rss_bpf *open(const struct bpf_object_open_opts *opts = nullptr); + static inline struct rss_bpf *open_and_load(); + static inline int load(struct rss_bpf *skel); + static inline int attach(struct rss_bpf *skel); + static inline void detach(struct rss_bpf *skel); + static inline void destroy(struct rss_bpf *skel); + static inline const void *elf_bytes(size_t *sz); +#endif /* __cplusplus */ }; static void @@ -40,18 +51,26 @@ static inline struct rss_bpf * rss_bpf__open_opts(const struct bpf_object_open_opts *opts) { struct rss_bpf *obj; + int err; obj = (struct rss_bpf *)calloc(1, sizeof(*obj)); - if (!obj) + if (!obj) { + errno = ENOMEM; return NULL; - if (rss_bpf__create_skeleton(obj)) - goto err; - if (bpf_object__open_skeleton(obj->skeleton, opts)) - goto err; + } + + err = rss_bpf__create_skeleton(obj); + if (err) + goto err_out; + + err = bpf_object__open_skeleton(obj->skeleton, opts); + if (err) + goto err_out; return obj; -err: +err_out: rss_bpf__destroy(obj); + errno = -err; return NULL; } @@ -71,12 +90,15 @@ static inline struct rss_bpf * rss_bpf__open_and_load(void) { struct rss_bpf *obj; + int err; obj = rss_bpf__open(); if (!obj) return NULL; - if (rss_bpf__load(obj)) { + err = rss_bpf__load(obj); + if (err) { rss_bpf__destroy(obj); + errno = -err; return NULL; } return obj; @@ -91,18 +113,22 @@ rss_bpf__attach(struct rss_bpf *obj) static 
inline void rss_bpf__detach(struct rss_bpf *obj) { - return bpf_object__detach_skeleton(obj->skeleton); + bpf_object__detach_skeleton(obj->skeleton); } +static inline const void *rss_bpf__elf_bytes(size_t *sz); + static inline int rss_bpf__create_skeleton(struct rss_bpf *obj) { struct bpf_object_skeleton *s; + int err; s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s)); - if (!s) - return -1; - obj->skeleton = s; + if (!s) { + err = -ENOMEM; + goto err; + } s->sz = sizeof(*s); s->name = "rss_bpf"; @@ -112,320 +138,855 @@ rss_bpf__create_skeleton(struct rss_bpf *obj) s->map_cnt = 3; s->map_skel_sz = sizeof(*s->maps); s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz); - if (!s->maps) + if (!s->maps) { + err = -ENOMEM; goto err; + } s->maps[0].name = "tap_rss_map_configurations"; s->maps[0].map = &obj->maps.tap_rss_map_configurations; - s->maps[1].name = "tap_rss_map_indirection_table"; - s->maps[1].map = &obj->maps.tap_rss_map_indirection_table; + s->maps[1].name = "tap_rss_map_toeplitz_key"; + s->maps[1].map = &obj->maps.tap_rss_map_toeplitz_key; - s->maps[2].name = "tap_rss_map_toeplitz_key"; - s->maps[2].map = &obj->maps.tap_rss_map_toeplitz_key; + s->maps[2].name = "tap_rss_map_indirection_table"; + s->maps[2].map = &obj->maps.tap_rss_map_indirection_table; /* programs */ s->prog_cnt = 1; s->prog_skel_sz = sizeof(*s->progs); s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz); - if (!s->progs) + if (!s->progs) { + err = -ENOMEM; goto err; + } s->progs[0].name = "tun_rss_steering_prog"; s->progs[0].prog = &obj->progs.tun_rss_steering_prog; s->progs[0].link = &obj->links.tun_rss_steering_prog; - s->data_sz = 8088; - s->data = (void *)"\ -\x7f\x45\x4c\x46\x02\x01\x01\0\0\0\0\0\0\0\0\0\x01\0\xf7\0\x01\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\x18\x1d\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\x40\0\x0a\0\ -\x01\0\xbf\x18\0\0\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\x4c\xff\0\0\0\0\xbf\xa7\ -\0\0\0\0\0\0\x07\x07\0\0\x4c\xff\xff\xff\x18\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\xbf\x72\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\xbf\x06\0\0\0\0\0\0\x18\x01\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\xbf\x72\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\xbf\x07\0\0\0\0\0\0\ -\x18\0\0\0\xff\xff\xff\xff\0\0\0\0\0\0\0\0\x15\x06\x66\x02\0\0\0\0\xbf\x79\0\0\ -\0\0\0\0\x15\x09\x64\x02\0\0\0\0\x71\x61\0\0\0\0\0\0\x55\x01\x01\0\0\0\0\0\x05\ -\0\x5d\x02\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xc0\xff\0\0\0\0\x7b\x1a\xb8\xff\ -\0\0\0\0\x7b\x1a\xb0\xff\0\0\0\0\x7b\x1a\xa8\xff\0\0\0\0\x7b\x1a\xa0\xff\0\0\0\ -\0\x63\x1a\x98\xff\0\0\0\0\x7b\x1a\x90\xff\0\0\0\0\x7b\x1a\x88\xff\0\0\0\0\x7b\ -\x1a\x80\xff\0\0\0\0\x7b\x1a\x78\xff\0\0\0\0\x7b\x1a\x70\xff\0\0\0\0\x7b\x1a\ -\x68\xff\0\0\0\0\x7b\x1a\x60\xff\0\0\0\0\x7b\x1a\x58\xff\0\0\0\0\x7b\x1a\x50\ -\xff\0\0\0\0\x15\x08\x4c\x02\0\0\0\0\x6b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\ -\0\x07\x03\0\0\xd0\xff\xff\xff\xbf\x81\0\0\0\0\0\0\xb7\x02\0\0\x0c\0\0\0\xb7\ -\x04\0\0\x02\0\0\0\xb7\x05\0\0\0\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\ -\x77\0\0\0\x20\0\0\0\x55\0\x11\0\0\0\0\0\xb7\x02\0\0\x10\0\0\0\x69\xa1\xd0\xff\ -\0\0\0\0\xbf\x13\0\0\0\0\0\0\xdc\x03\0\0\x10\0\0\0\x15\x03\x02\0\0\x81\0\0\x55\ -\x03\x0c\0\xa8\x88\0\0\xb7\x02\0\0\x14\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\ -\xd0\xff\xff\xff\xbf\x81\0\0\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\0\0\0\0\ -\x85\0\0\0\x44\0\0\0\x69\xa1\xd0\xff\0\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\ -\0\0\0\x15\0\x01\0\0\0\0\0\x05\0\x2f\x02\0\0\0\0\x15\x01\x2e\x02\0\0\0\0\x7b\ 
-\x9a\x30\xff\0\0\0\0\x15\x01\x57\0\x86\xdd\0\0\x55\x01\x3b\0\x08\0\0\0\x7b\x7a\ -\x20\xff\0\0\0\0\xb7\x07\0\0\x01\0\0\0\x73\x7a\x50\xff\0\0\0\0\xb7\x01\0\0\0\0\ -\0\0\x63\x1a\xe0\xff\0\0\0\0\x7b\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\ -\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xd0\xff\xff\xff\xbf\x81\0\0\0\0\0\0\xb7\x02\0\ -\0\0\0\0\0\xb7\x04\0\0\x14\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\ -\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\x1a\x02\0\0\0\0\x69\xa1\xd6\xff\0\0\ -\0\0\x55\x01\x01\0\0\0\0\0\xb7\x07\0\0\0\0\0\0\x61\xa1\xdc\xff\0\0\0\0\x63\x1a\ -\x5c\xff\0\0\0\0\x61\xa1\xe0\xff\0\0\0\0\x63\x1a\x60\xff\0\0\0\0\x73\x7a\x56\ -\xff\0\0\0\0\x71\xa9\xd9\xff\0\0\0\0\x71\xa1\xd0\xff\0\0\0\0\x67\x01\0\0\x02\0\ -\0\0\x57\x01\0\0\x3c\0\0\0\x7b\x1a\x40\xff\0\0\0\0\x79\xa7\x20\xff\0\0\0\0\xbf\ -\x91\0\0\0\0\0\0\x57\x01\0\0\xff\0\0\0\x15\x01\x19\0\0\0\0\0\x71\xa1\x56\xff\0\ -\0\0\0\x55\x01\x17\0\0\0\0\0\x57\x09\0\0\xff\0\0\0\x15\x09\x7a\x01\x11\0\0\0\ -\x55\x09\x14\0\x06\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x53\xff\0\0\0\0\xb7\x01\ -\0\0\0\0\0\0\x63\x1a\xe0\xff\0\0\0\0\x7b\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\ -\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xd0\xff\xff\xff\xbf\x81\0\0\0\0\0\0\x79\ -\xa2\x40\xff\0\0\0\0\xb7\x04\0\0\x14\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\ -\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\xf4\x01\0\0\0\0\x69\xa1\ -\xd0\xff\0\0\0\0\x6b\x1a\x58\xff\0\0\0\0\x69\xa1\xd2\xff\0\0\0\0\x6b\x1a\x5a\ -\xff\0\0\0\0\x71\xa1\x50\xff\0\0\0\0\x15\x01\xd4\0\0\0\0\0\x71\x62\x03\0\0\0\0\ -\0\x67\x02\0\0\x08\0\0\0\x71\x61\x02\0\0\0\0\0\x4f\x12\0\0\0\0\0\0\x71\x63\x04\ -\0\0\0\0\0\x71\x61\x05\0\0\0\0\0\x67\x01\0\0\x08\0\0\0\x4f\x31\0\0\0\0\0\0\x67\ -\x01\0\0\x10\0\0\0\x4f\x21\0\0\0\0\0\0\x71\xa2\x53\xff\0\0\0\0\x79\xa0\x30\xff\ -\0\0\0\0\x15\x02\x06\x01\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x02\0\0\0\x15\ -\x02\x03\x01\0\0\0\0\x61\xa1\x5c\xff\0\0\0\0\x63\x1a\xa0\xff\0\0\0\0\x61\xa1\ -\x60\xff\0\0\0\0\x63\x1a\xa4\xff\0\0\0\0\x69\xa1\x58\xff\0\0\0\0\x6b\x1a\xa8\ -\xff\0\0\0\0\x69\xa1\x5a\xff\0\0\0\0\x6b\x1a\xaa\xff\0\0\0\0\x05\0\x65\x01\0\0\ -\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x51\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\x1a\ -\xf0\xff\0\0\0\0\x7b\x1a\xe8\xff\0\0\0\0\x7b\x1a\xe0\xff\0\0\0\0\x7b\x1a\xd8\ -\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xd0\xff\ -\xff\xff\xb7\x01\0\0\x28\0\0\0\x7b\x1a\x40\xff\0\0\0\0\xbf\x81\0\0\0\0\0\0\xb7\ -\x02\0\0\0\0\0\0\xb7\x04\0\0\x28\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\ -\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\x10\x01\0\0\0\0\x79\xa1\xe0\ -\xff\0\0\0\0\x63\x1a\x64\xff\0\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x68\xff\0\0\ -\0\0\x79\xa1\xd8\xff\0\0\0\0\x63\x1a\x5c\xff\0\0\0\0\x77\x01\0\0\x20\0\0\0\x63\ -\x1a\x60\xff\0\0\0\0\x79\xa1\xe8\xff\0\0\0\0\x63\x1a\x6c\xff\0\0\0\0\x77\x01\0\ -\0\x20\0\0\0\x63\x1a\x70\xff\0\0\0\0\x79\xa1\xf0\xff\0\0\0\0\x63\x1a\x74\xff\0\ -\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x78\xff\0\0\0\0\x71\xa9\xd6\xff\0\0\0\0\ -\x25\x09\xff\0\x3c\0\0\0\xb7\x01\0\0\x01\0\0\0\x6f\x91\0\0\0\0\0\0\x18\x02\0\0\ -\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x5f\x21\0\0\0\0\0\0\x55\x01\x01\0\0\0\0\0\x05\0\ -\xf8\0\0\0\0\0\xb7\x01\0\0\0\0\0\0\x6b\x1a\xfe\xff\0\0\0\0\xb7\x01\0\0\x28\0\0\ -\0\x7b\x1a\x40\xff\0\0\0\0\xbf\xa1\0\0\0\0\0\0\x07\x01\0\0\x8c\xff\xff\xff\x7b\ -\x1a\x18\xff\0\0\0\0\xbf\xa1\0\0\0\0\0\0\x07\x01\0\0\x7c\xff\xff\xff\x7b\x1a\ -\x10\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\x1a\x28\xff\0\0\0\0\x7b\x7a\x20\xff\0\ -\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xfe\xff\xff\xff\xbf\x81\0\0\0\0\0\0\x79\ 
-\xa2\x40\xff\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\ -\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x15\0\x01\0\0\0\0\0\x05\0\x90\ -\x01\0\0\0\0\xbf\x91\0\0\0\0\0\0\x15\x01\x23\0\x3c\0\0\0\x15\x01\x59\0\x2c\0\0\ -\0\x55\x01\x5a\0\x2b\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xf8\xff\0\0\0\0\xbf\xa3\ -\0\0\0\0\0\0\x07\x03\0\0\xf8\xff\xff\xff\xbf\x81\0\0\0\0\0\0\x79\xa2\x40\xff\0\ -\0\0\0\xb7\x04\0\0\x04\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\ -\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x03\x01\0\0\0\ -\0\x71\xa1\xfa\xff\0\0\0\0\x55\x01\x4b\0\x02\0\0\0\x71\xa1\xf9\xff\0\0\0\0\x55\ -\x01\x49\0\x02\0\0\0\x71\xa1\xfb\xff\0\0\0\0\x55\x01\x47\0\x01\0\0\0\x79\xa2\ -\x40\xff\0\0\0\0\x07\x02\0\0\x08\0\0\0\xbf\x81\0\0\0\0\0\0\x79\xa3\x18\xff\0\0\ -\0\0\xb7\x04\0\0\x10\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\0\ -\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\xf2\0\0\0\0\0\ -\xb7\x01\0\0\x01\0\0\0\x73\x1a\x55\xff\0\0\0\0\x05\0\x39\0\0\0\0\0\xb7\x01\0\0\ -\0\0\0\0\x6b\x1a\xf8\xff\0\0\0\0\xb7\x09\0\0\x02\0\0\0\xb7\x07\0\0\x1e\0\0\0\ -\x05\0\x0e\0\0\0\0\0\x79\xa2\x38\xff\0\0\0\0\x0f\x29\0\0\0\0\0\0\xbf\x92\0\0\0\ -\0\0\0\x07\x02\0\0\x01\0\0\0\x71\xa3\xff\xff\0\0\0\0\x67\x03\0\0\x03\0\0\0\x2d\ -\x23\x02\0\0\0\0\0\x79\xa7\x20\xff\0\0\0\0\x05\0\x2b\0\0\0\0\0\x07\x07\0\0\xff\ -\xff\xff\xff\xbf\x72\0\0\0\0\0\0\x67\x02\0\0\x20\0\0\0\x77\x02\0\0\x20\0\0\0\ -\x15\x02\xf9\xff\0\0\0\0\x7b\x9a\x38\xff\0\0\0\0\x79\xa1\x40\xff\0\0\0\0\x0f\ -\x19\0\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xf8\xff\xff\xff\xbf\x81\0\0\0\ -\0\0\0\xbf\x92\0\0\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\ -\0\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\ -\x55\x01\x94\0\0\0\0\0\x71\xa2\xf8\xff\0\0\0\0\x55\x02\x0f\0\xc9\0\0\0\x07\x09\ -\0\0\x02\0\0\0\xbf\x81\0\0\0\0\0\0\xbf\x92\0\0\0\0\0\0\x79\xa3\x10\xff\0\0\0\0\ -\xb7\x04\0\0\x10\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\0\0\0\ -\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x87\0\0\0\0\0\xb7\ -\x01\0\0\x01\0\0\0\x73\x1a\x54\xff\0\0\0\0\x79\xa7\x20\xff\0\0\0\0\x05\0\x07\0\ -\0\0\0\0\xb7\x09\0\0\x01\0\0\0\x15\x02\xd1\xff\0\0\0\0\x71\xa9\xf9\xff\0\0\0\0\ -\x07\x09\0\0\x02\0\0\0\x05\0\xce\xff\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x56\ -\xff\0\0\0\0\x71\xa1\xff\xff\0\0\0\0\x67\x01\0\0\x03\0\0\0\x79\xa2\x40\xff\0\0\ -\0\0\x0f\x12\0\0\0\0\0\0\x07\x02\0\0\x08\0\0\0\x7b\x2a\x40\xff\0\0\0\0\x71\xa9\ -\xfe\xff\0\0\0\0\x25\x09\x0e\0\x3c\0\0\0\xb7\x01\0\0\x01\0\0\0\x6f\x91\0\0\0\0\ -\0\0\x18\x02\0\0\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x5f\x21\0\0\0\0\0\0\x55\x01\x01\ -\0\0\0\0\0\x05\0\x07\0\0\0\0\0\x79\xa1\x28\xff\0\0\0\0\x07\x01\0\0\x01\0\0\0\ -\x7b\x1a\x28\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\ -\x82\xff\x0b\0\0\0\x05\0\x10\xff\0\0\0\0\x15\x09\xf8\xff\x87\0\0\0\x05\0\xfd\ -\xff\0\0\0\0\x71\xa1\x51\xff\0\0\0\0\x79\xa0\x30\xff\0\0\0\0\x15\x01\x17\x01\0\ -\0\0\0\x71\x62\x03\0\0\0\0\0\x67\x02\0\0\x08\0\0\0\x71\x61\x02\0\0\0\0\0\x4f\ -\x12\0\0\0\0\0\0\x71\x63\x04\0\0\0\0\0\x71\x61\x05\0\0\0\0\0\x67\x01\0\0\x08\0\ -\0\0\x4f\x31\0\0\0\0\0\0\x67\x01\0\0\x10\0\0\0\x4f\x21\0\0\0\0\0\0\x71\xa2\x53\ -\xff\0\0\0\0\x15\x02\x3d\0\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x10\0\0\0\ -\x15\x02\x3a\0\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\x5c\xff\xff\xff\x71\xa4\ -\x54\xff\0\0\0\0\xbf\x23\0\0\0\0\0\0\x15\x04\x02\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\ -\x07\x03\0\0\x7c\xff\xff\xff\x67\x01\0\0\x38\0\0\0\xc7\x01\0\0\x38\0\0\0\x65\ 
-\x01\x01\0\xff\xff\xff\xff\xbf\x32\0\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\ -\x6c\xff\xff\xff\x71\xa5\x55\xff\0\0\0\0\xbf\x34\0\0\0\0\0\0\x15\x05\x02\0\0\0\ -\0\0\xbf\xa4\0\0\0\0\0\0\x07\x04\0\0\x8c\xff\xff\xff\x65\x01\x01\0\xff\xff\xff\ -\xff\xbf\x43\0\0\0\0\0\0\x61\x21\x04\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\x24\0\ -\0\0\0\0\0\x4f\x41\0\0\0\0\0\0\x7b\x1a\xa0\xff\0\0\0\0\x61\x21\x08\0\0\0\0\0\ -\x61\x22\x0c\0\0\0\0\0\x67\x02\0\0\x20\0\0\0\x4f\x12\0\0\0\0\0\0\x7b\x2a\xa8\ -\xff\0\0\0\0\x61\x31\0\0\0\0\0\0\x61\x32\x04\0\0\0\0\0\x61\x34\x08\0\0\0\0\0\ -\x61\x33\x0c\0\0\0\0\0\x69\xa5\x5a\xff\0\0\0\0\x6b\x5a\xc2\xff\0\0\0\0\x69\xa5\ -\x58\xff\0\0\0\0\x6b\x5a\xc0\xff\0\0\0\0\x67\x03\0\0\x20\0\0\0\x4f\x43\0\0\0\0\ -\0\0\x7b\x3a\xb8\xff\0\0\0\0\x67\x02\0\0\x20\0\0\0\x4f\x12\0\0\0\0\0\0\x7b\x2a\ -\xb0\xff\0\0\0\0\x05\0\x6b\0\0\0\0\0\x71\xa2\x52\xff\0\0\0\0\x15\x02\x04\0\0\0\ -\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x04\0\0\0\x15\x02\x01\0\0\0\0\0\x05\0\xf7\ -\xfe\0\0\0\0\x57\x01\0\0\x01\0\0\0\x15\x01\xd3\0\0\0\0\0\x61\xa1\x5c\xff\0\0\0\ -\0\x63\x1a\xa0\xff\0\0\0\0\x61\xa1\x60\xff\0\0\0\0\x63\x1a\xa4\xff\0\0\0\0\x05\ -\0\x5e\0\0\0\0\0\x71\xa2\x52\xff\0\0\0\0\x15\x02\x1e\0\0\0\0\0\xbf\x12\0\0\0\0\ -\0\0\x57\x02\0\0\x20\0\0\0\x15\x02\x1b\0\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\ -\0\x5c\xff\xff\xff\x71\xa4\x54\xff\0\0\0\0\xbf\x23\0\0\0\0\0\0\x15\x04\x02\0\0\ -\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\x7c\xff\xff\xff\x57\x01\0\0\0\x01\0\0\ -\x15\x01\x01\0\0\0\0\0\xbf\x32\0\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\x6c\ -\xff\xff\xff\x71\xa5\x55\xff\0\0\0\0\xbf\x34\0\0\0\0\0\0\x15\x05\x02\0\0\0\0\0\ -\xbf\xa4\0\0\0\0\0\0\x07\x04\0\0\x8c\xff\xff\xff\x15\x01\xc3\xff\0\0\0\0\x05\0\ -\xc1\xff\0\0\0\0\xb7\x09\0\0\x3c\0\0\0\x79\xa7\x20\xff\0\0\0\0\x67\0\0\0\x20\0\ -\0\0\x77\0\0\0\x20\0\0\0\x15\0\xa5\xfe\0\0\0\0\x05\0\xb0\0\0\0\0\0\x15\x09\x07\ -\xff\x87\0\0\0\x05\0\xa2\xfe\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x08\0\0\0\ -\x15\x02\xab\0\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\x5c\xff\xff\xff\x71\xa4\ -\x54\xff\0\0\0\0\xbf\x23\0\0\0\0\0\0\x15\x04\x02\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\ -\x07\x03\0\0\x7c\xff\xff\xff\x57\x01\0\0\x40\0\0\0\x15\x01\x01\0\0\0\0\0\xbf\ -\x32\0\0\0\0\0\0\x61\x23\x04\0\0\0\0\0\x67\x03\0\0\x20\0\0\0\x61\x24\0\0\0\0\0\ -\0\x4f\x43\0\0\0\0\0\0\x7b\x3a\xa0\xff\0\0\0\0\x61\x23\x08\0\0\0\0\0\x61\x22\ -\x0c\0\0\0\0\0\x67\x02\0\0\x20\0\0\0\x4f\x32\0\0\0\0\0\0\x7b\x2a\xa8\xff\0\0\0\ -\0\x15\x01\x1c\0\0\0\0\0\x71\xa1\x55\xff\0\0\0\0\x15\x01\x1a\0\0\0\0\0\x61\xa1\ -\x98\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\xa2\x94\xff\0\0\0\0\x4f\x21\0\0\0\0\ -\0\0\x7b\x1a\xb8\xff\0\0\0\0\x61\xa1\x90\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\ -\xa2\x8c\xff\0\0\0\0\x05\0\x19\0\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x52\xff\ -\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\ -\x03\0\0\xd0\xff\xff\xff\xbf\x81\0\0\0\0\0\0\x79\xa2\x40\xff\0\0\0\0\xb7\x04\0\ -\0\x08\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\ -\0\0\0\x20\0\0\0\x55\0\x7d\0\0\0\0\0\x05\0\x88\xfe\0\0\0\0\xb7\x09\0\0\x2b\0\0\ -\0\x05\0\xc6\xff\0\0\0\0\x61\xa1\x78\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\xa2\ -\x74\xff\0\0\0\0\x4f\x21\0\0\0\0\0\0\x7b\x1a\xb8\xff\0\0\0\0\x61\xa1\x70\xff\0\ -\0\0\0\x67\x01\0\0\x20\0\0\0\x61\xa2\x6c\xff\0\0\0\0\x4f\x21\0\0\0\0\0\0\x7b\ -\x1a\xb0\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x07\x07\0\0\x04\0\0\0\x61\x03\0\0\0\0\ -\0\0\xb7\x05\0\0\0\0\0\0\x05\0\x4e\0\0\0\0\0\xaf\x52\0\0\0\0\0\0\xbf\x75\0\0\0\ -\0\0\0\x0f\x15\0\0\0\0\0\0\x71\x55\0\0\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\ 
-\0\0\0\0\0\x77\0\0\0\x07\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\ -\0\x39\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x02\0\0\0\0\0\0\xbf\ -\x50\0\0\0\0\0\0\x77\0\0\0\x06\0\0\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\ -\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3a\0\0\0\xc7\0\0\0\x3f\0\0\ -\0\x5f\x30\0\0\0\0\0\0\xaf\x02\0\0\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\ -\0\0\0\x77\0\0\0\x05\0\0\0\x57\0\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\ -\0\0\0\0\x67\0\0\0\x3b\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x02\0\ -\0\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x04\0\0\0\x57\0\ -\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3c\0\0\0\xc7\ -\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x02\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\ -\x77\0\0\0\x03\0\0\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\ -\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3d\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\ -\0\0\0\xaf\x02\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x02\0\0\0\x57\0\0\0\ -\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\ -\0\0\x3e\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x02\0\0\0\0\0\0\xbf\ -\x50\0\0\0\0\0\0\x77\0\0\0\x01\0\0\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\ -\x4f\x03\0\0\0\0\0\0\x57\x04\0\0\x01\0\0\0\x87\x04\0\0\0\0\0\0\x5f\x34\0\0\0\0\ -\0\0\xaf\x42\0\0\0\0\0\0\x57\x05\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x53\0\ -\0\0\0\0\0\x07\x01\0\0\x01\0\0\0\xbf\x25\0\0\0\0\0\0\x15\x01\x0b\0\x24\0\0\0\ -\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\xa0\xff\xff\xff\x0f\x12\0\0\0\0\0\0\x71\x24\0\ -\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x38\0\0\0\xc7\0\0\0\x38\0\0\0\xb7\x02\ -\0\0\0\0\0\0\x65\0\xa9\xff\xff\xff\xff\xff\xbf\x32\0\0\0\0\0\0\x05\0\xa7\xff\0\ -\0\0\0\xbf\x21\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x15\x01\ -\x0e\0\0\0\0\0\x71\x63\x06\0\0\0\0\0\x71\x64\x07\0\0\0\0\0\x67\x04\0\0\x08\0\0\ -\0\x4f\x34\0\0\0\0\0\0\x3f\x41\0\0\0\0\0\0\x2f\x41\0\0\0\0\0\0\x1f\x12\0\0\0\0\ -\0\0\x63\x2a\x50\xff\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\x50\xff\xff\xff\ -\x18\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\x55\0\x05\0\0\0\0\0\ -\x71\x61\x08\0\0\0\0\0\x71\x60\x09\0\0\0\0\0\x67\0\0\0\x08\0\0\0\x4f\x10\0\0\0\ -\0\0\0\x95\0\0\0\0\0\0\0\x69\0\0\0\0\0\0\0\x05\0\xfd\xff\0\0\0\0\x02\0\0\0\x04\ -\0\0\0\x0a\0\0\0\x01\0\0\0\0\0\0\0\x02\0\0\0\x04\0\0\0\x28\0\0\0\x01\0\0\0\0\0\ -\0\0\x02\0\0\0\x04\0\0\0\x02\0\0\0\x80\0\0\0\0\0\0\0\x47\x50\x4c\x20\x76\x32\0\ -\0\0\0\0\0\x10\0\0\0\0\0\0\0\x01\x7a\x52\0\x08\x7c\x0b\x01\x0c\0\0\0\x18\0\0\0\ -\x18\0\0\0\0\0\0\0\0\0\0\0\xd8\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\0\xa0\0\0\0\x04\0\xf1\xff\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\x60\x02\0\0\0\0\x03\0\x20\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x3f\x02\0\0\0\0\ -\x03\0\xd0\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xed\x01\0\0\0\0\x03\0\x10\x10\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\xd4\x01\0\0\0\0\x03\0\x20\x10\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\xa3\x01\0\0\0\0\x03\0\xb8\x12\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x63\x01\0\0\0\0\ -\x03\0\x48\x10\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x2a\x01\0\0\0\0\x03\0\x10\x13\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\xe1\0\0\0\0\0\x03\0\xa0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\x2e\x02\0\0\0\0\x03\0\x28\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x68\x02\0\0\0\0\x03\ -\0\xc0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x36\x02\0\0\0\0\x03\0\xc8\x13\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\x22\x01\0\0\0\0\x03\0\xe8\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\x02\x01\0\0\0\0\x03\0\x40\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xd9\0\0\0\0\0\x03\0\ 
-\xf8\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x26\x02\0\0\0\0\x03\0\x20\x0e\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\xcc\x01\0\0\0\0\x03\0\x60\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x9b\ -\x01\0\0\0\0\x03\0\xc8\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x5b\x01\0\0\0\0\x03\0\ -\x20\x07\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x7c\x01\0\0\0\0\x03\0\x48\x08\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\x53\x01\0\0\0\0\x03\0\xb8\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1a\ -\x01\0\0\0\0\x03\0\xe0\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x84\x01\0\0\0\0\x03\0\ -\xb8\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1e\x02\0\0\0\0\x03\0\xd8\x09\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\xc4\x01\0\0\0\0\x03\0\x70\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x93\ -\x01\0\0\0\0\x03\0\xa8\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x74\x01\0\0\0\0\x03\0\ -\xf0\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x4b\x01\0\0\0\0\x03\0\0\x0a\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\x12\x01\0\0\0\0\x03\0\x10\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xfa\0\ -\0\0\0\0\x03\0\xc0\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x58\x02\0\0\0\0\x03\0\x88\ -\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x16\x02\0\0\0\0\x03\0\xb8\x0a\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\xe5\x01\0\0\0\0\x03\0\xc0\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xbc\x01\ -\0\0\0\0\x03\0\0\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x8b\x01\0\0\0\0\x03\0\x18\x0e\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xd1\0\0\0\0\0\x03\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\0\x50\x02\0\0\0\0\x03\0\x20\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0e\x02\0\0\0\0\ -\x03\0\x48\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x6c\x01\0\0\0\0\x03\0\xb0\x04\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\x43\x01\0\0\0\0\x03\0\xc8\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\xc9\0\0\0\0\0\x03\0\xf8\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x06\x02\0\0\0\0\x03\ -\0\xd0\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x3b\x01\0\0\0\0\x03\0\x98\x0b\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\xf2\0\0\0\0\0\x03\0\xb8\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x48\ -\x02\0\0\0\0\x03\0\xf0\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xfe\x01\0\0\0\0\x03\0\ -\xf8\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xdd\x01\0\0\0\0\x03\0\0\x0c\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\xb4\x01\0\0\0\0\x03\0\x30\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0a\ -\x01\0\0\0\0\x03\0\x90\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xc1\0\0\0\0\0\x03\0\xa8\ -\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xba\0\0\0\0\0\x03\0\xd0\x01\0\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\xf6\x01\0\0\0\0\x03\0\xe0\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xac\x01\0\ -\0\0\0\x03\0\x30\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x33\x01\0\0\0\0\x03\0\x80\x0e\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xea\0\0\0\0\0\x03\0\x98\x0e\0\0\0\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\x03\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x6b\0\0\0\x11\0\x06\ -\0\0\0\0\0\0\0\0\0\x07\0\0\0\0\0\0\0\x25\0\0\0\x11\0\x05\0\0\0\0\0\0\0\0\0\x14\ -\0\0\0\0\0\0\0\x82\0\0\0\x11\0\x05\0\x28\0\0\0\0\0\0\0\x14\0\0\0\0\0\0\0\x01\0\ -\0\0\x11\0\x05\0\x14\0\0\0\0\0\0\0\x14\0\0\0\0\0\0\0\x40\0\0\0\x12\0\x03\0\0\0\ -\0\0\0\0\0\0\xd8\x13\0\0\0\0\0\0\x28\0\0\0\0\0\0\0\x01\0\0\0\x3a\0\0\0\x50\0\0\ -\0\0\0\0\0\x01\0\0\0\x3c\0\0\0\x80\x13\0\0\0\0\0\0\x01\0\0\0\x3b\0\0\0\x1c\0\0\ -\0\0\0\0\0\x01\0\0\0\x38\0\0\0\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\ -\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\x5f\x6b\x65\x79\0\x2e\x74\x65\x78\x74\0\ -\x6d\x61\x70\x73\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x63\x6f\x6e\ -\x66\x69\x67\x75\x72\x61\x74\x69\x6f\x6e\x73\0\x74\x75\x6e\x5f\x72\x73\x73\x5f\ -\x73\x74\x65\x65\x72\x69\x6e\x67\x5f\x70\x72\x6f\x67\0\x2e\x72\x65\x6c\x74\x75\ -\x6e\x5f\x72\x73\x73\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\0\x5f\x6c\x69\x63\x65\ -\x6e\x73\x65\0\x2e\x72\x65\x6c\x2e\x65\x68\x5f\x66\x72\x61\x6d\x65\0\x74\x61\ -\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x69\x6e\x64\x69\x72\x65\x63\x74\x69\ 
-\x6f\x6e\x5f\x74\x61\x62\x6c\x65\0\x72\x73\x73\x2e\x62\x70\x66\x2e\x63\0\x2e\ -\x73\x74\x72\x74\x61\x62\0\x2e\x73\x79\x6d\x74\x61\x62\0\x4c\x42\x42\x30\x5f\ -\x39\0\x4c\x42\x42\x30\x5f\x38\x39\0\x4c\x42\x42\x30\x5f\x36\x39\0\x4c\x42\x42\ -\x30\x5f\x35\x39\0\x4c\x42\x42\x30\x5f\x31\x39\0\x4c\x42\x42\x30\x5f\x31\x30\ -\x39\0\x4c\x42\x42\x30\x5f\x39\x38\0\x4c\x42\x42\x30\x5f\x37\x38\0\x4c\x42\x42\ -\x30\x5f\x34\x38\0\x4c\x42\x42\x30\x5f\x31\x38\0\x4c\x42\x42\x30\x5f\x38\x37\0\ -\x4c\x42\x42\x30\x5f\x34\x37\0\x4c\x42\x42\x30\x5f\x33\x37\0\x4c\x42\x42\x30\ -\x5f\x31\x37\0\x4c\x42\x42\x30\x5f\x31\x30\x37\0\x4c\x42\x42\x30\x5f\x39\x36\0\ -\x4c\x42\x42\x30\x5f\x37\x36\0\x4c\x42\x42\x30\x5f\x36\x36\0\x4c\x42\x42\x30\ -\x5f\x34\x36\0\x4c\x42\x42\x30\x5f\x33\x36\0\x4c\x42\x42\x30\x5f\x32\x36\0\x4c\ -\x42\x42\x30\x5f\x31\x30\x36\0\x4c\x42\x42\x30\x5f\x36\x35\0\x4c\x42\x42\x30\ -\x5f\x34\x35\0\x4c\x42\x42\x30\x5f\x33\x35\0\x4c\x42\x42\x30\x5f\x34\0\x4c\x42\ -\x42\x30\x5f\x35\x34\0\x4c\x42\x42\x30\x5f\x34\x34\0\x4c\x42\x42\x30\x5f\x32\ -\x34\0\x4c\x42\x42\x30\x5f\x31\x30\x34\0\x4c\x42\x42\x30\x5f\x39\x33\0\x4c\x42\ -\x42\x30\x5f\x38\x33\0\x4c\x42\x42\x30\x5f\x35\x33\0\x4c\x42\x42\x30\x5f\x34\ -\x33\0\x4c\x42\x42\x30\x5f\x32\x33\0\x4c\x42\x42\x30\x5f\x31\x30\x33\0\x4c\x42\ -\x42\x30\x5f\x38\x32\0\x4c\x42\x42\x30\x5f\x35\x32\0\x4c\x42\x42\x30\x5f\x31\ -\x30\x32\0\x4c\x42\x42\x30\x5f\x39\x31\0\x4c\x42\x42\x30\x5f\x38\x31\0\x4c\x42\ -\x42\x30\x5f\x37\x31\0\x4c\x42\x42\x30\x5f\x36\x31\0\x4c\x42\x42\x30\x5f\x35\ -\x31\0\x4c\x42\x42\x30\x5f\x34\x31\0\x4c\x42\x42\x30\x5f\x32\x31\0\x4c\x42\x42\ -\x30\x5f\x31\x31\0\x4c\x42\x42\x30\x5f\x31\x31\x31\0\x4c\x42\x42\x30\x5f\x31\ -\x30\x31\0\x4c\x42\x42\x30\x5f\x38\x30\0\x4c\x42\x42\x30\x5f\x36\x30\0\x4c\x42\ -\x42\x30\x5f\x35\x30\0\x4c\x42\x42\x30\x5f\x31\x30\0\x4c\x42\x42\x30\x5f\x31\ -\x31\x30\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xaa\ -\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xa0\x1a\0\0\0\0\0\0\x71\x02\0\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1a\0\0\0\x01\0\0\ -\0\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x5a\0\0\0\x01\0\0\0\x06\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\xd8\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x56\0\0\0\x09\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\x60\x1a\0\0\0\0\0\0\x30\0\0\0\0\0\0\0\x09\0\0\0\x03\0\0\0\x08\0\0\0\0\0\0\0\ -\x10\0\0\0\0\0\0\0\x20\0\0\0\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x18\ -\x14\0\0\0\0\0\0\x3c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\ -\0\0\0\x6c\0\0\0\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x54\x14\0\0\0\0\0\ -\0\x07\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x78\0\0\ -\0\x01\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x60\x14\0\0\0\0\0\0\x30\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x74\0\0\0\x09\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x90\x1a\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x09\0\0\0\ -\x07\0\0\0\x08\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\xb2\0\0\0\x02\0\0\0\0\0\0\0\0\0\ -\0\0\0\0\0\0\0\0\0\0\x90\x14\0\0\0\0\0\0\xd0\x05\0\0\0\0\0\0\x01\0\0\0\x39\0\0\ -\0\x08\0\0\0\0\0\0\0\x18\0\0\0\0\0\0\0"; + s->data = (void *)rss_bpf__elf_bytes(&s->data_sz); + obj->skeleton = s; return 0; err: bpf_object__destroy_skeleton(s); - return -1; + return err; +} + +static inline const void *rss_bpf__elf_bytes(size_t *sz) +{ + 
*sz = 20440; + return (const void *)"\ +\x7f\x45\x4c\x46\x02\x01\x01\0\0\0\0\0\0\0\0\0\x01\0\xf7\0\x01\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\x98\x4c\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\x40\0\x0d\0\ +\x01\0\xbf\x19\0\0\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\x54\xff\0\0\0\0\xbf\xa7\ +\0\0\0\0\0\0\x07\x07\0\0\x54\xff\xff\xff\x18\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\xbf\x72\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\xbf\x06\0\0\0\0\0\0\x18\x01\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\xbf\x72\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\xbf\x08\0\0\0\0\0\0\ +\x18\0\0\0\xff\xff\xff\xff\0\0\0\0\0\0\0\0\x15\x06\x67\x02\0\0\0\0\xbf\x87\0\0\ +\0\0\0\0\x15\x07\x65\x02\0\0\0\0\x71\x61\0\0\0\0\0\0\x55\x01\x01\0\0\0\0\0\x05\ +\0\x5e\x02\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xc8\xff\0\0\0\0\x7b\x1a\xc0\xff\ +\0\0\0\0\x7b\x1a\xb8\xff\0\0\0\0\x7b\x1a\xb0\xff\0\0\0\0\x7b\x1a\xa8\xff\0\0\0\ +\0\x63\x1a\xa0\xff\0\0\0\0\x7b\x1a\x98\xff\0\0\0\0\x7b\x1a\x90\xff\0\0\0\0\x7b\ +\x1a\x88\xff\0\0\0\0\x7b\x1a\x80\xff\0\0\0\0\x7b\x1a\x78\xff\0\0\0\0\x7b\x1a\ +\x70\xff\0\0\0\0\x7b\x1a\x68\xff\0\0\0\0\x7b\x1a\x60\xff\0\0\0\0\x7b\x1a\x58\ +\xff\0\0\0\0\x15\x09\x4d\x02\0\0\0\0\x6b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\ +\0\x07\x03\0\0\xd0\xff\xff\xff\xbf\x91\0\0\0\0\0\0\xb7\x02\0\0\x0c\0\0\0\xb7\ +\x04\0\0\x02\0\0\0\xb7\x05\0\0\0\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\ +\x77\0\0\0\x20\0\0\0\x55\0\x42\x02\0\0\0\0\xb7\x02\0\0\x10\0\0\0\x69\xa1\xd0\ +\xff\0\0\0\0\xbf\x13\0\0\0\0\0\0\xdc\x03\0\0\x10\0\0\0\x15\x03\x02\0\0\x81\0\0\ +\x55\x03\x0b\0\xa8\x88\0\0\xb7\x02\0\0\x14\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\ +\0\xd0\xff\xff\xff\xbf\x91\0\0\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\0\0\0\ +\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\x32\x02\0\ +\0\0\0\x69\xa1\xd0\xff\0\0\0\0\x15\x01\x30\x02\0\0\0\0\x7b\x7a\x38\xff\0\0\0\0\ +\x7b\x9a\x40\xff\0\0\0\0\x15\x01\x55\0\x86\xdd\0\0\x55\x01\x39\0\x08\0\0\0\xb7\ +\x07\0\0\x01\0\0\0\x73\x7a\x58\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xe0\xff\ +\0\0\0\0\x7b\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\ +\x07\x03\0\0\xd0\xff\xff\xff\x79\xa1\x40\xff\0\0\0\0\xb7\x02\0\0\0\0\0\0\xb7\ +\x04\0\0\x14\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\ +\0\x77\0\0\0\x20\0\0\0\x55\0\x1c\x02\0\0\0\0\x69\xa1\xd6\xff\0\0\0\0\x55\x01\ +\x01\0\0\0\0\0\xb7\x07\0\0\0\0\0\0\x61\xa1\xdc\xff\0\0\0\0\x63\x1a\x64\xff\0\0\ +\0\0\x61\xa1\xe0\xff\0\0\0\0\x63\x1a\x68\xff\0\0\0\0\x71\xa9\xd9\xff\0\0\0\0\ +\x73\x7a\x5e\xff\0\0\0\0\x71\xa1\xd0\xff\0\0\0\0\x67\x01\0\0\x02\0\0\0\x57\x01\ +\0\0\x3c\0\0\0\x7b\x1a\x48\xff\0\0\0\0\xbf\x91\0\0\0\0\0\0\x57\x01\0\0\xff\0\0\ +\0\x15\x01\x19\0\0\0\0\0\x57\x07\0\0\xff\0\0\0\x55\x07\x17\0\0\0\0\0\x57\x09\0\ +\0\xff\0\0\0\x15\x09\x5a\x01\x11\0\0\0\x55\x09\x14\0\x06\0\0\0\xb7\x01\0\0\x01\ +\0\0\0\x73\x1a\x5b\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xe0\xff\0\0\0\0\x7b\ +\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\ +\xd0\xff\xff\xff\x79\xa1\x40\xff\0\0\0\0\x79\xa2\x48\xff\0\0\0\0\xb7\x04\0\0\ +\x14\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\ +\0\0\x20\0\0\0\x55\0\xf7\x01\0\0\0\0\x69\xa1\xd0\xff\0\0\0\0\x6b\x1a\x60\xff\0\ +\0\0\0\x69\xa1\xd2\xff\0\0\0\0\x6b\x1a\x62\xff\0\0\0\0\x71\xa1\x58\xff\0\0\0\0\ +\x15\x01\xdb\0\0\0\0\0\x71\x62\x03\0\0\0\0\0\x67\x02\0\0\x08\0\0\0\x71\x61\x02\ +\0\0\0\0\0\x4f\x12\0\0\0\0\0\0\x71\x63\x04\0\0\0\0\0\x71\x61\x05\0\0\0\0\0\x67\ +\x01\0\0\x08\0\0\0\x4f\x31\0\0\0\0\0\0\x67\x01\0\0\x10\0\0\0\x4f\x21\0\0\0\0\0\ 
+\0\x71\xa2\x5b\xff\0\0\0\0\x79\xa0\x38\xff\0\0\0\0\x15\x02\x0c\x01\0\0\0\0\xbf\ +\x12\0\0\0\0\0\0\x57\x02\0\0\x02\0\0\0\x15\x02\x09\x01\0\0\0\0\x61\xa1\x64\xff\ +\0\0\0\0\x63\x1a\xa8\xff\0\0\0\0\x61\xa1\x68\xff\0\0\0\0\x63\x1a\xac\xff\0\0\0\ +\0\x69\xa1\x60\xff\0\0\0\0\x6b\x1a\xb0\xff\0\0\0\0\x69\xa1\x62\xff\0\0\0\0\x6b\ +\x1a\xb2\xff\0\0\0\0\x05\0\x6b\x01\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x59\ +\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\x1a\xf0\xff\0\0\0\0\x7b\x1a\xe8\xff\0\0\0\ +\0\x7b\x1a\xe0\xff\0\0\0\0\x7b\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\ +\xa3\0\0\0\0\0\0\x07\x03\0\0\xd0\xff\xff\xff\xb7\x01\0\0\x28\0\0\0\x7b\x1a\x48\ +\xff\0\0\0\0\xbf\x91\0\0\0\0\0\0\xb7\x02\0\0\0\0\0\0\xb7\x04\0\0\x28\0\0\0\xb7\ +\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\ +\x55\0\xfe\0\0\0\0\0\x79\xa1\xe0\xff\0\0\0\0\x63\x1a\x6c\xff\0\0\0\0\x77\x01\0\ +\0\x20\0\0\0\x63\x1a\x70\xff\0\0\0\0\x79\xa1\xd8\xff\0\0\0\0\x63\x1a\x64\xff\0\ +\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x68\xff\0\0\0\0\x79\xa1\xe8\xff\0\0\0\0\ +\x63\x1a\x74\xff\0\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x78\xff\0\0\0\0\x79\xa1\ +\xf0\xff\0\0\0\0\x63\x1a\x7c\xff\0\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x80\xff\ +\0\0\0\0\x71\xa9\xd6\xff\0\0\0\0\x25\x09\x13\x01\x3c\0\0\0\xb7\x01\0\0\x01\0\0\ +\0\x6f\x91\0\0\0\0\0\0\x18\x02\0\0\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x5f\x21\0\0\0\ +\0\0\0\x55\x01\x01\0\0\0\0\0\x05\0\x0c\x01\0\0\0\0\xb7\x01\0\0\0\0\0\0\x6b\x1a\ +\xfe\xff\0\0\0\0\xb7\x01\0\0\x28\0\0\0\x7b\x1a\x48\xff\0\0\0\0\xbf\xa1\0\0\0\0\ +\0\0\x07\x01\0\0\x94\xff\xff\xff\x7b\x1a\x20\xff\0\0\0\0\xbf\xa1\0\0\0\0\0\0\ +\x07\x01\0\0\x84\xff\xff\xff\x7b\x1a\x18\xff\0\0\0\0\x18\x07\0\0\x01\0\0\0\0\0\ +\0\0\0\x18\0\x1c\xb7\x02\0\0\0\0\0\0\x7b\x8a\x28\xff\0\0\0\0\x7b\x2a\x30\xff\0\ +\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xfe\xff\xff\xff\x79\xa1\x40\xff\0\0\0\0\ +\x79\xa2\x48\xff\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\ +\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x15\0\x01\0\0\0\0\0\x05\0\ +\x91\x01\0\0\0\0\xbf\x91\0\0\0\0\0\0\x15\x01\x26\0\x3c\0\0\0\x15\x01\x5f\0\x2c\ +\0\0\0\x55\x01\x60\0\x2b\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xf8\xff\0\0\0\0\xbf\ +\xa3\0\0\0\0\0\0\x07\x03\0\0\xf8\xff\xff\xff\x79\xa7\x40\xff\0\0\0\0\xbf\x71\0\ +\0\0\0\0\0\x79\xa2\x48\xff\0\0\0\0\xb7\x04\0\0\x04\0\0\0\xb7\x05\0\0\x01\0\0\0\ +\x85\0\0\0\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\ +\0\0\0\x55\x01\x06\x01\0\0\0\0\x71\xa1\xfa\xff\0\0\0\0\x55\x01\x11\0\x02\0\0\0\ +\x71\xa1\xf9\xff\0\0\0\0\x55\x01\x0f\0\x02\0\0\0\x71\xa1\xfb\xff\0\0\0\0\x55\ +\x01\x0d\0\x01\0\0\0\x79\xa2\x48\xff\0\0\0\0\x07\x02\0\0\x08\0\0\0\xbf\x71\0\0\ +\0\0\0\0\x79\xa3\x20\xff\0\0\0\0\xb7\x04\0\0\x10\0\0\0\xb7\x05\0\0\x01\0\0\0\ +\x85\0\0\0\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\ +\0\0\0\x55\x01\xf5\0\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x5d\xff\0\0\0\0\x18\ +\x07\0\0\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x05\0\x3c\0\0\0\0\0\xb7\x08\0\0\x02\0\0\ +\0\xb7\x07\0\0\0\0\0\0\x6b\x7a\xf8\xff\0\0\0\0\x05\0\x13\0\0\0\0\0\x0f\x81\0\0\ +\0\0\0\0\xbf\x12\0\0\0\0\0\0\x07\x02\0\0\x01\0\0\0\x71\xa3\xff\xff\0\0\0\0\x67\ +\x03\0\0\x03\0\0\0\x3d\x32\x09\0\0\0\0\0\xbf\x72\0\0\0\0\0\0\x07\x02\0\0\x01\0\ +\0\0\x67\x07\0\0\x20\0\0\0\xbf\x73\0\0\0\0\0\0\x77\x03\0\0\x20\0\0\0\xbf\x27\0\ +\0\0\0\0\0\xbf\x18\0\0\0\0\0\0\xb7\x01\0\0\x1d\0\0\0\x2d\x31\x04\0\0\0\0\0\x79\ +\xa8\x28\xff\0\0\0\0\x18\x07\0\0\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x05\0\x25\0\0\0\ +\0\0\xbf\x89\0\0\0\0\0\0\x79\xa1\x48\xff\0\0\0\0\x0f\x19\0\0\0\0\0\0\xbf\xa3\0\ 
+\0\0\0\0\0\x07\x03\0\0\xf8\xff\xff\xff\x79\xa1\x40\xff\0\0\0\0\xbf\x92\0\0\0\0\ +\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\0\ +\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x79\0\0\0\0\0\ +\x71\xa2\xf8\xff\0\0\0\0\x55\x02\x0e\0\xc9\0\0\0\x07\x09\0\0\x02\0\0\0\x79\xa1\ +\x40\xff\0\0\0\0\xbf\x92\0\0\0\0\0\0\x79\xa3\x18\xff\0\0\0\0\xb7\x04\0\0\x10\0\ +\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\ +\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x6c\0\0\0\0\0\xb7\x01\0\0\x01\0\0\0\ +\x73\x1a\x5c\xff\0\0\0\0\x05\0\xde\xff\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x15\x02\ +\xcd\xff\0\0\0\0\x71\xa1\xf9\xff\0\0\0\0\x07\x01\0\0\x02\0\0\0\x05\0\xca\xff\0\ +\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x5e\xff\0\0\0\0\x71\xa1\xff\xff\0\0\0\0\ +\x67\x01\0\0\x03\0\0\0\x79\xa2\x48\xff\0\0\0\0\x0f\x12\0\0\0\0\0\0\x07\x02\0\0\ +\x08\0\0\0\x7b\x2a\x48\xff\0\0\0\0\x71\xa9\xfe\xff\0\0\0\0\x79\xa2\x30\xff\0\0\ +\0\0\x25\x09\x0c\0\x3c\0\0\0\xb7\x01\0\0\x01\0\0\0\x6f\x91\0\0\0\0\0\0\x5f\x71\ +\0\0\0\0\0\0\x55\x01\x01\0\0\0\0\0\x05\0\x07\0\0\0\0\0\x07\x02\0\0\x01\0\0\0\ +\xbf\x21\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x7d\ +\xff\x0b\0\0\0\x71\xa7\x5e\xff\0\0\0\0\x05\0\x09\xff\0\0\0\0\x15\x09\xf8\xff\ +\x87\0\0\0\x05\0\xfc\xff\0\0\0\0\x71\xa1\x59\xff\0\0\0\0\x79\xa0\x38\xff\0\0\0\ +\0\x15\x01\x13\x01\0\0\0\0\x71\x62\x03\0\0\0\0\0\x67\x02\0\0\x08\0\0\0\x71\x61\ +\x02\0\0\0\0\0\x4f\x12\0\0\0\0\0\0\x71\x63\x04\0\0\0\0\0\x71\x61\x05\0\0\0\0\0\ +\x67\x01\0\0\x08\0\0\0\x4f\x31\0\0\0\0\0\0\x67\x01\0\0\x10\0\0\0\x4f\x21\0\0\0\ +\0\0\0\x71\xa2\x5b\xff\0\0\0\0\x15\x02\x42\0\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\ +\x02\0\0\x10\0\0\0\x15\x02\x3f\0\0\0\0\0\x57\x01\0\0\x80\0\0\0\xb7\x02\0\0\x10\ +\0\0\0\xb7\x03\0\0\x10\0\0\0\x15\x01\x01\0\0\0\0\0\xb7\x03\0\0\x30\0\0\0\x71\ +\xa4\x5d\xff\0\0\0\0\x15\x04\x01\0\0\0\0\0\xbf\x32\0\0\0\0\0\0\xbf\xa3\0\0\0\0\ +\0\0\x07\x03\0\0\x64\xff\xff\xff\xbf\x34\0\0\0\0\0\0\x15\x01\x02\0\0\0\0\0\xbf\ +\xa4\0\0\0\0\0\0\x07\x04\0\0\x84\xff\xff\xff\x71\xa5\x5c\xff\0\0\0\0\xbf\x31\0\ +\0\0\0\0\0\x15\x05\x01\0\0\0\0\0\xbf\x41\0\0\0\0\0\0\x61\x14\x04\0\0\0\0\0\x67\ +\x04\0\0\x20\0\0\0\x61\x15\0\0\0\0\0\0\x4f\x54\0\0\0\0\0\0\x7b\x4a\xa8\xff\0\0\ +\0\0\x61\x14\x08\0\0\0\0\0\x61\x11\x0c\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x4f\x41\ +\0\0\0\0\0\0\x7b\x1a\xb0\xff\0\0\0\0\x0f\x23\0\0\0\0\0\0\x61\x31\0\0\0\0\0\0\ +\x61\x32\x04\0\0\0\0\0\x61\x34\x08\0\0\0\0\0\x61\x33\x0c\0\0\0\0\0\x69\xa5\x62\ +\xff\0\0\0\0\x6b\x5a\xca\xff\0\0\0\0\x69\xa5\x60\xff\0\0\0\0\x6b\x5a\xc8\xff\0\ +\0\0\0\x67\x03\0\0\x20\0\0\0\x4f\x43\0\0\0\0\0\0\x7b\x3a\xc0\xff\0\0\0\0\x67\ +\x02\0\0\x20\0\0\0\x4f\x12\0\0\0\0\0\0\x7b\x2a\xb8\xff\0\0\0\0\x05\0\x6b\0\0\0\ +\0\0\x71\xa2\x5a\xff\0\0\0\0\x15\x02\x04\0\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\ +\0\0\x04\0\0\0\x15\x02\x01\0\0\0\0\0\x05\0\xf1\xfe\0\0\0\0\x57\x01\0\0\x01\0\0\ +\0\x15\x01\xd0\0\0\0\0\0\x61\xa1\x64\xff\0\0\0\0\x63\x1a\xa8\xff\0\0\0\0\x61\ +\xa1\x68\xff\0\0\0\0\x63\x1a\xac\xff\0\0\0\0\x05\0\x5e\0\0\0\0\0\xb7\x09\0\0\ +\x3c\0\0\0\x79\xa8\x28\xff\0\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x15\ +\0\xac\xff\0\0\0\0\x05\0\xc5\0\0\0\0\0\x71\xa2\x5a\xff\0\0\0\0\x15\x02\x26\0\0\ +\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x20\0\0\0\x15\x02\x23\0\0\0\0\0\x57\x01\ +\0\0\0\x01\0\0\xb7\x02\0\0\x10\0\0\0\xb7\x03\0\0\x10\0\0\0\x15\x01\x01\0\0\0\0\ +\0\xb7\x03\0\0\x30\0\0\0\x71\xa4\x5d\xff\0\0\0\0\x15\x04\x01\0\0\0\0\0\xbf\x32\ +\0\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\x64\xff\xff\xff\xbf\x34\0\0\0\0\0\ 
+\0\x15\x01\x02\0\0\0\0\0\xbf\xa4\0\0\0\0\0\0\x07\x04\0\0\x84\xff\xff\xff\x71\ +\xa5\x5c\xff\0\0\0\0\xbf\x31\0\0\0\0\0\0\x15\x05\xbd\xff\0\0\0\0\x05\0\xbb\xff\ +\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x5a\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\ +\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xd0\xff\xff\xff\x79\xa1\ +\x40\xff\0\0\0\0\x79\xa2\x48\xff\0\0\0\0\xb7\x04\0\0\x08\0\0\0\xb7\x05\0\0\x01\ +\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\xa0\0\ +\0\0\0\0\x05\0\xa8\xfe\0\0\0\0\x15\x09\xf3\xfe\x87\0\0\0\x05\0\x83\xff\0\0\0\0\ +\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x08\0\0\0\x15\x02\x9a\0\0\0\0\0\x57\x01\0\0\ +\x40\0\0\0\xb7\x02\0\0\x0c\0\0\0\xb7\x03\0\0\x0c\0\0\0\x15\x01\x01\0\0\0\0\0\ +\xb7\x03\0\0\x2c\0\0\0\x71\xa4\x5c\xff\0\0\0\0\x15\x04\x01\0\0\0\0\0\xbf\x32\0\ +\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\x58\xff\xff\xff\x0f\x23\0\0\0\0\0\0\ +\x61\x32\x04\0\0\0\0\0\x67\x02\0\0\x20\0\0\0\x61\x34\0\0\0\0\0\0\x4f\x42\0\0\0\ +\0\0\0\x7b\x2a\xa8\xff\0\0\0\0\x61\x32\x08\0\0\0\0\0\x61\x33\x0c\0\0\0\0\0\x67\ +\x03\0\0\x20\0\0\0\x4f\x23\0\0\0\0\0\0\x7b\x3a\xb0\xff\0\0\0\0\x71\xa2\x5d\xff\ +\0\0\0\0\x15\x02\x0c\0\0\0\0\0\x15\x01\x0b\0\0\0\0\0\x61\xa1\xa0\xff\0\0\0\0\ +\x67\x01\0\0\x20\0\0\0\x61\xa2\x9c\xff\0\0\0\0\x4f\x21\0\0\0\0\0\0\x7b\x1a\xc0\ +\xff\0\0\0\0\x61\xa1\x98\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\xa2\x94\xff\0\0\ +\0\0\x05\0\x0a\0\0\0\0\0\xb7\x09\0\0\x2b\0\0\0\x05\0\xae\xff\0\0\0\0\x61\xa1\ +\x80\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\xa2\x7c\xff\0\0\0\0\x4f\x21\0\0\0\0\ +\0\0\x7b\x1a\xc0\xff\0\0\0\0\x61\xa1\x78\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\ +\xa2\x74\xff\0\0\0\0\x4f\x21\0\0\0\0\0\0\x7b\x1a\xb8\xff\0\0\0\0\xb7\x02\0\0\0\ +\0\0\0\x07\x08\0\0\x04\0\0\0\x61\x03\0\0\0\0\0\0\xb7\x05\0\0\0\0\0\0\xbf\xa1\0\ +\0\0\0\0\0\x07\x01\0\0\xa8\xff\xff\xff\x0f\x21\0\0\0\0\0\0\x71\x14\0\0\0\0\0\0\ +\xbf\x41\0\0\0\0\0\0\x67\x01\0\0\x38\0\0\0\xc7\x01\0\0\x3f\0\0\0\x5f\x31\0\0\0\ +\0\0\0\xaf\x51\0\0\0\0\0\0\xbf\x85\0\0\0\0\0\0\x0f\x25\0\0\0\0\0\0\x71\x55\0\0\ +\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x07\0\0\0\x4f\x03\ +\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x39\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\ +\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x06\0\0\0\ +\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\ +\0\0\x67\0\0\0\x3a\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\ +\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x05\0\0\0\x57\0\0\0\ +\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3b\0\0\0\xc7\0\0\ +\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\ +\x50\0\0\0\0\0\0\x77\0\0\0\x04\0\0\0\x57\0\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\ +\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3c\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\ +\0\xaf\x01\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x03\0\0\0\x57\0\0\0\x01\0\ +\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\ +\x3d\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\xbf\x50\ +\0\0\0\0\0\0\x77\0\0\0\x02\0\0\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\ +\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3e\0\0\0\xc7\0\0\0\x3f\0\0\0\ +\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x01\0\0\ +\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\x57\x04\0\0\ +\x01\0\0\0\x87\x04\0\0\0\0\0\0\x5f\x34\0\0\0\0\0\0\xaf\x41\0\0\0\0\0\0\x57\x05\ +\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x53\0\0\0\0\0\0\x07\x02\0\0\x01\0\0\0\ 
+\xbf\x15\0\0\0\0\0\0\x15\x02\x01\0\x24\0\0\0\x05\0\xa9\xff\0\0\0\0\xbf\x12\0\0\ +\0\0\0\0\x67\x02\0\0\x20\0\0\0\x77\x02\0\0\x20\0\0\0\x15\x02\x0e\0\0\0\0\0\x71\ +\x63\x06\0\0\0\0\0\x71\x64\x07\0\0\0\0\0\x67\x04\0\0\x08\0\0\0\x4f\x34\0\0\0\0\ +\0\0\x3f\x42\0\0\0\0\0\0\x2f\x42\0\0\0\0\0\0\x1f\x21\0\0\0\0\0\0\x63\x1a\x58\ +\xff\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\x58\xff\xff\xff\x18\x01\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\x55\0\x05\0\0\0\0\0\x71\x61\x08\0\0\0\0\ +\0\x71\x60\x09\0\0\0\0\0\x67\0\0\0\x08\0\0\0\x4f\x10\0\0\0\0\0\0\x95\0\0\0\0\0\ +\0\0\x69\0\0\0\0\0\0\0\x05\0\xfd\xff\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\x47\x50\x4c\x20\x76\x32\0\0\x9f\xeb\x01\0\x18\0\0\0\0\0\0\0\x10\x05\0\0\x10\ +\x05\0\0\x65\x11\0\0\0\0\0\0\0\0\0\x02\x03\0\0\0\x01\0\0\0\0\0\0\x01\x04\0\0\0\ +\x20\0\0\x01\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x02\0\0\0\x05\0\0\0\ +\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\0\0\0\0\0\0\x02\x06\0\0\0\0\0\0\0\0\0\0\x03\0\ +\0\0\0\x02\0\0\0\x04\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\x02\x08\0\0\0\0\0\0\0\0\0\0\ +\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x0a\0\0\0\0\0\0\0\0\0\0\x02\x0a\0\0\0\0\0\0\0\ +\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x01\0\0\0\0\0\0\0\x04\0\0\x04\x20\0\0\0\ +\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\x05\0\0\0\x40\0\0\0\x27\0\0\0\x07\0\0\0\ +\x80\0\0\0\x32\0\0\0\x09\0\0\0\xc0\0\0\0\x3e\0\0\0\0\0\0\x0e\x0b\0\0\0\x01\0\0\ +\0\0\0\0\0\0\0\0\x02\x0e\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\ +\x28\0\0\0\0\0\0\0\x04\0\0\x04\x20\0\0\0\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\ +\x05\0\0\0\x40\0\0\0\x27\0\0\0\x0d\0\0\0\x80\0\0\0\x32\0\0\0\x09\0\0\0\xc0\0\0\ +\0\x59\0\0\0\0\0\0\x0e\x0f\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\x02\x12\0\0\0\0\0\0\0\ +\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x80\0\0\0\0\0\0\0\x04\0\0\x04\x20\0\0\0\ +\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\x05\0\0\0\x40\0\0\0\x27\0\0\0\x01\0\0\0\ +\x80\0\0\0\x32\0\0\0\x11\0\0\0\xc0\0\0\0\x72\0\0\0\0\0\0\x0e\x13\0\0\0\x01\0\0\ +\0\0\0\0\0\0\0\0\x02\x16\0\0\0\x90\0\0\0\x22\0\0\x04\xc0\0\0\0\x9a\0\0\0\x17\0\ +\0\0\0\0\0\0\x9e\0\0\0\x17\0\0\0\x20\0\0\0\xa7\0\0\0\x17\0\0\0\x40\0\0\0\xac\0\ +\0\0\x17\0\0\0\x60\0\0\0\xba\0\0\0\x17\0\0\0\x80\0\0\0\xc3\0\0\0\x17\0\0\0\xa0\ +\0\0\0\xd0\0\0\0\x17\0\0\0\xc0\0\0\0\xd9\0\0\0\x17\0\0\0\xe0\0\0\0\xe4\0\0\0\ +\x17\0\0\0\0\x01\0\0\xed\0\0\0\x17\0\0\0\x20\x01\0\0\xfd\0\0\0\x17\0\0\0\x40\ +\x01\0\0\x05\x01\0\0\x17\0\0\0\x60\x01\0\0\x0e\x01\0\0\x19\0\0\0\x80\x01\0\0\ +\x11\x01\0\0\x17\0\0\0\x20\x02\0\0\x16\x01\0\0\x17\0\0\0\x40\x02\0\0\x21\x01\0\ +\0\x17\0\0\0\x60\x02\0\0\x26\x01\0\0\x17\0\0\0\x80\x02\0\0\x2f\x01\0\0\x17\0\0\ +\0\xa0\x02\0\0\x37\x01\0\0\x17\0\0\0\xc0\x02\0\0\x3e\x01\0\0\x17\0\0\0\xe0\x02\ +\0\0\x49\x01\0\0\x17\0\0\0\0\x03\0\0\x53\x01\0\0\x1a\0\0\0\x20\x03\0\0\x5e\x01\ +\0\0\x1a\0\0\0\xa0\x03\0\0\x68\x01\0\0\x17\0\0\0\x20\x04\0\0\x74\x01\0\0\x17\0\ +\0\0\x40\x04\0\0\x7f\x01\0\0\x17\0\0\0\x60\x04\0\0\0\0\0\0\x1b\0\0\0\x80\x04\0\ +\0\x89\x01\0\0\x1d\0\0\0\xc0\x04\0\0\x90\x01\0\0\x17\0\0\0\0\x05\0\0\x99\x01\0\ +\0\x17\0\0\0\x20\x05\0\0\0\0\0\0\x1f\0\0\0\x40\x05\0\0\xa2\x01\0\0\x17\0\0\0\ +\x80\x05\0\0\xab\x01\0\0\x21\0\0\0\xa0\x05\0\0\xb7\x01\0\0\x1d\0\0\0\xc0\x05\0\ +\0\xc0\x01\0\0\0\0\0\x08\x18\0\0\0\xc6\x01\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\ +\0\0\0\0\0\0\x03\0\0\0\0\x17\0\0\0\x04\0\0\0\x05\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\ +\0\x17\0\0\0\x04\0\0\0\x04\0\0\0\0\0\0\0\x01\0\0\x05\x08\0\0\0\xd3\x01\0\0\x1c\ 
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\x2a\0\0\0\xdd\x01\0\0\0\0\0\x08\x1e\0\0\0\xe3\ +\x01\0\0\0\0\0\x01\x08\0\0\0\x40\0\0\0\0\0\0\0\x01\0\0\x05\x08\0\0\0\xf6\x01\0\ +\0\x20\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\x2b\0\0\0\xf9\x01\0\0\0\0\0\x08\x22\0\0\ +\0\xfe\x01\0\0\0\0\0\x01\x01\0\0\0\x08\0\0\0\0\0\0\0\x01\0\0\x0d\x02\0\0\0\x0c\ +\x02\0\0\x15\0\0\0\x10\x02\0\0\x01\0\0\x0c\x23\0\0\0\x32\x11\0\0\0\0\0\x01\x01\ +\0\0\0\x08\0\0\x01\0\0\0\0\0\0\0\x03\0\0\0\0\x25\0\0\0\x04\0\0\0\x07\0\0\0\x37\ +\x11\0\0\0\0\0\x0e\x26\0\0\0\x01\0\0\0\x40\x11\0\0\x03\0\0\x0f\0\0\0\0\x0c\0\0\ +\0\0\0\0\0\x20\0\0\0\x10\0\0\0\0\0\0\0\x20\0\0\0\x14\0\0\0\0\0\0\0\x20\0\0\0\ +\x46\x11\0\0\x01\0\0\x0f\0\0\0\0\x27\0\0\0\0\0\0\0\x07\0\0\0\x4e\x11\0\0\0\0\0\ +\x07\0\0\0\0\x5c\x11\0\0\0\0\0\x07\0\0\0\0\0\x69\x6e\x74\0\x5f\x5f\x41\x52\x52\ +\x41\x59\x5f\x53\x49\x5a\x45\x5f\x54\x59\x50\x45\x5f\x5f\0\x74\x79\x70\x65\0\ +\x6b\x65\x79\x5f\x73\x69\x7a\x65\0\x76\x61\x6c\x75\x65\x5f\x73\x69\x7a\x65\0\ +\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\0\x74\x61\x70\x5f\x72\x73\x73\x5f\ +\x6d\x61\x70\x5f\x63\x6f\x6e\x66\x69\x67\x75\x72\x61\x74\x69\x6f\x6e\x73\0\x74\ +\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\ +\x5f\x6b\x65\x79\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x69\x6e\x64\ +\x69\x72\x65\x63\x74\x69\x6f\x6e\x5f\x74\x61\x62\x6c\x65\0\x5f\x5f\x73\x6b\x5f\ +\x62\x75\x66\x66\0\x6c\x65\x6e\0\x70\x6b\x74\x5f\x74\x79\x70\x65\0\x6d\x61\x72\ +\x6b\0\x71\x75\x65\x75\x65\x5f\x6d\x61\x70\x70\x69\x6e\x67\0\x70\x72\x6f\x74\ +\x6f\x63\x6f\x6c\0\x76\x6c\x61\x6e\x5f\x70\x72\x65\x73\x65\x6e\x74\0\x76\x6c\ +\x61\x6e\x5f\x74\x63\x69\0\x76\x6c\x61\x6e\x5f\x70\x72\x6f\x74\x6f\0\x70\x72\ +\x69\x6f\x72\x69\x74\x79\0\x69\x6e\x67\x72\x65\x73\x73\x5f\x69\x66\x69\x6e\x64\ +\x65\x78\0\x69\x66\x69\x6e\x64\x65\x78\0\x74\x63\x5f\x69\x6e\x64\x65\x78\0\x63\ +\x62\0\x68\x61\x73\x68\0\x74\x63\x5f\x63\x6c\x61\x73\x73\x69\x64\0\x64\x61\x74\ +\x61\0\x64\x61\x74\x61\x5f\x65\x6e\x64\0\x6e\x61\x70\x69\x5f\x69\x64\0\x66\x61\ +\x6d\x69\x6c\x79\0\x72\x65\x6d\x6f\x74\x65\x5f\x69\x70\x34\0\x6c\x6f\x63\x61\ +\x6c\x5f\x69\x70\x34\0\x72\x65\x6d\x6f\x74\x65\x5f\x69\x70\x36\0\x6c\x6f\x63\ +\x61\x6c\x5f\x69\x70\x36\0\x72\x65\x6d\x6f\x74\x65\x5f\x70\x6f\x72\x74\0\x6c\ +\x6f\x63\x61\x6c\x5f\x70\x6f\x72\x74\0\x64\x61\x74\x61\x5f\x6d\x65\x74\x61\0\ +\x74\x73\x74\x61\x6d\x70\0\x77\x69\x72\x65\x5f\x6c\x65\x6e\0\x67\x73\x6f\x5f\ +\x73\x65\x67\x73\0\x67\x73\x6f\x5f\x73\x69\x7a\x65\0\x74\x73\x74\x61\x6d\x70\ +\x5f\x74\x79\x70\x65\0\x68\x77\x74\x73\x74\x61\x6d\x70\0\x5f\x5f\x75\x33\x32\0\ +\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x69\x6e\x74\0\x66\x6c\x6f\x77\x5f\x6b\x65\ +\x79\x73\0\x5f\x5f\x75\x36\x34\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x6c\x6f\ +\x6e\x67\x20\x6c\x6f\x6e\x67\0\x73\x6b\0\x5f\x5f\x75\x38\0\x75\x6e\x73\x69\x67\ +\x6e\x65\x64\x20\x63\x68\x61\x72\0\x73\x6b\x62\0\x74\x75\x6e\x5f\x72\x73\x73\ +\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\x5f\x70\x72\x6f\x67\0\x74\x75\x6e\x5f\x72\ +\x73\x73\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\0\x2f\x68\x6f\x6d\x65\x2f\x73\x68\ +\x72\x65\x65\x73\x68\x2f\x63\x2f\x71\x65\x6d\x75\x2f\x74\x6f\x6f\x6c\x73\x2f\ +\x65\x62\x70\x66\x2f\x72\x73\x73\x2e\x62\x70\x66\x2e\x63\0\x69\x6e\x74\x20\x74\ +\x75\x6e\x5f\x72\x73\x73\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\x5f\x70\x72\x6f\ +\x67\x28\x73\x74\x72\x75\x63\x74\x20\x5f\x5f\x73\x6b\x5f\x62\x75\x66\x66\x20\ +\x2a\x73\x6b\x62\x29\0\x20\x20\x20\x20\x5f\x5f\x75\x33\x32\x20\x6b\x65\x79\x20\ +\x3d\x20\x30\x3b\0\x20\x20\x20\x20\x63\x6f\x6e\x66\x69\x67\x20\x3d\x20\x62\x70\ 
+\x66\x5f\x6d\x61\x70\x5f\x6c\x6f\x6f\x6b\x75\x70\x5f\x65\x6c\x65\x6d\x28\x26\ +\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x63\x6f\x6e\x66\x69\x67\x75\ +\x72\x61\x74\x69\x6f\x6e\x73\x2c\x20\x26\x6b\x65\x79\x29\x3b\0\x20\x20\x20\x20\ +\x74\x6f\x65\x20\x3d\x20\x62\x70\x66\x5f\x6d\x61\x70\x5f\x6c\x6f\x6f\x6b\x75\ +\x70\x5f\x65\x6c\x65\x6d\x28\x26\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\ +\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\x5f\x6b\x65\x79\x2c\x20\x26\x6b\x65\x79\ +\x29\x3b\0\x20\x20\x20\x20\x69\x66\x20\x28\x63\x6f\x6e\x66\x69\x67\x20\x26\x26\ +\x20\x74\x6f\x65\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\ +\x21\x63\x6f\x6e\x66\x69\x67\x2d\x3e\x72\x65\x64\x69\x72\x65\x63\x74\x29\x20\ +\x7b\0\x20\x20\x20\x20\x5f\x5f\x75\x38\x20\x72\x73\x73\x5f\x69\x6e\x70\x75\x74\ +\x5b\x48\x41\x53\x48\x5f\x43\x41\x4c\x43\x55\x4c\x41\x54\x49\x4f\x4e\x5f\x42\ +\x55\x46\x46\x45\x52\x5f\x53\x49\x5a\x45\x5d\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\ +\x20\x20\x73\x74\x72\x75\x63\x74\x20\x70\x61\x63\x6b\x65\x74\x5f\x68\x61\x73\ +\x68\x5f\x69\x6e\x66\x6f\x5f\x74\x20\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\ +\x6f\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x69\x66\x20\x28\x21\x69\x6e\x66\ +\x6f\x20\x7c\x7c\x20\x21\x73\x6b\x62\x29\x20\x7b\0\x20\x20\x20\x20\x5f\x5f\x62\ +\x65\x31\x36\x20\x72\x65\x74\x20\x3d\x20\x30\x3b\0\x20\x20\x20\x20\x65\x72\x72\ +\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\ +\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x6f\x66\ +\x66\x73\x65\x74\x2c\x20\x26\x72\x65\x74\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\ +\x72\x65\x74\x29\x2c\0\x20\x20\x20\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\x7b\ +\0\x20\x20\x20\x20\x73\x77\x69\x74\x63\x68\x20\x28\x62\x70\x66\x5f\x6e\x74\x6f\ +\x68\x73\x28\x72\x65\x74\x29\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x65\ +\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\ +\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\ +\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x72\x65\x74\x2c\x20\x73\x69\x7a\x65\x6f\ +\x66\x28\x72\x65\x74\x29\x2c\0\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\ +\x65\x74\x3b\0\x20\x20\x20\x20\x69\x66\x20\x28\x6c\x33\x5f\x70\x72\x6f\x74\x6f\ +\x63\x6f\x6c\x20\x3d\x3d\x20\x30\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\ +\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x34\x20\x3d\x20\x31\x3b\0\x20\ +\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\x20\x69\x70\x68\x64\x72\ +\x20\x69\x70\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\ +\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\ +\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x30\ +\x2c\x20\x26\x69\x70\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\x69\x70\x29\x2c\0\x20\ +\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\x7b\0\x20\x20\ +\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x66\x72\x61\x67\ +\x6d\x65\x6e\x74\x65\x64\x20\x3d\x20\x21\x21\x69\x70\x2e\x66\x72\x61\x67\x5f\ +\x6f\x66\x66\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\ +\x6e\x5f\x73\x72\x63\x20\x3d\x20\x69\x70\x2e\x73\x61\x64\x64\x72\x3b\0\x20\x20\ +\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x6e\x5f\x64\x73\x74\x20\ +\x3d\x20\x69\x70\x2e\x64\x61\x64\x64\x72\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\ +\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\x20\x3d\x20\x69\x70\x2e\x70\x72\ +\x6f\x74\x6f\x63\x6f\x6c\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x6c\x34\x5f\x6f\ 
+\x66\x66\x73\x65\x74\x20\x3d\x20\x69\x70\x2e\x69\x68\x6c\x20\x2a\x20\x34\x3b\0\ +\x20\x20\x20\x20\x69\x66\x20\x28\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\ +\x20\x21\x3d\x20\x30\x20\x26\x26\x20\x21\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\ +\x66\x72\x61\x67\x6d\x65\x6e\x74\x65\x64\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\ +\x20\x20\x69\x66\x20\x28\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\x20\x3d\ +\x3d\x20\x49\x50\x50\x52\x4f\x54\x4f\x5f\x54\x43\x50\x29\x20\x7b\0\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x74\ +\x63\x70\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x73\x74\x72\x75\x63\x74\x20\x74\x63\x70\x68\x64\x72\x20\x74\x63\x70\x20\x3d\ +\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\ +\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\ +\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x6c\x34\ +\x5f\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x74\x63\x70\x2c\x20\x73\x69\x7a\x65\ +\x6f\x66\x28\x74\x63\x70\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\x7b\0\x20\x20\x20\x20\x69\x66\x20\x28\ +\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x69\x70\x76\x34\ +\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x70\x61\x63\x6b\ +\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x74\x63\x70\x20\x26\x26\0\x20\x20\ +\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x36\ +\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\ +\x20\x69\x70\x76\x36\x68\x64\x72\x20\x69\x70\x36\x20\x3d\x20\x7b\x7d\x3b\0\x20\ +\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\ +\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\ +\x76\x65\x28\x73\x6b\x62\x2c\x20\x30\x2c\x20\x26\x69\x70\x36\x2c\x20\x73\x69\ +\x7a\x65\x6f\x66\x28\x69\x70\x36\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\ +\x6e\x66\x6f\x2d\x3e\x69\x6e\x36\x5f\x73\x72\x63\x20\x3d\x20\x69\x70\x36\x2e\ +\x73\x61\x64\x64\x72\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\ +\x3e\x69\x6e\x36\x5f\x64\x73\x74\x20\x3d\x20\x69\x70\x36\x2e\x64\x61\x64\x64\ +\x72\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\ +\x6f\x6c\x20\x3d\x20\x69\x70\x36\x2e\x6e\x65\x78\x74\x68\x64\x72\x3b\0\x20\x20\ +\x20\x20\x73\x77\x69\x74\x63\x68\x20\x28\x68\x64\x72\x5f\x74\x79\x70\x65\x29\ +\x20\x7b\0\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\x20\x69\x70\x76\x36\x5f\x6f\ +\x70\x74\x5f\x68\x64\x72\x20\x65\x78\x74\x5f\x68\x64\x72\x20\x3d\x20\x7b\x7d\ +\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\ +\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\ +\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\ +\x74\x2c\x20\x26\x65\x78\x74\x5f\x68\x64\x72\x2c\0\x20\x20\x20\x20\x20\x20\x20\ +\x20\x69\x66\x20\x28\x2a\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\x20\x3d\ +\x3d\x20\x49\x50\x50\x52\x4f\x54\x4f\x5f\x52\x4f\x55\x54\x49\x4e\x47\x29\x20\ +\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\ +\x20\x69\x70\x76\x36\x5f\x72\x74\x5f\x68\x64\x72\x20\x65\x78\x74\x5f\x72\x74\ +\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\ +\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\ +\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\ 
+\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x65\x78\x74\x5f\x72\x74\ +\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x28\x65\ +\x78\x74\x5f\x72\x74\x2e\x74\x79\x70\x65\x20\x3d\x3d\x20\x49\x50\x56\x36\x5f\ +\x53\x52\x43\x52\x54\x5f\x54\x59\x50\x45\x5f\x32\x29\x20\x26\x26\0\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x2a\x6c\ +\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x6f\x66\x66\x73\x65\x74\x6f\x66\ +\x28\x73\x74\x72\x75\x63\x74\x20\x72\x74\x32\x5f\x68\x64\x72\x2c\x20\x61\x64\ +\x64\x72\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\ +\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\ +\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\ +\x20\x28\x65\x72\x72\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x36\x5f\ +\x65\x78\x74\x5f\x64\x73\x74\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x7d\x20\x5f\x5f\x61\x74\x74\x72\x69\x62\x75\x74\x65\x5f\ +\x5f\x28\x28\x70\x61\x63\x6b\x65\x64\x29\x29\x20\x6f\x70\x74\x20\x3d\x20\x7b\ +\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6f\ +\x70\x74\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x3d\x20\x28\x6f\x70\x74\x2e\x74\ +\x79\x70\x65\x20\x3d\x3d\x20\x49\x50\x56\x36\x5f\x54\x4c\x56\x5f\x50\x41\x44\ +\x31\x29\x20\x3f\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x69\x66\x20\x28\x6f\x70\x74\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x31\ +\x20\x3e\x3d\x20\x65\x78\x74\x5f\x68\x64\x72\x2e\x68\x64\x72\x6c\x65\x6e\x20\ +\x2a\x20\x38\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\ +\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\ +\x6b\x62\x2c\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x6f\x70\ +\x74\x5f\x6f\x66\x66\x73\x65\x74\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x6f\x70\x74\x2e\x74\x79\x70\x65\x20\ +\x3d\x3d\x20\x49\x50\x56\x36\x5f\x54\x4c\x56\x5f\x48\x41\x4f\x29\x20\x7b\0\x20\ +\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x6f\x70\ +\x74\x5f\x6f\x66\x66\x73\x65\x74\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\ +\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\ +\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\ +\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x36\x5f\x65\x78\x74\ +\x5f\x73\x72\x63\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x66\x72\x61\x67\x6d\x65\x6e\x74\ +\x65\x64\x20\x3d\x20\x74\x72\x75\x65\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x2a\ +\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x3d\x20\x28\x65\x78\x74\x5f\x68\ +\x64\x72\x2e\x68\x64\x72\x6c\x65\x6e\x20\x2b\x20\x31\x29\x20\x2a\x20\x38\x3b\0\ +\x20\x20\x20\x20\x20\x20\x20\x20\x2a\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\ 
+\x6c\x20\x3d\x20\x65\x78\x74\x5f\x68\x64\x72\x2e\x6e\x65\x78\x74\x68\x64\x72\ +\x3b\0\x20\x20\x20\x20\x66\x6f\x72\x20\x28\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\ +\x69\x6e\x74\x20\x69\x20\x3d\x20\x30\x3b\x20\x69\x20\x3c\x20\x49\x50\x36\x5f\ +\x45\x58\x54\x45\x4e\x53\x49\x4f\x4e\x53\x5f\x43\x4f\x55\x4e\x54\x3b\x20\x2b\ +\x2b\x69\x29\x20\x7b\0\x20\x20\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\x66\x20\ +\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x69\x70\x76\ +\x36\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\ +\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x69\x70\x76\ +\x36\x5f\x65\x78\x74\x5f\x64\x73\x74\x20\x26\x26\0\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x69\x66\x20\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\ +\x6f\x2e\x69\x73\x5f\x69\x70\x76\x36\x5f\x65\x78\x74\x5f\x73\x72\x63\x20\x26\ +\x26\0\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\x66\x20\ +\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x75\x64\x70\ +\x20\x26\x26\0\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\ +\x66\x20\x28\x63\x6f\x6e\x66\x69\x67\x2d\x3e\x68\x61\x73\x68\x5f\x74\x79\x70\ +\x65\x73\x20\x26\x20\x56\x49\x52\x54\x49\x4f\x5f\x4e\x45\x54\x5f\x52\x53\x53\ +\x5f\x48\x41\x53\x48\x5f\x54\x59\x50\x45\x5f\x49\x50\x76\x34\x29\x20\x7b\0\x20\ +\x20\x20\x20\x5f\x5f\x62\x75\x69\x6c\x74\x69\x6e\x5f\x6d\x65\x6d\x63\x70\x79\ +\x28\x26\x72\x73\x73\x5f\x69\x6e\x70\x75\x74\x5b\x2a\x62\x79\x74\x65\x73\x5f\ +\x77\x72\x69\x74\x74\x65\x6e\x5d\x2c\x20\x70\x74\x72\x2c\x20\x73\x69\x7a\x65\ +\x29\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\ +\x3e\x69\x73\x5f\x75\x64\x70\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\x20\x75\x64\x70\x68\x64\x72\x20\ +\x75\x64\x70\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\ +\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\ +\x62\x2c\x20\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x75\x64\x70\x2c\ +\x20\x73\x69\x7a\x65\x6f\x66\x28\x75\x64\x70\x29\x2c\0\x20\x20\x20\x20\x20\x20\ +\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\x66\x20\x28\x63\x6f\x6e\x66\x69\x67\ +\x2d\x3e\x68\x61\x73\x68\x5f\x74\x79\x70\x65\x73\x20\x26\x20\x56\x49\x52\x54\ +\x49\x4f\x5f\x4e\x45\x54\x5f\x52\x53\x53\x5f\x48\x41\x53\x48\x5f\x54\x59\x50\ +\x45\x5f\x49\x50\x76\x36\x29\x20\x7b\0\x20\x20\x20\x20\x66\x6f\x72\x20\x28\x62\ +\x79\x74\x65\x20\x3d\x20\x30\x3b\x20\x62\x79\x74\x65\x20\x3c\x20\x48\x41\x53\ +\x48\x5f\x43\x41\x4c\x43\x55\x4c\x41\x54\x49\x4f\x4e\x5f\x42\x55\x46\x46\x45\ +\x52\x5f\x53\x49\x5a\x45\x3b\x20\x62\x79\x74\x65\x2b\x2b\x29\x20\x7b\0\x20\x20\ +\x20\x20\x5f\x5f\x75\x33\x32\x20\x6c\x65\x66\x74\x6d\x6f\x73\x74\x5f\x33\x32\ +\x5f\x62\x69\x74\x73\x20\x3d\x20\x6b\x65\x79\x2d\x3e\x6c\x65\x66\x74\x6d\x6f\ +\x73\x74\x5f\x33\x32\x5f\x62\x69\x74\x73\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\ +\x5f\x5f\x75\x38\x20\x69\x6e\x70\x75\x74\x5f\x62\x79\x74\x65\x20\x3d\x20\x69\ +\x6e\x70\x75\x74\x5b\x62\x79\x74\x65\x5d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x69\x66\x20\x28\x69\x6e\x70\x75\x74\x5f\x62\x79\x74\x65\x20\ +\x26\x20\x28\x31\x20\x3c\x3c\x20\x37\x29\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\ +\x20\x20\x5f\x5f\x75\x38\x20\x6b\x65\x79\x5f\x62\x79\x74\x65\x20\x3d\x20\x6b\ +\x65\x79\x2d\x3e\x6e\x65\x78\x74\x5f\x62\x79\x74\x65\x5b\x62\x79\x74\x65\x5d\ 
+\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x28\x6c\x65\x66\x74\x6d\x6f\x73\x74\x5f\x33\x32\x5f\x62\x69\x74\x73\ +\x20\x3c\x3c\x20\x31\x29\x20\x7c\x20\x28\x28\x6b\x65\x79\x5f\x62\x79\x74\x65\ +\x20\x26\x20\x28\x31\x20\x3c\x3c\x20\x37\x29\x29\x20\x3e\x3e\x20\x37\x29\x3b\0\ +\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x68\x61\x73\x68\x29\x20\x7b\0\ +\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x5f\x5f\x75\x33\x32\x20\x74\ +\x61\x62\x6c\x65\x5f\x69\x64\x78\x20\x3d\x20\x68\x61\x73\x68\x20\x25\x20\x63\ +\x6f\x6e\x66\x69\x67\x2d\x3e\x69\x6e\x64\x69\x72\x65\x63\x74\x69\x6f\x6e\x73\ +\x5f\x6c\x65\x6e\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x71\x75\ +\x65\x75\x65\x20\x3d\x20\x62\x70\x66\x5f\x6d\x61\x70\x5f\x6c\x6f\x6f\x6b\x75\ +\x70\x5f\x65\x6c\x65\x6d\x28\x26\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\ +\x5f\x69\x6e\x64\x69\x72\x65\x63\x74\x69\x6f\x6e\x5f\x74\x61\x62\x6c\x65\x2c\0\ +\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x71\x75\x65\ +\x75\x65\x29\x20\x7b\0\x7d\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ +\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x2a\x71\x75\x65\x75\x65\x3b\0\x63\ +\x68\x61\x72\0\x5f\x6c\x69\x63\x65\x6e\x73\x65\0\x2e\x6d\x61\x70\x73\0\x6c\x69\ +\x63\x65\x6e\x73\x65\0\x62\x70\x66\x5f\x66\x6c\x6f\x77\x5f\x6b\x65\x79\x73\0\ +\x62\x70\x66\x5f\x73\x6f\x63\x6b\0\0\0\0\x9f\xeb\x01\0\x20\0\0\0\0\0\0\0\x14\0\ +\0\0\x14\0\0\0\x6c\x0c\0\0\x80\x0c\0\0\0\0\0\0\x08\0\0\0\x26\x02\0\0\x01\0\0\0\ +\0\0\0\0\x24\0\0\0\x10\0\0\0\x26\x02\0\0\xc6\0\0\0\0\0\0\0\x37\x02\0\0\x61\x02\ +\0\0\0\x50\x08\0\x10\0\0\0\x37\x02\0\0\x92\x02\0\0\x0b\x68\x08\0\x20\0\0\0\x37\ +\x02\0\0\0\0\0\0\0\0\0\0\x28\0\0\0\x37\x02\0\0\xa5\x02\0\0\x0e\x74\x08\0\x50\0\ +\0\0\x37\x02\0\0\xea\x02\0\0\x0b\x78\x08\0\x88\0\0\0\x37\x02\0\0\x2a\x03\0\0\ +\x10\x80\x08\0\x90\0\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x98\0\0\0\x37\x02\0\0\x2a\ +\x03\0\0\x10\x80\x08\0\xa0\0\0\0\x37\x02\0\0\x43\x03\0\0\x16\x84\x08\0\xa8\0\0\ +\0\x37\x02\0\0\x43\x03\0\0\x0d\x84\x08\0\xc0\0\0\0\x37\x02\0\0\x64\x03\0\0\x0a\ +\xfc\x05\0\xe8\0\0\0\x37\x02\0\0\x9b\x03\0\0\x1f\x0c\x06\0\x38\x01\0\0\x37\x02\ +\0\0\xcb\x03\0\0\x0f\xa0\x04\0\x40\x01\0\0\x37\x02\0\0\xe4\x03\0\0\x0c\x20\x04\ +\0\x50\x01\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x58\x01\0\0\x37\x02\0\0\xf8\x03\0\0\ +\x0b\x2c\x04\0\x90\x01\0\0\x37\x02\0\0\x3e\x04\0\0\x09\x34\x04\0\xa0\x01\0\0\ +\x37\x02\0\0\x4d\x04\0\0\x0d\x44\x04\0\xb8\x01\0\0\x37\x02\0\0\x4d\x04\0\0\x05\ +\x44\x04\0\xd8\x01\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\xe0\x01\0\0\x37\x02\0\0\x6b\ +\x04\0\0\x0f\x58\x04\0\x10\x02\0\0\x37\x02\0\0\x3e\x04\0\0\x09\x70\x04\0\x18\ +\x02\0\0\x37\x02\0\0\xb5\x04\0\0\x0c\x80\x04\0\x20\x02\0\0\x37\x02\0\0\xc5\x04\ +\0\0\x09\xbc\x04\0\x50\x02\0\0\x37\x02\0\0\xe1\x04\0\0\x17\xd4\x04\0\x60\x02\0\ +\0\x37\x02\0\0\xfc\x04\0\0\x16\xdc\x04\0\x80\x02\0\0\x37\x02\0\0\xe1\x04\0\0\ +\x17\xd4\x04\0\x88\x02\0\0\x37\x02\0\0\x1a\x05\0\0\x0f\xe0\x04\0\xc0\x02\0\0\ +\x37\x02\0\0\x5d\x05\0\0\x0d\xe8\x04\0\xc8\x02\0\0\x37\x02\0\0\x70\x05\0\0\x24\ +\0\x05\0\xd0\x02\0\0\x37\x02\0\0\x70\x05\0\0\x20\0\x05\0\xe0\x02\0\0\x37\x02\0\ +\0\x9d\x05\0\0\x1b\xf8\x04\0\xe8\x02\0\0\x37\x02\0\0\x9d\x05\0\0\x16\xf8\x04\0\ +\xf0\x02\0\0\x37\x02\0\0\xbe\x05\0\0\x1b\xfc\x04\0\xf8\x02\0\0\x37\x02\0\0\xbe\ +\x05\0\0\x16\xfc\x04\0\0\x03\0\0\x37\x02\0\0\xdf\x05\0\0\x1a\x08\x05\0\x08\x03\ +\0\0\x37\x02\0\0\x70\x05\0\0\x1d\0\x05\0\x10\x03\0\0\x37\x02\0\0\x02\x06\0\0\ +\x18\x0c\x05\0\x18\x03\0\0\x37\x02\0\0\x02\x06\0\0\x1c\x0c\x05\0\x30\x03\0\0\ 
+\x37\x02\0\0\x22\x06\0\0\x15\x68\x05\0\x40\x03\0\0\x37\x02\0\0\x22\x06\0\0\x1a\ +\x68\x05\0\x58\x03\0\0\x37\x02\0\0\x56\x06\0\0\x0d\x6c\x05\0\x78\x03\0\0\x37\ +\x02\0\0\x80\x06\0\0\x1a\x70\x05\0\x88\x03\0\0\x37\x02\0\0\x9e\x06\0\0\x1b\x78\ +\x05\0\xa8\x03\0\0\x37\x02\0\0\x80\x06\0\0\x1a\x70\x05\0\xb0\x03\0\0\x37\x02\0\ +\0\xc2\x06\0\0\x13\x7c\x05\0\xe8\x03\0\0\x37\x02\0\0\x13\x07\0\0\x11\x84\x05\0\ +\xf0\x03\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x10\x04\0\0\x37\x02\0\0\x2a\x07\0\0\ +\x15\x28\x06\0\x18\x04\0\0\x37\x02\0\0\x2a\x07\0\0\x09\x28\x06\0\x20\x04\0\0\ +\x37\x02\0\0\0\0\0\0\0\0\0\0\x70\x04\0\0\x37\x02\0\0\x49\x07\0\0\x19\x2c\x06\0\ +\x80\x04\0\0\x37\x02\0\0\x49\x07\0\0\x20\x2c\x06\0\xa0\x04\0\0\x37\x02\0\0\0\0\ +\0\0\0\0\0\0\xf0\x04\0\0\x37\x02\0\0\x6b\x07\0\0\x17\x14\x05\0\0\x05\0\0\x37\ +\x02\0\0\x86\x07\0\0\x18\x1c\x05\0\x30\x05\0\0\x37\x02\0\0\x6b\x07\0\0\x17\x14\ +\x05\0\x48\x05\0\0\x37\x02\0\0\xa7\x07\0\0\x0f\x20\x05\0\x80\x05\0\0\x37\x02\0\ +\0\x5d\x05\0\0\x0d\x28\x05\0\x88\x05\0\0\x37\x02\0\0\xec\x07\0\0\x1d\x38\x05\0\ +\xc8\x05\0\0\x37\x02\0\0\x0f\x08\0\0\x1d\x3c\x05\0\x08\x06\0\0\x37\x02\0\0\x32\ +\x08\0\0\x1b\x44\x05\0\x10\x06\0\0\x37\x02\0\0\x55\x08\0\0\x05\x30\x02\0\x58\ +\x06\0\0\x37\x02\0\0\x6d\x08\0\0\x19\xb8\x02\0\xd0\x06\0\0\x37\x02\0\0\0\0\0\0\ +\0\0\0\0\xd8\x06\0\0\x37\x02\0\0\x93\x08\0\0\x0f\xc8\x02\0\x10\x07\0\0\x37\x02\ +\0\0\x5d\x05\0\0\x0d\xd0\x02\0\x20\x07\0\0\x37\x02\0\0\xd8\x08\0\0\x0d\xe0\x02\ +\0\x40\x07\0\0\x37\x02\0\0\x07\x09\0\0\x20\xe4\x02\0\x68\x07\0\0\x37\x02\0\0\ +\x33\x09\0\0\x13\xec\x02\0\xa8\x07\0\0\x37\x02\0\0\x13\x07\0\0\x11\xf4\x02\0\ +\xb0\x07\0\0\x37\x02\0\0\x7b\x09\0\0\x19\x04\x03\0\xb8\x07\0\0\x37\x02\0\0\x7b\ +\x09\0\0\x34\x04\x03\0\xe0\x07\0\0\x37\x02\0\0\xb1\x09\0\0\x15\x18\x03\0\xf0\ +\x07\0\0\x37\x02\0\0\xf2\x09\0\0\x17\x14\x03\0\x30\x08\0\0\x37\x02\0\0\x29\x0a\ +\0\0\x15\x24\x03\0\x38\x08\0\0\x37\x02\0\0\x44\x0a\0\0\x27\x34\x03\0\x70\x08\0\ +\0\x37\x02\0\0\x6f\x0a\0\0\x27\x50\x03\0\x80\x08\0\0\x37\x02\0\0\x9f\x0a\0\0\ +\x1c\xb4\x03\0\x88\x08\0\0\x37\x02\0\0\xdb\x0a\0\0\x20\xc0\x03\0\x98\x08\0\0\ +\x37\x02\0\0\xdb\x0a\0\0\x2f\xc0\x03\0\xa0\x08\0\0\x37\x02\0\0\xdb\x0a\0\0\x36\ +\xc0\x03\0\xa8\x08\0\0\x37\x02\0\0\xdb\x0a\0\0\x15\xc0\x03\0\x18\x09\0\0\x37\ +\x02\0\0\x17\x0b\0\0\x43\x64\x03\0\x38\x09\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x40\ +\x09\0\0\x37\x02\0\0\x17\x0b\0\0\x17\x64\x03\0\x80\x09\0\0\x37\x02\0\0\x29\x0a\ +\0\0\x15\x6c\x03\0\x88\x09\0\0\x37\x02\0\0\x67\x0b\0\0\x19\x7c\x03\0\x90\x09\0\ +\0\x37\x02\0\0\x67\x0b\0\0\x15\x7c\x03\0\x98\x09\0\0\x37\x02\0\0\x97\x0b\0\0\ +\x19\x84\x03\0\xa0\x09\0\0\x37\x02\0\0\xc7\x0b\0\0\x1b\x80\x03\0\xe8\x09\0\0\ +\x37\x02\0\0\x02\x0c\0\0\x19\x94\x03\0\xf0\x09\0\0\x37\x02\0\0\x21\x0c\0\0\x2b\ +\xa4\x03\0\x10\x0a\0\0\x37\x02\0\0\x9f\x0a\0\0\x1f\xb4\x03\0\x30\x0a\0\0\x37\ +\x02\0\0\x50\x0c\0\0\x21\xd4\x03\0\x40\x0a\0\0\x37\x02\0\0\x78\x0c\0\0\x20\xe4\ +\x03\0\x48\x0a\0\0\x37\x02\0\0\x78\x0c\0\0\x2c\xe4\x03\0\x60\x0a\0\0\x37\x02\0\ +\0\x78\x0c\0\0\x14\xe4\x03\0\x70\x0a\0\0\x37\x02\0\0\xa8\x0c\0\0\x20\xe0\x03\0\ +\x80\x0a\0\0\x37\x02\0\0\x55\x08\0\0\x05\x30\x02\0\xb0\x0a\0\0\x37\x02\0\0\xd0\ +\x0c\0\0\x38\xc0\x02\0\xd0\x0a\0\0\x37\x02\0\0\xd0\x0c\0\0\x05\xc0\x02\0\xe8\ +\x0a\0\0\x37\x02\0\0\x55\x08\0\0\x05\x30\x02\0\xf8\x0a\0\0\x37\x02\0\0\x0e\x0d\ +\0\0\x1c\xc4\x06\0\x08\x0b\0\0\x37\x02\0\0\x0e\x0d\0\0\x10\xc4\x06\0\x10\x0b\0\ +\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x60\x0b\0\0\x37\x02\0\0\x49\x07\0\0\x19\xc8\x06\ +\0\x68\x0b\0\0\x37\x02\0\0\x49\x07\0\0\x20\xc8\x06\0\xa0\x0b\0\0\x37\x02\0\0\ 
+\x34\x0d\0\0\x2d\0\x07\0\xb0\x0b\0\0\x37\x02\0\0\x34\x0d\0\0\x1d\0\x07\0\xb8\ +\x0b\0\0\x37\x02\0\0\x34\x0d\0\0\x2d\0\x07\0\xc8\x0b\0\0\x37\x02\0\0\x63\x0d\0\ +\0\x2d\xd4\x06\0\xf8\x0b\0\0\x37\x02\0\0\x63\x0d\0\0\x1d\xd4\x06\0\x08\x0c\0\0\ +\x37\x02\0\0\x63\x0d\0\0\x2d\xd4\x06\0\x18\x0c\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\ +\xe8\x0c\0\0\x37\x02\0\0\x92\x0d\0\0\x20\x68\x06\0\xf0\x0c\0\0\x37\x02\0\0\x92\ +\x0d\0\0\x27\x68\x06\0\x18\x0d\0\0\x37\x02\0\0\xbb\x0d\0\0\x27\xa4\x06\0\x20\ +\x0d\0\0\x37\x02\0\0\xbb\x0d\0\0\x14\xa4\x06\0\x28\x0d\0\0\x37\x02\0\0\x04\x0e\ +\0\0\x05\x98\x01\0\x38\x0d\0\0\x37\x02\0\0\x04\x0e\0\0\x05\x98\x01\0\x60\x0d\0\ +\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x70\x0d\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x80\x0d\ +\0\0\x37\x02\0\0\x92\x0d\0\0\x20\x44\x07\0\x88\x0d\0\0\x37\x02\0\0\x92\x0d\0\0\ +\x27\x44\x07\0\xc0\x0d\0\0\x37\x02\0\0\x34\x0d\0\0\x2d\x7c\x07\0\xd0\x0d\0\0\ +\x37\x02\0\0\x34\x0d\0\0\x1d\x7c\x07\0\xd8\x0d\0\0\x37\x02\0\0\x34\x0d\0\0\x2d\ +\x7c\x07\0\xe8\x0d\0\0\x37\x02\0\0\x63\x0d\0\0\x2d\x50\x07\0\x18\x0e\0\0\x37\ +\x02\0\0\x63\x0d\0\0\x1d\x50\x07\0\x28\x0e\0\0\x37\x02\0\0\x63\x0d\0\0\x2d\x50\ +\x07\0\x40\x0e\0\0\x37\x02\0\0\x41\x0e\0\0\x1a\xa0\x05\0\x50\x0e\0\0\x37\x02\0\ +\0\x5f\x0e\0\0\x1b\xa8\x05\0\x60\x0e\0\0\x37\x02\0\0\x41\x0e\0\0\x1a\xa0\x05\0\ +\x68\x0e\0\0\x37\x02\0\0\x83\x0e\0\0\x13\xac\x05\0\xa0\x0e\0\0\x37\x02\0\0\x13\ +\x07\0\0\x11\xb4\x05\0\xb0\x0e\0\0\x37\x02\0\0\x55\x08\0\0\x05\x30\x02\0\xc0\ +\x0e\0\0\x37\x02\0\0\xd4\x0e\0\0\x27\xc8\x07\0\xd0\x0e\0\0\x37\x02\0\0\xd4\x0e\ +\0\0\x14\xc8\x07\0\xf0\x0e\0\0\x37\x02\0\0\x63\x0d\0\0\x2d\xcc\x07\0\0\x0f\0\0\ +\x37\x02\0\0\x63\x0d\0\0\x1d\xcc\x07\0\x08\x0f\0\0\x37\x02\0\0\x63\x0d\0\0\x2d\ +\xcc\x07\0\x30\x0f\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x80\x0f\0\0\x37\x02\0\0\x34\ +\x0d\0\0\x1d\xf8\x07\0\x88\x0f\0\0\x37\x02\0\0\x34\x0d\0\0\x2d\xf8\x07\0\x98\ +\x0f\0\0\x37\x02\0\0\x04\x0e\0\0\x05\x98\x01\0\xf0\x0f\0\0\x37\x02\0\0\x04\x0e\ +\0\0\x05\x98\x01\0\x30\x10\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x48\x10\0\0\x37\x02\ +\0\0\x1d\x0f\0\0\x05\xd0\x01\0\x50\x10\0\0\x37\x02\0\0\x5f\x0f\0\0\x23\xc4\x01\ +\0\x68\x10\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x70\x10\0\0\x37\x02\0\0\x93\x0f\0\0\ +\x1b\xd4\x01\0\x90\x10\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\xa8\x10\0\0\ +\x37\x02\0\0\xe3\x0f\0\0\x19\xd8\x01\0\xc0\x10\0\0\x37\x02\0\0\x11\x10\0\0\x27\ +\xfc\x01\0\xc8\x10\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\xd8\x10\0\0\x37\ +\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\xe0\x10\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\ +\x01\0\x08\x11\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\x20\x11\0\0\x37\x02\0\ +\0\x11\x10\0\0\x27\xfc\x01\0\x28\x11\0\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\ +\x30\x11\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\x58\x11\0\0\x37\x02\0\0\x11\ +\x10\0\0\x27\xfc\x01\0\x60\x11\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\x78\ +\x11\0\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\x80\x11\0\0\x37\x02\0\0\xba\x0f\ +\0\0\x11\xe8\x01\0\xa8\x11\0\0\x37\x02\0\0\x11\x10\0\0\x27\xfc\x01\0\xb0\x11\0\ +\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\xc8\x11\0\0\x37\x02\0\0\x11\x10\0\0\ +\x2d\xfc\x01\0\xd0\x11\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\xf8\x11\0\0\ +\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\x10\x12\0\0\x37\x02\0\0\x11\x10\0\0\x27\ +\xfc\x01\0\x18\x12\0\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\x20\x12\0\0\x37\ +\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\x48\x12\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\ +\x01\0\x60\x12\0\0\x37\x02\0\0\x11\x10\0\0\x27\xfc\x01\0\x68\x12\0\0\x37\x02\0\ +\0\x11\x10\0\0\x2d\xfc\x01\0\x70\x12\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\ 
+\x98\x12\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\xb0\x12\0\0\x37\x02\0\0\x11\ +\x10\0\0\x27\xfc\x01\0\xb8\x12\0\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\xc0\ +\x12\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\xe0\x12\0\0\x37\x02\0\0\x11\x10\ +\0\0\x46\xfc\x01\0\xe8\x12\0\0\x37\x02\0\0\x11\x10\0\0\x27\xfc\x01\0\xf0\x12\0\ +\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\xf8\x12\0\0\x37\x02\0\0\x1d\x0f\0\0\ +\x3d\xd0\x01\0\x08\x13\0\0\x37\x02\0\0\x1d\x0f\0\0\x05\xd0\x01\0\x18\x13\0\0\ +\x37\x02\0\0\x5d\x10\0\0\x0d\x98\x08\0\x30\x13\0\0\x37\x02\0\0\x5d\x10\0\0\x0d\ +\x98\x08\0\x38\x13\0\0\x37\x02\0\0\x71\x10\0\0\x2e\x9c\x08\0\x58\x13\0\0\x37\ +\x02\0\0\x71\x10\0\0\x24\x9c\x08\0\x70\x13\0\0\x37\x02\0\0\x71\x10\0\0\x13\x9c\ +\x08\0\x80\x13\0\0\x37\x02\0\0\x71\x10\0\0\x2e\x9c\x08\0\x88\x13\0\0\x37\x02\0\ +\0\xb0\x10\0\0\x15\xa8\x08\0\xa0\x13\0\0\x37\x02\0\0\xf8\x10\0\0\x11\xb4\x08\0\ +\xa8\x13\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\xc8\x13\0\0\x37\x02\0\0\x11\x11\0\0\ +\x01\xd8\x08\0\xd0\x13\0\0\x37\x02\0\0\x13\x11\0\0\x18\xb8\x08\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\xde\0\0\0\0\0\x03\0\xc8\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x7a\x01\0\0\0\ +\0\x03\0\xb8\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xff\0\0\0\0\0\x03\0\xa8\x13\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\xc7\0\0\0\0\0\x03\0\xd0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\x2c\x02\0\0\0\0\x03\0\x20\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xf7\0\0\0\0\0\x03\0\ +\xe8\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1c\x02\0\0\0\0\x03\0\x10\x04\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\x28\x01\0\0\0\0\x03\0\xe0\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xf3\ +\x01\0\0\0\0\x03\0\x30\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xeb\x01\0\0\0\0\x03\0\ +\x38\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x44\x02\0\0\0\0\x03\0\xf0\x03\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\xe3\x01\0\0\0\0\x03\0\xf8\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x18\ +\x01\0\0\0\0\x03\0\xe8\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x30\x01\0\0\0\0\x03\0\ +\xa0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xa9\x01\0\0\0\0\x03\0\x40\x10\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\x51\x01\0\0\0\0\x03\0\x78\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x5c\ +\x02\0\0\0\0\x03\0\xb0\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\x02\0\0\0\0\x03\0\ +\x50\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xc2\x01\0\0\0\0\x03\0\xc0\x06\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\x69\x01\0\0\0\0\x03\0\x20\x07\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x61\ +\x01\0\0\0\0\x03\0\x60\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x38\x01\0\0\0\0\x03\0\ +\x30\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x20\x01\0\0\0\0\x03\0\x40\x0a\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\xba\x01\0\0\0\0\x03\0\xe0\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xa1\ +\x01\0\0\0\0\x03\0\x48\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\x01\0\0\0\0\x03\0\ +\x18\x09\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xfb\x01\0\0\0\0\x03\0\x80\x08\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\x99\x01\0\0\0\0\x03\0\xf8\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x59\ +\x01\0\0\0\0\x03\0\x50\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x54\x02\0\0\0\0\x03\0\ +\x08\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xef\0\0\0\0\0\x03\0\xe8\x0a\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\x4c\x02\0\0\0\0\x03\0\xb0\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x24\ +\x02\0\0\0\0\x03\0\xd8\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x89\x01\0\0\0\0\x03\0\ +\x80\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x10\x01\0\0\0\0\x03\0\xb0\x0b\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\xd6\0\0\0\0\0\x03\0\xc8\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x14\ +\x02\0\0\0\0\x03\0\xf8\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb2\x01\0\0\0\0\x03\0\ +\x18\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xdb\x01\0\0\0\0\x03\0\x10\x0c\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\x3c\x02\0\0\0\0\x03\0\x18\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x91\ 
+\x01\0\0\0\0\x03\0\x60\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81\x01\0\0\0\0\x03\0\ +\xc0\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xe7\0\0\0\0\0\x03\0\xd0\x0d\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\x34\x02\0\0\0\0\x03\0\xe8\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xd3\ +\x01\0\0\0\0\x03\0\x18\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\x01\0\0\0\0\x03\0\0\ +\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xce\0\0\0\0\0\x03\0\x18\x0f\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\x0b\x02\0\0\0\0\x03\0\xf0\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xca\x01\0\ +\0\0\0\x03\0\x30\x10\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x71\x01\0\0\0\0\x03\0\x60\x10\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x48\x01\0\0\0\0\x03\0\x18\x13\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\x64\x02\0\0\0\0\x03\0\xd0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x4e\0\0\0\ +\x12\0\x03\0\0\0\0\0\0\0\0\0\xe0\x13\0\0\0\0\0\0\x33\0\0\0\x11\0\x05\0\0\0\0\0\ +\0\0\0\0\x20\0\0\0\0\0\0\0\x01\0\0\0\x11\0\x05\0\x20\0\0\0\0\0\0\0\x20\0\0\0\0\ +\0\0\0\x90\0\0\0\x11\0\x05\0\x40\0\0\0\0\0\0\0\x20\0\0\0\0\0\0\0\x87\0\0\0\x11\ +\0\x06\0\0\0\0\0\0\0\0\0\x07\0\0\0\0\0\0\0\x28\0\0\0\0\0\0\0\x01\0\0\0\x37\0\0\ +\0\x50\0\0\0\0\0\0\0\x01\0\0\0\x38\0\0\0\x88\x13\0\0\0\0\0\0\x01\0\0\0\x39\0\0\ +\0\xd8\x04\0\0\0\0\0\0\x04\0\0\0\x37\0\0\0\xe4\x04\0\0\0\0\0\0\x04\0\0\0\x38\0\ +\0\0\xf0\x04\0\0\0\0\0\0\x04\0\0\0\x39\0\0\0\x08\x05\0\0\0\0\0\0\x04\0\0\0\x3a\ +\0\0\0\x2c\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\0\0\0\0\0\0\0\x04\0\0\0\x01\0\ +\0\0\x50\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\ +\0\x70\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\x90\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\xb0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\xd0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\xf0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\x10\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\ +\0\x30\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\ +\0\0\x50\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x01\0\0\0\0\0\0\x04\0\0\0\x01\ +\0\0\0\x70\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x01\0\0\0\0\0\0\x04\0\0\0\ +\x01\0\0\0\x90\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x01\0\0\0\0\0\0\x04\0\0\ +\0\x01\0\0\0\xb0\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x01\0\0\0\0\0\0\x04\0\ +\0\0\x01\0\0\0\xd0\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x01\0\0\0\0\0\0\x04\ +\0\0\0\x01\0\0\0\xf0\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x02\0\0\0\0\0\0\x04\ +\0\0\0\x01\0\0\0\x10\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x02\0\0\0\0\0\0\ +\x04\0\0\0\x01\0\0\0\x30\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x02\0\0\0\0\0\ +\0\x04\0\0\0\x01\0\0\0\x50\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x02\0\0\0\0\ +\0\0\x04\0\0\0\x01\0\0\0\x70\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x02\0\0\0\ +\0\0\0\x04\0\0\0\x01\0\0\0\x90\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x02\0\0\ +\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x02\0\ +\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x02\ +\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x03\ +\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x10\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\ +\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\x40\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\ +\0\x60\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\ +\0\0\x80\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x03\0\0\0\0\0\0\x04\0\0\0\x01\ 
+\0\0\0\xa0\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x03\0\0\0\0\0\0\x04\0\0\0\ +\x01\0\0\0\xc0\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x03\0\0\0\0\0\0\x04\0\0\ +\0\x01\0\0\0\xe0\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x03\0\0\0\0\0\0\x04\0\ +\0\0\x01\0\0\0\0\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x10\x04\0\0\0\0\0\0\x04\0\ +\0\0\x01\0\0\0\x20\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x04\0\0\0\0\0\0\x04\ +\0\0\0\x01\0\0\0\x40\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x04\0\0\0\0\0\0\ +\x04\0\0\0\x01\0\0\0\x60\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x04\0\0\0\0\0\ +\0\x04\0\0\0\x01\0\0\0\x80\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x04\0\0\0\0\ +\0\0\x04\0\0\0\x01\0\0\0\xa0\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x04\0\0\0\ +\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x04\0\0\ +\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x04\0\ +\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x10\x05\0\ +\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x05\ +\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\ +\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\x70\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\ +\0\x90\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\ +\0\0\xb0\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x05\0\0\0\0\0\0\x04\0\0\0\x01\ +\0\0\0\xd0\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x05\0\0\0\0\0\0\x04\0\0\0\ +\x01\0\0\0\xf0\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x06\0\0\0\0\0\0\x04\0\0\0\ +\x01\0\0\0\x10\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x06\0\0\0\0\0\0\x04\0\0\ +\0\x01\0\0\0\x30\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x06\0\0\0\0\0\0\x04\0\ +\0\0\x01\0\0\0\x50\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x06\0\0\0\0\0\0\x04\ +\0\0\0\x01\0\0\0\x70\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x06\0\0\0\0\0\0\ +\x04\0\0\0\x01\0\0\0\x90\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x06\0\0\0\0\0\ +\0\x04\0\0\0\x01\0\0\0\xb0\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x06\0\0\0\0\ +\0\0\x04\0\0\0\x01\0\0\0\xd0\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x06\0\0\0\ +\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x07\0\0\0\ +\0\0\0\x04\0\0\0\x01\0\0\0\x10\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x07\0\0\ +\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x07\0\ +\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x07\ +\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\ +\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\xa0\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\ +\0\xc0\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\ +\0\0\xe0\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x07\0\0\0\0\0\0\x04\0\0\0\x01\ +\0\0\0\0\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x10\x08\0\0\0\0\0\0\x04\0\0\0\x01\ +\0\0\0\x20\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x08\0\0\0\0\0\0\x04\0\0\0\ +\x01\0\0\0\x40\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x08\0\0\0\0\0\0\x04\0\0\ +\0\x01\0\0\0\x60\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x08\0\0\0\0\0\0\x04\0\ +\0\0\x01\0\0\0\x80\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x08\0\0\0\0\0\0\x04\ +\0\0\0\x01\0\0\0\xa0\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x08\0\0\0\0\0\0\ +\x04\0\0\0\x01\0\0\0\xc0\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x08\0\0\0\0\0\ +\0\x04\0\0\0\x01\0\0\0\xe0\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x08\0\0\0\0\ 
+\0\0\x04\0\0\0\x01\0\0\0\0\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x10\x09\0\0\0\0\ +\0\0\x04\0\0\0\x01\0\0\0\x20\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x09\0\0\0\ +\0\0\0\x04\0\0\0\x01\0\0\0\x40\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x09\0\0\ +\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x09\0\ +\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x09\ +\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\ +\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\xd0\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\ +\0\xf0\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\ +\0\x10\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\ +\0\0\x30\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x0a\0\0\0\0\0\0\x04\0\0\0\x01\ +\0\0\0\x50\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x0a\0\0\0\0\0\0\x04\0\0\0\ +\x01\0\0\0\x70\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x0a\0\0\0\0\0\0\x04\0\0\ +\0\x01\0\0\0\x90\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x0a\0\0\0\0\0\0\x04\0\ +\0\0\x01\0\0\0\xb0\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x0a\0\0\0\0\0\0\x04\ +\0\0\0\x01\0\0\0\xd0\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x0a\0\0\0\0\0\0\ +\x04\0\0\0\x01\0\0\0\xf0\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x0b\0\0\0\0\0\0\ +\x04\0\0\0\x01\0\0\0\x10\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x0b\0\0\0\0\0\ +\0\x04\0\0\0\x01\0\0\0\x30\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x0b\0\0\0\0\ +\0\0\x04\0\0\0\x01\0\0\0\x50\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x0b\0\0\0\ +\0\0\0\x04\0\0\0\x01\0\0\0\x70\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x0b\0\0\ +\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x0b\0\ +\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x0b\ +\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\ +\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\ +\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x10\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ +\x20\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\ +\0\x40\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\ +\0\0\x60\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x0c\0\0\0\0\0\0\x04\0\0\0\x01\ +\0\0\0\x80\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x0c\0\0\0\0\0\0\x04\0\0\0\ +\x01\0\0\0\x40\x41\x42\x43\x44\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\ +\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\x5f\x6b\x65\x79\0\x2e\x74\x65\x78\x74\0\ +\x2e\x72\x65\x6c\x2e\x42\x54\x46\x2e\x65\x78\x74\0\x2e\x6d\x61\x70\x73\0\x74\ +\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x63\x6f\x6e\x66\x69\x67\x75\x72\ +\x61\x74\x69\x6f\x6e\x73\0\x74\x75\x6e\x5f\x72\x73\x73\x5f\x73\x74\x65\x65\x72\ +\x69\x6e\x67\x5f\x70\x72\x6f\x67\0\x2e\x72\x65\x6c\x74\x75\x6e\x5f\x72\x73\x73\ +\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\0\x2e\x6c\x6c\x76\x6d\x5f\x61\x64\x64\x72\ +\x73\x69\x67\0\x5f\x6c\x69\x63\x65\x6e\x73\x65\0\x74\x61\x70\x5f\x72\x73\x73\ +\x5f\x6d\x61\x70\x5f\x69\x6e\x64\x69\x72\x65\x63\x74\x69\x6f\x6e\x5f\x74\x61\ +\x62\x6c\x65\0\x2e\x73\x74\x72\x74\x61\x62\0\x2e\x73\x79\x6d\x74\x61\x62\0\x2e\ +\x72\x65\x6c\x2e\x42\x54\x46\0\x4c\x42\x42\x30\x5f\x39\0\x4c\x42\x42\x30\x5f\ +\x39\x39\0\x4c\x42\x42\x30\x5f\x37\x39\0\x4c\x42\x42\x30\x5f\x31\x30\x39\0\x4c\ +\x42\x42\x30\x5f\x38\x38\0\x4c\x42\x42\x30\x5f\x34\x38\0\x4c\x42\x42\x30\x5f\ +\x31\x38\0\x4c\x42\x42\x30\x5f\x31\x30\x38\0\x4c\x42\x42\x30\x5f\x39\x37\0\x4c\ 
+\x42\x42\x30\x5f\x37\x37\0\x4c\x42\x42\x30\x5f\x36\x37\0\x4c\x42\x42\x30\x5f\ +\x34\x37\0\x4c\x42\x42\x30\x5f\x31\x37\0\x4c\x42\x42\x30\x5f\x36\x36\0\x4c\x42\ +\x42\x30\x5f\x34\x36\0\x4c\x42\x42\x30\x5f\x33\x36\0\x4c\x42\x42\x30\x5f\x31\ +\x30\x36\0\x4c\x42\x42\x30\x5f\x35\x35\0\x4c\x42\x42\x30\x5f\x34\x35\0\x4c\x42\ +\x42\x30\x5f\x33\x35\0\x4c\x42\x42\x30\x5f\x32\x35\0\x4c\x42\x42\x30\x5f\x31\ +\x30\x35\0\x4c\x42\x42\x30\x5f\x34\0\x4c\x42\x42\x30\x5f\x39\x34\0\x4c\x42\x42\ +\x30\x5f\x38\x34\0\x4c\x42\x42\x30\x5f\x35\x34\0\x4c\x42\x42\x30\x5f\x34\x34\0\ +\x4c\x42\x42\x30\x5f\x33\x34\0\x4c\x42\x42\x30\x5f\x31\x30\x34\0\x4c\x42\x42\ +\x30\x5f\x38\x33\0\x4c\x42\x42\x30\x5f\x35\x33\0\x4c\x42\x42\x30\x5f\x32\x33\0\ +\x4c\x42\x42\x30\x5f\x31\x30\x33\0\x4c\x42\x42\x30\x5f\x39\x32\0\x4c\x42\x42\ +\x30\x5f\x38\x32\0\x4c\x42\x42\x30\x5f\x37\x32\0\x4c\x42\x42\x30\x5f\x36\x32\0\ +\x4c\x42\x42\x30\x5f\x35\x32\0\x4c\x42\x42\x30\x5f\x34\x32\0\x4c\x42\x42\x30\ +\x5f\x32\x32\0\x4c\x42\x42\x30\x5f\x31\x30\x32\0\x4c\x42\x42\x30\x5f\x38\x31\0\ +\x4c\x42\x42\x30\x5f\x36\x31\0\x4c\x42\x42\x30\x5f\x35\x31\0\x4c\x42\x42\x30\ +\x5f\x31\x31\0\x4c\x42\x42\x30\x5f\x39\x30\0\x4c\x42\x42\x30\x5f\x37\x30\0\x4c\ +\x42\x42\x30\x5f\x36\x30\0\x4c\x42\x42\x30\x5f\x35\x30\0\x4c\x42\x42\x30\x5f\ +\x34\x30\0\x4c\x42\x42\x30\x5f\x32\x30\0\x4c\x42\x42\x30\x5f\x31\x31\x30\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xae\0\0\0\x03\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x25\x4a\0\0\0\0\0\0\x6d\x02\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1a\0\0\0\x01\0\0\0\x06\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x04\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\x68\0\0\0\x01\0\0\0\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\x40\0\0\0\0\0\0\0\xe0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\x64\0\0\0\x09\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\x3d\0\0\ +\0\0\0\0\x30\0\0\0\0\0\0\0\x0c\0\0\0\x03\0\0\0\x08\0\0\0\0\0\0\0\x10\0\0\0\0\0\ +\0\0\x2d\0\0\0\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x20\x14\0\0\0\0\0\0\ +\x60\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x88\0\0\0\ +\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x80\x14\0\0\0\0\0\0\x07\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xc2\0\0\0\x01\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x88\x14\0\0\0\0\0\0\x8d\x16\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xbe\0\0\0\x09\0\0\0\x40\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\x70\x3d\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\x0c\0\0\0\x07\0\0\0\x08\ +\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x24\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\x18\x2b\0\0\0\0\0\0\xa0\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x04\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\x20\0\0\0\x09\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb0\ +\x3d\0\0\0\0\0\0\x70\x0c\0\0\0\0\0\0\x0c\0\0\0\x09\0\0\0\x08\0\0\0\0\0\0\0\x10\ +\0\0\0\0\0\0\0\x79\0\0\0\x03\x4c\xff\x6f\0\0\0\x80\0\0\0\0\0\0\0\0\0\0\0\0\x20\ +\x4a\0\0\0\0\0\0\x05\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\xb6\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb8\x37\0\0\0\0\0\0\ +\x88\x05\0\0\0\0\0\0\x01\0\0\0\x36\0\0\0\x08\0\0\0\0\0\0\0\x18\0\0\0\0\0\0\0"; +} + +#ifdef __cplusplus +struct rss_bpf *rss_bpf::open(const struct bpf_object_open_opts *opts) { return rss_bpf__open_opts(opts); } +struct rss_bpf *rss_bpf::open_and_load() { return rss_bpf__open_and_load(); } +int rss_bpf::load(struct rss_bpf 
*skel) { return rss_bpf__load(skel); } +int rss_bpf::attach(struct rss_bpf *skel) { return rss_bpf__attach(skel); } +void rss_bpf::detach(struct rss_bpf *skel) { rss_bpf__detach(skel); } +void rss_bpf::destroy(struct rss_bpf *skel) { rss_bpf__destroy(skel); } +const void *rss_bpf::elf_bytes(size_t *sz) { return rss_bpf__elf_bytes(sz); } +#endif /* __cplusplus */ + +__attribute__((unused)) static void +rss_bpf__assert(struct rss_bpf *s __attribute__((unused))) +{ +#ifdef __cplusplus +#define _Static_assert static_assert +#endif +#ifdef __cplusplus +#undef _Static_assert +#endif } #endif /* __RSS_BPF_SKEL_H__ */ diff --git a/fpu/softfloat.c b/fpu/softfloat.c index c7454c3eb1..108f9cb224 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -5135,7 +5135,7 @@ float32 float32_exp2(float32 a, float_status *status) float64_unpack_canonical(&rp, float64_one, status); for (i = 0 ; i < 15 ; i++) { float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status); - rp = *parts_muladd(&tp, &xp, &rp, 0, status); + rp = *parts_muladd(&tp, &xnp, &rp, 0, status); xnp = *parts_mul(&xnp, &xp, status); } diff --git a/gdb-xml/aarch64-pauth.xml b/gdb-xml/aarch64-pauth.xml new file mode 100644 index 0000000000..0a5c566d66 --- /dev/null +++ b/gdb-xml/aarch64-pauth.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + diff --git a/gdb-xml/hexagon-core.xml b/gdb-xml/hexagon-core.xml new file mode 100644 index 0000000000..e181163cff --- /dev/null +++ b/gdb-xml/hexagon-core.xml @@ -0,0 +1,84 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/gdb-xml/hexagon-hvx.xml b/gdb-xml/hexagon-hvx.xml new file mode 100644 index 0000000000..5f2e220733 --- /dev/null +++ b/gdb-xml/hexagon-hvx.xml @@ -0,0 +1,96 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/gdb-xml/s390-virt-kvm.xml b/gdb-xml/s390-virt-kvm.xml new file mode 100644 index 0000000000..a256eddaf5 --- /dev/null +++ b/gdb-xml/s390-virt-kvm.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/gdb-xml/s390-virt.xml b/gdb-xml/s390-virt.xml index e2e9a7ad3c..438eb68aab 100644 --- a/gdb-xml/s390-virt.xml +++ b/gdb-xml/s390-virt.xml @@ -11,8 +11,4 @@ - - - - diff --git a/gdbstub/gdbstub.c b/gdbstub/gdbstub.c index 42f8717ca5..2487a3d642 100644 --- a/gdbstub/gdbstub.c +++ b/gdbstub/gdbstub.c @@ -24,299 +24,27 @@ */ #include "qemu/osdep.h" -#include "qapi/error.h" -#include "qemu/error-report.h" #include "qemu/ctype.h" #include "qemu/cutils.h" #include "qemu/module.h" +#include "qemu/error-report.h" #include "trace.h" #include "exec/gdbstub.h" +#include "gdbstub/syscalls.h" #ifdef CONFIG_USER_ONLY -#include "qemu.h" +#include "gdbstub/user.h" #else -#include "monitor/monitor.h" -#include "chardev/char.h" -#include "chardev/char-fe.h" #include "hw/cpu/cluster.h" #include "hw/boards.h" #endif -#define MAX_PACKET_LENGTH 4096 - -#include "qemu/sockets.h" #include "sysemu/hw_accel.h" #include "sysemu/runstate.h" -#include "semihosting/semihost.h" -#include "exec/exec-all.h" +#include "exec/replay-core.h" #include "exec/hwaddr.h" -#include "sysemu/replay.h" #include "internals.h" -#ifdef CONFIG_USER_ONLY -#define GDB_ATTACHED "0" -#else -#define GDB_ATTACHED "1" -#endif - -#ifndef CONFIG_USER_ONLY -static int phy_memory_mode; -#endif - -static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr, - uint8_t *buf, int len, bool 
is_write) -{ - CPUClass *cc; - -#ifndef CONFIG_USER_ONLY - if (phy_memory_mode) { - if (is_write) { - cpu_physical_memory_write(addr, buf, len); - } else { - cpu_physical_memory_read(addr, buf, len); - } - return 0; - } -#endif - - cc = CPU_GET_CLASS(cpu); - if (cc->memory_rw_debug) { - return cc->memory_rw_debug(cpu, addr, buf, len, is_write); - } - return cpu_memory_rw_debug(cpu, addr, buf, len, is_write); -} - -/* Return the GDB index for a given vCPU state. - * - * For user mode this is simply the thread id. In system mode GDB - * numbers CPUs from 1 as 0 is reserved as an "any cpu" index. - */ -static inline int cpu_gdb_index(CPUState *cpu) -{ -#if defined(CONFIG_USER_ONLY) - TaskState *ts = (TaskState *) cpu->opaque; - return ts ? ts->ts_tid : -1; -#else - return cpu->cpu_index + 1; -#endif -} - -enum { - GDB_SIGNAL_0 = 0, - GDB_SIGNAL_INT = 2, - GDB_SIGNAL_QUIT = 3, - GDB_SIGNAL_TRAP = 5, - GDB_SIGNAL_ABRT = 6, - GDB_SIGNAL_ALRM = 14, - GDB_SIGNAL_IO = 23, - GDB_SIGNAL_XCPU = 24, - GDB_SIGNAL_UNKNOWN = 143 -}; - -#ifdef CONFIG_USER_ONLY - -/* Map target signal numbers to GDB protocol signal numbers and vice - * versa. For user emulation's currently supported systems, we can - * assume most signals are defined. - */ - -static int gdb_signal_table[] = { - 0, - TARGET_SIGHUP, - TARGET_SIGINT, - TARGET_SIGQUIT, - TARGET_SIGILL, - TARGET_SIGTRAP, - TARGET_SIGABRT, - -1, /* SIGEMT */ - TARGET_SIGFPE, - TARGET_SIGKILL, - TARGET_SIGBUS, - TARGET_SIGSEGV, - TARGET_SIGSYS, - TARGET_SIGPIPE, - TARGET_SIGALRM, - TARGET_SIGTERM, - TARGET_SIGURG, - TARGET_SIGSTOP, - TARGET_SIGTSTP, - TARGET_SIGCONT, - TARGET_SIGCHLD, - TARGET_SIGTTIN, - TARGET_SIGTTOU, - TARGET_SIGIO, - TARGET_SIGXCPU, - TARGET_SIGXFSZ, - TARGET_SIGVTALRM, - TARGET_SIGPROF, - TARGET_SIGWINCH, - -1, /* SIGLOST */ - TARGET_SIGUSR1, - TARGET_SIGUSR2, -#ifdef TARGET_SIGPWR - TARGET_SIGPWR, -#else - -1, -#endif - -1, /* SIGPOLL */ - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, -#ifdef __SIGRTMIN - __SIGRTMIN + 1, - __SIGRTMIN + 2, - __SIGRTMIN + 3, - __SIGRTMIN + 4, - __SIGRTMIN + 5, - __SIGRTMIN + 6, - __SIGRTMIN + 7, - __SIGRTMIN + 8, - __SIGRTMIN + 9, - __SIGRTMIN + 10, - __SIGRTMIN + 11, - __SIGRTMIN + 12, - __SIGRTMIN + 13, - __SIGRTMIN + 14, - __SIGRTMIN + 15, - __SIGRTMIN + 16, - __SIGRTMIN + 17, - __SIGRTMIN + 18, - __SIGRTMIN + 19, - __SIGRTMIN + 20, - __SIGRTMIN + 21, - __SIGRTMIN + 22, - __SIGRTMIN + 23, - __SIGRTMIN + 24, - __SIGRTMIN + 25, - __SIGRTMIN + 26, - __SIGRTMIN + 27, - __SIGRTMIN + 28, - __SIGRTMIN + 29, - __SIGRTMIN + 30, - __SIGRTMIN + 31, - -1, /* SIGCANCEL */ - __SIGRTMIN, - __SIGRTMIN + 32, - __SIGRTMIN + 33, - __SIGRTMIN + 34, - __SIGRTMIN + 35, - __SIGRTMIN + 36, - __SIGRTMIN + 37, - __SIGRTMIN + 38, - __SIGRTMIN + 39, - __SIGRTMIN + 40, - __SIGRTMIN + 41, - __SIGRTMIN + 42, - __SIGRTMIN + 43, - __SIGRTMIN + 44, - __SIGRTMIN + 45, - __SIGRTMIN + 46, - __SIGRTMIN + 47, - __SIGRTMIN + 48, - __SIGRTMIN + 49, - __SIGRTMIN + 50, - __SIGRTMIN + 51, - __SIGRTMIN + 52, - __SIGRTMIN + 53, - __SIGRTMIN + 54, - __SIGRTMIN + 55, - __SIGRTMIN + 56, - __SIGRTMIN + 57, - __SIGRTMIN + 58, - __SIGRTMIN + 59, - __SIGRTMIN + 60, - __SIGRTMIN + 61, - __SIGRTMIN + 62, - __SIGRTMIN + 63, - __SIGRTMIN + 64, - __SIGRTMIN + 65, - __SIGRTMIN + 66, - __SIGRTMIN + 67, - __SIGRTMIN + 68, - __SIGRTMIN + 69, - __SIGRTMIN + 70, - __SIGRTMIN + 71, - __SIGRTMIN + 72, - __SIGRTMIN + 73, - __SIGRTMIN + 74, - __SIGRTMIN + 75, - __SIGRTMIN + 76, - __SIGRTMIN + 77, - __SIGRTMIN + 78, - __SIGRTMIN + 79, - __SIGRTMIN + 80, - 
__SIGRTMIN + 81, - __SIGRTMIN + 82, - __SIGRTMIN + 83, - __SIGRTMIN + 84, - __SIGRTMIN + 85, - __SIGRTMIN + 86, - __SIGRTMIN + 87, - __SIGRTMIN + 88, - __SIGRTMIN + 89, - __SIGRTMIN + 90, - __SIGRTMIN + 91, - __SIGRTMIN + 92, - __SIGRTMIN + 93, - __SIGRTMIN + 94, - __SIGRTMIN + 95, - -1, /* SIGINFO */ - -1, /* UNKNOWN */ - -1, /* DEFAULT */ - -1, - -1, - -1, - -1, - -1, - -1 -#endif -}; -#else -/* In system mode we only need SIGINT and SIGTRAP; other signals - are not yet supported. */ - -enum { - TARGET_SIGINT = 2, - TARGET_SIGTRAP = 5 -}; - -static int gdb_signal_table[] = { - -1, - -1, - TARGET_SIGINT, - -1, - -1, - TARGET_SIGTRAP -}; -#endif - -#ifdef CONFIG_USER_ONLY -static int target_signal_to_gdb (int sig) -{ - int i; - for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++) - if (gdb_signal_table[i] == sig) - return i; - return GDB_SIGNAL_UNKNOWN; -} -#endif - -static int gdb_signal_to_target (int sig) -{ - if (sig < ARRAY_SIZE (gdb_signal_table)) - return gdb_signal_table[sig]; - else - return -1; -} - typedef struct GDBRegisterState { int base_reg; int num_regs; @@ -326,56 +54,9 @@ typedef struct GDBRegisterState { struct GDBRegisterState *next; } GDBRegisterState; -typedef struct GDBProcess { - uint32_t pid; - bool attached; +GDBState gdbserver_state; - char target_xml[1024]; -} GDBProcess; - -enum RSState { - RS_INACTIVE, - RS_IDLE, - RS_GETLINE, - RS_GETLINE_ESC, - RS_GETLINE_RLE, - RS_CHKSUM1, - RS_CHKSUM2, -}; -typedef struct GDBState { - bool init; /* have we been initialised? */ - CPUState *c_cpu; /* current CPU for step/continue ops */ - CPUState *g_cpu; /* current CPU for other ops */ - CPUState *query_cpu; /* for q{f|s}ThreadInfo */ - enum RSState state; /* parsing state */ - char line_buf[MAX_PACKET_LENGTH]; - int line_buf_index; - int line_sum; /* running checksum */ - int line_csum; /* checksum at the end of the packet */ - GByteArray *last_packet; - int signal; -#ifdef CONFIG_USER_ONLY - int fd; - char *socket_path; - int running_state; -#else - CharBackend chr; - Chardev *mon_chr; -#endif - bool multiprocess; - GDBProcess *processes; - int process_num; - char syscall_buf[256]; - gdb_syscall_complete_cb current_syscall_cb; - GString *str_buf; - GByteArray *mem_buf; - int sstep_flags; - int supported_sstep_flags; -} GDBState; - -static GDBState gdbserver_state; - -static void init_gdbserver_state(void) +void gdb_init_gdbserver_state(void) { g_assert(!gdbserver_state.init); memset(&gdbserver_state, 0, sizeof(GDBState)); @@ -394,211 +75,10 @@ static void init_gdbserver_state(void) gdbserver_state.sstep_flags &= gdbserver_state.supported_sstep_flags; } -#ifndef CONFIG_USER_ONLY -static void reset_gdbserver_state(void) -{ - g_free(gdbserver_state.processes); - gdbserver_state.processes = NULL; - gdbserver_state.process_num = 0; -} -#endif - bool gdb_has_xml; -#ifdef CONFIG_USER_ONLY - -static int get_char(void) -{ - uint8_t ch; - int ret; - - for(;;) { - ret = recv(gdbserver_state.fd, &ch, 1, 0); - if (ret < 0) { - if (errno == ECONNRESET) - gdbserver_state.fd = -1; - if (errno != EINTR) - return -1; - } else if (ret == 0) { - close(gdbserver_state.fd); - gdbserver_state.fd = -1; - return -1; - } else { - break; - } - } - return ch; -} -#endif - -/* - * Return true if there is a GDB currently connected to the stub - * and attached to a CPU - */ -static bool gdb_attached(void) -{ - return gdbserver_state.init && gdbserver_state.c_cpu; -} - -static enum { - GDB_SYS_UNKNOWN, - GDB_SYS_ENABLED, - GDB_SYS_DISABLED, -} gdb_syscall_mode; - -/* Decide if either remote gdb syscalls 
or native file IO should be used. */ -int use_gdb_syscalls(void) -{ - SemihostingTarget target = semihosting_get_target(); - if (target == SEMIHOSTING_TARGET_NATIVE) { - /* -semihosting-config target=native */ - return false; - } else if (target == SEMIHOSTING_TARGET_GDB) { - /* -semihosting-config target=gdb */ - return true; - } - - /* -semihosting-config target=auto */ - /* On the first call check if gdb is connected and remember. */ - if (gdb_syscall_mode == GDB_SYS_UNKNOWN) { - gdb_syscall_mode = gdb_attached() ? GDB_SYS_ENABLED : GDB_SYS_DISABLED; - } - return gdb_syscall_mode == GDB_SYS_ENABLED; -} - -static bool stub_can_reverse(void) -{ -#ifdef CONFIG_USER_ONLY - return false; -#else - return replay_mode == REPLAY_MODE_PLAY; -#endif -} - -/* Resume execution. */ -static inline void gdb_continue(void) -{ - -#ifdef CONFIG_USER_ONLY - gdbserver_state.running_state = 1; - trace_gdbstub_op_continue(); -#else - if (!runstate_needs_reset()) { - trace_gdbstub_op_continue(); - vm_start(); - } -#endif -} - -/* - * Resume execution, per CPU actions. For user-mode emulation it's - * equivalent to gdb_continue. - */ -static int gdb_continue_partial(char *newstates) -{ - CPUState *cpu; - int res = 0; -#ifdef CONFIG_USER_ONLY - /* - * This is not exactly accurate, but it's an improvement compared to the - * previous situation, where only one CPU would be single-stepped. - */ - CPU_FOREACH(cpu) { - if (newstates[cpu->cpu_index] == 's') { - trace_gdbstub_op_stepping(cpu->cpu_index); - cpu_single_step(cpu, gdbserver_state.sstep_flags); - } - } - gdbserver_state.running_state = 1; -#else - int flag = 0; - - if (!runstate_needs_reset()) { - bool step_requested = false; - CPU_FOREACH(cpu) { - if (newstates[cpu->cpu_index] == 's') { - step_requested = true; - break; - } - } - - if (vm_prepare_start(step_requested)) { - return 0; - } - - CPU_FOREACH(cpu) { - switch (newstates[cpu->cpu_index]) { - case 0: - case 1: - break; /* nothing to do here */ - case 's': - trace_gdbstub_op_stepping(cpu->cpu_index); - cpu_single_step(cpu, gdbserver_state.sstep_flags); - cpu_resume(cpu); - flag = 1; - break; - case 'c': - trace_gdbstub_op_continue_cpu(cpu->cpu_index); - cpu_resume(cpu); - flag = 1; - break; - default: - res = -1; - break; - } - } - } - if (flag) { - qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true); - } -#endif - return res; -} - -static void put_buffer(const uint8_t *buf, int len) -{ -#ifdef CONFIG_USER_ONLY - int ret; - - while (len > 0) { - ret = send(gdbserver_state.fd, buf, len, 0); - if (ret < 0) { - if (errno != EINTR) - return; - } else { - buf += ret; - len -= ret; - } - } -#else - /* XXX this blocks entire thread. 
Rewrite to use - * qemu_chr_fe_write and background I/O callbacks */ - qemu_chr_fe_write_all(&gdbserver_state.chr, buf, len); -#endif -} - -static inline int fromhex(int v) -{ - if (v >= '0' && v <= '9') - return v - '0'; - else if (v >= 'A' && v <= 'F') - return v - 'A' + 10; - else if (v >= 'a' && v <= 'f') - return v - 'a' + 10; - else - return 0; -} - -static inline int tohex(int v) -{ - if (v < 10) - return v + '0'; - else - return v - 10 + 'a'; -} - /* writes 2*len+1 bytes in buf */ -static void memtohex(GString *buf, const uint8_t *mem, int len) +void gdb_memtohex(GString *buf, const uint8_t *mem, int len) { int i, c; for(i = 0; i < len; i++) { @@ -609,7 +89,7 @@ static void memtohex(GString *buf, const uint8_t *mem, int len) g_string_append_c(buf, '\0'); } -static void hextomem(GByteArray *mem, const char *buf, int len) +void gdb_hextomem(GByteArray *mem, const char *buf, int len) { int i; @@ -654,7 +134,7 @@ static void hexdump(const char *buf, int len, } /* return -1 if error, 0 if OK */ -static int put_packet_binary(const char *buf, int len, bool dump) +int gdb_put_packet_binary(const char *buf, int len, bool dump) { int csum, i; uint8_t footer[3]; @@ -678,37 +158,31 @@ static int put_packet_binary(const char *buf, int len, bool dump) footer[2] = tohex((csum) & 0xf); g_byte_array_append(gdbserver_state.last_packet, footer, 3); - put_buffer(gdbserver_state.last_packet->data, + gdb_put_buffer(gdbserver_state.last_packet->data, gdbserver_state.last_packet->len); -#ifdef CONFIG_USER_ONLY - i = get_char(); - if (i < 0) - return -1; - if (i == '+') + if (gdb_got_immediate_ack()) { break; -#else - break; -#endif + } } return 0; } /* return -1 if error, 0 if OK */ -static int put_packet(const char *buf) +int gdb_put_packet(const char *buf) { trace_gdbstub_io_reply(buf); - return put_packet_binary(buf, strlen(buf), false); + return gdb_put_packet_binary(buf, strlen(buf), false); } -static void put_strbuf(void) +void gdb_put_strbuf(void) { - put_packet(gdbserver_state.str_buf->str); + gdb_put_packet(gdbserver_state.str_buf->str); } /* Encode data using the encoding for 'x' packets. 
*/ -static void memtox(GString *buf, const char *mem, int len) +void gdb_memtox(GString *buf, const char *mem, int len) { char c; @@ -765,7 +239,7 @@ static CPUState *find_cpu(uint32_t thread_id) CPUState *cpu; CPU_FOREACH(cpu) { - if (cpu_gdb_index(cpu) == thread_id) { + if (gdb_get_cpu_index(cpu) == thread_id) { return cpu; } } @@ -819,7 +293,7 @@ static CPUState *gdb_next_attached_cpu(CPUState *cpu) } /* Return the first attached cpu */ -static CPUState *gdb_first_attached_cpu(void) +CPUState *gdb_first_attached_cpu(void) { CPUState *cpu = first_cpu; GDBProcess *process = gdb_get_cpu_process(cpu); @@ -1025,7 +499,7 @@ static void gdb_process_breakpoint_remove_all(GDBProcess *p) } -static void gdb_set_cpu_pc(target_ulong pc) +static void gdb_set_cpu_pc(vaddr pc) { CPUState *cpu = gdbserver_state.c_cpu; @@ -1033,23 +507,16 @@ static void gdb_set_cpu_pc(target_ulong pc) cpu_set_pc(cpu, pc); } -static void gdb_append_thread_id(CPUState *cpu, GString *buf) +void gdb_append_thread_id(CPUState *cpu, GString *buf) { if (gdbserver_state.multiprocess) { g_string_append_printf(buf, "p%02x.%02x", - gdb_get_cpu_pid(cpu), cpu_gdb_index(cpu)); + gdb_get_cpu_pid(cpu), gdb_get_cpu_index(cpu)); } else { - g_string_append_printf(buf, "%02x", cpu_gdb_index(cpu)); + g_string_append_printf(buf, "%02x", gdb_get_cpu_index(cpu)); } } -typedef enum GDBThreadIdKind { - GDB_ONE_THREAD = 0, - GDB_ALL_THREADS, /* One process, all threads */ - GDB_ALL_PROCESSES, - GDB_READ_THREAD_ERR -} GDBThreadIdKind; - static GDBThreadIdKind read_thread_id(const char *buf, const char **end_buf, uint32_t *pid, uint32_t *tid) { @@ -1112,16 +579,7 @@ static int gdb_handle_vcont(const char *p) GDBProcess *process; CPUState *cpu; GDBThreadIdKind kind; -#ifdef CONFIG_USER_ONLY - int max_cpus = 1; /* global variable max_cpus exists only in system mode */ - - CPU_FOREACH(cpu) { - max_cpus = max_cpus <= cpu->cpu_index ? cpu->cpu_index + 1 : max_cpus; - } -#else - MachineState *ms = MACHINE(qdev_get_machine()); - unsigned int max_cpus = ms->smp.max_cpus; -#endif + unsigned int max_cpus = gdb_get_max_cpus(); /* uninitialised CPUs stay 0 */ newstates = g_new0(char, max_cpus); @@ -1230,20 +688,6 @@ out: return res; } -typedef union GdbCmdVariant { - const char *data; - uint8_t opcode; - unsigned long val_ul; - unsigned long long val_ull; - struct { - GDBThreadIdKind kind; - uint32_t pid; - uint32_t tid; - } thread_id; -} GdbCmdVariant; - -#define get_param(p, i) (&g_array_index(p, GdbCmdVariant, i)) - static const char *cmd_next_param(const char *param, const char delimiter) { static const char all_delimiters[] = ",;:="; @@ -1333,6 +777,10 @@ typedef void (*GdbCmdHandler)(GArray *params, void *user_ctx); /* * cmd_startswith -> cmd is compared using startswith * + * allow_stop_reply -> true iff the gdbstub can respond to this command with a + * "stop reply" packet. The list of commands that accept such response is + * defined at the GDB Remote Serial Protocol documentation. see: + * https://sourceware.org/gdb/onlinedocs/gdb/Stop-Reply-Packets.html#Stop-Reply-Packets. 
* * schema definitions: * Each schema parameter entry consists of 2 chars, @@ -1358,6 +806,7 @@ typedef struct GdbCmdParseEntry { const char *cmd; bool cmd_startswith; const char *schema; + bool allow_stop_reply; } GdbCmdParseEntry; static inline int startswith(const char *string, const char *pattern) @@ -1391,6 +840,7 @@ static int process_string_cmd(void *user_ctx, const char *data, } } + gdbserver_state.allow_stop_reply = cmd->allow_stop_reply; cmd->handler(params, user_ctx); return 0; } @@ -1410,7 +860,7 @@ static void run_cmd_parser(const char *data, const GdbCmdParseEntry *cmd) /* In case there was an error during the command parsing we must * send a NULL packet to indicate the command is not supported */ if (process_string_cmd(NULL, data, cmd, 1)) { - put_packet(""); + gdb_put_packet(""); } } @@ -1421,7 +871,7 @@ static void handle_detach(GArray *params, void *user_ctx) if (gdbserver_state.multiprocess) { if (!params->len) { - put_packet("E22"); + gdb_put_packet("E22"); return; } @@ -1442,10 +892,10 @@ static void handle_detach(GArray *params, void *user_ctx) if (!gdbserver_state.c_cpu) { /* No more process attached */ - gdb_syscall_mode = GDB_SYS_DISABLED; + gdb_disable_syscalls(); gdb_continue(); } - put_packet("OK"); + gdb_put_packet("OK"); } static void handle_thread_alive(GArray *params, void *user_ctx) @@ -1453,23 +903,23 @@ static void handle_thread_alive(GArray *params, void *user_ctx) CPUState *cpu; if (!params->len) { - put_packet("E22"); + gdb_put_packet("E22"); return; } if (get_param(params, 0)->thread_id.kind == GDB_READ_THREAD_ERR) { - put_packet("E22"); + gdb_put_packet("E22"); return; } cpu = gdb_get_cpu(get_param(params, 0)->thread_id.pid, get_param(params, 0)->thread_id.tid); if (!cpu) { - put_packet("E22"); + gdb_put_packet("E22"); return; } - put_packet("OK"); + gdb_put_packet("OK"); } static void handle_continue(GArray *params, void *user_ctx) @@ -1506,24 +956,24 @@ static void handle_set_thread(GArray *params, void *user_ctx) CPUState *cpu; if (params->len != 2) { - put_packet("E22"); + gdb_put_packet("E22"); return; } if (get_param(params, 1)->thread_id.kind == GDB_READ_THREAD_ERR) { - put_packet("E22"); + gdb_put_packet("E22"); return; } if (get_param(params, 1)->thread_id.kind != GDB_ONE_THREAD) { - put_packet("OK"); + gdb_put_packet("OK"); return; } cpu = gdb_get_cpu(get_param(params, 1)->thread_id.pid, get_param(params, 1)->thread_id.tid); if (!cpu) { - put_packet("E22"); + gdb_put_packet("E22"); return; } @@ -1534,14 +984,14 @@ static void handle_set_thread(GArray *params, void *user_ctx) switch (get_param(params, 0)->opcode) { case 'c': gdbserver_state.c_cpu = cpu; - put_packet("OK"); + gdb_put_packet("OK"); break; case 'g': gdbserver_state.g_cpu = cpu; - put_packet("OK"); + gdb_put_packet("OK"); break; default: - put_packet("E22"); + gdb_put_packet("E22"); break; } } @@ -1551,7 +1001,7 @@ static void handle_insert_bp(GArray *params, void *user_ctx) int res; if (params->len != 3) { - put_packet("E22"); + gdb_put_packet("E22"); return; } @@ -1560,14 +1010,14 @@ static void handle_insert_bp(GArray *params, void *user_ctx) get_param(params, 1)->val_ull, get_param(params, 2)->val_ull); if (res >= 0) { - put_packet("OK"); + gdb_put_packet("OK"); return; } else if (res == -ENOSYS) { - put_packet(""); + gdb_put_packet(""); return; } - put_packet("E22"); + gdb_put_packet("E22"); } static void handle_remove_bp(GArray *params, void *user_ctx) @@ -1575,7 +1025,7 @@ static void handle_remove_bp(GArray *params, void *user_ctx) int res; if (params->len != 3) { - 
put_packet("E22"); + gdb_put_packet("E22"); return; } @@ -1584,14 +1034,14 @@ static void handle_remove_bp(GArray *params, void *user_ctx) get_param(params, 1)->val_ull, get_param(params, 2)->val_ull); if (res >= 0) { - put_packet("OK"); + gdb_put_packet("OK"); return; } else if (res == -ENOSYS) { - put_packet(""); + gdb_put_packet(""); return; } - put_packet("E22"); + gdb_put_packet("E22"); } /* @@ -1610,20 +1060,20 @@ static void handle_set_reg(GArray *params, void *user_ctx) int reg_size; if (!gdb_has_xml) { - put_packet(""); + gdb_put_packet(""); return; } if (params->len != 2) { - put_packet("E22"); + gdb_put_packet("E22"); return; } reg_size = strlen(get_param(params, 1)->data) / 2; - hextomem(gdbserver_state.mem_buf, get_param(params, 1)->data, reg_size); + gdb_hextomem(gdbserver_state.mem_buf, get_param(params, 1)->data, reg_size); gdb_write_register(gdbserver_state.g_cpu, gdbserver_state.mem_buf->data, get_param(params, 0)->val_ull); - put_packet("OK"); + gdb_put_packet("OK"); } static void handle_get_reg(GArray *params, void *user_ctx) @@ -1631,12 +1081,12 @@ static void handle_get_reg(GArray *params, void *user_ctx) int reg_size; if (!gdb_has_xml) { - put_packet(""); + gdb_put_packet(""); return; } if (!params->len) { - put_packet("E14"); + gdb_put_packet("E14"); return; } @@ -1644,75 +1094,77 @@ static void handle_get_reg(GArray *params, void *user_ctx) gdbserver_state.mem_buf, get_param(params, 0)->val_ull); if (!reg_size) { - put_packet("E14"); + gdb_put_packet("E14"); return; } else { g_byte_array_set_size(gdbserver_state.mem_buf, reg_size); } - memtohex(gdbserver_state.str_buf, gdbserver_state.mem_buf->data, reg_size); - put_strbuf(); + gdb_memtohex(gdbserver_state.str_buf, + gdbserver_state.mem_buf->data, reg_size); + gdb_put_strbuf(); } static void handle_write_mem(GArray *params, void *user_ctx) { if (params->len != 3) { - put_packet("E22"); + gdb_put_packet("E22"); return; } - /* hextomem() reads 2*len bytes */ + /* gdb_hextomem() reads 2*len bytes */ if (get_param(params, 1)->val_ull > strlen(get_param(params, 2)->data) / 2) { - put_packet("E22"); + gdb_put_packet("E22"); return; } - hextomem(gdbserver_state.mem_buf, get_param(params, 2)->data, - get_param(params, 1)->val_ull); - if (target_memory_rw_debug(gdbserver_state.g_cpu, - get_param(params, 0)->val_ull, - gdbserver_state.mem_buf->data, - gdbserver_state.mem_buf->len, true)) { - put_packet("E14"); + gdb_hextomem(gdbserver_state.mem_buf, get_param(params, 2)->data, + get_param(params, 1)->val_ull); + if (gdb_target_memory_rw_debug(gdbserver_state.g_cpu, + get_param(params, 0)->val_ull, + gdbserver_state.mem_buf->data, + gdbserver_state.mem_buf->len, true)) { + gdb_put_packet("E14"); return; } - put_packet("OK"); + gdb_put_packet("OK"); } static void handle_read_mem(GArray *params, void *user_ctx) { if (params->len != 2) { - put_packet("E22"); + gdb_put_packet("E22"); return; } - /* memtohex() doubles the required space */ + /* gdb_memtohex() doubles the required space */ if (get_param(params, 1)->val_ull > MAX_PACKET_LENGTH / 2) { - put_packet("E22"); + gdb_put_packet("E22"); return; } g_byte_array_set_size(gdbserver_state.mem_buf, get_param(params, 1)->val_ull); - if (target_memory_rw_debug(gdbserver_state.g_cpu, - get_param(params, 0)->val_ull, - gdbserver_state.mem_buf->data, - gdbserver_state.mem_buf->len, false)) { - put_packet("E14"); + if (gdb_target_memory_rw_debug(gdbserver_state.g_cpu, + get_param(params, 0)->val_ull, + gdbserver_state.mem_buf->data, + gdbserver_state.mem_buf->len, false)) { + 
gdb_put_packet("E14"); return; } - memtohex(gdbserver_state.str_buf, gdbserver_state.mem_buf->data, + gdb_memtohex(gdbserver_state.str_buf, gdbserver_state.mem_buf->data, gdbserver_state.mem_buf->len); - put_strbuf(); + gdb_put_strbuf(); } static void handle_write_all_regs(GArray *params, void *user_ctx) { - target_ulong addr, len; + int reg_id; + size_t len; uint8_t *registers; int reg_size; @@ -1722,94 +1174,42 @@ static void handle_write_all_regs(GArray *params, void *user_ctx) cpu_synchronize_state(gdbserver_state.g_cpu); len = strlen(get_param(params, 0)->data) / 2; - hextomem(gdbserver_state.mem_buf, get_param(params, 0)->data, len); + gdb_hextomem(gdbserver_state.mem_buf, get_param(params, 0)->data, len); registers = gdbserver_state.mem_buf->data; - for (addr = 0; addr < gdbserver_state.g_cpu->gdb_num_g_regs && len > 0; - addr++) { - reg_size = gdb_write_register(gdbserver_state.g_cpu, registers, addr); + for (reg_id = 0; + reg_id < gdbserver_state.g_cpu->gdb_num_g_regs && len > 0; + reg_id++) { + reg_size = gdb_write_register(gdbserver_state.g_cpu, registers, reg_id); len -= reg_size; registers += reg_size; } - put_packet("OK"); + gdb_put_packet("OK"); } static void handle_read_all_regs(GArray *params, void *user_ctx) { - target_ulong addr, len; + int reg_id; + size_t len; cpu_synchronize_state(gdbserver_state.g_cpu); g_byte_array_set_size(gdbserver_state.mem_buf, 0); len = 0; - for (addr = 0; addr < gdbserver_state.g_cpu->gdb_num_g_regs; addr++) { + for (reg_id = 0; reg_id < gdbserver_state.g_cpu->gdb_num_g_regs; reg_id++) { len += gdb_read_register(gdbserver_state.g_cpu, gdbserver_state.mem_buf, - addr); + reg_id); } g_assert(len == gdbserver_state.mem_buf->len); - memtohex(gdbserver_state.str_buf, gdbserver_state.mem_buf->data, len); - put_strbuf(); + gdb_memtohex(gdbserver_state.str_buf, gdbserver_state.mem_buf->data, len); + gdb_put_strbuf(); } -static void handle_file_io(GArray *params, void *user_ctx) -{ - if (params->len >= 1 && gdbserver_state.current_syscall_cb) { - uint64_t ret; - int err; - - ret = get_param(params, 0)->val_ull; - if (params->len >= 2) { - err = get_param(params, 1)->val_ull; - } else { - err = 0; - } - - /* Convert GDB error numbers back to host error numbers. 
*/ -#define E(X) case GDB_E##X: err = E##X; break - switch (err) { - case 0: - break; - E(PERM); - E(NOENT); - E(INTR); - E(BADF); - E(ACCES); - E(FAULT); - E(BUSY); - E(EXIST); - E(NODEV); - E(NOTDIR); - E(ISDIR); - E(INVAL); - E(NFILE); - E(MFILE); - E(FBIG); - E(NOSPC); - E(SPIPE); - E(ROFS); - E(NAMETOOLONG); - default: - err = EINVAL; - break; - } -#undef E - - gdbserver_state.current_syscall_cb(gdbserver_state.c_cpu, ret, err); - gdbserver_state.current_syscall_cb = NULL; - } - - if (params->len >= 3 && get_param(params, 2)->opcode == (uint8_t)'C') { - put_packet("T02"); - return; - } - - gdb_continue(); -} static void handle_step(GArray *params, void *user_ctx) { if (params->len) { - gdb_set_cpu_pc((target_ulong)get_param(params, 0)->val_ull); + gdb_set_cpu_pc(get_param(params, 0)->val_ull); } cpu_single_step(gdbserver_state.c_cpu, gdbserver_state.sstep_flags); @@ -1818,8 +1218,8 @@ static void handle_step(GArray *params, void *user_ctx) static void handle_backward(GArray *params, void *user_ctx) { - if (!stub_can_reverse()) { - put_packet("E22"); + if (!gdb_can_reverse()) { + gdb_put_packet("E22"); } if (params->len == 1) { switch (get_param(params, 0)->opcode) { @@ -1827,26 +1227,26 @@ static void handle_backward(GArray *params, void *user_ctx) if (replay_reverse_step()) { gdb_continue(); } else { - put_packet("E14"); + gdb_put_packet("E14"); } return; case 'c': if (replay_reverse_continue()) { gdb_continue(); } else { - put_packet("E14"); + gdb_put_packet("E14"); } return; } } /* Default invalid command */ - put_packet(""); + gdb_put_packet(""); } static void handle_v_cont_query(GArray *params, void *user_ctx) { - put_packet("vCont;c;C;s;S"); + gdb_put_packet("vCont;c;C;s;S"); } static void handle_v_cont(GArray *params, void *user_ctx) @@ -1859,9 +1259,9 @@ static void handle_v_cont(GArray *params, void *user_ctx) res = gdb_handle_vcont(get_param(params, 0)->data); if ((res == -EINVAL) || (res == -ERANGE)) { - put_packet("E22"); + gdb_put_packet("E22"); } else if (res) { - put_packet(""); + gdb_put_packet(""); } } @@ -1889,17 +1289,20 @@ static void handle_v_attach(GArray *params, void *user_ctx) gdbserver_state.g_cpu = cpu; gdbserver_state.c_cpu = cpu; - g_string_printf(gdbserver_state.str_buf, "T%02xthread:", GDB_SIGNAL_TRAP); - gdb_append_thread_id(cpu, gdbserver_state.str_buf); - g_string_append_c(gdbserver_state.str_buf, ';'); + if (gdbserver_state.allow_stop_reply) { + g_string_printf(gdbserver_state.str_buf, "T%02xthread:", GDB_SIGNAL_TRAP); + gdb_append_thread_id(cpu, gdbserver_state.str_buf); + g_string_append_c(gdbserver_state.str_buf, ';'); + gdbserver_state.allow_stop_reply = false; cleanup: - put_strbuf(); + gdb_put_strbuf(); + } } static void handle_v_kill(GArray *params, void *user_ctx) { /* Kill the target */ - put_packet("OK"); + gdb_put_packet("OK"); error_report("QEMU: Terminated via GDBstub"); gdb_exit(0); exit(0); @@ -1916,12 +1319,14 @@ static const GdbCmdParseEntry gdb_v_commands_table[] = { .handler = handle_v_cont, .cmd = "Cont", .cmd_startswith = 1, + .allow_stop_reply = true, .schema = "s0" }, { .handler = handle_v_attach, .cmd = "Attach;", .cmd_startswith = 1, + .allow_stop_reply = true, .schema = "l0" }, { @@ -1940,7 +1345,7 @@ static void handle_v_commands(GArray *params, void *user_ctx) if (process_string_cmd(NULL, get_param(params, 0)->data, gdb_v_commands_table, ARRAY_SIZE(gdb_v_commands_table))) { - put_packet(""); + gdb_put_packet(""); } } @@ -1958,7 +1363,7 @@ static void handle_query_qemu_sstepbits(GArray *params, void *user_ctx) 
SSTEP_NOTIMER); } - put_strbuf(); + gdb_put_strbuf(); } static void handle_set_qemu_sstep(GArray *params, void *user_ctx) @@ -1972,19 +1377,19 @@ static void handle_set_qemu_sstep(GArray *params, void *user_ctx) new_sstep_flags = get_param(params, 0)->val_ul; if (new_sstep_flags & ~gdbserver_state.supported_sstep_flags) { - put_packet("E22"); + gdb_put_packet("E22"); return; } gdbserver_state.sstep_flags = new_sstep_flags; - put_packet("OK"); + gdb_put_packet("OK"); } static void handle_query_qemu_sstep(GArray *params, void *user_ctx) { g_string_printf(gdbserver_state.str_buf, "0x%x", gdbserver_state.sstep_flags); - put_strbuf(); + gdb_put_strbuf(); } static void handle_query_curr_tid(GArray *params, void *user_ctx) @@ -2001,19 +1406,19 @@ static void handle_query_curr_tid(GArray *params, void *user_ctx) cpu = get_first_cpu_in_process(process); g_string_assign(gdbserver_state.str_buf, "QC"); gdb_append_thread_id(cpu, gdbserver_state.str_buf); - put_strbuf(); + gdb_put_strbuf(); } static void handle_query_threads(GArray *params, void *user_ctx) { if (!gdbserver_state.query_cpu) { - put_packet("l"); + gdb_put_packet("l"); return; } g_string_assign(gdbserver_state.str_buf, "m"); gdb_append_thread_id(gdbserver_state.query_cpu, gdbserver_state.str_buf); - put_strbuf(); + gdb_put_strbuf(); gdbserver_state.query_cpu = gdb_next_attached_cpu(gdbserver_state.query_cpu); } @@ -2030,7 +1435,7 @@ static void handle_query_thread_extra(GArray *params, void *user_ctx) if (!params->len || get_param(params, 0)->thread_id.kind == GDB_READ_THREAD_ERR) { - put_packet("E22"); + gdb_put_packet("E22"); return; } @@ -2055,19 +1460,13 @@ static void handle_query_thread_extra(GArray *params, void *user_ctx) cpu->halted ? "halted " : "running"); } trace_gdbstub_op_extra_info(rs->str); - memtohex(gdbserver_state.str_buf, (uint8_t *)rs->str, rs->len); - put_strbuf(); + gdb_memtohex(gdbserver_state.str_buf, (uint8_t *)rs->str, rs->len); + gdb_put_strbuf(); } //// --- Begin LibAFL code --- -struct libafl_custom_gdb_cmd { - int (*callback)(uint8_t*, size_t, void*); - void* data; - struct libafl_custom_gdb_cmd* next; -}; - -struct libafl_custom_gdb_cmd* libafl_qemu_gdb_cmds = NULL; +struct libafl_custom_gdb_cmd* libafl_qemu_gdb_cmds; void libafl_qemu_add_gdb_cmd(int (*callback)(uint8_t*, size_t, void*), void* data); void libafl_qemu_add_gdb_cmd(int (*callback)(uint8_t*, size_t, void*), void* data) @@ -2083,110 +1482,12 @@ void libafl_qemu_gdb_reply(const char* buf, size_t len); void libafl_qemu_gdb_reply(const char* buf, size_t len) { g_autoptr(GString) hex_buf = g_string_new("O"); - memtohex(hex_buf, (const uint8_t *) buf, len); - put_packet(hex_buf->str); + gdb_memtohex(hex_buf, (const uint8_t *) buf, len); + gdb_put_packet(hex_buf->str); } //// --- End LibAFL code --- -#ifdef CONFIG_USER_ONLY -static void handle_query_offsets(GArray *params, void *user_ctx) -{ - TaskState *ts; - - ts = gdbserver_state.c_cpu->opaque; - g_string_printf(gdbserver_state.str_buf, - "Text=" TARGET_ABI_FMT_lx - ";Data=" TARGET_ABI_FMT_lx - ";Bss=" TARGET_ABI_FMT_lx, - ts->info->code_offset, - ts->info->data_offset, - ts->info->data_offset); - put_strbuf(); -} - -//// --- Begin LibAFL code --- - -static void handle_query_rcmd(GArray *params, void *user_ctx) -{ - // const guint8 zero = 0; - size_t len; - - if (!params->len) { - put_packet("E22"); - return; - } - - len = strlen(get_param(params, 0)->data); - if (len % 2) { - put_packet("E01"); - return; - } - - g_assert(gdbserver_state.mem_buf->len == 0); - len = len / 2; - 
hextomem(gdbserver_state.mem_buf, get_param(params, 0)->data, len); - // g_byte_array_append(gdbserver_state.mem_buf, &zero, 1); - - struct libafl_custom_gdb_cmd** c = &libafl_qemu_gdb_cmds; - int recognized = 0; - while (*c) { - recognized |= (*c)->callback(gdbserver_state.mem_buf->data, gdbserver_state.mem_buf->len, (*c)->data); - c = &(*c)->next; - } - - if (recognized) { - put_packet("OK"); - } else { - put_packet(""); - } -} - -//// --- End LibAFL code --- -#else -static void handle_query_rcmd(GArray *params, void *user_ctx) -{ - const guint8 zero = 0; - int len; - - if (!params->len) { - put_packet("E22"); - return; - } - - len = strlen(get_param(params, 0)->data); - if (len % 2) { - put_packet("E01"); - return; - } - - g_assert(gdbserver_state.mem_buf->len == 0); - len = len / 2; - hextomem(gdbserver_state.mem_buf, get_param(params, 0)->data, len); - -//// --- Begin LibAFL code --- - - struct libafl_custom_gdb_cmd** c = &libafl_qemu_gdb_cmds; - int recognized = 0; - while (*c) { - recognized |= (*c)->callback(gdbserver_state.mem_buf->data, gdbserver_state.mem_buf->len, (*c)->data); - c = &(*c)->next; - } - - if (recognized) { - put_packet("OK"); - return; - } - -//// --- End LibAFL code --- - - g_byte_array_append(gdbserver_state.mem_buf, &zero, 1); - qemu_chr_be_write(gdbserver_state.mon_chr, gdbserver_state.mem_buf->data, - gdbserver_state.mem_buf->len); - put_packet("OK"); -} -#endif - static void handle_query_supported(GArray *params, void *user_ctx) { CPUClass *cc; @@ -2197,12 +1498,12 @@ static void handle_query_supported(GArray *params, void *user_ctx) g_string_append(gdbserver_state.str_buf, ";qXfer:features:read+"); } - if (stub_can_reverse()) { + if (gdb_can_reverse()) { g_string_append(gdbserver_state.str_buf, ";ReverseStep+;ReverseContinue+"); } -#ifdef CONFIG_USER_ONLY +#if defined(CONFIG_USER_ONLY) && defined(CONFIG_LINUX) if (gdbserver_state.c_cpu->opaque) { g_string_append(gdbserver_state.str_buf, ";qXfer:auxv:read+"); } @@ -2214,7 +1515,7 @@ static void handle_query_supported(GArray *params, void *user_ctx) } g_string_append(gdbserver_state.str_buf, ";vContSupported+;multiprocess+"); - put_strbuf(); + gdb_put_strbuf(); } static void handle_query_xfer_features(GArray *params, void *user_ctx) @@ -2226,14 +1527,14 @@ static void handle_query_xfer_features(GArray *params, void *user_ctx) const char *p; if (params->len < 3) { - put_packet("E22"); + gdb_put_packet("E22"); return; } process = gdb_get_cpu_process(gdbserver_state.g_cpu); cc = CPU_GET_CLASS(gdbserver_state.g_cpu); if (!cc->gdb_core_xml_file) { - put_packet(""); + gdb_put_packet(""); return; } @@ -2241,7 +1542,7 @@ static void handle_query_xfer_features(GArray *params, void *user_ctx) p = get_param(params, 0)->data; xml = get_feature_xml(p, &p, process); if (!xml) { - put_packet("E00"); + gdb_put_packet("E00"); return; } @@ -2249,7 +1550,7 @@ static void handle_query_xfer_features(GArray *params, void *user_ctx) len = get_param(params, 2)->val_ul; total_len = strlen(xml); if (addr > total_len) { - put_packet("E00"); + gdb_put_packet("E00"); return; } @@ -2259,101 +1560,25 @@ static void handle_query_xfer_features(GArray *params, void *user_ctx) if (len < total_len - addr) { g_string_assign(gdbserver_state.str_buf, "m"); - memtox(gdbserver_state.str_buf, xml + addr, len); + gdb_memtox(gdbserver_state.str_buf, xml + addr, len); } else { g_string_assign(gdbserver_state.str_buf, "l"); - memtox(gdbserver_state.str_buf, xml + addr, total_len - addr); + gdb_memtox(gdbserver_state.str_buf, xml + addr, total_len - 
addr); } - put_packet_binary(gdbserver_state.str_buf->str, + gdb_put_packet_binary(gdbserver_state.str_buf->str, gdbserver_state.str_buf->len, true); } -#if defined(CONFIG_USER_ONLY) && defined(CONFIG_LINUX_USER) -static void handle_query_xfer_auxv(GArray *params, void *user_ctx) -{ - TaskState *ts; - unsigned long offset, len, saved_auxv, auxv_len; - - if (params->len < 2) { - put_packet("E22"); - return; - } - - offset = get_param(params, 0)->val_ul; - len = get_param(params, 1)->val_ul; - ts = gdbserver_state.c_cpu->opaque; - saved_auxv = ts->info->saved_auxv; - auxv_len = ts->info->auxv_len; - - if (offset >= auxv_len) { - put_packet("E00"); - return; - } - - if (len > (MAX_PACKET_LENGTH - 5) / 2) { - len = (MAX_PACKET_LENGTH - 5) / 2; - } - - if (len < auxv_len - offset) { - g_string_assign(gdbserver_state.str_buf, "m"); - } else { - g_string_assign(gdbserver_state.str_buf, "l"); - len = auxv_len - offset; - } - - g_byte_array_set_size(gdbserver_state.mem_buf, len); - if (target_memory_rw_debug(gdbserver_state.g_cpu, saved_auxv + offset, - gdbserver_state.mem_buf->data, len, false)) { - put_packet("E14"); - return; - } - - memtox(gdbserver_state.str_buf, - (const char *)gdbserver_state.mem_buf->data, len); - put_packet_binary(gdbserver_state.str_buf->str, - gdbserver_state.str_buf->len, true); -} -#endif - -static void handle_query_attached(GArray *params, void *user_ctx) -{ - put_packet(GDB_ATTACHED); -} - static void handle_query_qemu_supported(GArray *params, void *user_ctx) { g_string_printf(gdbserver_state.str_buf, "sstepbits;sstep"); #ifndef CONFIG_USER_ONLY g_string_append(gdbserver_state.str_buf, ";PhyMemMode"); #endif - put_strbuf(); + gdb_put_strbuf(); } -#ifndef CONFIG_USER_ONLY -static void handle_query_qemu_phy_mem_mode(GArray *params, - void *user_ctx) -{ - g_string_printf(gdbserver_state.str_buf, "%d", phy_memory_mode); - put_strbuf(); -} - -static void handle_set_qemu_phy_mem_mode(GArray *params, void *user_ctx) -{ - if (!params->len) { - put_packet("E22"); - return; - } - - if (!get_param(params, 0)->val_ul) { - phy_memory_mode = 0; - } else { - phy_memory_mode = 1; - } - put_packet("OK"); -} -#endif - static const GdbCmdParseEntry gdb_gen_query_set_common_table[] = { /* Order is important if has same prefix */ { @@ -2393,12 +1618,12 @@ static const GdbCmdParseEntry gdb_gen_query_table[] = { }, #ifdef CONFIG_USER_ONLY { - .handler = handle_query_offsets, + .handler = gdb_handle_query_offsets, .cmd = "Offsets", }, #endif { - .handler = handle_query_rcmd, + .handler = gdb_handle_query_rcmd, .cmd = "Rcmd,", .cmd_startswith = 1, .schema = "s0" @@ -2420,21 +1645,21 @@ static const GdbCmdParseEntry gdb_gen_query_table[] = { .cmd_startswith = 1, .schema = "s:l,l0" }, -#if defined(CONFIG_USER_ONLY) && defined(CONFIG_LINUX_USER) +#if defined(CONFIG_USER_ONLY) && defined(CONFIG_LINUX) { - .handler = handle_query_xfer_auxv, + .handler = gdb_handle_query_xfer_auxv, .cmd = "Xfer:auxv:read::", .cmd_startswith = 1, .schema = "l,l0" }, #endif { - .handler = handle_query_attached, + .handler = gdb_handle_query_attached, .cmd = "Attached:", .cmd_startswith = 1 }, { - .handler = handle_query_attached, + .handler = gdb_handle_query_attached, .cmd = "Attached", }, { @@ -2443,7 +1668,7 @@ static const GdbCmdParseEntry gdb_gen_query_table[] = { }, #ifndef CONFIG_USER_ONLY { - .handler = handle_query_qemu_phy_mem_mode, + .handler = gdb_handle_query_qemu_phy_mem_mode, .cmd = "qemu.PhyMemMode", }, #endif @@ -2459,7 +1684,7 @@ static const GdbCmdParseEntry gdb_gen_set_table[] = { }, #ifndef 
CONFIG_USER_ONLY { - .handler = handle_set_qemu_phy_mem_mode, + .handler = gdb_handle_set_qemu_phy_mem_mode, .cmd = "qemu.PhyMemMode:", .cmd_startswith = 1, .schema = "l0" @@ -2482,7 +1707,7 @@ static void handle_gen_query(GArray *params, void *user_ctx) if (process_string_cmd(NULL, get_param(params, 0)->data, gdb_gen_query_table, ARRAY_SIZE(gdb_gen_query_table))) { - put_packet(""); + gdb_put_packet(""); } } @@ -2501,16 +1726,19 @@ static void handle_gen_set(GArray *params, void *user_ctx) if (process_string_cmd(NULL, get_param(params, 0)->data, gdb_gen_set_table, ARRAY_SIZE(gdb_gen_set_table))) { - put_packet(""); + gdb_put_packet(""); } } static void handle_target_halt(GArray *params, void *user_ctx) { - g_string_printf(gdbserver_state.str_buf, "T%02xthread:", GDB_SIGNAL_TRAP); - gdb_append_thread_id(gdbserver_state.c_cpu, gdbserver_state.str_buf); - g_string_append_c(gdbserver_state.str_buf, ';'); - put_strbuf(); + if (gdbserver_state.allow_stop_reply) { + g_string_printf(gdbserver_state.str_buf, "T%02xthread:", GDB_SIGNAL_TRAP); + gdb_append_thread_id(gdbserver_state.c_cpu, gdbserver_state.str_buf); + g_string_append_c(gdbserver_state.str_buf, ';'); + gdb_put_strbuf(); + gdbserver_state.allow_stop_reply = false; + } /* * Remove all the breakpoints when this query is issued, * because gdb is doing an initial connect and the state @@ -2527,14 +1755,15 @@ static int gdb_handle_packet(const char *line_buf) switch (line_buf[0]) { case '!': - put_packet("OK"); + gdb_put_packet("OK"); break; case '?': { static const GdbCmdParseEntry target_halted_cmd_desc = { .handler = handle_target_halt, .cmd = "?", - .cmd_startswith = 1 + .cmd_startswith = 1, + .allow_stop_reply = true, }; cmd_parser = &target_halted_cmd_desc; } @@ -2545,6 +1774,7 @@ static int gdb_handle_packet(const char *line_buf) .handler = handle_continue, .cmd = "c", .cmd_startswith = 1, + .allow_stop_reply = true, .schema = "L0" }; cmd_parser = &continue_cmd_desc; @@ -2556,6 +1786,7 @@ static int gdb_handle_packet(const char *line_buf) .handler = handle_cont_with_sig, .cmd = "C", .cmd_startswith = 1, + .allow_stop_reply = true, .schema = "l0" }; cmd_parser = &cont_with_sig_cmd_desc; @@ -2594,6 +1825,7 @@ static int gdb_handle_packet(const char *line_buf) .handler = handle_step, .cmd = "s", .cmd_startswith = 1, + .allow_stop_reply = true, .schema = "L0" }; cmd_parser = &step_cmd_desc; @@ -2613,7 +1845,7 @@ static int gdb_handle_packet(const char *line_buf) case 'F': { static const GdbCmdParseEntry file_io_cmd_desc = { - .handler = handle_file_io, + .handler = gdb_handle_file_io, .cmd = "F", .cmd_startswith = 1, .schema = "L,L,o0" @@ -2754,7 +1986,7 @@ static int gdb_handle_packet(const char *line_buf) break; default: /* put empty packet */ - put_packet(""); + gdb_put_packet(""); break; } @@ -2781,193 +2013,18 @@ void gdb_set_stop_cpu(CPUState *cpu) gdbserver_state.g_cpu = cpu; } -#ifndef CONFIG_USER_ONLY -static void gdb_vm_state_change(void *opaque, bool running, RunState state) -{ - CPUState *cpu = gdbserver_state.c_cpu; - g_autoptr(GString) buf = g_string_new(NULL); - g_autoptr(GString) tid = g_string_new(NULL); - const char *type; - int ret; - - if (running || gdbserver_state.state == RS_INACTIVE) { - return; - } - /* Is there a GDB syscall waiting to be sent? 
*/ - if (gdbserver_state.current_syscall_cb) { - put_packet(gdbserver_state.syscall_buf); - return; - } - - if (cpu == NULL) { - /* No process attached */ - return; - } - - gdb_append_thread_id(cpu, tid); - - switch (state) { - case RUN_STATE_DEBUG: - if (cpu->watchpoint_hit) { - switch (cpu->watchpoint_hit->flags & BP_MEM_ACCESS) { - case BP_MEM_READ: - type = "r"; - break; - case BP_MEM_ACCESS: - type = "a"; - break; - default: - type = ""; - break; - } - trace_gdbstub_hit_watchpoint(type, cpu_gdb_index(cpu), - (target_ulong)cpu->watchpoint_hit->vaddr); - g_string_printf(buf, "T%02xthread:%s;%swatch:" TARGET_FMT_lx ";", - GDB_SIGNAL_TRAP, tid->str, type, - (target_ulong)cpu->watchpoint_hit->vaddr); - cpu->watchpoint_hit = NULL; - goto send_packet; - } else { - trace_gdbstub_hit_break(); - } - tb_flush(cpu); - ret = GDB_SIGNAL_TRAP; - break; - case RUN_STATE_PAUSED: - trace_gdbstub_hit_paused(); - ret = GDB_SIGNAL_INT; - break; - case RUN_STATE_SHUTDOWN: - trace_gdbstub_hit_shutdown(); - ret = GDB_SIGNAL_QUIT; - break; - case RUN_STATE_IO_ERROR: - trace_gdbstub_hit_io_error(); - ret = GDB_SIGNAL_IO; - break; - case RUN_STATE_WATCHDOG: - trace_gdbstub_hit_watchdog(); - ret = GDB_SIGNAL_ALRM; - break; - case RUN_STATE_INTERNAL_ERROR: - trace_gdbstub_hit_internal_error(); - ret = GDB_SIGNAL_ABRT; - break; - case RUN_STATE_SAVE_VM: - case RUN_STATE_RESTORE_VM: - return; - case RUN_STATE_FINISH_MIGRATE: - ret = GDB_SIGNAL_XCPU; - break; - default: - trace_gdbstub_hit_unknown(state); - ret = GDB_SIGNAL_UNKNOWN; - break; - } - gdb_set_stop_cpu(cpu); - g_string_printf(buf, "T%02xthread:%s;", ret, tid->str); - -send_packet: - put_packet(buf->str); - - /* disable single step if it was enabled */ - cpu_single_step(cpu, 0); -} -#endif - -/* Send a gdb syscall request. - This accepts limited printf-style format specifiers, specifically: - %x - target_ulong argument printed in hex. - %lx - 64-bit argument printed in hex. - %s - string pointer (target_ulong) and length (int) pair. */ -void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va) -{ - char *p; - char *p_end; - target_ulong addr; - uint64_t i64; - - if (!gdb_attached()) { - return; - } - - gdbserver_state.current_syscall_cb = cb; -#ifndef CONFIG_USER_ONLY - vm_stop(RUN_STATE_DEBUG); -#endif - p = &gdbserver_state.syscall_buf[0]; - p_end = &gdbserver_state.syscall_buf[sizeof(gdbserver_state.syscall_buf)]; - *(p++) = 'F'; - while (*fmt) { - if (*fmt == '%') { - fmt++; - switch (*fmt++) { - case 'x': - addr = va_arg(va, target_ulong); - p += snprintf(p, p_end - p, TARGET_FMT_lx, addr); - break; - case 'l': - if (*(fmt++) != 'x') - goto bad_format; - i64 = va_arg(va, uint64_t); - p += snprintf(p, p_end - p, "%" PRIx64, i64); - break; - case 's': - addr = va_arg(va, target_ulong); - p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x", - addr, va_arg(va, int)); - break; - default: - bad_format: - error_report("gdbstub: Bad syscall format string '%s'", - fmt - 1); - break; - } - } else { - *(p++) = *(fmt++); - } - } - *p = 0; -#ifdef CONFIG_USER_ONLY - put_packet(gdbserver_state.syscall_buf); - /* Return control to gdb for it to process the syscall request. - * Since the protocol requires that gdb hands control back to us - * using a "here are the results" F packet, we don't need to check - * gdb_handlesig's return value (which is the signal to deliver if - * execution was resumed via a continue packet). 
- */ - gdb_handlesig(gdbserver_state.c_cpu, 0); -#else - /* In this case wait to send the syscall packet until notification that - the CPU has stopped. This must be done because if the packet is sent - now the reply from the syscall request could be received while the CPU - is still in the running state, which can cause packets to be dropped - and state transition 'T' packets to be sent while the syscall is still - being processed. */ - qemu_cpu_kick(gdbserver_state.c_cpu); -#endif -} - -void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...) -{ - va_list va; - - va_start(va, fmt); - gdb_do_syscallv(cb, fmt, va); - va_end(va); -} - -static void gdb_read_byte(uint8_t ch) +void gdb_read_byte(uint8_t ch) { uint8_t reply; + gdbserver_state.allow_stop_reply = false; #ifndef CONFIG_USER_ONLY if (gdbserver_state.last_packet->len) { /* Waiting for a response to the last packet. If we see the start of a new command then abandon the previous response. */ if (ch == '-') { trace_gdbstub_err_got_nack(); - put_buffer(gdbserver_state.last_packet->data, + gdb_put_buffer(gdbserver_state.last_packet->data, gdbserver_state.last_packet->len); } else if (ch == '+') { trace_gdbstub_io_got_ack(); @@ -3089,12 +2146,12 @@ static void gdb_read_byte(uint8_t ch) trace_gdbstub_err_checksum_incorrect(gdbserver_state.line_sum, gdbserver_state.line_csum); /* send NAK reply */ reply = '-'; - put_buffer(&reply, 1); + gdb_put_buffer(&reply, 1); gdbserver_state.state = RS_IDLE; } else { /* send ACK reply */ reply = '+'; - put_buffer(&reply, 1); + gdb_put_buffer(&reply, 1); gdbserver_state.state = gdb_handle_packet(gdbserver_state.line_buf); } break; @@ -3104,39 +2161,12 @@ static void gdb_read_byte(uint8_t ch) } } -/* Tell the remote gdb that the process has exited. */ -void gdb_exit(int code) -{ - char buf[4]; - - if (!gdbserver_state.init) { - return; - } -#ifdef CONFIG_USER_ONLY - if (gdbserver_state.socket_path) { - unlink(gdbserver_state.socket_path); - } - if (gdbserver_state.fd < 0) { - return; - } -#endif - - trace_gdbstub_op_exiting((uint8_t)code); - - snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code); - put_packet(buf); - -#ifndef CONFIG_USER_ONLY - qemu_chr_fe_deinit(&gdbserver_state.chr, true); -#endif -} - /* * Create the process that will contain all the "orphan" CPUs (that are not * part of a CPU cluster). Note that if this process contains no CPUs, it won't * be attachable and thus will be invisible to the user. */ -static void create_default_process(GDBState *s) +void gdb_create_default_process(GDBState *s) { GDBProcess *process; int max_pid = 0; @@ -3156,447 +2186,3 @@ static void create_default_process(GDBState *s) process->target_xml[0] = '\0'; } -#ifdef CONFIG_USER_ONLY -int -gdb_handlesig(CPUState *cpu, int sig) -{ - char buf[256]; - int n; - - if (!gdbserver_state.init || gdbserver_state.fd < 0) { - return sig; - } - - /* disable single step if it was enabled */ - cpu_single_step(cpu, 0); - tb_flush(cpu); - - if (sig != 0) { - gdb_set_stop_cpu(cpu); - g_string_printf(gdbserver_state.str_buf, - "T%02xthread:", target_signal_to_gdb(sig)); - gdb_append_thread_id(cpu, gdbserver_state.str_buf); - g_string_append_c(gdbserver_state.str_buf, ';'); - put_strbuf(); - } - /* put_packet() might have detected that the peer terminated the - connection. 
*/ - if (gdbserver_state.fd < 0) { - return sig; - } - - sig = 0; - gdbserver_state.state = RS_IDLE; - gdbserver_state.running_state = 0; - while (gdbserver_state.running_state == 0) { - n = read(gdbserver_state.fd, buf, 256); - if (n > 0) { - int i; - - for (i = 0; i < n; i++) { - gdb_read_byte(buf[i]); - } - } else { - /* XXX: Connection closed. Should probably wait for another - connection before continuing. */ - if (n == 0) { - close(gdbserver_state.fd); - } - gdbserver_state.fd = -1; - return sig; - } - } - sig = gdbserver_state.signal; - gdbserver_state.signal = 0; - return sig; -} - -/* Tell the remote gdb that the process has exited due to SIG. */ -void gdb_signalled(CPUArchState *env, int sig) -{ - char buf[4]; - - if (!gdbserver_state.init || gdbserver_state.fd < 0) { - return; - } - - snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb(sig)); - put_packet(buf); -} - -static void gdb_accept_init(int fd) -{ - init_gdbserver_state(); - create_default_process(&gdbserver_state); - gdbserver_state.processes[0].attached = true; - gdbserver_state.c_cpu = gdb_first_attached_cpu(); - gdbserver_state.g_cpu = gdbserver_state.c_cpu; - gdbserver_state.fd = fd; - gdb_has_xml = false; -} - -static bool gdb_accept_socket(int gdb_fd) -{ - int fd; - - for(;;) { - fd = accept(gdb_fd, NULL, NULL); - if (fd < 0 && errno != EINTR) { - perror("accept socket"); - return false; - } else if (fd >= 0) { - qemu_set_cloexec(fd); - break; - } - } - - gdb_accept_init(fd); - return true; -} - -static int gdbserver_open_socket(const char *path) -{ - struct sockaddr_un sockaddr = {}; - int fd, ret; - - fd = socket(AF_UNIX, SOCK_STREAM, 0); - if (fd < 0) { - perror("create socket"); - return -1; - } - - sockaddr.sun_family = AF_UNIX; - pstrcpy(sockaddr.sun_path, sizeof(sockaddr.sun_path) - 1, path); - ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)); - if (ret < 0) { - perror("bind socket"); - close(fd); - return -1; - } - ret = listen(fd, 1); - if (ret < 0) { - perror("listen socket"); - close(fd); - return -1; - } - - return fd; -} - -static bool gdb_accept_tcp(int gdb_fd) -{ - struct sockaddr_in sockaddr = {}; - socklen_t len; - int fd; - - for(;;) { - len = sizeof(sockaddr); - fd = accept(gdb_fd, (struct sockaddr *)&sockaddr, &len); - if (fd < 0 && errno != EINTR) { - perror("accept"); - return false; - } else if (fd >= 0) { - qemu_set_cloexec(fd); - break; - } - } - - /* set short latency */ - if (socket_set_nodelay(fd)) { - perror("setsockopt"); - close(fd); - return false; - } - - gdb_accept_init(fd); - return true; -} - -static int gdbserver_open_port(int port) -{ - struct sockaddr_in sockaddr; - int fd, ret; - - fd = socket(PF_INET, SOCK_STREAM, 0); - if (fd < 0) { - perror("socket"); - return -1; - } - qemu_set_cloexec(fd); - - socket_set_fast_reuse(fd); - - sockaddr.sin_family = AF_INET; - sockaddr.sin_port = htons(port); - sockaddr.sin_addr.s_addr = 0; - ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)); - if (ret < 0) { - perror("bind"); - close(fd); - return -1; - } - ret = listen(fd, 1); - if (ret < 0) { - perror("listen"); - close(fd); - return -1; - } - - return fd; -} - -int gdbserver_start(const char *port_or_path) -{ - int port = g_ascii_strtoull(port_or_path, NULL, 10); - int gdb_fd; - - if (port > 0) { - gdb_fd = gdbserver_open_port(port); - } else { - gdb_fd = gdbserver_open_socket(port_or_path); - } - - if (gdb_fd < 0) { - return -1; - } - - if (port > 0 && gdb_accept_tcp(gdb_fd)) { - return 0; - } else if (gdb_accept_socket(gdb_fd)) { - 
gdbserver_state.socket_path = g_strdup(port_or_path); - return 0; - } - - /* gone wrong */ - close(gdb_fd); - return -1; -} - -/* Disable gdb stub for child processes. */ -void gdbserver_fork(CPUState *cpu) -{ - if (!gdbserver_state.init || gdbserver_state.fd < 0) { - return; - } - close(gdbserver_state.fd); - gdbserver_state.fd = -1; - cpu_breakpoint_remove_all(cpu, BP_GDB); - cpu_watchpoint_remove_all(cpu, BP_GDB); -} -#else -static int gdb_chr_can_receive(void *opaque) -{ - /* We can handle an arbitrarily large amount of data. - Pick the maximum packet size, which is as good as anything. */ - return MAX_PACKET_LENGTH; -} - -static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size) -{ - int i; - - for (i = 0; i < size; i++) { - gdb_read_byte(buf[i]); - } -} - -static void gdb_chr_event(void *opaque, QEMUChrEvent event) -{ - int i; - GDBState *s = (GDBState *) opaque; - - switch (event) { - case CHR_EVENT_OPENED: - /* Start with first process attached, others detached */ - for (i = 0; i < s->process_num; i++) { - s->processes[i].attached = !i; - } - - s->c_cpu = gdb_first_attached_cpu(); - s->g_cpu = s->c_cpu; - - vm_stop(RUN_STATE_PAUSED); - replay_gdb_attached(); - gdb_has_xml = false; - break; - default: - break; - } -} - -static int gdb_monitor_write(Chardev *chr, const uint8_t *buf, int len) -{ - g_autoptr(GString) hex_buf = g_string_new("O"); - memtohex(hex_buf, buf, len); - put_packet(hex_buf->str); - return len; -} - -#ifndef _WIN32 -static void gdb_sigterm_handler(int signal) -{ - if (runstate_is_running()) { - vm_stop(RUN_STATE_PAUSED); - } -} -#endif - -static void gdb_monitor_open(Chardev *chr, ChardevBackend *backend, - bool *be_opened, Error **errp) -{ - *be_opened = false; -} - -static void char_gdb_class_init(ObjectClass *oc, void *data) -{ - ChardevClass *cc = CHARDEV_CLASS(oc); - - cc->internal = true; - cc->open = gdb_monitor_open; - cc->chr_write = gdb_monitor_write; -} - -#define TYPE_CHARDEV_GDB "chardev-gdb" - -static const TypeInfo char_gdb_type_info = { - .name = TYPE_CHARDEV_GDB, - .parent = TYPE_CHARDEV, - .class_init = char_gdb_class_init, -}; - -static int find_cpu_clusters(Object *child, void *opaque) -{ - if (object_dynamic_cast(child, TYPE_CPU_CLUSTER)) { - GDBState *s = (GDBState *) opaque; - CPUClusterState *cluster = CPU_CLUSTER(child); - GDBProcess *process; - - s->processes = g_renew(GDBProcess, s->processes, ++s->process_num); - - process = &s->processes[s->process_num - 1]; - - /* - * GDB process IDs -1 and 0 are reserved. To avoid subtle errors at - * runtime, we enforce here that the machine does not use a cluster ID - * that would lead to PID 0. 
- */ - assert(cluster->cluster_id != UINT32_MAX); - process->pid = cluster->cluster_id + 1; - process->attached = false; - process->target_xml[0] = '\0'; - - return 0; - } - - return object_child_foreach(child, find_cpu_clusters, opaque); -} - -static int pid_order(const void *a, const void *b) -{ - GDBProcess *pa = (GDBProcess *) a; - GDBProcess *pb = (GDBProcess *) b; - - if (pa->pid < pb->pid) { - return -1; - } else if (pa->pid > pb->pid) { - return 1; - } else { - return 0; - } -} - -static void create_processes(GDBState *s) -{ - object_child_foreach(object_get_root(), find_cpu_clusters, s); - - if (gdbserver_state.processes) { - /* Sort by PID */ - qsort(gdbserver_state.processes, gdbserver_state.process_num, sizeof(gdbserver_state.processes[0]), pid_order); - } - - create_default_process(s); -} - -int gdbserver_start(const char *device) -{ - trace_gdbstub_op_start(device); - - char gdbstub_device_name[128]; - Chardev *chr = NULL; - Chardev *mon_chr; - - if (!first_cpu) { - error_report("gdbstub: meaningless to attach gdb to a " - "machine without any CPU."); - return -1; - } - - if (!gdb_supports_guest_debug()) { - error_report("gdbstub: current accelerator doesn't support guest debugging"); - return -1; - } - - if (!device) - return -1; - if (strcmp(device, "none") != 0) { - if (strstart(device, "tcp:", NULL)) { - /* enforce required TCP attributes */ - snprintf(gdbstub_device_name, sizeof(gdbstub_device_name), - "%s,wait=off,nodelay=on,server=on", device); - device = gdbstub_device_name; - } -#ifndef _WIN32 - else if (strcmp(device, "stdio") == 0) { - struct sigaction act; - - memset(&act, 0, sizeof(act)); - act.sa_handler = gdb_sigterm_handler; - sigaction(SIGINT, &act, NULL); - } -#endif - /* - * FIXME: it's a bit weird to allow using a mux chardev here - * and implicitly setup a monitor. We may want to break this. - */ - chr = qemu_chr_new_noreplay("gdb", device, true, NULL); - if (!chr) - return -1; - } - - if (!gdbserver_state.init) { - init_gdbserver_state(); - - qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL); - - /* Initialize a monitor terminal for gdb */ - mon_chr = qemu_chardev_new(NULL, TYPE_CHARDEV_GDB, - NULL, NULL, &error_abort); - monitor_init_hmp(mon_chr, false, &error_abort); - } else { - qemu_chr_fe_deinit(&gdbserver_state.chr, true); - mon_chr = gdbserver_state.mon_chr; - reset_gdbserver_state(); - } - - create_processes(&gdbserver_state); - - if (chr) { - qemu_chr_fe_init(&gdbserver_state.chr, chr, &error_abort); - qemu_chr_fe_set_handlers(&gdbserver_state.chr, gdb_chr_can_receive, - gdb_chr_receive, gdb_chr_event, - NULL, &gdbserver_state, NULL, true); - } - gdbserver_state.state = chr ? 
RS_IDLE : RS_INACTIVE; - gdbserver_state.mon_chr = mon_chr; - gdbserver_state.current_syscall_cb = NULL; - - return 0; -} - -static void register_types(void) -{ - type_register_static(&char_gdb_type_info); -} - -type_init(register_types); -#endif diff --git a/gdbstub/internals.h b/gdbstub/internals.h index eabb0341d1..0079860139 100644 --- a/gdbstub/internals.h +++ b/gdbstub/internals.h @@ -6,12 +6,237 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef _INTERNALS_H_ -#define _INTERNALS_H_ +#ifndef GDBSTUB_INTERNALS_H +#define GDBSTUB_INTERNALS_H +#include "exec/cpu-common.h" + +#define MAX_PACKET_LENGTH 4096 + +/* + * Shared structures and definitions + */ + +enum { + GDB_SIGNAL_0 = 0, + GDB_SIGNAL_INT = 2, + GDB_SIGNAL_QUIT = 3, + GDB_SIGNAL_TRAP = 5, + GDB_SIGNAL_ABRT = 6, + GDB_SIGNAL_ALRM = 14, + GDB_SIGNAL_IO = 23, + GDB_SIGNAL_XCPU = 24, + GDB_SIGNAL_UNKNOWN = 143 +}; + +typedef struct GDBProcess { + uint32_t pid; + bool attached; + + char target_xml[1024]; +} GDBProcess; + +enum RSState { + RS_INACTIVE, + RS_IDLE, + RS_GETLINE, + RS_GETLINE_ESC, + RS_GETLINE_RLE, + RS_CHKSUM1, + RS_CHKSUM2, +}; + +typedef struct GDBState { + bool init; /* have we been initialised? */ + CPUState *c_cpu; /* current CPU for step/continue ops */ + CPUState *g_cpu; /* current CPU for other ops */ + CPUState *query_cpu; /* for q{f|s}ThreadInfo */ + enum RSState state; /* parsing state */ + char line_buf[MAX_PACKET_LENGTH]; + int line_buf_index; + int line_sum; /* running checksum */ + int line_csum; /* checksum at the end of the packet */ + GByteArray *last_packet; + int signal; + bool multiprocess; + GDBProcess *processes; + int process_num; + GString *str_buf; + GByteArray *mem_buf; + int sstep_flags; + int supported_sstep_flags; + /* + * Whether we are allowed to send a stop reply packet at this moment. + * Must be set off after sending the stop reply itself. + */ + bool allow_stop_reply; +} GDBState; + +/* lives in main gdbstub.c */ +extern GDBState gdbserver_state; + +/* + * Inline utility function, convert from int to hex and back + */ + +static inline int fromhex(int v) +{ + if (v >= '0' && v <= '9') { + return v - '0'; + } else if (v >= 'A' && v <= 'F') { + return v - 'A' + 10; + } else if (v >= 'a' && v <= 'f') { + return v - 'a' + 10; + } else { + return 0; + } +} + +static inline int tohex(int v) +{ + if (v < 10) { + return v + '0'; + } else { + return v - 10 + 'a'; + } +} + +/* + * Connection helpers for both softmmu and user backends + */ + +void gdb_put_strbuf(void); +int gdb_put_packet(const char *buf); +int gdb_put_packet_binary(const char *buf, int len, bool dump); +void gdb_hextomem(GByteArray *mem, const char *buf, int len); +void gdb_memtohex(GString *buf, const uint8_t *mem, int len); +void gdb_memtox(GString *buf, const char *mem, int len); +void gdb_read_byte(uint8_t ch); + +/* + * Packet acknowledgement - we handle this slightly differently + * between user and softmmu mode, mainly to deal with the differences + * between the flexible chardev and the direct fd approaches. + * + * We currently don't support a negotiated QStartNoAckMode + */ + +/** + * gdb_got_immediate_ack() - check ok to continue + * + * Returns true to continue, false to re-transmit for user only, the + * softmmu stub always returns true. 
+ */ +bool gdb_got_immediate_ack(void); +/* utility helpers */ +CPUState *gdb_first_attached_cpu(void); +void gdb_append_thread_id(CPUState *cpu, GString *buf); +int gdb_get_cpu_index(CPUState *cpu); +unsigned int gdb_get_max_cpus(void); /* both */ +bool gdb_can_reverse(void); /* softmmu, stub for user */ + +void gdb_create_default_process(GDBState *s); + +/* signal mapping, common for softmmu, specialised for user-mode */ +int gdb_signal_to_target(int sig); +int gdb_target_signal_to_gdb(int sig); + +int gdb_get_char(void); /* user only */ + +/** + * gdb_continue() - handle continue in mode specific way. + */ +void gdb_continue(void); + +/** + * gdb_continue_partial() - handle partial continue in mode specific way. + */ +int gdb_continue_partial(char *newstates); + +/* + * Helpers with separate softmmu and user implementations + */ +void gdb_put_buffer(const uint8_t *buf, int len); + +/* + * Command handlers - either specialised or softmmu or user only + */ +void gdb_init_gdbserver_state(void); + +typedef enum GDBThreadIdKind { + GDB_ONE_THREAD = 0, + GDB_ALL_THREADS, /* One process, all threads */ + GDB_ALL_PROCESSES, + GDB_READ_THREAD_ERR +} GDBThreadIdKind; + +typedef union GdbCmdVariant { + const char *data; + uint8_t opcode; + unsigned long val_ul; + unsigned long long val_ull; + struct { + GDBThreadIdKind kind; + uint32_t pid; + uint32_t tid; + } thread_id; +} GdbCmdVariant; + +#define get_param(p, i) (&g_array_index(p, GdbCmdVariant, i)) + +void gdb_handle_query_rcmd(GArray *params, void *user_ctx); /* softmmu */ +void gdb_handle_query_offsets(GArray *params, void *user_ctx); /* user */ +void gdb_handle_query_xfer_auxv(GArray *params, void *user_ctx); /* user */ + +void gdb_handle_query_attached(GArray *params, void *user_ctx); /* both */ + +/* softmmu only */ +void gdb_handle_query_qemu_phy_mem_mode(GArray *params, void *user_ctx); +void gdb_handle_set_qemu_phy_mem_mode(GArray *params, void *user_ctx); + +/* syscall handling */ +void gdb_handle_file_io(GArray *params, void *user_ctx); +bool gdb_handled_syscall(void); +void gdb_disable_syscalls(void); +void gdb_syscall_reset(void); + +/* user/softmmu specific syscall handling */ +void gdb_syscall_handling(const char *syscall_packet); + +/* + * Break/Watch point support - there is an implementation for softmmu + * and user mode. + */ bool gdb_supports_guest_debug(void); -int gdb_breakpoint_insert(CPUState *cs, int type, hwaddr addr, hwaddr len); -int gdb_breakpoint_remove(CPUState *cs, int type, hwaddr addr, hwaddr len); +int gdb_breakpoint_insert(CPUState *cs, int type, vaddr addr, vaddr len); +int gdb_breakpoint_remove(CPUState *cs, int type, vaddr addr, vaddr len); void gdb_breakpoint_remove_all(CPUState *cs); -#endif /* _INTERNALS_H_ */ +/** + * gdb_target_memory_rw_debug() - handle debug access to memory + * @cs: CPUState + * @addr: nominal address, could be an entire physical address + * @buf: data + * @len: length of access + * @is_write: is it a write operation + * + * This function is specialised depending on the mode we are running + * in. For softmmu guests we can switch the interpretation of the + * address to a physical address.
+ */ +int gdb_target_memory_rw_debug(CPUState *cs, hwaddr addr, + uint8_t *buf, int len, bool is_write); + +//// --- Begin LibAFL code --- + +struct libafl_custom_gdb_cmd { + int (*callback)(uint8_t*, size_t, void*); + void* data; + struct libafl_custom_gdb_cmd* next; +}; + +extern struct libafl_custom_gdb_cmd* libafl_qemu_gdb_cmds; + +//// --- End LibAFL code --- + +#endif /* GDBSTUB_INTERNALS_H */ diff --git a/gdbstub/meson.build b/gdbstub/meson.build index fc895a2c39..cdb4d28691 100644 --- a/gdbstub/meson.build +++ b/gdbstub/meson.build @@ -4,6 +4,36 @@ # types such as hwaddr. # -specific_ss.add(files('gdbstub.c')) -softmmu_ss.add(files('softmmu.c')) -user_ss.add(files('user.c')) +# We need to build the core gdb code via a library to be able to tweak +# cflags so: + +gdb_user_ss = ss.source_set() +gdb_softmmu_ss = ss.source_set() + +# We build two versions of gdbstub, one for each mode +gdb_user_ss.add(files('gdbstub.c', 'user.c')) +gdb_softmmu_ss.add(files('gdbstub.c', 'softmmu.c')) + +gdb_user_ss = gdb_user_ss.apply(config_host, strict: false) +gdb_softmmu_ss = gdb_softmmu_ss.apply(config_host, strict: false) + +libgdb_user = static_library('gdb_user', + gdb_user_ss.sources() + genh, + name_suffix: 'fa', + c_args: '-DCONFIG_USER_ONLY', + build_by_default: have_user) + +libgdb_softmmu = static_library('gdb_softmmu', + gdb_softmmu_ss.sources() + genh, + name_suffix: 'fa', + build_by_default: have_system) + +gdb_user = declare_dependency(link_whole: libgdb_user) +user_ss.add(gdb_user) +gdb_softmmu = declare_dependency(link_whole: libgdb_softmmu) +softmmu_ss.add(gdb_softmmu) + +common_ss.add(files('syscalls.c')) + +# The user-target is specialised by the guest +specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-target.c')) diff --git a/gdbstub/softmmu.c b/gdbstub/softmmu.c index f208c6cf15..a4995d0e07 100644 --- a/gdbstub/softmmu.c +++ b/gdbstub/softmmu.c @@ -4,17 +4,643 @@ * Debug integration depends on support from the individual * accelerators so most of this involves calling the ops helpers. * + * Copyright (c) 2003-2005 Fabrice Bellard * Copyright (c) 2022 Linaro Ltd * - * SPDX-License-Identifier: GPL-2.0-or-later + * SPDX-License-Identifier: LGPL-2.0+ */ #include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/cutils.h" #include "exec/gdbstub.h" +#include "gdbstub/syscalls.h" #include "exec/hwaddr.h" +#include "exec/tb-flush.h" #include "sysemu/cpus.h" +#include "sysemu/runstate.h" +#include "sysemu/replay.h" +#include "hw/core/cpu.h" +#include "hw/cpu/cluster.h" +#include "hw/boards.h" +#include "chardev/char.h" +#include "chardev/char-fe.h" +#include "monitor/monitor.h" +#include "trace.h" #include "internals.h" +/* System emulation specific state */ +typedef struct { + CharBackend chr; + Chardev *mon_chr; +} GDBSystemState; + +GDBSystemState gdbserver_system_state; + +static void reset_gdbserver_state(void) +{ + g_free(gdbserver_state.processes); + gdbserver_state.processes = NULL; + gdbserver_state.process_num = 0; + gdbserver_state.allow_stop_reply = false; +} + +/* + * Return the GDB index for a given vCPU state. + * + * In system mode GDB numbers CPUs from 1 as 0 is reserved as an "any + * cpu" index. + */ +int gdb_get_cpu_index(CPUState *cpu) +{ + return cpu->cpu_index + 1; +} + +/* + * We check the status of the last message in the chardev receive code + */ +bool gdb_got_immediate_ack(void) +{ + return true; +} + +/* + * GDB Connection management. 
For system emulation we do all of this + * via our existing Chardev infrastructure which allows us to support + * network and unix sockets. + */ + +void gdb_put_buffer(const uint8_t *buf, int len) +{ + /* + * XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks + */ + qemu_chr_fe_write_all(&gdbserver_system_state.chr, buf, len); +} + +static void gdb_chr_event(void *opaque, QEMUChrEvent event) +{ + int i; + GDBState *s = (GDBState *) opaque; + + switch (event) { + case CHR_EVENT_OPENED: + /* Start with first process attached, others detached */ + for (i = 0; i < s->process_num; i++) { + s->processes[i].attached = !i; + } + + s->c_cpu = gdb_first_attached_cpu(); + s->g_cpu = s->c_cpu; + + vm_stop(RUN_STATE_PAUSED); + replay_gdb_attached(); + gdb_has_xml = false; + break; + default: + break; + } +} + +/* + * In softmmu mode we stop the VM and wait to send the syscall packet + * until notification that the CPU has stopped. This must be done + * because if the packet is sent now the reply from the syscall + * request could be received while the CPU is still in the running + * state, which can cause packets to be dropped and state transition + * 'T' packets to be sent while the syscall is still being processed. + */ +void gdb_syscall_handling(const char *syscall_packet) +{ + vm_stop(RUN_STATE_DEBUG); + qemu_cpu_kick(gdbserver_state.c_cpu); +} + +static void gdb_vm_state_change(void *opaque, bool running, RunState state) +{ + CPUState *cpu = gdbserver_state.c_cpu; + g_autoptr(GString) buf = g_string_new(NULL); + g_autoptr(GString) tid = g_string_new(NULL); + const char *type; + int ret; + + if (running || gdbserver_state.state == RS_INACTIVE) { + return; + } + + /* Is there a GDB syscall waiting to be sent? */ + if (gdb_handled_syscall()) { + return; + } + + if (cpu == NULL) { + /* No process attached */ + return; + } + + if (!gdbserver_state.allow_stop_reply) { + return; + } + + gdb_append_thread_id(cpu, tid); + + switch (state) { + case RUN_STATE_DEBUG: + if (cpu->watchpoint_hit) { + switch (cpu->watchpoint_hit->flags & BP_MEM_ACCESS) { + case BP_MEM_READ: + type = "r"; + break; + case BP_MEM_ACCESS: + type = "a"; + break; + default: + type = ""; + break; + } + trace_gdbstub_hit_watchpoint(type, + gdb_get_cpu_index(cpu), + cpu->watchpoint_hit->vaddr); + g_string_printf(buf, "T%02xthread:%s;%swatch:%" VADDR_PRIx ";", + GDB_SIGNAL_TRAP, tid->str, type, + cpu->watchpoint_hit->vaddr); + cpu->watchpoint_hit = NULL; + goto send_packet; + } else { + trace_gdbstub_hit_break(); + } + tb_flush(cpu); + ret = GDB_SIGNAL_TRAP; + break; + case RUN_STATE_PAUSED: + trace_gdbstub_hit_paused(); + ret = GDB_SIGNAL_INT; + break; + case RUN_STATE_SHUTDOWN: + trace_gdbstub_hit_shutdown(); + ret = GDB_SIGNAL_QUIT; + break; + case RUN_STATE_IO_ERROR: + trace_gdbstub_hit_io_error(); + ret = GDB_SIGNAL_IO; + break; + case RUN_STATE_WATCHDOG: + trace_gdbstub_hit_watchdog(); + ret = GDB_SIGNAL_ALRM; + break; + case RUN_STATE_INTERNAL_ERROR: + trace_gdbstub_hit_internal_error(); + ret = GDB_SIGNAL_ABRT; + break; + case RUN_STATE_SAVE_VM: + case RUN_STATE_RESTORE_VM: + return; + case RUN_STATE_FINISH_MIGRATE: + ret = GDB_SIGNAL_XCPU; + break; + default: + trace_gdbstub_hit_unknown(state); + ret = GDB_SIGNAL_UNKNOWN; + break; + } + gdb_set_stop_cpu(cpu); + g_string_printf(buf, "T%02xthread:%s;", ret, tid->str); + +send_packet: + gdb_put_packet(buf->str); + gdbserver_state.allow_stop_reply = false; + + /* disable single step if it was enabled */ + cpu_single_step(cpu, 0); +} + 
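The stop replies built by gdb_vm_state_change() above travel inside the standard Remote Serial Protocol envelope that gdb_put_packet() adds: the payload is wrapped as $payload#XX, where XX is the modulo-256 sum of the payload bytes printed as two hex digits. As a rough illustration of that framing only (this is not QEMU code; rsp_frame is a hypothetical helper):

#include <stdio.h>
#include <string.h>

/*
 * Illustrative sketch: wrap an RSP payload as "$<payload>#<checksum>".
 * The checksum is the modulo-256 sum of the payload bytes, printed as
 * two lowercase hex digits.
 */
static void rsp_frame(const char *payload, char *out, size_t out_len)
{
    unsigned csum = 0;
    const char *p;

    for (p = payload; *p; p++) {
        csum = (csum + (unsigned char)*p) & 0xff;
    }
    snprintf(out, out_len, "$%s#%02x", payload, csum);
}

int main(void)
{
    char framed[64];

    /* A stop reply like those built above: SIGTRAP (5) on thread 1. */
    rsp_frame("T05thread:1;", framed, sizeof(framed));
    printf("%s\n", framed); /* prints $T05thread:1;#d7 */
    return 0;
}

gdb acknowledges each such frame with '+' or asks for retransmission with '-', which is the handshake the '+'/'-' handling in gdb_read_byte() and gdb_got_immediate_ack() implement.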
+#ifndef _WIN32 +static void gdb_sigterm_handler(int signal) +{ + if (runstate_is_running()) { + vm_stop(RUN_STATE_PAUSED); + } +} +#endif + +static int gdb_monitor_write(Chardev *chr, const uint8_t *buf, int len) +{ + g_autoptr(GString) hex_buf = g_string_new("O"); + gdb_memtohex(hex_buf, buf, len); + gdb_put_packet(hex_buf->str); + return len; +} + +static void gdb_monitor_open(Chardev *chr, ChardevBackend *backend, + bool *be_opened, Error **errp) +{ + *be_opened = false; +} + +static void char_gdb_class_init(ObjectClass *oc, void *data) +{ + ChardevClass *cc = CHARDEV_CLASS(oc); + + cc->internal = true; + cc->open = gdb_monitor_open; + cc->chr_write = gdb_monitor_write; +} + +#define TYPE_CHARDEV_GDB "chardev-gdb" + +static const TypeInfo char_gdb_type_info = { + .name = TYPE_CHARDEV_GDB, + .parent = TYPE_CHARDEV, + .class_init = char_gdb_class_init, +}; + +static int gdb_chr_can_receive(void *opaque) +{ + /* + * We can handle an arbitrarily large amount of data. + * Pick the maximum packet size, which is as good as anything. + */ + return MAX_PACKET_LENGTH; +} + +static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size) +{ + int i; + + for (i = 0; i < size; i++) { + gdb_read_byte(buf[i]); + } +} + +static int find_cpu_clusters(Object *child, void *opaque) +{ + if (object_dynamic_cast(child, TYPE_CPU_CLUSTER)) { + GDBState *s = (GDBState *) opaque; + CPUClusterState *cluster = CPU_CLUSTER(child); + GDBProcess *process; + + s->processes = g_renew(GDBProcess, s->processes, ++s->process_num); + + process = &s->processes[s->process_num - 1]; + + /* + * GDB process IDs -1 and 0 are reserved. To avoid subtle errors at + * runtime, we enforce here that the machine does not use a cluster ID + * that would lead to PID 0. + */ + assert(cluster->cluster_id != UINT32_MAX); + process->pid = cluster->cluster_id + 1; + process->attached = false; + process->target_xml[0] = '\0'; + + return 0; + } + + return object_child_foreach(child, find_cpu_clusters, opaque); +} + +static int pid_order(const void *a, const void *b) +{ + GDBProcess *pa = (GDBProcess *) a; + GDBProcess *pb = (GDBProcess *) b; + + if (pa->pid < pb->pid) { + return -1; + } else if (pa->pid > pb->pid) { + return 1; + } else { + return 0; + } +} + +static void create_processes(GDBState *s) +{ + object_child_foreach(object_get_root(), find_cpu_clusters, s); + + if (gdbserver_state.processes) { + /* Sort by PID */ + qsort(gdbserver_state.processes, + gdbserver_state.process_num, + sizeof(gdbserver_state.processes[0]), + pid_order); + } + + gdb_create_default_process(s); +} + +int gdbserver_start(const char *device) +{ + trace_gdbstub_op_start(device); + + char gdbstub_device_name[128]; + Chardev *chr = NULL; + Chardev *mon_chr; + + if (!first_cpu) { + error_report("gdbstub: meaningless to attach gdb to a " + "machine without any CPU."); + return -1; + } + + if (!gdb_supports_guest_debug()) { + error_report("gdbstub: current accelerator doesn't " + "support guest debugging"); + return -1; + } + + if (!device) { + return -1; + } + if (strcmp(device, "none") != 0) { + if (strstart(device, "tcp:", NULL)) { + /* enforce required TCP attributes */ + snprintf(gdbstub_device_name, sizeof(gdbstub_device_name), + "%s,wait=off,nodelay=on,server=on", device); + device = gdbstub_device_name; + } +#ifndef _WIN32 + else if (strcmp(device, "stdio") == 0) { + struct sigaction act; + + memset(&act, 0, sizeof(act)); + act.sa_handler = gdb_sigterm_handler; + sigaction(SIGINT, &act, NULL); + } +#endif + /* + * FIXME: it's a bit weird to allow 
using a mux chardev here + * and implicitly setup a monitor. We may want to break this. + */ + chr = qemu_chr_new_noreplay("gdb", device, true, NULL); + if (!chr) { + return -1; + } + } + + if (!gdbserver_state.init) { + gdb_init_gdbserver_state(); + + qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL); + + /* Initialize a monitor terminal for gdb */ + mon_chr = qemu_chardev_new(NULL, TYPE_CHARDEV_GDB, + NULL, NULL, &error_abort); + monitor_init_hmp(mon_chr, false, &error_abort); + } else { + qemu_chr_fe_deinit(&gdbserver_system_state.chr, true); + mon_chr = gdbserver_system_state.mon_chr; + reset_gdbserver_state(); + } + + create_processes(&gdbserver_state); + + if (chr) { + qemu_chr_fe_init(&gdbserver_system_state.chr, chr, &error_abort); + qemu_chr_fe_set_handlers(&gdbserver_system_state.chr, + gdb_chr_can_receive, + gdb_chr_receive, gdb_chr_event, + NULL, &gdbserver_state, NULL, true); + } + gdbserver_state.state = chr ? RS_IDLE : RS_INACTIVE; + gdbserver_system_state.mon_chr = mon_chr; + gdb_syscall_reset(); + + return 0; +} + +static void register_types(void) +{ + type_register_static(&char_gdb_type_info); +} + +type_init(register_types); + +/* Tell the remote gdb that the process has exited. */ +void gdb_exit(int code) +{ + char buf[4]; + + if (!gdbserver_state.init) { + return; + } + + trace_gdbstub_op_exiting((uint8_t)code); + + if (gdbserver_state.allow_stop_reply) { + snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code); + gdb_put_packet(buf); + gdbserver_state.allow_stop_reply = false; + } + + qemu_chr_fe_deinit(&gdbserver_system_state.chr, true); +} + +/* + * Memory access + */ +static int phy_memory_mode; + +int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr, + uint8_t *buf, int len, bool is_write) +{ + CPUClass *cc; + + if (phy_memory_mode) { + if (is_write) { + cpu_physical_memory_write(addr, buf, len); + } else { + cpu_physical_memory_read(addr, buf, len); + } + return 0; + } + + cc = CPU_GET_CLASS(cpu); + if (cc->memory_rw_debug) { + return cc->memory_rw_debug(cpu, addr, buf, len, is_write); + } + + return cpu_memory_rw_debug(cpu, addr, buf, len, is_write); +} + +/* + * cpu helpers + */ + +unsigned int gdb_get_max_cpus(void) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + return ms->smp.max_cpus; +} + +bool gdb_can_reverse(void) +{ + return replay_mode == REPLAY_MODE_PLAY; +} + +/* + * Softmmu specific command helpers + */ + +void gdb_handle_query_qemu_phy_mem_mode(GArray *params, + void *user_ctx) +{ + g_string_printf(gdbserver_state.str_buf, "%d", phy_memory_mode); + gdb_put_strbuf(); +} + +void gdb_handle_set_qemu_phy_mem_mode(GArray *params, void *user_ctx) +{ + if (!params->len) { + gdb_put_packet("E22"); + return; + } + + if (!get_param(params, 0)->val_ul) { + phy_memory_mode = 0; + } else { + phy_memory_mode = 1; + } + gdb_put_packet("OK"); +} + +void gdb_handle_query_rcmd(GArray *params, void *user_ctx) +{ + const guint8 zero = 0; + int len; + + if (!params->len) { + gdb_put_packet("E22"); + return; + } + + len = strlen(get_param(params, 0)->data); + if (len % 2) { + gdb_put_packet("E01"); + return; + } + + g_assert(gdbserver_state.mem_buf->len == 0); + len = len / 2; + gdb_hextomem(gdbserver_state.mem_buf, get_param(params, 0)->data, len); + + //// --- Begin LibAFL code --- + + struct libafl_custom_gdb_cmd** c = &libafl_qemu_gdb_cmds; + int recognized = 0; + while (*c) { + recognized |= (*c)->callback(gdbserver_state.mem_buf->data, gdbserver_state.mem_buf->len, (*c)->data); + c = &(*c)->next; + } + + if (recognized) { + gdb_put_packet("OK"); 
+ return; + } + + //// --- End LibAFL code --- + + g_byte_array_append(gdbserver_state.mem_buf, &zero, 1); + qemu_chr_be_write(gdbserver_system_state.mon_chr, + gdbserver_state.mem_buf->data, + gdbserver_state.mem_buf->len); + gdb_put_packet("OK"); +} + +/* + * Execution state helpers + */ + +void gdb_handle_query_attached(GArray *params, void *user_ctx) +{ + gdb_put_packet("1"); +} + +void gdb_continue(void) +{ + if (!runstate_needs_reset()) { + trace_gdbstub_op_continue(); + vm_start(); + } +} + +/* + * Resume execution, per CPU actions. + */ +int gdb_continue_partial(char *newstates) +{ + CPUState *cpu; + int res = 0; + int flag = 0; + + if (!runstate_needs_reset()) { + bool step_requested = false; + CPU_FOREACH(cpu) { + if (newstates[cpu->cpu_index] == 's') { + step_requested = true; + break; + } + } + + if (vm_prepare_start(step_requested)) { + return 0; + } + + CPU_FOREACH(cpu) { + switch (newstates[cpu->cpu_index]) { + case 0: + case 1: + break; /* nothing to do here */ + case 's': + trace_gdbstub_op_stepping(cpu->cpu_index); + cpu_single_step(cpu, gdbserver_state.sstep_flags); + cpu_resume(cpu); + flag = 1; + break; + case 'c': + trace_gdbstub_op_continue_cpu(cpu->cpu_index); + cpu_resume(cpu); + flag = 1; + break; + default: + res = -1; + break; + } + } + } + if (flag) { + qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true); + } + return res; +} + +/* + * Signal Handling - in system mode we only need SIGINT and SIGTRAP; other + * signals are not yet supported. + */ + +enum { + TARGET_SIGINT = 2, + TARGET_SIGTRAP = 5 +}; + +int gdb_signal_to_target(int sig) +{ + switch (sig) { + case 2: + return TARGET_SIGINT; + case 5: + return TARGET_SIGTRAP; + default: + return -1; + } +} + +/* + * Break/Watch point helpers + */ + bool gdb_supports_guest_debug(void) { const AccelOpsClass *ops = cpus_get_accel(); @@ -24,7 +650,7 @@ bool gdb_supports_guest_debug(void) return false; } -int gdb_breakpoint_insert(CPUState *cs, int type, hwaddr addr, hwaddr len) +int gdb_breakpoint_insert(CPUState *cs, int type, vaddr addr, vaddr len) { const AccelOpsClass *ops = cpus_get_accel(); if (ops->insert_breakpoint) { @@ -33,7 +659,7 @@ int gdb_breakpoint_insert(CPUState *cs, int type, hwaddr addr, hwaddr len) return -ENOSYS; } -int gdb_breakpoint_remove(CPUState *cs, int type, hwaddr addr, hwaddr len) +int gdb_breakpoint_remove(CPUState *cs, int type, vaddr addr, vaddr len) { const AccelOpsClass *ops = cpus_get_accel(); if (ops->remove_breakpoint) { diff --git a/gdbstub/syscalls.c b/gdbstub/syscalls.c new file mode 100644 index 0000000000..02e3a8f74c --- /dev/null +++ b/gdbstub/syscalls.c @@ -0,0 +1,205 @@ +/* + * GDB Syscall Handling + * + * GDB can execute syscalls on the guest's behalf, currently used by + * the various semihosting extensions.
+ * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2023 Linaro Ltd + * + * SPDX-License-Identifier: LGPL-2.0+ + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "semihosting/semihost.h" +#include "sysemu/runstate.h" +#include "gdbstub/user.h" +#include "gdbstub/syscalls.h" +#include "trace.h" +#include "internals.h" + +/* Syscall specific state */ +typedef struct { + char syscall_buf[256]; + gdb_syscall_complete_cb current_syscall_cb; +} GDBSyscallState; + +static GDBSyscallState gdbserver_syscall_state; + +/* + * Return true if there is a GDB currently connected to the stub + * and attached to a CPU + */ +static bool gdb_attached(void) +{ + return gdbserver_state.init && gdbserver_state.c_cpu; +} + +static enum { + GDB_SYS_UNKNOWN, + GDB_SYS_ENABLED, + GDB_SYS_DISABLED, +} gdb_syscall_mode; + +/* Decide if either remote gdb syscalls or native file IO should be used. */ +int use_gdb_syscalls(void) +{ + SemihostingTarget target = semihosting_get_target(); + if (target == SEMIHOSTING_TARGET_NATIVE) { + /* -semihosting-config target=native */ + return false; + } else if (target == SEMIHOSTING_TARGET_GDB) { + /* -semihosting-config target=gdb */ + return true; + } + + /* -semihosting-config target=auto */ + /* On the first call check if gdb is connected and remember. */ + if (gdb_syscall_mode == GDB_SYS_UNKNOWN) { + gdb_syscall_mode = gdb_attached() ? GDB_SYS_ENABLED : GDB_SYS_DISABLED; + } + return gdb_syscall_mode == GDB_SYS_ENABLED; +} + +/* called when the stub detaches */ +void gdb_disable_syscalls(void) +{ + gdb_syscall_mode = GDB_SYS_DISABLED; +} + +void gdb_syscall_reset(void) +{ + gdbserver_syscall_state.current_syscall_cb = NULL; +} + +bool gdb_handled_syscall(void) +{ + if (gdbserver_syscall_state.current_syscall_cb) { + gdb_put_packet(gdbserver_syscall_state.syscall_buf); + return true; + } + + return false; +} + +/* + * Send a gdb syscall request. + * This accepts limited printf-style format specifiers, specifically: + * %x - target_ulong argument printed in hex. + * %lx - 64-bit argument printed in hex. + * %s - string pointer (target_ulong) and length (int) pair. + */ +void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...) 
+{ + char *p, *p_end; + va_list va; + + if (!gdb_attached()) { + return; + } + + gdbserver_syscall_state.current_syscall_cb = cb; + va_start(va, fmt); + + p = gdbserver_syscall_state.syscall_buf; + p_end = p + sizeof(gdbserver_syscall_state.syscall_buf); + *(p++) = 'F'; + while (*fmt) { + if (*fmt == '%') { + uint64_t i64; + uint32_t i32; + + fmt++; + switch (*fmt++) { + case 'x': + i32 = va_arg(va, uint32_t); + p += snprintf(p, p_end - p, "%" PRIx32, i32); + break; + case 'l': + if (*(fmt++) != 'x') { + goto bad_format; + } + i64 = va_arg(va, uint64_t); + p += snprintf(p, p_end - p, "%" PRIx64, i64); + break; + case 's': + i64 = va_arg(va, uint64_t); + i32 = va_arg(va, uint32_t); + p += snprintf(p, p_end - p, "%" PRIx64 "/%" PRIx32, i64, i32); + break; + default: + bad_format: + error_report("gdbstub: Bad syscall format string '%s'", + fmt - 1); + break; + } + } else { + *(p++) = *(fmt++); + } + } + *p = 0; + + va_end(va); + gdb_syscall_handling(gdbserver_syscall_state.syscall_buf); +} + +/* + * GDB Command Handlers + */ + +void gdb_handle_file_io(GArray *params, void *user_ctx) +{ + if (params->len >= 1 && gdbserver_syscall_state.current_syscall_cb) { + uint64_t ret; + int err; + + ret = get_param(params, 0)->val_ull; + if (params->len >= 2) { + err = get_param(params, 1)->val_ull; + } else { + err = 0; + } + + /* Convert GDB error numbers back to host error numbers. */ +#define E(X) case GDB_E##X: err = E##X; break + switch (err) { + case 0: + break; + E(PERM); + E(NOENT); + E(INTR); + E(BADF); + E(ACCES); + E(FAULT); + E(BUSY); + E(EXIST); + E(NODEV); + E(NOTDIR); + E(ISDIR); + E(INVAL); + E(NFILE); + E(MFILE); + E(FBIG); + E(NOSPC); + E(SPIPE); + E(ROFS); + E(NAMETOOLONG); + default: + err = EINVAL; + break; + } +#undef E + + gdbserver_syscall_state.current_syscall_cb(gdbserver_state.c_cpu, + ret, err); + gdbserver_syscall_state.current_syscall_cb = NULL; + } + + if (params->len >= 3 && get_param(params, 2)->opcode == (uint8_t)'C') { + gdb_put_packet("T02"); + return; + } + + gdb_continue(); +} diff --git a/gdbstub/trace-events b/gdbstub/trace-events index 03f0c303bf..0c18a4d70a 100644 --- a/gdbstub/trace-events +++ b/gdbstub/trace-events @@ -7,7 +7,6 @@ gdbstub_op_continue(void) "Continuing all CPUs" gdbstub_op_continue_cpu(int cpu_index) "Continuing CPU %d" gdbstub_op_stepping(int cpu_index) "Stepping CPU %d" gdbstub_op_extra_info(const char *info) "Thread extra info: %s" -gdbstub_hit_watchpoint(const char *type, int cpu_gdb_index, uint64_t vaddr) "Watchpoint hit, type=\"%s\" cpu=%d, vaddr=0x%" PRIx64 "" gdbstub_hit_internal_error(void) "RUN_STATE_INTERNAL_ERROR" gdbstub_hit_break(void) "RUN_STATE_DEBUG" gdbstub_hit_paused(void) "RUN_STATE_PAUSED" @@ -27,3 +26,6 @@ gdbstub_err_invalid_repeat(uint8_t ch) "got invalid RLE count: 0x%02x" gdbstub_err_invalid_rle(void) "got invalid RLE sequence" gdbstub_err_checksum_invalid(uint8_t ch) "got invalid command checksum digit: 0x%02x" gdbstub_err_checksum_incorrect(uint8_t expected, uint8_t got) "got command packet with incorrect checksum, expected=0x%02x, received=0x%02x" + +# softmmu.c +gdbstub_hit_watchpoint(const char *type, int cpu_gdb_index, uint64_t vaddr) "Watchpoint hit, type=\"%s\" cpu=%d, vaddr=0x%" PRIx64 "" diff --git a/gdbstub/user-target.c b/gdbstub/user-target.c new file mode 100644 index 0000000000..3c659937c3 --- /dev/null +++ b/gdbstub/user-target.c @@ -0,0 +1,323 @@ +/* + * Target specific user-mode handling + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2022 Linaro Ltd + * + * SPDX-License-Identifier:
LGPL-2.0+ + */ + +#include "qemu/osdep.h" +#include "exec/gdbstub.h" +#include "qemu.h" +#include "internals.h" + +/* + * Map target signal numbers to GDB protocol signal numbers and vice + * versa. For user emulation's currently supported systems, we can + * assume most signals are defined. + */ + +static int gdb_signal_table[] = { + 0, + TARGET_SIGHUP, + TARGET_SIGINT, + TARGET_SIGQUIT, + TARGET_SIGILL, + TARGET_SIGTRAP, + TARGET_SIGABRT, + -1, /* SIGEMT */ + TARGET_SIGFPE, + TARGET_SIGKILL, + TARGET_SIGBUS, + TARGET_SIGSEGV, + TARGET_SIGSYS, + TARGET_SIGPIPE, + TARGET_SIGALRM, + TARGET_SIGTERM, + TARGET_SIGURG, + TARGET_SIGSTOP, + TARGET_SIGTSTP, + TARGET_SIGCONT, + TARGET_SIGCHLD, + TARGET_SIGTTIN, + TARGET_SIGTTOU, + TARGET_SIGIO, + TARGET_SIGXCPU, + TARGET_SIGXFSZ, + TARGET_SIGVTALRM, + TARGET_SIGPROF, + TARGET_SIGWINCH, + -1, /* SIGLOST */ + TARGET_SIGUSR1, + TARGET_SIGUSR2, +#ifdef TARGET_SIGPWR + TARGET_SIGPWR, +#else + -1, +#endif + -1, /* SIGPOLL */ + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, +#ifdef __SIGRTMIN + __SIGRTMIN + 1, + __SIGRTMIN + 2, + __SIGRTMIN + 3, + __SIGRTMIN + 4, + __SIGRTMIN + 5, + __SIGRTMIN + 6, + __SIGRTMIN + 7, + __SIGRTMIN + 8, + __SIGRTMIN + 9, + __SIGRTMIN + 10, + __SIGRTMIN + 11, + __SIGRTMIN + 12, + __SIGRTMIN + 13, + __SIGRTMIN + 14, + __SIGRTMIN + 15, + __SIGRTMIN + 16, + __SIGRTMIN + 17, + __SIGRTMIN + 18, + __SIGRTMIN + 19, + __SIGRTMIN + 20, + __SIGRTMIN + 21, + __SIGRTMIN + 22, + __SIGRTMIN + 23, + __SIGRTMIN + 24, + __SIGRTMIN + 25, + __SIGRTMIN + 26, + __SIGRTMIN + 27, + __SIGRTMIN + 28, + __SIGRTMIN + 29, + __SIGRTMIN + 30, + __SIGRTMIN + 31, + -1, /* SIGCANCEL */ + __SIGRTMIN, + __SIGRTMIN + 32, + __SIGRTMIN + 33, + __SIGRTMIN + 34, + __SIGRTMIN + 35, + __SIGRTMIN + 36, + __SIGRTMIN + 37, + __SIGRTMIN + 38, + __SIGRTMIN + 39, + __SIGRTMIN + 40, + __SIGRTMIN + 41, + __SIGRTMIN + 42, + __SIGRTMIN + 43, + __SIGRTMIN + 44, + __SIGRTMIN + 45, + __SIGRTMIN + 46, + __SIGRTMIN + 47, + __SIGRTMIN + 48, + __SIGRTMIN + 49, + __SIGRTMIN + 50, + __SIGRTMIN + 51, + __SIGRTMIN + 52, + __SIGRTMIN + 53, + __SIGRTMIN + 54, + __SIGRTMIN + 55, + __SIGRTMIN + 56, + __SIGRTMIN + 57, + __SIGRTMIN + 58, + __SIGRTMIN + 59, + __SIGRTMIN + 60, + __SIGRTMIN + 61, + __SIGRTMIN + 62, + __SIGRTMIN + 63, + __SIGRTMIN + 64, + __SIGRTMIN + 65, + __SIGRTMIN + 66, + __SIGRTMIN + 67, + __SIGRTMIN + 68, + __SIGRTMIN + 69, + __SIGRTMIN + 70, + __SIGRTMIN + 71, + __SIGRTMIN + 72, + __SIGRTMIN + 73, + __SIGRTMIN + 74, + __SIGRTMIN + 75, + __SIGRTMIN + 76, + __SIGRTMIN + 77, + __SIGRTMIN + 78, + __SIGRTMIN + 79, + __SIGRTMIN + 80, + __SIGRTMIN + 81, + __SIGRTMIN + 82, + __SIGRTMIN + 83, + __SIGRTMIN + 84, + __SIGRTMIN + 85, + __SIGRTMIN + 86, + __SIGRTMIN + 87, + __SIGRTMIN + 88, + __SIGRTMIN + 89, + __SIGRTMIN + 90, + __SIGRTMIN + 91, + __SIGRTMIN + 92, + __SIGRTMIN + 93, + __SIGRTMIN + 94, + __SIGRTMIN + 95, + -1, /* SIGINFO */ + -1, /* UNKNOWN */ + -1, /* DEFAULT */ + -1, + -1, + -1, + -1, + -1, + -1 +#endif +}; + +int gdb_signal_to_target(int sig) +{ + if (sig < ARRAY_SIZE(gdb_signal_table)) { + return gdb_signal_table[sig]; + } else { + return -1; + } +} + +int gdb_target_signal_to_gdb(int sig) +{ + int i; + for (i = 0; i < ARRAY_SIZE(gdb_signal_table); i++) { + if (gdb_signal_table[i] == sig) { + return i; + } + } + return GDB_SIGNAL_UNKNOWN; +} + +int gdb_get_cpu_index(CPUState *cpu) +{ + TaskState *ts = (TaskState *) cpu->opaque; + return ts ? 
ts->ts_tid : -1; +} + +/* + * User-mode specific command helpers + */ + +void gdb_handle_query_offsets(GArray *params, void *user_ctx) +{ + TaskState *ts; + + ts = gdbserver_state.c_cpu->opaque; + g_string_printf(gdbserver_state.str_buf, + "Text=" TARGET_ABI_FMT_lx + ";Data=" TARGET_ABI_FMT_lx + ";Bss=" TARGET_ABI_FMT_lx, + ts->info->code_offset, + ts->info->data_offset, + ts->info->data_offset); + gdb_put_strbuf(); +} + +#if defined(CONFIG_LINUX) +/* Partial user only duplicate of helper in gdbstub.c */ +static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr, + uint8_t *buf, int len, bool is_write) +{ + CPUClass *cc; + cc = CPU_GET_CLASS(cpu); + if (cc->memory_rw_debug) { + return cc->memory_rw_debug(cpu, addr, buf, len, is_write); + } + return cpu_memory_rw_debug(cpu, addr, buf, len, is_write); +} + +void gdb_handle_query_xfer_auxv(GArray *params, void *user_ctx) +{ + TaskState *ts; + unsigned long offset, len, saved_auxv, auxv_len; + + if (params->len < 2) { + gdb_put_packet("E22"); + return; + } + + offset = get_param(params, 0)->val_ul; + len = get_param(params, 1)->val_ul; + ts = gdbserver_state.c_cpu->opaque; + saved_auxv = ts->info->saved_auxv; + auxv_len = ts->info->auxv_len; + + if (offset >= auxv_len) { + gdb_put_packet("E00"); + return; + } + + if (len > (MAX_PACKET_LENGTH - 5) / 2) { + len = (MAX_PACKET_LENGTH - 5) / 2; + } + + if (len < auxv_len - offset) { + g_string_assign(gdbserver_state.str_buf, "m"); + } else { + g_string_assign(gdbserver_state.str_buf, "l"); + len = auxv_len - offset; + } + + g_byte_array_set_size(gdbserver_state.mem_buf, len); + if (target_memory_rw_debug(gdbserver_state.g_cpu, saved_auxv + offset, + gdbserver_state.mem_buf->data, len, false)) { + gdb_put_packet("E14"); + return; + } + + gdb_memtox(gdbserver_state.str_buf, + (const char *)gdbserver_state.mem_buf->data, len); + gdb_put_packet_binary(gdbserver_state.str_buf->str, + gdbserver_state.str_buf->len, true); +} + +//// --- Begin LibAFL code --- + +void gdb_handle_query_rcmd(GArray *params, void *user_ctx) +{ + int len; + + if (!params->len) { + gdb_put_packet("E22"); + return; + } + + len = strlen(get_param(params, 0)->data); + if (len % 2) { + gdb_put_packet("E01"); + return; + } + + g_assert(gdbserver_state.mem_buf->len == 0); + len = len / 2; + gdb_hextomem(gdbserver_state.mem_buf, get_param(params, 0)->data, len); + + //// --- Begin LibAFL code --- + + struct libafl_custom_gdb_cmd** c = &libafl_qemu_gdb_cmds; + int recognized = 0; + while (*c) { + recognized |= (*c)->callback(gdbserver_state.mem_buf->data, gdbserver_state.mem_buf->len, (*c)->data); + c = &(*c)->next; + } + + if (recognized) { + gdb_put_packet("OK"); + } else { + gdb_put_packet(""); + } +} + +//// --- End LibAFL code --- + +#endif diff --git a/gdbstub/user.c b/gdbstub/user.c index 033e5fdd71..5b375be1d9 100644 --- a/gdbstub/user.c +++ b/gdbstub/user.c @@ -3,24 +3,438 @@ * * We know for user-mode we are using TCG so we can call stuff directly. 
* + * Copyright (c) 2003-2005 Fabrice Bellard * Copyright (c) 2022 Linaro Ltd * - * SPDX-License-Identifier: GPL-2.0-or-later + * SPDX-License-Identifier: LGPL-2.0+ */ #include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "qemu/sockets.h" #include "exec/hwaddr.h" +#include "exec/tb-flush.h" #include "exec/gdbstub.h" +#include "gdbstub/syscalls.h" +#include "gdbstub/user.h" #include "hw/core/cpu.h" +#include "trace.h" #include "internals.h" +/* User-mode specific state */ +typedef struct { + int fd; + char *socket_path; + int running_state; +} GDBUserState; + +static GDBUserState gdbserver_user_state; + +int gdb_get_char(void) +{ + uint8_t ch; + int ret; + + for (;;) { + ret = recv(gdbserver_user_state.fd, &ch, 1, 0); + if (ret < 0) { + if (errno == ECONNRESET) { + gdbserver_user_state.fd = -1; + } + if (errno != EINTR) { + return -1; + } + } else if (ret == 0) { + close(gdbserver_user_state.fd); + gdbserver_user_state.fd = -1; + return -1; + } else { + break; + } + } + return ch; +} + +bool gdb_got_immediate_ack(void) +{ + int i; + + i = gdb_get_char(); + if (i < 0) { + /* no response, continue anyway */ + return true; + } + + if (i == '+') { + /* received correctly, continue */ + return true; + } + + /* anything else, including '-' then try again */ + return false; +} + +void gdb_put_buffer(const uint8_t *buf, int len) +{ + int ret; + + while (len > 0) { + ret = send(gdbserver_user_state.fd, buf, len, 0); + if (ret < 0) { + if (errno != EINTR) { + return; + } + } else { + buf += ret; + len -= ret; + } + } +} + +/* Tell the remote gdb that the process has exited. */ +void gdb_exit(int code) +{ + char buf[4]; + + if (!gdbserver_state.init) { + return; + } + if (gdbserver_user_state.socket_path) { + unlink(gdbserver_user_state.socket_path); + } + if (gdbserver_user_state.fd < 0) { + return; + } + + trace_gdbstub_op_exiting((uint8_t)code); + + if (gdbserver_state.allow_stop_reply) { + snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code); + gdb_put_packet(buf); + gdbserver_state.allow_stop_reply = false; + } +} + +int gdb_handlesig(CPUState *cpu, int sig) +{ + char buf[256]; + int n; + + if (!gdbserver_state.init || gdbserver_user_state.fd < 0) { + return sig; + } + + /* disable single step if it was enabled */ + cpu_single_step(cpu, 0); + tb_flush(cpu); + + if (sig != 0) { + gdb_set_stop_cpu(cpu); + if (gdbserver_state.allow_stop_reply) { + g_string_printf(gdbserver_state.str_buf, + "T%02xthread:", gdb_target_signal_to_gdb(sig)); + gdb_append_thread_id(cpu, gdbserver_state.str_buf); + g_string_append_c(gdbserver_state.str_buf, ';'); + gdb_put_strbuf(); + gdbserver_state.allow_stop_reply = false; + } + } + /* + * gdb_put_packet() might have detected that the peer terminated the + * connection. + */ + if (gdbserver_user_state.fd < 0) { + return sig; + } + + sig = 0; + gdbserver_state.state = RS_IDLE; + gdbserver_user_state.running_state = 0; + while (gdbserver_user_state.running_state == 0) { + n = read(gdbserver_user_state.fd, buf, 256); + if (n > 0) { + int i; + + for (i = 0; i < n; i++) { + gdb_read_byte(buf[i]); + } + } else { + /* + * XXX: Connection closed. Should probably wait for another + * connection before continuing. + */ + if (n == 0) { + close(gdbserver_user_state.fd); + } + gdbserver_user_state.fd = -1; + return sig; + } + } + sig = gdbserver_state.signal; + gdbserver_state.signal = 0; + return sig; +} + +/* Tell the remote gdb that the process has exited due to SIG. 
*/ +void gdb_signalled(CPUArchState *env, int sig) +{ + char buf[4]; + + if (!gdbserver_state.init || gdbserver_user_state.fd < 0 || + !gdbserver_state.allow_stop_reply) { + return; + } + + snprintf(buf, sizeof(buf), "X%02x", gdb_target_signal_to_gdb(sig)); + gdb_put_packet(buf); + gdbserver_state.allow_stop_reply = false; +} + +static void gdb_accept_init(int fd) +{ + gdb_init_gdbserver_state(); + gdb_create_default_process(&gdbserver_state); + gdbserver_state.processes[0].attached = true; + gdbserver_state.c_cpu = gdb_first_attached_cpu(); + gdbserver_state.g_cpu = gdbserver_state.c_cpu; + gdbserver_user_state.fd = fd; + gdb_has_xml = false; +} + +static bool gdb_accept_socket(int gdb_fd) +{ + int fd; + + for (;;) { + fd = accept(gdb_fd, NULL, NULL); + if (fd < 0 && errno != EINTR) { + perror("accept socket"); + return false; + } else if (fd >= 0) { + qemu_set_cloexec(fd); + break; + } + } + + gdb_accept_init(fd); + return true; +} + +static int gdbserver_open_socket(const char *path) +{ + struct sockaddr_un sockaddr = {}; + int fd, ret; + + fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (fd < 0) { + perror("create socket"); + return -1; + } + + sockaddr.sun_family = AF_UNIX; + pstrcpy(sockaddr.sun_path, sizeof(sockaddr.sun_path) - 1, path); + ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)); + if (ret < 0) { + perror("bind socket"); + close(fd); + return -1; + } + ret = listen(fd, 1); + if (ret < 0) { + perror("listen socket"); + close(fd); + return -1; + } + + return fd; +} + +static bool gdb_accept_tcp(int gdb_fd) +{ + struct sockaddr_in sockaddr = {}; + socklen_t len; + int fd; + + for (;;) { + len = sizeof(sockaddr); + fd = accept(gdb_fd, (struct sockaddr *)&sockaddr, &len); + if (fd < 0 && errno != EINTR) { + perror("accept"); + return false; + } else if (fd >= 0) { + qemu_set_cloexec(fd); + break; + } + } + + /* set short latency */ + if (socket_set_nodelay(fd)) { + perror("setsockopt"); + close(fd); + return false; + } + + gdb_accept_init(fd); + return true; +} + +static int gdbserver_open_port(int port) +{ + struct sockaddr_in sockaddr; + int fd, ret; + + fd = socket(PF_INET, SOCK_STREAM, 0); + if (fd < 0) { + perror("socket"); + return -1; + } + qemu_set_cloexec(fd); + + socket_set_fast_reuse(fd); + + sockaddr.sin_family = AF_INET; + sockaddr.sin_port = htons(port); + sockaddr.sin_addr.s_addr = 0; + ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)); + if (ret < 0) { + perror("bind"); + close(fd); + return -1; + } + ret = listen(fd, 1); + if (ret < 0) { + perror("listen"); + close(fd); + return -1; + } + + return fd; +} + +int gdbserver_start(const char *port_or_path) +{ + int port = g_ascii_strtoull(port_or_path, NULL, 10); + int gdb_fd; + + if (port > 0) { + gdb_fd = gdbserver_open_port(port); + } else { + gdb_fd = gdbserver_open_socket(port_or_path); + } + + if (gdb_fd < 0) { + return -1; + } + + if (port > 0 && gdb_accept_tcp(gdb_fd)) { + return 0; + } else if (gdb_accept_socket(gdb_fd)) { + gdbserver_user_state.socket_path = g_strdup(port_or_path); + return 0; + } + + /* gone wrong */ + close(gdb_fd); + return -1; +} + +/* Disable gdb stub for child processes. 
*/ +void gdbserver_fork(CPUState *cpu) +{ + if (!gdbserver_state.init || gdbserver_user_state.fd < 0) { + return; + } + close(gdbserver_user_state.fd); + gdbserver_user_state.fd = -1; + cpu_breakpoint_remove_all(cpu, BP_GDB); + /* no cpu_watchpoint_remove_all for user-mode */ +} + +/* + * Execution state helpers + */ + +void gdb_handle_query_attached(GArray *params, void *user_ctx) +{ + gdb_put_packet("0"); +} + +void gdb_continue(void) +{ + gdbserver_user_state.running_state = 1; + trace_gdbstub_op_continue(); +} + +/* + * Resume execution, for user-mode emulation it's equivalent to + * gdb_continue. + */ +int gdb_continue_partial(char *newstates) +{ + CPUState *cpu; + int res = 0; + /* + * This is not exactly accurate, but it's an improvement compared to the + * previous situation, where only one CPU would be single-stepped. + */ + CPU_FOREACH(cpu) { + if (newstates[cpu->cpu_index] == 's') { + trace_gdbstub_op_stepping(cpu->cpu_index); + cpu_single_step(cpu, gdbserver_state.sstep_flags); + } + } + gdbserver_user_state.running_state = 1; + return res; +} + +/* + * Memory access helpers + */ +int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr, + uint8_t *buf, int len, bool is_write) +{ + CPUClass *cc; + + cc = CPU_GET_CLASS(cpu); + if (cc->memory_rw_debug) { + return cc->memory_rw_debug(cpu, addr, buf, len, is_write); + } + return cpu_memory_rw_debug(cpu, addr, buf, len, is_write); +} + +/* + * cpu helpers + */ + +unsigned int gdb_get_max_cpus(void) +{ + CPUState *cpu; + unsigned int max_cpus = 1; + + CPU_FOREACH(cpu) { + max_cpus = max_cpus <= cpu->cpu_index ? cpu->cpu_index + 1 : max_cpus; + } + + return max_cpus; +} + +/* replay not supported for user-mode */ +bool gdb_can_reverse(void) +{ + return false; +} + +/* + * Break/Watch point helpers + */ + bool gdb_supports_guest_debug(void) { /* user-mode == TCG == supported */ return true; } -int gdb_breakpoint_insert(CPUState *cs, int type, hwaddr addr, hwaddr len) +int gdb_breakpoint_insert(CPUState *cs, int type, vaddr addr, vaddr len) { CPUState *cpu; int err = 0; @@ -41,7 +455,7 @@ int gdb_breakpoint_insert(CPUState *cs, int type, hwaddr addr, hwaddr len) } } -int gdb_breakpoint_remove(CPUState *cs, int type, hwaddr addr, hwaddr len) +int gdb_breakpoint_remove(CPUState *cs, int type, vaddr addr, vaddr len) { CPUState *cpu; int err = 0; @@ -66,3 +480,17 @@ void gdb_breakpoint_remove_all(CPUState *cs) { cpu_breakpoint_remove_all(cs, BP_GDB); } + +/* + * For user-mode syscall support we send the system call immediately + * and then return control to gdb for it to process the syscall request. + * Since the protocol requires that gdb hands control back to us + * using a "here are the results" F packet, we don't need to check + * gdb_handlesig's return value (which is the signal to deliver if + * execution was resumed via a continue packet). + */ +void gdb_syscall_handling(const char *syscall_packet) +{ + gdb_put_packet(syscall_packet); + gdb_handlesig(gdbserver_state.c_cpu, 0); +} diff --git a/gitdm.config b/gitdm.config index 288b100d89..9db43ca142 100644 --- a/gitdm.config +++ b/gitdm.config @@ -31,8 +31,11 @@ EmailMap contrib/gitdm/domain-map # identifiable corporate emails. Please keep this list sorted. 
# +GroupMap contrib/gitdm/group-map-alibaba Alibaba +GroupMap contrib/gitdm/group-map-amd AMD GroupMap contrib/gitdm/group-map-cadence Cadence Design Systems GroupMap contrib/gitdm/group-map-codeweavers CodeWeavers +GroupMap contrib/gitdm/group-map-facebook Facebook GroupMap contrib/gitdm/group-map-ibm IBM GroupMap contrib/gitdm/group-map-janustech Janus Technologies GroupMap contrib/gitdm/group-map-netflix Netflix diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx index 754b1e8408..47d63d26db 100644 --- a/hmp-commands-info.hx +++ b/hmp-commands-info.hx @@ -993,3 +993,17 @@ SRST ``info virtio-queue-element`` *path* *queue* [*index*] Display element of a given virtio queue ERST + + { + .name = "cryptodev", + .args_type = "", + .params = "", + .help = "show the crypto devices", + .cmd = hmp_info_cryptodev, + .flags = "p", + }, + +SRST + ``info cryptodev`` + Show the crypto devices. +ERST diff --git a/hmp-commands.hx b/hmp-commands.hx index fbb5daf09b..2cbd0f77a0 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -378,18 +378,35 @@ SRST only *tag* as parameter. ERST + { + .name = "one-insn-per-tb", + .args_type = "option:s?", + .params = "[on|off]", + .help = "run emulation with one guest instruction per translation block", + .cmd = hmp_one_insn_per_tb, + }, + +SRST +``one-insn-per-tb [off]`` + Run the emulation with one guest instruction per translation block. + This slows down emulation a lot, but can be useful in some situations, + such as when trying to analyse the logs produced by the ``-d`` option. + This only has an effect when using TCG, not with KVM or other accelerators. + + If called with option off, the emulation returns to normal mode. +ERST + { .name = "singlestep", .args_type = "option:s?", .params = "[on|off]", - .help = "run emulation in singlestep mode or switch to normal mode", - .cmd = hmp_singlestep, + .help = "deprecated synonym for one-insn-per-tb", + .cmd = hmp_one_insn_per_tb, }, SRST ``singlestep [off]`` - Run the emulation in single step mode. - If called with option off, the emulation returns to normal mode. + This is a deprecated synonym for the one-insn-per-tb command. ERST { @@ -1035,6 +1052,7 @@ SRST migration (or once already in postcopy). ERST +#ifdef CONFIG_REPLICATION { .name = "x_colo_lost_heartbeat", .args_type = "", @@ -1043,6 +1061,7 @@ ERST "a failover or takeover is needed.", .cmd = hmp_x_colo_lost_heartbeat, }, +#endif SRST ``x_colo_lost_heartbeat`` @@ -1486,6 +1505,7 @@ SRST Inject an MCE on the given CPU (x86 only). ERST +#ifdef CONFIG_POSIX { .name = "getfd", .args_type = "fdname:s", @@ -1501,6 +1521,7 @@ SRST mechanism on unix sockets, it is stored using the name *fdname* for later use by other monitor commands. ERST +#endif { .name = "closefd", @@ -1815,3 +1836,32 @@ SRST Dump the FDT in dtb format to *filename*. ERST #endif + +#if defined(CONFIG_XEN_EMU) + { + .name = "xen-event-inject", + .args_type = "port:i", + .params = "port", + .help = "inject event channel", + .cmd = hmp_xen_event_inject, + }, + +SRST +``xen-event-inject`` *port* + Notify guest via event channel on port *port*. 
+ERST + + + { + .name = "xen-event-list", + .args_type = "", + .params = "", + .help = "list event channel state", + .cmd = hmp_xen_event_list, + }, + +SRST +``xen-event-list`` + List event channels in the guest +ERST +#endif diff --git a/host/include/aarch64/host/atomic128-cas.h b/host/include/aarch64/host/atomic128-cas.h new file mode 100644 index 0000000000..58630107bc --- /dev/null +++ b/host/include/aarch64/host/atomic128-cas.h @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Compare-and-swap for 128-bit atomic operations, AArch64 version. + * + * Copyright (C) 2018, 2023 Linaro, Ltd. + * + * See docs/devel/atomics.rst for discussion about the guarantees each + * atomic primitive is meant to provide. + */ + +#ifndef AARCH64_ATOMIC128_CAS_H +#define AARCH64_ATOMIC128_CAS_H + +/* Through gcc 10, aarch64 has no support for 128-bit atomics. */ +#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128) +#include "host/include/generic/host/atomic128-cas.h" +#else +static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) +{ + uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp); + uint64_t newl = int128_getlo(new), newh = int128_gethi(new); + uint64_t oldl, oldh; + uint32_t tmp; + + asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t" + "cmp %[oldl], %[cmpl]\n\t" + "ccmp %[oldh], %[cmph], #0, eq\n\t" + "b.ne 1f\n\t" + "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t" + "cbnz %w[tmp], 0b\n" + "1:" + : [mem] "+m"(*ptr), [tmp] "=&r"(tmp), + [oldl] "=&r"(oldl), [oldh] "=&r"(oldh) + : [cmpl] "r"(cmpl), [cmph] "r"(cmph), + [newl] "r"(newl), [newh] "r"(newh) + : "memory", "cc"); + + return int128_make128(oldl, oldh); +} + +# define CONFIG_CMPXCHG128 1 +# define HAVE_CMPXCHG128 1 +#endif + +#endif /* AARCH64_ATOMIC128_CAS_H */ diff --git a/host/include/aarch64/host/atomic128-ldst.h b/host/include/aarch64/host/atomic128-ldst.h new file mode 100644 index 0000000000..a08f62c40a --- /dev/null +++ b/host/include/aarch64/host/atomic128-ldst.h @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Load/store for 128-bit atomic operations, AArch64 version. + * + * Copyright (C) 2018, 2023 Linaro, Ltd. + * + * See docs/devel/atomics.rst for discussion about the guarantees each + * atomic primitive is meant to provide. + */ + +#ifndef AARCH64_ATOMIC128_LDST_H +#define AARCH64_ATOMIC128_LDST_H + +#include "host/cpuinfo.h" +#include "tcg/debug-assert.h" + +/* + * Through gcc 10, aarch64 has no support for 128-bit atomics. + * Through clang 16, without -march=armv8.4-a, __atomic_load_16 + * is incorrectly expanded to a read-write operation. + * + * Anyway, this method allows runtime detection of FEAT_LSE2. + */ + +#define HAVE_ATOMIC128_RO (cpuinfo & CPUINFO_LSE2) +#define HAVE_ATOMIC128_RW 1 + +static inline Int128 atomic16_read_ro(const Int128 *ptr) +{ + uint64_t l, h; + + tcg_debug_assert(HAVE_ATOMIC128_RO); + /* With FEAT_LSE2, 16-byte aligned LDP is atomic. */ + asm("ldp %[l], %[h], %[mem]" + : [l] "=r"(l), [h] "=r"(h) : [mem] "m"(*ptr)); + + return int128_make128(l, h); +} + +static inline Int128 atomic16_read_rw(Int128 *ptr) +{ + uint64_t l, h; + uint32_t tmp; + + if (cpuinfo & CPUINFO_LSE2) { + /* With FEAT_LSE2, 16-byte aligned LDP is atomic. */ + asm("ldp %[l], %[h], %[mem]" + : [l] "=r"(l), [h] "=r"(h) : [mem] "m"(*ptr)); + } else { + /* The load must be paired with the store to guarantee not tearing. 
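+         * (Editorial aside: without FEAT_LSE2 a bare LDXP is not guaranteed
+         * to be single-copy atomic across the full 16 bytes.  Storing the
+         * just-loaded pair back with STXP and retrying until it succeeds
+         * proves that no other store intervened, which makes the load
+         * effectively atomic.)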
+         */
+        asm("0: ldxp %[l], %[h], %[mem]\n\t"
+            "stxp %w[tmp], %[l], %[h], %[mem]\n\t"
+            "cbnz %w[tmp], 0b"
+            : [mem] "+m"(*ptr), [tmp] "=&r"(tmp), [l] "=&r"(l), [h] "=&r"(h));
+    }
+
+    return int128_make128(l, h);
+}
+
+static inline void atomic16_set(Int128 *ptr, Int128 val)
+{
+    uint64_t l = int128_getlo(val), h = int128_gethi(val);
+    uint64_t t1, t2;
+
+    if (cpuinfo & CPUINFO_LSE2) {
+        /* With FEAT_LSE2, 16-byte aligned STP is atomic. */
+        asm("stp %[l], %[h], %[mem]"
+            : [mem] "=m"(*ptr) : [l] "r"(l), [h] "r"(h));
+    } else {
+        /* Load into temporaries to acquire the exclusive access lock. */
+        asm("0: ldxp %[t1], %[t2], %[mem]\n\t"
+            "stxp %w[t1], %[l], %[h], %[mem]\n\t"
+            "cbnz %w[t1], 0b"
+            : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2)
+            : [l] "r"(l), [h] "r"(h));
+    }
+}
+
+#endif /* AARCH64_ATOMIC128_LDST_H */
diff --git a/host/include/aarch64/host/cpuinfo.h b/host/include/aarch64/host/cpuinfo.h
new file mode 100644
index 0000000000..82227890b4
--- /dev/null
+++ b/host/include/aarch64/host/cpuinfo.h
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Host specific cpu identification for AArch64.
+ */
+
+#ifndef HOST_CPUINFO_H
+#define HOST_CPUINFO_H
+
+#define CPUINFO_ALWAYS  (1u << 0)  /* so cpuinfo is nonzero */
+#define CPUINFO_LSE     (1u << 1)
+#define CPUINFO_LSE2    (1u << 2)
+
+/* Initialized with a constructor. */
+extern unsigned cpuinfo;
+
+/*
+ * We cannot rely on constructor ordering, so other constructors must
+ * use the function interface rather than the variable above.
+ */
+unsigned cpuinfo_init(void);
+
+#endif /* HOST_CPUINFO_H */
diff --git a/host/include/aarch64/host/load-extract-al16-al8.h b/host/include/aarch64/host/load-extract-al16-al8.h
new file mode 100644
index 0000000000..bd677c5e26
--- /dev/null
+++ b/host/include/aarch64/host/load-extract-al16-al8.h
@@ -0,0 +1,40 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Atomic extract 64 from 128-bit, AArch64 version.
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef AARCH64_LOAD_EXTRACT_AL16_AL8_H
+#define AARCH64_LOAD_EXTRACT_AL16_AL8_H
+
+#include "host/cpuinfo.h"
+#include "tcg/debug-assert.h"
+
+/**
+ * load_atom_extract_al16_or_al8:
+ * @pv: host address
+ * @s: object size in bytes, @s <= 8.
+ *
+ * Load @s bytes from @pv, when pv % s != 0.  If [p, p+s-1] does not
+ * cross a 16-byte boundary then the access must be 16-byte atomic,
+ * otherwise the access must be 8-byte atomic.
+ */
+static inline uint64_t load_atom_extract_al16_or_al8(void *pv, int s)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    __int128_t *ptr_align = (__int128_t *)(pi & ~7);
+    int shr = (pi & 7) * 8;
+    uint64_t l, h;
+
+    /*
+     * With FEAT_LSE2, LDP is single-copy atomic if 16-byte aligned
+     * and single-copy atomic on the parts if 8-byte aligned.
+     * All we need do is align the pointer mod 8.
+     */
+    tcg_debug_assert(HAVE_ATOMIC128_RO);
+    asm("ldp %0, %1, %2" : "=r"(l), "=r"(h) : "m"(*ptr_align));
+    return (l >> shr) | (h << (-shr & 63));
+}
+
+#endif /* AARCH64_LOAD_EXTRACT_AL16_AL8_H */
diff --git a/host/include/aarch64/host/store-insert-al16.h b/host/include/aarch64/host/store-insert-al16.h
new file mode 100644
index 0000000000..1943155bc6
--- /dev/null
+++ b/host/include/aarch64/host/store-insert-al16.h
@@ -0,0 +1,47 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Atomic store insert into 128-bit, AArch64 version.
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */ + +#ifndef AARCH64_STORE_INSERT_AL16_H +#define AARCH64_STORE_INSERT_AL16_H + +/** + * store_atom_insert_al16: + * @p: host address + * @val: shifted value to store + * @msk: mask for value to store + * + * Atomically store @val to @p masked by @msk. + */ +static inline void ATTRIBUTE_ATOMIC128_OPT +store_atom_insert_al16(Int128 *ps, Int128 val, Int128 msk) +{ + /* + * GCC only implements __sync* primitives for int128 on aarch64. + * We can do better without the barriers, and integrating the + * arithmetic into the load-exclusive/store-conditional pair. + */ + uint64_t tl, th, vl, vh, ml, mh; + uint32_t fail; + + qemu_build_assert(!HOST_BIG_ENDIAN); + vl = int128_getlo(val); + vh = int128_gethi(val); + ml = int128_getlo(msk); + mh = int128_gethi(msk); + + asm("0: ldxp %[l], %[h], %[mem]\n\t" + "bic %[l], %[l], %[ml]\n\t" + "bic %[h], %[h], %[mh]\n\t" + "orr %[l], %[l], %[vl]\n\t" + "orr %[h], %[h], %[vh]\n\t" + "stxp %w[f], %[l], %[h], %[mem]\n\t" + "cbnz %w[f], 0b\n" + : [mem] "+Q"(*ps), [f] "=&r"(fail), [l] "=&r"(tl), [h] "=&r"(th) + : [vl] "r"(vl), [vh] "r"(vh), [ml] "r"(ml), [mh] "r"(mh)); +} + +#endif /* AARCH64_STORE_INSERT_AL16_H */ diff --git a/host/include/generic/host/atomic128-cas.h b/host/include/generic/host/atomic128-cas.h new file mode 100644 index 0000000000..991d3da082 --- /dev/null +++ b/host/include/generic/host/atomic128-cas.h @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Compare-and-swap for 128-bit atomic operations, generic version. + * + * Copyright (C) 2018, 2023 Linaro, Ltd. + * + * See docs/devel/atomics.rst for discussion about the guarantees each + * atomic primitive is meant to provide. + */ + +#ifndef HOST_ATOMIC128_CAS_H +#define HOST_ATOMIC128_CAS_H + +#if defined(CONFIG_ATOMIC128) +static inline Int128 ATTRIBUTE_ATOMIC128_OPT +atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) +{ + __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16); + Int128Alias r, c, n; + + c.s = cmp; + n.s = new; + r.i = qatomic_cmpxchg__nocheck(ptr_align, c.i, n.i); + return r.s; +} +# define HAVE_CMPXCHG128 1 +#elif defined(CONFIG_CMPXCHG128) +static inline Int128 ATTRIBUTE_ATOMIC128_OPT +atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) +{ + __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16); + Int128Alias r, c, n; + + c.s = cmp; + n.s = new; + r.i = __sync_val_compare_and_swap_16(ptr_align, c.i, n.i); + return r.s; +} +# define HAVE_CMPXCHG128 1 +#else +/* Fallback definition that must be optimized away, or error. */ +Int128 QEMU_ERROR("unsupported atomic") + atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new); +# define HAVE_CMPXCHG128 0 +#endif + +#endif /* HOST_ATOMIC128_CAS_H */ diff --git a/host/include/generic/host/atomic128-ldst.h b/host/include/generic/host/atomic128-ldst.h new file mode 100644 index 0000000000..80fff0643a --- /dev/null +++ b/host/include/generic/host/atomic128-ldst.h @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Load/store for 128-bit atomic operations, generic version. + * + * Copyright (C) 2018, 2023 Linaro, Ltd. + * + * See docs/devel/atomics.rst for discussion about the guarantees each + * atomic primitive is meant to provide. 
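+ *
+ * (Editorial aside: atomic16_read_ro must also be usable on pages that are
+ * mapped read-only.  A cmpxchg-based load performs a write access even when
+ * the value is unchanged and would fault on such a mapping, so the cmpxchg
+ * fallback below only advertises HAVE_ATOMIC128_RW, not HAVE_ATOMIC128_RO.)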
+ */
+
+#ifndef HOST_ATOMIC128_LDST_H
+#define HOST_ATOMIC128_LDST_H
+
+#if defined(CONFIG_ATOMIC128)
+# define HAVE_ATOMIC128_RO 1
+# define HAVE_ATOMIC128_RW 1
+
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+atomic16_read_ro(const Int128 *ptr)
+{
+    const __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
+    Int128Alias r;
+
+    r.i = qatomic_read__nocheck(ptr_align);
+    return r.s;
+}
+
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+atomic16_read_rw(Int128 *ptr)
+{
+    return atomic16_read_ro(ptr);
+}
+
+static inline void ATTRIBUTE_ATOMIC128_OPT
+atomic16_set(Int128 *ptr, Int128 val)
+{
+    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
+    Int128Alias v;
+
+    v.s = val;
+    qatomic_set__nocheck(ptr_align, v.i);
+}
+
+#elif defined(CONFIG_CMPXCHG128)
+# define HAVE_ATOMIC128_RO 0
+# define HAVE_ATOMIC128_RW 1
+
+Int128 QEMU_ERROR("unsupported atomic") atomic16_read_ro(const Int128 *ptr);
+
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+atomic16_read_rw(Int128 *ptr)
+{
+    /* Maybe replace 0 with 0, returning the old value. */
+    Int128 z = int128_make64(0);
+    return atomic16_cmpxchg(ptr, z, z);
+}
+
+static inline void ATTRIBUTE_ATOMIC128_OPT
+atomic16_set(Int128 *ptr, Int128 val)
+{
+    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
+    __int128_t old;
+    Int128Alias new;
+
+    new.s = val;
+    do {
+        old = *ptr_align;
+    } while (!__sync_bool_compare_and_swap_16(ptr_align, old, new.i));
+}
+
+#else
+# define HAVE_ATOMIC128_RO 0
+# define HAVE_ATOMIC128_RW 0
+
+/* Fallback definitions that must be optimized away, or error. */
+Int128 QEMU_ERROR("unsupported atomic") atomic16_read_ro(const Int128 *ptr);
+Int128 QEMU_ERROR("unsupported atomic") atomic16_read_rw(Int128 *ptr);
+void QEMU_ERROR("unsupported atomic") atomic16_set(Int128 *ptr, Int128 val);
+#endif
+
+#endif /* HOST_ATOMIC128_LDST_H */
diff --git a/host/include/generic/host/cpuinfo.h b/host/include/generic/host/cpuinfo.h
new file mode 100644
index 0000000000..eca672064a
--- /dev/null
+++ b/host/include/generic/host/cpuinfo.h
@@ -0,0 +1,4 @@
+/*
+ * No host specific cpu identification.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
diff --git a/host/include/generic/host/load-extract-al16-al8.h b/host/include/generic/host/load-extract-al16-al8.h
new file mode 100644
index 0000000000..d95556130f
--- /dev/null
+++ b/host/include/generic/host/load-extract-al16-al8.h
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Atomic extract 64 from 128-bit, generic version.
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef HOST_LOAD_EXTRACT_AL16_AL8_H
+#define HOST_LOAD_EXTRACT_AL16_AL8_H
+
+/**
+ * load_atom_extract_al16_or_al8:
+ * @pv: host address
+ * @s: object size in bytes, @s <= 8.
+ *
+ * Load @s bytes from @pv, when pv % s != 0.  If [p, p+s-1] does not
+ * cross a 16-byte boundary then the access must be 16-byte atomic,
+ * otherwise the access must be 8-byte atomic.
+ */
+static inline uint64_t ATTRIBUTE_ATOMIC128_OPT
+load_atom_extract_al16_or_al8(void *pv, int s)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    int o = pi & 7;
+    int shr = (HOST_BIG_ENDIAN ? 16 - s - o : o) * 8;
+    Int128 r;
+
+    pv = (void *)(pi & ~7);
+    if (pi & 8) {
+        uint64_t *p8 = __builtin_assume_aligned(pv, 16, 8);
+        uint64_t a = qatomic_read__nocheck(p8);
+        uint64_t b = qatomic_read__nocheck(p8 + 1);
+
+        if (HOST_BIG_ENDIAN) {
+            r = int128_make128(b, a);
+        } else {
+            r = int128_make128(a, b);
+        }
+    } else {
+        r = atomic16_read_ro(pv);
+    }
+    return int128_getlo(int128_urshift(r, shr));
+}
+
+#endif /* HOST_LOAD_EXTRACT_AL16_AL8_H */
diff --git a/host/include/generic/host/store-insert-al16.h b/host/include/generic/host/store-insert-al16.h
new file mode 100644
index 0000000000..4a1662183d
--- /dev/null
+++ b/host/include/generic/host/store-insert-al16.h
@@ -0,0 +1,50 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Atomic store insert into 128-bit, generic version.
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef HOST_STORE_INSERT_AL16_H
+#define HOST_STORE_INSERT_AL16_H
+
+/**
+ * store_atom_insert_al16:
+ * @p: host address
+ * @val: shifted value to store
+ * @msk: mask for value to store
+ *
+ * Atomically store @val to @p masked by @msk.
+ */
+static inline void ATTRIBUTE_ATOMIC128_OPT
+store_atom_insert_al16(Int128 *ps, Int128 val, Int128 msk)
+{
+#if defined(CONFIG_ATOMIC128)
+    __uint128_t *pu;
+    Int128Alias old, new;
+
+    /* With CONFIG_ATOMIC128, we can avoid the memory barriers. */
+    pu = __builtin_assume_aligned(ps, 16);
+    old.u = *pu;
+    msk = int128_not(msk);
+    do {
+        new.s = int128_and(old.s, msk);
+        new.s = int128_or(new.s, val);
+    } while (!__atomic_compare_exchange_n(pu, &old.u, new.u, true,
+                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+#else
+    Int128 old, new, cmp;
+
+    ps = __builtin_assume_aligned(ps, 16);
+    old = *ps;
+    msk = int128_not(msk);
+    do {
+        cmp = old;
+        new = int128_and(old, msk);
+        new = int128_or(new, val);
+        old = atomic16_cmpxchg(ps, cmp, new);
+    } while (int128_ne(cmp, old));
+#endif
+}
+
+#endif /* HOST_STORE_INSERT_AL16_H */
diff --git a/host/include/i386/host/cpuinfo.h b/host/include/i386/host/cpuinfo.h
new file mode 100644
index 0000000000..a6537123cf
--- /dev/null
+++ b/host/include/i386/host/cpuinfo.h
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Host specific cpu identification for x86.
+ */
+
+#ifndef HOST_CPUINFO_H
+#define HOST_CPUINFO_H
+
+/* Digested version of <cpuid.h> */
+
+#define CPUINFO_ALWAYS          (1u << 0)  /* so cpuinfo is nonzero */
+#define CPUINFO_CMOV            (1u << 1)
+#define CPUINFO_MOVBE           (1u << 2)
+#define CPUINFO_LZCNT           (1u << 3)
+#define CPUINFO_POPCNT          (1u << 4)
+#define CPUINFO_BMI1            (1u << 5)
+#define CPUINFO_BMI2            (1u << 6)
+#define CPUINFO_SSE2            (1u << 7)
+#define CPUINFO_SSE4            (1u << 8)
+#define CPUINFO_AVX1            (1u << 9)
+#define CPUINFO_AVX2            (1u << 10)
+#define CPUINFO_AVX512F         (1u << 11)
+#define CPUINFO_AVX512VL        (1u << 12)
+#define CPUINFO_AVX512BW        (1u << 13)
+#define CPUINFO_AVX512DQ        (1u << 14)
+#define CPUINFO_AVX512VBMI2     (1u << 15)
+#define CPUINFO_ATOMIC_VMOVDQA  (1u << 16)
+#define CPUINFO_ATOMIC_VMOVDQU  (1u << 17)
+
+/* Initialized with a constructor. */
+extern unsigned cpuinfo;
+
+/*
+ * We cannot rely on constructor ordering, so other constructors must
+ * use the function interface rather than the variable above.
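+ * (Editorial note: for example, another constructor that wants to test
+ * CPUINFO_AVX2 cannot read cpuinfo directly, since it may still be zero at
+ * that point; it must call cpuinfo_init(), which computes the flags on
+ * first use and simply returns the cached value thereafter.)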
+ */
+unsigned cpuinfo_init(void);
+
+#endif /* HOST_CPUINFO_H */
diff --git a/host/include/x86_64/host/atomic128-ldst.h b/host/include/x86_64/host/atomic128-ldst.h
new file mode 100644
index 0000000000..adc9332f91
--- /dev/null
+++ b/host/include/x86_64/host/atomic128-ldst.h
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Load/store for 128-bit atomic operations, x86_64 version.
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ *
+ * See docs/devel/atomics.rst for discussion about the guarantees each
+ * atomic primitive is meant to provide.
+ */
+
+#ifndef X86_64_ATOMIC128_LDST_H
+#define X86_64_ATOMIC128_LDST_H
+
+#ifdef CONFIG_INT128_TYPE
+#include "host/cpuinfo.h"
+#include "tcg/debug-assert.h"
+
+/*
+ * Through clang 16, with -mcx16, __atomic_load_n is incorrectly
+ * expanded to a read-write operation: lock cmpxchg16b.
+ */
+
+#define HAVE_ATOMIC128_RO  likely(cpuinfo & CPUINFO_ATOMIC_VMOVDQA)
+#define HAVE_ATOMIC128_RW  1
+
+static inline Int128 atomic16_read_ro(const Int128 *ptr)
+{
+    Int128Alias r;
+
+    tcg_debug_assert(HAVE_ATOMIC128_RO);
+    asm("vmovdqa %1, %0" : "=x" (r.i) : "m" (*ptr));
+
+    return r.s;
+}
+
+static inline Int128 atomic16_read_rw(Int128 *ptr)
+{
+    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
+    Int128Alias r;
+
+    if (HAVE_ATOMIC128_RO) {
+        asm("vmovdqa %1, %0" : "=x" (r.i) : "m" (*ptr_align));
+    } else {
+        r.i = __sync_val_compare_and_swap_16(ptr_align, 0, 0);
+    }
+    return r.s;
+}
+
+static inline void atomic16_set(Int128 *ptr, Int128 val)
+{
+    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
+    Int128Alias new = { .s = val };
+
+    if (HAVE_ATOMIC128_RO) {
+        asm("vmovdqa %1, %0" : "=m"(*ptr_align) : "x" (new.i));
+    } else {
+        __int128_t old;
+        do {
+            old = *ptr_align;
+        } while (!__sync_bool_compare_and_swap_16(ptr_align, old, new.i));
+    }
+}
+#else
+/* Provide QEMU_ERROR stubs. */
+#include "host/include/generic/host/atomic128-ldst.h"
+#endif
+
+#endif /* X86_64_ATOMIC128_LDST_H */
diff --git a/host/include/x86_64/host/cpuinfo.h b/host/include/x86_64/host/cpuinfo.h
new file mode 100644
index 0000000000..67debab9a0
--- /dev/null
+++ b/host/include/x86_64/host/cpuinfo.h
@@ -0,0 +1 @@
+#include "host/include/i386/host/cpuinfo.h"
diff --git a/host/include/x86_64/host/load-extract-al16-al8.h b/host/include/x86_64/host/load-extract-al16-al8.h
new file mode 100644
index 0000000000..31b6fe8c45
--- /dev/null
+++ b/host/include/x86_64/host/load-extract-al16-al8.h
@@ -0,0 +1,50 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Atomic extract 64 from 128-bit, x86_64 version.
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef X86_64_LOAD_EXTRACT_AL16_AL8_H
+#define X86_64_LOAD_EXTRACT_AL16_AL8_H
+
+#ifdef CONFIG_INT128_TYPE
+#include "host/cpuinfo.h"
+
+/**
+ * load_atom_extract_al16_or_al8:
+ * @pv: host address
+ * @s: object size in bytes, @s <= 8.
+ *
+ * Load @s bytes from @pv, when pv % s != 0.  If [p, p+s-1] does not
+ * cross a 16-byte boundary then the access must be 16-byte atomic,
+ * otherwise the access must be 8-byte atomic.
+ */
+static inline uint64_t ATTRIBUTE_ATOMIC128_OPT
+load_atom_extract_al16_or_al8(void *pv, int s)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    __int128_t *ptr_align = (__int128_t *)(pi & ~7);
+    int shr = (pi & 7) * 8;
+    Int128Alias r;
+
+    /*
+     * ptr_align % 16 is now only 0 or 8.
+     * If the host supports atomic loads with VMOVDQU, then always use that,
+     * making the branch highly predictable.  Otherwise we must use VMOVDQA
+     * when ptr_align % 16 == 0 for 16-byte atomicity.
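+     *
+     * (Editorial worked example, hypothetical address: pv = 0x1005, s = 2
+     * gives ptr_align = 0x1000 and shr = 40.  Since pi & 8 == 0, the
+     * aligned VMOVDQA load is usable even without atomic VMOVDQU; it loads
+     * the 16 bytes at [0x1000, 0x1010), and the low 16 bits of the returned
+     * value are exactly the two bytes at 0x1005 on this little-endian host.)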
+ */ + if ((cpuinfo & CPUINFO_ATOMIC_VMOVDQU) || (pi & 8)) { + asm("vmovdqu %1, %0" : "=x" (r.i) : "m" (*ptr_align)); + } else { + asm("vmovdqa %1, %0" : "=x" (r.i) : "m" (*ptr_align)); + } + return int128_getlo(int128_urshift(r.s, shr)); +} +#else +/* Fallback definition that must be optimized away, or error. */ +uint64_t QEMU_ERROR("unsupported atomic") + load_atom_extract_al16_or_al8(void *pv, int s); +#endif + +#endif /* X86_64_LOAD_EXTRACT_AL16_AL8_H */ diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h index 2fce4140d1..1b0d805b9c 100644 --- a/hw/9pfs/9p.h +++ b/hw/9pfs/9p.h @@ -203,7 +203,7 @@ typedef struct V9fsDir { QemuMutex readdir_mutex_L; } V9fsDir; -static inline void v9fs_readdir_lock(V9fsDir *dir) +static inline void coroutine_fn v9fs_readdir_lock(V9fsDir *dir) { if (dir->proto_version == V9FS_PROTO_2000U) { qemu_co_mutex_lock(&dir->readdir_mutex_u); @@ -212,7 +212,7 @@ static inline void v9fs_readdir_lock(V9fsDir *dir) } } -static inline void v9fs_readdir_unlock(V9fsDir *dir) +static inline void coroutine_fn v9fs_readdir_unlock(V9fsDir *dir) { if (dir->proto_version == V9FS_PROTO_2000U) { qemu_co_mutex_unlock(&dir->readdir_mutex_u); diff --git a/hw/9pfs/codir.c b/hw/9pfs/codir.c index 7ba63be489..2068a4779d 100644 --- a/hw/9pfs/codir.c +++ b/hw/9pfs/codir.c @@ -68,9 +68,9 @@ int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp, * * See v9fs_co_readdir_many() (as its only user) below for details. */ -static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, - struct V9fsDirEnt **entries, off_t offset, - int32_t maxsize, bool dostat) +static int coroutine_fn +do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, struct V9fsDirEnt **entries, + off_t offset, int32_t maxsize, bool dostat) { V9fsState *s = pdu->s; V9fsString name; diff --git a/hw/9pfs/coth.c b/hw/9pfs/coth.c index 2802d41cce..598f46add9 100644 --- a/hw/9pfs/coth.c +++ b/hw/9pfs/coth.c @@ -41,6 +41,5 @@ static int coroutine_enter_func(void *arg) void co_run_in_worker_bh(void *opaque) { Coroutine *co = opaque; - thread_pool_submit_aio(aio_get_thread_pool(qemu_get_aio_context()), - coroutine_enter_func, co, coroutine_enter_cb, co); + thread_pool_submit_aio(coroutine_enter_func, co, coroutine_enter_cb, co); } diff --git a/hw/9pfs/meson.build b/hw/9pfs/meson.build index 12443b6ad5..fd37b7a02d 100644 --- a/hw/9pfs/meson.build +++ b/hw/9pfs/meson.build @@ -15,7 +15,7 @@ fs_ss.add(files( )) fs_ss.add(when: 'CONFIG_LINUX', if_true: files('9p-util-linux.c')) fs_ss.add(when: 'CONFIG_DARWIN', if_true: files('9p-util-darwin.c')) -fs_ss.add(when: 'CONFIG_XEN', if_true: files('xen-9p-backend.c')) +fs_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-9p-backend.c')) softmmu_ss.add_all(when: 'CONFIG_FSDEV_9P', if_true: fs_ss) specific_ss.add(when: 'CONFIG_VIRTIO_9P', if_true: files('virtio-9p-device.c')) diff --git a/hw/9pfs/trace-events b/hw/9pfs/trace-events index 6c77966c0b..a12e55c165 100644 --- a/hw/9pfs/trace-events +++ b/hw/9pfs/trace-events @@ -48,3 +48,9 @@ v9fs_readlink(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d" v9fs_readlink_return(uint16_t tag, uint8_t id, char* target) "tag %d id %d name %s" v9fs_setattr(uint16_t tag, uint8_t id, int32_t fid, int32_t valid, int32_t mode, int32_t uid, int32_t gid, int64_t size, int64_t atime_sec, int64_t mtime_sec) "tag %u id %u fid %d iattr={valid %d mode %d uid %d gid %d size %"PRId64" atime=%"PRId64" mtime=%"PRId64" }" v9fs_setattr_return(uint16_t tag, uint8_t id) "tag %u id %u" + +# xen-9p-backend.c +xen_9pfs_alloc(char *name) "name %s" +xen_9pfs_connect(char 
*name) "name %s" +xen_9pfs_disconnect(char *name) "name %s" +xen_9pfs_free(char *name) "name %s" diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c index 65c4979c3c..4aa9c8c736 100644 --- a/hw/9pfs/xen-9p-backend.c +++ b/hw/9pfs/xen-9p-backend.c @@ -22,8 +22,11 @@ #include "qemu/config-file.h" #include "qemu/main-loop.h" #include "qemu/option.h" +#include "qemu/iov.h" #include "fsdev/qemu-fsdev.h" +#include "trace.h" + #define VERSIONS "1" #define MAX_RINGS 8 #define MAX_RING_ORDER 9 @@ -60,6 +63,7 @@ typedef struct Xen9pfsDev { int num_rings; Xen9pfsRing *rings; + MemReentrancyGuard mem_reentrancy_guard; } Xen9pfsDev; static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev); @@ -241,7 +245,7 @@ static void xen_9pfs_push_and_notify(V9fsPDU *pdu) xen_wmb(); ring->inprogress = false; - xenevtchn_notify(ring->evtchndev, ring->local_port); + qemu_xen_evtchn_notify(ring->evtchndev, ring->local_port); qemu_bh_schedule(ring->bh); } @@ -324,8 +328,8 @@ static void xen_9pfs_evtchn_event(void *opaque) Xen9pfsRing *ring = opaque; evtchn_port_t port; - port = xenevtchn_pending(ring->evtchndev); - xenevtchn_unmask(ring->evtchndev, port); + port = qemu_xen_evtchn_pending(ring->evtchndev); + qemu_xen_evtchn_unmask(ring->evtchndev, port); qemu_bh_schedule(ring->bh); } @@ -335,47 +339,51 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev) Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); int i; + trace_xen_9pfs_disconnect(xendev->name); + for (i = 0; i < xen_9pdev->num_rings; i++) { if (xen_9pdev->rings[i].evtchndev != NULL) { - qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), - NULL, NULL, NULL); - xenevtchn_unbind(xen_9pdev->rings[i].evtchndev, - xen_9pdev->rings[i].local_port); + qemu_set_fd_handler(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev), + NULL, NULL, NULL); + qemu_xen_evtchn_unbind(xen_9pdev->rings[i].evtchndev, + xen_9pdev->rings[i].local_port); xen_9pdev->rings[i].evtchndev = NULL; } - } -} - -static int xen_9pfs_free(struct XenLegacyDevice *xendev) -{ - Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); - int i; - - if (xen_9pdev->rings[0].evtchndev != NULL) { - xen_9pfs_disconnect(xendev); - } - - for (i = 0; i < xen_9pdev->num_rings; i++) { if (xen_9pdev->rings[i].data != NULL) { xen_be_unmap_grant_refs(&xen_9pdev->xendev, xen_9pdev->rings[i].data, + xen_9pdev->rings[i].intf->ref, (1 << xen_9pdev->rings[i].ring_order)); + xen_9pdev->rings[i].data = NULL; } if (xen_9pdev->rings[i].intf != NULL) { - xen_be_unmap_grant_refs(&xen_9pdev->xendev, - xen_9pdev->rings[i].intf, - 1); + xen_be_unmap_grant_ref(&xen_9pdev->xendev, + xen_9pdev->rings[i].intf, + xen_9pdev->rings[i].ref); + xen_9pdev->rings[i].intf = NULL; } if (xen_9pdev->rings[i].bh != NULL) { qemu_bh_delete(xen_9pdev->rings[i].bh); + xen_9pdev->rings[i].bh = NULL; } } g_free(xen_9pdev->id); + xen_9pdev->id = NULL; g_free(xen_9pdev->tag); + xen_9pdev->tag = NULL; g_free(xen_9pdev->path); + xen_9pdev->path = NULL; g_free(xen_9pdev->security_model); + xen_9pdev->security_model = NULL; g_free(xen_9pdev->rings); + xen_9pdev->rings = NULL; +} + +static int xen_9pfs_free(struct XenLegacyDevice *xendev) +{ + trace_xen_9pfs_free(xendev->name); + return 0; } @@ -387,6 +395,8 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev) V9fsState *s = &xen_9pdev->state; QemuOpts *fsdev; + trace_xen_9pfs_connect(xendev->name); + if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings", &xen_9pdev->num_rings) == -1 || xen_9pdev->num_rings > MAX_RINGS || 
xen_9pdev->num_rings < 1) { @@ -441,18 +451,20 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev) xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data + XEN_FLEX_RING_SIZE(ring_order); - xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]); + xen_9pdev->rings[i].bh = qemu_bh_new_guarded(xen_9pfs_bh, + &xen_9pdev->rings[i], + &xen_9pdev->mem_reentrancy_guard); xen_9pdev->rings[i].out_cons = 0; xen_9pdev->rings[i].out_size = 0; xen_9pdev->rings[i].inprogress = false; - xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0); + xen_9pdev->rings[i].evtchndev = qemu_xen_evtchn_open(); if (xen_9pdev->rings[i].evtchndev == NULL) { goto out; } - qemu_set_cloexec(xenevtchn_fd(xen_9pdev->rings[i].evtchndev)); - xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain + qemu_set_cloexec(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev)); + xen_9pdev->rings[i].local_port = qemu_xen_evtchn_bind_interdomain (xen_9pdev->rings[i].evtchndev, xendev->dom, xen_9pdev->rings[i].evtchn); @@ -463,8 +475,8 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev) goto out; } xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port); - qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), - xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]); + qemu_set_fd_handler(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev), + xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]); } xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model"); @@ -494,6 +506,8 @@ out: static void xen_9pfs_alloc(struct XenLegacyDevice *xendev) { + trace_xen_9pfs_alloc(xendev->name); + xenstore_write_be_str(xendev, "versions", VERSIONS); xenstore_write_be_int(xendev, "max-rings", MAX_RINGS); xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER); diff --git a/hw/Kconfig b/hw/Kconfig index 38233bbb0f..ba62ff6417 100644 --- a/hw/Kconfig +++ b/hw/Kconfig @@ -41,6 +41,7 @@ source tpm/Kconfig source usb/Kconfig source virtio/Kconfig source vfio/Kconfig +source xen/Kconfig source watchdog/Kconfig # arch Kconfig diff --git a/hw/acpi/acpi-pci-hotplug-stub.c b/hw/acpi/acpi-pci-hotplug-stub.c index a43f6dafc9..dcee3ad7a1 100644 --- a/hw/acpi/acpi-pci-hotplug-stub.c +++ b/hw/acpi/acpi-pci-hotplug-stub.c @@ -5,8 +5,7 @@ const VMStateDescription vmstate_acpi_pcihp_pci_status; void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus, - MemoryRegion *address_space_io, bool bridges_enabled, - uint16_t io_base) + MemoryRegion *address_space_io, uint16_t io_base) { return; } @@ -36,8 +35,12 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev, return; } -void acpi_pcihp_reset(AcpiPciHpState *s, bool acpihp_root_off) +void acpi_pcihp_reset(AcpiPciHpState *s) { return; } +bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus) +{ + return true; +} diff --git a/hw/acpi/cxl.c b/hw/acpi/cxl.c index 2bf8c07993..92b46bc932 100644 --- a/hw/acpi/cxl.c +++ b/hw/acpi/cxl.c @@ -30,9 +30,10 @@ #include "qapi/error.h" #include "qemu/uuid.h" -static void cedt_build_chbs(GArray *table_data, PXBDev *cxl) +static void cedt_build_chbs(GArray *table_data, PXBCXLDev *cxl) { - SysBusDevice *sbd = SYS_BUS_DEVICE(cxl->cxl.cxl_host_bridge); + PXBDev *pxb = PXB_DEV(cxl); + SysBusDevice *sbd = SYS_BUS_DEVICE(cxl->cxl_host_bridge); struct MemoryRegion *mr = sbd->mmio[0].memory; /* Type */ @@ -45,7 +46,7 @@ static void cedt_build_chbs(GArray *table_data, PXBDev *cxl) build_append_int_noprefix(table_data, 32, 2); /* UID - currently equal to bus number */ - 
build_append_int_noprefix(table_data, cxl->bus_nr, 4); + build_append_int_noprefix(table_data, pxb->bus_nr, 4); /* Version */ build_append_int_noprefix(table_data, 1, 4); @@ -112,7 +113,7 @@ static void cedt_build_cfmws(GArray *table_data, CXLState *cxls) /* Host Bridge List (list of UIDs - currently bus_nr) */ for (i = 0; i < fw->num_targets; i++) { g_assert(fw->target_hbs[i]); - build_append_int_noprefix(table_data, fw->target_hbs[i]->bus_nr, 4); + build_append_int_noprefix(table_data, PXB_DEV(fw->target_hbs[i])->bus_nr, 4); } } } @@ -121,7 +122,7 @@ static int cxl_foreach_pxb_hb(Object *obj, void *opaque) { Aml *cedt = opaque; - if (object_dynamic_cast(obj, TYPE_PXB_CXL_DEVICE)) { + if (object_dynamic_cast(obj, TYPE_PXB_CXL_DEV)) { cedt_build_chbs(cedt->buf, PXB_CXL_DEV(obj)); } diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c index a93c470e9d..25e2c7243e 100644 --- a/hw/acpi/ich9.c +++ b/hw/acpi/ich9.c @@ -36,7 +36,7 @@ #include "hw/acpi/acpi.h" #include "hw/acpi/ich9_tco.h" -#include "hw/i386/ich9.h" +#include "hw/southbridge/ich9.h" #include "hw/mem/pc-dimm.h" #include "hw/mem/nvdimm.h" @@ -218,7 +218,7 @@ static bool vmstate_test_use_pcihp(void *opaque) { ICH9LPCPMRegs *s = opaque; - return s->use_acpi_hotplug_bridge; + return s->acpi_pci_hotplug.use_acpi_hotplug_bridge; } static const VMStateDescription vmstate_pcihp_state = { @@ -277,8 +277,8 @@ static void pm_reset(void *opaque) } pm->smi_en_wmask = ~0; - if (pm->use_acpi_hotplug_bridge) { - acpi_pcihp_reset(&pm->acpi_pci_hotplug, true); + if (pm->acpi_pci_hotplug.use_acpi_hotplug_bridge) { + acpi_pcihp_reset(&pm->acpi_pci_hotplug); } acpi_update_sci(&pm->acpi_regs, pm->irq); @@ -291,9 +291,7 @@ static void pm_powerdown_req(Notifier *n, void *opaque) acpi_pm1_evt_power_down(&pm->acpi_regs); } -void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, - bool smm_enabled, - qemu_irq sci_irq) +void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, qemu_irq sci_irq) { memory_region_init(&pm->io, OBJECT(lpc_pci), "ich9-pm", ICH9_PMIO_SIZE); memory_region_set_enabled(&pm->io, false); @@ -303,7 +301,7 @@ void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, acpi_pm_tmr_init(&pm->acpi_regs, ich9_pm_update_sci_fn, &pm->io); acpi_pm1_evt_init(&pm->acpi_regs, ich9_pm_update_sci_fn, &pm->io); acpi_pm1_cnt_init(&pm->acpi_regs, &pm->io, pm->disable_s3, pm->disable_s4, - pm->s4_val, !pm->smm_compat && !smm_enabled); + pm->s4_val, !pm->smm_compat && !pm->smm_enabled); acpi_gpe_init(&pm->acpi_regs, ICH9_PMIO_GPE0_LEN); memory_region_init_io(&pm->io_gpe, OBJECT(lpc_pci), &ich9_gpe_ops, pm, @@ -314,18 +312,15 @@ void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, "acpi-smi", 8); memory_region_add_subregion(&pm->io, ICH9_PMIO_SMI_EN, &pm->io_smi); - pm->smm_enabled = smm_enabled; - if (pm->enable_tco) { acpi_pm_tco_init(&pm->tco_regs, &pm->io); } - if (pm->use_acpi_hotplug_bridge) { + if (pm->acpi_pci_hotplug.use_acpi_hotplug_bridge) { acpi_pcihp_init(OBJECT(lpc_pci), &pm->acpi_pci_hotplug, pci_get_bus(lpc_pci), pci_address_space_io(lpc_pci), - true, ACPI_PCIHP_ADDR_ICH9); qbus_set_hotplug_handler(BUS(pci_get_bus(lpc_pci)), @@ -407,14 +402,14 @@ static bool ich9_pm_get_acpi_pci_hotplug(Object *obj, Error **errp) { ICH9LPCState *s = ICH9_LPC_DEVICE(obj); - return s->pm.use_acpi_hotplug_bridge; + return s->pm.acpi_pci_hotplug.use_acpi_hotplug_bridge; } static void ich9_pm_set_acpi_pci_hotplug(Object *obj, bool value, Error **errp) { ICH9LPCState *s = ICH9_LPC_DEVICE(obj); - s->pm.use_acpi_hotplug_bridge = value; + 
s->pm.acpi_pci_hotplug.use_acpi_hotplug_bridge = value; } static bool ich9_pm_get_keep_pci_slot_hpc(Object *obj, Error **errp) @@ -439,7 +434,7 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm) pm->disable_s3 = 0; pm->disable_s4 = 0; pm->s4_val = 2; - pm->use_acpi_hotplug_bridge = true; + pm->acpi_pci_hotplug.use_acpi_hotplug_bridge = true; pm->keep_pci_slot_hpc = true; pm->enable_tco = true; @@ -583,6 +578,12 @@ void ich9_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, } } +bool ich9_pm_is_hotpluggable_bus(HotplugHandler *hotplug_dev, BusState *bus) +{ + ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev); + return acpi_pcihp_is_hotpluggbale_bus(&lpc->pm.acpi_pci_hotplug, bus); +} + void ich9_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list) { ICH9LPCState *s = ICH9_LPC_DEVICE(adev); diff --git a/hw/acpi/ich9_tco.c b/hw/acpi/ich9_tco.c index fbf97f81f4..1540f4fd46 100644 --- a/hw/acpi/ich9_tco.c +++ b/hw/acpi/ich9_tco.c @@ -9,7 +9,7 @@ #include "qemu/osdep.h" #include "sysemu/watchdog.h" -#include "hw/i386/ich9.h" +#include "hw/southbridge/ich9.h" #include "migration/vmstate.h" #include "hw/acpi/ich9_tco.h" diff --git a/hw/acpi/pci-bridge.c b/hw/acpi/pci-bridge.c index 5f3ee5157f..7baa7034a1 100644 --- a/hw/acpi/pci-bridge.c +++ b/hw/acpi/pci-bridge.c @@ -21,7 +21,17 @@ void build_pci_bridge_aml(AcpiDevAmlIf *adev, Aml *scope) { PCIBridge *br = PCI_BRIDGE(adev); - if (object_property_find(OBJECT(&br->sec_bus), ACPI_PCIHP_PROP_BSEL)) { - build_append_pci_bus_devices(scope, pci_bridge_get_sec_bus(br)); + if (!DEVICE(br)->hotplugged) { + PCIBus *sec_bus = pci_bridge_get_sec_bus(br); + + build_append_pci_bus_devices(scope, sec_bus); + + /* + * generate hotplug slots descriptors if + * bridge has ACPI PCI hotplug attached, + */ + if (object_property_find(OBJECT(sec_bus), ACPI_PCIHP_PROP_BSEL)) { + build_append_pcihp_slots(scope, sec_bus); + } } } diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c index 5dc7377411..cdd6f775a1 100644 --- a/hw/acpi/pcihp.c +++ b/hw/acpi/pcihp.c @@ -54,21 +54,6 @@ typedef struct AcpiPciHpFind { PCIBus *bus; } AcpiPciHpFind; -static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data) -{ - return a - b; -} - -static GSequence *pci_acpi_index_list(void) -{ - static GSequence *used_acpi_index_list; - - if (!used_acpi_index_list) { - used_acpi_index_list = g_sequence_new(NULL); - } - return used_acpi_index_list; -} - static int acpi_pcihp_get_bsel(PCIBus *bus) { Error *local_err = NULL; @@ -136,20 +121,6 @@ static void acpi_set_pci_info(bool has_bridge_hotplug) } } -static void acpi_pcihp_disable_root_bus(void) -{ - Object *host = acpi_get_i386_pci_host(); - PCIBus *bus; - - bus = PCI_HOST_BRIDGE(host)->bus; - if (bus && qbus_is_hotpluggable(BUS(bus))) { - /* setting the hotplug handler to NULL makes the bus non-hotpluggable */ - qbus_set_hotplug_handler(BUS(bus), NULL); - } - - return; -} - static void acpi_pcihp_test_hotplug_bus(PCIBus *bus, void *opaque) { AcpiPciHpFind *find = opaque; @@ -291,17 +262,12 @@ static void acpi_pcihp_update(AcpiPciHpState *s) } } -void acpi_pcihp_reset(AcpiPciHpState *s, bool acpihp_root_off) +void acpi_pcihp_reset(AcpiPciHpState *s) { - if (acpihp_root_off) { - acpi_pcihp_disable_root_bus(); - } - acpi_set_pci_info(!s->legacy_piix); + acpi_set_pci_info(s->use_acpi_hotplug_bridge); acpi_pcihp_update(s); } -#define ONBOARD_INDEX_MAX (16 * 1024 - 1) - void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { @@ -314,34 +280,6 @@ void 
acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev,
                    ACPI_PCIHP_PROP_BSEL "' set");
         return;
     }
-
-    /*
-     * capped by systemd (see: udev-builtin-net_id.c)
-     * as it's the only known user honor it to avoid users
-     * misconfigure QEMU and then wonder why acpi-index doesn't work
-     */
-    if (pdev->acpi_index > ONBOARD_INDEX_MAX) {
-        error_setg(errp, "acpi-index should be less or equal to %u",
-                   ONBOARD_INDEX_MAX);
-        return;
-    }
-
-    /*
-     * make sure that acpi-index is unique across all present PCI devices
-     */
-    if (pdev->acpi_index) {
-        GSequence *used_indexes = pci_acpi_index_list();
-
-        if (g_sequence_lookup(used_indexes, GINT_TO_POINTER(pdev->acpi_index),
-                              g_cmp_uint32, NULL)) {
-            error_setg(errp, "a PCI device with acpi-index = %" PRIu32
-                       " already exist", pdev->acpi_index);
-            return;
-        }
-        g_sequence_insert_sorted(used_indexes,
-                                 GINT_TO_POINTER(pdev->acpi_index),
-                                 g_cmp_uint32, NULL);
-    }
 }
 
 void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
@@ -361,17 +299,10 @@ void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
      * Overwrite the default hotplug handler with the ACPI PCI one
      * for cold plugged bridges only.
      */
-    if (!s->legacy_piix &&
+    if (s->use_acpi_hotplug_bridge &&
         object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
         PCIBus *sec = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
 
-        /* Remove all hot-plug handlers if hot-plug is disabled on slot */
-        if (object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT) &&
-            !PCIE_SLOT(pdev)->hotplug) {
-            qbus_set_hotplug_handler(BUS(sec), NULL);
-            return;
-        }
-
         qbus_set_hotplug_handler(BUS(sec), OBJECT(hotplug_dev));
         /* We don't have to overwrite any other hotplug handler yet */
         assert(QLIST_EMPTY(&sec->child));
@@ -401,17 +332,6 @@ void acpi_pcihp_device_unplug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
     trace_acpi_pci_unplug(PCI_SLOT(pdev->devfn),
                           acpi_pcihp_get_bsel(pci_get_bus(pdev)));
 
-    /*
-     * clean up acpi-index so it could reused by another device
-     */
-    if (pdev->acpi_index) {
-        GSequence *used_indexes = pci_acpi_index_list();
-
-        g_sequence_remove(g_sequence_lookup(used_indexes,
-                          GINT_TO_POINTER(pdev->acpi_index),
-                          g_cmp_uint32, NULL));
-    }
-
     qdev_unrealize(dev);
 }
 
@@ -437,10 +357,38 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev,
      * acpi_pcihp_eject_slot() when the operation is completed.
      */
     pdev->qdev.pending_deleted_event = true;
+    /* If unplug was requested before the OSPM is initialized, the Linux
+     * kernel will clear the GPE0.sts[] bits during boot, which effectively
+     * hides the unplug event.  Follow-up qmp_device_del() calls then remain
+     * blocked by the above flag permanently.
+     * Unblock qmp_device_del() by setting an expire limit, so the user can
+     * repeat the unplug request later once the OSPM has booted.
+ */ + pdev->qdev.pending_deleted_expires_ms = + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); /* 1 msec */ + s->acpi_pcihp_pci_status[bsel].down |= (1U << slot); acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS); } +bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus) +{ + Object *o = OBJECT(bus->parent); + + if (s->use_acpi_hotplug_bridge && + object_dynamic_cast(o, TYPE_PCI_BRIDGE)) { + if (object_dynamic_cast(o, TYPE_PCIE_SLOT) && !PCIE_SLOT(o)->hotplug) { + return false; + } + return true; + } + + if (s->use_acpi_root_pci_hotplug) { + return true; + } + return false; +} + static uint64_t pci_read(void *opaque, hwaddr addr, unsigned int size) { AcpiPciHpState *s = opaque; @@ -454,7 +402,7 @@ static uint64_t pci_read(void *opaque, hwaddr addr, unsigned int size) switch (addr) { case PCI_UP_BASE: val = s->acpi_pcihp_pci_status[bsel].up; - if (!s->legacy_piix) { + if (s->use_acpi_hotplug_bridge) { s->acpi_pcihp_pci_status[bsel].up = 0; } trace_acpi_pci_up_read(val); @@ -529,7 +477,8 @@ static void pci_write(void *opaque, hwaddr addr, uint64_t data, trace_acpi_pci_ej_write(addr, data); break; case PCI_SEL_BASE: - s->hotplug_select = s->legacy_piix ? ACPI_PCIHP_BSEL_DEFAULT : data; + s->hotplug_select = s->use_acpi_hotplug_bridge ? data : + ACPI_PCIHP_BSEL_DEFAULT; trace_acpi_pci_sel_write(addr, data); default: break; @@ -547,14 +496,13 @@ static const MemoryRegionOps acpi_pcihp_io_ops = { }; void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus, - MemoryRegion *address_space_io, bool bridges_enabled, + MemoryRegion *address_space_io, uint16_t io_base) { s->io_len = ACPI_PCIHP_SIZE; s->io_base = io_base; s->root = root_bus; - s->legacy_piix = !bridges_enabled; memory_region_init_io(&s->io, owner, &acpi_pcihp_io_ops, s, "acpi-pci-hotplug", s->io_len); diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c index eac2125abd..63d2113b86 100644 --- a/hw/acpi/piix4.c +++ b/hw/acpi/piix4.c @@ -170,14 +170,14 @@ static const VMStateDescription vmstate_pci_status = { static bool vmstate_test_use_acpi_hotplug_bridge(void *opaque, int version_id) { PIIX4PMState *s = opaque; - return s->use_acpi_hotplug_bridge; + return s->acpi_pci_hotplug.use_acpi_hotplug_bridge; } static bool vmstate_test_no_use_acpi_hotplug_bridge(void *opaque, int version_id) { PIIX4PMState *s = opaque; - return !s->use_acpi_hotplug_bridge; + return !s->acpi_pci_hotplug.use_acpi_hotplug_bridge; } static bool vmstate_test_use_memhp(void *opaque) @@ -234,7 +234,8 @@ static bool piix4_vmstate_need_smbus(void *opaque, int version_id) static bool vmstate_test_migrate_acpi_index(void *opaque, int version_id) { PIIX4PMState *s = PIIX4_PM(opaque); - return s->use_acpi_hotplug_bridge && !s->not_migrate_acpi_index; + return s->acpi_pci_hotplug.use_acpi_hotplug_bridge && + !s->not_migrate_acpi_index; } /* qemu-kvm 1.2 uses version 3 but advertised as 2 @@ -303,8 +304,9 @@ static void piix4_pm_reset(DeviceState *dev) acpi_update_sci(&s->ar, s->irq); pm_io_space_update(s); - if (s->use_acpi_hotplug_bridge || s->use_acpi_root_pci_hotplug) { - acpi_pcihp_reset(&s->acpi_pci_hotplug, !s->use_acpi_root_pci_hotplug); + if (s->acpi_pci_hotplug.use_acpi_hotplug_bridge || + s->acpi_pci_hotplug.use_acpi_root_pci_hotplug) { + acpi_pcihp_reset(&s->acpi_pci_hotplug); } } @@ -402,6 +404,13 @@ static void piix4_device_unplug_cb(HotplugHandler *hotplug_dev, } } +static bool piix4_is_hotpluggable_bus(HotplugHandler *hotplug_dev, + BusState *bus) +{ + PIIX4PMState *s = PIIX4_PM(hotplug_dev); + return 
acpi_pcihp_is_hotpluggbale_bus(&s->acpi_pci_hotplug, bus); +} + static void piix4_pm_machine_ready(Notifier *n, void *opaque) { PIIX4PMState *s = container_of(n, PIIX4PMState, machine_ready); @@ -487,12 +496,11 @@ static void piix4_pm_realize(PCIDevice *dev, Error **errp) qemu_add_machine_init_done_notifier(&s->machine_ready); if (xen_enabled()) { - s->use_acpi_hotplug_bridge = false; + s->acpi_pci_hotplug.use_acpi_hotplug_bridge = false; } piix4_acpi_system_hot_add_init(pci_address_space_io(dev), pci_get_bus(dev), s); - qbus_set_hotplug_handler(BUS(pci_get_bus(dev)), OBJECT(s)); piix4_pm_add_properties(s); } @@ -561,9 +569,11 @@ static void piix4_acpi_system_hot_add_init(MemoryRegion *parent, "acpi-gpe0", GPE_LEN); memory_region_add_subregion(parent, GPE_BASE, &s->io_gpe); - if (s->use_acpi_hotplug_bridge || s->use_acpi_root_pci_hotplug) { + if (s->acpi_pci_hotplug.use_acpi_hotplug_bridge || + s->acpi_pci_hotplug.use_acpi_root_pci_hotplug) { acpi_pcihp_init(OBJECT(s), &s->acpi_pci_hotplug, bus, parent, - s->use_acpi_hotplug_bridge, ACPI_PCIHP_ADDR_PIIX4); + ACPI_PCIHP_ADDR_PIIX4); + qbus_set_hotplug_handler(BUS(pci_get_bus(PCI_DEVICE(s))), OBJECT(s)); } s->cpu_hotplug_legacy = true; @@ -602,9 +612,9 @@ static Property piix4_pm_properties[] = { DEFINE_PROP_UINT8(ACPI_PM_PROP_S4_DISABLED, PIIX4PMState, disable_s4, 0), DEFINE_PROP_UINT8(ACPI_PM_PROP_S4_VAL, PIIX4PMState, s4_val, 2), DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, PIIX4PMState, - use_acpi_hotplug_bridge, true), + acpi_pci_hotplug.use_acpi_hotplug_bridge, true), DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCI_ROOTHP, PIIX4PMState, - use_acpi_root_pci_hotplug, true), + acpi_pci_hotplug.use_acpi_root_pci_hotplug, true), DEFINE_PROP_BOOL("memory-hotplug-support", PIIX4PMState, acpi_memory_hotplug.is_enabled, true), DEFINE_PROP_BOOL("smm-compat", PIIX4PMState, smm_compat, false), @@ -641,6 +651,7 @@ static void piix4_pm_class_init(ObjectClass *klass, void *data) hc->plug = piix4_device_plug_cb; hc->unplug_request = piix4_device_unplug_request_cb; hc->unplug = piix4_device_unplug_cb; + hc->is_hotpluggable_bus = piix4_is_hotpluggable_bus; adevc->ospm_status = piix4_ospm_status; adevc->send_event = piix4_send_gpe; adevc->madt_cpu = pc_madt_cpu_entry; diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c index 4161f559a7..03495e1e60 100644 --- a/hw/alpha/dp264.c +++ b/hw/alpha/dp264.c @@ -49,6 +49,7 @@ static void clipper_init(MachineState *machine) const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; + MachineClass *mc = MACHINE_GET_CLASS(machine); AlphaCPU *cpus[4]; PCIBus *pci_bus; PCIDevice *pci_dev; @@ -124,7 +125,7 @@ static void clipper_init(MachineState *machine) /* Network setup. e1000 is good enough, failing Tulip support. 
*/ for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, "e1000", NULL); + pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); } /* Super I/O */ @@ -213,6 +214,7 @@ static void clipper_machine_init(MachineClass *mc) mc->is_default = true; mc->default_cpu_type = ALPHA_CPU_TYPE_NAME("ev67"); mc->default_ram_id = "ram"; + mc->default_nic = "e1000"; } DEFINE_MACHINE("clipper", clipper_machine_init) diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index b5aed4aff5..acc4371a4a 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -6,6 +6,7 @@ config ARM_VIRT imply VFIO_PLATFORM imply VFIO_XGMAC imply TPM_TIS_SYSBUS + imply TPM_TIS_I2C imply NVDIMM select ARM_GIC select ACPI @@ -34,20 +35,28 @@ config ARM_VIRT config CHEETAH bool + default y + depends on TCG && ARM select OMAP select TSC210X config CUBIEBOARD bool + default y + depends on TCG && ARM select ALLWINNER_A10 config DIGIC bool + default y + depends on TCG && ARM select PTIMER select PFLASH_CFI02 config EXYNOS4 bool + default y + depends on TCG && ARM imply I2C_DEVICES select A9MPCORE select I2C @@ -60,6 +69,8 @@ config EXYNOS4 config HIGHBANK bool + default y + depends on TCG && ARM select A9MPCORE select A15MPCORE select AHCI @@ -74,6 +85,8 @@ config HIGHBANK config INTEGRATOR bool + default y + depends on TCG && ARM select ARM_TIMER select INTEGRATOR_DEBUG select PL011 # UART @@ -86,12 +99,16 @@ config INTEGRATOR config MAINSTONE bool + default y + depends on TCG && ARM select PXA2XX select PFLASH_CFI01 select SMC91C111 config MUSCA bool + default y + depends on TCG && ARM select ARMSSE select PL011 select PL031 @@ -103,6 +120,8 @@ config MARVELL_88W8618 config MUSICPAL bool + default y + depends on TCG && ARM select OR_IRQ select BITBANG_I2C select MARVELL_88W8618 @@ -113,20 +132,28 @@ config MUSICPAL config NETDUINO2 bool + default y + depends on TCG && ARM select STM32F205_SOC config NETDUINOPLUS2 bool + default y + depends on TCG && ARM select STM32F405_SOC config OLIMEX_STM32_H405 bool + default y + depends on TCG && ARM select STM32F405_SOC config NSERIES bool + default y + depends on TCG && ARM select OMAP - select TMP105 # tempature sensor + select TMP105 # temperature sensor select BLIZZARD # LCD/TV controller select ONENAND select TSC210X # touchscreen/sensors/audio @@ -157,12 +184,16 @@ config PXA2XX config GUMSTIX bool + default y + depends on TCG && ARM select PFLASH_CFI01 select SMC91C111 select PXA2XX config TOSA bool + default y + depends on TCG && ARM select ZAURUS # scoop select MICRODRIVE select PXA2XX @@ -170,6 +201,8 @@ config TOSA config SPITZ bool + default y + depends on TCG && ARM select ADS7846 # touch-screen controller select MAX111X # A/D converter select WM8750 # audio codec @@ -182,6 +215,8 @@ config SPITZ config Z2 bool + default y + depends on TCG && ARM select PFLASH_CFI01 select WM8750 select PL011 # UART @@ -189,6 +224,8 @@ config Z2 config REALVIEW bool + default y + depends on TCG && ARM imply PCI_DEVICES imply PCI_TESTDEV imply I2C_DEVICES @@ -217,6 +254,8 @@ config REALVIEW config SBSA_REF bool + default y + depends on TCG && AARCH64 imply PCI_DEVICES select AHCI select ARM_SMMUV3 @@ -232,11 +271,15 @@ config SBSA_REF config SABRELITE bool + default y + depends on TCG && ARM select FSL_IMX6 select SSI_M25P80 config STELLARIS bool + default y + depends on TCG && ARM imply I2C_DEVICES select ARM_V7M select CMSDK_APB_WATCHDOG @@ -254,6 +297,8 @@ config STELLARIS config STM32VLDISCOVERY bool + default y + depends on TCG && ARM select STM32F100_SOC config STRONGARM @@ 
-262,16 +307,22 @@ config STRONGARM config COLLIE bool + default y + depends on TCG && ARM select PFLASH_CFI01 select ZAURUS # scoop select STRONGARM config SX1 bool + default y + depends on TCG && ARM select OMAP config VERSATILE bool + default y + depends on TCG && ARM select ARM_TIMER # sp804 select PFLASH_CFI01 select LSI_SCSI_PCI @@ -283,6 +334,8 @@ config VERSATILE config VEXPRESS bool + default y + depends on TCG && ARM select A9MPCORE select A15MPCORE select ARM_MPTIMER @@ -298,6 +351,8 @@ config VEXPRESS config ZYNQ bool + default y + depends on TCG && ARM select A9MPCORE select CADENCE # UART select PFLASH_CFI02 @@ -314,9 +369,9 @@ config ZYNQ config ARM_V7M bool # currently v7M must be included in a TCG build due to translate.c - default y if TCG && (ARM || AARCH64) + default y + depends on TCG && ARM select PTIMER - select ARM_COMPATIBLE_SEMIHOSTING config ALLWINNER_A10 bool @@ -325,6 +380,7 @@ config ALLWINNER_A10 select ALLWINNER_A10_PIC select ALLWINNER_A10_CCM select ALLWINNER_A10_DRAMC + select ALLWINNER_WDT select ALLWINNER_EMAC select ALLWINNER_I2C select AXP209_PMU @@ -333,9 +389,12 @@ config ALLWINNER_A10 config ALLWINNER_H3 bool + default y + depends on TCG && ARM select ALLWINNER_A10_PIT select ALLWINNER_SUN8I_EMAC select ALLWINNER_I2C + select ALLWINNER_WDT select SERIAL select ARM_TIMER select ARM_GIC @@ -346,6 +405,8 @@ config ALLWINNER_H3 config RASPI bool + default y + depends on TCG && ARM select FRAMEBUFFER select PL011 # UART select SDHCI @@ -376,6 +437,8 @@ config STM32F405_SOC config XLNX_ZYNQMP_ARM bool + default y + depends on TCG && AARCH64 select AHCI select ARM_GIC select CADENCE @@ -393,6 +456,8 @@ config XLNX_ZYNQMP_ARM config XLNX_VERSAL bool + default y + depends on TCG && AARCH64 select ARM_GIC select PL011 select CADENCE @@ -403,9 +468,12 @@ config XLNX_VERSAL select OR_IRQ select XLNX_BBRAM select XLNX_EFUSE_VERSAL + select XLNX_USB_SUBSYS config NPCM7XX bool + default y + depends on TCG && ARM select A9MPCORE select ADM1272 select ARM_GIC @@ -422,6 +490,8 @@ config NPCM7XX config FSL_IMX25 bool + default y + depends on TCG && ARM imply I2C_DEVICES select IMX select IMX_FEC @@ -431,6 +501,8 @@ config FSL_IMX25 config FSL_IMX31 bool + default y + depends on TCG && ARM imply I2C_DEVICES select SERIAL select IMX @@ -451,6 +523,8 @@ config FSL_IMX6 config ASPEED_SOC bool + default y + depends on TCG && ARM select DS1338 select FTGMAC100 select I2C @@ -471,6 +545,8 @@ config ASPEED_SOC config MPS2 bool + default y + depends on TCG && ARM imply I2C_DEVICES select ARMSSE select LAN9118 @@ -486,6 +562,8 @@ config MPS2 config FSL_IMX7 bool + default y + depends on TCG && ARM imply PCI_DEVICES imply TEST_DEVICES imply I2C_DEVICES @@ -504,6 +582,8 @@ config ARM_SMMUV3 config FSL_IMX6UL bool + default y + depends on TCG && ARM imply I2C_DEVICES select A15MPCORE select IMX @@ -515,6 +595,8 @@ config FSL_IMX6UL config MICROBIT bool + default y + depends on TCG && ARM select NRF51_SOC config NRF51_SOC @@ -526,6 +608,8 @@ config NRF51_SOC config EMCRAFT_SF2 bool + default y + depends on TCG && ARM select MSF2 select SSI_M25P80 diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c index b7ca795c71..b0ea3f7f66 100644 --- a/hw/arm/allwinner-a10.c +++ b/hw/arm/allwinner-a10.c @@ -38,6 +38,7 @@ #define AW_A10_EHCI_BASE 0x01c14000 #define AW_A10_OHCI_BASE 0x01c14400 #define AW_A10_SATA_BASE 0x01c18000 +#define AW_A10_WDT_BASE 0x01c20c90 #define AW_A10_RTC_BASE 0x01c20d00 #define AW_A10_I2C0_BASE 0x01c2ac00 @@ -92,6 +93,8 @@ static void aw_a10_init(Object *obj) 
object_initialize_child(obj, "mmc0", &s->mmc0, TYPE_AW_SDHOST_SUN4I); object_initialize_child(obj, "rtc", &s->rtc, TYPE_AW_RTC_SUN4I); + + object_initialize_child(obj, "wdt", &s->wdt, TYPE_AW_WDT_SUN4I); } static void aw_a10_realize(DeviceState *dev, Error **errp) @@ -203,6 +206,10 @@ static void aw_a10_realize(DeviceState *dev, Error **errp) sysbus_realize(SYS_BUS_DEVICE(&s->i2c0), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c0), 0, AW_A10_I2C0_BASE); sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c0), 0, qdev_get_gpio_in(dev, 7)); + + /* WDT */ + sysbus_realize(SYS_BUS_DEVICE(&s->wdt), &error_fatal); + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->wdt), 0, AW_A10_WDT_BASE, 1); } static void aw_a10_class_init(ObjectClass *oc, void *data) diff --git a/hw/arm/allwinner-h3.c b/hw/arm/allwinner-h3.c index bfce3c8d92..f05afddf7e 100644 --- a/hw/arm/allwinner-h3.c +++ b/hw/arm/allwinner-h3.c @@ -49,11 +49,14 @@ const hwaddr allwinner_h3_memmap[] = { [AW_H3_DEV_OHCI3] = 0x01c1d400, [AW_H3_DEV_CCU] = 0x01c20000, [AW_H3_DEV_PIT] = 0x01c20c00, + [AW_H3_DEV_WDT] = 0x01c20ca0, [AW_H3_DEV_UART0] = 0x01c28000, [AW_H3_DEV_UART1] = 0x01c28400, [AW_H3_DEV_UART2] = 0x01c28800, [AW_H3_DEV_UART3] = 0x01c28c00, [AW_H3_DEV_TWI0] = 0x01c2ac00, + [AW_H3_DEV_TWI1] = 0x01c2b000, + [AW_H3_DEV_TWI2] = 0x01c2b400, [AW_H3_DEV_EMAC] = 0x01c30000, [AW_H3_DEV_DRAMCOM] = 0x01c62000, [AW_H3_DEV_DRAMCTL] = 0x01c63000, @@ -64,6 +67,7 @@ const hwaddr allwinner_h3_memmap[] = { [AW_H3_DEV_GIC_VCPU] = 0x01c86000, [AW_H3_DEV_RTC] = 0x01f00000, [AW_H3_DEV_CPUCFG] = 0x01f01c00, + [AW_H3_DEV_R_TWI] = 0x01f02400, [AW_H3_DEV_SDRAM] = 0x40000000 }; @@ -107,8 +111,6 @@ struct AwH3Unimplemented { { "uart1", 0x01c28400, 1 * KiB }, { "uart2", 0x01c28800, 1 * KiB }, { "uart3", 0x01c28c00, 1 * KiB }, - { "twi1", 0x01c2b000, 1 * KiB }, - { "twi2", 0x01c2b400, 1 * KiB }, { "scr", 0x01c2c400, 1 * KiB }, { "gpu", 0x01c40000, 64 * KiB }, { "hstmr", 0x01c60000, 4 * KiB }, @@ -123,7 +125,6 @@ struct AwH3Unimplemented { { "r_prcm", 0x01f01400, 1 * KiB }, { "r_twd", 0x01f01800, 1 * KiB }, { "r_cir-rx", 0x01f02000, 1 * KiB }, - { "r_twi", 0x01f02400, 1 * KiB }, { "r_uart", 0x01f02800, 1 * KiB }, { "r_pio", 0x01f02c00, 1 * KiB }, { "r_pwm", 0x01f03800, 1 * KiB }, @@ -151,8 +152,11 @@ enum { AW_H3_GIC_SPI_UART2 = 2, AW_H3_GIC_SPI_UART3 = 3, AW_H3_GIC_SPI_TWI0 = 6, + AW_H3_GIC_SPI_TWI1 = 7, + AW_H3_GIC_SPI_TWI2 = 8, AW_H3_GIC_SPI_TIMER0 = 18, AW_H3_GIC_SPI_TIMER1 = 19, + AW_H3_GIC_SPI_R_TWI = 44, AW_H3_GIC_SPI_MMC0 = 60, AW_H3_GIC_SPI_EHCI0 = 72, AW_H3_GIC_SPI_OHCI0 = 73, @@ -227,7 +231,12 @@ static void allwinner_h3_init(Object *obj) object_initialize_child(obj, "rtc", &s->rtc, TYPE_AW_RTC_SUN6I); - object_initialize_child(obj, "twi0", &s->i2c0, TYPE_AW_I2C); + object_initialize_child(obj, "twi0", &s->i2c0, TYPE_AW_I2C_SUN6I); + object_initialize_child(obj, "twi1", &s->i2c1, TYPE_AW_I2C_SUN6I); + object_initialize_child(obj, "twi2", &s->i2c2, TYPE_AW_I2C_SUN6I); + object_initialize_child(obj, "r_twi", &s->r_twi, TYPE_AW_I2C_SUN6I); + + object_initialize_child(obj, "wdt", &s->wdt, TYPE_AW_WDT_SUN6I); } static void allwinner_h3_realize(DeviceState *dev, Error **errp) @@ -432,6 +441,26 @@ static void allwinner_h3_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c0), 0, qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_TWI0)); + sysbus_realize(SYS_BUS_DEVICE(&s->i2c1), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c1), 0, s->memmap[AW_H3_DEV_TWI1]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c1), 0, + 
qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_TWI1)); + + sysbus_realize(SYS_BUS_DEVICE(&s->i2c2), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c2), 0, s->memmap[AW_H3_DEV_TWI2]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c2), 0, + qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_TWI2)); + + sysbus_realize(SYS_BUS_DEVICE(&s->r_twi), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->r_twi), 0, s->memmap[AW_H3_DEV_R_TWI]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->r_twi), 0, + qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_R_TWI)); + + /* WDT */ + sysbus_realize(SYS_BUS_DEVICE(&s->wdt), &error_fatal); + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->wdt), 0, + s->memmap[AW_H3_DEV_WDT], 1); + /* Unimplemented devices */ for (i = 0; i < ARRAY_SIZE(unimplemented); i++) { create_unimplemented_device(unimplemented[i].device_name, diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index 27dda58338..0b29028fe1 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -200,33 +200,35 @@ struct AspeedMachineState { static void aspeed_write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) { - static const uint32_t poll_mailbox_ready[] = { + AddressSpace *as = arm_boot_address_space(cpu, info); + static const ARMInsnFixup poll_mailbox_ready[] = { /* * r2 = per-cpu go sign value * r1 = AST_SMP_MBOX_FIELD_ENTRY * r0 = AST_SMP_MBOX_FIELD_GOSIGN */ - 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5 */ - 0xe21000ff, /* ands r0, r0, #255 */ - 0xe59f201c, /* ldr r2, [pc, #28] */ - 0xe1822000, /* orr r2, r2, r0 */ + { 0xee100fb0 }, /* mrc p15, 0, r0, c0, c0, 5 */ + { 0xe21000ff }, /* ands r0, r0, #255 */ + { 0xe59f201c }, /* ldr r2, [pc, #28] */ + { 0xe1822000 }, /* orr r2, r2, r0 */ - 0xe59f1018, /* ldr r1, [pc, #24] */ - 0xe59f0018, /* ldr r0, [pc, #24] */ + { 0xe59f1018 }, /* ldr r1, [pc, #24] */ + { 0xe59f0018 }, /* ldr r0, [pc, #24] */ - 0xe320f002, /* wfe */ - 0xe5904000, /* ldr r4, [r0] */ - 0xe1520004, /* cmp r2, r4 */ - 0x1afffffb, /* bne */ - 0xe591f000, /* ldr pc, [r1] */ - AST_SMP_MBOX_GOSIGN, - AST_SMP_MBOX_FIELD_ENTRY, - AST_SMP_MBOX_FIELD_GOSIGN, + { 0xe320f002 }, /* wfe */ + { 0xe5904000 }, /* ldr r4, [r0] */ + { 0xe1520004 }, /* cmp r2, r4 */ + { 0x1afffffb }, /* bne */ + { 0xe591f000 }, /* ldr pc, [r1] */ + { AST_SMP_MBOX_GOSIGN }, + { AST_SMP_MBOX_FIELD_ENTRY }, + { AST_SMP_MBOX_FIELD_GOSIGN }, + { 0, FIXUP_TERMINATOR } }; + static const uint32_t fixupcontext[FIXUP_MAX] = { 0 }; - rom_add_blob_fixed("aspeed.smpboot", poll_mailbox_ready, - sizeof(poll_mailbox_ready), - info->smp_loader_start); + arm_write_bootloader("aspeed.smpboot", as, info->smp_loader_start, + poll_mailbox_ready, fixupcontext); } static void aspeed_reset_secondary(ARMCPU *cpu, @@ -241,12 +243,9 @@ static void aspeed_reset_secondary(ARMCPU *cpu, cpu_set_pc(cs, info->smp_loader_start); } -#define FIRMWARE_ADDR 0x0 - -static void write_boot_rom(DriveInfo *dinfo, hwaddr addr, size_t rom_size, +static void write_boot_rom(BlockBackend *blk, hwaddr addr, size_t rom_size, Error **errp) { - BlockBackend *blk = blk_by_legacy_dinfo(dinfo); g_autofree void *storage = NULL; int64_t size; @@ -272,6 +271,22 @@ static void write_boot_rom(DriveInfo *dinfo, hwaddr addr, size_t rom_size, rom_add_blob_fixed("aspeed.boot_rom", storage, rom_size, addr); } +/* + * Create a ROM and copy the flash contents at the expected address + * (0x0). Boots faster than execute-in-place. 
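For reference, aspeed_install_boot_rom() below pairs with write_boot_rom() above: it creates a ROM MemoryRegion inside the SoC's spi_boot_container and fills it from the IF_MTD block backend. A minimal sketch of the fill path, assuming the standard QEMU block-layer calls blk_getlength() and blk_pread(); the function name here is illustrative, not part of the patch:

    static void write_boot_rom_sketch(BlockBackend *blk, hwaddr addr,
                                      size_t rom_size, Error **errp)
    {
        g_autofree void *storage = NULL;
        int64_t size = blk_getlength(blk);       /* size of backing image */

        if (size <= 0) {
            error_setg(errp, "failed to get flash size");
            return;
        }
        if (rom_size > size) {
            rom_size = size;                     /* copy at most the image */
        }
        storage = g_malloc0(rom_size);
        if (blk_pread(blk, 0, rom_size, storage, 0) < 0) {
            error_setg(errp, "failed to read the initial flash content");
            return;
        }
        /* Install the bytes as a fixed ROM blob at the boot address. */
        rom_add_blob_fixed("aspeed.boot_rom", storage, rom_size, addr);
    }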
+ */ +static void aspeed_install_boot_rom(AspeedSoCState *soc, BlockBackend *blk, + uint64_t rom_size) +{ + MemoryRegion *boot_rom = g_new(MemoryRegion, 1); + + memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", rom_size, + &error_abort); + memory_region_add_subregion_overlap(&soc->spi_boot_container, 0, + boot_rom, 1); + write_boot_rom(blk, ASPEED_SOC_SPI_BOOT_ADDR, rom_size, &error_abort); +} + void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype, unsigned int count, int unit0) { @@ -293,7 +308,7 @@ void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype, qdev_realize_and_unref(dev, BUS(s->spi), &error_fatal); cs_line = qdev_get_gpio_in_named(dev, SSI_GPIO_CS, 0); - sysbus_connect_irq(SYS_BUS_DEVICE(s), i + 1, cs_line); + qdev_connect_gpio_out_named(DEVICE(s), "cs", i, cs_line); } } @@ -332,7 +347,6 @@ static void aspeed_machine_init(MachineState *machine) AspeedMachineState *bmc = ASPEED_MACHINE(machine); AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(machine); AspeedSoCClass *sc; - DriveInfo *drive0 = drive_get(IF_MTD, 0, 0); int i; NICInfo *nd = &nd_table[0]; @@ -382,32 +396,6 @@ static void aspeed_machine_init(MachineState *machine) bmc->spi_model ? bmc->spi_model : amc->spi_model, 1, amc->num_cs); - /* Install first FMC flash content as a boot rom. */ - if (drive0) { - AspeedSMCFlash *fl = &bmc->soc.fmc.flashes[0]; - MemoryRegion *boot_rom = g_new(MemoryRegion, 1); - uint64_t size = memory_region_size(&fl->mmio); - - /* - * create a ROM region using the default mapping window size of - * the flash module. The window size is 64MB for the AST2400 - * SoC and 128MB for the AST2500 SoC, which is twice as big as - * needed by the flash modules of the Aspeed machines. - */ - if (ASPEED_MACHINE(machine)->mmio_exec) { - memory_region_init_alias(boot_rom, NULL, "aspeed.boot_rom", - &fl->mmio, 0, size); - memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR, - boot_rom); - } else { - memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", - size, &error_abort); - memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR, - boot_rom); - write_boot_rom(drive0, FIRMWARE_ADDR, size, &error_abort); - } - } - if (machine->kernel_filename && sc->num_cpus > 1) { /* With no u-boot we must set up a boot stub for the secondary CPU */ MemoryRegion *smpboot = g_new(MemoryRegion, 1); @@ -438,6 +426,16 @@ static void aspeed_machine_init(MachineState *machine) drive_get(IF_SD, 0, bmc->soc.sdhci.num_slots)); } + if (!bmc->mmio_exec) { + DriveInfo *mtd0 = drive_get(IF_MTD, 0, 0); + + if (mtd0) { + uint64_t rom_size = memory_region_size(&bmc->soc.spi_boot); + aspeed_install_boot_rom(&bmc->soc, blk_by_legacy_dinfo(mtd0), + rom_size); + } + } + arm_load_kernel(ARM_CPU(first_cpu), machine, &aspeed_board_binfo); } @@ -521,6 +519,20 @@ static void ast2600_evb_i2c_init(AspeedMachineState *bmc) TYPE_TMP105, 0x4d); } +static void yosemitev2_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + + at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 4), 0x51, 128 * KiB); + at24c_eeprom_init_rom(aspeed_i2c_get_bus(&soc->i2c, 8), 0x51, 128 * KiB, + yosemitev2_bmc_fruid, yosemitev2_bmc_fruid_len); + /* TMP421 */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), "tmp421", 0x1f); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "tmp421", 0x4e); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "tmp421", 0x4f); + +} + static void romulus_bmc_i2c_init(AspeedMachineState *bmc) { AspeedSoCState *soc = &bmc->soc; @@ 
-530,6 +542,19 @@ static void romulus_bmc_i2c_init(AspeedMachineState *bmc) i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), "ds1338", 0x32); } +static void tiogapass_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + + at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 4), 0x54, 128 * KiB); + at24c_eeprom_init_rom(aspeed_i2c_get_bus(&soc->i2c, 6), 0x54, 128 * KiB, + tiogapass_bmc_fruid, tiogapass_bmc_fruid_len); + /* TMP421 */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "tmp421", 0x1f); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), "tmp421", 0x4f); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), "tmp421", 0x4e); +} + static void create_pca9552(AspeedSoCState *soc, int bus_id, int addr) { i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, bus_id), @@ -840,42 +865,46 @@ static void fuji_bmc_i2c_init(AspeedMachineState *bmc) i2c_slave_create_simple(i2c[17], TYPE_LM75, 0x4c); i2c_slave_create_simple(i2c[17], TYPE_LM75, 0x4d); - at24c_eeprom_init(i2c[19], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[20], 0x50, 2 * KiB); - at24c_eeprom_init(i2c[22], 0x52, 2 * KiB); + /* + * EEPROM 24c64 size is 64Kbits or 8 Kbytes + * 24c02 size is 2Kbits or 256 bytes + */ + at24c_eeprom_init(i2c[19], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[20], 0x50, 256); + at24c_eeprom_init(i2c[22], 0x52, 256); i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x48); i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x49); i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x4a); i2c_slave_create_simple(i2c[3], TYPE_TMP422, 0x4c); - at24c_eeprom_init(i2c[8], 0x51, 64 * KiB); + at24c_eeprom_init(i2c[8], 0x51, 8 * KiB); i2c_slave_create_simple(i2c[8], TYPE_LM75, 0x4a); i2c_slave_create_simple(i2c[50], TYPE_LM75, 0x4c); - at24c_eeprom_init(i2c[50], 0x52, 64 * KiB); + at24c_eeprom_init(i2c[50], 0x52, 8 * KiB); i2c_slave_create_simple(i2c[51], TYPE_TMP75, 0x48); i2c_slave_create_simple(i2c[52], TYPE_TMP75, 0x49); i2c_slave_create_simple(i2c[59], TYPE_TMP75, 0x48); i2c_slave_create_simple(i2c[60], TYPE_TMP75, 0x49); - at24c_eeprom_init(i2c[65], 0x53, 64 * KiB); + at24c_eeprom_init(i2c[65], 0x53, 8 * KiB); i2c_slave_create_simple(i2c[66], TYPE_TMP75, 0x49); i2c_slave_create_simple(i2c[66], TYPE_TMP75, 0x48); - at24c_eeprom_init(i2c[68], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[69], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[70], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[71], 0x52, 64 * KiB); + at24c_eeprom_init(i2c[68], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[69], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[70], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[71], 0x52, 8 * KiB); - at24c_eeprom_init(i2c[73], 0x53, 64 * KiB); + at24c_eeprom_init(i2c[73], 0x53, 8 * KiB); i2c_slave_create_simple(i2c[74], TYPE_TMP75, 0x49); i2c_slave_create_simple(i2c[74], TYPE_TMP75, 0x48); - at24c_eeprom_init(i2c[76], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[77], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[78], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[79], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[28], 0x50, 2 * KiB); + at24c_eeprom_init(i2c[76], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[77], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[78], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[79], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[28], 0x50, 256); for (int i = 0; i < 8; i++) { at24c_eeprom_init(i2c[81 + i * 8], 0x56, 64 * KiB); @@ -1174,6 +1203,24 @@ static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, void *data) aspeed_soc_num_cpus(amc->soc_name); }; +static void aspeed_machine_yosemitev2_class_init(ObjectClass 
*oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Facebook YosemiteV2 BMC (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = AST2500_EVB_HW_STRAP1; + amc->hw_strap2 = 0; + amc->fmc_model = "n25q256a"; + amc->spi_model = "mx25l25635e"; + amc->num_cs = 2; + amc->i2c_init = yosemitev2_bmc_i2c_init; + mc->default_ram_size = 512 * MiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + static void aspeed_machine_romulus_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1191,6 +1238,24 @@ static void aspeed_machine_romulus_class_init(ObjectClass *oc, void *data) aspeed_soc_num_cpus(amc->soc_name); }; +static void aspeed_machine_tiogapass_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Facebook Tiogapass BMC (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = AST2500_EVB_HW_STRAP1; + amc->hw_strap2 = 0; + amc->fmc_model = "n25q256a"; + amc->spi_model = "mx25l25635e"; + amc->num_cs = 2; + amc->i2c_init = tiogapass_bmc_i2c_init; + mc->default_ram_size = 1 * GiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + static void aspeed_machine_sonorapass_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1562,10 +1627,18 @@ static const TypeInfo aspeed_machine_types[] = { .name = MACHINE_TYPE_NAME("ast2600-evb"), .parent = TYPE_ASPEED_MACHINE, .class_init = aspeed_machine_ast2600_evb_class_init, + }, { + .name = MACHINE_TYPE_NAME("yosemitev2-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_yosemitev2_class_init, }, { .name = MACHINE_TYPE_NAME("tacoma-bmc"), .parent = TYPE_ASPEED_MACHINE, .class_init = aspeed_machine_tacoma_class_init, + }, { + .name = MACHINE_TYPE_NAME("tiogapass-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_tiogapass_class_init, }, { .name = MACHINE_TYPE_NAME("g220a-bmc"), .parent = TYPE_ASPEED_MACHINE, diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c index bb2769df04..1bf1246148 100644 --- a/hw/arm/aspeed_ast2600.c +++ b/hw/arm/aspeed_ast2600.c @@ -21,6 +21,7 @@ #define ASPEED_SOC_DPMCU_SIZE 0x00040000 static const hwaddr aspeed_soc_ast2600_memmap[] = { + [ASPEED_DEV_SPI_BOOT] = ASPEED_SOC_SPI_BOOT_ADDR, [ASPEED_DEV_SRAM] = 0x10000000, [ASPEED_DEV_DPMCU] = 0x18000000, /* 0x16000000 0x17FFFFFF : AHB BUS do LPC Bus bridge */ @@ -282,6 +283,12 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) qemu_irq irq; g_autofree char *sram_name = NULL; + /* Default boot region (SPI memory or ROMs) */ + memory_region_init(&s->spi_boot_container, OBJECT(s), + "aspeed.spi_boot_container", 0x10000000); + memory_region_add_subregion(s->memory, sc->memmap[ASPEED_DEV_SPI_BOOT], + &s->spi_boot_container); + /* IO space */ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem), "aspeed.io", sc->memmap[ASPEED_DEV_IOMEM], @@ -431,6 +438,12 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); + /* Set up an alias on the FMC CE0 region (boot default) */ + MemoryRegion *fmc0_mmio = &s->fmc.flashes[0].mmio; + memory_region_init_alias(&s->spi_boot, OBJECT(s), "aspeed.spi_boot", + fmc0_mmio, 0, memory_region_size(fmc0_mmio)); +
memory_region_add_subregion(&s->spi_boot_container, 0x0, &s->spi_boot); + /* SPI */ for (i = 0; i < sc->spis_num; i++) { object_property_set_link(OBJECT(&s->spi[i]), "dram", diff --git a/hw/arm/aspeed_eeprom.c b/hw/arm/aspeed_eeprom.c index 04463acc9d..dc33a88a54 100644 --- a/hw/arm/aspeed_eeprom.c +++ b/hw/arm/aspeed_eeprom.c @@ -6,6 +6,27 @@ #include "aspeed_eeprom.h" +/* Tiogapass BMC FRU */ +const uint8_t tiogapass_bmc_fruid[] = { + 0x01, 0x00, 0x00, 0x01, 0x0d, 0x00, 0x00, 0xf1, 0x01, 0x0c, 0x00, 0x36, + 0xe6, 0xd0, 0xc6, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, 0x42, 0x4d, + 0x43, 0x20, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, 0x4d, 0x6f, + 0x64, 0x75, 0x6c, 0x65, 0xcd, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xce, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc3, 0x31, 0x2e, + 0x30, 0xc9, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc1, 0x39, 0x01, 0x0c, 0x00, 0xc6, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, 0x54, 0x69, 0x6f, 0x67, 0x61, + 0x20, 0x50, 0x61, 0x73, 0x73, 0x20, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, + 0x32, 0xce, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0xc4, 0x58, 0x58, 0x58, 0x32, 0xcd, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc7, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc3, 0x31, 0x2e, 0x30, 0xc9, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc8, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x20, 0x41, 0xc1, 0x45, +}; + const uint8_t fby35_nic_fruid[] = { 0x01, 0x00, 0x00, 0x01, 0x0f, 0x20, 0x00, 0xcf, 0x01, 0x0e, 0x19, 0xd7, 0x5e, 0xcf, 0xc8, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xdd, @@ -77,6 +98,30 @@ const uint8_t fby35_bmc_fruid[] = { 0x6e, 0x66, 0x69, 0x67, 0x20, 0x41, 0xc1, 0x45, }; +/* Yosemite V2 BMC FRU */ +const uint8_t yosemitev2_bmc_fruid[] = { + 0x01, 0x00, 0x00, 0x01, 0x0d, 0x00, 0x00, 0xf1, 0x01, 0x0c, 0x00, 0x36, + 0xe6, 0xd0, 0xc6, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, 0x42, 0x61, + 0x73, 0x65, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x20, 0x4d, 0x50, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xcd, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xce, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc3, 0x31, 0x2e, + 0x30, 0xc9, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc1, 0x39, 0x01, 0x0c, 0x00, 0xc6, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, 0x59, 0x6f, 0x73, 0x65, 0x6d, + 0x69, 0x74, 0x65, 0x20, 0x56, 0x32, 0x20, 0x4d, 0x50, 0x00, 0x00, 0x00, + 0x00, 0xce, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0xc4, 0x45, 0x56, 0x54, 0x32, 0xcd, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc7, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc3, 0x31, 0x2e, 0x30, 0xc9, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc8, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x20, 0x41, 0xc1, 0x45, +}; + +const size_t tiogapass_bmc_fruid_len = sizeof(tiogapass_bmc_fruid); const size_t fby35_nic_fruid_len = sizeof(fby35_nic_fruid); const size_t fby35_bb_fruid_len = sizeof(fby35_bb_fruid); const size_t fby35_bmc_fruid_len = sizeof(fby35_bmc_fruid); + +const size_t 
yosemitev2_bmc_fruid_len = sizeof(yosemitev2_bmc_fruid); diff --git a/hw/arm/aspeed_eeprom.h b/hw/arm/aspeed_eeprom.h index a0f848fa6e..86db6f0479 100644 --- a/hw/arm/aspeed_eeprom.h +++ b/hw/arm/aspeed_eeprom.h @@ -9,6 +9,9 @@ #include "qemu/osdep.h" +extern const uint8_t tiogapass_bmc_fruid[]; +extern const size_t tiogapass_bmc_fruid_len; + extern const uint8_t fby35_nic_fruid[]; extern const uint8_t fby35_bb_fruid[]; extern const uint8_t fby35_bmc_fruid[]; @@ -16,4 +19,7 @@ extern const size_t fby35_nic_fruid_len; extern const size_t fby35_bb_fruid_len; extern const size_t fby35_bmc_fruid_len; +extern const uint8_t yosemitev2_bmc_fruid[]; +extern const size_t yosemitev2_bmc_fruid_len; + #endif diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c index e884d6badc..bf22258de9 100644 --- a/hw/arm/aspeed_soc.c +++ b/hw/arm/aspeed_soc.c @@ -25,6 +25,7 @@ #define ASPEED_SOC_IOMEM_SIZE 0x00200000 static const hwaddr aspeed_soc_ast2400_memmap[] = { + [ASPEED_DEV_SPI_BOOT] = ASPEED_SOC_SPI_BOOT_ADDR, [ASPEED_DEV_IOMEM] = 0x1E600000, [ASPEED_DEV_FMC] = 0x1E620000, [ASPEED_DEV_SPI1] = 0x1E630000, @@ -59,6 +60,7 @@ static const hwaddr aspeed_soc_ast2400_memmap[] = { }; static const hwaddr aspeed_soc_ast2500_memmap[] = { + [ASPEED_DEV_SPI_BOOT] = ASPEED_SOC_SPI_BOOT_ADDR, [ASPEED_DEV_IOMEM] = 0x1E600000, [ASPEED_DEV_FMC] = 0x1E620000, [ASPEED_DEV_SPI1] = 0x1E630000, @@ -245,6 +247,12 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) Error *err = NULL; g_autofree char *sram_name = NULL; + /* Default boot region (SPI memory or ROMs) */ + memory_region_init(&s->spi_boot_container, OBJECT(s), + "aspeed.spi_boot_container", 0x10000000); + memory_region_add_subregion(s->memory, sc->memmap[ASPEED_DEV_SPI_BOOT], + &s->spi_boot_container); + /* IO space */ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem), "aspeed.io", sc->memmap[ASPEED_DEV_IOMEM], @@ -354,6 +362,12 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); + /* Set up an alias on the FMC CE0 region (boot default) */ + MemoryRegion *fmc0_mmio = &s->fmc.flashes[0].mmio; + memory_region_init_alias(&s->spi_boot, OBJECT(s), "aspeed.spi_boot", + fmc0_mmio, 0, memory_region_size(fmc0_mmio)); + memory_region_add_subregion(&s->spi_boot_container, 0x0, &s->spi_boot); + /* SPI */ for (i = 0; i < sc->spis_num; i++) { if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c index 3c2a4160cd..0233038b95 100644 --- a/hw/arm/bcm2835_peripherals.c +++ b/hw/arm/bcm2835_peripherals.c @@ -90,6 +90,8 @@ static void bcm2835_peripherals_init(Object *obj) TYPE_BCM2835_PROPERTY); object_property_add_alias(obj, "board-rev", OBJECT(&s->property), "board-rev"); + object_property_add_alias(obj, "command-line", OBJECT(&s->property), + "command-line"); object_property_add_const_link(OBJECT(&s->property), "fb", OBJECT(&s->fb)); diff --git a/hw/arm/bcm2836.c b/hw/arm/bcm2836.c index f894338fc6..166dc896c0 100644 --- a/hw/arm/bcm2836.c +++ b/hw/arm/bcm2836.c @@ -55,6 +55,8 @@ static void bcm2836_init(Object *obj) TYPE_BCM2835_PERIPHERALS); object_property_add_alias(obj, "board-rev", OBJECT(&s->peripherals), "board-rev"); + object_property_add_alias(obj, "command-line", OBJECT(&s->peripherals), + "command-line"); object_property_add_alias(obj, "vcram-size", OBJECT(&s->peripherals), "vcram-size"); } diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 1e021c4a34..720f22531a 100644 --- 
a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -60,26 +60,6 @@ AddressSpace *arm_boot_address_space(ARMCPU *cpu, return cpu_get_address_space(cs, asidx); } -typedef enum { - FIXUP_NONE = 0, /* do nothing */ - FIXUP_TERMINATOR, /* end of insns */ - FIXUP_BOARDID, /* overwrite with board ID number */ - FIXUP_BOARD_SETUP, /* overwrite with board specific setup code address */ - FIXUP_ARGPTR_LO, /* overwrite with pointer to kernel args */ - FIXUP_ARGPTR_HI, /* overwrite with pointer to kernel args (high half) */ - FIXUP_ENTRYPOINT_LO, /* overwrite with kernel entry point */ - FIXUP_ENTRYPOINT_HI, /* overwrite with kernel entry point (high half) */ - FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */ - FIXUP_BOOTREG, /* overwrite with boot register address */ - FIXUP_DSB, /* overwrite with correct DSB insn for cpu */ - FIXUP_MAX, -} FixupType; - -typedef struct ARMInsnFixup { - uint32_t insn; - FixupType fixup; -} ARMInsnFixup; - static const ARMInsnFixup bootloader_aarch64[] = { { 0x580000c0 }, /* ldr x0, arg ; Load the lower 32-bits of DTB */ { 0xaa1f03e1 }, /* mov x1, xzr */ @@ -150,9 +130,10 @@ static const ARMInsnFixup smpboot[] = { { 0, FIXUP_TERMINATOR } }; -static void write_bootloader(const char *name, hwaddr addr, - const ARMInsnFixup *insns, uint32_t *fixupcontext, - AddressSpace *as) +void arm_write_bootloader(const char *name, + AddressSpace *as, hwaddr addr, + const ARMInsnFixup *insns, + const uint32_t *fixupcontext) { /* Fix up the specified bootloader fragment and write it into * guest memory using rom_add_blob_fixed(). fixupcontext is @@ -214,8 +195,8 @@ static void default_write_secondary(ARMCPU *cpu, fixupcontext[FIXUP_DSB] = CP15_DSB_INSN; } - write_bootloader("smpboot", info->smp_loader_start, - smpboot, fixupcontext, as); + arm_write_bootloader("smpboot", as, info->smp_loader_start, + smpboot, fixupcontext); } void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu, @@ -689,7 +670,10 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds, rom_ptr_for_as(as, addr, size)); - g_free(fdt); + if (fdt != ms->fdt) { + g_free(ms->fdt); + ms->fdt = fdt; + } return size; @@ -926,6 +910,12 @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base, return -1; } size = len; + + /* Unpack the image if it is an EFI zboot image */ + if (unpack_efi_zboot_image(&buffer, &size) < 0) { + g_free(buffer); + return -1; + } } /* check the arm64 magic header value -- very old kernels may not have it */ @@ -1177,8 +1167,8 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, fixupcontext[FIXUP_ENTRYPOINT_LO] = entry; fixupcontext[FIXUP_ENTRYPOINT_HI] = entry >> 32; - write_bootloader("bootloader", info->loader_start, - primary_loader, fixupcontext, as); + arm_write_bootloader("bootloader", as, info->loader_start, + primary_loader, fixupcontext); if (info->write_board_setup) { info->write_board_setup(cpu, info); diff --git a/hw/arm/collie.c b/hw/arm/collie.c index 9edff59370..a0ad1b8dc7 100644 --- a/hw/arm/collie.c +++ b/hw/arm/collie.c @@ -19,6 +19,8 @@ #include "exec/address-spaces.h" #include "cpu.h" #include "qom/object.h" +#include "qemu/error-report.h" + #define RAM_SIZE (512 * MiB) #define FLASH_SIZE (32 * MiB) diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c index 71a7df1508..8c7fa91529 100644 --- a/hw/arm/cubieboard.c +++ b/hw/arm/cubieboard.c @@ -17,6 +17,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "hw/boards.h" #include
"hw/qdev-properties.h" #include "hw/arm/allwinner-a10.h" diff --git a/hw/arm/exynos4210.c b/hw/arm/exynos4210.c index 6f2dda13f6..de39fb0ece 100644 --- a/hw/arm/exynos4210.c +++ b/hw/arm/exynos4210.c @@ -326,7 +326,7 @@ static int mapline_size(const int *mapline) /* * Initialize board IRQs. - * These IRQs contain splitted Int/External Combiner and External Gic IRQs. + * These IRQs contain split Int/External Combiner and External Gic IRQs. */ static void exynos4210_init_board_irqs(Exynos4210State *s) { @@ -744,7 +744,7 @@ static void exynos4210_realize(DeviceState *socdev, Error **errp) * - SDMA * - ADMA2 * - * As this part of the Exynos4210 is not publically available, + * As this part of the Exynos4210 is not publicly available, * we used the "HS-MMC Controller S3C2416X RISC Microprocessor" * public datasheet which is very similar (implementing * MMC Specification Version 4.0 being the only difference noted) diff --git a/hw/arm/fby35.c b/hw/arm/fby35.c index 90c04bbc33..f4600c290b 100644 --- a/hw/arm/fby35.c +++ b/hw/arm/fby35.c @@ -100,13 +100,7 @@ static void fby35_bmc_init(Fby35State *s) MemoryRegion *boot_rom = g_new(MemoryRegion, 1); uint64_t size = memory_region_size(&fl->mmio); - if (s->mmio_exec) { - memory_region_init_alias(boot_rom, NULL, "aspeed.boot_rom", - &fl->mmio, 0, size); - memory_region_add_subregion(&s->bmc_memory, FBY35_BMC_FIRMWARE_ADDR, - boot_rom); - } else { - + if (!s->mmio_exec) { memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", size, &error_abort); memory_region_add_subregion(&s->bmc_memory, FBY35_BMC_FIRMWARE_ADDR, diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c index 00dafe3f62..4fa7f0b95e 100644 --- a/hw/arm/fsl-imx6.c +++ b/hw/arm/fsl-imx6.c @@ -53,6 +53,8 @@ static void fsl_imx6_init(Object *obj) object_initialize_child(obj, "src", &s->src, TYPE_IMX6_SRC); + object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS); + for (i = 0; i < FSL_IMX6_NUM_UARTS; i++) { snprintf(name, NAME_SIZE, "uart%d", i + 1); object_initialize_child(obj, name, &s->uart[i], TYPE_IMX_SERIAL); @@ -390,6 +392,12 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_ENET_MAC_1588_IRQ)); + /* + * SNVS + */ + sysbus_realize(SYS_BUS_DEVICE(&s->snvs), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX6_SNVSHP_ADDR); + /* * Watchdog */ diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c index d88d6cc1c5..2189dcbb72 100644 --- a/hw/arm/fsl-imx6ul.c +++ b/hw/arm/fsl-imx6ul.c @@ -407,7 +407,23 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) /* * Ethernet + * + * We must use two loops since phy_connected affects the other interface + * and we have to set all properties before calling sysbus_realize(). */ + for (i = 0; i < FSL_IMX6UL_NUM_ETHS; i++) { + object_property_set_bool(OBJECT(&s->eth[i]), "phy-connected", + s->phy_connected[i], &error_abort); + /* + * If the MDIO bus on this controller is not connected, assume the + * other controller provides support for it. 
+ */ + if (!s->phy_connected[i]) { + object_property_set_link(OBJECT(&s->eth[1 - i]), "phy-consumer", + OBJECT(&s->eth[i]), &error_abort); + } + } + for (i = 0; i < FSL_IMX6UL_NUM_ETHS; i++) { static const hwaddr FSL_IMX6UL_ENETn_ADDR[FSL_IMX6UL_NUM_ETHS] = { FSL_IMX6UL_ENET1_ADDR, @@ -620,6 +636,10 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) static Property fsl_imx6ul_properties[] = { DEFINE_PROP_UINT32("fec1-phy-num", FslIMX6ULState, phy_num[0], 0), DEFINE_PROP_UINT32("fec2-phy-num", FslIMX6ULState, phy_num[1], 1), + DEFINE_PROP_BOOL("fec1-phy-connected", FslIMX6ULState, phy_connected[0], + true), + DEFINE_PROP_BOOL("fec2-phy-connected", FslIMX6ULState, phy_connected[1], + true), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c index afc7480799..9e41d4b677 100644 --- a/hw/arm/fsl-imx7.c +++ b/hw/arm/fsl-imx7.c @@ -395,7 +395,23 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) /* * Ethernet + * + * We must use two loops since phy_connected affects the other interface + * and we have to set all properties before calling sysbus_realize(). */ + for (i = 0; i < FSL_IMX7_NUM_ETHS; i++) { + object_property_set_bool(OBJECT(&s->eth[i]), "phy-connected", + s->phy_connected[i], &error_abort); + /* + * If the MDIO bus on this controller is not connected, assume the + * other controller provides support for it. + */ + if (!s->phy_connected[i]) { + object_property_set_link(OBJECT(&s->eth[1 - i]), "phy-consumer", + OBJECT(&s->eth[i]), &error_abort); + } + } + for (i = 0; i < FSL_IMX7_NUM_ETHS; i++) { static const hwaddr FSL_IMX7_ENETn_ADDR[FSL_IMX7_NUM_ETHS] = { FSL_IMX7_ENET1_ADDR, @@ -601,6 +617,10 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) static Property fsl_imx7_properties[] = { DEFINE_PROP_UINT32("fec1-phy-num", FslIMX7State, phy_num[0], 0), DEFINE_PROP_UINT32("fec2-phy-num", FslIMX7State, phy_num[1], 1), + DEFINE_PROP_BOOL("fec1-phy-connected", FslIMX7State, phy_connected[0], + true), + DEFINE_PROP_BOOL("fec2-phy-connected", FslIMX7State, phy_connected[1], + true), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/arm/mcimx6ul-evk.c b/hw/arm/mcimx6ul-evk.c index d83c3c380e..3ac1e2ea9b 100644 --- a/hw/arm/mcimx6ul-evk.c +++ b/hw/arm/mcimx6ul-evk.c @@ -41,6 +41,8 @@ static void mcimx6ul_evk_init(MachineState *machine) object_property_add_child(OBJECT(machine), "soc", OBJECT(s)); object_property_set_uint(OBJECT(s), "fec1-phy-num", 2, &error_fatal); object_property_set_uint(OBJECT(s), "fec2-phy-num", 1, &error_fatal); + object_property_set_bool(OBJECT(s), "fec1-phy-connected", false, + &error_fatal); qdev_realize(DEVICE(s), NULL, &error_fatal); memory_region_add_subregion(get_system_memory(), FSL_IMX6UL_MMDC_ADDR, diff --git a/hw/arm/mcimx7d-sabre.c b/hw/arm/mcimx7d-sabre.c index 6182b15f19..d1778122b6 100644 --- a/hw/arm/mcimx7d-sabre.c +++ b/hw/arm/mcimx7d-sabre.c @@ -41,6 +41,8 @@ static void mcimx7d_sabre_init(MachineState *machine) s = FSL_IMX7(object_new(TYPE_FSL_IMX7)); object_property_add_child(OBJECT(machine), "soc", OBJECT(s)); + object_property_set_bool(OBJECT(s), "fec2-phy-connected", false, + &error_fatal); qdev_realize(DEVICE(s), NULL, &error_fatal); memory_region_add_subregion(get_system_memory(), FSL_IMX7_MMDC_ADDR, diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c index 06d9add7c7..58f3d30c9b 100644 --- a/hw/arm/musicpal.c +++ b/hw/arm/musicpal.c @@ -37,6 +37,8 @@ #include "qemu/cutils.h" #include "qom/object.h" #include "hw/net/mv88w8618_eth.h" +#include "qemu/error-report.h" + #define 
MP_MISC_BASE 0x80002000 #define MP_MISC_SIZE 0x00001000 @@ -98,7 +100,7 @@ #define MP_LCD_SPI_CMD 0x00104011 #define MP_LCD_SPI_INVALID 0x00000000 -/* Commmands */ +/* Commands */ #define MP_LCD_INST_SETPAGE0 0xB0 /* ... */ #define MP_LCD_INST_SETPAGE7 0xB7 diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c index 9b31207a06..2aef579aac 100644 --- a/hw/arm/npcm7xx_boards.c +++ b/hw/arm/npcm7xx_boards.c @@ -30,6 +30,8 @@ #include "sysemu/blockdev.h" #include "sysemu/sysemu.h" #include "sysemu/block-backend.h" +#include "qemu/error-report.h" + #define NPCM7XX_POWER_ON_STRAPS_DEFAULT ( \ NPCM7XX_PWRON_STRAP_SPI0F18 | \ diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c index c9df063a08..9e49e9e177 100644 --- a/hw/arm/nseries.c +++ b/hw/arm/nseries.c @@ -45,6 +45,8 @@ #include "hw/loader.h" #include "hw/sysbus.h" #include "qemu/log.h" +#include "qemu/error-report.h" + /* Nokia N8x0 support */ struct n800_s { diff --git a/hw/arm/omap1.c b/hw/arm/omap1.c index 559c066ce9..d5438156ee 100644 --- a/hw/arm/omap1.c +++ b/hw/arm/omap1.c @@ -4057,7 +4057,7 @@ struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *dram, s->led[1] = omap_lpg_init(system_memory, 0xfffbd800, omap_findclk(s, "clk32-kHz")); - /* Register mappings not currenlty implemented: + /* Register mappings not currently implemented: * MCSI2 Comm fffb2000 - fffb27ff (not mapped on OMAP310) * MCSI1 Bluetooth fffb2800 - fffb2fff (not mapped on OMAP310) * USB W2FC fffb4000 - fffb47ff diff --git a/hw/arm/omap2.c b/hw/arm/omap2.c index 366d6af1b6..d5a2ae7af6 100644 --- a/hw/arm/omap2.c +++ b/hw/arm/omap2.c @@ -2523,7 +2523,7 @@ struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sdram, omap_findclk(s, "func_96m_clk"), omap_findclk(s, "core_l4_iclk")); - /* All register mappings (includin those not currenlty implemented): + /* All register mappings (including those not currently implemented): * SystemControlMod 48000000 - 48000fff * SystemControlL4 48001000 - 48001fff * 32kHz Timer Mod 48004000 - 48004fff diff --git a/hw/arm/omap_sx1.c b/hw/arm/omap_sx1.c index e721292079..4bf1579f8c 100644 --- a/hw/arm/omap_sx1.c +++ b/hw/arm/omap_sx1.c @@ -37,6 +37,8 @@ #include "exec/address-spaces.h" #include "cpu.h" #include "qemu/cutils.h" +#include "qemu/error-report.h" + /*****************************************************************************/ /* Siemens SX1 Cellphone V1 */ diff --git a/hw/arm/orangepi.c b/hw/arm/orangepi.c index 3ace474870..10653361ed 100644 --- a/hw/arm/orangepi.c +++ b/hw/arm/orangepi.c @@ -21,6 +21,7 @@ #include "qemu/units.h" #include "exec/address-spaces.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "hw/boards.h" #include "hw/qdev-properties.h" #include "hw/arm/allwinner-h3.h" diff --git a/hw/arm/palm.c b/hw/arm/palm.c index 1457f10c83..17c11ac4ce 100644 --- a/hw/arm/palm.c +++ b/hw/arm/palm.c @@ -32,6 +32,8 @@ #include "cpu.h" #include "qemu/cutils.h" #include "qom/object.h" +#include "qemu/error-report.h" + static uint64_t static_read(void *opaque, hwaddr offset, unsigned size) { diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c index 92d068d1f9..cc4c4ec9bf 100644 --- a/hw/arm/raspi.c +++ b/hw/arm/raspi.c @@ -16,6 +16,7 @@ #include "qemu/units.h" #include "qemu/cutils.h" #include "qapi/error.h" +#include "hw/arm/boot.h" #include "hw/arm/bcm2836.h" #include "hw/registerfields.h" #include "qemu/error-report.h" @@ -124,20 +125,22 @@ static const char *board_type(uint32_t board_rev) static void write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) { - static const uint32_t smpboot[] = { 
- 0xe1a0e00f, /* mov lr, pc */ - 0xe3a0fe00 + (BOARDSETUP_ADDR >> 4), /* mov pc, BOARDSETUP_ADDR */ - 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5;get core ID */ - 0xe7e10050, /* ubfx r0, r0, #0, #2 ;extract LSB */ - 0xe59f5014, /* ldr r5, =0x400000CC ;load mbox base */ - 0xe320f001, /* 1: yield */ - 0xe7953200, /* ldr r3, [r5, r0, lsl #4] ;read mbox for our core*/ - 0xe3530000, /* cmp r3, #0 ;spin while zero */ - 0x0afffffb, /* beq 1b */ - 0xe7853200, /* str r3, [r5, r0, lsl #4] ;clear mbox */ - 0xe12fff13, /* bx r3 ;jump to target */ - 0x400000cc, /* (constant: mailbox 3 read/clear base) */ + static const ARMInsnFixup smpboot[] = { + { 0xe1a0e00f }, /* mov lr, pc */ + { 0xe3a0fe00 + (BOARDSETUP_ADDR >> 4) }, /* mov pc, BOARDSETUP_ADDR */ + { 0xee100fb0 }, /* mrc p15, 0, r0, c0, c0, 5;get core ID */ + { 0xe7e10050 }, /* ubfx r0, r0, #0, #2 ;extract LSB */ + { 0xe59f5014 }, /* ldr r5, =0x400000CC ;load mbox base */ + { 0xe320f001 }, /* 1: yield */ + { 0xe7953200 }, /* ldr r3, [r5, r0, lsl #4] ;read mbox for our core */ + { 0xe3530000 }, /* cmp r3, #0 ;spin while zero */ + { 0x0afffffb }, /* beq 1b */ + { 0xe7853200 }, /* str r3, [r5, r0, lsl #4] ;clear mbox */ + { 0xe12fff13 }, /* bx r3 ;jump to target */ + { 0x400000cc }, /* (constant: mailbox 3 read/clear base) */ + { 0, FIXUP_TERMINATOR } }; + static const uint32_t fixupcontext[FIXUP_MAX] = { 0 }; /* check that we don't overrun board setup vectors */ QEMU_BUILD_BUG_ON(SMPBOOT_ADDR + sizeof(smpboot) > MVBAR_ADDR); @@ -145,9 +148,8 @@ static void write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) QEMU_BUILD_BUG_ON((BOARDSETUP_ADDR & 0xf) != 0 || (BOARDSETUP_ADDR >> 4) >= 0x100); - rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot), - info->smp_loader_start, - arm_boot_address_space(cpu, info)); + arm_write_bootloader("raspi_smpboot", arm_boot_address_space(cpu, info), + info->smp_loader_start, smpboot, fixupcontext); } static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info) @@ -161,26 +163,28 @@ static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info) * the primary CPU goes into the kernel. We put these variables inside * a rom blob, so that the reset for ROM contents zeroes them for us. 
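The raspi conversion above mirrors the aspeed one: raw uint32_t arrays become ARMInsnFixup tables so they can go through arm_write_bootloader(), now exported from hw/arm/boot.c. That helper walks the table until FIXUP_TERMINATOR and substitutes fixupcontext[] values for slots whose fixup type is not FIXUP_NONE; a condensed sketch of the loop, not the exact boot.c body:

    /* Condensed sketch; insns/fixupcontext/name/addr/as stand for the
     * arm_write_bootloader() parameters. */
    uint32_t code[64];                    /* assumed big enough here */
    int len;

    for (len = 0; insns[len].fixup != FIXUP_TERMINATOR; len++) {
        uint32_t insn = insns[len].insn;
        FixupType fixup = insns[len].fixup;

        if (fixup != FIXUP_NONE) {
            insn = fixupcontext[fixup];   /* e.g. FIXUP_ENTRYPOINT_LO */
        }
        code[len] = tswap32(insn);        /* fix target byte order */
    }
    rom_add_blob_fixed_as(name, code, len * sizeof(uint32_t), addr, as);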
*/ - static const uint32_t smpboot[] = { - 0xd2801b05, /* mov x5, 0xd8 */ - 0xd53800a6, /* mrs x6, mpidr_el1 */ - 0x924004c6, /* and x6, x6, #0x3 */ - 0xd503205f, /* spin: wfe */ - 0xf86678a4, /* ldr x4, [x5,x6,lsl #3] */ - 0xb4ffffc4, /* cbz x4, spin */ - 0xd2800000, /* mov x0, #0x0 */ - 0xd2800001, /* mov x1, #0x0 */ - 0xd2800002, /* mov x2, #0x0 */ - 0xd2800003, /* mov x3, #0x0 */ - 0xd61f0080, /* br x4 */ + static const ARMInsnFixup smpboot[] = { + { 0xd2801b05 }, /* mov x5, 0xd8 */ + { 0xd53800a6 }, /* mrs x6, mpidr_el1 */ + { 0x924004c6 }, /* and x6, x6, #0x3 */ + { 0xd503205f }, /* spin: wfe */ + { 0xf86678a4 }, /* ldr x4, [x5,x6,lsl #3] */ + { 0xb4ffffc4 }, /* cbz x4, spin */ + { 0xd2800000 }, /* mov x0, #0x0 */ + { 0xd2800001 }, /* mov x1, #0x0 */ + { 0xd2800002 }, /* mov x2, #0x0 */ + { 0xd2800003 }, /* mov x3, #0x0 */ + { 0xd61f0080 }, /* br x4 */ + { 0, FIXUP_TERMINATOR } }; + static const uint32_t fixupcontext[FIXUP_MAX] = { 0 }; static const uint64_t spintables[] = { 0, 0, 0, 0 }; - rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot), - info->smp_loader_start, as); + arm_write_bootloader("raspi_smpboot", as, info->smp_loader_start, + smpboot, fixupcontext); rom_add_blob_fixed_as("raspi_spintables", spintables, sizeof(spintables), SPINTABLE_ADDR, as); } @@ -280,6 +284,8 @@ static void raspi_machine_init(MachineState *machine) object_property_add_const_link(OBJECT(&s->soc), "ram", OBJECT(machine->ram)); object_property_set_int(OBJECT(&s->soc), "board-rev", board_rev, &error_abort); + object_property_set_str(OBJECT(&s->soc), "command-line", + machine->kernel_cmdline, &error_abort); qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); /* Create and plug in the SD cards */ diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c index f778cb6d09..de21200ff9 100644 --- a/hw/arm/sbsa-ref.c +++ b/hw/arm/sbsa-ref.c @@ -29,6 +29,7 @@ #include "exec/hwaddr.h" #include "kvm_arm.h" #include "hw/arm/boot.h" +#include "hw/arm/fdt.h" #include "hw/arm/smmuv3.h" #include "hw/block/flash.h" #include "hw/boards.h" @@ -168,6 +169,20 @@ static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx) return arm_cpu_mp_affinity(idx, clustersz); } +static void sbsa_fdt_add_gic_node(SBSAMachineState *sms) +{ + char *nodename; + + nodename = g_strdup_printf("/intc"); + qemu_fdt_add_subnode(sms->fdt, nodename); + qemu_fdt_setprop_sized_cells(sms->fdt, nodename, "reg", + 2, sbsa_ref_memmap[SBSA_GIC_DIST].base, + 2, sbsa_ref_memmap[SBSA_GIC_DIST].size, + 2, sbsa_ref_memmap[SBSA_GIC_REDIST].base, + 2, sbsa_ref_memmap[SBSA_GIC_REDIST].size); + + g_free(nodename); +} /* * Firmware on this machine only uses ACPI table to load OS, these limited * device tree nodes are just to let firmware know the info which varies from @@ -204,7 +219,7 @@ static void create_fdt(SBSAMachineState *sms) * fw compatibility. 
*/ qemu_fdt_setprop_cell(fdt, "/", "machine-version-major", 0); - qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 0); + qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 1); if (ms->numa_state->have_numa_distance) { int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t); @@ -260,6 +275,8 @@ static void create_fdt(SBSAMachineState *sms) g_free(nodename); } + + sbsa_fdt_add_gic_node(sms); } #define SBSA_FLASH_SECTOR_SIZE (256 * KiB) @@ -554,7 +571,7 @@ static void create_ahci(const SBSAMachineState *sms) if (hd[i] == NULL) { continue; } - ide_create_drive(&ahci->dev[i].port, 0, hd[i]); + ide_bus_create_drive(&ahci->dev[i].port, 0, hd[i]); } } @@ -596,6 +613,7 @@ static void create_pcie(SBSAMachineState *sms) hwaddr size_mmio_high = sbsa_ref_memmap[SBSA_PCIE_MMIO_HIGH].size; hwaddr base_pio = sbsa_ref_memmap[SBSA_PCIE_PIO].base; int irq = sbsa_ref_irqmap[SBSA_PCIE]; + MachineClass *mc = MACHINE_GET_CLASS(sms); MemoryRegion *mmio_alias, *mmio_alias_high, *mmio_reg; MemoryRegion *ecam_alias, *ecam_reg; DeviceState *dev; @@ -641,14 +659,14 @@ static void create_pcie(SBSAMachineState *sms) NICInfo *nd = &nd_table[i]; if (!nd->model) { - nd->model = g_strdup("e1000e"); + nd->model = g_strdup(mc->default_nic); } pci_nic_init_nofail(nd, pci->bus, nd->model, NULL); } } - pci_create_simple(pci->bus, -1, "VGA"); + pci_create_simple(pci->bus, -1, "bochs-display"); create_smmu(sms, pci->bus); } @@ -852,12 +870,13 @@ static void sbsa_ref_class_init(ObjectClass *oc, void *data) mc->init = sbsa_ref_init; mc->desc = "QEMU 'SBSA Reference' ARM Virtual Machine"; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a57"); + mc->default_cpu_type = ARM_CPU_TYPE_NAME("neoverse-n1"); mc->max_cpus = 512; mc->pci_allow_0_address = true; mc->minimum_page_bits = 12; mc->block_default_type = IF_IDE; mc->no_cdrom = 1; + mc->default_nic = "e1000e"; mc->default_ram_size = 1 * GiB; mc->default_ram_id = "sbsa-ref.ram"; mc->default_cpus = 4; diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index 0a5a60ca1e..5ab9d45d58 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -38,7 +38,7 @@ static guint smmu_iotlb_key_hash(gconstpointer v) /* Jenkins hash */ a = b = c = JHASH_INITVAL + sizeof(*key); - a += key->asid + key->level + key->tg; + a += key->asid + key->vmid + key->level + key->tg; b += extract64(key->iova, 0, 32); c += extract64(key->iova, 32, 32); @@ -53,13 +53,15 @@ static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2) SMMUIOTLBKey *k1 = (SMMUIOTLBKey *)v1, *k2 = (SMMUIOTLBKey *)v2; return (k1->asid == k2->asid) && (k1->iova == k2->iova) && - (k1->level == k2->level) && (k1->tg == k2->tg); + (k1->level == k2->level) && (k1->tg == k2->tg) && + (k1->vmid == k2->vmid); } -SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova, +SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova, uint8_t tg, uint8_t level) { - SMMUIOTLBKey key = {.asid = asid, .iova = iova, .tg = tg, .level = level}; + SMMUIOTLBKey key = {.asid = asid, .vmid = vmid, .iova = iova, + .tg = tg, .level = level}; return key; } @@ -78,7 +80,8 @@ SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg, uint64_t mask = subpage_size - 1; SMMUIOTLBKey key; - key = smmu_get_iotlb_key(cfg->asid, iova & ~mask, tg, level); + key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, + iova & ~mask, tg, level); entry = g_hash_table_lookup(bs->iotlb, &key); if (entry) { break; @@ -88,13 +91,13 @@ SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg, if (entry) { 
cfg->iotlb_hits++; - trace_smmu_iotlb_lookup_hit(cfg->asid, iova, + trace_smmu_iotlb_lookup_hit(cfg->asid, cfg->s2cfg.vmid, iova, cfg->iotlb_hits, cfg->iotlb_misses, 100 * cfg->iotlb_hits / (cfg->iotlb_hits + cfg->iotlb_misses)); } else { cfg->iotlb_misses++; - trace_smmu_iotlb_lookup_miss(cfg->asid, iova, + trace_smmu_iotlb_lookup_miss(cfg->asid, cfg->s2cfg.vmid, iova, cfg->iotlb_hits, cfg->iotlb_misses, 100 * cfg->iotlb_hits / (cfg->iotlb_hits + cfg->iotlb_misses)); @@ -111,8 +114,10 @@ void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new) smmu_iotlb_inv_all(bs); } - *key = smmu_get_iotlb_key(cfg->asid, new->entry.iova, tg, new->level); - trace_smmu_iotlb_insert(cfg->asid, new->entry.iova, tg, new->level); + *key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, new->entry.iova, + tg, new->level); + trace_smmu_iotlb_insert(cfg->asid, cfg->s2cfg.vmid, new->entry.iova, + tg, new->level); g_hash_table_insert(bs->iotlb, key, new); } @@ -131,7 +136,16 @@ static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value, return SMMU_IOTLB_ASID(*iotlb_key) == asid; } -static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value, +static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value, + gpointer user_data) +{ + uint16_t vmid = *(uint16_t *)user_data; + SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key; + + return SMMU_IOTLB_VMID(*iotlb_key) == vmid; +} + +static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value, gpointer user_data) { SMMUTLBEntry *iter = (SMMUTLBEntry *)value; @@ -142,18 +156,21 @@ static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value, if (info->asid >= 0 && info->asid != SMMU_IOTLB_ASID(iotlb_key)) { return false; } + if (info->vmid >= 0 && info->vmid != SMMU_IOTLB_VMID(iotlb_key)) { + return false; + } return ((info->iova & ~entry->addr_mask) == entry->iova) || ((entry->iova & ~info->mask) == info->iova); } -void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova, +void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova, uint8_t tg, uint64_t num_pages, uint8_t ttl) { /* if tg is not set we use 4KB range invalidation */ uint8_t granule = tg ? 
tg * 2 + 10 : 12; if (ttl && (num_pages == 1) && (asid >= 0)) { - SMMUIOTLBKey key = smmu_get_iotlb_key(asid, iova, tg, ttl); + SMMUIOTLBKey key = smmu_get_iotlb_key(asid, vmid, iova, tg, ttl); if (g_hash_table_remove(s->iotlb, &key)) { return; @@ -166,10 +183,11 @@ void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova, SMMUIOTLBPageInvInfo info = { .asid = asid, .iova = iova, + .vmid = vmid, .mask = (num_pages * 1 << granule) - 1}; g_hash_table_foreach_remove(s->iotlb, - smmu_hash_remove_by_asid_iova, + smmu_hash_remove_by_asid_vmid_iova, &info); } @@ -179,6 +197,12 @@ void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid) g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid); } +inline void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid) +{ + trace_smmu_iotlb_inv_vmid(vmid); + g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid); +} + /* VMSAv8-64 Translation */ /** @@ -264,7 +288,7 @@ SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova) } /** - * smmu_ptw_64 - VMSAv8-64 Walk of the page tables for a given IOVA + * smmu_ptw_64_s1 - VMSAv8-64 Walk of the page tables for a given IOVA * @cfg: translation config * @iova: iova to translate * @perm: access type @@ -276,9 +300,9 @@ SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova) * Upon success, @tlbe is filled with translated_addr and entry * permission rights. */ -static int smmu_ptw_64(SMMUTransCfg *cfg, - dma_addr_t iova, IOMMUAccessFlags perm, - SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) +static int smmu_ptw_64_s1(SMMUTransCfg *cfg, + dma_addr_t iova, IOMMUAccessFlags perm, + SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) { dma_addr_t baseaddr, indexmask; int stage = cfg->stage; @@ -291,14 +315,14 @@ static int smmu_ptw_64(SMMUTransCfg *cfg, } granule_sz = tt->granule_sz; - stride = granule_sz - 3; + stride = VMSA_STRIDE(granule_sz); inputsize = 64 - tt->tsz; level = 4 - (inputsize - 4) / stride; - indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; + indexmask = VMSA_IDXMSK(inputsize, stride, level); baseaddr = extract64(tt->ttb, 0, 48); baseaddr &= ~indexmask; - while (level <= 3) { + while (level < VMSA_LEVELS) { uint64_t subpage_size = 1ULL << level_shift(level, granule_sz); uint64_t mask = subpage_size - 1; uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz); @@ -309,7 +333,7 @@ static int smmu_ptw_64(SMMUTransCfg *cfg, if (get_pte(baseaddr, offset, &pte, info)) { goto error; } - trace_smmu_ptw_level(level, iova, subpage_size, + trace_smmu_ptw_level(stage, level, iova, subpage_size, baseaddr, offset, pte); if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) { @@ -358,6 +382,128 @@ static int smmu_ptw_64(SMMUTransCfg *cfg, info->type = SMMU_PTW_ERR_TRANSLATION; error: + info->stage = 1; + tlbe->entry.perm = IOMMU_NONE; + return -EINVAL; +} + +/** + * smmu_ptw_64_s2 - VMSAv8-64 Walk of the page tables for a given ipa + * for stage-2. + * @cfg: translation config + * @ipa: ipa to translate + * @perm: access type + * @tlbe: SMMUTLBEntry (out) + * @info: handle to an error info + * + * Return 0 on success, < 0 on error. In case of error, @info is filled + * and tlbe->perm is set to IOMMU_NONE. + * Upon success, @tlbe is filled with translated_addr and entry + * permission rights. + */ +static int smmu_ptw_64_s2(SMMUTransCfg *cfg, + dma_addr_t ipa, IOMMUAccessFlags perm, + SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) +{ + const int stage = 2; + int granule_sz = cfg->s2cfg.granule_sz; + /* ARM DDI0487I.a: Table D8-7. 
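The VMSA_* macros used above are implied by the open-coded expressions they replace (stride = granule_sz - 3, the index-mask computation, and the old level <= 3 bound); a sketch of what hw/arm/smmu-internal.h presumably provides:

    #define VMSA_LEVELS                 4
    #define VMSA_STRIDE(gsz)            ((gsz) - 3)    /* bits per level */
    #define VMSA_IDXMSK(isz, strd, lvl) \
        ((1ULL << ((isz) - (strd) * (4 - (lvl)))) - 1)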
*/ + int inputsize = 64 - cfg->s2cfg.tsz; + int level = get_start_level(cfg->s2cfg.sl0, granule_sz); + int stride = VMSA_STRIDE(granule_sz); + int idx = pgd_concat_idx(level, granule_sz, ipa); + /* + * Get the ttb from concatenated structure. + * The offset is the idx * size of each ttb(number of ptes * (sizeof(pte)) + */ + uint64_t baseaddr = extract64(cfg->s2cfg.vttb, 0, 48) + (1 << stride) * + idx * sizeof(uint64_t); + dma_addr_t indexmask = VMSA_IDXMSK(inputsize, stride, level); + + baseaddr &= ~indexmask; + + /* + * On input, a stage 2 Translation fault occurs if the IPA is outside the + * range configured by the relevant S2T0SZ field of the STE. + */ + if (ipa >= (1ULL << inputsize)) { + info->type = SMMU_PTW_ERR_TRANSLATION; + goto error; + } + + while (level < VMSA_LEVELS) { + uint64_t subpage_size = 1ULL << level_shift(level, granule_sz); + uint64_t mask = subpage_size - 1; + uint32_t offset = iova_level_offset(ipa, inputsize, level, granule_sz); + uint64_t pte, gpa; + dma_addr_t pte_addr = baseaddr + offset * sizeof(pte); + uint8_t s2ap; + + if (get_pte(baseaddr, offset, &pte, info)) { + goto error; + } + trace_smmu_ptw_level(stage, level, ipa, subpage_size, + baseaddr, offset, pte); + if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) { + trace_smmu_ptw_invalid_pte(stage, level, baseaddr, + pte_addr, offset, pte); + break; + } + + if (is_table_pte(pte, level)) { + baseaddr = get_table_pte_address(pte, granule_sz); + level++; + continue; + } else if (is_page_pte(pte, level)) { + gpa = get_page_pte_address(pte, granule_sz); + trace_smmu_ptw_page_pte(stage, level, ipa, + baseaddr, pte_addr, pte, gpa); + } else { + uint64_t block_size; + + gpa = get_block_pte_address(pte, level, granule_sz, + &block_size); + trace_smmu_ptw_block_pte(stage, level, baseaddr, + pte_addr, pte, ipa, gpa, + block_size >> 20); + } + + /* + * If S2AFFD and PTE.AF are 0 => fault. (5.2. Stream Table Entry) + * An Access fault takes priority over a Permission fault. + */ + if (!PTE_AF(pte) && !cfg->s2cfg.affd) { + info->type = SMMU_PTW_ERR_ACCESS; + goto error; + } + + s2ap = PTE_AP(pte); + if (is_permission_fault_s2(s2ap, perm)) { + info->type = SMMU_PTW_ERR_PERMISSION; + goto error; + } + + /* + * The address output from the translation causes a stage 2 Address + * Size fault if it exceeds the effective PA output range. + */ + if (gpa >= (1ULL << cfg->s2cfg.eff_ps)) { + info->type = SMMU_PTW_ERR_ADDR_SIZE; + goto error; + } + + tlbe->entry.translated_addr = gpa; + tlbe->entry.iova = ipa & ~mask; + tlbe->entry.addr_mask = mask; + tlbe->entry.perm = s2ap; + tlbe->level = level; + tlbe->granule = granule_sz; + return 0; + } + info->type = SMMU_PTW_ERR_TRANSLATION; + +error: + info->stage = 2; tlbe->entry.perm = IOMMU_NONE; return -EINVAL; } @@ -376,15 +522,26 @@ error: int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm, SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) { - if (!cfg->aa64) { + if (cfg->stage == 1) { + return smmu_ptw_64_s1(cfg, iova, perm, tlbe, info); + } else if (cfg->stage == 2) { /* - * This code path is not entered as we check this while decoding - * the configuration data in the derived SMMU model. + * If bypassing stage 1(or unimplemented), the input address is passed + * directly to stage 2 as IPA. If the input address of a transaction + * exceeds the size of the IAS, a stage 1 Address Size fault occurs. 
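The concatenated-table arithmetic above is easier to see with numbers. A worked example under assumed values (4KB granule, start level 1, 40-bit IPA space), relying on the existing level_shift() helper in smmu-internal.h, i.e. granule_sz + (3 - level) * (granule_sz - 3):

    /* granule_sz = 12 so stride = VMSA_STRIDE(12) = 9;
     * level_shift(0, 12) = 12 + 3 * 9 = 39, so IPA bits [39+] select the
     * concatenated level-1 table; each table holds (1 << 9) PTEs of
     * 8 bytes, i.e. 4 KiB. */
    uint64_t vttb     = 0x40000000;      /* assumed STE.S2TTB base */
    uint64_t ipa      = 1ULL << 39;      /* inside a 40-bit IPA space */
    int      idx      = ipa >> 39;       /* = 1: the second table */
    uint64_t baseaddr = vttb + (1 << 9) * idx * sizeof(uint64_t);
    /* baseaddr = 0x40001000; a 40-bit IPA space needs 2^(40 - 39) = 2
     * concatenated tables, within the 16-table limit that
     * s2_pgtable_config_valid() enforces below. */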
+ * For AA64, IAS = OAS according to (IHI 0070.E.a) "3.4 Address sizes" */ - g_assert_not_reached(); + if (iova >= (1ULL << cfg->oas)) { + info->type = SMMU_PTW_ERR_ADDR_SIZE; + info->stage = 1; + tlbe->entry.perm = IOMMU_NONE; + return -EINVAL; + } + + return smmu_ptw_64_s2(cfg, iova, perm, tlbe, info); } - return smmu_ptw_64(cfg, iova, perm, tlbe, info); + g_assert_not_reached(); } /** @@ -467,20 +624,6 @@ IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid) return NULL; } -/* Unmap the whole notifier's range */ -static void smmu_unmap_notifier_range(IOMMUNotifier *n) -{ - IOMMUTLBEvent event; - - event.type = IOMMU_NOTIFIER_UNMAP; - event.entry.target_as = &address_space_memory; - event.entry.iova = n->start; - event.entry.perm = IOMMU_NONE; - event.entry.addr_mask = n->end - n->start; - - memory_region_notify_iommu_one(n, &event); -} - /* Unmap all notifiers attached to @mr */ static void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr) { @@ -488,7 +631,7 @@ static void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr) trace_smmu_inv_notifiers_mr(mr->parent_obj.name); IOMMU_NOTIFIER_FOREACH(n, mr) { - smmu_unmap_notifier_range(n); + memory_region_unmap_iommu_notifier_range(n); } } diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h index 2d75b31953..843bebb185 100644 --- a/hw/arm/smmu-internal.h +++ b/hw/arm/smmu-internal.h @@ -66,6 +66,8 @@ #define PTE_APTABLE(pte) \ (extract64(pte, 61, 2)) +#define PTE_AF(pte) \ + (extract64(pte, 10, 1)) /* * TODO: At the moment all transactions are considered as privileged (EL1) * as IOMMU translation callback does not pass user/priv attributes. @@ -73,6 +75,9 @@ #define is_permission_fault(ap, perm) \ (((perm) & IOMMU_WO) && ((ap) & 0x2)) +#define is_permission_fault_s2(s2ap, perm) \ + (!(((s2ap) & (perm)) == (perm))) + #define PTE_AP_TO_PERM(ap) \ (IOMMU_ACCESS_FLAG(true, !((ap) & 0x2))) @@ -96,10 +101,42 @@ uint64_t iova_level_offset(uint64_t iova, int inputsize, MAKE_64BIT_MASK(0, gsz - 3); } +/* FEAT_LPA2 and FEAT_TTST are not implemented. */ +static inline int get_start_level(int sl0 , int granule_sz) +{ + /* ARM DDI0487I.a: Table D8-12. */ + if (granule_sz == 12) { + return 2 - sl0; + } + /* ARM DDI0487I.a: Table D8-22 and Table D8-31. */ + return 3 - sl0; +} + +/* + * Index in a concatenated first level stage-2 page table. + * ARM DDI0487I.a: D8.2.2 Concatenated translation tables. + */ +static inline int pgd_concat_idx(int start_level, int granule_sz, + dma_addr_t ipa) +{ + uint64_t ret; + /* + * Get the number of bits handled by next levels, then any extra bits in + * the address should index the concatenated tables. 
This relation can be + * deduced from tables in ARM DDI0487I.a: D8.2.7-9 + */ + int shift = level_shift(start_level - 1, granule_sz); + + ret = ipa >> shift; + return ret; +} + #define SMMU_IOTLB_ASID(key) ((key).asid) +#define SMMU_IOTLB_VMID(key) ((key).vmid) typedef struct SMMUIOTLBPageInvInfo { int asid; + int vmid; uint64_t iova; uint64_t mask; } SMMUIOTLBPageInvInfo; diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h index e8f0ebf25e..6d1c1edab7 100644 --- a/hw/arm/smmuv3-internal.h +++ b/hw/arm/smmuv3-internal.h @@ -34,10 +34,12 @@ typedef enum SMMUTranslationStatus { /* MMIO Registers */ REG32(IDR0, 0x0) + FIELD(IDR0, S2P, 0 , 1) FIELD(IDR0, S1P, 1 , 1) FIELD(IDR0, TTF, 2 , 2) FIELD(IDR0, COHACC, 4 , 1) FIELD(IDR0, ASID16, 12, 1) + FIELD(IDR0, VMID16, 18, 1) FIELD(IDR0, TTENDIAN, 21, 2) FIELD(IDR0, STALL_MODEL, 24, 2) FIELD(IDR0, TERM_MODEL, 26, 1) @@ -524,9 +526,13 @@ typedef struct CD { #define STE_S2TG(x) extract32((x)->word[5], 14, 2) #define STE_S2PS(x) extract32((x)->word[5], 16, 3) #define STE_S2AA64(x) extract32((x)->word[5], 19, 1) -#define STE_S2HD(x) extract32((x)->word[5], 24, 1) -#define STE_S2HA(x) extract32((x)->word[5], 25, 1) -#define STE_S2S(x) extract32((x)->word[5], 26, 1) +#define STE_S2ENDI(x) extract32((x)->word[5], 20, 1) +#define STE_S2AFFD(x) extract32((x)->word[5], 21, 1) +#define STE_S2HD(x) extract32((x)->word[5], 23, 1) +#define STE_S2HA(x) extract32((x)->word[5], 24, 1) +#define STE_S2S(x) extract32((x)->word[5], 25, 1) +#define STE_S2R(x) extract32((x)->word[5], 26, 1) + #define STE_CTXPTR(x) \ ({ \ unsigned long addr; \ diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 270c80b665..932f009697 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -21,6 +21,7 @@ #include "hw/irq.h" #include "hw/sysbus.h" #include "migration/vmstate.h" +#include "hw/qdev-properties.h" #include "hw/qdev-core.h" #include "hw/pci/pci.h" #include "cpu.h" @@ -33,6 +34,9 @@ #include "smmuv3-internal.h" #include "smmu-internal.h" +#define PTW_RECORD_FAULT(cfg) (((cfg)->stage == 1) ? (cfg)->record_faults : \ + (cfg)->s2cfg.record_faults) + /** * smmuv3_trigger_irq - pulse @irq if enabled and update * GERROR register in case of GERROR interrupt @@ -238,14 +242,17 @@ void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info) static void smmuv3_init_regs(SMMUv3State *s) { - /** - * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID, - * multi-level stream table - */ - s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */ + /* Based on sys property, the stages supported in smmu will be advertised.*/ + if (s->stage && !strcmp("2", s->stage)) { + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1); + } else { + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); + } + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */ + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, VMID16, 1); /* 16-bit VMID */ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */ s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */ /* terminated transaction will always be aborted/error returned */ @@ -329,11 +336,138 @@ static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid, return 0; } +/* + * Max valid value is 39 when SMMU_IDR3.STT == 0. 
+/*
+ * Max valid value is 39 when SMMU_IDR3.STT == 0.
+ * In architectures after SMMUv3.0:
+ * - If STE.S2TG selects a 4KB or 16KB granule, the minimum valid value for this
+ *   field is MAX(16, 64-IAS).
+ * - If STE.S2TG selects a 64KB granule, the minimum valid value for this field
+ *   is (64-IAS).
+ * As we only support AA64, IAS = OAS.
+ */
+static bool s2t0sz_valid(SMMUTransCfg *cfg)
+{
+    if (cfg->s2cfg.tsz > 39) {
+        return false;
+    }
+
+    if (cfg->s2cfg.granule_sz == 16) {
+        return (cfg->s2cfg.tsz >= 64 - oas2bits(SMMU_IDR5_OAS));
+    }
+
+    return (cfg->s2cfg.tsz >= MAX(64 - oas2bits(SMMU_IDR5_OAS), 16));
+}
+
+/*
+ * Return true if the s2 page table config is valid.
+ * This checks whether, with the configured start level, ias_bits and
+ * granularity, we can have a valid page table as described in ARM ARM
+ * D8.2 Translation process.
+ * The idea here is to see, for the highest possible number of IPA bits, how
+ * many concatenated tables we would need; if that is more than 16, then the
+ * config is not possible.
+ */
+static bool s2_pgtable_config_valid(uint8_t sl0, uint8_t t0sz, uint8_t gran)
+{
+    int level = get_start_level(sl0, gran);
+    uint64_t ipa_bits = 64 - t0sz;
+    uint64_t max_ipa = (1ULL << ipa_bits) - 1;
+    int nr_concat = pgd_concat_idx(level, gran, max_ipa) + 1;
+
+    return nr_concat <= VMSA_MAX_S2_CONCAT;
+}
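
To make the concatenation check concrete, here is a stand-alone sketch; the helper names are hypothetical, and the level shift is re-derived from the VMSAv8-64 table layout rather than quoted from the patch:

    #include <stdint.h>

    /*
     * Bits resolved below @level for a granule of 2^granule_sz bytes
     * (a 4KB granule resolves 9 bits per level, 16KB 11, 64KB 13).
     */
    static int sketch_level_shift(int level, int granule_sz)
    {
        return granule_sz + (3 - level) * (granule_sz - 3);
    }

    /* Concatenated start-level tables needed to cover the whole IPA space. */
    static int sketch_nr_concat(int sl0, int t0sz, int granule_sz)
    {
        int start_level = (granule_sz == 12) ? 2 - sl0 : 3 - sl0;
        uint64_t max_ipa = (1ULL << (64 - t0sz)) - 1;

        return (int)(max_ipa >> sketch_level_shift(start_level - 1,
                                                   granule_sz)) + 1;
    }

    /*
     * Example: a 4KB granule (granule_sz == 12), SL0 == 1 and T0SZ == 24 give
     * a 40-bit IPA space and start level 1; sketch_nr_concat() returns 2,
     * well within the 16-table limit checked above (VMSA_MAX_S2_CONCAT).
     */
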
+
+static int decode_ste_s2_cfg(SMMUTransCfg *cfg, STE *ste)
+{
+    cfg->stage = 2;
+
+    if (STE_S2AA64(ste) == 0x0) {
+        qemu_log_mask(LOG_UNIMP,
+                      "SMMUv3 AArch32 tables not supported\n");
+        g_assert_not_reached();
+    }
+
+    switch (STE_S2TG(ste)) {
+    case 0x0: /* 4KB */
+        cfg->s2cfg.granule_sz = 12;
+        break;
+    case 0x1: /* 64KB */
+        cfg->s2cfg.granule_sz = 16;
+        break;
+    case 0x2: /* 16KB */
+        cfg->s2cfg.granule_sz = 14;
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "SMMUv3 bad STE S2TG: %x\n", STE_S2TG(ste));
+        goto bad_ste;
+    }
+
+    cfg->s2cfg.vttb = STE_S2TTB(ste);
+
+    cfg->s2cfg.sl0 = STE_S2SL0(ste);
+    /* FEAT_TTST not supported. */
+    if (cfg->s2cfg.sl0 == 0x3) {
+        qemu_log_mask(LOG_UNIMP, "SMMUv3 S2SL0 = 0x3 has no meaning!\n");
+        goto bad_ste;
+    }
+
+    /* For AA64, the effective S2PS size is capped to the OAS. */
+    cfg->s2cfg.eff_ps = oas2bits(MIN(STE_S2PS(ste), SMMU_IDR5_OAS));
+    /*
+     * It is ILLEGAL for the address in S2TTB to be outside the range
+     * described by the effective S2PS value.
+     */
+    if (cfg->s2cfg.vttb & ~(MAKE_64BIT_MASK(0, cfg->s2cfg.eff_ps))) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "SMMUv3 S2TTB too large 0x%" PRIx64
+                      ", effective PS %d bits\n",
+                      cfg->s2cfg.vttb, cfg->s2cfg.eff_ps);
+        goto bad_ste;
+    }
+
+    cfg->s2cfg.tsz = STE_S2T0SZ(ste);
+
+    if (!s2t0sz_valid(cfg)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 bad STE S2T0SZ = %d\n",
+                      cfg->s2cfg.tsz);
+        goto bad_ste;
+    }
+
+    if (!s2_pgtable_config_valid(cfg->s2cfg.sl0, cfg->s2cfg.tsz,
+                                 cfg->s2cfg.granule_sz)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "SMMUv3 STE stage 2 config not valid!\n");
+        goto bad_ste;
+    }
+
+    /* Only LE is supported (IDR0.TTENDIAN). */
+    if (STE_S2ENDI(ste)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "SMMUv3 STE_S2ENDI only supports LE!\n");
+        goto bad_ste;
+    }
+
+    cfg->s2cfg.affd = STE_S2AFFD(ste);
+
+    cfg->s2cfg.record_faults = STE_S2R(ste);
+    /* Stall is not supported. */
+    if (STE_S2S(ste)) {
+        qemu_log_mask(LOG_UNIMP, "SMMUv3 Stall not implemented!\n");
+        goto bad_ste;
+    }
+
+    return 0;
+
+bad_ste:
+    return -EINVAL;
+}
+
 /* Returns < 0 in case of invalid STE, 0 otherwise */
 static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                       STE *ste, SMMUEventInfo *event)
 {
     uint32_t config;
+    int ret;
 
     if (!STE_VALID(ste)) {
         if (!event->inval_ste_allowed) {
@@ -354,10 +488,38 @@ static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
         return 0;
     }
 
-    if (STE_CFG_S2_ENABLED(config)) {
-        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
+    /*
+     * If a stage is enabled in SW while not advertised, throw bad ste
+     * according to the user manual (IHI0070E) "5.2 Stream Table Entry".
+     */
+    if (!STAGE1_SUPPORTED(s) && STE_CFG_S1_ENABLED(config)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S1 used but not supported.\n");
         goto bad_ste;
     }
+    if (!STAGE2_SUPPORTED(s) && STE_CFG_S2_ENABLED(config)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S2 used but not supported.\n");
+        goto bad_ste;
+    }
+
+    if (STAGE2_SUPPORTED(s)) {
+        /* VMID is considered even if s2 is disabled. */
+        cfg->s2cfg.vmid = STE_S2VMID(ste);
+    } else {
+        /* Default to -1 */
+        cfg->s2cfg.vmid = -1;
+    }
+
+    if (STE_CFG_S2_ENABLED(config)) {
+        /*
+         * The stage-1 OAS defaults to the SMMU OAS even if stage 1 is not
+         * enabled, as it is used in the input address check for stage-2.
+         */
+        cfg->oas = oas2bits(SMMU_IDR5_OAS);
+        ret = decode_ste_s2_cfg(cfg, ste);
+        if (ret) {
+            goto bad_ste;
+        }
+    }
 
     if (STE_S1CDMAX(ste) != 0) {
         qemu_log_mask(LOG_UNIMP,
@@ -559,6 +721,9 @@ static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
     STE ste;
     CD cd;
 
+    /* ASID defaults to -1 (if s1 is not supported). */
+    cfg->asid = -1;
+
     ret = smmu_find_ste(s, sid, &ste, event);
     if (ret) {
         return ret;
@@ -569,7 +734,7 @@ static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
         return ret;
     }
 
-    if (cfg->aborted || cfg->bypassed) {
+    if (cfg->aborted || cfg->bypassed || (cfg->stage == 2)) {
         return 0;
     }
 
@@ -656,6 +821,11 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
         .addr_mask = ~(hwaddr)0,
         .perm = IOMMU_NONE,
     };
+    /*
+     * Combined attributes used for TLB lookup. As only one stage is
+     * supported, it will hold attributes based on the enabled stage.
+     */
+    SMMUTransTableInfo tt_combined;
 
     qemu_mutex_lock(&s->mutex);
 
@@ -684,25 +854,45 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
         goto epilogue;
     }
 
-    tt = select_tt(cfg, addr);
-    if (!tt) {
-        if (cfg->record_faults) {
-            event.type = SMMU_EVT_F_TRANSLATION;
-            event.u.f_translation.addr = addr;
-            event.u.f_translation.rnw = flag & 0x1;
+    if (cfg->stage == 1) {
+        /* Select stage1 translation table. */
+        tt = select_tt(cfg, addr);
+        if (!tt) {
+            if (cfg->record_faults) {
+                event.type = SMMU_EVT_F_TRANSLATION;
+                event.u.f_translation.addr = addr;
+                event.u.f_translation.rnw = flag & 0x1;
+            }
+            status = SMMU_TRANS_ERROR;
+            goto epilogue;
         }
-        status = SMMU_TRANS_ERROR;
-        goto epilogue;
-    }
 
-    page_mask = (1ULL << (tt->granule_sz)) - 1;
+        tt_combined.granule_sz = tt->granule_sz;
+        tt_combined.tsz = tt->tsz;
+
+    } else {
+        /* Stage2. */
+        tt_combined.granule_sz = cfg->s2cfg.granule_sz;
+        tt_combined.tsz = cfg->s2cfg.tsz;
+    }
+    /*
+     * The TLB lookup needs the granule and input size of a translation
+     * stage; as only one stage is supported right now, choose the right
+     * values from the configuration.
+     */
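
A quick illustration of the granule arithmetic used just below (a sketch only; the helper name is hypothetical):

    #include <stdint.h>

    /* Align a transaction address down to its translation granule. */
    static uint64_t sketch_align_to_granule(uint64_t addr, int granule_sz)
    {
        uint64_t page_mask = (1ULL << granule_sz) - 1;   /* 0xfff for 4KB */

        return addr & ~page_mask;
    }

    /* e.g. sketch_align_to_granule(0x12345678, 12) == 0x12345000 */
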
+ */ + page_mask = (1ULL << tt_combined.granule_sz) - 1; aligned_addr = addr & ~page_mask; - cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr); + cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, aligned_addr); if (cached_entry) { if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) { status = SMMU_TRANS_ERROR; - if (cfg->record_faults) { + /* + * We know that the TLB only contains either stage-1 or stage-2 as + * nesting is not supported. So it is sufficient to check the + * translation stage to know the TLB stage for now. + */ + event.u.f_walk_eabt.s2 = (cfg->stage == 2); + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_PERMISSION; event.u.f_permission.addr = addr; event.u.f_permission.rnw = flag & 0x1; @@ -716,6 +906,8 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, cached_entry = g_new0(SMMUTLBEntry, 1); if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) { + /* All faults from PTW has S2 field. */ + event.u.f_walk_eabt.s2 = (ptw_info.stage == 2); g_free(cached_entry); switch (ptw_info.type) { case SMMU_PTW_ERR_WALK_EABT: @@ -726,28 +918,28 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, event.u.f_walk_eabt.addr2 = ptw_info.addr; break; case SMMU_PTW_ERR_TRANSLATION: - if (cfg->record_faults) { + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_TRANSLATION; event.u.f_translation.addr = addr; event.u.f_translation.rnw = flag & 0x1; } break; case SMMU_PTW_ERR_ADDR_SIZE: - if (cfg->record_faults) { + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_ADDR_SIZE; event.u.f_addr_size.addr = addr; event.u.f_addr_size.rnw = flag & 0x1; } break; case SMMU_PTW_ERR_ACCESS: - if (cfg->record_faults) { + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_ACCESS; event.u.f_access.addr = addr; event.u.f_access.rnw = flag & 0x1; } break; case SMMU_PTW_ERR_PERMISSION: - if (cfg->record_faults) { + if (PTW_RECORD_FAULT(cfg)) { event.type = SMMU_EVT_F_PERMISSION; event.u.f_permission.addr = addr; event.u.f_permission.rnw = flag & 0x1; @@ -808,18 +1000,21 @@ epilogue: * @mr: IOMMU mr region handle * @n: notifier to be called * @asid: address space ID or negative value if we don't care + * @vmid: virtual machine ID or negative value if we don't care * @iova: iova * @tg: translation granule (if communicated through range invalidation) * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1 */ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, IOMMUNotifier *n, - int asid, dma_addr_t iova, - uint8_t tg, uint64_t num_pages) + int asid, int vmid, + dma_addr_t iova, uint8_t tg, + uint64_t num_pages) { SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu); IOMMUTLBEvent event; uint8_t granule; + SMMUv3State *s = sdev->smmu; if (!tg) { SMMUEventInfo event = {.inval_ste_allowed = true}; @@ -834,11 +1029,20 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, return; } - tt = select_tt(cfg, iova); - if (!tt) { + if (vmid >= 0 && cfg->s2cfg.vmid != vmid) { return; } - granule = tt->granule_sz; + + if (STAGE1_SUPPORTED(s)) { + tt = select_tt(cfg, iova); + if (!tt) { + return; + } + granule = tt->granule_sz; + } else { + granule = cfg->s2cfg.granule_sz; + } + } else { granule = tg * 2 + 10; } @@ -852,9 +1056,10 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, memory_region_notify_iommu_one(n, &event); } -/* invalidate an asid/iova range tuple in all mr's */ -static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova, - uint8_t tg, uint64_t num_pages) +/* 
-/* invalidate an asid/iova range tuple in all mr's */
-static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
-                                      uint8_t tg, uint64_t num_pages)
+/* invalidate an asid/vmid/iova range tuple in all mr's */
+static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid,
+                                      dma_addr_t iova, uint8_t tg,
+                                      uint64_t num_pages)
 {
     SMMUDevice *sdev;
 
@@ -862,20 +1067,20 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
         IOMMUMemoryRegion *mr = &sdev->iommu;
         IOMMUNotifier *n;
 
-        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova,
-                                        tg, num_pages);
+        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, vmid,
+                                        iova, tg, num_pages);
 
         IOMMU_NOTIFIER_FOREACH(n, mr) {
-            smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages);
+            smmuv3_notify_iova(mr, n, asid, vmid, iova, tg, num_pages);
         }
     }
 }
 
-static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
+static void smmuv3_range_inval(SMMUState *s, Cmd *cmd)
 {
     dma_addr_t end, addr = CMD_ADDR(cmd);
     uint8_t type = CMD_TYPE(cmd);
-    uint16_t vmid = CMD_VMID(cmd);
+    int vmid = -1;
     uint8_t scale = CMD_SCALE(cmd);
     uint8_t num = CMD_NUM(cmd);
     uint8_t ttl = CMD_TTL(cmd);
@@ -884,15 +1089,21 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
     uint64_t num_pages;
     uint8_t granule;
     int asid = -1;
+    SMMUv3State *smmuv3 = ARM_SMMUV3(s);
+
+    /* Only consider VMID if stage-2 is supported. */
+    if (STAGE2_SUPPORTED(smmuv3)) {
+        vmid = CMD_VMID(cmd);
+    }
 
     if (type == SMMU_CMD_TLBI_NH_VA) {
         asid = CMD_ASID(cmd);
     }
 
     if (!tg) {
-        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
-        smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
-        smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
+        trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
+        smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1);
+        smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl);
         return;
     }
 
@@ -908,9 +1119,9 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
         uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
 
         num_pages = (mask + 1) >> granule;
-        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
-        smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
-        smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
+        trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
+        smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, num_pages);
+        smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl);
         addr += mask + 1;
     }
 }
@@ -1042,12 +1253,22 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
         {
            uint16_t asid = CMD_ASID(&cmd);
 
+            if (!STAGE1_SUPPORTED(s)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+
             trace_smmuv3_cmdq_tlbi_nh_asid(asid);
             smmu_inv_notifiers_all(&s->smmu_state);
             smmu_iotlb_inv_asid(bs, asid);
             break;
         }
         case SMMU_CMD_TLBI_NH_ALL:
+            if (!STAGE1_SUPPORTED(s)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+            QEMU_FALLTHROUGH;
         case SMMU_CMD_TLBI_NSNH_ALL:
             trace_smmuv3_cmdq_tlbi_nh();
             smmu_inv_notifiers_all(&s->smmu_state);
@@ -1055,7 +1276,36 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
             break;
         case SMMU_CMD_TLBI_NH_VAA:
         case SMMU_CMD_TLBI_NH_VA:
-            smmuv3_s1_range_inval(bs, &cmd);
+            if (!STAGE1_SUPPORTED(s)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+            smmuv3_range_inval(bs, &cmd);
+            break;
+        case SMMU_CMD_TLBI_S12_VMALL:
+        {
+            uint16_t vmid = CMD_VMID(&cmd);
+
+            if (!STAGE2_SUPPORTED(s)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+
+            trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
+            smmu_inv_notifiers_all(&s->smmu_state);
+            smmu_iotlb_inv_vmid(bs, vmid);
+            break;
+        }
+        case SMMU_CMD_TLBI_S2_IPA:
+            if (!STAGE2_SUPPORTED(s)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+            /*
+             * As currently only either s1 or s2 is supported, we can reuse
+             * the same function for s2.
+             */
+            smmuv3_range_inval(bs, &cmd);
             break;
         case SMMU_CMD_TLBI_EL3_ALL:
         case SMMU_CMD_TLBI_EL3_VA:
@@ -1063,8 +1313,6 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
         case SMMU_CMD_TLBI_EL2_ASID:
         case SMMU_CMD_TLBI_EL2_VA:
         case SMMU_CMD_TLBI_EL2_VAA:
-        case SMMU_CMD_TLBI_S12_VMALL:
-        case SMMU_CMD_TLBI_S2_IPA:
         case SMMU_CMD_ATC_INV:
         case SMMU_CMD_PRI_RESP:
         case SMMU_CMD_RESUME:
@@ -1073,12 +1321,14 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
             break;
         default:
             cmd_error = SMMU_CERROR_ILL;
-            qemu_log_mask(LOG_GUEST_ERROR,
-                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
             break;
         }
         qemu_mutex_unlock(&s->mutex);
         if (cmd_error) {
+            if (cmd_error == SMMU_CERROR_ILL) {
+                qemu_log_mask(LOG_GUEST_ERROR,
+                              "Illegal command type: %d\n", CMD_TYPE(&cmd));
+            }
             break;
         }
         /*
@@ -1555,6 +1805,17 @@ static const VMStateDescription vmstate_smmuv3 = {
     }
 };
 
+static Property smmuv3_properties[] = {
+    /*
+     * Stages of translation advertised.
+     * "1": Stage 1
+     * "2": Stage 2
+     * Defaults to stage 1.
+     */
+    DEFINE_PROP_STRING("stage", SMMUv3State, stage),
+    DEFINE_PROP_END_OF_LIST()
+};
+
 static void smmuv3_instance_init(Object *obj)
 {
     /* Nothing much to do here as of now */
@@ -1571,6 +1832,7 @@ static void smmuv3_class_init(ObjectClass *klass, void *data)
                                        &c->parent_phases);
     c->parent_realize = dc->realize;
     dc->realize = smmu_realize;
+    device_class_set_props(dc, smmuv3_properties);
 }
 
 static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
diff --git a/hw/arm/trace-events b/hw/arm/trace-events
index 2dee296c8f..cdc1ea06a8 100644
--- a/hw/arm/trace-events
+++ b/hw/arm/trace-events
@@ -5,18 +5,19 @@ virt_acpi_setup(void) "No fw cfg or ACPI disabled. Bailing out."
 
 # smmu-common.c
 smmu_add_mr(const char *name) "%s"
-smmu_ptw_level(int level, uint64_t iova, size_t subpage_size, uint64_t baseaddr, uint32_t offset, uint64_t pte) "level=%d iova=0x%"PRIx64" subpage_sz=0x%zx baseaddr=0x%"PRIx64" offset=%d => pte=0x%"PRIx64
+smmu_ptw_level(int stage, int level, uint64_t iova, size_t subpage_size, uint64_t baseaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d iova=0x%"PRIx64" subpage_sz=0x%zx baseaddr=0x%"PRIx64" offset=%d => pte=0x%"PRIx64
 smmu_ptw_invalid_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" offset=%d pte=0x%"PRIx64
 smmu_ptw_page_pte(int stage, int level, uint64_t iova, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t address) "stage=%d level=%d iova=0x%"PRIx64" base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" page address = 0x%"PRIx64
 smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t iova, uint64_t gpa, int bsize_mb) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" iova=0x%"PRIx64" block address = 0x%"PRIx64" block size = %d MiB"
 smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64
 smmu_iotlb_inv_all(void) "IOTLB invalidate all"
 smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d"
+smmu_iotlb_inv_vmid(uint16_t vmid) "IOTLB invalidate vmid=%d"
 smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64
 smmu_inv_notifiers_mr(const char *name) "iommu mr=%s"
-smmu_iotlb_lookup_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
-smmu_iotlb_lookup_miss(uint16_t
asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" -smmu_iotlb_insert(uint16_t asid, uint64_t addr, uint8_t tg, uint8_t level) "IOTLB ++ asid=%d addr=0x%"PRIx64" tg=%d level=%d" +smmu_iotlb_lookup_hit(uint16_t asid, uint16_t vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" +smmu_iotlb_lookup_miss(uint16_t asid, uint16_t vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" +smmu_iotlb_insert(uint16_t asid, uint16_t vmid, uint64_t addr, uint8_t tg, uint8_t level) "IOTLB ++ asid=%d vmid=%d addr=0x%"PRIx64" tg=%d level=%d" # smmuv3.c smmuv3_read_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)" @@ -45,11 +46,12 @@ smmuv3_cmdq_cfgi_ste_range(int start, int end) "start=0x%x - end=0x%x" smmuv3_cmdq_cfgi_cd(uint32_t sid) "sid=0x%x" smmuv3_config_cache_hit(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache HIT for sid=0x%x (hits=%d, misses=%d, hit rate=%d)" smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache MISS for sid=0x%x (hits=%d, misses=%d, hit rate=%d)" -smmuv3_s1_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d" +smmuv3_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d" smmuv3_cmdq_tlbi_nh(void) "" smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d" +smmuv3_cmdq_tlbi_s12_vmid(uint16_t vmid) "vmid=%d" smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x" smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s" smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s" -smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint64_t iova, uint8_t tg, uint64_t num_pages) "iommu mr=%s asid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64 +smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint16_t vmid, uint64_t iova, uint8_t tg, uint64_t num_pages) "iommu mr=%s asid=%d vmid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64 diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c index 34b012b528..56abadd9b8 100644 --- a/hw/arm/vexpress.c +++ b/hw/arm/vexpress.c @@ -173,6 +173,11 @@ struct VexpressMachineClass { struct VexpressMachineState { MachineState parent; + MemoryRegion vram; + MemoryRegion sram; + MemoryRegion flashalias; + MemoryRegion lowram; + MemoryRegion a15sram; bool secure; bool virt; }; @@ -182,7 +187,7 @@ struct VexpressMachineState { #define TYPE_VEXPRESS_A15_MACHINE MACHINE_TYPE_NAME("vexpress-a15") OBJECT_DECLARE_TYPE(VexpressMachineState, VexpressMachineClass, VEXPRESS_MACHINE) -typedef void DBoardInitFn(const VexpressMachineState *machine, +typedef void DBoardInitFn(VexpressMachineState *machine, ram_addr_t ram_size, const char *cpu_type, qemu_irq *pic); @@ -263,14 +268,13 @@ static void init_cpus(MachineState *ms, const char *cpu_type, } } -static void a9_daughterboard_init(const VexpressMachineState *vms, +static void a9_daughterboard_init(VexpressMachineState *vms, ram_addr_t ram_size, const char *cpu_type, qemu_irq *pic) { MachineState *machine = MACHINE(vms); 
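
The vexpress changes in this file replace per-init g_new(MemoryRegion, 1) allocations with regions embedded in VexpressMachineState, which is also why the daughterboard init functions lose their const qualifier. A minimal sketch of the pattern, with hypothetical names and the standard QEMU memory API:

    #include "qemu/osdep.h"
    #include "exec/memory.h"

    typedef struct SketchMachineState {
        MemoryRegion lowram;   /* embedded: lifetime follows the machine */
    } SketchMachineState;

    static void sketch_map_lowram(SketchMachineState *s, MemoryRegion *sysmem,
                                  MemoryRegion *ram, uint64_t low_size)
    {
        /* Alias the first @low_size bytes of @ram at address 0. */
        memory_region_init_alias(&s->lowram, NULL, "sketch.lowmem",
                                 ram, 0, low_size);
        memory_region_add_subregion(sysmem, 0x0, &s->lowram);
    }
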
MemoryRegion *sysmem = get_system_memory(); - MemoryRegion *lowram = g_new(MemoryRegion, 1); ram_addr_t low_ram_size; if (ram_size > 0x40000000) { @@ -287,9 +291,9 @@ static void a9_daughterboard_init(const VexpressMachineState *vms, * address space should in theory be remappable to various * things including ROM or RAM; we always map the RAM there. */ - memory_region_init_alias(lowram, NULL, "vexpress.lowmem", machine->ram, - 0, low_ram_size); - memory_region_add_subregion(sysmem, 0x0, lowram); + memory_region_init_alias(&vms->lowram, NULL, "vexpress.lowmem", + machine->ram, 0, low_ram_size); + memory_region_add_subregion(sysmem, 0x0, &vms->lowram); memory_region_add_subregion(sysmem, 0x60000000, machine->ram); /* 0x1e000000 A9MPCore (SCU) private memory region */ @@ -348,14 +352,13 @@ static VEDBoardInfo a9_daughterboard = { .init = a9_daughterboard_init, }; -static void a15_daughterboard_init(const VexpressMachineState *vms, +static void a15_daughterboard_init(VexpressMachineState *vms, ram_addr_t ram_size, const char *cpu_type, qemu_irq *pic) { MachineState *machine = MACHINE(vms); MemoryRegion *sysmem = get_system_memory(); - MemoryRegion *sram = g_new(MemoryRegion, 1); { /* We have to use a separate 64 bit variable here to avoid the gcc @@ -386,9 +389,9 @@ static void a15_daughterboard_init(const VexpressMachineState *vms, /* 0x2b060000: SP805 watchdog: not modelled */ /* 0x2b0a0000: PL341 dynamic memory controller: not modelled */ /* 0x2e000000: system SRAM */ - memory_region_init_ram(sram, NULL, "vexpress.a15sram", 0x10000, + memory_region_init_ram(&vms->a15sram, NULL, "vexpress.a15sram", 0x10000, &error_fatal); - memory_region_add_subregion(sysmem, 0x2e000000, sram); + memory_region_add_subregion(sysmem, 0x2e000000, &vms->a15sram); /* 0x7ffb0000: DMA330 DMA controller: not modelled */ /* 0x7ffd0000: PL354 static memory controller: not modelled */ @@ -547,10 +550,6 @@ static void vexpress_common_init(MachineState *machine) I2CBus *i2c; ram_addr_t vram_size, sram_size; MemoryRegion *sysmem = get_system_memory(); - MemoryRegion *vram = g_new(MemoryRegion, 1); - MemoryRegion *sram = g_new(MemoryRegion, 1); - MemoryRegion *flashalias = g_new(MemoryRegion, 1); - MemoryRegion *flash0mem; const hwaddr *map = daughterboard->motherboard_map; int i; @@ -662,24 +661,25 @@ static void vexpress_common_init(MachineState *machine) if (map[VE_NORFLASHALIAS] != -1) { /* Map flash 0 as an alias into low memory */ + MemoryRegion *flash0mem; flash0mem = sysbus_mmio_get_region(SYS_BUS_DEVICE(pflash0), 0); - memory_region_init_alias(flashalias, NULL, "vexpress.flashalias", + memory_region_init_alias(&vms->flashalias, NULL, "vexpress.flashalias", flash0mem, 0, VEXPRESS_FLASH_SIZE); - memory_region_add_subregion(sysmem, map[VE_NORFLASHALIAS], flashalias); + memory_region_add_subregion(sysmem, map[VE_NORFLASHALIAS], &vms->flashalias); } dinfo = drive_get(IF_PFLASH, 0, 1); ve_pflash_cfi01_register(map[VE_NORFLASH1], "vexpress.flash1", dinfo); sram_size = 0x2000000; - memory_region_init_ram(sram, NULL, "vexpress.sram", sram_size, + memory_region_init_ram(&vms->sram, NULL, "vexpress.sram", sram_size, &error_fatal); - memory_region_add_subregion(sysmem, map[VE_SRAM], sram); + memory_region_add_subregion(sysmem, map[VE_SRAM], &vms->sram); vram_size = 0x800000; - memory_region_init_ram(vram, NULL, "vexpress.vram", vram_size, + memory_region_init_ram(&vms->vram, NULL, "vexpress.vram", vram_size, &error_fatal); - memory_region_add_subregion(sysmem, map[VE_VIDEORAM], vram); + memory_region_add_subregion(sysmem, 
map[VE_VIDEORAM], &vms->vram); /* 0x4e000000 LAN9118 Ethernet */ if (nd_table[0].used) { diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index 4156111d49..4af0de8b24 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -694,7 +694,7 @@ static void build_append_gicr(GArray *table_data, uint64_t base, uint32_t size) build_append_int_noprefix(table_data, 0xE, 1); /* Type */ build_append_int_noprefix(table_data, 16, 1); /* Length */ build_append_int_noprefix(table_data, 0, 2); /* Reserved */ - /* Discovery Range Base Addres */ + /* Discovery Range Base Address */ build_append_int_noprefix(table_data, base, 8); build_append_int_noprefix(table_data, size, 4); /* Discovery Range Length */ } diff --git a/hw/arm/virt.c b/hw/arm/virt.c index ac626b3bef..9b9f7d9c68 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -204,16 +204,18 @@ static const int a15irqmap[] = { }; static const char *valid_cpus[] = { +#ifdef CONFIG_TCG ARM_CPU_TYPE_NAME("cortex-a7"), ARM_CPU_TYPE_NAME("cortex-a15"), ARM_CPU_TYPE_NAME("cortex-a35"), - ARM_CPU_TYPE_NAME("cortex-a53"), ARM_CPU_TYPE_NAME("cortex-a55"), - ARM_CPU_TYPE_NAME("cortex-a57"), ARM_CPU_TYPE_NAME("cortex-a72"), ARM_CPU_TYPE_NAME("cortex-a76"), ARM_CPU_TYPE_NAME("a64fx"), ARM_CPU_TYPE_NAME("neoverse-n1"), +#endif + ARM_CPU_TYPE_NAME("cortex-a53"), + ARM_CPU_TYPE_NAME("cortex-a57"), ARM_CPU_TYPE_NAME("host"), ARM_CPU_TYPE_NAME("max"), }; @@ -1424,6 +1426,7 @@ static void create_pcie(VirtMachineState *vms) int i, ecam_id; PCIHostState *pci; MachineState *ms = MACHINE(vms); + MachineClass *mc = MACHINE_GET_CLASS(ms); dev = qdev_new(TYPE_GPEX_HOST); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -1477,7 +1480,7 @@ static void create_pcie(VirtMachineState *vms) NICInfo *nd = &nd_table[i]; if (!nd->model) { - nd->model = g_strdup("virtio"); + nd->model = g_strdup(mc->default_nic); } pci_nic_init_nofail(nd, pci->bus, nd->model, NULL); @@ -2052,7 +2055,7 @@ static void machvirt_init(MachineState *machine) int pa_bits; /* - * Instanciate a temporary CPU object to find out about what + * Instantiate a temporary CPU object to find out about what * we are about to deal with. Once this is done, get rid of * the object. */ @@ -3031,6 +3034,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) mc->auto_enable_numa_with_memhp = true; mc->auto_enable_numa_with_memdev = true; mc->default_ram_id = "mach-virt.ram"; + mc->default_nic = "virtio-net-pci"; object_class_property_add(oc, "acpi", "OnOffAuto", virt_get_acpi, virt_set_acpi, @@ -3232,10 +3236,17 @@ static void machvirt_machine_init(void) } type_init(machvirt_machine_init); -static void virt_machine_8_0_options(MachineClass *mc) +static void virt_machine_8_1_options(MachineClass *mc) { } -DEFINE_VIRT_MACHINE_AS_LATEST(8, 0) +DEFINE_VIRT_MACHINE_AS_LATEST(8, 1) + +static void virt_machine_8_0_options(MachineClass *mc) +{ + virt_machine_8_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len); +} +DEFINE_VIRT_MACHINE(8, 0) static void virt_machine_7_2_options(MachineClass *mc) { diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c index 37fc9b919c..668a9d65a4 100644 --- a/hw/arm/xlnx-versal-virt.c +++ b/hw/arm/xlnx-versal-virt.c @@ -659,7 +659,7 @@ static void versal_virt_init(MachineState *machine) fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz); /* Make the APU cpu address space visible to virtio and other - * modules unaware of muliple address-spaces. 
*/ + * modules unaware of multiple address-spaces. */ memory_region_add_subregion_overlap(get_system_memory(), 0, &s->soc.fpd.apu.mr, 0); diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c index 335cfc417d..5905a33015 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -213,7 +213,7 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s, const char *boot_cpu, Error **errp) { int i; - int num_rpus = MIN(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS, + int num_rpus = MIN((int)(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS), XLNX_ZYNQMP_NUM_RPU_CPUS); if (num_rpus <= 0) { diff --git a/hw/audio/ac97.c b/hw/audio/ac97.c index 364cdfa733..c2a5ce062a 100644 --- a/hw/audio/ac97.c +++ b/hw/audio/ac97.c @@ -26,43 +26,7 @@ #include "qemu/module.h" #include "sysemu/dma.h" #include "qom/object.h" - -enum { - AC97_Reset = 0x00, - AC97_Master_Volume_Mute = 0x02, - AC97_Headphone_Volume_Mute = 0x04, - AC97_Master_Volume_Mono_Mute = 0x06, - AC97_Master_Tone_RL = 0x08, - AC97_PC_BEEP_Volume_Mute = 0x0A, - AC97_Phone_Volume_Mute = 0x0C, - AC97_Mic_Volume_Mute = 0x0E, - AC97_Line_In_Volume_Mute = 0x10, - AC97_CD_Volume_Mute = 0x12, - AC97_Video_Volume_Mute = 0x14, - AC97_Aux_Volume_Mute = 0x16, - AC97_PCM_Out_Volume_Mute = 0x18, - AC97_Record_Select = 0x1A, - AC97_Record_Gain_Mute = 0x1C, - AC97_Record_Gain_Mic_Mute = 0x1E, - AC97_General_Purpose = 0x20, - AC97_3D_Control = 0x22, - AC97_AC_97_RESERVED = 0x24, - AC97_Powerdown_Ctrl_Stat = 0x26, - AC97_Extended_Audio_ID = 0x28, - AC97_Extended_Audio_Ctrl_Stat = 0x2A, - AC97_PCM_Front_DAC_Rate = 0x2C, - AC97_PCM_Surround_DAC_Rate = 0x2E, - AC97_PCM_LFE_DAC_Rate = 0x30, - AC97_PCM_LR_ADC_Rate = 0x32, - AC97_MIC_ADC_Rate = 0x34, - AC97_6Ch_Vol_C_LFE_Mute = 0x36, - AC97_6Ch_Vol_L_R_Surround_Mute = 0x38, - AC97_Vendor_Reserved = 0x58, - AC97_Sigmatel_Analog = 0x6c, /* We emulate a Sigmatel codec */ - AC97_Sigmatel_Dac2Invert = 0x6e, /* We emulate a Sigmatel codec */ - AC97_Vendor_ID1 = 0x7c, - AC97_Vendor_ID2 = 0x7e -}; +#include "ac97.h" #define SOFT_VOLUME #define SR_FIFOE 16 /* rwc */ @@ -121,11 +85,6 @@ enum { #define BD_IOC (1 << 31) #define BD_BUP (1 << 30) -#define EACS_VRA 1 -#define EACS_VRM 8 - -#define MUTE_SHIFT 15 - #define TYPE_AC97 "AC97" OBJECT_DECLARE_SIMPLE_TYPE(AC97LinkState, AC97) @@ -1295,7 +1254,7 @@ static const MemoryRegionOps ac97_io_nabm_ops = { static void ac97_on_reset(DeviceState *dev) { - AC97LinkState *s = container_of(dev, AC97LinkState, dev.qdev); + AC97LinkState *s = AC97(dev); reset_bm_regs(s, &s->bm_regs[0]); reset_bm_regs(s, &s->bm_regs[1]); diff --git a/hw/audio/ac97.h b/hw/audio/ac97.h new file mode 100644 index 0000000000..0358b56ff4 --- /dev/null +++ b/hw/audio/ac97.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2006 InnoTek Systemberatung GmbH + * + * This file is part of VirtualBox Open Source Edition (OSE), as + * available from http://www.virtualbox.org. This file is free software; + * you can redistribute it and/or modify it under the terms of the GNU + * General Public License as published by the Free Software Foundation, + * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE + * distribution. VirtualBox OSE is distributed in the hope that it will + * be useful, but WITHOUT ANY WARRANTY of any kind. + * + * If you received this file as part of a commercial VirtualBox + * distribution, then only the terms of your commercial VirtualBox + * license agreement apply instead of the previous paragraph. 
+ * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#ifndef AC97_H +#define AC97_H + +enum { + AC97_Reset = 0x00, + AC97_Master_Volume_Mute = 0x02, + AC97_Headphone_Volume_Mute = 0x04, + AC97_Master_Volume_Mono_Mute = 0x06, + AC97_Master_Tone_RL = 0x08, + AC97_PC_BEEP_Volume_Mute = 0x0A, + AC97_Phone_Volume_Mute = 0x0C, + AC97_Mic_Volume_Mute = 0x0E, + AC97_Line_In_Volume_Mute = 0x10, + AC97_CD_Volume_Mute = 0x12, + AC97_Video_Volume_Mute = 0x14, + AC97_Aux_Volume_Mute = 0x16, + AC97_PCM_Out_Volume_Mute = 0x18, + AC97_Record_Select = 0x1A, + AC97_Record_Gain_Mute = 0x1C, + AC97_Record_Gain_Mic_Mute = 0x1E, + AC97_General_Purpose = 0x20, + AC97_3D_Control = 0x22, + AC97_AC_97_RESERVED = 0x24, + AC97_Powerdown_Ctrl_Stat = 0x26, + AC97_Extended_Audio_ID = 0x28, + AC97_Extended_Audio_Ctrl_Stat = 0x2A, + AC97_PCM_Front_DAC_Rate = 0x2C, + AC97_PCM_Surround_DAC_Rate = 0x2E, + AC97_PCM_LFE_DAC_Rate = 0x30, + AC97_PCM_LR_ADC_Rate = 0x32, + AC97_MIC_ADC_Rate = 0x34, + AC97_6Ch_Vol_C_LFE_Mute = 0x36, + AC97_6Ch_Vol_L_R_Surround_Mute = 0x38, + AC97_Vendor_Reserved = 0x58, + AC97_Sigmatel_Analog = 0x6c, /* We emulate a Sigmatel codec */ + AC97_Sigmatel_Dac2Invert = 0x6e, /* We emulate a Sigmatel codec */ + AC97_Vendor_ID1 = 0x7c, + AC97_Vendor_ID2 = 0x7e +}; + +#define EACS_VRA 1 +#define EACS_VRM 8 + +#define MUTE_SHIFT 15 + +#endif /* AC97_H */ diff --git a/hw/audio/cs4231a.c b/hw/audio/cs4231a.c index 7f17a72a9c..5c6d643732 100644 --- a/hw/audio/cs4231a.c +++ b/hw/audio/cs4231a.c @@ -668,16 +668,17 @@ static void cs4231a_initfn (Object *obj) static void cs4231a_realizefn (DeviceState *dev, Error **errp) { ISADevice *d = ISA_DEVICE (dev); + ISABus *bus = isa_bus_from_device(d); CSState *s = CS4231A (dev); IsaDmaClass *k; - s->isa_dma = isa_get_dma(isa_bus_from_device(d), s->dma); + s->isa_dma = isa_bus_get_dma(bus, s->dma); if (!s->isa_dma) { error_setg(errp, "ISA controller does not support DMA"); return; } - s->pic = isa_get_irq(d, s->irq); + s->pic = isa_bus_get_irq(bus, s->irq); k = ISADMA_GET_CLASS(s->isa_dma); k->register_channel(s->isa_dma, s->dma, cs_dma_read, s); diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c index 54cc19a637..4f738a0ad8 100644 --- a/hw/audio/es1370.c +++ b/hw/audio/es1370.c @@ -256,6 +256,9 @@ static void print_sctl (uint32_t val) #define lwarn(...) 
#endif +#define TYPE_ES1370 "ES1370" +OBJECT_DECLARE_SIMPLE_TYPE(ES1370State, ES1370) + struct chan { uint32_t shift; uint32_t leftover; @@ -278,7 +281,6 @@ struct ES1370State { uint32_t codec; uint32_t sctl; }; -typedef struct ES1370State ES1370State; struct chan_bits { uint32_t ctl_en; @@ -292,9 +294,6 @@ struct chan_bits { uint32_t *old_freq, uint32_t *new_freq); }; -#define TYPE_ES1370 "ES1370" -OBJECT_DECLARE_SIMPLE_TYPE(ES1370State, ES1370) - static void es1370_dac1_calc_freq (ES1370State *s, uint32_t ctl, uint32_t *old_freq, uint32_t *new_freq); static void es1370_dac2_and_adc_calc_freq (ES1370State *s, uint32_t ctl, @@ -844,7 +843,8 @@ static const VMStateDescription vmstate_es1370 = { static void es1370_on_reset(DeviceState *dev) { - ES1370State *s = container_of(dev, ES1370State, dev.qdev); + ES1370State *s = ES1370(dev); + es1370_reset (s); } diff --git a/hw/audio/gus.c b/hw/audio/gus.c index 42f010b671..787345ce54 100644 --- a/hw/audio/gus.c +++ b/hw/audio/gus.c @@ -236,11 +236,12 @@ static const MemoryRegionPortio gus_portio_list2[] = { static void gus_realizefn (DeviceState *dev, Error **errp) { ISADevice *d = ISA_DEVICE(dev); + ISABus *bus = isa_bus_from_device(d); GUSState *s = GUS (dev); IsaDmaClass *k; struct audsettings as; - s->isa_dma = isa_get_dma(isa_bus_from_device(d), s->emu.gusdma); + s->isa_dma = isa_bus_get_dma(bus, s->emu.gusdma); if (!s->isa_dma) { error_setg(errp, "ISA controller does not support DMA"); return; @@ -282,7 +283,7 @@ static void gus_realizefn (DeviceState *dev, Error **errp) s->emu.himemaddr = s->himem; s->emu.gusdatapos = s->emu.himemaddr + 1024 * 1024 + 32; s->emu.opaque = s; - s->pic = isa_get_irq(d, s->emu.gusirq); + s->pic = isa_bus_get_irq(bus, s->emu.gusirq); AUD_set_active_out (s->voice, 1); } diff --git a/hw/audio/hda-codec.c b/hw/audio/hda-codec.c index feb8f9e2bb..c51d8ba617 100644 --- a/hw/audio/hda-codec.c +++ b/hw/audio/hda-codec.c @@ -145,7 +145,9 @@ static const char *fmt2name[] = { [ AUDIO_FORMAT_S32 ] = "PCM-S32", }; -typedef struct HDAAudioState HDAAudioState; +#define TYPE_HDA_AUDIO "hda-audio" +OBJECT_DECLARE_SIMPLE_TYPE(HDAAudioState, HDA_AUDIO) + typedef struct HDAAudioStream HDAAudioStream; struct HDAAudioStream { @@ -171,9 +173,6 @@ struct HDAAudioStream { int64_t buft_start; }; -#define TYPE_HDA_AUDIO "hda-audio" -OBJECT_DECLARE_SIMPLE_TYPE(HDAAudioState, HDA_AUDIO) - struct HDAAudioState { HDACodecDevice hda; const char *name; diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c index 2215386ddb..535ccccdc9 100644 --- a/hw/audio/sb16.c +++ b/hw/audio/sb16.c @@ -1398,17 +1398,18 @@ static void sb16_initfn (Object *obj) static void sb16_realizefn (DeviceState *dev, Error **errp) { ISADevice *isadev = ISA_DEVICE (dev); + ISABus *bus = isa_bus_from_device(isadev); SB16State *s = SB16 (dev); IsaDmaClass *k; - s->isa_hdma = isa_get_dma(isa_bus_from_device(isadev), s->hdma); - s->isa_dma = isa_get_dma(isa_bus_from_device(isadev), s->dma); + s->isa_hdma = isa_bus_get_dma(bus, s->hdma); + s->isa_dma = isa_bus_get_dma(bus, s->dma); if (!s->isa_dma || !s->isa_hdma) { error_setg(errp, "ISA controller does not support DMA"); return; } - s->pic = isa_get_irq(isadev, s->irq); + s->pic = isa_bus_get_irq(bus, s->irq); s->mixer_regs[0x80] = magic_of_irq (s->irq); s->mixer_regs[0x81] = (1 << s->dma) | (1 << s->hdma); diff --git a/hw/audio/trace-events b/hw/audio/trace-events index e0e71cd9b1..4dec48a4fd 100644 --- a/hw/audio/trace-events +++ b/hw/audio/trace-events @@ -11,3 +11,9 @@ hda_audio_running(const char *stream, int nr, bool running) 
"st %s, nr %d, run % hda_audio_format(const char *stream, int chan, const char *fmt, int freq) "st %s, %d x %s @ %d Hz" hda_audio_adjust(const char *stream, int pos) "st %s, pos %d" hda_audio_overrun(const char *stream) "st %s" + +#via-ac97.c +via_ac97_codec_write(uint8_t addr, uint16_t val) "0x%x <- 0x%x" +via_ac97_sgd_fetch(uint32_t curr, uint32_t addr, char stop, char eol, char flag, uint32_t len) "curr=0x%x addr=0x%x %c%c%c len=%d" +via_ac97_sgd_read(uint64_t addr, unsigned size, uint64_t val) "0x%"PRIx64" %d -> 0x%"PRIx64 +via_ac97_sgd_write(uint64_t addr, unsigned size, uint64_t val) "0x%"PRIx64" %d <- 0x%"PRIx64 diff --git a/hw/audio/via-ac97.c b/hw/audio/via-ac97.c index d1a856f63d..676254b7a4 100644 --- a/hw/audio/via-ac97.c +++ b/hw/audio/via-ac97.c @@ -1,39 +1,482 @@ /* * VIA south bridges sound support * + * Copyright (c) 2022-2023 BALATON Zoltan + * * This work is licensed under the GNU GPL license version 2 or later. */ /* - * TODO: This is entirely boiler plate just registering empty PCI devices - * with the right ID guests expect, functionality should be added here. + * TODO: This is only a basic implementation of one audio playback channel + * more functionality should be added here. */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "hw/isa/vt82c686.h" -#include "hw/pci/pci_device.h" +#include "ac97.h" +#include "trace.h" + +#define CLEN_IS_EOL(x) ((x)->clen & BIT(31)) +#define CLEN_IS_FLAG(x) ((x)->clen & BIT(30)) +#define CLEN_IS_STOP(x) ((x)->clen & BIT(29)) +#define CLEN_LEN(x) ((x)->clen & 0xffffff) + +#define STAT_ACTIVE BIT(7) +#define STAT_PAUSED BIT(6) +#define STAT_TRIG BIT(3) +#define STAT_STOP BIT(2) +#define STAT_EOL BIT(1) +#define STAT_FLAG BIT(0) + +#define CNTL_START BIT(7) +#define CNTL_TERM BIT(6) +#define CNTL_PAUSE BIT(3) + +static void open_voice_out(ViaAC97State *s); + +static uint16_t codec_rates[] = { 8000, 11025, 16000, 22050, 32000, 44100, + 48000 }; + +#define CODEC_REG(s, o) ((s)->codec_regs[(o) / 2]) +#define CODEC_VOL(vol, mask) ((255 * ((vol) & mask)) / mask) + +static void codec_volume_set_out(ViaAC97State *s) +{ + int lvol, rvol, mute; + + lvol = 255 - CODEC_VOL(CODEC_REG(s, AC97_Master_Volume_Mute) >> 8, 0x1f); + lvol *= 255 - CODEC_VOL(CODEC_REG(s, AC97_PCM_Out_Volume_Mute) >> 8, 0x1f); + lvol /= 255; + rvol = 255 - CODEC_VOL(CODEC_REG(s, AC97_Master_Volume_Mute), 0x1f); + rvol *= 255 - CODEC_VOL(CODEC_REG(s, AC97_PCM_Out_Volume_Mute), 0x1f); + rvol /= 255; + mute = CODEC_REG(s, AC97_Master_Volume_Mute) >> MUTE_SHIFT; + mute |= CODEC_REG(s, AC97_PCM_Out_Volume_Mute) >> MUTE_SHIFT; + AUD_set_volume_out(s->vo, mute, lvol, rvol); +} + +static void codec_reset(ViaAC97State *s) +{ + memset(s->codec_regs, 0, sizeof(s->codec_regs)); + CODEC_REG(s, AC97_Reset) = 0x6a90; + CODEC_REG(s, AC97_Master_Volume_Mute) = 0x8000; + CODEC_REG(s, AC97_Headphone_Volume_Mute) = 0x8000; + CODEC_REG(s, AC97_Master_Volume_Mono_Mute) = 0x8000; + CODEC_REG(s, AC97_Phone_Volume_Mute) = 0x8008; + CODEC_REG(s, AC97_Mic_Volume_Mute) = 0x8008; + CODEC_REG(s, AC97_Line_In_Volume_Mute) = 0x8808; + CODEC_REG(s, AC97_CD_Volume_Mute) = 0x8808; + CODEC_REG(s, AC97_Video_Volume_Mute) = 0x8808; + CODEC_REG(s, AC97_Aux_Volume_Mute) = 0x8808; + CODEC_REG(s, AC97_PCM_Out_Volume_Mute) = 0x8808; + CODEC_REG(s, AC97_Record_Gain_Mute) = 0x8000; + CODEC_REG(s, AC97_Powerdown_Ctrl_Stat) = 0x000f; + CODEC_REG(s, AC97_Extended_Audio_ID) = 0x0a05; + CODEC_REG(s, AC97_Extended_Audio_Ctrl_Stat) = 0x0400; + CODEC_REG(s, AC97_PCM_Front_DAC_Rate) = 48000; + CODEC_REG(s, 
AC97_PCM_LR_ADC_Rate) = 48000; + /* Sigmatel 9766 (STAC9766) */ + CODEC_REG(s, AC97_Vendor_ID1) = 0x8384; + CODEC_REG(s, AC97_Vendor_ID2) = 0x7666; +} + +static uint16_t codec_read(ViaAC97State *s, uint8_t addr) +{ + return CODEC_REG(s, addr); +} + +static void codec_write(ViaAC97State *s, uint8_t addr, uint16_t val) +{ + trace_via_ac97_codec_write(addr, val); + switch (addr) { + case AC97_Reset: + codec_reset(s); + return; + case AC97_Master_Volume_Mute: + case AC97_PCM_Out_Volume_Mute: + if (addr == AC97_Master_Volume_Mute) { + if (val & BIT(13)) { + val |= 0x1f00; + } + if (val & BIT(5)) { + val |= 0x1f; + } + } + CODEC_REG(s, addr) = val & 0x9f1f; + codec_volume_set_out(s); + return; + case AC97_Extended_Audio_Ctrl_Stat: + CODEC_REG(s, addr) &= ~EACS_VRA; + CODEC_REG(s, addr) |= val & EACS_VRA; + if (!(val & EACS_VRA)) { + CODEC_REG(s, AC97_PCM_Front_DAC_Rate) = 48000; + CODEC_REG(s, AC97_PCM_LR_ADC_Rate) = 48000; + open_voice_out(s); + } + return; + case AC97_PCM_Front_DAC_Rate: + case AC97_PCM_LR_ADC_Rate: + if (CODEC_REG(s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRA) { + int i; + uint16_t rate = val; + + for (i = 0; i < ARRAY_SIZE(codec_rates) - 1; i++) { + if (rate < codec_rates[i] + + (codec_rates[i + 1] - codec_rates[i]) / 2) { + rate = codec_rates[i]; + break; + } + } + if (rate > 48000) { + rate = 48000; + } + CODEC_REG(s, addr) = rate; + open_voice_out(s); + } + return; + case AC97_Powerdown_Ctrl_Stat: + CODEC_REG(s, addr) = (val & 0xff00) | (CODEC_REG(s, addr) & 0xff); + return; + case AC97_Extended_Audio_ID: + case AC97_Vendor_ID1: + case AC97_Vendor_ID2: + /* Read only registers */ + return; + default: + qemu_log_mask(LOG_UNIMP, + "via-ac97: Unimplemented codec register 0x%x\n", addr); + CODEC_REG(s, addr) = val; + } +} + +static void fetch_sgd(ViaAC97SGDChannel *c, PCIDevice *d) +{ + uint32_t b[2]; + + if (c->curr < c->base) { + c->curr = c->base; + } + if (unlikely(pci_dma_read(d, c->curr, b, sizeof(b)) != MEMTX_OK)) { + qemu_log_mask(LOG_GUEST_ERROR, + "via-ac97: DMA error reading SGD table\n"); + return; + } + c->addr = le32_to_cpu(b[0]); + c->clen = le32_to_cpu(b[1]); + trace_via_ac97_sgd_fetch(c->curr, c->addr, CLEN_IS_STOP(c) ? 'S' : '-', + CLEN_IS_EOL(c) ? 'E' : '-', + CLEN_IS_FLAG(c) ? 
'F' : '-', CLEN_LEN(c)); +} + +static void out_cb(void *opaque, int avail) +{ + ViaAC97State *s = opaque; + ViaAC97SGDChannel *c = &s->aur; + int temp, to_copy, copied; + bool stop = false; + uint8_t tmpbuf[4096]; + + if (c->stat & STAT_PAUSED) { + return; + } + c->stat |= STAT_ACTIVE; + while (avail && !stop) { + if (!c->clen) { + fetch_sgd(c, &s->dev); + } + temp = MIN(CLEN_LEN(c), avail); + while (temp) { + to_copy = MIN(temp, sizeof(tmpbuf)); + pci_dma_read(&s->dev, c->addr, tmpbuf, to_copy); + copied = AUD_write(s->vo, tmpbuf, to_copy); + if (!copied) { + stop = true; + break; + } + temp -= copied; + avail -= copied; + c->addr += copied; + c->clen -= copied; + } + if (CLEN_LEN(c) == 0) { + c->curr += 8; + if (CLEN_IS_EOL(c)) { + c->stat |= STAT_EOL; + if (c->type & CNTL_START) { + c->curr = c->base; + c->stat |= STAT_PAUSED; + } else { + c->stat &= ~STAT_ACTIVE; + AUD_set_active_out(s->vo, 0); + } + if (c->type & STAT_EOL) { + pci_set_irq(&s->dev, 1); + } + } + if (CLEN_IS_FLAG(c)) { + c->stat |= STAT_FLAG; + c->stat |= STAT_PAUSED; + if (c->type & STAT_FLAG) { + pci_set_irq(&s->dev, 1); + } + } + if (CLEN_IS_STOP(c)) { + c->stat |= STAT_STOP; + c->stat |= STAT_PAUSED; + } + c->clen = 0; + stop = true; + } + } +} + +static void open_voice_out(ViaAC97State *s) +{ + struct audsettings as = { + .freq = CODEC_REG(s, AC97_PCM_Front_DAC_Rate), + .nchannels = s->aur.type & BIT(4) ? 2 : 1, + .fmt = s->aur.type & BIT(5) ? AUDIO_FORMAT_S16 : AUDIO_FORMAT_S8, + .endianness = 0, + }; + s->vo = AUD_open_out(&s->card, s->vo, "via-ac97.out", s, out_cb, &as); +} + +static uint64_t sgd_read(void *opaque, hwaddr addr, unsigned size) +{ + ViaAC97State *s = opaque; + uint64_t val = 0; + + switch (addr) { + case 0: + val = s->aur.stat; + if (s->aur.type & CNTL_START) { + val |= STAT_TRIG; + } + break; + case 1: + val = s->aur.stat & STAT_PAUSED ? 
BIT(3) : 0; + break; + case 2: + val = s->aur.type; + break; + case 4: + val = s->aur.curr; + break; + case 0xc: + val = CLEN_LEN(&s->aur); + break; + case 0x10: + /* silence unimplemented log message that happens at every IRQ */ + break; + case 0x80: + val = s->ac97_cmd; + break; + case 0x84: + val = s->aur.stat & STAT_FLAG; + if (s->aur.stat & STAT_EOL) { + val |= BIT(4); + } + if (s->aur.stat & STAT_STOP) { + val |= BIT(8); + } + if (s->aur.stat & STAT_ACTIVE) { + val |= BIT(12); + } + break; + default: + qemu_log_mask(LOG_UNIMP, "via-ac97: Unimplemented register read 0x%" + HWADDR_PRIx"\n", addr); + } + trace_via_ac97_sgd_read(addr, size, val); + return val; +} + +static void sgd_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +{ + ViaAC97State *s = opaque; + + trace_via_ac97_sgd_write(addr, size, val); + switch (addr) { + case 0: + if (val & STAT_STOP) { + s->aur.stat &= ~STAT_PAUSED; + } + if (val & STAT_EOL) { + s->aur.stat &= ~(STAT_EOL | STAT_PAUSED); + if (s->aur.type & STAT_EOL) { + pci_set_irq(&s->dev, 0); + } + } + if (val & STAT_FLAG) { + s->aur.stat &= ~(STAT_FLAG | STAT_PAUSED); + if (s->aur.type & STAT_FLAG) { + pci_set_irq(&s->dev, 0); + } + } + break; + case 1: + if (val & CNTL_START) { + AUD_set_active_out(s->vo, 1); + s->aur.stat = STAT_ACTIVE; + } + if (val & CNTL_TERM) { + AUD_set_active_out(s->vo, 0); + s->aur.stat &= ~(STAT_ACTIVE | STAT_PAUSED); + s->aur.clen = 0; + } + if (val & CNTL_PAUSE) { + AUD_set_active_out(s->vo, 0); + s->aur.stat &= ~STAT_ACTIVE; + s->aur.stat |= STAT_PAUSED; + } else if (!(val & CNTL_PAUSE) && (s->aur.stat & STAT_PAUSED)) { + AUD_set_active_out(s->vo, 1); + s->aur.stat |= STAT_ACTIVE; + s->aur.stat &= ~STAT_PAUSED; + } + break; + case 2: + { + uint32_t oldval = s->aur.type; + s->aur.type = val; + if ((oldval & 0x30) != (val & 0x30)) { + open_voice_out(s); + } + break; + } + case 4: + s->aur.base = val & ~1ULL; + s->aur.curr = s->aur.base; + break; + case 0x80: + if (val >> 30) { + /* we only have primary codec */ + break; + } + if (val & BIT(23)) { /* read reg */ + s->ac97_cmd = val & 0xc0ff0000ULL; + s->ac97_cmd |= codec_read(s, (val >> 16) & 0x7f); + s->ac97_cmd |= BIT(25); /* data valid */ + } else { + s->ac97_cmd = val & 0xc0ffffffULL; + codec_write(s, (val >> 16) & 0x7f, val); + } + break; + case 0xc: + case 0x84: + /* Read only */ + break; + default: + qemu_log_mask(LOG_UNIMP, "via-ac97: Unimplemented register write 0x%" + HWADDR_PRIx"\n", addr); + } +} + +static const MemoryRegionOps sgd_ops = { + .read = sgd_read, + .write = sgd_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t fm_read(void *opaque, hwaddr addr, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "%s: 0x%"HWADDR_PRIx" %d\n", __func__, addr, size); + return 0; +} + +static void fm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "%s: 0x%"HWADDR_PRIx" %d <= 0x%"PRIX64"\n", + __func__, addr, size, val); +} + +static const MemoryRegionOps fm_ops = { + .read = fm_read, + .write = fm_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t midi_read(void *opaque, hwaddr addr, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "%s: 0x%"HWADDR_PRIx" %d\n", __func__, addr, size); + return 0; +} + +static void midi_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "%s: 0x%"HWADDR_PRIx" %d <= 0x%"PRIX64"\n", + __func__, addr, size, val); +} + +static const MemoryRegionOps midi_ops = { + .read = midi_read, + .write = midi_write, + .endianness = 
DEVICE_LITTLE_ENDIAN, +}; + +static void via_ac97_reset(DeviceState *dev) +{ + ViaAC97State *s = VIA_AC97(dev); + + codec_reset(s); +} static void via_ac97_realize(PCIDevice *pci_dev, Error **errp) { - pci_set_word(pci_dev->config + PCI_COMMAND, - PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY); + ViaAC97State *s = VIA_AC97(pci_dev); + Object *o = OBJECT(s); + + /* + * Command register Bus Master bit is documented to be fixed at 0 but it's + * needed for PCI DMA to work in QEMU. The pegasos2 firmware writes 0 here + * and the AmigaOS driver writes 1 only enabling IO bit which works on + * real hardware. So set it here and fix it to 1 to allow DMA. + */ + pci_set_word(pci_dev->config + PCI_COMMAND, PCI_COMMAND_MASTER); + pci_set_word(pci_dev->wmask + PCI_COMMAND, PCI_COMMAND_IO); pci_set_word(pci_dev->config + PCI_STATUS, PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_MEDIUM); pci_set_long(pci_dev->config + PCI_INTERRUPT_PIN, 0x03); + pci_set_byte(pci_dev->config + 0x40, 1); /* codec ready */ + + memory_region_init_io(&s->sgd, o, &sgd_ops, s, "via-ac97.sgd", 256); + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->sgd); + memory_region_init_io(&s->fm, o, &fm_ops, s, "via-ac97.fm", 4); + pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->fm); + memory_region_init_io(&s->midi, o, &midi_ops, s, "via-ac97.midi", 4); + pci_register_bar(pci_dev, 2, PCI_BASE_ADDRESS_SPACE_IO, &s->midi); + + AUD_register_card ("via-ac97", &s->card); } +static void via_ac97_exit(PCIDevice *dev) +{ + ViaAC97State *s = VIA_AC97(dev); + + AUD_close_out(&s->card, s->vo); + AUD_remove_card(&s->card); +} + +static Property via_ac97_properties[] = { + DEFINE_AUDIO_PROPERTIES(ViaAC97State, card), + DEFINE_PROP_END_OF_LIST(), +}; + static void via_ac97_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->realize = via_ac97_realize; + k->exit = via_ac97_exit; k->vendor_id = PCI_VENDOR_ID_VIA; k->device_id = PCI_DEVICE_ID_VIA_AC97; k->revision = 0x50; k->class_id = PCI_CLASS_MULTIMEDIA_AUDIO; + device_class_set_props(dc, via_ac97_properties); set_bit(DEVICE_CATEGORY_SOUND, dc->categories); dc->desc = "VIA AC97"; + dc->reset = via_ac97_reset; /* Reason: Part of a south bridge chip */ dc->user_creatable = false; } @@ -41,7 +484,7 @@ static void via_ac97_class_init(ObjectClass *klass, void *data) static const TypeInfo via_ac97_info = { .name = TYPE_VIA_AC97, .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(PCIDevice), + .instance_size = sizeof(ViaAC97State), .class_init = via_ac97_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, diff --git a/hw/block/block.c b/hw/block/block.c index af0710e477..9f52ee6e72 100644 --- a/hw/block/block.c +++ b/hw/block/block.c @@ -39,8 +39,7 @@ static int blk_pread_nonzeroes(BlockBackend *blk, hwaddr size, void *buf) return ret; } if (!(ret & BDRV_BLOCK_ZERO)) { - ret = bdrv_pread(bs->file, offset, bytes, - (uint8_t *) buf + offset, 0); + ret = blk_pread(blk, offset, bytes, (uint8_t *) buf + offset, 0); if (ret < 0) { return ret; } diff --git a/hw/block/dataplane/meson.build b/hw/block/dataplane/meson.build index 12c6a264f1..78d7ac1a11 100644 --- a/hw/block/dataplane/meson.build +++ b/hw/block/dataplane/meson.build @@ -1,2 +1,2 @@ specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c')) -specific_ss.add(when: 'CONFIG_XEN', if_true: files('xen-block.c')) +specific_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-block.c')) diff --git 
a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c index b28d81737e..b90456c08c 100644 --- a/hw/block/dataplane/virtio-blk.c +++ b/hw/block/dataplane/virtio-blk.c @@ -127,7 +127,8 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, } else { s->ctx = qemu_get_aio_context(); } - s->bh = aio_bh_new(s->ctx, notify_guest_bh, s); + s->bh = aio_bh_new_guarded(s->ctx, notify_guest_bh, s, + &DEVICE(vdev)->mem_reentrancy_guard); s->batch_notify_vqs = bitmap_new(conf->num_queues); *dataplane = s; @@ -245,13 +246,15 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev) } /* Get this show started by hooking up our callbacks */ - aio_context_acquire(s->ctx); - for (i = 0; i < nvqs; i++) { - VirtQueue *vq = virtio_get_queue(s->vdev, i); + if (!blk_in_drain(s->conf->conf.blk)) { + aio_context_acquire(s->ctx); + for (i = 0; i < nvqs; i++) { + VirtQueue *vq = virtio_get_queue(s->vdev, i); - virtio_queue_aio_attach_host_notifier(vq, s->ctx); + virtio_queue_aio_attach_host_notifier(vq, s->ctx); + } + aio_context_release(s->ctx); } - aio_context_release(s->ctx); return 0; fail_aio_context: @@ -286,8 +289,15 @@ static void virtio_blk_data_plane_stop_bh(void *opaque) for (i = 0; i < s->conf->num_queues; i++) { VirtQueue *vq = virtio_get_queue(s->vdev, i); + EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq); virtio_queue_aio_detach_host_notifier(vq, s->ctx); + + /* + * Test and clear notifier after disabling event, in case poll callback + * didn't have time to run. + */ + virtio_queue_host_notifier_read(host_notifier); } } @@ -314,8 +324,11 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev) s->stopping = true; trace_virtio_blk_data_plane_stop(s); + if (!blk_in_drain(s->conf->conf.blk)) { + aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s); + } + aio_context_acquire(s->ctx); - aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s); /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */ blk_drain(s->conf->conf.blk); diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c index 2785b9e849..2597f38805 100644 --- a/hw/block/dataplane/xen-block.c +++ b/hw/block/dataplane/xen-block.c @@ -23,8 +23,9 @@ #include "qemu/main-loop.h" #include "qemu/memalign.h" #include "qapi/error.h" -#include "hw/xen/xen_common.h" +#include "hw/xen/xen.h" #include "hw/block/xen_blkif.h" +#include "hw/xen/interface/io/ring.h" #include "sysemu/block-backend.h" #include "sysemu/iothread.h" #include "xen-block.h" @@ -101,9 +102,9 @@ static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane) * re-use requests, allocate the memory once here. It will be freed * xen_block_dataplane_destroy() when the request list is freed. 
*/ - request->buf = qemu_memalign(XC_PAGE_SIZE, + request->buf = qemu_memalign(XEN_PAGE_SIZE, BLKIF_MAX_SEGMENTS_PER_REQUEST * - XC_PAGE_SIZE); + XEN_PAGE_SIZE); dataplane->requests_total++; qemu_iovec_init(&request->v, 1); } else { @@ -185,7 +186,7 @@ static int xen_block_parse_request(XenBlockRequest *request) goto err; } if (request->req.seg[i].last_sect * dataplane->sector_size >= - XC_PAGE_SIZE) { + XEN_PAGE_SIZE) { error_report("error: page crossing"); goto err; } @@ -632,8 +633,9 @@ XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev, } else { dataplane->ctx = qemu_get_aio_context(); } - dataplane->bh = aio_bh_new(dataplane->ctx, xen_block_dataplane_bh, - dataplane); + dataplane->bh = aio_bh_new_guarded(dataplane->ctx, xen_block_dataplane_bh, + dataplane, + &DEVICE(xendev)->mem_reentrancy_guard); return dataplane; } @@ -662,6 +664,30 @@ void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane) g_free(dataplane); } +void xen_block_dataplane_detach(XenBlockDataPlane *dataplane) +{ + if (!dataplane || !dataplane->event_channel) { + return; + } + + /* Only reason for failure is a NULL channel */ + xen_device_set_event_channel_context(dataplane->xendev, + dataplane->event_channel, + NULL, &error_abort); +} + +void xen_block_dataplane_attach(XenBlockDataPlane *dataplane) +{ + if (!dataplane || !dataplane->event_channel) { + return; + } + + /* Only reason for failure is a NULL channel */ + xen_device_set_event_channel_context(dataplane->xendev, + dataplane->event_channel, + dataplane->ctx, &error_abort); +} + void xen_block_dataplane_stop(XenBlockDataPlane *dataplane) { XenDevice *xendev; @@ -672,13 +698,11 @@ void xen_block_dataplane_stop(XenBlockDataPlane *dataplane) xendev = dataplane->xendev; - aio_context_acquire(dataplane->ctx); - if (dataplane->event_channel) { - /* Only reason for failure is a NULL channel */ - xen_device_set_event_channel_context(xendev, dataplane->event_channel, - qemu_get_aio_context(), - &error_abort); + if (!blk_in_drain(dataplane->blk)) { + xen_block_dataplane_detach(dataplane); } + + aio_context_acquire(dataplane->ctx); /* Xen doesn't have multiple users for nodes, so this can't fail */ blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort); aio_context_release(dataplane->ctx); @@ -705,6 +729,7 @@ void xen_block_dataplane_stop(XenBlockDataPlane *dataplane) Error *local_err = NULL; xen_device_unmap_grant_refs(xendev, dataplane->sring, + dataplane->ring_ref, dataplane->nr_ring_ref, &local_err); dataplane->sring = NULL; @@ -739,7 +764,7 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane, dataplane->protocol = protocol; - ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref; + ring_size = XEN_PAGE_SIZE * dataplane->nr_ring_ref; switch (dataplane->protocol) { case BLKIF_PROTOCOL_NATIVE: { @@ -816,11 +841,9 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane, blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL); aio_context_release(old_context); - /* Only reason for failure is a NULL channel */ - aio_context_acquire(dataplane->ctx); - xen_device_set_event_channel_context(xendev, dataplane->event_channel, - dataplane->ctx, &error_abort); - aio_context_release(dataplane->ctx); + if (!blk_in_drain(dataplane->blk)) { + xen_block_dataplane_attach(dataplane); + } return; diff --git a/hw/block/dataplane/xen-block.h b/hw/block/dataplane/xen-block.h index 76dcd51c3d..7b8e9df09f 100644 --- a/hw/block/dataplane/xen-block.h +++ b/hw/block/dataplane/xen-block.h @@ -26,5 +26,7 @@ void 
xen_block_dataplane_start(XenBlockDataPlane *dataplane, unsigned int protocol, Error **errp); void xen_block_dataplane_stop(XenBlockDataPlane *dataplane); +void xen_block_dataplane_attach(XenBlockDataPlane *dataplane); +void xen_block_dataplane_detach(XenBlockDataPlane *dataplane); #endif /* HW_BLOCK_DATAPLANE_XEN_BLOCK_H */ diff --git a/hw/block/fdc-isa.c b/hw/block/fdc-isa.c index fee1ca68a8..7ec075e470 100644 --- a/hw/block/fdc-isa.c +++ b/hw/block/fdc-isa.c @@ -86,6 +86,7 @@ static const MemoryRegionPortio fdc_portio_list[] = { static void isabus_fdc_realize(DeviceState *dev, Error **errp) { ISADevice *isadev = ISA_DEVICE(dev); + ISABus *bus = isa_bus_from_device(isadev); FDCtrlISABus *isa = ISA_FDC(dev); FDCtrl *fdctrl = &isa->state; Error *err = NULL; @@ -94,11 +95,11 @@ static void isabus_fdc_realize(DeviceState *dev, Error **errp) isa->iobase, fdc_portio_list, fdctrl, "fdc"); - fdctrl->irq = isa_get_irq(isadev, isa->irq); + fdctrl->irq = isa_bus_get_irq(bus, isa->irq); fdctrl->dma_chann = isa->dma; if (fdctrl->dma_chann != -1) { IsaDmaClass *k; - fdctrl->dma = isa_get_dma(isa_bus_from_device(isadev), isa->dma); + fdctrl->dma = isa_bus_get_dma(bus, isa->dma); if (!fdctrl->dma) { error_setg(errp, "ISA controller does not support DMA"); return; diff --git a/hw/block/fdc.c b/hw/block/fdc.c index 64ae4a6899..d7cc4d3ec1 100644 --- a/hw/block/fdc.c +++ b/hw/block/fdc.c @@ -601,8 +601,8 @@ enum { }; enum { - FD_STATE_MULTI = 0x01, /* multi track flag */ - FD_STATE_FORMAT = 0x02, /* format flag */ + FD_STATE_MULTI = 0x01, /* multi track flag */ + FD_STATE_FORMAT = 0x02, /* format flag */ }; enum { diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c index 802d2eb021..dc5ffbc4ff 100644 --- a/hw/block/m25p80.c +++ b/hw/block/m25p80.c @@ -24,6 +24,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "sysemu/block-backend.h" +#include "hw/block/block.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "hw/ssi/ssi.h" @@ -1615,8 +1616,7 @@ static void m25p80_realize(SSIPeripheral *ss, Error **errp) trace_m25p80_binding(s); s->storage = blk_blockalign(s->blk, s->size); - if (blk_pread(s->blk, 0, s->size, s->storage, 0) < 0) { - error_setg(errp, "failed to read the initial flash content"); + if (!blk_check_size_and_read_all(s->blk, s->storage, s->size, errp)) { return; } } else { diff --git a/hw/block/meson.build b/hw/block/meson.build index b434d5654c..cc2a75cc50 100644 --- a/hw/block/meson.build +++ b/hw/block/meson.build @@ -14,7 +14,7 @@ softmmu_ss.add(when: 'CONFIG_PFLASH_CFI02', if_true: files('pflash_cfi02.c')) softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80.c')) softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80_sfdp.c')) softmmu_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c')) -softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen-block.c')) +softmmu_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-block.c')) softmmu_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c')) specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c', 'virtio-blk-common.c')) diff --git a/hw/block/nand.c b/hw/block/nand.c index 1aee1cb2b1..9c1b89cfa6 100644 --- a/hw/block/nand.c +++ b/hw/block/nand.c @@ -30,33 +30,33 @@ #include "qemu/module.h" #include "qom/object.h" -# define NAND_CMD_READ0 0x00 -# define NAND_CMD_READ1 0x01 -# define NAND_CMD_READ2 0x50 -# define NAND_CMD_LPREAD2 0x30 -# define NAND_CMD_NOSERIALREAD2 0x35 -# define NAND_CMD_RANDOMREAD1 0x05 -# define NAND_CMD_RANDOMREAD2 0xe0 -# define 
NAND_CMD_READID 0x90 -# define NAND_CMD_RESET 0xff -# define NAND_CMD_PAGEPROGRAM1 0x80 -# define NAND_CMD_PAGEPROGRAM2 0x10 -# define NAND_CMD_CACHEPROGRAM2 0x15 -# define NAND_CMD_BLOCKERASE1 0x60 -# define NAND_CMD_BLOCKERASE2 0xd0 -# define NAND_CMD_READSTATUS 0x70 -# define NAND_CMD_COPYBACKPRG1 0x85 +# define NAND_CMD_READ0 0x00 +# define NAND_CMD_READ1 0x01 +# define NAND_CMD_READ2 0x50 +# define NAND_CMD_LPREAD2 0x30 +# define NAND_CMD_NOSERIALREAD2 0x35 +# define NAND_CMD_RANDOMREAD1 0x05 +# define NAND_CMD_RANDOMREAD2 0xe0 +# define NAND_CMD_READID 0x90 +# define NAND_CMD_RESET 0xff +# define NAND_CMD_PAGEPROGRAM1 0x80 +# define NAND_CMD_PAGEPROGRAM2 0x10 +# define NAND_CMD_CACHEPROGRAM2 0x15 +# define NAND_CMD_BLOCKERASE1 0x60 +# define NAND_CMD_BLOCKERASE2 0xd0 +# define NAND_CMD_READSTATUS 0x70 +# define NAND_CMD_COPYBACKPRG1 0x85 -# define NAND_IOSTATUS_ERROR (1 << 0) -# define NAND_IOSTATUS_PLANE0 (1 << 1) -# define NAND_IOSTATUS_PLANE1 (1 << 2) -# define NAND_IOSTATUS_PLANE2 (1 << 3) -# define NAND_IOSTATUS_PLANE3 (1 << 4) +# define NAND_IOSTATUS_ERROR (1 << 0) +# define NAND_IOSTATUS_PLANE0 (1 << 1) +# define NAND_IOSTATUS_PLANE1 (1 << 2) +# define NAND_IOSTATUS_PLANE2 (1 << 3) +# define NAND_IOSTATUS_PLANE3 (1 << 4) # define NAND_IOSTATUS_READY (1 << 6) -# define NAND_IOSTATUS_UNPROTCT (1 << 7) +# define NAND_IOSTATUS_UNPROTCT (1 << 7) -# define MAX_PAGE 0x800 -# define MAX_OOB 0x40 +# define MAX_PAGE 0x800 +# define MAX_OOB 0x40 typedef struct NANDFlashState NANDFlashState; struct NANDFlashState { @@ -102,40 +102,40 @@ static void mem_and(uint8_t *dest, const uint8_t *src, size_t n) } } -# define NAND_NO_AUTOINCR 0x00000001 -# define NAND_BUSWIDTH_16 0x00000002 -# define NAND_NO_PADDING 0x00000004 -# define NAND_CACHEPRG 0x00000008 -# define NAND_COPYBACK 0x00000010 -# define NAND_IS_AND 0x00000020 -# define NAND_4PAGE_ARRAY 0x00000040 -# define NAND_NO_READRDY 0x00000100 -# define NAND_SAMSUNG_LP (NAND_NO_PADDING | NAND_COPYBACK) +# define NAND_NO_AUTOINCR 0x00000001 +# define NAND_BUSWIDTH_16 0x00000002 +# define NAND_NO_PADDING 0x00000004 +# define NAND_CACHEPRG 0x00000008 +# define NAND_COPYBACK 0x00000010 +# define NAND_IS_AND 0x00000020 +# define NAND_4PAGE_ARRAY 0x00000040 +# define NAND_NO_READRDY 0x00000100 +# define NAND_SAMSUNG_LP (NAND_NO_PADDING | NAND_COPYBACK) # define NAND_IO -# define PAGE(addr) ((addr) >> ADDR_SHIFT) -# define PAGE_START(page) (PAGE(page) * (NAND_PAGE_SIZE + OOB_SIZE)) -# define PAGE_MASK ((1 << ADDR_SHIFT) - 1) -# define OOB_SHIFT (PAGE_SHIFT - 5) -# define OOB_SIZE (1 << OOB_SHIFT) -# define SECTOR(addr) ((addr) >> (9 + ADDR_SHIFT - PAGE_SHIFT)) -# define SECTOR_OFFSET(addr) ((addr) & ((511 >> PAGE_SHIFT) << 8)) +# define PAGE(addr) ((addr) >> ADDR_SHIFT) +# define PAGE_START(page) (PAGE(page) * (NAND_PAGE_SIZE + OOB_SIZE)) +# define PAGE_MASK ((1 << ADDR_SHIFT) - 1) +# define OOB_SHIFT (PAGE_SHIFT - 5) +# define OOB_SIZE (1 << OOB_SHIFT) +# define SECTOR(addr) ((addr) >> (9 + ADDR_SHIFT - PAGE_SHIFT)) +# define SECTOR_OFFSET(addr) ((addr) & ((511 >> PAGE_SHIFT) << 8)) -# define NAND_PAGE_SIZE 256 -# define PAGE_SHIFT 8 -# define PAGE_SECTORS 1 -# define ADDR_SHIFT 8 +# define NAND_PAGE_SIZE 256 +# define PAGE_SHIFT 8 +# define PAGE_SECTORS 1 +# define ADDR_SHIFT 8 # include "nand.c" -# define NAND_PAGE_SIZE 512 -# define PAGE_SHIFT 9 -# define PAGE_SECTORS 1 -# define ADDR_SHIFT 8 +# define NAND_PAGE_SIZE 512 +# define PAGE_SHIFT 9 +# define PAGE_SECTORS 1 +# define ADDR_SHIFT 8 # include "nand.c" -# define NAND_PAGE_SIZE 2048 -# 
define PAGE_SHIFT 11 -# define PAGE_SECTORS 4 -# define ADDR_SHIFT 16 +# define NAND_PAGE_SIZE 2048 +# define PAGE_SHIFT 11 +# define PAGE_SECTORS 4 +# define ADDR_SHIFT 16 # include "nand.c" /* Information based on Linux drivers/mtd/nand/raw/nand_ids.c */ @@ -148,79 +148,79 @@ static const struct { } nand_flash_ids[0x100] = { [0 ... 0xff] = { 0 }, - [0x6b] = { 4, 8, 9, 4, 0 }, - [0xe3] = { 4, 8, 9, 4, 0 }, - [0xe5] = { 4, 8, 9, 4, 0 }, - [0xd6] = { 8, 8, 9, 4, 0 }, - [0xe6] = { 8, 8, 9, 4, 0 }, + [0x6b] = { 4, 8, 9, 4, 0 }, + [0xe3] = { 4, 8, 9, 4, 0 }, + [0xe5] = { 4, 8, 9, 4, 0 }, + [0xd6] = { 8, 8, 9, 4, 0 }, + [0xe6] = { 8, 8, 9, 4, 0 }, - [0x33] = { 16, 8, 9, 5, 0 }, - [0x73] = { 16, 8, 9, 5, 0 }, - [0x43] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x53] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x33] = { 16, 8, 9, 5, 0 }, + [0x73] = { 16, 8, 9, 5, 0 }, + [0x43] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x53] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x35] = { 32, 8, 9, 5, 0 }, - [0x75] = { 32, 8, 9, 5, 0 }, - [0x45] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x55] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x35] = { 32, 8, 9, 5, 0 }, + [0x75] = { 32, 8, 9, 5, 0 }, + [0x45] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x55] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x36] = { 64, 8, 9, 5, 0 }, - [0x76] = { 64, 8, 9, 5, 0 }, - [0x46] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x56] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x36] = { 64, 8, 9, 5, 0 }, + [0x76] = { 64, 8, 9, 5, 0 }, + [0x46] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x56] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x78] = { 128, 8, 9, 5, 0 }, - [0x39] = { 128, 8, 9, 5, 0 }, - [0x79] = { 128, 8, 9, 5, 0 }, - [0x72] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x49] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x74] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x59] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x78] = { 128, 8, 9, 5, 0 }, + [0x39] = { 128, 8, 9, 5, 0 }, + [0x79] = { 128, 8, 9, 5, 0 }, + [0x72] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x49] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x74] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x59] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x71] = { 256, 8, 9, 5, 0 }, + [0x71] = { 256, 8, 9, 5, 0 }, /* * These are the new chips with large page size. 
The pagesize and the * erasesize is determined from the extended id bytes */ -# define LP_OPTIONS (NAND_SAMSUNG_LP | NAND_NO_READRDY | NAND_NO_AUTOINCR) -# define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) +# define LP_OPTIONS (NAND_SAMSUNG_LP | NAND_NO_READRDY | NAND_NO_AUTOINCR) +# define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) /* 512 Megabit */ - [0xa2] = { 64, 8, 0, 0, LP_OPTIONS }, - [0xf2] = { 64, 8, 0, 0, LP_OPTIONS }, - [0xb2] = { 64, 16, 0, 0, LP_OPTIONS16 }, - [0xc2] = { 64, 16, 0, 0, LP_OPTIONS16 }, + [0xa2] = { 64, 8, 0, 0, LP_OPTIONS }, + [0xf2] = { 64, 8, 0, 0, LP_OPTIONS }, + [0xb2] = { 64, 16, 0, 0, LP_OPTIONS16 }, + [0xc2] = { 64, 16, 0, 0, LP_OPTIONS16 }, /* 1 Gigabit */ - [0xa1] = { 128, 8, 0, 0, LP_OPTIONS }, - [0xf1] = { 128, 8, 0, 0, LP_OPTIONS }, - [0xb1] = { 128, 16, 0, 0, LP_OPTIONS16 }, - [0xc1] = { 128, 16, 0, 0, LP_OPTIONS16 }, + [0xa1] = { 128, 8, 0, 0, LP_OPTIONS }, + [0xf1] = { 128, 8, 0, 0, LP_OPTIONS }, + [0xb1] = { 128, 16, 0, 0, LP_OPTIONS16 }, + [0xc1] = { 128, 16, 0, 0, LP_OPTIONS16 }, /* 2 Gigabit */ - [0xaa] = { 256, 8, 0, 0, LP_OPTIONS }, - [0xda] = { 256, 8, 0, 0, LP_OPTIONS }, - [0xba] = { 256, 16, 0, 0, LP_OPTIONS16 }, - [0xca] = { 256, 16, 0, 0, LP_OPTIONS16 }, + [0xaa] = { 256, 8, 0, 0, LP_OPTIONS }, + [0xda] = { 256, 8, 0, 0, LP_OPTIONS }, + [0xba] = { 256, 16, 0, 0, LP_OPTIONS16 }, + [0xca] = { 256, 16, 0, 0, LP_OPTIONS16 }, /* 4 Gigabit */ - [0xac] = { 512, 8, 0, 0, LP_OPTIONS }, - [0xdc] = { 512, 8, 0, 0, LP_OPTIONS }, - [0xbc] = { 512, 16, 0, 0, LP_OPTIONS16 }, - [0xcc] = { 512, 16, 0, 0, LP_OPTIONS16 }, + [0xac] = { 512, 8, 0, 0, LP_OPTIONS }, + [0xdc] = { 512, 8, 0, 0, LP_OPTIONS }, + [0xbc] = { 512, 16, 0, 0, LP_OPTIONS16 }, + [0xcc] = { 512, 16, 0, 0, LP_OPTIONS16 }, /* 8 Gigabit */ - [0xa3] = { 1024, 8, 0, 0, LP_OPTIONS }, - [0xd3] = { 1024, 8, 0, 0, LP_OPTIONS }, - [0xb3] = { 1024, 16, 0, 0, LP_OPTIONS16 }, - [0xc3] = { 1024, 16, 0, 0, LP_OPTIONS16 }, + [0xa3] = { 1024, 8, 0, 0, LP_OPTIONS }, + [0xd3] = { 1024, 8, 0, 0, LP_OPTIONS }, + [0xb3] = { 1024, 16, 0, 0, LP_OPTIONS16 }, + [0xc3] = { 1024, 16, 0, 0, LP_OPTIONS16 }, /* 16 Gigabit */ - [0xa5] = { 2048, 8, 0, 0, LP_OPTIONS }, - [0xd5] = { 2048, 8, 0, 0, LP_OPTIONS }, - [0xb5] = { 2048, 16, 0, 0, LP_OPTIONS16 }, - [0xc5] = { 2048, 16, 0, 0, LP_OPTIONS16 }, + [0xa5] = { 2048, 8, 0, 0, LP_OPTIONS }, + [0xd5] = { 2048, 8, 0, 0, LP_OPTIONS }, + [0xb5] = { 2048, 16, 0, 0, LP_OPTIONS16 }, + [0xc5] = { 2048, 16, 0, 0, LP_OPTIONS16 }, }; static void nand_reset(DeviceState *dev) @@ -812,4 +812,4 @@ static void glue(nand_init_, NAND_PAGE_SIZE)(NANDFlashState *s) # undef PAGE_SHIFT # undef PAGE_SECTORS # undef ADDR_SHIFT -#endif /* NAND_IO */ +#endif /* NAND_IO */ diff --git a/hw/block/onenand.c b/hw/block/onenand.c index 1fde975024..50d3d1c985 100644 --- a/hw/block/onenand.c +++ b/hw/block/onenand.c @@ -35,10 +35,10 @@ #include "qom/object.h" /* 11 for 2kB-page OneNAND ("2nd generation") and 10 for 1kB-page chips */ -#define PAGE_SHIFT 11 +#define PAGE_SHIFT 11 /* Fixed */ -#define BLOCK_SHIFT (PAGE_SHIFT + 6) +#define BLOCK_SHIFT (PAGE_SHIFT + 6) #define TYPE_ONE_NAND "onenand" OBJECT_DECLARE_SIMPLE_TYPE(OneNANDState, ONE_NAND) @@ -408,23 +408,23 @@ static void onenand_command(OneNANDState *s) int b; int sec; void *buf; -#define SETADDR(block, page) \ - sec = (s->addr[page] & 3) + \ - ((((s->addr[page] >> 2) & 0x3f) + \ - (((s->addr[block] & 0xfff) | \ - (s->addr[block] >> 15 ? \ - s->density_mask : 0)) << 6)) << (PAGE_SHIFT - 9)); -#define SETBUF_M() \ - buf = (s->bufaddr & 8) ? 
\ - s->data[(s->bufaddr >> 2) & 1][0] : s->boot[0]; \ +#define SETADDR(block, page) \ + sec = (s->addr[page] & 3) + \ + ((((s->addr[page] >> 2) & 0x3f) + \ + (((s->addr[block] & 0xfff) | \ + (s->addr[block] >> 15 ? s->density_mask : 0)) \ + << 6)) \ + << (PAGE_SHIFT - 9)); +#define SETBUF_M() \ + buf = (s->bufaddr & 8) ? s->data[(s->bufaddr >> 2) & 1][0] : s->boot[0]; \ buf += (s->bufaddr & 3) << 9; -#define SETBUF_S() \ - buf = (s->bufaddr & 8) ? \ - s->data[(s->bufaddr >> 2) & 1][1] : s->boot[1]; \ +#define SETBUF_S() \ + buf = (s->bufaddr & 8) ? \ + s->data[(s->bufaddr >> 2) & 1][1] : s->boot[1]; \ buf += (s->bufaddr & 3) << 4; switch (s->command) { - case 0x00: /* Load single/multiple sector data unit into buffer */ + case 0x00: /* Load single/multiple sector data unit into buffer */ SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) SETBUF_M() @@ -443,7 +443,7 @@ static void onenand_command(OneNANDState *s) */ s->intstatus |= ONEN_INT | ONEN_INT_LOAD; break; - case 0x13: /* Load single/multiple spare sector into buffer */ + case 0x13: /* Load single/multiple spare sector into buffer */ SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) SETBUF_S() @@ -456,7 +456,7 @@ static void onenand_command(OneNANDState *s) */ s->intstatus |= ONEN_INT | ONEN_INT_LOAD; break; - case 0x80: /* Program single/multiple sector data unit from buffer */ + case 0x80: /* Program single/multiple sector data unit from buffer */ SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) SETBUF_M() @@ -475,7 +475,7 @@ static void onenand_command(OneNANDState *s) */ s->intstatus |= ONEN_INT | ONEN_INT_PROG; break; - case 0x1a: /* Program single/multiple spare area sector from buffer */ + case 0x1a: /* Program single/multiple spare area sector from buffer */ SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) SETBUF_S() @@ -488,7 +488,7 @@ static void onenand_command(OneNANDState *s) */ s->intstatus |= ONEN_INT | ONEN_INT_PROG; break; - case 0x1b: /* Copy-back program */ + case 0x1b: /* Copy-back program */ SETBUF_S() SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) @@ -504,7 +504,7 @@ static void onenand_command(OneNANDState *s) s->intstatus |= ONEN_INT | ONEN_INT_PROG; break; - case 0x23: /* Unlock NAND array block(s) */ + case 0x23: /* Unlock NAND array block(s) */ s->intstatus |= ONEN_INT; /* XXX the previous (?) area should be locked automatically */ @@ -519,7 +519,7 @@ static void onenand_command(OneNANDState *s) s->wpstatus = s->blockwp[b] = ONEN_LOCK_UNLOCKED; } break; - case 0x27: /* Unlock All NAND array blocks */ + case 0x27: /* Unlock All NAND array blocks */ s->intstatus |= ONEN_INT; for (b = 0; b < s->blocks; b ++) { @@ -530,7 +530,7 @@ static void onenand_command(OneNANDState *s) } break; - case 0x2a: /* Lock NAND array block(s) */ + case 0x2a: /* Lock NAND array block(s) */ s->intstatus |= ONEN_INT; for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) { @@ -544,7 +544,7 @@ static void onenand_command(OneNANDState *s) s->wpstatus = s->blockwp[b] = ONEN_LOCK_LOCKED; } break; - case 0x2c: /* Lock-tight NAND array block(s) */ + case 0x2c: /* Lock-tight NAND array block(s) */ s->intstatus |= ONEN_INT; for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) { @@ -559,13 +559,13 @@ static void onenand_command(OneNANDState *s) } break; - case 0x71: /* Erase-Verify-Read */ + case 0x71: /* Erase-Verify-Read */ s->intstatus |= ONEN_INT; break; - case 0x95: /* Multi-block erase */ + case 0x95: /* Multi-block erase */ qemu_irq_pulse(s->intr); /* Fall through. 
*/ - case 0x94: /* Block erase */ + case 0x94: /* Block erase */ sec = ((s->addr[ONEN_BUF_BLOCK] & 0xfff) | (s->addr[ONEN_BUF_BLOCK] >> 15 ? s->density_mask : 0)) << (BLOCK_SHIFT - 9); @@ -574,20 +574,20 @@ static void onenand_command(OneNANDState *s) s->intstatus |= ONEN_INT | ONEN_INT_ERASE; break; - case 0xb0: /* Erase suspend */ + case 0xb0: /* Erase suspend */ break; - case 0x30: /* Erase resume */ + case 0x30: /* Erase resume */ s->intstatus |= ONEN_INT | ONEN_INT_ERASE; break; - case 0xf0: /* Reset NAND Flash core */ + case 0xf0: /* Reset NAND Flash core */ onenand_reset(s, 0); break; - case 0xf3: /* Reset OneNAND */ + case 0xf3: /* Reset OneNAND */ onenand_reset(s, 0); break; - case 0x65: /* OTP Access */ + case 0x65: /* OTP Access */ s->intstatus |= ONEN_INT; s->blk_cur = NULL; s->current = s->otp; @@ -616,52 +616,52 @@ static uint64_t onenand_read(void *opaque, hwaddr addr, case 0x0000 ... 0xbffe: return lduw_le_p(s->boot[0] + addr); - case 0xf000: /* Manufacturer ID */ + case 0xf000: /* Manufacturer ID */ return s->id.man; - case 0xf001: /* Device ID */ + case 0xf001: /* Device ID */ return s->id.dev; - case 0xf002: /* Version ID */ + case 0xf002: /* Version ID */ return s->id.ver; /* TODO: get the following values from a real chip! */ - case 0xf003: /* Data Buffer size */ + case 0xf003: /* Data Buffer size */ return 1 << PAGE_SHIFT; - case 0xf004: /* Boot Buffer size */ + case 0xf004: /* Boot Buffer size */ return 0x200; - case 0xf005: /* Amount of buffers */ + case 0xf005: /* Amount of buffers */ return 1 | (2 << 8); - case 0xf006: /* Technology */ + case 0xf006: /* Technology */ return 0; - case 0xf100 ... 0xf107: /* Start addresses */ + case 0xf100 ... 0xf107: /* Start addresses */ return s->addr[offset - 0xf100]; - case 0xf200: /* Start buffer */ + case 0xf200: /* Start buffer */ return (s->bufaddr << 8) | ((s->count - 1) & (1 << (PAGE_SHIFT - 10))); - case 0xf220: /* Command */ + case 0xf220: /* Command */ return s->command; - case 0xf221: /* System Configuration 1 */ + case 0xf221: /* System Configuration 1 */ return s->config[0] & 0xffe0; - case 0xf222: /* System Configuration 2 */ + case 0xf222: /* System Configuration 2 */ return s->config[1]; - case 0xf240: /* Controller Status */ + case 0xf240: /* Controller Status */ return s->status; - case 0xf241: /* Interrupt */ + case 0xf241: /* Interrupt */ return s->intstatus; - case 0xf24c: /* Unlock Start Block Address */ + case 0xf24c: /* Unlock Start Block Address */ return s->unladdr[0]; - case 0xf24d: /* Unlock End Block Address */ + case 0xf24d: /* Unlock End Block Address */ return s->unladdr[1]; - case 0xf24e: /* Write Protection Status */ + case 0xf24e: /* Write Protection Status */ return s->wpstatus; - case 0xff00: /* ECC Status */ + case 0xff00: /* ECC Status */ return 0x00; - case 0xff01: /* ECC Result of main area data */ - case 0xff02: /* ECC Result of spare area data */ - case 0xff03: /* ECC Result of main area data */ - case 0xff04: /* ECC Result of spare area data */ + case 0xff01: /* ECC Result of main area data */ + case 0xff02: /* ECC Result of spare area data */ + case 0xff03: /* ECC Result of main area data */ + case 0xff04: /* ECC Result of spare area data */ qemu_log_mask(LOG_UNIMP, "onenand: ECC result registers unimplemented\n"); return 0x0000; @@ -696,15 +696,15 @@ static void onenand_write(void *opaque, hwaddr addr, } switch (value) { - case 0x00f0: /* Reset OneNAND */ + case 0x00f0: /* Reset OneNAND */ onenand_reset(s, 0); break; - case 0x00e0: /* Load Data into Buffer */ + case 0x00e0: /* Load 
Data into Buffer */ s->cycle = 1; break; - case 0x0090: /* Read Identification Data */ + case 0x0090: /* Read Identification Data */ memset(s->boot[0], 0, 3 << s->shift); s->boot[0][0 << s->shift] = s->id.man & 0xff; s->boot[0][1 << s->shift] = s->id.dev & 0xff; @@ -718,11 +718,11 @@ static void onenand_write(void *opaque, hwaddr addr, } break; - case 0xf100 ... 0xf107: /* Start addresses */ + case 0xf100 ... 0xf107: /* Start addresses */ s->addr[offset - 0xf100] = value; break; - case 0xf200: /* Start buffer */ + case 0xf200: /* Start buffer */ s->bufaddr = (value >> 8) & 0xf; if (PAGE_SHIFT == 11) s->count = (value & 3) ?: 4; @@ -730,36 +730,36 @@ static void onenand_write(void *opaque, hwaddr addr, s->count = (value & 1) ?: 2; break; - case 0xf220: /* Command */ + case 0xf220: /* Command */ if (s->intstatus & (1 << 15)) break; s->command = value; onenand_command(s); break; - case 0xf221: /* System Configuration 1 */ + case 0xf221: /* System Configuration 1 */ s->config[0] = value; onenand_intr_update(s); qemu_set_irq(s->rdy, (s->config[0] >> 7) & 1); break; - case 0xf222: /* System Configuration 2 */ + case 0xf222: /* System Configuration 2 */ s->config[1] = value; break; - case 0xf241: /* Interrupt */ + case 0xf241: /* Interrupt */ s->intstatus &= value; if ((1 << 15) & ~s->intstatus) s->status &= ~(ONEN_ERR_CMD | ONEN_ERR_ERASE | ONEN_ERR_PROG | ONEN_ERR_LOAD); onenand_intr_update(s); break; - case 0xf24c: /* Unlock Start Block Address */ + case 0xf24c: /* Unlock Start Block Address */ s->unladdr[0] = value & (s->blocks - 1); /* For some reason we have to set the end address to by default * be same as start because the software forgets to write anything * in there. */ s->unladdr[1] = value & (s->blocks - 1); break; - case 0xf24d: /* Unlock End Block Address */ + case 0xf24d: /* Unlock End Block Address */ s->unladdr[1] = value & (s->blocks - 1); break; diff --git a/hw/block/tc58128.c b/hw/block/tc58128.c index bfc27ad899..d350126b27 100644 --- a/hw/block/tc58128.c +++ b/hw/block/tc58128.c @@ -62,24 +62,24 @@ static void init_dev(tc58128_dev * dev, const char *filename) dev->flash_contents = g_malloc(FLASH_SIZE); memset(dev->flash_contents, 0xff, FLASH_SIZE); if (filename) { - /* Load flash image skipping the first block */ + /* Load flash image skipping the first block */ ret = load_image_size(filename, dev->flash_contents + 528 * 32, FLASH_SIZE - 528 * 32); - if (ret < 0) { + if (ret < 0) { if (!qtest_enabled()) { error_report("Could not load flash image %s", filename); exit(1); } - } else { - /* Build first block with number of blocks */ + } else { + /* Build first block with number of blocks */ blocks = DIV_ROUND_UP(ret, 528 * 32); - dev->flash_contents[0] = blocks & 0xff; - dev->flash_contents[1] = (blocks >> 8) & 0xff; - dev->flash_contents[2] = (blocks >> 16) & 0xff; - dev->flash_contents[3] = (blocks >> 24) & 0xff; - fprintf(stderr, "loaded %d bytes for %s into flash\n", ret, - filename); - } + dev->flash_contents[0] = blocks & 0xff; + dev->flash_contents[1] = (blocks >> 8) & 0xff; + dev->flash_contents[2] = (blocks >> 16) & 0xff; + dev->flash_contents[3] = (blocks >> 24) & 0xff; + fprintf(stderr, "loaded %d bytes for %s into flash\n", ret, + filename); + } } } @@ -87,26 +87,26 @@ static void handle_command(tc58128_dev * dev, uint8_t command) { switch (command) { case 0xff: - fprintf(stderr, "reset flash device\n"); - dev->state = WAIT; - break; + fprintf(stderr, "reset flash device\n"); + dev->state = WAIT; + break; case 0x00: - fprintf(stderr, "read mode 1\n"); - dev->state = 
READ1; - dev->address_cycle = 0; - break; + fprintf(stderr, "read mode 1\n"); + dev->state = READ1; + dev->address_cycle = 0; + break; case 0x01: - fprintf(stderr, "read mode 2\n"); - dev->state = READ2; - dev->address_cycle = 0; - break; + fprintf(stderr, "read mode 2\n"); + dev->state = READ2; + dev->address_cycle = 0; + break; case 0x50: - fprintf(stderr, "read mode 3\n"); - dev->state = READ3; - dev->address_cycle = 0; - break; + fprintf(stderr, "read mode 3\n"); + dev->state = READ3; + dev->address_cycle = 0; + break; default: - fprintf(stderr, "unknown flash command 0x%02x\n", command); + fprintf(stderr, "unknown flash command 0x%02x\n", command); abort(); } } @@ -117,28 +117,28 @@ static void handle_address(tc58128_dev * dev, uint8_t data) case READ1: case READ2: case READ3: - switch (dev->address_cycle) { - case 0: - dev->address = data; - if (dev->state == READ2) - dev->address |= 0x100; - else if (dev->state == READ3) - dev->address |= 0x200; - break; - case 1: - dev->address += data * 528 * 0x100; - break; - case 2: - dev->address += data * 528; - fprintf(stderr, "address pointer in flash: 0x%08x\n", - dev->address); - break; - default: - /* Invalid data */ + switch (dev->address_cycle) { + case 0: + dev->address = data; + if (dev->state == READ2) + dev->address |= 0x100; + else if (dev->state == READ3) + dev->address |= 0x200; + break; + case 1: + dev->address += data * 528 * 0x100; + break; + case 2: + dev->address += data * 528; + fprintf(stderr, "address pointer in flash: 0x%08x\n", + dev->address); + break; + default: + /* Invalid data */ abort(); - } - dev->address_cycle++; - break; + } + dev->address_cycle++; + break; default: abort(); } @@ -148,7 +148,7 @@ static uint8_t handle_read(tc58128_dev * dev) { #if 0 if (dev->address % 0x100000 == 0) - fprintf(stderr, "reading flash at address 0x%08x\n", dev->address); + fprintf(stderr, "reading flash at address 0x%08x\n", dev->address); #endif return dev->flash_contents[dev->address++]; } @@ -163,31 +163,31 @@ static int tc58128_cb(uint16_t porta, uint16_t portb, int dev; if ((porta & CE1) == 0) - dev = 0; + dev = 0; else if ((porta & CE2) == 0) - dev = 1; + dev = 1; else - return 0; /* No device selected */ + return 0; /* No device selected */ if ((porta & RE) && (porta & WE)) { - /* Nothing to do, assert ready and return to input state */ - *periph_portadir &= 0xff00; - *periph_portadir |= RDY(dev); - *periph_pdtra |= RDY(dev); - return 1; + /* Nothing to do, assert ready and return to input state */ + *periph_portadir &= 0xff00; + *periph_portadir |= RDY(dev); + *periph_pdtra |= RDY(dev); + return 1; } if (porta & CLE) { - /* Command */ - assert((porta & WE) == 0); - handle_command(&tc58128_devs[dev], porta & 0x00ff); + /* Command */ + assert((porta & WE) == 0); + handle_command(&tc58128_devs[dev], porta & 0x00ff); } else if (porta & ALE) { - assert((porta & WE) == 0); - handle_address(&tc58128_devs[dev], porta & 0x00ff); + assert((porta & WE) == 0); + handle_address(&tc58128_devs[dev], porta & 0x00ff); } else if ((porta & RE) == 0) { - *periph_portadir |= 0x00ff; - *periph_pdtra &= 0xff00; - *periph_pdtra |= handle_read(&tc58128_devs[dev]); + *periph_portadir |= 0x00ff; + *periph_pdtra &= 0xff00; + *periph_pdtra |= handle_read(&tc58128_devs[dev]); } else { abort(); } @@ -195,9 +195,9 @@ static int tc58128_cb(uint16_t porta, uint16_t portb, } static sh7750_io_device tc58128 = { - RE | WE, /* Port A triggers */ - 0, /* Port B triggers */ - tc58128_cb /* Callback */ + RE | WE, /* Port A triggers */ + 0, /* Port B triggers */ + 
tc58128_cb /* Callback */ }; int tc58128_init(struct SH7750State *s, const char *zone1, const char *zone2) diff --git a/hw/block/trace-events b/hw/block/trace-events index 2c45a62bd5..34be8b9135 100644 --- a/hw/block/trace-events +++ b/hw/block/trace-events @@ -44,9 +44,16 @@ pflash_write_unknown(const char *name, uint8_t cmd) "%s: unknown command 0x%02x" # virtio-blk.c virtio_blk_req_complete(void *vdev, void *req, int status) "vdev %p req %p status %d" virtio_blk_rw_complete(void *vdev, void *req, int ret) "vdev %p req %p ret %d" +virtio_blk_zone_report_complete(void *vdev, void *req, unsigned int nr_zones, int ret) "vdev %p req %p nr_zones %u ret %d" +virtio_blk_zone_mgmt_complete(void *vdev, void *req, int ret) "vdev %p req %p ret %d" +virtio_blk_zone_append_complete(void *vdev, void *req, int64_t sector, int ret) "vdev %p req %p, append sector 0x%" PRIx64 " ret %d" virtio_blk_handle_write(void *vdev, void *req, uint64_t sector, size_t nsectors) "vdev %p req %p sector %"PRIu64" nsectors %zu" virtio_blk_handle_read(void *vdev, void *req, uint64_t sector, size_t nsectors) "vdev %p req %p sector %"PRIu64" nsectors %zu" virtio_blk_submit_multireq(void *vdev, void *mrb, int start, int num_reqs, uint64_t offset, size_t size, bool is_write) "vdev %p mrb %p start %d num_reqs %d offset %"PRIu64" size %zu is_write %d" +virtio_blk_handle_zone_report(void *vdev, void *req, int64_t sector, unsigned int nr_zones) "vdev %p req %p sector 0x%" PRIx64 " nr_zones %u" +virtio_blk_handle_zone_mgmt(void *vdev, void *req, uint8_t op, int64_t sector, int64_t len) "vdev %p req %p op 0x%x sector 0x%" PRIx64 " len 0x%" PRIx64 "" +virtio_blk_handle_zone_reset_all(void *vdev, void *req, int64_t sector, int64_t len) "vdev %p req %p sector 0x%" PRIx64 " cap 0x%" PRIx64 "" +virtio_blk_handle_zone_append(void *vdev, void *req, int64_t sector) "vdev %p req %p, append sector 0x%" PRIx64 "" # hd-geometry.c hd_geometry_lchs_guess(void *blk, int cyls, int heads, int secs) "blk %p LCHS %d %d %d" diff --git a/hw/block/virtio-blk-common.c b/hw/block/virtio-blk-common.c index ac52d7c176..e2f8e2f6da 100644 --- a/hw/block/virtio-blk-common.c +++ b/hw/block/virtio-blk-common.c @@ -29,6 +29,8 @@ static const VirtIOFeature feature_sizes[] = { .end = endof(struct virtio_blk_config, discard_sector_alignment)}, {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES, .end = endof(struct virtio_blk_config, write_zeroes_may_unmap)}, + {.flags = 1ULL << VIRTIO_BLK_F_ZONED, + .end = endof(struct virtio_blk_config, zoned)}, {} }; diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index cefca93b31..4ca66b5860 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -17,6 +17,7 @@ #include "qemu/module.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" +#include "block/block_int.h" #include "trace.h" #include "hw/block/block.h" #include "hw/qdev-properties.h" @@ -601,6 +602,351 @@ err: return err_status; } +typedef struct ZoneCmdData { + VirtIOBlockReq *req; + struct iovec *in_iov; + unsigned in_num; + union { + struct { + unsigned int nr_zones; + BlockZoneDescriptor *zones; + } zone_report_data; + struct { + int64_t offset; + } zone_append_data; + }; +} ZoneCmdData; + +/* + * check zoned_request: error checking before issuing requests. If all checks + * passed, return true. + * append: true if only zone append requests issued. 
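The report handler further down sizes its reply from req->in_len. The rule it applies, factored out here as a sketch (the struct names are the ones used by this patch; the helper itself is illustrative, and the caller must first verify that in_len covers the fixed headers plus at least one descriptor):

    static unsigned int zone_report_capacity(size_t in_len)
    {
        size_t fixed = sizeof(struct virtio_blk_inhdr) +
                       sizeof(struct virtio_blk_zone_report);

        /* Whatever remains after the headers is descriptor space. */
        return (in_len - fixed) / sizeof(struct virtio_blk_zone_descriptor);
    }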
+ */ +static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len, + bool append, uint8_t *status) { + BlockDriverState *bs = blk_bs(s->blk); + int index; + + if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) { + *status = VIRTIO_BLK_S_UNSUPP; + return false; + } + + if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS) + || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) { + *status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + return false; + } + + if (append) { + if (bs->bl.write_granularity) { + if ((offset % bs->bl.write_granularity) != 0) { + *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP; + return false; + } + } + + index = offset / bs->bl.zone_size; + if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) { + *status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + return false; + } + + if (len / 512 > bs->bl.max_append_sectors) { + if (bs->bl.max_append_sectors == 0) { + *status = VIRTIO_BLK_S_UNSUPP; + } else { + *status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + } + return false; + } + } + return true; +} + +static void virtio_blk_zone_report_complete(void *opaque, int ret) +{ + ZoneCmdData *data = opaque; + VirtIOBlockReq *req = data->req; + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); + struct iovec *in_iov = data->in_iov; + unsigned in_num = data->in_num; + int64_t zrp_size, n, j = 0; + int64_t nz = data->zone_report_data.nr_zones; + int8_t err_status = VIRTIO_BLK_S_OK; + + trace_virtio_blk_zone_report_complete(vdev, req, nz, ret); + if (ret) { + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + goto out; + } + + struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) { + .nr_zones = cpu_to_le64(nz), + }; + zrp_size = sizeof(struct virtio_blk_zone_report) + + sizeof(struct virtio_blk_zone_descriptor) * nz; + n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr)); + if (n != sizeof(zrp_hdr)) { + virtio_error(vdev, "Driver provided input buffer that is too small!"); + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + goto out; + } + + for (size_t i = sizeof(zrp_hdr); i < zrp_size; + i += sizeof(struct virtio_blk_zone_descriptor), ++j) { + struct virtio_blk_zone_descriptor desc = + (struct virtio_blk_zone_descriptor) { + .z_start = cpu_to_le64(data->zone_report_data.zones[j].start + >> BDRV_SECTOR_BITS), + .z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap + >> BDRV_SECTOR_BITS), + .z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp + >> BDRV_SECTOR_BITS), + }; + + switch (data->zone_report_data.zones[j].type) { + case BLK_ZT_CONV: + desc.z_type = VIRTIO_BLK_ZT_CONV; + break; + case BLK_ZT_SWR: + desc.z_type = VIRTIO_BLK_ZT_SWR; + break; + case BLK_ZT_SWP: + desc.z_type = VIRTIO_BLK_ZT_SWP; + break; + default: + g_assert_not_reached(); + } + + switch (data->zone_report_data.zones[j].state) { + case BLK_ZS_RDONLY: + desc.z_state = VIRTIO_BLK_ZS_RDONLY; + break; + case BLK_ZS_OFFLINE: + desc.z_state = VIRTIO_BLK_ZS_OFFLINE; + break; + case BLK_ZS_EMPTY: + desc.z_state = VIRTIO_BLK_ZS_EMPTY; + break; + case BLK_ZS_CLOSED: + desc.z_state = VIRTIO_BLK_ZS_CLOSED; + break; + case BLK_ZS_FULL: + desc.z_state = VIRTIO_BLK_ZS_FULL; + break; + case BLK_ZS_EOPEN: + desc.z_state = VIRTIO_BLK_ZS_EOPEN; + break; + case BLK_ZS_IOPEN: + desc.z_state = VIRTIO_BLK_ZS_IOPEN; + break; + case BLK_ZS_NOT_WP: + desc.z_state = VIRTIO_BLK_ZS_NOT_WP; + break; + default: + g_assert_not_reached(); + } + + /* TODO: it takes O(n^2) time complexity. Optimizations required. 
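The TODO above flags that calling iov_from_buf() once per descriptor rescans the vector from the start on every call, which is quadratic over large reports. One possible remedy, offered as an editorial sketch rather than as part of the patch, is to assemble the descriptors in a contiguous array first and copy them in a single pass:

    static bool copy_zone_descs(struct iovec *in_iov, unsigned in_num,
                                size_t offset,
                                const struct virtio_blk_zone_descriptor *descs,
                                size_t nz)
    {
        size_t len = nz * sizeof(*descs);

        /* A single iov_from_buf() call walks the vector only once. */
        return iov_from_buf(in_iov, in_num, offset, descs, len) == len;
    }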
*/ + n = iov_from_buf(in_iov, in_num, i, &desc, sizeof(desc)); + if (n != sizeof(desc)) { + virtio_error(vdev, "Driver provided input buffer " + "for descriptors that is too small!"); + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + } + } + +out: + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); + g_free(data->zone_report_data.zones); + g_free(data); +} + +static void virtio_blk_handle_zone_report(VirtIOBlockReq *req, + struct iovec *in_iov, + unsigned in_num) +{ + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + unsigned int nr_zones; + ZoneCmdData *data; + int64_t zone_size, offset; + uint8_t err_status; + + if (req->in_len < sizeof(struct virtio_blk_inhdr) + + sizeof(struct virtio_blk_zone_report) + + sizeof(struct virtio_blk_zone_descriptor)) { + virtio_error(vdev, "in buffer too small for zone report"); + return; + } + + /* start byte offset of the zone report */ + offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS; + if (!check_zoned_request(s, offset, 0, false, &err_status)) { + goto out; + } + nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) - + sizeof(struct virtio_blk_zone_report)) / + sizeof(struct virtio_blk_zone_descriptor); + trace_virtio_blk_handle_zone_report(vdev, req, + offset >> BDRV_SECTOR_BITS, nr_zones); + + zone_size = sizeof(BlockZoneDescriptor) * nr_zones; + data = g_malloc(sizeof(ZoneCmdData)); + data->req = req; + data->in_iov = in_iov; + data->in_num = in_num; + data->zone_report_data.nr_zones = nr_zones; + data->zone_report_data.zones = g_malloc(zone_size), + + blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones, + data->zone_report_data.zones, + virtio_blk_zone_report_complete, data); + return; +out: + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); +} + +static void virtio_blk_zone_mgmt_complete(void *opaque, int ret) +{ + VirtIOBlockReq *req = opaque; + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + int8_t err_status = VIRTIO_BLK_S_OK; + trace_virtio_blk_zone_mgmt_complete(vdev, req,ret); + + if (ret) { + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + } + + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); +} + +static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) +{ + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + BlockDriverState *bs = blk_bs(s->blk); + int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS; + uint64_t len; + uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS; + uint8_t err_status = VIRTIO_BLK_S_OK; + + uint32_t type = virtio_ldl_p(vdev, &req->out.type); + if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) { + /* Entire drive capacity */ + offset = 0; + len = capacity; + trace_virtio_blk_handle_zone_reset_all(vdev, req, 0, + bs->total_sectors); + } else { + if (bs->bl.zone_size > capacity - offset) { + /* The zoned device allows the last smaller zone. 
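Each zoned completion path above ends with the same epilogue: acquire the BlockBackend's AioContext, complete and free the request, then release the context, so the virtqueue is only updated under the context lock. Reduced to a sketch, with s, req and err_status as in the surrounding handlers:

    AioContext *ctx = blk_get_aio_context(s->conf.conf.blk);

    aio_context_acquire(ctx);
    virtio_blk_req_complete(req, err_status);   /* pushes onto the used ring */
    virtio_blk_free_request(req);
    aio_context_release(ctx);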
*/ + len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1); + } else { + len = bs->bl.zone_size; + } + trace_virtio_blk_handle_zone_mgmt(vdev, req, op, + offset >> BDRV_SECTOR_BITS, + len >> BDRV_SECTOR_BITS); + } + + if (!check_zoned_request(s, offset, len, false, &err_status)) { + goto out; + } + + blk_aio_zone_mgmt(s->blk, op, offset, len, + virtio_blk_zone_mgmt_complete, req); + + return 0; +out: + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); + return err_status; +} + +static void virtio_blk_zone_append_complete(void *opaque, int ret) +{ + ZoneCmdData *data = opaque; + VirtIOBlockReq *req = data->req; + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); + int64_t append_sector, n; + uint8_t err_status = VIRTIO_BLK_S_OK; + + if (ret) { + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + goto out; + } + + virtio_stq_p(vdev, &append_sector, + data->zone_append_data.offset >> BDRV_SECTOR_BITS); + n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector, + sizeof(append_sector)); + if (n != sizeof(append_sector)) { + virtio_error(vdev, "Driver provided input buffer less than size of " + "append_sector"); + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; + goto out; + } + trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret); + +out: + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); + g_free(data); +} + +static int virtio_blk_handle_zone_append(VirtIOBlockReq *req, + struct iovec *out_iov, + struct iovec *in_iov, + uint64_t out_num, + unsigned in_num) { + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + uint8_t err_status = VIRTIO_BLK_S_OK; + + int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS; + int64_t len = iov_size(out_iov, out_num); + + trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS); + if (!check_zoned_request(s, offset, len, true, &err_status)) { + goto out; + } + + ZoneCmdData *data = g_malloc(sizeof(ZoneCmdData)); + data->req = req; + data->in_iov = in_iov; + data->in_num = in_num; + data->zone_append_data.offset = offset; + qemu_iovec_init_external(&req->qiov, out_iov, out_num); + + block_acct_start(blk_get_stats(s->blk), &req->acct, len, + BLOCK_ACCT_ZONE_APPEND); + + blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0, + virtio_blk_zone_append_complete, data); + return 0; + +out: + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); + return err_status; +} + static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) { uint32_t type; @@ -687,6 +1033,24 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) case VIRTIO_BLK_T_FLUSH: virtio_blk_handle_flush(req, mrb); break; + case VIRTIO_BLK_T_ZONE_REPORT: + virtio_blk_handle_zone_report(req, in_iov, in_num); + break; + case VIRTIO_BLK_T_ZONE_OPEN: + virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN); + break; + case VIRTIO_BLK_T_ZONE_CLOSE: + virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE); + break; + case VIRTIO_BLK_T_ZONE_FINISH: + virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH); + break; + case VIRTIO_BLK_T_ZONE_RESET: + virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET); + break; + case VIRTIO_BLK_T_ZONE_RESET_ALL: + 
virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET); + break; case VIRTIO_BLK_T_SCSI_CMD: virtio_blk_handle_scsi(req); break; @@ -705,6 +1069,14 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) virtio_blk_free_request(req); break; } + case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT: + /* + * Passing out_iov/out_num and in_iov/in_num is not safe + * to access req->elem.out_sg directly because it may be + * modified by virtio_blk_handle_request(). + */ + virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num); + break; /* * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with * VIRTIO_BLK_T_OUT flag set. We masked this flag in the switch statement, @@ -890,6 +1262,7 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) { VirtIOBlock *s = VIRTIO_BLK(vdev); BlockConf *conf = &s->conf.conf; + BlockDriverState *bs = blk_bs(s->blk); struct virtio_blk_config blkcfg; uint64_t capacity; int64_t length; @@ -954,6 +1327,30 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) blkcfg.write_zeroes_may_unmap = 1; virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1); } + if (bs->bl.zoned != BLK_Z_NONE) { + switch (bs->bl.zoned) { + case BLK_Z_HM: + blkcfg.zoned.model = VIRTIO_BLK_Z_HM; + break; + case BLK_Z_HA: + blkcfg.zoned.model = VIRTIO_BLK_Z_HA; + break; + default: + g_assert_not_reached(); + } + + virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors, + bs->bl.zone_size / 512); + virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones, + bs->bl.max_active_zones); + virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones, + bs->bl.max_open_zones); + virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size); + virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors, + bs->bl.max_append_sectors); + } else { + blkcfg.zoned.model = VIRTIO_BLK_Z_NONE; + } memcpy(config, &blkcfg, s->config_size); } @@ -1109,8 +1506,44 @@ static void virtio_blk_resize(void *opaque) aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev); } +/* Suspend virtqueue ioeventfd processing during drain */ +static void virtio_blk_drained_begin(void *opaque) +{ + VirtIOBlock *s = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(opaque); + AioContext *ctx = blk_get_aio_context(s->conf.conf.blk); + + if (!s->dataplane || !s->dataplane_started) { + return; + } + + for (uint16_t i = 0; i < s->conf.num_queues; i++) { + VirtQueue *vq = virtio_get_queue(vdev, i); + virtio_queue_aio_detach_host_notifier(vq, ctx); + } +} + +/* Resume virtqueue ioeventfd processing after drain */ +static void virtio_blk_drained_end(void *opaque) +{ + VirtIOBlock *s = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(opaque); + AioContext *ctx = blk_get_aio_context(s->conf.conf.blk); + + if (!s->dataplane || !s->dataplane_started) { + return; + } + + for (uint16_t i = 0; i < s->conf.num_queues; i++) { + VirtQueue *vq = virtio_get_queue(vdev, i); + virtio_queue_aio_attach_host_notifier(vq, ctx); + } +} + static const BlockDevOps virtio_block_ops = { - .resize_cb = virtio_blk_resize, + .resize_cb = virtio_blk_resize, + .drained_begin = virtio_blk_drained_begin, + .drained_end = virtio_blk_drained_end, }; static void virtio_blk_device_realize(DeviceState *dev, Error **errp) @@ -1163,6 +1596,14 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) return; } + BlockDriverState *bs = blk_bs(conf->conf.blk); + if (bs->bl.zoned != BLK_Z_NONE) { + virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED); + if (bs->bl.zoned == BLK_Z_HM) { + 
virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD); + } + } + if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) && (!conf->max_discard_sectors || conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) { diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c index 345b284d70..f099914831 100644 --- a/hw/block/xen-block.c +++ b/hw/block/xen-block.c @@ -19,7 +19,6 @@ #include "qapi/qmp/qdict.h" #include "qapi/qmp/qstring.h" #include "qom/object_interfaces.h" -#include "hw/xen/xen_common.h" #include "hw/block/xen_blkif.h" #include "hw/qdev-properties.h" #include "hw/xen/xen-block.h" @@ -84,7 +83,8 @@ static void xen_block_connect(XenDevice *xendev, Error **errp) g_free(ring_ref); return; } - } else if (order <= blockdev->props.max_ring_page_order) { + } else if (qemu_xen_gnttab_can_map_multi() && + order <= blockdev->props.max_ring_page_order) { unsigned int i; nr_ring_ref = 1 << order; @@ -189,8 +189,26 @@ static void xen_block_resize_cb(void *opaque) xen_device_backend_printf(xendev, "state", "%u", state); } +/* Suspend request handling */ +static void xen_block_drained_begin(void *opaque) +{ + XenBlockDevice *blockdev = opaque; + + xen_block_dataplane_detach(blockdev->dataplane); +} + +/* Resume request handling */ +static void xen_block_drained_end(void *opaque) +{ + XenBlockDevice *blockdev = opaque; + + xen_block_dataplane_attach(blockdev->dataplane); +} + static const BlockDevOps xen_block_dev_ops = { - .resize_cb = xen_block_resize_cb, + .resize_cb = xen_block_resize_cb, + .drained_begin = xen_block_drained_begin, + .drained_end = xen_block_drained_end, }; static void xen_block_realize(XenDevice *xendev, Error **errp) @@ -242,8 +260,6 @@ static void xen_block_realize(XenDevice *xendev, Error **errp) return; } - blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev); - if (conf->discard_granularity == -1) { conf->discard_granularity = conf->physical_block_size; } @@ -256,8 +272,12 @@ static void xen_block_realize(XenDevice *xendev, Error **errp) } xen_device_backend_printf(xendev, "feature-flush-cache", "%u", 1); - xen_device_backend_printf(xendev, "max-ring-page-order", "%u", - blockdev->props.max_ring_page_order); + + if (qemu_xen_gnttab_can_map_multi()) { + xen_device_backend_printf(xendev, "max-ring-page-order", "%u", + blockdev->props.max_ring_page_order); + } + xen_device_backend_printf(xendev, "info", "%u", blockdev->info); xen_device_frontend_printf(xendev, "virtual-device", "%lu", @@ -273,6 +293,8 @@ static void xen_block_realize(XenDevice *xendev, Error **errp) blockdev->dataplane = xen_block_dataplane_create(xendev, blk, conf->logical_block_size, blockdev->props.iothread); + + blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev); } static void xen_block_frontend_changed(XenDevice *xendev, diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c index c069a30842..807e398541 100644 --- a/hw/char/cadence_uart.c +++ b/hw/char/cadence_uart.c @@ -450,13 +450,15 @@ static MemTxResult uart_write(void *opaque, hwaddr offset, } break; case R_BRGR: /* Baud rate generator */ + value &= 0xffff; if (value >= 0x01) { - s->r[offset] = value & 0xFFFF; + s->r[offset] = value; } break; case R_BDIV: /* Baud rate divider */ + value &= 0xff; if (value >= 0x04) { - s->r[offset] = value & 0xFF; + s->r[offset] = value; } break; default: diff --git a/hw/char/meson.build b/hw/char/meson.build index 7b594f51b8..0807e00ae4 100644 --- a/hw/char/meson.build +++ b/hw/char/meson.build @@ -18,7 +18,7 @@ softmmu_ss.add(when: 'CONFIG_SERIAL_PCI', if_true: files('serial-pci.c')) 
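The xen-block change above also delays blk_set_dev_ops() until after xen_block_dataplane_create(), so the drained callbacks are only registered once the dataplane they drive exists. Wiring such hooks for a hypothetical device looks roughly like this (only BlockDevOps and blk_set_dev_ops are real; the mydev names are illustrative):

    static void mydev_drained_begin(void *opaque)
    {
        MyDev *d = opaque;

        mydev_detach_event_sources(d);   /* stop accepting new requests */
    }

    static void mydev_drained_end(void *opaque)
    {
        MyDev *d = opaque;

        mydev_attach_event_sources(d);   /* resume request handling */
    }

    static const BlockDevOps mydev_block_ops = {
        .drained_begin = mydev_drained_begin,
        .drained_end   = mydev_drained_end,
    };

    /* Register only once everything the callbacks touch exists: */
    blk_set_dev_ops(d->blk, &mydev_block_ops, d);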
softmmu_ss.add(when: 'CONFIG_SERIAL_PCI_MULTI', if_true: files('serial-pci-multi.c')) softmmu_ss.add(when: 'CONFIG_SHAKTI_UART', if_true: files('shakti_uart.c')) softmmu_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-console.c')) -softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen_console.c')) +softmmu_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen_console.c')) softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_uartlite.c')) softmmu_ss.add(when: 'CONFIG_AVR_USART', if_true: files('avr_usart.c')) @@ -32,10 +32,9 @@ softmmu_ss.add(when: 'CONFIG_SIFIVE_UART', if_true: files('sifive_uart.c')) softmmu_ss.add(when: 'CONFIG_SH_SCI', if_true: files('sh_serial.c')) softmmu_ss.add(when: 'CONFIG_STM32F2XX_USART', if_true: files('stm32f2xx_usart.c')) softmmu_ss.add(when: 'CONFIG_MCHP_PFSOC_MMUART', if_true: files('mchp_pfsoc_mmuart.c')) +softmmu_ss.add(when: 'CONFIG_HTIF', if_true: files('riscv_htif.c')) +softmmu_ss.add(when: 'CONFIG_GOLDFISH_TTY', if_true: files('goldfish_tty.c')) -specific_ss.add(when: 'CONFIG_HTIF', if_true: files('riscv_htif.c')) specific_ss.add(when: 'CONFIG_TERMINAL3270', if_true: files('terminal3270.c')) specific_ss.add(when: 'CONFIG_VIRTIO', if_true: files('virtio-serial-bus.c')) specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_vty.c')) - -specific_ss.add(when: 'CONFIG_GOLDFISH_TTY', if_true: files('goldfish_tty.c')) diff --git a/hw/char/parallel-isa.c b/hw/char/parallel-isa.c index 1ccbb96e70..547ae69304 100644 --- a/hw/char/parallel-isa.c +++ b/hw/char/parallel-isa.c @@ -21,7 +21,7 @@ static void parallel_init(ISABus *bus, int index, Chardev *chr) DeviceState *dev; ISADevice *isadev; - isadev = isa_new("isa-parallel"); + isadev = isa_new(TYPE_ISA_PARALLEL); dev = DEVICE(isadev); qdev_prop_set_uint32(dev, "index", index); qdev_prop_set_chr(dev, "chardev", chr); diff --git a/hw/char/parallel.c b/hw/char/parallel.c index 1c9ca47820..3d32589bb3 100644 --- a/hw/char/parallel.c +++ b/hw/char/parallel.c @@ -57,22 +57,22 @@ /* * These are the definitions for the Printer Status Register */ -#define PARA_STS_BUSY 0x80 /* Busy complement */ -#define PARA_STS_ACK 0x40 /* Acknowledge */ -#define PARA_STS_PAPER 0x20 /* Out of paper */ -#define PARA_STS_ONLINE 0x10 /* Online */ -#define PARA_STS_ERROR 0x08 /* Error complement */ -#define PARA_STS_TMOUT 0x01 /* EPP timeout */ +#define PARA_STS_BUSY 0x80 /* Busy complement */ +#define PARA_STS_ACK 0x40 /* Acknowledge */ +#define PARA_STS_PAPER 0x20 /* Out of paper */ +#define PARA_STS_ONLINE 0x10 /* Online */ +#define PARA_STS_ERROR 0x08 /* Error complement */ +#define PARA_STS_TMOUT 0x01 /* EPP timeout */ /* * These are the definitions for the Printer Control Register */ -#define PARA_CTR_DIR 0x20 /* Direction (1=read, 0=write) */ -#define PARA_CTR_INTEN 0x10 /* IRQ Enable */ -#define PARA_CTR_SELECT 0x08 /* Select In complement */ -#define PARA_CTR_INIT 0x04 /* Initialize Printer complement */ -#define PARA_CTR_AUTOLF 0x02 /* Auto linefeed complement */ -#define PARA_CTR_STROBE 0x01 /* Strobe complement */ +#define PARA_CTR_DIR 0x20 /* Direction (1=read, 0=write) */ +#define PARA_CTR_INTEN 0x10 /* IRQ Enable */ +#define PARA_CTR_SELECT 0x08 /* Select In complement */ +#define PARA_CTR_INIT 0x04 /* Initialize Printer complement */ +#define PARA_CTR_AUTOLF 0x02 /* Auto linefeed complement */ +#define PARA_CTR_STROBE 0x01 /* Strobe complement */ #define PARA_CTR_SIGNAL (PARA_CTR_SELECT|PARA_CTR_INIT|PARA_CTR_AUTOLF|PARA_CTR_STROBE) @@ -93,7 +93,6 @@ typedef struct ParallelState { PortioList 
portio_list; } ParallelState; -#define TYPE_ISA_PARALLEL "isa-parallel" OBJECT_DECLARE_SIMPLE_TYPE(ISAParallelState, ISA_PARALLEL) struct ISAParallelState { diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c index 098de50e35..37d3ccc76b 100644 --- a/hw/char/riscv_htif.c +++ b/hw/char/riscv_htif.c @@ -29,6 +29,8 @@ #include "chardev/char-fe.h" #include "qemu/timer.h" #include "qemu/error-report.h" +#include "exec/address-spaces.h" +#include "sysemu/dma.h" #define RISCV_DEBUG_HTIF 0 #define HTIF_DEBUG(fmt, ...) \ @@ -51,7 +53,10 @@ /* PK system call number */ #define PK_SYS_WRITE 64 -static uint64_t fromhost_addr, tohost_addr; +const char *sig_file; +uint8_t line_size = 16; + +static uint64_t fromhost_addr, tohost_addr, begin_sig_addr, end_sig_addr; void htif_symbol_callback(const char *st_name, int st_info, uint64_t st_value, uint64_t st_size) @@ -68,6 +73,10 @@ void htif_symbol_callback(const char *st_name, int st_info, uint64_t st_value, error_report("HTIF tohost must be 8 bytes"); exit(1); } + } else if (strcmp("begin_signature", st_name) == 0) { + begin_sig_addr = st_value; + } else if (strcmp("end_signature", st_name) == 0) { + end_sig_addr = st_value; } } @@ -163,6 +172,39 @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written) if (payload & 0x1) { /* exit code */ int exit_code = payload >> 1; + + /* + * Dump signature data if sig_file is specified and + * begin/end_signature symbols exist. + */ + if (sig_file && begin_sig_addr && end_sig_addr) { + uint64_t sig_len = end_sig_addr - begin_sig_addr; + char *sig_data = g_malloc(sig_len); + dma_memory_read(&address_space_memory, begin_sig_addr, + sig_data, sig_len, MEMTXATTRS_UNSPECIFIED); + FILE *signature = fopen(sig_file, "w"); + if (signature == NULL) { + error_report("Unable to open %s with error %s", + sig_file, strerror(errno)); + exit(1); + } + + for (int i = 0; i < sig_len; i += line_size) { + for (int j = line_size; j > 0; j--) { + if (i + j <= sig_len) { + fprintf(signature, "%02x", + sig_data[i + j - 1] & 0xff); + } else { + fprintf(signature, "%02x", 0); + } + } + fprintf(signature, "\n"); + } + + fclose(signature); + g_free(sig_data); + } + exit(exit_code); } else { uint64_t syscall[8]; diff --git a/hw/char/serial-pci-multi.c b/hw/char/serial-pci-multi.c index f18b8dcce5..5d65c534cb 100644 --- a/hw/char/serial-pci-multi.c +++ b/hw/char/serial-pci-multi.c @@ -25,7 +25,7 @@ * THE SOFTWARE. */ -/* see docs/specs/pci-serial.txt */ +/* see docs/specs/pci-serial.rst */ #include "qemu/osdep.h" #include "qapi/error.h" diff --git a/hw/char/serial-pci.c b/hw/char/serial-pci.c index 801b769aba..087da3059a 100644 --- a/hw/char/serial-pci.c +++ b/hw/char/serial-pci.c @@ -23,7 +23,7 @@ * THE SOFTWARE. */ -/* see docs/specs/pci-serial.txt */ +/* see docs/specs/pci-serial.rst */ #include "qemu/osdep.h" #include "qapi/error.h" diff --git a/hw/char/serial.c b/hw/char/serial.c index 41b5e61977..270e1b1094 100644 --- a/hw/char/serial.c +++ b/hw/char/serial.c @@ -38,20 +38,20 @@ #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" -#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */ +#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */ -#define UART_IER_MSI 0x08 /* Enable Modem status interrupt */ -#define UART_IER_RLSI 0x04 /* Enable receiver line status interrupt */ -#define UART_IER_THRI 0x02 /* Enable Transmitter holding register int. 
*/ -#define UART_IER_RDI 0x01 /* Enable receiver data interrupt */ +#define UART_IER_MSI 0x08 /* Enable Modem status interrupt */ +#define UART_IER_RLSI 0x04 /* Enable receiver line status interrupt */ +#define UART_IER_THRI 0x02 /* Enable Transmitter holding register int. */ +#define UART_IER_RDI 0x01 /* Enable receiver data interrupt */ -#define UART_IIR_NO_INT 0x01 /* No interrupts pending */ -#define UART_IIR_ID 0x06 /* Mask for the interrupt ID */ +#define UART_IIR_NO_INT 0x01 /* No interrupts pending */ +#define UART_IIR_ID 0x06 /* Mask for the interrupt ID */ -#define UART_IIR_MSI 0x00 /* Modem status interrupt */ -#define UART_IIR_THRI 0x02 /* Transmitter holding register empty */ -#define UART_IIR_RDI 0x04 /* Receiver data interrupt */ -#define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */ +#define UART_IIR_MSI 0x00 /* Modem status interrupt */ +#define UART_IIR_THRI 0x02 /* Transmitter holding register empty */ +#define UART_IIR_RDI 0x04 /* Receiver data interrupt */ +#define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */ #define UART_IIR_CTI 0x0C /* Character Timeout Indication */ #define UART_IIR_FENF 0x80 /* Fifo enabled, but not functionning */ @@ -60,33 +60,33 @@ /* * These are the definitions for the Modem Control Register */ -#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */ -#define UART_MCR_OUT2 0x08 /* Out2 complement */ -#define UART_MCR_OUT1 0x04 /* Out1 complement */ -#define UART_MCR_RTS 0x02 /* RTS complement */ -#define UART_MCR_DTR 0x01 /* DTR complement */ +#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */ +#define UART_MCR_OUT2 0x08 /* Out2 complement */ +#define UART_MCR_OUT1 0x04 /* Out1 complement */ +#define UART_MCR_RTS 0x02 /* RTS complement */ +#define UART_MCR_DTR 0x01 /* DTR complement */ /* * These are the definitions for the Modem Status Register */ -#define UART_MSR_DCD 0x80 /* Data Carrier Detect */ -#define UART_MSR_RI 0x40 /* Ring Indicator */ -#define UART_MSR_DSR 0x20 /* Data Set Ready */ -#define UART_MSR_CTS 0x10 /* Clear to Send */ -#define UART_MSR_DDCD 0x08 /* Delta DCD */ -#define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */ -#define UART_MSR_DDSR 0x02 /* Delta DSR */ -#define UART_MSR_DCTS 0x01 /* Delta CTS */ -#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! */ +#define UART_MSR_DCD 0x80 /* Data Carrier Detect */ +#define UART_MSR_RI 0x40 /* Ring Indicator */ +#define UART_MSR_DSR 0x20 /* Data Set Ready */ +#define UART_MSR_CTS 0x10 /* Clear to Send */ +#define UART_MSR_DDCD 0x08 /* Delta DCD */ +#define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */ +#define UART_MSR_DDSR 0x02 /* Delta DSR */ +#define UART_MSR_DCTS 0x01 /* Delta CTS */ +#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! 
*/ -#define UART_LSR_TEMT 0x40 /* Transmitter empty */ -#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ -#define UART_LSR_BI 0x10 /* Break interrupt indicator */ -#define UART_LSR_FE 0x08 /* Frame error indicator */ -#define UART_LSR_PE 0x04 /* Parity error indicator */ -#define UART_LSR_OE 0x02 /* Overrun error indicator */ -#define UART_LSR_DR 0x01 /* Receiver data ready */ -#define UART_LSR_INT_ANY 0x1E /* Any of the lsr-interrupt-triggering status bits */ +#define UART_LSR_TEMT 0x40 /* Transmitter empty */ +#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ +#define UART_LSR_BI 0x10 /* Break interrupt indicator */ +#define UART_LSR_FE 0x08 /* Frame error indicator */ +#define UART_LSR_PE 0x04 /* Parity error indicator */ +#define UART_LSR_OE 0x02 /* Overrun error indicator */ +#define UART_LSR_DR 0x01 /* Receiver data ready */ +#define UART_LSR_INT_ANY 0x1E /* Any of the lsr-interrupt-triggering status bits */ /* Interrupt trigger levels. The byte-counts are for 16550A - in newer UARTs the byte-count for each ITL is higher. */ diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c index 7d4601cb5d..dd619f0731 100644 --- a/hw/char/virtio-serial-bus.c +++ b/hw/char/virtio-serial-bus.c @@ -985,7 +985,8 @@ static void virtser_port_device_realize(DeviceState *dev, Error **errp) return; } - port->bh = qemu_bh_new(flush_queued_data_bh, port); + port->bh = qemu_bh_new_guarded(flush_queued_data_bh, port, + &dev->mem_reentrancy_guard); port->elem = NULL; } diff --git a/hw/char/xen_console.c b/hw/char/xen_console.c index 63153dfde4..810dae3f44 100644 --- a/hw/char/xen_console.c +++ b/hw/char/xen_console.c @@ -173,6 +173,41 @@ static void xencons_send(struct XenConsole *con) /* -------------------------------------------------------------------- */ +static int store_con_info(struct XenConsole *con) +{ + Chardev *cs = qemu_chr_fe_get_driver(&con->chr); + char *pts = NULL; + char *dom_path; + g_autoptr(GString) path = NULL; + + /* Only continue if we're talking to a pty. 
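+     * For a pty-backed console the net effect is to publish the backing
+     * pts device under the guest's console node in xenstore, e.g.
+     * (illustrative values, not taken from a real guest):
+     *
+     *    /local/domain/<domid>/console/tty = /dev/pts/3
+     *
+     * or, when a non-zero device index is in use:
+     *
+     *    /local/domain/<domid>/device/console/<dev>/tty = /dev/pts/3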
*/ + if (!CHARDEV_IS_PTY(cs)) { + return 0; + } + pts = cs->filename + 4; + + dom_path = qemu_xen_xs_get_domain_path(xenstore, xen_domid); + if (!dom_path) { + return 0; + } + + path = g_string_new(dom_path); + free(dom_path); + + if (con->xendev.dev) { + g_string_append_printf(path, "/device/console/%d", con->xendev.dev); + } else { + g_string_append(path, "/console"); + } + g_string_append(path, "/tty"); + + if (xenstore_write_str(con->console, path->str, pts)) { + fprintf(stderr, "xenstore_write_str for '%s' fail", path->str); + return -1; + } + return 0; +} + static int con_init(struct XenLegacyDevice *xendev) { struct XenConsole *con = container_of(xendev, struct XenConsole, xendev); @@ -181,7 +216,7 @@ static int con_init(struct XenLegacyDevice *xendev) const char *output; /* setup */ - dom = xs_get_domain_path(xenstore, con->xendev.dom); + dom = qemu_xen_xs_get_domain_path(xenstore, con->xendev.dom); if (!xendev->dev) { snprintf(con->console, sizeof(con->console), "%s/console", dom); } else { @@ -215,8 +250,7 @@ static int con_init(struct XenLegacyDevice *xendev) &error_abort); } - xenstore_store_pv_console_info(con->xendev.dev, - qemu_chr_fe_get_driver(&con->chr)); + store_con_info(con); out: g_free(type); @@ -237,9 +271,9 @@ static int con_initialise(struct XenLegacyDevice *xendev) if (!xendev->dev) { xen_pfn_t mfn = con->ring_ref; - con->sring = xenforeignmemory_map(xen_fmem, con->xendev.dom, - PROT_READ | PROT_WRITE, - 1, &mfn, NULL); + con->sring = qemu_xen_foreignmem_map(con->xendev.dom, NULL, + PROT_READ | PROT_WRITE, + 1, &mfn, NULL); } else { con->sring = xen_be_map_grant_ref(xendev, con->ring_ref, PROT_READ | PROT_WRITE); @@ -269,9 +303,9 @@ static void con_disconnect(struct XenLegacyDevice *xendev) if (con->sring) { if (!xendev->dev) { - xenforeignmemory_unmap(xen_fmem, con->sring, 1); + qemu_xen_foreignmem_unmap(con->sring, 1); } else { - xen_be_unmap_grant_ref(xendev, con->sring); + xen_be_unmap_grant_ref(xendev, con->sring, con->ring_ref); } con->sring = NULL; } diff --git a/hw/core/loader.c b/hw/core/loader.c index 173f8f67f6..8b7fd9e9e5 100644 --- a/hw/core/loader.c +++ b/hw/core/loader.c @@ -35,7 +35,7 @@ * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along @@ -44,6 +44,7 @@ #include "qemu/osdep.h" #include "qemu/datadir.h" +#include "qemu/error-report.h" #include "qapi/error.h" #include "qapi/qapi-commands-machine.h" #include "qapi/type-helpers.h" @@ -210,8 +211,8 @@ static void bswap_ahdr(struct exec *e) #define ZMAGIC 0413 #define QMAGIC 0314 #define _N_HDROFF(x) (1024 - sizeof (struct exec)) -#define N_TXTOFF(x) \ - (N_MAGIC(x) == ZMAGIC ? _N_HDROFF((x)) + sizeof (struct exec) : \ +#define N_TXTOFF(x) \ + (N_MAGIC(x) == ZMAGIC ? _N_HDROFF((x)) + sizeof (struct exec) : \ (N_MAGIC(x) == QMAGIC ? 0 : sizeof (struct exec))) #define N_TXTADDR(x, target_page_size) (N_MAGIC(x) == QMAGIC ? 
target_page_size : 0)
 #define _N_SEGMENT_ROUND(x, target_page_size) (((x) + target_page_size - 1) & ~(target_page_size - 1))
@@ -300,10 +301,10 @@ static void *load_at(int fd, off_t offset, size_t size)
 #define ELF_CLASS ELFCLASS32
 #include "elf.h"
-#define SZ 32
+#define SZ 32
 #define elf_word uint32_t
-#define elf_sword int32_t
-#define bswapSZs bswap32s
+#define elf_sword int32_t
+#define bswapSZs bswap32s
 #include "hw/elf_ops.h"
 #undef elfhdr
@@ -316,16 +317,16 @@ static void *load_at(int fd, off_t offset, size_t size)
 #undef elf_sword
 #undef bswapSZs
 #undef SZ
-#define elfhdr elf64_hdr
-#define elf_phdr elf64_phdr
-#define elf_note elf64_note
-#define elf_shdr elf64_shdr
-#define elf_sym elf64_sym
+#define elfhdr elf64_hdr
+#define elf_phdr elf64_phdr
+#define elf_note elf64_note
+#define elf_shdr elf64_shdr
+#define elf_sym elf64_sym
 #define elf_rela elf64_rela
 #define elf_word uint64_t
-#define elf_sword int64_t
-#define bswapSZs bswap64s
-#define SZ 64
+#define elf_sword int64_t
+#define bswapSZs bswap64s
+#define SZ 64
 #include "hw/elf_ops.h"
 const char *load_elf_strerror(ssize_t error)
@@ -527,7 +528,7 @@ static void bswap_uboot_header(uboot_image_header_t *hdr)
 }
-#define ZALLOC_ALIGNMENT 16
+#define ZALLOC_ALIGNMENT 16
 static void *zalloc(void *x, unsigned items, unsigned size)
 {
@@ -547,13 +548,13 @@ static void zfree(void *x, void *addr)
 }
-#define HEAD_CRC 2
-#define EXTRA_FIELD 4
-#define ORIG_NAME 8
-#define COMMENT 0x10
-#define RESERVED 0xe0
+#define HEAD_CRC 2
+#define EXTRA_FIELD 4
+#define ORIG_NAME 8
+#define COMMENT 0x10
+#define RESERVED 0xe0
-#define DEFLATED 8
+#define DEFLATED 8
 ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src, size_t srclen)
 {
@@ -857,6 +858,97 @@ ssize_t load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz)
     return bytes;
 }

+/* The PE/COFF MS-DOS stub magic number */
+#define EFI_PE_MSDOS_MAGIC "MZ"
+
+/*
+ * The Linux header magic number for an EFI PE/COFF
+ * image targeting an unspecified architecture.
+ */
+#define EFI_PE_LINUX_MAGIC "\xcd\x23\x82\x81"
+
+/*
+ * Bootable Linux kernel images may be packaged as EFI zboot images, which are
+ * self-decompressing executables when loaded via EFI. The compressed payload
+ * can also be extracted from the image and decompressed by a non-EFI loader.
+ *
+ * The de facto specification for this format is at the following URL:
+ *
+ * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/firmware/efi/libstub/zboot-header.S
+ *
+ * This definition is based on Linux upstream commit 29636a5ce87beba.
+ */
+struct linux_efi_zboot_header {
+    uint8_t msdos_magic[2];     /* PE/COFF 'MZ' magic number */
+    uint8_t reserved0[2];
+    uint8_t zimg[4];            /* "zimg" for Linux EFI zboot images */
+    uint32_t payload_offset;    /* LE offset to compressed payload */
+    uint32_t payload_size;      /* LE size of the compressed payload */
+    uint8_t reserved1[8];
+    char compression_type[32];  /* Compression type, NUL terminated */
+    uint8_t linux_magic[4];     /* Linux header magic */
+    uint32_t pe_header_offset;  /* LE offset to the PE header */
+};
+
+/*
+ * Check whether *buffer points to a Linux EFI zboot image in memory.
+ *
+ * If it does, attempt to decompress it to a new buffer, and free the old one.
+ * If any of this fails, return an error to the caller.
+ *
+ * If the image is not a Linux EFI zboot image, do nothing and return success.
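+ * A caller is expected to use it roughly as follows. This is a minimal
+ * sketch; the surrounding load path and error handling are illustrative
+ * assumptions, not part of this function:
+ *
+ *    uint8_t *kernel = ...;    image bytes obtained by an earlier load
+ *    int size = ...;           their length
+ *
+ *    if (unpack_efi_zboot_image(&kernel, &size) < 0) {
+ *        error_report("kernel is a corrupt EFI zboot image");
+ *        exit(1);
+ *    }
+ *
+ * On success, the buffer and size are updated to describe the decompressed
+ * payload; for non-zboot images both are left untouched.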
+ */
+ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size)
+{
+    const struct linux_efi_zboot_header *header;
+    uint8_t *data = NULL;
+    int ploff, plsize;
+    ssize_t bytes;
+
+    /* ignore if this is too small to be an EFI zboot image */
+    if (*size < sizeof(*header)) {
+        return 0;
+    }
+
+    header = (struct linux_efi_zboot_header *)*buffer;
+
+    /* ignore if this is not a Linux EFI zboot image */
+    if (memcmp(&header->msdos_magic, EFI_PE_MSDOS_MAGIC, 2) != 0 ||
+        memcmp(&header->zimg, "zimg", 4) != 0 ||
+        memcmp(&header->linux_magic, EFI_PE_LINUX_MAGIC, 4) != 0) {
+        return 0;
+    }
+
+    if (strcmp(header->compression_type, "gzip") != 0) {
+        fprintf(stderr,
+                "unable to handle EFI zboot image with \"%.*s\" compression\n",
+                (int)sizeof(header->compression_type) - 1,
+                header->compression_type);
+        return -1;
+    }
+
+    ploff = ldl_le_p(&header->payload_offset);
+    plsize = ldl_le_p(&header->payload_size);
+
+    if (ploff < 0 || plsize < 0 || ploff + plsize > *size) {
+        fprintf(stderr, "unable to handle corrupt EFI zboot image\n");
+        return -1;
+    }
+
+    data = g_malloc(LOAD_IMAGE_MAX_GUNZIP_BYTES);
+    bytes = gunzip(data, LOAD_IMAGE_MAX_GUNZIP_BYTES, *buffer + ploff, plsize);
+    if (bytes < 0) {
+        fprintf(stderr, "failed to decompress EFI zboot image\n");
+        g_free(data);
+        return -1;
+    }
+
+    g_free(*buffer);
+    *buffer = g_realloc(data, bytes);
+    *size = bytes;
+    return bytes;
+}
+
 /*
  * Functions for reboot-persistent memory regions.
  * - used for vga bios and option roms.
diff --git a/hw/core/machine-qmp-cmds.c b/hw/core/machine-qmp-cmds.c
index 2d904747c0..3860a50c3b 100644
--- a/hw/core/machine-qmp-cmds.c
+++ b/hw/core/machine-qmp-cmds.c
@@ -28,18 +28,6 @@
 #include "sysemu/runstate.h"
 #include "sysemu/sysemu.h"
-static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
-{
-#ifdef TARGET_S390X
-    S390CPU *s390_cpu = S390_CPU(cpu);
-    CPUS390XState *env = &s390_cpu->env;
-
-    info->cpu_state = env->cpu_state;
-#else
-    abort();
-#endif
-}
-
 /*
  * fast means: we NEVER interrupt vCPU threads to retrieve
  * information from KVM.
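  * A target that wants arch-specific data included in query-cpus-fast is
  * expected to provide the CPUClass::query_cpu_fast hook used below. As a
  * minimal sketch, an s390x implementation mirroring the helper deleted
  * above could look like this (the hook registration itself lives in
  * target code and is not shown in this hunk):
  *
  *    static void s390_query_cpu_fast(CPUState *cpu, CpuInfoFast *value)
  *    {
  *        value->u.s390x.cpu_state = S390_CPU(cpu)->env.cpu_state;
  *    }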
@@ -49,7 +37,7 @@ CpuInfoFastList *qmp_query_cpus_fast(Error **errp) MachineState *ms = MACHINE(qdev_get_machine()); MachineClass *mc = MACHINE_GET_CLASS(ms); CpuInfoFastList *head = NULL, **tail = &head; - SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME, + SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, target_name(), -1, &error_abort); CPUState *cpu; @@ -68,8 +56,8 @@ CpuInfoFastList *qmp_query_cpus_fast(Error **errp) } value->target = target; - if (target == SYS_EMU_TARGET_S390X) { - cpustate_to_cpuinfo_s390(&value->u.s390x, cpu); + if (cpu->cc->query_cpu_fast) { + cpu->cc->query_cpu_fast(cpu, value); } QAPI_LIST_APPEND(tail, value); @@ -102,6 +90,7 @@ MachineInfoList *qmp_query_machines(Error **errp) info->hotpluggable_cpus = mc->has_hotpluggable_cpus; info->numa_mem_supported = mc->numa_mem_supported; info->deprecated = !!mc->deprecation_reason; + info->acpi = !!object_class_property_find(OBJECT_CLASS(mc), "acpi"); if (mc->default_cpu_type) { info->default_cpu_type = g_strdup(mc->default_cpu_type); } @@ -128,7 +117,7 @@ TargetInfo *qmp_query_target(Error **errp) { TargetInfo *info = g_malloc0(sizeof(*info)); - info->arch = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME, -1, + info->arch = qapi_enum_parse(&SysEmuTarget_lookup, target_name(), -1, &error_abort); return info; diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c index c3dab007da..89fe0cda42 100644 --- a/hw/core/machine-smp.c +++ b/hw/core/machine-smp.c @@ -20,6 +20,8 @@ #include "qemu/osdep.h" #include "hw/boards.h" #include "qapi/error.h" +#include "qemu/error-report.h" + /* * Report information of a machine's supported CPU topology hierarchy. diff --git a/hw/core/machine.c b/hw/core/machine.c index f29e700ee4..1000406211 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -39,14 +39,24 @@ #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-pci.h" +GlobalProperty hw_compat_8_0[] = { + { "migration", "multifd-flush-after-each-section", "on"}, +}; +const size_t hw_compat_8_0_len = G_N_ELEMENTS(hw_compat_8_0); + GlobalProperty hw_compat_7_2[] = { + { "e1000e", "migrate-timadj", "off" }, { "virtio-mem", "x-early-migration", "false" }, + { "migration", "x-preempt-pre-7-2", "true" }, + { TYPE_PCI_DEVICE, "x-pcie-err-unc-mask", "off" }, }; const size_t hw_compat_7_2_len = G_N_ELEMENTS(hw_compat_7_2); GlobalProperty hw_compat_7_1[] = { { "virtio-device", "queue_reset", "false" }, { "virtio-rng-pci", "vectors", "0" }, + { "virtio-rng-pci-transitional", "vectors", "0" }, + { "virtio-rng-pci-non-transitional", "vectors", "0" }, }; const size_t hw_compat_7_1_len = G_N_ELEMENTS(hw_compat_7_1); @@ -1328,6 +1338,14 @@ void machine_run_board_init(MachineState *machine, const char *mem_path, Error * } } else if (machine_class->default_ram_id && machine->ram_size && numa_uses_legacy_mem()) { + if (object_property_find(object_get_objects_root(), + machine_class->default_ram_id)) { + error_setg(errp, "object name '%s' is reserved for the default" + " RAM backend, it can't be used for any other purposes." 
+ " Change the object's 'id' to something else", + machine_class->default_ram_id); + return; + } if (!create_default_memdev(current_machine, mem_path, errp)) { return; } diff --git a/hw/core/meson.build b/hw/core/meson.build index 7a4d02b6c0..959bc924d4 100644 --- a/hw/core/meson.build +++ b/hw/core/meson.build @@ -41,17 +41,14 @@ softmmu_ss.add(files( 'gpio.c', 'loader.c', 'machine-hmp-cmds.c', + 'machine-qmp-cmds.c', 'machine.c', 'nmi.c', 'null-machine.c', + 'numa.c', 'qdev-fw.c', 'qdev-properties-system.c', 'sysbus.c', 'vm-change-state-handler.c', 'clock-vmstate.c', )) - -specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: files( - 'machine-qmp-cmds.c', - 'numa.c', -)) diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c index eb5ba1aff7..e03165febf 100644 --- a/hw/core/ptimer.c +++ b/hw/core/ptimer.c @@ -10,7 +10,7 @@ #include "hw/ptimer.h" #include "migration/vmstate.h" #include "qemu/host-utils.h" -#include "sysemu/replay.h" +#include "exec/replay-core.h" #include "sysemu/cpu-timers.h" #include "sysemu/qtest.h" #include "block/aio.h" diff --git a/hw/core/qdev.c b/hw/core/qdev.c index d759c4602c..43d863b0c5 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -330,7 +330,7 @@ bool qdev_machine_modified(void) return qdev_hot_added || qdev_hot_removed; } -BusState *qdev_get_parent_bus(DeviceState *dev) +BusState *qdev_get_parent_bus(const DeviceState *dev) { return dev->parent_bus; } diff --git a/hw/cxl/cxl-cdat.c b/hw/cxl/cxl-cdat.c index 137abd0992..d246d6885b 100644 --- a/hw/cxl/cxl-cdat.c +++ b/hw/cxl/cxl-cdat.c @@ -108,31 +108,21 @@ static void ct3_build_cdat(CDATObject *cdat, Error **errp) static void ct3_load_cdat(CDATObject *cdat, Error **errp) { g_autofree CDATEntry *cdat_st = NULL; + g_autofree char *buf = NULL; uint8_t sum = 0; int num_ent; - int i = 0, ent = 1, file_size = 0; + int i = 0, ent = 1; + gsize file_size = 0; CDATSubHeader *hdr; - FILE *fp = NULL; + GError *error = NULL; /* Read CDAT file and create its cache */ - fp = fopen(cdat->filename, "r"); - if (!fp) { - error_setg(errp, "CDAT: Unable to open file"); + if (!g_file_get_contents(cdat->filename, (gchar **)&buf, + &file_size, &error)) { + error_setg(errp, "CDAT: File read failed: %s", error->message); + g_error_free(error); return; } - - fseek(fp, 0, SEEK_END); - file_size = ftell(fp); - fseek(fp, 0, SEEK_SET); - cdat->buf = g_malloc0(file_size); - - if (fread(cdat->buf, file_size, 1, fp) == 0) { - error_setg(errp, "CDAT: File read failed"); - return; - } - - fclose(fp); - if (file_size < sizeof(CDATTableHeader)) { error_setg(errp, "CDAT: File too short"); return; @@ -140,9 +130,17 @@ static void ct3_load_cdat(CDATObject *cdat, Error **errp) i = sizeof(CDATTableHeader); num_ent = 1; while (i < file_size) { - hdr = (CDATSubHeader *)(cdat->buf + i); + hdr = (CDATSubHeader *)(buf + i); + if (i + sizeof(CDATSubHeader) > file_size) { + error_setg(errp, "CDAT: Truncated table"); + return; + } cdat_len_check(hdr, errp); i += hdr->length; + if (i > file_size) { + error_setg(errp, "CDAT: Truncated table"); + return; + } num_ent++; } if (i != file_size) { @@ -150,33 +148,26 @@ static void ct3_load_cdat(CDATObject *cdat, Error **errp) return; } - cdat_st = g_malloc0(sizeof(*cdat_st) * num_ent); - if (!cdat_st) { - error_setg(errp, "CDAT: Failed to allocate entry array"); - return; - } + cdat_st = g_new0(CDATEntry, num_ent); /* Set CDAT header, Entry = 0 */ - cdat_st[0].base = cdat->buf; + cdat_st[0].base = buf; cdat_st[0].length = sizeof(CDATTableHeader); i = 0; while (i < cdat_st[0].length) { - sum += cdat->buf[i++]; + sum += 
buf[i++]; } /* Read CDAT structures */ while (i < file_size) { - hdr = (CDATSubHeader *)(cdat->buf + i); - cdat_len_check(hdr, errp); - + hdr = (CDATSubHeader *)(buf + i); cdat_st[ent].base = hdr; cdat_st[ent].length = hdr->length; - while (cdat->buf + i < - (uint8_t *)cdat_st[ent].base + cdat_st[ent].length) { + while (buf + i < (char *)cdat_st[ent].base + cdat_st[ent].length) { assert(i < file_size); - sum += cdat->buf[i++]; + sum += buf[i++]; } ent++; @@ -187,6 +178,7 @@ static void ct3_load_cdat(CDATObject *cdat, Error **errp) } cdat->entry_len = num_ent; cdat->entry = g_steal_pointer(&cdat_st); + cdat->buf = g_steal_pointer(&buf); } void cxl_doe_cdat_init(CXLComponentState *cxl_cstate, Error **errp) @@ -218,7 +210,5 @@ void cxl_doe_cdat_release(CXLComponentState *cxl_cstate) cdat->free_cdat_table(cdat->built_buf, cdat->built_buf_len, cdat->private); } - if (cdat->buf) { - free(cdat->buf); - } + g_free(cdat->buf); } diff --git a/hw/cxl/cxl-component-utils.c b/hw/cxl/cxl-component-utils.c index 3edd303a33..378f1082ce 100644 --- a/hw/cxl/cxl-component-utils.c +++ b/hw/cxl/cxl-component-utils.c @@ -38,23 +38,25 @@ static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset, ComponentRegisters *cregs = &cxl_cstate->crb; uint32_t *cache_mem = cregs->cache_mem_registers; bool should_commit = false; + bool should_uncommit = false; switch (offset) { case A_CXL_HDM_DECODER0_CTRL: should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT); + should_uncommit = !should_commit; break; default: break; } - memory_region_transaction_begin(); - stl_le_p((uint8_t *)cache_mem + offset, value); if (should_commit) { - ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0); - ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0); - ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1); + value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0); + value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 1); + } else if (should_uncommit) { + value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0); + value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 0); } - memory_region_transaction_commit(); + stl_le_p((uint8_t *)cache_mem + offset, value); } static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value, @@ -141,17 +143,19 @@ static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk) * Error status is RW1C but given bits are not yet set, it can * be handled as RO. 
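     * (RW1C: software clears a status bit by writing 1 to it, while a 0
     * write leaves it unchanged; since no error bits have been set yet,
     * reads return 0 either way, so treating the register as RO is safe.)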
*/ - reg_state[R_CXL_RAS_UNC_ERR_STATUS] = 0; + stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, 0); + stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_STATUS, 0x1cfff); /* Bits 12-13 and 17-31 reserved in CXL 2.0 */ - reg_state[R_CXL_RAS_UNC_ERR_MASK] = 0x1cfff; - write_msk[R_CXL_RAS_UNC_ERR_MASK] = 0x1cfff; - reg_state[R_CXL_RAS_UNC_ERR_SEVERITY] = 0x1cfff; - write_msk[R_CXL_RAS_UNC_ERR_SEVERITY] = 0x1cfff; - reg_state[R_CXL_RAS_COR_ERR_STATUS] = 0; - reg_state[R_CXL_RAS_COR_ERR_MASK] = 0x7f; - write_msk[R_CXL_RAS_COR_ERR_MASK] = 0x7f; + stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff); + stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff); + stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff); + stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff); + stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, 0); + stl_le_p(write_msk + R_CXL_RAS_COR_ERR_STATUS, 0x7f); + stl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK, 0x7f); + stl_le_p(write_msk + R_CXL_RAS_COR_ERR_MASK, 0x7f); /* CXL switches and devices must set */ - reg_state[R_CXL_RAS_ERR_CAP_CTRL] = 0x00; + stl_le_p(reg_state + R_CXL_RAS_ERR_CAP_CTRL, 0x200); } static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk, diff --git a/hw/cxl/cxl-device-utils.c b/hw/cxl/cxl-device-utils.c index 83ce7a8270..86e1cea8ce 100644 --- a/hw/cxl/cxl-device-utils.c +++ b/hw/cxl/cxl-device-utils.c @@ -267,5 +267,20 @@ void cxl_device_register_init_common(CXLDeviceState *cxl_dstate) cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000); memdev_reg_init_common(cxl_dstate); - assert(cxl_initialize_mailbox(cxl_dstate) == 0); + cxl_initialize_mailbox(cxl_dstate); +} + +uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate) +{ + uint64_t time, delta; + uint64_t final_time = 0; + + if (cxl_dstate->timestamp.set) { + /* Find the delta from the last time the host set the time. 
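+         * In other words, with all values in ns (a restatement of the
+         * code that follows, not additional behaviour):
+         *
+         *    final_time = timestamp.host_set
+         *                 + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
+         *                    - timestamp.last_set)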
*/ + time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + delta = time - cxl_dstate->timestamp.last_set; + final_time = cxl_dstate->timestamp.host_set + delta; + } + + return final_time; } diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c index 3c1ec8732a..034c7805b3 100644 --- a/hw/cxl/cxl-host.c +++ b/hw/cxl/cxl-host.c @@ -84,7 +84,7 @@ void cxl_fmws_link_targets(CXLState *cxl_state, Error **errp) bool ambig; o = object_resolve_path_type(fw->targets[i], - TYPE_PXB_CXL_DEVICE, + TYPE_PXB_CXL_DEV, &ambig); if (!o) { error_setg(errp, "Could not resolve CXLFM target %s", @@ -141,26 +141,33 @@ static PCIDevice *cxl_cfmws_find_device(CXLFixedWindow *fw, hwaddr addr) addr += fw->base; rb_index = (addr / cxl_decode_ig(fw->enc_int_gran)) % fw->num_targets; - hb = PCI_HOST_BRIDGE(fw->target_hbs[rb_index]->cxl.cxl_host_bridge); + hb = PCI_HOST_BRIDGE(fw->target_hbs[rb_index]->cxl_host_bridge); if (!hb || !hb->bus || !pci_bus_is_cxl(hb->bus)) { return NULL; } - hb_cstate = cxl_get_hb_cstate(hb); - if (!hb_cstate) { - return NULL; - } + if (cxl_get_hb_passthrough(hb)) { + rp = pcie_find_port_first(hb->bus); + if (!rp) { + return NULL; + } + } else { + hb_cstate = cxl_get_hb_cstate(hb); + if (!hb_cstate) { + return NULL; + } - cache_mem = hb_cstate->crb.cache_mem_registers; + cache_mem = hb_cstate->crb.cache_mem_registers; - target_found = cxl_hdm_find_target(cache_mem, addr, &target); - if (!target_found) { - return NULL; - } + target_found = cxl_hdm_find_target(cache_mem, addr, &target); + if (!target_found) { + return NULL; + } - rp = pcie_find_port_by_pn(hb->bus, target); - if (!rp) { - return NULL; + rp = pcie_find_port_by_pn(hb->bus, target); + if (!rp) { + return NULL; + } } d = pci_bridge_get_sec_bus(PCI_BRIDGE(rp))->devices[0]; diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index bc1bb18844..702e16ca20 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -12,15 +12,18 @@ #include "hw/pci/pci.h" #include "qemu/cutils.h" #include "qemu/log.h" +#include "qemu/units.h" #include "qemu/uuid.h" +#define CXL_CAPACITY_MULTIPLIER (256 * MiB) + /* * How to add a new command, example. The command set FOO, with cmd BAR. * 1. Add the command set and cmd to the enum. * FOO = 0x7f, * #define BAR 0 * 2. Implement the handler - * static ret_code cmd_foo_bar(struct cxl_cmd *cmd, + * static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd, * CXLDeviceState *cxl_dstate, uint16_t *len) * 3. 
Add the command to the cxl_cmd_set[][] * [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y }, @@ -87,10 +90,10 @@ typedef enum { CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15, CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16, CXL_MBOX_MAX = 0x17 -} ret_code; +} CXLRetCode; struct cxl_cmd; -typedef ret_code (*opcode_handler)(struct cxl_cmd *cmd, +typedef CXLRetCode (*opcode_handler)(struct cxl_cmd *cmd, CXLDeviceState *cxl_dstate, uint16_t *len); struct cxl_cmd { const char *name; @@ -102,16 +105,16 @@ struct cxl_cmd { #define DEFINE_MAILBOX_HANDLER_ZEROED(name, size) \ uint16_t __zero##name = size; \ - static ret_code cmd_##name(struct cxl_cmd *cmd, \ - CXLDeviceState *cxl_dstate, uint16_t *len) \ + static CXLRetCode cmd_##name(struct cxl_cmd *cmd, \ + CXLDeviceState *cxl_dstate, uint16_t *len) \ { \ *len = __zero##name; \ memset(cmd->payload, 0, *len); \ return CXL_MBOX_SUCCESS; \ } #define DEFINE_MAILBOX_HANDLER_NOP(name) \ - static ret_code cmd_##name(struct cxl_cmd *cmd, \ - CXLDeviceState *cxl_dstate, uint16_t *len) \ + static CXLRetCode cmd_##name(struct cxl_cmd *cmd, \ + CXLDeviceState *cxl_dstate, uint16_t *len) \ { \ return CXL_MBOX_SUCCESS; \ } @@ -122,9 +125,9 @@ DEFINE_MAILBOX_HANDLER_ZEROED(events_get_interrupt_policy, 4); DEFINE_MAILBOX_HANDLER_NOP(events_set_interrupt_policy); /* 8.2.9.2.1 */ -static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_firmware_update_get_info(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { uint8_t slots_supported; @@ -138,7 +141,8 @@ static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd, } QEMU_PACKED *fw_info; QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50); - if (cxl_dstate->pmem_size < (256 << 20)) { + if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) || + (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER)) { return CXL_MBOX_INTERNAL_ERROR; } @@ -155,21 +159,12 @@ static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd, } /* 8.2.9.3.1 */ -static ret_code cmd_timestamp_get(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_timestamp_get(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { - uint64_t time, delta; - uint64_t final_time = 0; + uint64_t final_time = cxl_device_get_timestamp(cxl_dstate); - if (cxl_dstate->timestamp.set) { - /* First find the delta from the last time the host set the time. 
*/ - time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - delta = time - cxl_dstate->timestamp.last_set; - final_time = cxl_dstate->timestamp.host_set + delta; - } - - /* Then adjust the actual time */ stq_le_p(cmd->payload, final_time); *len = 8; @@ -177,7 +172,7 @@ static ret_code cmd_timestamp_get(struct cxl_cmd *cmd, } /* 8.2.9.3.2 */ -static ret_code cmd_timestamp_set(struct cxl_cmd *cmd, +static CXLRetCode cmd_timestamp_set(struct cxl_cmd *cmd, CXLDeviceState *cxl_dstate, uint16_t *len) { @@ -190,12 +185,16 @@ static ret_code cmd_timestamp_set(struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -static QemuUUID cel_uuid; +/* CXL 3.0 8.2.9.5.2.1 Command Effects Log (CEL) */ +static const QemuUUID cel_uuid = { + .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, + 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17) +}; /* 8.2.9.4.1 */ -static ret_code cmd_logs_get_supported(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_logs_get_supported(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { uint16_t entries; @@ -216,9 +215,9 @@ static ret_code cmd_logs_get_supported(struct cxl_cmd *cmd, } /* 8.2.9.4.2 */ -static ret_code cmd_logs_get_log(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_logs_get_log(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { QemuUUID uuid; @@ -257,9 +256,9 @@ static ret_code cmd_logs_get_log(struct cxl_cmd *cmd, } /* 8.2.9.5.1.1 */ -static ret_code cmd_identify_memory_device(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_identify_memory_device(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { char fw_revision[0x10]; @@ -281,29 +280,29 @@ static ret_code cmd_identify_memory_device(struct cxl_cmd *cmd, CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); - uint64_t size = cxl_dstate->pmem_size; - if (!QEMU_IS_ALIGNED(size, 256 << 20)) { + if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || + (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) { return CXL_MBOX_INTERNAL_ERROR; } id = (void *)cmd->payload; memset(id, 0, sizeof(*id)); - /* PMEM only */ snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0); - id->total_capacity = size / (256 << 20); - id->persistent_capacity = size / (256 << 20); - id->lsa_size = cvc->get_lsa_size(ct3d); + stq_le_p(&id->total_capacity, cxl_dstate->mem_size / CXL_CAPACITY_MULTIPLIER); + stq_le_p(&id->persistent_capacity, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); + stq_le_p(&id->volatile_capacity, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); + stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d)); *len = sizeof(*id); return CXL_MBOX_SUCCESS; } -static ret_code cmd_ccls_get_partition_info(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_ccls_get_partition_info(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { uint64_t active_vmem; @@ -312,25 +311,28 @@ static ret_code cmd_ccls_get_partition_info(struct cxl_cmd *cmd, uint64_t next_pmem; } QEMU_PACKED *part_info = (void *)cmd->payload; QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20); - uint64_t size = cxl_dstate->pmem_size; - if (!QEMU_IS_ALIGNED(size, 256 << 20)) { + if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || + (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) { return 
CXL_MBOX_INTERNAL_ERROR; } - /* PMEM only */ - part_info->active_vmem = 0; - part_info->next_vmem = 0; - part_info->active_pmem = size / (256 << 20); - part_info->next_pmem = 0; + stq_le_p(&part_info->active_vmem, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); + /* + * When both next_vmem and next_pmem are 0, there is no pending change to + * partitioning. + */ + stq_le_p(&part_info->next_vmem, 0); + stq_le_p(&part_info->active_pmem, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); + stq_le_p(&part_info->next_pmem, 0); *len = sizeof(*part_info); return CXL_MBOX_SUCCESS; } -static ret_code cmd_ccls_get_lsa(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_ccls_get_lsa(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { uint32_t offset; @@ -353,9 +355,9 @@ static ret_code cmd_ccls_get_lsa(struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -static ret_code cmd_ccls_set_lsa(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_ccls_set_lsa(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct set_lsa_pl { uint32_t offset; @@ -455,11 +457,8 @@ void cxl_process_mailbox(CXLDeviceState *cxl_dstate) DOORBELL, 0); } -int cxl_initialize_mailbox(CXLDeviceState *cxl_dstate) +void cxl_initialize_mailbox(CXLDeviceState *cxl_dstate) { - /* CXL 2.0: Table 169 Get Supported Logs Log Entry */ - const char *cel_uuidstr = "0da9c0b5-bf41-4b78-8f79-96b1623b3f17"; - for (int set = 0; set < 256; set++) { for (int cmd = 0; cmd < 256; cmd++) { if (cxl_cmd_set[set][cmd].handler) { @@ -473,6 +472,4 @@ int cxl_initialize_mailbox(CXLDeviceState *cxl_dstate) } } } - - return qemu_uuid_parse(cel_uuidstr, &cel_uuid); } diff --git a/hw/display/meson.build b/hw/display/meson.build index f470179122..17165bd536 100644 --- a/hw/display/meson.build +++ b/hw/display/meson.build @@ -14,7 +14,7 @@ softmmu_ss.add(when: 'CONFIG_PL110', if_true: files('pl110.c')) softmmu_ss.add(when: 'CONFIG_SII9022', if_true: files('sii9022.c')) softmmu_ss.add(when: 'CONFIG_SSD0303', if_true: files('ssd0303.c')) softmmu_ss.add(when: 'CONFIG_SSD0323', if_true: files('ssd0323.c')) -softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xenfb.c')) +softmmu_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xenfb.c')) softmmu_ss.add(when: 'CONFIG_VGA_PCI', if_true: files('vga-pci.c')) softmmu_ss.add(when: 'CONFIG_VGA_ISA', if_true: files('vga-isa.c')) @@ -36,7 +36,7 @@ softmmu_ss.add(when: 'CONFIG_CG3', if_true: files('cg3.c')) softmmu_ss.add(when: 'CONFIG_MACFB', if_true: files('macfb.c')) softmmu_ss.add(when: 'CONFIG_NEXTCUBE', if_true: files('next-fb.c')) -specific_ss.add(when: 'CONFIG_VGA', if_true: files('vga.c')) +softmmu_ss.add(when: 'CONFIG_VGA', if_true: files('vga.c')) if (config_all_devices.has_key('CONFIG_VGA_CIRRUS') or config_all_devices.has_key('CONFIG_VGA_PCI') or diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c index fcfd40c3ac..ec99ec887a 100644 --- a/hw/display/qxl-render.c +++ b/hw/display/qxl-render.c @@ -290,7 +290,7 @@ static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor, return c; fail: - cursor_put(c); + cursor_unref(c); return NULL; } @@ -336,7 +336,7 @@ int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext) } qemu_mutex_lock(&qxl->ssd.lock); if (qxl->ssd.cursor) { - cursor_put(qxl->ssd.cursor); + cursor_unref(qxl->ssd.cursor); } qxl->ssd.cursor = c; qxl->ssd.mouse_x = cmd->u.set.position.x; diff --git a/hw/display/qxl.c b/hw/display/qxl.c index ec712d3ca2..f1c0eb7dfc 100644 
--- a/hw/display/qxl.c +++ b/hw/display/qxl.c @@ -299,7 +299,7 @@ void qxl_spice_reset_cursor(PCIQXLDevice *qxl) qxl->guest_cursor = 0; qemu_mutex_unlock(&qxl->track_lock); if (qxl->ssd.cursor) { - cursor_put(qxl->ssd.cursor); + cursor_unref(qxl->ssd.cursor); } qxl->ssd.cursor = cursor_builtin_hidden(); } @@ -2201,11 +2201,14 @@ static void qxl_realize_common(PCIQXLDevice *qxl, Error **errp) qemu_add_vm_change_state_handler(qxl_vm_change_state_handler, qxl); - qxl->update_irq = qemu_bh_new(qxl_update_irq_bh, qxl); + qxl->update_irq = qemu_bh_new_guarded(qxl_update_irq_bh, qxl, + &DEVICE(qxl)->mem_reentrancy_guard); qxl_reset_state(qxl); - qxl->update_area_bh = qemu_bh_new(qxl_render_update_area_bh, qxl); - qxl->ssd.cursor_bh = qemu_bh_new(qemu_spice_cursor_refresh_bh, &qxl->ssd); + qxl->update_area_bh = qemu_bh_new_guarded(qxl_render_update_area_bh, qxl, + &DEVICE(qxl)->mem_reentrancy_guard); + qxl->ssd.cursor_bh = qemu_bh_new_guarded(qemu_spice_cursor_refresh_bh, &qxl->ssd, + &DEVICE(qxl)->mem_reentrancy_guard); } static void qxl_realize_primary(PCIDevice *dev, Error **errp) diff --git a/hw/display/sm501.c b/hw/display/sm501.c index e1d0591d36..0eecd00701 100644 --- a/hw/display/sm501.c +++ b/hw/display/sm501.c @@ -28,6 +28,7 @@ #include "qapi/error.h" #include "qemu/log.h" #include "qemu/module.h" +#include "hw/usb/hcd-ohci.h" #include "hw/char/serial.h" #include "ui/console.h" #include "hw/sysbus.h" @@ -464,6 +465,7 @@ typedef struct SM501State { uint32_t last_width; uint32_t last_height; bool do_full_update; /* perform a full update next time */ + uint8_t use_pixman; I2CBus *i2c_bus; /* mmio registers */ @@ -691,7 +693,7 @@ static void sm501_2d_operation(SM501State *s) unsigned int dst_pitch = (s->twoD_pitch >> 16) & 0x1FFF; int crt = (s->dc_crt_control & SM501_DC_CRT_CONTROL_SEL) ? 1 : 0; int fb_len = get_width(s, crt) * get_height(s, crt) * get_bpp(s, crt); - bool overlap = false; + bool overlap = false, fallback = false; if ((s->twoD_stretch >> 16) & 0xF) { qemu_log_mask(LOG_UNIMP, "sm501: only XY addressing is supported.\n"); @@ -753,7 +755,7 @@ static void sm501_2d_operation(SM501State *s) } if ((rop_mode && rop == 0x5) || (!rop_mode && rop == 0x55)) { - /* Invert dest, is there a way to do this with pixman? */ + /* DSTINVERT, is there a way to do this with pixman? */ unsigned int x, y, i; uint8_t *d = s->local_mem + dst_base; @@ -763,6 +765,34 @@ static void sm501_2d_operation(SM501State *s) stn_he_p(&d[i], bypp, ~ldn_he_p(&d[i], bypp)); } } + } else if (!rop_mode && rop == 0x99) { + /* DSxn, is there a way to do this with pixman? */ + unsigned int x, y, i, j; + uint8_t *sp = s->local_mem + src_base; + uint8_t *d = s->local_mem + dst_base; + + for (y = 0; y < height; y++) { + i = (dst_x + (dst_y + y) * dst_pitch) * bypp; + j = (src_x + (src_y + y) * src_pitch) * bypp; + for (x = 0; x < width; x++, i += bypp, j += bypp) { + stn_he_p(&d[i], bypp, + ~(ldn_he_p(&sp[j], bypp) ^ ldn_he_p(&d[i], bypp))); + } + } + } else if (!rop_mode && rop == 0xee) { + /* SRCPAINT, is there a way to do this with pixman? 
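+         * For reference, the three raster ops open-coded in this function,
+         * with D the destination and S the source value:
+         *
+         *    DSTINVERT:  D = ~D
+         *    DSxn:       D = ~(S ^ D)
+         *    SRCPAINT:   D = S | D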
*/ + unsigned int x, y, i, j; + uint8_t *sp = s->local_mem + src_base; + uint8_t *d = s->local_mem + dst_base; + + for (y = 0; y < height; y++) { + i = (dst_x + (dst_y + y) * dst_pitch) * bypp; + j = (src_x + (src_y + y) * src_pitch) * bypp; + for (x = 0; x < width; x++, i += bypp, j += bypp) { + stn_he_p(&d[i], bypp, + ldn_he_p(&sp[j], bypp) | ldn_he_p(&d[i], bypp)); + } + } } else { /* Do copy src for unimplemented ops, better than unpainted area */ if ((rop_mode && (rop != 0xc || rop2_source_is_pattern)) || @@ -798,7 +828,7 @@ static void sm501_2d_operation(SM501State *s) de = db + (width + (height - 1) * dst_pitch) * bypp; overlap = (db < se && sb < de); } - if (overlap) { + if (overlap && (s->use_pixman & BIT(2))) { /* pixman can't do reverse blit: copy via temporary */ int tmp_stride = DIV_ROUND_UP(width * bypp, sizeof(uint32_t)); uint32_t *tmp = tmp_buf; @@ -806,25 +836,50 @@ static void sm501_2d_operation(SM501State *s) if (tmp_stride * sizeof(uint32_t) * height > sizeof(tmp_buf)) { tmp = g_malloc(tmp_stride * sizeof(uint32_t) * height); } - pixman_blt((uint32_t *)&s->local_mem[src_base], tmp, - src_pitch * bypp / sizeof(uint32_t), - tmp_stride, 8 * bypp, 8 * bypp, - src_x, src_y, 0, 0, width, height); - pixman_blt(tmp, (uint32_t *)&s->local_mem[dst_base], - tmp_stride, - dst_pitch * bypp / sizeof(uint32_t), - 8 * bypp, 8 * bypp, - 0, 0, dst_x, dst_y, width, height); + fallback = !pixman_blt((uint32_t *)&s->local_mem[src_base], + tmp, + src_pitch * bypp / sizeof(uint32_t), + tmp_stride, + 8 * bypp, 8 * bypp, + src_x, src_y, 0, 0, width, height); + if (!fallback) { + fallback = !pixman_blt(tmp, + (uint32_t *)&s->local_mem[dst_base], + tmp_stride, + dst_pitch * bypp / sizeof(uint32_t), + 8 * bypp, 8 * bypp, + 0, 0, dst_x, dst_y, width, height); + } if (tmp != tmp_buf) { g_free(tmp); } + } else if (!overlap && (s->use_pixman & BIT(1))) { + fallback = !pixman_blt((uint32_t *)&s->local_mem[src_base], + (uint32_t *)&s->local_mem[dst_base], + src_pitch * bypp / sizeof(uint32_t), + dst_pitch * bypp / sizeof(uint32_t), + 8 * bypp, 8 * bypp, src_x, src_y, + dst_x, dst_y, width, height); } else { - pixman_blt((uint32_t *)&s->local_mem[src_base], - (uint32_t *)&s->local_mem[dst_base], - src_pitch * bypp / sizeof(uint32_t), - dst_pitch * bypp / sizeof(uint32_t), - 8 * bypp, 8 * bypp, - src_x, src_y, dst_x, dst_y, width, height); + fallback = true; + } + if (fallback) { + uint8_t *sp = s->local_mem + src_base; + uint8_t *d = s->local_mem + dst_base; + unsigned int y, i, j; + for (y = 0; y < height; y++) { + if (overlap) { /* overlap also means rtl */ + i = (dst_y + height - 1 - y) * dst_pitch; + i = (dst_x + i) * bypp; + j = (src_y + height - 1 - y) * src_pitch; + j = (src_x + j) * bypp; + memmove(&d[i], &sp[j], width * bypp); + } else { + i = (dst_x + (dst_y + y) * dst_pitch) * bypp; + j = (src_x + (src_y + y) * src_pitch) * bypp; + memcpy(&d[i], &sp[j], width * bypp); + } + } } } break; @@ -839,13 +894,19 @@ static void sm501_2d_operation(SM501State *s) color = cpu_to_le16(color); } - if (width == 1 && height == 1) { - unsigned int i = (dst_x + dst_y * dst_pitch) * bypp; - stn_he_p(&s->local_mem[dst_base + i], bypp, color); - } else { - pixman_fill((uint32_t *)&s->local_mem[dst_base], - dst_pitch * bypp / sizeof(uint32_t), - 8 * bypp, dst_x, dst_y, width, height, color); + if (!(s->use_pixman & BIT(0)) || (width == 1 && height == 1) || + !pixman_fill((uint32_t *)&s->local_mem[dst_base], + dst_pitch * bypp / sizeof(uint32_t), 8 * bypp, + dst_x, dst_y, width, height, color)) { + /* fallback when 
pixman failed or we don't want to call it */ + uint8_t *d = s->local_mem + dst_base; + unsigned int x, y, i; + for (y = 0; y < height; y++) { + i = (dst_x + (dst_y + y) * dst_pitch) * bypp; + for (x = 0; x < width; x++, i += bypp) { + stn_he_p(&d[i], bypp, color); + } + } } break; } @@ -1943,15 +2004,14 @@ struct SM501SysBusState { /*< public >*/ SM501State state; uint32_t vram_size; - uint32_t base; SerialMM serial; + OHCISysBusState ohci; }; static void sm501_realize_sysbus(DeviceState *dev, Error **errp) { SM501SysBusState *s = SYSBUS_SM501(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - DeviceState *usb_dev; MemoryRegion *mr; sm501_init(&s->state, dev, s->vram_size); @@ -1964,13 +2024,10 @@ static void sm501_realize_sysbus(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->state.mmio_region); /* bridge to usb host emulation module */ - usb_dev = qdev_new("sysbus-ohci"); - qdev_prop_set_uint32(usb_dev, "num-ports", 2); - qdev_prop_set_uint64(usb_dev, "dma-offset", s->base); - sysbus_realize_and_unref(SYS_BUS_DEVICE(usb_dev), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->ohci), &error_fatal); memory_region_add_subregion(&s->state.mmio_region, SM501_USB_HOST, - sysbus_mmio_get_region(SYS_BUS_DEVICE(usb_dev), 0)); - sysbus_pass_irq(sbd, SYS_BUS_DEVICE(usb_dev)); + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->ohci), 0)); + sysbus_pass_irq(sbd, SYS_BUS_DEVICE(&s->ohci)); /* bridge to serial emulation module */ sysbus_realize(SYS_BUS_DEVICE(&s->serial), &error_fatal); @@ -1981,7 +2038,7 @@ static void sm501_realize_sysbus(DeviceState *dev, Error **errp) static Property sm501_sysbus_properties[] = { DEFINE_PROP_UINT32("vram-size", SM501SysBusState, vram_size, 0), - DEFINE_PROP_UINT32("base", SM501SysBusState, base, 0), + DEFINE_PROP_UINT8("x-pixman", SM501SysBusState, state.use_pixman, 7), DEFINE_PROP_END_OF_LIST(), }; @@ -2017,15 +2074,19 @@ static void sm501_sysbus_class_init(ObjectClass *klass, void *data) static void sm501_sysbus_init(Object *o) { SM501SysBusState *sm501 = SYSBUS_SM501(o); + OHCISysBusState *ohci = &sm501->ohci; SerialMM *smm = &sm501->serial; + object_initialize_child(o, "ohci", ohci, TYPE_SYSBUS_OHCI); + object_property_add_alias(o, "dma-offset", OBJECT(ohci), "dma-offset"); + qdev_prop_set_uint32(DEVICE(ohci), "num-ports", 2); + object_initialize_child(o, "serial", smm, TYPE_SERIAL_MM); qdev_set_legacy_instance_id(DEVICE(smm), SM501_UART0, 2); qdev_prop_set_uint8(DEVICE(smm), "regshift", 2); qdev_prop_set_uint8(DEVICE(smm), "endianness", DEVICE_LITTLE_ENDIAN); - object_property_add_alias(o, "chardev", - OBJECT(smm), "chardev"); + object_property_add_alias(o, "chardev", OBJECT(smm), "chardev"); } static const TypeInfo sm501_sysbus_info = { @@ -2065,6 +2126,7 @@ static void sm501_realize_pci(PCIDevice *dev, Error **errp) static Property sm501_pci_properties[] = { DEFINE_PROP_UINT32("vram-size", SM501PCIState, vram_size, 64 * MiB), + DEFINE_PROP_UINT8("x-pixman", SM501PCIState, state.use_pixman, 7), DEFINE_PROP_END_OF_LIST(), }; @@ -2105,11 +2167,18 @@ static void sm501_pci_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_sm501_pci; } +static void sm501_pci_init(Object *o) +{ + object_property_set_description(o, "x-pixman", "Use pixman for: " + "1: fill, 2: blit, 4: overlap blit"); +} + static const TypeInfo sm501_pci_info = { .name = TYPE_PCI_SM501, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(SM501PCIState), .class_init = sm501_pci_class_init, + .instance_init = sm501_pci_init, .interfaces = (InterfaceInfo[]) { { 
INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, diff --git a/hw/display/vga.c b/hw/display/vga.c index 7a5fdff649..37557c3442 100644 --- a/hw/display/vga.c +++ b/hw/display/vga.c @@ -26,7 +26,9 @@ #include "qemu/units.h" #include "sysemu/reset.h" #include "qapi/error.h" +#include "hw/core/cpu.h" #include "hw/display/vga.h" +#include "hw/i386/x86.h" #include "hw/pci/pci.h" #include "vga_int.h" #include "vga_regs.h" @@ -2244,11 +2246,8 @@ bool vga_common_init(VGACommonState *s, Object *obj, Error **errp) * into a device attribute set by the machine/platform to remove * all target endian dependencies from this file. */ -#if TARGET_BIG_ENDIAN - s->default_endian_fb = true; -#else - s->default_endian_fb = false; -#endif + s->default_endian_fb = target_words_bigendian(); + vga_dirty_log_start(s); return true; @@ -2263,11 +2262,15 @@ static const MemoryRegionPortio vga_portio_list[] = { PORTIO_END_OF_LIST(), }; -static const MemoryRegionPortio vbe_portio_list[] = { +static const MemoryRegionPortio vbe_portio_list_x86[] = { { 0, 1, 2, .read = vbe_ioport_read_index, .write = vbe_ioport_write_index }, -# ifdef TARGET_I386 { 1, 1, 2, .read = vbe_ioport_read_data, .write = vbe_ioport_write_data }, -# endif + { 2, 1, 2, .read = vbe_ioport_read_data, .write = vbe_ioport_write_data }, + PORTIO_END_OF_LIST(), +}; + +static const MemoryRegionPortio vbe_portio_list_no_x86[] = { + { 0, 1, 2, .read = vbe_ioport_read_index, .write = vbe_ioport_write_index }, { 2, 1, 2, .read = vbe_ioport_read_data, .write = vbe_ioport_write_data }, PORTIO_END_OF_LIST(), }; @@ -2278,9 +2281,19 @@ MemoryRegion *vga_init_io(VGACommonState *s, Object *obj, const MemoryRegionPortio **vbe_ports) { MemoryRegion *vga_mem; + MachineState *ms = MACHINE(qdev_get_machine()); + + /* + * We unfortunately need two VBE lists since non-x86 machines might + * not be able to do 16-bit accesses at unaligned addresses (0x1cf) + */ + if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) { + *vbe_ports = vbe_portio_list_x86; + } else { + *vbe_ports = vbe_portio_list_no_x86; + } *vga_ports = vga_portio_list; - *vbe_ports = vbe_portio_list; vga_mem = g_malloc(sizeof(*vga_mem)); memory_region_init_io(vga_mem, obj, &vga_mem_ops, s, diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 5e15c79b94..66cddd94d9 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -1289,6 +1289,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, /* load & apply scanout state */ vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1); for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { + /* FIXME: should take scanout.r.{x,y} into account */ scanout = &g->parent_obj.scanout[i]; if (!scanout->resource_id) { continue; @@ -1339,8 +1340,10 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) g->ctrl_vq = virtio_get_queue(vdev, 0); g->cursor_vq = virtio_get_queue(vdev, 1); - g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g); - g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g); + g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g, + &qdev->mem_reentrancy_guard); + g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g, + &qdev->mem_reentrancy_guard); QTAILQ_INIT(&g->reslist); QTAILQ_INIT(&g->cmdq); QTAILQ_INIT(&g->fenceq); diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c index 59ae7f74b8..09591fbd39 100644 --- a/hw/display/vmware_vga.c +++ b/hw/display/vmware_vga.c @@ -550,12 +550,12 @@ static inline void vmsvga_cursor_define(struct vmsvga_state_s *s, default: fprintf(stderr, "%s: unhandled bpp 
%d, using fallback cursor\n", __func__, c->bpp); - cursor_put(qc); + cursor_unref(qc); qc = cursor_builtin_left_ptr(); } dpy_cursor_define(s->vga.con, qc); - cursor_put(qc); + cursor_unref(qc); } #endif diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c index 260eb38a76..0074a9b6f8 100644 --- a/hw/display/xenfb.c +++ b/hw/display/xenfb.c @@ -98,8 +98,9 @@ static int common_bind(struct common *c) if (xenstore_read_fe_int(&c->xendev, "event-channel", &c->xendev.remote_port) == -1) return -1; - c->page = xenforeignmemory_map(xen_fmem, c->xendev.dom, - PROT_READ | PROT_WRITE, 1, &mfn, NULL); + c->page = qemu_xen_foreignmem_map(c->xendev.dom, NULL, + PROT_READ | PROT_WRITE, 1, &mfn, + NULL); if (c->page == NULL) return -1; @@ -115,7 +116,7 @@ static void common_unbind(struct common *c) { xen_pv_unbind_evtchn(&c->xendev); if (c->page) { - xenforeignmemory_unmap(xen_fmem, c->page, 1); + qemu_xen_foreignmem_unmap(c->page, 1); c->page = NULL; } } @@ -488,27 +489,28 @@ static int xenfb_map_fb(struct XenFB *xenfb) } if (xenfb->pixels) { - munmap(xenfb->pixels, xenfb->fbpages * XC_PAGE_SIZE); + munmap(xenfb->pixels, xenfb->fbpages * XEN_PAGE_SIZE); xenfb->pixels = NULL; } - xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XC_PAGE_SIZE); + xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XEN_PAGE_SIZE); n_fbdirs = xenfb->fbpages * mode / 8; - n_fbdirs = DIV_ROUND_UP(n_fbdirs, XC_PAGE_SIZE); + n_fbdirs = DIV_ROUND_UP(n_fbdirs, XEN_PAGE_SIZE); pgmfns = g_new0(xen_pfn_t, n_fbdirs); fbmfns = g_new0(xen_pfn_t, xenfb->fbpages); xenfb_copy_mfns(mode, n_fbdirs, pgmfns, pd); - map = xenforeignmemory_map(xen_fmem, xenfb->c.xendev.dom, - PROT_READ, n_fbdirs, pgmfns, NULL); + map = qemu_xen_foreignmem_map(xenfb->c.xendev.dom, NULL, PROT_READ, + n_fbdirs, pgmfns, NULL); if (map == NULL) goto out; xenfb_copy_mfns(mode, xenfb->fbpages, fbmfns, map); - xenforeignmemory_unmap(xen_fmem, map, n_fbdirs); + qemu_xen_foreignmem_unmap(map, n_fbdirs); - xenfb->pixels = xenforeignmemory_map(xen_fmem, xenfb->c.xendev.dom, - PROT_READ, xenfb->fbpages, fbmfns, NULL); + xenfb->pixels = qemu_xen_foreignmem_map(xenfb->c.xendev.dom, NULL, + PROT_READ, xenfb->fbpages, + fbmfns, NULL); if (xenfb->pixels == NULL) goto out; @@ -526,8 +528,8 @@ static int xenfb_configure_fb(struct XenFB *xenfb, size_t fb_len_lim, { size_t mfn_sz = sizeof_field(struct xenfb_page, pd[0]); size_t pd_len = sizeof_field(struct xenfb_page, pd) / mfn_sz; - size_t fb_pages = pd_len * XC_PAGE_SIZE / mfn_sz; - size_t fb_len_max = fb_pages * XC_PAGE_SIZE; + size_t fb_pages = pd_len * XEN_PAGE_SIZE / mfn_sz; + size_t fb_len_max = fb_pages * XEN_PAGE_SIZE; int max_width, max_height; if (fb_len_lim > fb_len_max) { @@ -927,8 +929,8 @@ static void fb_disconnect(struct XenLegacyDevice *xendev) * Replacing the framebuffer with anonymous shared memory * instead. This releases the guest pages and keeps qemu happy. 
*/ - xenforeignmemory_unmap(xen_fmem, fb->pixels, fb->fbpages); - fb->pixels = mmap(fb->pixels, fb->fbpages * XC_PAGE_SIZE, + qemu_xen_foreignmem_unmap(fb->pixels, fb->fbpages); + fb->pixels = mmap(fb->pixels, fb->fbpages * XEN_PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0); if (fb->pixels == MAP_FAILED) { diff --git a/hw/dma/etraxfs_dma.c b/hw/dma/etraxfs_dma.c index 0fef00c6c9..a1068b19ea 100644 --- a/hw/dma/etraxfs_dma.c +++ b/hw/dma/etraxfs_dma.c @@ -160,38 +160,38 @@ enum { enum dma_ch_state { - RST = 1, - STOPPED = 2, - RUNNING = 4 + RST = 1, + STOPPED = 2, + RUNNING = 4 }; struct fs_dma_channel { - qemu_irq irq; - struct etraxfs_dma_client *client; + qemu_irq irq; + struct etraxfs_dma_client *client; - /* Internal status. */ - int stream_cmd_src; - enum dma_ch_state state; + /* Internal status. */ + int stream_cmd_src; + enum dma_ch_state state; - unsigned int input : 1; - unsigned int eol : 1; + unsigned int input : 1; + unsigned int eol : 1; - struct dma_descr_group current_g; - struct dma_descr_context current_c; - struct dma_descr_data current_d; + struct dma_descr_group current_g; + struct dma_descr_context current_c; + struct dma_descr_data current_d; - /* Control registers. */ - uint32_t regs[DMA_REG_MAX]; + /* Control registers. */ + uint32_t regs[DMA_REG_MAX]; }; struct fs_dma_ctrl { - MemoryRegion mmio; - int nr_channels; - struct fs_dma_channel *channels; + MemoryRegion mmio; + int nr_channels; + struct fs_dma_channel *channels; - QEMUBH *bh; + QEMUBH *bh; }; static void DMA_run(void *opaque); @@ -199,72 +199,72 @@ static int channel_out_run(struct fs_dma_ctrl *ctrl, int c); static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg) { - return ctrl->channels[c].regs[reg]; + return ctrl->channels[c].regs[reg]; } static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c) { - return channel_reg(ctrl, c, RW_CFG) & 2; + return channel_reg(ctrl, c, RW_CFG) & 2; } static inline int channel_en(struct fs_dma_ctrl *ctrl, int c) { - return (channel_reg(ctrl, c, RW_CFG) & 1) - && ctrl->channels[c].client; + return (channel_reg(ctrl, c, RW_CFG) & 1) + && ctrl->channels[c].client; } static inline int fs_channel(hwaddr addr) { - /* Every channel has a 0x2000 ctrl register map. */ - return addr >> 13; + /* Every channel has a 0x2000 ctrl register map. */ + return addr >> 13; } #ifdef USE_THIS_DEAD_CODE static void channel_load_g(struct fs_dma_ctrl *ctrl, int c) { - hwaddr addr = channel_reg(ctrl, c, RW_GROUP); + hwaddr addr = channel_reg(ctrl, c, RW_GROUP); - /* Load and decode. FIXME: handle endianness. */ + /* Load and decode. FIXME: handle endianness. 
*/ cpu_physical_memory_read(addr, &ctrl->channels[c].current_g, sizeof(ctrl->channels[c].current_g)); } static void dump_c(int ch, struct dma_descr_context *c) { - printf("%s ch=%d\n", __func__, ch); - printf("next=%x\n", c->next); - printf("saved_data=%x\n", c->saved_data); - printf("saved_data_buf=%x\n", c->saved_data_buf); - printf("eol=%x\n", (uint32_t) c->eol); + printf("%s ch=%d\n", __func__, ch); + printf("next=%x\n", c->next); + printf("saved_data=%x\n", c->saved_data); + printf("saved_data_buf=%x\n", c->saved_data_buf); + printf("eol=%x\n", (uint32_t) c->eol); } static void dump_d(int ch, struct dma_descr_data *d) { - printf("%s ch=%d\n", __func__, ch); - printf("next=%x\n", d->next); - printf("buf=%x\n", d->buf); - printf("after=%x\n", d->after); - printf("intr=%x\n", (uint32_t) d->intr); - printf("out_eop=%x\n", (uint32_t) d->out_eop); - printf("in_eop=%x\n", (uint32_t) d->in_eop); - printf("eol=%x\n", (uint32_t) d->eol); + printf("%s ch=%d\n", __func__, ch); + printf("next=%x\n", d->next); + printf("buf=%x\n", d->buf); + printf("after=%x\n", d->after); + printf("intr=%x\n", (uint32_t) d->intr); + printf("out_eop=%x\n", (uint32_t) d->out_eop); + printf("in_eop=%x\n", (uint32_t) d->in_eop); + printf("eol=%x\n", (uint32_t) d->eol); } #endif static void channel_load_c(struct fs_dma_ctrl *ctrl, int c) { - hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN); + hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN); - /* Load and decode. FIXME: handle endianness. */ + /* Load and decode. FIXME: handle endianness. */ cpu_physical_memory_read(addr, &ctrl->channels[c].current_c, sizeof(ctrl->channels[c].current_c)); - D(dump_c(c, &ctrl->channels[c].current_c)); - /* I guess this should update the current pos. */ - ctrl->channels[c].regs[RW_SAVED_DATA] = - (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data; - ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = - (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf; + D(dump_c(c, &ctrl->channels[c].current_c)); + /* I guess this should update the current pos. */ + ctrl->channels[c].regs[RW_SAVED_DATA] = + (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data; + ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = + (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf; } static void channel_load_d(struct fs_dma_ctrl *ctrl, int c) @@ -303,273 +303,273 @@ static void channel_store_d(struct fs_dma_ctrl *ctrl, int c) static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c) { - /* FIXME: */ + /* FIXME: */ } static inline void channel_start(struct fs_dma_ctrl *ctrl, int c) { - if (ctrl->channels[c].client) - { - ctrl->channels[c].eol = 0; - ctrl->channels[c].state = RUNNING; - if (!ctrl->channels[c].input) - channel_out_run(ctrl, c); - } else - printf("WARNING: starting DMA ch %d with no client\n", c); + if (ctrl->channels[c].client) + { + ctrl->channels[c].eol = 0; + ctrl->channels[c].state = RUNNING; + if (!ctrl->channels[c].input) + channel_out_run(ctrl, c); + } else + printf("WARNING: starting DMA ch %d with no client\n", c); - qemu_bh_schedule_idle(ctrl->bh); + qemu_bh_schedule_idle(ctrl->bh); } static void channel_continue(struct fs_dma_ctrl *ctrl, int c) { - if (!channel_en(ctrl, c) - || channel_stopped(ctrl, c) - || ctrl->channels[c].state != RUNNING - /* Only reload the current data descriptor if it has eol set. 
*/ - || !ctrl->channels[c].current_d.eol) { - D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n", - c, ctrl->channels[c].state, - channel_stopped(ctrl, c), - channel_en(ctrl,c), - ctrl->channels[c].eol)); - D(dump_d(c, &ctrl->channels[c].current_d)); - return; - } + if (!channel_en(ctrl, c) + || channel_stopped(ctrl, c) + || ctrl->channels[c].state != RUNNING + /* Only reload the current data descriptor if it has eol set. */ + || !ctrl->channels[c].current_d.eol) { + D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n", + c, ctrl->channels[c].state, + channel_stopped(ctrl, c), + channel_en(ctrl,c), + ctrl->channels[c].eol)); + D(dump_d(c, &ctrl->channels[c].current_d)); + return; + } - /* Reload the current descriptor. */ - channel_load_d(ctrl, c); + /* Reload the current descriptor. */ + channel_load_d(ctrl, c); - /* If the current descriptor cleared the eol flag and we had already - reached eol state, do the continue. */ - if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) { - D(printf("continue %d ok %x\n", c, - ctrl->channels[c].current_d.next)); - ctrl->channels[c].regs[RW_SAVED_DATA] = - (uint32_t)(unsigned long)ctrl->channels[c].current_d.next; - channel_load_d(ctrl, c); - ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = - (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf; + /* If the current descriptor cleared the eol flag and we had already + reached eol state, do the continue. */ + if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) { + D(printf("continue %d ok %x\n", c, + ctrl->channels[c].current_d.next)); + ctrl->channels[c].regs[RW_SAVED_DATA] = + (uint32_t)(unsigned long)ctrl->channels[c].current_d.next; + channel_load_d(ctrl, c); + ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = + (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf; - channel_start(ctrl, c); - } - ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = - (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf; + channel_start(ctrl, c); + } + ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = + (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf; } static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v) { - unsigned int cmd = v & ((1 << 10) - 1); + unsigned int cmd = v & ((1 << 10) - 1); - D(printf("%s ch=%d cmd=%x\n", - __func__, c, cmd)); - if (cmd & regk_dma_load_d) { - channel_load_d(ctrl, c); - if (cmd & regk_dma_burst) - channel_start(ctrl, c); - } + D(printf("%s ch=%d cmd=%x\n", + __func__, c, cmd)); + if (cmd & regk_dma_load_d) { + channel_load_d(ctrl, c); + if (cmd & regk_dma_burst) + channel_start(ctrl, c); + } - if (cmd & regk_dma_load_c) { - channel_load_c(ctrl, c); - } + if (cmd & regk_dma_load_c) { + channel_load_c(ctrl, c); + } } static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c) { - D(printf("%s %d\n", __func__, c)); - ctrl->channels[c].regs[R_INTR] &= - ~(ctrl->channels[c].regs[RW_ACK_INTR]); + D(printf("%s %d\n", __func__, c)); + ctrl->channels[c].regs[R_INTR] &= + ~(ctrl->channels[c].regs[RW_ACK_INTR]); - ctrl->channels[c].regs[R_MASKED_INTR] = - ctrl->channels[c].regs[R_INTR] - & ctrl->channels[c].regs[RW_INTR_MASK]; + ctrl->channels[c].regs[R_MASKED_INTR] = + ctrl->channels[c].regs[R_INTR] + & ctrl->channels[c].regs[RW_INTR_MASK]; - D(printf("%s: chan=%d masked_intr=%x\n", __func__, - c, - ctrl->channels[c].regs[R_MASKED_INTR])); + D(printf("%s: chan=%d masked_intr=%x\n", __func__, + c, + ctrl->channels[c].regs[R_MASKED_INTR])); - qemu_set_irq(ctrl->channels[c].irq, - !!ctrl->channels[c].regs[R_MASKED_INTR]); + 
qemu_set_irq(ctrl->channels[c].irq, + !!ctrl->channels[c].regs[R_MASKED_INTR]); } static int channel_out_run(struct fs_dma_ctrl *ctrl, int c) { - uint32_t len; - uint32_t saved_data_buf; - unsigned char buf[2 * 1024]; + uint32_t len; + uint32_t saved_data_buf; + unsigned char buf[2 * 1024]; - struct dma_context_metadata meta; - bool send_context = true; + struct dma_context_metadata meta; + bool send_context = true; - if (ctrl->channels[c].eol) - return 0; + if (ctrl->channels[c].eol) + return 0; - do { - bool out_eop; - D(printf("ch=%d buf=%x after=%x\n", - c, - (uint32_t)ctrl->channels[c].current_d.buf, - (uint32_t)ctrl->channels[c].current_d.after)); + do { + bool out_eop; + D(printf("ch=%d buf=%x after=%x\n", + c, + (uint32_t)ctrl->channels[c].current_d.buf, + (uint32_t)ctrl->channels[c].current_d.after)); - if (send_context) { - if (ctrl->channels[c].client->client.metadata_push) { - meta.metadata = ctrl->channels[c].current_d.md; - ctrl->channels[c].client->client.metadata_push( - ctrl->channels[c].client->client.opaque, - &meta); - } - send_context = false; - } + if (send_context) { + if (ctrl->channels[c].client->client.metadata_push) { + meta.metadata = ctrl->channels[c].current_d.md; + ctrl->channels[c].client->client.metadata_push( + ctrl->channels[c].client->client.opaque, + &meta); + } + send_context = false; + } - channel_load_d(ctrl, c); - saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF); - len = (uint32_t)(unsigned long) - ctrl->channels[c].current_d.after; - len -= saved_data_buf; + channel_load_d(ctrl, c); + saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF); + len = (uint32_t)(unsigned long) + ctrl->channels[c].current_d.after; + len -= saved_data_buf; - if (len > sizeof buf) - len = sizeof buf; - cpu_physical_memory_read (saved_data_buf, buf, len); + if (len > sizeof buf) + len = sizeof buf; + cpu_physical_memory_read (saved_data_buf, buf, len); - out_eop = ((saved_data_buf + len) == - ctrl->channels[c].current_d.after) && - ctrl->channels[c].current_d.out_eop; + out_eop = ((saved_data_buf + len) == + ctrl->channels[c].current_d.after) && + ctrl->channels[c].current_d.out_eop; - D(printf("channel %d pushes %x %u bytes eop=%u\n", c, - saved_data_buf, len, out_eop)); + D(printf("channel %d pushes %x %u bytes eop=%u\n", c, + saved_data_buf, len, out_eop)); - if (ctrl->channels[c].client->client.push) { - if (len > 0) { - ctrl->channels[c].client->client.push( - ctrl->channels[c].client->client.opaque, - buf, len, out_eop); - } - } else { - printf("WARNING: DMA ch%d dataloss," - " no attached client.\n", c); - } + if (ctrl->channels[c].client->client.push) { + if (len > 0) { + ctrl->channels[c].client->client.push( + ctrl->channels[c].client->client.opaque, + buf, len, out_eop); + } + } else { + printf("WARNING: DMA ch%d dataloss," + " no attached client.\n", c); + } - saved_data_buf += len; + saved_data_buf += len; - if (saved_data_buf == (uint32_t)(unsigned long) - ctrl->channels[c].current_d.after) { - /* Done. Step to next. */ - if (ctrl->channels[c].current_d.out_eop) { - send_context = true; - } - if (ctrl->channels[c].current_d.intr) { - /* data intr. */ - D(printf("signal intr %d eol=%d\n", - len, ctrl->channels[c].current_d.eol)); - ctrl->channels[c].regs[R_INTR] |= (1 << 2); - channel_update_irq(ctrl, c); - } - channel_store_d(ctrl, c); - if (ctrl->channels[c].current_d.eol) { - D(printf("channel %d EOL\n", c)); - ctrl->channels[c].eol = 1; + if (saved_data_buf == (uint32_t)(unsigned long) + ctrl->channels[c].current_d.after) { + /* Done. 
Step to next. */ + if (ctrl->channels[c].current_d.out_eop) { + send_context = true; + } + if (ctrl->channels[c].current_d.intr) { + /* data intr. */ + D(printf("signal intr %d eol=%d\n", + len, ctrl->channels[c].current_d.eol)); + ctrl->channels[c].regs[R_INTR] |= (1 << 2); + channel_update_irq(ctrl, c); + } + channel_store_d(ctrl, c); + if (ctrl->channels[c].current_d.eol) { + D(printf("channel %d EOL\n", c)); + ctrl->channels[c].eol = 1; - /* Mark the context as disabled. */ - ctrl->channels[c].current_c.dis = 1; - channel_store_c(ctrl, c); + /* Mark the context as disabled. */ + ctrl->channels[c].current_c.dis = 1; + channel_store_c(ctrl, c); - channel_stop(ctrl, c); - } else { - ctrl->channels[c].regs[RW_SAVED_DATA] = - (uint32_t)(unsigned long)ctrl-> - channels[c].current_d.next; - /* Load new descriptor. */ - channel_load_d(ctrl, c); - saved_data_buf = (uint32_t)(unsigned long) - ctrl->channels[c].current_d.buf; - } + channel_stop(ctrl, c); + } else { + ctrl->channels[c].regs[RW_SAVED_DATA] = + (uint32_t)(unsigned long)ctrl-> + channels[c].current_d.next; + /* Load new descriptor. */ + channel_load_d(ctrl, c); + saved_data_buf = (uint32_t)(unsigned long) + ctrl->channels[c].current_d.buf; + } - ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = - saved_data_buf; - D(dump_d(c, &ctrl->channels[c].current_d)); - } - ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf; - } while (!ctrl->channels[c].eol); - return 1; + ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = + saved_data_buf; + D(dump_d(c, &ctrl->channels[c].current_d)); + } + ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf; + } while (!ctrl->channels[c].eol); + return 1; } static int channel_in_process(struct fs_dma_ctrl *ctrl, int c, - unsigned char *buf, int buflen, int eop) + unsigned char *buf, int buflen, int eop) { - uint32_t len; - uint32_t saved_data_buf; + uint32_t len; + uint32_t saved_data_buf; - if (ctrl->channels[c].eol == 1) - return 0; + if (ctrl->channels[c].eol == 1) + return 0; - channel_load_d(ctrl, c); - saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF); - len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after; - len -= saved_data_buf; - - if (len > buflen) - len = buflen; + channel_load_d(ctrl, c); + saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF); + len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after; + len -= saved_data_buf; - cpu_physical_memory_write (saved_data_buf, buf, len); - saved_data_buf += len; + if (len > buflen) + len = buflen; - if (saved_data_buf == - (uint32_t)(unsigned long)ctrl->channels[c].current_d.after - || eop) { - uint32_t r_intr = ctrl->channels[c].regs[R_INTR]; + cpu_physical_memory_write (saved_data_buf, buf, len); + saved_data_buf += len; - D(printf("in dscr end len=%d\n", - ctrl->channels[c].current_d.after - - ctrl->channels[c].current_d.buf)); - ctrl->channels[c].current_d.after = saved_data_buf; + if (saved_data_buf == + (uint32_t)(unsigned long)ctrl->channels[c].current_d.after + || eop) { + uint32_t r_intr = ctrl->channels[c].regs[R_INTR]; - /* Done. Step to next. */ - if (ctrl->channels[c].current_d.intr) { - /* TODO: signal eop to the client. */ - /* data intr. 
*/ - ctrl->channels[c].regs[R_INTR] |= 3; - } - if (eop) { - ctrl->channels[c].current_d.in_eop = 1; - ctrl->channels[c].regs[R_INTR] |= 8; - } - if (r_intr != ctrl->channels[c].regs[R_INTR]) - channel_update_irq(ctrl, c); + D(printf("in dscr end len=%d\n", + ctrl->channels[c].current_d.after + - ctrl->channels[c].current_d.buf)); + ctrl->channels[c].current_d.after = saved_data_buf; - channel_store_d(ctrl, c); - D(dump_d(c, &ctrl->channels[c].current_d)); + /* Done. Step to next. */ + if (ctrl->channels[c].current_d.intr) { + /* TODO: signal eop to the client. */ + /* data intr. */ + ctrl->channels[c].regs[R_INTR] |= 3; + } + if (eop) { + ctrl->channels[c].current_d.in_eop = 1; + ctrl->channels[c].regs[R_INTR] |= 8; + } + if (r_intr != ctrl->channels[c].regs[R_INTR]) + channel_update_irq(ctrl, c); - if (ctrl->channels[c].current_d.eol) { - D(printf("channel %d EOL\n", c)); - ctrl->channels[c].eol = 1; + channel_store_d(ctrl, c); + D(dump_d(c, &ctrl->channels[c].current_d)); - /* Mark the context as disabled. */ - ctrl->channels[c].current_c.dis = 1; - channel_store_c(ctrl, c); + if (ctrl->channels[c].current_d.eol) { + D(printf("channel %d EOL\n", c)); + ctrl->channels[c].eol = 1; - channel_stop(ctrl, c); - } else { - ctrl->channels[c].regs[RW_SAVED_DATA] = - (uint32_t)(unsigned long)ctrl-> - channels[c].current_d.next; - /* Load new descriptor. */ - channel_load_d(ctrl, c); - saved_data_buf = (uint32_t)(unsigned long) - ctrl->channels[c].current_d.buf; - } - } + /* Mark the context as disabled. */ + ctrl->channels[c].current_c.dis = 1; + channel_store_c(ctrl, c); - ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf; - return len; + channel_stop(ctrl, c); + } else { + ctrl->channels[c].regs[RW_SAVED_DATA] = + (uint32_t)(unsigned long)ctrl-> + channels[c].current_d.next; + /* Load new descriptor. */ + channel_load_d(ctrl, c); + saved_data_buf = (uint32_t)(unsigned long) + ctrl->channels[c].current_d.buf; + } + } + + ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf; + return len; } static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c) { - if (ctrl->channels[c].client->client.pull) { - ctrl->channels[c].client->client.pull( - ctrl->channels[c].client->client.opaque); - return 1; - } else - return 0; + if (ctrl->channels[c].client->client.pull) { + ctrl->channels[c].client->client.pull( + ctrl->channels[c].client->client.opaque); + return 1; + } else + return 0; } static uint32_t dma_rinvalid (void *opaque, hwaddr addr) @@ -581,33 +581,33 @@ static uint32_t dma_rinvalid (void *opaque, hwaddr addr) static uint64_t dma_read(void *opaque, hwaddr addr, unsigned int size) { - struct fs_dma_ctrl *ctrl = opaque; - int c; - uint32_t r = 0; + struct fs_dma_ctrl *ctrl = opaque; + int c; + uint32_t r = 0; - if (size != 4) { - dma_rinvalid(opaque, addr); - } + if (size != 4) { + dma_rinvalid(opaque, addr); + } - /* Make addr relative to this channel and bounded to nr regs. */ - c = fs_channel(addr); - addr &= 0xff; - addr >>= 2; - switch (addr) - { - case RW_STAT: - r = ctrl->channels[c].state & 7; - r |= ctrl->channels[c].eol << 5; - r |= ctrl->channels[c].stream_cmd_src << 8; - break; + /* Make addr relative to this channel and bounded to nr regs. 
*/ + c = fs_channel(addr); + addr &= 0xff; + addr >>= 2; + switch (addr) + { + case RW_STAT: + r = ctrl->channels[c].state & 7; + r |= ctrl->channels[c].eol << 5; + r |= ctrl->channels[c].stream_cmd_src << 8; + break; - default: - r = ctrl->channels[c].regs[addr]; - D(printf("%s c=%d addr=" HWADDR_FMT_plx "\n", - __func__, c, addr)); - break; - } - return r; + default: + r = ctrl->channels[c].regs[addr]; + D(printf("%s c=%d addr=" HWADDR_FMT_plx "\n", + __func__, c, addr)); + break; + } + return r; } static void @@ -619,133 +619,133 @@ dma_winvalid (void *opaque, hwaddr addr, uint32_t value) static void dma_update_state(struct fs_dma_ctrl *ctrl, int c) { - if (ctrl->channels[c].regs[RW_CFG] & 2) - ctrl->channels[c].state = STOPPED; - if (!(ctrl->channels[c].regs[RW_CFG] & 1)) - ctrl->channels[c].state = RST; + if (ctrl->channels[c].regs[RW_CFG] & 2) + ctrl->channels[c].state = STOPPED; + if (!(ctrl->channels[c].regs[RW_CFG] & 1)) + ctrl->channels[c].state = RST; } static void dma_write(void *opaque, hwaddr addr, - uint64_t val64, unsigned int size) + uint64_t val64, unsigned int size) { - struct fs_dma_ctrl *ctrl = opaque; - uint32_t value = val64; - int c; + struct fs_dma_ctrl *ctrl = opaque; + uint32_t value = val64; + int c; - if (size != 4) { - dma_winvalid(opaque, addr, value); - } + if (size != 4) { + dma_winvalid(opaque, addr, value); + } /* Make addr relative to this channel and bounded to nr regs. */ - c = fs_channel(addr); - addr &= 0xff; - addr >>= 2; - switch (addr) - { - case RW_DATA: - ctrl->channels[c].regs[addr] = value; - break; + c = fs_channel(addr); + addr &= 0xff; + addr >>= 2; + switch (addr) + { + case RW_DATA: + ctrl->channels[c].regs[addr] = value; + break; - case RW_CFG: - ctrl->channels[c].regs[addr] = value; - dma_update_state(ctrl, c); - break; - case RW_CMD: - /* continue. */ - if (value & ~1) - printf("Invalid store to ch=%d RW_CMD %x\n", - c, value); - ctrl->channels[c].regs[addr] = value; - channel_continue(ctrl, c); - break; + case RW_CFG: + ctrl->channels[c].regs[addr] = value; + dma_update_state(ctrl, c); + break; + case RW_CMD: + /* continue. 
*/ + if (value & ~1) + printf("Invalid store to ch=%d RW_CMD %x\n", + c, value); + ctrl->channels[c].regs[addr] = value; + channel_continue(ctrl, c); + break; - case RW_SAVED_DATA: - case RW_SAVED_DATA_BUF: - case RW_GROUP: - case RW_GROUP_DOWN: - ctrl->channels[c].regs[addr] = value; - break; + case RW_SAVED_DATA: + case RW_SAVED_DATA_BUF: + case RW_GROUP: + case RW_GROUP_DOWN: + ctrl->channels[c].regs[addr] = value; + break; - case RW_ACK_INTR: - case RW_INTR_MASK: - ctrl->channels[c].regs[addr] = value; - channel_update_irq(ctrl, c); - if (addr == RW_ACK_INTR) - ctrl->channels[c].regs[RW_ACK_INTR] = 0; - break; + case RW_ACK_INTR: + case RW_INTR_MASK: + ctrl->channels[c].regs[addr] = value; + channel_update_irq(ctrl, c); + if (addr == RW_ACK_INTR) + ctrl->channels[c].regs[RW_ACK_INTR] = 0; + break; - case RW_STREAM_CMD: - if (value & ~1023) - printf("Invalid store to ch=%d " - "RW_STREAMCMD %x\n", - c, value); - ctrl->channels[c].regs[addr] = value; - D(printf("stream_cmd ch=%d\n", c)); - channel_stream_cmd(ctrl, c, value); - break; + case RW_STREAM_CMD: + if (value & ~1023) + printf("Invalid store to ch=%d " + "RW_STREAMCMD %x\n", + c, value); + ctrl->channels[c].regs[addr] = value; + D(printf("stream_cmd ch=%d\n", c)); + channel_stream_cmd(ctrl, c, value); + break; - default: - D(printf("%s c=%d " HWADDR_FMT_plx "\n", - __func__, c, addr)); - break; - } + default: + D(printf("%s c=%d " HWADDR_FMT_plx "\n", + __func__, c, addr)); + break; + } } static const MemoryRegionOps dma_ops = { - .read = dma_read, - .write = dma_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 1, - .max_access_size = 4 - } + .read = dma_read, + .write = dma_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4 + } }; static int etraxfs_dmac_run(void *opaque) { - struct fs_dma_ctrl *ctrl = opaque; - int i; - int p = 0; + struct fs_dma_ctrl *ctrl = opaque; + int i; + int p = 0; - for (i = 0; - i < ctrl->nr_channels; - i++) - { - if (ctrl->channels[i].state == RUNNING) - { - if (ctrl->channels[i].input) { - p += channel_in_run(ctrl, i); - } else { - p += channel_out_run(ctrl, i); - } - } - } - return p; + for (i = 0; + i < ctrl->nr_channels; + i++) + { + if (ctrl->channels[i].state == RUNNING) + { + if (ctrl->channels[i].input) { + p += channel_in_run(ctrl, i); + } else { + p += channel_out_run(ctrl, i); + } + } + } + return p; } int etraxfs_dmac_input(struct etraxfs_dma_client *client, - void *buf, int len, int eop) + void *buf, int len, int eop) { - return channel_in_process(client->ctrl, client->channel, - buf, len, eop); + return channel_in_process(client->ctrl, client->channel, + buf, len, eop); } /* Connect an IRQ line with a channel. 
*/ void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input) { - struct fs_dma_ctrl *ctrl = opaque; - ctrl->channels[c].irq = *line; - ctrl->channels[c].input = input; + struct fs_dma_ctrl *ctrl = opaque; + ctrl->channels[c].irq = *line; + ctrl->channels[c].input = input; } void etraxfs_dmac_connect_client(void *opaque, int c, - struct etraxfs_dma_client *cl) + struct etraxfs_dma_client *cl) { - struct fs_dma_ctrl *ctrl = opaque; - cl->ctrl = ctrl; - cl->channel = c; - ctrl->channels[c].client = cl; + struct fs_dma_ctrl *ctrl = opaque; + cl->ctrl = ctrl; + cl->channel = c; + ctrl->channels[c].client = cl; } @@ -763,18 +763,18 @@ static void DMA_run(void *opaque) void *etraxfs_dmac_init(hwaddr base, int nr_channels) { - struct fs_dma_ctrl *ctrl = NULL; + struct fs_dma_ctrl *ctrl = NULL; - ctrl = g_malloc0(sizeof *ctrl); + ctrl = g_malloc0(sizeof *ctrl); - ctrl->bh = qemu_bh_new(DMA_run, ctrl); + ctrl->bh = qemu_bh_new(DMA_run, ctrl); - ctrl->nr_channels = nr_channels; - ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels); + ctrl->nr_channels = nr_channels; + ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels); - memory_region_init_io(&ctrl->mmio, NULL, &dma_ops, ctrl, "etraxfs-dma", - nr_channels * 0x2000); - memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio); + memory_region_init_io(&ctrl->mmio, NULL, &dma_ops, ctrl, "etraxfs-dma", + nr_channels * 0x2000); + memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio); - return ctrl; + return ctrl; } diff --git a/hw/dma/i82374.c b/hw/dma/i82374.c index 34c3aaf7d3..63734c22c9 100644 --- a/hw/dma/i82374.c +++ b/hw/dma/i82374.c @@ -125,7 +125,7 @@ static void i82374_realize(DeviceState *dev, Error **errp) I82374State *s = I82374(dev); ISABus *isa_bus = isa_bus_from_device(ISA_DEVICE(dev)); - if (isa_get_dma(isa_bus, 0)) { + if (isa_bus_get_dma(isa_bus, 0)) { error_setg(errp, "DMA already initialized on ISA bus"); return; } diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c index 6030c76435..12c90267df 100644 --- a/hw/dma/xilinx_axidma.c +++ b/hw/dma/xilinx_axidma.c @@ -168,6 +168,11 @@ static inline int stream_idle(struct Stream *s) return !!(s->regs[R_DMASR] & DMASR_IDLE); } +static inline int stream_halted(struct Stream *s) +{ + return !!(s->regs[R_DMASR] & DMASR_HALTED); +} + static void stream_reset(struct Stream *s) { s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. 
*/ @@ -269,7 +274,7 @@ static void stream_process_mem2s(struct Stream *s, StreamSink *tx_data_dev, uint64_t addr; bool eop; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { return; } @@ -326,7 +331,7 @@ static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf, unsigned int rxlen; size_t pos = 0; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { return 0; } @@ -407,7 +412,7 @@ xilinx_axidma_data_stream_can_push(StreamSink *obj, XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(obj); struct Stream *s = &ds->dma->streams[1]; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { ds->dma->notify = notify; ds->dma->notify_opaque = notify_opaque; return false; diff --git a/hw/gpio/max7310.c b/hw/gpio/max7310.c index 031482d939..4470cfe985 100644 --- a/hw/gpio/max7310.c +++ b/hw/gpio/max7310.c @@ -49,22 +49,22 @@ static uint8_t max7310_rx(I2CSlave *i2c) MAX7310State *s = MAX7310(i2c); switch (s->command) { - case 0x00: /* Input port */ + case 0x00: /* Input port */ return s->level ^ s->polarity; - case 0x01: /* Output port */ + case 0x01: /* Output port */ return s->level & ~s->direction; - case 0x02: /* Polarity inversion */ + case 0x02: /* Polarity inversion */ return s->polarity; - case 0x03: /* Configuration */ + case 0x03: /* Configuration */ return s->direction; - case 0x04: /* Timeout */ + case 0x04: /* Timeout */ return s->status; - case 0xff: /* Reserved */ + case 0xff: /* Reserved */ return 0xff; default: @@ -95,7 +95,7 @@ static int max7310_tx(I2CSlave *i2c, uint8_t data) } switch (s->command) { - case 0x01: /* Output port */ + case 0x01: /* Output port */ for (diff = (data ^ s->level) & ~s->direction; diff; diff &= ~(1 << line)) { line = ctz32(diff); @@ -105,20 +105,20 @@ static int max7310_tx(I2CSlave *i2c, uint8_t data) s->level = (s->level & s->direction) | (data & ~s->direction); break; - case 0x02: /* Polarity inversion */ + case 0x02: /* Polarity inversion */ s->polarity = data; break; - case 0x03: /* Configuration */ + case 0x03: /* Configuration */ s->level &= ~(s->direction ^ data); s->direction = data; break; - case 0x04: /* Timeout */ + case 0x04: /* Timeout */ s->status = data; break; - case 0x00: /* Input port - ignore writes */ + case 0x00: /* Input port - ignore writes */ break; default: qemu_log_mask(LOG_UNIMP, "%s: Unsupported register 0x02%" PRIx8 "\n", diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c index 7ac68c943f..b00a91ecfe 100644 --- a/hw/hppa/machine.c +++ b/hw/hppa/machine.c @@ -98,7 +98,7 @@ static ISABus *hppa_isa_bus(void) isa_irqs = i8259_init(isa_bus, /* qemu_allocate_irq(dino_set_isa_irq, s, 0)); */ NULL); - isa_bus_irqs(isa_bus, isa_irqs); + isa_bus_register_input_irqs(isa_bus, isa_irqs); return isa_bus; } @@ -177,6 +177,7 @@ static void machine_hppa_init(MachineState *machine) const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; + MachineClass *mc = MACHINE_GET_CLASS(machine); DeviceState *dev, *dino_dev, *lasi_dev; PCIBus *pci_bus; ISABus *isa_bus; @@ -272,7 +273,7 @@ static void machine_hppa_init(MachineState *machine) for (i = 0; i < nb_nics; i++) { if (!enable_lasi_lan()) { - pci_nic_init_nofail(&nd_table[i], pci_bus, "tulip", NULL); + pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); } } @@ -462,6 +463,7 @@ static 
void hppa_machine_init_class_init(ObjectClass *oc, void *data) mc->default_ram_size = 512 * MiB; mc->default_boot_order = "cd"; mc->default_ram_id = "ram"; + mc->default_nic = "tulip"; nc->nmi_monitor_handler = hppa_nmi; } diff --git a/hw/hyperv/syndbg.c b/hw/hyperv/syndbg.c index 94fe1b534b..065e12fb1e 100644 --- a/hw/hyperv/syndbg.c +++ b/hw/hyperv/syndbg.c @@ -340,7 +340,7 @@ static void hv_syndbg_realize(DeviceState *dev, Error **errp) syndbg->servaddr.sin_family = AF_INET; if (connect(syndbg->socket, (struct sockaddr *)&syndbg->servaddr, sizeof(syndbg->servaddr)) < 0) { - closesocket(syndbg->socket); + close(syndbg->socket); error_setg(errp, "%s failed to connect to socket", TYPE_HV_SYNDBG); return; } @@ -357,7 +357,7 @@ static void hv_syndbg_unrealize(DeviceState *dev) if (syndbg->socket > 0) { qemu_set_fd_handler(syndbg->socket, NULL, NULL, NULL); - closesocket(syndbg->socket); + close(syndbg->socket); } } diff --git a/hw/i2c/allwinner-i2c.c b/hw/i2c/allwinner-i2c.c index a435965836..9e8efa1d63 100644 --- a/hw/i2c/allwinner-i2c.c +++ b/hw/i2c/allwinner-i2c.c @@ -357,10 +357,16 @@ static void allwinner_i2c_write(void *opaque, hwaddr offset, s->stat = STAT_FROM_STA(STAT_IDLE); s->cntr &= ~TWI_CNTR_M_STP; } - if ((s->cntr & TWI_CNTR_INT_FLAG) == 0) { - /* Interrupt flag cleared */ + + if (!s->irq_clear_inverted && !(s->cntr & TWI_CNTR_INT_FLAG)) { + /* Write 0 to clear this flag */ + qemu_irq_lower(s->irq); + } else if (s->irq_clear_inverted && (s->cntr & TWI_CNTR_INT_FLAG)) { + /* Write 1 to clear this flag */ + s->cntr &= ~TWI_CNTR_INT_FLAG; qemu_irq_lower(s->irq); } + if ((s->cntr & TWI_CNTR_A_ACK) == 0) { if (STAT_TO_STA(s->stat) == STAT_M_DATA_RX_ACK) { s->stat = STAT_FROM_STA(STAT_M_DATA_RX_NACK); @@ -451,9 +457,23 @@ static const TypeInfo allwinner_i2c_type_info = { .class_init = allwinner_i2c_class_init, }; +static void allwinner_i2c_sun6i_init(Object *obj) +{ + AWI2CState *s = AW_I2C(obj); + + s->irq_clear_inverted = true; +} + +static const TypeInfo allwinner_i2c_sun6i_type_info = { + .name = TYPE_AW_I2C_SUN6I, + .parent = TYPE_AW_I2C, + .instance_init = allwinner_i2c_sun6i_init, +}; + static void allwinner_i2c_register_types(void) { type_register_static(&allwinner_i2c_type_info); + type_register_static(&allwinner_i2c_sun6i_type_info); } type_init(allwinner_i2c_register_types) diff --git a/hw/i2c/aspeed_i2c.c b/hw/i2c/aspeed_i2c.c index c166fd20fa..1f071a3811 100644 --- a/hw/i2c/aspeed_i2c.c +++ b/hw/i2c/aspeed_i2c.c @@ -550,6 +550,8 @@ static void aspeed_i2c_bus_handle_cmd(AspeedI2CBus *bus, uint64_t value) } SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, M_STOP_CMD, 0); aspeed_i2c_set_state(bus, I2CD_IDLE); + + i2c_schedule_pending_master(bus->bus); } if (aspeed_i2c_bus_pkt_mode_en(bus)) { diff --git a/hw/i2c/core.c b/hw/i2c/core.c index d4ba8146bf..bed594fe59 100644 --- a/hw/i2c/core.c +++ b/hw/i2c/core.c @@ -185,22 +185,39 @@ int i2c_start_transfer(I2CBus *bus, uint8_t address, bool is_recv) void i2c_bus_master(I2CBus *bus, QEMUBH *bh) { + I2CPendingMaster *node = g_new(struct I2CPendingMaster, 1); + node->bh = bh; + + QSIMPLEQ_INSERT_TAIL(&bus->pending_masters, node, entry); +} + +void i2c_schedule_pending_master(I2CBus *bus) +{ + I2CPendingMaster *node; + if (i2c_bus_busy(bus)) { - I2CPendingMaster *node = g_new(struct I2CPendingMaster, 1); - node->bh = bh; - - QSIMPLEQ_INSERT_TAIL(&bus->pending_masters, node, entry); - + /* someone is already controlling the bus; wait for it to release it */ return; } - bus->bh = bh; + if (QSIMPLEQ_EMPTY(&bus->pending_masters)) { + return; + } + + node = QSIMPLEQ_FIRST(&bus->pending_masters); + bus->bh = node->bh; + + QSIMPLEQ_REMOVE_HEAD(&bus->pending_masters, entry); + g_free(node); + + qemu_bh_schedule(bus->bh); } void i2c_bus_release(I2CBus *bus) { bus->bh = NULL; + + i2c_schedule_pending_master(bus); } int i2c_start_recv(I2CBus *bus, uint8_t address) @@ -234,16 +251,6 @@ void i2c_end_transfer(I2CBus *bus) g_free(node); } bus->broadcast = false; - - if (!QSIMPLEQ_EMPTY(&bus->pending_masters)) { - I2CPendingMaster *node = QSIMPLEQ_FIRST(&bus->pending_masters); - bus->bh = node->bh; - - QSIMPLEQ_REMOVE_HEAD(&bus->pending_masters, entry); - g_free(node); - - qemu_bh_schedule(bus->bh); - } } int i2c_send(I2CBus *bus, uint8_t data)
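The hw/i2c/core.c hunks above split bus mastership into two steps: i2c_bus_master() now only queues a QEMUBH for the would-be master, and i2c_schedule_pending_master() pops the queue and schedules that BH once the bus is free; it is invoked from i2c_bus_release() and, as in the aspeed_i2c.c hunk, by a controller after it handles a STOP. A minimal sketch of a client of this API follows; MyDev, its functions and the target address 0x50 are hypothetical, while the i2c_* and qemu_bh_new() calls are the ones introduced or already present in hw/i2c/core.c:

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/i2c/i2c.h"

/* Hypothetical device model that occasionally masters the bus. */
typedef struct MyDev {
    I2CBus *bus;
    QEMUBH *granted_bh;     /* runs once we are granted the bus */
} MyDev;

static void my_dev_granted_bus(void *opaque)
{
    MyDev *d = opaque;

    /* We are the current master here; 0x50 is an arbitrary address. */
    if (!i2c_start_send(d->bus, 0x50)) {
        i2c_send(d->bus, 0xab);
        i2c_end_transfer(d->bus);
    }
    /* Hand the bus to the next pending master, if any. */
    i2c_bus_release(d->bus);
}

static void my_dev_init(MyDev *d, I2CBus *bus)
{
    d->bus = bus;
    d->granted_bh = qemu_bh_new(my_dev_granted_bus, d);
}

static void my_dev_kick(MyDev *d)
{
    i2c_bus_master(d->bus, d->granted_bh);   /* queue for mastership */
    i2c_schedule_pending_master(d->bus);     /* no-op while the bus is busy */
}

Whether the explicit kick in my_dev_kick() is needed depends on who else drives the bus; the Aspeed controller above instead calls i2c_schedule_pending_master() itself once the current transfer reaches its STOP command.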
diff --git a/hw/i2c/pmbus_device.c b/hw/i2c/pmbus_device.c index c3d6046784..44fe4eddbb 100644 --- a/hw/i2c/pmbus_device.c +++ b/hw/i2c/pmbus_device.c @@ -94,6 +94,13 @@ void pmbus_send64(PMBusDevice *pmdev, uint64_t data) void pmbus_send_string(PMBusDevice *pmdev, const char *data) { + if (!data) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: %s: uninitialised read from 0x%02x\n", + __func__, DEVICE(pmdev)->canonical_path, pmdev->code); + return; + } + size_t len = strlen(data); g_assert(len > 0); g_assert(len + pmdev->out_buf_len < SMBUS_DATA_MAX_LEN); diff --git a/hw/i2c/smbus_ich9.c b/hw/i2c/smbus_ich9.c index 52ba77f3fc..18d40e93c1 100644 --- a/hw/i2c/smbus_ich9.c +++ b/hw/i2c/smbus_ich9.c @@ -27,7 +27,7 @@ #include "migration/vmstate.h" #include "qemu/module.h" -#include "hw/i386/ich9.h" +#include "hw/southbridge/ich9.h" #include "qom/object.h" #include "hw/acpi/acpi_aml_interface.h" @@ -80,6 +80,18 @@ static void ich9_smbus_write_config(PCIDevice *d, uint32_t address, } } +static void ich9_smb_set_irq(PMSMBus *pmsmb, bool enabled) +{ + ICH9SMBState *s = pmsmb->opaque; + + if (enabled == s->irq_enabled) { + return; + } + + s->irq_enabled = enabled; + pci_set_irq(&s->dev, enabled); +} + static void ich9_smbus_realize(PCIDevice *d, Error **errp) { ICH9SMBState *s = ICH9_SMB_DEVICE(d); @@ -93,6 +105,9 @@ static void ich9_smbus_realize(PCIDevice *d, Error **errp) pm_smbus_init(&d->qdev, &s->smb, false); pci_register_bar(d, ICH9_SMB_SMB_BASE_BAR, PCI_BASE_ADDRESS_SPACE_IO, &s->smb.io); + + s->smb.set_irq = ich9_smb_set_irq; + s->smb.opaque = s; } static void build_ich9_smb_aml(AcpiDevAmlIf *adev, Aml *scope) @@ -125,28 +140,6 @@ static void ich9_smb_class_init(ObjectClass *klass, void *data) adevc->build_dev_aml = build_ich9_smb_aml; } -static void ich9_smb_set_irq(PMSMBus *pmsmb, bool enabled) -{ - ICH9SMBState *s = pmsmb->opaque; - - if (enabled == s->irq_enabled) { - return; - } - - s->irq_enabled = enabled; - pci_set_irq(&s->dev, enabled); -} - -I2CBus *ich9_smb_init(PCIBus *bus, int devfn, uint32_t smb_io_base) -{ - PCIDevice *d = - pci_create_simple_multifunction(bus, devfn, true, TYPE_ICH9_SMB_DEVICE); - ICH9SMBState *s = ICH9_SMB_DEVICE(d); - s->smb.set_irq = ich9_smb_set_irq; - s->smb.opaque = s; - return s->smb.smbus; -} - static const TypeInfo ich9_smb_info = { .name = TYPE_ICH9_SMB_DEVICE, .parent = TYPE_PCI_DEVICE, diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig index 9fbfe748b5..9051083c1e 100644 --- a/hw/i386/Kconfig +++ b/hw/i386/Kconfig @@ -80,10 +80,10 @@ config I440FX config ISAPC bool + imply VGA_ISA select ISA_BUS select PC select IDE_ISA - select VGA_ISA # FIXME: it is in the same file as i440fx, and does not compile # if separated depends on I440FX @@ -136,3 +136,8 @@ config VMPORT config VMMOUSE bool depends on VMPORT + +config XEN_EMU + bool + default y + depends on KVM && 
(I386 || X86_64) diff --git a/hw/i386/acpi-build.c index b67dcbbb37..512162003b 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -55,10 +55,11 @@ #include "hw/hyperv/vmbus-bridge.h" /* Supported chipsets: */ +#include "hw/southbridge/ich9.h" #include "hw/southbridge/piix.h" #include "hw/acpi/pcihp.h" #include "hw/i386/fw_cfg.h" -#include "hw/i386/ich9.h" +#include "hw/i386/pc.h" #include "hw/pci/pci_bus.h" #include "hw/pci-host/i440fx.h" #include "hw/pci-host/q35.h" @@ -372,6 +373,104 @@ Aml *aml_pci_device_dsm(void) return method; } +static void build_append_pci_dsm_func0_common(Aml *ctx, Aml *retvar) +{ + Aml *UUID, *ifctx1; + uint8_t byte_list[1] = { 0 }; /* nothing supported yet */ + + aml_append(ctx, aml_store(aml_buffer(1, byte_list), retvar)); + /* + * PCI Firmware Specification 3.1 + * 4.6. _DSM Definitions for PCI + */ + UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D"); + ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(0), UUID))); + { + /* call is for unsupported UUID, bail out */ + aml_append(ifctx1, aml_return(retvar)); + } + aml_append(ctx, ifctx1); + + ifctx1 = aml_if(aml_lless(aml_arg(1), aml_int(2))); + { + /* call is for unsupported REV, bail out */ + aml_append(ifctx1, aml_return(retvar)); + } + aml_append(ctx, ifctx1); +} + +static Aml *aml_pci_edsm(void) +{ + Aml *method, *ifctx; + Aml *zero = aml_int(0); + Aml *func = aml_arg(2); + Aml *ret = aml_local(0); + Aml *aidx = aml_local(1); + Aml *params = aml_arg(4); + + method = aml_method("EDSM", 5, AML_SERIALIZED); + + /* get supported functions */ + ifctx = aml_if(aml_equal(func, zero)); + { + /* 1: have supported functions */ + /* 7: support for function 7 */ + const uint8_t caps = 1 | BIT(7); + build_append_pci_dsm_func0_common(ifctx, ret); + aml_append(ifctx, aml_store(aml_int(caps), aml_index(ret, zero))); + aml_append(ifctx, aml_return(ret)); + } + aml_append(method, ifctx); + + /* handle specific function requests */ + /* + * PCI Firmware Specification 3.1 + * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under + * Operating Systems + */ + ifctx = aml_if(aml_equal(func, aml_int(7))); + { + Aml *pkg = aml_package(2); + aml_append(pkg, zero); + /* optional, if not impl. should return null string */ + aml_append(pkg, aml_string("%s", "")); + aml_append(ifctx, aml_store(pkg, ret)); + + /* + * IASL is fine when initializing Package with computational data, + * however it makes the guest unhappy (it fails to process such AML). + * So use runtime assignment to set acpi-index after initializer + * to make OSPM happy. 
+ */ + aml_append(ifctx, + aml_store(aml_derefof(aml_index(params, aml_int(0))), aidx)); + aml_append(ifctx, aml_store(aidx, aml_index(ret, zero))); + aml_append(ifctx, aml_return(ret)); + } + aml_append(method, ifctx); + + return method; +} + +static Aml *aml_pci_static_endpoint_dsm(PCIDevice *pdev) +{ + Aml *method; + + g_assert(pdev->acpi_index != 0); + method = aml_method("_DSM", 4, AML_SERIALIZED); + { + Aml *params = aml_local(0); + Aml *pkg = aml_package(1); + aml_append(pkg, aml_int(pdev->acpi_index)); + aml_append(method, aml_store(pkg, params)); + aml_append(method, + aml_return(aml_call5("EDSM", aml_arg(0), aml_arg(1), + aml_arg(2), aml_arg(3), params)) + ); + } + return method; +} + static void build_append_pcihp_notify_entry(Aml *method, int slot) { Aml *if_ctx; @@ -395,12 +494,6 @@ static bool is_devfn_ignored_generic(const int devfn, const PCIBus *bus) if (DEVICE(pdev)->hotplugged) { return true; } - } else if (!get_dev_aml_func(DEVICE(pdev))) { - /* - * Ignore all other devices on !0 functions unless they - * have AML description (i.e have get_dev_aml_func() != 0) - */ - return true; } } return false; } @@ -427,12 +520,14 @@ static bool is_devfn_ignored_hotplug(const int devfn, const PCIBus *bus) return false; } -static void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus, - QObject *bsel) +void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus) { int devfn; Aml *dev, *notify_method = NULL, *method; + QObject *bsel = object_property_get_qobject(OBJECT(bus), + ACPI_PCIHP_PROP_BSEL, NULL); uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel)); + qobject_unref(bsel); aml_append(parent_scope, aml_name_decl("BSEL", aml_int(bsel_val))); notify_method = aml_method("DVNT", 2, AML_NOTSERIALIZED); @@ -477,12 +572,9 @@ static void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus, void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus) { - QObject *bsel; int devfn; Aml *dev; - bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL); - for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { /* ACPI spec: 1.0b: Table 6-2 _ADR Object Bus Types, PCI type */ int adr = PCI_SLOT(devfn) << 16 | PCI_FUNC(devfn); @@ -497,16 +589,16 @@ void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus) aml_append(dev, aml_name_decl("_ADR", aml_int(adr))); call_dev_aml_func(DEVICE(bus->devices[devfn]), dev); + /* add _DSM if device has acpi-index set */ + if (pdev->acpi_index && + !object_property_get_bool(OBJECT(pdev), "hotpluggable", + &error_abort)) { + aml_append(dev, aml_pci_static_endpoint_dsm(pdev)); + } /* device descriptor has been composed, add it into parent context */ aml_append(parent_scope, dev); } - - if (bsel) { - build_append_pcihp_slots(parent_scope, bus, bsel); - } - - qobject_unref(bsel); } static bool build_append_notfication_callback(Aml *parent_scope, @@ -516,16 +608,24 @@ static bool build_append_notfication_callback(Aml *parent_scope, PCIBus *sec; QObject *bsel; int nr_notifiers = 0; + GQueue *pcnt_bus_list = g_queue_new(); QLIST_FOREACH(sec, &bus->child, sibling) { Aml *br_scope = aml_scope("S%.02X", sec->parent_dev->devfn); - if (pci_bus_is_root(sec) || - !object_property_find(OBJECT(sec), ACPI_PCIHP_PROP_BSEL)) { + if (pci_bus_is_root(sec)) { continue; } nr_notifiers = nr_notifiers + build_append_notfication_callback(br_scope, sec); - aml_append(parent_scope, br_scope); + /* + * add new child scope to parent + * and keep track of buses that have PCNT, + * bus list is used later to call children PCNTs from this level 
PCNT + */ + if (nr_notifiers) { + g_queue_push_tail(pcnt_bus_list, sec); + aml_append(parent_scope, br_scope); + } } /* @@ -549,30 +649,25 @@ static bool build_append_notfication_callback(Aml *parent_scope, } /* Notify about child bus events in any case */ - QLIST_FOREACH(sec, &bus->child, sibling) { - if (pci_bus_is_root(sec) || - !object_property_find(OBJECT(sec), ACPI_PCIHP_PROP_BSEL)) { - continue; - } - + while ((sec = g_queue_pop_head(pcnt_bus_list))) { aml_append(method, aml_name("^S%.02X.PCNT", sec->parent_dev->devfn)); } aml_append(parent_scope, method); qobject_unref(bsel); + g_queue_free(pcnt_bus_list); return !!nr_notifiers; } static Aml *aml_pci_pdsm(void) { - Aml *method, *UUID, *ifctx, *ifctx1; + Aml *method, *ifctx, *ifctx1; Aml *ret = aml_local(0); Aml *caps = aml_local(1); Aml *acpi_index = aml_local(2); Aml *zero = aml_int(0); Aml *one = aml_int(1); Aml *func = aml_arg(2); - Aml *rev = aml_arg(1); Aml *params = aml_arg(4); Aml *bnum = aml_derefof(aml_index(params, aml_int(0))); Aml *sunum = aml_derefof(aml_index(params, aml_int(1))); @@ -582,29 +677,9 @@ static Aml *aml_pci_pdsm(void) /* get supported functions */ ifctx = aml_if(aml_equal(func, zero)); { - uint8_t byte_list[1] = { 0 }; /* nothing supported yet */ - aml_append(ifctx, aml_store(aml_buffer(1, byte_list), ret)); + build_append_pci_dsm_func0_common(ifctx, ret); + aml_append(ifctx, aml_store(zero, caps)); - - /* - * PCI Firmware Specification 3.1 - * 4.6. _DSM Definitions for PCI - */ - UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D"); - ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(0), UUID))); - { - /* call is for unsupported UUID, bail out */ - aml_append(ifctx1, aml_return(ret)); - } - aml_append(ifctx, ifctx1); - - ifctx1 = aml_if(aml_lless(rev, aml_int(2))); - { - /* call is for unsupported REV, bail out */ - aml_append(ifctx1, aml_return(ret)); - } - aml_append(ifctx, ifctx1); - aml_append(ifctx, aml_store(aml_call2("AIDX", bnum, sunum), acpi_index)); /* @@ -1387,6 +1462,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_ADR", aml_int(0))); aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid))); + aml_append(dev, aml_pci_edsm()); aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); @@ -1402,6 +1478,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(dev, aml_name_decl("_ADR", aml_int(0))); aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid))); aml_append(dev, build_q35_osc_method(!pm->pcihp_bridge_en)); + aml_append(dev, aml_pci_edsm()); aml_append(sb_scope, dev); if (mcfg_valid) { aml_append(sb_scope, build_q35_dram_controller(&mcfg)); @@ -1513,7 +1590,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(pkg, aml_eisaid("PNP0A03")); aml_append(dev, aml_name_decl("_CID", pkg)); aml_append(dev, aml_name_decl("_ADR", aml_int(0))); - aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); build_cxl_osc_method(dev); } else if (pci_bus_is_express(bus)) { aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); @@ -1710,6 +1786,9 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, Aml *scope = aml_scope("PCI0"); /* Scan all PCI buses. Generate tables to support hotplug. 
*/ build_append_pci_bus_devices(scope, bus); + if (object_property_find(OBJECT(bus), ACPI_PCIHP_PROP_BSEL)) { + build_append_pcihp_slots(scope, bus); + } aml_append(sb_scope, scope); } } @@ -2316,9 +2395,11 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, /* IVHD length */ build_append_int_noprefix(table_data, ivhd_table_len, 2); /* DeviceID */ - build_append_int_noprefix(table_data, s->devid, 2); + build_append_int_noprefix(table_data, + object_property_get_int(OBJECT(&s->pci), "addr", + &error_abort), 2); /* Capability offset */ - build_append_int_noprefix(table_data, s->capab_offset, 2); + build_append_int_noprefix(table_data, s->pci.capab_offset, 2); /* IOMMU base address */ build_append_int_noprefix(table_data, s->mmio.addr, 8); /* PCI Segment Group */ @@ -2616,7 +2697,8 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) int legacy_table_size = ROUND_UP(tables_blob->len - aml_len + legacy_aml_len, ACPI_BUILD_ALIGN_SIZE); - if (tables_blob->len > legacy_table_size) { + if ((tables_blob->len > legacy_table_size) && + !pcmc->resizable_acpi_blob) { /* Should happen only with PCI bridges and -M pc-i440fx-2.0. */ warn_report("ACPI table size %u exceeds %d bytes," " migration may not work", @@ -2627,7 +2709,8 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) g_array_set_size(tables_blob, legacy_table_size); } else { /* Make sure we have a buffer in case we need to resize the tables. */ - if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) { + if ((tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) && + !pcmc->resizable_acpi_blob) { /* As of QEMU 2.1, this fires with 160 VCPUs and 255 memory slots. */ warn_report("ACPI table size %u exceeds %d bytes," " migration may not work", diff --git a/hw/i386/acpi-common.c b/hw/i386/acpi-common.c index 52e5c1439a..8a0932fe84 100644 --- a/hw/i386/acpi-common.c +++ b/hw/i386/acpi-common.c @@ -102,7 +102,7 @@ void acpi_build_madt(GArray *table_data, BIOSLinker *linker, MachineClass *mc = MACHINE_GET_CLASS(x86ms); const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(x86ms)); AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(adev); - AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = oem_id, + AcpiTable table = { .sig = "APIC", .rev = 3, .oem_id = oem_id, .oem_table_id = oem_table_id }; acpi_table_begin(&table, table_data); diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index bcd016f5c5..9c77304438 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -1509,23 +1509,48 @@ static void amdvi_init(AMDVIState *s) amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES, 0xffffffffffffffef, 0); amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67); +} + +static void amdvi_pci_realize(PCIDevice *pdev, Error **errp) +{ + AMDVIPCIState *s = AMD_IOMMU_PCI(pdev); + int ret; + + ret = pci_add_capability(pdev, AMDVI_CAPAB_ID_SEC, 0, + AMDVI_CAPAB_SIZE, errp); + if (ret < 0) { + return; + } + s->capab_offset = ret; + + ret = pci_add_capability(pdev, PCI_CAP_ID_MSI, 0, + AMDVI_CAPAB_REG_SIZE, errp); + if (ret < 0) { + return; + } + ret = pci_add_capability(pdev, PCI_CAP_ID_HT, 0, + AMDVI_CAPAB_REG_SIZE, errp); + if (ret < 0) { + return; + } + + if (msi_init(pdev, 0, 1, true, false, errp) < 0) { + return; + } /* reset device ident */ - pci_config_set_vendor_id(s->pci.dev.config, PCI_VENDOR_ID_AMD); - pci_config_set_prog_interface(s->pci.dev.config, 00); - pci_config_set_device_id(s->pci.dev.config, s->devid); - pci_config_set_class(s->pci.dev.config, 0x0806); + 
pci_config_set_prog_interface(pdev->config, 0); /* reset AMDVI specific capabilities, all r/o */ - pci_set_long(s->pci.dev.config + s->capab_offset, AMDVI_CAPAB_FEATURES); - pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_LOW, - s->mmio.addr & ~(0xffff0000)); - pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH, - (s->mmio.addr & ~(0xffff)) >> 16); - pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_RANGE, + pci_set_long(pdev->config + s->capab_offset, AMDVI_CAPAB_FEATURES); + pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_BAR_LOW, + AMDVI_BASE_ADDR & ~(0xffff0000)); + pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH, + (AMDVI_BASE_ADDR & ~(0xffff)) >> 16); + pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_RANGE, 0xff000000); - pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, 0); - pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, + pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_MISC, 0); + pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_MISC, AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR); } @@ -1539,7 +1564,6 @@ static void amdvi_sysbus_reset(DeviceState *dev) static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) { - int ret = 0; AMDVIState *s = AMD_IOMMU_DEVICE(dev); MachineState *ms = MACHINE(qdev_get_machine()); PCMachineState *pcms = PC_MACHINE(ms); @@ -1553,23 +1577,6 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) if (!qdev_realize(DEVICE(&s->pci), &bus->qbus, errp)) { return; } - ret = pci_add_capability(&s->pci.dev, AMDVI_CAPAB_ID_SEC, 0, - AMDVI_CAPAB_SIZE, errp); - if (ret < 0) { - return; - } - s->capab_offset = ret; - - ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_MSI, 0, - AMDVI_CAPAB_REG_SIZE, errp); - if (ret < 0) { - return; - } - ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_HT, 0, - AMDVI_CAPAB_REG_SIZE, errp); - if (ret < 0) { - return; - } /* Pseudo address space under root PCI bus. 
*/ x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID); @@ -1581,8 +1588,6 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio); sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR); pci_setup_iommu(bus, amdvi_host_dma_iommu, s); - s->devid = object_property_get_int(OBJECT(&s->pci), "addr", &error_abort); - msi_init(&s->pci.dev, 0, 1, true, false, errp); amdvi_init(s); } @@ -1625,6 +1630,11 @@ static const TypeInfo amdvi_sysbus = { static void amdvi_pci_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->vendor_id = PCI_VENDOR_ID_AMD; + k->class_id = 0x0806; + k->realize = amdvi_pci_realize; set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device"; diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h index 79d38a3e41..6da893ee57 100644 --- a/hw/i386/amd_iommu.h +++ b/hw/i386/amd_iommu.h @@ -300,27 +300,26 @@ struct irte_ga { OBJECT_DECLARE_SIMPLE_TYPE(AMDVIState, AMD_IOMMU_DEVICE) #define TYPE_AMD_IOMMU_PCI "AMDVI-PCI" +OBJECT_DECLARE_SIMPLE_TYPE(AMDVIPCIState, AMD_IOMMU_PCI) #define TYPE_AMD_IOMMU_MEMORY_REGION "amd-iommu-iommu-memory-region" typedef struct AMDVIAddressSpace AMDVIAddressSpace; /* functions to steal PCI config space */ -typedef struct AMDVIPCIState { +struct AMDVIPCIState { PCIDevice dev; /* The PCI device itself */ -} AMDVIPCIState; + uint32_t capab_offset; /* capability offset pointer */ +}; struct AMDVIState { X86IOMMUState iommu; /* IOMMU bus device */ AMDVIPCIState pci; /* IOMMU PCI device */ uint32_t version; - uint32_t capab_offset; /* capability offset pointer */ uint64_t mmio_addr; - uint32_t devid; /* auto-assigned devid */ - bool enabled; /* IOMMU enabled */ bool ats_enabled; /* address translation enabled */ bool cmdbuf_enabled; /* command buffer enabled */ diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 98a5c304a7..94d52f4205 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -64,8 +64,8 @@ struct vtd_as_key { struct vtd_iotlb_key { uint64_t gfn; uint32_t pasid; - uint32_t level; uint16_t sid; + uint8_t level; }; static void vtd_address_space_refresh_all(IntelIOMMUState *s); @@ -221,10 +221,11 @@ static gboolean vtd_iotlb_equal(gconstpointer v1, gconstpointer v2) static guint vtd_iotlb_hash(gconstpointer v) { const struct vtd_iotlb_key *key = v; + uint64_t hash64 = key->gfn | ((uint64_t)(key->sid) << VTD_IOTLB_SID_SHIFT) | + (uint64_t)(key->level - 1) << VTD_IOTLB_LVL_SHIFT | + (uint64_t)(key->pasid) << VTD_IOTLB_PASID_SHIFT; - return key->gfn | ((key->sid) << VTD_IOTLB_SID_SHIFT) | - (key->level) << VTD_IOTLB_LVL_SHIFT | - (key->pasid) << VTD_IOTLB_PASID_SHIFT; + return (guint)((hash64 >> 32) ^ (hash64 & 0xffffffffU)); } static gboolean vtd_as_equal(gconstpointer v1, gconstpointer v2) @@ -1530,13 +1531,17 @@ static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as, return vtd_page_walk(s, ce, addr, addr + size, &info, vtd_as->pasid); } -static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as) +static int vtd_address_space_sync(VTDAddressSpace *vtd_as) { int ret; VTDContextEntry ce; IOMMUNotifier *n; - if (!(vtd_as->iommu.iommu_notify_flags & IOMMU_NOTIFIER_IOTLB_EVENTS)) { + /* If no MAP notifier registered, we simply invalidate all the cache */ + if (!vtd_as_has_map_notifier(vtd_as)) { + IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { + memory_region_unmap_iommu_notifier_range(n); + } return 0; } @@ 
-2000,7 +2005,7 @@ static void vtd_iommu_replay_all(IntelIOMMUState *s) VTDAddressSpace *vtd_as; QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { - vtd_sync_shadow_page_table(vtd_as); + vtd_address_space_sync(vtd_as); } } @@ -2082,7 +2087,7 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s, * framework will skip MAP notifications if that * happened. */ - vtd_sync_shadow_page_table(vtd_as); + vtd_address_space_sync(vtd_as); } } } @@ -2140,7 +2145,7 @@ static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id) if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), vtd_as->devfn, &ce) && domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) { - vtd_sync_shadow_page_table(vtd_as); + vtd_address_space_sync(vtd_as); } } } @@ -3179,6 +3184,7 @@ static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, { VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); IntelIOMMUState *s = vtd_as->iommu_state; + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); /* TODO: add support for VFIO and vhost users */ if (s->snoop_control) { @@ -3186,6 +3192,20 @@ static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, "Snoop Control with vhost or VFIO is not supported"); return -ENOTSUP; } + if (!s->caching_mode && (new & IOMMU_NOTIFIER_MAP)) { + error_setg_errno(errp, ENOTSUP, + "device %02x.%02x.%x requires caching mode", + pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn), + PCI_FUNC(vtd_as->devfn)); + return -ENOTSUP; + } + if (!x86_iommu->dt_supported && (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP)) { + error_setg_errno(errp, ENOTSUP, + "device %02x.%02x.%x requires device IOTLB mode", + pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn), + PCI_FUNC(vtd_as->devfn)); + return -ENOTSUP; + } /* Update per-address-space notifier flags */ vtd_as->notifier_flags = new; diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h index f090e61e11..2e61eec2f5 100644 --- a/hw/i386/intel_iommu_internal.h +++ b/hw/i386/intel_iommu_internal.h @@ -114,9 +114,9 @@ VTD_INTERRUPT_ADDR_FIRST + 1) /* The shift of source_id in the key of IOTLB hash table */ -#define VTD_IOTLB_SID_SHIFT 20 -#define VTD_IOTLB_LVL_SHIFT 28 -#define VTD_IOTLB_PASID_SHIFT 30 +#define VTD_IOTLB_SID_SHIFT 26 +#define VTD_IOTLB_LVL_SHIFT 42 +#define VTD_IOTLB_PASID_SHIFT 44 #define VTD_IOTLB_MAX_SIZE 1024 /* Max size of the hash table */ /* IOTLB_REG */ diff --git a/hw/i386/kvm/ioapic.c b/hw/i386/kvm/ioapic.c index 272e26b4a2..cd5ea5d60b 100644 --- a/hw/i386/kvm/ioapic.c +++ b/hw/i386/kvm/ioapic.c @@ -12,9 +12,8 @@ #include "qemu/osdep.h" #include "monitor/monitor.h" -#include "hw/i386/x86.h" #include "hw/qdev-properties.h" -#include "hw/i386/ioapic_internal.h" +#include "hw/intc/ioapic_internal.h" #include "hw/intc/kvm_irqcount.h" #include "sysemu/kvm.h" diff --git a/hw/i386/kvm/meson.build b/hw/i386/kvm/meson.build index 95467f1ded..6621ba5cd7 100644 --- a/hw/i386/kvm/meson.build +++ b/hw/i386/kvm/meson.build @@ -4,5 +4,19 @@ i386_kvm_ss.add(when: 'CONFIG_APIC', if_true: files('apic.c')) i386_kvm_ss.add(when: 'CONFIG_I8254', if_true: files('i8254.c')) i386_kvm_ss.add(when: 'CONFIG_I8259', if_true: files('i8259.c')) i386_kvm_ss.add(when: 'CONFIG_IOAPIC', if_true: files('ioapic.c')) +i386_kvm_ss.add(when: 'CONFIG_XEN_EMU', if_true: files( + 'xen_overlay.c', + 'xen_evtchn.c', + 'xen_gnttab.c', + 'xen_xenstore.c', + 'xenstore_impl.c', + )) i386_ss.add_all(when: 'CONFIG_KVM', if_true: i386_kvm_ss) + +xen_stubs_ss = ss.source_set() +xen_stubs_ss.add(when: 
'CONFIG_XEN_EMU', if_false: files( + 'xen-stubs.c', +)) + +specific_ss.add_all(when: 'CONFIG_SOFTMMU', if_true: xen_stubs_ss) diff --git a/hw/i386/kvm/trace-events b/hw/i386/kvm/trace-events new file mode 100644 index 0000000000..e4c82de6f3 --- /dev/null +++ b/hw/i386/kvm/trace-events @@ -0,0 +1,20 @@ +kvm_xen_map_pirq(int pirq, int gsi) "pirq %d gsi %d" +kvm_xen_unmap_pirq(int pirq, int gsi) "pirq %d gsi %d" +kvm_xen_get_free_pirq(int pirq, int type) "pirq %d type %d" +kvm_xen_bind_pirq(int pirq, int port) "pirq %d port %d" +kvm_xen_unmask_pirq(int pirq, char *dev, int vector) "pirq %d dev %s vector %d" +xenstore_error(unsigned int id, unsigned int tx_id, const char *err) "req %u tx %u err %s" +xenstore_read(unsigned int tx_id, const char *path) "tx %u path %s" +xenstore_write(unsigned int tx_id, const char *path) "tx %u path %s" +xenstore_mkdir(unsigned int tx_id, const char *path) "tx %u path %s" +xenstore_directory(unsigned int tx_id, const char *path) "tx %u path %s" +xenstore_directory_part(unsigned int tx_id, const char *path, unsigned int offset) "tx %u path %s offset %u" +xenstore_transaction_start(unsigned int new_tx) "new_tx %u" +xenstore_transaction_end(unsigned int tx_id, bool commit) "tx %u commit %d" +xenstore_rm(unsigned int tx_id, const char *path) "tx %u path %s" +xenstore_get_perms(unsigned int tx_id, const char *path) "tx %u path %s" +xenstore_set_perms(unsigned int tx_id, const char *path) "tx %u path %s" +xenstore_watch(const char *path, const char *token) "path %s token %s" +xenstore_unwatch(const char *path, const char *token) "path %s token %s" +xenstore_reset_watches(void) "" +xenstore_watch_event(const char *path, const char *token) "path %s token %s" diff --git a/hw/i386/kvm/trace.h b/hw/i386/kvm/trace.h new file mode 100644 index 0000000000..e55d0812fd --- /dev/null +++ b/hw/i386/kvm/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_i386_kvm.h" diff --git a/hw/i386/kvm/xen-stubs.c b/hw/i386/kvm/xen-stubs.c new file mode 100644 index 0000000000..ae406e0b02 --- /dev/null +++ b/hw/i386/kvm/xen-stubs.c @@ -0,0 +1,44 @@ +/* + * QEMU Xen emulation: QMP stubs + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Authors: David Woodhouse + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" + +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-target.h" + +#include "xen_evtchn.h" + +void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector, + uint64_t addr, uint32_t data, bool is_masked) +{ +} + +void xen_evtchn_remove_pci_device(PCIDevice *dev) +{ +} + +bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data) +{ + return false; +} + +#ifdef TARGET_I386 +EvtchnInfoList *qmp_xen_event_list(Error **errp) +{ + error_setg(errp, "Xen event channel emulation not enabled"); + return NULL; +} + +void qmp_xen_event_inject(uint32_t port, Error **errp) +{ + error_setg(errp, "Xen event channel emulation not enabled"); +} +#endif diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c new file mode 100644 index 0000000000..3048329474 --- /dev/null +++ b/hw/i386/kvm/xen_evtchn.c @@ -0,0 +1,2357 @@ +/* + * QEMU Xen emulation: Event channel support + * + * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Authors: David Woodhouse + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "qemu/module.h" +#include "qemu/lockable.h" +#include "qemu/main-loop.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "monitor/monitor.h" +#include "monitor/hmp.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-target.h" +#include "qapi/qmp/qdict.h" +#include "qom/object.h" +#include "exec/target_page.h" +#include "exec/address-spaces.h" +#include "migration/vmstate.h" +#include "trace.h" + +#include "hw/sysbus.h" +#include "hw/xen/xen.h" +#include "hw/i386/x86.h" +#include "hw/i386/pc.h" +#include "hw/pci/pci.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/irq.h" +#include "hw/xen/xen_backend_ops.h" + +#include "xen_evtchn.h" +#include "xen_overlay.h" +#include "xen_xenstore.h" + +#include "sysemu/kvm.h" +#include "sysemu/kvm_xen.h" +#include +#include + +#include "hw/xen/interface/memory.h" +#include "hw/xen/interface/hvm/params.h" + +/* XX: For kvm_update_msi_routes_all() */ +#include "target/i386/kvm/kvm_i386.h" + +#define TYPE_XEN_EVTCHN "xen-evtchn" +OBJECT_DECLARE_SIMPLE_TYPE(XenEvtchnState, XEN_EVTCHN) + +typedef struct XenEvtchnPort { + uint32_t vcpu; /* Xen/ACPI vcpu_id */ + uint16_t type; /* EVTCHNSTAT_xxxx */ + uint16_t type_val; /* pirq# / virq# / remote port according to type */ +} XenEvtchnPort; + +/* 32-bit compatibility definitions, also used natively in 32-bit build */ +struct compat_arch_vcpu_info { + unsigned int cr2; + unsigned int pad[5]; +}; + +struct compat_vcpu_info { + uint8_t evtchn_upcall_pending; + uint8_t evtchn_upcall_mask; + uint16_t pad; + uint32_t evtchn_pending_sel; + struct compat_arch_vcpu_info arch; + struct vcpu_time_info time; +}; /* 64 bytes (x86) */ + +struct compat_arch_shared_info { + unsigned int max_pfn; + unsigned int pfn_to_mfn_frame_list_list; + unsigned int nmi_reason; + unsigned int p2m_cr3; + unsigned int p2m_vaddr; + unsigned int p2m_generation; + uint32_t wc_sec_hi; +}; + +struct compat_shared_info { + struct compat_vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS]; + uint32_t evtchn_pending[32]; + uint32_t evtchn_mask[32]; + uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ + uint32_t wc_sec; + uint32_t wc_nsec; + struct compat_arch_shared_info arch; +}; + +#define COMPAT_EVTCHN_2L_NR_CHANNELS 1024 + +/* Local private implementation of struct xenevtchn_handle */ +struct xenevtchn_handle { + evtchn_port_t be_port; + evtchn_port_t guest_port; /* Or zero for unbound */ + int fd; +}; + +/* + * For unbound/interdomain ports there are only two possible remote + * domains; self and QEMU. Use a single high bit in type_val for that, + * and the low bits for the remote port number (or 0 for unbound). + */ +#define PORT_INFO_TYPEVAL_REMOTE_QEMU 0x8000 +#define PORT_INFO_TYPEVAL_REMOTE_PORT_MASK 0x7FFF + +/* + * These 'emuirq' values are used by Xen in the LM stream... and yes, I am + * insane enough to think about guest-transparent live migration from actual + * Xen to QEMU, and ensuring that we can convert/consume the stream. 
+ */ +#define IRQ_UNBOUND -1 +#define IRQ_PT -2 +#define IRQ_MSI_EMU -3 + + +struct pirq_info { + int gsi; + uint16_t port; + PCIDevice *dev; + int vector; + bool is_msix; + bool is_masked; + bool is_translated; +}; + +struct XenEvtchnState { + /*< private >*/ + SysBusDevice busdev; + /*< public >*/ + + uint64_t callback_param; + bool evtchn_in_kernel; + uint32_t callback_gsi; + + QEMUBH *gsi_bh; + + QemuMutex port_lock; + uint32_t nr_ports; + XenEvtchnPort port_table[EVTCHN_2L_NR_CHANNELS]; + qemu_irq gsis[IOAPIC_NUM_PINS]; + + struct xenevtchn_handle *be_handles[EVTCHN_2L_NR_CHANNELS]; + + uint32_t nr_pirqs; + + /* Bitmap of allocated PIRQs (serialized) */ + uint16_t nr_pirq_inuse_words; + uint64_t *pirq_inuse_bitmap; + + /* GSI → PIRQ mapping (serialized) */ + uint16_t gsi_pirq[IOAPIC_NUM_PINS]; + + /* Per-GSI assertion state (serialized) */ + uint32_t pirq_gsi_set; + + /* Per-PIRQ information (rebuilt on migration, protected by BQL) */ + struct pirq_info *pirq; +}; + +#define pirq_inuse_word(s, pirq) (s->pirq_inuse_bitmap[((pirq) / 64)]) +#define pirq_inuse_bit(pirq) (1ULL << ((pirq) & 63)) + +#define pirq_inuse(s, pirq) (pirq_inuse_word(s, pirq) & pirq_inuse_bit(pirq)) + +struct XenEvtchnState *xen_evtchn_singleton; + +/* Top bits of callback_param are the type (HVM_PARAM_CALLBACK_TYPE_xxx) */ +#define CALLBACK_VIA_TYPE_SHIFT 56 + +static void unbind_backend_ports(XenEvtchnState *s); + +static int xen_evtchn_pre_load(void *opaque) +{ + XenEvtchnState *s = opaque; + + /* Unbind all the backend-side ports; they need to rebind */ + unbind_backend_ports(s); + + /* It'll be leaked otherwise. */ + g_free(s->pirq_inuse_bitmap); + s->pirq_inuse_bitmap = NULL; + + return 0; +} + +static int xen_evtchn_post_load(void *opaque, int version_id) +{ + XenEvtchnState *s = opaque; + uint32_t i; + + if (s->callback_param) { + xen_evtchn_set_callback_param(s->callback_param); + } + + /* Rebuild s->pirq[].port mapping */ + for (i = 0; i < s->nr_ports; i++) { + XenEvtchnPort *p = &s->port_table[i]; + + if (p->type == EVTCHNSTAT_pirq) { + assert(p->type_val); + assert(p->type_val < s->nr_pirqs); + + /* + * Set the gsi to IRQ_UNBOUND; it may be changed to an actual + * GSI# below, or to IRQ_MSI_EMU when the MSI table snooping + * catches up with it. 
+             */
+            s->pirq[p->type_val].gsi = IRQ_UNBOUND;
+            s->pirq[p->type_val].port = i;
+        }
+    }
+    /* Rebuild s->pirq[].gsi mapping */
+    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+        if (s->gsi_pirq[i]) {
+            s->pirq[s->gsi_pirq[i]].gsi = i;
+        }
+    }
+    return 0;
+}
+
+static bool xen_evtchn_is_needed(void *opaque)
+{
+    return xen_mode == XEN_EMULATE;
+}
+
+static const VMStateDescription xen_evtchn_port_vmstate = {
+    .name = "xen_evtchn_port",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(vcpu, XenEvtchnPort),
+        VMSTATE_UINT16(type, XenEvtchnPort),
+        VMSTATE_UINT16(type_val, XenEvtchnPort),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription xen_evtchn_vmstate = {
+    .name = "xen_evtchn",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = xen_evtchn_is_needed,
+    .pre_load = xen_evtchn_pre_load,
+    .post_load = xen_evtchn_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(callback_param, XenEvtchnState),
+        VMSTATE_UINT32(nr_ports, XenEvtchnState),
+        VMSTATE_STRUCT_VARRAY_UINT32(port_table, XenEvtchnState, nr_ports, 1,
+                                     xen_evtchn_port_vmstate, XenEvtchnPort),
+        VMSTATE_UINT16_ARRAY(gsi_pirq, XenEvtchnState, IOAPIC_NUM_PINS),
+        VMSTATE_VARRAY_UINT16_ALLOC(pirq_inuse_bitmap, XenEvtchnState,
+                                    nr_pirq_inuse_words, 0,
+                                    vmstate_info_uint64, uint64_t),
+        VMSTATE_UINT32(pirq_gsi_set, XenEvtchnState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void xen_evtchn_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->vmsd = &xen_evtchn_vmstate;
+}
+
+static const TypeInfo xen_evtchn_info = {
+    .name = TYPE_XEN_EVTCHN,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(XenEvtchnState),
+    .class_init = xen_evtchn_class_init,
+};
+
+static struct evtchn_backend_ops emu_evtchn_backend_ops = {
+    .open = xen_be_evtchn_open,
+    .bind_interdomain = xen_be_evtchn_bind_interdomain,
+    .unbind = xen_be_evtchn_unbind,
+    .close = xen_be_evtchn_close,
+    .get_fd = xen_be_evtchn_fd,
+    .notify = xen_be_evtchn_notify,
+    .unmask = xen_be_evtchn_unmask,
+    .pending = xen_be_evtchn_pending,
+};
+
+static void gsi_assert_bh(void *opaque)
+{
+    struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(0);
+    if (vi) {
+        xen_evtchn_set_callback_level(!!vi->evtchn_upcall_pending);
+    }
+}
+
+void xen_evtchn_create(void)
+{
+    XenEvtchnState *s = XEN_EVTCHN(sysbus_create_simple(TYPE_XEN_EVTCHN,
+                                                        -1, NULL));
+    int i;
+
+    xen_evtchn_singleton = s;
+
+    qemu_mutex_init(&s->port_lock);
+    s->gsi_bh = aio_bh_new(qemu_get_aio_context(), gsi_assert_bh, s);
+
+    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+        sysbus_init_irq(SYS_BUS_DEVICE(s), &s->gsis[i]);
+    }
+
+    /*
+     * The Xen scheme for encoding PIRQ# into an MSI message is not
+     * compatible with 32-bit MSI, as it puts the high bits of the
+     * PIRQ# into the high bits of the MSI message address, instead of
+     * using the Extended Destination ID in address bits 4-11 which
+     * perhaps would have been a better choice.
+     *
+     * To keep life simple, kvm_accel_instance_init() initialises the
+     * default to 256, which conveniently doesn't need to set anything
+     * outside the low 32 bits of the address. It can be increased by
+     * setting the xen-evtchn-max-pirq property.
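+     *
+     * (Concretely, msi_pirq_target() below recovers the PIRQ# from an
+     * MSI message as:
+     *
+     *     pirq = (addr & 0xff000) >> 12;
+     *     pirq |= (addr >> 32) & 0xffffff00;
+     *
+     * so with only 256 PIRQs the high dword of the address stays zero.)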
+     */
+    s->nr_pirqs = kvm_xen_get_evtchn_max_pirq();
+
+    s->nr_pirq_inuse_words = DIV_ROUND_UP(s->nr_pirqs, 64);
+    s->pirq_inuse_bitmap = g_new0(uint64_t, s->nr_pirq_inuse_words);
+    s->pirq = g_new0(struct pirq_info, s->nr_pirqs);
+
+    /* Set event channel functions for backend drivers to use */
+    xen_evtchn_ops = &emu_evtchn_backend_ops;
+}
+
+void xen_evtchn_connect_gsis(qemu_irq *system_gsis)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int i;
+
+    if (!s) {
+        return;
+    }
+
+    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+        sysbus_connect_irq(SYS_BUS_DEVICE(s), i, system_gsis[i]);
+    }
+}
+
+static void xen_evtchn_register_types(void)
+{
+    type_register_static(&xen_evtchn_info);
+}
+
+type_init(xen_evtchn_register_types)
+
+static int set_callback_pci_intx(XenEvtchnState *s, uint64_t param)
+{
+    PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
+    uint8_t pin = param & 3;
+    uint8_t devfn = (param >> 8) & 0xff;
+    uint16_t bus = (param >> 16) & 0xffff;
+    uint16_t domain = (param >> 32) & 0xffff;
+    PCIDevice *pdev;
+    PCIINTxRoute r;
+
+    if (domain || !pcms) {
+        return 0;
+    }
+
+    pdev = pci_find_device(pcms->bus, bus, devfn);
+    if (!pdev) {
+        return 0;
+    }
+
+    r = pci_device_route_intx_to_irq(pdev, pin);
+    if (r.mode != PCI_INTX_ENABLED) {
+        return 0;
+    }
+
+    /*
+     * Hm, can we be notified of INTX routing changes? Not without
+     * *owning* the device and being allowed to overwrite its own
+     * ->intx_routing_notifier, AFAICT. So let's not.
+     */
+    return r.irq;
+}
+
+void xen_evtchn_set_callback_level(int level)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    if (!s) {
+        return;
+    }
+
+    /*
+     * We get to this function in a number of ways:
+     *
+     * • From I/O context, via PV backend drivers sending a notification to
+     *   the guest.
+     *
+     * • From guest vCPU context, via loopback interdomain event channels
+     *   (or theoretically even IPIs but guests don't use those with GSI
+     *   delivery because that's pointless. We don't want a malicious guest
+     *   to be able to trigger a deadlock though, so we can't rule it out.)
+     *
+     * • From guest vCPU context when the HVM_PARAM_CALLBACK_IRQ is being
+     *   configured.
+     *
+     * • From guest vCPU context in the KVM exit handler, if the upcall
+     *   pending flag has been cleared and the GSI needs to be deasserted.
+     *
+     * • Maybe in future, in an interrupt ack/eoi notifier when the GSI has
+     *   been acked in the irqchip.
+     *
+     * Whichever context we come from, if we aren't already holding the BQL
+     * then we can't take it now, as we may already hold s->port_lock. So
+     * trigger the BH to set the IRQ for us instead of doing it immediately.
+     *
+     * In the HVM_PARAM_CALLBACK_IRQ and KVM exit handler cases, the caller
+     * will deliberately take the BQL because they want the change to take
+     * effect immediately. That just leaves interdomain loopback as the case
+     * which uses the BH.
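+     *
+     * (The BH handler, gsi_assert_bh(), just re-reads vCPU0's
+     * evtchn_upcall_pending flag and calls back into this function from
+     * the main loop, where the BQL is held.)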
+     */
+    if (!qemu_mutex_iothread_locked()) {
+        qemu_bh_schedule(s->gsi_bh);
+        return;
+    }
+
+    if (s->callback_gsi && s->callback_gsi < IOAPIC_NUM_PINS) {
+        qemu_set_irq(s->gsis[s->callback_gsi], level);
+        if (level) {
+            /* Ensure the vCPU polls for deassertion */
+            kvm_xen_set_callback_asserted();
+        }
+    }
+}
+
+int xen_evtchn_set_callback_param(uint64_t param)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    struct kvm_xen_hvm_attr xa = {
+        .type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
+        .u.vector = 0,
+    };
+    bool in_kernel = false;
+    uint32_t gsi = 0;
+    int type = param >> CALLBACK_VIA_TYPE_SHIFT;
+    int ret;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    /*
+     * We need the BQL because set_callback_pci_intx() may call into PCI code,
+     * and because we may need to manipulate the old and new GSI levels.
+     */
+    assert(qemu_mutex_iothread_locked());
+    qemu_mutex_lock(&s->port_lock);
+
+    switch (type) {
+    case HVM_PARAM_CALLBACK_TYPE_VECTOR: {
+        xa.u.vector = (uint8_t)param;
+
+        ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
+        if (!ret && kvm_xen_has_cap(EVTCHN_SEND)) {
+            in_kernel = true;
+        }
+        gsi = 0;
+        break;
+    }
+
+    case HVM_PARAM_CALLBACK_TYPE_PCI_INTX:
+        gsi = set_callback_pci_intx(s, param);
+        ret = gsi ? 0 : -EINVAL;
+        break;
+
+    case HVM_PARAM_CALLBACK_TYPE_GSI:
+        gsi = (uint32_t)param;
+        ret = 0;
+        break;
+
+    default:
+        /* Xen doesn't return error even if you set something bogus */
+        ret = 0;
+        break;
+    }
+
+    if (!ret) {
+        /* If vector delivery was turned *off* then tell the kernel */
+        if ((s->callback_param >> CALLBACK_VIA_TYPE_SHIFT) ==
+            HVM_PARAM_CALLBACK_TYPE_VECTOR && !xa.u.vector) {
+            kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
+        }
+        s->callback_param = param;
+        s->evtchn_in_kernel = in_kernel;
+
+        if (gsi != s->callback_gsi) {
+            struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(0);
+
+            xen_evtchn_set_callback_level(0);
+            s->callback_gsi = gsi;
+
+            if (gsi && vi && vi->evtchn_upcall_pending) {
+                kvm_xen_inject_vcpu_callback_vector(0, type);
+            }
+        }
+    }
+
+    qemu_mutex_unlock(&s->port_lock);
+
+    return ret;
+}
+
+static void inject_callback(XenEvtchnState *s, uint32_t vcpu)
+{
+    int type = s->callback_param >> CALLBACK_VIA_TYPE_SHIFT;
+
+    kvm_xen_inject_vcpu_callback_vector(vcpu, type);
+}
+
+static void deassign_kernel_port(evtchn_port_t port)
+{
+    struct kvm_xen_hvm_attr ha;
+    int ret;
+
+    ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
+    ha.u.evtchn.send_port = port;
+    ha.u.evtchn.flags = KVM_XEN_EVTCHN_DEASSIGN;
+
+    ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
+    if (ret) {
+        qemu_log_mask(LOG_GUEST_ERROR, "Failed to unbind kernel port %d: %s\n",
+                      port, strerror(-ret));
+    }
+}
+
+static int assign_kernel_port(uint16_t type, evtchn_port_t port,
+                              uint32_t vcpu_id)
+{
+    CPUState *cpu = qemu_get_cpu(vcpu_id);
+    struct kvm_xen_hvm_attr ha;
+
+    if (!cpu) {
+        return -ENOENT;
+    }
+
+    ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
+    ha.u.evtchn.send_port = port;
+    ha.u.evtchn.type = type;
+    ha.u.evtchn.flags = 0;
+    ha.u.evtchn.deliver.port.port = port;
+    ha.u.evtchn.deliver.port.vcpu = kvm_arch_vcpu_id(cpu);
+    ha.u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
+
+    return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
+}
+
+static int assign_kernel_eventfd(uint16_t type, evtchn_port_t port, int fd)
+{
+    struct kvm_xen_hvm_attr ha;
+
+    ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
+    ha.u.evtchn.send_port = port;
+    ha.u.evtchn.type = type;
+    ha.u.evtchn.flags = 0;
+    ha.u.evtchn.deliver.eventfd.port = 0;
+    ha.u.evtchn.deliver.eventfd.fd = fd;
+
+    return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
+}
+
+static bool valid_port(evtchn_port_t port)
+{
+    if (!port) {
+        return false;
+    }
+
+    if (xen_is_long_mode()) {
+        return port < EVTCHN_2L_NR_CHANNELS;
+    } else {
+        return port < COMPAT_EVTCHN_2L_NR_CHANNELS;
+    }
+}
+
+static bool valid_vcpu(uint32_t vcpu)
+{
+    return !!qemu_get_cpu(vcpu);
+}
+
+static void unbind_backend_ports(XenEvtchnState *s)
+{
+    XenEvtchnPort *p;
+    int i;
+
+    for (i = 1; i < s->nr_ports; i++) {
+        p = &s->port_table[i];
+        if (p->type == EVTCHNSTAT_interdomain &&
+            (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU)) {
+            evtchn_port_t be_port = p->type_val & PORT_INFO_TYPEVAL_REMOTE_PORT_MASK;
+
+            if (s->be_handles[be_port]) {
+                /* This part will be overwritten on the load anyway. */
+                p->type = EVTCHNSTAT_unbound;
+                p->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU;
+
+                /* Leave the backend port open and unbound too. */
+                if (kvm_xen_has_cap(EVTCHN_SEND)) {
+                    deassign_kernel_port(i);
+                }
+                s->be_handles[be_port]->guest_port = 0;
+            }
+        }
+    }
+}
+
+int xen_evtchn_status_op(struct evtchn_status *status)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    XenEvtchnPort *p;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (status->dom != DOMID_SELF && status->dom != xen_domid) {
+        return -ESRCH;
+    }
+
+    if (!valid_port(status->port)) {
+        return -EINVAL;
+    }
+
+    qemu_mutex_lock(&s->port_lock);
+
+    p = &s->port_table[status->port];
+
+    status->status = p->type;
+    status->vcpu = p->vcpu;
+
+    switch (p->type) {
+    case EVTCHNSTAT_unbound:
+        if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
+            status->u.unbound.dom = DOMID_QEMU;
+        } else {
+            status->u.unbound.dom = xen_domid;
+        }
+        break;
+
+    case EVTCHNSTAT_interdomain:
+        if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
+            status->u.interdomain.dom = DOMID_QEMU;
+        } else {
+            status->u.interdomain.dom = xen_domid;
+        }
+
+        status->u.interdomain.port = p->type_val &
+            PORT_INFO_TYPEVAL_REMOTE_PORT_MASK;
+        break;
+
+    case EVTCHNSTAT_pirq:
+        status->u.pirq = p->type_val;
+        break;
+
+    case EVTCHNSTAT_virq:
+        status->u.virq = p->type_val;
+        break;
+    }
+
+    qemu_mutex_unlock(&s->port_lock);
+    return 0;
+}
+
+/*
+ * Never thought I'd hear myself say this, but C++ templates would be
+ * kind of nice here.
+ *
+ * template <class T> static int do_unmask_port(T *shinfo, ...);
+ */
+static int do_unmask_port_lm(XenEvtchnState *s, evtchn_port_t port,
+                             bool do_unmask, struct shared_info *shinfo,
+                             struct vcpu_info *vcpu_info)
+{
+    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+    typeof(shinfo->evtchn_pending[0]) mask;
+    int idx = port / bits_per_word;
+    int offset = port % bits_per_word;
+
+    mask = 1UL << offset;
+
+    if (idx >= bits_per_word) {
+        return -EINVAL;
+    }
+
+    if (do_unmask) {
+        /*
+         * If this is a true unmask operation, clear the mask bit. If
+         * it was already unmasked, we have nothing further to do.
+         */
+        if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
+            return 0;
+        }
+    } else {
+        /*
+         * This is a pseudo-unmask for affinity changes. We don't
+         * change the mask bit, and if it's *masked* we have nothing
+         * else to do.
+         */
+        if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+            return 0;
+        }
+    }
+
+    /* If the event was not pending, we're done. */
+    if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
+        return 0;
+    }
+
+    /* Now on to the vcpu_info evtchn_pending_sel index... */
+    mask = 1UL << idx;
+
+    /* If a port in this word was already pending for this vCPU, all done.
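+     * (evtchn_pending_sel is the second level of the two-level scheme:
+     * one bit for each word of evtchn_pending[].)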
*/ + if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) { + return 0; + } + + /* Set evtchn_upcall_pending for this vCPU */ + if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) { + return 0; + } + + inject_callback(s, s->port_table[port].vcpu); + + return 0; +} + +static int do_unmask_port_compat(XenEvtchnState *s, evtchn_port_t port, + bool do_unmask, + struct compat_shared_info *shinfo, + struct compat_vcpu_info *vcpu_info) +{ + const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]); + typeof(shinfo->evtchn_pending[0]) mask; + int idx = port / bits_per_word; + int offset = port % bits_per_word; + + mask = 1UL << offset; + + if (idx >= bits_per_word) { + return -EINVAL; + } + + if (do_unmask) { + /* + * If this is a true unmask operation, clear the mask bit. If + * it was already unmasked, we have nothing further to do. + */ + if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) { + return 0; + } + } else { + /* + * This is a pseudo-unmask for affinity changes. We don't + * change the mask bit, and if it's *masked* we have nothing + * else to do. + */ + if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) { + return 0; + } + } + + /* If the event was not pending, we're done. */ + if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) { + return 0; + } + + /* Now on to the vcpu_info evtchn_pending_sel index... */ + mask = 1UL << idx; + + /* If a port in this word was already pending for this vCPU, all done. */ + if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) { + return 0; + } + + /* Set evtchn_upcall_pending for this vCPU */ + if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) { + return 0; + } + + inject_callback(s, s->port_table[port].vcpu); + + return 0; +} + +static int unmask_port(XenEvtchnState *s, evtchn_port_t port, bool do_unmask) +{ + void *vcpu_info, *shinfo; + + if (s->port_table[port].type == EVTCHNSTAT_closed) { + return -EINVAL; + } + + shinfo = xen_overlay_get_shinfo_ptr(); + if (!shinfo) { + return -ENOTSUP; + } + + vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu); + if (!vcpu_info) { + return -EINVAL; + } + + if (xen_is_long_mode()) { + return do_unmask_port_lm(s, port, do_unmask, shinfo, vcpu_info); + } else { + return do_unmask_port_compat(s, port, do_unmask, shinfo, vcpu_info); + } +} + +static int do_set_port_lm(XenEvtchnState *s, evtchn_port_t port, + struct shared_info *shinfo, + struct vcpu_info *vcpu_info) +{ + const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]); + typeof(shinfo->evtchn_pending[0]) mask; + int idx = port / bits_per_word; + int offset = port % bits_per_word; + + mask = 1UL << offset; + + if (idx >= bits_per_word) { + return -EINVAL; + } + + /* Update the pending bit itself. If it was already set, we're done. */ + if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) { + return 0; + } + + /* Check if it's masked. */ + if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) { + return 0; + } + + /* Now on to the vcpu_info evtchn_pending_sel index... */ + mask = 1UL << idx; + + /* If a port in this word was already pending for this vCPU, all done. 
*/ + if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) { + return 0; + } + + /* Set evtchn_upcall_pending for this vCPU */ + if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) { + return 0; + } + + inject_callback(s, s->port_table[port].vcpu); + + return 0; +} + +static int do_set_port_compat(XenEvtchnState *s, evtchn_port_t port, + struct compat_shared_info *shinfo, + struct compat_vcpu_info *vcpu_info) +{ + const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]); + typeof(shinfo->evtchn_pending[0]) mask; + int idx = port / bits_per_word; + int offset = port % bits_per_word; + + mask = 1UL << offset; + + if (idx >= bits_per_word) { + return -EINVAL; + } + + /* Update the pending bit itself. If it was already set, we're done. */ + if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) { + return 0; + } + + /* Check if it's masked. */ + if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) { + return 0; + } + + /* Now on to the vcpu_info evtchn_pending_sel index... */ + mask = 1UL << idx; + + /* If a port in this word was already pending for this vCPU, all done. */ + if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) { + return 0; + } + + /* Set evtchn_upcall_pending for this vCPU */ + if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) { + return 0; + } + + inject_callback(s, s->port_table[port].vcpu); + + return 0; +} + +static int set_port_pending(XenEvtchnState *s, evtchn_port_t port) +{ + void *vcpu_info, *shinfo; + + if (s->port_table[port].type == EVTCHNSTAT_closed) { + return -EINVAL; + } + + if (s->evtchn_in_kernel) { + XenEvtchnPort *p = &s->port_table[port]; + CPUState *cpu = qemu_get_cpu(p->vcpu); + struct kvm_irq_routing_xen_evtchn evt; + + if (!cpu) { + return 0; + } + + evt.port = port; + evt.vcpu = kvm_arch_vcpu_id(cpu); + evt.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL; + + return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_EVTCHN_SEND, &evt); + } + + shinfo = xen_overlay_get_shinfo_ptr(); + if (!shinfo) { + return -ENOTSUP; + } + + vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu); + if (!vcpu_info) { + return -EINVAL; + } + + if (xen_is_long_mode()) { + return do_set_port_lm(s, port, shinfo, vcpu_info); + } else { + return do_set_port_compat(s, port, shinfo, vcpu_info); + } +} + +static int clear_port_pending(XenEvtchnState *s, evtchn_port_t port) +{ + void *p = xen_overlay_get_shinfo_ptr(); + + if (!p) { + return -ENOTSUP; + } + + if (xen_is_long_mode()) { + struct shared_info *shinfo = p; + const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]); + typeof(shinfo->evtchn_pending[0]) mask; + int idx = port / bits_per_word; + int offset = port % bits_per_word; + + mask = 1UL << offset; + + qatomic_fetch_and(&shinfo->evtchn_pending[idx], ~mask); + } else { + struct compat_shared_info *shinfo = p; + const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]); + typeof(shinfo->evtchn_pending[0]) mask; + int idx = port / bits_per_word; + int offset = port % bits_per_word; + + mask = 1UL << offset; + + qatomic_fetch_and(&shinfo->evtchn_pending[idx], ~mask); + } + return 0; +} + +static void free_port(XenEvtchnState *s, evtchn_port_t port) +{ + s->port_table[port].type = EVTCHNSTAT_closed; + s->port_table[port].type_val = 0; + s->port_table[port].vcpu = 0; + + if (s->nr_ports == port + 1) { + do { + s->nr_ports--; + } while (s->nr_ports && + s->port_table[s->nr_ports - 1].type == EVTCHNSTAT_closed); + } + + /* Clear pending event to avoid 
unexpected behavior on re-bind. */
+    clear_port_pending(s, port);
+}
+
+static int allocate_port(XenEvtchnState *s, uint32_t vcpu, uint16_t type,
+                         uint16_t val, evtchn_port_t *port)
+{
+    evtchn_port_t p = 1;
+
+    for (p = 1; valid_port(p); p++) {
+        if (s->port_table[p].type == EVTCHNSTAT_closed) {
+            s->port_table[p].vcpu = vcpu;
+            s->port_table[p].type = type;
+            s->port_table[p].type_val = val;
+
+            *port = p;
+
+            if (s->nr_ports < p + 1) {
+                s->nr_ports = p + 1;
+            }
+
+            return 0;
+        }
+    }
+    return -ENOSPC;
+}
+
+static bool virq_is_global(uint32_t virq)
+{
+    switch (virq) {
+    case VIRQ_TIMER:
+    case VIRQ_DEBUG:
+    case VIRQ_XENOPROF:
+    case VIRQ_XENPMU:
+        return false;
+
+    default:
+        return true;
+    }
+}
+
+static int close_port(XenEvtchnState *s, evtchn_port_t port,
+                      bool *flush_kvm_routes)
+{
+    XenEvtchnPort *p = &s->port_table[port];
+
+    /* Because it *might* be a PIRQ port */
+    assert(qemu_mutex_iothread_locked());
+
+    switch (p->type) {
+    case EVTCHNSTAT_closed:
+        return -ENOENT;
+
+    case EVTCHNSTAT_pirq:
+        s->pirq[p->type_val].port = 0;
+        if (s->pirq[p->type_val].is_translated) {
+            *flush_kvm_routes = true;
+        }
+        break;
+
+    case EVTCHNSTAT_virq:
+        kvm_xen_set_vcpu_virq(virq_is_global(p->type_val) ? 0 : p->vcpu,
+                              p->type_val, 0);
+        break;
+
+    case EVTCHNSTAT_ipi:
+        if (s->evtchn_in_kernel) {
+            deassign_kernel_port(port);
+        }
+        break;
+
+    case EVTCHNSTAT_interdomain:
+        if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
+            uint16_t be_port = p->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
+            struct xenevtchn_handle *xc = s->be_handles[be_port];
+            if (xc) {
+                if (kvm_xen_has_cap(EVTCHN_SEND)) {
+                    deassign_kernel_port(port);
+                }
+                xc->guest_port = 0;
+            }
+        } else {
+            /* Loopback interdomain */
+            XenEvtchnPort *rp = &s->port_table[p->type_val];
+            if (!valid_port(p->type_val) || rp->type_val != port ||
+                rp->type != EVTCHNSTAT_interdomain) {
+                error_report("Inconsistent state for interdomain unbind");
+            } else {
+                /* Set the other end back to unbound */
+                rp->type = EVTCHNSTAT_unbound;
+                rp->type_val = 0;
+            }
+        }
+        break;
+
+    default:
+        break;
+    }
+
+    free_port(s, port);
+    return 0;
+}
+
+int xen_evtchn_soft_reset(void)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    bool flush_kvm_routes = false;
+    int i;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    assert(qemu_mutex_iothread_locked());
+
+    qemu_mutex_lock(&s->port_lock);
+
+    for (i = 0; i < s->nr_ports; i++) {
+        close_port(s, i, &flush_kvm_routes);
+    }
+
+    qemu_mutex_unlock(&s->port_lock);
+
+    if (flush_kvm_routes) {
+        kvm_update_msi_routes_all(NULL, true, 0, 0);
+    }
+
+    return 0;
+}
+
+int xen_evtchn_reset_op(struct evtchn_reset *reset)
+{
+    if (reset->dom != DOMID_SELF && reset->dom != xen_domid) {
+        return -ESRCH;
+    }
+
+    return xen_evtchn_soft_reset();
+}
+
+int xen_evtchn_close_op(struct evtchn_close *close)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    bool flush_kvm_routes = false;
+    int ret;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (!valid_port(close->port)) {
+        return -EINVAL;
+    }
+
+    QEMU_IOTHREAD_LOCK_GUARD();
+    qemu_mutex_lock(&s->port_lock);
+
+    ret = close_port(s, close->port, &flush_kvm_routes);
+
+    qemu_mutex_unlock(&s->port_lock);
+
+    if (flush_kvm_routes) {
+        kvm_update_msi_routes_all(NULL, true, 0, 0);
+    }
+
+    return ret;
+}
+
+int xen_evtchn_unmask_op(struct evtchn_unmask *unmask)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int ret;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (!valid_port(unmask->port)) {
+        return -EINVAL;
+    }
+
+    qemu_mutex_lock(&s->port_lock);
+
+    ret = unmask_port(s, unmask->port, true);
+
qemu_mutex_unlock(&s->port_lock); + + return ret; +} + +int xen_evtchn_bind_vcpu_op(struct evtchn_bind_vcpu *vcpu) +{ + XenEvtchnState *s = xen_evtchn_singleton; + XenEvtchnPort *p; + int ret = -EINVAL; + + if (!s) { + return -ENOTSUP; + } + + if (!valid_port(vcpu->port)) { + return -EINVAL; + } + + if (!valid_vcpu(vcpu->vcpu)) { + return -ENOENT; + } + + qemu_mutex_lock(&s->port_lock); + + p = &s->port_table[vcpu->port]; + + if (p->type == EVTCHNSTAT_interdomain || + p->type == EVTCHNSTAT_unbound || + p->type == EVTCHNSTAT_pirq || + (p->type == EVTCHNSTAT_virq && virq_is_global(p->type_val))) { + /* + * unmask_port() with do_unmask==false will just raise the event + * on the new vCPU if the port was already pending. + */ + p->vcpu = vcpu->vcpu; + unmask_port(s, vcpu->port, false); + ret = 0; + } + + qemu_mutex_unlock(&s->port_lock); + + return ret; +} + +int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq) +{ + XenEvtchnState *s = xen_evtchn_singleton; + int ret; + + if (!s) { + return -ENOTSUP; + } + + if (virq->virq >= NR_VIRQS) { + return -EINVAL; + } + + /* Global VIRQ must be allocated on vCPU0 first */ + if (virq_is_global(virq->virq) && virq->vcpu != 0) { + return -EINVAL; + } + + if (!valid_vcpu(virq->vcpu)) { + return -ENOENT; + } + + qemu_mutex_lock(&s->port_lock); + + ret = allocate_port(s, virq->vcpu, EVTCHNSTAT_virq, virq->virq, + &virq->port); + if (!ret) { + ret = kvm_xen_set_vcpu_virq(virq->vcpu, virq->virq, virq->port); + if (ret) { + free_port(s, virq->port); + } + } + + qemu_mutex_unlock(&s->port_lock); + + return ret; +} + +int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq) +{ + XenEvtchnState *s = xen_evtchn_singleton; + int ret; + + if (!s) { + return -ENOTSUP; + } + + if (pirq->pirq >= s->nr_pirqs) { + return -EINVAL; + } + + QEMU_IOTHREAD_LOCK_GUARD(); + + if (s->pirq[pirq->pirq].port) { + return -EBUSY; + } + + qemu_mutex_lock(&s->port_lock); + + ret = allocate_port(s, 0, EVTCHNSTAT_pirq, pirq->pirq, + &pirq->port); + if (ret) { + qemu_mutex_unlock(&s->port_lock); + return ret; + } + + s->pirq[pirq->pirq].port = pirq->port; + trace_kvm_xen_bind_pirq(pirq->pirq, pirq->port); + + qemu_mutex_unlock(&s->port_lock); + + /* + * Need to do the unmask outside port_lock because it may call + * back into the MSI translate function. + */ + if (s->pirq[pirq->pirq].gsi == IRQ_MSI_EMU) { + if (s->pirq[pirq->pirq].is_masked) { + PCIDevice *dev = s->pirq[pirq->pirq].dev; + int vector = s->pirq[pirq->pirq].vector; + char *dev_path = qdev_get_dev_path(DEVICE(dev)); + + trace_kvm_xen_unmask_pirq(pirq->pirq, dev_path, vector); + g_free(dev_path); + + if (s->pirq[pirq->pirq].is_msix) { + msix_set_mask(dev, vector, false); + } else { + msi_set_mask(dev, vector, false, NULL); + } + } else if (s->pirq[pirq->pirq].is_translated) { + /* + * If KVM had attempted to translate this one before, make it try + * again. If we unmasked, then the notifier on the MSI(-X) vector + * will already have had the same effect. 
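+     * (Re-translation matters because xen_evtchn_translate_pirq_msi()
+     * fails with -EINVAL while a PIRQ has no port bound to it; now that
+     * the port exists, it can succeed and install a
+     * KVM_IRQ_ROUTING_XEN_EVTCHN route.)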
+ */ + kvm_update_msi_routes_all(NULL, true, 0, 0); + } + } + + return ret; +} + +int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi) +{ + XenEvtchnState *s = xen_evtchn_singleton; + int ret; + + if (!s) { + return -ENOTSUP; + } + + if (!valid_vcpu(ipi->vcpu)) { + return -ENOENT; + } + + qemu_mutex_lock(&s->port_lock); + + ret = allocate_port(s, ipi->vcpu, EVTCHNSTAT_ipi, 0, &ipi->port); + if (!ret && s->evtchn_in_kernel) { + assign_kernel_port(EVTCHNSTAT_ipi, ipi->port, ipi->vcpu); + } + + qemu_mutex_unlock(&s->port_lock); + + return ret; +} + +int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain) +{ + XenEvtchnState *s = xen_evtchn_singleton; + uint16_t type_val; + int ret; + + if (!s) { + return -ENOTSUP; + } + + if (interdomain->remote_dom == DOMID_QEMU) { + type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU; + } else if (interdomain->remote_dom == DOMID_SELF || + interdomain->remote_dom == xen_domid) { + type_val = 0; + } else { + return -ESRCH; + } + + if (!valid_port(interdomain->remote_port)) { + return -EINVAL; + } + + qemu_mutex_lock(&s->port_lock); + + /* The newly allocated port starts out as unbound */ + ret = allocate_port(s, 0, EVTCHNSTAT_unbound, type_val, + &interdomain->local_port); + if (ret) { + goto out; + } + + if (interdomain->remote_dom == DOMID_QEMU) { + struct xenevtchn_handle *xc = s->be_handles[interdomain->remote_port]; + XenEvtchnPort *lp = &s->port_table[interdomain->local_port]; + + if (!xc) { + ret = -ENOENT; + goto out_free_port; + } + + if (xc->guest_port) { + ret = -EBUSY; + goto out_free_port; + } + + assert(xc->be_port == interdomain->remote_port); + xc->guest_port = interdomain->local_port; + if (kvm_xen_has_cap(EVTCHN_SEND)) { + assign_kernel_eventfd(lp->type, xc->guest_port, xc->fd); + } + lp->type = EVTCHNSTAT_interdomain; + lp->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU | interdomain->remote_port; + ret = 0; + } else { + /* Loopback */ + XenEvtchnPort *rp = &s->port_table[interdomain->remote_port]; + XenEvtchnPort *lp = &s->port_table[interdomain->local_port]; + + if (rp->type == EVTCHNSTAT_unbound && rp->type_val == 0) { + /* It's a match! 
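+             * Both ends now point at each other: each port stores the
+             * other's number in its type_val.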
+             */
+            rp->type = EVTCHNSTAT_interdomain;
+            rp->type_val = interdomain->local_port;
+
+            lp->type = EVTCHNSTAT_interdomain;
+            lp->type_val = interdomain->remote_port;
+        } else {
+            ret = -EINVAL;
+        }
+    }
+
+ out_free_port:
+    if (ret) {
+        free_port(s, interdomain->local_port);
+    }
+ out:
+    qemu_mutex_unlock(&s->port_lock);
+
+    return ret;
+}
+
+int xen_evtchn_alloc_unbound_op(struct evtchn_alloc_unbound *alloc)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    uint16_t type_val;
+    int ret;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (alloc->dom != DOMID_SELF && alloc->dom != xen_domid) {
+        return -ESRCH;
+    }
+
+    if (alloc->remote_dom == DOMID_QEMU) {
+        type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU;
+    } else if (alloc->remote_dom == DOMID_SELF ||
+               alloc->remote_dom == xen_domid) {
+        type_val = 0;
+    } else {
+        return -EPERM;
+    }
+
+    qemu_mutex_lock(&s->port_lock);
+
+    ret = allocate_port(s, 0, EVTCHNSTAT_unbound, type_val, &alloc->port);
+
+    qemu_mutex_unlock(&s->port_lock);
+
+    return ret;
+}
+
+int xen_evtchn_send_op(struct evtchn_send *send)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    XenEvtchnPort *p;
+    int ret = 0;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (!valid_port(send->port)) {
+        return -EINVAL;
+    }
+
+    qemu_mutex_lock(&s->port_lock);
+
+    p = &s->port_table[send->port];
+
+    switch (p->type) {
+    case EVTCHNSTAT_interdomain:
+        if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
+            /*
+             * This is an event from the guest to qemu itself, which is
+             * serving as the driver domain.
+             */
+            uint16_t be_port = p->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
+            struct xenevtchn_handle *xc = s->be_handles[be_port];
+            if (xc) {
+                eventfd_write(xc->fd, 1);
+                ret = 0;
+            } else {
+                ret = -ENOENT;
+            }
+        } else {
+            /* Loopback interdomain ports; just a complex IPI */
+            set_port_pending(s, p->type_val);
+        }
+        break;
+
+    case EVTCHNSTAT_ipi:
+        set_port_pending(s, send->port);
+        break;
+
+    case EVTCHNSTAT_unbound:
+        /* Xen will silently drop these */
+        break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    qemu_mutex_unlock(&s->port_lock);
+
+    return ret;
+}
+
+int xen_evtchn_set_port(uint16_t port)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    XenEvtchnPort *p;
+    int ret = -EINVAL;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (!valid_port(port)) {
+        return -EINVAL;
+    }
+
+    qemu_mutex_lock(&s->port_lock);
+
+    p = &s->port_table[port];
+
+    /* QEMU has no business sending to anything but these */
+    if (p->type == EVTCHNSTAT_virq ||
+        (p->type == EVTCHNSTAT_interdomain &&
+         (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU))) {
+        set_port_pending(s, port);
+        ret = 0;
+    }
+
+    qemu_mutex_unlock(&s->port_lock);
+
+    return ret;
+}
+
+static int allocate_pirq(XenEvtchnState *s, int type, int gsi)
+{
+    uint16_t pirq;
+
+    /*
+     * Preserve the allocation strategy that Xen has. It looks like
+     * we *never* give out PIRQ 0-15, we give out 16-nr_irqs_gsi only
+     * to GSIs (counting up from 16), and then we count backwards from
+     * the top for MSIs or when the GSI space is exhausted.
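+     *
+     * (So with the default nr_pirqs of 256, the first MAP_PIRQ_TYPE_GSI
+     * allocation would typically yield PIRQ 16, and the first MSI
+     * allocation PIRQ 255.)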
+     */
+    if (type == MAP_PIRQ_TYPE_GSI) {
+        for (pirq = 16; pirq < IOAPIC_NUM_PINS; pirq++) {
+            if (pirq_inuse(s, pirq)) {
+                continue;
+            }
+
+            /* Found it */
+            goto found;
+        }
+    }
+    for (pirq = s->nr_pirqs - 1; pirq >= IOAPIC_NUM_PINS; pirq--) {
+        /* Skip whole words at a time when they're full */
+        if (pirq_inuse_word(s, pirq) == UINT64_MAX) {
+            pirq &= ~63ULL;
+            continue;
+        }
+        if (pirq_inuse(s, pirq)) {
+            continue;
+        }
+
+        goto found;
+    }
+    return -ENOSPC;
+
+ found:
+    pirq_inuse_word(s, pirq) |= pirq_inuse_bit(pirq);
+    if (gsi >= 0) {
+        assert(gsi < IOAPIC_NUM_PINS);
+        s->gsi_pirq[gsi] = pirq;
+    }
+    s->pirq[pirq].gsi = gsi;
+    return pirq;
+}
+
+bool xen_evtchn_set_gsi(int gsi, int level)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int pirq;
+
+    assert(qemu_mutex_iothread_locked());
+
+    if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
+        return false;
+    }
+
+    /*
+     * Check that it *isn't* the event channel GSI, and thus
+     * that we are not recursing and it's safe to take s->port_lock.
+     *
+     * Locking aside, it's perfectly sane to bail out early for that
+     * special case, as it would make no sense for the event channel
+     * GSI to be routed back to event channels, when the delivery
+     * method is to raise the GSI... that recursion wouldn't *just*
+     * be a locking issue.
+     */
+    if (gsi && gsi == s->callback_gsi) {
+        return false;
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    pirq = s->gsi_pirq[gsi];
+    if (!pirq) {
+        return false;
+    }
+
+    if (level) {
+        int port = s->pirq[pirq].port;
+
+        s->pirq_gsi_set |= (1U << gsi);
+        if (port) {
+            set_port_pending(s, port);
+        }
+    } else {
+        s->pirq_gsi_set &= ~(1U << gsi);
+    }
+    return true;
+}
+
+static uint32_t msi_pirq_target(uint64_t addr, uint32_t data)
+{
+    /* The vector (in low 8 bits of data) must be zero */
+    if (data & 0xff) {
+        return 0;
+    }
+
+    uint32_t pirq = (addr & 0xff000) >> 12;
+    pirq |= (addr >> 32) & 0xffffff00;
+
+    return pirq;
+}
+
+static void do_remove_pci_vector(XenEvtchnState *s, PCIDevice *dev, int vector,
+                                 int except_pirq)
+{
+    uint32_t pirq;
+
+    for (pirq = 0; pirq < s->nr_pirqs; pirq++) {
+        /*
+         * We could be cleverer here, but it isn't really a fast path, and
+         * this trivial optimisation is enough to let us skip the big gap
+         * in the middle a bit quicker (in terms of both loop iterations,
+         * and cache lines).
+         */
+        if (!(pirq & 63) && !(pirq_inuse_word(s, pirq))) {
+            pirq += 63; /* The loop increment completes the 64-entry skip */
+            continue;
+        }
+        if (except_pirq && pirq == except_pirq) {
+            continue;
+        }
+        if (s->pirq[pirq].dev != dev) {
+            continue;
+        }
+        if (vector != -1 && s->pirq[pirq].vector != vector) {
+            continue;
+        }
+
+        /* It could theoretically be bound to a port already, but that is OK. */
+        s->pirq[pirq].dev = NULL;
+        s->pirq[pirq].gsi = IRQ_UNBOUND;
+        s->pirq[pirq].is_msix = false;
+        s->pirq[pirq].vector = 0;
+        s->pirq[pirq].is_masked = false;
+        s->pirq[pirq].is_translated = false;
+    }
+}
+
+void xen_evtchn_remove_pci_device(PCIDevice *dev)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+
+    if (!s) {
+        return;
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+    do_remove_pci_vector(s, dev, -1, 0);
+}
+
+void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
+                          uint64_t addr, uint32_t data, bool is_masked)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    uint32_t pirq;
+
+    if (!s) {
+        return;
+    }
+
+    assert(qemu_mutex_iothread_locked());
+
+    pirq = msi_pirq_target(addr, data);
+
+    /*
+     * The PIRQ# must be sane, and there must be an allocated PIRQ in
+     * IRQ_UNBOUND or IRQ_MSI_EMU state to match it.
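+     * Otherwise, treat it as a plain MSI: force pirq to zero below and
+     * just drop any stale snooped state for this {device, vector}.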
+     */
+    if (!pirq || pirq >= s->nr_pirqs || !pirq_inuse(s, pirq) ||
+        (s->pirq[pirq].gsi != IRQ_UNBOUND &&
+         s->pirq[pirq].gsi != IRQ_MSI_EMU)) {
+        pirq = 0;
+    }
+
+    if (pirq) {
+        s->pirq[pirq].dev = dev;
+        s->pirq[pirq].gsi = IRQ_MSI_EMU;
+        s->pirq[pirq].is_msix = is_msix;
+        s->pirq[pirq].vector = vector;
+        s->pirq[pirq].is_masked = is_masked;
+    }
+
+    /* Remove any (other) entries for this {device, vector} */
+    do_remove_pci_vector(s, dev, vector, pirq);
+}
+
+int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
+                                  uint64_t address, uint32_t data)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    uint32_t pirq, port;
+    CPUState *cpu;
+
+    if (!s) {
+        return 1; /* Not a PIRQ */
+    }
+
+    assert(qemu_mutex_iothread_locked());
+
+    pirq = msi_pirq_target(address, data);
+    if (!pirq || pirq >= s->nr_pirqs) {
+        return 1; /* Not a PIRQ */
+    }
+
+    if (!kvm_xen_has_cap(EVTCHN_2LEVEL)) {
+        return -ENOTSUP;
+    }
+
+    if (s->pirq[pirq].gsi != IRQ_MSI_EMU) {
+        return -EINVAL;
+    }
+
+    /* Remember that KVM tried to translate this. It might need to try again. */
+    s->pirq[pirq].is_translated = true;
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    port = s->pirq[pirq].port;
+    if (!valid_port(port)) {
+        return -EINVAL;
+    }
+
+    cpu = qemu_get_cpu(s->port_table[port].vcpu);
+    if (!cpu) {
+        return -EINVAL;
+    }
+
+    route->type = KVM_IRQ_ROUTING_XEN_EVTCHN;
+    route->u.xen_evtchn.port = port;
+    route->u.xen_evtchn.vcpu = kvm_arch_vcpu_id(cpu);
+    route->u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
+
+    return 0; /* Handled */
+}
+
+bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    uint32_t pirq, port;
+
+    if (!s) {
+        return false;
+    }
+
+    assert(qemu_mutex_iothread_locked());
+
+    pirq = msi_pirq_target(address, data);
+    if (!pirq || pirq >= s->nr_pirqs) {
+        return false;
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    port = s->pirq[pirq].port;
+    if (!valid_port(port)) {
+        return false;
+    }
+
+    set_port_pending(s, port);
+    return true;
+}
+
+int xen_physdev_map_pirq(struct physdev_map_pirq *map)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int pirq = map->pirq;
+    int gsi = map->index;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    QEMU_IOTHREAD_LOCK_GUARD();
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    if (map->domid != DOMID_SELF && map->domid != xen_domid) {
+        return -EPERM;
+    }
+    if (map->type != MAP_PIRQ_TYPE_GSI) {
+        return -EINVAL;
+    }
+    if (gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
+        return -EINVAL;
+    }
+
+    if (pirq < 0) {
+        pirq = allocate_pirq(s, map->type, gsi);
+        if (pirq < 0) {
+            return pirq;
+        }
+        map->pirq = pirq;
+    } else if (pirq >= s->nr_pirqs) {
+        return -EINVAL;
+    } else {
+        /*
+         * User specified a valid-looking PIRQ#. Allow it if it is
+         * allocated and not yet bound, or if it is unallocated
+         */
+        if (pirq_inuse(s, pirq)) {
+            if (s->pirq[pirq].gsi != IRQ_UNBOUND) {
+                return -EBUSY;
+            }
+        } else {
+            /* If it was unused, mark it used now. */
+            pirq_inuse_word(s, pirq) |= pirq_inuse_bit(pirq);
+        }
+        /* Set the mapping in both directions.
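+         * s->pirq[].gsi gives the PIRQ→GSI direction, and s->gsi_pirq[]
+         * the GSI→PIRQ direction used by xen_evtchn_set_gsi().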
*/ + s->pirq[pirq].gsi = gsi; + s->gsi_pirq[gsi] = pirq; + } + + trace_kvm_xen_map_pirq(pirq, gsi); + return 0; +} + +int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap) +{ + XenEvtchnState *s = xen_evtchn_singleton; + int pirq = unmap->pirq; + int gsi; + + if (!s) { + return -ENOTSUP; + } + + if (unmap->domid != DOMID_SELF && unmap->domid != xen_domid) { + return -EPERM; + } + if (pirq < 0 || pirq >= s->nr_pirqs) { + return -EINVAL; + } + + QEMU_IOTHREAD_LOCK_GUARD(); + qemu_mutex_lock(&s->port_lock); + + if (!pirq_inuse(s, pirq)) { + qemu_mutex_unlock(&s->port_lock); + return -ENOENT; + } + + gsi = s->pirq[pirq].gsi; + + /* We can only unmap GSI PIRQs */ + if (gsi < 0) { + qemu_mutex_unlock(&s->port_lock); + return -EINVAL; + } + + s->gsi_pirq[gsi] = 0; + s->pirq[pirq].gsi = IRQ_UNBOUND; /* Doesn't actually matter because: */ + pirq_inuse_word(s, pirq) &= ~pirq_inuse_bit(pirq); + + trace_kvm_xen_unmap_pirq(pirq, gsi); + qemu_mutex_unlock(&s->port_lock); + + if (gsi == IRQ_MSI_EMU) { + kvm_update_msi_routes_all(NULL, true, 0, 0); + } + + return 0; +} + +int xen_physdev_eoi_pirq(struct physdev_eoi *eoi) +{ + XenEvtchnState *s = xen_evtchn_singleton; + int pirq = eoi->irq; + int gsi; + + if (!s) { + return -ENOTSUP; + } + + QEMU_IOTHREAD_LOCK_GUARD(); + QEMU_LOCK_GUARD(&s->port_lock); + + if (!pirq_inuse(s, pirq)) { + return -ENOENT; + } + + gsi = s->pirq[pirq].gsi; + if (gsi < 0) { + return -EINVAL; + } + + /* Reassert a level IRQ if needed */ + if (s->pirq_gsi_set & (1U << gsi)) { + int port = s->pirq[pirq].port; + if (port) { + set_port_pending(s, port); + } + } + + return 0; +} + +int xen_physdev_query_pirq(struct physdev_irq_status_query *query) +{ + XenEvtchnState *s = xen_evtchn_singleton; + int pirq = query->irq; + + if (!s) { + return -ENOTSUP; + } + + QEMU_IOTHREAD_LOCK_GUARD(); + QEMU_LOCK_GUARD(&s->port_lock); + + if (!pirq_inuse(s, pirq)) { + return -ENOENT; + } + + if (s->pirq[pirq].gsi >= 0) { + query->flags = XENIRQSTAT_needs_eoi; + } else { + query->flags = 0; + } + + return 0; +} + +int xen_physdev_get_free_pirq(struct physdev_get_free_pirq *get) +{ + XenEvtchnState *s = xen_evtchn_singleton; + int pirq; + + if (!s) { + return -ENOTSUP; + } + + QEMU_LOCK_GUARD(&s->port_lock); + + pirq = allocate_pirq(s, get->type, IRQ_UNBOUND); + if (pirq < 0) { + return pirq; + } + + get->pirq = pirq; + trace_kvm_xen_get_free_pirq(pirq, get->type); + return 0; +} + +struct xenevtchn_handle *xen_be_evtchn_open(void) +{ + struct xenevtchn_handle *xc = g_new0(struct xenevtchn_handle, 1); + + xc->fd = eventfd(0, EFD_CLOEXEC); + if (xc->fd < 0) { + free(xc); + return NULL; + } + + return xc; +} + +static int find_be_port(XenEvtchnState *s, struct xenevtchn_handle *xc) +{ + int i; + + for (i = 1; i < EVTCHN_2L_NR_CHANNELS; i++) { + if (!s->be_handles[i]) { + s->be_handles[i] = xc; + xc->be_port = i; + return i; + } + } + return 0; +} + +int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid, + evtchn_port_t guest_port) +{ + XenEvtchnState *s = xen_evtchn_singleton; + XenEvtchnPort *gp; + uint16_t be_port = 0; + int ret; + + if (!s) { + return -ENOTSUP; + } + + if (!xc) { + return -EFAULT; + } + + if (domid != xen_domid) { + return -ESRCH; + } + + if (!valid_port(guest_port)) { + return -EINVAL; + } + + qemu_mutex_lock(&s->port_lock); + + /* The guest has to have an unbound port waiting for us to bind */ + gp = &s->port_table[guest_port]; + + switch (gp->type) { + case EVTCHNSTAT_interdomain: + /* Allow rebinding after migration, preserve port # if possible */ + 
be_port = gp->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU; + assert(be_port != 0); + if (!s->be_handles[be_port]) { + s->be_handles[be_port] = xc; + xc->guest_port = guest_port; + ret = xc->be_port = be_port; + if (kvm_xen_has_cap(EVTCHN_SEND)) { + assign_kernel_eventfd(gp->type, guest_port, xc->fd); + } + break; + } + /* fall through */ + + case EVTCHNSTAT_unbound: + be_port = find_be_port(s, xc); + if (!be_port) { + ret = -ENOSPC; + goto out; + } + + gp->type = EVTCHNSTAT_interdomain; + gp->type_val = be_port | PORT_INFO_TYPEVAL_REMOTE_QEMU; + xc->guest_port = guest_port; + if (kvm_xen_has_cap(EVTCHN_SEND)) { + assign_kernel_eventfd(gp->type, guest_port, xc->fd); + } + ret = be_port; + break; + + default: + ret = -EINVAL; + break; + } + + out: + qemu_mutex_unlock(&s->port_lock); + + return ret; +} + +int xen_be_evtchn_unbind(struct xenevtchn_handle *xc, evtchn_port_t port) +{ + XenEvtchnState *s = xen_evtchn_singleton; + int ret; + + if (!s) { + return -ENOTSUP; + } + + if (!xc) { + return -EFAULT; + } + + qemu_mutex_lock(&s->port_lock); + + if (port && port != xc->be_port) { + ret = -EINVAL; + goto out; + } + + if (xc->guest_port) { + XenEvtchnPort *gp = &s->port_table[xc->guest_port]; + + /* This should never *not* be true */ + if (gp->type == EVTCHNSTAT_interdomain) { + gp->type = EVTCHNSTAT_unbound; + gp->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU; + } + + if (kvm_xen_has_cap(EVTCHN_SEND)) { + deassign_kernel_port(xc->guest_port); + } + xc->guest_port = 0; + } + + s->be_handles[xc->be_port] = NULL; + xc->be_port = 0; + ret = 0; + out: + qemu_mutex_unlock(&s->port_lock); + return ret; +} + +int xen_be_evtchn_close(struct xenevtchn_handle *xc) +{ + if (!xc) { + return -EFAULT; + } + + xen_be_evtchn_unbind(xc, 0); + + close(xc->fd); + free(xc); + return 0; +} + +int xen_be_evtchn_fd(struct xenevtchn_handle *xc) +{ + if (!xc) { + return -1; + } + return xc->fd; +} + +int xen_be_evtchn_notify(struct xenevtchn_handle *xc, evtchn_port_t port) +{ + XenEvtchnState *s = xen_evtchn_singleton; + int ret; + + if (!s) { + return -ENOTSUP; + } + + if (!xc) { + return -EFAULT; + } + + qemu_mutex_lock(&s->port_lock); + + if (xc->guest_port) { + set_port_pending(s, xc->guest_port); + ret = 0; + } else { + ret = -ENOTCONN; + } + + qemu_mutex_unlock(&s->port_lock); + + return ret; +} + +int xen_be_evtchn_pending(struct xenevtchn_handle *xc) +{ + uint64_t val; + + if (!xc) { + return -EFAULT; + } + + if (!xc->be_port) { + return 0; + } + + if (eventfd_read(xc->fd, &val)) { + return -errno; + } + + return val ? xc->be_port : 0; +} + +int xen_be_evtchn_unmask(struct xenevtchn_handle *xc, evtchn_port_t port) +{ + if (!xc) { + return -EFAULT; + } + + if (xc->be_port != port) { + return -EINVAL; + } + + /* + * We don't actually do anything to unmask it; the event was already + * consumed in xen_be_evtchn_pending(). 
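+     * (Reading the eventfd counter in xen_be_evtchn_pending() resets it
+     * to zero, which is all the "unmasking" this emulation requires.)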
+     */
+    return 0;
+}
+
+int xen_be_evtchn_get_guest_port(struct xenevtchn_handle *xc)
+{
+    return xc->guest_port;
+}
+
+EvtchnInfoList *qmp_xen_event_list(Error **errp)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    EvtchnInfoList *head = NULL, **tail = &head;
+    void *shinfo, *pending, *mask;
+    int i;
+
+    if (!s) {
+        error_setg(errp, "Xen event channel emulation not enabled");
+        return NULL;
+    }
+
+    shinfo = xen_overlay_get_shinfo_ptr();
+    if (!shinfo) {
+        error_setg(errp, "Xen shared info page not allocated");
+        return NULL;
+    }
+
+    if (xen_is_long_mode()) {
+        pending = shinfo + offsetof(struct shared_info, evtchn_pending);
+        mask = shinfo + offsetof(struct shared_info, evtchn_mask);
+    } else {
+        pending = shinfo + offsetof(struct compat_shared_info, evtchn_pending);
+        mask = shinfo + offsetof(struct compat_shared_info, evtchn_mask);
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    for (i = 0; i < s->nr_ports; i++) {
+        XenEvtchnPort *p = &s->port_table[i];
+        EvtchnInfo *info;
+
+        if (p->type == EVTCHNSTAT_closed) {
+            continue;
+        }
+
+        info = g_new0(EvtchnInfo, 1);
+
+        info->port = i;
+        qemu_build_assert(EVTCHN_PORT_TYPE_CLOSED == EVTCHNSTAT_closed);
+        qemu_build_assert(EVTCHN_PORT_TYPE_UNBOUND == EVTCHNSTAT_unbound);
+        qemu_build_assert(EVTCHN_PORT_TYPE_INTERDOMAIN == EVTCHNSTAT_interdomain);
+        qemu_build_assert(EVTCHN_PORT_TYPE_PIRQ == EVTCHNSTAT_pirq);
+        qemu_build_assert(EVTCHN_PORT_TYPE_VIRQ == EVTCHNSTAT_virq);
+        qemu_build_assert(EVTCHN_PORT_TYPE_IPI == EVTCHNSTAT_ipi);
+
+        info->type = p->type;
+        if (p->type == EVTCHNSTAT_interdomain) {
+            info->remote_domain = g_strdup((p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) ?
+                                           "qemu" : "loopback");
+            info->target = p->type_val & PORT_INFO_TYPEVAL_REMOTE_PORT_MASK;
+        } else {
+            info->target = p->type_val;
+        }
+        info->vcpu = p->vcpu;
+        info->pending = test_bit(i, pending);
+        info->masked = test_bit(i, mask);
+
+        QAPI_LIST_APPEND(tail, info);
+    }
+
+    return head;
+}
+
+void qmp_xen_event_inject(uint32_t port, Error **errp)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+
+    if (!s) {
+        error_setg(errp, "Xen event channel emulation not enabled");
+        return;
+    }
+
+    if (!valid_port(port)) {
+        error_setg(errp, "Invalid port %u", port);
+        return;
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
+    if (set_port_pending(s, port)) {
+        error_setg(errp, "Failed to set port %u", port);
+        return;
+    }
+}
+
+void hmp_xen_event_list(Monitor *mon, const QDict *qdict)
+{
+    EvtchnInfoList *iter, *info_list;
+    Error *err = NULL;
+
+    info_list = qmp_xen_event_list(&err);
+    if (err) {
+        hmp_handle_error(mon, err);
+        return;
+    }
+
+    for (iter = info_list; iter; iter = iter->next) {
+        EvtchnInfo *info = iter->value;
+
+        monitor_printf(mon, "port %4u: vcpu: %d %s", info->port, info->vcpu,
+                       EvtchnPortType_str(info->type));
+        if (info->type != EVTCHN_PORT_TYPE_IPI) {
+            monitor_printf(mon, "(");
+            if (info->remote_domain) {
+                monitor_printf(mon, "%s:", info->remote_domain);
+            }
+            monitor_printf(mon, "%d)", info->target);
+        }
+        if (info->pending) {
+            monitor_printf(mon, " PENDING");
+        }
+        if (info->masked) {
+            monitor_printf(mon, " MASKED");
+        }
+        monitor_printf(mon, "\n");
+    }
+
+    qapi_free_EvtchnInfoList(info_list);
+}
+
+void hmp_xen_event_inject(Monitor *mon, const QDict *qdict)
+{
+    int port = qdict_get_int(qdict, "port");
+    Error *err = NULL;
+
+    qmp_xen_event_inject(port, &err);
+    if (err) {
+        hmp_handle_error(mon, err);
+    } else {
+        monitor_printf(mon, "Delivered port %d\n", port);
+    }
+}
+
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
new file mode 100644
index 0000000000..bfb67ac2bc
--- /dev/null
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -0,0 +1,88 @@
+/*
+ * QEMU Xen emulation: Event channel support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_XEN_EVTCHN_H
+#define QEMU_XEN_EVTCHN_H
+
+#include "hw/sysbus.h"
+
+typedef uint32_t evtchn_port_t;
+
+void xen_evtchn_create(void);
+int xen_evtchn_soft_reset(void);
+int xen_evtchn_set_callback_param(uint64_t param);
+void xen_evtchn_connect_gsis(qemu_irq *system_gsis);
+void xen_evtchn_set_callback_level(int level);
+
+int xen_evtchn_set_port(uint16_t port);
+
+bool xen_evtchn_set_gsi(int gsi, int level);
+void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
+                          uint64_t addr, uint32_t data, bool is_masked);
+void xen_evtchn_remove_pci_device(PCIDevice *dev);
+struct kvm_irq_routing_entry;
+int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
+                                  uint64_t address, uint32_t data);
+bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data);
+
+
+/*
+ * These functions mirror the libxenevtchn library API, providing the QEMU
+ * backend side of "interdomain" event channels.
+ */
+struct xenevtchn_handle;
+struct xenevtchn_handle *xen_be_evtchn_open(void);
+int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid,
+                                   evtchn_port_t guest_port);
+int xen_be_evtchn_unbind(struct xenevtchn_handle *xc, evtchn_port_t port);
+int xen_be_evtchn_close(struct xenevtchn_handle *xc);
+int xen_be_evtchn_fd(struct xenevtchn_handle *xc);
+int xen_be_evtchn_notify(struct xenevtchn_handle *xc, evtchn_port_t port);
+int xen_be_evtchn_unmask(struct xenevtchn_handle *xc, evtchn_port_t port);
+int xen_be_evtchn_pending(struct xenevtchn_handle *xc);
+/* Apart from this which is a local addition */
+int xen_be_evtchn_get_guest_port(struct xenevtchn_handle *xc);
+
+struct evtchn_status;
+struct evtchn_close;
+struct evtchn_unmask;
+struct evtchn_bind_virq;
+struct evtchn_bind_pirq;
+struct evtchn_bind_ipi;
+struct evtchn_send;
+struct evtchn_alloc_unbound;
+struct evtchn_bind_interdomain;
+struct evtchn_bind_vcpu;
+struct evtchn_reset;
+int xen_evtchn_status_op(struct evtchn_status *status);
+int xen_evtchn_close_op(struct evtchn_close *close);
+int xen_evtchn_unmask_op(struct evtchn_unmask *unmask);
+int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq);
+int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq);
+int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi);
+int xen_evtchn_send_op(struct evtchn_send *send);
+int xen_evtchn_alloc_unbound_op(struct evtchn_alloc_unbound *alloc);
+int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain);
+int xen_evtchn_bind_vcpu_op(struct evtchn_bind_vcpu *vcpu);
+int xen_evtchn_reset_op(struct evtchn_reset *reset);
+
+struct physdev_map_pirq;
+struct physdev_unmap_pirq;
+struct physdev_eoi;
+struct physdev_irq_status_query;
+struct physdev_get_free_pirq;
+int xen_physdev_map_pirq(struct physdev_map_pirq *map);
+int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap);
+int xen_physdev_eoi_pirq(struct physdev_eoi *eoi);
+int xen_physdev_query_pirq(struct physdev_irq_status_query *query);
+int xen_physdev_get_free_pirq(struct physdev_get_free_pirq *get);
+
+#endif /* QEMU_XEN_EVTCHN_H */
diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c
new file mode 100644
index 0000000000..21c30e3659
--- /dev/null
+++ b/hw/i386/kvm/xen_gnttab.c
@@ -0,0 +1,547 @@
+/*
+ * QEMU Xen emulation: Grant table support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "qemu/module.h"
+#include "qemu/lockable.h"
+#include "qemu/main-loop.h"
+#include "qapi/error.h"
+#include "qom/object.h"
+#include "exec/target_page.h"
+#include "exec/address-spaces.h"
+#include "migration/vmstate.h"
+
+#include "hw/sysbus.h"
+#include "hw/xen/xen.h"
+#include "hw/xen/xen_backend_ops.h"
+#include "xen_overlay.h"
+#include "xen_gnttab.h"
+
+#include "sysemu/kvm.h"
+#include "sysemu/kvm_xen.h"
+
+#include "hw/xen/interface/memory.h"
+#include "hw/xen/interface/grant_table.h"
+
+#define TYPE_XEN_GNTTAB "xen-gnttab"
+OBJECT_DECLARE_SIMPLE_TYPE(XenGnttabState, XEN_GNTTAB)
+
+#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
+
+static struct gnttab_backend_ops emu_gnttab_backend_ops;
+
+struct XenGnttabState {
+    /*< private >*/
+    SysBusDevice busdev;
+    /*< public >*/
+
+    QemuMutex gnt_lock;
+
+    uint32_t nr_frames;
+    uint32_t max_frames;
+
+    union {
+        grant_entry_v1_t *v1;
+        /* Theoretically, v2 support could be added here. */
+    } entries;
+
+    MemoryRegion gnt_frames;
+    MemoryRegion *gnt_aliases;
+    uint64_t *gnt_frame_gpas;
+
+    uint8_t *map_track;
+};
+
+struct XenGnttabState *xen_gnttab_singleton;
+
+static void xen_gnttab_realize(DeviceState *dev, Error **errp)
+{
+    XenGnttabState *s = XEN_GNTTAB(dev);
+    int i;
+
+    if (xen_mode != XEN_EMULATE) {
+        error_setg(errp, "Xen grant table support is for Xen emulation");
+        return;
+    }
+    s->max_frames = kvm_xen_get_gnttab_max_frames();
+    memory_region_init_ram(&s->gnt_frames, OBJECT(dev), "xen:grant_table",
+                           XEN_PAGE_SIZE * s->max_frames, &error_abort);
+    memory_region_set_enabled(&s->gnt_frames, true);
+    s->entries.v1 = memory_region_get_ram_ptr(&s->gnt_frames);
+
+    /* Create individual page-sized aliases for overlays */
+    s->gnt_aliases = (void *)g_new0(MemoryRegion, s->max_frames);
+    s->gnt_frame_gpas = (void *)g_new(uint64_t, s->max_frames);
+    for (i = 0; i < s->max_frames; i++) {
+        memory_region_init_alias(&s->gnt_aliases[i], OBJECT(dev),
+                                 NULL, &s->gnt_frames,
+                                 i * XEN_PAGE_SIZE, XEN_PAGE_SIZE);
+        s->gnt_frame_gpas[i] = INVALID_GPA;
+    }
+
+    s->nr_frames = 0;
+    memset(s->entries.v1, 0, XEN_PAGE_SIZE * s->max_frames);
+    s->entries.v1[GNTTAB_RESERVED_XENSTORE].flags = GTF_permit_access;
+    s->entries.v1[GNTTAB_RESERVED_XENSTORE].frame = XEN_SPECIAL_PFN(XENSTORE);
+
+    qemu_mutex_init(&s->gnt_lock);
+
+    xen_gnttab_singleton = s;
+
+    s->map_track = g_new0(uint8_t, s->max_frames * ENTRIES_PER_FRAME_V1);
+
+    xen_gnttab_ops = &emu_gnttab_backend_ops;
+}
+
+static int xen_gnttab_post_load(void *opaque, int version_id)
+{
+    XenGnttabState *s = XEN_GNTTAB(opaque);
+    uint32_t i;
+
+    for (i = 0; i < s->nr_frames; i++) {
+        if (s->gnt_frame_gpas[i] != INVALID_GPA) {
+            xen_overlay_do_map_page(&s->gnt_aliases[i], s->gnt_frame_gpas[i]);
+        }
+    }
+    return 0;
+}
+
+static bool xen_gnttab_is_needed(void *opaque)
+{
+    return xen_mode == XEN_EMULATE;
+}
+
+static const VMStateDescription xen_gnttab_vmstate = {
+    .name = "xen_gnttab",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = xen_gnttab_is_needed,
+    .post_load = xen_gnttab_post_load,
+    .fields = (VMStateField[])
{ + VMSTATE_UINT32(nr_frames, XenGnttabState), + VMSTATE_VARRAY_UINT32(gnt_frame_gpas, XenGnttabState, nr_frames, 0, + vmstate_info_uint64, uint64_t), + VMSTATE_END_OF_LIST() + } +}; + +static void xen_gnttab_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = xen_gnttab_realize; + dc->vmsd = &xen_gnttab_vmstate; +} + +static const TypeInfo xen_gnttab_info = { + .name = TYPE_XEN_GNTTAB, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XenGnttabState), + .class_init = xen_gnttab_class_init, +}; + +void xen_gnttab_create(void) +{ + xen_gnttab_singleton = XEN_GNTTAB(sysbus_create_simple(TYPE_XEN_GNTTAB, + -1, NULL)); +} + +static void xen_gnttab_register_types(void) +{ + type_register_static(&xen_gnttab_info); +} + +type_init(xen_gnttab_register_types) + +int xen_gnttab_map_page(uint64_t idx, uint64_t gfn) +{ + XenGnttabState *s = xen_gnttab_singleton; + uint64_t gpa = gfn << XEN_PAGE_SHIFT; + + if (!s) { + return -ENOTSUP; + } + + if (idx >= s->max_frames) { + return -EINVAL; + } + + QEMU_IOTHREAD_LOCK_GUARD(); + QEMU_LOCK_GUARD(&s->gnt_lock); + + xen_overlay_do_map_page(&s->gnt_aliases[idx], gpa); + + s->gnt_frame_gpas[idx] = gpa; + + if (s->nr_frames <= idx) { + s->nr_frames = idx + 1; + } + + return 0; +} + +int xen_gnttab_set_version_op(struct gnttab_set_version *set) +{ + int ret; + + switch (set->version) { + case 1: + ret = 0; + break; + + case 2: + /* Behave as before set_version was introduced. */ + ret = -ENOSYS; + break; + + default: + ret = -EINVAL; + } + + set->version = 1; + return ret; +} + +int xen_gnttab_get_version_op(struct gnttab_get_version *get) +{ + if (get->dom != DOMID_SELF && get->dom != xen_domid) { + return -ESRCH; + } + + get->version = 1; + return 0; +} + +int xen_gnttab_query_size_op(struct gnttab_query_size *size) +{ + XenGnttabState *s = xen_gnttab_singleton; + + if (!s) { + return -ENOTSUP; + } + + if (size->dom != DOMID_SELF && size->dom != xen_domid) { + size->status = GNTST_bad_domain; + return 0; + } + + size->status = GNTST_okay; + size->nr_frames = s->nr_frames; + size->max_nr_frames = s->max_frames; + return 0; +} + +/* Track per-open refs, to allow close() to clean up. */ +struct active_ref { + MemoryRegionSection mrs; + void *virtaddr; + uint32_t refcnt; + int prot; +}; + +static void gnt_unref(XenGnttabState *s, grant_ref_t ref, + MemoryRegionSection *mrs, int prot) +{ + if (mrs && mrs->mr) { + if (prot & PROT_WRITE) { + memory_region_set_dirty(mrs->mr, mrs->offset_within_region, + XEN_PAGE_SIZE); + } + memory_region_unref(mrs->mr); + mrs->mr = NULL; + } + assert(s->map_track[ref] != 0); + + if (--s->map_track[ref] == 0) { + grant_entry_v1_t *gnt_p = &s->entries.v1[ref]; + qatomic_and(&gnt_p->flags, (uint16_t)~(GTF_reading | GTF_writing)); + } +} + +static uint64_t gnt_ref(XenGnttabState *s, grant_ref_t ref, int prot) +{ + uint16_t mask = GTF_type_mask | GTF_sub_page; + grant_entry_v1_t gnt, *gnt_p; + int retries = 0; + + if (ref >= s->max_frames * ENTRIES_PER_FRAME_V1 || + s->map_track[ref] == UINT8_MAX) { + return INVALID_GPA; + } + + if (prot & PROT_WRITE) { + mask |= GTF_readonly; + } + + gnt_p = &s->entries.v1[ref]; + + /* + * The guest can legitimately be changing the GTF_readonly flag. Allow + * that, but don't let a malicious guest cause a livelock. 
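+     * (Hence the bounded cmpxchg loop below: five failed attempts to set
+     * GTF_reading/GTF_writing and the map simply fails.)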
+ */ + for (retries = 0; retries < 5; retries++) { + uint16_t new_flags; + + /* Read the entry before an atomic operation on its flags */ + gnt = *(volatile grant_entry_v1_t *)gnt_p; + + if ((gnt.flags & mask) != GTF_permit_access || + gnt.domid != DOMID_QEMU) { + return INVALID_GPA; + } + + new_flags = gnt.flags | GTF_reading; + if (prot & PROT_WRITE) { + new_flags |= GTF_writing; + } + + if (qatomic_cmpxchg(&gnt_p->flags, gnt.flags, new_flags) == gnt.flags) { + return (uint64_t)gnt.frame << XEN_PAGE_SHIFT; + } + } + + return INVALID_GPA; +} + +struct xengntdev_handle { + GHashTable *active_maps; +}; + +static int xen_be_gnttab_set_max_grants(struct xengntdev_handle *xgt, + uint32_t nr_grants) +{ + return 0; +} + +static void *xen_be_gnttab_map_refs(struct xengntdev_handle *xgt, + uint32_t count, uint32_t domid, + uint32_t *refs, int prot) +{ + XenGnttabState *s = xen_gnttab_singleton; + struct active_ref *act; + + if (!s) { + errno = ENOTSUP; + return NULL; + } + + if (domid != xen_domid) { + errno = EINVAL; + return NULL; + } + + if (!count || count > 4096) { + errno = EINVAL; + return NULL; + } + + /* + * Making a contiguous mapping from potentially discontiguous grant + * references would be... distinctly non-trivial. We don't support it. + * Even changing the API to return an array of pointers, one per page, + * wouldn't be simple to use in PV backends because some structures + * actually cross page boundaries (e.g. 32-bit blkif_response ring + * entries are 12 bytes). + */ + if (count != 1) { + errno = EINVAL; + return NULL; + } + + QEMU_LOCK_GUARD(&s->gnt_lock); + + act = g_hash_table_lookup(xgt->active_maps, GINT_TO_POINTER(refs[0])); + if (act) { + if ((prot & PROT_WRITE) && !(act->prot & PROT_WRITE)) { + if (gnt_ref(s, refs[0], prot) == INVALID_GPA) { + return NULL; + } + act->prot |= PROT_WRITE; + } + act->refcnt++; + } else { + uint64_t gpa = gnt_ref(s, refs[0], prot); + if (gpa == INVALID_GPA) { + errno = EINVAL; + return NULL; + } + + act = g_new0(struct active_ref, 1); + act->prot = prot; + act->refcnt = 1; + act->mrs = memory_region_find(get_system_memory(), gpa, XEN_PAGE_SIZE); + + if (act->mrs.mr && + !int128_lt(act->mrs.size, int128_make64(XEN_PAGE_SIZE)) && + memory_region_get_ram_addr(act->mrs.mr) != RAM_ADDR_INVALID) { + act->virtaddr = qemu_map_ram_ptr(act->mrs.mr->ram_block, + act->mrs.offset_within_region); + } + if (!act->virtaddr) { + gnt_unref(s, refs[0], &act->mrs, 0); + g_free(act); + errno = EINVAL; + return NULL; + } + + s->map_track[refs[0]]++; + g_hash_table_insert(xgt->active_maps, GINT_TO_POINTER(refs[0]), act); + } + + return act->virtaddr; +} + +static gboolean do_unmap(gpointer key, gpointer value, gpointer user_data) +{ + XenGnttabState *s = user_data; + grant_ref_t gref = GPOINTER_TO_INT(key); + struct active_ref *act = value; + + gnt_unref(s, gref, &act->mrs, act->prot); + g_free(act); + return true; +} + +static int xen_be_gnttab_unmap(struct xengntdev_handle *xgt, + void *start_address, uint32_t *refs, + uint32_t count) +{ + XenGnttabState *s = xen_gnttab_singleton; + struct active_ref *act; + + if (!s) { + return -ENOTSUP; + } + + if (count != 1) { + return -EINVAL; + } + + QEMU_LOCK_GUARD(&s->gnt_lock); + + act = g_hash_table_lookup(xgt->active_maps, GINT_TO_POINTER(refs[0])); + if (!act) { + return -ENOENT; + } + + if (act->virtaddr != start_address) { + return -EINVAL; + } + + if (!--act->refcnt) { + do_unmap(GINT_TO_POINTER(refs[0]), act, s); + g_hash_table_remove(xgt->active_maps, GINT_TO_POINTER(refs[0])); + } + + return 0; +} + +/* + * 
This looks a bit like the one for true Xen in xen-operations.c but + * in emulation we don't support multi-page mappings. And under Xen we + * *want* the multi-page mappings so we have fewer bounces through the + * kernel and the hypervisor. So the code paths end up being similar, + * but different. + */ +static int xen_be_gnttab_copy(struct xengntdev_handle *xgt, bool to_domain, + uint32_t domid, XenGrantCopySegment *segs, + uint32_t nr_segs, Error **errp) +{ + int prot = to_domain ? PROT_WRITE : PROT_READ; + unsigned int i; + + for (i = 0; i < nr_segs; i++) { + XenGrantCopySegment *seg = &segs[i]; + void *page; + uint32_t ref = to_domain ? seg->dest.foreign.ref : + seg->source.foreign.ref; + + page = xen_be_gnttab_map_refs(xgt, 1, domid, &ref, prot); + if (!page) { + if (errp) { + error_setg_errno(errp, errno, + "xen_be_gnttab_map_refs failed"); + } + return -errno; + } + + if (to_domain) { + memcpy(page + seg->dest.foreign.offset, seg->source.virt, + seg->len); + } else { + memcpy(seg->dest.virt, page + seg->source.foreign.offset, + seg->len); + } + + if (xen_be_gnttab_unmap(xgt, page, &ref, 1)) { + if (errp) { + error_setg_errno(errp, errno, "xen_be_gnttab_unmap failed"); + } + return -errno; + } + } + + return 0; +} + +static struct xengntdev_handle *xen_be_gnttab_open(void) +{ + struct xengntdev_handle *xgt = g_new0(struct xengntdev_handle, 1); + + xgt->active_maps = g_hash_table_new(g_direct_hash, g_direct_equal); + return xgt; +} + +static int xen_be_gnttab_close(struct xengntdev_handle *xgt) +{ + XenGnttabState *s = xen_gnttab_singleton; + + if (!s) { + return -ENOTSUP; + } + + g_hash_table_foreach_remove(xgt->active_maps, do_unmap, s); + g_hash_table_destroy(xgt->active_maps); + g_free(xgt); + return 0; +} + +static struct gnttab_backend_ops emu_gnttab_backend_ops = { + .open = xen_be_gnttab_open, + .close = xen_be_gnttab_close, + .grant_copy = xen_be_gnttab_copy, + .set_max_grants = xen_be_gnttab_set_max_grants, + .map_refs = xen_be_gnttab_map_refs, + .unmap = xen_be_gnttab_unmap, +}; + +int xen_gnttab_reset(void) +{ + XenGnttabState *s = xen_gnttab_singleton; + + if (!s) { + return -ENOTSUP; + } + + QEMU_LOCK_GUARD(&s->gnt_lock); + + s->nr_frames = 0; + + memset(s->entries.v1, 0, XEN_PAGE_SIZE * s->max_frames); + + s->entries.v1[GNTTAB_RESERVED_XENSTORE].flags = GTF_permit_access; + s->entries.v1[GNTTAB_RESERVED_XENSTORE].frame = XEN_SPECIAL_PFN(XENSTORE); + + memset(s->map_track, 0, s->max_frames * ENTRIES_PER_FRAME_V1); + + return 0; +} diff --git a/hw/i386/kvm/xen_gnttab.h b/hw/i386/kvm/xen_gnttab.h new file mode 100644 index 0000000000..ee215239b0 --- /dev/null +++ b/hw/i386/kvm/xen_gnttab.h @@ -0,0 +1,26 @@ +/* + * QEMU Xen emulation: Grant table support + * + * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Authors: David Woodhouse + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */
+
+#ifndef QEMU_XEN_GNTTAB_H
+#define QEMU_XEN_GNTTAB_H
+
+void xen_gnttab_create(void);
+int xen_gnttab_reset(void);
+int xen_gnttab_map_page(uint64_t idx, uint64_t gfn);
+
+struct gnttab_set_version;
+struct gnttab_get_version;
+struct gnttab_query_size;
+int xen_gnttab_set_version_op(struct gnttab_set_version *set);
+int xen_gnttab_get_version_op(struct gnttab_get_version *get);
+int xen_gnttab_query_size_op(struct gnttab_query_size *size);
+
+#endif /* QEMU_XEN_GNTTAB_H */
diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c
new file mode 100644
index 0000000000..39fda1b72c
--- /dev/null
+++ b/hw/i386/kvm/xen_overlay.c
@@ -0,0 +1,272 @@
+/*
+ * QEMU Xen emulation: Shared/overlay pages support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "qemu/module.h"
+#include "qemu/main-loop.h"
+#include "qapi/error.h"
+#include "qom/object.h"
+#include "exec/target_page.h"
+#include "exec/address-spaces.h"
+#include "migration/vmstate.h"
+
+#include "hw/sysbus.h"
+#include "hw/xen/xen.h"
+#include "xen_overlay.h"
+
+#include "sysemu/kvm.h"
+#include "sysemu/kvm_xen.h"
+#include <linux/kvm.h>
+
+#include "hw/xen/interface/memory.h"
+
+
+#define TYPE_XEN_OVERLAY "xen-overlay"
+OBJECT_DECLARE_SIMPLE_TYPE(XenOverlayState, XEN_OVERLAY)
+
+#define XEN_PAGE_SHIFT 12
+#define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT)
+
+struct XenOverlayState {
+    /*< private >*/
+    SysBusDevice busdev;
+    /*< public >*/
+
+    MemoryRegion shinfo_mem;
+    void *shinfo_ptr;
+    uint64_t shinfo_gpa;
+    bool long_mode;
+};
+
+struct XenOverlayState *xen_overlay_singleton;
+
+void xen_overlay_do_map_page(MemoryRegion *page, uint64_t gpa)
+{
+    /*
+     * Xen allows a guest to map the same page as many times as it likes
+     * into guest physical frames. We don't, because it would be hard
+     * to track and restore them all. One mapping of each page is
+     * perfectly sufficient for all known guests... and we've tested
+     * that theory on a few now in other implementations. dwmw2.
+     */
+    if (memory_region_is_mapped(page)) {
+        if (gpa == INVALID_GPA) {
+            memory_region_del_subregion(get_system_memory(), page);
+        } else {
+            /* Just move it */
+            memory_region_set_address(page, gpa);
+        }
+    } else if (gpa != INVALID_GPA) {
+        memory_region_add_subregion_overlap(get_system_memory(), gpa, page, 0);
+    }
+}
+
+/* KVM is the only existing back end for now. Let's not overengineer it yet.
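+ * Another accelerator would need to provide its own implementation of
+ * this hook, and of the long_mode attribute handling further down.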
*/ +static int xen_overlay_set_be_shinfo(uint64_t gfn) +{ + struct kvm_xen_hvm_attr xa = { + .type = KVM_XEN_ATTR_TYPE_SHARED_INFO, + .u.shared_info.gfn = gfn, + }; + + return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa); +} + + +static void xen_overlay_realize(DeviceState *dev, Error **errp) +{ + XenOverlayState *s = XEN_OVERLAY(dev); + + if (xen_mode != XEN_EMULATE) { + error_setg(errp, "Xen overlay page support is for Xen emulation"); + return; + } + + memory_region_init_ram(&s->shinfo_mem, OBJECT(dev), "xen:shared_info", + XEN_PAGE_SIZE, &error_abort); + memory_region_set_enabled(&s->shinfo_mem, true); + + s->shinfo_ptr = memory_region_get_ram_ptr(&s->shinfo_mem); + s->shinfo_gpa = INVALID_GPA; + s->long_mode = false; + memset(s->shinfo_ptr, 0, XEN_PAGE_SIZE); +} + +static int xen_overlay_pre_save(void *opaque) +{ + /* + * Fetch the kernel's idea of long_mode to avoid the race condition + * where the guest has set the hypercall page up in 64-bit mode but + * not yet made a hypercall by the time migration happens, so qemu + * hasn't yet noticed. + */ + return xen_sync_long_mode(); +} + +static int xen_overlay_post_load(void *opaque, int version_id) +{ + XenOverlayState *s = opaque; + + if (s->shinfo_gpa != INVALID_GPA) { + xen_overlay_do_map_page(&s->shinfo_mem, s->shinfo_gpa); + xen_overlay_set_be_shinfo(s->shinfo_gpa >> XEN_PAGE_SHIFT); + } + if (s->long_mode) { + xen_set_long_mode(true); + } + + return 0; +} + +static bool xen_overlay_is_needed(void *opaque) +{ + return xen_mode == XEN_EMULATE; +} + +static const VMStateDescription xen_overlay_vmstate = { + .name = "xen_overlay", + .version_id = 1, + .minimum_version_id = 1, + .needed = xen_overlay_is_needed, + .pre_save = xen_overlay_pre_save, + .post_load = xen_overlay_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT64(shinfo_gpa, XenOverlayState), + VMSTATE_BOOL(long_mode, XenOverlayState), + VMSTATE_END_OF_LIST() + } +}; + +static void xen_overlay_reset(DeviceState *dev) +{ + kvm_xen_soft_reset(); +} + +static void xen_overlay_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = xen_overlay_reset; + dc->realize = xen_overlay_realize; + dc->vmsd = &xen_overlay_vmstate; +} + +static const TypeInfo xen_overlay_info = { + .name = TYPE_XEN_OVERLAY, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XenOverlayState), + .class_init = xen_overlay_class_init, +}; + +void xen_overlay_create(void) +{ + xen_overlay_singleton = XEN_OVERLAY(sysbus_create_simple(TYPE_XEN_OVERLAY, + -1, NULL)); + + /* If xen_domid wasn't explicitly set, at least make sure it isn't zero. 
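+ * QEMU itself acts as the backend domain (DOMID_QEMU), so the guest
+ * cannot share its ID.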
 */
+    if (xen_domid == DOMID_QEMU) {
+        xen_domid = 1;
+    }
+}
+
+static void xen_overlay_register_types(void)
+{
+    type_register_static(&xen_overlay_info);
+}
+
+type_init(xen_overlay_register_types)
+
+int xen_overlay_map_shinfo_page(uint64_t gpa)
+{
+    XenOverlayState *s = xen_overlay_singleton;
+    int ret;
+
+    if (!s) {
+        return -ENOENT;
+    }
+
+    assert(qemu_mutex_iothread_locked());
+
+    if (s->shinfo_gpa) {
+        /* If removing shinfo page, turn the kernel magic off first */
+        ret = xen_overlay_set_be_shinfo(INVALID_GFN);
+        if (ret) {
+            return ret;
+        }
+    }
+
+    xen_overlay_do_map_page(&s->shinfo_mem, gpa);
+    if (gpa != INVALID_GPA) {
+        ret = xen_overlay_set_be_shinfo(gpa >> XEN_PAGE_SHIFT);
+        if (ret) {
+            return ret;
+        }
+    }
+    s->shinfo_gpa = gpa;
+
+    return 0;
+}
+
+void *xen_overlay_get_shinfo_ptr(void)
+{
+    XenOverlayState *s = xen_overlay_singleton;
+
+    if (!s) {
+        return NULL;
+    }
+
+    return s->shinfo_ptr;
+}
+
+int xen_sync_long_mode(void)
+{
+    int ret;
+    struct kvm_xen_hvm_attr xa = {
+        .type = KVM_XEN_ATTR_TYPE_LONG_MODE,
+    };
+
+    if (!xen_overlay_singleton) {
+        return -ENOENT;
+    }
+
+    ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_GET_ATTR, &xa);
+    if (!ret) {
+        xen_overlay_singleton->long_mode = xa.u.long_mode;
+    }
+
+    return ret;
+}
+
+int xen_set_long_mode(bool long_mode)
+{
+    int ret;
+    struct kvm_xen_hvm_attr xa = {
+        .type = KVM_XEN_ATTR_TYPE_LONG_MODE,
+        .u.long_mode = long_mode,
+    };
+
+    if (!xen_overlay_singleton) {
+        return -ENOENT;
+    }
+
+    ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
+    if (!ret) {
+        xen_overlay_singleton->long_mode = xa.u.long_mode;
+    }
+
+    return ret;
+}
+
+bool xen_is_long_mode(void)
+{
+    return xen_overlay_singleton && xen_overlay_singleton->long_mode;
+}
diff --git a/hw/i386/kvm/xen_overlay.h b/hw/i386/kvm/xen_overlay.h
new file mode 100644
index 0000000000..75ecb6b359
--- /dev/null
+++ b/hw/i386/kvm/xen_overlay.h
@@ -0,0 +1,26 @@
+/*
+ * QEMU Xen emulation: Shared/overlay pages support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_XEN_OVERLAY_H
+#define QEMU_XEN_OVERLAY_H
+
+void xen_overlay_create(void);
+
+int xen_overlay_map_shinfo_page(uint64_t gpa);
+void *xen_overlay_get_shinfo_ptr(void);
+
+int xen_sync_long_mode(void);
+int xen_set_long_mode(bool long_mode);
+bool xen_is_long_mode(void);
+
+void xen_overlay_do_map_page(MemoryRegion *page, uint64_t gpa);
+
+#endif /* QEMU_XEN_OVERLAY_H */
diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
new file mode 100644
index 0000000000..0b189c6ab8
--- /dev/null
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -0,0 +1,1728 @@
+/*
+ * QEMU Xen emulation: Xenstore emulation
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */ + +#include "qemu/osdep.h" + +#include "qemu/host-utils.h" +#include "qemu/module.h" +#include "qemu/main-loop.h" +#include "qemu/cutils.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qom/object.h" +#include "migration/vmstate.h" + +#include "hw/sysbus.h" +#include "hw/xen/xen.h" +#include "hw/xen/xen_backend_ops.h" +#include "xen_overlay.h" +#include "xen_evtchn.h" +#include "xen_xenstore.h" + +#include "sysemu/kvm.h" +#include "sysemu/kvm_xen.h" + +#include "trace.h" + +#include "xenstore_impl.h" + +#include "hw/xen/interface/io/xs_wire.h" +#include "hw/xen/interface/event_channel.h" +#include "hw/xen/interface/grant_table.h" + +#define TYPE_XEN_XENSTORE "xen-xenstore" +OBJECT_DECLARE_SIMPLE_TYPE(XenXenstoreState, XEN_XENSTORE) + +#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t)) +#define ENTRIES_PER_FRAME_V2 (XEN_PAGE_SIZE / sizeof(grant_entry_v2_t)) + +#define XENSTORE_HEADER_SIZE ((unsigned int)sizeof(struct xsd_sockmsg)) + +struct XenXenstoreState { + /*< private >*/ + SysBusDevice busdev; + /*< public >*/ + + XenstoreImplState *impl; + GList *watch_events; /* for the guest */ + + MemoryRegion xenstore_page; + struct xenstore_domain_interface *xs; + uint8_t req_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX]; + uint8_t rsp_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX]; + uint32_t req_offset; + uint32_t rsp_offset; + bool rsp_pending; + bool fatal_error; + + evtchn_port_t guest_port; + evtchn_port_t be_port; + struct xenevtchn_handle *eh; + + uint8_t *impl_state; + uint32_t impl_state_size; + + struct xengntdev_handle *gt; + void *granted_xs; +}; + +struct XenXenstoreState *xen_xenstore_singleton; + +static void xen_xenstore_event(void *opaque); +static void fire_watch_cb(void *opaque, const char *path, const char *token); + +static struct xenstore_backend_ops emu_xenstore_backend_ops; + +static void G_GNUC_PRINTF (4, 5) relpath_printf(XenXenstoreState *s, + GList *perms, + const char *relpath, + const char *fmt, ...) 
+{ + gchar *abspath; + gchar *value; + va_list args; + GByteArray *data; + int err; + + abspath = g_strdup_printf("/local/domain/%u/%s", xen_domid, relpath); + va_start(args, fmt); + value = g_strdup_vprintf(fmt, args); + va_end(args); + + data = g_byte_array_new_take((void *)value, strlen(value)); + + err = xs_impl_write(s->impl, DOMID_QEMU, XBT_NULL, abspath, data); + assert(!err); + + g_byte_array_unref(data); + + err = xs_impl_set_perms(s->impl, DOMID_QEMU, XBT_NULL, abspath, perms); + assert(!err); + + g_free(abspath); +} + +static void xen_xenstore_realize(DeviceState *dev, Error **errp) +{ + XenXenstoreState *s = XEN_XENSTORE(dev); + GList *perms; + + if (xen_mode != XEN_EMULATE) { + error_setg(errp, "Xen xenstore support is for Xen emulation"); + return; + } + memory_region_init_ram(&s->xenstore_page, OBJECT(dev), "xen:xenstore_page", + XEN_PAGE_SIZE, &error_abort); + memory_region_set_enabled(&s->xenstore_page, true); + s->xs = memory_region_get_ram_ptr(&s->xenstore_page); + memset(s->xs, 0, XEN_PAGE_SIZE); + + /* We can't map it this early as KVM isn't ready */ + xen_xenstore_singleton = s; + + s->eh = xen_be_evtchn_open(); + if (!s->eh) { + error_setg(errp, "Xenstore evtchn port init failed"); + return; + } + aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh), + xen_xenstore_event, NULL, NULL, NULL, s); + + s->impl = xs_impl_create(xen_domid); + + /* Populate the default nodes */ + + /* Nodes owned by 'dom0' but readable by the guest */ + perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU)); + perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid)); + + relpath_printf(s, perms, "", "%s", ""); + + relpath_printf(s, perms, "domid", "%u", xen_domid); + + relpath_printf(s, perms, "control/platform-feature-xs_reset_watches", "%u", 1); + relpath_printf(s, perms, "control/platform-feature-multiprocessor-suspend", "%u", 1); + + relpath_printf(s, perms, "platform/acpi", "%u", 1); + relpath_printf(s, perms, "platform/acpi_s3", "%u", 1); + relpath_printf(s, perms, "platform/acpi_s4", "%u", 1); + relpath_printf(s, perms, "platform/acpi_laptop_slate", "%u", 0); + + g_list_free_full(perms, g_free); + + /* Nodes owned by the guest */ + perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, xen_domid)); + + relpath_printf(s, perms, "attr", "%s", ""); + + relpath_printf(s, perms, "control/shutdown", "%s", ""); + relpath_printf(s, perms, "control/feature-poweroff", "%u", 1); + relpath_printf(s, perms, "control/feature-reboot", "%u", 1); + relpath_printf(s, perms, "control/feature-suspend", "%u", 1); + relpath_printf(s, perms, "control/feature-s3", "%u", 1); + relpath_printf(s, perms, "control/feature-s4", "%u", 1); + + relpath_printf(s, perms, "data", "%s", ""); + relpath_printf(s, perms, "device", "%s", ""); + relpath_printf(s, perms, "drivers", "%s", ""); + relpath_printf(s, perms, "error", "%s", ""); + relpath_printf(s, perms, "feature", "%s", ""); + + g_list_free_full(perms, g_free); + + xen_xenstore_ops = &emu_xenstore_backend_ops; +} + +static bool xen_xenstore_is_needed(void *opaque) +{ + return xen_mode == XEN_EMULATE; +} + +static int xen_xenstore_pre_save(void *opaque) +{ + XenXenstoreState *s = opaque; + GByteArray *save; + + if (s->eh) { + s->guest_port = xen_be_evtchn_get_guest_port(s->eh); + } + + g_free(s->impl_state); + save = xs_impl_serialize(s->impl); + s->impl_state = save->data; + s->impl_state_size = save->len; + g_byte_array_free(save, false); + + return 0; +} + +static int xen_xenstore_post_load(void *opaque, int 
ver) +{ + XenXenstoreState *s = opaque; + GByteArray *save; + int ret; + + /* + * As qemu/dom0, rebind to the guest's port. The Windows drivers may + * unbind the XenStore evtchn and rebind to it, having obtained the + * "remote" port through EVTCHNOP_status. In the case that migration + * occurs while it's unbound, the "remote" port needs to be the same + * as before so that the guest can find it, but should remain unbound. + */ + if (s->guest_port) { + int be_port = xen_be_evtchn_bind_interdomain(s->eh, xen_domid, + s->guest_port); + if (be_port < 0) { + return be_port; + } + s->be_port = be_port; + } + + save = g_byte_array_new_take(s->impl_state, s->impl_state_size); + s->impl_state = NULL; + s->impl_state_size = 0; + + ret = xs_impl_deserialize(s->impl, save, xen_domid, fire_watch_cb, s); + return ret; +} + +static const VMStateDescription xen_xenstore_vmstate = { + .name = "xen_xenstore", + .unmigratable = 1, /* The PV back ends don't migrate yet */ + .version_id = 1, + .minimum_version_id = 1, + .needed = xen_xenstore_is_needed, + .pre_save = xen_xenstore_pre_save, + .post_load = xen_xenstore_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState, + sizeof_field(XenXenstoreState, req_data)), + VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState, + sizeof_field(XenXenstoreState, rsp_data)), + VMSTATE_UINT32(req_offset, XenXenstoreState), + VMSTATE_UINT32(rsp_offset, XenXenstoreState), + VMSTATE_BOOL(rsp_pending, XenXenstoreState), + VMSTATE_UINT32(guest_port, XenXenstoreState), + VMSTATE_BOOL(fatal_error, XenXenstoreState), + VMSTATE_UINT32(impl_state_size, XenXenstoreState), + VMSTATE_VARRAY_UINT32_ALLOC(impl_state, XenXenstoreState, + impl_state_size, 0, + vmstate_info_uint8, uint8_t), + VMSTATE_END_OF_LIST() + } +}; + +static void xen_xenstore_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = xen_xenstore_realize; + dc->vmsd = &xen_xenstore_vmstate; +} + +static const TypeInfo xen_xenstore_info = { + .name = TYPE_XEN_XENSTORE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XenXenstoreState), + .class_init = xen_xenstore_class_init, +}; + +void xen_xenstore_create(void) +{ + DeviceState *dev = sysbus_create_simple(TYPE_XEN_XENSTORE, -1, NULL); + + xen_xenstore_singleton = XEN_XENSTORE(dev); + + /* + * Defer the init (xen_xenstore_reset()) until KVM is set up and the + * overlay page can be mapped. 
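+     * xen_xenstore_reset() maps the ring page, allocates the guest event
+     * channel and binds to it.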
+ */
+}
+
+static void xen_xenstore_register_types(void)
+{
+    type_register_static(&xen_xenstore_info);
+}
+
+type_init(xen_xenstore_register_types)
+
+uint16_t xen_xenstore_get_port(void)
+{
+    XenXenstoreState *s = xen_xenstore_singleton;
+    if (!s) {
+        return 0;
+    }
+    return s->guest_port;
+}
+
+static bool req_pending(XenXenstoreState *s)
+{
+    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+
+    return s->req_offset == XENSTORE_HEADER_SIZE + req->len;
+}
+
+static void reset_req(XenXenstoreState *s)
+{
+    memset(s->req_data, 0, sizeof(s->req_data));
+    s->req_offset = 0;
+}
+
+static void reset_rsp(XenXenstoreState *s)
+{
+    s->rsp_pending = false;
+
+    memset(s->rsp_data, 0, sizeof(s->rsp_data));
+    s->rsp_offset = 0;
+}
+
+static void xs_error(XenXenstoreState *s, unsigned int id,
+                     xs_transaction_t tx_id, int errnum)
+{
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    const char *errstr = NULL;
+
+    for (unsigned int i = 0; i < ARRAY_SIZE(xsd_errors); i++) {
+        struct xsd_errors *xsd_error = &xsd_errors[i];
+
+        if (xsd_error->errnum == errnum) {
+            errstr = xsd_error->errstring;
+            break;
+        }
+    }
+    assert(errstr);
+
+    trace_xenstore_error(id, tx_id, errstr);
+
+    rsp->type = XS_ERROR;
+    rsp->req_id = id;
+    rsp->tx_id = tx_id;
+    rsp->len = (uint32_t)strlen(errstr) + 1;
+
+    memcpy(&rsp[1], errstr, rsp->len);
+}
+
+static void xs_ok(XenXenstoreState *s, unsigned int type, unsigned int req_id,
+                  xs_transaction_t tx_id)
+{
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    const char *okstr = "OK";
+
+    rsp->type = type;
+    rsp->req_id = req_id;
+    rsp->tx_id = tx_id;
+    rsp->len = (uint32_t)strlen(okstr) + 1;
+
+    memcpy(&rsp[1], okstr, rsp->len);
+}
+
+/*
+ * The correct request and response formats are documented in xen.git:
+ * docs/misc/xenstore.txt. A summary is given below for convenience.
+ * The '|' symbol represents a NUL character.
+ *
+ * ---------- Database read, write and permissions operations ----------
+ *
+ * READ                    <path>|                 <value|>
+ * WRITE                   <path>|<value|>
+ *         Store and read the octet string <value> at <path>.
+ *         WRITE creates any missing parent paths, with empty values.
+ *
+ * MKDIR                   <path>|
+ *         Ensures that the <path> exists, if necessary by creating
+ *         it and any missing parents with empty values. If <path>
+ *         or any parent already exists, its value is left unchanged.
+ *
+ * RM                      <path>|
+ *         Ensures that the <path> does not exist, by deleting
+ *         it and all of its children. It is not an error if <path> does
+ *         not exist, but it _is_ an error if <path>'s immediate parent
+ *         does not exist either.
+ *
+ * DIRECTORY               <path>|                 <child-leaf-name>|*
+ *         Gives a list of the immediate children of <path>, as only the
+ *         leafnames. The resulting children are each named
+ *         <path>/<child-leaf-name>.
+ *
+ * DIRECTORY_PART          <path>|<offset>         <gencnt>|<child-leaf-name>|*
+ *         Same as DIRECTORY, but to be used for children lists longer than
+ *         XENSTORE_PAYLOAD_MAX. Input are <path> and the byte offset into
+ *         the list of children to return. Return values are the generation
+ *         count <gencnt> of the node (to be used to ensure the node hasn't
+ *         changed between two reads: <gencnt> being the same for multiple
+ *         reads guarantees the node hasn't changed) and the list of children
+ *         starting at the specified <offset> of the complete list.
+ *
+ * GET_PERMS               <path>|                 <perm-as-string>|+
+ * SET_PERMS               <path>|<perm-as-string>|+?
+ *         <perm-as-string> is one of the following
+ *                 w<domid>        write only
+ *                 r<domid>        read only
+ *                 b<domid>        both read and write
+ *                 n<domid>        no access
+ *         See https://wiki.xen.org/wiki/XenBus section
+ *         `Permissions' for details of the permissions system.
+ *         It is possible to set permissions for the special watch paths
+ *         "@introduceDomain" and "@releaseDomain" to enable receiving those
+ *         watches in unprivileged domains.
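+ *
+ * (Illustrative note: the first <perm-as-string> names the node's owner
+ * and the default access for everyone else; the guest-readable nodes
+ * created in xen_xenstore_realize() above are owned by DOMID_QEMU and
+ * grant the guest XS_PERM_READ, encoded as e.g. "r1" for domain 1.)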
+ *
+ * ---------- Watches ----------
+ *
+ * WATCH                   <wpath>|<token>|?
+ *         Adds a watch.
+ *
+ *         When a <path> is modified (including path creation, removal,
+ *         contents change or permissions change) this generates an event
+ *         on the changed <path>. Changes made in transactions cause an
+ *         event only if and when committed. Each occurring event is
+ *         matched against all the watches currently set up, and each
+ *         matching watch results in a WATCH_EVENT message (see below).
+ *
+ *         The event's path matches the watch's <wpath> if it is a child
+ *         of <wpath>.
+ *
+ *         <wpath> can be a <path> to watch or @<wspecial>. In the
+ *         latter case <wspecial> may have any syntax but it matches
+ *         (according to the rules above) only the following special
+ *         events which are invented by xenstored:
+ *             @introduceDomain    occurs on INTRODUCE
+ *             @releaseDomain      occurs on any domain crash or
+ *                                 shutdown, and also on RELEASE
+ *                                 and domain destruction
+ *         <wspecial> events are sent to privileged callers or explicitly
+ *         via SET_PERMS enabled domains only.
+ *
+ *         When a watch is first set up it is triggered once straight
+ *         away, with <path> equal to <wpath>. Watches may be triggered
+ *         spuriously. The tx_id in a WATCH request is ignored.
+ *
+ *         Watches are supposed to be restricted by the permissions
+ *         system but in practice the implementation is imperfect.
+ *         Applications should not rely on being sent a notification for
+ *         paths that they cannot read; however, an application may rely
+ *         on being sent a watch when a path which it _is_ able to read
+ *         is deleted even if that leaves only a nonexistent unreadable
+ *         parent. A notification may be omitted if a node's permissions
+ *         are changed so as to make it unreadable, in which case future
+ *         notifications may be suppressed (and if the node is later made
+ *         readable, some notifications may have been lost).
+ *
+ * WATCH_EVENT                                     <epath>|<token>|
+ *         Unsolicited `reply' generated for matching modification events
+ *         as described above. req_id and tx_id are both 0.
+ *
+ *         <epath> is the event's path, ie the actual path that was
+ *         modified; however if the event was the recursive removal of a
+ *         parent of <wpath>, <epath> is just
+ *         <wpath> (rather than the actual path which was removed). So
+ *         <epath> is a child of <wpath>, regardless.
+ *
+ *         Iff <wpath> for the watch was specified as a relative pathname,
+ *         the <epath> path will also be relative (with the same base,
+ *         obviously).
+ *
+ * UNWATCH                 <wpath>|<token>|?
+ *
+ * RESET_WATCHES           |
+ *         Reset all watches and transactions of the caller.
+ *
+ * ---------- Transactions ----------
+ *
+ * TRANSACTION_START       |                       <transid>|
+ *         <transid> is an opaque uint32_t allocated by xenstored
+ *         represented as unsigned decimal. After this, transaction may
+ *         be referenced by using <transid> (as 32-bit binary) in the
+ *         tx_id request header field. When transaction is started whole
+ *         db is copied; reads and writes happen on the copy.
+ *         It is not legal to send non-0 tx_id in TRANSACTION_START.
+ *
+ * TRANSACTION_END         T|
+ * TRANSACTION_END         F|
+ *         tx_id must refer to existing transaction. After this
+ *         request the tx_id is no longer valid and may be reused by
+ *         xenstore. If F, the transaction is discarded. If T,
+ *         it is committed: if there were any other intervening writes
+ *         then our END gets EAGAIN.
+ *
+ *         The plan is that in the future only intervening `conflicting'
+ *         writes cause EAGAIN, meaning only writes or other commits
+ *         which changed paths which were read or written in the
+ *         transaction at hand.
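+ *
+ * (Illustrative example, not from the spec: reading the guest's own
+ * domid outside any transaction is a header { XS_READ, req_id, 0, 22 }
+ * followed by the 22 bytes "/local/domain/1/domid\0"; the reply is a
+ * header { XS_READ, req_id, 0, 1 } followed by the single byte "1".)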
+ *
+ */
+
+static void xs_read(XenXenstoreState *s, unsigned int req_id,
+                    xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
+{
+    const char *path = (const char *)req_data;
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    uint8_t *rsp_data = (uint8_t *)&rsp[1];
+    g_autoptr(GByteArray) data = g_byte_array_new();
+    int err;
+
+    if (len == 0 || req_data[len - 1] != '\0') {
+        xs_error(s, req_id, tx_id, EINVAL);
+        return;
+    }
+
+    trace_xenstore_read(tx_id, path);
+    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
+    if (err) {
+        xs_error(s, req_id, tx_id, err);
+        return;
+    }
+
+    rsp->type = XS_READ;
+    rsp->req_id = req_id;
+    rsp->tx_id = tx_id;
+    rsp->len = 0;
+
+    len = data->len;
+    if (len > XENSTORE_PAYLOAD_MAX) {
+        xs_error(s, req_id, tx_id, E2BIG);
+        return;
+    }
+
+    memcpy(&rsp_data[rsp->len], data->data, len);
+    rsp->len += len;
+}
+
+static void xs_write(XenXenstoreState *s, unsigned int req_id,
+                     xs_transaction_t tx_id, uint8_t *req_data,
+                     unsigned int len)
+{
+    g_autoptr(GByteArray) data = g_byte_array_new();
+    const char *path;
+    int err;
+
+    if (len == 0) {
+        xs_error(s, req_id, tx_id, EINVAL);
+        return;
+    }
+
+    path = (const char *)req_data;
+
+    while (len--) {
+        if (*req_data++ == '\0') {
+            break;
+        }
+        if (len == 0) {
+            xs_error(s, req_id, tx_id, EINVAL);
+            return;
+        }
+    }
+
+    g_byte_array_append(data, req_data, len);
+
+    trace_xenstore_write(tx_id, path);
+    err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
+    if (err) {
+        xs_error(s, req_id, tx_id, err);
+        return;
+    }
+
+    xs_ok(s, XS_WRITE, req_id, tx_id);
+}
+
+static void xs_mkdir(XenXenstoreState *s, unsigned int req_id,
+                     xs_transaction_t tx_id, uint8_t *req_data,
+                     unsigned int len)
+{
+    g_autoptr(GByteArray) data = g_byte_array_new();
+    const char *path;
+    int err;
+
+    if (len == 0 || req_data[len - 1] != '\0') {
+        xs_error(s, req_id, tx_id, EINVAL);
+        return;
+    }
+
+    path = (const char *)req_data;
+
+    trace_xenstore_mkdir(tx_id, path);
+    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
+    if (err == ENOENT) {
+        err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
+    }
+
+    if (err) {
+        xs_error(s, req_id, tx_id, err);
+        return;
+    }
+
+    xs_ok(s, XS_MKDIR, req_id, tx_id);
+}
+
+static void xs_append_strings(XenXenstoreState *s, struct xsd_sockmsg *rsp,
+                              GList *strings, unsigned int start, bool truncate)
+{
+    uint8_t *rsp_data = (uint8_t *)&rsp[1];
+    GList *l;
+
+    for (l = strings; l; l = l->next) {
+        size_t len = strlen(l->data) + 1; /* Including the NUL termination */
+        char *str = l->data;
+
+        if (rsp->len + len > XENSTORE_PAYLOAD_MAX) {
+            if (truncate) {
+                len = XENSTORE_PAYLOAD_MAX - rsp->len;
+                if (!len) {
+                    return;
+                }
+            } else {
+                xs_error(s, rsp->req_id, rsp->tx_id, E2BIG);
+                return;
+            }
+        }
+
+        if (start) {
+            if (start >= len) {
+                start -= len;
+                continue;
+            }
+
+            str += start;
+            len -= start;
+            start = 0;
+        }
+
+        memcpy(&rsp_data[rsp->len], str, len);
+        rsp->len += len;
+    }
+    /* XS_DIRECTORY_PART wants an extra NUL to indicate the end */
+    if (truncate && rsp->len < XENSTORE_PAYLOAD_MAX) {
+        rsp_data[rsp->len++] = '\0';
+    }
+}
+
+static void xs_directory(XenXenstoreState *s, unsigned int req_id,
+                         xs_transaction_t tx_id, uint8_t *req_data,
+                         unsigned int len)
+{
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    GList *items = NULL;
+    const char *path;
+    int err;
+
+    if (len == 0 || req_data[len - 1] != '\0') {
+        xs_error(s, req_id, tx_id, EINVAL);
+        return;
+    }
+
+    path = (const char *)req_data;
+
+    trace_xenstore_directory(tx_id, path);
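+    /* Plain DIRECTORY has no use for the generation count, hence NULL */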
err = xs_impl_directory(s->impl, xen_domid, tx_id, path, NULL, &items); + if (err != 0) { + xs_error(s, req_id, tx_id, err); + return; + } + + rsp->type = XS_DIRECTORY; + rsp->req_id = req_id; + rsp->tx_id = tx_id; + rsp->len = 0; + + xs_append_strings(s, rsp, items, 0, false); + + g_list_free_full(items, g_free); +} + +static void xs_directory_part(XenXenstoreState *s, unsigned int req_id, + xs_transaction_t tx_id, uint8_t *req_data, + unsigned int len) +{ + const char *offset_str, *path = (const char *)req_data; + struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data; + char *rsp_data = (char *)&rsp[1]; + uint64_t gencnt = 0; + unsigned int offset; + GList *items = NULL; + int err; + + if (len == 0) { + xs_error(s, req_id, tx_id, EINVAL); + return; + } + + while (len--) { + if (*req_data++ == '\0') { + break; + } + if (len == 0) { + xs_error(s, req_id, tx_id, EINVAL); + return; + } + } + + offset_str = (const char *)req_data; + while (len--) { + if (*req_data++ == '\0') { + break; + } + if (len == 0) { + xs_error(s, req_id, tx_id, EINVAL); + return; + } + } + + if (len) { + xs_error(s, req_id, tx_id, EINVAL); + return; + } + + if (qemu_strtoui(offset_str, NULL, 10, &offset) < 0) { + xs_error(s, req_id, tx_id, EINVAL); + return; + } + + trace_xenstore_directory_part(tx_id, path, offset); + err = xs_impl_directory(s->impl, xen_domid, tx_id, path, &gencnt, &items); + if (err != 0) { + xs_error(s, req_id, tx_id, err); + return; + } + + rsp->type = XS_DIRECTORY_PART; + rsp->req_id = req_id; + rsp->tx_id = tx_id; + rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%" PRIu64, gencnt) + 1; + + xs_append_strings(s, rsp, items, offset, true); + + g_list_free_full(items, g_free); +} + +static void xs_transaction_start(XenXenstoreState *s, unsigned int req_id, + xs_transaction_t tx_id, uint8_t *req_data, + unsigned int len) +{ + struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data; + char *rsp_data = (char *)&rsp[1]; + int err; + + if (len != 1 || req_data[0] != '\0') { + xs_error(s, req_id, tx_id, EINVAL); + return; + } + + rsp->type = XS_TRANSACTION_START; + rsp->req_id = req_id; + rsp->tx_id = tx_id; + rsp->len = 0; + + err = xs_impl_transaction_start(s->impl, xen_domid, &tx_id); + if (err) { + xs_error(s, req_id, tx_id, err); + return; + } + + trace_xenstore_transaction_start(tx_id); + + rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%u", tx_id); + assert(rsp->len < XENSTORE_PAYLOAD_MAX); + rsp->len++; +} + +static void xs_transaction_end(XenXenstoreState *s, unsigned int req_id, + xs_transaction_t tx_id, uint8_t *req_data, + unsigned int len) +{ + bool commit; + int err; + + if (len != 2 || req_data[1] != '\0') { + xs_error(s, req_id, tx_id, EINVAL); + return; + } + + switch (req_data[0]) { + case 'T': + commit = true; + break; + case 'F': + commit = false; + break; + default: + xs_error(s, req_id, tx_id, EINVAL); + return; + } + + trace_xenstore_transaction_end(tx_id, commit); + err = xs_impl_transaction_end(s->impl, xen_domid, tx_id, commit); + if (err) { + xs_error(s, req_id, tx_id, err); + return; + } + + xs_ok(s, XS_TRANSACTION_END, req_id, tx_id); +} + +static void xs_rm(XenXenstoreState *s, unsigned int req_id, + xs_transaction_t tx_id, uint8_t *req_data, unsigned int len) +{ + const char *path = (const char *)req_data; + int err; + + if (len == 0 || req_data[len - 1] != '\0') { + xs_error(s, req_id, tx_id, EINVAL); + return; + } + + trace_xenstore_rm(tx_id, path); + err = xs_impl_rm(s->impl, xen_domid, tx_id, path); + if (err) { + xs_error(s, req_id, tx_id, err); + 
return;
+    }
+
+    xs_ok(s, XS_RM, req_id, tx_id);
+}
+
+static void xs_get_perms(XenXenstoreState *s, unsigned int req_id,
+                         xs_transaction_t tx_id, uint8_t *req_data,
+                         unsigned int len)
+{
+    const char *path = (const char *)req_data;
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    GList *perms = NULL;
+    int err;
+
+    if (len == 0 || req_data[len - 1] != '\0') {
+        xs_error(s, req_id, tx_id, EINVAL);
+        return;
+    }
+
+    trace_xenstore_get_perms(tx_id, path);
+    err = xs_impl_get_perms(s->impl, xen_domid, tx_id, path, &perms);
+    if (err) {
+        xs_error(s, req_id, tx_id, err);
+        return;
+    }
+
+    rsp->type = XS_GET_PERMS;
+    rsp->req_id = req_id;
+    rsp->tx_id = tx_id;
+    rsp->len = 0;
+
+    xs_append_strings(s, rsp, perms, 0, false);
+
+    g_list_free_full(perms, g_free);
+}
+
+static void xs_set_perms(XenXenstoreState *s, unsigned int req_id,
+                         xs_transaction_t tx_id, uint8_t *req_data,
+                         unsigned int len)
+{
+    const char *path = (const char *)req_data;
+    uint8_t *perm;
+    GList *perms = NULL;
+    int err;
+
+    if (len == 0) {
+        xs_error(s, req_id, tx_id, EINVAL);
+        return;
+    }
+
+    while (len--) {
+        if (*req_data++ == '\0') {
+            break;
+        }
+        if (len == 0) {
+            xs_error(s, req_id, tx_id, EINVAL);
+            return;
+        }
+    }
+
+    perm = req_data;
+    while (len--) {
+        if (*req_data++ == '\0') {
+            perms = g_list_append(perms, perm);
+            perm = req_data;
+        }
+    }
+
+    /*
+     * Note that there may be trailing garbage at the end of the buffer.
+     * This is explicitly permitted by the '?' at the end of the definition:
+     *
+     *    SET_PERMS         <path>|<perm-as-string>|+?
+     */
+
+    trace_xenstore_set_perms(tx_id, path);
+    err = xs_impl_set_perms(s->impl, xen_domid, tx_id, path, perms);
+    g_list_free(perms);
+    if (err) {
+        xs_error(s, req_id, tx_id, err);
+        return;
+    }
+
+    xs_ok(s, XS_SET_PERMS, req_id, tx_id);
+}
+
+static void xs_watch(XenXenstoreState *s, unsigned int req_id,
+                     xs_transaction_t tx_id, uint8_t *req_data,
+                     unsigned int len)
+{
+    const char *token, *path = (const char *)req_data;
+    int err;
+
+    if (len == 0) {
+        xs_error(s, req_id, tx_id, EINVAL);
+        return;
+    }
+
+    while (len--) {
+        if (*req_data++ == '\0') {
+            break;
+        }
+        if (len == 0) {
+            xs_error(s, req_id, tx_id, EINVAL);
+            return;
+        }
+    }
+
+    token = (const char *)req_data;
+    while (len--) {
+        if (*req_data++ == '\0') {
+            break;
+        }
+        if (len == 0) {
+            xs_error(s, req_id, tx_id, EINVAL);
+            return;
+        }
+    }
+
+    /*
+     * Note that there may be trailing garbage at the end of the buffer.
+     * This is explicitly permitted by the '?' at the end of the definition:
+     *
+     *    WATCH             <wpath>|<token>|?
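+     * (so, as with SET_PERMS above, anything after the token's NUL is
+     * ignored)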
+ */ + + trace_xenstore_watch(path, token); + err = xs_impl_watch(s->impl, xen_domid, path, token, fire_watch_cb, s); + if (err) { + xs_error(s, req_id, tx_id, err); + return; + } + + xs_ok(s, XS_WATCH, req_id, tx_id); +} + +static void xs_unwatch(XenXenstoreState *s, unsigned int req_id, + xs_transaction_t tx_id, uint8_t *req_data, + unsigned int len) +{ + const char *token, *path = (const char *)req_data; + int err; + + if (len == 0) { + xs_error(s, req_id, tx_id, EINVAL); + return; + } + + while (len--) { + if (*req_data++ == '\0') { + break; + } + if (len == 0) { + xs_error(s, req_id, tx_id, EINVAL); + return; + } + } + + token = (const char *)req_data; + while (len--) { + if (*req_data++ == '\0') { + break; + } + if (len == 0) { + xs_error(s, req_id, tx_id, EINVAL); + return; + } + } + + trace_xenstore_unwatch(path, token); + err = xs_impl_unwatch(s->impl, xen_domid, path, token, fire_watch_cb, s); + if (err) { + xs_error(s, req_id, tx_id, err); + return; + } + + xs_ok(s, XS_UNWATCH, req_id, tx_id); +} + +static void xs_reset_watches(XenXenstoreState *s, unsigned int req_id, + xs_transaction_t tx_id, uint8_t *req_data, + unsigned int len) +{ + if (len == 0 || req_data[len - 1] != '\0') { + xs_error(s, req_id, tx_id, EINVAL); + return; + } + + trace_xenstore_reset_watches(); + xs_impl_reset_watches(s->impl, xen_domid); + + xs_ok(s, XS_RESET_WATCHES, req_id, tx_id); +} + +static void xs_priv(XenXenstoreState *s, unsigned int req_id, + xs_transaction_t tx_id, uint8_t *data, + unsigned int len) +{ + xs_error(s, req_id, tx_id, EACCES); +} + +static void xs_unimpl(XenXenstoreState *s, unsigned int req_id, + xs_transaction_t tx_id, uint8_t *data, + unsigned int len) +{ + xs_error(s, req_id, tx_id, ENOSYS); +} + +typedef void (*xs_impl)(XenXenstoreState *s, unsigned int req_id, + xs_transaction_t tx_id, uint8_t *data, + unsigned int len); + +struct xsd_req { + const char *name; + xs_impl fn; +}; +#define XSD_REQ(_type, _fn) \ + [_type] = { .name = #_type, .fn = _fn } + +struct xsd_req xsd_reqs[] = { + XSD_REQ(XS_READ, xs_read), + XSD_REQ(XS_WRITE, xs_write), + XSD_REQ(XS_MKDIR, xs_mkdir), + XSD_REQ(XS_DIRECTORY, xs_directory), + XSD_REQ(XS_DIRECTORY_PART, xs_directory_part), + XSD_REQ(XS_TRANSACTION_START, xs_transaction_start), + XSD_REQ(XS_TRANSACTION_END, xs_transaction_end), + XSD_REQ(XS_RM, xs_rm), + XSD_REQ(XS_GET_PERMS, xs_get_perms), + XSD_REQ(XS_SET_PERMS, xs_set_perms), + XSD_REQ(XS_WATCH, xs_watch), + XSD_REQ(XS_UNWATCH, xs_unwatch), + XSD_REQ(XS_CONTROL, xs_priv), + XSD_REQ(XS_INTRODUCE, xs_priv), + XSD_REQ(XS_RELEASE, xs_priv), + XSD_REQ(XS_IS_DOMAIN_INTRODUCED, xs_priv), + XSD_REQ(XS_RESUME, xs_priv), + XSD_REQ(XS_SET_TARGET, xs_priv), + XSD_REQ(XS_RESET_WATCHES, xs_reset_watches), +}; + +static void process_req(XenXenstoreState *s) +{ + struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data; + xs_impl handler = NULL; + + assert(req_pending(s)); + assert(!s->rsp_pending); + + if (req->type < ARRAY_SIZE(xsd_reqs)) { + handler = xsd_reqs[req->type].fn; + } + if (!handler) { + handler = &xs_unimpl; + } + + handler(s, req->req_id, req->tx_id, (uint8_t *)&req[1], req->len); + + s->rsp_pending = true; + reset_req(s); +} + +static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr, + unsigned int len) +{ + if (!len) { + return 0; + } + + XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod); + XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons); + unsigned int copied = 0; + + /* Ensure the ring contents don't cross the req_prod access. 
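+     * This pairs with the write barrier the guest executes between
+     * filling the ring and advancing req_prod.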
 */
+    smp_rmb();
+
+    while (len) {
+        unsigned int avail = prod - cons;
+        unsigned int offset = MASK_XENSTORE_IDX(cons);
+        unsigned int copylen = avail;
+
+        if (avail > XENSTORE_RING_SIZE) {
+            error_report("XenStore ring handling error");
+            s->fatal_error = true;
+            break;
+        } else if (avail == 0) {
+            break;
+        }
+
+        if (copylen > len) {
+            copylen = len;
+        }
+        if (copylen > XENSTORE_RING_SIZE - offset) {
+            copylen = XENSTORE_RING_SIZE - offset;
+        }
+
+        memcpy(ptr, &s->xs->req[offset], copylen);
+        copied += copylen;
+
+        ptr += copylen;
+        len -= copylen;
+
+        cons += copylen;
+    }
+
+    /*
+     * Not sure this ever mattered except on Alpha, but this barrier
+     * is to ensure that the update to req_cons is globally visible
+     * only after we have consumed all the data from the ring, and we
+     * don't end up seeing data written to the ring *after* the other
+     * end sees the update and writes more to the ring. Xen's own
+     * xenstored has the same barrier here (although with no comment
+     * at all, obviously, because it's Xen code).
+     */
+    smp_mb();
+
+    qatomic_set(&s->xs->req_cons, cons);
+
+    return copied;
+}
+
+static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr,
+                                 unsigned int len)
+{
+    if (!len) {
+        return 0;
+    }
+
+    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons);
+    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod);
+    unsigned int copied = 0;
+
+    /*
+     * This matches the barrier in copy_to_ring() (or the guest's
+     * equivalent) between writing the data to the ring and updating
+     * rsp_prod. It protects against the pathological case (which
+     * again I think never happened except on Alpha) where our
+     * subsequent writes to the ring could *cross* the read of
+     * rsp_cons and the guest could see the new data when it was
+     * intending to read the old.
+     */
+    smp_mb();
+
+    while (len) {
+        unsigned int avail = cons + XENSTORE_RING_SIZE - prod;
+        unsigned int offset = MASK_XENSTORE_IDX(prod);
+        unsigned int copylen = len;
+
+        if (avail > XENSTORE_RING_SIZE) {
+            error_report("XenStore ring handling error");
+            s->fatal_error = true;
+            break;
+        } else if (avail == 0) {
+            break;
+        }
+
+        if (copylen > avail) {
+            copylen = avail;
+        }
+        if (copylen > XENSTORE_RING_SIZE - offset) {
+            copylen = XENSTORE_RING_SIZE - offset;
+        }
+
+        memcpy(&s->xs->rsp[offset], ptr, copylen);
+        copied += copylen;
+
+        ptr += copylen;
+        len -= copylen;
+
+        prod += copylen;
+    }
+
+    /* Ensure the ring contents are seen before rsp_prod update.
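+     * It pairs with the read barrier the guest executes between reading
+     * rsp_prod and consuming the response data.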
 */
+    smp_wmb();
+
+    qatomic_set(&s->xs->rsp_prod, prod);
+
+    return copied;
+}
+
+static unsigned int get_req(XenXenstoreState *s)
+{
+    unsigned int copied = 0;
+
+    if (s->fatal_error) {
+        return 0;
+    }
+
+    assert(!req_pending(s));
+
+    if (s->req_offset < XENSTORE_HEADER_SIZE) {
+        void *ptr = s->req_data + s->req_offset;
+        unsigned int len = XENSTORE_HEADER_SIZE;
+        unsigned int copylen = copy_from_ring(s, ptr, len);
+
+        copied += copylen;
+        s->req_offset += copylen;
+    }
+
+    if (s->req_offset >= XENSTORE_HEADER_SIZE) {
+        struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+
+        if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) {
+            error_report("Illegal XenStore request");
+            s->fatal_error = true;
+            return 0;
+        }
+
+        void *ptr = s->req_data + s->req_offset;
+        unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset;
+        unsigned int copylen = copy_from_ring(s, ptr, len);
+
+        copied += copylen;
+        s->req_offset += copylen;
+    }
+
+    return copied;
+}
+
+static unsigned int put_rsp(XenXenstoreState *s)
+{
+    if (s->fatal_error) {
+        return 0;
+    }
+
+    assert(s->rsp_pending);
+
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len);
+
+    void *ptr = s->rsp_data + s->rsp_offset;
+    unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset;
+    unsigned int copylen = copy_to_ring(s, ptr, len);
+
+    s->rsp_offset += copylen;
+
+    /* Have we produced a complete response? */
+    if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len) {
+        reset_rsp(s);
+    }
+
+    return copylen;
+}
+
+static void deliver_watch(XenXenstoreState *s, const char *path,
+                          const char *token)
+{
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    uint8_t *rsp_data = (uint8_t *)&rsp[1];
+    unsigned int len;
+
+    assert(!s->rsp_pending);
+
+    trace_xenstore_watch_event(path, token);
+
+    rsp->type = XS_WATCH_EVENT;
+    rsp->req_id = 0;
+    rsp->tx_id = 0;
+    rsp->len = 0;
+
+    len = strlen(path);
+
+    /* XENSTORE_ABS/REL_PATH_MAX should ensure there can be no overflow */
+    assert(rsp->len + len < XENSTORE_PAYLOAD_MAX);
+
+    memcpy(&rsp_data[rsp->len], path, len);
+    rsp->len += len;
+    rsp_data[rsp->len] = '\0';
+    rsp->len++;
+
+    len = strlen(token);
+    /*
+     * It is possible for the guest to have chosen a token that will
+     * not fit (along with the path) into a watch event. We have no
+     * choice but to drop the event if this is the case.
+     */
+    if (rsp->len + len >= XENSTORE_PAYLOAD_MAX) {
+        return;
+    }
+
+    memcpy(&rsp_data[rsp->len], token, len);
+    rsp->len += len;
+    rsp_data[rsp->len] = '\0';
+    rsp->len++;
+
+    s->rsp_pending = true;
+}
+
+struct watch_event {
+    char *path;
+    char *token;
+};
+
+static void free_watch_event(struct watch_event *ev)
+{
+    if (ev) {
+        g_free(ev->path);
+        g_free(ev->token);
+        g_free(ev);
+    }
+}
+
+static void queue_watch(XenXenstoreState *s, const char *path,
+                        const char *token)
+{
+    struct watch_event *ev = g_new0(struct watch_event, 1);
+
+    ev->path = g_strdup(path);
+    ev->token = g_strdup(token);
+
+    s->watch_events = g_list_append(s->watch_events, ev);
+}
+
+static void fire_watch_cb(void *opaque, const char *path, const char *token)
+{
+    XenXenstoreState *s = opaque;
+
+    assert(qemu_mutex_iothread_locked());
+
+    /*
+     * If there's a response pending, we obviously can't scribble over
+     * it. But if there's a request pending, it has dibs on the buffer
+     * too.
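+     * (Its response will be generated into rsp_data by process_req().)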
+ * + * In the common case of a watch firing due to backend activity + * when the ring was otherwise idle, we should be able to copy the + * strings directly into the rsp_data and thence the actual ring, + * without needing to perform any allocations and queue them. + */ + if (s->rsp_pending || req_pending(s)) { + queue_watch(s, path, token); + } else { + deliver_watch(s, path, token); + /* + * If the message was queued because there was already ring activity, + * no need to wake the guest. But if not, we need to send the evtchn. + */ + xen_be_evtchn_notify(s->eh, s->be_port); + } +} + +static void process_watch_events(XenXenstoreState *s) +{ + struct watch_event *ev = s->watch_events->data; + + deliver_watch(s, ev->path, ev->token); + + s->watch_events = g_list_remove(s->watch_events, ev); + free_watch_event(ev); +} + +static void xen_xenstore_event(void *opaque) +{ + XenXenstoreState *s = opaque; + evtchn_port_t port = xen_be_evtchn_pending(s->eh); + unsigned int copied_to, copied_from; + bool processed, notify = false; + + if (port != s->be_port) { + return; + } + + /* We know this is a no-op. */ + xen_be_evtchn_unmask(s->eh, port); + + do { + copied_to = copied_from = 0; + processed = false; + + if (!s->rsp_pending && s->watch_events) { + process_watch_events(s); + } + + if (s->rsp_pending) { + copied_to = put_rsp(s); + } + + if (!req_pending(s)) { + copied_from = get_req(s); + } + + if (req_pending(s) && !s->rsp_pending && !s->watch_events) { + process_req(s); + processed = true; + } + + notify |= copied_to || copied_from; + } while (copied_to || copied_from || processed); + + if (notify) { + xen_be_evtchn_notify(s->eh, s->be_port); + } +} + +static void alloc_guest_port(XenXenstoreState *s) +{ + struct evtchn_alloc_unbound alloc = { + .dom = DOMID_SELF, + .remote_dom = DOMID_QEMU, + }; + + if (!xen_evtchn_alloc_unbound_op(&alloc)) { + s->guest_port = alloc.port; + } +} + +int xen_xenstore_reset(void) +{ + XenXenstoreState *s = xen_xenstore_singleton; + int err; + + if (!s) { + return -ENOTSUP; + } + + s->req_offset = s->rsp_offset = 0; + s->rsp_pending = false; + + if (!memory_region_is_mapped(&s->xenstore_page)) { + uint64_t gpa = XEN_SPECIAL_PFN(XENSTORE) << TARGET_PAGE_BITS; + xen_overlay_do_map_page(&s->xenstore_page, gpa); + } + + alloc_guest_port(s); + + /* + * As qemu/dom0, bind to the guest's port. For incoming migration, this + * will be unbound as the guest's evtchn table is overwritten. We then + * rebind to the correct guest port in xen_xenstore_post_load(). + */ + err = xen_be_evtchn_bind_interdomain(s->eh, xen_domid, s->guest_port); + if (err < 0) { + return err; + } + s->be_port = err; + + /* + * We don't actually access the guest's page through the grant, because + * this isn't real Xen, and we can just use the page we gave it in the + * first place. Map the grant anyway, mostly for cosmetic purposes so + * it *looks* like it's in use in the guest-visible grant table. 
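+     * (The reserved GNTTAB_RESERVED_XENSTORE entry will then show the
+     * GTF_reading and GTF_writing flags, just as it would under real Xen.)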
+ */ + s->gt = qemu_xen_gnttab_open(); + uint32_t xs_gntref = GNTTAB_RESERVED_XENSTORE; + s->granted_xs = qemu_xen_gnttab_map_refs(s->gt, 1, xen_domid, &xs_gntref, + PROT_READ | PROT_WRITE); + + return 0; +} + +struct qemu_xs_handle { + XenstoreImplState *impl; + GList *watches; + QEMUBH *watch_bh; +}; + +struct qemu_xs_watch { + struct qemu_xs_handle *h; + char *path; + xs_watch_fn fn; + void *opaque; + GList *events; +}; + +static char *xs_be_get_domain_path(struct qemu_xs_handle *h, unsigned int domid) +{ + return g_strdup_printf("/local/domain/%u", domid); +} + +static char **xs_be_directory(struct qemu_xs_handle *h, xs_transaction_t t, + const char *path, unsigned int *num) +{ + GList *items = NULL, *l; + unsigned int i = 0; + char **items_ret; + int err; + + err = xs_impl_directory(h->impl, DOMID_QEMU, t, path, NULL, &items); + if (err) { + errno = err; + return NULL; + } + + items_ret = g_new0(char *, g_list_length(items) + 1); + *num = 0; + for (l = items; l; l = l->next) { + items_ret[i++] = l->data; + (*num)++; + } + g_list_free(items); + return items_ret; +} + +static void *xs_be_read(struct qemu_xs_handle *h, xs_transaction_t t, + const char *path, unsigned int *len) +{ + GByteArray *data = g_byte_array_new(); + bool free_segment = false; + int err; + + err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data); + if (err) { + free_segment = true; + errno = err; + } else { + if (len) { + *len = data->len; + } + /* The xen-bus-helper code expects to get NUL terminated string! */ + g_byte_array_append(data, (void *)"", 1); + } + + return g_byte_array_free(data, free_segment); +} + +static bool xs_be_write(struct qemu_xs_handle *h, xs_transaction_t t, + const char *path, const void *data, unsigned int len) +{ + GByteArray *gdata = g_byte_array_new(); + int err; + + g_byte_array_append(gdata, data, len); + err = xs_impl_write(h->impl, DOMID_QEMU, t, path, gdata); + g_byte_array_unref(gdata); + if (err) { + errno = err; + return false; + } + return true; +} + +static bool xs_be_create(struct qemu_xs_handle *h, xs_transaction_t t, + unsigned int owner, unsigned int domid, + unsigned int perms, const char *path) +{ + g_autoptr(GByteArray) data = g_byte_array_new(); + GList *perms_list = NULL; + int err; + + /* mkdir does this */ + err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data); + if (err == ENOENT) { + err = xs_impl_write(h->impl, DOMID_QEMU, t, path, data); + } + if (err) { + errno = err; + return false; + } + + perms_list = g_list_append(perms_list, + xs_perm_as_string(XS_PERM_NONE, owner)); + perms_list = g_list_append(perms_list, + xs_perm_as_string(perms, domid)); + + err = xs_impl_set_perms(h->impl, DOMID_QEMU, t, path, perms_list); + g_list_free_full(perms_list, g_free); + if (err) { + errno = err; + return false; + } + return true; +} + +static bool xs_be_destroy(struct qemu_xs_handle *h, xs_transaction_t t, + const char *path) +{ + int err = xs_impl_rm(h->impl, DOMID_QEMU, t, path); + if (err) { + errno = err; + return false; + } + return true; +} + +static void be_watch_bh(void *_h) +{ + struct qemu_xs_handle *h = _h; + GList *l; + + for (l = h->watches; l; l = l->next) { + struct qemu_xs_watch *w = l->data; + + while (w->events) { + struct watch_event *ev = w->events->data; + + w->fn(w->opaque, ev->path); + + w->events = g_list_remove(w->events, ev); + free_watch_event(ev); + } + } +} + +static void xs_be_watch_cb(void *opaque, const char *path, const char *token) +{ + struct watch_event *ev = g_new0(struct watch_event, 1); + struct qemu_xs_watch *w = opaque; + + /* We 
don't care about the token */
+    ev->path = g_strdup(path);
+    w->events = g_list_append(w->events, ev);
+
+    qemu_bh_schedule(w->h->watch_bh);
+}
+
+static struct qemu_xs_watch *xs_be_watch(struct qemu_xs_handle *h,
+                                         const char *path, xs_watch_fn fn,
+                                         void *opaque)
+{
+    struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1);
+    int err;
+
+    w->h = h;
+    w->fn = fn;
+    w->opaque = opaque;
+
+    err = xs_impl_watch(h->impl, DOMID_QEMU, path, NULL, xs_be_watch_cb, w);
+    if (err) {
+        errno = err;
+        g_free(w);
+        return NULL;
+    }
+
+    w->path = g_strdup(path);
+    h->watches = g_list_append(h->watches, w);
+    return w;
+}
+
+static void xs_be_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w)
+{
+    xs_impl_unwatch(h->impl, DOMID_QEMU, w->path, NULL, xs_be_watch_cb, w);
+
+    h->watches = g_list_remove(h->watches, w);
+    g_list_free_full(w->events, (GDestroyNotify)free_watch_event);
+    g_free(w->path);
+    g_free(w);
+}
+
+static xs_transaction_t xs_be_transaction_start(struct qemu_xs_handle *h)
+{
+    unsigned int new_tx = XBT_NULL;
+    int err = xs_impl_transaction_start(h->impl, DOMID_QEMU, &new_tx);
+    if (err) {
+        errno = err;
+        return XBT_NULL;
+    }
+    return new_tx;
+}
+
+static bool xs_be_transaction_end(struct qemu_xs_handle *h, xs_transaction_t t,
+                                  bool abort)
+{
+    int err = xs_impl_transaction_end(h->impl, DOMID_QEMU, t, !abort);
+    if (err) {
+        errno = err;
+        return false;
+    }
+    return true;
+}
+
+static struct qemu_xs_handle *xs_be_open(void)
+{
+    XenXenstoreState *s = xen_xenstore_singleton;
+    struct qemu_xs_handle *h;
+
+    if (!s || !s->impl) {
+        errno = ENOSYS;
+        return NULL;
+    }
+
+    h = g_new0(struct qemu_xs_handle, 1);
+    h->impl = s->impl;
+
+    h->watch_bh = aio_bh_new(qemu_get_aio_context(), be_watch_bh, h);
+
+    return h;
+}
+
+static void xs_be_close(struct qemu_xs_handle *h)
+{
+    while (h->watches) {
+        struct qemu_xs_watch *w = h->watches->data;
+        xs_be_unwatch(h, w);
+    }
+
+    qemu_bh_delete(h->watch_bh);
+    g_free(h);
+}
+
+static struct xenstore_backend_ops emu_xenstore_backend_ops = {
+    .open = xs_be_open,
+    .close = xs_be_close,
+    .get_domain_path = xs_be_get_domain_path,
+    .directory = xs_be_directory,
+    .read = xs_be_read,
+    .write = xs_be_write,
+    .create = xs_be_create,
+    .destroy = xs_be_destroy,
+    .watch = xs_be_watch,
+    .unwatch = xs_be_unwatch,
+    .transaction_start = xs_be_transaction_start,
+    .transaction_end = xs_be_transaction_end,
+};
diff --git a/hw/i386/kvm/xen_xenstore.h b/hw/i386/kvm/xen_xenstore.h
new file mode 100644
index 0000000000..8c3768e075
--- /dev/null
+++ b/hw/i386/kvm/xen_xenstore.h
@@ -0,0 +1,20 @@
+/*
+ * QEMU Xen emulation: Xenstore emulation
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_XEN_XENSTORE_H
+#define QEMU_XEN_XENSTORE_H
+
+void xen_xenstore_create(void);
+int xen_xenstore_reset(void);
+
+uint16_t xen_xenstore_get_port(void);
+
+#endif /* QEMU_XEN_XENSTORE_H */
diff --git a/hw/i386/kvm/xenstore_impl.c b/hw/i386/kvm/xenstore_impl.c
new file mode 100644
index 0000000000..305fe75519
--- /dev/null
+++ b/hw/i386/kvm/xenstore_impl.c
@@ -0,0 +1,1927 @@
+/*
+ * QEMU Xen emulation: The actual implementation of XenStore
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse, Paul Durrant
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qom/object.h" + +#include "hw/xen/xen.h" + +#include "xen_xenstore.h" +#include "xenstore_impl.h" + +#include "hw/xen/interface/io/xs_wire.h" + +#define XS_MAX_WATCHES 128 +#define XS_MAX_DOMAIN_NODES 1000 +#define XS_MAX_NODE_SIZE 2048 +#define XS_MAX_TRANSACTIONS 10 +#define XS_MAX_PERMS_PER_NODE 5 + +#define XS_VALID_CHARS "abcdefghijklmnopqrstuvwxyz" \ + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" \ + "0123456789-/_" + +typedef struct XsNode { + uint32_t ref; + GByteArray *content; + GList *perms; + GHashTable *children; + uint64_t gencnt; + bool deleted_in_tx; + bool modified_in_tx; + unsigned int serialized_tx; +#ifdef XS_NODE_UNIT_TEST + gchar *name; /* debug only */ +#endif +} XsNode; + +typedef struct XsWatch { + struct XsWatch *next; + xs_impl_watch_fn *cb; + void *cb_opaque; + char *token; + unsigned int dom_id; + int rel_prefix; +} XsWatch; + +typedef struct XsTransaction { + XsNode *root; + unsigned int nr_nodes; + unsigned int base_tx; + unsigned int tx_id; + unsigned int dom_id; +} XsTransaction; + +struct XenstoreImplState { + XsNode *root; + unsigned int nr_nodes; + GHashTable *watches; + unsigned int nr_domu_watches; + GHashTable *transactions; + unsigned int nr_domu_transactions; + unsigned int root_tx; + unsigned int last_tx; + bool serialized; +}; + + +static void nobble_tx(gpointer key, gpointer value, gpointer user_data) +{ + unsigned int *new_tx_id = user_data; + XsTransaction *tx = value; + + if (tx->base_tx == *new_tx_id) { + /* Transactions based on XBT_NULL will always fail */ + tx->base_tx = XBT_NULL; + } +} + +static inline unsigned int next_tx(struct XenstoreImplState *s) +{ + unsigned int tx_id; + + /* Find the next TX id which isn't either XBT_NULL or in use. */ + do { + tx_id = ++s->last_tx; + } while (tx_id == XBT_NULL || tx_id == s->root_tx || + g_hash_table_lookup(s->transactions, GINT_TO_POINTER(tx_id))); + + /* + * It is vanishingly unlikely, but ensure that no outstanding transaction + * is based on the (previous incarnation of the) newly-allocated TX id. + */ + g_hash_table_foreach(s->transactions, nobble_tx, &tx_id); + + return tx_id; +} + +static inline XsNode *xs_node_new(void) +{ + XsNode *n = g_new0(XsNode, 1); + n->ref = 1; + +#ifdef XS_NODE_UNIT_TEST + nr_xs_nodes++; + xs_node_list = g_list_prepend(xs_node_list, n); +#endif + return n; +} + +static inline XsNode *xs_node_ref(XsNode *n) +{ + /* With just 10 transactions, it can never get anywhere near this. 
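The ID allocation in next_tx() above pairs a search for an unused ID with nobble_tx(), which invalidates any transaction still based on a previous incarnation of the recycled ID. A minimal standalone sketch of the same allocate-unique-ID pattern (hypothetical next_id() and 'live' table, not QEMU code):

    #include <glib.h>

    static unsigned int last_id;

    /* Return an ID which is non-zero and not currently in the 'live' table */
    static unsigned int next_id(GHashTable *live)
    {
        unsigned int id;

        do {
            id = ++last_id;
        } while (id == 0 || g_hash_table_contains(live, GUINT_TO_POINTER(id)));

        return id;
    }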
*/
+    g_assert(n->ref < INT_MAX);
+
+    g_assert(n->ref);
+    n->ref++;
+    return n;
+}
+
+static inline void xs_node_unref(XsNode *n)
+{
+    if (!n) {
+        return;
+    }
+    g_assert(n->ref);
+    if (--n->ref) {
+        return;
+    }
+
+    if (n->content) {
+        g_byte_array_unref(n->content);
+    }
+    if (n->perms) {
+        g_list_free_full(n->perms, g_free);
+    }
+    if (n->children) {
+        g_hash_table_unref(n->children);
+    }
+#ifdef XS_NODE_UNIT_TEST
+    g_free(n->name);
+    nr_xs_nodes--;
+    xs_node_list = g_list_remove(xs_node_list, n);
+#endif
+    g_free(n);
+}
+
+char *xs_perm_as_string(unsigned int perm, unsigned int domid)
+{
+    char letter;
+
+    switch (perm) {
+    case XS_PERM_READ | XS_PERM_WRITE:
+        letter = 'b';
+        break;
+    case XS_PERM_READ:
+        letter = 'r';
+        break;
+    case XS_PERM_WRITE:
+        letter = 'w';
+        break;
+    case XS_PERM_NONE:
+    default:
+        letter = 'n';
+        break;
+    }
+
+    return g_strdup_printf("%c%u", letter, domid);
+}
+
+static gpointer do_perm_copy(gconstpointer src, gpointer user_data)
+{
+    return g_strdup(src);
+}
+
+static XsNode *xs_node_create(const char *name, GList *perms)
+{
+    XsNode *n = xs_node_new();
+
+#ifdef XS_NODE_UNIT_TEST
+    if (name) {
+        n->name = g_strdup(name);
+    }
+#endif
+
+    n->perms = g_list_copy_deep(perms, do_perm_copy, NULL);
+
+    return n;
+}
+
+/* For copying from one hash table to another using g_hash_table_foreach() */
+static void do_child_insert(gpointer key, gpointer value, gpointer user_data)
+{
+    g_hash_table_insert(user_data, g_strdup(key), xs_node_ref(value));
+}
+
+static XsNode *xs_node_copy(XsNode *old)
+{
+    XsNode *n = xs_node_new();
+
+    assert(old);
+    n->gencnt = old->gencnt;
+
+#ifdef XS_NODE_UNIT_TEST
+    if (old->name) {
+        n->name = g_strdup(old->name);
+    }
+#endif
+
+    if (old->children) {
+        n->children = g_hash_table_new_full(g_str_hash, g_str_equal, g_free,
+                                            (GDestroyNotify)xs_node_unref);
+        g_hash_table_foreach(old->children, do_child_insert, n->children);
+    }
+    if (old->perms) {
+        n->perms = g_list_copy_deep(old->perms, do_perm_copy, NULL);
+    }
+    if (old->content) {
+        n->content = g_byte_array_ref(old->content);
+    }
+    return n;
+}
+
+/* Returns true if it made a change to the hash table */
+static bool xs_node_add_child(XsNode *n, const char *path_elem, XsNode *child)
+{
+    assert(!strchr(path_elem, '/'));
+
+    if (!child) {
+        assert(n->children);
+        return g_hash_table_remove(n->children, path_elem);
+    }
+
+#ifdef XS_NODE_UNIT_TEST
+    g_free(child->name);
+    child->name = g_strdup(path_elem);
+#endif
+    if (!n->children) {
+        n->children = g_hash_table_new_full(g_str_hash, g_str_equal, g_free,
+                                            (GDestroyNotify)xs_node_unref);
+    }
+
+    /*
+     * The documentation for g_hash_table_insert() says that it "returns a
+     * boolean value to indicate whether the newly added value was already
+     * in the hash table or not."
+     *
+     * It could perhaps be clearer that returning TRUE means it wasn't.
+     */
+    return g_hash_table_insert(n->children, g_strdup(path_elem), child);
+}
+
+struct walk_op {
+    struct XenstoreImplState *s;
+    char path[XENSTORE_ABS_PATH_MAX + 2]; /* Two NUL terminators */
+    int (*op_fn)(XsNode **n, struct walk_op *op);
+    void *op_opaque;
+    void *op_opaque2;
+
+    GList *watches;
+    unsigned int dom_id;
+    unsigned int tx_id;
+
+    /* The number of nodes which will exist in the tree if this op succeeds. */
+    unsigned int new_nr_nodes;
+
+    /*
+     * This is maintained on the way *down* the walk to indicate
+     * whether nodes can be modified in place or whether COW is
+     * required. It starts off being true, as we're always going to
+     * replace the root node.
If we walk into a shared subtree it + * becomes false. If we start *creating* new nodes for a write, + * it becomes true again. + * + * Do not use it on the way back up. + */ + bool inplace; + bool mutating; + bool create_dirs; + bool in_transaction; + + /* Tracking during recursion so we know which is first. */ + bool deleted_in_tx; +}; + +static void fire_watches(struct walk_op *op, bool parents) +{ + GList *l = NULL; + XsWatch *w; + + if (!op->mutating || op->in_transaction) { + return; + } + + if (parents) { + l = op->watches; + } + + w = g_hash_table_lookup(op->s->watches, op->path); + while (w || l) { + if (!w) { + /* Fire the parent nodes from 'op' if asked to */ + w = l->data; + l = l->next; + continue; + } + + assert(strlen(op->path) > w->rel_prefix); + w->cb(w->cb_opaque, op->path + w->rel_prefix, w->token); + + w = w->next; + } +} + +static int xs_node_add_content(XsNode **n, struct walk_op *op) +{ + GByteArray *data = op->op_opaque; + + if (op->dom_id) { + /* + * The real XenStored includes permissions and names of child nodes + * in the calculated datasize but life's too short. For a single + * tenant internal XenStore, we don't have to be quite as pedantic. + */ + if (data->len > XS_MAX_NODE_SIZE) { + return E2BIG; + } + } + /* We *are* the node to be written. Either this or a copy. */ + if (!op->inplace) { + XsNode *old = *n; + *n = xs_node_copy(old); + xs_node_unref(old); + } + + if ((*n)->content) { + g_byte_array_unref((*n)->content); + } + (*n)->content = g_byte_array_ref(data); + if (op->tx_id != XBT_NULL) { + (*n)->modified_in_tx = true; + } + return 0; +} + +static int xs_node_get_content(XsNode **n, struct walk_op *op) +{ + GByteArray *data = op->op_opaque; + GByteArray *node_data; + + assert(op->inplace); + assert(*n); + + node_data = (*n)->content; + if (node_data) { + g_byte_array_append(data, node_data->data, node_data->len); + } + + return 0; +} + +static int node_rm_recurse(gpointer key, gpointer value, gpointer user_data) +{ + struct walk_op *op = user_data; + int path_len = strlen(op->path); + int key_len = strlen(key); + XsNode *n = value; + bool this_inplace = op->inplace; + + if (n->ref != 1) { + op->inplace = 0; + } + + assert(key_len + path_len + 2 <= sizeof(op->path)); + op->path[path_len] = '/'; + memcpy(op->path + path_len + 1, key, key_len + 1); + + if (n->children) { + g_hash_table_foreach_remove(n->children, node_rm_recurse, op); + } + op->new_nr_nodes--; + + /* + * Fire watches on *this* node but not the parents because they are + * going to be deleted too, so the watch will fire for them anyway. + */ + fire_watches(op, false); + op->path[path_len] = '\0'; + + /* + * Actually deleting the child here is just an optimisation; if we + * don't then the final unref on the topmost victim will just have + * to cascade down again repeating all the g_hash_table_foreach() + * calls. + */ + return this_inplace; +} + +static XsNode *xs_node_copy_deleted(XsNode *old, struct walk_op *op); +static void copy_deleted_recurse(gpointer key, gpointer value, + gpointer user_data) +{ + struct walk_op *op = user_data; + GHashTable *siblings = op->op_opaque2; + XsNode *n = xs_node_copy_deleted(value, op); + + /* + * Reinsert the deleted_in_tx copy of the node into the parent's + * 'children' hash table. Having stashed it from op->op_opaque2 + * before the recursive call to xs_node_copy_deleted() scribbled + * over it. 
+     */
+    g_hash_table_insert(siblings, g_strdup(key), n);
+}
+
+static XsNode *xs_node_copy_deleted(XsNode *old, struct walk_op *op)
+{
+    XsNode *n = xs_node_new();
+
+    n->gencnt = old->gencnt;
+
+#ifdef XS_NODE_UNIT_TEST
+    if (old->name) {
+        n->name = g_strdup(old->name);
+    }
+#endif
+
+    if (old->children) {
+        n->children = g_hash_table_new_full(g_str_hash, g_str_equal, g_free,
+                                            (GDestroyNotify)xs_node_unref);
+        op->op_opaque2 = n->children;
+        g_hash_table_foreach(old->children, copy_deleted_recurse, op);
+    }
+    if (old->perms) {
+        n->perms = g_list_copy_deep(old->perms, do_perm_copy, NULL);
+    }
+    n->deleted_in_tx = true;
+    /* If it gets resurrected we only fire a watch if it lost its content */
+    if (old->content) {
+        n->modified_in_tx = true;
+    }
+    op->new_nr_nodes--;
+    return n;
+}
+
+static int xs_node_rm(XsNode **n, struct walk_op *op)
+{
+    bool this_inplace = op->inplace;
+
+    if (op->tx_id != XBT_NULL) {
+        /* It's not trivial to do inplace handling for this one */
+        XsNode *old = *n;
+        *n = xs_node_copy_deleted(old, op);
+        xs_node_unref(old);
+        return 0;
+    }
+
+    /* Fire watches for, and count, nodes in the subtree which get deleted */
+    if ((*n)->children) {
+        g_hash_table_foreach_remove((*n)->children, node_rm_recurse, op);
+    }
+    op->new_nr_nodes--;
+
+    if (this_inplace) {
+        xs_node_unref(*n);
+    }
+    *n = NULL;
+    return 0;
+}
+
+static int xs_node_get_perms(XsNode **n, struct walk_op *op)
+{
+    GList **perms = op->op_opaque;
+
+    assert(op->inplace);
+    assert(*n);
+
+    *perms = g_list_copy_deep((*n)->perms, do_perm_copy, NULL);
+    return 0;
+}
+
+static void parse_perm(const char *perm, char *letter, unsigned int *dom_id)
+{
+    unsigned int n = sscanf(perm, "%c%u", letter, dom_id);
+
+    assert(n == 2);
+}
+
+static bool can_access(unsigned int dom_id, GList *perms, const char *letters)
+{
+    unsigned int i, n;
+    char perm_letter;
+    unsigned int perm_dom_id;
+    bool access;
+
+    if (dom_id == 0) {
+        return true;
+    }
+
+    n = g_list_length(perms);
+    assert(n >= 1);
+
+    /*
+     * The dom_id of the first perm is the owner, and the owner always has
+     * read-write access.
+     */
+    parse_perm(g_list_nth_data(perms, 0), &perm_letter, &perm_dom_id);
+    if (dom_id == perm_dom_id) {
+        return true;
+    }
+
+    /*
+     * The letter of the first perm specifies the default access for all other
+     * domains.
+     */
+    access = !!strchr(letters, perm_letter);
+    for (i = 1; i < n; i++) {
+        parse_perm(g_list_nth_data(perms, i), &perm_letter, &perm_dom_id);
+        if (dom_id != perm_dom_id) {
+            continue;
+        }
+        access = !!strchr(letters, perm_letter);
+    }
+
+    return access;
+}
+
+static int xs_node_set_perms(XsNode **n, struct walk_op *op)
+{
+    GList *perms = op->op_opaque;
+
+    if (op->dom_id) {
+        unsigned int perm_dom_id;
+        char perm_letter;
+
+        /* A guest may not change permissions on nodes it does not own */
+        if (!can_access(op->dom_id, (*n)->perms, "")) {
+            return EPERM;
+        }
+
+        /* A guest may not change the owner of a node it owns. */
+        parse_perm(perms->data, &perm_letter, &perm_dom_id);
+        if (perm_dom_id != op->dom_id) {
+            return EPERM;
+        }
+
+        if (g_list_length(perms) > XS_MAX_PERMS_PER_NODE) {
+            return ENOSPC;
+        }
+    }
+
+    /* We *are* the node to be written. Either this or a copy.
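As a worked example of the permission strings parsed above (a letter followed by the domid it applies to, with the first entry naming the owner and supplying the default), consider this hypothetical list:

    #include <glib.h>

    int main(void)
    {
        GList *perms = NULL;

        /* Owner is domain 1; the 'n' means no default access for others */
        perms = g_list_append(perms, g_strdup("n1"));
        /* Domain 2 is additionally granted read-only access */
        perms = g_list_append(perms, g_strdup("r2"));

        /*
         * Under can_access() above: dom0 is always allowed, dom1 owns the
         * node so may read and write, dom2 may only read, and every other
         * domain falls back to the default 'n' and gets nothing.
         */
        g_list_free_full(perms, g_free);
        return 0;
    }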
*/ + if (!op->inplace) { + XsNode *old = *n; + *n = xs_node_copy(old); + xs_node_unref(old); + } + + if ((*n)->perms) { + g_list_free_full((*n)->perms, g_free); + } + (*n)->perms = g_list_copy_deep(perms, do_perm_copy, NULL); + if (op->tx_id != XBT_NULL) { + (*n)->modified_in_tx = true; + } + return 0; +} + +/* + * Passed a full reference in *n which it may free if it needs to COW. + * + * When changing the tree, the op->inplace flag indicates whether this + * node may be modified in place (i.e. it and all its parents had a + * refcount of one). If walking down the tree we find a node whose + * refcount is higher, we must clear op->inplace and COW from there + * down. Unless we are creating new nodes as scaffolding for a write + * (which works like 'mkdir -p' does). In which case those newly + * created nodes can (and must) be modified in place again. + */ +static int xs_node_walk(XsNode **n, struct walk_op *op) +{ + char *child_name = NULL; + size_t namelen; + XsNode *old = *n, *child = NULL; + bool stole_child = false; + bool this_inplace; + XsWatch *watch; + int err; + + namelen = strlen(op->path); + watch = g_hash_table_lookup(op->s->watches, op->path); + + /* Is there a child, or do we hit the double-NUL termination? */ + if (op->path[namelen + 1]) { + char *slash; + child_name = op->path + namelen + 1; + slash = strchr(child_name, '/'); + if (slash) { + *slash = '\0'; + } + op->path[namelen] = '/'; + } + + /* If we walk into a subtree which is shared, we must COW */ + if (op->mutating && old->ref != 1) { + op->inplace = false; + } + + if (!child_name) { + const char *letters = op->mutating ? "wb" : "rb"; + + if (!can_access(op->dom_id, old->perms, letters)) { + err = EACCES; + goto out; + } + + /* This is the actual node on which the operation shall be performed */ + err = op->op_fn(n, op); + if (!err) { + fire_watches(op, true); + } + goto out; + } + + /* op->inplace will be further modified during the recursion */ + this_inplace = op->inplace; + + if (old && old->children) { + child = g_hash_table_lookup(old->children, child_name); + /* This is a *weak* reference to 'child', owned by the hash table */ + } + + if (child) { + if (child->deleted_in_tx) { + assert(child->ref == 1); + /* Cannot actually set child->deleted_in_tx = false until later */ + } + xs_node_ref(child); + /* + * Now we own it too. But if we can modify inplace, that's going to + * foil the check and force it to COW. We want to be the *only* owner + * so that it can be modified in place, so remove it from the hash + * table in that case. We'll add it (or its replacement) back later. + */ + if (op->mutating && this_inplace) { + g_hash_table_remove(old->children, child_name); + stole_child = true; + } + } else if (op->create_dirs) { + assert(op->mutating); + + if (!can_access(op->dom_id, old->perms, "wb")) { + err = EACCES; + goto out; + } + + if (op->dom_id && op->new_nr_nodes >= XS_MAX_DOMAIN_NODES) { + err = ENOSPC; + goto out; + } + + child = xs_node_create(child_name, old->perms); + op->new_nr_nodes++; + + /* + * If we're creating a new child, we can clearly modify it (and its + * children) in place from here on down. + */ + op->inplace = true; + } else { + err = ENOENT; + goto out; + } + + /* + * If there's a watch on this node, add it to the list to be fired + * (with the correct full pathname for the modified node) at the end. + */ + if (watch) { + op->watches = g_list_append(op->watches, watch); + } + + /* + * Except for the temporary child-stealing as noted, our node has not + * changed yet. 
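The inplace/COW discipline described in this comment boils down to: clone a node before mutating it whenever someone else still holds a reference. A minimal sketch on a hypothetical refcounted Node type (not the XsNode API itself):

    #include <glib.h>

    typedef struct Node {
        int ref;
        int value;
    } Node;

    /* Ensure *np is exclusively owned, cloning it if it is shared */
    static Node *node_cow(Node **np)
    {
        Node *n = *np;

        if (n->ref > 1) {
            Node *copy = g_new0(Node, 1);

            copy->ref = 1;
            copy->value = n->value;
            n->ref--;           /* drop our share of the original */
            *np = copy;
        }
        return *np;             /* now safe to modify in place */
    }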
We don't yet know the overall operation will complete. + */ + err = xs_node_walk(&child, op); + + if (watch) { + op->watches = g_list_remove(op->watches, watch); + } + + if (err || !op->mutating) { + if (stole_child) { + /* Put it back as it was. */ + g_hash_table_replace(old->children, g_strdup(child_name), child); + } else { + xs_node_unref(child); + } + goto out; + } + + /* + * Now we know the operation has completed successfully and we're on + * the way back up. Make the change, substituting 'child' in the + * node at our level. + */ + if (!this_inplace) { + *n = xs_node_copy(old); + xs_node_unref(old); + } + + /* + * If we resurrected a deleted_in_tx node, we can mark it as no longer + * deleted now that we know the overall operation has succeeded. + */ + if (op->create_dirs && child && child->deleted_in_tx) { + op->new_nr_nodes++; + child->deleted_in_tx = false; + } + + /* + * The child may be NULL here, for a remove operation. Either way, + * xs_node_add_child() will do the right thing and return a value + * indicating whether it changed the parent's hash table or not. + * + * We bump the parent gencnt if it adds a child that we *didn't* + * steal from it in the first place, or if child==NULL and was + * thus removed (whether we stole it earlier and didn't put it + * back, or xs_node_add_child() actually removed it now). + */ + if ((xs_node_add_child(*n, child_name, child) && !stole_child) || !child) { + (*n)->gencnt++; + } + + out: + op->path[namelen] = '\0'; + if (!namelen) { + assert(!op->watches); + /* + * On completing the recursion back up the path walk and reaching the + * top, assign the new node count if the operation was successful. If + * the main tree was changed, bump its tx ID so that outstanding + * transactions correctly fail. But don't bump it every time; only + * if it makes a difference. + */ + if (!err && op->mutating) { + if (!op->in_transaction) { + if (op->s->root_tx != op->s->last_tx) { + op->s->root_tx = next_tx(op->s); + } + op->s->nr_nodes = op->new_nr_nodes; + } else { + XsTransaction *tx = g_hash_table_lookup(op->s->transactions, + GINT_TO_POINTER(op->tx_id)); + assert(tx); + tx->nr_nodes = op->new_nr_nodes; + } + } + } + return err; +} + +static void append_directory_item(gpointer key, gpointer value, + gpointer user_data) +{ + GList **items = user_data; + + *items = g_list_insert_sorted(*items, g_strdup(key), (GCompareFunc)strcmp); +} + +/* Populates items with char * names which caller must free. 
*/
+static int xs_node_directory(XsNode **n, struct walk_op *op)
+{
+    GList **items = op->op_opaque;
+
+    assert(op->inplace);
+    assert(*n);
+
+    if ((*n)->children) {
+        g_hash_table_foreach((*n)->children, append_directory_item, items);
+    }
+
+    if (op->op_opaque2) {
+        *(uint64_t *)op->op_opaque2 = (*n)->gencnt;
+    }
+
+    return 0;
+}
+
+static int validate_path(char *outpath, const char *userpath,
+                         unsigned int dom_id)
+{
+    size_t i, pathlen = strlen(userpath);
+
+    if (!pathlen || userpath[pathlen - 1] == '/' || strstr(userpath, "//")) {
+        return EINVAL;
+    }
+    for (i = 0; i < pathlen; i++) {
+        if (!strchr(XS_VALID_CHARS, userpath[i])) {
+            return EINVAL;
+        }
+    }
+    if (userpath[0] == '/') {
+        if (pathlen > XENSTORE_ABS_PATH_MAX) {
+            return E2BIG;
+        }
+        memcpy(outpath, userpath, pathlen + 1);
+    } else {
+        if (pathlen > XENSTORE_REL_PATH_MAX) {
+            return E2BIG;
+        }
+        snprintf(outpath, XENSTORE_ABS_PATH_MAX, "/local/domain/%u/%s", dom_id,
+                 userpath);
+    }
+    return 0;
+}
+
+
+static int init_walk_op(XenstoreImplState *s, struct walk_op *op,
+                        xs_transaction_t tx_id, unsigned int dom_id,
+                        const char *path, XsNode ***rootp)
+{
+    int ret = validate_path(op->path, path, dom_id);
+    if (ret) {
+        return ret;
+    }
+
+    /*
+     * We use *two* NUL terminators at the end of the path, as during the walk
+     * we will temporarily turn each '/' into a NUL to allow us to use that
+     * path element for the lookup.
+     */
+    op->path[strlen(op->path) + 1] = '\0';
+    op->watches = NULL;
+    op->path[0] = '\0';
+    op->inplace = true;
+    op->mutating = false;
+    op->create_dirs = false;
+    op->in_transaction = false;
+    op->dom_id = dom_id;
+    op->tx_id = tx_id;
+    op->s = s;
+
+    if (tx_id == XBT_NULL) {
+        *rootp = &s->root;
+        op->new_nr_nodes = s->nr_nodes;
+    } else {
+        XsTransaction *tx = g_hash_table_lookup(s->transactions,
+                                                GINT_TO_POINTER(tx_id));
+        if (!tx) {
+            return ENOENT;
+        }
+        *rootp = &tx->root;
+        op->new_nr_nodes = tx->nr_nodes;
+        op->in_transaction = true;
+    }
+
+    return 0;
+}
+
+int xs_impl_read(XenstoreImplState *s, unsigned int dom_id,
+                 xs_transaction_t tx_id, const char *path, GByteArray *data)
+{
+    /*
+     * The data GByteArray shall exist, and will be freed by caller.
+     * Just g_byte_array_append() to it.
+     */
+    struct walk_op op;
+    XsNode **n;
+    int ret;
+
+    ret = init_walk_op(s, &op, tx_id, dom_id, path, &n);
+    if (ret) {
+        return ret;
+    }
+    op.op_fn = xs_node_get_content;
+    op.op_opaque = data;
+    return xs_node_walk(n, &op);
+}
+
+int xs_impl_write(XenstoreImplState *s, unsigned int dom_id,
+                  xs_transaction_t tx_id, const char *path, GByteArray *data)
+{
+    /*
+     * The data GByteArray shall exist, will be freed by caller. You are
+     * free to use g_byte_array_steal() and keep the data. Or just ref it.
+     */
+    struct walk_op op;
+    XsNode **n;
+    int ret;
+
+    ret = init_walk_op(s, &op, tx_id, dom_id, path, &n);
+    if (ret) {
+        return ret;
+    }
+    op.op_fn = xs_node_add_content;
+    op.op_opaque = data;
+    op.mutating = true;
+    op.create_dirs = true;
+    return xs_node_walk(n, &op);
+}
+
+int xs_impl_directory(XenstoreImplState *s, unsigned int dom_id,
+                      xs_transaction_t tx_id, const char *path,
+                      uint64_t *gencnt, GList **items)
+{
+    /*
+     * The items are (char *) to be freed by caller. Although it's consumed
+     * immediately so if you want to change it to (const char *) and keep
+     * them, go ahead and change the caller.
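The two-NUL-terminator trick set up in init_walk_op() above can be seen in isolation below: punch out each '/' in turn so that every path element becomes a usable C string, stopping at the double NUL. (The real walk also restores the separators on its way back up, which this hypothetical demo omits.)

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char path[] = "/local/domain/1/device\0";  /* second NUL at the end */
        char *elem = path;

        path[0] = '\0';                  /* the root node has an empty name */
        while (elem[strlen(elem) + 1]) { /* stop at the double NUL */
            char *slash;

            elem += strlen(elem) + 1;    /* advance to the next element */
            slash = strchr(elem, '/');
            if (slash) {
                *slash = '\0';           /* temporarily terminate it */
            }
            printf("element: %s\n", elem);
            if (!slash) {
                break;
            }
        }
        return 0;
    }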
+ */ + struct walk_op op; + XsNode **n; + int ret; + + ret = init_walk_op(s, &op, tx_id, dom_id, path, &n); + if (ret) { + return ret; + } + op.op_fn = xs_node_directory; + op.op_opaque = items; + op.op_opaque2 = gencnt; + return xs_node_walk(n, &op); +} + +int xs_impl_transaction_start(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t *tx_id) +{ + XsTransaction *tx; + + if (*tx_id != XBT_NULL) { + return EINVAL; + } + + if (dom_id && s->nr_domu_transactions >= XS_MAX_TRANSACTIONS) { + return ENOSPC; + } + + tx = g_new0(XsTransaction, 1); + + tx->nr_nodes = s->nr_nodes; + tx->tx_id = next_tx(s); + tx->base_tx = s->root_tx; + tx->root = xs_node_ref(s->root); + tx->dom_id = dom_id; + + g_hash_table_insert(s->transactions, GINT_TO_POINTER(tx->tx_id), tx); + if (dom_id) { + s->nr_domu_transactions++; + } + *tx_id = tx->tx_id; + return 0; +} + +static gboolean tx_commit_walk(gpointer key, gpointer value, + gpointer user_data) +{ + struct walk_op *op = user_data; + int path_len = strlen(op->path); + int key_len = strlen(key); + bool fire_parents = true; + XsWatch *watch; + XsNode *n = value; + + if (n->ref != 1) { + return false; + } + + if (n->deleted_in_tx) { + /* + * We fire watches on our parents if we are the *first* node + * to be deleted (the topmost one). This matches the behaviour + * when deleting in the live tree. + */ + fire_parents = !op->deleted_in_tx; + + /* Only used on the way down so no need to clear it later */ + op->deleted_in_tx = true; + } + + assert(key_len + path_len + 2 <= sizeof(op->path)); + op->path[path_len] = '/'; + memcpy(op->path + path_len + 1, key, key_len + 1); + + watch = g_hash_table_lookup(op->s->watches, op->path); + if (watch) { + op->watches = g_list_append(op->watches, watch); + } + + if (n->children) { + g_hash_table_foreach_remove(n->children, tx_commit_walk, op); + } + + if (watch) { + op->watches = g_list_remove(op->watches, watch); + } + + /* + * Don't fire watches if this node was only copied because a + * descendent was changed. The modified_in_tx flag indicates the + * ones which were really changed. + */ + if (n->modified_in_tx || n->deleted_in_tx) { + fire_watches(op, fire_parents); + n->modified_in_tx = false; + } + op->path[path_len] = '\0'; + + /* Deleted nodes really do get expunged when we commit */ + return n->deleted_in_tx; +} + +static int transaction_commit(XenstoreImplState *s, XsTransaction *tx) +{ + struct walk_op op; + XsNode **n; + + if (s->root_tx != tx->base_tx) { + return EAGAIN; + } + xs_node_unref(s->root); + s->root = tx->root; + tx->root = NULL; + s->root_tx = tx->tx_id; + s->nr_nodes = tx->nr_nodes; + + init_walk_op(s, &op, XBT_NULL, tx->dom_id, "/", &n); + op.deleted_in_tx = false; + op.mutating = true; + + /* + * Walk the new root and fire watches on any node which has a + * refcount of one (which is therefore unique to this transaction). 
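Together, transaction start and commit give callers optimistic concurrency: if the main tree has moved on since the transaction was started, commit fails with EAGAIN and the caller is expected to retry. A sketch of that usage against the xs_impl_* API (hypothetical update_node() helper; error handling abbreviated):

    static void update_node(XenstoreImplState *s, unsigned int dom_id,
                            const char *path, GByteArray *data)
    {
        int err;

        do {
            xs_transaction_t tx = XBT_NULL;

            if (xs_impl_transaction_start(s, dom_id, &tx)) {
                return;                 /* ENOSPC or EINVAL: give up */
            }
            xs_impl_write(s, dom_id, tx, path, data);
            err = xs_impl_transaction_end(s, dom_id, tx, true);
        } while (err == EAGAIN);        /* tree changed under us: retry */
    }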
+ */ + if (s->root->children) { + g_hash_table_foreach_remove(s->root->children, tx_commit_walk, &op); + } + + return 0; +} + +int xs_impl_transaction_end(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t tx_id, bool commit) +{ + int ret = 0; + XsTransaction *tx = g_hash_table_lookup(s->transactions, + GINT_TO_POINTER(tx_id)); + + if (!tx || tx->dom_id != dom_id) { + return ENOENT; + } + + if (commit) { + ret = transaction_commit(s, tx); + } + + g_hash_table_remove(s->transactions, GINT_TO_POINTER(tx_id)); + if (dom_id) { + assert(s->nr_domu_transactions); + s->nr_domu_transactions--; + } + return ret; +} + +int xs_impl_rm(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t tx_id, const char *path) +{ + struct walk_op op; + XsNode **n; + int ret; + + ret = init_walk_op(s, &op, tx_id, dom_id, path, &n); + if (ret) { + return ret; + } + op.op_fn = xs_node_rm; + op.mutating = true; + return xs_node_walk(n, &op); +} + +int xs_impl_get_perms(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t tx_id, const char *path, GList **perms) +{ + struct walk_op op; + XsNode **n; + int ret; + + ret = init_walk_op(s, &op, tx_id, dom_id, path, &n); + if (ret) { + return ret; + } + op.op_fn = xs_node_get_perms; + op.op_opaque = perms; + return xs_node_walk(n, &op); +} + +static void is_valid_perm(gpointer data, gpointer user_data) +{ + char *perm = data; + bool *valid = user_data; + char letter; + unsigned int dom_id; + + if (!*valid) { + return; + } + + if (sscanf(perm, "%c%u", &letter, &dom_id) != 2) { + *valid = false; + return; + } + + switch (letter) { + case 'n': + case 'r': + case 'w': + case 'b': + break; + + default: + *valid = false; + break; + } +} + +int xs_impl_set_perms(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t tx_id, const char *path, GList *perms) +{ + struct walk_op op; + XsNode **n; + bool valid = true; + int ret; + + if (!g_list_length(perms)) { + return EINVAL; + } + + g_list_foreach(perms, is_valid_perm, &valid); + if (!valid) { + return EINVAL; + } + + ret = init_walk_op(s, &op, tx_id, dom_id, path, &n); + if (ret) { + return ret; + } + op.op_fn = xs_node_set_perms; + op.op_opaque = perms; + op.mutating = true; + return xs_node_walk(n, &op); +} + +static int do_xs_impl_watch(XenstoreImplState *s, unsigned int dom_id, + const char *path, const char *token, + xs_impl_watch_fn fn, void *opaque) + +{ + char abspath[XENSTORE_ABS_PATH_MAX + 1]; + XsWatch *w, *l; + int ret; + + ret = validate_path(abspath, path, dom_id); + if (ret) { + return ret; + } + + /* Check for duplicates */ + l = w = g_hash_table_lookup(s->watches, abspath); + while (w) { + if (!g_strcmp0(token, w->token) && opaque == w->cb_opaque && + fn == w->cb && dom_id == w->dom_id) { + return EEXIST; + } + w = w->next; + } + + if (dom_id && s->nr_domu_watches >= XS_MAX_WATCHES) { + return E2BIG; + } + + w = g_new0(XsWatch, 1); + w->token = g_strdup(token); + w->cb = fn; + w->cb_opaque = opaque; + w->dom_id = dom_id; + w->rel_prefix = strlen(abspath) - strlen(path); + + /* l was looked up above when checking for duplicates */ + if (l) { + w->next = l->next; + l->next = w; + } else { + g_hash_table_insert(s->watches, g_strdup(abspath), w); + } + if (dom_id) { + s->nr_domu_watches++; + } + + return 0; +} + +int xs_impl_watch(XenstoreImplState *s, unsigned int dom_id, const char *path, + const char *token, xs_impl_watch_fn fn, void *opaque) +{ + int ret = do_xs_impl_watch(s, dom_id, path, token, fn, opaque); + + if (!ret) { + /* A new watch should fire immediately */ + fn(opaque, 
path, token);
+    }
+
+    return ret;
+}
+
+static XsWatch *free_watch(XenstoreImplState *s, XsWatch *w)
+{
+    XsWatch *next = w->next;
+
+    if (w->dom_id) {
+        assert(s->nr_domu_watches);
+        s->nr_domu_watches--;
+    }
+
+    g_free(w->token);
+    g_free(w);
+
+    return next;
+}
+
+int xs_impl_unwatch(XenstoreImplState *s, unsigned int dom_id,
+                    const char *path, const char *token,
+                    xs_impl_watch_fn fn, void *opaque)
+{
+    char abspath[XENSTORE_ABS_PATH_MAX + 1];
+    XsWatch *w, **l;
+    int ret;
+
+    ret = validate_path(abspath, path, dom_id);
+    if (ret) {
+        return ret;
+    }
+
+    w = g_hash_table_lookup(s->watches, abspath);
+    if (!w) {
+        return ENOENT;
+    }
+
+    /*
+     * The hash table contains the first element of a list of
+     * watches. Removing the first element in the list is a
+     * special case because we have to update the hash table to
+     * point to the next (or remove it if there's nothing left).
+     */
+    if (!g_strcmp0(token, w->token) && fn == w->cb && opaque == w->cb_opaque &&
+        dom_id == w->dom_id) {
+        if (w->next) {
+            /* Insert the previous 'next' into the hash table */
+            g_hash_table_insert(s->watches, g_strdup(abspath), w->next);
+        } else {
+            /* Nothing left; remove from hash table */
+            g_hash_table_remove(s->watches, abspath);
+        }
+        free_watch(s, w);
+        return 0;
+    }
+
+    /*
+     * We're all done messing with the hash table because the element
+     * it points to has survived the cull. Now it's just a simple
+     * linked list removal operation.
+     */
+    for (l = &w->next; *l; l = &w->next) {
+        w = *l;
+
+        if (!g_strcmp0(token, w->token) && fn == w->cb &&
+            opaque == w->cb_opaque && dom_id == w->dom_id) {
+            *l = free_watch(s, w);
+            return 0;
+        }
+    }
+
+    return ENOENT;
+}
+
+int xs_impl_reset_watches(XenstoreImplState *s, unsigned int dom_id)
+{
+    char **watch_paths;
+    guint nr_watch_paths;
+    guint i;
+
+    watch_paths = (char **)g_hash_table_get_keys_as_array(s->watches,
+                                                          &nr_watch_paths);
+
+    for (i = 0; i < nr_watch_paths; i++) {
+        XsWatch *w1 = g_hash_table_lookup(s->watches, watch_paths[i]);
+        XsWatch *w2, *w, **l;
+
+        /*
+         * w1 is the original list. The hash table has this pointer.
+         * w2 is the head of our newly-filtered list.
+         * w and l are temporary for processing. w is somewhat redundant
+         * with *l but makes my eyes bleed less.
+         */
+
+        w = w2 = w1;
+        l = &w;
+        while (w) {
+            if (w->dom_id == dom_id) {
+                /* If we're freeing the head of the list, bump w2 */
+                if (w2 == w) {
+                    w2 = w->next;
+                }
+                *l = free_watch(s, w);
+            } else {
+                l = &w->next;
+            }
+            w = *l;
+        }
+        /*
+         * If the head of the list survived the cull, we don't need to
+         * touch the hash table and we're done with this path. Else...
+         */
+        if (w1 != w2) {
+            g_hash_table_steal(s->watches, watch_paths[i]);
+
+            /*
+             * It was already freed. (Don't worry, this whole thing is
+             * single-threaded and nobody saw it in the meantime). And
+             * having *stolen* it, we now own the watch_paths[i] string
+             * so if we don't give it back to the hash table, we need
+             * to free it.
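The XsWatch ** traversal above (and in xs_impl_unwatch() before it) is the classic pointer-to-pointer idiom for deleting from a singly-linked list without keeping a separate 'prev' pointer: l always points at the link field that would have to be rewritten. A generic sketch with a hypothetical Item type:

    #include <stdlib.h>

    typedef struct Item {
        struct Item *next;
        int key;
    } Item;

    static void remove_key(Item **head, int key)
    {
        Item **l = head;

        while (*l) {
            Item *it = *l;

            if (it->key == key) {
                *l = it->next;      /* unlink; no 'prev' pointer needed */
                free(it);
            } else {
                l = &it->next;      /* advance to the next link field */
            }
        }
    }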
+             */
+            if (w2) {
+                g_hash_table_insert(s->watches, watch_paths[i], w2);
+            } else {
+                g_free(watch_paths[i]);
+            }
+        }
+    }
+    g_free(watch_paths);
+    return 0;
+}
+
+static void xs_tx_free(void *_tx)
+{
+    XsTransaction *tx = _tx;
+    if (tx->root) {
+        xs_node_unref(tx->root);
+    }
+    g_free(tx);
+}
+
+XenstoreImplState *xs_impl_create(unsigned int dom_id)
+{
+    XenstoreImplState *s = g_new0(XenstoreImplState, 1);
+    GList *perms;
+
+    s->watches = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL);
+    s->transactions = g_hash_table_new_full(g_direct_hash, g_direct_equal,
+                                            NULL, xs_tx_free);
+
+    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, 0));
+    s->root = xs_node_create("/", perms);
+    g_list_free_full(perms, g_free);
+    s->nr_nodes = 1;
+
+    s->root_tx = s->last_tx = 1;
+    return s;
+}
+
+
+static void clear_serialized_tx(gpointer key, gpointer value, gpointer opaque)
+{
+    XsNode *n = value;
+
+    n->serialized_tx = XBT_NULL;
+    if (n->children) {
+        g_hash_table_foreach(n->children, clear_serialized_tx, NULL);
+    }
+}
+
+static void clear_tx_serialized_tx(gpointer key, gpointer value,
+                                   gpointer opaque)
+{
+    XsTransaction *t = value;
+
+    clear_serialized_tx(NULL, t->root, NULL);
+}
+
+static void write_be32(GByteArray *save, uint32_t val)
+{
+    uint32_t be = htonl(val);
+    g_byte_array_append(save, (void *)&be, sizeof(be));
+}
+
+
+struct save_state {
+    GByteArray *bytes;
+    unsigned int tx_id;
+};
+
+#define MODIFIED_IN_TX (1U << 0)
+#define DELETED_IN_TX (1U << 1)
+#define NODE_REF (1U << 2)
+
+static void save_node(gpointer key, gpointer value, gpointer opaque)
+{
+    struct save_state *ss = opaque;
+    XsNode *n = value;
+    char *name = key;
+    uint8_t flag = 0;
+
+    /* Child nodes (i.e. anything but the root) have a name */
+    if (name) {
+        g_byte_array_append(ss->bytes, key, strlen(key) + 1);
+    }
+
+    /*
+     * If we already wrote this node, refer to the previous copy.
+     * There's no rename/move in XenStore, so all we need to find
+     * it is the tx_id of the transaction in which it exists. Which
+     * may be the root tx.
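Because a node can be reachable from the live tree and from several open transactions at once, save_node() writes the full node only the first time and thereafter emits a back-reference keyed by the transaction ID it was first serialized under. A sketch of what that back-reference looks like on the wire, using the flag values defined above (illustrative buffer handling only; buf must have at least five bytes):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <string.h>

    static size_t encode_node_ref(uint8_t *buf, uint32_t tx_id)
    {
        uint32_t be = htonl(tx_id);

        buf[0] = 1U << 2;                  /* the NODE_REF flag, alone */
        memcpy(buf + 1, &be, sizeof(be));  /* tx that holds the real node */
        return 1 + sizeof(be);
    }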
+ */ + if (n->serialized_tx != XBT_NULL) { + flag = NODE_REF; + g_byte_array_append(ss->bytes, &flag, 1); + write_be32(ss->bytes, n->serialized_tx); + } else { + GList *l; + n->serialized_tx = ss->tx_id; + + if (n->modified_in_tx) { + flag |= MODIFIED_IN_TX; + } + if (n->deleted_in_tx) { + flag |= DELETED_IN_TX; + } + g_byte_array_append(ss->bytes, &flag, 1); + + if (n->content) { + write_be32(ss->bytes, n->content->len); + g_byte_array_append(ss->bytes, n->content->data, n->content->len); + } else { + write_be32(ss->bytes, 0); + } + + for (l = n->perms; l; l = l->next) { + g_byte_array_append(ss->bytes, l->data, strlen(l->data) + 1); + } + /* NUL termination after perms */ + g_byte_array_append(ss->bytes, (void *)"", 1); + + if (n->children) { + g_hash_table_foreach(n->children, save_node, ss); + } + /* NUL termination after children (child name is NUL) */ + g_byte_array_append(ss->bytes, (void *)"", 1); + } +} + +static void save_tree(struct save_state *ss, uint32_t tx_id, XsNode *root) +{ + write_be32(ss->bytes, tx_id); + ss->tx_id = tx_id; + save_node(NULL, root, ss); +} + +static void save_tx(gpointer key, gpointer value, gpointer opaque) +{ + uint32_t tx_id = GPOINTER_TO_INT(key); + struct save_state *ss = opaque; + XsTransaction *n = value; + + write_be32(ss->bytes, n->base_tx); + write_be32(ss->bytes, n->dom_id); + + save_tree(ss, tx_id, n->root); +} + +static void save_watch(gpointer key, gpointer value, gpointer opaque) +{ + struct save_state *ss = opaque; + XsWatch *w = value; + + /* We only save the *guest* watches. */ + if (w->dom_id) { + gpointer relpath = key + w->rel_prefix; + g_byte_array_append(ss->bytes, relpath, strlen(relpath) + 1); + g_byte_array_append(ss->bytes, (void *)w->token, strlen(w->token) + 1); + } +} + +GByteArray *xs_impl_serialize(XenstoreImplState *s) +{ + struct save_state ss; + + ss.bytes = g_byte_array_new(); + + /* + * node = flags [ real_node / node_ref ] + * flags = uint8_t (MODIFIED_IN_TX | DELETED_IN_TX | NODE_REF) + * node_ref = tx_id (in which the original version of this node exists) + * real_node = content perms child* NUL + * content = len data + * len = uint32_t + * data = uint8_t{len} + * perms = perm* NUL + * perm = asciiz + * child = name node + * name = asciiz + * + * tree = tx_id node + * tx_id = uint32_t + * + * transaction = base_tx_id dom_id tree + * base_tx_id = uint32_t + * dom_id = uint32_t + * + * tx_list = tree transaction* XBT_NULL + * + * watch = path token + * path = asciiz + * token = asciiz + * + * watch_list = watch* NUL + * + * xs_serialize_stream = last_tx tx_list watch_list + * last_tx = uint32_t + */ + + /* Clear serialized_tx in every node. 
*/ + if (s->serialized) { + clear_serialized_tx(NULL, s->root, NULL); + g_hash_table_foreach(s->transactions, clear_tx_serialized_tx, NULL); + } + + s->serialized = true; + + write_be32(ss.bytes, s->last_tx); + save_tree(&ss, s->root_tx, s->root); + g_hash_table_foreach(s->transactions, save_tx, &ss); + + write_be32(ss.bytes, XBT_NULL); + + g_hash_table_foreach(s->watches, save_watch, &ss); + g_byte_array_append(ss.bytes, (void *)"", 1); + + return ss.bytes; +} + +struct unsave_state { + char path[XENSTORE_ABS_PATH_MAX + 1]; + XenstoreImplState *s; + GByteArray *bytes; + uint8_t *d; + size_t l; + bool root_walk; +}; + +static int consume_be32(struct unsave_state *us, unsigned int *val) +{ + uint32_t d; + + if (us->l < sizeof(d)) { + return -EINVAL; + } + memcpy(&d, us->d, sizeof(d)); + *val = ntohl(d); + us->d += sizeof(d); + us->l -= sizeof(d); + return 0; +} + +static int consume_string(struct unsave_state *us, char **str, size_t *len) +{ + size_t l; + + if (!us->l) { + return -EINVAL; + } + + l = strnlen((void *)us->d, us->l); + if (l == us->l) { + return -EINVAL; + } + + if (str) { + *str = (void *)us->d; + } + if (len) { + *len = l; + } + + us->d += l + 1; + us->l -= l + 1; + return 0; +} + +static XsNode *lookup_node(XsNode *n, char *path) +{ + char *slash = strchr(path, '/'); + XsNode *child; + + if (path[0] == '\0') { + return n; + } + + if (slash) { + *slash = '\0'; + } + + if (!n->children) { + return NULL; + } + child = g_hash_table_lookup(n->children, path); + if (!slash) { + return child; + } + + *slash = '/'; + if (!child) { + return NULL; + } + return lookup_node(child, slash + 1); +} + +static XsNode *lookup_tx_node(struct unsave_state *us, unsigned int tx_id) +{ + XsTransaction *t; + if (tx_id == us->s->root_tx) { + return lookup_node(us->s->root, us->path + 1); + } + + t = g_hash_table_lookup(us->s->transactions, GINT_TO_POINTER(tx_id)); + if (!t) { + return NULL; + } + g_assert(t->root); + return lookup_node(t->root, us->path + 1); +} + +static void count_child_nodes(gpointer key, gpointer value, gpointer user_data) +{ + unsigned int *nr_nodes = user_data; + XsNode *n = value; + + (*nr_nodes)++; + + if (n->children) { + g_hash_table_foreach(n->children, count_child_nodes, nr_nodes); + } +} + +static int consume_node(struct unsave_state *us, XsNode **nodep, + unsigned int *nr_nodes) +{ + XsNode *n = NULL; + uint8_t flags; + int ret; + + if (us->l < 1) { + return -EINVAL; + } + flags = us->d[0]; + us->d++; + us->l--; + + if (flags == NODE_REF) { + unsigned int tx; + + ret = consume_be32(us, &tx); + if (ret) { + return ret; + } + + n = lookup_tx_node(us, tx); + if (!n) { + return -EINVAL; + } + n->ref++; + if (n->children) { + g_hash_table_foreach(n->children, count_child_nodes, nr_nodes); + } + } else { + uint32_t datalen; + + if (flags & ~(DELETED_IN_TX | MODIFIED_IN_TX)) { + return -EINVAL; + } + n = xs_node_new(); + + if (flags & DELETED_IN_TX) { + n->deleted_in_tx = true; + } + if (flags & MODIFIED_IN_TX) { + n->modified_in_tx = true; + } + ret = consume_be32(us, &datalen); + if (ret) { + xs_node_unref(n); + return -EINVAL; + } + if (datalen) { + if (datalen > us->l) { + xs_node_unref(n); + return -EINVAL; + } + + GByteArray *node_data = g_byte_array_new(); + g_byte_array_append(node_data, us->d, datalen); + us->d += datalen; + us->l -= datalen; + n->content = node_data; + + if (us->root_walk) { + n->modified_in_tx = true; + } + } + while (1) { + char *perm = NULL; + size_t permlen = 0; + + ret = consume_string(us, &perm, &permlen); + if (ret) { + xs_node_unref(n); + 
return ret; + } + + if (!permlen) { + break; + } + + n->perms = g_list_append(n->perms, g_strdup(perm)); + } + + /* Now children */ + while (1) { + size_t childlen; + char *childname; + char *pathend; + XsNode *child = NULL; + + ret = consume_string(us, &childname, &childlen); + if (ret) { + xs_node_unref(n); + return ret; + } + + if (!childlen) { + break; + } + + pathend = us->path + strlen(us->path); + strncat(us->path, "/", sizeof(us->path) - 1); + strncat(us->path, childname, sizeof(us->path) - 1); + + ret = consume_node(us, &child, nr_nodes); + *pathend = '\0'; + if (ret) { + xs_node_unref(n); + return ret; + } + g_assert(child); + xs_node_add_child(n, childname, child); + } + + /* + * If the node has no data and no children we still want to fire + * a watch on it. + */ + if (us->root_walk && !n->children) { + n->modified_in_tx = true; + } + } + + if (!n->deleted_in_tx) { + (*nr_nodes)++; + } + + *nodep = n; + return 0; +} + +static int consume_tree(struct unsave_state *us, XsTransaction *t) +{ + int ret; + + ret = consume_be32(us, &t->tx_id); + if (ret) { + return ret; + } + + if (t->tx_id > us->s->last_tx) { + return -EINVAL; + } + + us->path[0] = '\0'; + + return consume_node(us, &t->root, &t->nr_nodes); +} + +int xs_impl_deserialize(XenstoreImplState *s, GByteArray *bytes, + unsigned int dom_id, xs_impl_watch_fn watch_fn, + void *watch_opaque) +{ + struct unsave_state us; + XsTransaction base_t = { 0 }; + int ret; + + us.s = s; + us.bytes = bytes; + us.d = bytes->data; + us.l = bytes->len; + + xs_impl_reset_watches(s, dom_id); + g_hash_table_remove_all(s->transactions); + + xs_node_unref(s->root); + s->root = NULL; + s->root_tx = s->last_tx = XBT_NULL; + + ret = consume_be32(&us, &s->last_tx); + if (ret) { + return ret; + } + + /* + * Consume the base tree into a transaction so that watches can be + * fired as we commit it. By setting us.root_walk we cause the nodes + * to be marked as 'modified_in_tx' as they are created, so that the + * watches are triggered on them. + */ + base_t.dom_id = dom_id; + base_t.base_tx = XBT_NULL; + us.root_walk = true; + ret = consume_tree(&us, &base_t); + if (ret) { + return ret; + } + us.root_walk = false; + + /* + * Commit the transaction now while the refcount on all nodes is 1. + * Note that we haven't yet reinstated the *guest* watches but that's + * OK because we don't want the guest to see any changes. Even any + * backend nodes which get recreated should be *precisely* as they + * were before the migration. Back ends may have been instantiated + * already, and will see the frontend magically blink into existence + * now (well, from the aio_bh which fires the watches). It's their + * responsibility to rebuild everything precisely as it was before. 
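A save/restore cycle through this pair of functions might look like the sketch below (hypothetical roundtrip() helper; the watch callback body and error handling are omitted):

    static void demo_watch(void *opaque, const char *path, const char *token)
    {
        /* fired for each restored node, as described above */
    }

    static XenstoreImplState *roundtrip(XenstoreImplState *src,
                                        unsigned int dom_id)
    {
        GByteArray *bytes = xs_impl_serialize(src);
        XenstoreImplState *dst = xs_impl_create(dom_id);

        xs_impl_deserialize(dst, bytes, dom_id, demo_watch, NULL);
        g_byte_array_unref(bytes);
        return dst;
    }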
+ */ + ret = transaction_commit(s, &base_t); + if (ret) { + return ret; + } + + while (1) { + unsigned int base_tx; + XsTransaction *t; + + ret = consume_be32(&us, &base_tx); + if (ret) { + return ret; + } + if (base_tx == XBT_NULL) { + break; + } + + t = g_new0(XsTransaction, 1); + t->base_tx = base_tx; + + ret = consume_be32(&us, &t->dom_id); + if (!ret) { + ret = consume_tree(&us, t); + } + if (ret) { + g_free(t); + return ret; + } + g_assert(t->root); + if (t->dom_id) { + s->nr_domu_transactions++; + } + g_hash_table_insert(s->transactions, GINT_TO_POINTER(t->tx_id), t); + } + + while (1) { + char *path, *token; + size_t pathlen, toklen; + + ret = consume_string(&us, &path, &pathlen); + if (ret) { + return ret; + } + if (!pathlen) { + break; + } + + ret = consume_string(&us, &token, &toklen); + if (ret) { + return ret; + } + + if (!watch_fn) { + continue; + } + + ret = do_xs_impl_watch(s, dom_id, path, token, watch_fn, watch_opaque); + if (ret) { + return ret; + } + } + + if (us.l) { + return -EINVAL; + } + + return 0; +} diff --git a/hw/i386/kvm/xenstore_impl.h b/hw/i386/kvm/xenstore_impl.h new file mode 100644 index 0000000000..0df2a91aae --- /dev/null +++ b/hw/i386/kvm/xenstore_impl.h @@ -0,0 +1,63 @@ +/* + * QEMU Xen emulation: The actual implementation of XenStore + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Authors: David Woodhouse + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_XENSTORE_IMPL_H +#define QEMU_XENSTORE_IMPL_H + +#include "hw/xen/xen_backend_ops.h" + +typedef struct XenstoreImplState XenstoreImplState; + +XenstoreImplState *xs_impl_create(unsigned int dom_id); + +char *xs_perm_as_string(unsigned int perm, unsigned int domid); + +/* + * These functions return *positive* error numbers. This is a little + * unconventional but it helps to keep us honest because there is + * also a very limited set of error numbers that they are permitted + * to return (those in xsd_errors). 
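The libxenstore-style wrappers earlier in this series follow exactly that convention, copying the positive return value straight into errno; a minimal sketch of a caller:

    static bool example_rm(XenstoreImplState *s, const char *path)
    {
        int err = xs_impl_rm(s, DOMID_QEMU, XBT_NULL, path);

        if (err) {
            errno = err;    /* already positive, no negation needed */
            return false;
        }
        return true;
    }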
+ */ + +int xs_impl_read(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t tx_id, const char *path, GByteArray *data); +int xs_impl_write(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t tx_id, const char *path, GByteArray *data); +int xs_impl_directory(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t tx_id, const char *path, + uint64_t *gencnt, GList **items); +int xs_impl_transaction_start(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t *tx_id); +int xs_impl_transaction_end(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t tx_id, bool commit); +int xs_impl_rm(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t tx_id, const char *path); +int xs_impl_get_perms(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t tx_id, const char *path, GList **perms); +int xs_impl_set_perms(XenstoreImplState *s, unsigned int dom_id, + xs_transaction_t tx_id, const char *path, GList *perms); + +/* This differs from xs_watch_fn because it has the token */ +typedef void(xs_impl_watch_fn)(void *opaque, const char *path, + const char *token); +int xs_impl_watch(XenstoreImplState *s, unsigned int dom_id, const char *path, + const char *token, xs_impl_watch_fn fn, void *opaque); +int xs_impl_unwatch(XenstoreImplState *s, unsigned int dom_id, + const char *path, const char *token, xs_impl_watch_fn fn, + void *opaque); +int xs_impl_reset_watches(XenstoreImplState *s, unsigned int dom_id); + +GByteArray *xs_impl_serialize(XenstoreImplState *s); +int xs_impl_deserialize(XenstoreImplState *s, GByteArray *bytes, + unsigned int dom_id, xs_impl_watch_fn watch_fn, + void *watch_opaque); + +#endif /* QEMU_XENSTORE_IMPL_H */ diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c index 29f30dd6d3..3d606a20b4 100644 --- a/hw/i386/microvm.c +++ b/hw/i386/microvm.c @@ -57,14 +57,14 @@ #define MICROVM_QBOOT_FILENAME "qboot.rom" #define MICROVM_BIOS_FILENAME "bios-microvm.bin" -static void microvm_set_rtc(MicrovmMachineState *mms, ISADevice *s) +static void microvm_set_rtc(MicrovmMachineState *mms, MC146818RtcState *s) { X86MachineState *x86ms = X86_MACHINE(mms); int val; val = MIN(x86ms->below_4g_mem_size / KiB, 640); - rtc_set_memory(s, 0x15, val); - rtc_set_memory(s, 0x16, val >> 8); + mc146818rtc_set_cmos_data(s, 0x15, val); + mc146818rtc_set_cmos_data(s, 0x16, val >> 8); /* extended memory (next 64MiB) */ if (x86ms->below_4g_mem_size > 1 * MiB) { val = (x86ms->below_4g_mem_size - 1 * MiB) / KiB; @@ -74,10 +74,10 @@ static void microvm_set_rtc(MicrovmMachineState *mms, ISADevice *s) if (val > 65535) { val = 65535; } - rtc_set_memory(s, 0x17, val); - rtc_set_memory(s, 0x18, val >> 8); - rtc_set_memory(s, 0x30, val); - rtc_set_memory(s, 0x31, val >> 8); + mc146818rtc_set_cmos_data(s, 0x17, val); + mc146818rtc_set_cmos_data(s, 0x18, val >> 8); + mc146818rtc_set_cmos_data(s, 0x30, val); + mc146818rtc_set_cmos_data(s, 0x31, val >> 8); /* memory between 16MiB and 4GiB */ if (x86ms->below_4g_mem_size > 16 * MiB) { val = (x86ms->below_4g_mem_size - 16 * MiB) / (64 * KiB); @@ -87,13 +87,13 @@ static void microvm_set_rtc(MicrovmMachineState *mms, ISADevice *s) if (val > 65535) { val = 65535; } - rtc_set_memory(s, 0x34, val); - rtc_set_memory(s, 0x35, val >> 8); + mc146818rtc_set_cmos_data(s, 0x34, val); + mc146818rtc_set_cmos_data(s, 0x35, val >> 8); /* memory above 4GiB */ val = x86ms->above_4g_mem_size / 65536; - rtc_set_memory(s, 0x5b, val); - rtc_set_memory(s, 0x5c, val >> 8); - rtc_set_memory(s, 0x5d, val >> 16); + mc146818rtc_set_cmos_data(s, 0x5b, 
val); + mc146818rtc_set_cmos_data(s, 0x5c, val >> 8); + mc146818rtc_set_cmos_data(s, 0x5d, val >> 16); } static void create_gpex(MicrovmMachineState *mms) @@ -161,7 +161,6 @@ static void microvm_devices_init(MicrovmMachineState *mms) const char *default_firmware; X86MachineState *x86ms = X86_MACHINE(mms); ISABus *isa_bus; - ISADevice *rtc_state; GSIState *gsi_state; int ioapics; int i; @@ -174,7 +173,7 @@ static void microvm_devices_init(MicrovmMachineState *mms) isa_bus = isa_bus_new(NULL, get_system_memory(), get_system_io(), &error_abort); - isa_bus_irqs(isa_bus, x86ms->gsi); + isa_bus_register_input_irqs(isa_bus, x86ms->gsi); ioapic_init_gsi(gsi_state, "machine"); if (ioapics > 1) { @@ -267,8 +266,7 @@ static void microvm_devices_init(MicrovmMachineState *mms) if (mms->rtc == ON_OFF_AUTO_ON || (mms->rtc == ON_OFF_AUTO_AUTO && !kvm_enabled())) { - rtc_state = mc146818_rtc_init(isa_bus, 2000, NULL); - microvm_set_rtc(mms, rtc_state); + microvm_set_rtc(mms, mc146818_rtc_init(isa_bus, 2000, NULL)); } if (mms->isa_serial) { @@ -330,7 +328,7 @@ static void microvm_memory_init(MicrovmMachineState *mms) rom_set_fw(fw_cfg); if (machine->kernel_filename != NULL) { - x86_load_linux(x86ms, fw_cfg, 0, true, false); + x86_load_linux(x86ms, fw_cfg, 0, true); } if (mms->option_roms) { @@ -378,8 +376,7 @@ static void microvm_fix_kernel_cmdline(MachineState *machine) MicrovmMachineState *mms = MICROVM_MACHINE(machine); BusState *bus; BusChild *kid; - char *cmdline, *existing_cmdline; - size_t len; + char *cmdline; /* * Find MMIO transports with attached devices, and add them to the kernel @@ -388,8 +385,7 @@ static void microvm_fix_kernel_cmdline(MachineState *machine) * Yes, this is a hack, but one that heavily improves the UX without * introducing any significant issues. 
*/ - existing_cmdline = fw_cfg_read_bytes_ptr(x86ms->fw_cfg, FW_CFG_CMDLINE_DATA); - cmdline = g_strdup(existing_cmdline); + cmdline = g_strdup(machine->kernel_cmdline); bus = sysbus_get_default(); QTAILQ_FOREACH(kid, &bus->children, sibling) { DeviceState *dev = kid->child; @@ -413,12 +409,9 @@ static void microvm_fix_kernel_cmdline(MachineState *machine) } } - len = strlen(cmdline); - if (len > VIRTIO_CMDLINE_TOTAL_MAX_LEN + strlen(existing_cmdline)) { - fprintf(stderr, "qemu: virtio mmio cmdline too large, skipping\n"); - } else { - memcpy(existing_cmdline, cmdline, len + 1); - } + fw_cfg_modify_i32(x86ms->fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(cmdline) + 1); + fw_cfg_modify_string(x86ms->fw_cfg, FW_CFG_CMDLINE_DATA, cmdline); + g_free(cmdline); } diff --git a/hw/i386/pc.c b/hw/i386/pc.c index a7a2ededf9..bb62c994fa 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -28,13 +28,13 @@ #include "hw/i386/pc.h" #include "hw/char/serial.h" #include "hw/char/parallel.h" -#include "hw/i386/apic.h" #include "hw/i386/topology.h" #include "hw/i386/fw_cfg.h" #include "hw/i386/vmport.h" #include "sysemu/cpus.h" #include "hw/block/fdc.h" -#include "hw/ide.h" +#include "hw/ide/internal.h" +#include "hw/ide/isa.h" #include "hw/pci/pci.h" #include "hw/pci/pci_bus.h" #include "hw/pci-bridge/pci_expander_bridge.h" @@ -47,6 +47,7 @@ #include "multiboot.h" #include "hw/rtc/mc146818rtc.h" #include "hw/intc/i8259.h" +#include "hw/intc/ioapic.h" #include "hw/timer/i8254.h" #include "hw/input/i8042.h" #include "hw/irq.h" @@ -89,6 +90,10 @@ #include "hw/virtio/virtio-iommu.h" #include "hw/virtio/virtio-pmem-pci.h" #include "hw/virtio/virtio-mem-pci.h" +#include "hw/i386/kvm/xen_overlay.h" +#include "hw/i386/kvm/xen_evtchn.h" +#include "hw/i386/kvm/xen_gnttab.h" +#include "hw/i386/kvm/xen_xenstore.h" #include "hw/mem/memory-device.h" #include "sysemu/replay.h" #include "target/i386/cpu.h" @@ -97,6 +102,11 @@ #include "trace.h" #include CONFIG_DEVICES +#ifdef CONFIG_XEN_EMU +#include "hw/xen/xen-legacy-backend.h" +#include "hw/xen/xen-bus.h" +#endif + /* * Helper for setting model-id for CPU models that changed model-id * depending on QEMU versions up to QEMU 2.4. 
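For context, compat arrays such as the pc_compat_8_0[] added in the hunk below are appended to a machine class's compat_props by the older versioned machine types, so that guest-visible defaults stay frozen for those machines. A sketch of that wiring (the machine-options function name here is hypothetical):

    static void pc_i440fx_8_0_machine_options(MachineClass *m)
    {
        /* pin 8.0-era defaults, e.g. virtio-mem unplugged-inaccessible */
        compat_props_add(m->compat_props, pc_compat_8_0, pc_compat_8_0_len);
    }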
@@ -106,6 +116,11 @@ { "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\ { "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, }, +GlobalProperty pc_compat_8_0[] = { + { "virtio-mem", "unplugged-inaccessible", "auto" }, +}; +const size_t pc_compat_8_0_len = G_N_ELEMENTS(pc_compat_8_0); + GlobalProperty pc_compat_7_2[] = { { "ICH9-LPC", "noreboot", "true" }, }; @@ -405,7 +420,7 @@ GSIState *pc_gsi_create(qemu_irq **irqs, bool pci_enabled) if (kvm_ioapic_in_kernel()) { kvm_pc_setup_irq_routing(pci_enabled); } - *irqs = qemu_allocate_irqs(gsi_handler, s, GSI_NUM_PINS); + *irqs = qemu_allocate_irqs(gsi_handler, s, IOAPIC_NUM_PINS); return s; } @@ -438,19 +453,19 @@ static uint64_t ioportF0_read(void *opaque, hwaddr addr, unsigned size) #define REG_EQUIPMENT_BYTE 0x14 -static void cmos_init_hd(ISADevice *s, int type_ofs, int info_ofs, +static void cmos_init_hd(MC146818RtcState *s, int type_ofs, int info_ofs, int16_t cylinders, int8_t heads, int8_t sectors) { - rtc_set_memory(s, type_ofs, 47); - rtc_set_memory(s, info_ofs, cylinders); - rtc_set_memory(s, info_ofs + 1, cylinders >> 8); - rtc_set_memory(s, info_ofs + 2, heads); - rtc_set_memory(s, info_ofs + 3, 0xff); - rtc_set_memory(s, info_ofs + 4, 0xff); - rtc_set_memory(s, info_ofs + 5, 0xc0 | ((heads > 8) << 3)); - rtc_set_memory(s, info_ofs + 6, cylinders); - rtc_set_memory(s, info_ofs + 7, cylinders >> 8); - rtc_set_memory(s, info_ofs + 8, sectors); + mc146818rtc_set_cmos_data(s, type_ofs, 47); + mc146818rtc_set_cmos_data(s, info_ofs, cylinders); + mc146818rtc_set_cmos_data(s, info_ofs + 1, cylinders >> 8); + mc146818rtc_set_cmos_data(s, info_ofs + 2, heads); + mc146818rtc_set_cmos_data(s, info_ofs + 3, 0xff); + mc146818rtc_set_cmos_data(s, info_ofs + 4, 0xff); + mc146818rtc_set_cmos_data(s, info_ofs + 5, 0xc0 | ((heads > 8) << 3)); + mc146818rtc_set_cmos_data(s, info_ofs + 6, cylinders); + mc146818rtc_set_cmos_data(s, info_ofs + 7, cylinders >> 8); + mc146818rtc_set_cmos_data(s, info_ofs + 8, sectors); } /* convert boot_device letter to something recognizable by the bios */ @@ -470,7 +485,8 @@ static int boot_device2nibble(char boot_device) return 0; } -static void set_boot_dev(ISADevice *s, const char *boot_device, Error **errp) +static void set_boot_dev(MC146818RtcState *s, const char *boot_device, + Error **errp) { #define PC_MAX_BOOT_DEVICES 3 int nbds, bds[3] = { 0, }; @@ -489,8 +505,8 @@ static void set_boot_dev(ISADevice *s, const char *boot_device, Error **errp) return; } } - rtc_set_memory(s, 0x3d, (bds[1] << 4) | bds[0]); - rtc_set_memory(s, 0x38, (bds[2] << 4) | (fd_bootchk ? 0x0 : 0x1)); + mc146818rtc_set_cmos_data(s, 0x3d, (bds[1] << 4) | bds[0]); + mc146818rtc_set_cmos_data(s, 0x38, (bds[2] << 4) | (fd_bootchk ? 
0x0 : 0x1)); } static void pc_boot_set(void *opaque, const char *boot_device, Error **errp) @@ -498,7 +514,7 @@ static void pc_boot_set(void *opaque, const char *boot_device, Error **errp) set_boot_dev(opaque, boot_device, errp); } -static void pc_cmos_init_floppy(ISADevice *rtc_state, ISADevice *floppy) +static void pc_cmos_init_floppy(MC146818RtcState *rtc_state, ISADevice *floppy) { int val, nb, i; FloppyDriveType fd_type[2] = { FLOPPY_DRIVE_TYPE_NONE, @@ -512,9 +528,9 @@ static void pc_cmos_init_floppy(ISADevice *rtc_state, ISADevice *floppy) } val = (cmos_get_fd_drive_type(fd_type[0]) << 4) | cmos_get_fd_drive_type(fd_type[1]); - rtc_set_memory(rtc_state, 0x10, val); + mc146818rtc_set_cmos_data(rtc_state, 0x10, val); - val = rtc_get_memory(rtc_state, REG_EQUIPMENT_BYTE); + val = mc146818rtc_get_cmos_data(rtc_state, REG_EQUIPMENT_BYTE); nb = 0; if (fd_type[0] != FLOPPY_DRIVE_TYPE_NONE) { nb++; @@ -532,11 +548,11 @@ static void pc_cmos_init_floppy(ISADevice *rtc_state, ISADevice *floppy) val |= 0x41; /* 2 drives, ready for boot */ break; } - rtc_set_memory(rtc_state, REG_EQUIPMENT_BYTE, val); + mc146818rtc_set_cmos_data(rtc_state, REG_EQUIPMENT_BYTE, val); } typedef struct pc_cmos_init_late_arg { - ISADevice *rtc_state; + MC146818RtcState *rtc_state; BusState *idebus[2]; } pc_cmos_init_late_arg; @@ -603,7 +619,7 @@ static ISADevice *pc_find_fdc0(void) static void pc_cmos_init_late(void *opaque) { pc_cmos_init_late_arg *arg = opaque; - ISADevice *s = arg->rtc_state; + MC146818RtcState *s = arg->rtc_state; int16_t cylinders; int8_t heads, sectors; int val; @@ -620,7 +636,7 @@ static void pc_cmos_init_late(void *opaque) cmos_init_hd(s, 0x1a, 0x24, cylinders, heads, sectors); val |= 0x0f; } - rtc_set_memory(s, 0x12, val); + mc146818rtc_set_cmos_data(s, 0x12, val); val = 0; for (i = 0; i < 4; i++) { @@ -636,7 +652,7 @@ static void pc_cmos_init_late(void *opaque) val |= trans << (i * 2); } } - rtc_set_memory(s, 0x39, val); + mc146818rtc_set_cmos_data(s, 0x39, val); pc_cmos_init_floppy(s, pc_find_fdc0()); @@ -645,19 +661,20 @@ static void pc_cmos_init_late(void *opaque) void pc_cmos_init(PCMachineState *pcms, BusState *idebus0, BusState *idebus1, - ISADevice *s) + ISADevice *rtc) { int val; static pc_cmos_init_late_arg arg; X86MachineState *x86ms = X86_MACHINE(pcms); + MC146818RtcState *s = MC146818_RTC(rtc); /* various important CMOS locations needed by PC/Bochs bios */ /* memory size */ /* base memory (first MiB) */ val = MIN(x86ms->below_4g_mem_size / KiB, 640); - rtc_set_memory(s, 0x15, val); - rtc_set_memory(s, 0x16, val >> 8); + mc146818rtc_set_cmos_data(s, 0x15, val); + mc146818rtc_set_cmos_data(s, 0x16, val >> 8); /* extended memory (next 64MiB) */ if (x86ms->below_4g_mem_size > 1 * MiB) { val = (x86ms->below_4g_mem_size - 1 * MiB) / KiB; @@ -666,10 +683,10 @@ void pc_cmos_init(PCMachineState *pcms, } if (val > 65535) val = 65535; - rtc_set_memory(s, 0x17, val); - rtc_set_memory(s, 0x18, val >> 8); - rtc_set_memory(s, 0x30, val); - rtc_set_memory(s, 0x31, val >> 8); + mc146818rtc_set_cmos_data(s, 0x17, val); + mc146818rtc_set_cmos_data(s, 0x18, val >> 8); + mc146818rtc_set_cmos_data(s, 0x30, val); + mc146818rtc_set_cmos_data(s, 0x31, val >> 8); /* memory between 16MiB and 4GiB */ if (x86ms->below_4g_mem_size > 16 * MiB) { val = (x86ms->below_4g_mem_size - 16 * MiB) / (64 * KiB); @@ -678,13 +695,13 @@ void pc_cmos_init(PCMachineState *pcms, } if (val > 65535) val = 65535; - rtc_set_memory(s, 0x34, val); - rtc_set_memory(s, 0x35, val >> 8); + mc146818rtc_set_cmos_data(s, 0x34, val); + 
mc146818rtc_set_cmos_data(s, 0x35, val >> 8); /* memory above 4GiB */ val = x86ms->above_4g_mem_size / 65536; - rtc_set_memory(s, 0x5b, val); - rtc_set_memory(s, 0x5c, val >> 8); - rtc_set_memory(s, 0x5d, val >> 16); + mc146818rtc_set_cmos_data(s, 0x5b, val); + mc146818rtc_set_cmos_data(s, 0x5c, val >> 8); + mc146818rtc_set_cmos_data(s, 0x5d, val >> 16); object_property_add_link(OBJECT(pcms), "rtc_state", TYPE_ISA_DEVICE, @@ -699,7 +716,7 @@ void pc_cmos_init(PCMachineState *pcms, val = 0; val |= 0x02; /* FPU is there */ val |= 0x04; /* PS/2 mouse installed */ - rtc_set_memory(s, REG_EQUIPMENT_BYTE, val); + mc146818rtc_set_cmos_data(s, REG_EQUIPMENT_BYTE, val); /* hard drives and FDC */ arg.rtc_state = s; @@ -803,7 +820,7 @@ void xen_load_linux(PCMachineState *pcms) rom_set_fw(fw_cfg); x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, - pcmc->pvh_enabled, pcmc->legacy_no_rng_seed); + pcmc->pvh_enabled); for (i = 0; i < nb_option_roms; i++) { assert(!strcmp(option_rom[i].name, "linuxboot.bin") || !strcmp(option_rom[i].name, "linuxboot_dma.bin") || @@ -935,7 +952,6 @@ static hwaddr pc_max_used_gpa(PCMachineState *pcms, uint64_t pci_hole64_size) void pc_memory_init(PCMachineState *pcms, MemoryRegion *system_memory, MemoryRegion *rom_memory, - MemoryRegion **ram_memory, uint64_t pci_hole64_size) { int linux_boot, i; @@ -993,7 +1009,6 @@ void pc_memory_init(PCMachineState *pcms, * Split single memory region and use aliases to address portions of it, * done for backwards compatibility with older qemus. */ - *ram_memory = machine->ram; ram_below_4g = g_malloc(sizeof(*ram_below_4g)); memory_region_init_alias(ram_below_4g, NULL, "ram-below-4g", machine->ram, 0, x86ms->below_4g_mem_size); @@ -1123,7 +1138,7 @@ void pc_memory_init(PCMachineState *pcms, if (linux_boot) { x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, - pcmc->pvh_enabled, pcmc->legacy_no_rng_seed); + pcmc->pvh_enabled); } for (i = 0; i < nb_option_roms; i++) { @@ -1250,7 +1265,7 @@ static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl, void pc_basic_device_init(struct PCMachineState *pcms, ISABus *isa_bus, qemu_irq *gsi, - ISADevice **rtc_state, + ISADevice *rtc_state, bool create_fdctrl, uint32_t hpet_irqs) { @@ -1296,16 +1311,37 @@ void pc_basic_device_init(struct PCMachineState *pcms, sysbus_realize_and_unref(SYS_BUS_DEVICE(hpet), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(hpet), 0, HPET_BASE); - for (i = 0; i < GSI_NUM_PINS; i++) { + for (i = 0; i < IOAPIC_NUM_PINS; i++) { sysbus_connect_irq(SYS_BUS_DEVICE(hpet), i, gsi[i]); } pit_isa_irq = -1; pit_alt_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_PIT_INT); rtc_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_RTC_INT); } - *rtc_state = mc146818_rtc_init(isa_bus, 2000, rtc_irq); - qemu_register_boot_set(pc_boot_set, *rtc_state); + if (rtc_irq) { + qdev_connect_gpio_out(DEVICE(rtc_state), 0, rtc_irq); + } else { + uint32_t irq = object_property_get_uint(OBJECT(rtc_state), + "irq", + &error_fatal); + isa_connect_gpio_out(rtc_state, 0, irq); + } + object_property_add_alias(OBJECT(pcms), "rtc-time", OBJECT(rtc_state), + "date"); + +#ifdef CONFIG_XEN_EMU + if (xen_mode == XEN_EMULATE) { + xen_evtchn_connect_gsis(gsi); + if (pcms->bus) { + pci_create_simple(pcms->bus, -1, "xen-platform"); + } + xen_bus_init(); + xen_be_init(); + } +#endif + + qemu_register_boot_set(pc_boot_set, rtc_state); if (!xen_enabled() && (x86ms->pit == ON_OFF_AUTO_AUTO || x86ms->pit == ON_OFF_AUTO_ON)) { @@ -1328,12 +1364,13 @@ void pc_basic_device_init(struct PCMachineState *pcms, void 
pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus) { + MachineClass *mc = MACHINE_CLASS(pcmc); int i; rom_set_order_override(FW_CFG_ORDER_OVERRIDE_NIC); for (i = 0; i < nb_nics; i++) { NICInfo *nd = &nd_table[i]; - const char *model = nd->model ? nd->model : pcmc->default_nic_model; + const char *model = nd->model ? nd->model : mc->default_nic; if (g_str_equal(model, "ne2k_isa")) { pc_init_ne2k_isa(isa_bus, nd); @@ -1843,6 +1880,19 @@ static void pc_machine_initfn(Object *obj) cxl_machine_init(obj, &pcms->cxl_devices_state); } +int pc_machine_kvm_type(MachineState *machine, const char *kvm_type) +{ +#ifdef CONFIG_XEN_EMU + if (xen_mode == XEN_EMULATE) { + xen_overlay_create(); + xen_evtchn_create(); + xen_gnttab_create(); + xen_xenstore_create(); + } +#endif + return 0; +} + static void pc_machine_reset(MachineState *machine, ShutdownCause reason) { CPUState *cs; @@ -1907,6 +1957,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) pcmc->acpi_data_size = 0x20000 + 0x8000; pcmc->pvh_enabled = true; pcmc->kvmclock_create_always = true; + pcmc->resizable_acpi_blob = true; assert(!mc->get_hotplug_handler); mc->get_hotplug_handler = pc_get_hotplug_handler; mc->hotplug_allowed = pc_hotplug_allowed; diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index df64dd8dcc..d5b0dcd1fe 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -26,12 +26,14 @@ #include CONFIG_DEVICES #include "qemu/units.h" +#include "hw/char/parallel.h" #include "hw/dma/i8257.h" #include "hw/loader.h" #include "hw/i386/x86.h" #include "hw/i386/pc.h" #include "hw/i386/apic.h" #include "hw/pci-host/i440fx.h" +#include "hw/rtc/mc146818rtc.h" #include "hw/southbridge/piix.h" #include "hw/display/ramfb.h" #include "hw/firmware/smbios.h" @@ -39,6 +41,7 @@ #include "hw/pci/pci_ids.h" #include "hw/usb.h" #include "net/net.h" +#include "hw/ide/isa.h" #include "hw/ide/pci.h" #include "hw/ide/piix.h" #include "hw/irq.h" @@ -46,8 +49,6 @@ #include "hw/kvm/clock.h" #include "hw/sysbus.h" #include "hw/i2c/smbus_eeprom.h" -#include "hw/xen/xen-x86.h" -#include "hw/xen/xen.h" #include "exec/memory.h" #include "hw/acpi/acpi.h" #include "hw/acpi/piix4.h" @@ -59,6 +60,8 @@ #include #include "hw/xen/xen_pt.h" #endif +#include "hw/xen/xen-x86.h" +#include "hw/xen/xen.h" #include "migration/global_state.h" #include "migration/misc.h" #include "sysemu/numa.h" @@ -143,6 +146,7 @@ static void pc_init1(MachineState *machine, if (xen_enabled()) { xen_hvm_init_pc(pcms, &ram_memory); } else { + ram_memory = machine->ram; if (!pcms->max_ram_below_4g) { pcms->max_ram_below_4g = 0xe0000000; /* default: 3.5G */ } @@ -197,7 +201,7 @@ static void pc_init1(MachineState *machine, if (pcmc->smbios_defaults) { MachineClass *mc = MACHINE_GET_CLASS(machine); /* These values are guest ABI, do not change */ - smbios_set_defaults("QEMU", "Standard PC (i440FX + PIIX, 1996)", + smbios_set_defaults("QEMU", mc->desc, mc->name, pcmc->smbios_legacy_mode, pcmc->smbios_uuid_encoded, pcms->smbios_entry_point_type); @@ -205,8 +209,7 @@ static void pc_init1(MachineState *machine, /* allocate ram and load rom/bios */ if (!xen_enabled()) { - pc_memory_init(pcms, system_memory, - rom_memory, &ram_memory, hole64_size); + pc_memory_init(pcms, system_memory, rom_memory, hole64_size); } else { pc_system_flash_cleanup_unused(pcms); if (machine->kernel_filename != NULL) { @@ -239,14 +242,21 @@ static void pc_init1(MachineState *machine, piix3->pic = x86ms->gsi; piix3_devfn = piix3->dev.devfn; isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix3), 
"isa.0")); + rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(pci_dev), + "rtc")); } else { pci_bus = NULL; - isa_bus = isa_bus_new(NULL, get_system_memory(), system_io, + isa_bus = isa_bus_new(NULL, system_memory, system_io, &error_abort); + + rtc_state = isa_new(TYPE_MC146818_RTC); + qdev_prop_set_int32(DEVICE(rtc_state), "base_year", 2000); + isa_realize_and_unref(rtc_state, isa_bus, &error_fatal); + i8257_dma_init(isa_bus, 0); pcms->hpet_enabled = false; } - isa_bus_irqs(isa_bus, x86ms->gsi); + isa_bus_register_input_irqs(isa_bus, x86ms->gsi); if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) { pc_i8259_create(isa_bus, gsi_state->i8259_irq); @@ -268,7 +278,7 @@ static void pc_init1(MachineState *machine, } /* init basic PC hardware */ - pc_basic_device_init(pcms, isa_bus, x86ms->gsi, &rtc_state, true, + pc_basic_device_init(pcms, isa_bus, x86ms->gsi, rtc_state, true, 0x4); pc_nic_init(pcmc, isa_bus, pci_bus); @@ -421,6 +431,7 @@ static void pc_xen_hvm_init(MachineState *machine) } pc_xen_hvm_init_pci(machine); + xen_igd_reserve_slot(pcms->bus); pci_create_simple(pcms->bus, -1, "xen-platform"); } #endif @@ -440,7 +451,6 @@ static void pc_xen_hvm_init(MachineState *machine) static void pc_i440fx_machine_options(MachineClass *m) { PCMachineClass *pcmc = PC_MACHINE_CLASS(m); - pcmc->default_nic_model = "e1000"; pcmc->pci_root_uid = 0; pcmc->default_cpu_version = 1; @@ -448,25 +458,37 @@ static void pc_i440fx_machine_options(MachineClass *m) m->desc = "Standard PC (i440FX + PIIX, 1996)"; m->default_machine_opts = "firmware=bios-256k.bin"; m->default_display = "std"; + m->default_nic = "e1000"; + m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE); } -static void pc_i440fx_8_0_machine_options(MachineClass *m) +static void pc_i440fx_8_1_machine_options(MachineClass *m) { pc_i440fx_machine_options(m); m->alias = "pc"; m->is_default = true; } +DEFINE_I440FX_MACHINE(v8_1, "pc-i440fx-8.1", NULL, + pc_i440fx_8_1_machine_options); + +static void pc_i440fx_8_0_machine_options(MachineClass *m) +{ + pc_i440fx_8_1_machine_options(m); + m->alias = NULL; + m->is_default = false; + compat_props_add(m->compat_props, hw_compat_8_0, hw_compat_8_0_len); + compat_props_add(m->compat_props, pc_compat_8_0, pc_compat_8_0_len); +} + DEFINE_I440FX_MACHINE(v8_0, "pc-i440fx-8.0", NULL, pc_i440fx_8_0_machine_options); static void pc_i440fx_7_2_machine_options(MachineClass *m) { pc_i440fx_8_0_machine_options(m); - m->alias = NULL; - m->is_default = false; compat_props_add(m->compat_props, hw_compat_7_2, hw_compat_7_2_len); compat_props_add(m->compat_props, pc_compat_7_2, pc_compat_7_2_len); } @@ -476,9 +498,7 @@ DEFINE_I440FX_MACHINE(v7_2, "pc-i440fx-7.2", NULL, static void pc_i440fx_7_1_machine_options(MachineClass *m) { - PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_i440fx_7_2_machine_options(m); - pcmc->legacy_no_rng_seed = true; compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len); compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len); } @@ -746,6 +766,7 @@ static void pc_i440fx_2_2_machine_options(MachineClass *m) compat_props_add(m->compat_props, hw_compat_2_2, hw_compat_2_2_len); compat_props_add(m->compat_props, pc_compat_2_2, pc_compat_2_2_len); pcmc->rsdp_in_ram = false; + pcmc->resizable_acpi_blob = false; } DEFINE_I440FX_MACHINE(v2_2, "pc-i440fx-2.2", pc_compat_2_2_fn, @@ -865,8 +886,9 @@ static void 
isapc_machine_options(MachineClass *m) pcmc->gigabyte_align = false; pcmc->smbios_legacy_mode = true; pcmc->has_reserved_memory = false; - pcmc->default_nic_model = "ne2k_isa"; + m->default_nic = "ne2k_isa"; m->default_cpu_type = X86_CPU_TYPE_NAME("486"); + m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); } DEFINE_PC_MACHINE(isapc, "isapc", pc_init_isa, diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 66cd718b70..6155427e48 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -30,6 +30,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "hw/char/parallel.h" #include "hw/loader.h" #include "hw/i2c/smbus_eeprom.h" #include "hw/rtc/mc146818rtc.h" @@ -40,13 +41,14 @@ #include "hw/qdev-properties.h" #include "hw/i386/x86.h" #include "hw/i386/pc.h" -#include "hw/i386/ich9.h" #include "hw/i386/amd_iommu.h" #include "hw/i386/intel_iommu.h" #include "hw/display/ramfb.h" #include "hw/firmware/smbios.h" #include "hw/ide/pci.h" #include "hw/ide/ahci.h" +#include "hw/intc/ioapic.h" +#include "hw/southbridge/ich9.h" #include "hw/usb.h" #include "hw/usb/hcd-uhci.h" #include "qapi/error.h" @@ -125,14 +127,13 @@ static void pc_q35_init(MachineState *machine) DeviceState *lpc_dev; BusState *idebus[MAX_SATA_PORTS]; ISADevice *rtc_state; + MemoryRegion *system_memory = get_system_memory(); MemoryRegion *system_io = get_system_io(); MemoryRegion *pci_memory; MemoryRegion *rom_memory; - MemoryRegion *ram_memory; GSIState *gsi_state; ISABus *isa_bus; int i; - ICH9LPCState *ich9_lpc; PCIDevice *ahci; ram_addr_t lowmem; DriveInfo *hd[MAX_SATA_PORTS]; @@ -192,14 +193,14 @@ static void pc_q35_init(MachineState *machine) rom_memory = pci_memory; } else { pci_memory = NULL; - rom_memory = get_system_memory(); + rom_memory = system_memory; } pc_guest_info_init(pcms); if (pcmc->smbios_defaults) { /* These values are guest ABI, do not change */ - smbios_set_defaults("QEMU", "Standard PC (Q35 + ICH9, 2009)", + smbios_set_defaults("QEMU", mc->desc, mc->name, pcmc->smbios_legacy_mode, pcmc->smbios_uuid_encoded, pcms->smbios_entry_point_type); @@ -215,16 +216,15 @@ static void pc_q35_init(MachineState *machine) } /* allocate ram and load rom/bios */ - pc_memory_init(pcms, get_system_memory(), rom_memory, &ram_memory, - pci_hole64_size); + pc_memory_init(pcms, system_memory, rom_memory, pci_hole64_size); - object_property_add_child(qdev_get_machine(), "q35", OBJECT(q35_host)); + object_property_add_child(OBJECT(machine), "q35", OBJECT(q35_host)); object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_RAM_MEM, - OBJECT(ram_memory), NULL); + OBJECT(machine->ram), NULL); object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_PCI_MEM, OBJECT(pci_memory), NULL); object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_SYSTEM_MEM, - OBJECT(get_system_memory()), NULL); + OBJECT(system_memory), NULL); object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_IO_MEM, OBJECT(system_io), NULL); object_property_set_int(OBJECT(q35_host), PCI_HOST_BELOW_4G_MEM_SIZE, @@ -236,9 +236,13 @@ static void pc_q35_init(MachineState *machine) phb = PCI_HOST_BRIDGE(q35_host); host_bus = phb->bus; /* create ISA bus */ - lpc = pci_create_simple_multifunction(host_bus, PCI_DEVFN(ICH9_LPC_DEV, - ICH9_LPC_FUNC), true, - TYPE_ICH9_LPC_DEVICE); + lpc = pci_new_multifunction(PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC), true, + TYPE_ICH9_LPC_DEVICE); + qdev_prop_set_bit(DEVICE(lpc), "smm-enabled", + x86_machine_is_smm_enabled(x86ms)); + pci_realize_and_unref(lpc, host_bus, &error_fatal); + + rtc_state = 
ISA_DEVICE(object_resolve_path_component(OBJECT(lpc), "rtc")); object_property_add_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP, TYPE_HOTPLUG_HANDLER, @@ -265,15 +269,11 @@ static void pc_q35_init(MachineState *machine) /* irq lines */ gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled); - ich9_lpc = ICH9_LPC_DEVICE(lpc); lpc_dev = DEVICE(lpc); - for (i = 0; i < GSI_NUM_PINS; i++) { + for (i = 0; i < IOAPIC_NUM_PINS; i++) { qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, x86ms->gsi[i]); } - pci_bus_irqs(host_bus, ich9_lpc_set_irq, ich9_lpc, ICH9_LPC_NB_PIRQS); - pci_bus_map_irqs(host_bus, ich9_lpc_map_irq); - pci_bus_set_route_irq_fn(host_bus, ich9_route_intx_pin_to_irq); - isa_bus = ich9_lpc->isa_bus; + isa_bus = ISA_BUS(qdev_get_child_bus(lpc_dev, "isa.0")); if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) { pc_i8259_create(isa_bus, gsi_state->i8259_irq); @@ -293,12 +293,9 @@ static void pc_q35_init(MachineState *machine) } /* init basic PC hardware */ - pc_basic_device_init(pcms, isa_bus, x86ms->gsi, &rtc_state, !mc->no_floppy, + pc_basic_device_init(pcms, isa_bus, x86ms->gsi, rtc_state, !mc->no_floppy, 0xff0104); - /* connect pm stuff to lpc */ - ich9_lpc_pm_init(lpc, x86_machine_is_smm_enabled(x86ms)); - if (pcms->sata_enabled) { /* ahci and SATA device, for q35 1 ahci controller is built-in */ ahci = pci_create_simple_multifunction(host_bus, @@ -320,10 +317,15 @@ static void pc_q35_init(MachineState *machine) } if (pcms->smbus_enabled) { + PCIDevice *smb; + /* TODO: Populate SPD eeprom data. */ - pcms->smbus = ich9_smb_init(host_bus, - PCI_DEVFN(ICH9_SMB_DEV, ICH9_SMB_FUNC), - 0xb100); + smb = pci_create_simple_multifunction(host_bus, + PCI_DEVFN(ICH9_SMB_DEV, + ICH9_SMB_FUNC), + true, TYPE_ICH9_SMB_DEVICE); + pcms->smbus = I2C_BUS(qdev_get_child_bus(DEVICE(smb), "i2c")); + smbus_eeprom_init(pcms->smbus, 8, NULL, 0); } @@ -355,7 +357,6 @@ static void pc_q35_init(MachineState *machine) static void pc_q35_machine_options(MachineClass *m) { PCMachineClass *pcmc = PC_MACHINE_CLASS(m); - pcmc->default_nic_model = "e1000e"; pcmc->pci_root_uid = 0; pcmc->default_cpu_version = 1; @@ -364,8 +365,10 @@ static void pc_q35_machine_options(MachineClass *m) m->units_per_default_bus = 1; m->default_machine_opts = "firmware=bios-256k.bin"; m->default_display = "std"; + m->default_nic = "e1000e"; m->default_kernel_irqchip_split = false; m->no_floppy = 1; + m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); machine_class_allow_dynamic_sysbus_dev(m, TYPE_AMD_IOMMU_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_INTEL_IOMMU_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE); @@ -373,19 +376,29 @@ static void pc_q35_machine_options(MachineClass *m) m->max_cpus = 288; } -static void pc_q35_8_0_machine_options(MachineClass *m) +static void pc_q35_8_1_machine_options(MachineClass *m) { pc_q35_machine_options(m); m->alias = "q35"; } +DEFINE_Q35_MACHINE(v8_1, "pc-q35-8.1", NULL, + pc_q35_8_1_machine_options); + +static void pc_q35_8_0_machine_options(MachineClass *m) +{ + pc_q35_8_1_machine_options(m); + m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_8_0, hw_compat_8_0_len); + compat_props_add(m->compat_props, pc_compat_8_0, pc_compat_8_0_len); +} + DEFINE_Q35_MACHINE(v8_0, "pc-q35-8.0", NULL, pc_q35_8_0_machine_options); static void pc_q35_7_2_machine_options(MachineClass *m) { pc_q35_8_0_machine_options(m); - m->alias = NULL; compat_props_add(m->compat_props, hw_compat_7_2, hw_compat_7_2_len); 
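/*
 * Illustrative aside, not part of the patch: the 8.1/8.0 hunks just
 * above follow QEMU's versioned-machine convention. Only the newest
 * version carries the user-visible alias; every older version first
 * calls the next-newer options function and then layers its own compat
 * properties on top, so behaviour pins accumulate down the chain. With
 * the real compat_props_add() API but hypothetical version-X names:
 */
static void pc_q35_X_machine_options(MachineClass *m)    /* newest */
{
    pc_q35_machine_options(m);
    m->alias = "q35";                /* "-machine q35" resolves here */
}

static void pc_q35_Xm1_machine_options(MachineClass *m)  /* one release older */
{
    pc_q35_X_machine_options(m);     /* inherit the newer defaults */
    m->alias = NULL;                 /* the alias stays with version X */
    /* then pin guest-visible behaviour back to the X-1 release: */
    compat_props_add(m->compat_props, hw_compat_X, hw_compat_X_len);
    compat_props_add(m->compat_props, pc_compat_X, pc_compat_X_len);
}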
compat_props_add(m->compat_props, pc_compat_7_2, pc_compat_7_2_len); } @@ -395,9 +408,7 @@ DEFINE_Q35_MACHINE(v7_2, "pc-q35-7.2", NULL, static void pc_q35_7_1_machine_options(MachineClass *m) { - PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_q35_7_2_machine_options(m); - pcmc->legacy_no_rng_seed = true; compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len); compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len); } @@ -568,10 +579,8 @@ DEFINE_Q35_MACHINE(v2_12, "pc-q35-2.12", NULL, static void pc_q35_2_11_machine_options(MachineClass *m) { - PCMachineClass *pcmc = PC_MACHINE_CLASS(m); - pc_q35_2_12_machine_options(m); - pcmc->default_nic_model = "e1000"; + m->default_nic = "e1000"; compat_props_add(m->compat_props, hw_compat_2_11, hw_compat_2_11_len); compat_props_add(m->compat_props, pc_compat_2_11, pc_compat_2_11_len); } diff --git a/hw/i386/sgx.c b/hw/i386/sgx.c index db004d17a6..70305547d4 100644 --- a/hw/i386/sgx.c +++ b/hw/i386/sgx.c @@ -18,6 +18,7 @@ #include "monitor/monitor.h" #include "monitor/hmp-target.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "qapi/qapi-commands-misc-target.h" #include "exec/address-spaces.h" #include "sysemu/hw_accel.h" diff --git a/hw/i386/x86.c b/hw/i386/x86.c index 48be7a1c23..a88a126123 100644 --- a/hw/i386/x86.c +++ b/hw/i386/x86.c @@ -26,7 +26,6 @@ #include "qemu/cutils.h" #include "qemu/units.h" #include "qemu/datadir.h" -#include "qemu/guest-random.h" #include "qapi/error.h" #include "qapi/qapi-visit-common.h" #include "qapi/clone-visitor.h" @@ -36,7 +35,6 @@ #include "sysemu/whpx.h" #include "sysemu/numa.h" #include "sysemu/replay.h" -#include "sysemu/reset.h" #include "sysemu/sysemu.h" #include "sysemu/cpu-timers.h" #include "sysemu/xen.h" @@ -49,7 +47,6 @@ #include "hw/intc/i8259.h" #include "hw/rtc/mc146818rtc.h" #include "target/i386/sev.h" -#include "hw/i386/microvm.h" #include "hw/acpi/cpu_hotplug.h" #include "hw/irq.h" @@ -61,10 +58,15 @@ #include CONFIG_DEVICES #include "kvm/kvm_i386.h" +#ifdef CONFIG_XEN_EMU +#include "hw/xen/xen.h" +#include "hw/i386/kvm/xen_evtchn.h" +#endif + /* Physical Address of PVH entry point read from kernel ELF NOTE */ static size_t pvh_start_addr; -inline void init_topo_info(X86CPUTopoInfo *topo_info, +static void init_topo_info(X86CPUTopoInfo *topo_info, const X86MachineState *x86ms) { MachineState *ms = MACHINE(x86ms); @@ -150,17 +152,19 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version) } } -void x86_rtc_set_cpus_count(ISADevice *rtc, uint16_t cpus_count) +void x86_rtc_set_cpus_count(ISADevice *s, uint16_t cpus_count) { + MC146818RtcState *rtc = MC146818_RTC(s); + if (cpus_count > 0xff) { /* * If the number of CPUs can't be represented in 8 bits, the * BIOS must use "FW_CFG_NB_CPUS". Set RTC field to 0 just * to make old BIOSes fail more predictably. */ - rtc_set_memory(rtc, 0x5f, 0); + mc146818rtc_set_cmos_data(rtc, 0x5f, 0); } else { - rtc_set_memory(rtc, 0x5f, cpus_count - 1); + mc146818rtc_set_cmos_data(rtc, 0x5f, cpus_count - 1); } } @@ -608,6 +612,17 @@ void gsi_handler(void *opaque, int n, int level) } /* fall through */ case ISA_NUM_IRQS ... IOAPIC_NUM_PINS - 1: +#ifdef CONFIG_XEN_EMU + /* + * Xen delivers the GSI to the Legacy PIC (not that Legacy PIC + * routing actually works properly under Xen). And then to + * *either* the PIRQ handling or the I/OAPIC depending on + * whether the former wants it. 
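/*
 * Illustrative aside, not part of the patch: the XEN_EMULATE intercept
 * added in this hunk makes the emulated event-channel layer the first
 * consumer of a GSI. A true return from xen_evtchn_set_gsi() means a
 * PIRQ claimed the line, so the I/OAPIC pin must not fire. Reduced to
 * its shape (the real gsi_handler() also covers the secondary I/OAPIC
 * range):
 */
static void gsi_dispatch_sketch(GSIState *s, int n, int level)
{
    if (n < ISA_NUM_IRQS) {
        qemu_set_irq(s->i8259_irq[n], level);   /* legacy PIC first */
        /* and fall through: the I/OAPIC pin normally fires as well */
    }
    if (xen_mode == XEN_EMULATE && xen_evtchn_set_gsi(n, level)) {
        return;                                 /* consumed as a PIRQ */
    }
    qemu_set_irq(s->ioapic_irq[n], level);
}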
+ */ + if (xen_mode == XEN_EMULATE && xen_evtchn_set_gsi(n, level)) { + break; + } +#endif qemu_set_irq(s->ioapic_irq[n], level); break; case IO_APIC_SECONDARY_IRQBASE @@ -657,12 +672,12 @@ DeviceState *ioapic_init_secondary(GSIState *gsi_state) return dev; } -typedef struct SetupData { +struct setup_data { uint64_t next; uint32_t type; uint32_t len; uint8_t data[]; -} __attribute__((packed)) SetupData; +} __attribute__((packed)); /* @@ -769,35 +784,10 @@ static bool load_elfboot(const char *kernel_filename, return true; } -typedef struct SetupDataFixup { - void *pos; - hwaddr orig_val, new_val; - uint32_t addr; -} SetupDataFixup; - -static void fixup_setup_data(void *opaque) -{ - SetupDataFixup *fixup = opaque; - stq_p(fixup->pos, fixup->new_val); -} - -static void reset_setup_data(void *opaque) -{ - SetupDataFixup *fixup = opaque; - stq_p(fixup->pos, fixup->orig_val); -} - -static void reset_rng_seed(void *opaque) -{ - SetupData *setup_data = opaque; - qemu_guest_getrandom_nofail(setup_data->data, le32_to_cpu(setup_data->len)); -} - void x86_load_linux(X86MachineState *x86ms, FWCfgState *fw_cfg, int acpi_data_size, - bool pvh_enabled, - bool legacy_no_rng_seed) + bool pvh_enabled) { bool linuxboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled; uint16_t protocol; @@ -805,26 +795,19 @@ void x86_load_linux(X86MachineState *x86ms, int dtb_size, setup_data_offset; uint32_t initrd_max; uint8_t header[8192], *setup, *kernel; - hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0, first_setup_data = 0; + hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0; FILE *f; char *vmode; MachineState *machine = MACHINE(x86ms); - SetupData *setup_data; + struct setup_data *setup_data; const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; const char *dtb_filename = machine->dtb; - char *kernel_cmdline; + const char *kernel_cmdline = machine->kernel_cmdline; SevKernelLoaderContext sev_load_ctx = {}; - enum { RNG_SEED_LENGTH = 32 }; - /* - * Add the NUL terminator, some padding for the microvm cmdline fiddling - * hack, and then align to 16 bytes as a paranoia measure - */ - cmdline_size = (strlen(machine->kernel_cmdline) + 1 + - VIRTIO_CMDLINE_TOTAL_MAX_LEN + 16) & ~15; - /* Make a copy, since we might append arbitrary bytes to it later. 
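/*
 * Illustrative aside, not part of the patch: the struct renamed above is
 * Linux's boot-protocol setup_data record, a singly linked list of
 * packed nodes chained by guest-physical address and rooted at offset
 * 0x250 of the real-mode kernel header. Fields are little-endian in
 * guest memory (byte swapping elided in this sketch):
 */
#include <stdint.h>

struct setup_data_sketch {
    uint64_t next;    /* phys addr of the next node, 0 terminates */
    uint32_t type;    /* e.g. SETUP_DTB */
    uint32_t len;     /* payload length in bytes */
    uint8_t  data[];  /* payload follows immediately */
} __attribute__((packed));

/* Prepending a node that lives at guest-physical address pa: */
static void setup_data_prepend(struct setup_data_sketch *node, uint64_t pa,
                               uint64_t *root /* header byte 0x250 */)
{
    node->next = *root;   /* old list head becomes our successor */
    *root = pa;           /* the header now points at the new node */
}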
*/ - kernel_cmdline = g_strndup(machine->kernel_cmdline, cmdline_size); + /* Align to 16 bytes as a paranoia measure */ + cmdline_size = (strlen(kernel_cmdline) + 16) & ~15; /* load the kernel header */ f = fopen(kernel_filename, "rb"); @@ -965,6 +948,12 @@ void x86_load_linux(X86MachineState *x86ms, initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1; } + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline) + 1); + fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline); + sev_load_ctx.cmdline_data = (char *)kernel_cmdline; + sev_load_ctx.cmdline_size = strlen(kernel_cmdline) + 1; + if (protocol >= 0x202) { stl_p(header + 0x228, cmdline_addr); } else { @@ -1091,45 +1080,20 @@ void x86_load_linux(X86MachineState *x86ms, exit(1); } - setup_data_offset = cmdline_size; - cmdline_size += sizeof(SetupData) + dtb_size; - kernel_cmdline = g_realloc(kernel_cmdline, cmdline_size); - setup_data = (void *)kernel_cmdline + setup_data_offset; - setup_data->next = cpu_to_le64(first_setup_data); - first_setup_data = cmdline_addr + setup_data_offset; + setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16); + kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size; + kernel = g_realloc(kernel, kernel_size); + + stq_p(header + 0x250, prot_addr + setup_data_offset); + + setup_data = (struct setup_data *)(kernel + setup_data_offset); + setup_data->next = 0; setup_data->type = cpu_to_le32(SETUP_DTB); setup_data->len = cpu_to_le32(dtb_size); + load_image_size(dtb_filename, setup_data->data, dtb_size); } - if (!legacy_no_rng_seed && protocol >= 0x209) { - setup_data_offset = cmdline_size; - cmdline_size += sizeof(SetupData) + RNG_SEED_LENGTH; - kernel_cmdline = g_realloc(kernel_cmdline, cmdline_size); - setup_data = (void *)kernel_cmdline + setup_data_offset; - setup_data->next = cpu_to_le64(first_setup_data); - first_setup_data = cmdline_addr + setup_data_offset; - setup_data->type = cpu_to_le32(SETUP_RNG_SEED); - setup_data->len = cpu_to_le32(RNG_SEED_LENGTH); - qemu_guest_getrandom_nofail(setup_data->data, RNG_SEED_LENGTH); - qemu_register_reset_nosnapshotload(reset_rng_seed, setup_data); - fw_cfg_add_bytes_callback(fw_cfg, FW_CFG_KERNEL_DATA, reset_rng_seed, NULL, - setup_data, kernel, kernel_size, true); - } else { - fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size); - } - - fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, cmdline_size); - fw_cfg_add_bytes(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline, cmdline_size); - sev_load_ctx.cmdline_data = (char *)kernel_cmdline; - sev_load_ctx.cmdline_size = cmdline_size; - - fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); - sev_load_ctx.kernel_data = (char *)kernel; - sev_load_ctx.kernel_size = kernel_size; - /* * If we're starting an encrypted VM, it will be OVMF based, which uses the * efi stub for booting and doesn't require any values to be placed in the @@ -1137,21 +1101,17 @@ void x86_load_linux(X86MachineState *x86ms, * kernel on the other side of the fw_cfg interface matches the hash of the * file the user passed in. */ - if (!sev_enabled() && first_setup_data) { - SetupDataFixup *fixup = g_malloc(sizeof(*fixup)); - + if (!sev_enabled()) { memcpy(setup, header, MIN(sizeof(header), setup_size)); - /* Offset 0x250 is a pointer to the first setup_data link. 
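/*
 * Illustrative aside, not part of the patch: everything this function
 * hands to the firmware travels over fw_cfg as an (ADDR, SIZE, DATA)
 * triple of selector keys. With the RNG-seed fiddling reverted, the
 * command-line wiring restored above reduces to exactly this pattern:
 */
static void fw_cfg_expose_cmdline_sketch(FWCfgState *fw_cfg,
                                         hwaddr cmdline_addr,
                                         const char *cmdline)
{
    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr);  /* copy target */
    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE,
                   strlen(cmdline) + 1);                        /* incl. NUL */
    fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, cmdline);    /* the bytes */
}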
*/ - fixup->pos = setup + 0x250; - fixup->orig_val = ldq_p(fixup->pos); - fixup->new_val = first_setup_data; - fixup->addr = cpu_to_le32(real_addr); - fw_cfg_add_bytes_callback(fw_cfg, FW_CFG_SETUP_ADDR, fixup_setup_data, NULL, - fixup, &fixup->addr, sizeof(fixup->addr), true); - qemu_register_reset(reset_setup_data, fixup); - } else { - fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr); } + + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size); + sev_load_ctx.kernel_data = (char *)kernel; + sev_load_ctx.kernel_size = kernel_size; + + fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size); sev_load_ctx.setup_data = (char *)setup; diff --git a/hw/i386/xen/meson.build b/hw/i386/xen/meson.build index be84130300..2e64a34e16 100644 --- a/hw/i386/xen/meson.build +++ b/hw/i386/xen/meson.build @@ -2,6 +2,9 @@ i386_ss.add(when: 'CONFIG_XEN', if_true: files( 'xen-hvm.c', 'xen-mapcache.c', 'xen_apic.c', - 'xen_platform.c', 'xen_pvdevice.c', )) + +i386_ss.add(when: 'CONFIG_XEN_BUS', if_true: files( + 'xen_platform.c', +)) diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c index b9a6f7f538..56641a550e 100644 --- a/hw/i386/xen/xen-hvm.c +++ b/hw/i386/xen/xen-hvm.c @@ -18,7 +18,7 @@ #include "hw/irq.h" #include "hw/hw.h" #include "hw/i386/apic-msidef.h" -#include "hw/xen/xen_common.h" +#include "hw/xen/xen_native.h" #include "hw/xen/xen-legacy-backend.h" #include "hw/xen/xen-bus.h" #include "hw/xen/xen-x86.h" @@ -52,10 +52,11 @@ static bool xen_in_migration; /* Compatibility with older version */ -/* This allows QEMU to build on a system that has Xen 4.5 or earlier - * installed. This here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h - * needs to be included before this block and hw/xen/xen_common.h needs to - * be included before xen/hvm/ioreq.h +/* + * This allows QEMU to build on a system that has Xen 4.5 or earlier installed. 
+ * This is here (not in hw/xen/xen_native.h) because xen/hvm/ioreq.h needs to + * be included before this block and hw/xen/xen_native.h needs to be included + * before xen/hvm/ioreq.h */ #ifndef IOREQ_TYPE_VMWARE_PORT #define IOREQ_TYPE_VMWARE_PORT 3 @@ -761,7 +762,7 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state) int i; evtchn_port_t port; - port = xenevtchn_pending(state->xce_handle); + port = qemu_xen_evtchn_pending(state->xce_handle); if (port == state->bufioreq_local_port) { timer_mod(state->buffered_io_timer, BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); @@ -780,7 +781,7 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state) } /* unmask the wanted port again */ - xenevtchn_unmask(state->xce_handle, port); + qemu_xen_evtchn_unmask(state->xce_handle, port); /* get the io packet from shared memory */ state->send_vcpu = i; @@ -1147,7 +1148,7 @@ static void handle_buffered_io(void *opaque) BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); } else { timer_del(state->buffered_io_timer); - xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port); + qemu_xen_evtchn_unmask(state->xce_handle, state->bufioreq_local_port); } } @@ -1196,8 +1197,8 @@ static void cpu_handle_ioreq(void *opaque) } req->state = STATE_IORESP_READY; - xenevtchn_notify(state->xce_handle, - state->ioreq_local_port[state->send_vcpu]); + qemu_xen_evtchn_notify(state->xce_handle, + state->ioreq_local_port[state->send_vcpu]); } } @@ -1206,7 +1207,7 @@ static void xen_main_loop_prepare(XenIOState *state) int evtchn_fd = -1; if (state->xce_handle != NULL) { - evtchn_fd = xenevtchn_fd(state->xce_handle); + evtchn_fd = qemu_xen_evtchn_fd(state->xce_handle); } state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io, @@ -1249,7 +1250,7 @@ static void xen_exit_notifier(Notifier *n, void *data) xenforeignmemory_unmap_resource(xen_fmem, state->fres); } - xenevtchn_close(state->xce_handle); + qemu_xen_evtchn_close(state->xce_handle); xs_daemon_close(state->xenstore); } @@ -1397,9 +1398,11 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory) xen_pfn_t ioreq_pfn; XenIOState *state; + setup_xen_backend_ops(); + state = g_new0(XenIOState, 1); - state->xce_handle = xenevtchn_open(NULL, 0); + state->xce_handle = qemu_xen_evtchn_open(); if (state->xce_handle == NULL) { perror("xen: event channel open"); goto err; @@ -1463,8 +1466,9 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory) /* FIXME: how about if we overflow the page here? 
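/*
 * Illustrative aside, not part of the patch: the qemu_xen_evtchn_*
 * calls swapped in above are indirections over the former libxenevtchn
 * xenevtchn_* calls, so this ioreq code can run against either the real
 * Xen interface or the built-in emulation. The service sequence is
 * unchanged; condensed into one hypothetical helper:
 */
static void evtchn_service_once_sketch(XenIOState *state)
{
    evtchn_port_t port = qemu_xen_evtchn_pending(state->xce_handle);

    if (port == (evtchn_port_t)-1) {
        return;                              /* nothing pending */
    }
    /* ... match port against the vcpu ports or the buffered-io port ... */
    qemu_xen_evtchn_unmask(state->xce_handle, port);   /* re-arm delivery */
    /* ... process the ioreq, then signal completion to the guest: */
    qemu_xen_evtchn_notify(state->xce_handle, port);
}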
*/ for (i = 0; i < max_cpus; i++) { - rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, - xen_vcpu_eport(state->shared_page, i)); + rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid, + xen_vcpu_eport(state->shared_page, + i)); if (rc == -1) { error_report("shared evtchn %d bind error %d", i, errno); goto err; @@ -1472,8 +1476,8 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory) state->ioreq_local_port[i] = rc; } - rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, - state->bufioreq_remote_port); + rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid, + state->bufioreq_remote_port); if (rc == -1) { error_report("buffered evtchn bind error %d", errno); goto err; @@ -1502,13 +1506,7 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory) device_listener_register(&state->device_listener); xen_bus_init(); - - /* Initialize backend core & drivers */ - if (xen_be_init() != 0) { - error_report("xen backend core setup failed"); - goto err; - } - xen_be_register_common(); + xen_be_init(); QLIST_INIT(&xen_physmap); xen_read_physmap(state); diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c index 1d0879d234..f7d974677d 100644 --- a/hw/i386/xen/xen-mapcache.c +++ b/hw/i386/xen/xen-mapcache.c @@ -14,7 +14,7 @@ #include -#include "hw/xen/xen-legacy-backend.h" +#include "hw/xen/xen_native.h" #include "qemu/bitmap.h" #include "sysemu/runstate.h" diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c index 66e6de31a6..57f1d742c1 100644 --- a/hw/i386/xen/xen_platform.c +++ b/hw/i386/xen/xen_platform.c @@ -25,12 +25,10 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "hw/ide.h" #include "hw/ide/pci.h" #include "hw/pci/pci.h" -#include "hw/xen/xen_common.h" #include "migration/vmstate.h" -#include "hw/xen/xen-legacy-backend.h" +#include "net/net.h" #include "trace.h" #include "sysemu/xen.h" #include "sysemu/block-backend.h" @@ -38,6 +36,13 @@ #include "qemu/module.h" #include "qom/object.h" +#ifdef CONFIG_XEN +#include "hw/xen/xen_native.h" +#endif + +/* The rule is that xen_native.h must come first */ +#include "hw/xen/xen.h" + //#define DEBUG_PLATFORM #ifdef DEBUG_PLATFORM @@ -109,12 +114,25 @@ static void log_writeb(PCIXenPlatformState *s, char val) #define _UNPLUG_NVME_DISKS 3 #define UNPLUG_NVME_DISKS (1u << _UNPLUG_NVME_DISKS) +static bool pci_device_is_passthrough(PCIDevice *d) +{ + if (!strcmp(d->name, "xen-pci-passthrough")) { + return true; + } + + if (xen_mode == XEN_EMULATE && !strcmp(d->name, "vfio-pci")) { + return true; + } + + return false; +} + static void unplug_nic(PCIBus *b, PCIDevice *d, void *o) { /* We have to ignore passthrough devices */ if (pci_get_word(d->config + PCI_CLASS_DEVICE) == PCI_CLASS_NETWORK_ETHERNET - && strcmp(d->name, "xen-pci-passthrough") != 0) { + && !pci_device_is_passthrough(d)) { object_unparent(OBJECT(d)); } } @@ -187,9 +205,8 @@ static void unplug_disks(PCIBus *b, PCIDevice *d, void *opaque) !(flags & UNPLUG_IDE_SCSI_DISKS); /* We have to ignore passthrough devices */ - if (!strcmp(d->name, "xen-pci-passthrough")) { + if (pci_device_is_passthrough(d)) return; - } switch (pci_get_word(d->config + PCI_CLASS_DEVICE)) { case PCI_CLASS_STORAGE_IDE: @@ -268,18 +285,26 @@ static void platform_fixed_ioport_writeb(void *opaque, uint32_t addr, uint32_t v PCIXenPlatformState *s = opaque; switch (addr) { - case 0: /* Platform flags */ { - hvmmem_type_t mem_type = (val & PFFLAG_ROM_LOCK) ? 
- HVMMEM_ram_ro : HVMMEM_ram_rw; - if (xen_set_mem_type(xen_domid, mem_type, 0xc0, 0x40)) { - DPRINTF("unable to change ro/rw state of ROM memory area!\n"); - } else { + case 0: /* Platform flags */ + if (xen_mode == XEN_EMULATE) { + /* XX: Use i440gx/q35 PAM setup to do this? */ s->flags = val & PFFLAG_ROM_LOCK; - DPRINTF("changed ro/rw state of ROM memory area. now is %s state.\n", - (mem_type == HVMMEM_ram_ro ? "ro":"rw")); +#ifdef CONFIG_XEN + } else { + hvmmem_type_t mem_type = (val & PFFLAG_ROM_LOCK) ? + HVMMEM_ram_ro : HVMMEM_ram_rw; + + if (xen_set_mem_type(xen_domid, mem_type, 0xc0, 0x40)) { + DPRINTF("unable to change ro/rw state of ROM memory area!\n"); + } else { + s->flags = val & PFFLAG_ROM_LOCK; + DPRINTF("changed ro/rw state of ROM memory area. now is %s state.\n", + (mem_type == HVMMEM_ram_ro ? "ro" : "rw")); + } +#endif } break; - } + case 2: log_writeb(s, val); break; @@ -497,8 +522,8 @@ static void xen_platform_realize(PCIDevice *dev, Error **errp) uint8_t *pci_conf; /* Device will crash on reset if xen is not initialized */ - if (!xen_enabled()) { - error_setg(errp, "xen-platform device requires the Xen accelerator"); + if (xen_mode == XEN_DISABLED) { + error_setg(errp, "xen-platform device requires a Xen guest"); return; } diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index 7ce001cacd..4e76d6b191 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -22,6 +22,7 @@ */ #include "qemu/osdep.h" +#include "hw/irq.h" #include "hw/pci/msi.h" #include "hw/pci/pci.h" #include "hw/qdev-properties.h" @@ -1085,8 +1086,8 @@ static void execute_ncq_command(NCQTransferState *ncq_tfs) ncq_cb, ncq_tfs); break; case WRITE_FPDMA_QUEUED: - trace_execute_ncq_command_read(ad->hba, port, ncq_tfs->tag, - ncq_tfs->sector_count, ncq_tfs->lba); + trace_execute_ncq_command_write(ad->hba, port, ncq_tfs->tag, + ncq_tfs->sector_count, ncq_tfs->lba); dma_acct_start(ide_state->blk, &ncq_tfs->acct, &ncq_tfs->sglist, BLOCK_ACCT_WRITE); ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist, @@ -1268,7 +1269,7 @@ static void handle_reg_h2d_fis(AHCIState *s, int port, cmd->status = 0; /* We're ready to process the command in FIS byte 2. 
*/ - ide_exec_cmd(&s->dev[port].port, cmd_fis[2]); + ide_bus_exec_cmd(&s->dev[port].port, cmd_fis[2]); } static int handle_cmd(AHCIState *s, int port, uint8_t slot) @@ -1508,7 +1509,8 @@ static void ahci_cmd_done(const IDEDMA *dma) ahci_write_fis_d2h(ad); if (ad->port_regs.cmd_issue && !ad->check_bh) { - ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad); + ad->check_bh = qemu_bh_new_guarded(ahci_check_cmd_bh, ad, + &ad->mem_reentrancy_guard); qemu_bh_schedule(ad->check_bh); } } @@ -1553,13 +1555,13 @@ void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports) AHCIDevice *ad = &s->dev[i]; ide_bus_init(&ad->port, sizeof(ad->port), qdev, i, 1); - ide_init2(&ad->port, irqs[i]); + ide_bus_init_output_irq(&ad->port, irqs[i]); ad->hba = s; ad->port_no = i; ad->port.dma = &ad->dma; ad->port.dma->ops = &ahci_dma_ops; - ide_register_restart_cb(&ad->port); + ide_bus_register_restart_cb(&ad->port); } g_free(irqs); } @@ -1841,7 +1843,7 @@ void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd) if (hd[i] == NULL) { continue; } - ide_create_drive(&ahci->dev[i].port, 0, hd[i]); + ide_bus_create_drive(&ahci->dev[i].port, 0, hd[i]); } } diff --git a/hw/ide/ahci_internal.h b/hw/ide/ahci_internal.h index 303fcd7235..2480455372 100644 --- a/hw/ide/ahci_internal.h +++ b/hw/ide/ahci_internal.h @@ -321,6 +321,7 @@ struct AHCIDevice { bool init_d2h_sent; AHCICmdHdr *cur_cmd; NCQTransferState ncq_tfs[AHCI_MAX_CMDS]; + MemReentrancyGuard mem_reentrancy_guard; }; struct AHCIPCIState { diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c index 0a9aa6f009..dcc39df9a4 100644 --- a/hw/ide/atapi.c +++ b/hw/ide/atapi.c @@ -27,6 +27,7 @@ #include "hw/ide/internal.h" #include "hw/scsi/scsi.h" #include "sysemu/block-backend.h" +#include "scsi/constants.h" #include "trace.h" #define ATAPI_SECTOR_BITS (2 + BDRV_SECTOR_BITS) @@ -178,7 +179,7 @@ void ide_atapi_cmd_ok(IDEState *s) s->status = READY_STAT | SEEK_STAT; s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD; ide_transfer_stop(s); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } void ide_atapi_cmd_error(IDEState *s, int sense_key, int asc) @@ -190,7 +191,7 @@ void ide_atapi_cmd_error(IDEState *s, int sense_key, int asc) s->sense_key = sense_key; s->asc = asc; ide_transfer_stop(s); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } void ide_atapi_io_error(IDEState *s, int ret) @@ -253,7 +254,7 @@ void ide_atapi_cmd_reply_end(IDEState *s) } else { /* a new transfer is needed */ s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO; - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); byte_count_limit = atapi_byte_count_limit(s); trace_ide_atapi_cmd_reply_end_bcl(s, byte_count_limit); size = s->packet_transfer_size; @@ -293,7 +294,7 @@ void ide_atapi_cmd_reply_end(IDEState *s) /* end of transfer */ trace_ide_atapi_cmd_reply_end_eot(s, s->status); ide_atapi_cmd_ok(s); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } /* send a reply of 'size' bytes in s->io_buffer to an ATAPI command */ @@ -339,7 +340,7 @@ static void ide_atapi_cmd_check_status(IDEState *s) s->error = MC_ERR | (UNIT_ATTENTION << 4); s->status = ERR_STAT; s->nsector = 0; - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } /* ATAPI DMA support */ @@ -383,7 +384,7 @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret) if (s->packet_transfer_size <= 0) { s->status = READY_STAT | SEEK_STAT; s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD; - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); goto eot; } diff --git a/hw/ide/cmd646.c 
b/hw/ide/cmd646.c index 94c576262c..a68357c1c5 100644 --- a/hw/ide/cmd646.c +++ b/hw/ide/cmd646.c @@ -36,20 +36,20 @@ #include "trace.h" /* CMD646 specific */ -#define CFR 0x50 -#define CFR_INTR_CH0 0x04 -#define CNTRL 0x51 -#define CNTRL_EN_CH0 0x04 -#define CNTRL_EN_CH1 0x08 -#define ARTTIM23 0x57 -#define ARTTIM23_INTR_CH1 0x10 -#define MRDMODE 0x71 -#define MRDMODE_INTR_CH0 0x04 -#define MRDMODE_INTR_CH1 0x08 -#define MRDMODE_BLK_CH0 0x10 -#define MRDMODE_BLK_CH1 0x20 -#define UDIDETCR0 0x73 -#define UDIDETCR1 0x7B +#define CFR 0x50 +#define CFR_INTR_CH0 0x04 +#define CNTRL 0x51 +#define CNTRL_EN_CH0 0x04 +#define CNTRL_EN_CH1 0x08 +#define ARTTIM23 0x57 +#define ARTTIM23_INTR_CH1 0x10 +#define MRDMODE 0x71 +#define MRDMODE_INTR_CH0 0x04 +#define MRDMODE_INTR_CH1 0x08 +#define MRDMODE_BLK_CH0 0x10 +#define MRDMODE_BLK_CH1 0x20 +#define UDIDETCR0 0x73 +#define UDIDETCR1 0x7B static void cmd646_update_irq(PCIDevice *pd); @@ -294,11 +294,11 @@ static void pci_cmd646_ide_realize(PCIDevice *dev, Error **errp) qdev_init_gpio_in(ds, cmd646_set_irq, 2); for (i = 0; i < 2; i++) { ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2); - ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i)); + ide_bus_init_output_irq(&d->bus[i], qdev_get_gpio_in(ds, i)); bmdma_init(&d->bus[i], &d->bmdma[i], d); d->bmdma[i].bus = &d->bus[i]; - ide_register_restart_cb(&d->bus[i]); + ide_bus_register_restart_cb(&d->bus[i]); } } diff --git a/hw/ide/core.c b/hw/ide/core.c index 5d1039378f..de48ff9f86 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -24,6 +24,7 @@ */ #include "qemu/osdep.h" +#include "hw/irq.h" #include "hw/isa/isa.h" #include "migration/vmstate.h" #include "qemu/error-report.h" @@ -317,52 +318,52 @@ static void ide_cfata_identify(IDEState *s) cur_sec = s->cylinders * s->heads * s->sectors; - put_le16(p + 0, 0x848a); /* CF Storage Card signature */ - put_le16(p + 1, s->cylinders); /* Default cylinders */ - put_le16(p + 3, s->heads); /* Default heads */ - put_le16(p + 6, s->sectors); /* Default sectors per track */ + put_le16(p + 0, 0x848a); /* CF Storage Card signature */ + put_le16(p + 1, s->cylinders); /* Default cylinders */ + put_le16(p + 3, s->heads); /* Default heads */ + put_le16(p + 6, s->sectors); /* Default sectors per track */ /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */ /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */ padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */ - put_le16(p + 22, 0x0004); /* ECC bytes */ - padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */ + put_le16(p + 22, 0x0004); /* ECC bytes */ + padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */ padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */ #if MAX_MULT_SECTORS > 1 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS); #else put_le16(p + 47, 0x0000); #endif - put_le16(p + 49, 0x0f00); /* Capabilities */ - put_le16(p + 51, 0x0002); /* PIO cycle timing mode */ - put_le16(p + 52, 0x0001); /* DMA cycle timing mode */ - put_le16(p + 53, 0x0003); /* Translation params valid */ - put_le16(p + 54, s->cylinders); /* Current cylinders */ - put_le16(p + 55, s->heads); /* Current heads */ - put_le16(p + 56, s->sectors); /* Current sectors */ - put_le16(p + 57, cur_sec); /* Current capacity */ - put_le16(p + 58, cur_sec >> 16); /* Current capacity */ - if (s->mult_sectors) /* Multiple sector setting */ + put_le16(p + 49, 0x0f00); /* Capabilities */ + put_le16(p + 51, 0x0002); /* PIO cycle timing mode */ + put_le16(p + 52, 0x0001); /* DMA cycle timing 
mode */ + put_le16(p + 53, 0x0003); /* Translation params valid */ + put_le16(p + 54, s->cylinders); /* Current cylinders */ + put_le16(p + 55, s->heads); /* Current heads */ + put_le16(p + 56, s->sectors); /* Current sectors */ + put_le16(p + 57, cur_sec); /* Current capacity */ + put_le16(p + 58, cur_sec >> 16); /* Current capacity */ + if (s->mult_sectors) /* Multiple sector setting */ put_le16(p + 59, 0x100 | s->mult_sectors); /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */ /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */ - put_le16(p + 63, 0x0203); /* Multiword DMA capability */ - put_le16(p + 64, 0x0001); /* Flow Control PIO support */ - put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */ - put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */ - put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */ - put_le16(p + 82, 0x400c); /* Command Set supported */ - put_le16(p + 83, 0x7068); /* Command Set supported */ - put_le16(p + 84, 0x4000); /* Features supported */ - put_le16(p + 85, 0x000c); /* Command Set enabled */ - put_le16(p + 86, 0x7044); /* Command Set enabled */ - put_le16(p + 87, 0x4000); /* Features enabled */ - put_le16(p + 91, 0x4060); /* Current APM level */ - put_le16(p + 129, 0x0002); /* Current features option */ - put_le16(p + 130, 0x0005); /* Reassigned sectors */ - put_le16(p + 131, 0x0001); /* Initial power mode */ - put_le16(p + 132, 0x0000); /* User signature */ - put_le16(p + 160, 0x8100); /* Power requirement */ - put_le16(p + 161, 0x8001); /* CF command set */ + put_le16(p + 63, 0x0203); /* Multiword DMA capability */ + put_le16(p + 64, 0x0001); /* Flow Control PIO support */ + put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */ + put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */ + put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */ + put_le16(p + 82, 0x400c); /* Command Set supported */ + put_le16(p + 83, 0x7068); /* Command Set supported */ + put_le16(p + 84, 0x4000); /* Features supported */ + put_le16(p + 85, 0x000c); /* Command Set enabled */ + put_le16(p + 86, 0x7044); /* Command Set enabled */ + put_le16(p + 87, 0x4000); /* Features enabled */ + put_le16(p + 91, 0x4060); /* Current APM level */ + put_le16(p + 129, 0x0002); /* Current features option */ + put_le16(p + 130, 0x0005); /* Reassigned sectors */ + put_le16(p + 131, 0x0001); /* Initial power mode */ + put_le16(p + 132, 0x0000); /* User signature */ + put_le16(p + 160, 0x8100); /* Power requirement */ + put_le16(p + 161, 0x8001); /* CF command set */ ide_cfata_identify_size(s); s->identify_set = 1; @@ -512,6 +513,7 @@ BlockAIOCB *ide_issue_trim( BlockCompletionFunc *cb, void *cb_opaque, void *opaque) { IDEState *s = opaque; + IDEDevice *dev = s->unit ? 
s->bus->slave : s->bus->master; TrimAIOCB *iocb; /* Paired with a decrement in ide_trim_bh_cb() */ @@ -519,7 +521,8 @@ BlockAIOCB *ide_issue_trim( iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque); iocb->s = s; - iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb); + iocb->bh = qemu_bh_new_guarded(ide_trim_bh_cb, iocb, + &DEVICE(dev)->mem_reentrancy_guard); iocb->ret = 0; iocb->qiov = qiov; iocb->i = -1; @@ -653,7 +656,7 @@ void ide_set_sector(IDEState *s, int64_t sector_num) static void ide_rw_error(IDEState *s) { ide_abort_command(s); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } static void ide_buffered_readv_cb(void *opaque, int ret) @@ -772,7 +775,7 @@ static void ide_sector_read_cb(void *opaque, int ret) s->nsector -= n; /* Allow the guest to read the io_buffer */ ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } static void ide_sector_read(IDEState *s) @@ -836,7 +839,7 @@ void ide_dma_error(IDEState *s) dma_buf_commit(s, 0); ide_abort_command(s); ide_set_inactive(s, false); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } int ide_handle_rw_error(IDEState *s, int error, int op) @@ -906,7 +909,7 @@ static void ide_dma_cb(void *opaque, int ret) /* end of transfer ? */ if (s->nsector == 0) { s->status = READY_STAT | SEEK_STAT; - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); goto eot; } @@ -1006,7 +1009,7 @@ static void ide_sector_write(IDEState *s); static void ide_sector_write_timer_cb(void *opaque) { IDEState *s = opaque; - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } static void ide_sector_write_cb(void *opaque, int ret) @@ -1054,7 +1057,7 @@ static void ide_sector_write_cb(void *opaque, int ret) timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (NANOSECONDS_PER_SECOND / 1000)); } else { - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } } @@ -1105,7 +1108,7 @@ static void ide_flush_cb(void *opaque, int ret) } s->status = READY_STAT | SEEK_STAT; ide_cmd_done(s); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } static void ide_flush_cache(IDEState *s) @@ -1130,13 +1133,13 @@ static void ide_cfata_metadata_inquiry(IDEState *s) memset(p, 0, 0x200); spd = ((s->mdata_size - 1) >> 9) + 1; - put_le16(p + 0, 0x0001); /* Data format revision */ - put_le16(p + 1, 0x0000); /* Media property: silicon */ - put_le16(p + 2, s->media_changed); /* Media status */ - put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */ - put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */ - put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */ - put_le16(p + 6, spd >> 16); /* Sectors per device (high) */ + put_le16(p + 0, 0x0001); /* Data format revision */ + put_le16(p + 1, 0x0000); /* Media property: silicon */ + put_le16(p + 2, s->media_changed); /* Media status */ + put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */ + put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */ + put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */ + put_le16(p + 6, spd >> 16); /* Sectors per device (high) */ } static void ide_cfata_metadata_read(IDEState *s) @@ -1152,7 +1155,7 @@ static void ide_cfata_metadata_read(IDEState *s) p = (uint16_t *) s->io_buffer; memset(p, 0, 0x200); - put_le16(p + 0, s->media_changed); /* Media status */ + put_le16(p + 0, s->media_changed); /* Media status */ memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9), MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9), s->nsector << 
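/*
 * Illustrative aside, not part of the patch: the ahci_cmd_done() hunk
 * earlier and the ide_issue_trim() hunk just above both move from
 * qemu_bh_new() to qemu_bh_new_guarded(), tying the bottom half to a
 * MemReentrancyGuard embedded in the device state so re-entrant MMIO
 * cannot re-fire the handler while it is already running. The pattern,
 * on a hypothetical device:
 */
typedef struct MyDevState {
    MemReentrancyGuard mem_reentrancy_guard;  /* one guard per device */
    QEMUBH *bh;
} MyDevState;

static void my_dev_bh_cb(void *opaque)
{
    /* deferred work runs here, outside the MMIO fast path */
}

static void my_dev_arm_bh(MyDevState *d)
{
    if (!d->bh) {
        d->bh = qemu_bh_new_guarded(my_dev_bh_cb, d,
                                    &d->mem_reentrancy_guard);
    }
    qemu_bh_schedule(d->bh);
}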
9), 0x200 - 2)); @@ -1194,7 +1197,7 @@ static void ide_cd_change_cb(void *opaque, bool load, Error **errp) s->cdrom_changed = 1; s->events.new_media = true; s->events.eject_request = false; - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } static void ide_cd_eject_request_cb(void *opaque, bool force) @@ -1205,7 +1208,7 @@ static void ide_cd_eject_request_cb(void *opaque, bool force) if (force) { s->tray_locked = false; } - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } static void ide_cmd_lba48_transform(IDEState *s, int lba48) @@ -1264,7 +1267,7 @@ const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = { void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val) { IDEBus *bus = opaque; - IDEState *s = idebus_active_if(bus); + IDEState *s = ide_bus_active_if(bus); int reg_num = addr & 7; trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s); @@ -1326,7 +1329,7 @@ void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val) case ATA_IOPORT_WR_COMMAND: ide_clear_hob(bus); qemu_irq_lower(bus->irq); - ide_exec_cmd(bus, val); + ide_bus_exec_cmd(bus, val); break; } } @@ -1439,7 +1442,7 @@ static bool cmd_identify(IDEState *s, uint8_t cmd) } s->status = READY_STAT | SEEK_STAT; ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); return false; } else { if (s->drive_kind == IDE_CD) { @@ -1629,7 +1632,7 @@ static bool cmd_specify(IDEState *s, uint8_t cmd) if (s->blk && s->drive_kind != IDE_CD) { s->heads = (s->select & (ATA_DEV_HS)) + 1; s->sectors = s->nsector; - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } else { ide_abort_command(s); } @@ -1730,7 +1733,7 @@ static bool cmd_identify_packet(IDEState *s, uint8_t cmd) ide_atapi_identify(s); s->status = READY_STAT | SEEK_STAT; ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); return false; } @@ -1755,7 +1758,7 @@ static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd) * They are part of the regular output (this is why ERR_STAT isn't set) * Device 0 passed, Device 1 passed or not present. 
*/ s->error = 0x01; - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } return false; @@ -1787,7 +1790,7 @@ static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd) { s->error = 0x09; /* miscellaneous error */ s->status = READY_STAT | SEEK_STAT; - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); return false; } @@ -1826,7 +1829,7 @@ static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd) s->io_buffer[0x1a] = 0x01; /* Hot count */ ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); return false; } @@ -1850,7 +1853,7 @@ static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd) ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); s->status = 0x00; /* NOTE: READY is _not_ set */ - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); return false; } @@ -1933,7 +1936,7 @@ static bool cmd_smart(IDEState *s, uint8_t cmd) s->status = READY_STAT | SEEK_STAT; ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); return false; case SMART_READ_DATA: @@ -1974,7 +1977,7 @@ static bool cmd_smart(IDEState *s, uint8_t cmd) s->status = READY_STAT | SEEK_STAT; ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); return false; case SMART_READ_LOG: @@ -2013,7 +2016,7 @@ static bool cmd_smart(IDEState *s, uint8_t cmd) } s->status = READY_STAT | SEEK_STAT; ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); return false; case SMART_EXECUTE_OFFLINE: @@ -2122,13 +2125,13 @@ static bool ide_cmd_permitted(IDEState *s, uint32_t cmd) && (ide_cmd_table[cmd].flags & (1u << s->drive_kind)); } -void ide_exec_cmd(IDEBus *bus, uint32_t val) +void ide_bus_exec_cmd(IDEBus *bus, uint32_t val) { IDEState *s; bool complete; - s = idebus_active_if(bus); - trace_ide_exec_cmd(bus, s, val); + s = ide_bus_active_if(bus); + trace_ide_bus_exec_cmd(bus, s, val); /* ignore commands to non existent slave */ if (s != bus->ifs && !s->blk) { @@ -2145,7 +2148,7 @@ void ide_exec_cmd(IDEBus *bus, uint32_t val) if (!ide_cmd_permitted(s, val)) { ide_abort_command(s); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); return; } @@ -2163,7 +2166,7 @@ void ide_exec_cmd(IDEBus *bus, uint32_t val) } ide_cmd_done(s); - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); } } @@ -2194,7 +2197,7 @@ const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = { uint32_t ide_ioport_read(void *opaque, uint32_t addr) { IDEBus *bus = opaque; - IDEState *s = idebus_active_if(bus); + IDEState *s = ide_bus_active_if(bus); uint32_t reg_num; int ret, hob; @@ -2280,7 +2283,7 @@ uint32_t ide_ioport_read(void *opaque, uint32_t addr) uint32_t ide_status_read(void *opaque, uint32_t addr) { IDEBus *bus = opaque; - IDEState *s = idebus_active_if(bus); + IDEState *s = ide_bus_active_if(bus); int ret; if ((!bus->ifs[0].blk && !bus->ifs[1].blk) || @@ -2369,7 +2372,7 @@ static bool ide_is_pio_out(IDEState *s) void ide_data_writew(void *opaque, uint32_t addr, uint32_t val) { IDEBus *bus = opaque; - IDEState *s = idebus_active_if(bus); + IDEState *s = ide_bus_active_if(bus); uint8_t *p; trace_ide_data_writew(addr, val, bus, s); @@ -2405,7 +2408,7 @@ void ide_data_writew(void *opaque, uint32_t addr, uint32_t val) uint32_t ide_data_readw(void *opaque, uint32_t addr) { IDEBus *bus = opaque; - IDEState *s = idebus_active_if(bus); + IDEState *s = ide_bus_active_if(bus); uint8_t *p; int ret; @@ -2443,7 +2446,7 @@ 
uint32_t ide_data_readw(void *opaque, uint32_t addr) void ide_data_writel(void *opaque, uint32_t addr, uint32_t val) { IDEBus *bus = opaque; - IDEState *s = idebus_active_if(bus); + IDEState *s = ide_bus_active_if(bus); uint8_t *p; trace_ide_data_writel(addr, val, bus, s); @@ -2471,7 +2474,7 @@ void ide_data_writel(void *opaque, uint32_t addr, uint32_t val) uint32_t ide_data_readl(void *opaque, uint32_t addr) { IDEBus *bus = opaque; - IDEState *s = idebus_active_if(bus); + IDEState *s = ide_bus_active_if(bus); uint8_t *p; int ret; @@ -2710,7 +2713,7 @@ static void ide_restart_bh(void *opaque) return; } - s = idebus_active_if(bus); + s = ide_bus_active_if(bus); is_read = (bus->error_status & IDE_RETRY_READ) != 0; /* The error status must be cleared before resubmitting the request: The @@ -2758,7 +2761,7 @@ static void ide_restart_cb(void *opaque, bool running, RunState state) } } -void ide_register_restart_cb(IDEBus *bus) +void ide_bus_register_restart_cb(IDEBus *bus) { if (bus->dma->ops->restart_dma) { bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus); @@ -2770,7 +2773,7 @@ static IDEDMA ide_dma_nop = { .aiocb = NULL, }; -void ide_init2(IDEBus *bus, qemu_irq irq) +void ide_bus_init_output_irq(IDEBus *bus, qemu_irq irq_out) { int i; @@ -2778,10 +2781,17 @@ void ide_init2(IDEBus *bus, qemu_irq irq) ide_init1(bus, i); ide_reset(&bus->ifs[i]); } - bus->irq = irq; + bus->irq = irq_out; bus->dma = &ide_dma_nop; } +void ide_bus_set_irq(IDEBus *bus) +{ + if (!(bus->cmd & IDE_CTRL_DISABLE_IRQ)) { + qemu_irq_raise(bus->irq); + } +} + void ide_exit(IDEState *s) { timer_free(s->sector_write_timer); diff --git a/hw/ide/ich.c b/hw/ide/ich.c index 1007a51fcb..d61faab532 100644 --- a/hw/ide/ich.c +++ b/hw/ide/ich.c @@ -61,6 +61,7 @@ */ #include "qemu/osdep.h" +#include "hw/irq.h" #include "hw/pci/msi.h" #include "hw/pci/pci.h" #include "migration/vmstate.h" diff --git a/hw/ide/ioport.c b/hw/ide/ioport.c index e6caa537fa..e2ecc6230c 100644 --- a/hw/ide/ioport.c +++ b/hw/ide/ioport.c @@ -25,16 +25,6 @@ #include "qemu/osdep.h" #include "hw/isa/isa.h" -#include "qemu/error-report.h" -#include "qemu/timer.h" -#include "sysemu/blockdev.h" -#include "sysemu/dma.h" -#include "hw/block/block.h" -#include "sysemu/block-backend.h" -#include "qapi/error.h" -#include "qemu/cutils.h" -#include "sysemu/replay.h" - #include "hw/ide/internal.h" #include "trace.h" diff --git a/hw/ide/isa.c b/hw/ide/isa.c index 8bedbd13f1..95053e026f 100644 --- a/hw/ide/isa.c +++ b/hw/ide/isa.c @@ -31,23 +31,20 @@ #include "qemu/module.h" #include "sysemu/dma.h" +#include "hw/ide/isa.h" #include "hw/ide/internal.h" #include "qom/object.h" /***********************************************************/ /* ISA IDE definitions */ -#define TYPE_ISA_IDE "isa-ide" -OBJECT_DECLARE_SIMPLE_TYPE(ISAIDEState, ISA_IDE) - struct ISAIDEState { ISADevice parent_obj; IDEBus bus; uint32_t iobase; uint32_t iobase2; - uint32_t isairq; - qemu_irq irq; + uint32_t irqnum; }; static void isa_ide_reset(DeviceState *d) @@ -75,13 +72,12 @@ static void isa_ide_realizefn(DeviceState *dev, Error **errp) ide_bus_init(&s->bus, sizeof(s->bus), dev, 0, 2); ide_init_ioport(&s->bus, isadev, s->iobase, s->iobase2); - s->irq = isa_get_irq(isadev, s->isairq); - ide_init2(&s->bus, s->irq); + ide_bus_init_output_irq(&s->bus, isa_get_irq(isadev, s->irqnum)); vmstate_register(VMSTATE_IF(dev), 0, &vmstate_ide_isa, s); - ide_register_restart_cb(&s->bus); + ide_bus_register_restart_cb(&s->bus); } -ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int 
isairq, +ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int irqnum, DriveInfo *hd0, DriveInfo *hd1) { DeviceState *dev; @@ -92,15 +88,15 @@ ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq, dev = DEVICE(isadev); qdev_prop_set_uint32(dev, "iobase", iobase); qdev_prop_set_uint32(dev, "iobase2", iobase2); - qdev_prop_set_uint32(dev, "irq", isairq); + qdev_prop_set_uint32(dev, "irq", irqnum); isa_realize_and_unref(isadev, bus, &error_fatal); s = ISA_IDE(dev); if (hd0) { - ide_create_drive(&s->bus, 0, hd0); + ide_bus_create_drive(&s->bus, 0, hd0); } if (hd1) { - ide_create_drive(&s->bus, 1, hd1); + ide_bus_create_drive(&s->bus, 1, hd1); } return isadev; } @@ -108,7 +104,7 @@ ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq, static Property isa_ide_properties[] = { DEFINE_PROP_UINT32("iobase", ISAIDEState, iobase, 0x1f0), DEFINE_PROP_UINT32("iobase2", ISAIDEState, iobase2, 0x3f6), - DEFINE_PROP_UINT32("irq", ISAIDEState, isairq, 14), + DEFINE_PROP_UINT32("irq", ISAIDEState, irqnum, 14), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/ide/macio.c b/hw/ide/macio.c index e604466acb..dca1cc9efc 100644 --- a/hw/ide/macio.c +++ b/hw/ide/macio.c @@ -24,6 +24,7 @@ */ #include "qemu/osdep.h" +#include "hw/irq.h" #include "hw/ppc/mac_dbdma.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" @@ -59,7 +60,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret) { DBDMA_io *io = opaque; MACIOIDEState *m = io->opaque; - IDEState *s = idebus_active_if(&m->bus); + IDEState *s = ide_bus_active_if(&m->bus); int64_t offset; MACIO_DPRINTF("pmac_ide_atapi_transfer_cb\n"); @@ -135,7 +136,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret) { DBDMA_io *io = opaque; MACIOIDEState *m = io->opaque; - IDEState *s = idebus_active_if(&m->bus); + IDEState *s = ide_bus_active_if(&m->bus); int64_t offset; MACIO_DPRINTF("pmac_ide_transfer_cb\n"); @@ -159,7 +160,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret) MACIO_DPRINTF("End of IDE transfer\n"); qemu_sglist_destroy(&s->sg); s->status = READY_STAT | SEEK_STAT; - ide_set_irq(s->bus); + ide_bus_set_irq(s->bus); m->dma_active = false; goto done; } @@ -219,7 +220,7 @@ done: static void pmac_ide_transfer(DBDMA_io *io) { MACIOIDEState *m = io->opaque; - IDEState *s = idebus_active_if(&m->bus); + IDEState *s = ide_bus_active_if(&m->bus); MACIO_DPRINTF("\n"); @@ -250,7 +251,7 @@ static void pmac_ide_transfer(DBDMA_io *io) static void pmac_ide_flush(DBDMA_io *io) { MACIOIDEState *m = io->opaque; - IDEState *s = idebus_active_if(&m->bus); + IDEState *s = ide_bus_active_if(&m->bus); if (s->bus->dma->aiocb) { blk_drain(s->blk); @@ -419,7 +420,7 @@ static void macio_ide_realizefn(DeviceState *dev, Error **errp) { MACIOIDEState *s = MACIO_IDE(dev); - ide_init2(&s->bus, s->ide_irq); + ide_bus_init_output_irq(&s->bus, s->ide_irq); /* Register DMA callbacks */ s->dma.ops = &dbdma_ops; @@ -500,7 +501,7 @@ void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table) for (i = 0; i < 2; i++) { if (hd_table[i]) { - ide_create_drive(&s->bus, i, hd_table[i]); + ide_bus_create_drive(&s->bus, i, hd_table[i]); } } } diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c index 56c5be3655..981cfbd97f 100644 --- a/hw/ide/microdrive.c +++ b/hw/ide/microdrive.c @@ -29,6 +29,7 @@ #include "qapi/error.h" #include "qemu/module.h" #include "sysemu/dma.h" +#include "hw/irq.h" #include "hw/ide/internal.h" #include "qom/object.h" @@ -39,7 +40,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(MicroDriveState, MICRODRIVE) 
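[Editor's note: with the rename above, the qdev property keeps its external name "irq" while the backing field becomes irqnum, so existing command lines and board code stay valid. For reference, a board wires both legacy channels through this helper roughly as follows; a hedged usage sketch where isa_bus and the DriveInfo pointers hd0..hd3 are assumed to come from the caller.]

/* Primary channel at 0x1f0/0x3f6 on ISA IRQ 14, secondary at 0x170/0x376
 * on ISA IRQ 15 (the same pairs the PIIX code below uses). */
isa_ide_init(isa_bus, 0x1f0, 0x3f6, 14, hd0, hd1);
isa_ide_init(isa_bus, 0x170, 0x376, 15, hd2, hd3);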
/***********************************************************/ /* CF-ATA Microdrive */ -#define METADATA_SIZE 0x20 +#define METADATA_SIZE 0x20 /* DSCM-1XXXX Microdrive hard disk with CF+ II / PCMCIA interface. */ @@ -64,29 +65,29 @@ struct MicroDriveState { /* Register bitfields */ enum md_opt { - OPT_MODE_MMAP = 0, - OPT_MODE_IOMAP16 = 1, - OPT_MODE_IOMAP1 = 2, - OPT_MODE_IOMAP2 = 3, - OPT_MODE = 0x3f, - OPT_LEVIREQ = 0x40, - OPT_SRESET = 0x80, + OPT_MODE_MMAP = 0, + OPT_MODE_IOMAP16 = 1, + OPT_MODE_IOMAP1 = 2, + OPT_MODE_IOMAP2 = 3, + OPT_MODE = 0x3f, + OPT_LEVIREQ = 0x40, + OPT_SRESET = 0x80, }; enum md_cstat { - STAT_INT = 0x02, - STAT_PWRDWN = 0x04, - STAT_XE = 0x10, - STAT_IOIS8 = 0x20, - STAT_SIGCHG = 0x40, - STAT_CHANGED = 0x80, + STAT_INT = 0x02, + STAT_PWRDWN = 0x04, + STAT_XE = 0x10, + STAT_IOIS8 = 0x20, + STAT_SIGCHG = 0x40, + STAT_CHANGED = 0x80, }; enum md_pins { - PINS_MRDY = 0x02, - PINS_CRDY = 0x20, + PINS_MRDY = 0x02, + PINS_CRDY = 0x20, }; enum md_ctrl { - CTRL_IEN = 0x02, - CTRL_SRST = 0x04, + CTRL_IEN = 0x02, + CTRL_SRST = 0x04, }; static inline void md_interrupt_update(MicroDriveState *s) @@ -98,7 +99,7 @@ static inline void md_interrupt_update(MicroDriveState *s) } qemu_set_irq(card->slot->irq, - !(s->stat & STAT_INT) && /* Inverted */ + !(s->stat & STAT_INT) && /* Inverted */ !(s->ctrl & (CTRL_IEN | CTRL_SRST)) && !(s->opt & OPT_SRESET)); } @@ -144,17 +145,17 @@ static uint8_t md_attr_read(PCMCIACardState *card, uint32_t at) at -= s->attr_base; switch (at) { - case 0x00: /* Configuration Option Register */ + case 0x00: /* Configuration Option Register */ return s->opt; - case 0x02: /* Card Configuration Status Register */ + case 0x02: /* Card Configuration Status Register */ if (s->ctrl & CTRL_IEN) { return s->stat & ~STAT_INT; } else { return s->stat; } - case 0x04: /* Pin Replacement Register */ + case 0x04: /* Pin Replacement Register */ return (s->pins & PINS_CRDY) | 0x0c; - case 0x06: /* Socket and Copy Register */ + case 0x06: /* Socket and Copy Register */ return 0x00; #ifdef VERBOSE default: @@ -172,14 +173,14 @@ static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value) at -= s->attr_base; switch (at) { - case 0x00: /* Configuration Option Register */ + case 0x00: /* Configuration Option Register */ s->opt = value & 0xcf; if (value & OPT_SRESET) { device_cold_reset(DEVICE(s)); } md_interrupt_update(s); break; - case 0x02: /* Card Configuration Status Register */ + case 0x02: /* Card Configuration Status Register */ if ((s->stat ^ value) & STAT_PWRDWN) { s->pins |= PINS_CRDY; } @@ -188,11 +189,11 @@ static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value) md_interrupt_update(s); /* Word 170 in Identify Device must be equal to STAT_XE */ break; - case 0x04: /* Pin Replacement Register */ + case 0x04: /* Pin Replacement Register */ s->pins &= PINS_CRDY; s->pins |= value & PINS_MRDY; break; - case 0x06: /* Socket and Copy Register */ + case 0x06: /* Socket and Copy Register */ break; default: printf("%s: Bad attribute space register %02x\n", __func__, at); @@ -231,7 +232,7 @@ static uint16_t md_common_read(PCMCIACardState *card, uint32_t at) } switch (at) { - case 0x0: /* Even RD Data */ + case 0x0: /* Even RD Data */ case 0x8: return ide_data_readw(&s->bus, 0); @@ -244,19 +245,19 @@ static uint16_t md_common_read(PCMCIACardState *card, uint32_t at) } s->cycle = !s->cycle; return ret; - case 0x9: /* Odd RD Data */ + case 0x9: /* Odd RD Data */ return s->io >> 8; - case 0xd: /* Error */ + case 0xd: /* Error */ return 
ide_ioport_read(&s->bus, 0x1); - case 0xe: /* Alternate Status */ - ifs = idebus_active_if(&s->bus); + case 0xe: /* Alternate Status */ + ifs = ide_bus_active_if(&s->bus); if (ifs->blk) { return ifs->status; } else { return 0; } - case 0xf: /* Device Address */ - ifs = idebus_active_if(&s->bus); + case 0xf: /* Device Address */ + ifs = ide_bus_active_if(&s->bus); return 0xc2 | ((~ifs->select << 2) & 0x3c); default: return ide_ioport_read(&s->bus, at); @@ -295,7 +296,7 @@ static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value) } switch (at) { - case 0x0: /* Even WR Data */ + case 0x0: /* Even WR Data */ case 0x8: ide_data_writew(&s->bus, 0, value); break; @@ -312,10 +313,10 @@ static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value) s->io = value & 0xff; s->cycle = !s->cycle; break; - case 0xd: /* Features */ + case 0xd: /* Features */ ide_ioport_write(&s->bus, 0x1, value); break; - case 0xe: /* Device Control */ + case 0xe: /* Device Control */ s->ctrl = value; if (value & CTRL_SRST) { device_cold_reset(DEVICE(s)); @@ -349,35 +350,35 @@ static const VMStateDescription vmstate_microdrive = { }; static const uint8_t dscm1xxxx_cis[0x14a] = { - [0x000] = CISTPL_DEVICE, /* 5V Device Information */ - [0x002] = 0x03, /* Tuple length = 4 bytes */ - [0x004] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ - [0x006] = 0x01, /* Size = 2K bytes */ + [0x000] = CISTPL_DEVICE, /* 5V Device Information */ + [0x002] = 0x03, /* Tuple length = 4 bytes */ + [0x004] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ + [0x006] = 0x01, /* Size = 2K bytes */ [0x008] = CISTPL_ENDMARK, - [0x00a] = CISTPL_DEVICE_OC, /* Additional Device Information */ - [0x00c] = 0x04, /* Tuple length = 4 byest */ - [0x00e] = 0x03, /* Conditions: Ext = 0, Vcc 3.3V, MWAIT = 1 */ - [0x010] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ - [0x012] = 0x01, /* Size = 2K bytes */ + [0x00a] = CISTPL_DEVICE_OC, /* Additional Device Information */ + [0x00c] = 0x04, /* Tuple length = 4 bytes */ + [0x00e] = 0x03, /* Conditions: Ext = 0, Vcc 3.3V, MWAIT = 1 */ + [0x010] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ + [0x012] = 0x01, /* Size = 2K bytes */ [0x014] = CISTPL_ENDMARK, - [0x016] = CISTPL_JEDEC_C, /* JEDEC ID */ - [0x018] = 0x02, /* Tuple length = 2 bytes */ - [0x01a] = 0xdf, /* PC Card ATA with no Vpp required */ + [0x016] = CISTPL_JEDEC_C, /* JEDEC ID */ + [0x018] = 0x02, /* Tuple length = 2 bytes */ + [0x01a] = 0xdf, /* PC Card ATA with no Vpp required */ [0x01c] = 0x01, - [0x01e] = CISTPL_MANFID, /* Manufacture ID */ - [0x020] = 0x04, /* Tuple length = 4 bytes */ - [0x022] = 0xa4, /* TPLMID_MANF = 00a4 (IBM) */ + [0x01e] = CISTPL_MANFID, /* Manufacture ID */ + [0x020] = 0x04, /* Tuple length = 4 bytes */ + [0x022] = 0xa4, /* TPLMID_MANF = 00a4 (IBM) */ [0x024] = 0x00, - [0x026] = 0x00, /* PLMID_CARD = 0000 */ + [0x026] = 0x00, /* PLMID_CARD = 0000 */ [0x028] = 0x00, - [0x02a] = CISTPL_VERS_1, /* Level 1 Version */ - [0x02c] = 0x12, /* Tuple length = 23 bytes */ - [0x02e] = 0x04, /* Major Version = JEIDA 4.2 / PCMCIA 2.1 */ - [0x030] = 0x01, /* Minor Version = 1 */ + [0x02a] = CISTPL_VERS_1, /* Level 1 Version */ + [0x02c] = 0x12, /* Tuple length = 23 bytes */ + [0x02e] = 0x04, /* Major Version = JEIDA 4.2 / PCMCIA 2.1 */ + [0x030] = 0x01, /* Minor Version = 1 */ [0x032] = 'I', [0x034] = 'B', [0x036] = 'M', @@ -395,142 +396,142 @@ static const uint8_t dscm1xxxx_cis[0x14a] = { [0x04e] = 0x00, [0x050] = CISTPL_ENDMARK, - [0x052] = CISTPL_FUNCID, /* Function ID
*/ - [0x054] = 0x02, /* Tuple length = 2 bytes */ - [0x056] = 0x04, /* TPLFID_FUNCTION = Fixed Disk */ - [0x058] = 0x01, /* TPLFID_SYSINIT: POST = 1, ROM = 0 */ + [0x052] = CISTPL_FUNCID, /* Function ID */ + [0x054] = 0x02, /* Tuple length = 2 bytes */ + [0x056] = 0x04, /* TPLFID_FUNCTION = Fixed Disk */ + [0x058] = 0x01, /* TPLFID_SYSINIT: POST = 1, ROM = 0 */ - [0x05a] = CISTPL_FUNCE, /* Function Extension */ - [0x05c] = 0x02, /* Tuple length = 2 bytes */ - [0x05e] = 0x01, /* TPLFE_TYPE = Disk Device Interface */ - [0x060] = 0x01, /* TPLFE_DATA = PC Card ATA Interface */ + [0x05a] = CISTPL_FUNCE, /* Function Extension */ + [0x05c] = 0x02, /* Tuple length = 2 bytes */ + [0x05e] = 0x01, /* TPLFE_TYPE = Disk Device Interface */ + [0x060] = 0x01, /* TPLFE_DATA = PC Card ATA Interface */ - [0x062] = CISTPL_FUNCE, /* Function Extension */ - [0x064] = 0x03, /* Tuple length = 3 bytes */ - [0x066] = 0x02, /* TPLFE_TYPE = Basic PC Card ATA Interface */ - [0x068] = 0x08, /* TPLFE_DATA: Rotating, Unique, Single */ - [0x06a] = 0x0f, /* TPLFE_DATA: Sleep, Standby, Idle, Auto */ + [0x062] = CISTPL_FUNCE, /* Function Extension */ + [0x064] = 0x03, /* Tuple length = 3 bytes */ + [0x066] = 0x02, /* TPLFE_TYPE = Basic PC Card ATA Interface */ + [0x068] = 0x08, /* TPLFE_DATA: Rotating, Unique, Single */ + [0x06a] = 0x0f, /* TPLFE_DATA: Sleep, Standby, Idle, Auto */ - [0x06c] = CISTPL_CONFIG, /* Configuration */ - [0x06e] = 0x05, /* Tuple length = 5 bytes */ - [0x070] = 0x01, /* TPCC_RASZ = 2 bytes, TPCC_RMSZ = 1 byte */ - [0x072] = 0x07, /* TPCC_LAST = 7 */ - [0x074] = 0x00, /* TPCC_RADR = 0200 */ + [0x06c] = CISTPL_CONFIG, /* Configuration */ + [0x06e] = 0x05, /* Tuple length = 5 bytes */ + [0x070] = 0x01, /* TPCC_RASZ = 2 bytes, TPCC_RMSZ = 1 byte */ + [0x072] = 0x07, /* TPCC_LAST = 7 */ + [0x074] = 0x00, /* TPCC_RADR = 0200 */ [0x076] = 0x02, - [0x078] = 0x0f, /* TPCC_RMSK = 200, 202, 204, 206 */ + [0x078] = 0x0f, /* TPCC_RMSK = 200, 202, 204, 206 */ - [0x07a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x07c] = 0x0b, /* Tuple length = 11 bytes */ - [0x07e] = 0xc0, /* TPCE_INDX = Memory Mode, Default, Iface */ - [0x080] = 0xc0, /* TPCE_IF = Memory, no BVDs, no WP, READY */ - [0x082] = 0xa1, /* TPCE_FS = Vcc only, no I/O, Memory, Misc */ - [0x084] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ - [0x086] = 0x55, /* NomV: 5.0 V */ - [0x088] = 0x4d, /* MinV: 4.5 V */ - [0x08a] = 0x5d, /* MaxV: 5.5 V */ - [0x08c] = 0x4e, /* Peakl: 450 mA */ - [0x08e] = 0x08, /* TPCE_MS = 1 window, 1 byte, Host address */ - [0x090] = 0x00, /* Window descriptor: Window length = 0 */ - [0x092] = 0x20, /* TPCE_MI: support power down mode, RW */ + [0x07a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x07c] = 0x0b, /* Tuple length = 11 bytes */ + [0x07e] = 0xc0, /* TPCE_INDX = Memory Mode, Default, Iface */ + [0x080] = 0xc0, /* TPCE_IF = Memory, no BVDs, no WP, READY */ + [0x082] = 0xa1, /* TPCE_FS = Vcc only, no I/O, Memory, Misc */ + [0x084] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x086] = 0x55, /* NomV: 5.0 V */ + [0x088] = 0x4d, /* MinV: 4.5 V */ + [0x08a] = 0x5d, /* MaxV: 5.5 V */ + [0x08c] = 0x4e, /* Peakl: 450 mA */ + [0x08e] = 0x08, /* TPCE_MS = 1 window, 1 byte, Host address */ + [0x090] = 0x00, /* Window descriptor: Window length = 0 */ + [0x092] = 0x20, /* TPCE_MI: support power down mode, RW */ - [0x094] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x096] = 0x06, /* Tuple length = 6 bytes */ - [0x098] = 0x00, /* TPCE_INDX = Memory Mode, no 
Default */ - [0x09a] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ - [0x09c] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ - [0x09e] = 0xb5, /* NomV: 3.3 V */ + [0x094] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x096] = 0x06, /* Tuple length = 6 bytes */ + [0x098] = 0x00, /* TPCE_INDX = Memory Mode, no Default */ + [0x09a] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x09c] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x09e] = 0xb5, /* NomV: 3.3 V */ [0x0a0] = 0x1e, - [0x0a2] = 0x3e, /* Peakl: 350 mA */ + [0x0a2] = 0x3e, /* Peakl: 350 mA */ - [0x0a4] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x0a6] = 0x0d, /* Tuple length = 13 bytes */ - [0x0a8] = 0xc1, /* TPCE_INDX = I/O and Memory Mode, Default */ - [0x0aa] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ - [0x0ac] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ - [0x0ae] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ - [0x0b0] = 0x55, /* NomV: 5.0 V */ - [0x0b2] = 0x4d, /* MinV: 4.5 V */ - [0x0b4] = 0x5d, /* MaxV: 5.5 V */ - [0x0b6] = 0x4e, /* Peakl: 450 mA */ - [0x0b8] = 0x64, /* TPCE_IO = 16-byte boundary, 16/8 accesses */ - [0x0ba] = 0xf0, /* TPCE_IR = MASK, Level, Pulse, Share */ - [0x0bc] = 0xff, /* IRQ0..IRQ7 supported */ - [0x0be] = 0xff, /* IRQ8..IRQ15 supported */ - [0x0c0] = 0x20, /* TPCE_MI = support power down mode */ + [0x0a4] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0a6] = 0x0d, /* Tuple length = 13 bytes */ + [0x0a8] = 0xc1, /* TPCE_INDX = I/O and Memory Mode, Default */ + [0x0aa] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ + [0x0ac] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ + [0x0ae] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x0b0] = 0x55, /* NomV: 5.0 V */ + [0x0b2] = 0x4d, /* MinV: 4.5 V */ + [0x0b4] = 0x5d, /* MaxV: 5.5 V */ + [0x0b6] = 0x4e, /* Peakl: 450 mA */ + [0x0b8] = 0x64, /* TPCE_IO = 16-byte boundary, 16/8 accesses */ + [0x0ba] = 0xf0, /* TPCE_IR = MASK, Level, Pulse, Share */ + [0x0bc] = 0xff, /* IRQ0..IRQ7 supported */ + [0x0be] = 0xff, /* IRQ8..IRQ15 supported */ + [0x0c0] = 0x20, /* TPCE_MI = support power down mode */ - [0x0c2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x0c4] = 0x06, /* Tuple length = 6 bytes */ - [0x0c6] = 0x01, /* TPCE_INDX = I/O and Memory Mode */ - [0x0c8] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ - [0x0ca] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ - [0x0cc] = 0xb5, /* NomV: 3.3 V */ + [0x0c2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0c4] = 0x06, /* Tuple length = 6 bytes */ + [0x0c6] = 0x01, /* TPCE_INDX = I/O and Memory Mode */ + [0x0c8] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x0ca] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x0cc] = 0xb5, /* NomV: 3.3 V */ [0x0ce] = 0x1e, - [0x0d0] = 0x3e, /* Peakl: 350 mA */ + [0x0d0] = 0x3e, /* Peakl: 350 mA */ - [0x0d2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x0d4] = 0x12, /* Tuple length = 18 bytes */ - [0x0d6] = 0xc2, /* TPCE_INDX = I/O Primary Mode */ - [0x0d8] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ - [0x0da] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ - [0x0dc] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ - [0x0de] = 0x55, /* NomV: 5.0 V */ - [0x0e0] = 0x4d, /* MinV: 4.5 V */ - [0x0e2] = 0x5d, /* MaxV: 5.5 V */ - [0x0e4] = 0x4e, /* Peakl: 450 mA */ - [0x0e6] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ - [0x0e8] = 0x61, /* 
Range: 2 fields, 2 bytes addr, 1 byte len */ - [0x0ea] = 0xf0, /* Field 1 address = 0x01f0 */ + [0x0d2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0d4] = 0x12, /* Tuple length = 18 bytes */ + [0x0d6] = 0xc2, /* TPCE_INDX = I/O Primary Mode */ + [0x0d8] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ + [0x0da] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ + [0x0dc] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x0de] = 0x55, /* NomV: 5.0 V */ + [0x0e0] = 0x4d, /* MinV: 4.5 V */ + [0x0e2] = 0x5d, /* MaxV: 5.5 V */ + [0x0e4] = 0x4e, /* Peakl: 450 mA */ + [0x0e6] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ + [0x0e8] = 0x61, /* Range: 2 fields, 2 bytes addr, 1 byte len */ + [0x0ea] = 0xf0, /* Field 1 address = 0x01f0 */ [0x0ec] = 0x01, - [0x0ee] = 0x07, /* Address block length = 8 */ - [0x0f0] = 0xf6, /* Field 2 address = 0x03f6 */ + [0x0ee] = 0x07, /* Address block length = 8 */ + [0x0f0] = 0xf6, /* Field 2 address = 0x03f6 */ [0x0f2] = 0x03, - [0x0f4] = 0x01, /* Address block length = 2 */ - [0x0f6] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ - [0x0f8] = 0x20, /* TPCE_MI = support power down mode */ + [0x0f4] = 0x01, /* Address block length = 2 */ + [0x0f6] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ + [0x0f8] = 0x20, /* TPCE_MI = support power down mode */ - [0x0fa] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x0fc] = 0x06, /* Tuple length = 6 bytes */ - [0x0fe] = 0x02, /* TPCE_INDX = I/O Primary Mode, no Default */ - [0x100] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ - [0x102] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ - [0x104] = 0xb5, /* NomV: 3.3 V */ + [0x0fa] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0fc] = 0x06, /* Tuple length = 6 bytes */ + [0x0fe] = 0x02, /* TPCE_INDX = I/O Primary Mode, no Default */ + [0x100] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x102] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x104] = 0xb5, /* NomV: 3.3 V */ [0x106] = 0x1e, - [0x108] = 0x3e, /* Peakl: 350 mA */ + [0x108] = 0x3e, /* Peakl: 350 mA */ - [0x10a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x10c] = 0x12, /* Tuple length = 18 bytes */ - [0x10e] = 0xc3, /* TPCE_INDX = I/O Secondary Mode, Default */ - [0x110] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ - [0x112] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ - [0x114] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ - [0x116] = 0x55, /* NomV: 5.0 V */ - [0x118] = 0x4d, /* MinV: 4.5 V */ - [0x11a] = 0x5d, /* MaxV: 5.5 V */ - [0x11c] = 0x4e, /* Peakl: 450 mA */ - [0x11e] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ - [0x120] = 0x61, /* Range: 2 fields, 2 byte addr, 1 byte len */ - [0x122] = 0x70, /* Field 1 address = 0x0170 */ + [0x10a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x10c] = 0x12, /* Tuple length = 18 bytes */ + [0x10e] = 0xc3, /* TPCE_INDX = I/O Secondary Mode, Default */ + [0x110] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ + [0x112] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ + [0x114] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x116] = 0x55, /* NomV: 5.0 V */ + [0x118] = 0x4d, /* MinV: 4.5 V */ + [0x11a] = 0x5d, /* MaxV: 5.5 V */ + [0x11c] = 0x4e, /* Peakl: 450 mA */ + [0x11e] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ + [0x120] = 0x61, /* Range: 2 fields, 2 byte addr, 1 byte len */ + [0x122] = 0x70, /* Field 1 address = 0x0170 */ [0x124] = 0x01, - [0x126] = 
0x07, /* Address block length = 8 */ - [0x128] = 0x76, /* Field 2 address = 0x0376 */ + [0x126] = 0x07, /* Address block length = 8 */ + [0x128] = 0x76, /* Field 2 address = 0x0376 */ [0x12a] = 0x03, - [0x12c] = 0x01, /* Address block length = 2 */ - [0x12e] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ - [0x130] = 0x20, /* TPCE_MI = support power down mode */ + [0x12c] = 0x01, /* Address block length = 2 */ + [0x12e] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ + [0x130] = 0x20, /* TPCE_MI = support power down mode */ - [0x132] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x134] = 0x06, /* Tuple length = 6 bytes */ - [0x136] = 0x03, /* TPCE_INDX = I/O Secondary Mode */ - [0x138] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ - [0x13a] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ - [0x13c] = 0xb5, /* NomV: 3.3 V */ + [0x132] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x134] = 0x06, /* Tuple length = 6 bytes */ + [0x136] = 0x03, /* TPCE_INDX = I/O Secondary Mode */ + [0x138] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x13a] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x13c] = 0xb5, /* NomV: 3.3 V */ [0x13e] = 0x1e, - [0x140] = 0x3e, /* Peakl: 350 mA */ + [0x140] = 0x3e, /* Peakl: 350 mA */ - [0x142] = CISTPL_NO_LINK, /* No Link */ - [0x144] = 0x00, /* Tuple length = 0 bytes */ + [0x142] = CISTPL_NO_LINK, /* No Link */ + [0x144] = 0x00, /* Tuple length = 0 bytes */ - [0x146] = CISTPL_END, /* Tuple End */ + [0x146] = CISTPL_END, /* Tuple End */ }; #define TYPE_DSCM1XXXX "dscm1xxxx" @@ -565,7 +566,7 @@ PCMCIACardState *dscm1xxxx_init(DriveInfo *dinfo) qdev_realize(DEVICE(md), NULL, &error_fatal); if (dinfo != NULL) { - ide_create_drive(&md->bus, 0, dinfo); + ide_bus_create_drive(&md->bus, 0, dinfo); } md->bus.ifs[0].drive_kind = IDE_CFATA; md->bus.ifs[0].mdata_size = METADATA_SIZE; @@ -598,7 +599,7 @@ static void microdrive_realize(DeviceState *dev, Error **errp) { MicroDriveState *md = MICRODRIVE(dev); - ide_init2(&md->bus, qemu_allocate_irq(md_set_irq, md, 0)); + ide_bus_init_output_irq(&md->bus, qemu_allocate_irq(md_set_irq, md, 0)); } static void microdrive_init(Object *obj) diff --git a/hw/ide/mmio.c b/hw/ide/mmio.c index fb2ebd4847..3aeacab3bb 100644 --- a/hw/ide/mmio.c +++ b/hw/ide/mmio.c @@ -29,9 +29,9 @@ #include "qemu/module.h" #include "sysemu/dma.h" +#include "hw/ide/mmio.h" #include "hw/ide/internal.h" #include "hw/qdev-properties.h" -#include "qom/object.h" /***********************************************************/ /* MMIO based ide port @@ -39,11 +39,6 @@ * dedicated ide controller, which is often seen on embedded boards. 
*/ -#define TYPE_MMIO_IDE "mmio-ide" -typedef struct MMIOIDEState MMIOState; -DECLARE_INSTANCE_CHECKER(MMIOState, MMIO_IDE, - TYPE_MMIO_IDE) - struct MMIOIDEState { /*< private >*/ SysBusDevice parent_obj; @@ -58,7 +53,7 @@ struct MMIOIDEState { static void mmio_ide_reset(DeviceState *dev) { - MMIOState *s = MMIO_IDE(dev); + MMIOIDEState *s = MMIO_IDE(dev); ide_bus_reset(&s->bus); } @@ -66,7 +61,7 @@ static void mmio_ide_reset(DeviceState *dev) static uint64_t mmio_ide_read(void *opaque, hwaddr addr, unsigned size) { - MMIOState *s = opaque; + MMIOIDEState *s = opaque; addr >>= s->shift; if (addr & 7) return ide_ioport_read(&s->bus, addr); @@ -77,7 +72,7 @@ static uint64_t mmio_ide_read(void *opaque, hwaddr addr, static void mmio_ide_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { - MMIOState *s = opaque; + MMIOIDEState *s = opaque; addr >>= s->shift; if (addr & 7) ide_ioport_write(&s->bus, addr, val); @@ -94,14 +89,14 @@ static const MemoryRegionOps mmio_ide_ops = { static uint64_t mmio_ide_status_read(void *opaque, hwaddr addr, unsigned size) { - MMIOState *s= opaque; + MMIOIDEState *s = opaque; return ide_status_read(&s->bus, 0); } static void mmio_ide_ctrl_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { - MMIOState *s = opaque; + MMIOIDEState *s = opaque; ide_ctrl_write(&s->bus, 0, val); } @@ -116,8 +111,8 @@ static const VMStateDescription vmstate_ide_mmio = { .version_id = 3, .minimum_version_id = 0, .fields = (VMStateField[]) { - VMSTATE_IDE_BUS(bus, MMIOState), - VMSTATE_IDE_DRIVES(bus.ifs, MMIOState), + VMSTATE_IDE_BUS(bus, MMIOIDEState), + VMSTATE_IDE_DRIVES(bus.ifs, MMIOIDEState), VMSTATE_END_OF_LIST() } }; @@ -125,9 +120,9 @@ static const VMStateDescription vmstate_ide_mmio = { static void mmio_ide_realizefn(DeviceState *dev, Error **errp) { SysBusDevice *d = SYS_BUS_DEVICE(dev); - MMIOState *s = MMIO_IDE(dev); + MMIOIDEState *s = MMIO_IDE(dev); - ide_init2(&s->bus, s->irq); + ide_bus_init_output_irq(&s->bus, s->irq); memory_region_init_io(&s->iomem1, OBJECT(s), &mmio_ide_ops, s, "ide-mmio.1", 16 << s->shift); @@ -140,14 +135,14 @@ static void mmio_ide_realizefn(DeviceState *dev, Error **errp) static void mmio_ide_initfn(Object *obj) { SysBusDevice *d = SYS_BUS_DEVICE(obj); - MMIOState *s = MMIO_IDE(obj); + MMIOIDEState *s = MMIO_IDE(obj); ide_bus_init(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); sysbus_init_irq(d, &s->irq); } static Property mmio_ide_properties[] = { - DEFINE_PROP_UINT32("shift", MMIOState, shift, 0), + DEFINE_PROP_UINT32("shift", MMIOIDEState, shift, 0), DEFINE_PROP_END_OF_LIST() }; @@ -164,7 +159,7 @@ static void mmio_ide_class_init(ObjectClass *oc, void *data) static const TypeInfo mmio_ide_type_info = { .name = TYPE_MMIO_IDE, .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(MMIOState), + .instance_size = sizeof(MMIOIDEState), .instance_init = mmio_ide_initfn, .class_init = mmio_ide_class_init, }; @@ -176,13 +171,13 @@ static void mmio_ide_register_types(void) void mmio_ide_init_drives(DeviceState *dev, DriveInfo *hd0, DriveInfo *hd1) { - MMIOState *s = MMIO_IDE(dev); + MMIOIDEState *s = MMIO_IDE(dev); if (hd0 != NULL) { - ide_create_drive(&s->bus, 0, hd0); + ide_bus_create_drive(&s->bus, 0, hd0); } if (hd1 != NULL) { - ide_create_drive(&s->bus, 1, hd1); + ide_bus_create_drive(&s->bus, 1, hd1); } } diff --git a/hw/ide/pci.c b/hw/ide/pci.c index 84ba733548..fc9224bbc9 100644 --- a/hw/ide/pci.c +++ b/hw/ide/pci.c @@ -24,6 +24,7 @@ */ #include "qemu/osdep.h" +#include "hw/irq.h" #include "hw/pci/pci.h" #include 
"migration/vmstate.h" #include "sysemu/dma.h" @@ -103,6 +104,12 @@ const MemoryRegionOps pci_ide_data_le_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; +static IDEState *bmdma_active_if(BMDMAState *bmdma) +{ + assert(bmdma->bus->retry_unit != (uint8_t)-1); + return bmdma->bus->ifs + bmdma->bus->retry_unit; +} + static void bmdma_start_dma(const IDEDMA *dma, IDEState *s, BlockCompletionFunc *dma_cb) { @@ -295,7 +302,7 @@ void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val) /* Ignore writes to SSBM if it keeps the old value */ if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) { if (!(val & BM_CMD_START)) { - ide_cancel_dma_sync(idebus_active_if(bm->bus)); + ide_cancel_dma_sync(ide_bus_active_if(bm->bus)); bm->status &= ~BM_STATUS_DMAING; } else { bm->cur_addr = bm->addr; @@ -488,7 +495,7 @@ void pci_ide_create_devs(PCIDevice *dev) ide_drive_get(hd_table, ARRAY_SIZE(hd_table)); for (i = 0; i < 4; i++) { if (hd_table[i]) { - ide_create_drive(d->bus + bus[i], unit[i], hd_table[i]); + ide_bus_create_drive(d->bus + bus[i], unit[i], hd_table[i]); } } } diff --git a/hw/ide/piix.c b/hw/ide/piix.c index 267dbf37db..41d60921e3 100644 --- a/hw/ide/piix.c +++ b/hw/ide/piix.c @@ -28,14 +28,9 @@ */ #include "qemu/osdep.h" -#include "hw/pci/pci.h" #include "migration/vmstate.h" #include "qapi/error.h" -#include "qemu/module.h" -#include "sysemu/block-backend.h" -#include "sysemu/blockdev.h" -#include "sysemu/dma.h" - +#include "hw/pci/pci.h" #include "hw/ide/piix.h" #include "hw/ide/pci.h" #include "trace.h" @@ -126,7 +121,7 @@ static void piix_ide_reset(DeviceState *dev) pci_set_byte(pci_conf + 0x20, 0x01); /* BMIBA: 20-23h */ } -static int pci_piix_init_ports(PCIIDEState *d) +static bool pci_piix_init_bus(PCIIDEState *d, unsigned i, Error **errp) { static const struct { int iobase; @@ -136,30 +131,29 @@ static int pci_piix_init_ports(PCIIDEState *d) {0x1f0, 0x3f6, 14}, {0x170, 0x376, 15}, }; - int i, ret; + int ret; - for (i = 0; i < 2; i++) { - ide_bus_init(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2); - ret = ide_init_ioport(&d->bus[i], NULL, port_info[i].iobase, - port_info[i].iobase2); - if (ret) { - return ret; - } - ide_init2(&d->bus[i], isa_get_irq(NULL, port_info[i].isairq)); - - bmdma_init(&d->bus[i], &d->bmdma[i], d); - d->bmdma[i].bus = &d->bus[i]; - ide_register_restart_cb(&d->bus[i]); + ide_bus_init(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2); + ret = ide_init_ioport(&d->bus[i], NULL, port_info[i].iobase, + port_info[i].iobase2); + if (ret) { + error_setg_errno(errp, -ret, "Failed to realize %s port %u", + object_get_typename(OBJECT(d)), i); + return false; } + ide_bus_init_output_irq(&d->bus[i], isa_get_irq(NULL, port_info[i].isairq)); - return 0; + bmdma_init(&d->bus[i], &d->bmdma[i], d); + d->bmdma[i].bus = &d->bus[i]; + ide_bus_register_restart_cb(&d->bus[i]); + + return true; } static void pci_piix_ide_realize(PCIDevice *dev, Error **errp) { PCIIDEState *d = PCI_IDE(dev); uint8_t *pci_conf = dev->config; - int rc; pci_conf[PCI_CLASS_PROG] = 0x80; // legacy ATA mode @@ -168,10 +162,10 @@ static void pci_piix_ide_realize(PCIDevice *dev, Error **errp) vmstate_register(VMSTATE_IF(dev), 0, &vmstate_ide_pci, d); - rc = pci_piix_init_ports(d); - if (rc) { - error_setg_errno(errp, -rc, "Failed to realize %s", - object_get_typename(OBJECT(dev))); + for (unsigned i = 0; i < 2; i++) { + if (!pci_piix_init_bus(d, i, errp)) { + return; + } } } diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c index 6f6c7462f3..1b3b4da01d 100644 --- a/hw/ide/qdev.c +++ b/hw/ide/qdev.c @@ -124,7 +124,7 @@ static 
void ide_qdev_realize(DeviceState *qdev, Error **errp) dc->realize(dev, errp); } -IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive) +IDEDevice *ide_bus_create_drive(IDEBus *bus, int unit, DriveInfo *drive) { DeviceState *dev; diff --git a/hw/ide/sii3112.c b/hw/ide/sii3112.c index 46204f10d7..f9becdff8e 100644 --- a/hw/ide/sii3112.c +++ b/hw/ide/sii3112.c @@ -284,11 +284,11 @@ static void sii3112_pci_realize(PCIDevice *dev, Error **errp) qdev_init_gpio_in(ds, sii3112_set_irq, 2); for (i = 0; i < 2; i++) { ide_bus_init(&s->bus[i], sizeof(s->bus[i]), ds, i, 1); - ide_init2(&s->bus[i], qdev_get_gpio_in(ds, i)); + ide_bus_init_output_irq(&s->bus[i], qdev_get_gpio_in(ds, i)); bmdma_init(&s->bus[i], &s->bmdma[i], s); s->bmdma[i].bus = &s->bus[i]; - ide_register_restart_cb(&s->bus[i]); + ide_bus_register_restart_cb(&s->bus[i]); } } diff --git a/hw/ide/trace-events b/hw/ide/trace-events index 15d7921f15..57042cafdd 100644 --- a/hw/ide/trace-events +++ b/hw/ide/trace-events @@ -12,7 +12,7 @@ ide_data_writew(uint32_t addr, uint32_t val, void *bus, void *s) ide_data_readl(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO rd @ 0x%"PRIx32" (Data: Long); val 0x%08"PRIx32"; bus %p; IDEState %p" ide_data_writel(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO wr @ 0x%"PRIx32" (Data: Long); val 0x%08"PRIx32"; bus %p; IDEState %p" # misc -ide_exec_cmd(void *bus, void *state, uint32_t cmd) "IDE exec cmd: bus %p; state %p; cmd 0x%02x" +ide_bus_exec_cmd(void *bus, void *state, uint32_t cmd) "IDE exec cmd: bus %p; state %p; cmd 0x%02x" ide_cancel_dma_sync_buffered(void *fn, void *req) "invoking cb %p of buffered request %p with -ECANCELED" ide_cancel_dma_sync_remaining(void) "draining all remaining requests" ide_sector_read(int64_t sector_num, int nsectors) "sector=%"PRId64" nsectors=%d" @@ -91,6 +91,7 @@ ahci_populate_sglist_short_map(void *s, int port) "ahci(%p)[%d]: mapped less tha ahci_populate_sglist_bad_offset(void *s, int port, int off_idx, int64_t off_pos) "ahci(%p)[%d]: Incorrect offset! 
off_idx: %d, off_pos: %"PRId64 ncq_finish(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: NCQ transfer finished" execute_ncq_command_read(void *s, int port, uint8_t tag, int count, int64_t lba) "ahci(%p)[%d][tag:%d]: NCQ reading %d sectors from LBA %"PRId64 +execute_ncq_command_write(void *s, int port, uint8_t tag, int count, int64_t lba) "ahci(%p)[%d][tag:%d]: NCQ writing %d sectors to LBA %"PRId64 execute_ncq_command_unsup(void *s, int port, uint8_t tag, uint8_t cmd) "ahci(%p)[%d][tag:%d]: error: unsupported NCQ command (0x%02x) received" process_ncq_command_mismatch(void *s, int port, uint8_t tag, uint8_t slot) "ahci(%p)[%d][tag:%d]: Warning: NCQ slot (%d) did not match the given tag" process_ncq_command_aux(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: Warn: Attempt to use NCQ auxiliary fields" diff --git a/hw/ide/via.c b/hw/ide/via.c index e1a429405d..177baea9a7 100644 --- a/hw/ide/via.c +++ b/hw/ide/via.c @@ -90,7 +90,7 @@ static void bmdma_setup_bar(PCIIDEState *d) int i; memory_region_init(&d->bmdma_bar, OBJECT(d), "via-bmdma-container", 16); - for(i = 0;i < 2; i++) { + for (i = 0; i < ARRAY_SIZE(d->bmdma); i++) { BMDMAState *bm = &d->bmdma[i]; memory_region_init_io(&bm->extra_io, OBJECT(d), &via_bmdma_ops, bm, @@ -122,7 +122,7 @@ static void via_ide_reset(DeviceState *dev) uint8_t *pci_conf = pd->config; int i; - for (i = 0; i < 2; i++) { + for (i = 0; i < ARRAY_SIZE(d->bus); i++) { ide_bus_reset(&d->bus[i]); } @@ -188,14 +188,14 @@ static void via_ide_realize(PCIDevice *dev, Error **errp) bmdma_setup_bar(d); pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar); - qdev_init_gpio_in(ds, via_ide_set_irq, 2); - for (i = 0; i < 2; i++) { - ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2); - ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i)); + qdev_init_gpio_in(ds, via_ide_set_irq, ARRAY_SIZE(d->bus)); + for (i = 0; i < ARRAY_SIZE(d->bus); i++) { + ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, MAX_IDE_DEVS); + ide_bus_init_output_irq(&d->bus[i], qdev_get_gpio_in(ds, i)); bmdma_init(&d->bus[i], &d->bmdma[i], d); d->bmdma[i].bus = &d->bus[i]; - ide_register_restart_cb(&d->bus[i]); + ide_bus_register_restart_cb(&d->bus[i]); } } @@ -204,7 +204,7 @@ static void via_ide_exitfn(PCIDevice *dev) PCIIDEState *d = PCI_IDE(dev); unsigned i; - for (i = 0; i < 2; ++i) { + for (i = 0; i < ARRAY_SIZE(d->bmdma); ++i) { memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].extra_io); memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].addr_ioport); } diff --git a/hw/input/ads7846.c b/hw/input/ads7846.c index 1d4e04a2dc..dc0998ac79 100644 --- a/hw/input/ads7846.c +++ b/hw/input/ads7846.c @@ -34,28 +34,28 @@ struct ADS7846State { OBJECT_DECLARE_SIMPLE_TYPE(ADS7846State, ADS7846) /* Control-byte bitfields */ -#define CB_PD0 (1 << 0) -#define CB_PD1 (1 << 1) -#define CB_SER (1 << 2) -#define CB_MODE (1 << 3) -#define CB_A0 (1 << 4) -#define CB_A1 (1 << 5) -#define CB_A2 (1 << 6) -#define CB_START (1 << 7) +#define CB_PD0 (1 << 0) +#define CB_PD1 (1 << 1) +#define CB_SER (1 << 2) +#define CB_MODE (1 << 3) +#define CB_A0 (1 << 4) +#define CB_A1 (1 << 5) +#define CB_A2 (1 << 6) +#define CB_START (1 << 7) -#define X_AXIS_DMAX 3470 -#define X_AXIS_MIN 290 -#define Y_AXIS_DMAX 3450 -#define Y_AXIS_MIN 200 +#define X_AXIS_DMAX 3470 +#define X_AXIS_MIN 290 +#define Y_AXIS_DMAX 3450 +#define Y_AXIS_MIN 200 -#define ADS_VBAT 2000 -#define ADS_VAUX 2000 -#define ADS_TEMP0 2000 -#define ADS_TEMP1 3000 -#define ADS_XPOS(x, y) (X_AXIS_MIN + ((X_AXIS_DMAX * (x)) >> 15)) -#define 
ADS_YPOS(x, y) (Y_AXIS_MIN + ((Y_AXIS_DMAX * (y)) >> 15)) -#define ADS_Z1POS(x, y) 600 -#define ADS_Z2POS(x, y) (600 + 6000 / ADS_XPOS(x, y)) +#define ADS_VBAT 2000 +#define ADS_VAUX 2000 +#define ADS_TEMP0 2000 +#define ADS_TEMP1 3000 +#define ADS_XPOS(x, y) (X_AXIS_MIN + ((X_AXIS_DMAX * (x)) >> 15)) +#define ADS_YPOS(x, y) (Y_AXIS_MIN + ((Y_AXIS_DMAX * (y)) >> 15)) +#define ADS_Z1POS(x, y) 600 +#define ADS_Z2POS(x, y) (600 + 6000 / ADS_XPOS(x, y)) static void ads7846_int_update(ADS7846State *s) { @@ -86,7 +86,7 @@ static uint32_t ads7846_transfer(SSIPeripheral *dev, uint32_t value) } if (value & CB_MODE) - s->output >>= 4; /* 8 bits instead of 12 */ + s->output >>= 4; /* 8 bits instead of 12 */ break; case 1: @@ -147,10 +147,10 @@ static void ads7846_realize(SSIPeripheral *d, Error **errp) qdev_init_gpio_out(dev, &s->interrupt, 1); - s->input[0] = ADS_TEMP0; /* TEMP0 */ - s->input[2] = ADS_VBAT; /* VBAT */ - s->input[6] = ADS_VAUX; /* VAUX */ - s->input[7] = ADS_TEMP1; /* TEMP1 */ + s->input[0] = ADS_TEMP0; /* TEMP0 */ + s->input[2] = ADS_VBAT; /* VBAT */ + s->input[6] = ADS_VAUX; /* VAUX */ + s->input[7] = ADS_TEMP1; /* TEMP1 */ /* We want absolute coordinates */ qemu_add_mouse_event_handler(ads7846_ts_event, s, 1, diff --git a/hw/input/ps2.c b/hw/input/ps2.c index 3253ab6a92..45af76a837 100644 --- a/hw/input/ps2.c +++ b/hw/input/ps2.c @@ -402,6 +402,9 @@ static void ps2_keyboard_event(DeviceState *dev, QemuConsole *src, ps2_put_keycode(s, 0xaa); } } + } else if ((qcode == Q_KEY_CODE_LANG1 || qcode == Q_KEY_CODE_LANG2) + && !key->down) { + /* Ignore release for these keys */ } else { if (qcode < qemu_input_map_qcode_to_atset1_len) { keycode = qemu_input_map_qcode_to_atset1[qcode]; @@ -497,6 +500,9 @@ static void ps2_keyboard_event(DeviceState *dev, QemuConsole *src, ps2_put_keycode(s, 0x12); } } + } else if ((qcode == Q_KEY_CODE_LANG1 || qcode == Q_KEY_CODE_LANG2) && + !key->down) { + /* Ignore release for these keys */ } else { if (qcode < qemu_input_map_qcode_to_atset2_len) { keycode = qemu_input_map_qcode_to_atset2[qcode]; diff --git a/hw/input/virtio-input-hid.c b/hw/input/virtio-input-hid.c index a7a244a95d..7053ad72d4 100644 --- a/hw/input/virtio-input-hid.c +++ b/hw/input/virtio-input-hid.c @@ -16,9 +16,10 @@ #include "standard-headers/linux/input.h" -#define VIRTIO_ID_NAME_KEYBOARD "QEMU Virtio Keyboard" -#define VIRTIO_ID_NAME_MOUSE "QEMU Virtio Mouse" -#define VIRTIO_ID_NAME_TABLET "QEMU Virtio Tablet" +#define VIRTIO_ID_NAME_KEYBOARD "QEMU Virtio Keyboard" +#define VIRTIO_ID_NAME_MOUSE "QEMU Virtio Mouse" +#define VIRTIO_ID_NAME_TABLET "QEMU Virtio Tablet" +#define VIRTIO_ID_NAME_MULTITOUCH "QEMU Virtio MultiTouch" /* ----------------------------------------------------------------- */ @@ -30,6 +31,7 @@ static const unsigned short keymap_button[INPUT_BUTTON__MAX] = { [INPUT_BUTTON_WHEEL_DOWN] = BTN_GEAR_DOWN, [INPUT_BUTTON_SIDE] = BTN_SIDE, [INPUT_BUTTON_EXTRA] = BTN_EXTRA, + [INPUT_BUTTON_TOUCH] = BTN_TOUCH, }; static const unsigned short axismap_rel[INPUT_AXIS__MAX] = { @@ -42,32 +44,38 @@ static const unsigned short axismap_abs[INPUT_AXIS__MAX] = { [INPUT_AXIS_Y] = ABS_Y, }; +static const unsigned short axismap_tch[INPUT_AXIS__MAX] = { + [INPUT_AXIS_X] = ABS_MT_POSITION_X, + [INPUT_AXIS_Y] = ABS_MT_POSITION_Y, +}; + /* ----------------------------------------------------------------- */ -static void virtio_input_key_config(VirtIOInput *vinput, - const unsigned short *keymap, - size_t mapsize) +static void virtio_input_extend_config(VirtIOInput *vinput, + const unsigned 
short *map, + size_t mapsize, + uint8_t select, uint8_t subsel) { - virtio_input_config keys; + virtio_input_config ext; int i, bit, byte, bmax = 0; - memset(&keys, 0, sizeof(keys)); + memset(&ext, 0, sizeof(ext)); for (i = 0; i < mapsize; i++) { - bit = keymap[i]; + bit = map[i]; if (!bit) { continue; } byte = bit / 8; bit = bit % 8; - keys.u.bitmap[byte] |= (1 << bit); + ext.u.bitmap[byte] |= (1 << bit); if (bmax < byte+1) { bmax = byte+1; } } - keys.select = VIRTIO_INPUT_CFG_EV_BITS; - keys.subsel = EV_KEY; - keys.size = bmax; - virtio_input_add_config(vinput, &keys); + ext.select = select; + ext.subsel = subsel; + ext.size = bmax; + virtio_input_add_config(vinput, &ext); } static void virtio_input_handle_event(DeviceState *dev, QemuConsole *src, @@ -80,6 +88,7 @@ static void virtio_input_handle_event(DeviceState *dev, QemuConsole *src, InputKeyEvent *key; InputMoveEvent *move; InputBtnEvent *btn; + InputMultiTouchEvent *mtt; switch (evt->type) { case INPUT_EVENT_KIND_KEY: @@ -136,6 +145,24 @@ static void virtio_input_handle_event(DeviceState *dev, QemuConsole *src, event.value = cpu_to_le32(move->value); virtio_input_send(vinput, &event); break; + case INPUT_EVENT_KIND_MTT: + mtt = evt->u.mtt.data; + if (mtt->type == INPUT_MULTI_TOUCH_TYPE_DATA) { + event.type = cpu_to_le16(EV_ABS); + event.code = cpu_to_le16(axismap_tch[mtt->axis]); + event.value = cpu_to_le32(mtt->value); + virtio_input_send(vinput, &event); + } else { + event.type = cpu_to_le16(EV_ABS); + event.code = cpu_to_le16(ABS_MT_SLOT); + event.value = cpu_to_le32(mtt->slot); + virtio_input_send(vinput, &event); + event.type = cpu_to_le16(EV_ABS); + event.code = cpu_to_le16(ABS_MT_TRACKING_ID); + event.value = cpu_to_le32(mtt->tracking_id); + virtio_input_send(vinput, &event); + } + break; default: /* keep gcc happy */ break; @@ -281,8 +308,9 @@ static void virtio_keyboard_init(Object *obj) vhid->handler = &virtio_keyboard_handler; virtio_input_init_config(vinput, virtio_keyboard_config); - virtio_input_key_config(vinput, qemu_input_map_qcode_to_linux, - qemu_input_map_qcode_to_linux_len); + virtio_input_extend_config(vinput, qemu_input_map_qcode_to_linux, + qemu_input_map_qcode_to_linux_len, + VIRTIO_INPUT_CFG_EV_BITS, EV_KEY); } static const TypeInfo virtio_keyboard_info = { @@ -373,8 +401,9 @@ static void virtio_mouse_init(Object *obj) virtio_input_init_config(vinput, vhid->wheel_axis ? virtio_mouse_config_v2 : virtio_mouse_config_v1); - virtio_input_key_config(vinput, keymap_button, - ARRAY_SIZE(keymap_button)); + virtio_input_extend_config(vinput, keymap_button, + ARRAY_SIZE(keymap_button), + VIRTIO_INPUT_CFG_EV_BITS, EV_KEY); } static const TypeInfo virtio_mouse_info = { @@ -497,8 +526,9 @@ static void virtio_tablet_init(Object *obj) virtio_input_init_config(vinput, vhid->wheel_axis ? 
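[Editor's note: virtio_input_extend_config() above generalizes the old key-only helper: any list of event codes can now be folded into a config entry for a chosen select/subsel pair, which is what lets the new multitouch device advertise EV_ABS codes and INPUT_PROP_DIRECT further down. Below is a standalone sketch of the bitmap packing it performs, using plain arrays in place of virtio_input_config.]

#include <stdio.h>
#include <string.h>

#define BTN_LEFT  0x110 /* values from linux input-event-codes.h */
#define BTN_TOUCH 0x14a

int main(void)
{
    const unsigned short map[] = { BTN_LEFT, BTN_TOUCH };
    unsigned char bitmap[128] = { 0 };
    int bmax = 0;

    for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
        int bit = map[i];
        if (!bit) {
            continue; /* zero entries mean "no code", as in the patch */
        }
        bitmap[bit / 8] |= 1 << (bit % 8);
        if (bmax < bit / 8 + 1) {
            bmax = bit / 8 + 1; /* config only carries the used prefix */
        }
    }
    printf("config size = %d bytes\n", bmax); /* 42, since BTN_TOUCH is bit 0x14a */
    return 0;
}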
virtio_tablet_config_v2 : virtio_tablet_config_v1); - virtio_input_key_config(vinput, keymap_button, - ARRAY_SIZE(keymap_button)); + virtio_input_extend_config(vinput, keymap_button, + ARRAY_SIZE(keymap_button), + VIRTIO_INPUT_CFG_EV_BITS, EV_KEY); } static const TypeInfo virtio_tablet_info = { @@ -511,12 +541,98 @@ static const TypeInfo virtio_tablet_info = { /* ----------------------------------------------------------------- */ +static QemuInputHandler virtio_multitouch_handler = { + .name = VIRTIO_ID_NAME_MULTITOUCH, + .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_MTT, + .event = virtio_input_handle_event, + .sync = virtio_input_handle_sync, +}; + +static struct virtio_input_config virtio_multitouch_config[] = { + { + .select = VIRTIO_INPUT_CFG_ID_NAME, + .size = sizeof(VIRTIO_ID_NAME_MULTITOUCH), + .u.string = VIRTIO_ID_NAME_MULTITOUCH, + },{ + .select = VIRTIO_INPUT_CFG_ID_DEVIDS, + .size = sizeof(struct virtio_input_devids), + .u.ids = { + .bustype = const_le16(BUS_VIRTUAL), + .vendor = const_le16(0x0627), /* same we use for usb hid devices */ + .product = const_le16(0x0003), + .version = const_le16(0x0001), + }, + },{ + .select = VIRTIO_INPUT_CFG_ABS_INFO, + .subsel = ABS_MT_SLOT, + .size = sizeof(virtio_input_absinfo), + .u.abs.min = const_le32(INPUT_EVENT_SLOTS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_SLOTS_MAX), + },{ + .select = VIRTIO_INPUT_CFG_ABS_INFO, + .subsel = ABS_MT_TRACKING_ID, + .size = sizeof(virtio_input_absinfo), + .u.abs.min = const_le32(INPUT_EVENT_SLOTS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_SLOTS_MAX), + },{ + .select = VIRTIO_INPUT_CFG_ABS_INFO, + .subsel = ABS_MT_POSITION_X, + .size = sizeof(virtio_input_absinfo), + .u.abs.min = const_le32(INPUT_EVENT_ABS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_ABS_MAX), + },{ + .select = VIRTIO_INPUT_CFG_ABS_INFO, + .subsel = ABS_MT_POSITION_Y, + .size = sizeof(virtio_input_absinfo), + .u.abs.min = const_le32(INPUT_EVENT_ABS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_ABS_MAX), + }, + { /* end of list */ }, +}; + +static void virtio_multitouch_init(Object *obj) +{ + VirtIOInputHID *vhid = VIRTIO_INPUT_HID(obj); + VirtIOInput *vinput = VIRTIO_INPUT(obj); + unsigned short abs_props[] = { + INPUT_PROP_DIRECT, + }; + unsigned short abs_bits[] = { + ABS_MT_SLOT, + ABS_MT_TRACKING_ID, + ABS_MT_POSITION_X, + ABS_MT_POSITION_Y, + }; + + vhid->handler = &virtio_multitouch_handler; + virtio_input_init_config(vinput, virtio_multitouch_config); + virtio_input_extend_config(vinput, keymap_button, + ARRAY_SIZE(keymap_button), + VIRTIO_INPUT_CFG_EV_BITS, EV_KEY); + virtio_input_extend_config(vinput, abs_props, + ARRAY_SIZE(abs_props), + VIRTIO_INPUT_CFG_PROP_BITS, 0); + virtio_input_extend_config(vinput, abs_bits, + ARRAY_SIZE(abs_bits), + VIRTIO_INPUT_CFG_EV_BITS, EV_ABS); +} + +static const TypeInfo virtio_multitouch_info = { + .name = TYPE_VIRTIO_MULTITOUCH, + .parent = TYPE_VIRTIO_INPUT_HID, + .instance_size = sizeof(VirtIOInputHID), + .instance_init = virtio_multitouch_init, +}; + +/* ----------------------------------------------------------------- */ + static void virtio_register_types(void) { type_register_static(&virtio_input_hid_info); type_register_static(&virtio_keyboard_info); type_register_static(&virtio_mouse_info); type_register_static(&virtio_tablet_info); + type_register_static(&virtio_multitouch_info); } type_init(virtio_register_types) diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c index 8cca124807..4875e68ba6 100644 --- a/hw/intc/allwinner-a10-pic.c +++ 
b/hw/intc/allwinner-a10-pic.c @@ -49,12 +49,9 @@ static void aw_a10_pic_update(AwA10PICState *s) static void aw_a10_pic_set_irq(void *opaque, int irq, int level) { AwA10PICState *s = opaque; + uint32_t *pending_reg = &s->irq_pending[irq / 32]; - if (level) { - set_bit(irq % 32, (void *)&s->irq_pending[irq / 32]); - } else { - clear_bit(irq % 32, (void *)&s->irq_pending[irq / 32]); - } + *pending_reg = deposit32(*pending_reg, irq % 32, 1, level); aw_a10_pic_update(s); } diff --git a/hw/intc/apic.c b/hw/intc/apic.c index 2d3e55f4e2..ac3d47d231 100644 --- a/hw/intc/apic.c +++ b/hw/intc/apic.c @@ -18,9 +18,10 @@ */ #include "qemu/osdep.h" #include "qemu/thread.h" +#include "qemu/error-report.h" #include "hw/i386/apic_internal.h" #include "hw/i386/apic.h" -#include "hw/i386/ioapic.h" +#include "hw/intc/ioapic.h" #include "hw/intc/i8259.h" #include "hw/intc/kvm_irqcount.h" #include "hw/pci/msi.h" @@ -884,6 +885,13 @@ static void apic_realize(DeviceState *dev, Error **errp) memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi", APIC_SPACE_SIZE); + /* + * apic-msi's apic_mem_write can call into ioapic_eoi_broadcast, which can + * write back to apic-msi. As such mark the apic-msi region re-entrancy + * safe. + */ + s->io_memory.disable_reentrancy_guard = true; + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s); local_apics[s->id] = s; diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c index 0261f087b2..bbae2d87f4 100644 --- a/hw/intc/i8259.c +++ b/hw/intc/i8259.c @@ -133,7 +133,7 @@ static void pic_set_irq(void *opaque, int irq, int level) } #endif - if (s->elcr & mask) { + if (s->ltim || (s->elcr & mask)) { /* level triggered */ if (level) { s->irr |= mask; @@ -167,7 +167,7 @@ static void pic_intack(PICCommonState *s, int irq) s->isr |= (1 << irq); } /* We don't clear a level sensitive interrupt here */ - if (!(s->elcr & (1 << irq))) { + if (!s->ltim && !(s->elcr & (1 << irq))) { s->irr &= ~(1 << irq); } pic_update_irq(s); @@ -224,6 +224,7 @@ static void pic_reset(DeviceState *dev) PICCommonState *s = PIC_COMMON(dev); s->elcr = 0; + s->ltim = 0; pic_init_reset(s); } @@ -243,10 +244,7 @@ static void pic_ioport_write(void *opaque, hwaddr addr64, s->init_state = 1; s->init4 = val & 1; s->single_mode = val & 2; - if (val & 0x08) { - qemu_log_mask(LOG_UNIMP, - "i8259: level sensitive irq not supported\n"); - } + s->ltim = val & 8; } else if (val & 0x08) { if (val & 0x04) { s->poll = 1; @@ -406,7 +404,7 @@ static void pic_realize(DeviceState *dev, Error **errp) pc->parent_realize(dev, errp); } -qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq) +qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq_in) { qemu_irq *irq_set; DeviceState *dev; @@ -418,7 +416,7 @@ qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq) isadev = i8259_init_chip(TYPE_I8259, bus, true); dev = DEVICE(isadev); - qdev_connect_gpio_out(dev, 0, parent_irq); + qdev_connect_gpio_out(dev, 0, parent_irq_in); for (i = 0 ; i < 8; i++) { irq_set[i] = qdev_get_gpio_in(dev, i); } diff --git a/hw/intc/i8259_common.c b/hw/intc/i8259_common.c index af2e4a2241..c931dc2d07 100644 --- a/hw/intc/i8259_common.c +++ b/hw/intc/i8259_common.c @@ -51,7 +51,7 @@ void pic_reset_common(PICCommonState *s) s->special_fully_nested_mode = 0; s->init4 = 0; s->single_mode = 0; - /* Note: ELCR is not reset */ + /* Note: ELCR and LTIM are not reset */ } static int pic_dispatch_pre_save(void *opaque) @@ -144,6 +144,24 @@ static void pic_print_info(InterruptStatsProvider *obj, Monitor *mon) s->special_fully_nested_mode); } +static 
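[Editor's note: the i8259 hunks above implement ICW1 bit 3 (LTIM), which switches the whole controller to level-triggered mode alongside the existing per-pin ELCR. Because ltim is new guest-visible state, it cannot simply be appended to the existing vmstate fields without breaking migration to older QEMU; instead it travels in a VMState subsection that is only put on the wire when its needed() callback returns true, i.e. when a guest has actually enabled LTIM. The callback and subsection read as follows:]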
bool ltim_state_needed(void *opaque) +{ + PICCommonState *s = PIC_COMMON(opaque); + + return !!s->ltim; +} + +static const VMStateDescription vmstate_pic_ltim = { + .name = "i8259/ltim", + .version_id = 1, + .minimum_version_id = 1, + .needed = ltim_state_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(ltim, PICCommonState), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_pic_common = { .name = "i8259", .version_id = 1, @@ -168,6 +186,10 @@ static const VMStateDescription vmstate_pic_common = { VMSTATE_UINT8(single_mode, PICCommonState), VMSTATE_UINT8(elcr, PICCommonState), VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_pic_ltim, + NULL } }; diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c index 264262959d..716ffc8bbb 100644 --- a/hw/intc/ioapic.c +++ b/hw/intc/ioapic.c @@ -24,10 +24,10 @@ #include "qapi/error.h" #include "monitor/monitor.h" #include "hw/i386/apic.h" -#include "hw/i386/ioapic.h" -#include "hw/i386/ioapic_internal.h" #include "hw/i386/x86.h" #include "hw/intc/i8259.h" +#include "hw/intc/ioapic.h" +#include "hw/intc/ioapic_internal.h" #include "hw/pci/msi.h" #include "hw/qdev-properties.h" #include "sysemu/kvm.h" @@ -405,6 +405,7 @@ ioapic_mem_write(void *opaque, hwaddr addr, uint64_t val, s->ioredtbl[index] |= ro_bits; s->irq_eoi[index] = 0; ioapic_fix_edge_remote_irr(&s->ioredtbl[index]); + ioapic_update_kvm_routes(s); ioapic_service(s); } } @@ -417,8 +418,6 @@ ioapic_mem_write(void *opaque, hwaddr addr, uint64_t val, ioapic_eoi_broadcast(val); break; } - - ioapic_update_kvm_routes(s); } static const MemoryRegionOps ioapic_io_ops = { diff --git a/hw/intc/ioapic_common.c b/hw/intc/ioapic_common.c index aa5f760871..b05f436dac 100644 --- a/hw/intc/ioapic_common.c +++ b/hw/intc/ioapic_common.c @@ -24,9 +24,9 @@ #include "qemu/module.h" #include "migration/vmstate.h" #include "monitor/monitor.h" -#include "hw/i386/ioapic.h" -#include "hw/i386/ioapic_internal.h" #include "hw/intc/intc.h" +#include "hw/intc/ioapic.h" +#include "hw/intc/ioapic_internal.h" #include "hw/sysbus.h" /* ioapic_no count start from 0 to MAX_IOAPICS, diff --git a/include/hw/i386/ioapic_internal.h b/hw/intc/ioapic_internal.h similarity index 96% rename from include/hw/i386/ioapic_internal.h rename to hw/intc/ioapic_internal.h index e8ff338d7f..37b8565539 100644 --- a/include/hw/i386/ioapic_internal.h +++ b/hw/intc/ioapic_internal.h @@ -19,11 +19,11 @@ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ -#ifndef QEMU_IOAPIC_INTERNAL_H -#define QEMU_IOAPIC_INTERNAL_H +#ifndef HW_INTC_IOAPIC_INTERNAL_H +#define HW_INTC_IOAPIC_INTERNAL_H #include "exec/memory.h" -#include "hw/i386/ioapic.h" +#include "hw/intc/ioapic.h" #include "hw/sysbus.h" #include "qemu/notify.h" #include "qom/object.h" @@ -115,4 +115,4 @@ void ioapic_reset_common(DeviceState *dev); void ioapic_stat_update_irq(IOAPICCommonState *s, int irq, int level); -#endif /* QEMU_IOAPIC_INTERNAL_H */ +#endif /* HW_INTC_IOAPIC_INTERNAL_H */ diff --git a/hw/intc/loongarch_extioi.c b/hw/intc/loongarch_extioi.c index 4b8ec3f28a..0e7a3e32f3 100644 --- a/hw/intc/loongarch_extioi.c +++ b/hw/intc/loongarch_extioi.c @@ -254,7 +254,7 @@ static const VMStateDescription vmstate_loongarch_extioi = { .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_UINT32_ARRAY(bounce, LoongArchExtIOI, EXTIOI_IRQS_GROUP_COUNT), - VMSTATE_UINT32_2DARRAY(coreisr, LoongArchExtIOI, LOONGARCH_MAX_VCPUS, + VMSTATE_UINT32_2DARRAY(coreisr, LoongArchExtIOI, EXTIOI_CPUS, EXTIOI_IRQS_GROUP_COUNT), VMSTATE_UINT32_ARRAY(nodetype, LoongArchExtIOI, EXTIOI_IRQS_NODETYPE_COUNT / 2), @@ -281,7 +281,7 @@ static void loongarch_extioi_instance_init(Object *obj) qdev_init_gpio_in(DEVICE(obj), extioi_setirq, EXTIOI_IRQS); - for (cpu = 0; cpu < LOONGARCH_MAX_VCPUS; cpu++) { + for (cpu = 0; cpu < EXTIOI_CPUS; cpu++) { memory_region_init_io(&s->extioi_iocsr_mem[cpu], OBJECT(s), &extioi_ops, s, "extioi_iocsr", 0x900); sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->extioi_iocsr_mem[cpu]); diff --git a/hw/intc/loongarch_ipi.c b/hw/intc/loongarch_ipi.c index aa4bf9eb74..d6ab91721e 100644 --- a/hw/intc/loongarch_ipi.c +++ b/hw/intc/loongarch_ipi.c @@ -50,7 +50,7 @@ static uint64_t loongarch_ipi_readl(void *opaque, hwaddr addr, unsigned size) return ret; } -static void send_ipi_data(CPULoongArchState *env, target_ulong val, target_ulong addr) +static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr) { int i, mask = 0, data = 0; @@ -77,31 +77,42 @@ static void send_ipi_data(CPULoongArchState *env, target_ulong val, target_ulong static void ipi_send(uint64_t val) { - int cpuid, data; + uint32_t cpuid; + uint8_t vector; CPULoongArchState *env; CPUState *cs; LoongArchCPU *cpu; - cpuid = (val >> 16) & 0x3ff; + cpuid = extract32(val, 16, 10); + if (cpuid >= LOONGARCH_MAX_CPUS) { + trace_loongarch_ipi_unsupported_cpuid("IOCSR_IPI_SEND", cpuid); + return; + } + /* IPI status vector */ - data = 1 << (val & 0x1f); + vector = extract8(val, 0, 5); + cs = qemu_get_cpu(cpuid); cpu = LOONGARCH_CPU(cs); env = &cpu->env; address_space_stl(&env->address_space_iocsr, 0x1008, - data, MEMTXATTRS_UNSPECIFIED, NULL); - + BIT(vector), MEMTXATTRS_UNSPECIFIED, NULL); } static void mail_send(uint64_t val) { - int cpuid; + uint32_t cpuid; hwaddr addr; CPULoongArchState *env; CPUState *cs; LoongArchCPU *cpu; - cpuid = (val >> 16) & 0x3ff; + cpuid = extract32(val, 16, 10); + if (cpuid >= LOONGARCH_MAX_CPUS) { + trace_loongarch_ipi_unsupported_cpuid("IOCSR_MAIL_SEND", cpuid); + return; + } + addr = 0x1020 + (val & 0x1c); cs = qemu_get_cpu(cpuid); cpu = LOONGARCH_CPU(cs); @@ -111,14 +122,21 @@ static void mail_send(uint64_t val) static void any_send(uint64_t val) { - int cpuid; + uint32_t cpuid; hwaddr addr; CPULoongArchState *env; + CPUState *cs; + LoongArchCPU *cpu; + + cpuid = extract32(val, 16, 10); + if (cpuid >= LOONGARCH_MAX_CPUS) { + trace_loongarch_ipi_unsupported_cpuid("IOCSR_ANY_SEND", cpuid); + return; + } - cpuid = (val >> 16) & 0x3ff; addr = val & 0xffff; - CPUState *cs = 
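[Editor's note: the LoongArch IPI hunks above all gained the same guard: the target cpuid is a 10-bit field at bits 25:16 of the IOCSR value, so it is decoded with extract32() and range-checked before qemu_get_cpu() is trusted, instead of being masked and used blindly. Below is a standalone model of the decode and check; extract_bits32() mimics QEMU's extract32(), and the 256 used for LOONGARCH_MAX_CPUS is an assumed stand-in for the real limit.]

#include <stdint.h>
#include <stdio.h>

#define LOONGARCH_MAX_CPUS 256 /* stand-in value for this sketch */

/* Same contract as QEMU's extract32(value, start, length). */
static uint32_t extract_bits32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0U >> (32 - length));
}

int main(void)
{
    uint64_t val = 0x3ff0000;                      /* cpuid field all ones */
    uint32_t cpuid = extract_bits32(val, 16, 10);  /* 1023 */

    if (cpuid >= LOONGARCH_MAX_CPUS) {
        printf("dropped IPI to out-of-range cpuid %u\n", cpuid);
        return 0;
    }
    printf("deliver to cpu %u\n", cpuid);
    return 0;
}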
qemu_get_cpu(cpuid); - LoongArchCPU *cpu = LOONGARCH_CPU(cs); + cs = qemu_get_cpu(cpuid); + cpu = LOONGARCH_CPU(cs); env = &cpu->env; send_ipi_data(env, val, addr); } @@ -201,51 +219,43 @@ static const MemoryRegionOps loongarch_ipi64_ops = { static void loongarch_ipi_init(Object *obj) { - int cpu; - LoongArchMachineState *lams; LoongArchIPI *s = LOONGARCH_IPI(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - Object *machine = qdev_get_machine(); - ObjectClass *mc = object_get_class(machine); - /* 'lams' should be initialized */ - if (!strcmp(MACHINE_CLASS(mc)->name, "none")) { - return; - } - lams = LOONGARCH_MACHINE(machine); - for (cpu = 0; cpu < MAX_IPI_CORE_NUM; cpu++) { - memory_region_init_io(&s->ipi_iocsr_mem[cpu], obj, &loongarch_ipi_ops, - &lams->ipi_core[cpu], "loongarch_ipi_iocsr", 0x48); - sysbus_init_mmio(sbd, &s->ipi_iocsr_mem[cpu]); - memory_region_init_io(&s->ipi64_iocsr_mem[cpu], obj, &loongarch_ipi64_ops, - &lams->ipi_core[cpu], "loongarch_ipi64_iocsr", 0x118); - sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem[cpu]); - qdev_init_gpio_out(DEVICE(obj), &lams->ipi_core[cpu].irq, 1); - } + memory_region_init_io(&s->ipi_iocsr_mem, obj, &loongarch_ipi_ops, + &s->ipi_core, "loongarch_ipi_iocsr", 0x48); + + /* loongarch_ipi_iocsr performs re-entrant IO through ipi_send */ + s->ipi_iocsr_mem.disable_reentrancy_guard = true; + + sysbus_init_mmio(sbd, &s->ipi_iocsr_mem); + + memory_region_init_io(&s->ipi64_iocsr_mem, obj, &loongarch_ipi64_ops, + &s->ipi_core, "loongarch_ipi64_iocsr", 0x118); + sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem); + qdev_init_gpio_out(DEVICE(obj), &s->ipi_core.irq, 1); } static const VMStateDescription vmstate_ipi_core = { .name = "ipi-single", - .version_id = 0, - .minimum_version_id = 0, + .version_id = 1, + .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_UINT32(status, IPICore), VMSTATE_UINT32(en, IPICore), VMSTATE_UINT32(set, IPICore), VMSTATE_UINT32(clear, IPICore), - VMSTATE_UINT32_ARRAY(buf, IPICore, MAX_IPI_MBX_NUM * 2), + VMSTATE_UINT32_ARRAY(buf, IPICore, 2), VMSTATE_END_OF_LIST() } }; static const VMStateDescription vmstate_loongarch_ipi = { .name = TYPE_LOONGARCH_IPI, - .version_id = 0, - .minimum_version_id = 0, + .version_id = 1, + .minimum_version_id = 1, .fields = (VMStateField[]) { - VMSTATE_STRUCT_ARRAY(ipi_core, LoongArchMachineState, - MAX_IPI_CORE_NUM, 0, - vmstate_ipi_core, IPICore), + VMSTATE_STRUCT(ipi_core, LoongArchIPI, 0, vmstate_ipi_core, IPICore), VMSTATE_END_OF_LIST() } }; diff --git a/hw/intc/mips_gic.c b/hw/intc/mips_gic.c index bda4549925..4bdc3b1bd1 100644 --- a/hw/intc/mips_gic.c +++ b/hw/intc/mips_gic.c @@ -439,8 +439,8 @@ static void mips_gic_realize(DeviceState *dev, Error **errp) } static Property mips_gic_properties[] = { - DEFINE_PROP_INT32("num-vp", MIPSGICState, num_vps, 1), - DEFINE_PROP_INT32("num-irq", MIPSGICState, num_irq, 256), + DEFINE_PROP_UINT32("num-vp", MIPSGICState, num_vps, 1), + DEFINE_PROP_UINT32("num-irq", MIPSGICState, num_irq, 256), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c index eee04643cb..b466a6abaf 100644 --- a/hw/intc/riscv_aclint.c +++ b/hw/intc/riscv_aclint.c @@ -130,7 +130,7 @@ static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr, addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) { size_t hartid = mtimer->hartid_base + ((addr - mtimer->timecmp_base) >> 3); - CPUState *cpu = qemu_get_cpu(hartid); + CPUState *cpu = cpu_by_arch_id(hartid); CPURISCVState *env = cpu ? 
cpu->env_ptr : NULL; if (!env) { qemu_log_mask(LOG_GUEST_ERROR, @@ -173,7 +173,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr, addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) { size_t hartid = mtimer->hartid_base + ((addr - mtimer->timecmp_base) >> 3); - CPUState *cpu = qemu_get_cpu(hartid); + CPUState *cpu = cpu_by_arch_id(hartid); CPURISCVState *env = cpu ? cpu->env_ptr : NULL; if (!env) { qemu_log_mask(LOG_GUEST_ERROR, @@ -231,7 +231,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr, /* Check if timer interrupt is triggered for each hart. */ for (i = 0; i < mtimer->num_harts; i++) { - CPUState *cpu = qemu_get_cpu(mtimer->hartid_base + i); + CPUState *cpu = cpu_by_arch_id(mtimer->hartid_base + i); CPURISCVState *env = cpu ? cpu->env_ptr : NULL; if (!env) { continue; @@ -292,7 +292,7 @@ static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp) s->timecmp = g_new0(uint64_t, s->num_harts); /* Claim timer interrupt bits */ for (i = 0; i < s->num_harts; i++) { - RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(s->hartid_base + i)); + RISCVCPU *cpu = RISCV_CPU(cpu_by_arch_id(s->hartid_base + i)); if (riscv_cpu_claim_interrupts(cpu, MIP_MTIP) < 0) { error_report("MTIP already claimed"); exit(1); @@ -372,7 +372,7 @@ DeviceState *riscv_aclint_mtimer_create(hwaddr addr, hwaddr size, sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); for (i = 0; i < num_harts; i++) { - CPUState *cpu = qemu_get_cpu(hartid_base + i); + CPUState *cpu = cpu_by_arch_id(hartid_base + i); RISCVCPU *rvcpu = RISCV_CPU(cpu); CPURISCVState *env = cpu ? cpu->env_ptr : NULL; riscv_aclint_mtimer_callback *cb = @@ -407,7 +407,7 @@ static uint64_t riscv_aclint_swi_read(void *opaque, hwaddr addr, if (addr < (swi->num_harts << 2)) { size_t hartid = swi->hartid_base + (addr >> 2); - CPUState *cpu = qemu_get_cpu(hartid); + CPUState *cpu = cpu_by_arch_id(hartid); CPURISCVState *env = cpu ? cpu->env_ptr : NULL; if (!env) { qemu_log_mask(LOG_GUEST_ERROR, @@ -430,7 +430,7 @@ static void riscv_aclint_swi_write(void *opaque, hwaddr addr, uint64_t value, if (addr < (swi->num_harts << 2)) { size_t hartid = swi->hartid_base + (addr >> 2); - CPUState *cpu = qemu_get_cpu(hartid); + CPUState *cpu = cpu_by_arch_id(hartid); CPURISCVState *env = cpu ? 
cpu->env_ptr : NULL; if (!env) { qemu_log_mask(LOG_GUEST_ERROR, @@ -545,7 +545,7 @@ DeviceState *riscv_aclint_swi_create(hwaddr addr, uint32_t hartid_base, sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); for (i = 0; i < num_harts; i++) { - CPUState *cpu = qemu_get_cpu(hartid_base + i); + CPUState *cpu = cpu_by_arch_id(hartid_base + i); RISCVCPU *rvcpu = RISCV_CPU(cpu); qdev_connect_gpio_out(dev, i, diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c index cfd007e629..afc5b54dbb 100644 --- a/hw/intc/riscv_aplic.c +++ b/hw/intc/riscv_aplic.c @@ -803,7 +803,7 @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp) aplic->bitfield_words = (aplic->num_irqs + 31) >> 5; aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs); - aplic->state = g_new(uint32_t, aplic->num_irqs); + aplic->state = g_new0(uint32_t, aplic->num_irqs); aplic->target = g_new0(uint32_t, aplic->num_irqs); if (!aplic->msimode) { for (i = 0; i < aplic->num_irqs; i++) { @@ -833,7 +833,7 @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp) /* Claim the CPU interrupt to be triggered by this APLIC */ for (i = 0; i < aplic->num_harts; i++) { - RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(aplic->hartid_base + i)); + RISCVCPU *cpu = RISCV_CPU(cpu_by_arch_id(aplic->hartid_base + i)); if (riscv_cpu_claim_interrupts(cpu, (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) { error_report("%s already claimed", @@ -966,7 +966,7 @@ DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size, if (!msimode) { for (i = 0; i < num_harts; i++) { - CPUState *cpu = qemu_get_cpu(hartid_base + i); + CPUState *cpu = cpu_by_arch_id(hartid_base + i); qdev_connect_gpio_out_named(dev, NULL, i, qdev_get_gpio_in(DEVICE(cpu), diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c index 4d4d5b50ca..fea3385b51 100644 --- a/hw/intc/riscv_imsic.c +++ b/hw/intc/riscv_imsic.c @@ -316,8 +316,8 @@ static const MemoryRegionOps riscv_imsic_ops = { static void riscv_imsic_realize(DeviceState *dev, Error **errp) { RISCVIMSICState *imsic = RISCV_IMSIC(dev); - RISCVCPU *rcpu = RISCV_CPU(qemu_get_cpu(imsic->hartid)); - CPUState *cpu = qemu_get_cpu(imsic->hartid); + RISCVCPU *rcpu = RISCV_CPU(cpu_by_arch_id(imsic->hartid)); + CPUState *cpu = cpu_by_arch_id(imsic->hartid); CPURISCVState *env = cpu ? 
cpu->env_ptr : NULL; imsic->num_eistate = imsic->num_pages * imsic->num_irqs; @@ -413,7 +413,7 @@ DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode, uint32_t num_pages, uint32_t num_ids) { DeviceState *dev = qdev_new(TYPE_RISCV_IMSIC); - CPUState *cpu = qemu_get_cpu(hartid); + CPUState *cpu = cpu_by_arch_id(hartid); uint32_t i; assert(!(addr & (IMSIC_MMIO_PAGE_SZ - 1))); diff --git a/hw/intc/trace-events b/hw/intc/trace-events index 50cadfb996..5c6094c457 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -292,6 +292,7 @@ sh_intc_set(int id, int enable) "setting interrupt group %d to %d" # loongarch_ipi.c loongarch_ipi_read(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%"PRIx64 loongarch_ipi_write(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%"PRIx64 +loongarch_ipi_unsupported_cpuid(const char *s, uint32_t cpuid) "%s unsupported cpuid 0x%" PRIx32 # loongarch_pch_pic.c loongarch_pch_pic_irq_handler(int irq, int level) "irq %d level %d" diff --git a/hw/isa/Kconfig b/hw/isa/Kconfig index 0156a66889..c10cbc5fc1 100644 --- a/hw/isa/Kconfig +++ b/hw/isa/Kconfig @@ -35,6 +35,7 @@ config PIIX3 bool select I8257 select ISA_BUS + select MC146818RTC config PIIX4 bool @@ -79,3 +80,4 @@ config LPC_ICH9 select I8257 select ISA_BUS select ACPI_ICH9 + select MC146818RTC diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c index e3322e03bf..5432ab5065 100644 --- a/hw/isa/i82378.c +++ b/hw/isa/i82378.c @@ -32,8 +32,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(I82378State, I82378) struct I82378State { PCIDevice parent_obj; - qemu_irq out[2]; - qemu_irq *i8259; + qemu_irq cpu_intr; + qemu_irq *isa_irqs_in; MemoryRegion io; }; @@ -50,7 +50,7 @@ static const VMStateDescription vmstate_i82378 = { static void i82378_request_out0_irq(void *opaque, int irq, int level) { I82378State *s = opaque; - qemu_set_irq(s->out[0], level); + qemu_set_irq(s->cpu_intr, level); } static void i82378_request_pic_irq(void *opaque, int irq, int level) @@ -58,7 +58,7 @@ static void i82378_request_pic_irq(void *opaque, int irq, int level) DeviceState *dev = opaque; I82378State *s = I82378(dev); - qemu_set_irq(s->i8259[irq], level); + qemu_set_irq(s->isa_irqs_in[irq], level); } static void i82378_realize(PCIDevice *pci, Error **errp) @@ -94,9 +94,10 @@ static void i82378_realize(PCIDevice *pci, Error **errp) */ /* 2 82C59 (irq) */ - s->i8259 = i8259_init(isabus, - qemu_allocate_irq(i82378_request_out0_irq, s, 0)); - isa_bus_irqs(isabus, s->i8259); + s->isa_irqs_in = i8259_init(isabus, + qemu_allocate_irq(i82378_request_out0_irq, + s, 0)); + isa_bus_register_input_irqs(isabus, s->isa_irqs_in); /* 1 82C54 (pit) */ pit = i8254_pit_init(isabus, 0x40, 0, NULL); @@ -113,7 +114,7 @@ static void i82378_init(Object *obj) DeviceState *dev = DEVICE(obj); I82378State *s = I82378(obj); - qdev_init_gpio_out(dev, s->out, 1); + qdev_init_gpio_out(dev, &s->cpu_intr, 1); qdev_init_gpio_in(dev, i82378_request_pic_irq, 16); } diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c index f155b80010..a289eccfb1 100644 --- a/hw/isa/isa-bus.c +++ b/hw/isa/isa-bus.c @@ -67,13 +67,20 @@ ISABus *isa_bus_new(DeviceState *dev, MemoryRegion* address_space, return isabus; } -void isa_bus_irqs(ISABus *bus, qemu_irq *irqs) +void isa_bus_register_input_irqs(ISABus *bus, qemu_irq *irqs_in) { - bus->irqs = irqs; + bus->irqs_in = irqs_in; +} + +qemu_irq isa_bus_get_irq(ISABus *bus, unsigned irqnum) +{ + assert(irqnum < ISA_NUM_IRQS); + assert(bus->irqs_in); + return bus->irqs_in[irqnum]; } /* - * 
isa_get_irq() returns the corresponding qemu_irq entry for the i8259. + * isa_get_irq() returns the corresponding input qemu_irq entry for the i8259. * * This function is only for special cases such as the 'ferr', and * temporary use for normal devices until they are converted to qdev. @@ -81,14 +88,13 @@ void isa_bus_irqs(ISABus *bus, qemu_irq *irqs) qemu_irq isa_get_irq(ISADevice *dev, unsigned isairq) { assert(!dev || ISA_BUS(qdev_get_parent_bus(DEVICE(dev))) == isabus); - assert(isairq < ISA_NUM_IRQS); - return isabus->irqs[isairq]; + return isa_bus_get_irq(isabus, isairq); } void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, unsigned isairq) { - qemu_irq irq = isa_get_irq(isadev, isairq); - qdev_connect_gpio_out(DEVICE(isadev), gpioirq, irq); + qemu_irq input_irq = isa_get_irq(isadev, isairq); + qdev_connect_gpio_out(DEVICE(isadev), gpioirq, input_irq); } void isa_bus_dma(ISABus *bus, IsaDma *dma8, IsaDma *dma16) @@ -99,7 +105,7 @@ void isa_bus_dma(ISABus *bus, IsaDma *dma8, IsaDma *dma16) bus->dma[1] = dma16; } -IsaDma *isa_get_dma(ISABus *bus, int nchan) +IsaDma *isa_bus_get_dma(ISABus *bus, int nchan) { assert(bus); return bus->dma[nchan > 3 ? 1 : 0]; @@ -114,7 +120,7 @@ static inline void isa_init_ioport(ISADevice *dev, uint16_t ioport) void isa_register_ioport(ISADevice *dev, MemoryRegion *io, uint16_t start) { - memory_region_add_subregion(isabus->address_space_io, start, io); + memory_region_add_subregion(isa_address_space_io(dev), start, io); isa_init_ioport(dev, start); } @@ -135,7 +141,7 @@ int isa_register_portio_list(ISADevice *dev, isa_init_ioport(dev, start); portio_list_init(piolist, OBJECT(dev), pio_start, opaque, name); - portio_list_add(piolist, isabus->address_space_io, start); + portio_list_add(piolist, isa_address_space_io(dev), start); return 0; } @@ -164,6 +170,11 @@ bool isa_realize_and_unref(ISADevice *dev, ISABus *bus, Error **errp) return qdev_realize_and_unref(&dev->parent_obj, &bus->parent_obj, errp); } +ISABus *isa_bus_from_device(ISADevice *dev) +{ + return ISA_BUS(qdev_get_parent_bus(DEVICE(dev))); +} + ISADevice *isa_vga_init(ISABus *bus) { vga_interface_created = true; @@ -213,7 +224,6 @@ static const TypeInfo isa_device_type_info = { .parent = TYPE_DEVICE, .instance_size = sizeof(ISADevice), .abstract = true, - .class_size = sizeof(ISADeviceClass), .class_init = isa_device_class_init, }; diff --git a/hw/isa/isa-superio.c b/hw/isa/isa-superio.c index c81bfe58ef..9292ec3bcf 100644 --- a/hw/isa/isa-superio.c +++ b/hw/isa/isa-superio.c @@ -16,6 +16,7 @@ #include "qapi/error.h" #include "sysemu/blockdev.h" #include "chardev/char.h" +#include "hw/char/parallel.h" #include "hw/block/fdc.h" #include "hw/isa/superio.h" #include "hw/qdev-properties.h" @@ -51,7 +52,7 @@ static void isa_superio_realize(DeviceState *dev, Error **errp) } else { name = g_strdup_printf("parallel%d", i); } - isa = isa_new("isa-parallel"); + isa = isa_new(TYPE_ISA_PARALLEL); d = DEVICE(isa); qdev_prop_set_uint32(d, "index", i); if (k->parallel.get_iobase) { diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c index 1fba3c210c..9c47a2f6c7 100644 --- a/hw/isa/lpc_ich9.c +++ b/hw/isa/lpc_ich9.c @@ -40,8 +40,8 @@ #include "hw/irq.h" #include "hw/isa/apm.h" #include "hw/pci/pci.h" -#include "hw/pci/pci_bridge.h" -#include "hw/i386/ich9.h" +#include "hw/southbridge/ich9.h" +#include "hw/i386/pc.h" #include "hw/acpi/acpi.h" #include "hw/acpi/ich9.h" #include "hw/pci/pci_bus.h" @@ -57,8 +57,6 @@ /*****************************************************************************/ /* ICH9 LPC 
PCI to ISA bridge */ -static void ich9_lpc_reset(DeviceState *qdev); - /* chipset configuration register * to access chipset configuration registers, pci_[sg]et_{byte, word, long} * are used. @@ -259,7 +257,7 @@ static void ich9_lpc_update_apic(ICH9LPCState *lpc, int gsi) qemu_set_irq(lpc->gsi[gsi], level); } -void ich9_lpc_set_irq(void *opaque, int pirq, int level) +static void ich9_lpc_set_irq(void *opaque, int pirq, int level) { ICH9LPCState *lpc = opaque; int pic_irq, pic_dis; @@ -275,7 +273,7 @@ void ich9_lpc_set_irq(void *opaque, int pirq, int level) /* return the pirq number (PIRQ[A-H]:0-7) corresponding to * a given device irq pin. */ -int ich9_lpc_map_irq(PCIDevice *pci_dev, int intx) +static int ich9_lpc_map_irq(PCIDevice *pci_dev, int intx) { BusState *bus = qdev_get_parent_bus(&pci_dev->qdev); PCIBus *pci_bus = PCI_BUS(bus); @@ -286,7 +284,7 @@ int ich9_lpc_map_irq(PCIDevice *pci_dev, int intx) return lpc->irr[PCI_SLOT(pci_dev->devfn)][intx]; } -PCIINTxRoute ich9_route_intx_pin_to_irq(void *opaque, int pirq_pin) +static PCIINTxRoute ich9_route_intx_pin_to_irq(void *opaque, int pirq_pin) { ICH9LPCState *lpc = opaque; PCIINTxRoute route; @@ -407,14 +405,13 @@ static void smi_features_ok_callback(void *opaque) lpc->smi_features_ok = 1; } -void ich9_lpc_pm_init(PCIDevice *lpc_pci, bool smm_enabled) +static void ich9_lpc_pm_init(ICH9LPCState *lpc) { - ICH9LPCState *lpc = ICH9_LPC_DEVICE(lpc_pci); qemu_irq sci_irq; FWCfgState *fw_cfg = fw_cfg_find(); sci_irq = qemu_allocate_irq(ich9_set_sci, lpc, 0); - ich9_pm_init(lpc_pci, &lpc->pm, smm_enabled, sci_irq); + ich9_pm_init(PCI_DEVICE(lpc), &lpc->pm, sci_irq); if (lpc->smi_host_features && fw_cfg) { uint64_t host_features_le; @@ -440,8 +437,6 @@ void ich9_lpc_pm_init(PCIDevice *lpc_pci, bool smm_enabled) sizeof lpc->smi_features_ok, true); } - - ich9_lpc_reset(DEVICE(lpc)); } /* APM */ @@ -663,6 +658,8 @@ static void ich9_lpc_initfn(Object *obj) static const uint8_t acpi_enable_cmd = ICH9_APM_ACPI_ENABLE; static const uint8_t acpi_disable_cmd = ICH9_APM_ACPI_DISABLE; + object_initialize_child(obj, "rtc", &lpc->rtc, TYPE_MC146818_RTC); + object_property_add_uint8_ptr(obj, ACPI_PM_PROP_SCI_INT, &lpc->sci_gsi, OBJ_PROP_FLAG_READ); object_property_add_uint8_ptr(OBJECT(lpc), ACPI_PM_PROP_ACPI_ENABLE_CMD, @@ -680,6 +677,7 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp) { ICH9LPCState *lpc = ICH9_LPC_DEVICE(d); DeviceState *dev = DEVICE(d); + PCIBus *pci_bus = pci_get_bus(d); ISABus *isa_bus; if ((lpc->smi_host_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT)) && @@ -709,8 +707,6 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp) memory_region_init_io(&lpc->rcrb_mem, OBJECT(d), &rcrb_mmio_ops, lpc, "lpc-rcrb-mmio", ICH9_CC_SIZE); - lpc->isa_bus = isa_bus; - ich9_cc_init(lpc); apm_init(d, &lpc->apm, ich9_apm_ctrl_changed, lpc); @@ -723,11 +719,23 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp) ICH9_RST_CNT_IOPORT, &lpc->rst_cnt_mem, 1); - qdev_init_gpio_out_named(dev, lpc->gsi, ICH9_GPIO_GSI, GSI_NUM_PINS); + qdev_init_gpio_out_named(dev, lpc->gsi, ICH9_GPIO_GSI, IOAPIC_NUM_PINS); - isa_bus_irqs(isa_bus, lpc->gsi); + isa_bus_register_input_irqs(isa_bus, lpc->gsi); i8257_dma_init(isa_bus, 0); + + /* RTC */ + qdev_prop_set_int32(DEVICE(&lpc->rtc), "base_year", 2000); + if (!qdev_realize(DEVICE(&lpc->rtc), BUS(isa_bus), errp)) { + return; + } + + pci_bus_irqs(pci_bus, ich9_lpc_set_irq, d, ICH9_LPC_NB_PIRQS); + pci_bus_map_irqs(pci_bus, ich9_lpc_map_irq); + pci_bus_set_route_irq_fn(pci_bus, 
ich9_route_intx_pin_to_irq); + + ich9_lpc_pm_init(lpc); } static bool ich9_rst_cnt_needed(void *opaque) @@ -794,6 +802,7 @@ static const VMStateDescription vmstate_ich9_lpc = { static Property ich9_lpc_properties[] = { DEFINE_PROP_BOOL("noreboot", ICH9LPCState, pin_strap.spkr_hi, false), DEFINE_PROP_BOOL("smm-compat", ICH9LPCState, pm.smm_compat, false), + DEFINE_PROP_BOOL("smm-enabled", ICH9LPCState, pm.smm_enabled, false), DEFINE_PROP_BIT64("x-smi-broadcast", ICH9LPCState, smi_host_features, ICH9_LPC_SMI_F_BROADCAST_BIT, true), DEFINE_PROP_BIT64("x-smi-cpu-hotplug", ICH9LPCState, smi_host_features, @@ -813,8 +822,7 @@ static void ich9_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev) static void build_ich9_isa_aml(AcpiDevAmlIf *adev, Aml *scope) { Aml *field; - ICH9LPCState *s = ICH9_LPC_DEVICE(adev); - BusState *bus = BUS(s->isa_bus); + BusState *bus = qdev_get_child_bus(DEVICE(adev), "isa.0"); Aml *sb_scope = aml_scope("\\_SB"); /* ICH9 PCI to ISA irq remapping */ @@ -865,6 +873,7 @@ static void ich9_lpc_class_init(ObjectClass *klass, void *data) hc->plug = ich9_pm_device_plug_cb; hc->unplug_request = ich9_pm_device_unplug_request_cb; hc->unplug = ich9_pm_device_unplug_cb; + hc->is_hotpluggable_bus = ich9_pm_is_hotpluggable_bus; adevc->ospm_status = ich9_pm_ospm_status; adevc->send_event = ich9_send_gpe; adevc->madt_cpu = pc_madt_cpu_entry; diff --git a/hw/isa/piix3.c b/hw/isa/piix3.c index a9cb39bf21..f9103ea45a 100644 --- a/hw/isa/piix3.c +++ b/hw/isa/piix3.c @@ -28,6 +28,7 @@ #include "hw/dma/i8257.h" #include "hw/southbridge/piix.h" #include "hw/irq.h" +#include "hw/qdev-properties.h" #include "hw/isa/isa.h" #include "hw/xen/xen.h" #include "sysemu/runstate.h" @@ -301,6 +302,12 @@ static void pci_piix3_realize(PCIDevice *dev, Error **errp) PIIX_RCR_IOPORT, &d->rcr_mem, 1); i8257_dma_init(isa_bus, 0); + + /* RTC */ + qdev_prop_set_int32(DEVICE(&d->rtc), "base_year", 2000); + if (!qdev_realize(DEVICE(&d->rtc), BUS(isa_bus), errp)) { + return; + } } static void build_pci_isa_aml(AcpiDevAmlIf *adev, Aml *scope) @@ -324,6 +331,13 @@ static void build_pci_isa_aml(AcpiDevAmlIf *adev, Aml *scope) qbus_build_aml(bus, scope); } +static void pci_piix3_init(Object *obj) +{ + PIIX3State *d = PIIX3_PCI_DEVICE(obj); + + object_initialize_child(obj, "rtc", &d->rtc, TYPE_MC146818_RTC); +} + static void pci_piix3_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -350,6 +364,7 @@ static const TypeInfo piix3_pci_type_info = { .name = TYPE_PIIX3_PCI_DEVICE, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PIIX3State), + .instance_init = pci_piix3_init, .abstract = true, .class_init = pci_piix3_class_init, .interfaces = (InterfaceInfo[]) { diff --git a/hw/isa/piix4.c b/hw/isa/piix4.c index de60ceef73..e0b149f8eb 100644 --- a/hw/isa/piix4.c +++ b/hw/isa/piix4.c @@ -47,7 +47,7 @@ struct PIIX4State { qemu_irq cpu_intr; qemu_irq *isa; - RTCState rtc; + MC146818RtcState rtc; PCIIDEState ide; UHCIState uhci; PIIX4PMState pm; @@ -212,7 +212,7 @@ static void piix4_realize(PCIDevice *dev, Error **errp) s->isa = i8259_init(isa_bus, *i8259_out_irq); /* initialize ISA irqs */ - isa_bus_irqs(isa_bus, s->isa); + isa_bus_register_input_irqs(isa_bus, s->isa); /* initialize pit */ i8254_pit_init(isa_bus, 0x40, 0, NULL); diff --git a/hw/isa/trace-events b/hw/isa/trace-events index c4567a9b47..1816e8307a 100644 --- a/hw/isa/trace-events +++ b/hw/isa/trace-events @@ -16,6 +16,7 @@ apm_io_write(uint8_t addr, uint8_t val) "write addr=0x%x val=0x%02x" # vt82c686.c 
via_isa_write(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x" +via_pm_read(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x" via_pm_write(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x" via_pm_io_read(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x" via_pm_io_write(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x" diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c index 3f9bd0c04d..ca89119ce0 100644 --- a/hw/isa/vt82c686.c +++ b/hw/isa/vt82c686.c @@ -548,13 +548,13 @@ OBJECT_DECLARE_SIMPLE_TYPE(ViaISAState, VIA_ISA) struct ViaISAState { PCIDevice dev; qemu_irq cpu_intr; - qemu_irq *isa_irqs; + qemu_irq *isa_irqs_in; ViaSuperIOState via_sio; - RTCState rtc; + MC146818RtcState rtc; PCIIDEState ide; UHCIState uhci[2]; ViaPMState pm; - PCIDevice ac97; + ViaAC97State ac97; PCIDevice mc97; }; @@ -595,7 +595,7 @@ static const TypeInfo via_isa_info = { void via_isa_set_irq(PCIDevice *d, int n, int level) { ViaISAState *s = VIA_ISA(d); - qemu_set_irq(s->isa_irqs[n], level); + qemu_set_irq(s->isa_irqs_in[n], level); } static void via_isa_request_i8259_irq(void *opaque, int irq, int level) @@ -604,6 +604,46 @@ static void via_isa_request_i8259_irq(void *opaque, int irq, int level) qemu_set_irq(s->cpu_intr, level); } +static int via_isa_get_pci_irq(const ViaISAState *s, int irq_num) +{ + switch (irq_num) { + case 0: + return s->dev.config[0x55] >> 4; + case 1: + return s->dev.config[0x56] & 0xf; + case 2: + return s->dev.config[0x56] >> 4; + case 3: + return s->dev.config[0x57] >> 4; + } + return 0; +} + +static void via_isa_set_pci_irq(void *opaque, int irq_num, int level) +{ + ViaISAState *s = opaque; + PCIBus *bus = pci_get_bus(&s->dev); + int i, pic_level, pic_irq = via_isa_get_pci_irq(s, irq_num); + + /* IRQ 0: disabled, IRQ 2,8,13: reserved */ + if (!pic_irq) { + return; + } + if (unlikely(pic_irq == 2 || pic_irq == 8 || pic_irq == 13)) { + qemu_log_mask(LOG_GUEST_ERROR, "Invalid ISA IRQ routing"); + } + + /* The pic level is the logical OR of all the PCI irqs mapped to it. */ + pic_level = 0; + for (i = 0; i < PCI_NUM_PINS; i++) { + if (pic_irq == via_isa_get_pci_irq(s, i)) { + pic_level |= pci_bus_get_irq_level(bus, i); + } + } + /* Now we change the pic irq level according to the via irq mappings. 
*/ + qemu_set_irq(s->isa_irqs_in[pic_irq], pic_level); +} + static void via_isa_realize(PCIDevice *d, Error **errp) { ViaISAState *s = VIA_ISA(d); @@ -622,11 +662,13 @@ static void via_isa_realize(PCIDevice *d, Error **errp) return; } - s->isa_irqs = i8259_init(isa_bus, *isa_irq); - isa_bus_irqs(isa_bus, s->isa_irqs); + s->isa_irqs_in = i8259_init(isa_bus, *isa_irq); + isa_bus_register_input_irqs(isa_bus, s->isa_irqs_in); i8254_pit_init(isa_bus, 0x40, 0, NULL); i8257_dma_init(isa_bus, 0); + qdev_init_gpio_in_named(dev, via_isa_set_pci_irq, "pirq", PCI_NUM_PINS); + /* RTC */ qdev_prop_set_int32(DEVICE(&s->rtc), "base_year", 2000); if (!qdev_realize(DEVICE(&s->rtc), BUS(isa_bus), errp)) { diff --git a/hw/loongarch/acpi-build.c b/hw/loongarch/acpi-build.c index f551296a0e..8e3ce07367 100644 --- a/hw/loongarch/acpi-build.c +++ b/hw/loongarch/acpi-build.c @@ -7,6 +7,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "qemu/bitmap.h" #include "hw/pci/pci.h" #include "hw/core/cpu.h" @@ -260,6 +261,7 @@ build_la_ged_aml(Aml *dsdt, MachineState *machine) AML_SYSTEM_MEMORY, VIRT_GED_MEM_ADDR); } + acpi_dsdt_add_power_button(dsdt); } static void build_pci_device_aml(Aml *scope, LoongArchMachineState *lams) @@ -271,7 +273,7 @@ static void build_pci_device_aml(Aml *scope, LoongArchMachineState *lams) .pio.size = VIRT_PCI_IO_SIZE, .ecam.base = VIRT_PCI_CFG_BASE, .ecam.size = VIRT_PCI_CFG_SIZE, - .irq = PCH_PIC_IRQ_OFFSET + VIRT_DEVICE_IRQS, + .irq = VIRT_GSI_BASE + VIRT_DEVICE_IRQS, .bus = lams->pci_bus, }; diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c index 66be925068..ceddec1b23 100644 --- a/hw/loongarch/virt.c +++ b/hw/loongarch/virt.c @@ -44,6 +44,8 @@ #include "sysemu/tpm.h" #include "sysemu/block-backend.h" #include "hw/block/flash.h" +#include "qemu/error-report.h" + static void virt_flash_create(LoongArchMachineState *lams) { @@ -316,6 +318,14 @@ static void virt_machine_done(Notifier *notifier, void *data) loongarch_acpi_setup(lams); } +static void virt_powerdown_req(Notifier *notifier, void *opaque) +{ + LoongArchMachineState *s = container_of(notifier, + LoongArchMachineState, powerdown_notifier); + + acpi_send_event(s->acpi_ged, ACPI_POWER_DOWN_STATUS); +} + struct memmap_entry { uint64_t address; uint64_t length; @@ -389,7 +399,7 @@ static struct _loaderparams { static uint64_t cpu_loongarch_virt_to_phys(void *opaque, uint64_t addr) { - return addr & 0x1fffffffll; + return addr & MAKE_64BIT_MASK(0, TARGET_PHYS_ADDR_SPACE_BITS); } static int64_t load_kernel_info(void) @@ -432,7 +442,7 @@ static DeviceState *create_acpi_ged(DeviceState *pch_pic, LoongArchMachineState sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, VIRT_GED_REG_ADDR); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, - qdev_get_gpio_in(pch_pic, VIRT_SCI_IRQ - PCH_PIC_IRQ_OFFSET)); + qdev_get_gpio_in(pch_pic, VIRT_SCI_IRQ - VIRT_GSI_BASE)); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); return dev; } @@ -452,7 +462,7 @@ static DeviceState *create_platform_bus(DeviceState *pch_pic) sysbus = SYS_BUS_DEVICE(dev); for (i = 0; i < VIRT_PLATFORM_BUS_NUM_IRQS; i++) { - irq = VIRT_PLATFORM_BUS_IRQ - PCH_PIC_IRQ_OFFSET + i; + irq = VIRT_PLATFORM_BUS_IRQ - VIRT_GSI_BASE + i; sysbus_connect_irq(sysbus, i, qdev_get_gpio_in(pch_pic, irq)); } @@ -464,6 +474,7 @@ static DeviceState *create_platform_bus(DeviceState *pch_pic) static void loongarch_devices_init(DeviceState *pch_pic, LoongArchMachineState *lams) { + MachineClass *mc = MACHINE_GET_CLASS(lams); DeviceState *gpex_dev; SysBusDevice 
*d; PCIBus *pci_bus; @@ -509,7 +520,7 @@ static void loongarch_devices_init(DeviceState *pch_pic, LoongArchMachineState * serial_mm_init(get_system_memory(), VIRT_UART_BASE, 0, qdev_get_gpio_in(pch_pic, - VIRT_UART_IRQ - PCH_PIC_IRQ_OFFSET), + VIRT_UART_IRQ - VIRT_GSI_BASE), 115200, serial_hd(0), DEVICE_LITTLE_ENDIAN); fdt_add_uart_node(lams); @@ -518,7 +529,7 @@ static void loongarch_devices_init(DeviceState *pch_pic, LoongArchMachineState * NICInfo *nd = &nd_table[i]; if (!nd->model) { - nd->model = g_strdup("virtio"); + nd->model = g_strdup(mc->default_nic); } pci_nic_init_nofail(nd, pci_bus, nd->model, NULL); @@ -531,7 +542,7 @@ static void loongarch_devices_init(DeviceState *pch_pic, LoongArchMachineState * create_unimplemented_device("pci-dma-cfg", 0x1001041c, 0x4); sysbus_create_simple("ls7a_rtc", VIRT_RTC_REG_BASE, qdev_get_gpio_in(pch_pic, - VIRT_RTC_IRQ - PCH_PIC_IRQ_OFFSET)); + VIRT_RTC_IRQ - VIRT_GSI_BASE)); fdt_add_rtc_node(lams); pm_mem = g_new(MemoryRegion, 1); @@ -555,9 +566,6 @@ static void loongarch_irq_init(LoongArchMachineState *lams) CPUState *cpu_state; int cpu, pin, i, start, num; - ipi = qdev_new(TYPE_LOONGARCH_IPI); - sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); - extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal); @@ -588,17 +596,25 @@ static void loongarch_irq_init(LoongArchMachineState *lams) lacpu = LOONGARCH_CPU(cpu_state); env = &(lacpu->env); + ipi = qdev_new(TYPE_LOONGARCH_IPI); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); + /* connect ipi irq to cpu irq */ - qdev_connect_gpio_out(ipi, cpu, qdev_get_gpio_in(cpudev, IRQ_IPI)); + qdev_connect_gpio_out(ipi, 0, qdev_get_gpio_in(cpudev, IRQ_IPI)); /* IPI iocsr memory region */ memory_region_add_subregion(&env->system_iocsr, SMP_IPI_MAILBOX, sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), - cpu * 2)); + 0)); memory_region_add_subregion(&env->system_iocsr, MAIL_SEND_ADDR, sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), - cpu * 2 + 1)); - /* extioi iocsr memory region */ - memory_region_add_subregion(&env->system_iocsr, APIC_BASE, + 1)); + /* + * extioi iocsr memory region + * only one extioi is added on the loongarch virt machine + * external device interrupts can only be routed to cpus 0-3 + */ + if (cpu < EXTIOI_CPUS) + memory_region_add_subregion(&env->system_iocsr, APIC_BASE, sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), cpu)); } @@ -607,7 +623,7 @@ * connect ext irq to the cpu irq * cpu_pin[9:2] <= intc_pin[7:0] */ - for (cpu = 0; cpu < ms->smp.cpus; cpu++) { + for (cpu = 0; cpu < MIN(ms->smp.cpus, EXTIOI_CPUS); cpu++) { cpudev = DEVICE(qemu_get_cpu(cpu)); for (pin = 0; pin < LS3A_INTC_IP; pin++) { qdev_connect_gpio_out(extioi, (cpu * 8 + pin), @@ -859,6 +875,10 @@ static void loongarch_init(MachineState *machine) VIRT_PLATFORM_BUS_IRQ); lams->machine_done.notify = virt_machine_done; qemu_add_machine_init_done_notifier(&lams->machine_done); + /* connect powerdown request */ + lams->powerdown_notifier.notify = virt_powerdown_req; + qemu_register_powerdown_notifier(&lams->powerdown_notifier); + fdt_add_pcie_node(lams); /* * Since lowmem region starts from 0 and Linux kernel legacy start address @@ -1012,13 +1032,14 @@ static void loongarch_class_init(ObjectClass *oc, void *data) mc->default_ram_size = 1 * GiB; mc->default_cpu_type = LOONGARCH_CPU_TYPE_NAME("la464"); mc->default_ram_id = "loongarch.ram"; - mc->max_cpus = LOONGARCH_MAX_VCPUS; + mc->max_cpus = LOONGARCH_MAX_CPUS;
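The loongarch_ipi.c hunks above replace open-coded shift-and-mask arithmetic with extract32() and reject out-of-range cpuid values before the result of qemu_get_cpu() is dereferenced. A minimal standalone sketch of that decode-and-validate pattern follows; the extract32() here is a local stand-in for the helper in QEMU's include/qemu/bitops.h, MAX_CPUS is an illustrative bound (the patch checks against LOONGARCH_MAX_CPUS), and extract32() is used for the 5-bit vector where the patch uses extract8():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for extract32() from QEMU's include/qemu/bitops.h. */
static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0U >> (32 - length));
}

#define MAX_CPUS 4 /* illustrative; the patch bounds against LOONGARCH_MAX_CPUS */

static void ipi_send(uint64_t val)
{
    uint32_t cpuid = extract32(val, 16, 10); /* bits [25:16]: target cpu */
    uint32_t vector = extract32(val, 0, 5);  /* bits [4:0]: status vector */

    if (cpuid >= MAX_CPUS) {
        /* mirrors the trace-and-return the patch adds for bad cpuids */
        fprintf(stderr, "unsupported cpuid 0x%" PRIx32 "\n", cpuid);
        return;
    }
    printf("cpu %" PRIu32 ": raise IPI status bit 0x%" PRIx32 "\n",
           cpuid, (uint32_t)1 << vector);
}

int main(void)
{
    ipi_send((2ULL << 16) | 9); /* valid: cpu 2, vector 9 */
    ipi_send(0x3ffULL << 16);   /* cpuid 1023: rejected, no crash */
    return 0;
}

With extract32() the field widths are self-documenting, which makes the bounds check against the machine's cpu limit easy to audit.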
mc->is_default = 1; mc->default_kernel_irqchip_split = false; mc->block_default_type = IF_VIRTIO; mc->default_boot_order = "c"; mc->no_cdrom = 1; mc->get_hotplug_handler = virt_machine_get_hotplug_handler; + mc->default_nic = "virtio-net-pci"; hc->plug = loongarch_machine_device_plug_cb; hc->pre_plug = virt_machine_device_pre_plug; hc->unplug_request = virt_machine_device_unplug_request; diff --git a/hw/m68k/next-cube.c b/hw/m68k/next-cube.c index e0d4a94f9d..ce8ee50b9e 100644 --- a/hw/m68k/next-cube.c +++ b/hw/m68k/next-cube.c @@ -24,6 +24,7 @@ #include "hw/block/fdc.h" #include "hw/qdev-properties.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "ui/console.h" #include "target/m68k/cpu.h" #include "migration/vmstate.h" diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c index 9d52ca6613..b35ecafbc7 100644 --- a/hw/m68k/q800.c +++ b/hw/m68k/q800.c @@ -45,6 +45,7 @@ #include "hw/block/swim.h" #include "net/net.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "sysemu/qtest.h" #include "sysemu/runstate.h" #include "sysemu/reset.h" diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c index 4cb5beef1a..731205b215 100644 --- a/hw/m68k/virt.c +++ b/hw/m68k/virt.c @@ -23,6 +23,7 @@ #include "bootinfo.h" #include "net/net.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "sysemu/qtest.h" #include "sysemu/runstate.h" #include "sysemu/reset.h" @@ -346,10 +347,17 @@ type_init(virt_machine_register_types) } \ type_init(machvirt_machine_##major##_##minor##_init); -static void virt_machine_8_0_options(MachineClass *mc) +static void virt_machine_8_1_options(MachineClass *mc) { } -DEFINE_VIRT_MACHINE(8, 0, true) +DEFINE_VIRT_MACHINE(8, 1, true) + +static void virt_machine_8_0_options(MachineClass *mc) +{ + virt_machine_8_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len); +} +DEFINE_VIRT_MACHINE(8, 0, false) static void virt_machine_7_2_options(MachineClass *mc) { diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c index dae4fd89ca..2adacbd01b 100644 --- a/hw/mem/cxl_type3.c +++ b/hw/mem/cxl_type3.c @@ -1,6 +1,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qemu/error-report.h" +#include "qapi/qapi-commands-cxl.h" #include "hw/mem/memory-device.h" #include "hw/mem/pc-dimm.h" #include "hw/pci/pci.h" @@ -30,7 +31,8 @@ enum { }; static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, - int dsmad_handle, MemoryRegion *mr) + int dsmad_handle, MemoryRegion *mr, + bool is_pmem, uint64_t dpa_base) { g_autofree CDATDsmas *dsmas = NULL; g_autofree CDATDslbis *dslbis0 = NULL; @@ -49,9 +51,9 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, .length = sizeof(*dsmas), }, .DSMADhandle = dsmad_handle, - .flags = CDAT_DSMAS_FLAG_NV, - .DPA_base = 0, - .DPA_length = int128_get64(mr->size), + .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0, + .DPA_base = dpa_base, + .DPA_length = memory_region_size(mr), }; /* For now, no memory side cache, plausiblish numbers */ @@ -129,10 +131,13 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, .length = sizeof(*dsemts), }, .DSMAS_handle = dsmad_handle, - /* Reserved - the non volatile from DSMAS matters */ - .EFI_memory_type_attr = 2, + /* + * NV: Reserved - the non volatile from DSMAS matters + * V: EFI_MEMORY_SP + */ + .EFI_memory_type_attr = is_pmem ? 
2 : 1, .DPA_offset = 0, - .DPA_length = int128_get64(mr->size), + .DPA_length = memory_region_size(mr), }; /* Header always at start of structure */ @@ -149,33 +154,68 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv) { g_autofree CDATSubHeader **table = NULL; - MemoryRegion *nonvolatile_mr; CXLType3Dev *ct3d = priv; + MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL; int dsmad_handle = 0; - int rc; + int cur_ent = 0; + int len = 0; + int rc, i; - if (!ct3d->hostmem) { + if (!ct3d->hostpmem && !ct3d->hostvmem) { return 0; } - nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostmem); - if (!nonvolatile_mr) { - return -EINVAL; + if (ct3d->hostvmem) { + volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem); + if (!volatile_mr) { + return -EINVAL; + } + len += CT3_CDAT_NUM_ENTRIES; } - table = g_malloc0(CT3_CDAT_NUM_ENTRIES * sizeof(*table)); + if (ct3d->hostpmem) { + nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem); + if (!nonvolatile_mr) { + return -EINVAL; + } + len += CT3_CDAT_NUM_ENTRIES; + } + + table = g_malloc0(len * sizeof(*table)); if (!table) { return -ENOMEM; } - rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, nonvolatile_mr); - if (rc < 0) { - return rc; + /* Now fill them in */ + if (volatile_mr) { + rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr, + false, 0); + if (rc < 0) { + return rc; + } + cur_ent = CT3_CDAT_NUM_ENTRIES; } + if (nonvolatile_mr) { + rc = ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++, + nonvolatile_mr, true, + (volatile_mr ? + memory_region_size(volatile_mr) : 0)); + if (rc < 0) { + goto error_cleanup; + } + cur_ent += CT3_CDAT_NUM_ENTRIES; + } + assert(len == cur_ent); + *cdat_table = g_steal_pointer(&table); - return CT3_CDAT_NUM_ENTRIES; + return len; +error_cleanup: + for (i = 0; i < cur_ent; i++) { + g_free(table[i]); + } + return rc; } static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv) @@ -250,6 +290,7 @@ static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val, pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size); pci_default_write_config(pci_dev, addr, val, size); + pcie_aer_write_config(pci_dev, addr, val, size); } /* @@ -262,16 +303,42 @@ static void build_dvsecs(CXLType3Dev *ct3d) { CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; uint8_t *dvsec; + uint32_t range1_size_hi, range1_size_lo, + range1_base_hi = 0, range1_base_lo = 0, + range2_size_hi = 0, range2_size_lo = 0, + range2_base_hi = 0, range2_base_lo = 0; + + /* + * Volatile memory is mapped as (0x0) + * Persistent memory is mapped at (volatile->size) + */ + if (ct3d->hostvmem) { + range1_size_hi = ct3d->hostvmem->size >> 32; + range1_size_lo = (2 << 5) | (2 << 2) | 0x3 | + (ct3d->hostvmem->size & 0xF0000000); + if (ct3d->hostpmem) { + range2_size_hi = ct3d->hostpmem->size >> 32; + range2_size_lo = (2 << 5) | (2 << 2) | 0x3 | + (ct3d->hostpmem->size & 0xF0000000); + } + } else { + range1_size_hi = ct3d->hostpmem->size >> 32; + range1_size_lo = (2 << 5) | (2 << 2) | 0x3 | + (ct3d->hostpmem->size & 0xF0000000); + } dvsec = (uint8_t *)&(CXLDVSECDevice){ .cap = 0x1e, .ctrl = 0x2, .status2 = 0x2, - .range1_size_hi = ct3d->hostmem->size >> 32, - .range1_size_lo = (2 << 5) | (2 << 2) | 0x3 | - (ct3d->hostmem->size & 0xF0000000), - .range1_base_hi = 0, - .range1_base_lo = 0, + .range1_size_hi = range1_size_hi, + .range1_size_lo = range1_size_lo, + .range1_base_hi = 
range1_base_hi, + .range1_base_lo = range1_base_lo, + .range2_size_hi = range2_size_hi, + .range2_size_lo = range2_size_lo, + .range2_base_hi = range2_base_hi, + .range2_base_lo = range2_base_lo, }; cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, PCIE_CXL_DEVICE_DVSEC_LENGTH, @@ -312,14 +379,92 @@ static void hdm_decoder_commit(CXLType3Dev *ct3d, int which) { ComponentRegisters *cregs = &ct3d->cxl_cstate.crb; uint32_t *cache_mem = cregs->cache_mem_registers; + uint32_t ctrl; assert(which == 0); + ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL); /* TODO: Sanity checks that the decoder is possible */ - ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0); - ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0); + ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0); + ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1); - ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1); + stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL, ctrl); +} + +static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which) +{ + ComponentRegisters *cregs = &ct3d->cxl_cstate.crb; + uint32_t *cache_mem = cregs->cache_mem_registers; + uint32_t ctrl; + + assert(which == 0); + + ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL); + + ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0); + ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0); + + stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL, ctrl); +} + +static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err) +{ + switch (qmp_err) { + case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY: + return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY; + case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY: + return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY; + case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY: + return CXL_RAS_UNC_ERR_CACHE_BE_PARITY; + case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC: + return CXL_RAS_UNC_ERR_CACHE_DATA_ECC; + case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY: + return CXL_RAS_UNC_ERR_MEM_DATA_PARITY; + case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY: + return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY; + case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY: + return CXL_RAS_UNC_ERR_MEM_BE_PARITY; + case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC: + return CXL_RAS_UNC_ERR_MEM_DATA_ECC; + case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD: + return CXL_RAS_UNC_ERR_REINIT_THRESHOLD; + case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING: + return CXL_RAS_UNC_ERR_RSVD_ENCODING; + case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED: + return CXL_RAS_UNC_ERR_POISON_RECEIVED; + case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW: + return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW; + case CXL_UNCOR_ERROR_TYPE_INTERNAL: + return CXL_RAS_UNC_ERR_INTERNAL; + case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX: + return CXL_RAS_UNC_ERR_CXL_IDE_TX; + case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX: + return CXL_RAS_UNC_ERR_CXL_IDE_RX; + default: + return -EINVAL; + } +} + +static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err) +{ + switch (qmp_err) { + case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC: + return CXL_RAS_COR_ERR_CACHE_DATA_ECC; + case CXL_COR_ERROR_TYPE_MEM_DATA_ECC: + return CXL_RAS_COR_ERR_MEM_DATA_ECC; + case CXL_COR_ERROR_TYPE_CRC_THRESHOLD: + return CXL_RAS_COR_ERR_CRC_THRESHOLD; + case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD: + return CXL_RAS_COR_ERR_RETRY_THRESHOLD; + case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED: + return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED; + case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED: + return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED; + case CXL_COR_ERROR_TYPE_PHYSICAL: + return CXL_RAS_COR_ERR_PHYSICAL; + default: + 
return -EINVAL; + } } static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value, @@ -330,6 +475,7 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value, CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate); uint32_t *cache_mem = cregs->cache_mem_registers; bool should_commit = false; + bool should_uncommit = false; int which_hdm = -1; assert(size == 4); @@ -338,8 +484,86 @@ switch (offset) { case A_CXL_HDM_DECODER0_CTRL: should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT); + should_uncommit = !should_commit; which_hdm = 0; break; + case A_CXL_RAS_UNC_ERR_STATUS: + { + uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL); + uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL, FIRST_ERROR_POINTER); + CXLError *cxl_err; + uint32_t unc_err; + + /* + * If a single bit is written that corresponds to the first error + * pointer being cleared, update the status and header log. + */ + if (!QTAILQ_EMPTY(&ct3d->error_list)) { + if ((1 << fe) ^ value) { + CXLError *cxl_next; + /* + * Software is using the wrong flow for multiple header recording. + * Following behavior in PCIe r6.0 and assuming multiple + * header support. Implementation-defined choice to clear all + * matching records if more than one bit set - which corresponds + * closest to behavior of hardware not capable of multiple + * header recording. + */ + QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node, cxl_next) { + if ((1 << cxl_err->type) & value) { + QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node); + g_free(cxl_err); + } + } + } else { + /* Done with previous FE, so drop from list */ + cxl_err = QTAILQ_FIRST(&ct3d->error_list); + QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node); + g_free(cxl_err); + } + + /* + * If there is another FE, then put that in place and update + * the header log + */ + if (!QTAILQ_EMPTY(&ct3d->error_list)) { + uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0]; + int i; + + cxl_err = QTAILQ_FIRST(&ct3d->error_list); + for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) { + stl_le_p(header_log + i, cxl_err->header[i]); + } + capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL, + FIRST_ERROR_POINTER, cxl_err->type); + } else { + /* + * If no more errors, then follow the recommendation of PCI spec + * r6.0 6.2.4.2 to set the first error pointer to a status + * bit that will never be used.
+ */ + capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL, + FIRST_ERROR_POINTER, + CXL_RAS_UNC_ERR_CXL_UNUSED); + } + stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl); + } + unc_err = 0; + QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) { + unc_err |= 1 << cxl_err->type; + } + stl_le_p((uint8_t *)cache_mem + offset, unc_err); + + return; + } + case A_CXL_RAS_COR_ERR_STATUS: + { + uint32_t rw1c = value; + uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset); + temp &= ~rw1c; + stl_le_p((uint8_t *)cache_mem + offset, temp); + return; + } default: break; } @@ -347,42 +571,77 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value, stl_le_p((uint8_t *)cache_mem + offset, value); if (should_commit) { hdm_decoder_commit(ct3d, which_hdm); + } else if (should_uncommit) { + hdm_decoder_uncommit(ct3d, which_hdm); } } static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp) { DeviceState *ds = DEVICE(ct3d); - MemoryRegion *mr; - char *name; - if (!ct3d->hostmem) { - error_setg(errp, "memdev property must be set"); + if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) { + error_setg(errp, "at least one memdev property must be set"); + return false; + } else if (ct3d->hostmem && ct3d->hostpmem) { + error_setg(errp, "[memdev] cannot be used with new " + "[persistent-memdev] property"); + return false; + } else if (ct3d->hostmem) { + /* Use of hostmem property implies pmem */ + ct3d->hostpmem = ct3d->hostmem; + ct3d->hostmem = NULL; + } + + if (ct3d->hostpmem && !ct3d->lsa) { + error_setg(errp, "lsa property must be set for persistent devices"); return false; } - mr = host_memory_backend_get_memory(ct3d->hostmem); - if (!mr) { - error_setg(errp, "memdev property must be set"); - return false; + if (ct3d->hostvmem) { + MemoryRegion *vmr; + char *v_name; + + vmr = host_memory_backend_get_memory(ct3d->hostvmem); + if (!vmr) { + error_setg(errp, "volatile memdev must have backing device"); + return false; + } + memory_region_set_nonvolatile(vmr, false); + memory_region_set_enabled(vmr, true); + host_memory_backend_set_mapped(ct3d->hostvmem, true); + if (ds->id) { + v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id); + } else { + v_name = g_strdup("cxl-type3-dpa-vmem-space"); + } + address_space_init(&ct3d->hostvmem_as, vmr, v_name); + ct3d->cxl_dstate.vmem_size = memory_region_size(vmr); + ct3d->cxl_dstate.mem_size += memory_region_size(vmr); + g_free(v_name); } - memory_region_set_nonvolatile(mr, true); - memory_region_set_enabled(mr, true); - host_memory_backend_set_mapped(ct3d->hostmem, true); - if (ds->id) { - name = g_strdup_printf("cxl-type3-dpa-space:%s", ds->id); - } else { - name = g_strdup("cxl-type3-dpa-space"); - } - address_space_init(&ct3d->hostmem_as, mr, name); - g_free(name); + if (ct3d->hostpmem) { + MemoryRegion *pmr; + char *p_name; - ct3d->cxl_dstate.pmem_size = ct3d->hostmem->size; - - if (!ct3d->lsa) { - error_setg(errp, "lsa property must be set"); - return false; + pmr = host_memory_backend_get_memory(ct3d->hostpmem); + if (!pmr) { + error_setg(errp, "persistent memdev must have backing device"); + return false; + } + memory_region_set_nonvolatile(pmr, true); + memory_region_set_enabled(pmr, true); + host_memory_backend_set_mapped(ct3d->hostpmem, true); + if (ds->id) { + p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id); + } else { + p_name = g_strdup("cxl-type3-dpa-pmem-space"); + } + address_space_init(&ct3d->hostpmem_as, pmr, p_name); + ct3d->cxl_dstate.pmem_size = memory_region_size(pmr); + 
ct3d->cxl_dstate.mem_size += memory_region_size(pmr); + g_free(p_name); } return true; @@ -401,14 +660,15 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) MemoryRegion *mr = ®s->component_registers; uint8_t *pci_conf = pci_dev->config; unsigned short msix_num = 1; - int i; + int i, rc; + + QTAILQ_INIT(&ct3d->error_list); if (!cxl_setup_memory(ct3d, errp)) { return; } pci_config_set_prog_interface(pci_conf, 0x10); - pci_config_set_class(pci_conf, PCI_CLASS_MEMORY_CXL); pcie_endpoint_cap_init(pci_dev, 0x80); if (ct3d->sn != UI64_NULL) { @@ -438,7 +698,10 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) &ct3d->cxl_dstate.device_registers); /* MSI(-X) Initailization */ - msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL); + rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL); + if (rc) { + goto err_address_space_free; + } for (i = 0; i < msix_num; i++) { msix_vector_use(pci_dev, i); } @@ -450,6 +713,31 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table; cxl_cstate->cdat.private = ct3d; cxl_doe_cdat_init(cxl_cstate, errp); + if (*errp) { + goto err_free_special_ops; + } + + pcie_cap_deverr_init(pci_dev); + /* Leave a bit of room for expansion */ + rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL); + if (rc) { + goto err_release_cdat; + } + + return; + +err_release_cdat: + cxl_doe_cdat_release(cxl_cstate); +err_free_special_ops: + g_free(regs->special_ops); +err_address_space_free: + if (ct3d->hostpmem) { + address_space_destroy(&ct3d->hostpmem_as); + } + if (ct3d->hostvmem) { + address_space_destroy(&ct3d->hostvmem_as); + } + return; } static void ct3_exit(PCIDevice *pci_dev) @@ -458,9 +746,15 @@ static void ct3_exit(PCIDevice *pci_dev) CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; ComponentRegisters *regs = &cxl_cstate->crb; + pcie_aer_exit(pci_dev); cxl_doe_cdat_release(cxl_cstate); g_free(regs->special_ops); - address_space_destroy(&ct3d->hostmem_as); + if (ct3d->hostpmem) { + address_space_destroy(&ct3d->hostpmem_as); + } + if (ct3d->hostvmem) { + address_space_destroy(&ct3d->hostvmem_as); + } } /* TODO: Support multiple HDM decoders and DPA skip */ @@ -495,51 +789,77 @@ static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa) return true; } +static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d, + hwaddr host_addr, + unsigned int size, + AddressSpace **as, + uint64_t *dpa_offset) +{ + MemoryRegion *vmr = NULL, *pmr = NULL; + + if (ct3d->hostvmem) { + vmr = host_memory_backend_get_memory(ct3d->hostvmem); + } + if (ct3d->hostpmem) { + pmr = host_memory_backend_get_memory(ct3d->hostpmem); + } + + if (!vmr && !pmr) { + return -ENODEV; + } + + if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) { + return -EINVAL; + } + + if (*dpa_offset > ct3d->cxl_dstate.mem_size) { + return -EINVAL; + } + + if (vmr) { + if (*dpa_offset < memory_region_size(vmr)) { + *as = &ct3d->hostvmem_as; + } else { + *as = &ct3d->hostpmem_as; + *dpa_offset -= memory_region_size(vmr); + } + } else { + *as = &ct3d->hostpmem_as; + } + + return 0; +} + MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data, unsigned size, MemTxAttrs attrs) { - CXLType3Dev *ct3d = CXL_TYPE3(d); - uint64_t dpa_offset; - MemoryRegion *mr; + uint64_t dpa_offset = 0; + AddressSpace *as = NULL; + int res; - /* TODO support volatile region */ - mr = host_memory_backend_get_memory(ct3d->hostmem); - if (!mr) { + res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size, + &as, 
&dpa_offset); + if (res) { return MEMTX_ERROR; } - if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) { - return MEMTX_ERROR; - } - - if (dpa_offset > int128_get64(mr->size)) { - return MEMTX_ERROR; - } - - return address_space_read(&ct3d->hostmem_as, dpa_offset, attrs, data, size); + return address_space_read(as, dpa_offset, attrs, data, size); } MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data, unsigned size, MemTxAttrs attrs) { - CXLType3Dev *ct3d = CXL_TYPE3(d); - uint64_t dpa_offset; - MemoryRegion *mr; + uint64_t dpa_offset = 0; + AddressSpace *as = NULL; + int res; - mr = host_memory_backend_get_memory(ct3d->hostmem); - if (!mr) { - return MEMTX_OK; + res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size, + &as, &dpa_offset); + if (res) { + return MEMTX_ERROR; } - if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) { - return MEMTX_OK; - } - - if (dpa_offset > int128_get64(mr->size)) { - return MEMTX_OK; - } - return address_space_write(&ct3d->hostmem_as, dpa_offset, attrs, - &data, size); + return address_space_write(as, dpa_offset, attrs, &data, size); } static void ct3d_reset(DeviceState *dev) @@ -554,7 +874,11 @@ static void ct3d_reset(DeviceState *dev) static Property ct3_props[] = { DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND, - HostMemoryBackend *), + HostMemoryBackend *), /* for backward compatibility */ + DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem, + TYPE_MEMORY_BACKEND, HostMemoryBackend *), + DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem, + TYPE_MEMORY_BACKEND, HostMemoryBackend *), DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND, HostMemoryBackend *), DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL), @@ -566,6 +890,10 @@ static uint64_t get_lsa_size(CXLType3Dev *ct3d) { MemoryRegion *mr; + if (!ct3d->lsa) { + return 0; + } + mr = host_memory_backend_get_memory(ct3d->lsa); return memory_region_size(mr); } @@ -583,6 +911,10 @@ static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size, MemoryRegion *mr; void *lsa; + if (!ct3d->lsa) { + return 0; + } + mr = host_memory_backend_get_memory(ct3d->lsa); validate_lsa_access(mr, size, offset); @@ -598,6 +930,10 @@ static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size, MemoryRegion *mr; void *lsa; + if (!ct3d->lsa) { + return; + } + mr = host_memory_backend_get_memory(ct3d->lsa); validate_lsa_access(mr, size, offset); @@ -611,6 +947,147 @@ static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size, */ } +/* For uncorrectable errors include support for multiple header recording */ +void qmp_cxl_inject_uncorrectable_errors(const char *path, + CXLUncorErrorRecordList *errors, + Error **errp) +{ + Object *obj = object_resolve_path(path, NULL); + static PCIEAERErr err = {}; + CXLType3Dev *ct3d; + CXLError *cxl_err; + uint32_t *reg_state; + uint32_t unc_err; + bool first; + + if (!obj) { + error_setg(errp, "Unable to resolve path"); + return; + } + + if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) { + error_setg(errp, "Path does not point to a CXL type 3 device"); + return; + } + + err.status = PCI_ERR_UNC_INTN; + err.source_id = pci_requester_id(PCI_DEVICE(obj)); + err.flags = 0; + + ct3d = CXL_TYPE3(obj); + + first = QTAILQ_EMPTY(&ct3d->error_list); + reg_state = ct3d->cxl_cstate.crb.cache_mem_registers; + while (errors) { + uint32List *header = errors->value->header; + uint8_t header_count = 0; + int cxl_err_code; + + cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type); + if 
(cxl_err_code < 0) { + error_setg(errp, "Unknown error code"); + return; + } + + /* If the error is masked, nothing to do here */ + if (!((1 << cxl_err_code) & + ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) { + errors = errors->next; + continue; + } + + cxl_err = g_malloc0(sizeof(*cxl_err)); + if (!cxl_err) { + return; + } + + cxl_err->type = cxl_err_code; + while (header && header_count < 32) { + cxl_err->header[header_count++] = header->value; + header = header->next; + } + if (header_count > 32) { + error_setg(errp, "Header must be 32 DWORD or less"); + return; + } + QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node); + + errors = errors->next; + } + + if (first && !QTAILQ_EMPTY(&ct3d->error_list)) { + uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers; + uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL); + uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0]; + int i; + + cxl_err = QTAILQ_FIRST(&ct3d->error_list); + for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) { + stl_le_p(header_log + i, cxl_err->header[i]); + } + + capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL, + FIRST_ERROR_POINTER, cxl_err->type); + stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl); + } + + unc_err = 0; + QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) { + unc_err |= (1 << cxl_err->type); + } + if (!unc_err) { + return; + } + + stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err); + pcie_aer_inject_error(PCI_DEVICE(obj), &err); + + return; +} + +void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type, + Error **errp) +{ + static PCIEAERErr err = {}; + Object *obj = object_resolve_path(path, NULL); + CXLType3Dev *ct3d; + uint32_t *reg_state; + uint32_t cor_err; + int cxl_err_type; + + if (!obj) { + error_setg(errp, "Unable to resolve path"); + return; + } + if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) { + error_setg(errp, "Path does not point to a CXL type 3 device"); + return; + } + + err.status = PCI_ERR_COR_INTERNAL; + err.source_id = pci_requester_id(PCI_DEVICE(obj)); + err.flags = PCIE_AER_ERR_IS_CORRECTABLE; + + ct3d = CXL_TYPE3(obj); + reg_state = ct3d->cxl_cstate.crb.cache_mem_registers; + cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS); + + cxl_err_type = ct3d_qmp_cor_err_to_cxl(type); + if (cxl_err_type < 0) { + error_setg(errp, "Invalid COR error"); + return; + } + /* If the error is masked, nothing to do here */ + if (!((1 << cxl_err_type) & ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) { + return; + } + + cor_err |= (1 << cxl_err_type); + stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err); + + pcie_aer_inject_error(PCI_DEVICE(obj), &err); +} + static void ct3_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -619,7 +1096,7 @@ pc->realize = ct3_realize; pc->exit = ct3_exit; - pc->class_id = PCI_CLASS_STORAGE_EXPRESS; + pc->class_id = PCI_CLASS_MEMORY_CXL; pc->vendor_id = PCI_VENDOR_ID_INTEL; pc->device_id = 0xd93; /* LVF for now */ pc->revision = 1; @@ -628,7 +1105,7 @@ pc->config_read = ct3d_config_read; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); - dc->desc = "CXL PMEM Device (Type 3)"; + dc->desc = "CXL Memory Device (Type 3)"; dc->reset = ct3d_reset; device_class_set_props(dc, ct3_props); diff --git a/hw/mem/cxl_type3_stubs.c b/hw/mem/cxl_type3_stubs.c new file mode 100644 index 0000000000..d574c58f9a --- /dev/null +++ b/hw/mem/cxl_type3_stubs.c @@ -0,0 +1,17 @@ + 
+#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-cxl.h" + +void qmp_cxl_inject_uncorrectable_errors(const char *path, + CXLUncorErrorRecordList *errors, + Error **errp) +{ + error_setg(errp, "CXL Type 3 support is not compiled in"); +} + +void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type, + Error **errp) +{ + error_setg(errp, "CXL Type 3 support is not compiled in"); +} diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c index d9f8301711..1636db9679 100644 --- a/hw/mem/memory-device.c +++ b/hw/mem/memory-device.c @@ -10,6 +10,7 @@ */ #include "qemu/osdep.h" +#include "qemu/error-report.h" #include "hw/mem/memory-device.h" #include "qapi/error.h" #include "hw/boards.h" diff --git a/hw/mem/meson.build b/hw/mem/meson.build index 609b2b36fc..56c2618b84 100644 --- a/hw/mem/meson.build +++ b/hw/mem/meson.build @@ -4,6 +4,8 @@ mem_ss.add(when: 'CONFIG_DIMM', if_true: files('pc-dimm.c')) mem_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_mc.c')) mem_ss.add(when: 'CONFIG_NVDIMM', if_true: files('nvdimm.c')) mem_ss.add(when: 'CONFIG_CXL_MEM_DEVICE', if_true: files('cxl_type3.c')) +softmmu_ss.add(when: 'CONFIG_CXL_MEM_DEVICE', if_false: files('cxl_type3_stubs.c')) +softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('cxl_type3_stubs.c')) softmmu_ss.add_all(when: 'CONFIG_MEM_DEVICE', if_true: mem_ss) diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c index 50ef83215c..37f1f4ccfd 100644 --- a/hw/mem/pc-dimm.c +++ b/hw/mem/pc-dimm.c @@ -81,6 +81,10 @@ void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine) memory_device_plug(MEMORY_DEVICE(dimm), machine); vmstate_register_ram(vmstate_mr, DEVICE(dimm)); + /* count only "real" DIMMs, not NVDIMMs */ + if (!object_dynamic_cast(OBJECT(dimm), TYPE_NVDIMM)) { + machine->device_memory->dimm_size += memory_region_size(vmstate_mr); + } } void pc_dimm_unplug(PCDIMMDevice *dimm, MachineState *machine) @@ -90,6 +94,9 @@ void pc_dimm_unplug(PCDIMMDevice *dimm, MachineState *machine) memory_device_unplug(MEMORY_DEVICE(dimm), machine); vmstate_unregister_ram(vmstate_mr, DEVICE(dimm)); + if (!object_dynamic_cast(OBJECT(dimm), TYPE_NVDIMM)) { + machine->device_memory->dimm_size -= memory_region_size(vmstate_mr); + } } static int pc_dimm_slot2bitmap(Object *obj, void *opaque) diff --git a/hw/mem/sparse-mem.c b/hw/mem/sparse-mem.c index 72f038d47d..6e8f4f84fb 100644 --- a/hw/mem/sparse-mem.c +++ b/hw/mem/sparse-mem.c @@ -11,6 +11,7 @@ */ #include "qemu/osdep.h" +#include "qemu/error-report.h" #include "hw/qdev-properties.h" #include "hw/sysbus.h" diff --git a/hw/mips/boston.c b/hw/mips/boston.c index a9d87f3437..21ad844519 100644 --- a/hw/mips/boston.c +++ b/hw/mips/boston.c @@ -702,7 +702,7 @@ static void boston_mach_init(MachineState *machine) object_initialize_child(OBJECT(machine), "cps", &s->cps, TYPE_MIPS_CPS); object_property_set_str(OBJECT(&s->cps), "cpu-type", machine->cpu_type, &error_fatal); - object_property_set_int(OBJECT(&s->cps), "num-vp", machine->smp.cpus, + object_property_set_uint(OBJECT(&s->cps), "num-vp", machine->smp.cpus, &error_fatal); qdev_connect_clock_in(DEVICE(&s->cps), "clk-in", qdev_get_clock_out(dev, "cpu-refclk")); diff --git a/hw/mips/cps.c b/hw/mips/cps.c index 2b436700ce..2b5269ebf1 100644 --- a/hw/mips/cps.c +++ b/hw/mips/cps.c @@ -66,20 +66,17 @@ static bool cpu_mips_itu_supported(CPUMIPSState *env) static void mips_cps_realize(DeviceState *dev, Error **errp) { MIPSCPSState *s = MIPS_CPS(dev); - CPUMIPSState *env; - MIPSCPU *cpu; - int i; target_ulong gcr_base; 
bool itu_present = false; - bool saar_present = false; if (!clock_get(s->clock)) { error_setg(errp, "CPS input clock is not connected to an output clock"); return; } - for (i = 0; i < s->num_vp; i++) { - cpu = MIPS_CPU(object_new(s->cpu_type)); + for (int i = 0; i < s->num_vp; i++) { + MIPSCPU *cpu = MIPS_CPU(object_new(s->cpu_type)); + CPUMIPSState *env = &cpu->env; /* All VPs are halted on reset. Leave powering up to CPC. */ if (!object_property_set_bool(OBJECT(cpu), "start-powered-off", true, @@ -97,7 +94,6 @@ static void mips_cps_realize(DeviceState *dev, Error **errp) cpu_mips_irq_init_cpu(cpu); cpu_mips_clock_init(cpu); - env = &cpu->env; if (cpu_mips_itu_supported(env)) { itu_present = true; /* Attach ITC Tag to the VP */ @@ -107,22 +103,15 @@ static void mips_cps_realize(DeviceState *dev, Error **errp) qemu_register_reset(main_cpu_reset, cpu); } - cpu = MIPS_CPU(first_cpu); - env = &cpu->env; - saar_present = (bool)env->saarp; - /* Inter-Thread Communication Unit */ if (itu_present) { object_initialize_child(OBJECT(dev), "itu", &s->itu, TYPE_MIPS_ITU); - object_property_set_int(OBJECT(&s->itu), "num-fifo", 16, + object_property_set_link(OBJECT(&s->itu), "cpu[0]", + OBJECT(first_cpu), &error_abort); + object_property_set_uint(OBJECT(&s->itu), "num-fifo", 16, &error_abort); - object_property_set_int(OBJECT(&s->itu), "num-semaphores", 16, + object_property_set_uint(OBJECT(&s->itu), "num-semaphores", 16, &error_abort); - object_property_set_bool(OBJECT(&s->itu), "saar-present", saar_present, - &error_abort); - if (saar_present) { - s->itu.saar = &env->CP0_SAAR; - } if (!sysbus_realize(SYS_BUS_DEVICE(&s->itu), errp)) { return; } @@ -133,7 +122,7 @@ static void mips_cps_realize(DeviceState *dev, Error **errp) /* Cluster Power Controller */ object_initialize_child(OBJECT(dev), "cpc", &s->cpc, TYPE_MIPS_CPC); - object_property_set_int(OBJECT(&s->cpc), "num-vp", s->num_vp, + object_property_set_uint(OBJECT(&s->cpc), "num-vp", s->num_vp, &error_abort); object_property_set_int(OBJECT(&s->cpc), "vp-start-running", 1, &error_abort); @@ -146,9 +135,9 @@ static void mips_cps_realize(DeviceState *dev, Error **errp) /* Global Interrupt Controller */ object_initialize_child(OBJECT(dev), "gic", &s->gic, TYPE_MIPS_GIC); - object_property_set_int(OBJECT(&s->gic), "num-vp", s->num_vp, + object_property_set_uint(OBJECT(&s->gic), "num-vp", s->num_vp, &error_abort); - object_property_set_int(OBJECT(&s->gic), "num-irq", 128, + object_property_set_uint(OBJECT(&s->gic), "num-irq", 128, &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic), errp)) { return; @@ -158,10 +147,10 @@ static void mips_cps_realize(DeviceState *dev, Error **errp) sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gic), 0)); /* Global Configuration Registers */ - gcr_base = env->CP0_CMGCRBase << 4; + gcr_base = MIPS_CPU(first_cpu)->env.CP0_CMGCRBase << 4; object_initialize_child(OBJECT(dev), "gcr", &s->gcr, TYPE_MIPS_GCR); - object_property_set_int(OBJECT(&s->gcr), "num-vp", s->num_vp, + object_property_set_uint(OBJECT(&s->gcr), "num-vp", s->num_vp, &error_abort); object_property_set_int(OBJECT(&s->gcr), "gcr-rev", 0x800, &error_abort); diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c index 6aefe9a61b..ca4426a92c 100644 --- a/hw/mips/jazz.c +++ b/hw/mips/jazz.c @@ -249,7 +249,7 @@ static void mips_jazz_init(MachineState *machine, /* ISA devices */ i8259 = i8259_init(isa_bus, env->irq[4]); - isa_bus_irqs(isa_bus, i8259); + isa_bus_register_input_irqs(isa_bus, i8259); i8257_dma_init(isa_bus, 0); pit = i8254_pit_init(isa_bus, 0x40, 0, NULL); 
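
The cps.c changes above stop copying SAAR state out of the first CPU at realize time; instead the board sets a QOM link property ("cpu[0]") on the ITU, and the ITU derives its SAAR pointer from the linked CPU when it is realized (see the mips_itu.c hunk further down). A rough standalone model of that link-then-realize pattern, using invented types rather than the QOM API:

    #include <stdio.h>

    /* Hypothetical stand-ins: a CPU object and a device that links to it. */
    typedef struct { unsigned long saar[2]; int saarp; } Cpu;
    typedef struct { Cpu *cpu0; unsigned long *saar; } Itu;

    /* Board code sets the link before realize, as cps.c now does with
     * object_property_set_link(..., "cpu[0]", OBJECT(first_cpu), ...). */
    static void itu_set_cpu0(Itu *itu, Cpu *cpu) { itu->cpu0 = cpu; }

    static int itu_realize(Itu *itu)
    {
        if (!itu->cpu0) {
            fprintf(stderr, "Missing 'cpu[0]' property\n");
            return -1;
        }
        /* Derive SAAR access from the linked CPU instead of a copied flag. */
        if (itu->cpu0->saarp) {
            itu->saar = itu->cpu0->saar;
        }
        return 0;
    }

    int main(void)
    {
        Cpu cpu = { { 0, 0 }, 1 };
        Itu itu = { 0 };
        itu_set_cpu0(&itu, &cpu);
        if (itu_realize(&itu) == 0 && itu.saar) {
            itu.saar[0] = 0x11 << 1;   /* reset value, as in mips_itu_reset() */
            printf("saar[0]=0x%lx\n", itu.saar[0]);
        }
        return 0;
    }

The link also lets realize fail cleanly when a board forgets the wiring, which is exactly the "Missing 'cpu[0]' property" check added to mips_itu_realize() below.
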
 pcspk_init(isa_new(TYPE_PC_SPEAKER), isa_bus, pit);
diff --git a/hw/mips/loongson3_virt.c b/hw/mips/loongson3_virt.c
index 25534288dd..216812f660 100644
--- a/hw/mips/loongson3_virt.c
+++ b/hw/mips/loongson3_virt.c
@@ -406,6 +406,7 @@ static inline void loongson3_virt_devices_init(MachineState *machine,
     PCIBus *pci_bus;
     DeviceState *dev;
     MemoryRegion *mmio_reg, *ecam_reg;
+    MachineClass *mc = MACHINE_GET_CLASS(machine);
     LoongsonMachineState *s = LOONGSON_MACHINE(machine);
 
     dev = qdev_new(TYPE_GPEX_HOST);
@@ -456,7 +457,7 @@ static inline void loongson3_virt_devices_init(MachineState *machine,
         NICInfo *nd = &nd_table[i];
 
         if (!nd->model) {
-            nd->model = g_strdup("virtio");
+            nd->model = g_strdup(mc->default_nic);
         }
 
         pci_nic_init_nofail(nd, pci_bus, nd->model, NULL);
@@ -619,6 +620,7 @@ static void loongson3v_machine_class_init(ObjectClass *oc, void *data)
     mc->default_ram_size = 1600 * MiB;
     mc->kvm_type = mips_kvm_type;
     mc->minimum_page_bits = 14;
+    mc->default_nic = "virtio-net-pci";
 }
 
 static const TypeInfo loongson3_machine_types[] = {
diff --git a/hw/mips/malta.c b/hw/mips/malta.c
index ec172b111a..e3be2eea56 100644
--- a/hw/mips/malta.c
+++ b/hw/mips/malta.c
@@ -748,7 +748,6 @@ static void write_bootloader(uint8_t *base, uint64_t run_addr,
                              uint64_t kernel_entry)
 {
     uint32_t *p;
-    void *v;
 
     /* Small bootloader */
     p = (uint32_t *)base;
@@ -785,9 +784,7 @@ static void write_bootloader(uint8_t *base, uint64_t run_addr,
      *
      */
-    v = p;
-    bl_setup_gt64120_jump_kernel(&v, run_addr, kernel_entry);
-    p = v;
+    bl_setup_gt64120_jump_kernel((void **)&p, run_addr, kernel_entry);
 
     /* YAMON subroutines */
     p = (uint32_t *) (base + 0x800);
@@ -1066,7 +1063,7 @@ static void create_cps(MachineState *ms, MaltaState *s,
     object_initialize_child(OBJECT(s), "cps", &s->cps, TYPE_MIPS_CPS);
     object_property_set_str(OBJECT(&s->cps), "cpu-type", ms->cpu_type,
                             &error_fatal);
-    object_property_set_int(OBJECT(&s->cps), "num-vp", ms->smp.cpus,
+    object_property_set_uint(OBJECT(&s->cps), "num-vp", ms->smp.cpus,
                             &error_fatal);
     qdev_connect_clock_in(DEVICE(&s->cps), "clk-in", s->cpuclk);
     sysbus_realize(SYS_BUS_DEVICE(&s->cps), &error_fatal);
diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c
index 890ae7bae5..251b3d865d 100644
--- a/hw/misc/bcm2835_property.c
+++ b/hw/misc/bcm2835_property.c
@@ -282,7 +282,17 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
         break;
 
     case 0x00050001: /* Get command line */
-        resplen = 0;
+        /*
+         * We follow the firmware behaviour: no NUL terminator is
+         * written to the buffer, and if the buffer is too short
+         * we report the required length in the response header
+         * and copy nothing to the buffer.
+         */
+        resplen = strlen(s->command_line);
+        if (bufsize >= resplen)
+            address_space_write(&s->dma_as, value + 12,
+                                MEMTXATTRS_UNSPECIFIED, s->command_line,
+                                resplen);
         break;
 
     default:
@@ -382,6 +392,13 @@ static void bcm2835_property_init(Object *obj)
     memory_region_init_io(&s->iomem, OBJECT(s), &bcm2835_property_ops, s,
                           TYPE_BCM2835_PROPERTY, 0x10);
+
+    /*
+     * bcm2835_property_ops calls into bcm2835_mbox, which in turn reads from
+     * iomem. As such, mark iomem as re-entrancy safe.
+ */ + s->iomem.disable_reentrancy_guard = true; + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); sysbus_init_irq(SYS_BUS_DEVICE(s), &s->mbox_irq); } @@ -413,6 +430,7 @@ static void bcm2835_property_realize(DeviceState *dev, Error **errp) static Property bcm2835_property_props[] = { DEFINE_PROP_UINT32("board-rev", BCM2835PropertyState, board_rev, 0), + DEFINE_PROP_STRING("command-line", BCM2835PropertyState, command_line), DEFINE_PROP_END_OF_LIST() }; diff --git a/hw/misc/edu.c b/hw/misc/edu.c index e935c418d4..a1f8bc77e7 100644 --- a/hw/misc/edu.c +++ b/hw/misc/edu.c @@ -267,6 +267,8 @@ static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val, case 0x20: if (val & EDU_STATUS_IRQFACT) { qatomic_or(&edu->status, EDU_STATUS_IRQFACT); + /* Order check of the COMPUTING flag after setting IRQFACT. */ + smp_mb__after_rmw(); } else { qatomic_and(&edu->status, ~EDU_STATUS_IRQFACT); } @@ -349,6 +351,9 @@ static void *edu_fact_thread(void *opaque) qemu_mutex_unlock(&edu->thr_mutex); qatomic_and(&edu->status, ~EDU_STATUS_COMPUTING); + /* Clear COMPUTING flag before checking IRQFACT. */ + smp_mb__after_rmw(); + if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) { qemu_mutex_lock_iothread(); edu_raise_irq(edu, FACT_IRQ); diff --git a/hw/misc/i2c-echo.c b/hw/misc/i2c-echo.c new file mode 100644 index 0000000000..5705ab5d73 --- /dev/null +++ b/hw/misc/i2c-echo.c @@ -0,0 +1,156 @@ +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "qemu/main-loop.h" +#include "block/aio.h" +#include "hw/i2c/i2c.h" + +#define TYPE_I2C_ECHO "i2c-echo" +OBJECT_DECLARE_SIMPLE_TYPE(I2CEchoState, I2C_ECHO) + +enum i2c_echo_state { + I2C_ECHO_STATE_IDLE, + I2C_ECHO_STATE_START_SEND, + I2C_ECHO_STATE_ACK, +}; + +typedef struct I2CEchoState { + I2CSlave parent_obj; + + I2CBus *bus; + + enum i2c_echo_state state; + QEMUBH *bh; + + unsigned int pos; + uint8_t data[3]; +} I2CEchoState; + +static void i2c_echo_bh(void *opaque) +{ + I2CEchoState *state = opaque; + + switch (state->state) { + case I2C_ECHO_STATE_IDLE: + return; + + case I2C_ECHO_STATE_START_SEND: + if (i2c_start_send_async(state->bus, state->data[0])) { + goto release_bus; + } + + state->pos++; + state->state = I2C_ECHO_STATE_ACK; + return; + + case I2C_ECHO_STATE_ACK: + if (state->pos > 2) { + break; + } + + if (i2c_send_async(state->bus, state->data[state->pos++])) { + break; + } + + return; + } + + + i2c_end_transfer(state->bus); +release_bus: + i2c_bus_release(state->bus); + + state->state = I2C_ECHO_STATE_IDLE; +} + +static int i2c_echo_event(I2CSlave *s, enum i2c_event event) +{ + I2CEchoState *state = I2C_ECHO(s); + + switch (event) { + case I2C_START_RECV: + state->pos = 0; + + break; + + case I2C_START_SEND: + state->pos = 0; + + break; + + case I2C_FINISH: + state->pos = 0; + state->state = I2C_ECHO_STATE_START_SEND; + i2c_bus_master(state->bus, state->bh); + + break; + + case I2C_NACK: + break; + + default: + return -1; + } + + return 0; +} + +static uint8_t i2c_echo_recv(I2CSlave *s) +{ + I2CEchoState *state = I2C_ECHO(s); + + if (state->pos > 2) { + return 0xff; + } + + return state->data[state->pos++]; +} + +static int i2c_echo_send(I2CSlave *s, uint8_t data) +{ + I2CEchoState *state = I2C_ECHO(s); + + if (state->pos > 2) { + return -1; + } + + state->data[state->pos++] = data; + + return 0; +} + +static void i2c_echo_realize(DeviceState *dev, Error **errp) +{ + I2CEchoState *state = I2C_ECHO(dev); + BusState *bus = qdev_get_parent_bus(dev); + + state->bus = I2C_BUS(bus); + state->bh = qemu_bh_new(i2c_echo_bh, state); + + return; +} + 
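
The new i2c-echo device buffers up to three bytes written to it by a master and, on I2C_FINISH, claims the bus through its bottom half and sends the same bytes back. A compact host-side sketch of that buffer-and-replay behaviour, independent of the QEMU I2C API (all names invented for illustration, and simplified in that the real device always walks all three slots):

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical model: buffer up to 3 bytes, then replay them on "finish". */
    typedef struct {
        unsigned int pos;
        uint8_t data[3];
    } Echo;

    static int echo_send(Echo *e, uint8_t byte)
    {
        if (e->pos > 2) {
            return -1;              /* NACK: buffer full, like i2c_echo_send() */
        }
        e->data[e->pos++] = byte;
        return 0;
    }

    static void echo_finish(Echo *e)
    {
        /* Stands in for the BH that masters the bus and re-sends the bytes. */
        for (unsigned int i = 0; i < e->pos; i++) {
            printf("echoed 0x%02x\n", e->data[i]);
        }
        e->pos = 0;
    }

    int main(void)
    {
        Echo e = { 0 };
        echo_send(&e, 0xde);
        echo_send(&e, 0xad);
        echo_finish(&e);
        return 0;
    }

In the real device the replay happens asynchronously, only once i2c_bus_master() grants the bus, which is why the work is deferred to a BH instead of being done in the event callback itself.
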
+static void i2c_echo_class_init(ObjectClass *oc, void *data) +{ + I2CSlaveClass *sc = I2C_SLAVE_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = i2c_echo_realize; + + sc->event = i2c_echo_event; + sc->recv = i2c_echo_recv; + sc->send = i2c_echo_send; +} + +static const TypeInfo i2c_echo = { + .name = TYPE_I2C_ECHO, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(I2CEchoState), + .class_init = i2c_echo_class_init, +}; + +static void register_types(void) +{ + type_register_static(&i2c_echo); +} + +type_init(register_types); diff --git a/hw/misc/imx_rngc.c b/hw/misc/imx_rngc.c index 632c03779c..082c6980ad 100644 --- a/hw/misc/imx_rngc.c +++ b/hw/misc/imx_rngc.c @@ -228,8 +228,10 @@ static void imx_rngc_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->iomem); sysbus_init_irq(sbd, &s->irq); - s->self_test_bh = qemu_bh_new(imx_rngc_self_test, s); - s->seed_bh = qemu_bh_new(imx_rngc_seed, s); + s->self_test_bh = qemu_bh_new_guarded(imx_rngc_self_test, s, + &dev->mem_reentrancy_guard); + s->seed_bh = qemu_bh_new_guarded(imx_rngc_seed, s, + &dev->mem_reentrancy_guard); } static void imx_rngc_reset(DeviceState *dev) diff --git a/hw/misc/lasi.c b/hw/misc/lasi.c index 23a7634a8c..ff9dc893ae 100644 --- a/hw/misc/lasi.c +++ b/hw/misc/lasi.c @@ -194,7 +194,7 @@ static const MemoryRegionOps lasi_chip_ops = { static const VMStateDescription vmstate_lasi = { .name = "Lasi", - .version_id = 1, + .version_id = 2, .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_UINT32(irr, LasiState), @@ -204,6 +204,7 @@ static const VMStateDescription vmstate_lasi = { VMSTATE_UINT32(iar, LasiState), VMSTATE_UINT32(errlog, LasiState), VMSTATE_UINT32(amr, LasiState), + VMSTATE_UINT32_V(rtc_ref, LasiState, 2), VMSTATE_END_OF_LIST() } }; @@ -233,7 +234,6 @@ static void lasi_reset(DeviceState *dev) s->iar = 0xFFFB0000 + 3; /* CPU_HPA + 3 */ /* Real time clock (RTC), it's only one 32-bit counter @9000 */ - s->rtc = time(NULL); s->rtc_ref = 0; } diff --git a/hw/misc/macio/gpio.c b/hw/misc/macio/gpio.c index c8ac5633b2..4deb330471 100644 --- a/hw/misc/macio/gpio.c +++ b/hw/misc/macio/gpio.c @@ -28,6 +28,7 @@ #include "migration/vmstate.h" #include "hw/misc/macio/macio.h" #include "hw/misc/macio/gpio.h" +#include "hw/irq.h" #include "hw/nmi.h" #include "qemu/log.h" #include "qemu/module.h" diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c index 43bb1f56ba..80a789f32b 100644 --- a/hw/misc/macio/mac_dbdma.c +++ b/hw/misc/macio/mac_dbdma.c @@ -914,7 +914,7 @@ static void mac_dbdma_realize(DeviceState *dev, Error **errp) { DBDMAState *s = MAC_DBDMA(dev); - s->bh = qemu_bh_new(DBDMA_run_bh, s); + s->bh = qemu_bh_new_guarded(DBDMA_run_bh, s, &dev->mem_reentrancy_guard); } static void mac_dbdma_class_init(ObjectClass *oc, void *data) diff --git a/hw/misc/meson.build b/hw/misc/meson.build index fe869b98ca..a40245ad44 100644 --- a/hw/misc/meson.build +++ b/hw/misc/meson.build @@ -128,6 +128,8 @@ softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_rng.c')) softmmu_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_ahb_apb_pnp.c')) +softmmu_ss.add(when: 'CONFIG_I2C', if_true: files('i2c-echo.c')) + specific_ss.add(when: 'CONFIG_AVR_POWER', if_true: files('avr_power.c')) specific_ss.add(when: 'CONFIG_MAC_VIA', if_true: files('mac_via.c')) diff --git a/hw/misc/mips_cmgcr.c b/hw/misc/mips_cmgcr.c index 3c8b37f700..66eb11662c 100644 --- a/hw/misc/mips_cmgcr.c +++ b/hw/misc/mips_cmgcr.c @@ -212,7 +212,7 @@ static const VMStateDescription vmstate_mips_gcr = { }; static 
Property mips_gcr_properties[] = { - DEFINE_PROP_INT32("num-vp", MIPSGCRState, num_vps, 1), + DEFINE_PROP_UINT32("num-vp", MIPSGCRState, num_vps, 1), DEFINE_PROP_INT32("gcr-rev", MIPSGCRState, gcr_rev, 0x800), DEFINE_PROP_UINT64("gcr-base", MIPSGCRState, gcr_base, GCR_BASE_ADDR), DEFINE_PROP_LINK("gic", MIPSGCRState, gic_mr, TYPE_MEMORY_REGION, diff --git a/hw/misc/mips_itu.c b/hw/misc/mips_itu.c index badef5c214..0eda302db4 100644 --- a/hw/misc/mips_itu.c +++ b/hw/misc/mips_itu.c @@ -93,10 +93,10 @@ void itc_reconfigure(MIPSITUState *tag) uint64_t size = (1 * KiB) + (am[1] & ITC_AM1_ADDR_MASK_MASK); bool is_enabled = (am[0] & ITC_AM0_EN_MASK) != 0; - if (tag->saar_present) { - address = ((*(uint64_t *) tag->saar) & 0xFFFFFFFFE000ULL) << 4; - size = 1ULL << ((*(uint64_t *) tag->saar >> 1) & 0x1f); - is_enabled = *(uint64_t *) tag->saar & 1; + if (tag->saar) { + address = (tag->saar[0] & 0xFFFFFFFFE000ULL) << 4; + size = 1ULL << ((tag->saar[0] >> 1) & 0x1f); + is_enabled = tag->saar[0] & 1; } memory_region_transaction_begin(); @@ -157,7 +157,7 @@ static inline ITCView get_itc_view(hwaddr addr) static inline int get_cell_stride_shift(const MIPSITUState *s) { /* Minimum interval (for EntryGain = 0) is 128 B */ - if (s->saar_present) { + if (s->saar) { return 7 + ((s->icr0 >> ITC_ICR0_BLK_GRAIN) & ITC_ICR0_BLK_GRAIN_MASK); } else { @@ -515,6 +515,7 @@ static void mips_itu_init(Object *obj) static void mips_itu_realize(DeviceState *dev, Error **errp) { MIPSITUState *s = MIPS_ITU(dev); + CPUMIPSState *env; if (s->num_fifo > ITC_FIFO_NUM_MAX) { error_setg(errp, "Exceed maximum number of FIFO cells: %d", @@ -526,6 +527,15 @@ static void mips_itu_realize(DeviceState *dev, Error **errp) s->num_semaphores); return; } + if (!s->cpu0) { + error_setg(errp, "Missing 'cpu[0]' property"); + return; + } + + env = &s->cpu0->env; + if (env->saarp) { + s->saar = env->CP0_SAAR; + } s->cell = g_new(ITCStorageCell, get_num_cells(s)); } @@ -534,8 +544,8 @@ static void mips_itu_reset(DeviceState *dev) { MIPSITUState *s = MIPS_ITU(dev); - if (s->saar_present) { - *(uint64_t *) s->saar = 0x11 << 1; + if (s->saar) { + s->saar[0] = 0x11 << 1; s->icr0 = get_num_cells(s) << ITC_ICR0_CELL_NUM; } else { s->ITCAddressMap[0] = 0; @@ -549,11 +559,11 @@ static void mips_itu_reset(DeviceState *dev) } static Property mips_itu_properties[] = { - DEFINE_PROP_INT32("num-fifo", MIPSITUState, num_fifo, + DEFINE_PROP_UINT32("num-fifo", MIPSITUState, num_fifo, ITC_FIFO_NUM_MAX), - DEFINE_PROP_INT32("num-semaphores", MIPSITUState, num_semaphores, + DEFINE_PROP_UINT32("num-semaphores", MIPSITUState, num_semaphores, ITC_SEMAPH_NUM_MAX), - DEFINE_PROP_BOOL("saar-present", MIPSITUState, saar_present, false), + DEFINE_PROP_LINK("cpu[0]", MIPSITUState, cpu0, TYPE_MIPS_CPU, MIPSCPU *), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/net/Kconfig b/hw/net/Kconfig index 1cc1c5775e..98e00be4f9 100644 --- a/hw/net/Kconfig +++ b/hw/net/Kconfig @@ -44,6 +44,11 @@ config E1000E_PCI_EXPRESS default y if PCI_DEVICES depends on PCI_EXPRESS && MSI_NONBROKEN +config IGB_PCI_EXPRESS + bool + default y if PCI_DEVICES + depends on PCI_EXPRESS && MSI_NONBROKEN + config RTL8139_PCI bool default y if PCI_DEVICES @@ -51,7 +56,7 @@ config RTL8139_PCI config VMXNET3_PCI bool - default y if PCI_DEVICES && PC_PCI + default y if PCI_DEVICES depends on PCI config SMC91C111 diff --git a/hw/net/allwinner-sun8i-emac.c b/hw/net/allwinner-sun8i-emac.c index b861d8ff35..fac4405f45 100644 --- a/hw/net/allwinner-sun8i-emac.c +++ b/hw/net/allwinner-sun8i-emac.c @@ -350,8 +350,13 
@@ static void allwinner_sun8i_emac_get_desc(AwSun8iEmacState *s, FrameDescriptor *desc, uint32_t phys_addr) { - dma_memory_read(&s->dma_as, phys_addr, desc, sizeof(*desc), + uint32_t desc_words[4]; + dma_memory_read(&s->dma_as, phys_addr, &desc_words, sizeof(desc_words), MEMTXATTRS_UNSPECIFIED); + desc->status = le32_to_cpu(desc_words[0]); + desc->status2 = le32_to_cpu(desc_words[1]); + desc->addr = le32_to_cpu(desc_words[2]); + desc->next = le32_to_cpu(desc_words[3]); } static uint32_t allwinner_sun8i_emac_next_desc(AwSun8iEmacState *s, @@ -400,10 +405,15 @@ static uint32_t allwinner_sun8i_emac_tx_desc(AwSun8iEmacState *s, } static void allwinner_sun8i_emac_flush_desc(AwSun8iEmacState *s, - FrameDescriptor *desc, + const FrameDescriptor *desc, uint32_t phys_addr) { - dma_memory_write(&s->dma_as, phys_addr, desc, sizeof(*desc), + uint32_t desc_words[4]; + desc_words[0] = cpu_to_le32(desc->status); + desc_words[1] = cpu_to_le32(desc->status2); + desc_words[2] = cpu_to_le32(desc->addr); + desc_words[3] = cpu_to_le32(desc->next); + dma_memory_write(&s->dma_as, phys_addr, &desc_words, sizeof(desc_words), MEMTXATTRS_UNSPECIFIED); } @@ -638,8 +648,7 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset, break; case REG_TX_CUR_BUF: /* Transmit Current Buffer */ if (s->tx_desc_curr != 0) { - dma_memory_read(&s->dma_as, s->tx_desc_curr, &desc, sizeof(desc), - MEMTXATTRS_UNSPECIFIED); + allwinner_sun8i_emac_get_desc(s, &desc, s->tx_desc_curr); value = desc.addr; } else { value = 0; @@ -652,8 +661,7 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset, break; case REG_RX_CUR_BUF: /* Receive Current Buffer */ if (s->rx_desc_curr != 0) { - dma_memory_read(&s->dma_as, s->rx_desc_curr, &desc, sizeof(desc), - MEMTXATTRS_UNSPECIFIED); + allwinner_sun8i_emac_get_desc(s, &desc, s->rx_desc_curr); value = desc.addr; } else { value = 0; diff --git a/hw/net/e1000.c b/hw/net/e1000.c index 7efb8a4c52..aae5f0bdc0 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -26,6 +26,7 @@ #include "qemu/osdep.h" +#include "hw/net/mii.h" #include "hw/pci/pci_device.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" @@ -38,12 +39,11 @@ #include "qemu/module.h" #include "qemu/range.h" +#include "e1000_common.h" #include "e1000x_common.h" #include "trace.h" #include "qom/object.h" -static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - /* #define E1000_DEBUG */ #ifdef E1000_DEBUG @@ -66,9 +66,8 @@ static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL); #define IOPORT_SIZE 0x40 #define PNPMMIO_SIZE 0x20000 -#define MIN_BUF_SIZE 60 /* Min. octets in an ethernet frame sans FCS */ -#define MAXIMUM_ETHERNET_HDR_LEN (14+4) +#define MAXIMUM_ETHERNET_HDR_LEN (ETH_HLEN + 4) /* * HW models: @@ -181,67 +180,73 @@ e1000_autoneg_done(E1000State *s) static bool have_autoneg(E1000State *s) { - return chkflag(AUTONEG) && (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN); + return chkflag(AUTONEG) && (s->phy_reg[MII_BMCR] & MII_BMCR_AUTOEN); } static void set_phy_ctrl(E1000State *s, int index, uint16_t val) { - /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */ - s->phy_reg[PHY_CTRL] = val & ~(0x3f | - MII_CR_RESET | - MII_CR_RESTART_AUTO_NEG); + /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */ + s->phy_reg[MII_BMCR] = val & ~(0x3f | + MII_BMCR_RESET | + MII_BMCR_ANRESTART); /* * QEMU 1.3 does not support link auto-negotiation emulation, so if we * migrate during auto negotiation, after migration the link will be * down. 
*/ - if (have_autoneg(s) && (val & MII_CR_RESTART_AUTO_NEG)) { + if (have_autoneg(s) && (val & MII_BMCR_ANRESTART)) { e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer); } } static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = { - [PHY_CTRL] = set_phy_ctrl, + [MII_BMCR] = set_phy_ctrl, }; enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) }; enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W }; static const char phy_regcap[0x20] = { - [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW, - [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW, - [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW, - [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R, - [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R, - [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R, - [PHY_AUTONEG_EXP] = PHY_R, + [MII_BMSR] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW, + [MII_PHYID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW, + [MII_BMCR] = PHY_RW, [MII_CTRL1000] = PHY_RW, + [MII_ANLPAR] = PHY_R, [MII_STAT1000] = PHY_R, + [MII_ANAR] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R, + [MII_PHYID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R, + [MII_ANER] = PHY_R, }; -/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */ +/* MII_PHYID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */ static const uint16_t phy_reg_init[] = { - [PHY_CTRL] = MII_CR_SPEED_SELECT_MSB | - MII_CR_FULL_DUPLEX | - MII_CR_AUTO_NEG_EN, + [MII_BMCR] = MII_BMCR_SPEED1000 | + MII_BMCR_FD | + MII_BMCR_AUTOEN, - [PHY_STATUS] = MII_SR_EXTENDED_CAPS | - MII_SR_LINK_STATUS | /* link initially up */ - MII_SR_AUTONEG_CAPS | - /* MII_SR_AUTONEG_COMPLETE: initially NOT completed */ - MII_SR_PREAMBLE_SUPPRESS | - MII_SR_EXTENDED_STATUS | - MII_SR_10T_HD_CAPS | - MII_SR_10T_FD_CAPS | - MII_SR_100X_HD_CAPS | - MII_SR_100X_FD_CAPS, + [MII_BMSR] = MII_BMSR_EXTCAP | + MII_BMSR_LINK_ST | /* link initially up */ + MII_BMSR_AUTONEG | + /* MII_BMSR_AN_COMP: initially NOT completed */ + MII_BMSR_MFPS | + MII_BMSR_EXTSTAT | + MII_BMSR_10T_HD | + MII_BMSR_10T_FD | + MII_BMSR_100TX_HD | + MII_BMSR_100TX_FD, - [PHY_ID1] = 0x141, - /* [PHY_ID2] configured per DevId, from e1000_reset() */ - [PHY_AUTONEG_ADV] = 0xde1, - [PHY_LP_ABILITY] = 0x1e0, - [PHY_1000T_CTRL] = 0x0e00, - [PHY_1000T_STATUS] = 0x3c00, + [MII_PHYID1] = 0x141, + /* [MII_PHYID2] configured per DevId, from e1000_reset() */ + [MII_ANAR] = MII_ANAR_CSMACD | MII_ANAR_10 | + MII_ANAR_10FD | MII_ANAR_TX | + MII_ANAR_TXFD | MII_ANAR_PAUSE | + MII_ANAR_PAUSE_ASYM, + [MII_ANLPAR] = MII_ANLPAR_10 | MII_ANLPAR_10FD | + MII_ANLPAR_TX | MII_ANLPAR_TXFD, + [MII_CTRL1000] = MII_CTRL1000_FULL | MII_CTRL1000_PORT | + MII_CTRL1000_MASTER, + [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL | + MII_STAT1000_ROK | MII_STAT1000_LOK, [M88E1000_PHY_SPEC_CTRL] = 0x360, [M88E1000_PHY_SPEC_STATUS] = 0xac00, [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, @@ -373,9 +378,9 @@ static bool e1000_vet_init_need(void *opaque) return chkflag(VET); } -static void e1000_reset(void *opaque) +static void e1000_reset_hold(Object *obj) { - E1000State *d = opaque; + E1000State *d = E1000(obj); E1000BaseClass *edc = E1000_GET_CLASS(d); uint8_t *macaddr = d->conf.macaddr.a; @@ -386,10 +391,10 @@ static void e1000_reset(void *opaque) d->mit_irq_level = 0; d->mit_ide = 0; memset(d->phy_reg, 0, sizeof d->phy_reg); - memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init); - d->phy_reg[PHY_ID2] = edc->phy_id2; + memcpy(d->phy_reg, phy_reg_init, sizeof phy_reg_init); + d->phy_reg[MII_PHYID2] = edc->phy_id2; memset(d->mac_reg, 0, 
sizeof d->mac_reg); - memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init); + memcpy(d->mac_reg, mac_reg_init, sizeof mac_reg_init); d->rxbuf_min_shift = 1; memset(&d->tx, 0, sizeof d->tx); @@ -547,9 +552,9 @@ putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse) static inline void inc_tx_bcast_or_mcast_count(E1000State *s, const unsigned char *arr) { - if (!memcmp(arr, bcast, sizeof bcast)) { + if (is_broadcast_ether_addr(arr)) { e1000x_inc_reg_if_not_full(s->mac_reg, BPTC); - } else if (arr[0] & 1) { + } else if (is_multicast_ether_addr(arr)) { e1000x_inc_reg_if_not_full(s->mac_reg, MPTC); } } @@ -561,13 +566,13 @@ e1000_send_packet(E1000State *s, const uint8_t *buf, int size) PTC1023, PTC1522 }; NetClientState *nc = qemu_get_queue(s->nic); - if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) { + if (s->phy_reg[MII_BMCR] & MII_BMCR_LOOPBACK) { qemu_receive_packet(nc, buf, size); } else { qemu_send_packet(nc, buf, size); } inc_tx_bcast_or_mcast_count(s, buf); - e1000x_increase_size_stats(s->mac_reg, PTCregs, size); + e1000x_increase_size_stats(s->mac_reg, PTCregs, size + 4); } static void @@ -631,10 +636,9 @@ xmit_seg(E1000State *s) } e1000x_inc_reg_if_not_full(s->mac_reg, TPT); - e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size); - s->mac_reg[GPTC] = s->mac_reg[TPT]; - s->mac_reg[GOTCL] = s->mac_reg[TOTL]; - s->mac_reg[GOTCH] = s->mac_reg[TOTH]; + e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size + 4); + e1000x_inc_reg_if_not_full(s->mac_reg, GPTC); + e1000x_grow_8reg_if_not_full(s->mac_reg, GOTCL, s->tx.size + 4); } static void @@ -800,35 +804,11 @@ start_xmit(E1000State *s) } static int -receive_filter(E1000State *s, const uint8_t *buf, int size) +receive_filter(E1000State *s, const void *buf) { - uint32_t rctl = s->mac_reg[RCTL]; - int isbcast = !memcmp(buf, bcast, sizeof bcast), ismcast = (buf[0] & 1); - - if (e1000x_is_vlan_packet(buf, le16_to_cpu(s->mac_reg[VET])) && - e1000x_vlan_rx_filter_enabled(s->mac_reg)) { - uint16_t vid = lduw_be_p(buf + 14); - uint32_t vfta = ldl_le_p((uint32_t*)(s->mac_reg + VFTA) + - ((vid >> 5) & 0x7f)); - if ((vfta & (1 << (vid & 0x1f))) == 0) - return 0; - } - - if (!isbcast && !ismcast && (rctl & E1000_RCTL_UPE)) { /* promiscuous ucast */ - return 1; - } - - if (ismcast && (rctl & E1000_RCTL_MPE)) { /* promiscuous mcast */ - e1000x_inc_reg_if_not_full(s->mac_reg, MPRC); - return 1; - } - - if (isbcast && (rctl & E1000_RCTL_BAM)) { /* broadcast enabled */ - e1000x_inc_reg_if_not_full(s->mac_reg, BPRC); - return 1; - } - - return e1000x_rx_group_filter(s->mac_reg, buf); + return (!e1000x_is_vlan_packet(buf, s->mac_reg[VET]) || + e1000x_rx_vlan_filter(s->mac_reg, PKT_GET_VLAN_HDR(buf))) && + e1000x_rx_group_filter(s->mac_reg, buf); } static void @@ -841,7 +821,7 @@ e1000_set_link_status(NetClientState *nc) e1000x_update_regs_on_link_down(s->mac_reg, s->phy_reg); } else { if (have_autoneg(s) && - !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) { + !(s->phy_reg[MII_BMSR] & MII_BMSR_AN_COMP)) { e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer); } else { e1000_link_up(s); @@ -907,7 +887,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) uint32_t rdh_start; uint16_t vlan_special = 0; uint8_t vlan_status = 0; - uint8_t min_buf[MIN_BUF_SIZE]; + uint8_t min_buf[ETH_ZLEN]; struct iovec min_iov; uint8_t *filter_buf = iov->iov_base; size_t size = iov_size(iov, iovcnt); @@ -915,6 +895,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) size_t desc_offset; 
size_t desc_size; size_t total_size; + eth_pkt_types_e pkt_type; if (!e1000x_hw_rx_enabled(s->mac_reg)) { return -1; @@ -943,7 +924,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) return size; } - if (!receive_filter(s, filter_buf, size)) { + if (!receive_filter(s, filter_buf)) { return size; } @@ -964,6 +945,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) size -= 4; } + pkt_type = get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf)); rdh_start = s->mac_reg[RDH]; desc_offset = 0; total_size = size + e1000x_fcs_len(s->mac_reg); @@ -1029,7 +1011,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) } } while (desc_offset < total_size); - e1000x_update_rx_total_stats(s->mac_reg, size, total_size); + e1000x_update_rx_total_stats(s->mac_reg, pkt_type, size, total_size); n = E1000_ICS_RXT0; if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH]) @@ -1060,30 +1042,6 @@ mac_readreg(E1000State *s, int index) return s->mac_reg[index]; } -static uint32_t -mac_low4_read(E1000State *s, int index) -{ - return s->mac_reg[index] & 0xf; -} - -static uint32_t -mac_low11_read(E1000State *s, int index) -{ - return s->mac_reg[index] & 0x7ff; -} - -static uint32_t -mac_low13_read(E1000State *s, int index) -{ - return s->mac_reg[index] & 0x1fff; -} - -static uint32_t -mac_low16_read(E1000State *s, int index) -{ - return s->mac_reg[index] & 0xffff; -} - static uint32_t mac_icr_read(E1000State *s, int index) { @@ -1136,11 +1094,17 @@ set_rdt(E1000State *s, int index, uint32_t val) } } -static void -set_16bit(E1000State *s, int index, uint32_t val) -{ - s->mac_reg[index] = val & 0xffff; -} +#define LOW_BITS_SET_FUNC(num) \ + static void \ + set_##num##bit(E1000State *s, int index, uint32_t val) \ + { \ + s->mac_reg[index] = val & (BIT(num) - 1); \ + } + +LOW_BITS_SET_FUNC(4) +LOW_BITS_SET_FUNC(11) +LOW_BITS_SET_FUNC(13) +LOW_BITS_SET_FUNC(16) static void set_dlen(E1000State *s, int index, uint32_t val) @@ -1194,7 +1158,9 @@ static const readops macreg_readops[] = { getreg(XONRXC), getreg(XONTXC), getreg(XOFFRXC), getreg(XOFFTXC), getreg(RFC), getreg(RJC), getreg(RNBC), getreg(TSCTFC), getreg(MGTPRC), getreg(MGTPDC), getreg(MGTPTC), getreg(GORCL), - getreg(GOTCL), + getreg(GOTCL), getreg(RDFH), getreg(RDFT), getreg(RDFHS), + getreg(RDFTS), getreg(RDFPC), getreg(TDFH), getreg(TDFT), + getreg(TDFHS), getreg(TDFTS), getreg(TDFPC), getreg(AIT), [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GOTCH] = mac_read_clr8, [GORCH] = mac_read_clr8, @@ -1212,24 +1178,17 @@ static const readops macreg_readops[] = { [MPTC] = mac_read_clr4, [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read, - [RDFH] = mac_low13_read, [RDFT] = mac_low13_read, - [RDFHS] = mac_low13_read, [RDFTS] = mac_low13_read, - [RDFPC] = mac_low13_read, - [TDFH] = mac_low11_read, [TDFT] = mac_low11_read, - [TDFHS] = mac_low13_read, [TDFTS] = mac_low13_read, - [TDFPC] = mac_low13_read, - [AIT] = mac_low16_read, - [CRCERRS ... MPC] = &mac_readreg, - [IP6AT ... IP6AT+3] = &mac_readreg, [IP4AT ... IP4AT+6] = &mac_readreg, - [FFLT ... FFLT+6] = &mac_low11_read, - [RA ... RA+31] = &mac_readreg, - [WUPM ... WUPM+31] = &mac_readreg, - [MTA ... MTA+127] = &mac_readreg, - [VFTA ... VFTA+127] = &mac_readreg, - [FFMT ... FFMT+254] = &mac_low4_read, - [FFVT ... FFVT+254] = &mac_readreg, - [PBM ... PBM+16383] = &mac_readreg, + [CRCERRS ... MPC] = &mac_readreg, + [IP6AT ... IP6AT + 3] = &mac_readreg, [IP4AT ... IP4AT + 6] = &mac_readreg, + [FFLT ... FFLT + 6] = &mac_readreg, + [RA ... 
RA + 31] = &mac_readreg, + [WUPM ... WUPM + 31] = &mac_readreg, + [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = &mac_readreg, + [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = &mac_readreg, + [FFMT ... FFMT + 254] = &mac_readreg, + [FFVT ... FFVT + 254] = &mac_readreg, + [PBM ... PBM + 16383] = &mac_readreg, }; enum { NREADOPS = ARRAY_SIZE(macreg_readops) }; @@ -1239,27 +1198,28 @@ static const writeops macreg_writeops[] = { putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC), putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH), putreg(RDBAL), putreg(LEDCTL), putreg(VET), putreg(FCRUC), - putreg(TDFH), putreg(TDFT), putreg(TDFHS), putreg(TDFTS), - putreg(TDFPC), putreg(RDFH), putreg(RDFT), putreg(RDFHS), - putreg(RDFTS), putreg(RDFPC), putreg(IPAV), putreg(WUC), - putreg(WUS), putreg(AIT), + putreg(IPAV), putreg(WUC), + putreg(WUS), - [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl, - [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics, - [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt, - [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr, - [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl, - [RDTR] = set_16bit, [RADV] = set_16bit, [TADV] = set_16bit, - [ITR] = set_16bit, + [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl, + [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics, + [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt, + [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr, + [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl, + [RDTR] = set_16bit, [RADV] = set_16bit, [TADV] = set_16bit, + [ITR] = set_16bit, [TDFH] = set_11bit, [TDFT] = set_11bit, + [TDFHS] = set_13bit, [TDFTS] = set_13bit, [TDFPC] = set_13bit, + [RDFH] = set_13bit, [RDFT] = set_13bit, [RDFHS] = set_13bit, + [RDFTS] = set_13bit, [RDFPC] = set_13bit, [AIT] = set_16bit, - [IP6AT ... IP6AT+3] = &mac_writereg, [IP4AT ... IP4AT+6] = &mac_writereg, - [FFLT ... FFLT+6] = &mac_writereg, - [RA ... RA+31] = &mac_writereg, - [WUPM ... WUPM+31] = &mac_writereg, - [MTA ... MTA+127] = &mac_writereg, - [VFTA ... VFTA+127] = &mac_writereg, - [FFMT ... FFMT+254] = &mac_writereg, [FFVT ... FFVT+254] = &mac_writereg, - [PBM ... PBM+16383] = &mac_writereg, + [IP6AT ... IP6AT + 3] = &mac_writereg, [IP4AT ... IP4AT + 6] = &mac_writereg, + [FFLT ... FFLT + 6] = &set_11bit, + [RA ... RA + 31] = &mac_writereg, + [WUPM ... WUPM + 31] = &mac_writereg, + [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = &mac_writereg, + [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = &mac_writereg, + [FFMT ... FFMT + 254] = &set_4bit, [FFVT ... FFVT + 254] = &mac_writereg, + [PBM ... PBM + 16383] = &mac_writereg, }; enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) }; @@ -1415,10 +1375,10 @@ static int e1000_pre_save(void *opaque) /* * If link is down and auto-negotiation is supported and ongoing, * complete auto-negotiation immediately. This allows us to look - * at MII_SR_AUTONEG_COMPLETE to infer link status on load. + * at MII_BMSR_AN_COMP to infer link status on load. */ if (nc->link_down && have_autoneg(s)) { - s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE; + s->phy_reg[MII_BMSR] |= MII_BMSR_AN_COMP; } /* Decide which set of props to migrate in the main structure */ @@ -1457,8 +1417,7 @@ static int e1000_post_load(void *opaque, int version_id) * Alternatively, restart link negotiation if it was in progress. 
     */
     nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;
-    if (have_autoneg(s) &&
-        !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
+    if (have_autoneg(s) && !(s->phy_reg[MII_BMSR] & MII_BMSR_AN_COMP)) {
         nc->link_down = false;
         timer_mod(s->autoneg_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
@@ -1624,8 +1583,9 @@ static const VMStateDescription vmstate_e1000 = {
         VMSTATE_UINT32(mac_reg[WUFC], E1000State),
         VMSTATE_UINT32(mac_reg[VET], E1000State),
         VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
-        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
-        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
+        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, E1000_MC_TBL_SIZE),
+        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA,
+                                 E1000_VLAN_FILTER_TBL_SIZE),
         VMSTATE_END_OF_LIST()
     },
     .subsections = (const VMStateDescription*[]) {
@@ -1746,12 +1706,6 @@ static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
                                         e1000_flush_queue_timer, d);
 }
 
-static void qdev_e1000_reset(DeviceState *dev)
-{
-    E1000State *d = E1000(dev);
-    e1000_reset(d);
-}
-
 static Property e1000_properties[] = {
     DEFINE_NIC_PROPERTIES(E1000State, conf),
     DEFINE_PROP_BIT("autonegotiation", E1000State,
@@ -1777,6 +1731,7 @@ typedef struct E1000Info {
 static void e1000_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
+    ResettableClass *rc = RESETTABLE_CLASS(klass);
     PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
     E1000BaseClass *e = E1000_CLASS(klass);
     const E1000Info *info = data;
@@ -1789,9 +1744,9 @@ static void e1000_class_init(ObjectClass *klass, void *data)
     k->revision = info->revision;
     e->phy_id2 = info->phy_id2;
     k->class_id = PCI_CLASS_NETWORK_ETHERNET;
+    rc->phases.hold = e1000_reset_hold;
     set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
     dc->desc = "Intel Gigabit Ethernet";
-    dc->reset = qdev_e1000_reset;
     dc->vmsd = &vmstate_e1000;
     device_class_set_props(dc, e1000_properties);
 }
diff --git a/hw/net/e1000_common.h b/hw/net/e1000_common.h
new file mode 100644
index 0000000000..48feda7404
--- /dev/null
+++ b/hw/net/e1000_common.h
@@ -0,0 +1,102 @@
+/*
+ * QEMU e1000(e) emulation - shared definitions
+ *
+ * Copyright (c) 2008 Qumranet
+ *
+ * Based on work done by:
+ * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+ * Copyright (c) 2007 Dan Aloni
+ * Copyright (c) 2004 Antony T Curtis
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#ifndef HW_NET_E1000_COMMON_H +#define HW_NET_E1000_COMMON_H + +#include "e1000_regs.h" + +#define defreg(x) x = (E1000_##x >> 2) +enum { + defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC), + defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC), + defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC), + defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH0), + defreg(RDBAL0), defreg(RDH0), defreg(RDLEN0), defreg(RDT0), + defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH), + defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT), + defreg(TDLEN1), defreg(TDBAL1), defreg(TDBAH1), defreg(TDH1), + defreg(TDT1), defreg(TORH), defreg(TORL), defreg(TOTH), + defreg(TOTL), defreg(TPR), defreg(TPT), defreg(TXDCTL), + defreg(WUFC), defreg(RA), defreg(MTA), defreg(CRCERRS), + defreg(VFTA), defreg(VET), defreg(RDTR), defreg(RADV), + defreg(TADV), defreg(ITR), defreg(SCC), defreg(ECOL), + defreg(MCC), defreg(LATECOL), defreg(COLC), defreg(DC), + defreg(TNCRS), defreg(SEQEC), defreg(CEXTERR), defreg(RLEC), + defreg(XONRXC), defreg(XONTXC), defreg(XOFFRXC), defreg(XOFFTXC), + defreg(FCRUC), defreg(AIT), defreg(TDFH), defreg(TDFT), + defreg(TDFHS), defreg(TDFTS), defreg(TDFPC), defreg(WUC), + defreg(WUS), defreg(POEMB), defreg(PBS), defreg(RDFH), + defreg(RDFT), defreg(RDFHS), defreg(RDFTS), defreg(RDFPC), + defreg(PBM), defreg(IPAV), defreg(IP4AT), defreg(IP6AT), + defreg(WUPM), defreg(FFLT), defreg(FFMT), defreg(FFVT), + defreg(TARC0), defreg(TARC1), defreg(IAM), defreg(EXTCNF_CTRL), + defreg(GCR), defreg(TIMINCA), defreg(EIAC), defreg(CTRL_EXT), + defreg(IVAR), defreg(MFUTP01), defreg(MFUTP23), defreg(MANC2H), + defreg(MFVAL), defreg(MDEF), defreg(FACTPS), defreg(FTFT), + defreg(RUC), defreg(ROC), defreg(RFC), defreg(RJC), + defreg(PRC64), defreg(PRC127), defreg(PRC255), defreg(PRC511), + defreg(PRC1023), defreg(PRC1522), defreg(PTC64), defreg(PTC127), + defreg(PTC255), defreg(PTC511), defreg(PTC1023), defreg(PTC1522), + defreg(GORCL), defreg(GORCH), defreg(GOTCL), defreg(GOTCH), + defreg(RNBC), defreg(BPRC), defreg(MPRC), defreg(RFCTL), + defreg(PSRCTL), defreg(MPTC), defreg(BPTC), defreg(TSCTFC), + defreg(IAC), defreg(MGTPRC), defreg(MGTPDC), defreg(MGTPTC), + defreg(TSCTC), defreg(RXCSUM), defreg(FUNCTAG), defreg(GSCL_1), + defreg(GSCL_2), defreg(GSCL_3), defreg(GSCL_4), defreg(GSCN_0), + defreg(GSCN_1), defreg(GSCN_2), defreg(GSCN_3), defreg(GCR2), + defreg(RAID), defreg(RSRPD), defreg(TIDV), defreg(EITR), + defreg(MRQC), defreg(RETA), defreg(RSSRK), defreg(RDBAH1), + defreg(RDBAL1), defreg(RDLEN1), defreg(RDH1), defreg(RDT1), + defreg(PBACLR), defreg(FCAL), defreg(FCAH), defreg(FCT), + defreg(FCRTH), defreg(FCRTL), defreg(FCTTV), defreg(FCRTV), + defreg(FLA), defreg(EEWR), defreg(FLOP), defreg(FLOL), + defreg(FLSWCTL), defreg(FLSWCNT), defreg(RXDCTL), defreg(RXDCTL1), + defreg(MAVTV0), defreg(MAVTV1), defreg(MAVTV2), defreg(MAVTV3), + defreg(TXSTMPL), defreg(TXSTMPH), defreg(SYSTIML), defreg(SYSTIMH), + defreg(RXCFGL), defreg(RXUDP), defreg(TIMADJL), defreg(TIMADJH), + defreg(RXSTMPH), defreg(RXSTMPL), defreg(RXSATRL), defreg(RXSATRH), + defreg(FLASHT), defreg(TIPG), defreg(RDH), defreg(RDT), + defreg(RDLEN), defreg(RDBAH), defreg(RDBAL), + defreg(TXDCTL1), + defreg(FLSWDATA), + defreg(CTRL_DUP), + defreg(EXTCNF_SIZE), + defreg(EEMNGCTL), + defreg(EEMNGDATA), + defreg(FLMNGCTL), + defreg(FLMNGDATA), + defreg(FLMNGCNT), + defreg(TSYNCRXCTL), + defreg(TSYNCTXCTL), + + /* Aliases */ + defreg(RDH0_A), defreg(RDT0_A), defreg(RDTR_A), defreg(RDFH_A), + defreg(RDFT_A), defreg(TDH_A), 
defreg(TDT_A), defreg(TIDV_A), + defreg(TDFH_A), defreg(TDFT_A), defreg(RA_A), defreg(RDBAL0_A), + defreg(TDBAL_A), defreg(TDLEN_A), defreg(VFTA_A), defreg(RDLEN0_A), + defreg(FCRTL_A), defreg(FCRTH_A) +}; + +#endif diff --git a/hw/net/e1000_regs.h b/hw/net/e1000_regs.h index 59e050742b..8a4ce82034 100644 --- a/hw/net/e1000_regs.h +++ b/hw/net/e1000_regs.h @@ -32,157 +32,35 @@ #ifndef HW_E1000_REGS_H #define HW_E1000_REGS_H -/* PCI Device IDs */ -#define E1000_DEV_ID_82542 0x1000 -#define E1000_DEV_ID_82543GC_FIBER 0x1001 -#define E1000_DEV_ID_82543GC_COPPER 0x1004 -#define E1000_DEV_ID_82544EI_COPPER 0x1008 -#define E1000_DEV_ID_82544EI_FIBER 0x1009 -#define E1000_DEV_ID_82544GC_COPPER 0x100C -#define E1000_DEV_ID_82544GC_LOM 0x100D -#define E1000_DEV_ID_82540EM 0x100E -#define E1000_DEV_ID_82540EM_LOM 0x1015 -#define E1000_DEV_ID_82540EP_LOM 0x1016 -#define E1000_DEV_ID_82540EP 0x1017 -#define E1000_DEV_ID_82540EP_LP 0x101E -#define E1000_DEV_ID_82545EM_COPPER 0x100F -#define E1000_DEV_ID_82545EM_FIBER 0x1011 -#define E1000_DEV_ID_82545GM_COPPER 0x1026 -#define E1000_DEV_ID_82545GM_FIBER 0x1027 -#define E1000_DEV_ID_82545GM_SERDES 0x1028 -#define E1000_DEV_ID_82546EB_COPPER 0x1010 -#define E1000_DEV_ID_82546EB_FIBER 0x1012 -#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D -#define E1000_DEV_ID_82541EI 0x1013 -#define E1000_DEV_ID_82541EI_MOBILE 0x1018 -#define E1000_DEV_ID_82541ER_LOM 0x1014 -#define E1000_DEV_ID_82541ER 0x1078 -#define E1000_DEV_ID_82547GI 0x1075 -#define E1000_DEV_ID_82541GI 0x1076 -#define E1000_DEV_ID_82541GI_MOBILE 0x1077 -#define E1000_DEV_ID_82541GI_LF 0x107C -#define E1000_DEV_ID_82546GB_COPPER 0x1079 -#define E1000_DEV_ID_82546GB_FIBER 0x107A -#define E1000_DEV_ID_82546GB_SERDES 0x107B -#define E1000_DEV_ID_82546GB_PCIE 0x108A -#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 -#define E1000_DEV_ID_82547EI 0x1019 -#define E1000_DEV_ID_82547EI_MOBILE 0x101A -#define E1000_DEV_ID_82571EB_COPPER 0x105E -#define E1000_DEV_ID_82571EB_FIBER 0x105F -#define E1000_DEV_ID_82571EB_SERDES 0x1060 -#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 -#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 -#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 -#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC -#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 -#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA -#define E1000_DEV_ID_82572EI_COPPER 0x107D -#define E1000_DEV_ID_82572EI_FIBER 0x107E -#define E1000_DEV_ID_82572EI_SERDES 0x107F -#define E1000_DEV_ID_82572EI 0x10B9 -#define E1000_DEV_ID_82573E 0x108B -#define E1000_DEV_ID_82573E_IAMT 0x108C -#define E1000_DEV_ID_82573L 0x109A -#define E1000_DEV_ID_82574L 0x10D3 -#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 -#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 -#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 -#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA -#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB +#include "e1000x_regs.h" -#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 -#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A -#define E1000_DEV_ID_ICH8_IGP_C 0x104B -#define E1000_DEV_ID_ICH8_IFE 0x104C -#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 -#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 -#define E1000_DEV_ID_ICH8_IGP_M 0x104D - -/* Device Specific Register Defaults */ -#define E1000_PHY_ID2_82541x 0x380 -#define E1000_PHY_ID2_82544x 0xC30 -#define E1000_PHY_ID2_8254xx_DEFAULT 0xC20 /* 82540x, 82545x, and 82546x */ -#define E1000_PHY_ID2_82573x 0xCC0 -#define E1000_PHY_ID2_82574x 0xCB1 - -/* Register Set. 
(82543, 82544) - * - * Registers are defined to be 32 bits and should be accessed as 32 bit values. - * These registers are physically located on the NIC, but are mapped into the - * host memory address space. - * - * RW - register is both readable and writable - * RO - register is read only - * WO - register is write only - * R/clr - register is read only and is cleared when read - * A - register array - */ -#define E1000_CTRL 0x00000 /* Device Control - RW */ -#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ -#define E1000_STATUS 0x00008 /* Device Status - RO */ -#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ -#define E1000_EERD 0x00014 /* EEPROM Read - RW */ -#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ -#define E1000_FLA 0x0001C /* Flash Access - RW */ -#define E1000_MDIC 0x00020 /* MDI Control - RW */ -#define E1000_SCTL 0x00024 /* SerDes Control - RW */ -#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ -#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ -#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ -#define E1000_FCT 0x00030 /* Flow Control Type - RW */ -#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ -#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ #define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ -#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ -#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ #define E1000_EIAC 0x000DC /* Ext. Interrupt Auto Clear - RW */ -#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ -#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ #define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */ #define E1000_EITR 0x000E8 /* Extended Interrupt Throttling Rate - RW */ -#define E1000_RCTL 0x00100 /* RX Control - RW */ -#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ #define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ #define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ #define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ #define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ #define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ -#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ #define E1000_FCRTV 0x05F40 /* Flow Control Refresh Timer Value - RW */ #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ #define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ -#define E1000_TCTL 0x00400 /* TX Control - RW */ -#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ -#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ #define E1000_TBT 0x00448 /* TX Burst Timer - RW */ #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ -#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ #define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ #define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ #define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ -#define FEXTNVM_SW_CONFIG 0x0001 #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBM 0x10000 /* Packet Buffer Memory - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size - RW */ -#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ -#define E1000_EEMNGDATA 0x01014 /* MNG EEPROM Read/Write data */ -#define E1000_FLMNGCTL 0x01018 /* MNG Flash Control */ -#define E1000_FLMNGDATA 0x0101C /* MNG FLASH Read data */ -#define E1000_FLMNGCNT 
0x01020 /* MNG FLASH Read Counter */ -#define E1000_FLASH_UPDATES 1000 -#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ #define E1000_FLASHT 0x01028 /* FLASH Timer Register */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_FLSWCTL 0x01030 /* FLASH control register */ #define E1000_FLSWDATA 0x01034 /* FLASH data register */ #define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ -#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ #define E1000_FLOL 0x01050 /* FEEP Auto Load */ #define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ -#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ -#define E1000_FCRTL_A 0x00168 /* Alias to FCRTL */ -#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ #define E1000_FCRTH_A 0x00160 /* Alias to FCRTH */ #define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ #define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ @@ -208,23 +86,7 @@ #define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ #define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ #define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ -#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ -#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ #define E1000_POEMB 0x00F10 /* PHY OEM Bits Register - RW */ -#define E1000_RDFH 0x02410 /* Receive Data FIFO Head Register - RW */ -#define E1000_RDFH_A 0x08000 /* Alias to RDFH */ -#define E1000_RDFT 0x02418 /* Receive Data FIFO Tail Register - RW */ -#define E1000_RDFT_A 0x08008 /* Alias to RDFT */ -#define E1000_RDFHS 0x02420 /* Receive Data FIFO Head Saved Register - RW */ -#define E1000_RDFTS 0x02428 /* Receive Data FIFO Tail Saved Register - RW */ -#define E1000_RDFPC 0x02430 /* Receive Data FIFO Packet Count - RW */ -#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ -#define E1000_TDFH_A 0x08010 /* Alias to TDFH */ -#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ -#define E1000_TDFT_A 0x08018 /* Alias to TDFT */ -#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ -#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ -#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ #define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ #define E1000_TDBAL_A 0x00420 /* Alias to TDBAL */ #define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ @@ -248,174 +110,40 @@ #define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ #define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ #define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ -#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ -#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ -#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ -#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ -#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ -#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ -#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ -#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ -#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ -#define E1000_COLC 0x04028 /* Collision Count - R/clr */ -#define E1000_DC 0x04030 /* Defer Count - R/clr */ -#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ #define E1000_SEQEC 0x04038 /* Sequence Error Count - R/clr */ #define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ -#define 
E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ -#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ -#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ -#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ -#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ -#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ -#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ -#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ -#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ -#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ -#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ -#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ -#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ -#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ -#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ -#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ -#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ -#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ -#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ -#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ -#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ -#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ -#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ -#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ -#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ -#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ -#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ -#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ -#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ -#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ -#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ -#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ -#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ -#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ -#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ -#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ -#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ -#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ -#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ -#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ -#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ -#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ -#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ #define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ -#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ -#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ #define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ #define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ #define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ #define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ #define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ -#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold 
Count */ #define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ -#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ -#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ -#define E1000_MAVTV0 0x05010 /* Management VLAN TAG Value 0 */ -#define E1000_MAVTV1 0x05014 /* Management VLAN TAG Value 1 */ -#define E1000_MAVTV2 0x05018 /* Management VLAN TAG Value 2 */ -#define E1000_MAVTV3 0x0501c /* Management VLAN TAG Value 3 */ -#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ -#define E1000_RA 0x05400 /* Receive Address - RW Array */ -#define E1000_RA_A 0x00040 /* Alias to RA */ -#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ -#define E1000_VFTA_A 0x00600 /* Alias to VFTA */ -#define E1000_WUC 0x05800 /* Wakeup Control - RW */ -#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ -#define E1000_WUS 0x05810 /* Wakeup Status - RO */ -#define E1000_MANC 0x05820 /* Management Control - RW */ -#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ -#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ -#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ -#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ -#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ #define E1000_MFUTP01 0x05828 /* Management Flex UDP/TCP Ports 0/1 - RW */ #define E1000_MFUTP23 0x05830 /* Management Flex UDP/TCP Ports 2/3 - RW */ -#define E1000_MFVAL 0x05824 /* Manageability Filters Valid - RW */ -#define E1000_MDEF 0x05890 /* Manageability Decision Filters - RW Array */ #define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ #define E1000_HOST_IF 0x08800 /* Host Interface */ -#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ -#define E1000_FTFT 0x09400 /* Flexible TCO Filter Table - RW Array */ #define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ #define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ #define E1000_MDPHYA 0x0003C /* PHY address - RW */ -#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ -#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ -#define E1000_GCR 0x05B00 /* PCI-Ex Control */ -#define E1000_FUNCTAG 0x05B08 /* Function-Tag Register */ -#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ -#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ -#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ -#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ -#define E1000_GSCN_0 0x05B20 /* 3GIO Statistic Counter Register #0 */ -#define E1000_GSCN_1 0x05B24 /* 3GIO Statistic Counter Register #1 */ -#define E1000_GSCN_2 0x05B28 /* 3GIO Statistic Counter Register #2 */ -#define E1000_GSCN_3 0x05B2C /* 3GIO Statistic Counter Register #3 */ -#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ -#define E1000_SWSM 0x05B50 /* SW Semaphore */ #define E1000_GCR2 0x05B64 /* 3GIO Control Register 2 */ -#define E1000_FWSM 0x05B54 /* FW Semaphore */ -#define E1000_PBACLR 0x05B68 /* MSI-X PBA Clear */ #define E1000_FFLT_DBG 0x05F04 /* Debug Register */ #define E1000_HICR 0x08F00 /* Host Inteface Control */ -#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ -#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ -#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ -#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ -#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ -#define 
@@ -444,18 +172,8 @@
 #define E1000_IVAR_TX_INT_EVERY_WB BIT(31)
-/* RFCTL register bits */
-#define E1000_RFCTL_ISCSI_DIS 0x00000001
-#define E1000_RFCTL_NFSW_DIS 0x00000040
-#define E1000_RFCTL_NFSR_DIS 0x00000080
-#define E1000_RFCTL_IPV6_DIS 0x00000400
-#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
 #define E1000_RFCTL_ACK_DIS 0x00001000
 #define E1000_RFCTL_ACK_DATA_DIS 0x00002000
-#define E1000_RFCTL_IPFRSP_DIS 0x00004000
-#define E1000_RFCTL_EXTEN 0x00008000
-#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
-#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
 /* PSRCTL parsing */
 #define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
@@ -470,24 +188,7 @@
 #define E1000_PSRCTL_BUFFS_PER_DESC 4
-/* TARC* parsing */
-#define E1000_TARC_ENABLE BIT(10)
-
 /* PHY 1000 MII Register/Bit Definitions */
-/* PHY Registers defined by IEEE */
-#define PHY_CTRL 0x00 /* Control Register */
-#define PHY_STATUS 0x01 /* Status Regiser */
-#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
-#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
-#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
-#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
-#define PHY_AUTONEG_EXP
0x06 /* Autoneg Expansion Reg */ -#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ -#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ -#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ -#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ -#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ - /* 82574-specific registers */ #define PHY_COPPER_CTRL1 0x10 /* Copper Specific Control Register 1 */ #define PHY_COPPER_STAT1 0x11 /* Copper Specific Status Register 1 */ @@ -539,287 +240,6 @@ #define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ #define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ -/* PHY Control Register */ -#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ -#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ -#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ -#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ -#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ -#define MII_CR_POWER_DOWN 0x0800 /* Power down */ -#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ -#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ - -/* PHY Status Register */ -#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ -#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ -#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ -#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ -#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ -#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ -#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ -#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ -#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ -#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ -#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ -#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ -#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ -#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ - -/* PHY Link Partner Ability Register */ -#define MII_LPAR_LPACK 0x4000 /* Acked by link partner */ - -/* Interrupt Cause Read */ -#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ -#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ -#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ -#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ -#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ -#define E1000_ICR_RXO 0x00000040 /* rx overrun */ -#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ -#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ -#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ -#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ -#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ -#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ -#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ -#define E1000_ICR_TXD_LOW 0x00008000 -#define E1000_ICR_SRPD 0x00010000 -#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ -#define E1000_ICR_MNG 0x00040000 /* Manageability event */ -#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ -#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ -#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ -#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ -#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ -#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ -#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ -#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ -#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ -#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ -#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ -#define E1000_ICR_EPRST 0x00100000 /* ME handware reset occurs */ -#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ -#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ -#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ -#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ -#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ - -#define E1000_ICR_OTHER_CAUSES (E1000_ICR_LSC | \ - E1000_ICR_RXO | \ - E1000_ICR_MDAC | \ - E1000_ICR_SRPD | \ - E1000_ICR_ACK | \ - E1000_ICR_MNG) - -/* Interrupt Cause Set */ -#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ -#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ -#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ -#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ -#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ -#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ -#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ -#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ -#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ -#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ -#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ -#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ -#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW -#define E1000_ICS_SRPD E1000_ICR_SRPD -#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ -#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ -#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ -#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ -#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ -#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ -#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ -#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ -#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ -#define E1000_ICS_DSW E1000_ICR_DSW -#define E1000_ICS_PHYINT E1000_ICR_PHYINT -#define E1000_ICS_EPRST E1000_ICR_EPRST - -/* Interrupt Mask Set */ -#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ -#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ -#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ -#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ -#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ -#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ -#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ -#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ -#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ -#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ -#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ -#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ -#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW -#define E1000_IMS_SRPD E1000_ICR_SRPD -#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ -#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ -#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 -#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 -#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 -#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 -#define E1000_IMS_OTHER E1000_ICR_OTHER -#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ -#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ -#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ -#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ -#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ -#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ -#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ -#define E1000_IMS_DSW E1000_ICR_DSW -#define E1000_IMS_PHYINT E1000_ICR_PHYINT -#define E1000_IMS_EPRST E1000_ICR_EPRST - -/* Interrupt Mask Clear */ -#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ -#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ -#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ -#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ -#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ -#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ -#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ -#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ -#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ -#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ -#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ -#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ -#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW -#define E1000_IMC_SRPD E1000_ICR_SRPD -#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ -#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ -#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ -#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ -#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ -#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ -#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ -#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ -#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ -#define E1000_IMC_DSW E1000_ICR_DSW -#define E1000_IMC_PHYINT E1000_ICR_PHYINT -#define E1000_IMC_EPRST E1000_ICR_EPRST - -/* Receive Control */ -#define E1000_RCTL_RST 0x00000001 /* Software reset */ -#define E1000_RCTL_EN 0x00000002 /* enable */ -#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ -#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ -#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ -#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ -#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ -#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ -#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ -#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ -#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ -#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ -#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ -#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ -#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ -#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ -#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ -#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ -#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ -#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ -#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ -#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ -/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ -#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ -#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ -#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ -#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ -/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ -#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ -#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ -#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ -#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ -#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ -#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ -#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ -#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ -#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ -#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ -#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ -#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ - - -#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ -#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ -#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ -#define E1000_EEPROM_RW_REG_DONE 0x10 /* Offset to READ/WRITE done bit */ -#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ -#define E1000_EEPROM_RW_ADDR_SHIFT 8 /* Shift to the address bits */ -#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ -#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ - -/* 82574 EERD/EEWR registers layout */ -#define E1000_EERW_START BIT(0) -#define E1000_EERW_DONE BIT(1) -#define E1000_EERW_ADDR_SHIFT 2 -#define E1000_EERW_ADDR_MASK ((1L << 14) - 1) -#define E1000_EERW_DATA_SHIFT 16 -#define E1000_EERW_DATA_MASK ((1L << 16) - 1) - -/* Register Bit Masks */ -/* Device Control */ -#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ -#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ -#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ -#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ -#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ -#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ -#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ -#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ -#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ -#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ -#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ -#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ -#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ -#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ -#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ -#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ -#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ -#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ -#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ -#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ -#define E1000_CTRL_SPD_SHIFT 8 /* Speed Select Shift */ - -#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* auto speed detection check */ -#define E1000_CTRL_EXT_EE_RST 0x00002000 /* EEPROM reset */ -#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ -#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ -#define E1000_CTRL_EXT_EIAME 0x01000000 -#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ -#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ -#define E1000_CTRL_EXT_INT_TIMERS_CLEAR_ENA 0x20000000 -#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ - -#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ -#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ -#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ -#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ -#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ -#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ -#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ -#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ -#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ -#define E1000_CTRL_RST 0x04000000 /* Global reset */ -#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ -#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ -#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ -#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ -#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ -#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ - -/* Device Status */ -#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ -#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ #define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ #define E1000_STATUS_FUNC_SHIFT 2 #define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ @@ -827,9 +247,6 @@ #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ #define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ #define E1000_STATUS_SPEED_MASK 0x000000C0 -#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ -#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ -#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ #define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by EEPROM/Flash */ #define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ @@ -837,9 +254,7 @@ #define E1000_STATUS_ASDV_100 0x00000100 
/* ASDV 100Mb */
 #define E1000_STATUS_ASDV_1000 0x00000200 /* ASDV 1Gb */
 #define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
 #define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
-#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
 #define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
 #define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
 #define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
@@ -857,111 +272,6 @@
 #define E1000_STATUS_SPEED_SHIFT 6
 #define E1000_STATUS_ASDV_SHIFT 8
-/* EEPROM/Flash Control */
-#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */
-#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */
-#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */
-#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */
-#define E1000_EECD_FWE_MASK 0x00000030
-#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
-#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
-#define E1000_EECD_FWE_SHIFT 4
-#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */
-#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */
-#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */
-#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
-#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
- * (0-small, 1-large) */
-#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
-#ifndef E1000_EEPROM_GRANT_ATTEMPTS
-#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
-#endif
-#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */
-#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */
-#define E1000_EECD_SIZE_EX_SHIFT 11
-#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
-#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
-#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
-#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
-#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
-#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
-#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
-
-
-#define E1000_EECD_SECVAL_SHIFT 22
-#define E1000_STM_OPCODE 0xDB00
-#define E1000_HICR_FW_RESET 0xC0
-
-#define E1000_SHADOW_RAM_WORDS 2048
-#define E1000_ICH_NVM_SIG_WORD 0x13
-#define E1000_ICH_NVM_SIG_MASK 0xC0
-
-/* MDI Control */
-#define E1000_MDIC_DATA_MASK 0x0000FFFF
-#define E1000_MDIC_REG_MASK 0x001F0000
-#define E1000_MDIC_REG_SHIFT 16
-#define E1000_MDIC_PHY_MASK 0x03E00000
-#define E1000_MDIC_PHY_SHIFT 21
-#define E1000_MDIC_OP_WRITE 0x04000000
-#define E1000_MDIC_OP_READ 0x08000000
-#define E1000_MDIC_READY 0x10000000
-#define E1000_MDIC_INT_EN 0x20000000
-#define E1000_MDIC_ERROR 0x40000000
-
-/* Rx Interrupt Delay Timer */
-#define E1000_RDTR_FPD BIT(31)
-
-/* Tx Interrupt Delay Timer */
-#define E1000_TIDV_FPD BIT(31)
-
-/* Delay increments in nanoseconds for delayed interrupts registers */
-#define E1000_INTR_DELAY_NS_RES (1024)
-
-/* Delay increments in nanoseconds for interrupt throttling registers */
-#define E1000_INTR_THROTTLING_NS_RES (256)
-
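The two resolution constants deleted just above set the units for everything the e1000e core does with interrupt moderation, and they are easy to misread as raw nanoseconds: RDTR/RADV/TIDV/ITR hold tick counts. A small illustrative sketch of the conversions they imply (plain C, not QEMU API; the constants are copied from this header):

    #include <stdint.h>

    enum {
        INTR_DELAY_NS_RES      = 1024, /* E1000_INTR_DELAY_NS_RES */
        INTR_THROTTLING_NS_RES = 256,  /* E1000_INTR_THROTTLING_NS_RES */
    };

    /* A guest writing 64 to RDTR asks for 64 * 1024 ns = 65.536 us of
     * RX interrupt delay. */
    static uint64_t rx_delay_ns(uint32_t rdtr_ticks)
    {
        return (uint64_t)rdtr_ticks * INTR_DELAY_NS_RES;
    }

    /* An ITR interval of 500 is 500 * 256 ns = 128 us, i.e. at most
     * ~7813 interrupts per second, which matches the E1000E_MIN_XITR
     * floor used by the device model below. */
    static uint64_t throttling_interval_ns(uint32_t itr_ticks)
    {
        return (uint64_t)itr_ticks * INTR_THROTTLING_NS_RES;
    }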
-/* EEPROM Commands - Microwire */
-#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
-#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
-#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */
-#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */
-#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erast/write disable */
-
-/* EEPROM Word Offsets */
-#define EEPROM_COMPAT 0x0003
-#define EEPROM_ID_LED_SETTINGS 0x0004
-#define EEPROM_VERSION 0x0005
-#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
-#define EEPROM_PHY_CLASS_WORD 0x0007
-#define EEPROM_INIT_CONTROL1_REG 0x000A
-#define EEPROM_INIT_CONTROL2_REG 0x000F
-#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
-#define EEPROM_INIT_CONTROL3_PORT_B 0x0014
-#define EEPROM_INIT_3GIO_3 0x001A
-#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
-#define EEPROM_INIT_CONTROL3_PORT_A 0x0024
-#define EEPROM_CFG 0x0012
-#define EEPROM_FLASH_VERSION 0x0032
-#define EEPROM_CHECKSUM_REG 0x003F
-
-#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
-#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */
-
-/* PCI Express Control */
-/* 3GIO Control Register - GCR (0x05B00; RW) */
-#define E1000_L0S_ADJUST (1 << 9)
-#define E1000_L1_ENTRY_LATENCY_MSB (1 << 23)
-#define E1000_L1_ENTRY_LATENCY_LSB (1 << 25 | 1 << 26)
-
-#define E1000_L0S_ADJUST (1 << 9)
-#define E1000_L1_ENTRY_LATENCY_MSB (1 << 23)
-#define E1000_L1_ENTRY_LATENCY_LSB (1 << 25 | 1 << 26)
-
-#define E1000_GCR_RO_BITS (1 << 23 | 1 << 25 | 1 << 26)
-
-/* MSI-X PBA Clear register */
-#define E1000_PBACLR_VALID_MASK (BIT(5) - 1)
-
 /* Transmit Descriptor */
 struct e1000_tx_desc {
 uint64_t buffer_addr; /* Address of the descriptor's data buffer */
@@ -983,269 +293,7 @@ struct e1000_tx_desc {
 } upper;
 };
-/* Transmit Descriptor bit definitions */
-#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
-#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
 #define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
 #define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
-#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
-#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
-#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
-#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
-#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
-#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
-#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
-#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
-#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
-#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
-#define E1000_TXD_CMD_SNAP 0x40000000 /* Update SNAP header */
-#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
-#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
-
-/* Transmit Control */
-#define E1000_TCTL_RST 0x00000001 /* software reset */
-#define E1000_TCTL_EN 0x00000002 /* enable tx */
-#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
-#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
-#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
-#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
-#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
-#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
-#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
-#define
E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ -#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ - -/* Legacy Receive Descriptor */ -struct e1000_rx_desc { - uint64_t buffer_addr; /* Address of the descriptor's data buffer */ - uint16_t length; /* Length of data DMAed into data buffer */ - uint16_t csum; /* Packet checksum */ - uint8_t status; /* Descriptor status */ - uint8_t errors; /* Descriptor Errors */ - uint16_t special; -}; - -/* Extended Receive Descriptor */ -union e1000_rx_desc_extended { - struct { - uint64_t buffer_addr; - uint64_t reserved; - } read; - struct { - struct { - uint32_t mrq; /* Multiple Rx Queues */ - union { - uint32_t rss; /* RSS Hash */ - struct { - uint16_t ip_id; /* IP id */ - uint16_t csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - uint32_t status_error; /* ext status/error */ - uint16_t length; - uint16_t vlan; /* VLAN tag */ - } upper; - } wb; /* writeback */ -}; - -#define MAX_PS_BUFFERS 4 - -/* Number of packet split data buffers (not including the header buffer) */ -#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) - -/* Receive Descriptor - Packet Split */ -union e1000_rx_desc_packet_split { - struct { - /* one buffer for protocol header(s), three data buffers */ - uint64_t buffer_addr[MAX_PS_BUFFERS]; - } read; - struct { - struct { - uint32_t mrq; /* Multiple Rx Queues */ - union { - uint32_t rss; /* RSS Hash */ - struct { - uint16_t ip_id; /* IP id */ - uint16_t csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - uint32_t status_error; /* ext status/error */ - uint16_t length0; /* length of buffer 0 */ - uint16_t vlan; /* VLAN tag */ - } middle; - struct { - uint16_t header_status; - /* length of buffers 1-3 */ - uint16_t length[PS_PAGE_BUFFERS]; - } upper; - uint64_t reserved; - } wb; /* writeback */ -}; - -/* Receive Checksum Control bits */ -#define E1000_RXCSUM_IPOFLD 0x100 /* IP Checksum Offload Enable */ -#define E1000_RXCSUM_TUOFLD 0x200 /* TCP/UDP Checksum Offload Enable */ -#define E1000_RXCSUM_PCSD 0x2000 /* Packet Checksum Disable */ - -#define E1000_RING_DESC_LEN (16) -#define E1000_RING_DESC_LEN_SHIFT (4) - -#define E1000_MIN_RX_DESC_LEN E1000_RING_DESC_LEN -#define E1000_MAX_RX_DESC_LEN (sizeof(union e1000_rx_desc_packet_split)) - -/* Receive Descriptor bit definitions */ -#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ -#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ -#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ -#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ -#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ -#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ -#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ -#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ -#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ -#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ -#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ -#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ -#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ -#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ -#define 
E1000_RXD_SPC_PRI_SHIFT 13 -#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define E1000_RXD_SPC_CFI_SHIFT 12 - -/* RX packet types */ -#define E1000_RXD_PKT_MAC (0) -#define E1000_RXD_PKT_IP4 (1) -#define E1000_RXD_PKT_IP4_XDP (2) -#define E1000_RXD_PKT_IP6 (5) -#define E1000_RXD_PKT_IP6_XDP (6) - -#define E1000_RXD_PKT_TYPE(t) ((t) << 16) - -#define E1000_RXDEXT_STATERR_CE 0x01000000 -#define E1000_RXDEXT_STATERR_SE 0x02000000 -#define E1000_RXDEXT_STATERR_SEQ 0x04000000 -#define E1000_RXDEXT_STATERR_CXE 0x10000000 -#define E1000_RXDEXT_STATERR_TCPE 0x20000000 -#define E1000_RXDEXT_STATERR_IPE 0x40000000 -#define E1000_RXDEXT_STATERR_RXE 0x80000000 - -#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 -#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF - -/* Receive Address */ -#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ - -/* Offload Context Descriptor */ -struct e1000_context_desc { - union { - uint32_t ip_config; - struct { - uint8_t ipcss; /* IP checksum start */ - uint8_t ipcso; /* IP checksum offset */ - uint16_t ipcse; /* IP checksum end */ - } ip_fields; - } lower_setup; - union { - uint32_t tcp_config; - struct { - uint8_t tucss; /* TCP checksum start */ - uint8_t tucso; /* TCP checksum offset */ - uint16_t tucse; /* TCP checksum end */ - } tcp_fields; - } upper_setup; - uint32_t cmd_and_length; /* */ - union { - uint32_t data; - struct { - uint8_t status; /* Descriptor status */ - uint8_t hdr_len; /* Header length */ - uint16_t mss; /* Maximum segment size */ - } fields; - } tcp_seg_setup; -}; - -/* Offload data descriptor */ -struct e1000_data_desc { - uint64_t buffer_addr; /* Address of the descriptor's buffer address */ - union { - uint32_t data; - struct { - uint16_t length; /* Data buffer length */ - uint8_t typ_len_ext; /* */ - uint8_t cmd; /* */ - } flags; - } lower; - union { - uint32_t data; - struct { - uint8_t status; /* Descriptor status */ - uint8_t popts; /* Packet Options */ - uint16_t special; /* */ - } fields; - } upper; -}; - -/* Management Control */ -#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ -#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ -#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ -#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ -#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ -#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ -#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ -#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ -#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ -#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery - * Filtering */ -#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ -#define E1000_MANC_DIS_IP_CHK_ARP 0x10000000 /* Disable IP address chacking */ - /*for ARP packets - in 82574 */ -#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ -#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ -#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ -#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ -#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ -#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address - * filtering */ -#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host - * memory */ -#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address - * filtering */ -#define 
E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ -#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ -#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ -#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ -#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ -#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ -#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ -#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ - -#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ -#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ - -/* FACTPS Control */ -#define E1000_FACTPS_LAN0_ON 0x00000004 /* Lan 0 enable */ - -/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ -#define EEPROM_SUM 0xBABA - -/* I/O-Mapped Access to Internal Registers, Memories, and Flash */ -#define E1000_IOADDR 0x00 -#define E1000_IODATA 0x04 #endif /* HW_E1000_REGS_H */ diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c index 7523e9f5d2..c3848797b8 100644 --- a/hw/net/e1000e.c +++ b/hw/net/e1000e.c @@ -1,37 +1,37 @@ /* -* QEMU INTEL 82574 GbE NIC emulation -* -* Software developer's manuals: -* http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf -* -* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) -* Developed by Daynix Computing LTD (http://www.daynix.com) -* -* Authors: -* Dmitry Fleytman -* Leonid Bloch -* Yan Vugenfirer -* -* Based on work done by: -* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. -* Copyright (c) 2008 Qumranet -* Based on work done by: -* Copyright (c) 2007 Dan Aloni -* Copyright (c) 2004 Antony T Curtis -* -* This library is free software; you can redistribute it and/or -* modify it under the terms of the GNU Lesser General Public -* License as published by the Free Software Foundation; either -* version 2.1 of the License, or (at your option) any later version. -* -* This library is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -* Lesser General Public License for more details. -* -* You should have received a copy of the GNU Lesser General Public -* License along with this library; if not, see . -*/ + * QEMU INTEL 82574 GbE NIC emulation + * + * Software developer's manuals: + * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf + * + * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Dmitry Fleytman + * Leonid Bloch + * Yan Vugenfirer + * + * Based on work done by: + * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. + * Copyright (c) 2008 Qumranet + * Based on work done by: + * Copyright (c) 2007 Dan Aloni + * Copyright (c) 2004 Antony T Curtis + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ #include "qemu/osdep.h" #include "qemu/units.h" @@ -42,13 +42,13 @@ #include "qemu/range.h" #include "sysemu/sysemu.h" #include "hw/hw.h" +#include "hw/net/mii.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" -#include "e1000_regs.h" - +#include "e1000_common.h" #include "e1000x_common.h" #include "e1000e_core.h" @@ -81,6 +81,7 @@ struct E1000EState { E1000ECore core; bool init_vet; + bool timadj; }; #define E1000E_MMIO_IDX 0 @@ -239,9 +240,9 @@ static NetClientInfo net_e1000e_info = { }; /* -* EEPROM (NVM) contents documented in Table 36, section 6.1 -* and generally 6.1.2 Software accessed words. -*/ + * EEPROM (NVM) contents documented in Table 36, section 6.1 + * and generally 6.1.2 Software accessed words. + */ static const uint16_t e1000e_eeprom_template[64] = { /* Address | Compat. | ImVer | Compat. */ 0x0000, 0x0000, 0x0000, 0x0420, 0xf746, 0x2010, 0xffff, 0xffff, @@ -512,11 +513,11 @@ static void e1000e_pci_uninit(PCIDevice *pci_dev) msi_uninit(pci_dev); } -static void e1000e_qdev_reset(DeviceState *dev) +static void e1000e_qdev_reset_hold(Object *obj) { - E1000EState *s = E1000E(dev); + E1000EState *s = E1000E(obj); - trace_e1000e_cb_qdev_reset(); + trace_e1000e_cb_qdev_reset_hold(); e1000e_core_reset(&s->core); @@ -553,6 +554,12 @@ static int e1000e_post_load(void *opaque, int version_id) return e1000e_core_post_load(&s->core); } +static bool e1000e_migrate_timadj(void *opaque, int version_id) +{ + E1000EState *s = opaque; + return s->timadj; +} + static const VMStateDescription e1000e_vmstate_tx = { .name = "e1000e-tx", .version_id = 1, @@ -630,12 +637,11 @@ static const VMStateDescription e1000e_vmstate = { VMSTATE_E1000E_INTR_DELAY_TIMER(core.tidv, E1000EState), VMSTATE_E1000E_INTR_DELAY_TIMER(core.itr, E1000EState), - VMSTATE_BOOL(core.itr_intr_pending, E1000EState), + VMSTATE_UNUSED(1), VMSTATE_E1000E_INTR_DELAY_TIMER_ARRAY(core.eitr, E1000EState, E1000E_MSIX_VEC_NUM), - VMSTATE_BOOL_ARRAY(core.eitr_intr_pending, E1000EState, - E1000E_MSIX_VEC_NUM), + VMSTATE_UNUSED(E1000E_MSIX_VEC_NUM), VMSTATE_UINT32(core.itr_guest_value, E1000EState), VMSTATE_UINT32_ARRAY(core.eitr_guest_value, E1000EState, @@ -645,6 +651,9 @@ static const VMStateDescription e1000e_vmstate = { VMSTATE_STRUCT_ARRAY(core.tx, E1000EState, E1000E_NUM_QUEUES, 0, e1000e_vmstate_tx, struct e1000e_tx), + + VMSTATE_INT64_TEST(core.timadj, E1000EState, e1000e_migrate_timadj), + VMSTATE_END_OF_LIST() } }; @@ -663,12 +672,14 @@ static Property e1000e_properties[] = { DEFINE_PROP_SIGNED("subsys", E1000EState, subsys, 0, e1000e_prop_subsys, uint16_t), DEFINE_PROP_BOOL("init-vet", E1000EState, init_vet, true), + DEFINE_PROP_BOOL("migrate-timadj", E1000EState, timadj, true), DEFINE_PROP_END_OF_LIST(), }; static void e1000e_class_init(ObjectClass *class, void *data) { DeviceClass *dc = DEVICE_CLASS(class); + ResettableClass *rc = RESETTABLE_CLASS(class); PCIDeviceClass *c = PCI_DEVICE_CLASS(class); c->realize = e1000e_pci_realize; @@ -679,8 +690,9 @@ static void e1000e_class_init(ObjectClass *class, void *data) c->romfile = "efi-e1000e.rom"; c->class_id = PCI_CLASS_NETWORK_ETHERNET; + rc->phases.hold = e1000e_qdev_reset_hold; + dc->desc = "Intel 82574L GbE Controller"; - dc->reset = e1000e_qdev_reset; dc->vmsd = &e1000e_vmstate; e1000e_prop_disable_vnet = qdev_prop_uint8; diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c 
index fc9cdb4528..9f185d099c 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -1,42 +1,43 @@ /* -* Core code for QEMU e1000e emulation -* -* Software developer's manuals: -* http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf -* -* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) -* Developed by Daynix Computing LTD (http://www.daynix.com) -* -* Authors: -* Dmitry Fleytman -* Leonid Bloch -* Yan Vugenfirer -* -* Based on work done by: -* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. -* Copyright (c) 2008 Qumranet -* Based on work done by: -* Copyright (c) 2007 Dan Aloni -* Copyright (c) 2004 Antony T Curtis -* -* This library is free software; you can redistribute it and/or -* modify it under the terms of the GNU Lesser General Public -* License as published by the Free Software Foundation; either -* version 2.1 of the License, or (at your option) any later version. -* -* This library is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -* Lesser General Public License for more details. -* -* You should have received a copy of the GNU Lesser General Public -* License along with this library; if not, see . -*/ + * Core code for QEMU e1000e emulation + * + * Software developer's manuals: + * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf + * + * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Dmitry Fleytman + * Leonid Bloch + * Yan Vugenfirer + * + * Based on work done by: + * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. + * Copyright (c) 2008 Qumranet + * Based on work done by: + * Copyright (c) 2007 Dan Aloni + * Copyright (c) 2004 Antony T Curtis + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */

 #include "qemu/osdep.h"
 #include "qemu/log.h"
 #include "net/net.h"
 #include "net/tap.h"
+#include "hw/net/mii.h"
 #include "hw/pci/msi.h"
 #include "hw/pci/msix.h"
 #include "sysemu/runstate.h"
@@ -44,18 +45,32 @@
 #include "net_tx_pkt.h"
 #include "net_rx_pkt.h"
+#include "e1000_common.h"
 #include "e1000x_common.h"
 #include "e1000e_core.h"
 #include "trace.h"
-#define E1000E_MIN_XITR (500) /* No more then 7813 interrupts per
-                                 second according to spec 10.2.4.2 */
+/* No more than 7813 interrupts per second according to spec 10.2.4.2 */
+#define E1000E_MIN_XITR (500)
+
 #define E1000E_MAX_TX_FRAGS (64)

+union e1000_rx_desc_union {
+    struct e1000_rx_desc legacy;
+    union e1000_rx_desc_extended extended;
+    union e1000_rx_desc_packet_split packet_split;
+};
+
+static ssize_t
+e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt,
+                        bool has_vnet);
+
 static inline void
 e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val);
+static void e1000e_reset(E1000ECore *core, bool sw);
+
 static inline void
 e1000e_process_ts_option(E1000ECore *core, struct e1000_tx_desc *dp)
 {
@@ -148,23 +163,16 @@ e1000e_intrmgr_on_throttling_timer(void *opaque)
 {
     E1000IntrDelayTimer *timer = opaque;
-    assert(!msix_enabled(timer->core->owner));
-
     timer->running = false;
-    if (!timer->core->itr_intr_pending) {
-        trace_e1000e_irq_throttling_no_pending_interrupts();
-        return;
-    }
-
-    if (msi_enabled(timer->core->owner)) {
-        trace_e1000e_irq_msi_notify_postponed();
-        /* Clear msi_causes_pending to fire MSI eventually */
-        timer->core->msi_causes_pending = 0;
-        e1000e_set_interrupt_cause(timer->core, 0);
-    } else {
-        trace_e1000e_irq_legacy_notify_postponed();
-        e1000e_set_interrupt_cause(timer->core, 0);
+    if (timer->core->mac[IMS] & timer->core->mac[ICR]) {
+        if (msi_enabled(timer->core->owner)) {
+            trace_e1000e_irq_msi_notify_postponed();
+            msi_notify(timer->core->owner, 0);
+        } else {
+            trace_e1000e_irq_legacy_notify_postponed();
+            e1000e_raise_legacy_irq(timer->core);
+        }
     }
 }
@@ -174,15 +182,8 @@ e1000e_intrmgr_on_msix_throttling_timer(void *opaque)
 {
     E1000IntrDelayTimer *timer = opaque;
     int idx = timer - &timer->core->eitr[0];
-    assert(msix_enabled(timer->core->owner));
-
     timer->running = false;
-    if (!timer->core->eitr_intr_pending[idx]) {
-        trace_e1000e_irq_throttling_no_pending_vec(idx);
-        return;
-    }
-
     trace_e1000e_irq_msix_notify_postponed_vec(idx);
     msix_notify(timer->core->owner, idx);
 }
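The two rewritten timer callbacks above drop the itr_intr_pending/eitr_intr_pending bookkeeping entirely: when a throttling window closes, the only question left is whether an unmasked cause is still latched. A condensed model of that decision, with stand-in types rather than the QEMU API:

    #include <stdbool.h>
    #include <stdint.h>

    struct nic_irq_model {
        uint32_t icr; /* latched interrupt causes (ICR) */
        uint32_t ims; /* causes the guest has unmasked (IMS) */
    };

    /* On throttling-timer expiry: fire the postponed interrupt only if an
     * enabled cause is pending; otherwise the expiry is a no-op. The caller
     * then raises MSI, MSI-X or the legacy INTx line as appropriate. */
    static bool should_fire_on_expiry(const struct nic_irq_model *nic)
    {
        return (nic->ims & nic->icr) != 0;
    }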
@@ -282,14 +283,18 @@ e1000e_intrmgr_delay_rx_causes(E1000ECore *core, uint32_t *causes)
     core->delayed_causes |= *causes & delayable_causes;
     *causes &= ~delayable_causes;
-    /* Check if delayed RX interrupts disabled by client
-       or if there are causes that cannot be delayed */
+    /*
+     * Check if delayed RX interrupts disabled by client
+     * or if there are causes that cannot be delayed
+     */
     if ((rdtr == 0) || (*causes != 0)) {
         return false;
     }
-    /* Check if delayed RX ACK interrupts disabled by client
-       and there is an ACK packet received */
+    /*
+     * Check if delayed RX ACK interrupts disabled by client
+     * and there is an ACK packet received
+     */
     if ((raid == 0) && (core->delayed_causes & E1000_ICR_ACK)) {
         return false;
     }
@@ -361,10 +366,6 @@
 static void
 e1000e_intrmgr_fire_all_timers(E1000ECore *core)
 {
     int i;
-    uint32_t val = e1000e_intmgr_collect_delayed_causes(core);
-
-    trace_e1000e_irq_adding_delayed_causes(val, core->mac[ICR]);
-    core->mac[ICR] |= val;
     if (core->itr.running) {
         timer_del(core->itr.timer);
@@ -493,27 +494,27 @@ typedef struct E1000E_RSSInfo_st {
 static uint32_t
 e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt)
 {
-    bool isip4, isip6, isudp, istcp;
+    bool hasip4, hasip6;
+    EthL4HdrProto l4hdr_proto;
     assert(e1000e_rss_enabled(core));
-    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
+    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
-    if (isip4) {
-        bool fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
-
-        trace_e1000e_rx_rss_ip4(fragment, istcp, core->mac[MRQC],
+    if (hasip4) {
+        trace_e1000e_rx_rss_ip4(l4hdr_proto, core->mac[MRQC],
                                 E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
                                 E1000_MRQC_EN_IPV4(core->mac[MRQC]));
-        if (!fragment && istcp && E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
+        if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
+            E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
             return E1000_MRQ_RSS_TYPE_IPV4TCP;
         }
         if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
             return E1000_MRQ_RSS_TYPE_IPV4;
         }
-    } else if (isip6) {
+    } else if (hasip6) {
         eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);
         bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
@@ -527,12 +528,12 @@ e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt)
          * backends like these.
          */
         trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
-        trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, istcp,
+        trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, l4hdr_proto,
                                 ip6info->has_ext_hdrs,
                                 ip6info->rss_ex_dst_valid,
                                 ip6info->rss_ex_src_valid,
                                 core->mac[MRQC],
-                                E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]),
+                                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]),
                                 E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
                                 E1000_MRQC_EN_IPV6(core->mac[MRQC]));
@@ -540,9 +541,9 @@ e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt)
             (!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
                               ip6info->rss_ex_src_valid))) {
-            if (istcp && !ip6info->fragment &&
-                E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) {
-                return E1000_MRQ_RSS_TYPE_IPV6TCP;
+            if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
+                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) {
+                return E1000_MRQ_RSS_TYPE_IPV6TCPEX;
             }
             if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
@@ -576,7 +577,7 @@ e1000e_rss_calc_hash(E1000ECore *core,
     case E1000_MRQ_RSS_TYPE_IPV4TCP:
         type = NetPktRssIpV4Tcp;
         break;
-    case E1000_MRQ_RSS_TYPE_IPV6TCP:
+    case E1000_MRQ_RSS_TYPE_IPV6TCPEX:
         type = NetPktRssIpV6TcpEx;
         break;
     case E1000_MRQ_RSS_TYPE_IPV6:
@@ -625,23 +626,39 @@ e1000e_rss_parse_packet(E1000ECore *core,
     info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
 }
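Before moving on to the TX offload changes below, a quick illustration of what feeds this RSS path. MRQC both enables multiqueue (bits 1:0 == 01) and selects which traffic classes are hashed; the bit positions come from the E1000_MRQC_* macros in e1000_regs.h above, but the concrete value here is a hypothetical guest programming, not taken from a real driver:

    #include <stdint.h>

    #define BITN(n) (1u << (n))

    /* Enable RSS (bits 1:0 = 01) and hash IPv4/TCP and plain IPv4
     * traffic (bits 16 and 17). */
    static const uint32_t mrqc_example = BITN(0) | BITN(16) | BITN(17);

    /* With this MRQC, e1000e_rss_get_hash_type() above would return
     * E1000_MRQ_RSS_TYPE_IPV4TCP for TCP/IPv4 packets,
     * E1000_MRQ_RSS_TYPE_IPV4 for other IPv4 packets, and
     * E1000_MRQ_RSS_TYPE_NONE otherwise. */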
-static void
+static bool
 e1000e_setup_tx_offloads(E1000ECore *core, struct e1000e_tx *tx)
 {
     if (tx->props.tse && tx->cptse) {
-        net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss);
+        if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss)) {
+            return false;
+        }
+
         net_tx_pkt_update_ip_checksums(tx->tx_pkt);
         e1000x_inc_reg_if_not_full(core->mac, TSCTC);
-        return;
+        return true;
     }
     if (tx->sum_needed & E1000_TXD_POPTS_TXSM) {
-        net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0);
+        if (!net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0)) {
+            return false;
+        }
     }
     if (tx->sum_needed & E1000_TXD_POPTS_IXSM) {
         net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
     }
+
+    return true;
+}
+
+static void e1000e_tx_pkt_callback(void *core,
+                                   const struct iovec *iov,
+                                   int iovcnt,
+                                   const struct iovec *virt_iov,
+                                   int virt_iovcnt)
+{
+    e1000e_receive_internal(core, virt_iov, virt_iovcnt, true);
 }

 static bool
@@ -650,13 +667,16 @@ e1000e_tx_pkt_send(E1000ECore *core, struct e1000e_tx *tx, int queue_index)
     int target_queue = MIN(core->max_queue_num, queue_index);
     NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue);
-    e1000e_setup_tx_offloads(core, tx);
+    if (!e1000e_setup_tx_offloads(core, tx)) {
+        return false;
+    }
     net_tx_pkt_dump(tx->tx_pkt);
-    if ((core->phy[0][PHY_CTRL] & MII_CR_LOOPBACK) ||
+    if ((core->phy[0][MII_BMCR] & MII_BMCR_LOOPBACK) ||
         ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) {
-        return net_tx_pkt_send_loopback(tx->tx_pkt, queue);
+        return net_tx_pkt_send_custom(tx->tx_pkt, false,
+                                      e1000e_tx_pkt_callback, core);
     } else {
         return net_tx_pkt_send(tx->tx_pkt, queue);
     }
@@ -668,7 +688,7 @@ e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt)
     static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
                                     PTC1023, PTC1522 };
-    size_t tot_len = net_tx_pkt_get_total_len(tx_pkt);
+    size_t tot_len = net_tx_pkt_get_total_len(tx_pkt) + 4;
     e1000x_increase_size_stats(core->mac, PTCregs, tot_len);
     e1000x_inc_reg_if_not_full(core->mac, TPT);
@@ -687,9 +707,8 @@
         g_assert_not_reached();
     }
-    core->mac[GPTC] = core->mac[TPT];
-    core->mac[GOTCL] = core->mac[TOTL];
-    core->mac[GOTCH] = core->mac[TOTH];
+    e1000x_inc_reg_if_not_full(core->mac, GPTC);
+    e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len);
 }

 static void
@@ -723,7 +742,8 @@ e1000e_process_tx_desc(E1000ECore *core,
     addr = le64_to_cpu(dp->buffer_addr);
     if (!tx->skip_cp) {
-        if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, addr, split_size)) {
+        if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, core->owner,
+                                             addr, split_size)) {
             tx->skip_cp = true;
         }
     }
@@ -741,7 +761,7 @@
     }
     tx->skip_cp = false;
-    net_tx_pkt_reset(tx->tx_pkt);
+    net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner);
     tx->sum_needed = 0;
     tx->cptse = 0;
@@ -935,6 +955,8 @@
     if (!ide || !e1000e_intrmgr_delay_tx_causes(core, &cause)) {
         e1000e_set_interrupt_cause(core, cause);
     }
+
+    net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner);
 }

 static bool
@@ -1010,92 +1032,55 @@ e1000e_rx_l4_cso_enabled(core)
 }

 static bool
-e1000e_receive_filter(E1000ECore *core, const uint8_t *buf, int size)
+e1000e_receive_filter(E1000ECore *core, const void *buf)
 {
-    uint32_t rctl = core->mac[RCTL];
-
-    if (e1000x_is_vlan_packet(buf, core->mac[VET]) &&
-        e1000x_vlan_rx_filter_enabled(core->mac)) {
-        uint16_t vid = lduw_be_p(buf + 14);
-        uint32_t vfta = ldl_le_p((uint32_t *)(core->mac + VFTA) +
-                                 ((vid >> 5) & 0x7f));
-        if ((vfta & (1 << (vid & 0x1f))) == 0) {
-            trace_e1000e_rx_flt_vlan_mismatch(vid);
-            return false;
-        } else {
-            trace_e1000e_rx_flt_vlan_match(vid);
-        }
-    }
-
-    switch (net_rx_pkt_get_packet_type(core->rx_pkt)) {
-    case ETH_PKT_UCAST:
-        if (rctl & E1000_RCTL_UPE) {
-            return true; /* promiscuous ucast */
-        }
-        break;
-
-    case ETH_PKT_BCAST:
-        if (rctl & E1000_RCTL_BAM) {
-            return true; /* broadcast enabled */
-        }
-        break;
-
-    case ETH_PKT_MCAST:
-        if (rctl & E1000_RCTL_MPE) {
-            return true; /* promiscuous mcast */
-        }
-        break;
-
-    default:
-        g_assert_not_reached();
-    }
-
-    return e1000x_rx_group_filter(core->mac, buf);
+    return (!e1000x_is_vlan_packet(buf, core->mac[VET]) ||
+            e1000x_rx_vlan_filter(core->mac, PKT_GET_VLAN_HDR(buf))) &&
+           e1000x_rx_group_filter(core->mac, buf);
 }
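The VLAN check that e1000e_receive_filter() used to open-code, and now delegates to e1000x_rx_vlan_filter(), is compact enough to miss: the VFTA is a 128-word bitmap with one bit per 12-bit VLAN ID. A standalone sketch of the deleted lookup (plain C, names illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* VFTA: 128 little-endian 32-bit words = 4096 bits, one per VID.
     * Word index = vid >> 5, bit index = vid & 0x1f. */
    static bool vfta_accepts(const uint32_t vfta[128], uint16_t vid)
    {
        return (vfta[(vid >> 5) & 0x7f] >> (vid & 0x1f)) & 1;
    }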
 static inline void
-e1000e_read_lgcy_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr)
+e1000e_read_lgcy_rx_descr(E1000ECore *core, struct e1000_rx_desc *desc,
+                          hwaddr *buff_addr)
 {
-    struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
-    *buff_addr = le64_to_cpu(d->buffer_addr);
+    *buff_addr = le64_to_cpu(desc->buffer_addr);
 }

 static inline void
-e1000e_read_ext_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr)
+e1000e_read_ext_rx_descr(E1000ECore *core, union e1000_rx_desc_extended *desc,
+                         hwaddr *buff_addr)
 {
-    union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc;
-    *buff_addr = le64_to_cpu(d->read.buffer_addr);
+    *buff_addr = le64_to_cpu(desc->read.buffer_addr);
 }

 static inline void
-e1000e_read_ps_rx_descr(E1000ECore *core, uint8_t *desc,
-                        hwaddr (*buff_addr)[MAX_PS_BUFFERS])
+e1000e_read_ps_rx_descr(E1000ECore *core,
+                        union e1000_rx_desc_packet_split *desc,
+                        hwaddr buff_addr[MAX_PS_BUFFERS])
 {
     int i;
-    union e1000_rx_desc_packet_split *d =
-        (union e1000_rx_desc_packet_split *) desc;
     for (i = 0; i < MAX_PS_BUFFERS; i++) {
-        (*buff_addr)[i] = le64_to_cpu(d->read.buffer_addr[i]);
+        buff_addr[i] = le64_to_cpu(desc->read.buffer_addr[i]);
     }
-    trace_e1000e_rx_desc_ps_read((*buff_addr)[0], (*buff_addr)[1],
-                                 (*buff_addr)[2], (*buff_addr)[3]);
+    trace_e1000e_rx_desc_ps_read(buff_addr[0], buff_addr[1],
+                                 buff_addr[2], buff_addr[3]);
 }

 static inline void
-e1000e_read_rx_descr(E1000ECore *core, uint8_t *desc,
-                     hwaddr (*buff_addr)[MAX_PS_BUFFERS])
+e1000e_read_rx_descr(E1000ECore *core, union e1000_rx_desc_union *desc,
+                     hwaddr buff_addr[MAX_PS_BUFFERS])
 {
     if (e1000e_rx_use_legacy_descriptor(core)) {
-        e1000e_read_lgcy_rx_descr(core, desc, &(*buff_addr)[0]);
-        (*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0;
+        e1000e_read_lgcy_rx_descr(core, &desc->legacy, &buff_addr[0]);
+        buff_addr[1] = buff_addr[2] = buff_addr[3] = 0;
     } else {
         if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
-            e1000e_read_ps_rx_descr(core, desc, buff_addr);
+            e1000e_read_ps_rx_descr(core, &desc->packet_split, buff_addr);
         } else {
-            e1000e_read_ext_rx_descr(core, desc, &(*buff_addr)[0]);
-            (*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0;
+            e1000e_read_ext_rx_descr(core, &desc->extended, &buff_addr[0]);
+            buff_addr[1] = buff_addr[2] = buff_addr[3] = 0;
         }
     }
 }
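The new e1000_rx_desc_union declared near the top of this file is what makes the cast-free rewrite above possible: all three read layouts begin with 64-bit buffer addresses, so the dispatch only needs the descriptor-type bits from RCTL. A stripped-down model of that dispatch, with stand-in types rather than the QEMU definitions (the real code is the hunk above):

    #include <stdint.h>

    typedef uint64_t hwaddr_t; /* stand-in for QEMU's hwaddr */

    union rx_desc_model {
        struct { hwaddr_t buffer_addr; } legacy;
        struct { hwaddr_t buffer_addr; uint64_t reserved; } extended;
        struct { hwaddr_t buffer_addr[4]; } packet_split; /* MAX_PS_BUFFERS */
    };

    /* Fill addr[0..3] according to the descriptor format in use; the
     * single-buffer formats zero the remaining slots. */
    static void read_rx_desc(const union rx_desc_model *d, int legacy,
                             int packet_split, hwaddr_t addr[4])
    {
        if (legacy) {
            addr[0] = d->legacy.buffer_addr;
            addr[1] = addr[2] = addr[3] = 0;
        } else if (packet_split) {
            for (int i = 0; i < 4; i++) {
                addr[i] = d->packet_split.buffer_addr[i];
            }
        } else {
            addr[0] = d->extended.buffer_addr;
            addr[1] = addr[2] = addr[3] = 0;
        }
    }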
0 : E1000_RXDEXT_STATERR_TCPE; + *status_flags |= E1000_RXD_STAT_TCPCS | csum_error; - if (istcp) { - *status_flags |= E1000_RXD_STAT_TCPCS | - csum_error; - } else if (isudp) { - *status_flags |= E1000_RXD_STAT_TCPCS | - E1000_RXD_STAT_UDPCS | - csum_error; + if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP) { + *status_flags |= E1000_RXD_STAT_UDPCS; } } @@ -1167,7 +1153,8 @@ e1000e_build_rx_metadata(E1000ECore *core, uint16_t *vlan_tag) { struct virtio_net_hdr *vhdr; - bool isip4, isip6, istcp, isudp; + bool hasip4, hasip6; + EthL4HdrProto l4hdr_proto; uint32_t pkt_type; *status_flags = E1000_RXD_STAT_DD; @@ -1179,8 +1166,8 @@ e1000e_build_rx_metadata(E1000ECore *core, *status_flags |= E1000_RXD_STAT_EOP; - net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); - trace_e1000e_rx_metadata_protocols(isip4, isip6, isudp, istcp); + net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); + trace_e1000e_rx_metadata_protocols(hasip4, hasip6, l4hdr_proto); /* VLAN state */ if (net_rx_pkt_is_vlan_stripped(pkt)) { @@ -1196,24 +1183,25 @@ e1000e_build_rx_metadata(E1000ECore *core, *mrq = cpu_to_le32(rss_info->type | (rss_info->queue << 8)); trace_e1000e_rx_metadata_rss(*rss, *mrq); } - } else if (isip4) { + } else if (hasip4) { *status_flags |= E1000_RXD_STAT_IPIDV; *ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt)); trace_e1000e_rx_metadata_ip_id(*ip_id); } - if (istcp && e1000e_is_tcp_ack(core, pkt)) { + if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && e1000e_is_tcp_ack(core, pkt)) { *status_flags |= E1000_RXD_STAT_ACK; trace_e1000e_rx_metadata_ack(); } - if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) { + if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) { trace_e1000e_rx_metadata_ipv6_filtering_disabled(); pkt_type = E1000_RXD_PKT_MAC; - } else if (istcp || isudp) { - pkt_type = isip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP; - } else if (isip4 || isip6) { - pkt_type = isip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6; + } else if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP || + l4hdr_proto == ETH_L4_HDR_PROTO_UDP) { + pkt_type = hasip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP; + } else if (hasip4 || hasip6) { + pkt_type = hasip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6; } else { pkt_type = E1000_RXD_PKT_MAC; } @@ -1222,50 +1210,50 @@ e1000e_build_rx_metadata(E1000ECore *core, trace_e1000e_rx_metadata_pkt_type(pkt_type); /* RX CSO information */ - if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) { + if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) { trace_e1000e_rx_metadata_ipv6_sum_disabled(); goto func_exit; } - if (!net_rx_pkt_has_virt_hdr(pkt)) { - trace_e1000e_rx_metadata_no_virthdr(); - e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp); - goto func_exit; - } - vhdr = net_rx_pkt_get_vhdr(pkt); if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) && !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) { trace_e1000e_rx_metadata_virthdr_no_csum_info(); - e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp); + e1000e_verify_csum_in_sw(core, pkt, status_flags, l4hdr_proto); goto func_exit; } if (e1000e_rx_l3_cso_enabled(core)) { - *status_flags |= isip4 ? E1000_RXD_STAT_IPCS : 0; + *status_flags |= hasip4 ? 
E1000_RXD_STAT_IPCS : 0; } else { trace_e1000e_rx_metadata_l3_cso_disabled(); } if (e1000e_rx_l4_cso_enabled(core)) { - if (istcp) { + switch (l4hdr_proto) { + case ETH_L4_HDR_PROTO_TCP: *status_flags |= E1000_RXD_STAT_TCPCS; - } else if (isudp) { + break; + + case ETH_L4_HDR_PROTO_UDP: *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS; + break; + + default: + break; } } else { trace_e1000e_rx_metadata_l4_cso_disabled(); } - trace_e1000e_rx_metadata_status_flags(*status_flags); - func_exit: + trace_e1000e_rx_metadata_status_flags(*status_flags); *status_flags = cpu_to_le32(*status_flags); } static inline void -e1000e_write_lgcy_rx_descr(E1000ECore *core, uint8_t *desc, +e1000e_write_lgcy_rx_descr(E1000ECore *core, struct e1000_rx_desc *desc, struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, uint16_t length) @@ -1273,71 +1261,66 @@ e1000e_write_lgcy_rx_descr(E1000ECore *core, uint8_t *desc, uint32_t status_flags, rss, mrq; uint16_t ip_id; - struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc; - assert(!rss_info->enabled); - d->length = cpu_to_le16(length); - d->csum = 0; + desc->length = cpu_to_le16(length); + desc->csum = 0; e1000e_build_rx_metadata(core, pkt, pkt != NULL, rss_info, &rss, &mrq, &status_flags, &ip_id, - &d->special); - d->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24); - d->status = (uint8_t) le32_to_cpu(status_flags); + &desc->special); + desc->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24); + desc->status = (uint8_t) le32_to_cpu(status_flags); } static inline void -e1000e_write_ext_rx_descr(E1000ECore *core, uint8_t *desc, +e1000e_write_ext_rx_descr(E1000ECore *core, union e1000_rx_desc_extended *desc, struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, uint16_t length) { - union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc; + memset(&desc->wb, 0, sizeof(desc->wb)); - memset(&d->wb, 0, sizeof(d->wb)); - - d->wb.upper.length = cpu_to_le16(length); + desc->wb.upper.length = cpu_to_le16(length); e1000e_build_rx_metadata(core, pkt, pkt != NULL, rss_info, - &d->wb.lower.hi_dword.rss, - &d->wb.lower.mrq, - &d->wb.upper.status_error, - &d->wb.lower.hi_dword.csum_ip.ip_id, - &d->wb.upper.vlan); + &desc->wb.lower.hi_dword.rss, + &desc->wb.lower.mrq, + &desc->wb.upper.status_error, + &desc->wb.lower.hi_dword.csum_ip.ip_id, + &desc->wb.upper.vlan); } static inline void -e1000e_write_ps_rx_descr(E1000ECore *core, uint8_t *desc, +e1000e_write_ps_rx_descr(E1000ECore *core, + union e1000_rx_desc_packet_split *desc, struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, size_t ps_hdr_len, uint16_t(*written)[MAX_PS_BUFFERS]) { int i; - union e1000_rx_desc_packet_split *d = - (union e1000_rx_desc_packet_split *) desc; - memset(&d->wb, 0, sizeof(d->wb)); + memset(&desc->wb, 0, sizeof(desc->wb)); - d->wb.middle.length0 = cpu_to_le16((*written)[0]); + desc->wb.middle.length0 = cpu_to_le16((*written)[0]); for (i = 0; i < PS_PAGE_BUFFERS; i++) { - d->wb.upper.length[i] = cpu_to_le16((*written)[i + 1]); + desc->wb.upper.length[i] = cpu_to_le16((*written)[i + 1]); } e1000e_build_rx_metadata(core, pkt, pkt != NULL, rss_info, - &d->wb.lower.hi_dword.rss, - &d->wb.lower.mrq, - &d->wb.middle.status_error, - &d->wb.lower.hi_dword.csum_ip.ip_id, - &d->wb.middle.vlan); + &desc->wb.lower.hi_dword.rss, + &desc->wb.lower.mrq, + &desc->wb.middle.status_error, + &desc->wb.lower.hi_dword.csum_ip.ip_id, + &desc->wb.middle.vlan); - d->wb.upper.header_status = + desc->wb.upper.header_status = cpu_to_le16(ps_hdr_len | (ps_hdr_len ? 
E1000_RXDPS_HDRSTAT_HDRSP : 0)); trace_e1000e_rx_desc_ps_write((*written)[0], (*written)[1], @@ -1345,20 +1328,21 @@ e1000e_write_ps_rx_descr(E1000ECore *core, uint8_t *desc, } static inline void -e1000e_write_rx_descr(E1000ECore *core, uint8_t *desc, +e1000e_write_rx_descr(E1000ECore *core, union e1000_rx_desc_union *desc, struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, size_t ps_hdr_len, uint16_t(*written)[MAX_PS_BUFFERS]) { if (e1000e_rx_use_legacy_descriptor(core)) { assert(ps_hdr_len == 0); - e1000e_write_lgcy_rx_descr(core, desc, pkt, rss_info, (*written)[0]); + e1000e_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info, + (*written)[0]); } else { if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { - e1000e_write_ps_rx_descr(core, desc, pkt, rss_info, + e1000e_write_ps_rx_descr(core, &desc->packet_split, pkt, rss_info, ps_hdr_len, written); } else { assert(ps_hdr_len == 0); - e1000e_write_ext_rx_descr(core, desc, pkt, rss_info, + e1000e_write_ext_rx_descr(core, &desc->extended, pkt, rss_info, (*written)[0]); } } @@ -1366,12 +1350,12 @@ struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, static inline void e1000e_pci_dma_write_rx_desc(E1000ECore *core, dma_addr_t addr, - uint8_t *desc, dma_addr_t len) + union e1000_rx_desc_union *desc, dma_addr_t len) { PCIDevice *dev = core->owner; if (e1000e_rx_use_legacy_descriptor(core)) { - struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc; + struct e1000_rx_desc *d = &desc->legacy; size_t offset = offsetof(struct e1000_rx_desc, status); uint8_t status = d->status; @@ -1384,8 +1368,7 @@ e1000e_pci_dma_write_rx_desc(E1000ECore *core, dma_addr_t addr, } } else { if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { - union e1000_rx_desc_packet_split *d = - (union e1000_rx_desc_packet_split *) desc; + union e1000_rx_desc_packet_split *d = &desc->packet_split; size_t offset = offsetof(union e1000_rx_desc_packet_split, wb.middle.status_error); uint32_t status = d->wb.middle.status_error; @@ -1398,8 +1381,7 @@ e1000e_pci_dma_write_rx_desc(E1000ECore *core, dma_addr_t addr, pci_dma_write(dev, addr + offset, &status, sizeof(status)); } } else { - union e1000_rx_desc_extended *d = - (union e1000_rx_desc_extended *) desc; + union e1000_rx_desc_extended *d = &desc->extended; size_t offset = offsetof(union e1000_rx_desc_extended, wb.upper.status_error); uint32_t status = d->wb.upper.status_error; @@ -1422,14 +1404,14 @@ typedef struct e1000e_ba_state_st { static inline void e1000e_write_hdr_to_rx_buffers(E1000ECore *core, - hwaddr (*ba)[MAX_PS_BUFFERS], + hwaddr ba[MAX_PS_BUFFERS], e1000e_ba_state *bastate, const char *data, dma_addr_t data_len) { assert(data_len <= core->rxbuf_sizes[0] - bastate->written[0]); - pci_dma_write(core->owner, (*ba)[0] + bastate->written[0], data, data_len); + pci_dma_write(core->owner, ba[0] + bastate->written[0], data, data_len); bastate->written[0] += data_len; bastate->cur_idx = 1; @@ -1437,7 +1419,7 @@ e1000e_write_hdr_to_rx_buffers(E1000ECore *core, static void e1000e_write_to_rx_buffers(E1000ECore *core, - hwaddr (*ba)[MAX_PS_BUFFERS], + hwaddr ba[MAX_PS_BUFFERS], e1000e_ba_state *bastate, const char *data, dma_addr_t data_len) @@ -1449,13 +1431,13 @@ e1000e_write_to_rx_buffers(E1000ECore *core, uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left); trace_e1000e_rx_desc_buff_write(bastate->cur_idx, - (*ba)[bastate->cur_idx], + ba[bastate->cur_idx], bastate->written[bastate->cur_idx], data, bytes_to_write); pci_dma_write(core->owner, - (*ba)[bastate->cur_idx] + bastate->written[bastate->cur_idx], + ba[bastate->cur_idx] 
+ bastate->written[bastate->cur_idx], data, bytes_to_write); bastate->written[bastate->cur_idx] += bytes_to_write; @@ -1471,24 +1453,10 @@ e1000e_write_to_rx_buffers(E1000ECore *core, } static void -e1000e_update_rx_stats(E1000ECore *core, - size_t data_size, - size_t data_fcs_size) +e1000e_update_rx_stats(E1000ECore *core, size_t pkt_size, size_t pkt_fcs_size) { - e1000x_update_rx_total_stats(core->mac, data_size, data_fcs_size); - - switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { - case ETH_PKT_BCAST: - e1000x_inc_reg_if_not_full(core->mac, BPRC); - break; - - case ETH_PKT_MCAST: - e1000x_inc_reg_if_not_full(core->mac, MPRC); - break; - - default: - break; - } + eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt); + e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size); } static inline bool @@ -1501,18 +1469,19 @@ e1000e_rx_descr_threshold_hit(E1000ECore *core, const E1000E_RingInfo *rxi) static bool e1000e_do_ps(E1000ECore *core, struct NetRxPkt *pkt, size_t *hdr_len) { - bool isip4, isip6, isudp, istcp; + bool hasip4, hasip6; + EthL4HdrProto l4hdr_proto; bool fragment; if (!e1000e_rx_use_ps_descriptor(core)) { return false; } - net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); + net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); - if (isip4) { + if (hasip4) { fragment = net_rx_pkt_get_ip4_info(pkt)->fragment; - } else if (isip6) { + } else if (hasip6) { fragment = net_rx_pkt_get_ip6_info(pkt)->fragment; } else { return false; @@ -1522,7 +1491,8 @@ e1000e_do_ps(E1000ECore *core, struct NetRxPkt *pkt, size_t *hdr_len) return false; } - if (!fragment && (isudp || istcp)) { + if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP || + l4hdr_proto == ETH_L4_HDR_PROTO_UDP) { *hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt); } else { *hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt); @@ -1543,7 +1513,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, { PCIDevice *d = core->owner; dma_addr_t base; - uint8_t desc[E1000_MAX_RX_DESC_LEN]; + union e1000_rx_desc_union desc; size_t desc_size; size_t desc_offset = 0; size_t iov_ofs = 0; @@ -1579,7 +1549,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, trace_e1000e_rx_descr(rxi->idx, base, core->rx_desc_len); - e1000e_read_rx_descr(core, desc, &ba); + e1000e_read_rx_descr(core, &desc, ba); if (ba[0]) { if (desc_offset < size) { @@ -1598,7 +1568,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, iov_copy = MIN(ps_hdr_len - ps_hdr_copied, iov->iov_len - iov_ofs); - e1000e_write_hdr_to_rx_buffers(core, &ba, &bastate, + e1000e_write_hdr_to_rx_buffers(core, ba, &bastate, iov->iov_base, iov_copy); copy_size -= iov_copy; @@ -1615,7 +1585,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, } else { /* Leave buffer 0 of each descriptor except first */ /* empty as per spec 7.1.5.1 */ - e1000e_write_hdr_to_rx_buffers(core, &ba, &bastate, + e1000e_write_hdr_to_rx_buffers(core, ba, &bastate, NULL, 0); } } @@ -1624,7 +1594,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, while (copy_size) { iov_copy = MIN(copy_size, iov->iov_len - iov_ofs); - e1000e_write_to_rx_buffers(core, &ba, &bastate, + e1000e_write_to_rx_buffers(core, ba, &bastate, iov->iov_base + iov_ofs, iov_copy); copy_size -= iov_copy; @@ -1637,7 +1607,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, if (desc_offset + desc_size >= total_size) { /* Simulate FCS checksum presence in the last descriptor */ - 
e1000e_write_to_rx_buffers(core, &ba, &bastate, + e1000e_write_to_rx_buffers(core, ba, &bastate, (const char *) &fcs_pad, e1000x_fcs_len(core->mac)); } } @@ -1649,9 +1619,9 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, is_last = true; } - e1000e_write_rx_descr(core, desc, is_last ? core->rx_pkt : NULL, + e1000e_write_rx_descr(core, &desc, is_last ? core->rx_pkt : NULL, rss_info, do_ps ? ps_hdr_len : 0, &bastate.written); - e1000e_pci_dma_write_rx_desc(core, base, desc, core->rx_desc_len); + e1000e_pci_dma_write_rx_desc(core, base, &desc, core->rx_desc_len); e1000e_ring_advance(core, rxi, core->rx_desc_len / E1000_MIN_RX_DESC_LEN); @@ -1664,27 +1634,26 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, static inline void e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt) { - if (net_rx_pkt_has_virt_hdr(pkt)) { - struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt); + struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt); - if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { - net_rx_pkt_fix_l4_csum(pkt); - } + if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { + net_rx_pkt_fix_l4_csum(pkt); } } -/* Min. octets in an ethernet frame sans FCS */ -#define MIN_BUF_SIZE 60 - ssize_t e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt) { - static const int maximum_ethernet_hdr_len = (14 + 4); + return e1000e_receive_internal(core, iov, iovcnt, core->has_vnet); +} - uint32_t n = 0; - uint8_t min_buf[MIN_BUF_SIZE]; +static ssize_t +e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt, + bool has_vnet) +{ + uint32_t causes = 0; + uint8_t buf[ETH_ZLEN]; struct iovec min_iov; - uint8_t *filter_buf; size_t size, orig_size; size_t iov_ofs = 0; E1000E_RxRing rxr; @@ -1700,29 +1669,28 @@ e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt) } /* Pull virtio header in */ - if (core->has_vnet) { + if (has_vnet) { net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt); iov_ofs = sizeof(struct virtio_net_hdr); + } else { + net_rx_pkt_unset_vhdr(core->rx_pkt); } - filter_buf = iov->iov_base + iov_ofs; orig_size = iov_size(iov, iovcnt); size = orig_size - iov_ofs; /* Pad to minimum Ethernet frame length */ - if (size < sizeof(min_buf)) { - iov_to_buf(iov, iovcnt, iov_ofs, min_buf, size); - memset(&min_buf[size], 0, sizeof(min_buf) - size); + if (size < sizeof(buf)) { + iov_to_buf(iov, iovcnt, iov_ofs, buf, size); + memset(&buf[size], 0, sizeof(buf) - size); e1000x_inc_reg_if_not_full(core->mac, RUC); - min_iov.iov_base = filter_buf = min_buf; - min_iov.iov_len = size = sizeof(min_buf); + min_iov.iov_base = buf; + min_iov.iov_len = size = sizeof(buf); iovcnt = 1; iov = &min_iov; iov_ofs = 0; - } else if (iov->iov_len < maximum_ethernet_hdr_len) { - /* This is very unlikely, but may happen. */ - iov_to_buf(iov, iovcnt, iov_ofs, min_buf, maximum_ethernet_hdr_len); - filter_buf = min_buf; + } else { + iov_to_buf(iov, iovcnt, iov_ofs, buf, ETH_HLEN + 4); } /* Discard oversized packets if !LPE and !SBP. 
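For reference, the size policy behind the "!LPE and !SBP" check here (rewritten as e1000x_is_oversized() later in this series) can be sketched as a standalone C program; the constants and function name below are illustrative stand-ins, not QEMU's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Assumed framing: 14-byte Ethernet header + 4-byte VLAN tag,
 * 1500-byte MTU, 4-byte FCS, mirroring the patched
 * e1000x_is_oversized(). */
#define HEADER_SIZE (14 + 4)
#define ETH_MTU     1500
#define ETH_FCS_LEN 4

static bool is_oversized(size_t size, bool lpe, bool sbp)
{
    size_t maximum_short_size = HEADER_SIZE + ETH_MTU;   /* LPE=0 limit */
    size_t maximum_large_size = 16 * 1024 - ETH_FCS_LEN; /* LPE=1 limit */

    return (size > maximum_large_size ||
            (size > maximum_short_size && !lpe)) && !sbp;
}

int main(void)
{
    printf("%d\n", is_oversized(1600, false, false)); /* 1: dropped with LPE=0 */
    printf("%d\n", is_oversized(1600, true, false));  /* 0: LPE=1 accepts it */
    return 0;
}

SBP ("store bad packets") bypasses both limits, which is why a guest can still capture oversized frames for diagnostics.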
*/ @@ -1731,21 +1699,20 @@ e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt) } net_rx_pkt_set_packet_type(core->rx_pkt, - get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf))); + get_eth_packet_type(PKT_GET_ETH_HDR(buf))); - if (!e1000e_receive_filter(core, filter_buf, size)) { + if (!e1000e_receive_filter(core, buf)) { trace_e1000e_rx_flt_dropped(); return orig_size; } net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs, - e1000x_vlan_enabled(core->mac), core->mac[VET]); + e1000x_vlan_enabled(core->mac) ? 0 : -1, + core->mac[VET], 0); e1000e_rss_parse_packet(core, core->rx_pkt, &rss_info); e1000e_rx_ring_init(core, &rxr, rss_info.queue); - trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx); - total_size = net_rx_pkt_get_total_len(core->rx_pkt) + e1000x_fcs_len(core->mac); @@ -1758,32 +1725,32 @@ e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt) /* Perform small receive detection (RSRPD) */ if (total_size < core->mac[RSRPD]) { - n |= E1000_ICS_SRPD; + causes |= E1000_ICS_SRPD; } /* Perform ACK receive detection */ if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS) && (e1000e_is_tcp_ack(core, core->rx_pkt))) { - n |= E1000_ICS_ACK; + causes |= E1000_ICS_ACK; } /* Check if receive descriptor minimum threshold hit */ rdmts_hit = e1000e_rx_descr_threshold_hit(core, rxr.i); - n |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit); + causes |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit); - trace_e1000e_rx_written_to_guest(n); + trace_e1000e_rx_written_to_guest(rxr.i->idx); } else { - n |= E1000_ICS_RXO; + causes |= E1000_ICS_RXO; retval = 0; - trace_e1000e_rx_not_written_to_guest(n); + trace_e1000e_rx_not_written_to_guest(rxr.i->idx); } - if (!e1000e_intrmgr_delay_rx_causes(core, &n)) { - trace_e1000e_rx_interrupt_set(n); - e1000e_set_interrupt_cause(core, n); + if (!e1000e_intrmgr_delay_rx_causes(core, &causes)) { + trace_e1000e_rx_interrupt_set(causes); + e1000e_set_interrupt_cause(core, causes); } else { - trace_e1000e_rx_interrupt_delayed(n); + trace_e1000e_rx_interrupt_delayed(causes); } return retval; @@ -1792,13 +1759,13 @@ e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt) static inline bool e1000e_have_autoneg(E1000ECore *core) { - return core->phy[0][PHY_CTRL] & MII_CR_AUTO_NEG_EN; + return core->phy[0][MII_BMCR] & MII_BMCR_AUTOEN; } static void e1000e_update_flowctl_status(E1000ECore *core) { if (e1000e_have_autoneg(core) && - core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE) { + core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP) { trace_e1000e_link_autoneg_flowctl(true); core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE; } else { @@ -1816,12 +1783,12 @@ e1000e_link_down(E1000ECore *core) static inline void e1000e_set_phy_ctrl(E1000ECore *core, int index, uint16_t val) { - /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */ - core->phy[0][PHY_CTRL] = val & ~(0x3f | - MII_CR_RESET | - MII_CR_RESTART_AUTO_NEG); + /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */ + core->phy[0][MII_BMCR] = val & ~(0x3f | + MII_BMCR_RESET | + MII_BMCR_ANRESTART); - if ((val & MII_CR_RESTART_AUTO_NEG) && + if ((val & MII_BMCR_ANRESTART) && e1000e_have_autoneg(core)) { e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer); } @@ -1855,7 +1822,7 @@ e1000e_core_set_link_status(E1000ECore *core) e1000x_update_regs_on_link_down(core->mac, core->phy[0]); } else { if (e1000e_have_autoneg(core) && - !(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) { + 
!(core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP)) { e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer); } else { @@ -1888,7 +1855,7 @@ e1000e_set_ctrl(E1000ECore *core, int index, uint32_t val) if (val & E1000_CTRL_RST) { trace_e1000e_core_ctrl_sw_reset(); - e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac); + e1000e_reset(core, true); } if (val & E1000_CTRL_PHY_RST) { @@ -1997,27 +1964,18 @@ static void(*e1000e_phyreg_writeops[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE]) (E1000ECore *, int, uint16_t) = { [0] = { - [PHY_CTRL] = e1000e_set_phy_ctrl, + [MII_BMCR] = e1000e_set_phy_ctrl, [PHY_PAGE] = e1000e_set_phy_page, [PHY_OEM_BITS] = e1000e_set_phy_oem_bits } }; -static inline void -e1000e_clear_ims_bits(E1000ECore *core, uint32_t bits) -{ - trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits); - core->mac[IMS] &= ~bits; -} - static inline bool -e1000e_postpone_interrupt(bool *interrupt_pending, - E1000IntrDelayTimer *timer) +e1000e_postpone_interrupt(E1000IntrDelayTimer *timer) { if (timer->running) { trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2); - *interrupt_pending = true; return true; } @@ -2031,14 +1989,13 @@ e1000e_postpone_interrupt(bool *interrupt_pending, static inline bool e1000e_itr_should_postpone(E1000ECore *core) { - return e1000e_postpone_interrupt(&core->itr_intr_pending, &core->itr); + return e1000e_postpone_interrupt(&core->itr); } static inline bool e1000e_eitr_should_postpone(E1000ECore *core, int idx) { - return e1000e_postpone_interrupt(&core->eitr_intr_pending[idx], - &core->eitr[idx]); + return e1000e_postpone_interrupt(&core->eitr[idx]); } static void @@ -2070,7 +2027,6 @@ e1000e_msix_notify_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg) effective_eiac = core->mac[EIAC] & cause; core->mac[ICR] &= ~effective_eiac; - core->msi_causes_pending &= ~effective_eiac; if (!(core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { core->mac[IMS] &= ~effective_eiac; @@ -2162,33 +2118,17 @@ e1000e_fix_icr_asserted(E1000ECore *core) trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]); } -static void -e1000e_send_msi(E1000ECore *core, bool msix) +static void e1000e_raise_interrupts(E1000ECore *core, + size_t index, uint32_t causes) { - uint32_t causes = core->mac[ICR] & core->mac[IMS] & ~E1000_ICR_ASSERTED; - - core->msi_causes_pending &= causes; - causes ^= core->msi_causes_pending; - if (causes == 0) { - return; - } - core->msi_causes_pending |= causes; - - if (msix) { - e1000e_msix_notify(core, causes); - } else { - if (!e1000e_itr_should_postpone(core)) { - trace_e1000e_irq_msi_notify(causes); - msi_notify(core->owner, 0); - } - } -} - -static void -e1000e_update_interrupt_state(E1000ECore *core) -{ - bool interrupts_pending; bool is_msix = msix_enabled(core->owner); + uint32_t old_causes = core->mac[IMS] & core->mac[ICR]; + uint32_t raised_causes; + + trace_e1000e_irq_set(index << 2, + core->mac[index], core->mac[index] | causes); + + core->mac[index] |= causes; /* Set ICR[OTHER] for MSI-X */ if (is_msix) { @@ -2210,40 +2150,58 @@ e1000e_update_interrupt_state(E1000ECore *core) */ core->mac[ICS] = core->mac[ICR]; - interrupts_pending = (core->mac[IMS] & core->mac[ICR]) ? 
true : false; - if (!interrupts_pending) { - core->msi_causes_pending = 0; + trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], + core->mac[ICR], core->mac[IMS]); + + raised_causes = core->mac[IMS] & core->mac[ICR] & ~old_causes; + if (!raised_causes) { + return; } + if (is_msix) { + e1000e_msix_notify(core, raised_causes & ~E1000_ICR_ASSERTED); + } else if (!e1000e_itr_should_postpone(core)) { + if (msi_enabled(core->owner)) { + trace_e1000e_irq_msi_notify(raised_causes); + msi_notify(core->owner, 0); + } else { + e1000e_raise_legacy_irq(core); + } + } +} + +static void e1000e_lower_interrupts(E1000ECore *core, + size_t index, uint32_t causes) +{ + trace_e1000e_irq_clear(index << 2, + core->mac[index], core->mac[index] & ~causes); + + core->mac[index] &= ~causes; + + /* + * Make sure ICR and ICS registers have the same value. + * The spec says that the ICS register is write-only. However in practice, + * on real hardware ICS is readable, and for reads it has the same value as + * ICR (except that ICS does not have the clear on read behaviour of ICR). + * + * The VxWorks PRO/1000 driver uses this behaviour. + */ + core->mac[ICS] = core->mac[ICR]; + trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], core->mac[ICR], core->mac[IMS]); - if (is_msix || msi_enabled(core->owner)) { - if (interrupts_pending) { - e1000e_send_msi(core, is_msix); - } - } else { - if (interrupts_pending) { - if (!e1000e_itr_should_postpone(core)) { - e1000e_raise_legacy_irq(core); - } - } else { - e1000e_lower_legacy_irq(core); - } + if (!(core->mac[IMS] & core->mac[ICR]) && + !msix_enabled(core->owner) && !msi_enabled(core->owner)) { + e1000e_lower_legacy_irq(core); } } static void e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val) { - trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]); - val |= e1000e_intmgr_collect_delayed_causes(core); - core->mac[ICR] |= val; - - trace_e1000e_irq_set_cause_exit(val, core->mac[ICR]); - - e1000e_update_interrupt_state(core); + e1000e_raise_interrupts(core, ICR, val); } static inline void @@ -2269,19 +2227,19 @@ e1000e_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr) static const char e1000e_phy_regcap[E1000E_PHY_PAGES][0x20] = { [0] = { - [PHY_CTRL] = PHY_ANYPAGE | PHY_RW, - [PHY_STATUS] = PHY_ANYPAGE | PHY_R, - [PHY_ID1] = PHY_ANYPAGE | PHY_R, - [PHY_ID2] = PHY_ANYPAGE | PHY_R, - [PHY_AUTONEG_ADV] = PHY_ANYPAGE | PHY_RW, - [PHY_LP_ABILITY] = PHY_ANYPAGE | PHY_R, - [PHY_AUTONEG_EXP] = PHY_ANYPAGE | PHY_R, - [PHY_NEXT_PAGE_TX] = PHY_ANYPAGE | PHY_RW, - [PHY_LP_NEXT_PAGE] = PHY_ANYPAGE | PHY_R, - [PHY_1000T_CTRL] = PHY_ANYPAGE | PHY_RW, - [PHY_1000T_STATUS] = PHY_ANYPAGE | PHY_R, - [PHY_EXT_STATUS] = PHY_ANYPAGE | PHY_R, - [PHY_PAGE] = PHY_ANYPAGE | PHY_RW, + [MII_BMCR] = PHY_ANYPAGE | PHY_RW, + [MII_BMSR] = PHY_ANYPAGE | PHY_R, + [MII_PHYID1] = PHY_ANYPAGE | PHY_R, + [MII_PHYID2] = PHY_ANYPAGE | PHY_R, + [MII_ANAR] = PHY_ANYPAGE | PHY_RW, + [MII_ANLPAR] = PHY_ANYPAGE | PHY_R, + [MII_ANER] = PHY_ANYPAGE | PHY_R, + [MII_ANNP] = PHY_ANYPAGE | PHY_RW, + [MII_ANLPRNP] = PHY_ANYPAGE | PHY_R, + [MII_CTRL1000] = PHY_ANYPAGE | PHY_RW, + [MII_STAT1000] = PHY_ANYPAGE | PHY_R, + [MII_EXTSTAT] = PHY_ANYPAGE | PHY_R, + [PHY_PAGE] = PHY_ANYPAGE | PHY_RW, [PHY_COPPER_CTRL1] = PHY_RW, [PHY_COPPER_STAT1] = PHY_R, @@ -2434,17 +2392,19 @@ e1000e_set_fcrtl(E1000ECore *core, int index, uint32_t val) core->mac[FCRTL] = val & 0x8000FFF8; } -static inline void -e1000e_set_16bit(E1000ECore *core, int index, uint32_t val) -{ - 
core->mac[index] = val & 0xffff; -} +#define E1000E_LOW_BITS_SET_FUNC(num) \ + static void \ + e1000e_set_##num##bit(E1000ECore *core, int index, uint32_t val) \ + { \ + core->mac[index] = val & (BIT(num) - 1); \ + } -static void -e1000e_set_12bit(E1000ECore *core, int index, uint32_t val) -{ - core->mac[index] = val & 0xfff; -} +E1000E_LOW_BITS_SET_FUNC(4) +E1000E_LOW_BITS_SET_FUNC(6) +E1000E_LOW_BITS_SET_FUNC(11) +E1000E_LOW_BITS_SET_FUNC(12) +E1000E_LOW_BITS_SET_FUNC(13) +E1000E_LOW_BITS_SET_FUNC(16) static void e1000e_set_vet(E1000ECore *core, int index, uint32_t val) @@ -2507,29 +2467,27 @@ e1000e_set_ics(E1000ECore *core, int index, uint32_t val) static void e1000e_set_icr(E1000ECore *core, int index, uint32_t val) { - uint32_t icr = 0; if ((core->mac[ICR] & E1000_ICR_ASSERTED) && (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { trace_e1000e_irq_icr_process_iame(); - e1000e_clear_ims_bits(core, core->mac[IAM]); + e1000e_lower_interrupts(core, IMS, core->mac[IAM]); } - icr = core->mac[ICR] & ~val; - /* Windows driver expects that the "receive overrun" bit and other + /* + * Windows driver expects that the "receive overrun" bit and other * ones to be cleared when the "Other" bit (#24) is cleared. */ - icr = (val & E1000_ICR_OTHER) ? (icr & ~E1000_ICR_OTHER_CAUSES) : icr; - trace_e1000e_irq_icr_write(val, core->mac[ICR], icr); - core->mac[ICR] = icr; - e1000e_update_interrupt_state(core); + if (val & E1000_ICR_OTHER) { + val |= E1000_ICR_OTHER_CAUSES; + } + e1000e_lower_interrupts(core, ICR, val); } static void e1000e_set_imc(E1000ECore *core, int index, uint32_t val) { trace_e1000e_irq_ims_clear_set_imc(val); - e1000e_clear_ims_bits(core, val); - e1000e_update_interrupt_state(core); + e1000e_lower_interrupts(core, IMS, val); } static void @@ -2550,9 +2508,6 @@ e1000e_set_ims(E1000ECore *core, int index, uint32_t val) uint32_t valid_val = val & ims_valid_mask; - trace_e1000e_irq_set_ims(val, core->mac[IMS], core->mac[IMS] | valid_val); - core->mac[IMS] |= valid_val; - if ((valid_val & ims_ext_mask) && (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PBA_CLR) && msix_enabled(core->owner)) { @@ -2565,7 +2520,7 @@ e1000e_set_ims(E1000ECore *core, int index, uint32_t val) e1000e_intrmgr_fire_all_timers(core); } - e1000e_update_interrupt_state(core); + e1000e_raise_interrupts(core, IMS, valid_val); } static void @@ -2614,27 +2569,11 @@ e1000e_mac_ims_read(E1000ECore *core, int index) return core->mac[IMS]; } -#define E1000E_LOW_BITS_READ_FUNC(num) \ - static uint32_t \ - e1000e_mac_low##num##_read(E1000ECore *core, int index) \ - { \ - return core->mac[index] & (BIT(num) - 1); \ - } \ - -#define E1000E_LOW_BITS_READ(num) \ - e1000e_mac_low##num##_read - -E1000E_LOW_BITS_READ_FUNC(4); -E1000E_LOW_BITS_READ_FUNC(6); -E1000E_LOW_BITS_READ_FUNC(11); -E1000E_LOW_BITS_READ_FUNC(13); -E1000E_LOW_BITS_READ_FUNC(16); - static uint32_t e1000e_mac_swsm_read(E1000ECore *core, int index) { uint32_t val = core->mac[SWSM]; - core->mac[SWSM] = val | 1; + core->mac[SWSM] = val | E1000_SWSM_SMBI; return val; } @@ -2654,28 +2593,25 @@ static uint32_t e1000e_mac_icr_read(E1000ECore *core, int index) { uint32_t ret = core->mac[ICR]; - trace_e1000e_irq_icr_read_entry(ret); if (core->mac[IMS] == 0) { trace_e1000e_irq_icr_clear_zero_ims(); - core->mac[ICR] = 0; + e1000e_lower_interrupts(core, ICR, 0xffffffff); } if (!msix_enabled(core->owner)) { trace_e1000e_irq_icr_clear_nonmsix_icr_read(); - core->mac[ICR] = 0; + e1000e_lower_interrupts(core, ICR, 0xffffffff); } if ((core->mac[ICR] & E1000_ICR_ASSERTED) && 
(core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { trace_e1000e_irq_icr_clear_iame(); - core->mac[ICR] = 0; + e1000e_lower_interrupts(core, ICR, 0xffffffff); trace_e1000e_irq_icr_process_iame(); - e1000e_clear_ims_bits(core, core->mac[IAM]); + e1000e_lower_interrupts(core, IMS, core->mac[IAM]); } - trace_e1000e_irq_icr_read_exit(core->mac[ICR]); - e1000e_update_interrupt_state(core); return ret; } @@ -2908,6 +2844,35 @@ e1000e_set_gcr(E1000ECore *core, int index, uint32_t val) core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits; } +static uint32_t e1000e_get_systiml(E1000ECore *core, int index) +{ + e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH); + return core->mac[SYSTIML]; +} + +static uint32_t e1000e_get_rxsatrh(E1000ECore *core, int index) +{ + core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID; + return core->mac[RXSATRH]; +} + +static uint32_t e1000e_get_txstmph(E1000ECore *core, int index) +{ + core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID; + return core->mac[TXSTMPH]; +} + +static void e1000e_set_timinca(E1000ECore *core, int index, uint32_t val) +{ + e1000x_set_timinca(core->mac, &core->timadj, val); +} + +static void e1000e_set_timadjh(E1000ECore *core, int index, uint32_t val) +{ + core->mac[TIMADJH] = val; + core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32); +} + #define e1000e_getreg(x) [x] = e1000e_mac_readreg typedef uint32_t (*readops)(E1000ECore *, int); static const readops e1000e_macreg_readops[] = { @@ -2923,7 +2888,19 @@ static const readops e1000e_macreg_readops[] = { e1000e_getreg(LATECOL), e1000e_getreg(SEQEC), e1000e_getreg(XONTXC), + e1000e_getreg(AIT), + e1000e_getreg(TDFH), + e1000e_getreg(TDFT), + e1000e_getreg(TDFHS), + e1000e_getreg(TDFTS), + e1000e_getreg(TDFPC), e1000e_getreg(WUS), + e1000e_getreg(PBS), + e1000e_getreg(RDFH), + e1000e_getreg(RDFT), + e1000e_getreg(RDFHS), + e1000e_getreg(RDFTS), + e1000e_getreg(RDFPC), e1000e_getreg(GORCL), e1000e_getreg(MGTPRC), e1000e_getreg(EERD), @@ -2951,7 +2928,6 @@ static const readops e1000e_macreg_readops[] = { e1000e_getreg(GSCL_2), e1000e_getreg(RDBAH1), e1000e_getreg(FLSWDATA), - e1000e_getreg(RXSATRH), e1000e_getreg(TIPG), e1000e_getreg(FLMNGCTL), e1000e_getreg(FLMNGCNT), @@ -2992,7 +2968,6 @@ static const readops e1000e_macreg_readops[] = { e1000e_getreg(FLSWCTL), e1000e_getreg(RXDCTL1), e1000e_getreg(RXSATRL), - e1000e_getreg(SYSTIML), e1000e_getreg(RXUDP), e1000e_getreg(TORL), e1000e_getreg(TDLEN1), @@ -3032,7 +3007,6 @@ static const readops e1000e_macreg_readops[] = { e1000e_getreg(FLOL), e1000e_getreg(RXDCTL), e1000e_getreg(RXSTMPL), - e1000e_getreg(TXSTMPH), e1000e_getreg(TIMADJH), e1000e_getreg(FCRTL), e1000e_getreg(TDBAH), @@ -3059,16 +3033,9 @@ static const readops e1000e_macreg_readops[] = { [MPTC] = e1000e_mac_read_clr4, [IAC] = e1000e_mac_read_clr4, [ICR] = e1000e_mac_icr_read, - [RDFH] = E1000E_LOW_BITS_READ(13), - [RDFHS] = E1000E_LOW_BITS_READ(13), - [RDFPC] = E1000E_LOW_BITS_READ(13), - [TDFH] = E1000E_LOW_BITS_READ(13), - [TDFHS] = E1000E_LOW_BITS_READ(13), [STATUS] = e1000e_get_status, [TARC0] = e1000e_get_tarc, - [PBS] = E1000E_LOW_BITS_READ(6), [ICS] = e1000e_mac_ics_read, - [AIT] = E1000E_LOW_BITS_READ(16), [TORH] = e1000e_mac_read_clr8, [GORCH] = e1000e_mac_read_clr8, [PRC127] = e1000e_mac_read_clr4, @@ -3084,27 +3051,25 @@ static const readops e1000e_macreg_readops[] = { [BPTC] = e1000e_mac_read_clr4, [TSCTC] = e1000e_mac_read_clr4, [ITR] = e1000e_mac_itr_read, - [RDFT] = E1000E_LOW_BITS_READ(13), - [RDFTS] = E1000E_LOW_BITS_READ(13), - [TDFPC] = 
E1000E_LOW_BITS_READ(13), - [TDFT] = E1000E_LOW_BITS_READ(13), - [TDFTS] = E1000E_LOW_BITS_READ(13), [CTRL] = e1000e_get_ctrl, [TARC1] = e1000e_get_tarc, [SWSM] = e1000e_mac_swsm_read, [IMS] = e1000e_mac_ims_read, + [SYSTIML] = e1000e_get_systiml, + [RXSATRH] = e1000e_get_rxsatrh, + [TXSTMPH] = e1000e_get_txstmph, [CRCERRS ... MPC] = e1000e_mac_readreg, [IP6AT ... IP6AT + 3] = e1000e_mac_readreg, [IP4AT ... IP4AT + 6] = e1000e_mac_readreg, [RA ... RA + 31] = e1000e_mac_readreg, [WUPM ... WUPM + 31] = e1000e_mac_readreg, - [MTA ... MTA + 127] = e1000e_mac_readreg, - [VFTA ... VFTA + 127] = e1000e_mac_readreg, - [FFMT ... FFMT + 254] = E1000E_LOW_BITS_READ(4), + [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = e1000e_mac_readreg, + [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = e1000e_mac_readreg, + [FFMT ... FFMT + 254] = e1000e_mac_readreg, [FFVT ... FFVT + 254] = e1000e_mac_readreg, [MDEF ... MDEF + 7] = e1000e_mac_readreg, - [FFLT ... FFLT + 10] = E1000E_LOW_BITS_READ(11), + [FFLT ... FFLT + 10] = e1000e_mac_readreg, [FTFT ... FTFT + 254] = e1000e_mac_readreg, [PBM ... PBM + 10239] = e1000e_mac_readreg, [RETA ... RETA + 31] = e1000e_mac_readreg, @@ -3127,22 +3092,10 @@ static const writeops e1000e_macreg_writeops[] = { e1000e_putreg(LEDCTL), e1000e_putreg(FCAL), e1000e_putreg(FCRUC), - e1000e_putreg(AIT), - e1000e_putreg(TDFH), - e1000e_putreg(TDFT), - e1000e_putreg(TDFHS), - e1000e_putreg(TDFTS), - e1000e_putreg(TDFPC), e1000e_putreg(WUC), e1000e_putreg(WUS), - e1000e_putreg(RDFH), - e1000e_putreg(RDFT), - e1000e_putreg(RDFHS), - e1000e_putreg(RDFTS), - e1000e_putreg(RDFPC), e1000e_putreg(IPAV), e1000e_putreg(TDBAH1), - e1000e_putreg(TIMINCA), e1000e_putreg(IAM), e1000e_putreg(EIAC), e1000e_putreg(IVAR), @@ -3150,7 +3103,6 @@ static const writeops e1000e_macreg_writeops[] = { e1000e_putreg(TARC1), e1000e_putreg(FLSWDATA), e1000e_putreg(POEMB), - e1000e_putreg(PBS), e1000e_putreg(MFUTP01), e1000e_putreg(MFUTP23), e1000e_putreg(MANC), @@ -3186,7 +3138,6 @@ static const writeops e1000e_macreg_writeops[] = { e1000e_putreg(SYSTIML), e1000e_putreg(SYSTIMH), e1000e_putreg(TIMADJL), - e1000e_putreg(TIMADJH), e1000e_putreg(RXUDP), e1000e_putreg(RXCFGL), e1000e_putreg(TSYNCRXCTL), @@ -3215,6 +3166,18 @@ static const writeops e1000e_macreg_writeops[] = { [TADV] = e1000e_set_16bit, [ITR] = e1000e_set_itr, [EERD] = e1000e_set_eerd, + [AIT] = e1000e_set_16bit, + [TDFH] = e1000e_set_13bit, + [TDFT] = e1000e_set_13bit, + [TDFHS] = e1000e_set_13bit, + [TDFTS] = e1000e_set_13bit, + [TDFPC] = e1000e_set_13bit, + [RDFH] = e1000e_set_13bit, + [RDFHS] = e1000e_set_13bit, + [RDFT] = e1000e_set_13bit, + [RDFTS] = e1000e_set_13bit, + [RDFPC] = e1000e_set_13bit, + [PBS] = e1000e_set_6bit, [GCR] = e1000e_set_gcr, [PSRCTL] = e1000e_set_psrctl, [RXCSUM] = e1000e_set_rxcsum, @@ -3247,18 +3210,20 @@ static const writeops e1000e_macreg_writeops[] = { [CTRL_DUP] = e1000e_set_ctrl, [RFCTL] = e1000e_set_rfctl, [RA + 1] = e1000e_mac_setmacaddr, + [TIMINCA] = e1000e_set_timinca, + [TIMADJH] = e1000e_set_timadjh, [IP6AT ... IP6AT + 3] = e1000e_mac_writereg, [IP4AT ... IP4AT + 6] = e1000e_mac_writereg, [RA + 2 ... RA + 31] = e1000e_mac_writereg, [WUPM ... WUPM + 31] = e1000e_mac_writereg, - [MTA ... MTA + 127] = e1000e_mac_writereg, - [VFTA ... VFTA + 127] = e1000e_mac_writereg, - [FFMT ... FFMT + 254] = e1000e_mac_writereg, + [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = e1000e_mac_writereg, + [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = e1000e_mac_writereg, + [FFMT ... FFMT + 254] = e1000e_set_4bit, [FFVT ... 
FFVT + 254] = e1000e_mac_writereg, [PBM ... PBM + 10239] = e1000e_mac_writereg, [MDEF ... MDEF + 7] = e1000e_mac_writereg, - [FFLT ... FFLT + 10] = e1000e_mac_writereg, + [FFLT ... FFLT + 10] = e1000e_set_11bit, [FTFT ... FTFT + 254] = e1000e_mac_writereg, [RETA ... RETA + 31] = e1000e_mac_writereg, [RSSRK ... RSSRK + 31] = e1000e_mac_writereg, @@ -3269,10 +3234,12 @@ enum { E1000E_NWRITEOPS = ARRAY_SIZE(e1000e_macreg_writeops) }; enum { MAC_ACCESS_PARTIAL = 1 }; -/* The array below combines alias offsets of the index values for the +/* + * The array below combines alias offsets of the index values for the * MAC registers that have aliases, with the indication of not fully * implemented registers (lowest bit). This combination is possible - * because all of the offsets are even. */ + * because all of the offsets are even. + */ static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = { /* Alias index offsets */ [FCRTL_A] = 0x07fe, [FCRTH_A] = 0x0802, @@ -3281,7 +3248,7 @@ static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = { [TDH_A] = 0x0cf8, [TDT_A] = 0x0cf8, [TIDV_A] = 0x0cf8, [TDFH_A] = 0xed00, [TDFT_A] = 0xed00, [RA_A ... RA_A + 31] = 0x14f0, - [VFTA_A ... VFTA_A + 127] = 0x1400, + [VFTA_A ... VFTA_A + E1000_VLAN_FILTER_TBL_SIZE - 1] = 0x1400, [RDBAL0_A ... RDLEN0_A] = 0x09bc, [TDBAL_A ... TDLEN_A] = 0x0cf8, /* Access options */ @@ -3347,7 +3314,7 @@ static void e1000e_autoneg_resume(E1000ECore *core) { if (e1000e_have_autoneg(core) && - !(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) { + !(core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP)) { qemu_get_queue(core->owner_nic)->link_down = false; timer_mod(core->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500); @@ -3386,11 +3353,10 @@ e1000e_core_pci_realize(E1000ECore *core, qemu_add_vm_change_state_handler(e1000e_vm_state_change, core); for (i = 0; i < E1000E_NUM_QUEUES; i++) { - net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, - E1000E_MAX_TX_FRAGS, core->has_vnet); + net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS); } - net_rx_pkt_init(&core->rx_pkt, core->has_vnet); + net_rx_pkt_init(&core->rx_pkt); e1000x_core_prepare_eeprom(core->eeprom, eeprom_templ, @@ -3412,7 +3378,6 @@ e1000e_core_pci_uninit(E1000ECore *core) qemu_del_vm_change_state_handler(core->vmstate); for (i = 0; i < E1000E_NUM_QUEUES; i++) { - net_tx_pkt_reset(core->tx[i].tx_pkt); net_tx_pkt_uninit(core->tx[i].tx_pkt); } @@ -3422,29 +3387,36 @@ e1000e_core_pci_uninit(E1000ECore *core) static const uint16_t e1000e_phy_reg_init[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE] = { [0] = { - [PHY_CTRL] = MII_CR_SPEED_SELECT_MSB | - MII_CR_FULL_DUPLEX | - MII_CR_AUTO_NEG_EN, + [MII_BMCR] = MII_BMCR_SPEED1000 | + MII_BMCR_FD | + MII_BMCR_AUTOEN, - [PHY_STATUS] = MII_SR_EXTENDED_CAPS | - MII_SR_LINK_STATUS | - MII_SR_AUTONEG_CAPS | - MII_SR_PREAMBLE_SUPPRESS | - MII_SR_EXTENDED_STATUS | - MII_SR_10T_HD_CAPS | - MII_SR_10T_FD_CAPS | - MII_SR_100X_HD_CAPS | - MII_SR_100X_FD_CAPS, + [MII_BMSR] = MII_BMSR_EXTCAP | + MII_BMSR_LINK_ST | + MII_BMSR_AUTONEG | + MII_BMSR_MFPS | + MII_BMSR_EXTSTAT | + MII_BMSR_10T_HD | + MII_BMSR_10T_FD | + MII_BMSR_100TX_HD | + MII_BMSR_100TX_FD, - [PHY_ID1] = 0x141, - [PHY_ID2] = E1000_PHY_ID2_82574x, - [PHY_AUTONEG_ADV] = 0xde1, - [PHY_LP_ABILITY] = 0x7e0, - [PHY_AUTONEG_EXP] = BIT(2), - [PHY_NEXT_PAGE_TX] = BIT(0) | BIT(13), - [PHY_1000T_CTRL] = BIT(8) | BIT(9) | BIT(10) | BIT(11), - [PHY_1000T_STATUS] = 0x3c00, - [PHY_EXT_STATUS] = BIT(12) | BIT(13), + [MII_PHYID1] = 0x141, + [MII_PHYID2] = E1000_PHY_ID2_82574x, + [MII_ANAR] = 
MII_ANAR_CSMACD | MII_ANAR_10 | + MII_ANAR_10FD | MII_ANAR_TX | + MII_ANAR_TXFD | MII_ANAR_PAUSE | + MII_ANAR_PAUSE_ASYM, + [MII_ANLPAR] = MII_ANLPAR_10 | MII_ANLPAR_10FD | + MII_ANLPAR_TX | MII_ANLPAR_TXFD | + MII_ANLPAR_T4 | MII_ANLPAR_PAUSE, + [MII_ANER] = MII_ANER_NP | MII_ANER_NWAY, + [MII_ANNP] = 1 | MII_ANNP_MP, + [MII_CTRL1000] = MII_CTRL1000_HALF | MII_CTRL1000_FULL | + MII_CTRL1000_PORT | MII_CTRL1000_MASTER, + [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL | + MII_STAT1000_ROK | MII_STAT1000_LOK, + [MII_EXTSTAT] = MII_EXTSTAT_1000T_HD | MII_EXTSTAT_1000T_FD, [PHY_COPPER_CTRL1] = BIT(5) | BIT(6) | BIT(8) | BIT(9) | BIT(12) | BIT(13), @@ -3501,8 +3473,7 @@ static const uint32_t e1000e_mac_reg_init[] = { [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = E1000E_MIN_XITR, }; -void -e1000e_core_reset(E1000ECore *core) +static void e1000e_reset(E1000ECore *core, bool sw) { int i; @@ -3511,9 +3482,16 @@ e1000e_core_reset(E1000ECore *core) e1000e_intrmgr_reset(core); memset(core->phy, 0, sizeof core->phy); - memmove(core->phy, e1000e_phy_reg_init, sizeof e1000e_phy_reg_init); - memset(core->mac, 0, sizeof core->mac); - memmove(core->mac, e1000e_mac_reg_init, sizeof e1000e_mac_reg_init); + memcpy(core->phy, e1000e_phy_reg_init, sizeof e1000e_phy_reg_init); + + for (i = 0; i < E1000E_MAC_SIZE; i++) { + if (sw && (i == PBA || i == PBS || i == FLA)) { + continue; + } + + core->mac[i] = i < ARRAY_SIZE(e1000e_mac_reg_init) ? + e1000e_mac_reg_init[i] : 0; + } core->rxbuf_min_shift = 1 + E1000_RING_DESC_LEN_SHIFT; @@ -3524,24 +3502,29 @@ e1000e_core_reset(E1000ECore *core) e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac); for (i = 0; i < ARRAY_SIZE(core->tx); i++) { - net_tx_pkt_reset(core->tx[i].tx_pkt); memset(&core->tx[i].props, 0, sizeof(core->tx[i].props)); core->tx[i].skip_cp = false; } } +void +e1000e_core_reset(E1000ECore *core) +{ + e1000e_reset(core, false); +} + void e1000e_core_pre_save(E1000ECore *core) { int i; NetClientState *nc = qemu_get_queue(core->owner_nic); /* - * If link is down and auto-negotiation is supported and ongoing, - * complete auto-negotiation immediately. This allows us to look - * at MII_SR_AUTONEG_COMPLETE to infer link status on load. - */ + * If link is down and auto-negotiation is supported and ongoing, + * complete auto-negotiation immediately. This allows us to look + * at MII_BMSR_AN_COMP to infer link status on load. + */ if (nc->link_down && e1000e_have_autoneg(core)) { - core->phy[0][PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE; + core->phy[0][MII_BMSR] |= MII_BMSR_AN_COMP; e1000e_update_flowctl_status(core); } @@ -3557,7 +3540,8 @@ e1000e_core_post_load(E1000ECore *core) { NetClientState *nc = qemu_get_queue(core->owner_nic); - /* nc.link_down can't be migrated, so infer link_down according + /* + * nc.link_down can't be migrated, so infer link_down according * to link status bit in core.mac[STATUS]. 
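A point worth calling out in the e1000e_reset(core, sw) rework above: a software reset (CTRL.RST) now preserves PBA, PBS and FLA, while everything else returns to the init table. A minimal sketch of that policy, with made-up register indices and init values standing in for QEMU's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/* illustrative register indices, not the real offsets */
enum { CTRL = 0, PBA = 3, PBS = 4, FLA = 5, MAC_SIZE = 8 };

static const uint32_t mac_reg_init[] = {
    [CTRL] = 0x00100240, [PBA] = 0x00140014,
};

static void reset_mac(uint32_t *mac, bool sw)
{
    for (size_t i = 0; i < MAC_SIZE; i++) {
        if (sw && (i == PBA || i == PBS || i == FLA)) {
            continue;   /* software reset leaves buffer/flash layout alone */
        }
        mac[i] = i < ARRAY_SIZE(mac_reg_init) ? mac_reg_init[i] : 0;
    }
}

int main(void)
{
    uint32_t mac[MAC_SIZE] = { [PBA] = 0xdead };
    reset_mac(mac, true);
    printf("PBA after sw reset: 0x%x\n", (unsigned)mac[PBA]); /* 0xdead kept */
    return 0;
}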
*/ nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0; diff --git a/hw/net/e1000e_core.h b/hw/net/e1000e_core.h index 4ddb4d2c39..66b025cc43 100644 --- a/hw/net/e1000e_core.h +++ b/hw/net/e1000e_core.h @@ -1,37 +1,37 @@ /* -* Core code for QEMU e1000e emulation -* -* Software developer's manuals: -* http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf -* -* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) -* Developed by Daynix Computing LTD (http://www.daynix.com) -* -* Authors: -* Dmitry Fleytman -* Leonid Bloch -* Yan Vugenfirer -* -* Based on work done by: -* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. -* Copyright (c) 2008 Qumranet -* Based on work done by: -* Copyright (c) 2007 Dan Aloni -* Copyright (c) 2004 Antony T Curtis -* -* This library is free software; you can redistribute it and/or -* modify it under the terms of the GNU Lesser General Public -* License as published by the Free Software Foundation; either -* version 2.1 of the License, or (at your option) any later version. -* -* This library is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -* Lesser General Public License for more details. -* -* You should have received a copy of the GNU Lesser General Public -* License along with this library; if not, see . -*/ + * Core code for QEMU e1000e emulation + * + * Software developer's manuals: + * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf + * + * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Dmitry Fleytman + * Leonid Bloch + * Yan Vugenfirer + * + * Based on work done by: + * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. + * Copyright (c) 2008 Qumranet + * Based on work done by: + * Copyright (c) 2007 Dan Aloni + * Copyright (c) 2004 Antony T Curtis + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
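Tying together the interrupt changes in this patch (e1000e_raise_interrupts() above and the msi_causes_pending removal just below): the new code is an edge detector, so only causes that newly appear in IMS & ICR trigger a notification. A hedged, self-contained sketch of that computation (names are stand-ins; the real function also handles writes to IMS and the delay timers):

#include <stdint.h>
#include <stdio.h>

static uint32_t raise_causes(uint32_t *icr, uint32_t ims, uint32_t causes)
{
    uint32_t old_causes = ims & *icr;   /* already pending and unmasked */

    *icr |= causes;
    return ims & *icr & ~old_causes;    /* only newly raised, unmasked bits */
}

int main(void)
{
    uint32_t icr = 0;
    printf("%x\n", (unsigned)raise_causes(&icr, 0x4, 0x4)); /* 4: notify */
    printf("%x\n", (unsigned)raise_causes(&icr, 0x4, 0x4)); /* 0: no re-notify */
    return 0;
}

This is what lets the series drop msi_causes_pending: re-raising a cause the driver has not yet acknowledged no longer produces a duplicate MSI.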
+ */ #ifndef HW_NET_E1000E_CORE_H #define HW_NET_E1000E_CORE_H @@ -95,10 +95,8 @@ struct E1000Core { E1000IntrDelayTimer tidv; E1000IntrDelayTimer itr; - bool itr_intr_pending; E1000IntrDelayTimer eitr[E1000E_MSIX_VEC_NUM]; - bool eitr_intr_pending[E1000E_MSIX_VEC_NUM]; VMChangeStateEntry *vmstate; @@ -113,7 +111,7 @@ struct E1000Core { PCIDevice *owner; void (*owner_start_recv)(PCIDevice *d); - uint32_t msi_causes_pending; + int64_t timadj; }; void diff --git a/hw/net/e1000x_common.c b/hw/net/e1000x_common.c index 2f43e8cd13..212873fd77 100644 --- a/hw/net/e1000x_common.c +++ b/hw/net/e1000x_common.c @@ -24,9 +24,12 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "hw/net/mii.h" #include "hw/pci/pci_device.h" +#include "net/eth.h" #include "net/net.h" +#include "e1000_common.h" #include "e1000x_common.h" #include "trace.h" @@ -45,9 +48,9 @@ bool e1000x_rx_ready(PCIDevice *d, uint32_t *mac) return true; } -bool e1000x_is_vlan_packet(const uint8_t *buf, uint16_t vet) +bool e1000x_is_vlan_packet(const void *buf, uint16_t vet) { - uint16_t eth_proto = lduw_be_p(buf + 12); + uint16_t eth_proto = lduw_be_p(&PKT_GET_ETH_HDR(buf)->h_proto); bool res = (eth_proto == vet); trace_e1000x_vlan_is_vlan_pkt(res, eth_proto, vet); @@ -55,33 +58,64 @@ bool e1000x_is_vlan_packet(const uint8_t *buf, uint16_t vet) return res; } -bool e1000x_rx_group_filter(uint32_t *mac, const uint8_t *buf) +bool e1000x_rx_vlan_filter(uint32_t *mac, const struct vlan_header *vhdr) +{ + if (e1000x_vlan_rx_filter_enabled(mac)) { + uint16_t vid = lduw_be_p(&vhdr->h_tci); + uint32_t vfta = + ldl_le_p((uint32_t *)(mac + VFTA) + + ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK)); + if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) { + trace_e1000x_rx_flt_vlan_mismatch(vid); + return false; + } + + trace_e1000x_rx_flt_vlan_match(vid); + } + + return true; +} + +bool e1000x_rx_group_filter(uint32_t *mac, const struct eth_header *ehdr) { static const int mta_shift[] = { 4, 3, 2, 0 }; uint32_t f, ra[2], *rp, rctl = mac[RCTL]; + if (is_broadcast_ether_addr(ehdr->h_dest)) { + if (rctl & E1000_RCTL_BAM) { + return true; + } + } else if (is_multicast_ether_addr(ehdr->h_dest)) { + if (rctl & E1000_RCTL_MPE) { + return true; + } + } else { + if (rctl & E1000_RCTL_UPE) { + return true; + } + } + for (rp = mac + RA; rp < mac + RA + 32; rp += 2) { if (!(rp[1] & E1000_RAH_AV)) { continue; } ra[0] = cpu_to_le32(rp[0]); ra[1] = cpu_to_le32(rp[1]); - if (!memcmp(buf, (uint8_t *)ra, 6)) { + if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) { trace_e1000x_rx_flt_ucast_match((int)(rp - mac - RA) / 2, - MAC_ARG(buf)); + MAC_ARG(ehdr->h_dest)); return true; } } - trace_e1000x_rx_flt_ucast_mismatch(MAC_ARG(buf)); + trace_e1000x_rx_flt_ucast_mismatch(MAC_ARG(ehdr->h_dest)); f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3]; - f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff; + f = (((ehdr->h_dest[5] << 8) | ehdr->h_dest[4]) >> f) & 0xfff; if (mac[MTA + (f >> 5)] & (1 << (f & 0x1f))) { - e1000x_inc_reg_if_not_full(mac, MPRC); return true; } - trace_e1000x_rx_flt_inexact_mismatch(MAC_ARG(buf), + trace_e1000x_rx_flt_inexact_mismatch(MAC_ARG(ehdr->h_dest), (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5, mac[MTA + (f >> 5)]); @@ -106,16 +140,16 @@ bool e1000x_hw_rx_enabled(uint32_t *mac) bool e1000x_is_oversized(uint32_t *mac, size_t size) { + size_t header_size = sizeof(struct eth_header) + sizeof(struct vlan_header); /* this is the size past which hardware will drop packets when setting LPE=0 */ - static const int 
maximum_ethernet_vlan_size = 1522; + size_t maximum_short_size = header_size + ETH_MTU; /* this is the size past which hardware will drop packets when setting LPE=1 */ - static const int maximum_ethernet_lpe_size = 16 * KiB; + size_t maximum_large_size = 16 * KiB - ETH_FCS_LEN; - if ((size > maximum_ethernet_lpe_size || - (size > maximum_ethernet_vlan_size - && !(mac[RCTL] & E1000_RCTL_LPE))) + if ((size > maximum_large_size || + (size > maximum_short_size && !(mac[RCTL] & E1000_RCTL_LPE))) && !(mac[RCTL] & E1000_RCTL_SBP)) { e1000x_inc_reg_if_not_full(mac, ROC); trace_e1000x_rx_oversized(size); @@ -152,8 +186,8 @@ void e1000x_reset_mac_addr(NICState *nic, uint32_t *mac_regs, void e1000x_update_regs_on_autoneg_done(uint32_t *mac, uint16_t *phy) { e1000x_update_regs_on_link_up(mac, phy); - phy[PHY_LP_ABILITY] |= MII_LPAR_LPACK; - phy[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE; + phy[MII_ANLPAR] |= MII_ANLPAR_ACK; + phy[MII_BMSR] |= MII_BMSR_AN_COMP; trace_e1000x_link_negotiation_done(); } @@ -209,23 +243,36 @@ e1000x_rxbufsize(uint32_t rctl) void e1000x_update_rx_total_stats(uint32_t *mac, - size_t data_size, - size_t data_fcs_size) + eth_pkt_types_e pkt_type, + size_t pkt_size, + size_t pkt_fcs_size) { static const int PRCregs[6] = { PRC64, PRC127, PRC255, PRC511, PRC1023, PRC1522 }; - e1000x_increase_size_stats(mac, PRCregs, data_fcs_size); + e1000x_increase_size_stats(mac, PRCregs, pkt_fcs_size); e1000x_inc_reg_if_not_full(mac, TPR); - mac[GPRC] = mac[TPR]; + e1000x_inc_reg_if_not_full(mac, GPRC); /* TOR - Total Octets Received: * This register includes bytes received in a packet from the field through the field, inclusively. * Always include FCS length (4) in size. */ - e1000x_grow_8reg_if_not_full(mac, TORL, data_size + 4); - mac[GORCL] = mac[TORL]; - mac[GORCH] = mac[TORH]; + e1000x_grow_8reg_if_not_full(mac, TORL, pkt_size + 4); + e1000x_grow_8reg_if_not_full(mac, GORCL, pkt_size + 4); + + switch (pkt_type) { + case ETH_PKT_BCAST: + e1000x_inc_reg_if_not_full(mac, BPRC); + break; + + case ETH_PKT_MCAST: + e1000x_inc_reg_if_not_full(mac, MPRC); + break; + + default: + break; + } } void @@ -265,3 +312,28 @@ e1000x_read_tx_ctx_descr(struct e1000_context_desc *d, props->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0; props->tse = (op & E1000_TXD_CMD_TSE) ? 
1 : 0; } + +void e1000x_timestamp(uint32_t *mac, int64_t timadj, size_t lo, size_t hi) +{ + int64_t ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint32_t timinca = mac[TIMINCA]; + uint32_t incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK; + uint32_t incperiod = MAX(timinca >> E1000_TIMINCA_INCPERIOD_SHIFT, 1); + int64_t timestamp = timadj + muldiv64(ns, incvalue, incperiod * 16); + + mac[lo] = timestamp & 0xffffffff; + mac[hi] = timestamp >> 32; +} + +void e1000x_set_timinca(uint32_t *mac, int64_t *timadj, uint32_t val) +{ + int64_t ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint32_t old_val = mac[TIMINCA]; + uint32_t old_incvalue = old_val & E1000_TIMINCA_INCVALUE_MASK; + uint32_t old_incperiod = MAX(old_val >> E1000_TIMINCA_INCPERIOD_SHIFT, 1); + uint32_t incvalue = val & E1000_TIMINCA_INCVALUE_MASK; + uint32_t incperiod = MAX(val >> E1000_TIMINCA_INCPERIOD_SHIFT, 1); + + mac[TIMINCA] = val; + *timadj += (muldiv64(ns, incvalue, incperiod) - muldiv64(ns, old_incvalue, old_incperiod)) / 16; +} diff --git a/hw/net/e1000x_common.h b/hw/net/e1000x_common.h index b7742775c4..be291684de 100644 --- a/hw/net/e1000x_common.h +++ b/hw/net/e1000x_common.h @@ -1,108 +1,34 @@ /* -* QEMU e1000(e) emulation - shared code -* -* Copyright (c) 2008 Qumranet -* -* Based on work done by: -* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. -* Copyright (c) 2007 Dan Aloni -* Copyright (c) 2004 Antony T Curtis -* -* This library is free software; you can redistribute it and/or -* modify it under the terms of the GNU Lesser General Public -* License as published by the Free Software Foundation; either -* version 2.1 of the License, or (at your option) any later version. -* -* This library is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -* Lesser General Public License for more details. -* -* You should have received a copy of the GNU Lesser General Public -* License along with this library; if not, see . -*/ + * QEMU e1000(e) emulation - shared code + * + * Copyright (c) 2008 Qumranet + * + * Based on work done by: + * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. + * Copyright (c) 2007 Dan Aloni + * Copyright (c) 2004 Antony T Curtis + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ #ifndef HW_NET_E1000X_COMMON_H #define HW_NET_E1000X_COMMON_H -#include "e1000_regs.h" - -#define defreg(x) x = (E1000_##x >> 2) -enum { - defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC), - defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC), - defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC), - defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH0), - defreg(RDBAL0), defreg(RDH0), defreg(RDLEN0), defreg(RDT0), - defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH), - defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT), - defreg(TDLEN1), defreg(TDBAL1), defreg(TDBAH1), defreg(TDH1), - defreg(TDT1), defreg(TORH), defreg(TORL), defreg(TOTH), - defreg(TOTL), defreg(TPR), defreg(TPT), defreg(TXDCTL), - defreg(WUFC), defreg(RA), defreg(MTA), defreg(CRCERRS), - defreg(VFTA), defreg(VET), defreg(RDTR), defreg(RADV), - defreg(TADV), defreg(ITR), defreg(SCC), defreg(ECOL), - defreg(MCC), defreg(LATECOL), defreg(COLC), defreg(DC), - defreg(TNCRS), defreg(SEQEC), defreg(CEXTERR), defreg(RLEC), - defreg(XONRXC), defreg(XONTXC), defreg(XOFFRXC), defreg(XOFFTXC), - defreg(FCRUC), defreg(AIT), defreg(TDFH), defreg(TDFT), - defreg(TDFHS), defreg(TDFTS), defreg(TDFPC), defreg(WUC), - defreg(WUS), defreg(POEMB), defreg(PBS), defreg(RDFH), - defreg(RDFT), defreg(RDFHS), defreg(RDFTS), defreg(RDFPC), - defreg(PBM), defreg(IPAV), defreg(IP4AT), defreg(IP6AT), - defreg(WUPM), defreg(FFLT), defreg(FFMT), defreg(FFVT), - defreg(TARC0), defreg(TARC1), defreg(IAM), defreg(EXTCNF_CTRL), - defreg(GCR), defreg(TIMINCA), defreg(EIAC), defreg(CTRL_EXT), - defreg(IVAR), defreg(MFUTP01), defreg(MFUTP23), defreg(MANC2H), - defreg(MFVAL), defreg(MDEF), defreg(FACTPS), defreg(FTFT), - defreg(RUC), defreg(ROC), defreg(RFC), defreg(RJC), - defreg(PRC64), defreg(PRC127), defreg(PRC255), defreg(PRC511), - defreg(PRC1023), defreg(PRC1522), defreg(PTC64), defreg(PTC127), - defreg(PTC255), defreg(PTC511), defreg(PTC1023), defreg(PTC1522), - defreg(GORCL), defreg(GORCH), defreg(GOTCL), defreg(GOTCH), - defreg(RNBC), defreg(BPRC), defreg(MPRC), defreg(RFCTL), - defreg(PSRCTL), defreg(MPTC), defreg(BPTC), defreg(TSCTFC), - defreg(IAC), defreg(MGTPRC), defreg(MGTPDC), defreg(MGTPTC), - defreg(TSCTC), defreg(RXCSUM), defreg(FUNCTAG), defreg(GSCL_1), - defreg(GSCL_2), defreg(GSCL_3), defreg(GSCL_4), defreg(GSCN_0), - defreg(GSCN_1), defreg(GSCN_2), defreg(GSCN_3), defreg(GCR2), - defreg(RAID), defreg(RSRPD), defreg(TIDV), defreg(EITR), - defreg(MRQC), defreg(RETA), defreg(RSSRK), defreg(RDBAH1), - defreg(RDBAL1), defreg(RDLEN1), defreg(RDH1), defreg(RDT1), - defreg(PBACLR), defreg(FCAL), defreg(FCAH), defreg(FCT), - defreg(FCRTH), defreg(FCRTL), defreg(FCTTV), defreg(FCRTV), - defreg(FLA), defreg(EEWR), defreg(FLOP), defreg(FLOL), - defreg(FLSWCTL), defreg(FLSWCNT), defreg(RXDCTL), defreg(RXDCTL1), - defreg(MAVTV0), defreg(MAVTV1), defreg(MAVTV2), defreg(MAVTV3), - defreg(TXSTMPL), defreg(TXSTMPH), defreg(SYSTIML), defreg(SYSTIMH), - defreg(RXCFGL), defreg(RXUDP), defreg(TIMADJL), defreg(TIMADJH), - defreg(RXSTMPH), defreg(RXSTMPL), defreg(RXSATRL), defreg(RXSATRH), - defreg(FLASHT), defreg(TIPG), defreg(RDH), defreg(RDT), - defreg(RDLEN), defreg(RDBAH), defreg(RDBAL), - defreg(TXDCTL1), - defreg(FLSWDATA), - defreg(CTRL_DUP), - defreg(EXTCNF_SIZE), - defreg(EEMNGCTL), - defreg(EEMNGDATA), - defreg(FLMNGCTL), - defreg(FLMNGDATA), - defreg(FLMNGCNT), - defreg(TSYNCRXCTL), - defreg(TSYNCTXCTL), - - /* Aliases */ - defreg(RDH0_A), defreg(RDT0_A), defreg(RDTR_A), defreg(RDFH_A), - defreg(RDFT_A), defreg(TDH_A), 
defreg(TDT_A), defreg(TIDV_A), - defreg(TDFH_A), defreg(TDFT_A), defreg(RA_A), defreg(RDBAL0_A), - defreg(TDBAL_A), defreg(TDLEN_A), defreg(VFTA_A), defreg(RDLEN0_A), - defreg(FCRTL_A), defreg(FCRTH_A) -}; - static inline void e1000x_inc_reg_if_not_full(uint32_t *mac, int index) { - if (mac[index] != 0xffffffff) { + if (mac[index] != UINT32_MAX) { mac[index]++; } } @@ -152,21 +78,22 @@ static inline void e1000x_update_regs_on_link_down(uint32_t *mac, uint16_t *phy) { mac[STATUS] &= ~E1000_STATUS_LU; - phy[PHY_STATUS] &= ~MII_SR_LINK_STATUS; - phy[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE; - phy[PHY_LP_ABILITY] &= ~MII_LPAR_LPACK; + phy[MII_BMSR] &= ~MII_BMSR_LINK_ST; + phy[MII_BMSR] &= ~MII_BMSR_AN_COMP; + phy[MII_ANLPAR] &= ~MII_ANLPAR_ACK; } static inline void e1000x_update_regs_on_link_up(uint32_t *mac, uint16_t *phy) { mac[STATUS] |= E1000_STATUS_LU; - phy[PHY_STATUS] |= MII_SR_LINK_STATUS; + phy[MII_BMSR] |= MII_BMSR_LINK_ST; } void e1000x_update_rx_total_stats(uint32_t *mac, - size_t data_size, - size_t data_fcs_size); + eth_pkt_types_e pkt_type, + size_t pkt_size, + size_t pkt_fcs_size); void e1000x_core_prepare_eeprom(uint16_t *eeprom, const uint16_t *templ, @@ -178,9 +105,11 @@ uint32_t e1000x_rxbufsize(uint32_t rctl); bool e1000x_rx_ready(PCIDevice *d, uint32_t *mac); -bool e1000x_is_vlan_packet(const uint8_t *buf, uint16_t vet); +bool e1000x_is_vlan_packet(const void *buf, uint16_t vet); -bool e1000x_rx_group_filter(uint32_t *mac, const uint8_t *buf); +bool e1000x_rx_vlan_filter(uint32_t *mac, const struct vlan_header *vhdr); + +bool e1000x_rx_group_filter(uint32_t *mac, const struct eth_header *ehdr); bool e1000x_hw_rx_enabled(uint32_t *mac); @@ -213,4 +142,7 @@ typedef struct e1000x_txd_props { void e1000x_read_tx_ctx_descr(struct e1000_context_desc *d, e1000x_txd_props *props); +void e1000x_timestamp(uint32_t *mac, int64_t timadj, size_t lo, size_t hi); +void e1000x_set_timinca(uint32_t *mac, int64_t *timadj, uint32_t val); + #endif diff --git a/hw/net/e1000x_regs.h b/hw/net/e1000x_regs.h new file mode 100644 index 0000000000..13760c66d3 --- /dev/null +++ b/hw/net/e1000x_regs.h @@ -0,0 +1,971 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef HW_E1000X_REGS_H +#define HW_E1000X_REGS_H + +/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC +#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A +#define E1000_DEV_ID_82574L 0x10D3 +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB + +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D + +/* Device Specific Register Defaults */ +#define E1000_PHY_ID2_82541x 0x380 +#define E1000_PHY_ID2_82544x 0xC30 +#define E1000_PHY_ID2_8254xx_DEFAULT 0xC20 /* 82540x, 82545x, and 82546x */ +#define E1000_PHY_ID2_82573x 0xCC0 +#define E1000_PHY_ID2_82574x 0xCB1 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. 
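+ *
+ * (Editor's note: the QEMU device models index their mac[] register
+ * array by 32-bit word rather than by byte, so a register at byte
+ * offset E1000_FOO is reached as mac[E1000_FOO >> 2]; this is the
+ * conversion performed by the defreg() macro removed from
+ * e1000x_common.h above, e.g. mac[TIMINCA] in e1000x_timestamp().)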
+ * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEMNGDATA 0x01014 /* MNG EEPROM Read/Write data */ +#define E1000_FLMNGCTL 0x01018 /* MNG Flash Control */ +#define E1000_FLMNGDATA 0x0101C /* MNG FLASH Read data */ +#define E1000_FLMNGCNT 0x01020 /* MNG FLASH Read Counter */ +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTL_A 0x00168 /* Alias to FCRTL */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_RDFH 0x02410 /* Receive Data FIFO Head Register - RW */ +#define E1000_RDFH_A 0x08000 /* Alias to RDFH */ +#define E1000_RDFT 0x02418 /* Receive Data FIFO Tail Register - RW */ +#define E1000_RDFT_A 0x08008 /* Alias to RDFT */ +#define E1000_RDFHS 0x02420 /* Receive Data FIFO Head Saved Register - RW */ +#define E1000_RDFTS 0x02428 /* Receive Data FIFO Tail Saved Register - RW */ +#define E1000_RDFPC 0x02430 /* Receive Data FIFO Packet Count - RW */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFH_A 0x08010 /* Alias to TDFH */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFT_A 0x08018 /* Alias to TDFT */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - 
R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MAVTV0 0x05010 /* 
Management VLAN TAG Value 0 */ +#define E1000_MAVTV1 0x05014 /* Management VLAN TAG Value 1 */ +#define E1000_MAVTV2 0x05018 /* Management VLAN TAG Value 2 */ +#define E1000_MAVTV3 0x0501c /* Management VLAN TAG Value 3 */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA_A 0x00040 /* Alias to RA */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VFTA_A 0x00600 /* Alias to VFTA */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_MFVAL 0x05824 /* Manageability Filters Valid - RW */ +#define E1000_MDEF 0x05890 /* Manageability Decision Filters - RW Array */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FTFT 0x09400 /* Flexible TCO Filter Table - RW Array */ + +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_FUNCTAG 0x05B08 /* Function-Tag Register */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_GSCN_0 0x05B20 /* 3GIO Statistic Counter Register #0 */ +#define E1000_GSCN_1 0x05B24 /* 3GIO Statistic Counter Register #1 */ +#define E1000_GSCN_2 0x05B28 /* 3GIO Statistic Counter Register #2 */ +#define E1000_GSCN_3 0x05B2C /* 3GIO Statistic Counter Register #3 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_PBACLR 0x05B68 /* MSI-X PBA Clear */ + +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TIMADJL 0x0B60C /* Time Adjustment Offset register Low - RW */ +#define E1000_TIMADJH 0x0B610 /* Time Adjustment Offset register High - RW */ + +/* RSS registers */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ + +#define 
E1000_RETA_IDX(hash) ((hash) & (BIT(7) - 1)) +#define E1000_RETA_VAL(reta, hash) (((uint8_t *)(reta))[E1000_RETA_IDX(hash)]) + +#define E1000_MRQC_EN_TCPIPV4(mrqc) ((mrqc) & BIT(16)) +#define E1000_MRQC_EN_IPV4(mrqc) ((mrqc) & BIT(17)) +#define E1000_MRQC_EN_TCPIPV6EX(mrqc) ((mrqc) & BIT(18)) +#define E1000_MRQC_EN_IPV6EX(mrqc) ((mrqc) & BIT(19)) +#define E1000_MRQC_EN_IPV6(mrqc) ((mrqc) & BIT(20)) + +#define E1000_MRQ_RSS_TYPE_NONE (0) +#define E1000_MRQ_RSS_TYPE_IPV4TCP (1) +#define E1000_MRQ_RSS_TYPE_IPV4 (2) +#define E1000_MRQ_RSS_TYPE_IPV6TCPEX (3) +#define E1000_MRQ_RSS_TYPE_IPV6EX (4) +#define E1000_MRQ_RSS_TYPE_IPV6 (5) + +#define E1000_ICR_ASSERTED BIT(31) +#define E1000_EIAC_MASK 0x01F00000 + +/* RFCTL register bits */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* TARC* parsing */ +#define E1000_TARC_ENABLE BIT(10) + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_RXDW 0x00000080 /* rx desc written back */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ +#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ +#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ +#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ +#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ +#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ + +#define E1000_ICR_OTHER_CAUSES (E1000_ICR_LSC | \ + E1000_ICR_RXO | \ + E1000_ICR_MDAC | \ + E1000_ICR_SRPD | \ + E1000_ICR_ACK | \ + E1000_ICR_MNG) + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min.
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_RXDW E1000_ICR_RXDW /* rx desc written back */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_RXDW E1000_ICR_RXDW /* rx desc written back */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 +#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 +#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 +#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 +#define E1000_IMS_OTHER E1000_ICR_OTHER +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_RXDW E1000_ICR_RXDW /* rx desc written back */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ 
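+/*
+ * Editor's illustration, not part of the patch: the SZ values above are
+ * meaningful only together with E1000_RCTL_BSEX, so decoding the RX
+ * buffer size has to look at both fields. A minimal sketch follows (the
+ * decoder actually used by this tree is e1000x_rxbufsize() in
+ * e1000x_common.c; the helper name here is hypothetical). Note that
+ * E1000_RCTL_SZ_256 doubles as the mask of the two size bits, and that
+ * BSEX set with a zero size field is reserved, so it falls through to
+ * the default:
+ *
+ *     static uint32_t rctl_rx_buffer_size(uint32_t rctl)
+ *     {
+ *         switch (rctl & (E1000_RCTL_BSEX | E1000_RCTL_SZ_256)) {
+ *         case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384: return 16384;
+ *         case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:  return 8192;
+ *         case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:  return 4096;
+ *         case E1000_RCTL_SZ_1024:                    return 1024;
+ *         case E1000_RCTL_SZ_512:                     return 512;
+ *         case E1000_RCTL_SZ_256:                     return 256;
+ *         default:                                    return 2048;
+ *         }
+ *     }
+ */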
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 0x10 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 8 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ + +/* 82574 EERD/EEWR registers layout */ +#define E1000_EERW_START BIT(0) +#define E1000_EERW_DONE BIT(1) +#define E1000_EERW_ADDR_SHIFT 2 +#define E1000_EERW_ADDR_MASK ((1L << 14) - 1) +#define E1000_EERW_DATA_SHIFT 16 +#define E1000_EERW_DATA_MASK ((1L << 16) - 1) + +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_SPD_SHIFT 8 /* Speed Select Shift */ + +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* auto speed detection check */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* EEPROM reset */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_INT_TIMERS_CLEAR_ENA 0x20000000 +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ + +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define 
E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ + + +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +/* Rx Interrupt Delay Timer */ +#define E1000_RDTR_FPD BIT(31) + +/* Tx Interrupt Delay Timer */ +#define E1000_TIDV_FPD BIT(31) + +/* Delay increments in nanoseconds for delayed interrupts registers */ +#define E1000_INTR_DELAY_NS_RES (1024) + +/* Delay increments in nanoseconds for interrupt throttling registers */ +#define E1000_INTR_THROTTLING_NS_RES (256) + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment.
*/ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* HH Time Sync */ +#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define E1000_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define E1000_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ + +#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000 + +#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000 + +#define E1000_TIMINCA_INCPERIOD_SHIFT 24 +#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF + +/* PCI Express Control */ +/* 3GIO Control Register - GCR (0x05B00; RW) */ +#define E1000_L0S_ADJUST (1 << 9) +#define E1000_L1_ENTRY_LATENCY_MSB (1 << 23) +#define E1000_L1_ENTRY_LATENCY_LSB (1 << 25 | 1 << 26) + +#define E1000_L0S_ADJUST (1 << 9) +#define E1000_L1_ENTRY_LATENCY_MSB (1 << 23) +#define E1000_L1_ENTRY_LATENCY_LSB (1 << 25 | 1 << 26) + +#define E1000_GCR_RO_BITS (1 << 23 | 1 << 25 | 1 << 26) + +/* MSI-X PBA Clear register */ +#define E1000_PBACLR_VALID_MASK (BIT(5) - 1) + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_CMD_SNAP 0x40000000 /* Update SNAP header */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* 
Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Legacy Receive Descriptor */ +struct e1000_rx_desc { + uint64_t buffer_addr; /* Address of the descriptor's data buffer */ + uint16_t length; /* Length of data DMAed into data buffer */ + uint16_t csum; /* Packet checksum */ + uint8_t status; /* Descriptor status */ + uint8_t errors; /* Descriptor Errors */ + uint16_t special; +}; + +/* Extended Receive Descriptor */ +union e1000_rx_desc_extended { + struct { + uint64_t buffer_addr; + uint64_t reserved; + } read; + struct { + struct { + uint32_t mrq; /* Multiple Rx Queues */ + union { + uint32_t rss; /* RSS Hash */ + struct { + uint16_t ip_id; /* IP id */ + uint16_t csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + uint32_t status_error; /* ext status/error */ + uint16_t length; + uint16_t vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + uint64_t buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + uint32_t mrq; /* Multiple Rx Queues */ + union { + uint32_t rss; /* RSS Hash */ + struct { + uint16_t ip_id; /* IP id */ + uint16_t csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + uint32_t status_error; /* ext status/error */ + uint16_t length0; /* length of buffer 0 */ + uint16_t vlan; /* VLAN tag */ + } middle; + struct { + uint16_t header_status; + /* length of buffers 1-3 */ + uint16_t length[PS_PAGE_BUFFERS]; + } upper; + uint64_t reserved; + } wb; /* writeback */ +}; + +/* Receive Checksum Control bits */ +#define E1000_RXCSUM_IPOFLD 0x100 /* IP Checksum Offload Enable */ +#define E1000_RXCSUM_TUOFLD 0x200 /* TCP/UDP Checksum Offload Enable */ +#define E1000_RXCSUM_PCSD 0x2000 /* Packet Checksum Disable */ + +#define E1000_RING_DESC_LEN (16) +#define E1000_RING_DESC_LEN_SHIFT (4) + +#define E1000_MIN_RX_DESC_LEN E1000_RING_DESC_LEN + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define
E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +/* RX packet types */ +#define E1000_RXD_PKT_MAC (0) +#define E1000_RXD_PKT_IP4 (1) +#define E1000_RXD_PKT_IP4_XDP (2) +#define E1000_RXD_PKT_IP6 (5) +#define E1000_RXD_PKT_IP6_XDP (6) + +#define E1000_RXD_PKT_TYPE(t) ((t) << 16) + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + uint32_t ip_config; + struct { + uint8_t ipcss; /* IP checksum start */ + uint8_t ipcso; /* IP checksum offset */ + uint16_t ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + uint32_t tcp_config; + struct { + uint8_t tucss; /* TCP checksum start */ + uint8_t tucso; /* TCP checksum offset */ + uint16_t tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + uint32_t cmd_and_length; /* */ + union { + uint32_t data; + struct { + uint8_t status; /* Descriptor status */ + uint8_t hdr_len; /* Header length */ + uint16_t mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RMCP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RMCP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_DIS_IP_CHK_ARP 0x10000000 /* Disable IP address checking */ + /* for ARP packets - in 82574 */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define
E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* FACTPS Control */ +#define E1000_FACTPS_LAN0_ON 0x00000004 /* Lan 0 enable */ + +/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* I/O-Mapped Access to Internal Registers, Memories, and Flash */ +#define E1000_IOADDR 0x00 +#define E1000_IODATA 0x04 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#endif /* HW_E1000_REGS_H */ diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c index c753bfb3a8..798ea33d08 100644 --- a/hw/net/fsl_etsec/etsec.c +++ b/hw/net/fsl_etsec/etsec.c @@ -29,6 +29,7 @@ #include "qemu/osdep.h" #include "hw/sysbus.h" #include "hw/irq.h" +#include "hw/net/mii.h" #include "hw/ptimer.h" #include "hw/qdev-properties.h" #include "etsec.h" @@ -339,11 +340,11 @@ static void etsec_reset(DeviceState *d) etsec->rx_buffer_len = 0; etsec->phy_status = - MII_SR_EXTENDED_CAPS | MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS | - MII_SR_AUTONEG_COMPLETE | MII_SR_PREAMBLE_SUPPRESS | - MII_SR_EXTENDED_STATUS | MII_SR_100T2_HD_CAPS | MII_SR_100T2_FD_CAPS | - MII_SR_10T_HD_CAPS | MII_SR_10T_FD_CAPS | MII_SR_100X_HD_CAPS | - MII_SR_100X_FD_CAPS | MII_SR_100T4_CAPS; + MII_BMSR_EXTCAP | MII_BMSR_LINK_ST | MII_BMSR_AUTONEG | + MII_BMSR_AN_COMP | MII_BMSR_MFPS | MII_BMSR_EXTSTAT | + MII_BMSR_100T2_HD | MII_BMSR_100T2_FD | + MII_BMSR_10T_HD | MII_BMSR_10T_FD | + MII_BMSR_100TX_HD | MII_BMSR_100TX_FD | MII_BMSR_100T4; etsec_update_irq(etsec); } diff --git a/hw/net/fsl_etsec/etsec.h b/hw/net/fsl_etsec/etsec.h index 3c625c955c..3860864a3f 100644 --- a/hw/net/fsl_etsec/etsec.h +++ b/hw/net/fsl_etsec/etsec.h @@ -76,23 +76,6 @@ typedef struct eTSEC_rxtx_bd { #define FCB_TX_CTU (1 << 1) #define FCB_TX_NPH (1 << 0) -/* PHY Status Register */ -#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ -#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ -#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ -#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ -#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ -#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ -#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ -#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ -#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ -#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ -#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ -#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ -#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ -#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ - /* eTSEC */ /* Number of register in the device */ diff --git a/hw/net/fsl_etsec/miim.c b/hw/net/fsl_etsec/miim.c index 6bba01c82a..b48d2cb57b 100644 --- a/hw/net/fsl_etsec/miim.c +++ b/hw/net/fsl_etsec/miim.c @@ -23,6 +23,7 @@ */ #include "qemu/osdep.h" +#include "hw/net/mii.h" #include "etsec.h" #include "registers.h" @@ -140,8 +141,8 @@ void etsec_miim_link_status(eTSEC *etsec, NetClientState *nc) { /* Set link status */ if (nc->link_down) { - etsec->phy_status &= ~MII_SR_LINK_STATUS; + etsec->phy_status &= ~MII_BMSR_LINK_ST; } else { - etsec->phy_status |= MII_SR_LINK_STATUS; + etsec->phy_status |= MII_BMSR_LINK_ST; } } diff --git a/hw/net/igb.c b/hw/net/igb.c new file mode 100644 index 0000000000..1c989d7677 --- /dev/null +++ b/hw/net/igb.c @@ -0,0 +1,635 @@ +/* + * QEMU Intel 82576 SR/IOV Ethernet Controller Emulation + * + * Datasheet: + * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf + * + * Copyright (c) 2020-2023 Red Hat, Inc. + * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Akihiko Odaki + * Gal Hammmer + * Marcel Apfelbaum + * Dmitry Fleytman + * Leonid Bloch + * Yan Vugenfirer + * + * Based on work done by: + * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. + * Copyright (c) 2008 Qumranet + * Based on work done by: + * Copyright (c) 2007 Dan Aloni + * Copyright (c) 2004 Antony T Curtis + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "net/eth.h" +#include "net/net.h" +#include "net/tap.h" +#include "qemu/module.h" +#include "qemu/range.h" +#include "sysemu/sysemu.h" +#include "hw/hw.h" +#include "hw/net/mii.h" +#include "hw/pci/pci.h" +#include "hw/pci/pcie.h" +#include "hw/pci/pcie_sriov.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +#include "igb_common.h" +#include "igb_core.h" + +#include "trace.h" +#include "qapi/error.h" +#include "qom/object.h" + +#define TYPE_IGB "igb" +OBJECT_DECLARE_SIMPLE_TYPE(IGBState, IGB) + +struct IGBState { + PCIDevice parent_obj; + NICState *nic; + NICConf conf; + + MemoryRegion mmio; + MemoryRegion flash; + MemoryRegion io; + MemoryRegion msix; + + uint32_t ioaddr; + + IGBCore core; +}; + +#define IGB_CAP_SRIOV_OFFSET (0x160) +#define IGB_VF_OFFSET (0x80) +#define IGB_VF_STRIDE (2) + +#define E1000E_MMIO_IDX 0 +#define E1000E_FLASH_IDX 1 +#define E1000E_IO_IDX 2 +#define E1000E_MSIX_IDX 3 + +#define E1000E_MMIO_SIZE (128 * KiB) +#define E1000E_FLASH_SIZE (128 * KiB) +#define E1000E_IO_SIZE (32) +#define E1000E_MSIX_SIZE (16 * KiB) + +static void igb_write_config(PCIDevice *dev, uint32_t addr, + uint32_t val, int len) +{ + IGBState *s = IGB(dev); + + trace_igb_write_config(addr, val, len); + pci_default_write_config(dev, addr, val, len); + + if (range_covers_byte(addr, len, PCI_COMMAND) && + (dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { + igb_start_recv(&s->core); + } +} + +uint64_t +igb_mmio_read(void *opaque, hwaddr addr, unsigned size) +{ + IGBState *s = opaque; + return igb_core_read(&s->core, addr, size); +} + +void +igb_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +{ + IGBState *s = opaque; + igb_core_write(&s->core, addr, val, size); +} + +static bool +igb_io_get_reg_index(IGBState *s, uint32_t *idx) +{ + if (s->ioaddr < 0x1FFFF) { + *idx = s->ioaddr; + return true; + } + + if (s->ioaddr < 0x7FFFF) { + trace_e1000e_wrn_io_addr_undefined(s->ioaddr); + return false; + } + + if (s->ioaddr < 0xFFFFF) { + trace_e1000e_wrn_io_addr_flash(s->ioaddr); + return false; + } + + trace_e1000e_wrn_io_addr_unknown(s->ioaddr); + return false; +} + +static uint64_t +igb_io_read(void *opaque, hwaddr addr, unsigned size) +{ + IGBState *s = opaque; + uint32_t idx = 0; + uint64_t val; + + switch (addr) { + case E1000_IOADDR: + trace_e1000e_io_read_addr(s->ioaddr); + return s->ioaddr; + case E1000_IODATA: + if (igb_io_get_reg_index(s, &idx)) { + val = igb_core_read(&s->core, idx, sizeof(val)); + trace_e1000e_io_read_data(idx, val); + return val; + } + return 0; + default: + trace_e1000e_wrn_io_read_unknown(addr); + return 0; + } +} + +static void +igb_io_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +{ + IGBState *s = opaque; + uint32_t idx = 0; + + switch (addr) { + case E1000_IOADDR: + trace_e1000e_io_write_addr(val); + s->ioaddr = (uint32_t) val; + return; + case E1000_IODATA: + if (igb_io_get_reg_index(s, &idx)) { + trace_e1000e_io_write_data(idx, val); + igb_core_write(&s->core, idx, val, sizeof(val)); + } + return; + default: + trace_e1000e_wrn_io_write_unknown(addr); + return; + } +} + +static const MemoryRegionOps mmio_ops = { + .read = igb_mmio_read, + .write = igb_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps io_ops = { + .read = igb_io_read, + .write = igb_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + 
.impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static bool +igb_nc_can_receive(NetClientState *nc) +{ + IGBState *s = qemu_get_nic_opaque(nc); + return igb_can_receive(&s->core); +} + +static ssize_t +igb_nc_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) +{ + IGBState *s = qemu_get_nic_opaque(nc); + return igb_receive_iov(&s->core, iov, iovcnt); +} + +static ssize_t +igb_nc_receive(NetClientState *nc, const uint8_t *buf, size_t size) +{ + IGBState *s = qemu_get_nic_opaque(nc); + return igb_receive(&s->core, buf, size); +} + +static void +igb_set_link_status(NetClientState *nc) +{ + IGBState *s = qemu_get_nic_opaque(nc); + igb_core_set_link_status(&s->core); +} + +static NetClientInfo net_igb_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = igb_nc_can_receive, + .receive = igb_nc_receive, + .receive_iov = igb_nc_receive_iov, + .link_status_changed = igb_set_link_status, +}; + +/* + * EEPROM (NVM) contents documented in section 6.1, table 6-1: + * and in 6.10 Software accessed words. + */ +static const uint16_t igb_eeprom_template[] = { + /* Address |Compat.|OEM sp.| ImRev | OEM sp. */ + 0x0000, 0x0000, 0x0000, 0x0d34, 0xffff, 0x2010, 0xffff, 0xffff, + /* PBA |ICtrl1 | SSID | SVID | DevID |-------|ICtrl2 */ + 0x1040, 0xffff, 0x002b, 0x0000, 0x8086, 0x10c9, 0x0000, 0x70c3, + /* SwPin0| DevID | EESZ |-------|ICtrl3 |PCI-tc | MSIX | APtr */ + 0x0004, 0x10c9, 0x5c00, 0x0000, 0x2880, 0x0014, 0x4a40, 0x0060, + /* PCIe Init. Conf 1,2,3 |PCICtrl| LD1,3 |DDevID |DevRev | LD0,2 */ + 0x6cfb, 0xc7b0, 0x0abe, 0x0403, 0x0783, 0x10a6, 0x0001, 0x0602, + /* SwPin1| FunC |LAN-PWR|ManHwC |ICtrl3 | IOVct |VDevID |-------*/ + 0x0004, 0x0020, 0x0000, 0x004a, 0x2080, 0x00f5, 0x10ca, 0x0000, + /*---------------| LD1,3 | LD0,2 | ROEnd | ROSta | Wdog | VPD */ + 0x0000, 0x0000, 0x4784, 0x4602, 0x0000, 0x0000, 0x1000, 0xffff, + /* PCSet0| Ccfg0 |PXEver |IBAcap |PCSet1 | Ccfg1 |iSCVer | ?? */ + 0x0100, 0x4000, 0x131f, 0x4013, 0x0100, 0x4000, 0xffff, 0xffff, + /* PCSet2| Ccfg2 |PCSet3 | Ccfg3 | ?? |AltMacP| ?? |CHKSUM */ + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x00e0, 0xffff, 0x0000, + /* NC-SIC */ + 0x0003, +}; + +static void igb_core_realize(IGBState *s) +{ + s->core.owner = &s->parent_obj; + s->core.owner_nic = s->nic; +} + +static void +igb_init_msix(IGBState *s) +{ + int i, res; + + res = msix_init(PCI_DEVICE(s), IGB_MSIX_VEC_NUM, + &s->msix, + E1000E_MSIX_IDX, 0, + &s->msix, + E1000E_MSIX_IDX, 0x2000, + 0x70, NULL); + + if (res < 0) { + trace_e1000e_msix_init_fail(res); + } else { + for (i = 0; i < IGB_MSIX_VEC_NUM; i++) { + msix_vector_use(PCI_DEVICE(s), i); + } + } +} + +static void +igb_cleanup_msix(IGBState *s) +{ + msix_unuse_all_vectors(PCI_DEVICE(s)); + msix_uninit(PCI_DEVICE(s), &s->msix, &s->msix); +} + +static void +igb_init_net_peer(IGBState *s, PCIDevice *pci_dev, uint8_t *macaddr) +{ + DeviceState *dev = DEVICE(pci_dev); + NetClientState *nc; + int i; + + s->nic = qemu_new_nic(&net_igb_info, &s->conf, + object_get_typename(OBJECT(s)), dev->id, s); + + s->core.max_queue_num = s->conf.peers.queues ? 
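/* max_queue_num is the highest usable subqueue index, not a queue count */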
s->conf.peers.queues - 1 : 0; + + trace_e1000e_mac_set_permanent(MAC_ARG(macaddr)); + memcpy(s->core.permanent_mac, macaddr, sizeof(s->core.permanent_mac)); + + qemu_format_nic_info_str(qemu_get_queue(s->nic), macaddr); + + /* Setup virtio headers */ + for (i = 0; i < s->conf.peers.queues; i++) { + nc = qemu_get_subqueue(s->nic, i); + if (!nc->peer || !qemu_has_vnet_hdr(nc->peer)) { + trace_e1000e_cfg_support_virtio(false); + return; + } + } + + trace_e1000e_cfg_support_virtio(true); + s->core.has_vnet = true; + + for (i = 0; i < s->conf.peers.queues; i++) { + nc = qemu_get_subqueue(s->nic, i); + qemu_set_vnet_hdr_len(nc->peer, sizeof(struct virtio_net_hdr)); + qemu_using_vnet_hdr(nc->peer, true); + } +} + +static int +igb_add_pm_capability(PCIDevice *pdev, uint8_t offset, uint16_t pmc) +{ + Error *local_err = NULL; + int ret = pci_add_capability(pdev, PCI_CAP_ID_PM, offset, + PCI_PM_SIZEOF, &local_err); + + if (local_err) { + error_report_err(local_err); + return ret; + } + + pci_set_word(pdev->config + offset + PCI_PM_PMC, + PCI_PM_CAP_VER_1_1 | + pmc); + + pci_set_word(pdev->wmask + offset + PCI_PM_CTRL, + PCI_PM_CTRL_STATE_MASK | + PCI_PM_CTRL_PME_ENABLE | + PCI_PM_CTRL_DATA_SEL_MASK); + + pci_set_word(pdev->w1cmask + offset + PCI_PM_CTRL, + PCI_PM_CTRL_PME_STATUS); + + return ret; +} + +static void igb_pci_realize(PCIDevice *pci_dev, Error **errp) +{ + IGBState *s = IGB(pci_dev); + uint8_t *macaddr; + int ret; + + trace_e1000e_cb_pci_realize(); + + pci_dev->config_write = igb_write_config; + + pci_dev->config[PCI_CACHE_LINE_SIZE] = 0x10; + pci_dev->config[PCI_INTERRUPT_PIN] = 1; + + /* Define IO/MMIO regions */ + memory_region_init_io(&s->mmio, OBJECT(s), &mmio_ops, s, + "igb-mmio", E1000E_MMIO_SIZE); + pci_register_bar(pci_dev, E1000E_MMIO_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio); + + /* + * We provide a dummy implementation for the flash BAR + * for drivers that may theoretically probe for its presence. 
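+ * The BAR is an empty container region, so accesses reach no device state.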
+ */ + memory_region_init(&s->flash, OBJECT(s), + "igb-flash", E1000E_FLASH_SIZE); + pci_register_bar(pci_dev, E1000E_FLASH_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &s->flash); + + memory_region_init_io(&s->io, OBJECT(s), &io_ops, s, + "igb-io", E1000E_IO_SIZE); + pci_register_bar(pci_dev, E1000E_IO_IDX, + PCI_BASE_ADDRESS_SPACE_IO, &s->io); + + memory_region_init(&s->msix, OBJECT(s), "igb-msix", + E1000E_MSIX_SIZE); + pci_register_bar(pci_dev, E1000E_MSIX_IDX, + PCI_BASE_ADDRESS_MEM_TYPE_64, &s->msix); + + /* Create networking backend */ + qemu_macaddr_default_if_unset(&s->conf.macaddr); + macaddr = s->conf.macaddr.a; + + /* Add PCI capabilities in reverse order */ + assert(pcie_endpoint_cap_init(pci_dev, 0xa0) > 0); + + igb_init_msix(s); + + ret = msi_init(pci_dev, 0x50, 1, true, true, NULL); + if (ret) { + trace_e1000e_msi_init_fail(ret); + } + + if (igb_add_pm_capability(pci_dev, 0x40, PCI_PM_CAP_DSI) < 0) { + hw_error("Failed to initialize PM capability"); + } + + /* PCIe extended capabilities (in order) */ + if (pcie_aer_init(pci_dev, 1, 0x100, 0x40, errp) < 0) { + hw_error("Failed to initialize AER capability"); + } + + pcie_ari_init(pci_dev, 0x150, 1); + + pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, TYPE_IGBVF, + IGB_82576_VF_DEV_ID, IGB_MAX_VF_FUNCTIONS, IGB_MAX_VF_FUNCTIONS, + IGB_VF_OFFSET, IGB_VF_STRIDE); + + pcie_sriov_pf_init_vf_bar(pci_dev, IGBVF_MMIO_BAR_IDX, + PCI_BASE_ADDRESS_MEM_TYPE_64 | PCI_BASE_ADDRESS_MEM_PREFETCH, + IGBVF_MMIO_SIZE); + pcie_sriov_pf_init_vf_bar(pci_dev, IGBVF_MSIX_BAR_IDX, + PCI_BASE_ADDRESS_MEM_TYPE_64 | PCI_BASE_ADDRESS_MEM_PREFETCH, + IGBVF_MSIX_SIZE); + + igb_init_net_peer(s, pci_dev, macaddr); + + /* Initialize core */ + igb_core_realize(s); + + igb_core_pci_realize(&s->core, + igb_eeprom_template, + sizeof(igb_eeprom_template), + macaddr); +} + +static void igb_pci_uninit(PCIDevice *pci_dev) +{ + IGBState *s = IGB(pci_dev); + + trace_e1000e_cb_pci_uninit(); + + igb_core_pci_uninit(&s->core); + + pcie_sriov_pf_exit(pci_dev); + pcie_cap_exit(pci_dev); + + qemu_del_nic(s->nic); + + igb_cleanup_msix(s); + msi_uninit(pci_dev); +} + +static void igb_qdev_reset_hold(Object *obj) +{ + PCIDevice *d = PCI_DEVICE(obj); + IGBState *s = IGB(obj); + + trace_e1000e_cb_qdev_reset_hold(); + + pcie_sriov_pf_disable_vfs(d); + igb_core_reset(&s->core); +} + +static int igb_pre_save(void *opaque) +{ + IGBState *s = opaque; + + trace_e1000e_cb_pre_save(); + + igb_core_pre_save(&s->core); + + return 0; +} + +static int igb_post_load(void *opaque, int version_id) +{ + IGBState *s = opaque; + + trace_e1000e_cb_post_load(); + return igb_core_post_load(&s->core); +} + +static const VMStateDescription igb_vmstate_tx_ctx = { + .name = "igb-tx-ctx", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(vlan_macip_lens, struct e1000_adv_tx_context_desc), + VMSTATE_UINT32(seqnum_seed, struct e1000_adv_tx_context_desc), + VMSTATE_UINT32(type_tucmd_mlhl, struct e1000_adv_tx_context_desc), + VMSTATE_UINT32(mss_l4len_idx, struct e1000_adv_tx_context_desc), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription igb_vmstate_tx = { + .name = "igb-tx", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(ctx, struct igb_tx, 2, 0, igb_vmstate_tx_ctx, + struct e1000_adv_tx_context_desc), + VMSTATE_UINT32(first_cmd_type_len, struct igb_tx), + VMSTATE_UINT32(first_olinfo_status, struct igb_tx), + VMSTATE_BOOL(first, struct igb_tx), + VMSTATE_BOOL(skip_cp, struct igb_tx), + 
VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription igb_vmstate_intr_timer = { + .name = "igb-intr-timer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(timer, IGBIntrDelayTimer), + VMSTATE_BOOL(running, IGBIntrDelayTimer), + VMSTATE_END_OF_LIST() + } +}; + +#define VMSTATE_IGB_INTR_DELAY_TIMER(_f, _s) \ + VMSTATE_STRUCT(_f, _s, 0, \ + igb_vmstate_intr_timer, IGBIntrDelayTimer) + +#define VMSTATE_IGB_INTR_DELAY_TIMER_ARRAY(_f, _s, _num) \ + VMSTATE_STRUCT_ARRAY(_f, _s, _num, 0, \ + igb_vmstate_intr_timer, IGBIntrDelayTimer) + +static const VMStateDescription igb_vmstate = { + .name = "igb", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = igb_pre_save, + .post_load = igb_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, IGBState), + VMSTATE_MSIX(parent_obj, IGBState), + + VMSTATE_UINT32(ioaddr, IGBState), + VMSTATE_UINT8(core.rx_desc_len, IGBState), + VMSTATE_UINT16_ARRAY(core.eeprom, IGBState, IGB_EEPROM_SIZE), + VMSTATE_UINT16_ARRAY(core.phy, IGBState, MAX_PHY_REG_ADDRESS + 1), + VMSTATE_UINT32_ARRAY(core.mac, IGBState, E1000E_MAC_SIZE), + VMSTATE_UINT8_ARRAY(core.permanent_mac, IGBState, ETH_ALEN), + + VMSTATE_IGB_INTR_DELAY_TIMER_ARRAY(core.eitr, IGBState, + IGB_INTR_NUM), + + VMSTATE_UINT32_ARRAY(core.eitr_guest_value, IGBState, IGB_INTR_NUM), + + VMSTATE_STRUCT_ARRAY(core.tx, IGBState, IGB_NUM_QUEUES, 0, + igb_vmstate_tx, struct igb_tx), + + VMSTATE_INT64(core.timadj, IGBState), + + VMSTATE_END_OF_LIST() + } +}; + +static Property igb_properties[] = { + DEFINE_NIC_PROPERTIES(IGBState, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void igb_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(class); + ResettableClass *rc = RESETTABLE_CLASS(class); + PCIDeviceClass *c = PCI_DEVICE_CLASS(class); + + c->realize = igb_pci_realize; + c->exit = igb_pci_uninit; + c->vendor_id = PCI_VENDOR_ID_INTEL; + c->device_id = E1000_DEV_ID_82576; + c->revision = 1; + c->class_id = PCI_CLASS_NETWORK_ETHERNET; + + rc->phases.hold = igb_qdev_reset_hold; + + dc->desc = "Intel 82576 Gigabit Ethernet Controller"; + dc->vmsd = &igb_vmstate; + + device_class_set_props(dc, igb_properties); + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); +} + +static void igb_instance_init(Object *obj) +{ + IGBState *s = IGB(obj); + device_add_bootindex_property(obj, &s->conf.bootindex, + "bootindex", "/ethernet-phy@0", + DEVICE(obj)); +} + +static const TypeInfo igb_info = { + .name = TYPE_IGB, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(IGBState), + .class_init = igb_class_init, + .instance_init = igb_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static void igb_register_types(void) +{ + type_register_static(&igb_info); +} + +type_init(igb_register_types) diff --git a/hw/net/igb_common.h b/hw/net/igb_common.h new file mode 100644 index 0000000000..5c261ba9d3 --- /dev/null +++ b/hw/net/igb_common.h @@ -0,0 +1,156 @@ +/* + * QEMU igb emulation - shared definitions + * + * Copyright (c) 2020-2023 Red Hat, Inc. + * Copyright (c) 2008 Qumranet + * + * Based on work done by: + * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. 
+ * Copyright (c) 2007 Dan Aloni + * Copyright (c) 2004 Antony T Curtis + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef HW_NET_IGB_COMMON_H +#define HW_NET_IGB_COMMON_H + +#include "igb_regs.h" + +#define TYPE_IGBVF "igbvf" + +#define IGBVF_MMIO_BAR_IDX (0) +#define IGBVF_MSIX_BAR_IDX (3) + +#define IGBVF_MMIO_SIZE (16 * 1024) +#define IGBVF_MSIX_SIZE (16 * 1024) + +#define defreg(x) x = (E1000_##x >> 2) +#define defreg_indexed(x, i) x##i = (E1000_##x(i) >> 2) +#define defreg_indexeda(x, i) x##i##_A = (E1000_##x##_A(i) >> 2) + +#define defregd(x) defreg_indexed(x, 0), defreg_indexed(x, 1), \ + defreg_indexed(x, 2), defreg_indexed(x, 3), \ + defreg_indexed(x, 4), defreg_indexed(x, 5), \ + defreg_indexed(x, 6), defreg_indexed(x, 7), \ + defreg_indexed(x, 8), defreg_indexed(x, 9), \ + defreg_indexed(x, 10), defreg_indexed(x, 11), \ + defreg_indexed(x, 12), defreg_indexed(x, 13), \ + defreg_indexed(x, 14), defreg_indexed(x, 15), \ + defreg_indexeda(x, 0), defreg_indexeda(x, 1), \ + defreg_indexeda(x, 2), defreg_indexeda(x, 3) + +#define defreg8(x) defreg_indexed(x, 0), defreg_indexed(x, 1), \ + defreg_indexed(x, 2), defreg_indexed(x, 3), \ + defreg_indexed(x, 4), defreg_indexed(x, 5), \ + defreg_indexed(x, 6), defreg_indexed(x, 7) + +enum { + defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC), + defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC), + defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC), + defreg(MPC), defreg(RCTL), + defreg(STATUS), defreg(SWSM), defreg(TCTL), + defreg(TORH), defreg(TORL), defreg(TOTH), + defreg(TOTL), defreg(TPR), defreg(TPT), + defreg(WUFC), defreg(RA), defreg(MTA), defreg(CRCERRS), + defreg(VFTA), defreg(VET), + defreg(SCC), defreg(ECOL), + defreg(MCC), defreg(LATECOL), defreg(COLC), defreg(DC), + defreg(TNCRS), defreg(RLEC), + defreg(XONRXC), defreg(XONTXC), defreg(XOFFRXC), defreg(XOFFTXC), + defreg(FCRUC), defreg(TDFH), defreg(TDFT), + defreg(TDFHS), defreg(TDFTS), defreg(TDFPC), defreg(WUC), + defreg(WUS), defreg(RDFH), + defreg(RDFT), defreg(RDFHS), defreg(RDFTS), defreg(RDFPC), + defreg(IPAV), defreg(IP4AT), defreg(IP6AT), + defreg(WUPM), defreg(FFMT), + defreg(IAM), + defreg(GCR), defreg(TIMINCA), defreg(EIAC), defreg(CTRL_EXT), + defreg(IVAR0), defreg(MANC2H), + defreg(MFVAL), defreg(MDEF), defreg(FACTPS), defreg(FTFT), + defreg(RUC), defreg(ROC), defreg(RFC), defreg(RJC), + defreg(PRC64), defreg(PRC127), defreg(PRC255), defreg(PRC511), + defreg(PRC1023), defreg(PRC1522), defreg(PTC64), defreg(PTC127), + defreg(PTC255), defreg(PTC511), defreg(PTC1023), defreg(PTC1522), + defreg(GORCL), defreg(GORCH), defreg(GOTCL), defreg(GOTCH), + defreg(RNBC), defreg(BPRC), defreg(MPRC), defreg(RFCTL), + defreg(MPTC), defreg(BPTC), + defreg(IAC), defreg(MGTPRC), defreg(MGTPDC), defreg(MGTPTC), + defreg(TSCTC), defreg(RXCSUM), defreg(FUNCTAG), defreg(GSCL_1), + defreg(GSCL_2), defreg(GSCL_3), defreg(GSCL_4), defreg(GSCN_0), + defreg(GSCN_1),
defreg(GSCN_2), defreg(GSCN_3), + defreg_indexed(EITR, 0), + defreg(MRQC), defreg(RETA), defreg(RSSRK), + defreg(PBACLR), defreg(FCAL), defreg(FCAH), defreg(FCT), + defreg(FCRTH), defreg(FCRTL), defreg(FCTTV), defreg(FCRTV), + defreg(FLA), defreg(FLOP), + defreg(MAVTV0), defreg(MAVTV1), defreg(MAVTV2), defreg(MAVTV3), + defreg(TXSTMPL), defreg(TXSTMPH), defreg(SYSTIML), defreg(SYSTIMH), + defreg(TIMADJL), defreg(TIMADJH), + defreg(RXSTMPH), defreg(RXSTMPL), defreg(RXSATRL), defreg(RXSATRH), + defreg(TIPG), + defreg(CTRL_DUP), + defreg(EEMNGCTL), + defreg(EEMNGDATA), + defreg(FLMNGCTL), + defreg(FLMNGDATA), + defreg(FLMNGCNT), + defreg(TSYNCRXCTL), + defreg(TSYNCTXCTL), + defreg(RLPML), + defreg(UTA), + + /* Aliases */ + defreg(RDFH_A), defreg(RDFT_A), defreg(TDFH_A), defreg(TDFT_A), + defreg(RA_A), defreg(VFTA_A), defreg(FCRTL_A), + + /* Additional regs used by IGB */ + defreg(FWSM), defreg(SW_FW_SYNC), + + defreg(EICS), defreg(EIMS), defreg(EIMC), defreg(EIAM), + defreg(EICR), defreg(IVAR_MISC), defreg(GPIE), + + defreg(TSYNCRXCFG), defreg8(ETQF), + + defreg(RXPBS), defregd(RDBAL), defregd(RDBAH), defregd(RDLEN), + defregd(SRRCTL), defregd(RDH), defregd(RDT), + defregd(RXDCTL), defregd(RXCTL), defregd(RQDPC), defreg(RA2), + + defreg(TXPBS), defreg(TCTL_EXT), defreg(DTXCTL), defreg(HTCBDPC), + defregd(TDBAL), defregd(TDBAH), defregd(TDLEN), defregd(TDH), + defregd(TDT), defregd(TXDCTL), defregd(TXCTL), + defregd(TDWBAL), defregd(TDWBAH), + + defreg(VT_CTL), + + defreg8(P2VMAILBOX), defreg8(V2PMAILBOX), defreg(MBVFICR), defreg(MBVFIMR), + defreg(VFLRE), defreg(VFRE), defreg(VFTE), defreg(WVBR), + defreg(QDE), defreg(DTXSWC), defreg_indexed(VLVF, 0), + defreg8(VMOLR), defreg(RPLOLR), defreg8(VMBMEM), defreg8(VMVIR), + + defreg8(PVTCTRL), defreg8(PVTEICS), defreg8(PVTEIMS), defreg8(PVTEIMC), + defreg8(PVTEIAC), defreg8(PVTEIAM), defreg8(PVTEICR), defreg8(PVFGPRC), + defreg8(PVFGPTC), defreg8(PVFGORC), defreg8(PVFGOTC), defreg8(PVFMPRC), + defreg8(PVFGPRLBC), defreg8(PVFGPTLBC), defreg8(PVFGORLBC), defreg8(PVFGOTLBC), + + defreg(MTA_A), + + defreg(VTIVAR), defreg(VTIVAR_MISC), +}; + +uint64_t igb_mmio_read(void *opaque, hwaddr addr, unsigned size); +void igb_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size); + +#endif diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c new file mode 100644 index 0000000000..d00b1caa6a --- /dev/null +++ b/hw/net/igb_core.c @@ -0,0 +1,4269 @@ +/* + * Core code for QEMU igb emulation + * + * Datasheet: + * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf + * + * Copyright (c) 2020-2023 Red Hat, Inc. + * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Akihiko Odaki + * Gal Hammer + * Marcel Apfelbaum + * Dmitry Fleytman + * Leonid Bloch + * Yan Vugenfirer + * + * Based on work done by: + * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. + * Copyright (c) 2008 Qumranet + * Based on work done by: + * Copyright (c) 2007 Dan Aloni + * Copyright (c) 2004 Antony T Curtis + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version.
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "net/net.h" +#include "net/tap.h" +#include "hw/net/mii.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "sysemu/runstate.h" + +#include "net_tx_pkt.h" +#include "net_rx_pkt.h" + +#include "igb_common.h" +#include "e1000x_common.h" +#include "igb_core.h" + +#include "trace.h" + +#define E1000E_MAX_TX_FRAGS (64) + +union e1000_rx_desc_union { + struct e1000_rx_desc legacy; + union e1000_adv_rx_desc adv; +}; + +typedef struct IGBTxPktVmdqCallbackContext { + IGBCore *core; + NetClientState *nc; +} IGBTxPktVmdqCallbackContext; + +typedef struct L2Header { + struct eth_header eth; + struct vlan_header vlan[2]; +} L2Header; + +typedef struct PTP2 { + uint8_t message_id_transport_specific; + uint8_t version_ptp; + uint16_t message_length; + uint8_t subdomain_number; + uint8_t reserved0; + uint16_t flags; + uint64_t correction; + uint8_t reserved1[5]; + uint8_t source_communication_technology; + uint32_t source_uuid_lo; + uint16_t source_uuid_hi; + uint16_t source_port_id; + uint16_t sequence_id; + uint8_t control; + uint8_t log_message_period; +} PTP2; + +static ssize_t +igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt, + bool has_vnet, bool *external_tx); + +static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes); +static void igb_reset(IGBCore *core, bool sw); + +static inline void +igb_raise_legacy_irq(IGBCore *core) +{ + trace_e1000e_irq_legacy_notify(true); + e1000x_inc_reg_if_not_full(core->mac, IAC); + pci_set_irq(core->owner, 1); +} + +static inline void +igb_lower_legacy_irq(IGBCore *core) +{ + trace_e1000e_irq_legacy_notify(false); + pci_set_irq(core->owner, 0); +} + +static void igb_msix_notify(IGBCore *core, unsigned int cause) +{ + PCIDevice *dev = core->owner; + uint16_t vfn; + uint32_t effective_eiac; + unsigned int vector; + + /* EICR causes are handed out to the VFs from the top down: each enabled VF owns IGBVF_MSIX_VEC_NUM consecutive causes and the PF keeps the low ones. */ + vfn = 8 - (cause + 2) / IGBVF_MSIX_VEC_NUM; + if (vfn < pcie_sriov_num_vfs(core->owner)) { + dev = pcie_sriov_get_vf_at_index(core->owner, vfn); + assert(dev); + vector = (cause + 2) % IGBVF_MSIX_VEC_NUM; + } else if (cause >= IGB_MSIX_VEC_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, + "igb: Tried to use vector unavailable for PF"); + return; + } else { + vector = cause; + } + + msix_notify(dev, vector); + + trace_e1000e_irq_icr_clear_eiac(core->mac[EICR], core->mac[EIAC]); + effective_eiac = core->mac[EIAC] & BIT(cause); + core->mac[EICR] &= ~effective_eiac; +} + +static inline void +igb_intrmgr_rearm_timer(IGBIntrDelayTimer *timer) +{ + int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] * + timer->delay_resolution_ns; + + trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns); + + timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns); + + timer->running = true; +} + +static void +igb_intmgr_timer_resume(IGBIntrDelayTimer *timer) +{ + if (timer->running) { + igb_intrmgr_rearm_timer(timer); + } +} + +static void +igb_intmgr_timer_pause(IGBIntrDelayTimer *timer) +{ + if (timer->running) { + timer_del(timer->timer); + } +} + +static void +igb_intrmgr_on_msix_throttling_timer(void *opaque) +{ + IGBIntrDelayTimer *timer = 
opaque; + int idx = timer - &timer->core->eitr[0]; + + timer->running = false; + + trace_e1000e_irq_msix_notify_postponed_vec(idx); + igb_msix_notify(timer->core, idx); +} + +static void +igb_intrmgr_initialize_all_timers(IGBCore *core, bool create) +{ + int i; + + for (i = 0; i < IGB_INTR_NUM; i++) { + core->eitr[i].core = core; + core->eitr[i].delay_reg = EITR0 + i; + core->eitr[i].delay_resolution_ns = E1000_INTR_DELAY_NS_RES; + } + + if (!create) { + return; + } + + for (i = 0; i < IGB_INTR_NUM; i++) { + core->eitr[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + igb_intrmgr_on_msix_throttling_timer, + &core->eitr[i]); + } +} + +static void +igb_intrmgr_resume(IGBCore *core) +{ + int i; + + for (i = 0; i < IGB_INTR_NUM; i++) { + igb_intmgr_timer_resume(&core->eitr[i]); + } +} + +static void +igb_intrmgr_pause(IGBCore *core) +{ + int i; + + for (i = 0; i < IGB_INTR_NUM; i++) { + igb_intmgr_timer_pause(&core->eitr[i]); + } +} + +static void +igb_intrmgr_reset(IGBCore *core) +{ + int i; + + for (i = 0; i < IGB_INTR_NUM; i++) { + if (core->eitr[i].running) { + timer_del(core->eitr[i].timer); + igb_intrmgr_on_msix_throttling_timer(&core->eitr[i]); + } + } +} + +static void +igb_intrmgr_pci_unint(IGBCore *core) +{ + int i; + + for (i = 0; i < IGB_INTR_NUM; i++) { + timer_free(core->eitr[i].timer); + } +} + +static void +igb_intrmgr_pci_realize(IGBCore *core) +{ + igb_intrmgr_initialize_all_timers(core, true); +} + +static inline bool +igb_rx_csum_enabled(IGBCore *core) +{ + return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true; +} + +static inline bool +igb_rx_use_legacy_descriptor(IGBCore *core) +{ + /* + * TODO: If SRRCTL[n],DESCTYPE = 000b, the 82576 uses the legacy Rx + * descriptor. + */ + return false; +} + +static inline bool +igb_rss_enabled(IGBCore *core) +{ + return (core->mac[MRQC] & 3) == E1000_MRQC_ENABLE_RSS_MQ && + !igb_rx_csum_enabled(core) && + !igb_rx_use_legacy_descriptor(core); +} + +typedef struct E1000E_RSSInfo_st { + bool enabled; + uint32_t hash; + uint32_t queue; + uint32_t type; +} E1000E_RSSInfo; + +static uint32_t +igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt) +{ + bool hasip4, hasip6; + EthL4HdrProto l4hdr_proto; + + assert(igb_rss_enabled(core)); + + net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); + + if (hasip4) { + trace_e1000e_rx_rss_ip4(l4hdr_proto, core->mac[MRQC], + E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]), + E1000_MRQC_EN_IPV4(core->mac[MRQC])); + + if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && + E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) { + return E1000_MRQ_RSS_TYPE_IPV4TCP; + } + + if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP && + (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV4_UDP)) { + return E1000_MRQ_RSS_TYPE_IPV4UDP; + } + + if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) { + return E1000_MRQ_RSS_TYPE_IPV4; + } + } else if (hasip6) { + eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt); + + bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS; + bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS; + + /* + * Following two traces must not be combined because resulting + * event will have 11 arguments totally and some trace backends + * (at least "ust") have limitation of maximum 10 arguments per + * event. Events with more arguments fail to compile for + * backends like these. 
+ */ + trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]); + trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, l4hdr_proto, + ip6info->has_ext_hdrs, + ip6info->rss_ex_dst_valid, + ip6info->rss_ex_src_valid, + core->mac[MRQC], + E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]), + E1000_MRQC_EN_IPV6EX(core->mac[MRQC]), + E1000_MRQC_EN_IPV6(core->mac[MRQC])); + + if ((!ex_dis || !ip6info->has_ext_hdrs) && + (!new_ex_dis || !(ip6info->rss_ex_dst_valid || + ip6info->rss_ex_src_valid))) { + + if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && + E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) { + return E1000_MRQ_RSS_TYPE_IPV6TCPEX; + } + + if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP && + (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV6_UDP)) { + return E1000_MRQ_RSS_TYPE_IPV6UDP; + } + + if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) { + return E1000_MRQ_RSS_TYPE_IPV6EX; + } + + } + + if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) { + return E1000_MRQ_RSS_TYPE_IPV6; + } + + } + + return E1000_MRQ_RSS_TYPE_NONE; +} + +static uint32_t +igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info) +{ + NetRxPktRssType type; + + assert(igb_rss_enabled(core)); + + switch (info->type) { + case E1000_MRQ_RSS_TYPE_IPV4: + type = NetPktRssIpV4; + break; + case E1000_MRQ_RSS_TYPE_IPV4TCP: + type = NetPktRssIpV4Tcp; + break; + case E1000_MRQ_RSS_TYPE_IPV6TCPEX: + type = NetPktRssIpV6TcpEx; + break; + case E1000_MRQ_RSS_TYPE_IPV6: + type = NetPktRssIpV6; + break; + case E1000_MRQ_RSS_TYPE_IPV6EX: + type = NetPktRssIpV6Ex; + break; + case E1000_MRQ_RSS_TYPE_IPV4UDP: + type = NetPktRssIpV4Udp; + break; + case E1000_MRQ_RSS_TYPE_IPV6UDP: + type = NetPktRssIpV6Udp; + break; + default: + assert(false); + return 0; + } + + return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]); +} + +static void +igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx, + E1000E_RSSInfo *info) +{ + trace_e1000e_rx_rss_started(); + + if (tx || !igb_rss_enabled(core)) { + info->enabled = false; + info->hash = 0; + info->queue = 0; + info->type = 0; + trace_e1000e_rx_rss_disabled(); + return; + } + + info->enabled = true; + + info->type = igb_rss_get_hash_type(core, pkt); + + trace_e1000e_rx_rss_type(info->type); + + if (info->type == E1000_MRQ_RSS_TYPE_NONE) { + info->hash = 0; + info->queue = 0; + return; + } + + info->hash = igb_rss_calc_hash(core, pkt, info); + info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash); +} + +static void +igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx, + uint16_t vlan, bool insert_vlan) +{ + if (core->mac[MRQC] & 1) { + uint16_t pool = qn % IGB_NUM_VM_POOLS; + + if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_DEFAULT) { + /* always insert default VLAN */ + insert_vlan = true; + vlan = core->mac[VMVIR0 + pool] & 0xffff; + } else if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_NEVER) { + insert_vlan = false; + } + } + + if (insert_vlan) { + net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan, + core->mac[VET] & 0xffff); + } +} + +static bool +igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx) +{ + uint32_t idx = (tx->first_olinfo_status >> 4) & 1; + + if (tx->first_cmd_type_len & E1000_ADVTXD_DCMD_TSE) { + uint32_t mss = tx->ctx[idx].mss_l4len_idx >> E1000_ADVTXD_MSS_SHIFT; + if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, mss)) { + return false; + } + + net_tx_pkt_update_ip_checksums(tx->tx_pkt); + e1000x_inc_reg_if_not_full(core->mac, TSCTC); + return true; + } + + if ((tx->first_olinfo_status & E1000_ADVTXD_POTS_TXSM) && + !((tx->ctx[idx].type_tucmd_mlhl & 
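/* SCTP offload computes a CRC32c, not a TCP/UDP pseudo-header checksum */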
E1000_ADVTXD_TUCMD_L4T_SCTP) ? + net_tx_pkt_update_sctp_checksum(tx->tx_pkt) : + net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0))) { + return false; + } + + if (tx->first_olinfo_status & E1000_ADVTXD_POTS_IXSM) { + net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt); + } + + return true; +} + +static void igb_tx_pkt_mac_callback(void *core, + const struct iovec *iov, + int iovcnt, + const struct iovec *virt_iov, + int virt_iovcnt) +{ + igb_receive_internal(core, virt_iov, virt_iovcnt, true, NULL); +} + +static void igb_tx_pkt_vmdq_callback(void *opaque, + const struct iovec *iov, + int iovcnt, + const struct iovec *virt_iov, + int virt_iovcnt) +{ + IGBTxPktVmdqCallbackContext *context = opaque; + bool external_tx; + + igb_receive_internal(context->core, virt_iov, virt_iovcnt, true, + &external_tx); + + if (external_tx) { + if (context->core->has_vnet) { + qemu_sendv_packet(context->nc, virt_iov, virt_iovcnt); + } else { + qemu_sendv_packet(context->nc, iov, iovcnt); + } + } +} + +/* TX Packets Switching (7.10.3.6) */ +static bool igb_tx_pkt_switch(IGBCore *core, struct igb_tx *tx, + NetClientState *nc) +{ + IGBTxPktVmdqCallbackContext context; + + /* TX switching is only used to serve VM to VM traffic. */ + if (!(core->mac[MRQC] & 1)) { + goto send_out; + } + + /* TX switching requires DTXSWC.Loopback_en bit enabled. */ + if (!(core->mac[DTXSWC] & E1000_DTXSWC_VMDQ_LOOPBACK_EN)) { + goto send_out; + } + + context.core = core; + context.nc = nc; + + return net_tx_pkt_send_custom(tx->tx_pkt, false, + igb_tx_pkt_vmdq_callback, &context); + +send_out: + return net_tx_pkt_send(tx->tx_pkt, nc); +} + +static bool +igb_tx_pkt_send(IGBCore *core, struct igb_tx *tx, int queue_index) +{ + int target_queue = MIN(core->max_queue_num, queue_index); + NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue); + + if (!igb_setup_tx_offloads(core, tx)) { + return false; + } + + net_tx_pkt_dump(tx->tx_pkt); + + if ((core->phy[MII_BMCR] & MII_BMCR_LOOPBACK) || + ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) { + return net_tx_pkt_send_custom(tx->tx_pkt, false, + igb_tx_pkt_mac_callback, core); + } else { + return igb_tx_pkt_switch(core, tx, queue); + } +} + +static void +igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt, int qn) +{ + static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511, + PTC1023, PTC1522 }; + + size_t tot_len = net_tx_pkt_get_total_len(tx_pkt) + 4; + + e1000x_increase_size_stats(core->mac, PTCregs, tot_len); + e1000x_inc_reg_if_not_full(core->mac, TPT); + e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len); + + switch (net_tx_pkt_get_packet_type(tx_pkt)) { + case ETH_PKT_BCAST: + e1000x_inc_reg_if_not_full(core->mac, BPTC); + break; + case ETH_PKT_MCAST: + e1000x_inc_reg_if_not_full(core->mac, MPTC); + break; + case ETH_PKT_UCAST: + break; + default: + g_assert_not_reached(); + } + + e1000x_inc_reg_if_not_full(core->mac, GPTC); + e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len); + + if (core->mac[MRQC] & 1) { + uint16_t pool = qn % IGB_NUM_VM_POOLS; + + core->mac[PVFGOTC0 + (pool * 64)] += tot_len; + core->mac[PVFGPTC0 + (pool * 64)]++; + } +} + +static void +igb_process_tx_desc(IGBCore *core, + PCIDevice *dev, + struct igb_tx *tx, + union e1000_adv_tx_desc *tx_desc, + int queue_index) +{ + struct e1000_adv_tx_context_desc *tx_ctx_desc; + uint32_t cmd_type_len; + uint32_t idx; + uint64_t buffer_addr; + uint16_t length; + + cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len); + + if (cmd_type_len & E1000_ADVTXD_DCMD_DEXT) 
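/* DEXT set: advanced (context or data) descriptor; clear: legacy format */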
{ + if ((cmd_type_len & E1000_ADVTXD_DTYP_DATA) == + E1000_ADVTXD_DTYP_DATA) { + /* advanced transmit data descriptor */ + if (tx->first) { + tx->first_cmd_type_len = cmd_type_len; + tx->first_olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status); + tx->first = false; + } + } else if ((cmd_type_len & E1000_ADVTXD_DTYP_CTXT) == + E1000_ADVTXD_DTYP_CTXT) { + /* advanced transmit context descriptor */ + tx_ctx_desc = (struct e1000_adv_tx_context_desc *)tx_desc; + idx = (le32_to_cpu(tx_ctx_desc->mss_l4len_idx) >> 4) & 1; + tx->ctx[idx].vlan_macip_lens = le32_to_cpu(tx_ctx_desc->vlan_macip_lens); + tx->ctx[idx].seqnum_seed = le32_to_cpu(tx_ctx_desc->seqnum_seed); + tx->ctx[idx].type_tucmd_mlhl = le32_to_cpu(tx_ctx_desc->type_tucmd_mlhl); + tx->ctx[idx].mss_l4len_idx = le32_to_cpu(tx_ctx_desc->mss_l4len_idx); + return; + } else { + /* unknown descriptor type */ + return; + } + } else { + /* legacy descriptor */ + + /* TODO: Implement a support for legacy descriptors (7.2.2.1). */ + } + + buffer_addr = le64_to_cpu(tx_desc->read.buffer_addr); + length = cmd_type_len & 0xFFFF; + + if (!tx->skip_cp) { + if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, dev, + buffer_addr, length)) { + tx->skip_cp = true; + } + } + + if (cmd_type_len & E1000_TXD_CMD_EOP) { + if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) { + idx = (tx->first_olinfo_status >> 4) & 1; + igb_tx_insert_vlan(core, queue_index, tx, + tx->ctx[idx].vlan_macip_lens >> IGB_TX_FLAGS_VLAN_SHIFT, + !!(tx->first_cmd_type_len & E1000_TXD_CMD_VLE)); + + if ((tx->first_cmd_type_len & E1000_ADVTXD_MAC_TSTAMP) && + (core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_ENABLED) && + !(core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_VALID)) { + core->mac[TSYNCTXCTL] |= E1000_TSYNCTXCTL_VALID; + e1000x_timestamp(core->mac, core->timadj, TXSTMPL, TXSTMPH); + } + + if (igb_tx_pkt_send(core, tx, queue_index)) { + igb_on_tx_done_update_stats(core, tx->tx_pkt, queue_index); + } + } + + tx->first = true; + tx->skip_cp = false; + net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, dev); + } +} + +static uint32_t igb_tx_wb_eic(IGBCore *core, int queue_idx) +{ + uint32_t n, ent = 0; + + n = igb_ivar_entry_tx(queue_idx); + ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff; + + return (ent & E1000_IVAR_VALID) ? BIT(ent & 0x1f) : 0; +} + +static uint32_t igb_rx_wb_eic(IGBCore *core, int queue_idx) +{ + uint32_t n, ent = 0; + + n = igb_ivar_entry_rx(queue_idx); + ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff; + + return (ent & E1000_IVAR_VALID) ? 
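/* bits 4:0 of the IVAR entry select the EICR cause bit */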
BIT(ent & 0x1f) : 0; +} + +typedef struct E1000E_RingInfo_st { + int dbah; + int dbal; + int dlen; + int dh; + int dt; + int idx; +} E1000E_RingInfo; + +static inline bool +igb_ring_empty(IGBCore *core, const E1000E_RingInfo *r) +{ + return core->mac[r->dh] == core->mac[r->dt] || + core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN; +} + +static inline uint64_t +igb_ring_base(IGBCore *core, const E1000E_RingInfo *r) +{ + uint64_t bah = core->mac[r->dbah]; + uint64_t bal = core->mac[r->dbal]; + + return (bah << 32) + bal; +} + +static inline uint64_t +igb_ring_head_descr(IGBCore *core, const E1000E_RingInfo *r) +{ + return igb_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh]; +} + +static inline void +igb_ring_advance(IGBCore *core, const E1000E_RingInfo *r, uint32_t count) +{ + core->mac[r->dh] += count; + + if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) { + core->mac[r->dh] = 0; + } +} + +static inline uint32_t +igb_ring_free_descr_num(IGBCore *core, const E1000E_RingInfo *r) +{ + trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen], + core->mac[r->dh], core->mac[r->dt]); + + if (core->mac[r->dh] <= core->mac[r->dt]) { + return core->mac[r->dt] - core->mac[r->dh]; + } + + if (core->mac[r->dh] > core->mac[r->dt]) { + return core->mac[r->dlen] / E1000_RING_DESC_LEN + + core->mac[r->dt] - core->mac[r->dh]; + } + + g_assert_not_reached(); + return 0; +} + +static inline bool +igb_ring_enabled(IGBCore *core, const E1000E_RingInfo *r) +{ + return core->mac[r->dlen] > 0; +} + +typedef struct IGB_TxRing_st { + const E1000E_RingInfo *i; + struct igb_tx *tx; +} IGB_TxRing; + +static inline int +igb_mq_queue_idx(int base_reg_idx, int reg_idx) +{ + return (reg_idx - base_reg_idx) / 16; +} + +static inline void +igb_tx_ring_init(IGBCore *core, IGB_TxRing *txr, int idx) +{ + static const E1000E_RingInfo i[IGB_NUM_QUEUES] = { + { TDBAH0, TDBAL0, TDLEN0, TDH0, TDT0, 0 }, + { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 }, + { TDBAH2, TDBAL2, TDLEN2, TDH2, TDT2, 2 }, + { TDBAH3, TDBAL3, TDLEN3, TDH3, TDT3, 3 }, + { TDBAH4, TDBAL4, TDLEN4, TDH4, TDT4, 4 }, + { TDBAH5, TDBAL5, TDLEN5, TDH5, TDT5, 5 }, + { TDBAH6, TDBAL6, TDLEN6, TDH6, TDT6, 6 }, + { TDBAH7, TDBAL7, TDLEN7, TDH7, TDT7, 7 }, + { TDBAH8, TDBAL8, TDLEN8, TDH8, TDT8, 8 }, + { TDBAH9, TDBAL9, TDLEN9, TDH9, TDT9, 9 }, + { TDBAH10, TDBAL10, TDLEN10, TDH10, TDT10, 10 }, + { TDBAH11, TDBAL11, TDLEN11, TDH11, TDT11, 11 }, + { TDBAH12, TDBAL12, TDLEN12, TDH12, TDT12, 12 }, + { TDBAH13, TDBAL13, TDLEN13, TDH13, TDT13, 13 }, + { TDBAH14, TDBAL14, TDLEN14, TDH14, TDT14, 14 }, + { TDBAH15, TDBAL15, TDLEN15, TDH15, TDT15, 15 } + }; + + assert(idx < ARRAY_SIZE(i)); + + txr->i = &i[idx]; + txr->tx = &core->tx[idx]; +} + +typedef struct E1000E_RxRing_st { + const E1000E_RingInfo *i; +} E1000E_RxRing; + +static inline void +igb_rx_ring_init(IGBCore *core, E1000E_RxRing *rxr, int idx) +{ + static const E1000E_RingInfo i[IGB_NUM_QUEUES] = { + { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 }, + { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 }, + { RDBAH2, RDBAL2, RDLEN2, RDH2, RDT2, 2 }, + { RDBAH3, RDBAL3, RDLEN3, RDH3, RDT3, 3 }, + { RDBAH4, RDBAL4, RDLEN4, RDH4, RDT4, 4 }, + { RDBAH5, RDBAL5, RDLEN5, RDH5, RDT5, 5 }, + { RDBAH6, RDBAL6, RDLEN6, RDH6, RDT6, 6 }, + { RDBAH7, RDBAL7, RDLEN7, RDH7, RDT7, 7 }, + { RDBAH8, RDBAL8, RDLEN8, RDH8, RDT8, 8 }, + { RDBAH9, RDBAL9, RDLEN9, RDH9, RDT9, 9 }, + { RDBAH10, RDBAL10, RDLEN10, RDH10, RDT10, 10 }, + { RDBAH11, RDBAL11, RDLEN11, RDH11, RDT11, 11 }, + { RDBAH12, RDBAL12, RDLEN12, RDH12, RDT12, 
12 }, + { RDBAH13, RDBAL13, RDLEN13, RDH13, RDT13, 13 }, + { RDBAH14, RDBAL14, RDLEN14, RDH14, RDT14, 14 }, + { RDBAH15, RDBAL15, RDLEN15, RDH15, RDT15, 15 } + }; + + assert(idx < ARRAY_SIZE(i)); + + rxr->i = &i[idx]; +} + +static uint32_t +igb_txdesc_writeback(IGBCore *core, dma_addr_t base, + union e1000_adv_tx_desc *tx_desc, + const E1000E_RingInfo *txi) +{ + PCIDevice *d; + uint32_t cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len); + uint64_t tdwba; + + tdwba = core->mac[E1000_TDWBAL(txi->idx) >> 2]; + tdwba |= (uint64_t)core->mac[E1000_TDWBAH(txi->idx) >> 2] << 32; + + if (!(cmd_type_len & E1000_TXD_CMD_RS)) { + return 0; + } + + d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8); + if (!d) { + d = core->owner; + } + + if (tdwba & 1) { + uint32_t buffer = cpu_to_le32(core->mac[txi->dh]); + pci_dma_write(d, tdwba & ~3, &buffer, sizeof(buffer)); + } else { + uint32_t status = le32_to_cpu(tx_desc->wb.status) | E1000_TXD_STAT_DD; + + tx_desc->wb.status = cpu_to_le32(status); + pci_dma_write(d, base + offsetof(union e1000_adv_tx_desc, wb), + &tx_desc->wb, sizeof(tx_desc->wb)); + } + + return igb_tx_wb_eic(core, txi->idx); +} + +static inline bool +igb_tx_enabled(IGBCore *core, const E1000E_RingInfo *txi) +{ + bool vmdq = core->mac[MRQC] & 1; + uint16_t qn = txi->idx; + uint16_t pool = qn % IGB_NUM_VM_POOLS; + + return (core->mac[TCTL] & E1000_TCTL_EN) && + (!vmdq || core->mac[VFTE] & BIT(pool)) && + (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE); +} + +static void +igb_start_xmit(IGBCore *core, const IGB_TxRing *txr) +{ + PCIDevice *d; + dma_addr_t base; + union e1000_adv_tx_desc desc; + const E1000E_RingInfo *txi = txr->i; + uint32_t eic = 0; + + if (!igb_tx_enabled(core, txi)) { + trace_e1000e_tx_disabled(); + return; + } + + d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8); + if (!d) { + d = core->owner; + } + + while (!igb_ring_empty(core, txi)) { + base = igb_ring_head_descr(core, txi); + + pci_dma_read(d, base, &desc, sizeof(desc)); + + trace_e1000e_tx_descr((void *)(intptr_t)desc.read.buffer_addr, + desc.read.cmd_type_len, desc.wb.status); + + igb_process_tx_desc(core, d, txr->tx, &desc, txi->idx); + igb_ring_advance(core, txi, 1); + eic |= igb_txdesc_writeback(core, base, &desc, txi); + } + + if (eic) { + igb_raise_interrupts(core, EICR, eic); + igb_raise_interrupts(core, ICR, E1000_ICR_TXDW); + } + + net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, d); +} + +static uint32_t +igb_rxbufsize(IGBCore *core, const E1000E_RingInfo *r) +{ + uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2]; + uint32_t bsizepkt = srrctl & E1000_SRRCTL_BSIZEPKT_MASK; + if (bsizepkt) { + return bsizepkt << E1000_SRRCTL_BSIZEPKT_SHIFT; + } + + return e1000x_rxbufsize(core->mac[RCTL]); +} + +static bool +igb_has_rxbufs(IGBCore *core, const E1000E_RingInfo *r, size_t total_size) +{ + uint32_t bufs = igb_ring_free_descr_num(core, r); + uint32_t bufsize = igb_rxbufsize(core, r); + + trace_e1000e_rx_has_buffers(r->idx, bufs, total_size, bufsize); + + return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) * + bufsize; +} + +void +igb_start_recv(IGBCore *core) +{ + int i; + + trace_e1000e_rx_start_recv(); + + for (i = 0; i <= core->max_queue_num; i++) { + qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i)); + } +} + +bool +igb_can_receive(IGBCore *core) +{ + int i; + + if (!e1000x_rx_ready(core->owner, core->mac)) { + return false; + } + + for (i = 0; i < IGB_NUM_QUEUES; i++) { + E1000E_RxRing rxr; + if (!(core->mac[RXDCTL0 + (i * 
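/* per-queue registers are spaced 0x40 bytes, i.e. 16 dwords, apart */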
16)] & E1000_RXDCTL_QUEUE_ENABLE)) { + continue; + } + + igb_rx_ring_init(core, &rxr, i); + if (igb_ring_enabled(core, rxr.i) && igb_has_rxbufs(core, rxr.i, 1)) { + trace_e1000e_rx_can_recv(); + return true; + } + } + + trace_e1000e_rx_can_recv_rings_full(); + return false; +} + +ssize_t +igb_receive(IGBCore *core, const uint8_t *buf, size_t size) +{ + const struct iovec iov = { + .iov_base = (uint8_t *)buf, + .iov_len = size + }; + + return igb_receive_iov(core, &iov, 1); +} + +static inline bool +igb_rx_l3_cso_enabled(IGBCore *core) +{ + return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD); +} + +static inline bool +igb_rx_l4_cso_enabled(IGBCore *core) +{ + return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD); +} + +static bool igb_rx_is_oversized(IGBCore *core, const struct eth_header *ehdr, + size_t size, size_t vlan_num, + bool lpe, uint16_t rlpml) +{ + size_t vlan_header_size = sizeof(struct vlan_header) * vlan_num; + size_t header_size = sizeof(struct eth_header) + vlan_header_size; + return lpe ? size + ETH_FCS_LEN > rlpml : size > header_size + ETH_MTU; +} + +static uint16_t igb_receive_assign(IGBCore *core, const struct iovec *iov, + size_t iovcnt, size_t iov_ofs, + const L2Header *l2_header, size_t size, + E1000E_RSSInfo *rss_info, + uint16_t *etqf, bool *ts, bool *external_tx) +{ + static const int ta_shift[] = { 4, 3, 2, 0 }; + const struct eth_header *ehdr = &l2_header->eth; + uint32_t f, ra[2], *macp, rctl = core->mac[RCTL]; + uint16_t queues = 0; + uint16_t oversized = 0; + size_t vlan_num = 0; + PTP2 ptp2; + bool lpe; + uint16_t rlpml; + int i; + + memset(rss_info, 0, sizeof(E1000E_RSSInfo)); + *ts = false; + + if (external_tx) { + *external_tx = true; + } + + if (core->mac[CTRL_EXT] & BIT(26)) { + if (be16_to_cpu(ehdr->h_proto) == core->mac[VET] >> 16 && + be16_to_cpu(l2_header->vlan[0].h_proto) == (core->mac[VET] & 0xffff)) { + vlan_num = 2; + } + } else { + if (be16_to_cpu(ehdr->h_proto) == (core->mac[VET] & 0xffff)) { + vlan_num = 1; + } + } + + lpe = !!(core->mac[RCTL] & E1000_RCTL_LPE); + rlpml = core->mac[RLPML]; + if (!(core->mac[RCTL] & E1000_RCTL_SBP) && + igb_rx_is_oversized(core, ehdr, size, vlan_num, lpe, rlpml)) { + trace_e1000x_rx_oversized(size); + return queues; + } + + for (*etqf = 0; *etqf < 8; (*etqf)++) { + if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_FILTER_ENABLE) && + be16_to_cpu(ehdr->h_proto) == (core->mac[ETQF0 + *etqf] & E1000_ETQF_ETYPE_MASK)) { + if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_1588) && + (core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_ENABLED) && + !(core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_VALID) && + iov_to_buf(iov, iovcnt, iov_ofs + ETH_HLEN, &ptp2, sizeof(ptp2)) >= sizeof(ptp2) && + (ptp2.version_ptp & 15) == 2 && + ptp2.message_id_transport_specific == ((core->mac[TSYNCRXCFG] >> 8) & 255)) { + e1000x_timestamp(core->mac, core->timadj, RXSTMPL, RXSTMPH); + *ts = true; + core->mac[TSYNCRXCTL] |= E1000_TSYNCRXCTL_VALID; + core->mac[RXSATRL] = le32_to_cpu(ptp2.source_uuid_lo); + core->mac[RXSATRH] = le16_to_cpu(ptp2.source_uuid_hi) | + (le16_to_cpu(ptp2.sequence_id) << 16); + } + break; + } + } + + if (vlan_num && + !e1000x_rx_vlan_filter(core->mac, l2_header->vlan + vlan_num - 1)) { + return queues; + } + + if (core->mac[MRQC] & 1) { + if (is_broadcast_ether_addr(ehdr->h_dest)) { + for (i = 0; i < IGB_NUM_VM_POOLS; i++) { + if (core->mac[VMOLR0 + i] & E1000_VMOLR_BAM) { + queues |= BIT(i); + } + } + } else { + for (macp = core->mac + RA; macp < core->mac + RA + 32; macp += 2) { + if (!(macp[1] & E1000_RAH_AV)) { + continue; + } + ra[0] = 
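/* convert to little-endian so memcmp() sees the MAC bytes in wire order */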
cpu_to_le32(macp[0]); + ra[1] = cpu_to_le32(macp[1]); + if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) { + queues |= (macp[1] & E1000_RAH_POOL_MASK) / E1000_RAH_POOL_1; + } + } + + for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) { + if (!(macp[1] & E1000_RAH_AV)) { + continue; + } + ra[0] = cpu_to_le32(macp[0]); + ra[1] = cpu_to_le32(macp[1]); + if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) { + queues |= (macp[1] & E1000_RAH_POOL_MASK) / E1000_RAH_POOL_1; + } + } + + if (!queues) { + macp = core->mac + (is_multicast_ether_addr(ehdr->h_dest) ? MTA : UTA); + + f = ta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3]; + f = (((ehdr->h_dest[5] << 8) | ehdr->h_dest[4]) >> f) & 0xfff; + if (macp[f >> 5] & (1 << (f & 0x1f))) { + for (i = 0; i < IGB_NUM_VM_POOLS; i++) { + if (core->mac[VMOLR0 + i] & E1000_VMOLR_ROMPE) { + queues |= BIT(i); + } + } + } + } else if (is_unicast_ether_addr(ehdr->h_dest) && external_tx) { + *external_tx = false; + } + } + + if (e1000x_vlan_rx_filter_enabled(core->mac)) { + uint16_t mask = 0; + + if (vlan_num) { + uint16_t vid = be16_to_cpu(l2_header->vlan[vlan_num - 1].h_tci) & VLAN_VID_MASK; + + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { + if ((core->mac[VLVF0 + i] & E1000_VLVF_VLANID_MASK) == vid && + (core->mac[VLVF0 + i] & E1000_VLVF_VLANID_ENABLE)) { + uint32_t poolsel = core->mac[VLVF0 + i] & E1000_VLVF_POOLSEL_MASK; + mask |= poolsel >> E1000_VLVF_POOLSEL_SHIFT; + } + } + } else { + for (i = 0; i < IGB_NUM_VM_POOLS; i++) { + if (core->mac[VMOLR0 + i] & E1000_VMOLR_AUPE) { + mask |= BIT(i); + } + } + } + + queues &= mask; + } + + if (is_unicast_ether_addr(ehdr->h_dest) && !queues && !external_tx && + !(core->mac[VT_CTL] & E1000_VT_CTL_DISABLE_DEF_POOL)) { + uint32_t def_pl = core->mac[VT_CTL] & E1000_VT_CTL_DEFAULT_POOL_MASK; + queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT); + } + + queues &= core->mac[VFRE]; + if (queues) { + for (i = 0; i < IGB_NUM_VM_POOLS; i++) { + lpe = !!(core->mac[VMOLR0 + i] & E1000_VMOLR_LPE); + rlpml = core->mac[VMOLR0 + i] & E1000_VMOLR_RLPML_MASK; + if ((queues & BIT(i)) && + igb_rx_is_oversized(core, ehdr, size, vlan_num, + lpe, rlpml)) { + oversized |= BIT(i); + } + } + /* 8.19.37 increment ROC if packet is oversized for all queues */ + if (oversized == queues) { + trace_e1000x_rx_oversized(size); + e1000x_inc_reg_if_not_full(core->mac, ROC); + } + queues &= ~oversized; + } + + if (queues) { + igb_rss_parse_packet(core, core->rx_pkt, + external_tx != NULL, rss_info); + /* Sec 8.26.1: PQn = VFn + VQn*8 */ + if (rss_info->queue & 1) { + for (i = 0; i < IGB_NUM_VM_POOLS; i++) { + if ((queues & BIT(i)) && + (core->mac[VMOLR0 + i] & E1000_VMOLR_RSSE)) { + queues |= BIT(i + IGB_NUM_VM_POOLS); + queues &= ~BIT(i); + } + } + } + } + } else { + bool accepted = e1000x_rx_group_filter(core->mac, ehdr); + if (!accepted) { + for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) { + if (!(macp[1] & E1000_RAH_AV)) { + continue; + } + ra[0] = cpu_to_le32(macp[0]); + ra[1] = cpu_to_le32(macp[1]); + if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) { + trace_e1000x_rx_flt_ucast_match((int)(macp - core->mac - RA2) / 2, + MAC_ARG(ehdr->h_dest)); + + accepted = true; + break; + } + } + } + + if (accepted) { + igb_rss_parse_packet(core, core->rx_pkt, false, rss_info); + queues = BIT(rss_info->queue); + } + } + + return queues; +} + +static inline void +igb_read_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc, + hwaddr *buff_addr) +{ + *buff_addr = le64_to_cpu(desc->buffer_addr); +} + +static 
inline void +igb_read_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc, + hwaddr *buff_addr) +{ + *buff_addr = le64_to_cpu(desc->read.pkt_addr); +} + +static inline void +igb_read_rx_descr(IGBCore *core, union e1000_rx_desc_union *desc, + hwaddr *buff_addr) +{ + if (igb_rx_use_legacy_descriptor(core)) { + igb_read_lgcy_rx_descr(core, &desc->legacy, buff_addr); + } else { + igb_read_adv_rx_descr(core, &desc->adv, buff_addr); + } +} + +static void +igb_verify_csum_in_sw(IGBCore *core, + struct NetRxPkt *pkt, + uint32_t *status_flags, + EthL4HdrProto l4hdr_proto) +{ + bool csum_valid; + uint32_t csum_error; + + if (igb_rx_l3_cso_enabled(core)) { + if (!net_rx_pkt_validate_l3_csum(pkt, &csum_valid)) { + trace_e1000e_rx_metadata_l3_csum_validation_failed(); + } else { + csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_IPE; + *status_flags |= E1000_RXD_STAT_IPCS | csum_error; + } + } else { + trace_e1000e_rx_metadata_l3_cso_disabled(); + } + + if (!igb_rx_l4_cso_enabled(core)) { + trace_e1000e_rx_metadata_l4_cso_disabled(); + return; + } + + if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) { + trace_e1000e_rx_metadata_l4_csum_validation_failed(); + return; + } + + csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_TCPE; + *status_flags |= E1000_RXD_STAT_TCPCS | csum_error; + + if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP) { + *status_flags |= E1000_RXD_STAT_UDPCS; + } +} + +static void +igb_build_rx_metadata(IGBCore *core, + struct NetRxPkt *pkt, + bool is_eop, + const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts, + uint16_t *pkt_info, uint16_t *hdr_info, + uint32_t *rss, + uint32_t *status_flags, + uint16_t *ip_id, + uint16_t *vlan_tag) +{ + struct virtio_net_hdr *vhdr; + bool hasip4, hasip6, csum_valid; + EthL4HdrProto l4hdr_proto; + + *status_flags = E1000_RXD_STAT_DD; + + /* No additional metadata needed for non-EOP descriptors */ + /* TODO: EOP apply only to status so don't skip whole function. */ + if (!is_eop) { + goto func_exit; + } + + *status_flags |= E1000_RXD_STAT_EOP; + + net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); + trace_e1000e_rx_metadata_protocols(hasip4, hasip6, l4hdr_proto); + + /* VLAN state */ + if (net_rx_pkt_is_vlan_stripped(pkt)) { + *status_flags |= E1000_RXD_STAT_VP; + *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt)); + trace_e1000e_rx_metadata_vlan(*vlan_tag); + } + + /* Packet parsing results */ + if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) { + if (rss_info->enabled) { + *rss = cpu_to_le32(rss_info->hash); + trace_igb_rx_metadata_rss(*rss); + } + } else if (hasip4) { + *status_flags |= E1000_RXD_STAT_IPIDV; + *ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt)); + trace_e1000e_rx_metadata_ip_id(*ip_id); + } + + if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && net_rx_pkt_is_tcp_ack(pkt)) { + *status_flags |= E1000_RXD_STAT_ACK; + trace_e1000e_rx_metadata_ack(); + } + + if (pkt_info) { + *pkt_info = rss_info->enabled ? 
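/* bits 3:0 of pkt_info carry the RSS hash type */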
rss_info->type : 0; + + if (etqf < 8) { + *pkt_info |= (BIT(11) | etqf) << 4; + } else { + if (hasip4) { + *pkt_info |= E1000_ADVRXD_PKT_IP4; + } + + if (hasip6) { + *pkt_info |= E1000_ADVRXD_PKT_IP6; + } + + switch (l4hdr_proto) { + case ETH_L4_HDR_PROTO_TCP: + *pkt_info |= E1000_ADVRXD_PKT_TCP; + break; + + case ETH_L4_HDR_PROTO_UDP: + *pkt_info |= E1000_ADVRXD_PKT_UDP; + break; + + case ETH_L4_HDR_PROTO_SCTP: + *pkt_info |= E1000_ADVRXD_PKT_SCTP; + break; + + default: + break; + } + } + } + + if (hdr_info) { + *hdr_info = 0; + } + + if (ts) { + *status_flags |= BIT(16); + } + + /* RX CSO information */ + if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) { + trace_e1000e_rx_metadata_ipv6_sum_disabled(); + goto func_exit; + } + + vhdr = net_rx_pkt_get_vhdr(pkt); + + if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) && + !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) { + trace_e1000e_rx_metadata_virthdr_no_csum_info(); + igb_verify_csum_in_sw(core, pkt, status_flags, l4hdr_proto); + goto func_exit; + } + + if (igb_rx_l3_cso_enabled(core)) { + *status_flags |= hasip4 ? E1000_RXD_STAT_IPCS : 0; + } else { + trace_e1000e_rx_metadata_l3_cso_disabled(); + } + + if (igb_rx_l4_cso_enabled(core)) { + switch (l4hdr_proto) { + case ETH_L4_HDR_PROTO_SCTP: + if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) { + trace_e1000e_rx_metadata_l4_csum_validation_failed(); + goto func_exit; + } + if (!csum_valid) { + *status_flags |= E1000_RXDEXT_STATERR_TCPE; + } + /* fall through */ + case ETH_L4_HDR_PROTO_TCP: + *status_flags |= E1000_RXD_STAT_TCPCS; + break; + + case ETH_L4_HDR_PROTO_UDP: + *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS; + break; + + default: + break; + } + } else { + trace_e1000e_rx_metadata_l4_cso_disabled(); + } + +func_exit: + trace_e1000e_rx_metadata_status_flags(*status_flags); + *status_flags = cpu_to_le32(*status_flags); +} + +static inline void +igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc, + struct NetRxPkt *pkt, + const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts, + uint16_t length) +{ + uint32_t status_flags, rss; + uint16_t ip_id; + + assert(!rss_info->enabled); + desc->length = cpu_to_le16(length); + desc->csum = 0; + + igb_build_rx_metadata(core, pkt, pkt != NULL, + rss_info, etqf, ts, + NULL, NULL, &rss, + &status_flags, &ip_id, + &desc->special); + desc->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24); + desc->status = (uint8_t) le32_to_cpu(status_flags); +} + +static inline void +igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc, + struct NetRxPkt *pkt, + const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts, + uint16_t length) +{ + memset(&desc->wb, 0, sizeof(desc->wb)); + + desc->wb.upper.length = cpu_to_le16(length); + + igb_build_rx_metadata(core, pkt, pkt != NULL, + rss_info, etqf, ts, + &desc->wb.lower.lo_dword.pkt_info, + &desc->wb.lower.lo_dword.hdr_info, + &desc->wb.lower.hi_dword.rss, + &desc->wb.upper.status_error, + &desc->wb.lower.hi_dword.csum_ip.ip_id, + &desc->wb.upper.vlan); +} + +static inline void +igb_write_rx_descr(IGBCore *core, union e1000_rx_desc_union *desc, + struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, + uint16_t etqf, bool ts, uint16_t length) +{ + if (igb_rx_use_legacy_descriptor(core)) { + igb_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info, + etqf, ts, length); + } else { + igb_write_adv_rx_descr(core, &desc->adv, pkt, rss_info, + etqf, ts, length); + } +} + +static inline void +igb_pci_dma_write_rx_desc(IGBCore *core, PCIDevice *dev, dma_addr_t addr, 
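/* DD is written last, in a separate transfer, so the guest never reads a torn descriptor */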
+ union e1000_rx_desc_union *desc, dma_addr_t len) +{ + if (igb_rx_use_legacy_descriptor(core)) { + struct e1000_rx_desc *d = &desc->legacy; + size_t offset = offsetof(struct e1000_rx_desc, status); + uint8_t status = d->status; + + d->status &= ~E1000_RXD_STAT_DD; + pci_dma_write(dev, addr, desc, len); + + if (status & E1000_RXD_STAT_DD) { + d->status = status; + pci_dma_write(dev, addr + offset, &status, sizeof(status)); + } + } else { + union e1000_adv_rx_desc *d = &desc->adv; + size_t offset = + offsetof(union e1000_adv_rx_desc, wb.upper.status_error); + uint32_t status = d->wb.upper.status_error; + + d->wb.upper.status_error &= ~E1000_RXD_STAT_DD; + pci_dma_write(dev, addr, desc, len); + + if (status & E1000_RXD_STAT_DD) { + d->wb.upper.status_error = status; + pci_dma_write(dev, addr + offset, &status, sizeof(status)); + } + } +} + +static void +igb_write_to_rx_buffers(IGBCore *core, + PCIDevice *d, + hwaddr ba, + uint16_t *written, + const char *data, + dma_addr_t data_len) +{ + trace_igb_rx_desc_buff_write(ba, *written, data, data_len); + pci_dma_write(d, ba + *written, data, data_len); + *written += data_len; +} + +static void +igb_update_rx_stats(IGBCore *core, const E1000E_RingInfo *rxi, + size_t pkt_size, size_t pkt_fcs_size) +{ + eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt); + e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size); + + if (core->mac[MRQC] & 1) { + uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS; + + core->mac[PVFGORC0 + (pool * 64)] += pkt_size + 4; + core->mac[PVFGPRC0 + (pool * 64)]++; + if (pkt_type == ETH_PKT_MCAST) { + core->mac[PVFMPRC0 + (pool * 64)]++; + } + } +} + +static inline bool +igb_rx_descr_threshold_hit(IGBCore *core, const E1000E_RingInfo *rxi) +{ + return igb_ring_free_descr_num(core, rxi) == + ((core->mac[E1000_SRRCTL(rxi->idx) >> 2] >> 20) & 31) * 16; +} + +static void +igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt, + const E1000E_RxRing *rxr, + const E1000E_RSSInfo *rss_info, + uint16_t etqf, bool ts) +{ + PCIDevice *d; + dma_addr_t base; + union e1000_rx_desc_union desc; + size_t desc_size; + size_t desc_offset = 0; + size_t iov_ofs = 0; + + struct iovec *iov = net_rx_pkt_get_iovec(pkt); + size_t size = net_rx_pkt_get_total_len(pkt); + size_t total_size = size + e1000x_fcs_len(core->mac); + const E1000E_RingInfo *rxi = rxr->i; + size_t bufsize = igb_rxbufsize(core, rxi); + + d = pcie_sriov_get_vf_at_index(core->owner, rxi->idx % 8); + if (!d) { + d = core->owner; + } + + do { + hwaddr ba; + uint16_t written = 0; + bool is_last = false; + + desc_size = total_size - desc_offset; + + if (desc_size > bufsize) { + desc_size = bufsize; + } + + if (igb_ring_empty(core, rxi)) { + return; + } + + base = igb_ring_head_descr(core, rxi); + + pci_dma_read(d, base, &desc, core->rx_desc_len); + + trace_e1000e_rx_descr(rxi->idx, base, core->rx_desc_len); + + igb_read_rx_descr(core, &desc, &ba); + + if (ba) { + if (desc_offset < size) { + static const uint32_t fcs_pad; + size_t iov_copy; + size_t copy_size = size - desc_offset; + if (copy_size > bufsize) { + copy_size = bufsize; + } + + /* Copy packet payload */ + while (copy_size) { + iov_copy = MIN(copy_size, iov->iov_len - iov_ofs); + + igb_write_to_rx_buffers(core, d, ba, &written, + iov->iov_base + iov_ofs, iov_copy); + + copy_size -= iov_copy; + iov_ofs += iov_copy; + if (iov_ofs == iov->iov_len) { + iov++; + iov_ofs = 0; + } + } + + if (desc_offset + desc_size >= total_size) { + /* Simulate FCS checksum presence in the last descriptor */ + 
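/* e1000x_fcs_len() is 0 when RCTL.SECRC strips the CRC, 4 otherwise */ +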
igb_write_to_rx_buffers(core, d, ba, &written, + (const char *) &fcs_pad, e1000x_fcs_len(core->mac)); + } + } + } else { /* as per intel docs; skip descriptors with null buf addr */ + trace_e1000e_rx_null_descriptor(); + } + desc_offset += desc_size; + if (desc_offset >= total_size) { + is_last = true; + } + + igb_write_rx_descr(core, &desc, is_last ? core->rx_pkt : NULL, + rss_info, etqf, ts, written); + igb_pci_dma_write_rx_desc(core, d, base, &desc, core->rx_desc_len); + + igb_ring_advance(core, rxi, core->rx_desc_len / E1000_MIN_RX_DESC_LEN); + + } while (desc_offset < total_size); + + igb_update_rx_stats(core, rxi, size, total_size); +} + +static bool +igb_rx_strip_vlan(IGBCore *core, const E1000E_RingInfo *rxi) +{ + if (core->mac[MRQC] & 1) { + uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS; + /* Sec 7.10.3.8: CTRL.VME is ignored, only VMOLR/RPLOLR is used */ + return (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) ? + core->mac[RPLOLR] & E1000_RPLOLR_STRVLAN : + core->mac[VMOLR0 + pool] & E1000_VMOLR_STRVLAN; + } + + return e1000x_vlan_enabled(core->mac); +} + +static inline void +igb_rx_fix_l4_csum(IGBCore *core, struct NetRxPkt *pkt) +{ + struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt); + + if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { + net_rx_pkt_fix_l4_csum(pkt); + } +} + +ssize_t +igb_receive_iov(IGBCore *core, const struct iovec *iov, int iovcnt) +{ + return igb_receive_internal(core, iov, iovcnt, core->has_vnet, NULL); +} + +static ssize_t +igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt, + bool has_vnet, bool *external_tx) +{ + uint16_t queues = 0; + uint32_t causes = 0; + uint32_t ecauses = 0; + union { + L2Header l2_header; + uint8_t octets[ETH_ZLEN]; + } buf; + struct iovec min_iov; + size_t size, orig_size; + size_t iov_ofs = 0; + E1000E_RxRing rxr; + E1000E_RSSInfo rss_info; + uint16_t etqf; + bool ts; + size_t total_size; + int strip_vlan_index; + int i; + + trace_e1000e_rx_receive_iov(iovcnt); + + if (external_tx) { + *external_tx = true; + } + + if (!e1000x_hw_rx_enabled(core->mac)) { + return -1; + } + + /* Pull virtio header in */ + if (has_vnet) { + net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt); + iov_ofs = sizeof(struct virtio_net_hdr); + } else { + net_rx_pkt_unset_vhdr(core->rx_pkt); + } + + orig_size = iov_size(iov, iovcnt); + size = orig_size - iov_ofs; + + /* Pad to minimum Ethernet frame length */ + if (size < sizeof(buf)) { + iov_to_buf(iov, iovcnt, iov_ofs, &buf, size); + memset(&buf.octets[size], 0, sizeof(buf) - size); + e1000x_inc_reg_if_not_full(core->mac, RUC); + min_iov.iov_base = &buf; + min_iov.iov_len = size = sizeof(buf); + iovcnt = 1; + iov = &min_iov; + iov_ofs = 0; + } else { + iov_to_buf(iov, iovcnt, iov_ofs, &buf, sizeof(buf.l2_header)); + } + + net_rx_pkt_set_packet_type(core->rx_pkt, + get_eth_packet_type(&buf.l2_header.eth)); + net_rx_pkt_set_protocols(core->rx_pkt, iov, iovcnt, iov_ofs); + + queues = igb_receive_assign(core, iov, iovcnt, iov_ofs, + &buf.l2_header, size, + &rss_info, &etqf, &ts, external_tx); + if (!queues) { + trace_e1000e_rx_flt_dropped(); + return orig_size; + } + + for (i = 0; i < IGB_NUM_QUEUES; i++) { + if (!(queues & BIT(i)) || + !(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) { + continue; + } + + igb_rx_ring_init(core, &rxr, i); + + if (!igb_rx_strip_vlan(core, rxr.i)) { + strip_vlan_index = -1; + } else if (core->mac[CTRL_EXT] & BIT(26)) { + strip_vlan_index = 1; + } else { + strip_vlan_index = 0; + } + + net_rx_pkt_attach_iovec_ex(core->rx_pkt, 
iov, iovcnt, iov_ofs, + strip_vlan_index, + core->mac[VET] & 0xffff, + core->mac[VET] >> 16); + + total_size = net_rx_pkt_get_total_len(core->rx_pkt) + + e1000x_fcs_len(core->mac); + + if (!igb_has_rxbufs(core, rxr.i, total_size)) { + causes |= E1000_ICS_RXO; + trace_e1000e_rx_not_written_to_guest(rxr.i->idx); + continue; + } + + causes |= E1000_ICR_RXDW; + + igb_rx_fix_l4_csum(core, core->rx_pkt); + igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info, etqf, ts); + + /* Check if receive descriptor minimum threshold hit */ + if (igb_rx_descr_threshold_hit(core, rxr.i)) { + causes |= E1000_ICS_RXDMT0; + } + + ecauses |= igb_rx_wb_eic(core, rxr.i->idx); + + trace_e1000e_rx_written_to_guest(rxr.i->idx); + } + + trace_e1000e_rx_interrupt_set(causes); + igb_raise_interrupts(core, EICR, ecauses); + igb_raise_interrupts(core, ICR, causes); + + return orig_size; +} + +static inline bool +igb_have_autoneg(IGBCore *core) +{ + return core->phy[MII_BMCR] & MII_BMCR_AUTOEN; +} + +static void igb_update_flowctl_status(IGBCore *core) +{ + if (igb_have_autoneg(core) && core->phy[MII_BMSR] & MII_BMSR_AN_COMP) { + trace_e1000e_link_autoneg_flowctl(true); + core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE; + } else { + trace_e1000e_link_autoneg_flowctl(false); + } +} + +static inline void +igb_link_down(IGBCore *core) +{ + e1000x_update_regs_on_link_down(core->mac, core->phy); + igb_update_flowctl_status(core); +} + +static inline void +igb_set_phy_ctrl(IGBCore *core, uint16_t val) +{ + /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */ + core->phy[MII_BMCR] = val & ~(0x3f | MII_BMCR_RESET | MII_BMCR_ANRESTART); + + if ((val & MII_BMCR_ANRESTART) && igb_have_autoneg(core)) { + e1000x_restart_autoneg(core->mac, core->phy, core->autoneg_timer); + } +} + +void igb_core_set_link_status(IGBCore *core) +{ + NetClientState *nc = qemu_get_queue(core->owner_nic); + uint32_t old_status = core->mac[STATUS]; + + trace_e1000e_link_status_changed(nc->link_down ? 
false : true); + + if (nc->link_down) { + e1000x_update_regs_on_link_down(core->mac, core->phy); + } else { + if (igb_have_autoneg(core) && + !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) { + e1000x_restart_autoneg(core->mac, core->phy, + core->autoneg_timer); + } else { + e1000x_update_regs_on_link_up(core->mac, core->phy); + igb_start_recv(core); + } + } + + if (core->mac[STATUS] != old_status) { + igb_raise_interrupts(core, ICR, E1000_ICR_LSC); + } +} + +static void +igb_set_ctrl(IGBCore *core, int index, uint32_t val) +{ + trace_e1000e_core_ctrl_write(index, val); + + /* RST is self clearing */ + core->mac[CTRL] = val & ~E1000_CTRL_RST; + core->mac[CTRL_DUP] = core->mac[CTRL]; + + trace_e1000e_link_set_params( + !!(val & E1000_CTRL_ASDE), + (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT, + !!(val & E1000_CTRL_FRCSPD), + !!(val & E1000_CTRL_FRCDPX), + !!(val & E1000_CTRL_RFCE), + !!(val & E1000_CTRL_TFCE)); + + if (val & E1000_CTRL_RST) { + trace_e1000e_core_ctrl_sw_reset(); + igb_reset(core, true); + } + + if (val & E1000_CTRL_PHY_RST) { + trace_e1000e_core_ctrl_phy_reset(); + core->mac[STATUS] |= E1000_STATUS_PHYRA; + } +} + +static void +igb_set_rfctl(IGBCore *core, int index, uint32_t val) +{ + trace_e1000e_rx_set_rfctl(val); + + if (!(val & E1000_RFCTL_ISCSI_DIS)) { + trace_e1000e_wrn_iscsi_filtering_not_supported(); + } + + if (!(val & E1000_RFCTL_NFSW_DIS)) { + trace_e1000e_wrn_nfsw_filtering_not_supported(); + } + + if (!(val & E1000_RFCTL_NFSR_DIS)) { + trace_e1000e_wrn_nfsr_filtering_not_supported(); + } + + core->mac[RFCTL] = val; +} + +static void +igb_calc_rxdesclen(IGBCore *core) +{ + if (igb_rx_use_legacy_descriptor(core)) { + core->rx_desc_len = sizeof(struct e1000_rx_desc); + } else { + core->rx_desc_len = sizeof(union e1000_adv_rx_desc); + } + trace_e1000e_rx_desc_len(core->rx_desc_len); +} + +static void +igb_set_rx_control(IGBCore *core, int index, uint32_t val) +{ + core->mac[RCTL] = val; + trace_e1000e_rx_set_rctl(core->mac[RCTL]); + + if (val & E1000_RCTL_DTYP_MASK) { + qemu_log_mask(LOG_GUEST_ERROR, + "igb: RCTL.DTYP must be zero for compatibility"); + } + + if (val & E1000_RCTL_EN) { + igb_calc_rxdesclen(core); + igb_start_recv(core); + } +} + +static inline bool +igb_postpone_interrupt(IGBIntrDelayTimer *timer) +{ + if (timer->running) { + trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2); + + return true; + } + + if (timer->core->mac[timer->delay_reg] != 0) { + igb_intrmgr_rearm_timer(timer); + } + + return false; +} + +static inline bool +igb_eitr_should_postpone(IGBCore *core, int idx) +{ + return igb_postpone_interrupt(&core->eitr[idx]); +} + +static void igb_send_msix(IGBCore *core, uint32_t causes) +{ + int vector; + + for (vector = 0; vector < IGB_INTR_NUM; ++vector) { + if ((causes & BIT(vector)) && !igb_eitr_should_postpone(core, vector)) { + + trace_e1000e_irq_msix_notify_vec(vector); + igb_msix_notify(core, vector); + } + } +} + +static inline void +igb_fix_icr_asserted(IGBCore *core) +{ + core->mac[ICR] &= ~E1000_ICR_ASSERTED; + if (core->mac[ICR]) { + core->mac[ICR] |= E1000_ICR_ASSERTED; + } + + trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]); +} + +static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes) +{ + uint32_t old_causes = core->mac[ICR] & core->mac[IMS]; + uint32_t old_ecauses = core->mac[EICR] & core->mac[EIMS]; + uint32_t raised_causes; + uint32_t raised_ecauses; + uint32_t int_alloc; + + trace_e1000e_irq_set(index << 2, + core->mac[index], core->mac[index] | causes); + + core->mac[index] |= causes; + + 
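+    /*
+     * With GPIE.MSIX_MODE set, newly raised ICR causes that pass IMS are
+     * mapped through IVAR_MISC into EICR bits, and any resulting EICR/EIMS
+     * hits are delivered as MSI-X vectors. Illustrative example, assuming
+     * the guest programmed IVAR_MISC[15:8] = 0x82 (E1000_IVAR_VALID |
+     * vector 2): an LSC cause raised here sets EICR bit 2 and, if EIMS
+     * bit 2 is unmasked, fires MSI-X vector 2. Without MSIX_MODE, the
+     * legacy ICR/IMS path below selects MSI-X vector 0, MSI or INTx.
+     */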
if (core->mac[GPIE] & E1000_GPIE_MSIX_MODE) { + raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes; + + if (raised_causes & E1000_ICR_DRSTA) { + int_alloc = core->mac[IVAR_MISC] & 0xff; + if (int_alloc & E1000_IVAR_VALID) { + core->mac[EICR] |= BIT(int_alloc & 0x1f); + } + } + /* Check if other bits (excluding the TCP Timer) are enabled. */ + if (raised_causes & ~E1000_ICR_DRSTA) { + int_alloc = (core->mac[IVAR_MISC] >> 8) & 0xff; + if (int_alloc & E1000_IVAR_VALID) { + core->mac[EICR] |= BIT(int_alloc & 0x1f); + } + } + + raised_ecauses = core->mac[EICR] & core->mac[EIMS] & ~old_ecauses; + if (!raised_ecauses) { + return; + } + + igb_send_msix(core, raised_ecauses); + } else { + igb_fix_icr_asserted(core); + + raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes; + if (!raised_causes) { + return; + } + + core->mac[EICR] |= (raised_causes & E1000_ICR_DRSTA) | E1000_EICR_OTHER; + + if (msix_enabled(core->owner)) { + trace_e1000e_irq_msix_notify_vec(0); + msix_notify(core->owner, 0); + } else if (msi_enabled(core->owner)) { + trace_e1000e_irq_msi_notify(raised_causes); + msi_notify(core->owner, 0); + } else { + igb_raise_legacy_irq(core); + } + } +} + +static void igb_lower_interrupts(IGBCore *core, size_t index, uint32_t causes) +{ + trace_e1000e_irq_clear(index << 2, + core->mac[index], core->mac[index] & ~causes); + + core->mac[index] &= ~causes; + + trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], + core->mac[ICR], core->mac[IMS]); + + if (!(core->mac[ICR] & core->mac[IMS]) && + !(core->mac[GPIE] & E1000_GPIE_MSIX_MODE)) { + core->mac[EICR] &= ~E1000_EICR_OTHER; + + if (!msix_enabled(core->owner) && !msi_enabled(core->owner)) { + igb_lower_legacy_irq(core); + } + } +} + +static void igb_set_eics(IGBCore *core, int index, uint32_t val) +{ + bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); + uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK; + + trace_igb_irq_write_eics(val, msix); + igb_raise_interrupts(core, EICR, val & mask); +} + +static void igb_set_eims(IGBCore *core, int index, uint32_t val) +{ + bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); + uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK; + + trace_igb_irq_write_eims(val, msix); + igb_raise_interrupts(core, EIMS, val & mask); +} + +static void mailbox_interrupt_to_vf(IGBCore *core, uint16_t vfn) +{ + uint32_t ent = core->mac[VTIVAR_MISC + vfn]; + uint32_t causes; + + if ((ent & E1000_IVAR_VALID)) { + causes = (ent & 0x3) << (22 - vfn * IGBVF_MSIX_VEC_NUM); + igb_raise_interrupts(core, EICR, causes); + } +} + +static void mailbox_interrupt_to_pf(IGBCore *core) +{ + igb_raise_interrupts(core, ICR, E1000_ICR_VMMB); +} + +static void igb_set_pfmailbox(IGBCore *core, int index, uint32_t val) +{ + uint16_t vfn = index - P2VMAILBOX0; + + trace_igb_set_pfmailbox(vfn, val); + + if (val & E1000_P2VMAILBOX_STS) { + core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFSTS; + mailbox_interrupt_to_vf(core, vfn); + } + + if (val & E1000_P2VMAILBOX_ACK) { + core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFACK; + mailbox_interrupt_to_vf(core, vfn); + } + + /* Buffer Taken by PF (can be set only if the VFU is cleared). 
*/ + if (val & E1000_P2VMAILBOX_PFU) { + if (!(core->mac[index] & E1000_P2VMAILBOX_VFU)) { + core->mac[index] |= E1000_P2VMAILBOX_PFU; + core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFU; + } + } else { + core->mac[index] &= ~E1000_P2VMAILBOX_PFU; + core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_PFU; + } + + if (val & E1000_P2VMAILBOX_RVFU) { + core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_VFU; + core->mac[MBVFICR] &= ~((E1000_MBVFICR_VFACK_VF1 << vfn) | + (E1000_MBVFICR_VFREQ_VF1 << vfn)); + } +} + +static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val) +{ + uint16_t vfn = index - V2PMAILBOX0; + + trace_igb_set_vfmailbox(vfn, val); + + if (val & E1000_V2PMAILBOX_REQ) { + core->mac[MBVFICR] |= E1000_MBVFICR_VFREQ_VF1 << vfn; + mailbox_interrupt_to_pf(core); + } + + if (val & E1000_V2PMAILBOX_ACK) { + core->mac[MBVFICR] |= E1000_MBVFICR_VFACK_VF1 << vfn; + mailbox_interrupt_to_pf(core); + } + + /* Buffer Taken by VF (can be set only if the PFU is cleared). */ + if (val & E1000_V2PMAILBOX_VFU) { + if (!(core->mac[index] & E1000_V2PMAILBOX_PFU)) { + core->mac[index] |= E1000_V2PMAILBOX_VFU; + core->mac[P2VMAILBOX0 + vfn] |= E1000_P2VMAILBOX_VFU; + } + } else { + core->mac[index] &= ~E1000_V2PMAILBOX_VFU; + core->mac[P2VMAILBOX0 + vfn] &= ~E1000_P2VMAILBOX_VFU; + } +} + +static void igb_vf_reset(IGBCore *core, uint16_t vfn) +{ + uint16_t qn0 = vfn; + uint16_t qn1 = vfn + IGB_NUM_VM_POOLS; + + /* disable Rx and Tx for the VF*/ + core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE; + core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE; + core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE; + core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE; + core->mac[VFRE] &= ~BIT(vfn); + core->mac[VFTE] &= ~BIT(vfn); + /* indicate VF reset to PF */ + core->mac[VFLRE] |= BIT(vfn); + /* VFLRE and mailbox use the same interrupt cause */ + mailbox_interrupt_to_pf(core); +} + +static void igb_w1c(IGBCore *core, int index, uint32_t val) +{ + core->mac[index] &= ~val; +} + +static void igb_set_eimc(IGBCore *core, int index, uint32_t val) +{ + bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); + uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK; + + trace_igb_irq_write_eimc(val, msix); + + /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */ + igb_lower_interrupts(core, EIMS, val & mask); +} + +static void igb_set_eiac(IGBCore *core, int index, uint32_t val) +{ + bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); + + if (msix) { + trace_igb_irq_write_eiac(val); + + /* + * TODO: When using IOV, the bits that correspond to MSI-X vectors + * that are assigned to a VF are read-only. + */ + core->mac[EIAC] |= (val & E1000_EICR_MSIX_MASK); + } +} + +static void igb_set_eiam(IGBCore *core, int index, uint32_t val) +{ + bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); + + /* + * TODO: When using IOV, the bits that correspond to MSI-X vectors that + * are assigned to a VF are read-only. + */ + core->mac[EIAM] |= + ~(val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK)); + + trace_igb_irq_write_eiam(val, msix); +} + +static void igb_set_eicr(IGBCore *core, int index, uint32_t val) +{ + bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); + + /* + * TODO: In IOV mode, only bit zero of this vector is available for the PF + * function. + */ + uint32_t mask = msix ? 
E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK; + + trace_igb_irq_write_eicr(val, msix); + igb_lower_interrupts(core, EICR, val & mask); +} + +static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val) +{ + uint16_t vfn; + + if (val & E1000_CTRL_RST) { + vfn = (index - PVTCTRL0) / 0x40; + igb_vf_reset(core, vfn); + } +} + +static void igb_set_vteics(IGBCore *core, int index, uint32_t val) +{ + uint16_t vfn = (index - PVTEICS0) / 0x40; + + core->mac[index] = val; + igb_set_eics(core, EICS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM)); +} + +static void igb_set_vteims(IGBCore *core, int index, uint32_t val) +{ + uint16_t vfn = (index - PVTEIMS0) / 0x40; + + core->mac[index] = val; + igb_set_eims(core, EIMS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM)); +} + +static void igb_set_vteimc(IGBCore *core, int index, uint32_t val) +{ + uint16_t vfn = (index - PVTEIMC0) / 0x40; + + core->mac[index] = val; + igb_set_eimc(core, EIMC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM)); +} + +static void igb_set_vteiac(IGBCore *core, int index, uint32_t val) +{ + uint16_t vfn = (index - PVTEIAC0) / 0x40; + + core->mac[index] = val; + igb_set_eiac(core, EIAC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM)); +} + +static void igb_set_vteiam(IGBCore *core, int index, uint32_t val) +{ + uint16_t vfn = (index - PVTEIAM0) / 0x40; + + core->mac[index] = val; + igb_set_eiam(core, EIAM, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM)); +} + +static void igb_set_vteicr(IGBCore *core, int index, uint32_t val) +{ + uint16_t vfn = (index - PVTEICR0) / 0x40; + + core->mac[index] = val; + igb_set_eicr(core, EICR, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM)); +} + +static void igb_set_vtivar(IGBCore *core, int index, uint32_t val) +{ + uint16_t vfn = (index - VTIVAR); + uint16_t qn = vfn; + uint8_t ent; + int n; + + core->mac[index] = val; + + /* Get assigned vector associated with queue Rx#0. */ + if ((val & E1000_IVAR_VALID)) { + n = igb_ivar_entry_rx(qn); + ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (val & 0x7))); + core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4); + } + + /* Get assigned vector associated with queue Tx#0 */ + ent = val >> 8; + if ((ent & E1000_IVAR_VALID)) { + n = igb_ivar_entry_tx(qn); + ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (ent & 0x7))); + core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4); + } + + /* + * Ignoring assigned vectors associated with queues Rx#1 and Tx#1 for now. 
+ */ +} + +static inline void +igb_autoneg_timer(void *opaque) +{ + IGBCore *core = opaque; + if (!qemu_get_queue(core->owner_nic)->link_down) { + e1000x_update_regs_on_autoneg_done(core->mac, core->phy); + igb_start_recv(core); + + igb_update_flowctl_status(core); + /* signal link status change to the guest */ + igb_raise_interrupts(core, ICR, E1000_ICR_LSC); + } +} + +static inline uint16_t +igb_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr) +{ + uint16_t index = (addr & 0x1ffff) >> 2; + return index + (mac_reg_access[index] & 0xfffe); +} + +static const char igb_phy_regcap[MAX_PHY_REG_ADDRESS + 1] = { + [MII_BMCR] = PHY_RW, + [MII_BMSR] = PHY_R, + [MII_PHYID1] = PHY_R, + [MII_PHYID2] = PHY_R, + [MII_ANAR] = PHY_RW, + [MII_ANLPAR] = PHY_R, + [MII_ANER] = PHY_R, + [MII_ANNP] = PHY_RW, + [MII_ANLPRNP] = PHY_R, + [MII_CTRL1000] = PHY_RW, + [MII_STAT1000] = PHY_R, + [MII_EXTSTAT] = PHY_R, + + [IGP01E1000_PHY_PORT_CONFIG] = PHY_RW, + [IGP01E1000_PHY_PORT_STATUS] = PHY_R, + [IGP01E1000_PHY_PORT_CTRL] = PHY_RW, + [IGP01E1000_PHY_LINK_HEALTH] = PHY_R, + [IGP02E1000_PHY_POWER_MGMT] = PHY_RW, + [IGP01E1000_PHY_PAGE_SELECT] = PHY_W +}; + +static void +igb_phy_reg_write(IGBCore *core, uint32_t addr, uint16_t data) +{ + assert(addr <= MAX_PHY_REG_ADDRESS); + + if (addr == MII_BMCR) { + igb_set_phy_ctrl(core, data); + } else { + core->phy[addr] = data; + } +} + +static void +igb_set_mdic(IGBCore *core, int index, uint32_t val) +{ + uint32_t data = val & E1000_MDIC_DATA_MASK; + uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + + if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */ + val = core->mac[MDIC] | E1000_MDIC_ERROR; + } else if (val & E1000_MDIC_OP_READ) { + if (!(igb_phy_regcap[addr] & PHY_R)) { + trace_igb_core_mdic_read_unhandled(addr); + val |= E1000_MDIC_ERROR; + } else { + val = (val ^ data) | core->phy[addr]; + trace_igb_core_mdic_read(addr, val); + } + } else if (val & E1000_MDIC_OP_WRITE) { + if (!(igb_phy_regcap[addr] & PHY_W)) { + trace_igb_core_mdic_write_unhandled(addr); + val |= E1000_MDIC_ERROR; + } else { + trace_igb_core_mdic_write(addr, data); + igb_phy_reg_write(core, addr, data); + } + } + core->mac[MDIC] = val | E1000_MDIC_READY; + + if (val & E1000_MDIC_INT_EN) { + igb_raise_interrupts(core, ICR, E1000_ICR_MDAC); + } +} + +static void +igb_set_rdt(IGBCore *core, int index, uint32_t val) +{ + core->mac[index] = val & 0xffff; + trace_e1000e_rx_set_rdt(igb_mq_queue_idx(RDT0, index), val); + igb_start_recv(core); +} + +static void +igb_set_status(IGBCore *core, int index, uint32_t val) +{ + if ((val & E1000_STATUS_PHYRA) == 0) { + core->mac[index] &= ~E1000_STATUS_PHYRA; + } +} + +static void +igb_set_ctrlext(IGBCore *core, int index, uint32_t val) +{ + trace_igb_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK), + !!(val & E1000_CTRL_EXT_SPD_BYPS), + !!(val & E1000_CTRL_EXT_PFRSTD)); + + /* Zero self-clearing bits */ + val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST); + core->mac[CTRL_EXT] = val; + + if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PFRSTD) { + for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) { + core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_RSTI; + core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTD; + } + } +} + +static void +igb_set_pbaclr(IGBCore *core, int index, uint32_t val) +{ + int i; + + core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK; + + if (!msix_enabled(core->owner)) { + return; + } + + for (i = 0; i < IGB_INTR_NUM; i++) { + if (core->mac[PBACLR] & BIT(i)) { + 
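+            /*
+             * Each bit written as 1 to PBACLR discards the corresponding
+             * pending (latched but not yet delivered) MSI-X message.
+             */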
msix_clr_pending(core->owner, i); + } + } +} + +static void +igb_set_fcrth(IGBCore *core, int index, uint32_t val) +{ + core->mac[FCRTH] = val & 0xFFF8; +} + +static void +igb_set_fcrtl(IGBCore *core, int index, uint32_t val) +{ + core->mac[FCRTL] = val & 0x8000FFF8; +} + +#define IGB_LOW_BITS_SET_FUNC(num) \ + static void \ + igb_set_##num##bit(IGBCore *core, int index, uint32_t val) \ + { \ + core->mac[index] = val & (BIT(num) - 1); \ + } + +IGB_LOW_BITS_SET_FUNC(4) +IGB_LOW_BITS_SET_FUNC(13) +IGB_LOW_BITS_SET_FUNC(16) + +static void +igb_set_dlen(IGBCore *core, int index, uint32_t val) +{ + core->mac[index] = val & 0xffff0; +} + +static void +igb_set_dbal(IGBCore *core, int index, uint32_t val) +{ + core->mac[index] = val & E1000_XDBAL_MASK; +} + +static void +igb_set_tdt(IGBCore *core, int index, uint32_t val) +{ + IGB_TxRing txr; + int qn = igb_mq_queue_idx(TDT0, index); + + core->mac[index] = val & 0xffff; + + igb_tx_ring_init(core, &txr, qn); + igb_start_xmit(core, &txr); +} + +static void +igb_set_ics(IGBCore *core, int index, uint32_t val) +{ + trace_e1000e_irq_write_ics(val); + igb_raise_interrupts(core, ICR, val); +} + +static void +igb_set_imc(IGBCore *core, int index, uint32_t val) +{ + trace_e1000e_irq_ims_clear_set_imc(val); + igb_lower_interrupts(core, IMS, val); +} + +static void +igb_set_ims(IGBCore *core, int index, uint32_t val) +{ + igb_raise_interrupts(core, IMS, val & 0x77D4FBFD); +} + +static void igb_nsicr(IGBCore *core) +{ + /* + * If GPIE.NSICR = 0, then the clear of IMS will occur only if at + * least one bit is set in the IMS and there is a true interrupt as + * reflected in ICR.INTA. + */ + if ((core->mac[GPIE] & E1000_GPIE_NSICR) || + (core->mac[IMS] && (core->mac[ICR] & E1000_ICR_INT_ASSERTED))) { + igb_lower_interrupts(core, IMS, core->mac[IAM]); + } +} + +static void igb_set_icr(IGBCore *core, int index, uint32_t val) +{ + igb_nsicr(core); + igb_lower_interrupts(core, ICR, val); +} + +static uint32_t +igb_mac_readreg(IGBCore *core, int index) +{ + return core->mac[index]; +} + +static uint32_t +igb_mac_ics_read(IGBCore *core, int index) +{ + trace_e1000e_irq_read_ics(core->mac[ICS]); + return core->mac[ICS]; +} + +static uint32_t +igb_mac_ims_read(IGBCore *core, int index) +{ + trace_e1000e_irq_read_ims(core->mac[IMS]); + return core->mac[IMS]; +} + +static uint32_t +igb_mac_swsm_read(IGBCore *core, int index) +{ + uint32_t val = core->mac[SWSM]; + core->mac[SWSM] = val | E1000_SWSM_SMBI; + return val; +} + +static uint32_t +igb_mac_eitr_read(IGBCore *core, int index) +{ + return core->eitr_guest_value[index - EITR0]; +} + +static uint32_t igb_mac_vfmailbox_read(IGBCore *core, int index) +{ + uint32_t val = core->mac[index]; + + core->mac[index] &= ~(E1000_V2PMAILBOX_PFSTS | E1000_V2PMAILBOX_PFACK | + E1000_V2PMAILBOX_RSTD); + + return val; +} + +static uint32_t +igb_mac_icr_read(IGBCore *core, int index) +{ + uint32_t ret = core->mac[ICR]; + + if (core->mac[GPIE] & E1000_GPIE_NSICR) { + trace_igb_irq_icr_clear_gpie_nsicr(); + igb_lower_interrupts(core, ICR, 0xffffffff); + } else if (core->mac[IMS] == 0) { + trace_e1000e_irq_icr_clear_zero_ims(); + igb_lower_interrupts(core, ICR, 0xffffffff); + } else if (core->mac[ICR] & E1000_ICR_INT_ASSERTED) { + igb_lower_interrupts(core, ICR, 0xffffffff); + } else if (!msix_enabled(core->owner)) { + trace_e1000e_irq_icr_clear_nonmsix_icr_read(); + igb_lower_interrupts(core, ICR, 0xffffffff); + } + + igb_nsicr(core); + return ret; +} + +static uint32_t +igb_mac_read_clr4(IGBCore *core, int index) +{ + uint32_t ret = 
core->mac[index]; + + core->mac[index] = 0; + return ret; +} + +static uint32_t +igb_mac_read_clr8(IGBCore *core, int index) +{ + uint32_t ret = core->mac[index]; + + core->mac[index] = 0; + core->mac[index - 1] = 0; + return ret; +} + +static uint32_t +igb_get_ctrl(IGBCore *core, int index) +{ + uint32_t val = core->mac[CTRL]; + + trace_e1000e_link_read_params( + !!(val & E1000_CTRL_ASDE), + (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT, + !!(val & E1000_CTRL_FRCSPD), + !!(val & E1000_CTRL_FRCDPX), + !!(val & E1000_CTRL_RFCE), + !!(val & E1000_CTRL_TFCE)); + + return val; +} + +static uint32_t igb_get_status(IGBCore *core, int index) +{ + uint32_t res = core->mac[STATUS]; + uint16_t num_vfs = pcie_sriov_num_vfs(core->owner); + + if (core->mac[CTRL] & E1000_CTRL_FRCDPX) { + res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0; + } else { + res |= E1000_STATUS_FD; + } + + if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) || + (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) { + switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) { + case E1000_CTRL_SPD_10: + res |= E1000_STATUS_SPEED_10; + break; + case E1000_CTRL_SPD_100: + res |= E1000_STATUS_SPEED_100; + break; + case E1000_CTRL_SPD_1000: + default: + res |= E1000_STATUS_SPEED_1000; + break; + } + } else { + res |= E1000_STATUS_SPEED_1000; + } + + if (num_vfs) { + res |= num_vfs << E1000_STATUS_NUM_VFS_SHIFT; + res |= E1000_STATUS_IOV_MODE; + } + + /* + * Windows driver 12.18.9.23 resets if E1000_STATUS_GIO_MASTER_ENABLE is + * left set after E1000_CTRL_LRST is set. + */ + if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE) && + !(core->mac[CTRL] & E1000_CTRL_LRST)) { + res |= E1000_STATUS_GIO_MASTER_ENABLE; + } + + return res; +} + +static void +igb_mac_writereg(IGBCore *core, int index, uint32_t val) +{ + core->mac[index] = val; +} + +static void +igb_mac_setmacaddr(IGBCore *core, int index, uint32_t val) +{ + uint32_t macaddr[2]; + + core->mac[index] = val; + + macaddr[0] = cpu_to_le32(core->mac[RA]); + macaddr[1] = cpu_to_le32(core->mac[RA + 1]); + qemu_format_nic_info_str(qemu_get_queue(core->owner_nic), + (uint8_t *) macaddr); + + trace_e1000e_mac_set_sw(MAC_ARG(macaddr)); +} + +static void +igb_set_eecd(IGBCore *core, int index, uint32_t val) +{ + static const uint32_t ro_bits = E1000_EECD_PRES | + E1000_EECD_AUTO_RD | + E1000_EECD_SIZE_EX_MASK; + + core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits); +} + +static void +igb_set_eerd(IGBCore *core, int index, uint32_t val) +{ + uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK; + uint32_t flags = 0; + uint32_t data = 0; + + if ((addr < IGB_EEPROM_SIZE) && (val & E1000_EERW_START)) { + data = core->eeprom[addr]; + flags = E1000_EERW_DONE; + } + + core->mac[EERD] = flags | + (addr << E1000_EERW_ADDR_SHIFT) | + (data << E1000_EERW_DATA_SHIFT); +} + +static void +igb_set_eitr(IGBCore *core, int index, uint32_t val) +{ + uint32_t eitr_num = index - EITR0; + + trace_igb_irq_eitr_set(eitr_num, val); + + core->eitr_guest_value[eitr_num] = val & ~E1000_EITR_CNT_IGNR; + core->mac[index] = val & 0x7FFE; +} + +static void +igb_update_rx_offloads(IGBCore *core) +{ + int cso_state = igb_rx_l4_cso_enabled(core); + + trace_e1000e_rx_set_cso(cso_state); + + if (core->has_vnet) { + qemu_set_offload(qemu_get_queue(core->owner_nic)->peer, + cso_state, 0, 0, 0, 0); + } +} + +static void +igb_set_rxcsum(IGBCore *core, int index, uint32_t val) +{ + core->mac[RXCSUM] = val; + igb_update_rx_offloads(core); +} + +static void +igb_set_gcr(IGBCore *core, int index, 
uint32_t val) +{ + uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS; + core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits; +} + +static uint32_t igb_get_systiml(IGBCore *core, int index) +{ + e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH); + return core->mac[SYSTIML]; +} + +static uint32_t igb_get_rxsatrh(IGBCore *core, int index) +{ + core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID; + return core->mac[RXSATRH]; +} + +static uint32_t igb_get_txstmph(IGBCore *core, int index) +{ + core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID; + return core->mac[TXSTMPH]; +} + +static void igb_set_timinca(IGBCore *core, int index, uint32_t val) +{ + e1000x_set_timinca(core->mac, &core->timadj, val); +} + +static void igb_set_timadjh(IGBCore *core, int index, uint32_t val) +{ + core->mac[TIMADJH] = val; + core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32); +} + +#define igb_getreg(x) [x] = igb_mac_readreg +typedef uint32_t (*readops)(IGBCore *, int); +static const readops igb_macreg_readops[] = { + igb_getreg(WUFC), + igb_getreg(MANC), + igb_getreg(TOTL), + igb_getreg(RDT0), + igb_getreg(RDT1), + igb_getreg(RDT2), + igb_getreg(RDT3), + igb_getreg(RDT4), + igb_getreg(RDT5), + igb_getreg(RDT6), + igb_getreg(RDT7), + igb_getreg(RDT8), + igb_getreg(RDT9), + igb_getreg(RDT10), + igb_getreg(RDT11), + igb_getreg(RDT12), + igb_getreg(RDT13), + igb_getreg(RDT14), + igb_getreg(RDT15), + igb_getreg(RDBAH0), + igb_getreg(RDBAH1), + igb_getreg(RDBAH2), + igb_getreg(RDBAH3), + igb_getreg(RDBAH4), + igb_getreg(RDBAH5), + igb_getreg(RDBAH6), + igb_getreg(RDBAH7), + igb_getreg(RDBAH8), + igb_getreg(RDBAH9), + igb_getreg(RDBAH10), + igb_getreg(RDBAH11), + igb_getreg(RDBAH12), + igb_getreg(RDBAH13), + igb_getreg(RDBAH14), + igb_getreg(RDBAH15), + igb_getreg(TDBAL0), + igb_getreg(TDBAL1), + igb_getreg(TDBAL2), + igb_getreg(TDBAL3), + igb_getreg(TDBAL4), + igb_getreg(TDBAL5), + igb_getreg(TDBAL6), + igb_getreg(TDBAL7), + igb_getreg(TDBAL8), + igb_getreg(TDBAL9), + igb_getreg(TDBAL10), + igb_getreg(TDBAL11), + igb_getreg(TDBAL12), + igb_getreg(TDBAL13), + igb_getreg(TDBAL14), + igb_getreg(TDBAL15), + igb_getreg(RDLEN0), + igb_getreg(RDLEN1), + igb_getreg(RDLEN2), + igb_getreg(RDLEN3), + igb_getreg(RDLEN4), + igb_getreg(RDLEN5), + igb_getreg(RDLEN6), + igb_getreg(RDLEN7), + igb_getreg(RDLEN8), + igb_getreg(RDLEN9), + igb_getreg(RDLEN10), + igb_getreg(RDLEN11), + igb_getreg(RDLEN12), + igb_getreg(RDLEN13), + igb_getreg(RDLEN14), + igb_getreg(RDLEN15), + igb_getreg(SRRCTL0), + igb_getreg(SRRCTL1), + igb_getreg(SRRCTL2), + igb_getreg(SRRCTL3), + igb_getreg(SRRCTL4), + igb_getreg(SRRCTL5), + igb_getreg(SRRCTL6), + igb_getreg(SRRCTL7), + igb_getreg(SRRCTL8), + igb_getreg(SRRCTL9), + igb_getreg(SRRCTL10), + igb_getreg(SRRCTL11), + igb_getreg(SRRCTL12), + igb_getreg(SRRCTL13), + igb_getreg(SRRCTL14), + igb_getreg(SRRCTL15), + igb_getreg(LATECOL), + igb_getreg(XONTXC), + igb_getreg(TDFH), + igb_getreg(TDFT), + igb_getreg(TDFHS), + igb_getreg(TDFTS), + igb_getreg(TDFPC), + igb_getreg(WUS), + igb_getreg(RDFH), + igb_getreg(RDFT), + igb_getreg(RDFHS), + igb_getreg(RDFTS), + igb_getreg(RDFPC), + igb_getreg(GORCL), + igb_getreg(MGTPRC), + igb_getreg(EERD), + igb_getreg(EIAC), + igb_getreg(MANC2H), + igb_getreg(RXCSUM), + igb_getreg(GSCL_3), + igb_getreg(GSCN_2), + igb_getreg(FCAH), + igb_getreg(FCRTH), + igb_getreg(FLOP), + igb_getreg(RXSTMPH), + igb_getreg(TXSTMPL), + igb_getreg(TIMADJL), + igb_getreg(RDH0), + igb_getreg(RDH1), + igb_getreg(RDH2), + igb_getreg(RDH3), + igb_getreg(RDH4), + 
igb_getreg(RDH5), + igb_getreg(RDH6), + igb_getreg(RDH7), + igb_getreg(RDH8), + igb_getreg(RDH9), + igb_getreg(RDH10), + igb_getreg(RDH11), + igb_getreg(RDH12), + igb_getreg(RDH13), + igb_getreg(RDH14), + igb_getreg(RDH15), + igb_getreg(TDT0), + igb_getreg(TDT1), + igb_getreg(TDT2), + igb_getreg(TDT3), + igb_getreg(TDT4), + igb_getreg(TDT5), + igb_getreg(TDT6), + igb_getreg(TDT7), + igb_getreg(TDT8), + igb_getreg(TDT9), + igb_getreg(TDT10), + igb_getreg(TDT11), + igb_getreg(TDT12), + igb_getreg(TDT13), + igb_getreg(TDT14), + igb_getreg(TDT15), + igb_getreg(TNCRS), + igb_getreg(RJC), + igb_getreg(IAM), + igb_getreg(GSCL_2), + igb_getreg(TIPG), + igb_getreg(FLMNGCTL), + igb_getreg(FLMNGCNT), + igb_getreg(TSYNCTXCTL), + igb_getreg(EEMNGDATA), + igb_getreg(CTRL_EXT), + igb_getreg(SYSTIMH), + igb_getreg(EEMNGCTL), + igb_getreg(FLMNGDATA), + igb_getreg(TSYNCRXCTL), + igb_getreg(LEDCTL), + igb_getreg(TCTL), + igb_getreg(TCTL_EXT), + igb_getreg(DTXCTL), + igb_getreg(RXPBS), + igb_getreg(TDH0), + igb_getreg(TDH1), + igb_getreg(TDH2), + igb_getreg(TDH3), + igb_getreg(TDH4), + igb_getreg(TDH5), + igb_getreg(TDH6), + igb_getreg(TDH7), + igb_getreg(TDH8), + igb_getreg(TDH9), + igb_getreg(TDH10), + igb_getreg(TDH11), + igb_getreg(TDH12), + igb_getreg(TDH13), + igb_getreg(TDH14), + igb_getreg(TDH15), + igb_getreg(ECOL), + igb_getreg(DC), + igb_getreg(RLEC), + igb_getreg(XOFFTXC), + igb_getreg(RFC), + igb_getreg(RNBC), + igb_getreg(MGTPTC), + igb_getreg(TIMINCA), + igb_getreg(FACTPS), + igb_getreg(GSCL_1), + igb_getreg(GSCN_0), + igb_getreg(PBACLR), + igb_getreg(FCTTV), + igb_getreg(RXSATRL), + igb_getreg(TORL), + igb_getreg(TDLEN0), + igb_getreg(TDLEN1), + igb_getreg(TDLEN2), + igb_getreg(TDLEN3), + igb_getreg(TDLEN4), + igb_getreg(TDLEN5), + igb_getreg(TDLEN6), + igb_getreg(TDLEN7), + igb_getreg(TDLEN8), + igb_getreg(TDLEN9), + igb_getreg(TDLEN10), + igb_getreg(TDLEN11), + igb_getreg(TDLEN12), + igb_getreg(TDLEN13), + igb_getreg(TDLEN14), + igb_getreg(TDLEN15), + igb_getreg(MCC), + igb_getreg(WUC), + igb_getreg(EECD), + igb_getreg(FCRTV), + igb_getreg(TXDCTL0), + igb_getreg(TXDCTL1), + igb_getreg(TXDCTL2), + igb_getreg(TXDCTL3), + igb_getreg(TXDCTL4), + igb_getreg(TXDCTL5), + igb_getreg(TXDCTL6), + igb_getreg(TXDCTL7), + igb_getreg(TXDCTL8), + igb_getreg(TXDCTL9), + igb_getreg(TXDCTL10), + igb_getreg(TXDCTL11), + igb_getreg(TXDCTL12), + igb_getreg(TXDCTL13), + igb_getreg(TXDCTL14), + igb_getreg(TXDCTL15), + igb_getreg(TXCTL0), + igb_getreg(TXCTL1), + igb_getreg(TXCTL2), + igb_getreg(TXCTL3), + igb_getreg(TXCTL4), + igb_getreg(TXCTL5), + igb_getreg(TXCTL6), + igb_getreg(TXCTL7), + igb_getreg(TXCTL8), + igb_getreg(TXCTL9), + igb_getreg(TXCTL10), + igb_getreg(TXCTL11), + igb_getreg(TXCTL12), + igb_getreg(TXCTL13), + igb_getreg(TXCTL14), + igb_getreg(TXCTL15), + igb_getreg(TDWBAL0), + igb_getreg(TDWBAL1), + igb_getreg(TDWBAL2), + igb_getreg(TDWBAL3), + igb_getreg(TDWBAL4), + igb_getreg(TDWBAL5), + igb_getreg(TDWBAL6), + igb_getreg(TDWBAL7), + igb_getreg(TDWBAL8), + igb_getreg(TDWBAL9), + igb_getreg(TDWBAL10), + igb_getreg(TDWBAL11), + igb_getreg(TDWBAL12), + igb_getreg(TDWBAL13), + igb_getreg(TDWBAL14), + igb_getreg(TDWBAL15), + igb_getreg(TDWBAH0), + igb_getreg(TDWBAH1), + igb_getreg(TDWBAH2), + igb_getreg(TDWBAH3), + igb_getreg(TDWBAH4), + igb_getreg(TDWBAH5), + igb_getreg(TDWBAH6), + igb_getreg(TDWBAH7), + igb_getreg(TDWBAH8), + igb_getreg(TDWBAH9), + igb_getreg(TDWBAH10), + igb_getreg(TDWBAH11), + igb_getreg(TDWBAH12), + igb_getreg(TDWBAH13), + igb_getreg(TDWBAH14), + igb_getreg(TDWBAH15), + 
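+
+    /* SR-IOV: per-VF control/interrupt (PVT*) and per-pool statistics (PVF*) */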
igb_getreg(PVTCTRL0), + igb_getreg(PVTCTRL1), + igb_getreg(PVTCTRL2), + igb_getreg(PVTCTRL3), + igb_getreg(PVTCTRL4), + igb_getreg(PVTCTRL5), + igb_getreg(PVTCTRL6), + igb_getreg(PVTCTRL7), + igb_getreg(PVTEIMS0), + igb_getreg(PVTEIMS1), + igb_getreg(PVTEIMS2), + igb_getreg(PVTEIMS3), + igb_getreg(PVTEIMS4), + igb_getreg(PVTEIMS5), + igb_getreg(PVTEIMS6), + igb_getreg(PVTEIMS7), + igb_getreg(PVTEIAC0), + igb_getreg(PVTEIAC1), + igb_getreg(PVTEIAC2), + igb_getreg(PVTEIAC3), + igb_getreg(PVTEIAC4), + igb_getreg(PVTEIAC5), + igb_getreg(PVTEIAC6), + igb_getreg(PVTEIAC7), + igb_getreg(PVTEIAM0), + igb_getreg(PVTEIAM1), + igb_getreg(PVTEIAM2), + igb_getreg(PVTEIAM3), + igb_getreg(PVTEIAM4), + igb_getreg(PVTEIAM5), + igb_getreg(PVTEIAM6), + igb_getreg(PVTEIAM7), + igb_getreg(PVFGPRC0), + igb_getreg(PVFGPRC1), + igb_getreg(PVFGPRC2), + igb_getreg(PVFGPRC3), + igb_getreg(PVFGPRC4), + igb_getreg(PVFGPRC5), + igb_getreg(PVFGPRC6), + igb_getreg(PVFGPRC7), + igb_getreg(PVFGPTC0), + igb_getreg(PVFGPTC1), + igb_getreg(PVFGPTC2), + igb_getreg(PVFGPTC3), + igb_getreg(PVFGPTC4), + igb_getreg(PVFGPTC5), + igb_getreg(PVFGPTC6), + igb_getreg(PVFGPTC7), + igb_getreg(PVFGORC0), + igb_getreg(PVFGORC1), + igb_getreg(PVFGORC2), + igb_getreg(PVFGORC3), + igb_getreg(PVFGORC4), + igb_getreg(PVFGORC5), + igb_getreg(PVFGORC6), + igb_getreg(PVFGORC7), + igb_getreg(PVFGOTC0), + igb_getreg(PVFGOTC1), + igb_getreg(PVFGOTC2), + igb_getreg(PVFGOTC3), + igb_getreg(PVFGOTC4), + igb_getreg(PVFGOTC5), + igb_getreg(PVFGOTC6), + igb_getreg(PVFGOTC7), + igb_getreg(PVFMPRC0), + igb_getreg(PVFMPRC1), + igb_getreg(PVFMPRC2), + igb_getreg(PVFMPRC3), + igb_getreg(PVFMPRC4), + igb_getreg(PVFMPRC5), + igb_getreg(PVFMPRC6), + igb_getreg(PVFMPRC7), + igb_getreg(PVFGPRLBC0), + igb_getreg(PVFGPRLBC1), + igb_getreg(PVFGPRLBC2), + igb_getreg(PVFGPRLBC3), + igb_getreg(PVFGPRLBC4), + igb_getreg(PVFGPRLBC5), + igb_getreg(PVFGPRLBC6), + igb_getreg(PVFGPRLBC7), + igb_getreg(PVFGPTLBC0), + igb_getreg(PVFGPTLBC1), + igb_getreg(PVFGPTLBC2), + igb_getreg(PVFGPTLBC3), + igb_getreg(PVFGPTLBC4), + igb_getreg(PVFGPTLBC5), + igb_getreg(PVFGPTLBC6), + igb_getreg(PVFGPTLBC7), + igb_getreg(PVFGORLBC0), + igb_getreg(PVFGORLBC1), + igb_getreg(PVFGORLBC2), + igb_getreg(PVFGORLBC3), + igb_getreg(PVFGORLBC4), + igb_getreg(PVFGORLBC5), + igb_getreg(PVFGORLBC6), + igb_getreg(PVFGORLBC7), + igb_getreg(PVFGOTLBC0), + igb_getreg(PVFGOTLBC1), + igb_getreg(PVFGOTLBC2), + igb_getreg(PVFGOTLBC3), + igb_getreg(PVFGOTLBC4), + igb_getreg(PVFGOTLBC5), + igb_getreg(PVFGOTLBC6), + igb_getreg(PVFGOTLBC7), + igb_getreg(RCTL), + igb_getreg(MDIC), + igb_getreg(FCRUC), + igb_getreg(VET), + igb_getreg(RDBAL0), + igb_getreg(RDBAL1), + igb_getreg(RDBAL2), + igb_getreg(RDBAL3), + igb_getreg(RDBAL4), + igb_getreg(RDBAL5), + igb_getreg(RDBAL6), + igb_getreg(RDBAL7), + igb_getreg(RDBAL8), + igb_getreg(RDBAL9), + igb_getreg(RDBAL10), + igb_getreg(RDBAL11), + igb_getreg(RDBAL12), + igb_getreg(RDBAL13), + igb_getreg(RDBAL14), + igb_getreg(RDBAL15), + igb_getreg(TDBAH0), + igb_getreg(TDBAH1), + igb_getreg(TDBAH2), + igb_getreg(TDBAH3), + igb_getreg(TDBAH4), + igb_getreg(TDBAH5), + igb_getreg(TDBAH6), + igb_getreg(TDBAH7), + igb_getreg(TDBAH8), + igb_getreg(TDBAH9), + igb_getreg(TDBAH10), + igb_getreg(TDBAH11), + igb_getreg(TDBAH12), + igb_getreg(TDBAH13), + igb_getreg(TDBAH14), + igb_getreg(TDBAH15), + igb_getreg(SCC), + igb_getreg(COLC), + igb_getreg(XOFFRXC), + igb_getreg(IPAV), + igb_getreg(GOTCL), + igb_getreg(MGTPDC), + igb_getreg(GCR), + igb_getreg(MFVAL), + igb_getreg(FUNCTAG), + 
igb_getreg(GSCL_4), + igb_getreg(GSCN_3), + igb_getreg(MRQC), + igb_getreg(FCT), + igb_getreg(FLA), + igb_getreg(RXDCTL0), + igb_getreg(RXDCTL1), + igb_getreg(RXDCTL2), + igb_getreg(RXDCTL3), + igb_getreg(RXDCTL4), + igb_getreg(RXDCTL5), + igb_getreg(RXDCTL6), + igb_getreg(RXDCTL7), + igb_getreg(RXDCTL8), + igb_getreg(RXDCTL9), + igb_getreg(RXDCTL10), + igb_getreg(RXDCTL11), + igb_getreg(RXDCTL12), + igb_getreg(RXDCTL13), + igb_getreg(RXDCTL14), + igb_getreg(RXDCTL15), + igb_getreg(RXSTMPL), + igb_getreg(TIMADJH), + igb_getreg(FCRTL), + igb_getreg(XONRXC), + igb_getreg(RFCTL), + igb_getreg(GSCN_1), + igb_getreg(FCAL), + igb_getreg(GPIE), + igb_getreg(TXPBS), + igb_getreg(RLPML), + + [TOTH] = igb_mac_read_clr8, + [GOTCH] = igb_mac_read_clr8, + [PRC64] = igb_mac_read_clr4, + [PRC255] = igb_mac_read_clr4, + [PRC1023] = igb_mac_read_clr4, + [PTC64] = igb_mac_read_clr4, + [PTC255] = igb_mac_read_clr4, + [PTC1023] = igb_mac_read_clr4, + [GPRC] = igb_mac_read_clr4, + [TPT] = igb_mac_read_clr4, + [RUC] = igb_mac_read_clr4, + [BPRC] = igb_mac_read_clr4, + [MPTC] = igb_mac_read_clr4, + [IAC] = igb_mac_read_clr4, + [ICR] = igb_mac_icr_read, + [STATUS] = igb_get_status, + [ICS] = igb_mac_ics_read, + /* + * 8.8.10: Reading the IMC register returns the value of the IMS register. + */ + [IMC] = igb_mac_ims_read, + [TORH] = igb_mac_read_clr8, + [GORCH] = igb_mac_read_clr8, + [PRC127] = igb_mac_read_clr4, + [PRC511] = igb_mac_read_clr4, + [PRC1522] = igb_mac_read_clr4, + [PTC127] = igb_mac_read_clr4, + [PTC511] = igb_mac_read_clr4, + [PTC1522] = igb_mac_read_clr4, + [GPTC] = igb_mac_read_clr4, + [TPR] = igb_mac_read_clr4, + [ROC] = igb_mac_read_clr4, + [MPRC] = igb_mac_read_clr4, + [BPTC] = igb_mac_read_clr4, + [TSCTC] = igb_mac_read_clr4, + [CTRL] = igb_get_ctrl, + [SWSM] = igb_mac_swsm_read, + [IMS] = igb_mac_ims_read, + [SYSTIML] = igb_get_systiml, + [RXSATRH] = igb_get_rxsatrh, + [TXSTMPH] = igb_get_txstmph, + + [CRCERRS ... MPC] = igb_mac_readreg, + [IP6AT ... IP6AT + 3] = igb_mac_readreg, + [IP4AT ... IP4AT + 6] = igb_mac_readreg, + [RA ... RA + 31] = igb_mac_readreg, + [RA2 ... RA2 + 31] = igb_mac_readreg, + [WUPM ... WUPM + 31] = igb_mac_readreg, + [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = igb_mac_readreg, + [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_readreg, + [FFMT ... FFMT + 254] = igb_mac_readreg, + [MDEF ... MDEF + 7] = igb_mac_readreg, + [FTFT ... FTFT + 254] = igb_mac_readreg, + [RETA ... RETA + 31] = igb_mac_readreg, + [RSSRK ... RSSRK + 9] = igb_mac_readreg, + [MAVTV0 ... MAVTV3] = igb_mac_readreg, + [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_mac_eitr_read, + [PVTEICR0] = igb_mac_read_clr4, + [PVTEICR1] = igb_mac_read_clr4, + [PVTEICR2] = igb_mac_read_clr4, + [PVTEICR3] = igb_mac_read_clr4, + [PVTEICR4] = igb_mac_read_clr4, + [PVTEICR5] = igb_mac_read_clr4, + [PVTEICR6] = igb_mac_read_clr4, + [PVTEICR7] = igb_mac_read_clr4, + + /* IGB specific: */ + [FWSM] = igb_mac_readreg, + [SW_FW_SYNC] = igb_mac_readreg, + [HTCBDPC] = igb_mac_read_clr4, + [EICR] = igb_mac_read_clr4, + [EIMS] = igb_mac_readreg, + [EIAM] = igb_mac_readreg, + [IVAR0 ... IVAR0 + 7] = igb_mac_readreg, + igb_getreg(IVAR_MISC), + igb_getreg(TSYNCRXCFG), + [ETQF0 ... ETQF0 + 7] = igb_mac_readreg, + igb_getreg(VT_CTL), + [P2VMAILBOX0 ... P2VMAILBOX7] = igb_mac_readreg, + [V2PMAILBOX0 ... V2PMAILBOX7] = igb_mac_vfmailbox_read, + igb_getreg(MBVFICR), + [VMBMEM0 ... 
VMBMEM0 + 127] = igb_mac_readreg, + igb_getreg(MBVFIMR), + igb_getreg(VFLRE), + igb_getreg(VFRE), + igb_getreg(VFTE), + igb_getreg(QDE), + igb_getreg(DTXSWC), + igb_getreg(RPLOLR), + [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_readreg, + [VMVIR0 ... VMVIR7] = igb_mac_readreg, + [VMOLR0 ... VMOLR7] = igb_mac_readreg, + [WVBR] = igb_mac_read_clr4, + [RQDPC0] = igb_mac_read_clr4, + [RQDPC1] = igb_mac_read_clr4, + [RQDPC2] = igb_mac_read_clr4, + [RQDPC3] = igb_mac_read_clr4, + [RQDPC4] = igb_mac_read_clr4, + [RQDPC5] = igb_mac_read_clr4, + [RQDPC6] = igb_mac_read_clr4, + [RQDPC7] = igb_mac_read_clr4, + [RQDPC8] = igb_mac_read_clr4, + [RQDPC9] = igb_mac_read_clr4, + [RQDPC10] = igb_mac_read_clr4, + [RQDPC11] = igb_mac_read_clr4, + [RQDPC12] = igb_mac_read_clr4, + [RQDPC13] = igb_mac_read_clr4, + [RQDPC14] = igb_mac_read_clr4, + [RQDPC15] = igb_mac_read_clr4, + [VTIVAR ... VTIVAR + 7] = igb_mac_readreg, + [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_readreg, +}; +enum { IGB_NREADOPS = ARRAY_SIZE(igb_macreg_readops) }; + +#define igb_putreg(x) [x] = igb_mac_writereg +typedef void (*writeops)(IGBCore *, int, uint32_t); +static const writeops igb_macreg_writeops[] = { + igb_putreg(SWSM), + igb_putreg(WUFC), + igb_putreg(RDBAH0), + igb_putreg(RDBAH1), + igb_putreg(RDBAH2), + igb_putreg(RDBAH3), + igb_putreg(RDBAH4), + igb_putreg(RDBAH5), + igb_putreg(RDBAH6), + igb_putreg(RDBAH7), + igb_putreg(RDBAH8), + igb_putreg(RDBAH9), + igb_putreg(RDBAH10), + igb_putreg(RDBAH11), + igb_putreg(RDBAH12), + igb_putreg(RDBAH13), + igb_putreg(RDBAH14), + igb_putreg(RDBAH15), + igb_putreg(SRRCTL0), + igb_putreg(SRRCTL1), + igb_putreg(SRRCTL2), + igb_putreg(SRRCTL3), + igb_putreg(SRRCTL4), + igb_putreg(SRRCTL5), + igb_putreg(SRRCTL6), + igb_putreg(SRRCTL7), + igb_putreg(SRRCTL8), + igb_putreg(SRRCTL9), + igb_putreg(SRRCTL10), + igb_putreg(SRRCTL11), + igb_putreg(SRRCTL12), + igb_putreg(SRRCTL13), + igb_putreg(SRRCTL14), + igb_putreg(SRRCTL15), + igb_putreg(RXDCTL0), + igb_putreg(RXDCTL1), + igb_putreg(RXDCTL2), + igb_putreg(RXDCTL3), + igb_putreg(RXDCTL4), + igb_putreg(RXDCTL5), + igb_putreg(RXDCTL6), + igb_putreg(RXDCTL7), + igb_putreg(RXDCTL8), + igb_putreg(RXDCTL9), + igb_putreg(RXDCTL10), + igb_putreg(RXDCTL11), + igb_putreg(RXDCTL12), + igb_putreg(RXDCTL13), + igb_putreg(RXDCTL14), + igb_putreg(RXDCTL15), + igb_putreg(LEDCTL), + igb_putreg(TCTL), + igb_putreg(TCTL_EXT), + igb_putreg(DTXCTL), + igb_putreg(RXPBS), + igb_putreg(RQDPC0), + igb_putreg(FCAL), + igb_putreg(FCRUC), + igb_putreg(WUC), + igb_putreg(WUS), + igb_putreg(IPAV), + igb_putreg(TDBAH0), + igb_putreg(TDBAH1), + igb_putreg(TDBAH2), + igb_putreg(TDBAH3), + igb_putreg(TDBAH4), + igb_putreg(TDBAH5), + igb_putreg(TDBAH6), + igb_putreg(TDBAH7), + igb_putreg(TDBAH8), + igb_putreg(TDBAH9), + igb_putreg(TDBAH10), + igb_putreg(TDBAH11), + igb_putreg(TDBAH12), + igb_putreg(TDBAH13), + igb_putreg(TDBAH14), + igb_putreg(TDBAH15), + igb_putreg(IAM), + igb_putreg(MANC), + igb_putreg(MANC2H), + igb_putreg(MFVAL), + igb_putreg(FACTPS), + igb_putreg(FUNCTAG), + igb_putreg(GSCL_1), + igb_putreg(GSCL_2), + igb_putreg(GSCL_3), + igb_putreg(GSCL_4), + igb_putreg(GSCN_0), + igb_putreg(GSCN_1), + igb_putreg(GSCN_2), + igb_putreg(GSCN_3), + igb_putreg(MRQC), + igb_putreg(FLOP), + igb_putreg(FLA), + igb_putreg(TXDCTL0), + igb_putreg(TXDCTL1), + igb_putreg(TXDCTL2), + igb_putreg(TXDCTL3), + igb_putreg(TXDCTL4), + igb_putreg(TXDCTL5), + igb_putreg(TXDCTL6), + igb_putreg(TXDCTL7), + igb_putreg(TXDCTL8), + igb_putreg(TXDCTL9), + igb_putreg(TXDCTL10), + 
igb_putreg(TXDCTL11), + igb_putreg(TXDCTL12), + igb_putreg(TXDCTL13), + igb_putreg(TXDCTL14), + igb_putreg(TXDCTL15), + igb_putreg(TXCTL0), + igb_putreg(TXCTL1), + igb_putreg(TXCTL2), + igb_putreg(TXCTL3), + igb_putreg(TXCTL4), + igb_putreg(TXCTL5), + igb_putreg(TXCTL6), + igb_putreg(TXCTL7), + igb_putreg(TXCTL8), + igb_putreg(TXCTL9), + igb_putreg(TXCTL10), + igb_putreg(TXCTL11), + igb_putreg(TXCTL12), + igb_putreg(TXCTL13), + igb_putreg(TXCTL14), + igb_putreg(TXCTL15), + igb_putreg(TDWBAL0), + igb_putreg(TDWBAL1), + igb_putreg(TDWBAL2), + igb_putreg(TDWBAL3), + igb_putreg(TDWBAL4), + igb_putreg(TDWBAL5), + igb_putreg(TDWBAL6), + igb_putreg(TDWBAL7), + igb_putreg(TDWBAL8), + igb_putreg(TDWBAL9), + igb_putreg(TDWBAL10), + igb_putreg(TDWBAL11), + igb_putreg(TDWBAL12), + igb_putreg(TDWBAL13), + igb_putreg(TDWBAL14), + igb_putreg(TDWBAL15), + igb_putreg(TDWBAH0), + igb_putreg(TDWBAH1), + igb_putreg(TDWBAH2), + igb_putreg(TDWBAH3), + igb_putreg(TDWBAH4), + igb_putreg(TDWBAH5), + igb_putreg(TDWBAH6), + igb_putreg(TDWBAH7), + igb_putreg(TDWBAH8), + igb_putreg(TDWBAH9), + igb_putreg(TDWBAH10), + igb_putreg(TDWBAH11), + igb_putreg(TDWBAH12), + igb_putreg(TDWBAH13), + igb_putreg(TDWBAH14), + igb_putreg(TDWBAH15), + igb_putreg(TIPG), + igb_putreg(RXSTMPH), + igb_putreg(RXSTMPL), + igb_putreg(RXSATRL), + igb_putreg(RXSATRH), + igb_putreg(TXSTMPL), + igb_putreg(TXSTMPH), + igb_putreg(SYSTIML), + igb_putreg(SYSTIMH), + igb_putreg(TIMADJL), + igb_putreg(TSYNCRXCTL), + igb_putreg(TSYNCTXCTL), + igb_putreg(EEMNGCTL), + igb_putreg(GPIE), + igb_putreg(TXPBS), + igb_putreg(RLPML), + igb_putreg(VET), + + [TDH0] = igb_set_16bit, + [TDH1] = igb_set_16bit, + [TDH2] = igb_set_16bit, + [TDH3] = igb_set_16bit, + [TDH4] = igb_set_16bit, + [TDH5] = igb_set_16bit, + [TDH6] = igb_set_16bit, + [TDH7] = igb_set_16bit, + [TDH8] = igb_set_16bit, + [TDH9] = igb_set_16bit, + [TDH10] = igb_set_16bit, + [TDH11] = igb_set_16bit, + [TDH12] = igb_set_16bit, + [TDH13] = igb_set_16bit, + [TDH14] = igb_set_16bit, + [TDH15] = igb_set_16bit, + [TDT0] = igb_set_tdt, + [TDT1] = igb_set_tdt, + [TDT2] = igb_set_tdt, + [TDT3] = igb_set_tdt, + [TDT4] = igb_set_tdt, + [TDT5] = igb_set_tdt, + [TDT6] = igb_set_tdt, + [TDT7] = igb_set_tdt, + [TDT8] = igb_set_tdt, + [TDT9] = igb_set_tdt, + [TDT10] = igb_set_tdt, + [TDT11] = igb_set_tdt, + [TDT12] = igb_set_tdt, + [TDT13] = igb_set_tdt, + [TDT14] = igb_set_tdt, + [TDT15] = igb_set_tdt, + [MDIC] = igb_set_mdic, + [ICS] = igb_set_ics, + [RDH0] = igb_set_16bit, + [RDH1] = igb_set_16bit, + [RDH2] = igb_set_16bit, + [RDH3] = igb_set_16bit, + [RDH4] = igb_set_16bit, + [RDH5] = igb_set_16bit, + [RDH6] = igb_set_16bit, + [RDH7] = igb_set_16bit, + [RDH8] = igb_set_16bit, + [RDH9] = igb_set_16bit, + [RDH10] = igb_set_16bit, + [RDH11] = igb_set_16bit, + [RDH12] = igb_set_16bit, + [RDH13] = igb_set_16bit, + [RDH14] = igb_set_16bit, + [RDH15] = igb_set_16bit, + [RDT0] = igb_set_rdt, + [RDT1] = igb_set_rdt, + [RDT2] = igb_set_rdt, + [RDT3] = igb_set_rdt, + [RDT4] = igb_set_rdt, + [RDT5] = igb_set_rdt, + [RDT6] = igb_set_rdt, + [RDT7] = igb_set_rdt, + [RDT8] = igb_set_rdt, + [RDT9] = igb_set_rdt, + [RDT10] = igb_set_rdt, + [RDT11] = igb_set_rdt, + [RDT12] = igb_set_rdt, + [RDT13] = igb_set_rdt, + [RDT14] = igb_set_rdt, + [RDT15] = igb_set_rdt, + [IMC] = igb_set_imc, + [IMS] = igb_set_ims, + [ICR] = igb_set_icr, + [EECD] = igb_set_eecd, + [RCTL] = igb_set_rx_control, + [CTRL] = igb_set_ctrl, + [EERD] = igb_set_eerd, + [TDFH] = igb_set_13bit, + [TDFT] = igb_set_13bit, + [TDFHS] = igb_set_13bit, + [TDFTS] = 
igb_set_13bit, + [TDFPC] = igb_set_13bit, + [RDFH] = igb_set_13bit, + [RDFT] = igb_set_13bit, + [RDFHS] = igb_set_13bit, + [RDFTS] = igb_set_13bit, + [RDFPC] = igb_set_13bit, + [GCR] = igb_set_gcr, + [RXCSUM] = igb_set_rxcsum, + [TDLEN0] = igb_set_dlen, + [TDLEN1] = igb_set_dlen, + [TDLEN2] = igb_set_dlen, + [TDLEN3] = igb_set_dlen, + [TDLEN4] = igb_set_dlen, + [TDLEN5] = igb_set_dlen, + [TDLEN6] = igb_set_dlen, + [TDLEN7] = igb_set_dlen, + [TDLEN8] = igb_set_dlen, + [TDLEN9] = igb_set_dlen, + [TDLEN10] = igb_set_dlen, + [TDLEN11] = igb_set_dlen, + [TDLEN12] = igb_set_dlen, + [TDLEN13] = igb_set_dlen, + [TDLEN14] = igb_set_dlen, + [TDLEN15] = igb_set_dlen, + [RDLEN0] = igb_set_dlen, + [RDLEN1] = igb_set_dlen, + [RDLEN2] = igb_set_dlen, + [RDLEN3] = igb_set_dlen, + [RDLEN4] = igb_set_dlen, + [RDLEN5] = igb_set_dlen, + [RDLEN6] = igb_set_dlen, + [RDLEN7] = igb_set_dlen, + [RDLEN8] = igb_set_dlen, + [RDLEN9] = igb_set_dlen, + [RDLEN10] = igb_set_dlen, + [RDLEN11] = igb_set_dlen, + [RDLEN12] = igb_set_dlen, + [RDLEN13] = igb_set_dlen, + [RDLEN14] = igb_set_dlen, + [RDLEN15] = igb_set_dlen, + [TDBAL0] = igb_set_dbal, + [TDBAL1] = igb_set_dbal, + [TDBAL2] = igb_set_dbal, + [TDBAL3] = igb_set_dbal, + [TDBAL4] = igb_set_dbal, + [TDBAL5] = igb_set_dbal, + [TDBAL6] = igb_set_dbal, + [TDBAL7] = igb_set_dbal, + [TDBAL8] = igb_set_dbal, + [TDBAL9] = igb_set_dbal, + [TDBAL10] = igb_set_dbal, + [TDBAL11] = igb_set_dbal, + [TDBAL12] = igb_set_dbal, + [TDBAL13] = igb_set_dbal, + [TDBAL14] = igb_set_dbal, + [TDBAL15] = igb_set_dbal, + [RDBAL0] = igb_set_dbal, + [RDBAL1] = igb_set_dbal, + [RDBAL2] = igb_set_dbal, + [RDBAL3] = igb_set_dbal, + [RDBAL4] = igb_set_dbal, + [RDBAL5] = igb_set_dbal, + [RDBAL6] = igb_set_dbal, + [RDBAL7] = igb_set_dbal, + [RDBAL8] = igb_set_dbal, + [RDBAL9] = igb_set_dbal, + [RDBAL10] = igb_set_dbal, + [RDBAL11] = igb_set_dbal, + [RDBAL12] = igb_set_dbal, + [RDBAL13] = igb_set_dbal, + [RDBAL14] = igb_set_dbal, + [RDBAL15] = igb_set_dbal, + [STATUS] = igb_set_status, + [PBACLR] = igb_set_pbaclr, + [CTRL_EXT] = igb_set_ctrlext, + [FCAH] = igb_set_16bit, + [FCT] = igb_set_16bit, + [FCTTV] = igb_set_16bit, + [FCRTV] = igb_set_16bit, + [FCRTH] = igb_set_fcrth, + [FCRTL] = igb_set_fcrtl, + [CTRL_DUP] = igb_set_ctrl, + [RFCTL] = igb_set_rfctl, + [TIMINCA] = igb_set_timinca, + [TIMADJH] = igb_set_timadjh, + + [IP6AT ... IP6AT + 3] = igb_mac_writereg, + [IP4AT ... IP4AT + 6] = igb_mac_writereg, + [RA] = igb_mac_writereg, + [RA + 1] = igb_mac_setmacaddr, + [RA + 2 ... RA + 31] = igb_mac_writereg, + [RA2 ... RA2 + 31] = igb_mac_writereg, + [WUPM ... WUPM + 31] = igb_mac_writereg, + [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg, + [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_writereg, + [FFMT ... FFMT + 254] = igb_set_4bit, + [MDEF ... MDEF + 7] = igb_mac_writereg, + [FTFT ... FTFT + 254] = igb_mac_writereg, + [RETA ... RETA + 31] = igb_mac_writereg, + [RSSRK ... RSSRK + 9] = igb_mac_writereg, + [MAVTV0 ... MAVTV3] = igb_mac_writereg, + [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_set_eitr, + + /* IGB specific: */ + [FWSM] = igb_mac_writereg, + [SW_FW_SYNC] = igb_mac_writereg, + [EICR] = igb_set_eicr, + [EICS] = igb_set_eics, + [EIAC] = igb_set_eiac, + [EIAM] = igb_set_eiam, + [EIMC] = igb_set_eimc, + [EIMS] = igb_set_eims, + [IVAR0 ... IVAR0 + 7] = igb_mac_writereg, + igb_putreg(IVAR_MISC), + igb_putreg(TSYNCRXCFG), + [ETQF0 ... ETQF0 + 7] = igb_mac_writereg, + igb_putreg(VT_CTL), + [P2VMAILBOX0 ... P2VMAILBOX7] = igb_set_pfmailbox, + [V2PMAILBOX0 ... 
V2PMAILBOX7] = igb_set_vfmailbox, + [MBVFICR] = igb_w1c, + [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_writereg, + igb_putreg(MBVFIMR), + [VFLRE] = igb_w1c, + igb_putreg(VFRE), + igb_putreg(VFTE), + igb_putreg(QDE), + igb_putreg(DTXSWC), + igb_putreg(RPLOLR), + [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_writereg, + [VMVIR0 ... VMVIR7] = igb_mac_writereg, + [VMOLR0 ... VMOLR7] = igb_mac_writereg, + [UTA ... UTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg, + [PVTCTRL0] = igb_set_vtctrl, + [PVTCTRL1] = igb_set_vtctrl, + [PVTCTRL2] = igb_set_vtctrl, + [PVTCTRL3] = igb_set_vtctrl, + [PVTCTRL4] = igb_set_vtctrl, + [PVTCTRL5] = igb_set_vtctrl, + [PVTCTRL6] = igb_set_vtctrl, + [PVTCTRL7] = igb_set_vtctrl, + [PVTEICS0] = igb_set_vteics, + [PVTEICS1] = igb_set_vteics, + [PVTEICS2] = igb_set_vteics, + [PVTEICS3] = igb_set_vteics, + [PVTEICS4] = igb_set_vteics, + [PVTEICS5] = igb_set_vteics, + [PVTEICS6] = igb_set_vteics, + [PVTEICS7] = igb_set_vteics, + [PVTEIMS0] = igb_set_vteims, + [PVTEIMS1] = igb_set_vteims, + [PVTEIMS2] = igb_set_vteims, + [PVTEIMS3] = igb_set_vteims, + [PVTEIMS4] = igb_set_vteims, + [PVTEIMS5] = igb_set_vteims, + [PVTEIMS6] = igb_set_vteims, + [PVTEIMS7] = igb_set_vteims, + [PVTEIMC0] = igb_set_vteimc, + [PVTEIMC1] = igb_set_vteimc, + [PVTEIMC2] = igb_set_vteimc, + [PVTEIMC3] = igb_set_vteimc, + [PVTEIMC4] = igb_set_vteimc, + [PVTEIMC5] = igb_set_vteimc, + [PVTEIMC6] = igb_set_vteimc, + [PVTEIMC7] = igb_set_vteimc, + [PVTEIAC0] = igb_set_vteiac, + [PVTEIAC1] = igb_set_vteiac, + [PVTEIAC2] = igb_set_vteiac, + [PVTEIAC3] = igb_set_vteiac, + [PVTEIAC4] = igb_set_vteiac, + [PVTEIAC5] = igb_set_vteiac, + [PVTEIAC6] = igb_set_vteiac, + [PVTEIAC7] = igb_set_vteiac, + [PVTEIAM0] = igb_set_vteiam, + [PVTEIAM1] = igb_set_vteiam, + [PVTEIAM2] = igb_set_vteiam, + [PVTEIAM3] = igb_set_vteiam, + [PVTEIAM4] = igb_set_vteiam, + [PVTEIAM5] = igb_set_vteiam, + [PVTEIAM6] = igb_set_vteiam, + [PVTEIAM7] = igb_set_vteiam, + [PVTEICR0] = igb_set_vteicr, + [PVTEICR1] = igb_set_vteicr, + [PVTEICR2] = igb_set_vteicr, + [PVTEICR3] = igb_set_vteicr, + [PVTEICR4] = igb_set_vteicr, + [PVTEICR5] = igb_set_vteicr, + [PVTEICR6] = igb_set_vteicr, + [PVTEICR7] = igb_set_vteicr, + [VTIVAR ... VTIVAR + 7] = igb_set_vtivar, + [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_writereg +}; +enum { IGB_NWRITEOPS = ARRAY_SIZE(igb_macreg_writeops) }; + +enum { MAC_ACCESS_PARTIAL = 1 }; + +/* + * The array below combines alias offsets of the index values for the + * MAC registers that have aliases, with the indication of not fully + * implemented registers (lowest bit). This combination is possible + * because all of the offsets are even. + */ +static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = { + /* Alias index offsets */ + [FCRTL_A] = 0x07fe, + [RDFH_A] = 0xe904, [RDFT_A] = 0xe904, + [TDFH_A] = 0xed00, [TDFT_A] = 0xed00, + [RA_A ... RA_A + 31] = 0x14f0, + [VFTA_A ... 
VFTA_A + E1000_VLAN_FILTER_TBL_SIZE - 1] = 0x1400, + + [RDBAL0_A] = 0x2600, + [RDBAH0_A] = 0x2600, + [RDLEN0_A] = 0x2600, + [SRRCTL0_A] = 0x2600, + [RDH0_A] = 0x2600, + [RDT0_A] = 0x2600, + [RXDCTL0_A] = 0x2600, + [RXCTL0_A] = 0x2600, + [RQDPC0_A] = 0x2600, + [RDBAL1_A] = 0x25D0, + [RDBAL2_A] = 0x25A0, + [RDBAL3_A] = 0x2570, + [RDBAH1_A] = 0x25D0, + [RDBAH2_A] = 0x25A0, + [RDBAH3_A] = 0x2570, + [RDLEN1_A] = 0x25D0, + [RDLEN2_A] = 0x25A0, + [RDLEN3_A] = 0x2570, + [SRRCTL1_A] = 0x25D0, + [SRRCTL2_A] = 0x25A0, + [SRRCTL3_A] = 0x2570, + [RDH1_A] = 0x25D0, + [RDH2_A] = 0x25A0, + [RDH3_A] = 0x2570, + [RDT1_A] = 0x25D0, + [RDT2_A] = 0x25A0, + [RDT3_A] = 0x2570, + [RXDCTL1_A] = 0x25D0, + [RXDCTL2_A] = 0x25A0, + [RXDCTL3_A] = 0x2570, + [RXCTL1_A] = 0x25D0, + [RXCTL2_A] = 0x25A0, + [RXCTL3_A] = 0x2570, + [RQDPC1_A] = 0x25D0, + [RQDPC2_A] = 0x25A0, + [RQDPC3_A] = 0x2570, + [TDBAL0_A] = 0x2A00, + [TDBAH0_A] = 0x2A00, + [TDLEN0_A] = 0x2A00, + [TDH0_A] = 0x2A00, + [TDT0_A] = 0x2A00, + [TXCTL0_A] = 0x2A00, + [TDWBAL0_A] = 0x2A00, + [TDWBAH0_A] = 0x2A00, + [TDBAL1_A] = 0x29D0, + [TDBAL2_A] = 0x29A0, + [TDBAL3_A] = 0x2970, + [TDBAH1_A] = 0x29D0, + [TDBAH2_A] = 0x29A0, + [TDBAH3_A] = 0x2970, + [TDLEN1_A] = 0x29D0, + [TDLEN2_A] = 0x29A0, + [TDLEN3_A] = 0x2970, + [TDH1_A] = 0x29D0, + [TDH2_A] = 0x29A0, + [TDH3_A] = 0x2970, + [TDT1_A] = 0x29D0, + [TDT2_A] = 0x29A0, + [TDT3_A] = 0x2970, + [TXDCTL0_A] = 0x2A00, + [TXDCTL1_A] = 0x29D0, + [TXDCTL2_A] = 0x29A0, + [TXDCTL3_A] = 0x2970, + [TXCTL1_A] = 0x29D0, + [TXCTL2_A] = 0x29A0, + [TXCTL3_A] = 0x29D0, + [TDWBAL1_A] = 0x29D0, + [TDWBAL2_A] = 0x29A0, + [TDWBAL3_A] = 0x2970, + [TDWBAH1_A] = 0x29D0, + [TDWBAH2_A] = 0x29A0, + [TDWBAH3_A] = 0x2970, + + /* Access options */ + [RDFH] = MAC_ACCESS_PARTIAL, [RDFT] = MAC_ACCESS_PARTIAL, + [RDFHS] = MAC_ACCESS_PARTIAL, [RDFTS] = MAC_ACCESS_PARTIAL, + [RDFPC] = MAC_ACCESS_PARTIAL, + [TDFH] = MAC_ACCESS_PARTIAL, [TDFT] = MAC_ACCESS_PARTIAL, + [TDFHS] = MAC_ACCESS_PARTIAL, [TDFTS] = MAC_ACCESS_PARTIAL, + [TDFPC] = MAC_ACCESS_PARTIAL, [EECD] = MAC_ACCESS_PARTIAL, + [FLA] = MAC_ACCESS_PARTIAL, + [FCAL] = MAC_ACCESS_PARTIAL, [FCAH] = MAC_ACCESS_PARTIAL, + [FCT] = MAC_ACCESS_PARTIAL, [FCTTV] = MAC_ACCESS_PARTIAL, + [FCRTV] = MAC_ACCESS_PARTIAL, [FCRTL] = MAC_ACCESS_PARTIAL, + [FCRTH] = MAC_ACCESS_PARTIAL, + [MAVTV0 ... 
MAVTV3] = MAC_ACCESS_PARTIAL +}; + +void +igb_core_write(IGBCore *core, hwaddr addr, uint64_t val, unsigned size) +{ + uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr); + + if (index < IGB_NWRITEOPS && igb_macreg_writeops[index]) { + if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) { + trace_e1000e_wrn_regs_write_trivial(index << 2); + } + trace_e1000e_core_write(index << 2, size, val); + igb_macreg_writeops[index](core, index, val); + } else if (index < IGB_NREADOPS && igb_macreg_readops[index]) { + trace_e1000e_wrn_regs_write_ro(index << 2, size, val); + } else { + trace_e1000e_wrn_regs_write_unknown(index << 2, size, val); + } +} + +uint64_t +igb_core_read(IGBCore *core, hwaddr addr, unsigned size) +{ + uint64_t val; + uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr); + + if (index < IGB_NREADOPS && igb_macreg_readops[index]) { + if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) { + trace_e1000e_wrn_regs_read_trivial(index << 2); + } + val = igb_macreg_readops[index](core, index); + trace_e1000e_core_read(index << 2, size, val); + return val; + } else { + trace_e1000e_wrn_regs_read_unknown(index << 2, size); + } + return 0; +} + +static inline void +igb_autoneg_pause(IGBCore *core) +{ + timer_del(core->autoneg_timer); +} + +static void +igb_autoneg_resume(IGBCore *core) +{ + if (igb_have_autoneg(core) && + !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) { + qemu_get_queue(core->owner_nic)->link_down = false; + timer_mod(core->autoneg_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500); + } +} + +static void +igb_vm_state_change(void *opaque, bool running, RunState state) +{ + IGBCore *core = opaque; + + if (running) { + trace_e1000e_vm_state_running(); + igb_intrmgr_resume(core); + igb_autoneg_resume(core); + } else { + trace_e1000e_vm_state_stopped(); + igb_autoneg_pause(core); + igb_intrmgr_pause(core); + } +} + +void +igb_core_pci_realize(IGBCore *core, + const uint16_t *eeprom_templ, + uint32_t eeprom_size, + const uint8_t *macaddr) +{ + int i; + + core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, + igb_autoneg_timer, core); + igb_intrmgr_pci_realize(core); + + core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core); + + for (i = 0; i < IGB_NUM_QUEUES; i++) { + net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS); + } + + net_rx_pkt_init(&core->rx_pkt); + + e1000x_core_prepare_eeprom(core->eeprom, + eeprom_templ, + eeprom_size, + PCI_DEVICE_GET_CLASS(core->owner)->device_id, + macaddr); + igb_update_rx_offloads(core); +} + +void +igb_core_pci_uninit(IGBCore *core) +{ + int i; + + timer_free(core->autoneg_timer); + + igb_intrmgr_pci_unint(core); + + qemu_del_vm_change_state_handler(core->vmstate); + + for (i = 0; i < IGB_NUM_QUEUES; i++) { + net_tx_pkt_uninit(core->tx[i].tx_pkt); + } + + net_rx_pkt_uninit(core->rx_pkt); +} + +static const uint16_t +igb_phy_reg_init[] = { + [MII_BMCR] = MII_BMCR_SPEED1000 | + MII_BMCR_FD | + MII_BMCR_AUTOEN, + + [MII_BMSR] = MII_BMSR_EXTCAP | + MII_BMSR_LINK_ST | + MII_BMSR_AUTONEG | + MII_BMSR_MFPS | + MII_BMSR_EXTSTAT | + MII_BMSR_10T_HD | + MII_BMSR_10T_FD | + MII_BMSR_100TX_HD | + MII_BMSR_100TX_FD, + + [MII_PHYID1] = IGP03E1000_E_PHY_ID >> 16, + [MII_PHYID2] = (IGP03E1000_E_PHY_ID & 0xfff0) | 1, + [MII_ANAR] = MII_ANAR_CSMACD | MII_ANAR_10 | + MII_ANAR_10FD | MII_ANAR_TX | + MII_ANAR_TXFD | MII_ANAR_PAUSE | + MII_ANAR_PAUSE_ASYM, + [MII_ANLPAR] = MII_ANLPAR_10 | MII_ANLPAR_10FD | + MII_ANLPAR_TX | MII_ANLPAR_TXFD | + MII_ANLPAR_T4 | MII_ANLPAR_PAUSE, + [MII_ANER] = MII_ANER_NP | 
MII_ANER_NWAY, + [MII_ANNP] = 0x1 | MII_ANNP_MP, + [MII_CTRL1000] = MII_CTRL1000_HALF | MII_CTRL1000_FULL | + MII_CTRL1000_PORT | MII_CTRL1000_MASTER, + [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL | + MII_STAT1000_ROK | MII_STAT1000_LOK, + [MII_EXTSTAT] = MII_EXTSTAT_1000T_HD | MII_EXTSTAT_1000T_FD, + + [IGP01E1000_PHY_PORT_CONFIG] = BIT(5) | BIT(8), + [IGP01E1000_PHY_PORT_STATUS] = IGP01E1000_PSSR_SPEED_1000MBPS, + [IGP02E1000_PHY_POWER_MGMT] = BIT(0) | BIT(3) | IGP02E1000_PM_D3_LPLU | + IGP01E1000_PSCFR_SMART_SPEED +}; + +static const uint32_t igb_mac_reg_init[] = { + [LEDCTL] = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24), + [EEMNGCTL] = BIT(31), + [TXDCTL0] = E1000_TXDCTL_QUEUE_ENABLE, + [RXDCTL0] = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16), + [RXDCTL1] = 1 << 16, + [RXDCTL2] = 1 << 16, + [RXDCTL3] = 1 << 16, + [RXDCTL4] = 1 << 16, + [RXDCTL5] = 1 << 16, + [RXDCTL6] = 1 << 16, + [RXDCTL7] = 1 << 16, + [RXDCTL8] = 1 << 16, + [RXDCTL9] = 1 << 16, + [RXDCTL10] = 1 << 16, + [RXDCTL11] = 1 << 16, + [RXDCTL12] = 1 << 16, + [RXDCTL13] = 1 << 16, + [RXDCTL14] = 1 << 16, + [RXDCTL15] = 1 << 16, + [TIPG] = 0x08 | (0x04 << 10) | (0x06 << 20), + [CTRL] = E1000_CTRL_FD | E1000_CTRL_LRST | E1000_CTRL_SPD_1000 | + E1000_CTRL_ADVD3WUC, + [STATUS] = E1000_STATUS_PHYRA | BIT(31), + [EECD] = E1000_EECD_FWE_DIS | E1000_EECD_PRES | + (2 << E1000_EECD_SIZE_EX_SHIFT), + [GCR] = E1000_L0S_ADJUST | + E1000_GCR_CMPL_TMOUT_RESEND | + E1000_GCR_CAP_VER2 | + E1000_L1_ENTRY_LATENCY_MSB | + E1000_L1_ENTRY_LATENCY_LSB, + [RXCSUM] = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD, + [TXPBS] = 0x28, + [RXPBS] = 0x40, + [TCTL] = E1000_TCTL_PSP | (0xF << E1000_CT_SHIFT) | + (0x40 << E1000_COLD_SHIFT) | (0x1 << 26) | (0xA << 28), + [TCTL_EXT] = 0x40 | (0x42 << 10), + [DTXCTL] = E1000_DTXCTL_8023LL | E1000_DTXCTL_SPOOF_INT, + [VET] = ETH_P_VLAN | (ETH_P_VLAN << 16), + + [V2PMAILBOX0 ... V2PMAILBOX0 + IGB_MAX_VF_FUNCTIONS - 1] = E1000_V2PMAILBOX_RSTI, + [MBVFIMR] = 0xFF, + [VFRE] = 0xFF, + [VFTE] = 0xFF, + [VMOLR0 ... 
VMOLR0 + 7] = 0x2600 | E1000_VMOLR_STRCRC, + [RPLOLR] = E1000_RPLOLR_STRCRC, + [RLPML] = 0x2600, + [TXCTL0] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL1] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL2] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL3] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL4] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL5] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL6] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL7] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL8] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL9] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL10] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL11] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL12] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL13] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL14] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL15] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, +}; + +static void igb_reset(IGBCore *core, bool sw) +{ + struct igb_tx *tx; + int i; + + timer_del(core->autoneg_timer); + + igb_intrmgr_reset(core); + + memset(core->phy, 0, sizeof core->phy); + memcpy(core->phy, igb_phy_reg_init, sizeof igb_phy_reg_init); + + for (i = 0; i < E1000E_MAC_SIZE; i++) { + if (sw && + (i == RXPBS || i == TXPBS || + (i >= EITR0 && i < EITR0 + IGB_INTR_NUM))) { + continue; + } + + core->mac[i] = i < ARRAY_SIZE(igb_mac_reg_init) ? + igb_mac_reg_init[i] : 0; + } + + if (qemu_get_queue(core->owner_nic)->link_down) { + igb_link_down(core); + } + + e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac); + + for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) { + /* Set RSTI, so VF can identify a PF reset is in progress */ + core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTI; + } + + for (i = 0; i < ARRAY_SIZE(core->tx); i++) { + tx = &core->tx[i]; + memset(tx->ctx, 0, sizeof(tx->ctx)); + tx->first = true; + tx->skip_cp = false; + } +} + +void +igb_core_reset(IGBCore *core) +{ + igb_reset(core, false); +} + +void igb_core_pre_save(IGBCore *core) +{ + int i; + NetClientState *nc = qemu_get_queue(core->owner_nic); + + /* + * If link is down and auto-negotiation is supported and ongoing, + * complete auto-negotiation immediately. This allows us to look + * at MII_BMSR_AN_COMP to infer link status on load. 
+ */ + if (nc->link_down && igb_have_autoneg(core)) { + core->phy[MII_BMSR] |= MII_BMSR_AN_COMP; + igb_update_flowctl_status(core); + } + + for (i = 0; i < ARRAY_SIZE(core->tx); i++) { + if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) { + core->tx[i].skip_cp = true; + } + } +} + +int +igb_core_post_load(IGBCore *core) +{ + NetClientState *nc = qemu_get_queue(core->owner_nic); + + /* + * nc.link_down can't be migrated, so infer link_down according + * to link status bit in core.mac[STATUS]. + */ + nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0; + + return 0; +} diff --git a/hw/net/igb_core.h b/hw/net/igb_core.h new file mode 100644 index 0000000000..9cbbfd516b --- /dev/null +++ b/hw/net/igb_core.h @@ -0,0 +1,145 @@ +/* + * Core code for QEMU igb emulation + * + * Datasheet: + * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf + * + * Copyright (c) 2020-2023 Red Hat, Inc. + * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Akihiko Odaki + * Gal Hammer + * Marcel Apfelbaum + * Dmitry Fleytman + * Leonid Bloch + * Yan Vugenfirer + * + * Based on work done by: + * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. + * Copyright (c) 2008 Qumranet + * Based on work done by: + * Copyright (c) 2007 Dan Aloni + * Copyright (c) 2004 Antony T Curtis + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#ifndef HW_NET_IGB_CORE_H +#define HW_NET_IGB_CORE_H + +#define E1000E_MAC_SIZE (0x8000) +#define IGB_EEPROM_SIZE (1024) + +#define IGB_INTR_NUM (25) +#define IGB_MSIX_VEC_NUM (10) +#define IGBVF_MSIX_VEC_NUM (3) +#define IGB_NUM_QUEUES (16) +#define IGB_NUM_VM_POOLS (8) + +typedef struct IGBCore IGBCore; + +enum { PHY_R = BIT(0), + PHY_W = BIT(1), + PHY_RW = PHY_R | PHY_W }; + +typedef struct IGBIntrDelayTimer_st { + QEMUTimer *timer; + bool running; + uint32_t delay_reg; + uint32_t delay_resolution_ns; + IGBCore *core; +} IGBIntrDelayTimer; + +struct IGBCore { + uint32_t mac[E1000E_MAC_SIZE]; + uint16_t phy[MAX_PHY_REG_ADDRESS + 1]; + uint16_t eeprom[IGB_EEPROM_SIZE]; + + uint8_t rx_desc_len; + + QEMUTimer *autoneg_timer; + + struct igb_tx { + struct e1000_adv_tx_context_desc ctx[2]; + uint32_t first_cmd_type_len; + uint32_t first_olinfo_status; + + bool first; + bool skip_cp; + + struct NetTxPkt *tx_pkt; + } tx[IGB_NUM_QUEUES]; + + struct NetRxPkt *rx_pkt; + + bool has_vnet; + int max_queue_num; + + IGBIntrDelayTimer eitr[IGB_INTR_NUM]; + + VMChangeStateEntry *vmstate; + + uint32_t eitr_guest_value[IGB_INTR_NUM]; + + uint8_t permanent_mac[ETH_ALEN]; + + NICState *owner_nic; + PCIDevice *owner; + void (*owner_start_recv)(PCIDevice *d); + + int64_t timadj; +}; + +void +igb_core_write(IGBCore *core, hwaddr addr, uint64_t val, unsigned size); + +uint64_t +igb_core_read(IGBCore *core, hwaddr addr, unsigned size); + +void +igb_core_pci_realize(IGBCore *regs, + const uint16_t *eeprom_templ, + uint32_t eeprom_size, + const uint8_t *macaddr); + +void +igb_core_reset(IGBCore *core); + +void +igb_core_pre_save(IGBCore *core); + +int +igb_core_post_load(IGBCore *core); + +void +igb_core_set_link_status(IGBCore *core); + +void +igb_core_pci_uninit(IGBCore *core); + +bool +igb_can_receive(IGBCore *core); + +ssize_t +igb_receive(IGBCore *core, const uint8_t *buf, size_t size); + +ssize_t +igb_receive_iov(IGBCore *core, const struct iovec *iov, int iovcnt); + +void +igb_start_recv(IGBCore *core); + +#endif diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h new file mode 100644 index 0000000000..82ff195dfc --- /dev/null +++ b/hw/net/igb_regs.h @@ -0,0 +1,711 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is copied + edited from kernel header files in + * drivers/net/ethernet/intel/igb + */ + +#ifndef HW_IGB_REGS_H_ +#define HW_IGB_REGS_H_ + +#include "e1000x_regs.h" + +/* from igb/e1000_hw.h */ + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D + +/* Context Descriptor */ +struct e1000_adv_tx_context_desc { + uint32_t vlan_macip_lens; + uint32_t seqnum_seed; + uint32_t type_tucmd_mlhl; + uint32_t mss_l4len_idx; +}; + +/* Advanced Transmit Descriptor */ +union e1000_adv_tx_desc { + struct { + uint64_t buffer_addr; /* Address of descriptor's data buffer */ + uint32_t cmd_type_len; + uint32_t olinfo_status; + } read; + struct { + uint64_t rsvd; /* Reserved */ + uint32_t nxtseq_seed; + uint32_t status; + } wb; +}; + +#define E1000_ADVTXD_POTS_IXSM 0x00000100 /* Insert TCP/UDP Checksum */ +#define E1000_ADVTXD_POTS_TXSM 0x00000200 /* Insert TCP/UDP Checksum */ + +#define E1000_TXD_POPTS_IXSM 0x00000001 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x00000002 /* Insert TCP/UDP checksum */ + 
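As an illustration of the advanced transmit descriptor layout above, a guest driver would fill a single-buffer data descriptor roughly as in the following sketch; it is not part of the patch, fill_adv_tx_desc and its arguments are hypothetical names, cpu_to_le32()/cpu_to_le64() are assumed from QEMU's "qemu/bswap.h", and the E1000_ADVTXD_* masks it uses are defined further down in this header.

/* Minimal sketch (hypothetical): build a one-buffer advanced TX data
 * descriptor. "len" is the buffer length in bytes and occupies the
 * low 16 bits (DTALEN) of cmd_type_len. */
static void fill_adv_tx_desc(union e1000_adv_tx_desc *d,
                             uint64_t dma_addr, uint16_t len)
{
    d->read.buffer_addr = cpu_to_le64(dma_addr);
    /* Advanced data descriptor; insert FCS, report status, end of packet. */
    d->read.cmd_type_len = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
                                       E1000_ADVTXD_DCMD_DEXT |
                                       E1000_ADVTXD_DCMD_IFCS |
                                       E1000_ADVTXD_DCMD_RS |
                                       E1000_ADVTXD_DCMD_EOP |
                                       len);
    /* Total payload length goes in the upper bits of olinfo_status,
     * together with a request to insert the TCP/UDP checksum. */
    d->read.olinfo_status = cpu_to_le32(((uint32_t)len <<
                                         E1000_ADVTXD_PAYLEN_SHIFT) |
                                        E1000_ADVTXD_POTS_TXSM);
}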
+/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + uint64_t pkt_addr; /* Packet Buffer Address */ + uint64_t hdr_addr; /* Header Buffer Address */ + } read; + struct { + struct { + struct { + uint16_t pkt_info; /* RSS Type, Packet Type */ + uint16_t hdr_info; /* Split Head, Buffer Length */ + } lo_dword; + union { + uint32_t rss; /* RSS Hash */ + struct { + uint16_t ip_id; /* IP Id */ + uint16_t csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + uint32_t status_error; /* Ext Status/Error */ + uint16_t length; /* Packet Length */ + uint16_t vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* from igb/e1000_phy.h */ + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +/* Enable flexible speed on link-up */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +/* from igb/igb.h */ + +#define E1000_PCS_CFG_IGN_SD 1 + +/* Interrupt defines */ +#define IGB_START_ITR 648 /* ~6000 ints/sec */ +#define IGB_4K_ITR 980 +#define IGB_20K_ITR 196 +#define IGB_70K_ITR 56 + +/* TX/RX descriptor defines */ +#define IGB_DEFAULT_TXD 256 +#define IGB_DEFAULT_TX_WORK 128 +#define IGB_MIN_TXD 80 +#define IGB_MAX_TXD 4096 + +#define IGB_DEFAULT_RXD 256 +#define IGB_MIN_RXD 80 +#define IGB_MAX_RXD 4096 + +#define IGB_DEFAULT_ITR 3 /* dynamic */ +#define IGB_MAX_ITR_USECS 10000 +#define IGB_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_Q_VECTORS 8 +#define MAX_MSIX_ENTRIES 10 + +/* Transmit and receive queues */ +#define IGB_MAX_RX_QUEUES 8 +#define IGB_MAX_RX_QUEUES_82575 4 +#define IGB_MAX_RX_QUEUES_I211 2 +#define IGB_MAX_TX_QUEUES 8 +#define IGB_MAX_VF_MC_ENTRIES 30 +#define IGB_MAX_VF_FUNCTIONS 8 +#define IGB_MAX_VFTA_ENTRIES 128 +#define IGB_82576_VF_DEV_ID 0x10CA +#define IGB_I350_VF_DEV_ID 0x1520 + +/* VLAN info */ +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 + +/* from igb/e1000_82575.h */ + +#define E1000_MRQC_ENABLE_RSS_MQ 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_ENABLE_VMDQ_RSS_MQ 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define 
E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +/* Adv ctxt IPSec ESP len mask */ + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ +#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ + +/* Additional DCA related definitions, note change in position of CPUID */ +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE BIT(26) +#define E1000_ETQF_1588 BIT(30) +#define E1000_ETQF_IMM_INT BIT(29) +#define E1000_ETQF_QUEUE_ENABLE BIT(31) +#define E1000_ETQF_QUEUE_SHIFT 16 +#define E1000_ETQF_QUEUE_MASK 0x00070000 +#define E1000_ETQF_ETYPE_MASK 0x0000FFFF + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 
+#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC BIT(28) +#define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29) +#define E1000_VT_CTL_VM_REPL_EN BIT(30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */ +#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +/* from igb/e1000_defines.h */ + +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 + +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +#define E1000_RAH_POOL_MASK 0x03FC0000 +#define E1000_RAH_POOL_1 0x00040000 + +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +/* LAN connected device generates an interrupt */ +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define 
E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + +/* Extended Interrupt Cause Set */ +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestamping */ + +/* PCI Express Control */ +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +#define IGP03E1000_E_PHY_ID 0x02A80390 + +/* from igb/e1000_mbox.h */ + +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +#define E1000_V2PMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* + * If it's an E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +/* VF requests to clear all unicast MAC filters */ +#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT) +/* VF requests to add unicast MAC filter */ +#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /* VF requests to clear VMOLR.ROPE/MPME */ +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +/* from igb/e1000_regs.h */ + +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. 
Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable; RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation Register - RW */ +#define E1000_IVAR_MISC 0x01740 /* Interrupt Vector Allocation Register (last) - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ + +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) +#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) +#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) +#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) +#define E1000_SAQF0 E1000_SAQF(0) +#define E1000_DAQF0 E1000_DAQF(0) +#define E1000_SPQF0 E1000_SPQF(0) +#define E1000_FTQF0 E1000_FTQF(0) +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) + +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ + +#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ +#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ + +/* VT Registers */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +/* These act per VF so an array friendly macro is used */ +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n))) +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */ +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) + +/* from igbvf/defines.h */ + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +/* from igbvf/mbox.h */ + +#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define E1000_V2PMAILBOX_VFU 0x00000004 
/* VF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* + * If it's an E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 + +/* We have a total wait time of 1s for vf mailbox posted messages */ +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mbx timeout */ +#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ + +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +/* VF requests PF to clear all unicast MAC filters */ +#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT) +/* VF requests PF to add unicast MAC filter */ +#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +/* from igbvf/regs.h */ + +/* Statistics registers */ +#define E1000_VFGPRC 0x00F10 +#define E1000_VFGORC 0x00F18 +#define E1000_VFMPRC 0x00F3C +#define E1000_VFGPTC 0x00F14 +#define E1000_VFGOTC 0x00F34 +#define E1000_VFGOTLBC 0x00F50 +#define E1000_VFGPTLBC 0x00F44 +#define E1000_VFGORLBC 0x00F48 +#define E1000_VFGPRLBC 0x00F40 + +/* These act per VF so an array friendly macro is used */ +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) + +/* from igbvf/vf.h */ + +#define E1000_DEV_ID_82576_VF 0x10CA + +/* new */ + +/* Receive Registers */ + +/* RX Descriptor Base Low; RW */ +#define E1000_RDBAL(_n) (0x0C000 + (0x40 * (_n))) +#define E1000_RDBAL_A(_n) (0x02800 + (0x100 * (_n))) + +/* RX Descriptor Base High; RW */ +#define E1000_RDBAH(_n) (0x0C004 + (0x40 * (_n))) +#define E1000_RDBAH_A(_n) (0x02804 + (0x100 * (_n))) + +/* RX Descriptor Ring Length; RW */ +#define E1000_RDLEN(_n) (0x0C008 + (0x40 * (_n))) +#define E1000_RDLEN_A(_n) (0x02808 + (0x100 * (_n))) + +/* Split and Replication Receive Control; RW */ +#define E1000_SRRCTL(_n) (0x0C00C + (0x40 * (_n))) +#define E1000_SRRCTL_A(_n) (0x0280C + (0x100 * (_n))) + +/* RX Descriptor Head; RW */ +#define E1000_RDH(_n) (0x0C010 + (0x40 * (_n))) +#define E1000_RDH_A(_n) (0x02810 + (0x100 * (_n))) + +/* RX DCA Control; RW */ +#define E1000_RXCTL(_n) (0x0C014 + (0x40 * (_n))) +#define E1000_RXCTL_A(_n) (0x02814 + (0x100 * (_n))) + +/* RX Descriptor Tail; RW */ +#define E1000_RDT(_n) (0x0C018 
+ (0x40 * (_n))) +#define E1000_RDT_A(_n) (0x02818 + (0x100 * (_n))) + +/* RX Descriptor Control; RW */ +#define E1000_RXDCTL(_n) (0x0C028 + (0x40 * (_n))) +#define E1000_RXDCTL_A(_n) (0x02828 + (0x100 * (_n))) + +/* RX Queue Drop Packet Count; RC */ +#define E1000_RQDPC_A(_n) (0x02830 + (0x100 * (_n))) + +/* Transmit Registers */ + +/* TX Descriptor Base Low; RW */ +#define E1000_TDBAL(_n) (0x0E000 + (0x40 * (_n))) +#define E1000_TDBAL_A(_n) (0x03800 + (0x100 * (_n))) + +/* TX Descriptor Base High; RW */ +#define E1000_TDBAH(_n) (0x0E004 + (0x40 * (_n))) +#define E1000_TDBAH_A(_n) (0x03804 + (0x100 * (_n))) + +/* TX Descriptor Ring Length; RW */ +#define E1000_TDLEN(_n) (0x0E008 + (0x40 * (_n))) +#define E1000_TDLEN_A(_n) (0x03808 + (0x100 * (_n))) + +/* TX Descriptor Head; RW */ +#define E1000_TDH(_n) (0x0E010 + (0x40 * (_n))) +#define E1000_TDH_A(_n) (0x03810 + (0x100 * (_n))) + +/* TX DCA Control; RW */ +#define E1000_TXCTL(_n) (0x0E014 + (0x40 * (_n))) +#define E1000_TXCTL_A(_n) (0x03814 + (0x100 * (_n))) + +/* TX Descriptor Tail; RW */ +#define E1000_TDT(_n) (0x0E018 + (0x40 * (_n))) +#define E1000_TDT_A(_n) (0x03818 + (0x100 * (_n))) + +/* TX Descriptor Control; RW */ +#define E1000_TXDCTL(_n) (0x0E028 + (0x40 * (_n))) +#define E1000_TXDCTL_A(_n) (0x03828 + (0x100 * (_n))) + +/* TX Descriptor Completion Write-Back Address Low; RW */ +#define E1000_TDWBAL(_n) (0x0E038 + (0x40 * (_n))) +#define E1000_TDWBAL_A(_n) (0x03838 + (0x100 * (_n))) + +/* TX Descriptor Completion Write-Back Address High; RW */ +#define E1000_TDWBAH(_n) (0x0E03C + (0x40 * (_n))) +#define E1000_TDWBAH_A(_n) (0x0383C + (0x100 * (_n))) + +#define E1000_MTA_A 0x0200 + +#define E1000_XDBAL_MASK (~(BIT(5) - 1)) /* TDBAL and RDBAL Registers Mask */ + +#define E1000_ICR_MACSEC 0x00000020 /* MACSec */ +#define E1000_ICR_RX0 0x00000040 /* Receiver Overrun */ +#define E1000_ICR_GPI_SDP0 0x00000800 /* General Purpose, SDP0 pin */ +#define E1000_ICR_GPI_SDP1 0x00001000 /* General Purpose, SDP1 pin */ +#define E1000_ICR_GPI_SDP2 0x00002000 /* General Purpose, SDP2 pin */ +#define E1000_ICR_GPI_SDP3 0x00004000 /* General Purpose, SDP3 pin */ +#define E1000_ICR_PTRAP 0x00008000 /* Probe Trap */ +#define E1000_ICR_MNG 0x00040000 /* Management Event */ +#define E1000_ICR_OMED 0x00100000 /* Other Media Energy Detected */ +#define E1000_ICR_FER 0x00400000 /* Fatal Error */ +#define E1000_ICR_NFER 0x00800000 /* Non Fatal Error */ +#define E1000_ICR_CSRTO 0x01000000 /* CSR access Time Out Indication */ +#define E1000_ICR_SCE 0x02000000 /* Storm Control Event */ +#define E1000_ICR_SW_WD 0x04000000 /* Software Watchdog */ + +/* Extended Interrupts */ + +#define E1000_EICR_MSIX_MASK 0x01FFFFFF /* Bits used in MSI-X mode */ +#define E1000_EICR_LEGACY_MASK 0x4000FFFF /* Bits used in non MSI-X mode */ + +/* Mirror VF Control (only RST bit); RW */ +#define E1000_PVTCTRL(_n) (0x10000 + (_n) * 0x100) + +/* Mirror Good Packets Received Count; RO */ +#define E1000_PVFGPRC(_n) (0x10010 + (_n) * 0x100) + +/* Mirror Good Packets Transmitted Count; RO */ +#define E1000_PVFGPTC(_n) (0x10014 + (_n) * 0x100) + +/* Mirror Good Octets Received Count; RO */ +#define E1000_PVFGORC(_n) (0x10018 + (_n) * 0x100) + +/* Mirror Extended Interrupt Cause Set; WO */ +#define E1000_PVTEICS(_n) (0x10020 + (_n) * 0x100) + +/* Mirror Extended Interrupt Mask Set/Read; RW */ +#define E1000_PVTEIMS(_n) (0x10024 + (_n) * 0x100) + +/* Mirror Extended Interrupt Mask Clear; WO */ +#define E1000_PVTEIMC(_n) (0x10028 + (_n) * 0x100) + +/* Mirror Extended Interrupt Auto Clear; 
RW */ +#define E1000_PVTEIAC(_n) (0x1002C + (_n) * 0x100) + +/* Mirror Extended Interrupt Auto Mask Enable; RW */ +#define E1000_PVTEIAM(_n) (0x10030 + (_n) * 0x100) + +/* Mirror Good Octets Transmitted Count; RO */ +#define E1000_PVFGOTC(_n) (0x10034 + (_n) * 0x100) + +/* Mirror Multicast Packets Received Count; RO */ +#define E1000_PVFMPRC(_n) (0x1003C + (_n) * 0x100) + +/* Mirror Good RX Packets loopback Count; RO */ +#define E1000_PVFGPRLBC(_n) (0x10040 + (_n) * 0x100) + +/* Mirror Good TX packets loopback Count; RO */ +#define E1000_PVFGPTLBC(_n) (0x10044 + (_n) * 0x100) + +/* Mirror Good RX Octets loopback Count; RO */ +#define E1000_PVFGORLBC(_n) (0x10048 + (_n) * 0x100) + +/* Mirror Good TX Octets loopback Count; RO */ +#define E1000_PVFGOTLBC(_n) (0x10050 + (_n) * 0x100) + +/* Mirror Extended Interrupt Cause Read; RC/W1C */ +#define E1000_PVTEICR(_n) (0x10080 + (_n) * 0x100) + +/* + * These are fake addresses that, according to the specification, the device + * is not using. They are used to distinguish between the PF and the VFs + * accessing their VTIVAR register (which is the same address, 0x1700) + */ +#define E1000_VTIVAR 0x11700 +#define E1000_VTIVAR_MISC 0x11720 + +#define E1000_RSS_QUEUE(reta, hash) (E1000_RETA_VAL(reta, hash) & 0x0F) + +#define E1000_MRQ_RSS_TYPE_IPV4UDP 7 +#define E1000_MRQ_RSS_TYPE_IPV6UDP 8 + +#define E1000_STATUS_IOV_MODE 0x00040000 + +#define E1000_STATUS_NUM_VFS_SHIFT 14 + +#define E1000_ADVRXD_PKT_IP4 BIT(4) +#define E1000_ADVRXD_PKT_IP6 BIT(6) +#define E1000_ADVRXD_PKT_TCP BIT(8) +#define E1000_ADVRXD_PKT_UDP BIT(9) +#define E1000_ADVRXD_PKT_SCTP BIT(10) + +static inline uint8_t igb_ivar_entry_rx(uint8_t i) +{ + return i < 8 ? i * 4 : (i - 8) * 4 + 2; +} + +static inline uint8_t igb_ivar_entry_tx(uint8_t i) +{ + return i < 8 ? i * 4 + 1 : (i - 8) * 4 + 3; +} + +#endif diff --git a/hw/net/igbvf.c b/hw/net/igbvf.c new file mode 100644 index 0000000000..284ea61184 --- /dev/null +++ b/hw/net/igbvf.c @@ -0,0 +1,320 @@ +/* + * QEMU Intel 82576 SR/IOV Ethernet Controller Emulation + * + * Datasheet: + * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf + * + * Copyright (c) 2020-2023 Red Hat, Inc. + * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Akihiko Odaki + * Gal Hammer + * Marcel Apfelbaum + * Dmitry Fleytman + * Leonid Bloch + * Yan Vugenfirer + * + * Based on work done by: + * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. + * Copyright (c) 2008 Qumranet + * Based on work done by: + * Copyright (c) 2007 Dan Aloni + * Copyright (c) 2004 Antony T Curtis + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "hw/net/mii.h" +#include "hw/pci/pci_device.h" +#include "hw/pci/pcie.h" +#include "hw/pci/msix.h" +#include "net/eth.h" +#include "net/net.h" +#include "igb_common.h" +#include "igb_core.h" +#include "trace.h" +#include "qapi/error.h" + +OBJECT_DECLARE_SIMPLE_TYPE(IgbVfState, IGBVF) + +struct IgbVfState { + PCIDevice parent_obj; + + MemoryRegion mmio; + MemoryRegion msix; +}; + +static hwaddr vf_to_pf_addr(hwaddr addr, uint16_t vfn, bool write) +{ + switch (addr) { + case E1000_CTRL: + case E1000_CTRL_DUP: + return E1000_PVTCTRL(vfn); + case E1000_EICS: + return E1000_PVTEICS(vfn); + case E1000_EIMS: + return E1000_PVTEIMS(vfn); + case E1000_EIMC: + return E1000_PVTEIMC(vfn); + case E1000_EIAC: + return E1000_PVTEIAC(vfn); + case E1000_EIAM: + return E1000_PVTEIAM(vfn); + case E1000_EICR: + return E1000_PVTEICR(vfn); + case E1000_EITR(0): + case E1000_EITR(1): + case E1000_EITR(2): + return E1000_EITR(22) + (addr - E1000_EITR(0)) - vfn * 0xC; + case E1000_IVAR0: + return E1000_VTIVAR + vfn * 4; + case E1000_IVAR_MISC: + return E1000_VTIVAR_MISC + vfn * 4; + case 0x0F04: /* PBACL */ + return E1000_PBACLR; + case 0x0F0C: /* PSRTYPE */ + return E1000_PSRTYPE(vfn); + case E1000_V2PMAILBOX(0): + return E1000_V2PMAILBOX(vfn); + case E1000_VMBMEM(0) ... E1000_VMBMEM(0) + 0x3F: + return addr + vfn * 0x40; + case E1000_RDBAL_A(0): + return E1000_RDBAL(vfn); + case E1000_RDBAL_A(1): + return E1000_RDBAL(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_RDBAH_A(0): + return E1000_RDBAH(vfn); + case E1000_RDBAH_A(1): + return E1000_RDBAH(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_RDLEN_A(0): + return E1000_RDLEN(vfn); + case E1000_RDLEN_A(1): + return E1000_RDLEN(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_SRRCTL_A(0): + return E1000_SRRCTL(vfn); + case E1000_SRRCTL_A(1): + return E1000_SRRCTL(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_RDH_A(0): + return E1000_RDH(vfn); + case E1000_RDH_A(1): + return E1000_RDH(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_RXCTL_A(0): + return E1000_RXCTL(vfn); + case E1000_RXCTL_A(1): + return E1000_RXCTL(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_RDT_A(0): + return E1000_RDT(vfn); + case E1000_RDT_A(1): + return E1000_RDT(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_RXDCTL_A(0): + return E1000_RXDCTL(vfn); + case E1000_RXDCTL_A(1): + return E1000_RXDCTL(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_RQDPC_A(0): + return E1000_RQDPC(vfn); + case E1000_RQDPC_A(1): + return E1000_RQDPC(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_TDBAL_A(0): + return E1000_TDBAL(vfn); + case E1000_TDBAL_A(1): + return E1000_TDBAL(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_TDBAH_A(0): + return E1000_TDBAH(vfn); + case E1000_TDBAH_A(1): + return E1000_TDBAH(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_TDLEN_A(0): + return E1000_TDLEN(vfn); + case E1000_TDLEN_A(1): + return E1000_TDLEN(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_TDH_A(0): + return E1000_TDH(vfn); + case E1000_TDH_A(1): + return E1000_TDH(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_TXCTL_A(0): + return E1000_TXCTL(vfn); + case E1000_TXCTL_A(1): + return E1000_TXCTL(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_TDT_A(0): + return E1000_TDT(vfn); + case E1000_TDT_A(1): + return E1000_TDT(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_TXDCTL_A(0): + return E1000_TXDCTL(vfn); + case E1000_TXDCTL_A(1): + return E1000_TXDCTL(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_TDWBAL_A(0): + return E1000_TDWBAL(vfn); + case E1000_TDWBAL_A(1): + return E1000_TDWBAL(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_TDWBAH_A(0): + 
return E1000_TDWBAH(vfn); + case E1000_TDWBAH_A(1): + return E1000_TDWBAH(vfn + IGB_MAX_VF_FUNCTIONS); + case E1000_VFGPRC: + return E1000_PVFGPRC(vfn); + case E1000_VFGPTC: + return E1000_PVFGPTC(vfn); + case E1000_VFGORC: + return E1000_PVFGORC(vfn); + case E1000_VFGOTC: + return E1000_PVFGOTC(vfn); + case E1000_VFMPRC: + return E1000_PVFMPRC(vfn); + case E1000_VFGPRLBC: + return E1000_PVFGPRLBC(vfn); + case E1000_VFGPTLBC: + return E1000_PVFGPTLBC(vfn); + case E1000_VFGORLBC: + return E1000_PVFGORLBC(vfn); + case E1000_VFGOTLBC: + return E1000_PVFGOTLBC(vfn); + case E1000_STATUS: + case E1000_FRTIMER: + if (write) { + return HWADDR_MAX; + } + /* fallthrough */ + case 0x34E8: /* PBTWAC */ + case 0x24E8: /* PBRWAC */ + return addr; + } + + trace_igbvf_wrn_io_addr_unknown(addr); + + return HWADDR_MAX; +} + +static void igbvf_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, + int len) +{ + trace_igbvf_write_config(addr, val, len); + pci_default_write_config(dev, addr, val, len); +} + +static uint64_t igbvf_mmio_read(void *opaque, hwaddr addr, unsigned size) +{ + PCIDevice *vf = PCI_DEVICE(opaque); + PCIDevice *pf = pcie_sriov_get_pf(vf); + + addr = vf_to_pf_addr(addr, pcie_sriov_vf_number(vf), false); + return addr == HWADDR_MAX ? 0 : igb_mmio_read(pf, addr, size); +} + +static void igbvf_mmio_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + PCIDevice *vf = PCI_DEVICE(opaque); + PCIDevice *pf = pcie_sriov_get_pf(vf); + + addr = vf_to_pf_addr(addr, pcie_sriov_vf_number(vf), true); + if (addr != HWADDR_MAX) { + igb_mmio_write(pf, addr, val, size); + } +} + +static const MemoryRegionOps mmio_ops = { + .read = igbvf_mmio_read, + .write = igbvf_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void igbvf_pci_realize(PCIDevice *dev, Error **errp) +{ + IgbVfState *s = IGBVF(dev); + int ret; + int i; + + dev->config_write = igbvf_write_config; + + memory_region_init_io(&s->mmio, OBJECT(dev), &mmio_ops, s, "igbvf-mmio", + IGBVF_MMIO_SIZE); + pcie_sriov_vf_register_bar(dev, IGBVF_MMIO_BAR_IDX, &s->mmio); + + memory_region_init(&s->msix, OBJECT(dev), "igbvf-msix", IGBVF_MSIX_SIZE); + pcie_sriov_vf_register_bar(dev, IGBVF_MSIX_BAR_IDX, &s->msix); + + ret = msix_init(dev, IGBVF_MSIX_VEC_NUM, &s->msix, IGBVF_MSIX_BAR_IDX, 0, + &s->msix, IGBVF_MSIX_BAR_IDX, 0x2000, 0x70, errp); + if (ret) { + return; + } + + for (i = 0; i < IGBVF_MSIX_VEC_NUM; i++) { + msix_vector_use(dev, i); + } + + if (pcie_endpoint_cap_init(dev, 0xa0) < 0) { + hw_error("Failed to initialize PCIe capability"); + } + + if (pcie_aer_init(dev, 1, 0x100, 0x40, errp) < 0) { + hw_error("Failed to initialize AER capability"); + } + + pcie_ari_init(dev, 0x150, 1); +} + +static void igbvf_pci_uninit(PCIDevice *dev) +{ + IgbVfState *s = IGBVF(dev); + + pcie_aer_exit(dev); + pcie_cap_exit(dev); + msix_unuse_all_vectors(dev); + msix_uninit(dev, &s->msix, &s->msix); +} + +static void igbvf_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(class); + PCIDeviceClass *c = PCI_DEVICE_CLASS(class); + + c->realize = igbvf_pci_realize; + c->exit = igbvf_pci_uninit; + c->vendor_id = PCI_VENDOR_ID_INTEL; + c->device_id = E1000_DEV_ID_82576_VF; + c->revision = 1; + c->class_id = PCI_CLASS_NETWORK_ETHERNET; + + dc->desc = "Intel 82576 Virtual Function"; + dc->user_creatable = false; + + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); +} + +static const TypeInfo igbvf_info = { + .name = TYPE_IGBVF, + .parent = 
TYPE_PCI_DEVICE, + .instance_size = sizeof(IgbVfState), + .class_init = igbvf_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static void igb_register_types(void) +{ + type_register_static(&igbvf_info); +} + +type_init(igb_register_types) diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c index c862d96593..5d1f1f104c 100644 --- a/hw/net/imx_fec.c +++ b/hw/net/imx_fec.c @@ -282,11 +282,19 @@ static uint32_t imx_phy_read(IMXFECState *s, int reg) uint32_t val; uint32_t phy = reg / 32; - if (phy != s->phy_num) { - trace_imx_phy_read_num(phy, s->phy_num); + if (!s->phy_connected) { return 0xffff; } + if (phy != s->phy_num) { + if (s->phy_consumer && phy == s->phy_consumer->phy_num) { + s = s->phy_consumer; + } else { + trace_imx_phy_read_num(phy, s->phy_num); + return 0xffff; + } + } + reg %= 32; switch (reg) { @@ -343,11 +351,19 @@ static void imx_phy_write(IMXFECState *s, int reg, uint32_t val) { uint32_t phy = reg / 32; - if (phy != s->phy_num) { - trace_imx_phy_write_num(phy, s->phy_num); + if (!s->phy_connected) { return; } + if (phy != s->phy_num) { + if (s->phy_consumer && phy == s->phy_consumer->phy_num) { + s = s->phy_consumer; + } else { + trace_imx_phy_write_num(phy, s->phy_num); + return; + } + } + reg %= 32; trace_imx_phy_write(val, phy, reg); @@ -1327,6 +1343,9 @@ static Property imx_eth_properties[] = { DEFINE_NIC_PROPERTIES(IMXFECState, conf), DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1), DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0), + DEFINE_PROP_BOOL("phy-connected", IMXFECState, phy_connected, true), + DEFINE_PROP_LINK("phy-consumer", IMXFECState, phy_consumer, TYPE_IMX_FEC, + IMXFECState *), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/net/meson.build b/hw/net/meson.build index 4285145715..a7860c5efe 100644 --- a/hw/net/meson.build +++ b/hw/net/meson.build @@ -10,6 +10,8 @@ softmmu_ss.add(when: 'CONFIG_PCNET_COMMON', if_true: files('pcnet.c')) softmmu_ss.add(when: 'CONFIG_E1000_PCI', if_true: files('e1000.c', 'e1000x_common.c')) softmmu_ss.add(when: 'CONFIG_E1000E_PCI_EXPRESS', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c')) softmmu_ss.add(when: 'CONFIG_E1000E_PCI_EXPRESS', if_true: files('e1000e.c', 'e1000e_core.c', 'e1000x_common.c')) +softmmu_ss.add(when: 'CONFIG_IGB_PCI_EXPRESS', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c')) +softmmu_ss.add(when: 'CONFIG_IGB_PCI_EXPRESS', if_true: files('igb.c', 'igbvf.c', 'igb_core.c')) softmmu_ss.add(when: 'CONFIG_RTL8139_PCI', if_true: files('rtl8139.c')) softmmu_ss.add(when: 'CONFIG_TULIP', if_true: files('tulip.c')) softmmu_ss.add(when: 'CONFIG_VMXNET3_PCI', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c')) @@ -41,7 +43,7 @@ softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_emc.c')) softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_eth.c')) softmmu_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_fec.c')) specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_llan.c')) -specific_ss.add(when: 'CONFIG_XILINX_ETHLITE', if_true: files('xilinx_ethlite.c')) +softmmu_ss.add(when: 'CONFIG_XILINX_ETHLITE', if_true: files('xilinx_ethlite.c')) softmmu_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('net_rx_pkt.c')) specific_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('virtio-net.c')) diff --git a/hw/net/msf2-emac.c b/hw/net/msf2-emac.c index 7ccd3e5142..db3a04deb1 100644 --- a/hw/net/msf2-emac.c +++ b/hw/net/msf2-emac.c @@ -118,14 +118,18 @@ static void emac_load_desc(MSF2EmacState *s, EmacDesc *d, hwaddr desc) d->next = 
le32_to_cpu(d->next); } -static void emac_store_desc(MSF2EmacState *s, EmacDesc *d, hwaddr desc) +static void emac_store_desc(MSF2EmacState *s, const EmacDesc *d, hwaddr desc) { - /* Convert from host endianness into LE. */ - d->pktaddr = cpu_to_le32(d->pktaddr); - d->pktsize = cpu_to_le32(d->pktsize); - d->next = cpu_to_le32(d->next); + EmacDesc outd; + /* + * Convert from host endianness into LE. We use a local struct because + * calling code may still want to look at the fields afterwards. + */ + outd.pktaddr = cpu_to_le32(d->pktaddr); + outd.pktsize = cpu_to_le32(d->pktsize); + outd.next = cpu_to_le32(d->next); - address_space_write(&s->dma_as, desc, MEMTXATTRS_UNSPECIFIED, d, sizeof *d); + address_space_write(&s->dma_as, desc, MEMTXATTRS_UNSPECIFIED, &outd, sizeof outd); } static void msf2_dma_tx(MSF2EmacState *s) diff --git a/hw/net/net_rx_pkt.c b/hw/net/net_rx_pkt.c index 1e1c504e42..32e5f3f9cf 100644 --- a/hw/net/net_rx_pkt.c +++ b/hw/net/net_rx_pkt.c @@ -16,6 +16,7 @@ */ #include "qemu/osdep.h" +#include "qemu/crc32c.h" #include "trace.h" #include "net_rx_pkt.h" #include "net/checksum.h" @@ -23,21 +24,21 @@ struct NetRxPkt { struct virtio_net_hdr virt_hdr; - uint8_t ehdr_buf[sizeof(struct eth_header) + sizeof(struct vlan_header)]; + struct { + struct eth_header eth; + struct vlan_header vlan; + } ehdr_buf; struct iovec *vec; uint16_t vec_len_total; uint16_t vec_len; uint32_t tot_len; uint16_t tci; size_t ehdr_buf_len; - bool has_virt_hdr; eth_pkt_types_e packet_type; /* Analysis results */ - bool isip4; - bool isip6; - bool isudp; - bool istcp; + bool hasip4; + bool hasip6; size_t l3hdr_off; size_t l4hdr_off; @@ -48,10 +49,9 @@ struct NetRxPkt { eth_l4_hdr_info l4hdr_info; }; -void net_rx_pkt_init(struct NetRxPkt **pkt, bool has_virt_hdr) +void net_rx_pkt_init(struct NetRxPkt **pkt) { struct NetRxPkt *p = g_malloc0(sizeof *p); - p->has_virt_hdr = has_virt_hdr; p->vec = NULL; p->vec_len_total = 0; *pkt = p; @@ -93,7 +93,7 @@ net_rx_pkt_pull_data(struct NetRxPkt *pkt, if (pkt->ehdr_buf_len) { net_rx_pkt_iovec_realloc(pkt, iovcnt + 1); - pkt->vec[0].iov_base = pkt->ehdr_buf; + pkt->vec[0].iov_base = &pkt->ehdr_buf; pkt->vec[0].iov_len = pkt->ehdr_buf_len; pkt->tot_len = pllen + pkt->ehdr_buf_len; @@ -107,12 +107,11 @@ net_rx_pkt_pull_data(struct NetRxPkt *pkt, iov, iovcnt, ploff, pkt->tot_len); } - eth_get_protocols(pkt->vec, pkt->vec_len, &pkt->isip4, &pkt->isip6, - &pkt->isudp, &pkt->istcp, + eth_get_protocols(pkt->vec, pkt->vec_len, 0, &pkt->hasip4, &pkt->hasip6, &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off, &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info); - trace_net_rx_pkt_parsed(pkt->isip4, pkt->isip6, pkt->isudp, pkt->istcp, + trace_net_rx_pkt_parsed(pkt->hasip4, pkt->hasip6, pkt->l4hdr_info.proto, pkt->l3hdr_off, pkt->l4hdr_off, pkt->l5hdr_off); } @@ -125,7 +124,7 @@ void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt, assert(pkt); if (strip_vlan) { - pkt->ehdr_buf_len = eth_strip_vlan(iov, iovcnt, iovoff, pkt->ehdr_buf, + pkt->ehdr_buf_len = eth_strip_vlan(iov, iovcnt, iovoff, &pkt->ehdr_buf, &ploff, &tci); } else { pkt->ehdr_buf_len = 0; @@ -138,20 +137,17 @@ void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt, void net_rx_pkt_attach_iovec_ex(struct NetRxPkt *pkt, const struct iovec *iov, int iovcnt, - size_t iovoff, bool strip_vlan, - uint16_t vet) + size_t iovoff, int strip_vlan_index, + uint16_t vet, uint16_t vet_ext) { uint16_t tci = 0; uint16_t ploff = iovoff; assert(pkt); - if (strip_vlan) { - pkt->ehdr_buf_len = eth_strip_vlan_ex(iov, iovcnt, iovoff, vet, - 
pkt->ehdr_buf, - &ploff, &tci); - } else { - pkt->ehdr_buf_len = 0; - } + pkt->ehdr_buf_len = eth_strip_vlan_ex(iov, iovcnt, iovoff, + strip_vlan_index, vet, vet_ext, + &pkt->ehdr_buf, + &ploff, &tci); pkt->tci = tci; @@ -191,32 +187,26 @@ size_t net_rx_pkt_get_total_len(struct NetRxPkt *pkt) return pkt->tot_len; } -void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, const void *data, - size_t len) +void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, + const struct iovec *iov, size_t iovcnt, + size_t iovoff) { - const struct iovec iov = { - .iov_base = (void *)data, - .iov_len = len - }; - assert(pkt); - eth_get_protocols(&iov, 1, &pkt->isip4, &pkt->isip6, - &pkt->isudp, &pkt->istcp, + eth_get_protocols(iov, iovcnt, iovoff, &pkt->hasip4, &pkt->hasip6, &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off, &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info); } void net_rx_pkt_get_protocols(struct NetRxPkt *pkt, - bool *isip4, bool *isip6, - bool *isudp, bool *istcp) + bool *hasip4, bool *hasip6, + EthL4HdrProto *l4hdr_proto) { assert(pkt); - *isip4 = pkt->isip4; - *isip6 = pkt->isip6; - *isudp = pkt->isudp; - *istcp = pkt->istcp; + *hasip4 = pkt->hasip4; + *hasip6 = pkt->hasip6; + *l4hdr_proto = pkt->l4hdr_info.proto; } size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt) @@ -247,11 +237,6 @@ eth_ip4_hdr_info *net_rx_pkt_get_ip4_info(struct NetRxPkt *pkt) return &pkt->ip4hdr_info; } -eth_l4_hdr_info *net_rx_pkt_get_l4_info(struct NetRxPkt *pkt) -{ - return &pkt->l4hdr_info; -} - static inline void _net_rx_rss_add_chunk(uint8_t *rss_input, size_t *bytes_written, void *ptr, size_t size) @@ -333,58 +318,58 @@ net_rx_pkt_calc_rss_hash(struct NetRxPkt *pkt, switch (type) { case NetPktRssIpV4: - assert(pkt->isip4); + assert(pkt->hasip4); trace_net_rx_pkt_rss_ip4(); _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length); break; case NetPktRssIpV4Tcp: - assert(pkt->isip4); - assert(pkt->istcp); + assert(pkt->hasip4); + assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP); trace_net_rx_pkt_rss_ip4_tcp(); _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length); _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length); break; case NetPktRssIpV6Tcp: - assert(pkt->isip6); - assert(pkt->istcp); + assert(pkt->hasip6); + assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP); trace_net_rx_pkt_rss_ip6_tcp(); _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length); _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length); break; case NetPktRssIpV6: - assert(pkt->isip6); + assert(pkt->hasip6); trace_net_rx_pkt_rss_ip6(); _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length); break; case NetPktRssIpV6Ex: - assert(pkt->isip6); + assert(pkt->hasip6); trace_net_rx_pkt_rss_ip6_ex(); _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length); break; case NetPktRssIpV6TcpEx: - assert(pkt->isip6); - assert(pkt->istcp); + assert(pkt->hasip6); + assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP); trace_net_rx_pkt_rss_ip6_ex_tcp(); _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length); _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length); break; case NetPktRssIpV4Udp: - assert(pkt->isip4); - assert(pkt->isudp); + assert(pkt->hasip4); + assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP); trace_net_rx_pkt_rss_ip4_udp(); _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length); _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length); break; case NetPktRssIpV6Udp: - assert(pkt->isip6); - assert(pkt->isudp); + assert(pkt->hasip6); + assert(pkt->l4hdr_info.proto == 
ETH_L4_HDR_PROTO_UDP); trace_net_rx_pkt_rss_ip6_udp(); _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length); _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length); break; case NetPktRssIpV6UdpEx: - assert(pkt->isip6); - assert(pkt->isudp); + assert(pkt->hasip6); + assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP); trace_net_rx_pkt_rss_ip6_ex_udp(); _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length); _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length); @@ -406,7 +391,7 @@ uint16_t net_rx_pkt_get_ip_id(struct NetRxPkt *pkt) { assert(pkt); - if (pkt->isip4) { + if (pkt->hasip4) { return be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_id); } @@ -417,7 +402,7 @@ bool net_rx_pkt_is_tcp_ack(struct NetRxPkt *pkt) { assert(pkt); - if (pkt->istcp) { + if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP) { return TCP_HEADER_FLAGS(&pkt->l4hdr_info.hdr.tcp) & TCP_FLAG_ACK; } @@ -428,7 +413,7 @@ bool net_rx_pkt_has_tcp_data(struct NetRxPkt *pkt) { assert(pkt); - if (pkt->istcp) { + if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP) { return pkt->l4hdr_info.has_tcp_data; } @@ -465,6 +450,13 @@ void net_rx_pkt_set_vhdr_iovec(struct NetRxPkt *pkt, iov_to_buf(iov, iovcnt, 0, &pkt->virt_hdr, sizeof pkt->virt_hdr); } +void net_rx_pkt_unset_vhdr(struct NetRxPkt *pkt) +{ + assert(pkt); + + memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr)); +} + bool net_rx_pkt_is_vlan_stripped(struct NetRxPkt *pkt) { assert(pkt); @@ -472,13 +464,6 @@ bool net_rx_pkt_is_vlan_stripped(struct NetRxPkt *pkt) return pkt->ehdr_buf_len ? true : false; } -bool net_rx_pkt_has_virt_hdr(struct NetRxPkt *pkt) -{ - assert(pkt); - - return pkt->has_virt_hdr; -} - uint16_t net_rx_pkt_get_vlan_tag(struct NetRxPkt *pkt) { assert(pkt); @@ -494,7 +479,7 @@ bool net_rx_pkt_validate_l3_csum(struct NetRxPkt *pkt, bool *csum_valid) trace_net_rx_pkt_l3_csum_validate_entry(); - if (!pkt->isip4) { + if (!pkt->hasip4) { trace_net_rx_pkt_l3_csum_validate_not_ip4(); return false; } @@ -525,8 +510,8 @@ _net_rx_pkt_calc_l4_csum(struct NetRxPkt *pkt) trace_net_rx_pkt_l4_csum_calc_entry(); - if (pkt->isip4) { - if (pkt->isudp) { + if (pkt->hasip4) { + if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP) { csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen); trace_net_rx_pkt_l4_csum_calc_ip4_udp(); } else { @@ -539,7 +524,7 @@ _net_rx_pkt_calc_l4_csum(struct NetRxPkt *pkt) csl, &cso); trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl); } else { - if (pkt->isudp) { + if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP) { csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen); trace_net_rx_pkt_l4_csum_calc_ip6_udp(); } else { @@ -567,30 +552,73 @@ _net_rx_pkt_calc_l4_csum(struct NetRxPkt *pkt) return csum; } +static bool +_net_rx_pkt_validate_sctp_sum(struct NetRxPkt *pkt) +{ + size_t csum_off; + size_t off = pkt->l4hdr_off; + size_t vec_len = pkt->vec_len; + struct iovec *vec; + uint32_t calculated = 0; + uint32_t original; + bool valid; + + for (vec = pkt->vec; vec->iov_len < off; vec++) { + off -= vec->iov_len; + vec_len--; + } + + csum_off = off + 8; + + if (!iov_to_buf(vec, vec_len, csum_off, &original, sizeof(original))) { + return false; + } + + if (!iov_from_buf(vec, vec_len, csum_off, + &calculated, sizeof(calculated))) { + return false; + } + + calculated = crc32c(0xffffffff, + (uint8_t *)vec->iov_base + off, vec->iov_len - off); + calculated = iov_crc32c(calculated ^ 0xffffffff, vec + 1, vec_len - 1); + valid = calculated == le32_to_cpu(original); + iov_from_buf(vec, vec_len, csum_off, &original, sizeof(original)); + + return valid; +} + 
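The SCTP receive path added above implements the CRC32C check that RFC 4960 requires: it saves the 4-byte checksum field (offset 8 in the SCTP common header), computes CRC32C over the packet with that field zeroed, compares the result against the stored little-endian value (hence the le32_to_cpu() above), and restores the field. A minimal self-contained sketch of the same check over a contiguous buffer, assuming a flat packet rather than QEMU's iovec and using a bitwise CRC32C as an illustrative stand-in for qemu/crc32c.h:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Bitwise CRC32C (Castagnoli, reflected polynomial 0x82F63B78). */
static uint32_t crc32c_buf(uint32_t crc, const uint8_t *p, size_t len)
{
    while (len--) {
        crc ^= *p++;
        for (int k = 0; k < 8; k++) {
            crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
        }
    }
    return crc;
}

/* pkt points at the SCTP common header; len covers the whole packet. */
static bool sctp_csum_valid(uint8_t *pkt, size_t len)
{
    uint8_t saved[4];
    uint32_t stored, crc;

    if (len < 12) {                         /* common header is 12 bytes */
        return false;
    }

    memcpy(saved, pkt + 8, sizeof(saved));  /* keep the original field */
    memset(pkt + 8, 0, 4);                  /* checksum computed as if zero */
    crc = crc32c_buf(0xFFFFFFFF, pkt, len) ^ 0xFFFFFFFF;
    memcpy(pkt + 8, saved, sizeof(saved));  /* restore the packet */

    /* The checksum field is stored in little-endian byte order. */
    stored = (uint32_t)saved[0] | ((uint32_t)saved[1] << 8) |
             ((uint32_t)saved[2] << 16) | ((uint32_t)saved[3] << 24);
    return crc == stored;
}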
bool net_rx_pkt_validate_l4_csum(struct NetRxPkt *pkt, bool *csum_valid) { - uint16_t csum; + uint32_t csum; trace_net_rx_pkt_l4_csum_validate_entry(); - if (!pkt->istcp && !pkt->isudp) { - trace_net_rx_pkt_l4_csum_validate_not_xxp(); - return false; - } - - if (pkt->isudp && (pkt->l4hdr_info.hdr.udp.uh_sum == 0)) { - trace_net_rx_pkt_l4_csum_validate_udp_with_no_checksum(); - return false; - } - - if (pkt->isip4 && pkt->ip4hdr_info.fragment) { + if (pkt->hasip4 && pkt->ip4hdr_info.fragment) { trace_net_rx_pkt_l4_csum_validate_ip4_fragment(); return false; } - csum = _net_rx_pkt_calc_l4_csum(pkt); + switch (pkt->l4hdr_info.proto) { + case ETH_L4_HDR_PROTO_UDP: + if (pkt->l4hdr_info.hdr.udp.uh_sum == 0) { + trace_net_rx_pkt_l4_csum_validate_udp_with_no_checksum(); + return false; + } + /* fall through */ + case ETH_L4_HDR_PROTO_TCP: + csum = _net_rx_pkt_calc_l4_csum(pkt); + *csum_valid = ((csum == 0) || (csum == 0xFFFF)); + break; - *csum_valid = ((csum == 0) || (csum == 0xFFFF)); + case ETH_L4_HDR_PROTO_SCTP: + *csum_valid = _net_rx_pkt_validate_sctp_sum(pkt); + break; + + default: + trace_net_rx_pkt_l4_csum_validate_not_xxp(); + return false; + } trace_net_rx_pkt_l4_csum_validate_csum(*csum_valid); @@ -604,22 +632,27 @@ bool net_rx_pkt_fix_l4_csum(struct NetRxPkt *pkt) trace_net_rx_pkt_l4_csum_fix_entry(); - if (pkt->istcp) { + switch (pkt->l4hdr_info.proto) { + case ETH_L4_HDR_PROTO_TCP: l4_cso = offsetof(struct tcp_header, th_sum); trace_net_rx_pkt_l4_csum_fix_tcp(l4_cso); - } else if (pkt->isudp) { + break; + + case ETH_L4_HDR_PROTO_UDP: if (pkt->l4hdr_info.hdr.udp.uh_sum == 0) { trace_net_rx_pkt_l4_csum_fix_udp_with_no_checksum(); return false; } l4_cso = offsetof(struct udp_header, uh_sum); trace_net_rx_pkt_l4_csum_fix_udp(l4_cso); - } else { + break; + + default: trace_net_rx_pkt_l4_csum_fix_not_xxp(); return false; } - if (pkt->isip4 && pkt->ip4hdr_info.fragment) { + if (pkt->hasip4 && pkt->ip4hdr_info.fragment) { trace_net_rx_pkt_l4_csum_fix_ip4_fragment(); return false; } diff --git a/hw/net/net_rx_pkt.h b/hw/net/net_rx_pkt.h index 048e3461f0..55ec67a1a7 100644 --- a/hw/net/net_rx_pkt.h +++ b/hw/net/net_rx_pkt.h @@ -37,10 +37,9 @@ void net_rx_pkt_uninit(struct NetRxPkt *pkt); * Init function for rx packet functionality * * @pkt: packet pointer - * @has_virt_hdr: device uses virtio header * */ -void net_rx_pkt_init(struct NetRxPkt **pkt, bool has_virt_hdr); +void net_rx_pkt_init(struct NetRxPkt **pkt); /** * returns total length of data attached to rx context @@ -56,26 +55,27 @@ size_t net_rx_pkt_get_total_len(struct NetRxPkt *pkt); * parse and set packet analysis results * * @pkt: packet - * @data: pointer to the data buffer to be parsed - * @len: data length + * @iov: received data scatter-gather list + * @iovcnt: number of elements in iov + * @iovoff: data start offset in the iov * */ -void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, const void *data, - size_t len); +void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, + const struct iovec *iov, size_t iovcnt, + size_t iovoff); /** * fetches packet analysis results * * @pkt: packet - * @isip4: whether the packet given is IPv4 - * @isip6: whether the packet given is IPv6 - * @isudp: whether the packet given is UDP - * @istcp: whether the packet given is TCP + * @hasip4: whether the packet has an IPv4 header + * @hasip6: whether the packet has an IPv6 header + * @l4hdr_proto: protocol of L4 header * */ void net_rx_pkt_get_protocols(struct NetRxPkt *pkt, - bool *isip4, bool *isip6, - bool *isudp, bool *istcp); + bool *hasip4, 
bool *hasip6, + EthL4HdrProto *l4hdr_proto); /** * fetches L3 header offset @@ -119,15 +119,6 @@ eth_ip6_hdr_info *net_rx_pkt_get_ip6_info(struct NetRxPkt *pkt); */ eth_ip4_hdr_info *net_rx_pkt_get_ip4_info(struct NetRxPkt *pkt); -/** - * fetches L4 header analysis results - * - * Return: pointer to analysis results structure which is stored in internal - * packet area. - * - */ -eth_l4_hdr_info *net_rx_pkt_get_l4_info(struct NetRxPkt *pkt); - typedef enum { NetPktRssIpV4, NetPktRssIpV4Tcp, @@ -214,15 +205,6 @@ uint16_t net_rx_pkt_get_vlan_tag(struct NetRxPkt *pkt); */ bool net_rx_pkt_is_vlan_stripped(struct NetRxPkt *pkt); -/** - * notifies caller if the packet has virtio header - * - * @pkt: packet - * @ret: true if packet has virtio header, false otherwize - * - */ -bool net_rx_pkt_has_virt_hdr(struct NetRxPkt *pkt); - /** * attach scatter-gather data to rx packet * @@ -241,18 +223,19 @@ void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt, /** * attach scatter-gather data to rx packet * -* @pkt: packet -* @iov: received data scatter-gather list -* @iovcnt number of elements in iov -* @iovoff data start offset in the iov -* @strip_vlan: should the module strip vlan from data -* @vet: VLAN tag Ethernet type +* @pkt: packet +* @iov: received data scatter-gather list +* @iovcnt: number of elements in iov +* @iovoff: data start offset in the iov +* @strip_vlan_index: index of Q tag if it is to be stripped. negative otherwise. +* @vet: VLAN tag Ethernet type +* @vet_ext: outer VLAN tag Ethernet type * */ void net_rx_pkt_attach_iovec_ex(struct NetRxPkt *pkt, - const struct iovec *iov, int iovcnt, - size_t iovoff, bool strip_vlan, - uint16_t vet); + const struct iovec *iov, int iovcnt, + size_t iovoff, int strip_vlan_index, + uint16_t vet, uint16_t vet_ext); /** * attach data to rx packet @@ -322,6 +305,14 @@ void net_rx_pkt_set_vhdr(struct NetRxPkt *pkt, void net_rx_pkt_set_vhdr_iovec(struct NetRxPkt *pkt, const struct iovec *iov, int iovcnt); +/** + * unset vhdr data from packet context + * + * @pkt: packet + * + */ +void net_rx_pkt_unset_vhdr(struct NetRxPkt *pkt); + /** * save packet type in packet context * diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c index 2533ea2700..2e5f58b3c9 100644 --- a/hw/net/net_tx_pkt.c +++ b/hw/net/net_tx_pkt.c @@ -16,12 +16,13 @@ */ #include "qemu/osdep.h" -#include "net_tx_pkt.h" +#include "qemu/crc32c.h" #include "net/eth.h" #include "net/checksum.h" #include "net/tap.h" #include "net/net.h" #include "hw/pci/pci_device.h" +#include "net_tx_pkt.h" enum { NET_TX_PKT_VHDR_FRAG = 0, @@ -32,10 +33,7 @@ enum { /* TX packet private context */ struct NetTxPkt { - PCIDevice *pci_dev; - struct virtio_net_hdr virt_hdr; - bool has_virt_hdr; struct iovec *raw; uint32_t raw_frags; @@ -43,8 +41,15 @@ struct NetTxPkt { struct iovec *vec; - uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN]; - uint8_t l3_hdr[ETH_MAX_IP_DGRAM_LEN]; + struct { + struct eth_header eth; + struct vlan_header vlan[3]; + } l2_hdr; + union { + struct ip_header ip; + struct ip6_header ip6; + uint8_t octets[ETH_MAX_IP_DGRAM_LEN]; + } l3_hdr; uint32_t payload_len; @@ -54,27 +59,20 @@ struct NetTxPkt { uint16_t hdr_len; eth_pkt_types_e packet_type; uint8_t l4proto; - - bool is_loopback; }; -void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev, - uint32_t max_frags, bool has_virt_hdr) +void net_tx_pkt_init(struct NetTxPkt **pkt, uint32_t max_frags) { struct NetTxPkt *p = g_malloc0(sizeof *p); - p->pci_dev = pci_dev; - p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG); p->raw = 
g_new(struct iovec, max_frags); p->max_payload_frags = max_frags; p->max_raw_frags = max_frags; - p->has_virt_hdr = has_virt_hdr; p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr; - p->vec[NET_TX_PKT_VHDR_FRAG].iov_len = - p->has_virt_hdr ? sizeof p->virt_hdr : 0; + p->vec[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof p->virt_hdr; p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr; p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = &p->l3_hdr; @@ -94,16 +92,14 @@ void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt) { uint16_t csum; assert(pkt); - struct ip_header *ip_hdr; - ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base; - ip_hdr->ip_len = cpu_to_be16(pkt->payload_len + + pkt->l3_hdr.ip.ip_len = cpu_to_be16(pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len); - ip_hdr->ip_sum = 0; - csum = net_raw_checksum((uint8_t *)ip_hdr, + pkt->l3_hdr.ip.ip_sum = 0; + csum = net_raw_checksum(pkt->l3_hdr.octets, pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len); - ip_hdr->ip_sum = cpu_to_be16(csum); + pkt->l3_hdr.ip.ip_sum = cpu_to_be16(csum); } void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt) @@ -140,6 +136,23 @@ void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt) pkt->virt_hdr.csum_offset, &csum, sizeof(csum)); } +bool net_tx_pkt_update_sctp_checksum(struct NetTxPkt *pkt) +{ + uint32_t csum = 0; + struct iovec *pl_start_frag = pkt->vec + NET_TX_PKT_PL_START_FRAG; + + if (iov_from_buf(pl_start_frag, pkt->payload_frags, 8, &csum, sizeof(csum)) < sizeof(csum)) { + return false; + } + + csum = cpu_to_le32(iov_crc32c(0xffffffff, pl_start_frag, pkt->payload_frags)); + if (iov_from_buf(pl_start_frag, pkt->payload_frags, 8, &csum, sizeof(csum)) < sizeof(csum)) { + return false; + } + + return true; +} + static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt) { pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len + @@ -304,10 +317,11 @@ func_exit: return rc; } -void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable, +bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable, bool csum_enable, uint32_t gso_size) { struct tcp_hdr l4hdr; + size_t bytes_read; assert(pkt); /* csum has to be enabled if tso is. 
*/ @@ -328,8 +342,13 @@ void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable, case VIRTIO_NET_HDR_GSO_TCPV4: case VIRTIO_NET_HDR_GSO_TCPV6: - iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags, - 0, &l4hdr, sizeof(l4hdr)); + bytes_read = iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], + pkt->payload_frags, 0, &l4hdr, sizeof(l4hdr)); + if (bytes_read < sizeof(l4hdr) || + l4hdr.th_off * sizeof(uint32_t) < sizeof(l4hdr)) { + return false; + } + pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t); pkt->virt_hdr.gso_size = gso_size; break; @@ -341,11 +360,17 @@ void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable, if (csum_enable) { switch (pkt->l4proto) { case IP_PROTO_TCP: + if (pkt->payload_len < sizeof(struct tcp_hdr)) { + return false; + } pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; pkt->virt_hdr.csum_start = pkt->hdr_len; pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum); break; case IP_PROTO_UDP: + if (pkt->payload_len < sizeof(struct udp_hdr)) { + return false; + } pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; pkt->virt_hdr.csum_start = pkt->hdr_len; pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum); @@ -354,29 +379,24 @@ void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable, break; } } + + return true; } void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt, uint16_t vlan, uint16_t vlan_ethtype) { - bool is_new; assert(pkt); - eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base, - vlan, vlan_ethtype, &is_new); + eth_setup_vlan_headers(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base, + &pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len, + vlan, vlan_ethtype); - /* update l2hdrlen */ - if (is_new) { - pkt->hdr_len += sizeof(struct vlan_header); - pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len += - sizeof(struct vlan_header); - } + pkt->hdr_len += sizeof(struct vlan_header); } -bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa, - size_t len) +bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, void *base, size_t len) { - hwaddr mapped_len = 0; struct iovec *ventry; assert(pkt); @@ -384,23 +404,12 @@ bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa, return false; } - if (!len) { - return true; - } - ventry = &pkt->raw[pkt->raw_frags]; - mapped_len = len; + ventry->iov_base = base; + ventry->iov_len = len; + pkt->raw_frags++; - ventry->iov_base = pci_dma_map(pkt->pci_dev, pa, - &mapped_len, DMA_DIRECTION_TO_DEVICE); - - if ((ventry->iov_base != NULL) && (len == mapped_len)) { - ventry->iov_len = mapped_len; - pkt->raw_frags++; - return true; - } else { - return false; - } + return true; } bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt) @@ -434,7 +443,8 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt) #endif } -void net_tx_pkt_reset(struct NetTxPkt *pkt) +void net_tx_pkt_reset(struct NetTxPkt *pkt, + NetTxPktFreeFrag callback, void *context) { int i; @@ -454,8 +464,7 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt) assert(pkt->raw); for (i = 0; i < pkt->raw_frags; i++) { assert(pkt->raw[i].iov_base); - pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base, - pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0); + callback(context, pkt->raw[i].iov_base, pkt->raw[i].iov_len); } } pkt->raw_frags = 0; @@ -464,15 +473,36 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt) pkt->l4proto = 0; } -static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt) +void net_tx_pkt_unmap_frag_pci(void *context, void *base, size_t len) +{ + pci_dma_unmap(context, base, len, 
DMA_DIRECTION_TO_DEVICE, 0); +} + +bool net_tx_pkt_add_raw_fragment_pci(struct NetTxPkt *pkt, PCIDevice *pci_dev, + dma_addr_t pa, size_t len) +{ + dma_addr_t mapped_len = len; + void *base = pci_dma_map(pci_dev, pa, &mapped_len, DMA_DIRECTION_TO_DEVICE); + if (!base) { + return false; + } + + if (mapped_len != len || !net_tx_pkt_add_raw_fragment(pkt, base, len)) { + net_tx_pkt_unmap_frag_pci(pci_dev, base, mapped_len); + return false; + } + + return true; +} + +static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt, + struct iovec *iov, uint32_t iov_len, + uint16_t csl) { - struct iovec *iov = &pkt->vec[NET_TX_PKT_L2HDR_FRAG]; uint32_t csum_cntr; uint16_t csum = 0; uint32_t cso; /* num of iovec without vhdr */ - uint32_t iov_len = pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1; - uint16_t csl; size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset; uint16_t l3_proto = eth_get_l3_proto(iov, 1, iov->iov_len); @@ -480,8 +510,6 @@ static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt) iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum); /* Calculate L4 TCP/UDP checksum */ - csl = pkt->payload_len; - csum_cntr = 0; cso = 0; /* add pseudo header to csum */ @@ -504,23 +532,16 @@ static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt) iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum); } -enum { - NET_TX_PKT_FRAGMENT_L2_HDR_POS = 0, - NET_TX_PKT_FRAGMENT_L3_HDR_POS, - NET_TX_PKT_FRAGMENT_HEADER_NUM -}; - #define NET_MAX_FRAG_SG_LIST (64) static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt, - int *src_idx, size_t *src_offset, struct iovec *dst, int *dst_idx) + int *src_idx, size_t *src_offset, size_t src_len, + struct iovec *dst, int *dst_idx) { size_t fetched = 0; struct iovec *src = pkt->vec; - *dst_idx = NET_TX_PKT_FRAGMENT_HEADER_NUM; - - while (fetched < IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size)) { + while (fetched < src_len) { /* no more place in fragment iov */ if (*dst_idx == NET_MAX_FRAG_SG_LIST) { @@ -535,7 +556,7 @@ static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt, dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset; dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset, - IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size) - fetched); + src_len - fetched); *src_offset += dst[*dst_idx].iov_len; fetched += dst[*dst_idx].iov_len; @@ -551,77 +572,258 @@ static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt, return fetched; } -static inline void net_tx_pkt_sendv(struct NetTxPkt *pkt, - NetClientState *nc, const struct iovec *iov, int iov_cnt) +static void net_tx_pkt_sendv( + void *opaque, const struct iovec *iov, int iov_cnt, + const struct iovec *virt_iov, int virt_iov_cnt) { - if (pkt->is_loopback) { - qemu_receive_packet_iov(nc, iov, iov_cnt); + NetClientState *nc = opaque; + + if (qemu_get_using_vnet_hdr(nc->peer)) { + qemu_sendv_packet(nc, virt_iov, virt_iov_cnt); } else { qemu_sendv_packet(nc, iov, iov_cnt); } } -static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt, - NetClientState *nc) +static bool net_tx_pkt_tcp_fragment_init(struct NetTxPkt *pkt, + struct iovec *fragment, + int *pl_idx, + size_t *l4hdr_len, + int *src_idx, + size_t *src_offset, + size_t *src_len) { + struct iovec *l4 = fragment + NET_TX_PKT_PL_START_FRAG; + size_t bytes_read = 0; + struct tcp_hdr *th; + + if (!pkt->payload_frags) { + return false; + } + + l4->iov_len = pkt->virt_hdr.hdr_len - pkt->hdr_len; + l4->iov_base = g_malloc(l4->iov_len); + + *src_idx = NET_TX_PKT_PL_START_FRAG; + while (pkt->vec[*src_idx].iov_len < 
l4->iov_len - bytes_read) { + memcpy((char *)l4->iov_base + bytes_read, pkt->vec[*src_idx].iov_base, + pkt->vec[*src_idx].iov_len); + + bytes_read += pkt->vec[*src_idx].iov_len; + + (*src_idx)++; + if (*src_idx >= pkt->payload_frags + NET_TX_PKT_PL_START_FRAG) { + g_free(l4->iov_base); + return false; + } + } + + *src_offset = l4->iov_len - bytes_read; + memcpy((char *)l4->iov_base + bytes_read, pkt->vec[*src_idx].iov_base, + *src_offset); + + th = l4->iov_base; + th->th_flags &= ~(TH_FIN | TH_PUSH); + + *pl_idx = NET_TX_PKT_PL_START_FRAG + 1; + *l4hdr_len = l4->iov_len; + *src_len = pkt->virt_hdr.gso_size; + + return true; +} + +static void net_tx_pkt_tcp_fragment_deinit(struct iovec *fragment) +{ + g_free(fragment[NET_TX_PKT_PL_START_FRAG].iov_base); +} + +static void net_tx_pkt_tcp_fragment_fix(struct NetTxPkt *pkt, + struct iovec *fragment, + size_t fragment_len, + uint8_t gso_type) +{ + struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG; + struct iovec *l4hdr = fragment + NET_TX_PKT_PL_START_FRAG; + struct ip_header *ip = l3hdr->iov_base; + struct ip6_header *ip6 = l3hdr->iov_base; + size_t len = l3hdr->iov_len + l4hdr->iov_len + fragment_len; + + switch (gso_type) { + case VIRTIO_NET_HDR_GSO_TCPV4: + ip->ip_len = cpu_to_be16(len); + eth_fix_ip4_checksum(l3hdr->iov_base, l3hdr->iov_len); + break; + + case VIRTIO_NET_HDR_GSO_TCPV6: + len -= sizeof(struct ip6_header); + ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = cpu_to_be16(len); + break; + } +} + +static void net_tx_pkt_tcp_fragment_advance(struct NetTxPkt *pkt, + struct iovec *fragment, + size_t fragment_len, + uint8_t gso_type) +{ + struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG; + struct iovec *l4hdr = fragment + NET_TX_PKT_PL_START_FRAG; + struct ip_header *ip = l3hdr->iov_base; + struct tcp_hdr *th = l4hdr->iov_base; + + if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4) { + ip->ip_id = cpu_to_be16(be16_to_cpu(ip->ip_id) + 1); + } + + th->th_seq = cpu_to_be32(be32_to_cpu(th->th_seq) + fragment_len); + th->th_flags &= ~TH_CWR; +} + +static void net_tx_pkt_udp_fragment_init(struct NetTxPkt *pkt, + int *pl_idx, + size_t *l4hdr_len, + int *src_idx, size_t *src_offset, + size_t *src_len) +{ + *pl_idx = NET_TX_PKT_PL_START_FRAG; + *l4hdr_len = 0; + *src_idx = NET_TX_PKT_PL_START_FRAG; + *src_offset = 0; + *src_len = IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size); +} + +static void net_tx_pkt_udp_fragment_fix(struct NetTxPkt *pkt, + struct iovec *fragment, + size_t fragment_offset, + size_t fragment_len) +{ + bool more_frags = fragment_offset + fragment_len < pkt->payload_len; + uint16_t orig_flags; + struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG; + struct ip_header *ip = l3hdr->iov_base; + uint16_t frag_off_units = fragment_offset / IP_FRAG_UNIT_SIZE; + uint16_t new_ip_off; + + assert(fragment_offset % IP_FRAG_UNIT_SIZE == 0); + assert((frag_off_units & ~IP_OFFMASK) == 0); + + orig_flags = be16_to_cpu(ip->ip_off) & ~(IP_OFFMASK | IP_MF); + new_ip_off = frag_off_units | orig_flags | (more_frags ? 
IP_MF : 0); + ip->ip_off = cpu_to_be16(new_ip_off); + ip->ip_len = cpu_to_be16(l3hdr->iov_len + fragment_len); + + eth_fix_ip4_checksum(l3hdr->iov_base, l3hdr->iov_len); +} + +static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt, + NetTxPktSend callback, + void *context) +{ + uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN; + struct iovec fragment[NET_MAX_FRAG_SG_LIST]; - size_t fragment_len = 0; - bool more_frags = false; + size_t fragment_len; + size_t l4hdr_len; + size_t src_len; - /* some pointers for shorter code */ - void *l2_iov_base, *l3_iov_base; - size_t l2_iov_len, l3_iov_len; - int src_idx = NET_TX_PKT_PL_START_FRAG, dst_idx; - size_t src_offset = 0; + int src_idx, dst_idx, pl_idx; + size_t src_offset; size_t fragment_offset = 0; - - l2_iov_base = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base; - l2_iov_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len; - l3_iov_base = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base; - l3_iov_len = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len; + struct virtio_net_hdr virt_hdr = { + .flags = pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM ? + VIRTIO_NET_HDR_F_DATA_VALID : 0 + }; /* Copy headers */ - fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_base = l2_iov_base; - fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_len = l2_iov_len; - fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_base = l3_iov_base; - fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_len = l3_iov_len; + fragment[NET_TX_PKT_VHDR_FRAG].iov_base = &virt_hdr; + fragment[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof(virt_hdr); + fragment[NET_TX_PKT_L2HDR_FRAG] = pkt->vec[NET_TX_PKT_L2HDR_FRAG]; + fragment[NET_TX_PKT_L3HDR_FRAG] = pkt->vec[NET_TX_PKT_L3HDR_FRAG]; + switch (gso_type) { + case VIRTIO_NET_HDR_GSO_TCPV4: + case VIRTIO_NET_HDR_GSO_TCPV6: + if (!net_tx_pkt_tcp_fragment_init(pkt, fragment, &pl_idx, &l4hdr_len, + &src_idx, &src_offset, &src_len)) { + return false; + } + break; + + case VIRTIO_NET_HDR_GSO_UDP: + net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG], + pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1, + pkt->payload_len); + net_tx_pkt_udp_fragment_init(pkt, &pl_idx, &l4hdr_len, + &src_idx, &src_offset, &src_len); + break; + + default: + abort(); + } /* Put as much data as possible and send */ - do { - fragment_len = net_tx_pkt_fetch_fragment(pkt, &src_idx, &src_offset, - fragment, &dst_idx); + while (true) { + dst_idx = pl_idx; + fragment_len = net_tx_pkt_fetch_fragment(pkt, + &src_idx, &src_offset, src_len, fragment, &dst_idx); + if (!fragment_len) { + break; + } - more_frags = (fragment_offset + fragment_len < pkt->payload_len); + switch (gso_type) { + case VIRTIO_NET_HDR_GSO_TCPV4: + case VIRTIO_NET_HDR_GSO_TCPV6: + net_tx_pkt_tcp_fragment_fix(pkt, fragment, fragment_len, gso_type); + net_tx_pkt_do_sw_csum(pkt, fragment + NET_TX_PKT_L2HDR_FRAG, + dst_idx - NET_TX_PKT_L2HDR_FRAG, + l4hdr_len + fragment_len); + break; - eth_setup_ip4_fragmentation(l2_iov_base, l2_iov_len, l3_iov_base, - l3_iov_len, fragment_len, fragment_offset, more_frags); + case VIRTIO_NET_HDR_GSO_UDP: + net_tx_pkt_udp_fragment_fix(pkt, fragment, fragment_offset, + fragment_len); + break; + } - eth_fix_ip4_checksum(l3_iov_base, l3_iov_len); + callback(context, + fragment + NET_TX_PKT_L2HDR_FRAG, dst_idx - NET_TX_PKT_L2HDR_FRAG, + fragment + NET_TX_PKT_VHDR_FRAG, dst_idx - NET_TX_PKT_VHDR_FRAG); - net_tx_pkt_sendv(pkt, nc, fragment, dst_idx); + if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 || + gso_type == VIRTIO_NET_HDR_GSO_TCPV6) { + net_tx_pkt_tcp_fragment_advance(pkt, fragment, fragment_len, + 
gso_type); + } fragment_offset += fragment_len; + } - } while (fragment_len && more_frags); + if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 || + gso_type == VIRTIO_NET_HDR_GSO_TCPV6) { + net_tx_pkt_tcp_fragment_deinit(fragment); + } return true; } bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc) +{ + bool offload = qemu_get_using_vnet_hdr(nc->peer); + return net_tx_pkt_send_custom(pkt, offload, net_tx_pkt_sendv, nc); +} + +bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload, + NetTxPktSend callback, void *context) { assert(pkt); - if (!pkt->has_virt_hdr && - pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { - net_tx_pkt_do_sw_csum(pkt); - } + uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN; /* * Since underlying infrastructure does not support IP datagrams longer * than 64K we should drop such packets and don't even try to send */ - if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) { + if (VIRTIO_NET_HDR_GSO_NONE != gso_type) { if (pkt->payload_len > ETH_MAX_IP_DGRAM_LEN - pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) { @@ -629,41 +831,36 @@ bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc) } } - if (pkt->has_virt_hdr || - pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) { + if (offload || gso_type == VIRTIO_NET_HDR_GSO_NONE) { + if (!offload && pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { + net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG], + pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1, + pkt->payload_len); + } + net_tx_pkt_fix_ip6_payload_len(pkt); - net_tx_pkt_sendv(pkt, nc, pkt->vec, - pkt->payload_frags + NET_TX_PKT_PL_START_FRAG); + callback(context, pkt->vec + NET_TX_PKT_L2HDR_FRAG, + pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - NET_TX_PKT_L2HDR_FRAG, + pkt->vec + NET_TX_PKT_VHDR_FRAG, + pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - NET_TX_PKT_VHDR_FRAG); return true; } - return net_tx_pkt_do_sw_fragmentation(pkt, nc); -} - -bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc) -{ - bool res; - - pkt->is_loopback = true; - res = net_tx_pkt_send(pkt, nc); - pkt->is_loopback = false; - - return res; + return net_tx_pkt_do_sw_fragmentation(pkt, callback, context); } void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt) { struct iovec *l2 = &pkt->vec[NET_TX_PKT_L2HDR_FRAG]; if (eth_get_l3_proto(l2, 1, l2->iov_len) == ETH_P_IPV6) { - struct ip6_header *ip6 = (struct ip6_header *) pkt->l3_hdr; /* * TODO: if qemu would support >64K packets - add jumbo option check * something like that: * 'if (ip6->ip6_plen == 0 && !has_jumbo_option(ip6)) {' */ - if (ip6->ip6_plen == 0) { + if (pkt->l3_hdr.ip6.ip6_plen == 0) { if (pkt->payload_len <= ETH_MAX_IP_DGRAM_LEN) { - ip6->ip6_plen = htons(pkt->payload_len); + pkt->l3_hdr.ip6.ip6_plen = htons(pkt->payload_len); } /* * TODO: if qemu would support >64K packets diff --git a/hw/net/net_tx_pkt.h b/hw/net/net_tx_pkt.h index 4ec8bbe9bd..0a716e74a5 100644 --- a/hw/net/net_tx_pkt.h +++ b/hw/net/net_tx_pkt.h @@ -26,16 +26,16 @@ struct NetTxPkt; +typedef void (*NetTxPktFreeFrag)(void *, void *, size_t); +typedef void (*NetTxPktSend)(void *, const struct iovec *, int, const struct iovec *, int); + /** * Init function for tx packet functionality * * @pkt: packet pointer - * @pci_dev: PCI device processing this packet * @max_frags: max tx ip fragments - * @has_virt_hdr: device uses virtio header. 
*/ -void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev, - uint32_t max_frags, bool has_virt_hdr); +void net_tx_pkt_init(struct NetTxPkt **pkt, uint32_t max_frags); /** * Clean all tx packet resources. @@ -59,9 +59,10 @@ struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt); * @tso_enable: TSO enabled * @csum_enable: CSO enabled * @gso_size: MSS size for TSO + * @ret: operation result * */ -void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable, +bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable, bool csum_enable, uint32_t gso_size); /** @@ -93,12 +94,11 @@ net_tx_pkt_setup_vlan_header(struct NetTxPkt *pkt, uint16_t vlan) * populate data fragment into pkt context. * * @pkt: packet - * @pa: physical address of fragment + * @base: pointer to fragment * @len: length of fragment * */ -bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa, - size_t len); +bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, void *base, size_t len); /** * Fix ip header fields and calculate IP header and pseudo header checksums. @@ -116,6 +116,14 @@ void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt); */ void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt); +/** + * Calculate the SCTP checksum. + * + * @pkt: packet + * + */ +bool net_tx_pkt_update_sctp_checksum(struct NetTxPkt *pkt); + /** * get length of all populated data. * @@ -146,9 +154,30 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt); * reset tx packet private context (needed to be called between packets) * * @pkt: packet - * + * @callback: function to free the fragments + * @context: pointer to be passed to the callback */ -void net_tx_pkt_reset(struct NetTxPkt *pkt); +void net_tx_pkt_reset(struct NetTxPkt *pkt, + NetTxPktFreeFrag callback, void *context); + +/** + * Unmap a fragment mapped from a PCI device. + * + * @context: PCI device owning fragment + * @base: pointer to fragment + * @len: length of fragment + */ +void net_tx_pkt_unmap_frag_pci(void *context, void *base, size_t len); + +/** + * map data fragment from PCI device and populate it into pkt context. + * + * @pci_dev: PCI device owning fragment + * @pa: physical address of fragment + * @len: length of fragment + */ +bool net_tx_pkt_add_raw_fragment_pci(struct NetTxPkt *pkt, PCIDevice *pci_dev, + dma_addr_t pa, size_t len); /** * Send packet to qemu. handles sw offloads if vhdr is not supported. @@ -161,15 +190,16 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt); bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc); /** -* Redirect packet directly to receive path (emulate loopback phy). -* Handles sw offloads if vhdr is not supported. -* -* @pkt: packet -* @nc: NetClientState -* @ret: operation result -* -*/ -bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc); + * Send packet with a custom function. + * + * @pkt: packet + * @offload: whether the callback implements offloading + * @callback: a function to be called back for each transformed packet + * @context: a pointer to be passed to the callback. + * @ret: operation result + */ +bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload, + NetTxPktSend callback, void *context); /** * parse raw packet data and analyze offload requirements. 
diff --git a/hw/net/npcm7xx_emc.c b/hw/net/npcm7xx_emc.c index 7c86bb52e5..8156f701b0 100644 --- a/hw/net/npcm7xx_emc.c +++ b/hw/net/npcm7xx_emc.c @@ -98,6 +98,8 @@ static const char *emc_reg_name(int regno) static void emc_reset(NPCM7xxEMCState *emc) { + uint32_t value; + trace_npcm7xx_emc_reset(emc->emc_num); memset(&emc->regs[0], 0, sizeof(emc->regs)); @@ -112,6 +114,16 @@ static void emc_reset(NPCM7xxEMCState *emc) emc->tx_active = false; emc->rx_active = false; + + /* Set the MAC address in the register space. */ + value = (emc->conf.macaddr.a[0] << 24) | + (emc->conf.macaddr.a[1] << 16) | + (emc->conf.macaddr.a[2] << 8) | + emc->conf.macaddr.a[3]; + emc->regs[REG_CAMM_BASE] = value; + + value = (emc->conf.macaddr.a[4] << 24) | (emc->conf.macaddr.a[5] << 16); + emc->regs[REG_CAML_BASE] = value; } static void npcm7xx_emc_reset(DeviceState *dev) @@ -432,13 +444,25 @@ static bool emc_receive_filter1(NPCM7xxEMCState *emc, const uint8_t *buf, } case ETH_PKT_UCAST: { bool matches; + uint32_t value; + struct MACAddr mac; if (emc->regs[REG_CAMCMR] & REG_CAMCMR_AUP) { return true; } + + value = emc->regs[REG_CAMM_BASE]; + mac.a[0] = value >> 24; + mac.a[1] = value >> 16; + mac.a[2] = value >> 8; + mac.a[3] = value >> 0; + value = emc->regs[REG_CAML_BASE]; + mac.a[4] = value >> 24; + mac.a[5] = value >> 16; + matches = ((emc->regs[REG_CAMCMR] & REG_CAMCMR_ECMP) && /* We only support one CAM register, CAM0. */ (emc->regs[REG_CAMEN] & (1 << 0)) && - memcmp(buf, emc->conf.macaddr.a, ETH_ALEN) == 0); + memcmp(buf, mac.a, ETH_ALEN) == 0); if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) { *fail_reason = "MACADDR matched, comparison complemented"; return !matches; @@ -661,15 +685,9 @@ static void npcm7xx_emc_write(void *opaque, hwaddr offset, break; case REG_CAMM_BASE + 0: emc->regs[reg] = value; - emc->conf.macaddr.a[0] = value >> 24; - emc->conf.macaddr.a[1] = value >> 16; - emc->conf.macaddr.a[2] = value >> 8; - emc->conf.macaddr.a[3] = value >> 0; break; case REG_CAML_BASE + 0: emc->regs[reg] = value; - emc->conf.macaddr.a[4] = value >> 24; - emc->conf.macaddr.a[5] = value >> 16; break; case REG_MCMDR: { uint32_t prev; diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c index 5a5aaf868d..5f1a4d359b 100644 --- a/hw/net/rtl8139.c +++ b/hw/net/rtl8139.c @@ -2154,6 +2154,9 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) int large_send_mss = (txdw0 >> CP_TC_LGSEN_MSS_SHIFT) & CP_TC_LGSEN_MSS_MASK; + if (large_send_mss == 0) { + goto skip_offload; + } DPRINTF("+++ C+ mode offloaded task TSO IP data %d " "frame data %d specified MSS=%d\n", diff --git a/hw/net/trace-events b/hw/net/trace-events index 4c0ec3fda1..e4a98b2c7d 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -61,7 +61,7 @@ pcnet_ioport_read(void *opaque, uint64_t addr, unsigned size) "opaque=%p addr=0x pcnet_ioport_write(void *opaque, uint64_t addr, uint64_t data, unsigned size) "opaque=%p addr=0x%"PRIx64" data=0x%"PRIx64" size=%d" # net_rx_pkt.c -net_rx_pkt_parsed(bool ip4, bool ip6, bool udp, bool tcp, size_t l3o, size_t l4o, size_t l5o) "RX packet parsed: ip4: %d, ip6: %d, udp: %d, tcp: %d, l3 offset: %zu, l4 offset: %zu, l5 offset: %zu" +net_rx_pkt_parsed(bool ip4, bool ip6, int l4proto, size_t l3o, size_t l4o, size_t l5o) "RX packet parsed: ip4: %d, ip6: %d, l4 protocol: %d, l3 offset: %zu, l4 offset: %zu, l5 offset: %zu" net_rx_pkt_l4_csum_validate_entry(void) "Starting L4 checksum validation" net_rx_pkt_l4_csum_validate_not_xxp(void) "Not a TCP/UDP packet" net_rx_pkt_l4_csum_validate_udp_with_no_checksum(void) "UDP 
packet without checksum" @@ -106,6 +106,8 @@ e1000_receiver_overrun(size_t s, uint32_t rdh, uint32_t rdt) "Receiver overrun: # e1000x_common.c e1000x_rx_can_recv_disabled(bool link_up, bool rx_enabled, bool pci_master) "link_up: %d, rx_enabled %d, pci_master %d" e1000x_vlan_is_vlan_pkt(bool is_vlan_pkt, uint16_t eth_proto, uint16_t vet) "Is VLAN packet: %d, ETH proto: 0x%X, VET: 0x%X" +e1000x_rx_flt_vlan_mismatch(uint16_t vid) "VID mismatch: 0x%X" +e1000x_rx_flt_vlan_match(uint16_t vid) "VID match: 0x%X" e1000x_rx_flt_ucast_match(uint32_t idx, uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x" e1000x_rx_flt_ucast_mismatch(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x" e1000x_rx_flt_inexact_mismatch(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5, uint32_t mo, uint32_t mta, uint32_t mta_val) "inexact mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] 0x%x" @@ -154,8 +156,6 @@ e1000e_rx_can_recv_rings_full(void) "Cannot receive: all rings are full" e1000e_rx_can_recv(void) "Can receive" e1000e_rx_has_buffers(int ridx, uint32_t free_desc, size_t total_size, uint32_t desc_buf_size) "ring #%d: free descr: %u, packet size %zu, descr buffer size %u" e1000e_rx_null_descriptor(void) "Null RX descriptor!!" -e1000e_rx_flt_vlan_mismatch(uint16_t vid) "VID mismatch: 0x%X" -e1000e_rx_flt_vlan_match(uint16_t vid) "VID match: 0x%X" e1000e_rx_desc_ps_read(uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3) "buffers: [0x%"PRIx64", 0x%"PRIx64", 0x%"PRIx64", 0x%"PRIx64"]" e1000e_rx_desc_ps_write(uint16_t a0, uint16_t a1, uint16_t a2, uint16_t a3) "bytes written: [%u, %u, %u, %u]" e1000e_rx_desc_buff_sizes(uint32_t b0, uint32_t b1, uint32_t b2, uint32_t b3) "buffer sizes: [%u, %u, %u, %u]" @@ -165,8 +165,8 @@ e1000e_rx_descr(int ridx, uint64_t base, uint8_t len) "Next RX descriptor: ring e1000e_rx_set_rctl(uint32_t rctl) "RCTL = 0x%x" e1000e_rx_receive_iov(int iovcnt) "Received vector of %d fragments" e1000e_rx_flt_dropped(void) "Received packet dropped by RX filter" -e1000e_rx_written_to_guest(uint32_t causes) "Received packet written to guest (ICR causes %u)" -e1000e_rx_not_written_to_guest(uint32_t causes) "Received packet NOT written to guest (ICR causes %u)" +e1000e_rx_written_to_guest(int queue_idx) "Received packet written to guest (queue %d)" +e1000e_rx_not_written_to_guest(int queue_idx) "Received packet NOT written to guest (queue %d)" e1000e_rx_interrupt_set(uint32_t causes) "Receive interrupt set (ICR causes %u)" e1000e_rx_interrupt_delayed(uint32_t causes) "Receive interrupt delayed (ICR causes %u)" e1000e_rx_set_cso(int cso_state) "RX CSO state set to %d" @@ -177,18 +177,16 @@ e1000e_rx_start_recv(void) e1000e_rx_rss_started(void) "Starting RSS processing" e1000e_rx_rss_disabled(void) "RSS is disabled" e1000e_rx_rss_type(uint32_t type) "RSS type is %u" -e1000e_rx_rss_ip4(bool isfragment, bool istcp, uint32_t mrqc, bool tcpipv4_enabled, bool ipv4_enabled) "RSS IPv4: fragment %d, tcp %d, mrqc 0x%X, tcpipv4 enabled %d, ipv4 enabled %d" +e1000e_rx_rss_ip4(int l4hdr_proto, uint32_t mrqc, bool tcpipv4_enabled, bool ipv4_enabled) "RSS IPv4: L4 header protocol %d, mrqc 0x%X, tcpipv4 enabled %d, ipv4 enabled %d" e1000e_rx_rss_ip6_rfctl(uint32_t rfctl) "RSS IPv6: rfctl 0x%X" -e1000e_rx_rss_ip6(bool ex_dis, bool new_ex_dis, bool istcp, bool has_ext_headers, bool ex_dst_valid, bool ex_src_valid, uint32_t mrqc, bool tcpipv6_enabled, bool 
ipv6ex_enabled, bool ipv6_enabled) "RSS IPv6: ex_dis: %d, new_ex_dis: %d, tcp %d, has_ext_headers %d, ex_dst_valid %d, ex_src_valid %d, mrqc 0x%X, tcpipv6 enabled %d, ipv6ex enabled %d, ipv6 enabled %d" -e1000e_rx_rss_dispatched_to_queue(int queue_idx) "Packet being dispatched to queue %d" +e1000e_rx_rss_ip6(bool ex_dis, bool new_ex_dis, int l4hdr_proto, bool has_ext_headers, bool ex_dst_valid, bool ex_src_valid, uint32_t mrqc, bool tcpipv6ex_enabled, bool ipv6ex_enabled, bool ipv6_enabled) "RSS IPv6: ex_dis: %d, new_ex_dis: %d, L4 header protocol %d, has_ext_headers %d, ex_dst_valid %d, ex_src_valid %d, mrqc 0x%X, tcpipv6ex enabled %d, ipv6ex enabled %d, ipv6 enabled %d" -e1000e_rx_metadata_protocols(bool isip4, bool isip6, bool isudp, bool istcp) "protocols: ip4: %d, ip6: %d, udp: %d, tcp: %d" +e1000e_rx_metadata_protocols(bool hasip4, bool hasip6, int l4hdr_protocol) "protocols: ip4: %d, ip6: %d, l4hdr: %d" e1000e_rx_metadata_vlan(uint16_t vlan_tag) "VLAN tag is 0x%X" e1000e_rx_metadata_rss(uint32_t rss, uint32_t mrq) "RSS data: rss: 0x%X, mrq: 0x%X" e1000e_rx_metadata_ip_id(uint16_t ip_id) "the IPv4 ID is 0x%X" e1000e_rx_metadata_ack(void) "the packet is TCP ACK" e1000e_rx_metadata_pkt_type(uint32_t pkt_type) "the packet type is %u" -e1000e_rx_metadata_no_virthdr(void) "the packet has no virt-header" e1000e_rx_metadata_virthdr_no_csum_info(void) "virt-header does not contain checksum info" e1000e_rx_metadata_l3_cso_disabled(void) "IP4 CSO is disabled" e1000e_rx_metadata_l4_cso_disabled(void) "TCP/UDP CSO is disabled" @@ -201,29 +199,22 @@ e1000e_rx_metadata_ipv6_filtering_disabled(void) "IPv6 RX filtering disabled by e1000e_vlan_vet(uint16_t vet) "Setting VLAN ethernet type 0x%X" e1000e_irq_msi_notify(uint32_t cause) "MSI notify 0x%x" -e1000e_irq_throttling_no_pending_interrupts(void) "No pending interrupts to notify" e1000e_irq_msi_notify_postponed(void) "Sending MSI postponed by ITR" e1000e_irq_legacy_notify_postponed(void) "Raising legacy IRQ postponed by ITR" -e1000e_irq_throttling_no_pending_vec(int idx) "No pending interrupts for vector %d" e1000e_irq_msix_notify_postponed_vec(int idx) "Sending MSI-X postponed by EITR[%d]" e1000e_irq_legacy_notify(bool level) "IRQ line state: %d" e1000e_irq_msix_notify_vec(uint32_t vector) "MSI-X notify vector 0x%x" e1000e_irq_postponed_by_xitr(uint32_t reg) "Interrupt postponed by [E]ITR register 0x%x" -e1000e_irq_clear_ims(uint32_t bits, uint32_t old_ims, uint32_t new_ims) "Clearing IMS bits 0x%x: 0x%x --> 0x%x" -e1000e_irq_set_ims(uint32_t bits, uint32_t old_ims, uint32_t new_ims) "Setting IMS bits 0x%x: 0x%x --> 0x%x" +e1000e_irq_clear(uint32_t offset, uint32_t old, uint32_t new) "Clearing interrupt register 0x%x: 0x%x --> 0x%x" +e1000e_irq_set(uint32_t offset, uint32_t old, uint32_t new) "Setting interrupt register 0x%x: 0x%x --> 0x%x" e1000e_irq_fix_icr_asserted(uint32_t new_val) "ICR_ASSERTED bit fixed: 0x%x" e1000e_irq_add_msi_other(uint32_t new_val) "ICR_OTHER bit added: 0x%x" e1000e_irq_pending_interrupts(uint32_t pending, uint32_t icr, uint32_t ims) "ICR PENDING: 0x%x (ICR: 0x%x, IMS: 0x%x)" -e1000e_irq_set_cause_entry(uint32_t val, uint32_t icr) "Going to set IRQ cause 0x%x, ICR: 0x%x" -e1000e_irq_set_cause_exit(uint32_t val, uint32_t icr) "Set IRQ cause 0x%x, ICR: 0x%x" -e1000e_irq_icr_write(uint32_t bits, uint32_t old_icr, uint32_t new_icr) "Clearing ICR bits 0x%x: 0x%x --> 0x%x" e1000e_irq_write_ics(uint32_t val) "Adding ICR bits 0x%x" e1000e_irq_icr_process_iame(void) "Clearing IMS bits due to IAME" e1000e_irq_read_ics(uint32_t 
ics) "Current ICS: 0x%x" e1000e_irq_read_ims(uint32_t ims) "Current IMS: 0x%x" e1000e_irq_icr_clear_nonmsix_icr_read(void) "Clearing ICR on read due to non MSI-X int" -e1000e_irq_icr_read_entry(uint32_t icr) "Starting ICR read. Current ICR: 0x%x" -e1000e_irq_icr_read_exit(uint32_t icr) "Ending ICR read. Current ICR: 0x%x" e1000e_irq_icr_clear_zero_ims(void) "Clearing ICR on read due to zero IMS" e1000e_irq_icr_clear_iame(void) "Clearing ICR on read due to IAME" e1000e_irq_iam_clear_eiame(uint32_t iam, uint32_t cause) "Clearing IMS due to EIAME, IAM: 0x%X, cause: 0x%X" @@ -239,7 +230,6 @@ e1000e_irq_tidv_fpd_not_running(void) "FPD written while TIDV was not running" e1000e_irq_eitr_set(uint32_t eitr_num, uint32_t val) "EITR[%u] = %u" e1000e_irq_itr_set(uint32_t val) "ITR = %u" e1000e_irq_fire_all_timers(uint32_t val) "Firing all delay/throttling timers on all interrupts enable (0x%X written to IMS)" -e1000e_irq_adding_delayed_causes(uint32_t val, uint32_t icr) "Merging delayed causes 0x%X to ICR 0x%X" e1000e_irq_msix_pending_clearing(uint32_t cause, uint32_t int_cfg, uint32_t vec) "Clearing MSI-X pending bit for cause 0x%x, IVAR config 0x%x, vector %u" e1000e_wrn_msix_vec_wrong(uint32_t cause, uint32_t cfg) "Invalid configuration for cause 0x%x: 0x%x" @@ -253,7 +243,7 @@ e1000e_vm_state_stopped(void) "VM state is stopped" # e1000e.c e1000e_cb_pci_realize(void) "E1000E PCI realize entry" e1000e_cb_pci_uninit(void) "E1000E PCI unit entry" -e1000e_cb_qdev_reset(void) "E1000E qdev reset entry" +e1000e_cb_qdev_reset_hold(void) "E1000E qdev reset hold" e1000e_cb_pre_save(void) "E1000E pre save entry" e1000e_cb_post_load(void) "E1000E post load entry" @@ -274,6 +264,39 @@ e1000e_msix_use_vector_fail(uint32_t vec, int32_t res) "Failed to use MSI-X vect e1000e_mac_set_permanent(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "Set permanent MAC: %02x:%02x:%02x:%02x:%02x:%02x" e1000e_cfg_support_virtio(bool support) "Virtio header supported: %d" +# igb.c +igb_write_config(uint32_t address, uint32_t val, int len) "CONFIG write 0x%"PRIx32", value: 0x%"PRIx32", len: %"PRId32 +igbvf_write_config(uint32_t address, uint32_t val, int len) "CONFIG write 0x%"PRIx32", value: 0x%"PRIx32", len: %"PRId32 + +# igb_core.c +igb_core_mdic_read(uint32_t addr, uint32_t data) "MDIC READ: PHY[%u] = 0x%x" +igb_core_mdic_read_unhandled(uint32_t addr) "MDIC READ: PHY[%u] UNHANDLED" +igb_core_mdic_write(uint32_t addr, uint32_t data) "MDIC WRITE: PHY[%u] = 0x%x" +igb_core_mdic_write_unhandled(uint32_t addr) "MDIC WRITE: PHY[%u] UNHANDLED" + +igb_link_set_ext_params(bool asd_check, bool speed_select_bypass, bool pfrstd) "Set extended link params: ASD check: %d, Speed select bypass: %d, PF reset done: %d" + +igb_rx_desc_buff_size(uint32_t b) "buffer size: %u" +igb_rx_desc_buff_write(uint64_t addr, uint16_t offset, const void* source, uint32_t len) "addr: 0x%"PRIx64", offset: %u, from: %p, length: %u" + +igb_rx_metadata_rss(uint32_t rss) "RSS data: 0x%X" + +igb_irq_icr_clear_gpie_nsicr(void) "Clearing ICR on read due to GPIE.NSICR enabled" +igb_irq_set_iam(uint32_t icr) "Update IAM: 0x%x" +igb_irq_read_iam(uint32_t icr) "Current IAM: 0x%x" +igb_irq_write_eics(uint32_t val, bool msix) "Update EICS: 0x%x MSI-X: %d" +igb_irq_write_eims(uint32_t val, bool msix) "Update EIMS: 0x%x MSI-X: %d" +igb_irq_write_eimc(uint32_t val, bool msix) "Update EIMC: 0x%x MSI-X: %d" +igb_irq_write_eiac(uint32_t val) "Update EIAC: 0x%x" +igb_irq_write_eiam(uint32_t val, bool msix) "Update EIAM: 0x%x MSI-X: %d" 
+igb_irq_write_eicr(uint32_t val, bool msix) "Update EICR: 0x%x MSI-X: %d" +igb_irq_eitr_set(uint32_t eitr_num, uint32_t val) "EITR[%u] = 0x%x" +igb_set_pfmailbox(uint32_t vf_num, uint32_t val) "PFMailbox[%d]: 0x%x" +igb_set_vfmailbox(uint32_t vf_num, uint32_t val) "VFMailbox[%d]: 0x%x" + +# igbvf.c +igbvf_wrn_io_addr_unknown(uint64_t addr) "IO unknown register 0x%"PRIx64 + # spapr_llan.c spapr_vlan_get_rx_bd_from_pool_found(int pool, int32_t count, uint32_t rx_bufs) "pool=%d count=%"PRId32" rxbufs=%"PRIu32 spapr_vlan_get_rx_bd_from_page(int buf_ptr, uint64_t bd) "use_buf_ptr=%d bd=0x%016"PRIx64 diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 3ae909041a..6df6b7329d 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -805,7 +805,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, } if (!get_vhost_net(nc->peer)) { - virtio_add_feature(&features, VIRTIO_F_RING_RESET); return features; } @@ -820,6 +819,21 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, features |= (1ULL << VIRTIO_NET_F_MTU); } + + /* + * Since GUEST_ANNOUNCE is emulated, the feature bit could be set without + * being enabled. This happens in the vDPA case. + * + * Make sure the feature set is not incoherent, as the driver could refuse + * to start. + * + * TODO: QEMU is able to emulate a CVQ just for guest_announce purposes, + * helping the guest announce its new location with vDPA devices that do + * not support it. + */ + if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) { + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE); + } + return features; } @@ -1731,39 +1745,61 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size) return 0; } -static uint8_t virtio_net_get_hash_type(bool isip4, - bool isip6, - bool isudp, - bool istcp, +static uint8_t virtio_net_get_hash_type(bool hasip4, + bool hasip6, + EthL4HdrProto l4hdr_proto, uint32_t types) { - if (isip4) { - if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) { - return NetPktRssIpV4Tcp; - } - if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) { - return NetPktRssIpV4Udp; + if (hasip4) { + switch (l4hdr_proto) { + case ETH_L4_HDR_PROTO_TCP: + if (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { + return NetPktRssIpV4Tcp; + } + break; + + case ETH_L4_HDR_PROTO_UDP: + if (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { + return NetPktRssIpV4Udp; + } + break; + + default: + break; } + + if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { return NetPktRssIpV4; } - } else if (isip6) { - uint32_t mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | - VIRTIO_NET_RSS_HASH_TYPE_TCPv6; + } else if (hasip6) { + switch (l4hdr_proto) { + case ETH_L4_HDR_PROTO_TCP: + if (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) { + return NetPktRssIpV6TcpEx; + } + if (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { + return NetPktRssIpV6Tcp; + } + break; - if (istcp && (types & mask)) { - return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ? - NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp; + case ETH_L4_HDR_PROTO_UDP: + if (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) { + return NetPktRssIpV6UdpEx; + } + if (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { + return NetPktRssIpV6Udp; + } + break; + + default: + break; } - mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6; - if (isudp && (types & mask)) { - return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ? 
- NetPktRssIpV6UdpEx : NetPktRssIpV6Udp; + + if (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) { + return NetPktRssIpV6Ex; } - mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6; - if (types & mask) { - return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ? - NetPktRssIpV6Ex : NetPktRssIpV6; + if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { + return NetPktRssIpV6; } } return 0xff; @@ -1785,7 +1821,8 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf, struct NetRxPkt *pkt = n->rx_pkt; uint8_t net_hash_type; uint32_t hash; - bool isip4, isip6, isudp, istcp; + bool hasip4, hasip6; + EthL4HdrProto l4hdr_proto; static const uint8_t reports[NetPktRssIpV6UdpEx + 1] = { VIRTIO_NET_HASH_REPORT_IPv4, VIRTIO_NET_HASH_REPORT_TCPv4, @@ -1797,17 +1834,14 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf, VIRTIO_NET_HASH_REPORT_UDPv6, VIRTIO_NET_HASH_REPORT_UDPv6_EX }; + struct iovec iov = { + .iov_base = (void *)buf, + .iov_len = size + }; - net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len, - size - n->host_hdr_len); - net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); - if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) { - istcp = isudp = false; - } - if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) { - istcp = isudp = false; - } - net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp, + net_rx_pkt_set_protocols(pkt, &iov, 1, n->host_hdr_len); + net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); + net_hash_type = virtio_net_get_hash_type(hasip4, hasip6, l4hdr_proto, n->rss_data.hash_types); if (net_hash_type > NetPktRssIpV6UdpEx) { if (n->rss_data.populate_hash) { @@ -2885,7 +2919,8 @@ static void virtio_net_add_queue(VirtIONet *n, int index) n->vqs[index].tx_vq = virtio_add_queue(vdev, n->net_conf.tx_queue_size, virtio_net_handle_tx_bh); - n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]); + n->vqs[index].tx_bh = qemu_bh_new_guarded(virtio_net_tx_bh, &n->vqs[index], + &DEVICE(vdev)->mem_reentrancy_guard); } n->vqs[index].tx_waiting = 0; @@ -3703,7 +3738,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) QTAILQ_INIT(&n->rsc_chains); n->qdev = dev; - net_rx_pkt_init(&n->rx_pkt, false); + net_rx_pkt_init(&n->rx_pkt); if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) { virtio_net_load_ebpf(n); diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index 56559cda24..18b9edfdb2 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -440,19 +440,19 @@ vmxnet3_setup_tx_offloads(VMXNET3State *s) { switch (s->offload_mode) { case VMXNET3_OM_NONE: - net_tx_pkt_build_vheader(s->tx_pkt, false, false, 0); - break; + return net_tx_pkt_build_vheader(s->tx_pkt, false, false, 0); case VMXNET3_OM_CSUM: - net_tx_pkt_build_vheader(s->tx_pkt, false, true, 0); VMW_PKPRN("L4 CSO requested\n"); - break; + return net_tx_pkt_build_vheader(s->tx_pkt, false, true, 0); case VMXNET3_OM_TSO: - net_tx_pkt_build_vheader(s->tx_pkt, true, true, - s->cso_or_gso_size); - net_tx_pkt_update_ip_checksums(s->tx_pkt); VMW_PKPRN("GSO offload requested."); + if (!net_tx_pkt_build_vheader(s->tx_pkt, true, true, + s->cso_or_gso_size)) { + return false; + } + net_tx_pkt_update_ip_checksums(s->tx_pkt); break; default: @@ -651,9 +651,8 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx) data_len = (txd.len > 0) ? 
txd.len : VMXNET3_MAX_TX_BUF_SIZE; data_pa = txd.addr; - if (!net_tx_pkt_add_raw_fragment(s->tx_pkt, - data_pa, - data_len)) { + if (!net_tx_pkt_add_raw_fragment_pci(s->tx_pkt, PCI_DEVICE(s), + data_pa, data_len)) { s->skip_current_tx_pkt = true; } } @@ -678,9 +677,12 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx) vmxnet3_complete_packet(s, qidx, txd_idx); s->tx_sop = true; s->skip_current_tx_pkt = false; - net_tx_pkt_reset(s->tx_pkt); + net_tx_pkt_reset(s->tx_pkt, + net_tx_pkt_unmap_frag_pci, PCI_DEVICE(s)); } } + + net_tx_pkt_reset(s->tx_pkt, net_tx_pkt_unmap_frag_pci, PCI_DEVICE(s)); } static inline void @@ -847,21 +849,20 @@ static void vmxnet3_rx_need_csum_calculate(struct NetRxPkt *pkt, size_t pkt_len) { struct virtio_net_hdr *vhdr; - bool isip4, isip6, istcp, isudp; + bool hasip4, hasip6; + EthL4HdrProto l4hdr_proto; uint8_t *data; int len; - if (!net_rx_pkt_has_virt_hdr(pkt)) { - return; - } - vhdr = net_rx_pkt_get_vhdr(pkt); if (!VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM)) { return; } - net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); - if (!(isip4 || isip6) || !(istcp || isudp)) { + net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); + if (!(hasip4 || hasip6) || + (l4hdr_proto != ETH_L4_HDR_PROTO_TCP && + l4hdr_proto != ETH_L4_HDR_PROTO_UDP)) { return; } @@ -889,7 +890,8 @@ static void vmxnet3_rx_update_descr(struct NetRxPkt *pkt, struct Vmxnet3_RxCompDesc *rxcd) { int csum_ok, is_gso; - bool isip4, isip6, istcp, isudp; + bool hasip4, hasip6; + EthL4HdrProto l4hdr_proto; struct virtio_net_hdr *vhdr; uint8_t offload_type; @@ -898,10 +900,6 @@ static void vmxnet3_rx_update_descr(struct NetRxPkt *pkt, rxcd->tci = net_rx_pkt_get_vlan_tag(pkt); } - if (!net_rx_pkt_has_virt_hdr(pkt)) { - goto nocsum; - } - vhdr = net_rx_pkt_get_vhdr(pkt); /* * Checksum is valid when lower level tell so or when lower level @@ -919,16 +917,18 @@ static void vmxnet3_rx_update_descr(struct NetRxPkt *pkt, goto nocsum; } - net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); - if ((!istcp && !isudp) || (!isip4 && !isip6)) { + net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); + if ((l4hdr_proto != ETH_L4_HDR_PROTO_TCP && + l4hdr_proto != ETH_L4_HDR_PROTO_UDP) || + (!hasip4 && !hasip6)) { goto nocsum; } rxcd->cnc = 0; - rxcd->v4 = isip4 ? 1 : 0; - rxcd->v6 = isip6 ? 1 : 0; - rxcd->tcp = istcp ? 1 : 0; - rxcd->udp = isudp ? 1 : 0; + rxcd->v4 = hasip4 ? 1 : 0; + rxcd->v6 = hasip6 ? 
1 : 0; + rxcd->tcp = l4hdr_proto == ETH_L4_HDR_PROTO_TCP; + rxcd->udp = l4hdr_proto == ETH_L4_HDR_PROTO_UDP; rxcd->fcs = rxcd->tuc = rxcd->ipc = 1; return; @@ -1161,7 +1161,6 @@ static void vmxnet3_deactivate_device(VMXNET3State *s) { if (s->device_active) { VMW_CBPRN("Deactivating vmxnet3..."); - net_tx_pkt_reset(s->tx_pkt); net_tx_pkt_uninit(s->tx_pkt); net_rx_pkt_uninit(s->rx_pkt); s->device_active = false; @@ -1521,9 +1520,8 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* Preallocate TX packet wrapper */ VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags); - net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), - s->max_tx_frags, s->peer_has_vhdr); - net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr); + net_tx_pkt_init(&s->tx_pkt, s->max_tx_frags); + net_rx_pkt_init(&s->rx_pkt); /* Read rings memory locations for RX queues */ for (i = 0; i < s->rxq_num; i++) { @@ -2004,7 +2002,12 @@ vmxnet3_receive(NetClientState *nc, const uint8_t *buf, size_t size) get_eth_packet_type(PKT_GET_ETH_HDR(buf))); if (vmxnet3_rx_filter_may_indicate(s, buf, size)) { - net_rx_pkt_set_protocols(s->rx_pkt, buf, size); + struct iovec iov = { + .iov_base = (void *)buf, + .iov_len = size + }; + + net_rx_pkt_set_protocols(s->rx_pkt, &iov, 1, 0); vmxnet3_rx_need_csum_calculate(s->rx_pkt, buf, size); net_rx_pkt_attach_data(s->rx_pkt, buf, size, s->rx_vlan_stripping); bytes_indicated = vmxnet3_indicate_packet(s) ? size : -1; @@ -2402,9 +2405,8 @@ static int vmxnet3_post_load(void *opaque, int version_id) { VMXNET3State *s = opaque; - net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), - s->max_tx_frags, s->peer_has_vhdr); - net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr); + net_tx_pkt_init(&s->tx_pkt, s->max_tx_frags); + net_rx_pkt_init(&s->rx_pkt); if (s->msix_used) { vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS); diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c index 7d92c2d022..9bbf6599fc 100644 --- a/hw/net/xen_nic.c +++ b/hw/net/xen_nic.c @@ -145,7 +145,7 @@ static void net_tx_packets(struct XenNetDev *netdev) continue; } - if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) { + if ((txreq.offset + txreq.size) > XEN_PAGE_SIZE) { xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n"); net_tx_error(netdev, &txreq, rc); continue; @@ -171,7 +171,7 @@ static void net_tx_packets(struct XenNetDev *netdev) if (txreq.flags & NETTXF_csum_blank) { /* have read-only mapping -> can't fill checksum in-place */ if (!tmpbuf) { - tmpbuf = g_malloc(XC_PAGE_SIZE); + tmpbuf = g_malloc(XEN_PAGE_SIZE); } memcpy(tmpbuf, page + txreq.offset, txreq.size); net_checksum_calculate(tmpbuf, txreq.size, CSUM_ALL); @@ -181,7 +181,7 @@ static void net_tx_packets(struct XenNetDev *netdev) qemu_send_packet(qemu_get_queue(netdev->nic), page + txreq.offset, txreq.size); } - xen_be_unmap_grant_ref(&netdev->xendev, page); + xen_be_unmap_grant_ref(&netdev->xendev, page, txreq.gref); net_tx_response(netdev, &txreq, NETIF_RSP_OKAY); } if (!netdev->tx_work) { @@ -243,9 +243,9 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) { return 0; } - if (size > XC_PAGE_SIZE - NET_IP_ALIGN) { + if (size > XEN_PAGE_SIZE - NET_IP_ALIGN) { xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)", - (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN); + (unsigned long)size, XEN_PAGE_SIZE - NET_IP_ALIGN); return -1; } @@ -261,7 +261,7 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size return -1; } memcpy(page + NET_IP_ALIGN, buf, size); - 
xen_be_unmap_grant_ref(&netdev->xendev, page); + xen_be_unmap_grant_ref(&netdev->xendev, page, rxreq.gref); net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0); return size; @@ -343,12 +343,13 @@ static int net_connect(struct XenLegacyDevice *xendev) netdev->rx_ring_ref, PROT_READ | PROT_WRITE); if (!netdev->rxs) { - xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs); + xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs, + netdev->tx_ring_ref); netdev->txs = NULL; return -1; } - BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE); - BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE); + BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XEN_PAGE_SIZE); + BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XEN_PAGE_SIZE); xen_be_bind_evtchn(&netdev->xendev); @@ -368,11 +369,13 @@ static void net_disconnect(struct XenLegacyDevice *xendev) xen_pv_unbind_evtchn(&netdev->xendev); if (netdev->txs) { - xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs); + xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs, + netdev->tx_ring_ref); netdev->txs = NULL; } if (netdev->rxs) { - xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs); + xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs, + netdev->rx_ring_ref); netdev->rxs = NULL; } } diff --git a/hw/net/xilinx_ethlite.c b/hw/net/xilinx_ethlite.c index 99c22819ea..89f4f3b254 100644 --- a/hw/net/xilinx_ethlite.c +++ b/hw/net/xilinx_ethlite.c @@ -25,7 +25,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" #include "qom/object.h" -#include "cpu.h" /* FIXME should not use tswap* */ +#include "exec/tswap.h" #include "hw/sysbus.h" #include "hw/irq.h" #include "hw/qdev-properties.h" diff --git a/hw/nubus/nubus-device.c b/hw/nubus/nubus-device.c index 0f1852f671..49008e4938 100644 --- a/hw/nubus/nubus-device.c +++ b/hw/nubus/nubus-device.c @@ -80,6 +80,7 @@ static void nubus_device_realize(DeviceState *dev, Error **errp) &error_abort); ret = load_image_mr(path, &nd->decl_rom); g_free(path); + g_free(name); if (ret < 0) { error_setg(errp, "could not load romfile \"%s\"", nd->romfile); return; diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index f25cc2c235..fd917fcda1 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -238,6 +238,8 @@ static const bool nvme_feature_support[NVME_FID_MAX] = { [NVME_TIMESTAMP] = true, [NVME_HOST_BEHAVIOR_SUPPORT] = true, [NVME_COMMAND_SET_PROFILE] = true, + [NVME_FDP_MODE] = true, + [NVME_FDP_EVENTS] = true, }; static const uint32_t nvme_feature_cap[NVME_FID_MAX] = { @@ -249,6 +251,8 @@ static const uint32_t nvme_feature_cap[NVME_FID_MAX] = { [NVME_TIMESTAMP] = NVME_FEAT_CAP_CHANGE, [NVME_HOST_BEHAVIOR_SUPPORT] = NVME_FEAT_CAP_CHANGE, [NVME_COMMAND_SET_PROFILE] = NVME_FEAT_CAP_CHANGE, + [NVME_FDP_MODE] = NVME_FEAT_CAP_CHANGE, + [NVME_FDP_EVENTS] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS, }; static const uint32_t nvme_cse_acs[256] = { @@ -266,6 +270,8 @@ static const uint32_t nvme_cse_acs[256] = { [NVME_ADM_CMD_VIRT_MNGMT] = NVME_CMD_EFF_CSUPP, [NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP, [NVME_ADM_CMD_FORMAT_NVM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_ADM_CMD_DIRECTIVE_RECV] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_DIRECTIVE_SEND] = NVME_CMD_EFF_CSUPP, }; static const uint32_t nvme_cse_iocs_none[256]; @@ -279,6 +285,8 @@ static const uint32_t nvme_cse_iocs_nvm[256] = { [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP, [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP, + [NVME_CMD_IO_MGMT_RECV] = NVME_CMD_EFF_CSUPP, + [NVME_CMD_IO_MGMT_SEND] = 
NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, }; static const uint32_t nvme_cse_iocs_zoned[256] = { @@ -297,12 +305,66 @@ static const uint32_t nvme_cse_iocs_zoned[256] = { static void nvme_process_sq(void *opaque); static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst); +static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n); static uint16_t nvme_sqid(NvmeRequest *req) { return le16_to_cpu(req->sq->sqid); } +static inline uint16_t nvme_make_pid(NvmeNamespace *ns, uint16_t rg, + uint16_t ph) +{ + uint16_t rgif = ns->endgrp->fdp.rgif; + + if (!rgif) { + return ph; + } + + return (rg << (16 - rgif)) | ph; +} + +static inline bool nvme_ph_valid(NvmeNamespace *ns, uint16_t ph) +{ + return ph < ns->fdp.nphs; +} + +static inline bool nvme_rg_valid(NvmeEnduranceGroup *endgrp, uint16_t rg) +{ + return rg < endgrp->fdp.nrg; +} + +static inline uint16_t nvme_pid2ph(NvmeNamespace *ns, uint16_t pid) +{ + uint16_t rgif = ns->endgrp->fdp.rgif; + + if (!rgif) { + return pid; + } + + return pid & ((1 << (15 - rgif)) - 1); +} + +static inline uint16_t nvme_pid2rg(NvmeNamespace *ns, uint16_t pid) +{ + uint16_t rgif = ns->endgrp->fdp.rgif; + + if (!rgif) { + return 0; + } + + return pid >> (16 - rgif); +} + +static inline bool nvme_parse_pid(NvmeNamespace *ns, uint16_t pid, + uint16_t *ph, uint16_t *rg) +{ + *rg = nvme_pid2rg(ns, pid); + *ph = nvme_pid2ph(ns, pid); + + return nvme_ph_valid(ns, *ph) && nvme_rg_valid(ns->endgrp, *rg); +} + static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone, NvmeZoneState state) { @@ -376,6 +438,69 @@ static uint16_t nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn) return nvme_zns_check_resources(ns, act, opn, 0); } +static NvmeFdpEvent *nvme_fdp_alloc_event(NvmeCtrl *n, NvmeFdpEventBuffer *ebuf) +{ + NvmeFdpEvent *ret = NULL; + bool is_full = ebuf->next == ebuf->start && ebuf->nelems; + + ret = &ebuf->events[ebuf->next++]; + if (unlikely(ebuf->next == NVME_FDP_MAX_EVENTS)) { + ebuf->next = 0; + } + if (is_full) { + ebuf->start = ebuf->next; + } else { + ebuf->nelems++; + } + + memset(ret, 0, sizeof(NvmeFdpEvent)); + ret->timestamp = nvme_get_timestamp(n); + + return ret; +} + +static inline int log_event(NvmeRuHandle *ruh, uint8_t event_type) +{ + return (ruh->event_filter >> nvme_fdp_evf_shifts[event_type]) & 0x1; +} + +static bool nvme_update_ruh(NvmeCtrl *n, NvmeNamespace *ns, uint16_t pid) +{ + NvmeEnduranceGroup *endgrp = ns->endgrp; + NvmeRuHandle *ruh; + NvmeReclaimUnit *ru; + NvmeFdpEvent *e = NULL; + uint16_t ph, rg, ruhid; + + if (!nvme_parse_pid(ns, pid, &ph, &rg)) { + return false; + } + + ruhid = ns->fdp.phs[ph]; + + ruh = &endgrp->fdp.ruhs[ruhid]; + ru = &ruh->rus[rg]; + + if (ru->ruamw) { + if (log_event(ruh, FDP_EVT_RU_NOT_FULLY_WRITTEN)) { + e = nvme_fdp_alloc_event(n, &endgrp->fdp.host_events); + e->type = FDP_EVT_RU_NOT_FULLY_WRITTEN; + e->flags = FDPEF_PIV | FDPEF_NSIDV | FDPEF_LV; + e->pid = cpu_to_le16(pid); + e->nsid = cpu_to_le32(ns->params.nsid); + e->rgid = cpu_to_le16(rg); + e->ruhid = cpu_to_le16(ruhid); + } + + /* log (eventual) GC overhead of prematurely swapping the RU */ + nvme_fdp_stat_inc(&endgrp->fdp.mbmw, nvme_l2b(ns, ru->ruamw)); + } + + ru->ruamw = ruh->ruamw; + + return true; +} + static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr) { hwaddr hi, lo; @@ -1309,26 +1434,26 @@ uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len, } static inline void nvme_blk_read(BlockBackend *blk, int64_t offset, - BlockCompletionFunc *cb, NvmeRequest *req) + uint32_t align, BlockCompletionFunc *cb, + 
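/*
 * Worked example (illustrative only, values assumed): with rgif = 4, the
 * helpers above pack and unpack a placement identifier as follows. The
 * reclaim group occupies the top rgif bits of the 16-bit PID and the
 * placement handle the low bits:
 *
 *     uint16_t rg = 3, ph = 5;
 *     uint16_t pid = (rg << (16 - 4)) | ph;          // 0x3005
 *     uint16_t rg2 = pid >> (16 - 4);                // 3
 *     uint16_t ph2 = pid & ((1 << (15 - 4)) - 1);    // 5
 *
 * With rgif == 0 there is a single reclaim group and the whole PID is
 * interpreted as the placement handle.
 */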
NvmeRequest *req) { assert(req->sg.flags & NVME_SG_ALLOC); if (req->sg.flags & NVME_SG_DMA) { - req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE, - cb, req); + req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, align, cb, req); } else { req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req); } } static inline void nvme_blk_write(BlockBackend *blk, int64_t offset, - BlockCompletionFunc *cb, NvmeRequest *req) + uint32_t align, BlockCompletionFunc *cb, + NvmeRequest *req) { assert(req->sg.flags & NVME_SG_ALLOC); if (req->sg.flags & NVME_SG_DMA) { - req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE, - cb, req); + req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, align, cb, req); } else { req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req); } @@ -2082,10 +2207,10 @@ static void nvme_rw_cb(void *opaque, int ret) } if (req->cmd.opcode == NVME_CMD_READ) { - return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req); + return nvme_blk_read(blk, offset, 1, nvme_rw_complete_cb, req); } - return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req); + return nvme_blk_write(blk, offset, 1, nvme_rw_complete_cb, req); } } @@ -2253,7 +2378,7 @@ static void nvme_compare_mdata_cb(void *opaque, int ret) for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) { if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) { - req->status = NVME_CMP_FAILURE; + req->status = NVME_CMP_FAILURE | NVME_DNR; goto out; } } @@ -2262,7 +2387,7 @@ static void nvme_compare_mdata_cb(void *opaque, int ret) } if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) { - req->status = NVME_CMP_FAILURE; + req->status = NVME_CMP_FAILURE | NVME_DNR; goto out; } @@ -2311,7 +2436,7 @@ static void nvme_compare_data_cb(void *opaque, int ret) } if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) { - req->status = NVME_CMP_FAILURE; + req->status = NVME_CMP_FAILURE | NVME_DNR; goto out; } @@ -2494,6 +2619,9 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req) status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr, req); if (status) { + g_free(iocb->range); + qemu_aio_unref(iocb); + return status; } @@ -3312,7 +3440,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req) block_acct_start(blk_get_stats(blk), &req->acct, data_size, BLOCK_ACCT_READ); - nvme_blk_read(blk, data_offset, nvme_rw_cb, req); + nvme_blk_read(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req); return NVME_NO_COMPLETE; invalid: @@ -3320,6 +3448,41 @@ invalid: return status | NVME_DNR; } +static void nvme_do_write_fdp(NvmeCtrl *n, NvmeRequest *req, uint64_t slba, + uint32_t nlb) +{ + NvmeNamespace *ns = req->ns; + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + uint64_t data_size = nvme_l2b(ns, nlb); + uint32_t dw12 = le32_to_cpu(req->cmd.cdw12); + uint8_t dtype = (dw12 >> 20) & 0xf; + uint16_t pid = le16_to_cpu(rw->dspec); + uint16_t ph, rg, ruhid; + NvmeReclaimUnit *ru; + + if (dtype != NVME_DIRECTIVE_DATA_PLACEMENT || + !nvme_parse_pid(ns, pid, &ph, &rg)) { + ph = 0; + rg = 0; + } + + ruhid = ns->fdp.phs[ph]; + ru = &ns->endgrp->fdp.ruhs[ruhid].rus[rg]; + + nvme_fdp_stat_inc(&ns->endgrp->fdp.hbmw, data_size); + nvme_fdp_stat_inc(&ns->endgrp->fdp.mbmw, data_size); + + while (nlb) { + if (nlb < ru->ruamw) { + ru->ruamw -= nlb; + break; + } + + nlb -= ru->ruamw; + nvme_update_ruh(n, ns, pid); + } +} + static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, bool wrz) { @@ -3429,6 +3592,8 @@ static uint16_t nvme_do_write(NvmeCtrl *n, 
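/*
 * Worked example for the reclaim-unit accounting loop in
 * nvme_do_write_fdp() above (illustrative numbers, not from the patch):
 * assume the current reclaim unit has ru->ruamw = 100 writable blocks
 * left, a full unit holds ruh->ruamw = 200 blocks, and the write spans
 * nlb = 250 blocks.
 *
 *     iteration 1: nlb (250) >= ru->ruamw (100); nlb becomes 150 and
 *                  nvme_update_ruh() swaps in a fresh unit (ruamw = 200)
 *     iteration 2: nlb (150) <  ru->ruamw (200); ruamw drops to 50, done
 *
 * Only writes that cross a reclaim-unit boundary trigger an RU change and
 * the associated event logging and media-write statistics.
 */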
NvmeRequest *req, bool append, if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) { zone->w_ptr += nlb; } + } else if (ns->endgrp && ns->endgrp->fdp.enabled) { + nvme_do_write_fdp(n, req, slba, nlb); } data_offset = nvme_l2b(ns, slba); @@ -3445,7 +3610,7 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, block_acct_start(blk_get_stats(blk), &req->acct, data_size, BLOCK_ACCT_WRITE); - nvme_blk_write(blk, data_offset, nvme_rw_cb, req); + nvme_blk_write(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req); } else { req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size, BDRV_REQ_MAY_UNMAP, nvme_rw_cb, @@ -4086,6 +4251,126 @@ static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) return status; } +static uint16_t nvme_io_mgmt_recv_ruhs(NvmeCtrl *n, NvmeRequest *req, + size_t len) +{ + NvmeNamespace *ns = req->ns; + NvmeEnduranceGroup *endgrp; + NvmeRuhStatus *hdr; + NvmeRuhStatusDescr *ruhsd; + unsigned int nruhsd; + uint16_t rg, ph, *ruhid; + size_t trans_len; + g_autofree uint8_t *buf = NULL; + + if (!n->subsys) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (ns->params.nsid == 0 || ns->params.nsid == 0xffffffff) { + return NVME_INVALID_NSID | NVME_DNR; + } + + if (!n->subsys->endgrp.fdp.enabled) { + return NVME_FDP_DISABLED | NVME_DNR; + } + + endgrp = ns->endgrp; + + nruhsd = ns->fdp.nphs * endgrp->fdp.nrg; + trans_len = sizeof(NvmeRuhStatus) + nruhsd * sizeof(NvmeRuhStatusDescr); + buf = g_malloc(trans_len); + + trans_len = MIN(trans_len, len); + + hdr = (NvmeRuhStatus *)buf; + ruhsd = (NvmeRuhStatusDescr *)(buf + sizeof(NvmeRuhStatus)); + + hdr->nruhsd = cpu_to_le16(nruhsd); + + ruhid = ns->fdp.phs; + + for (ph = 0; ph < ns->fdp.nphs; ph++, ruhid++) { + NvmeRuHandle *ruh = &endgrp->fdp.ruhs[*ruhid]; + + for (rg = 0; rg < endgrp->fdp.nrg; rg++, ruhsd++) { + uint16_t pid = nvme_make_pid(ns, rg, ph); + + ruhsd->pid = cpu_to_le16(pid); + ruhsd->ruhid = *ruhid; + ruhsd->earutr = 0; + ruhsd->ruamw = cpu_to_le64(ruh->rus[rg].ruamw); + } + } + + return nvme_c2h(n, buf, trans_len, req); +} + +static uint16_t nvme_io_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeCmd *cmd = &req->cmd; + uint32_t cdw10 = le32_to_cpu(cmd->cdw10); + uint32_t numd = le32_to_cpu(cmd->cdw11); + uint8_t mo = (cdw10 & 0xff); + size_t len = (numd + 1) << 2; + + switch (mo) { + case NVME_IOMR_MO_NOP: + return 0; + case NVME_IOMR_MO_RUH_STATUS: + return nvme_io_mgmt_recv_ruhs(n, req, len); + default: + return NVME_INVALID_FIELD | NVME_DNR; + }; +} + +static uint16_t nvme_io_mgmt_send_ruh_update(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeCmd *cmd = &req->cmd; + NvmeNamespace *ns = req->ns; + uint32_t cdw10 = le32_to_cpu(cmd->cdw10); + uint16_t ret = NVME_SUCCESS; + uint32_t npid = (cdw10 >> 1) + 1; + unsigned int i = 0; + g_autofree uint16_t *pids = NULL; + uint32_t maxnpid = n->subsys->endgrp.fdp.nrg * n->subsys->endgrp.fdp.nruh; + + if (unlikely(npid >= MIN(NVME_FDP_MAXPIDS, maxnpid))) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + pids = g_new(uint16_t, npid); + + ret = nvme_h2c(n, pids, npid * sizeof(uint16_t), req); + if (ret) { + return ret; + } + + for (; i < npid; i++) { + if (!nvme_update_ruh(n, ns, pids[i])) { + return NVME_INVALID_FIELD | NVME_DNR; + } + } + + return ret; +} + +static uint16_t nvme_io_mgmt_send(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeCmd *cmd = &req->cmd; + uint32_t cdw10 = le32_to_cpu(cmd->cdw10); + uint8_t mo = (cdw10 & 0xff); + + switch (mo) { + case NVME_IOMS_MO_NOP: + return 0; + case NVME_IOMS_MO_RUH_UPDATE: + return nvme_io_mgmt_send_ruh_update(n, 
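/*
 * Transfer-length math used by nvme_io_mgmt_recv() above, with assumed
 * example values: NUMD (CDW11) is a 0's based dword count, so
 * len = (numd + 1) << 2 bytes; numd = 255 yields 1024 bytes. For the RUH
 * status page with, say, nphs = 2 placement handles and nrg = 4 reclaim
 * groups:
 *
 *     nruhsd    = 2 * 4;    // 8 descriptors, one per (handle, group) pair
 *     trans_len = sizeof(NvmeRuhStatus) + 8 * sizeof(NvmeRuhStatusDescr);
 *
 * and only MIN(trans_len, len) bytes are copied back to the host.
 */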
req); + default: + return NVME_INVALID_FIELD | NVME_DNR; + }; +} + static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) { NvmeNamespace *ns; @@ -4162,6 +4447,10 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) return nvme_zone_mgmt_send(n, req); case NVME_CMD_ZONE_MGMT_RECV: return nvme_zone_mgmt_recv(n, req); + case NVME_CMD_IO_MGMT_RECV: + return nvme_io_mgmt_recv(n, req); + case NVME_CMD_IO_MGMT_SEND: + return nvme_io_mgmt_send(n, req); default: assert(false); } @@ -4318,7 +4607,8 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr, QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry); } - sq->bh = qemu_bh_new(nvme_process_sq, sq); + sq->bh = qemu_bh_new_guarded(nvme_process_sq, sq, + &DEVICE(sq->ctrl)->mem_reentrancy_guard); if (n->dbbuf_enabled) { sq->db_addr = n->dbbuf_dbs + (sqid << 3); @@ -4386,8 +4676,8 @@ static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats) { BlockAcctStats *s = blk_get_stats(ns->blkconf.blk); - stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS; - stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS; + stats->units_read += s->nr_bytes[BLOCK_ACCT_READ]; + stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE]; stats->read_commands += s->nr_ops[BLOCK_ACCT_READ]; stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE]; } @@ -4401,6 +4691,7 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, uint32_t trans_len; NvmeNamespace *ns; time_t current_ms; + uint64_t u_read, u_written; if (off >= sizeof(smart)) { return NVME_INVALID_FIELD | NVME_DNR; @@ -4427,10 +4718,11 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, trans_len = MIN(sizeof(smart) - off, buf_len); smart.critical_warning = n->smart_critical_warning; - smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read, - 1000)); - smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written, - 1000)); + u_read = DIV_ROUND_UP(stats.units_read >> BDRV_SECTOR_BITS, 1000); + u_written = DIV_ROUND_UP(stats.units_written >> BDRV_SECTOR_BITS, 1000); + + smart.data_units_read[0] = cpu_to_le64(u_read); + smart.data_units_written[0] = cpu_to_le64(u_written); smart.host_read_commands[0] = cpu_to_le64(stats.read_commands); smart.host_write_commands[0] = cpu_to_le64(stats.write_commands); @@ -4452,6 +4744,48 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req); } +static uint16_t nvme_endgrp_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, + uint64_t off, NvmeRequest *req) +{ + uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); + uint16_t endgrpid = (dw11 >> 16) & 0xffff; + struct nvme_stats stats = {}; + NvmeEndGrpLog info = {}; + int i; + + if (!n->subsys || endgrpid != 0x1) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (off >= sizeof(info)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i); + if (!ns) { + continue; + } + + nvme_set_blk_stats(ns, &stats); + } + + info.data_units_read[0] = + cpu_to_le64(DIV_ROUND_UP(stats.units_read / 1000000000, 1000000000)); + info.data_units_written[0] = + cpu_to_le64(DIV_ROUND_UP(stats.units_written / 1000000000, 1000000000)); + info.media_units_written[0] = + cpu_to_le64(DIV_ROUND_UP(stats.units_written / 1000000000, 1000000000)); + + info.host_read_commands[0] = cpu_to_le64(stats.read_commands); + 
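/*
 * Note on the unit change above: nvme_set_blk_stats() now accumulates raw
 * bytes, and the SMART log converts to 1000-sector "data units" only at
 * report time. Assumed example: after 1 GiB of reads,
 *
 *     u_read = DIV_ROUND_UP(1073741824 >> BDRV_SECTOR_BITS, 1000)
 *            = DIV_ROUND_UP(2097152, 1000)
 *            = 2098
 *
 * which is the value stored in smart.data_units_read[0].
 */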
info.host_write_commands[0] = cpu_to_le64(stats.write_commands); + + buf_len = MIN(sizeof(info) - off, buf_len); + + return nvme_c2h(n, (uint8_t *)&info + off, buf_len, req); +} + + static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off, NvmeRequest *req) { @@ -4577,6 +4911,207 @@ static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len, return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req); } +static size_t sizeof_fdp_conf_descr(size_t nruh, size_t vss) +{ + size_t entry_siz = sizeof(NvmeFdpDescrHdr) + nruh * sizeof(NvmeRuhDescr) + + vss; + return ROUND_UP(entry_siz, 8); +} + +static uint16_t nvme_fdp_confs(NvmeCtrl *n, uint32_t endgrpid, uint32_t buf_len, + uint64_t off, NvmeRequest *req) +{ + uint32_t log_size, trans_len; + g_autofree uint8_t *buf = NULL; + NvmeFdpDescrHdr *hdr; + NvmeRuhDescr *ruhd; + NvmeEnduranceGroup *endgrp; + NvmeFdpConfsHdr *log; + size_t nruh, fdp_descr_size; + int i; + + if (endgrpid != 1 || !n->subsys) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + endgrp = &n->subsys->endgrp; + + if (endgrp->fdp.enabled) { + nruh = endgrp->fdp.nruh; + } else { + nruh = 1; + } + + fdp_descr_size = sizeof_fdp_conf_descr(nruh, FDPVSS); + log_size = sizeof(NvmeFdpConfsHdr) + fdp_descr_size; + + if (off >= log_size) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + trans_len = MIN(log_size - off, buf_len); + + buf = g_malloc0(log_size); + log = (NvmeFdpConfsHdr *)buf; + hdr = (NvmeFdpDescrHdr *)(log + 1); + ruhd = (NvmeRuhDescr *)(buf + sizeof(*log) + sizeof(*hdr)); + + log->num_confs = cpu_to_le16(0); + log->size = cpu_to_le32(log_size); + + hdr->descr_size = cpu_to_le16(fdp_descr_size); + if (endgrp->fdp.enabled) { + hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, VALID, 1); + hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, RGIF, endgrp->fdp.rgif); + hdr->nrg = cpu_to_le16(endgrp->fdp.nrg); + hdr->nruh = cpu_to_le16(endgrp->fdp.nruh); + hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1); + hdr->nnss = cpu_to_le32(NVME_MAX_NAMESPACES); + hdr->runs = cpu_to_le64(endgrp->fdp.runs); + + for (i = 0; i < nruh; i++) { + ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED; + ruhd++; + } + } else { + /* 1 bit for RUH in PIF -> 2 RUHs max. 
*/ + hdr->nrg = cpu_to_le16(1); + hdr->nruh = cpu_to_le16(1); + hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1); + hdr->nnss = cpu_to_le32(1); + hdr->runs = cpu_to_le64(96 * MiB); + + ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED; + } + + return nvme_c2h(n, (uint8_t *)buf + off, trans_len, req); +} + +static uint16_t nvme_fdp_ruh_usage(NvmeCtrl *n, uint32_t endgrpid, + uint32_t dw10, uint32_t dw12, + uint32_t buf_len, uint64_t off, + NvmeRequest *req) +{ + NvmeRuHandle *ruh; + NvmeRuhuLog *hdr; + NvmeRuhuDescr *ruhud; + NvmeEnduranceGroup *endgrp; + g_autofree uint8_t *buf = NULL; + uint32_t log_size, trans_len; + uint16_t i; + + if (endgrpid != 1 || !n->subsys) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + endgrp = &n->subsys->endgrp; + + if (!endgrp->fdp.enabled) { + return NVME_FDP_DISABLED | NVME_DNR; + } + + log_size = sizeof(NvmeRuhuLog) + endgrp->fdp.nruh * sizeof(NvmeRuhuDescr); + + if (off >= log_size) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + trans_len = MIN(log_size - off, buf_len); + + buf = g_malloc0(log_size); + hdr = (NvmeRuhuLog *)buf; + ruhud = (NvmeRuhuDescr *)(hdr + 1); + + ruh = endgrp->fdp.ruhs; + hdr->nruh = cpu_to_le16(endgrp->fdp.nruh); + + for (i = 0; i < endgrp->fdp.nruh; i++, ruhud++, ruh++) { + ruhud->ruha = ruh->ruha; + } + + return nvme_c2h(n, (uint8_t *)buf + off, trans_len, req); +} + +static uint16_t nvme_fdp_stats(NvmeCtrl *n, uint32_t endgrpid, uint32_t buf_len, + uint64_t off, NvmeRequest *req) +{ + NvmeEnduranceGroup *endgrp; + NvmeFdpStatsLog log = {}; + uint32_t trans_len; + + if (off >= sizeof(NvmeFdpStatsLog)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (endgrpid != 1 || !n->subsys) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (!n->subsys->endgrp.fdp.enabled) { + return NVME_FDP_DISABLED | NVME_DNR; + } + + endgrp = &n->subsys->endgrp; + + trans_len = MIN(sizeof(log) - off, buf_len); + + /* spec value is 128 bit, we only use 64 bit */ + log.hbmw[0] = cpu_to_le64(endgrp->fdp.hbmw); + log.mbmw[0] = cpu_to_le64(endgrp->fdp.mbmw); + log.mbe[0] = cpu_to_le64(endgrp->fdp.mbe); + + return nvme_c2h(n, (uint8_t *)&log + off, trans_len, req); +} + +static uint16_t nvme_fdp_events(NvmeCtrl *n, uint32_t endgrpid, + uint32_t buf_len, uint64_t off, + NvmeRequest *req) +{ + NvmeEnduranceGroup *endgrp; + NvmeCmd *cmd = &req->cmd; + bool host_events = (cmd->cdw10 >> 8) & 0x1; + uint32_t log_size, trans_len; + NvmeFdpEventBuffer *ebuf; + g_autofree NvmeFdpEventsLog *elog = NULL; + NvmeFdpEvent *event; + + if (endgrpid != 1 || !n->subsys) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + endgrp = &n->subsys->endgrp; + + if (!endgrp->fdp.enabled) { + return NVME_FDP_DISABLED | NVME_DNR; + } + + if (host_events) { + ebuf = &endgrp->fdp.host_events; + } else { + ebuf = &endgrp->fdp.ctrl_events; + } + + log_size = sizeof(NvmeFdpEventsLog) + ebuf->nelems * sizeof(NvmeFdpEvent); + trans_len = MIN(log_size - off, buf_len); + elog = g_malloc0(log_size); + elog->num_events = cpu_to_le32(ebuf->nelems); + event = (NvmeFdpEvent *)(elog + 1); + + if (ebuf->nelems && ebuf->start == ebuf->next) { + unsigned int nelems = (NVME_FDP_MAX_EVENTS - ebuf->start); + /* wrap over, copy [start;NVME_FDP_MAX_EVENTS[ and [0; next[ */ + memcpy(event, &ebuf->events[ebuf->start], + sizeof(NvmeFdpEvent) * nelems); + memcpy(event + nelems, ebuf->events, + sizeof(NvmeFdpEvent) * ebuf->next); + } else if (ebuf->start < ebuf->next) { + memcpy(event, &ebuf->events[ebuf->start], + sizeof(NvmeFdpEvent) * (ebuf->next - ebuf->start)); + } + + return nvme_c2h(n, (uint8_t *)elog + 
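/*
 * Illustration of the ring-buffer copy in nvme_fdp_events() above,
 * assuming a full buffer (nelems == 63, start == next == 60):
 *
 *     nelems = NVME_FDP_MAX_EVENTS - start;                   // 3
 *     memcpy(event,     &events[60], 3 * sizeof(NvmeFdpEvent));
 *     memcpy(event + 3, &events[0], 60 * sizeof(NvmeFdpEvent));
 *
 * The oldest events (from start to the end of the array) are emitted
 * first, followed by the wrapped tail; when the buffer has not wrapped, a
 * single copy of [start; next) suffices.
 */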
off, trans_len, req); +} + static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) { NvmeCmd *cmd = &req->cmd; @@ -4589,13 +5124,14 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) uint8_t lsp = (dw10 >> 8) & 0xf; uint8_t rae = (dw10 >> 15) & 0x1; uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24; - uint32_t numdl, numdu; + uint32_t numdl, numdu, lspi; uint64_t off, lpol, lpou; size_t len; uint16_t status; numdl = (dw10 >> 16); numdu = (dw11 & 0xffff); + lspi = (dw11 >> 16); lpol = dw12; lpou = dw13; @@ -4624,6 +5160,16 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) return nvme_changed_nslist(n, rae, len, off, req); case NVME_LOG_CMD_EFFECTS: return nvme_cmd_effects(n, csi, len, off, req); + case NVME_LOG_ENDGRP: + return nvme_endgrp_info(n, rae, len, off, req); + case NVME_LOG_FDP_CONFS: + return nvme_fdp_confs(n, lspi, len, off, req); + case NVME_LOG_FDP_RUH_USAGE: + return nvme_fdp_ruh_usage(n, lspi, dw10, dw12, len, off, req); + case NVME_LOG_FDP_STATS: + return nvme_fdp_stats(n, lspi, len, off, req); + case NVME_LOG_FDP_EVENTS: + return nvme_fdp_events(n, lspi, len, off, req); default: trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid); return NVME_INVALID_FIELD | NVME_DNR; @@ -4708,7 +5254,8 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr, } } n->cq[cqid] = cq; - cq->bh = qemu_bh_new(nvme_post_cqes, cq); + cq->bh = qemu_bh_new_guarded(nvme_post_cqes, cq, + &DEVICE(cq->ctrl)->mem_reentrancy_guard); } static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req) @@ -5210,6 +5757,84 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) return nvme_c2h(n, (uint8_t *)×tamp, sizeof(timestamp), req); } +static int nvme_get_feature_fdp(NvmeCtrl *n, uint32_t endgrpid, + uint32_t *result) +{ + *result = 0; + + if (!n->subsys || !n->subsys->endgrp.fdp.enabled) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + *result = FIELD_DP16(0, FEAT_FDP, FDPE, 1); + *result = FIELD_DP16(*result, FEAT_FDP, CONF_NDX, 0); + + return NVME_SUCCESS; +} + +static uint16_t nvme_get_feature_fdp_events(NvmeCtrl *n, NvmeNamespace *ns, + NvmeRequest *req, uint32_t *result) +{ + NvmeCmd *cmd = &req->cmd; + uint32_t cdw11 = le32_to_cpu(cmd->cdw11); + uint16_t ph = cdw11 & 0xffff; + uint8_t noet = (cdw11 >> 16) & 0xff; + uint16_t ruhid, ret; + uint32_t nentries = 0; + uint8_t s_events_ndx = 0; + size_t s_events_siz = sizeof(NvmeFdpEventDescr) * noet; + g_autofree NvmeFdpEventDescr *s_events = g_malloc0(s_events_siz); + NvmeRuHandle *ruh; + NvmeFdpEventDescr *s_event; + + if (!n->subsys || !n->subsys->endgrp.fdp.enabled) { + return NVME_FDP_DISABLED | NVME_DNR; + } + + if (!nvme_ph_valid(ns, ph)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ruhid = ns->fdp.phs[ph]; + ruh = &n->subsys->endgrp.fdp.ruhs[ruhid]; + + assert(ruh); + + if (unlikely(noet == 0)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + for (uint8_t event_type = 0; event_type < FDP_EVT_MAX; event_type++) { + uint8_t shift = nvme_fdp_evf_shifts[event_type]; + if (!shift && event_type) { + /* + * only first entry (event_type == 0) has a shift value of 0 + * other entries are simply unpopulated. 
+ */ + continue; + } + + nentries++; + + s_event = &s_events[s_events_ndx]; + s_event->evt = event_type; + s_event->evta = (ruh->event_filter >> shift) & 0x1; + + /* break if all `noet` entries are filled */ + if ((++s_events_ndx) == noet) { + break; + } + } + + ret = nvme_c2h(n, s_events, s_events_siz, req); + if (ret) { + return ret; + } + + *result = nentries; + return NVME_SUCCESS; +} + static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) { NvmeCmd *cmd = &req->cmd; @@ -5222,6 +5847,7 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) uint16_t iv; NvmeNamespace *ns; int i; + uint16_t endgrpid = 0, ret = NVME_SUCCESS; static const uint32_t nvme_feature_default[NVME_FID_MAX] = { [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT, @@ -5319,6 +5945,33 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) case NVME_HOST_BEHAVIOR_SUPPORT: return nvme_c2h(n, (uint8_t *)&n->features.hbs, sizeof(n->features.hbs), req); + case NVME_FDP_MODE: + endgrpid = dw11 & 0xff; + + if (endgrpid != 0x1) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ret = nvme_get_feature_fdp(n, endgrpid, &result); + if (ret) { + return ret; + } + goto out; + case NVME_FDP_EVENTS: + if (!nvme_nsid_valid(n, nsid)) { + return NVME_INVALID_NSID | NVME_DNR; + } + + ns = nvme_ns(n, nsid); + if (unlikely(!ns)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ret = nvme_get_feature_fdp_events(n, ns, req, &result); + if (ret) { + return ret; + } + goto out; default: break; } @@ -5351,6 +6004,20 @@ defaults: if (iv == n->admin_cq.vector) { result |= NVME_INTVC_NOCOALESCING; } + break; + case NVME_FDP_MODE: + endgrpid = dw11 & 0xff; + + if (endgrpid != 0x1) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ret = nvme_get_feature_fdp(n, endgrpid, &result); + if (ret) { + return ret; + } + goto out; + break; default: result = nvme_feature_default[fid]; @@ -5359,7 +6026,7 @@ defaults: out: req->cqe.result = cpu_to_le32(result); - return NVME_SUCCESS; + return ret; } static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) @@ -5377,6 +6044,51 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) return NVME_SUCCESS; } +static uint16_t nvme_set_feature_fdp_events(NvmeCtrl *n, NvmeNamespace *ns, + NvmeRequest *req) +{ + NvmeCmd *cmd = &req->cmd; + uint32_t cdw11 = le32_to_cpu(cmd->cdw11); + uint16_t ph = cdw11 & 0xffff; + uint8_t noet = (cdw11 >> 16) & 0xff; + uint16_t ret, ruhid; + uint8_t enable = le32_to_cpu(cmd->cdw12) & 0x1; + uint8_t event_mask = 0; + unsigned int i; + g_autofree uint8_t *events = g_malloc0(noet); + NvmeRuHandle *ruh = NULL; + + assert(ns); + + if (!n->subsys || !n->subsys->endgrp.fdp.enabled) { + return NVME_FDP_DISABLED | NVME_DNR; + } + + if (!nvme_ph_valid(ns, ph)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ruhid = ns->fdp.phs[ph]; + ruh = &n->subsys->endgrp.fdp.ruhs[ruhid]; + + ret = nvme_h2c(n, events, noet, req); + if (ret) { + return ret; + } + + for (i = 0; i < noet; i++) { + event_mask |= (1 << nvme_fdp_evf_shifts[events[i]]); + } + + if (enable) { + ruh->event_filter |= event_mask; + } else { + ruh->event_filter = ruh->event_filter & ~event_mask; + } + + return NVME_SUCCESS; +} + static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) { NvmeNamespace *ns = NULL; @@ -5536,6 +6248,11 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) return NVME_CMD_SET_CMB_REJECTED | NVME_DNR; } break; + case NVME_FDP_MODE: + /* spec: abort with cmd seq err if there's one or more NS' in endgrp */ + return 
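/*
 * Example of the event-filter update performed by
 * nvme_set_feature_fdp_events(): assume the host enables the two host
 * events FDP_EVT_RU_NOT_FULLY_WRITTEN (shift 0) and FDP_EVT_CTRL_RESET_RUH
 * (shift 2); the chosen event types are illustrative.
 *
 *     event_mask = (1 << 0) | (1 << 2);    // 0b101
 *     ruh->event_filter |= event_mask;     // enable (cdw12 bit 0 set)
 *     ruh->event_filter &= ~event_mask;    // disable (cdw12 bit 0 clear)
 *
 * nvme_get_feature_fdp_events() reads the same per-shift layout back.
 */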
NVME_CMD_SEQ_ERROR | NVME_DNR; + case NVME_FDP_EVENTS: + return nvme_set_feature_fdp_events(n, ns, req); default: return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; } @@ -6104,6 +6821,61 @@ static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req) return NVME_SUCCESS; } +static uint16_t nvme_directive_send(NvmeCtrl *n, NvmeRequest *req) +{ + return NVME_INVALID_FIELD | NVME_DNR; +} + +static uint16_t nvme_directive_receive(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeNamespace *ns; + uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); + uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); + uint32_t nsid = le32_to_cpu(req->cmd.nsid); + uint8_t doper, dtype; + uint32_t numd, trans_len; + NvmeDirectiveIdentify id = { + .supported = 1 << NVME_DIRECTIVE_IDENTIFY, + .enabled = 1 << NVME_DIRECTIVE_IDENTIFY, + }; + + numd = dw10 + 1; + doper = dw11 & 0xff; + dtype = (dw11 >> 8) & 0xff; + + trans_len = MIN(sizeof(NvmeDirectiveIdentify), numd << 2); + + if (nsid == NVME_NSID_BROADCAST || dtype != NVME_DIRECTIVE_IDENTIFY || + doper != NVME_DIRECTIVE_RETURN_PARAMS) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ns = nvme_ns(n, nsid); + if (!ns) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + switch (dtype) { + case NVME_DIRECTIVE_IDENTIFY: + switch (doper) { + case NVME_DIRECTIVE_RETURN_PARAMS: + if (ns->endgrp->fdp.enabled) { + id.supported |= 1 << NVME_DIRECTIVE_DATA_PLACEMENT; + id.enabled |= 1 << NVME_DIRECTIVE_DATA_PLACEMENT; + id.persistent |= 1 << NVME_DIRECTIVE_DATA_PLACEMENT; + } + + return nvme_c2h(n, (uint8_t *)&id, trans_len, req); + + default: + return NVME_INVALID_FIELD | NVME_DNR; + } + + default: + return NVME_INVALID_FIELD; + } +} + static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) { trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode, @@ -6152,6 +6924,10 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) return nvme_dbbuf_config(n, req); case NVME_ADM_CMD_FORMAT_NVM: return nvme_format(n, req); + case NVME_ADM_CMD_DIRECTIVE_SEND: + return nvme_directive_send(n, req); + case NVME_ADM_CMD_DIRECTIVE_RECV: + return nvme_directive_receive(n, req); default: assert(false); } @@ -6384,9 +7160,7 @@ static int nvme_start_ctrl(NvmeCtrl *n) if (pci_is_vf(PCI_DEVICE(n)) && !sctrl->scs) { trace_pci_nvme_err_startfail_virt_state(le16_to_cpu(sctrl->nvi), - le16_to_cpu(sctrl->nvq), - sctrl->scs ? 
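/*
 * Decode example for nvme_directive_receive() above (assumed host-supplied
 * values): with cdw10 = 63 and cdw11 = 0x0001,
 *
 *     numd      = 63 + 1;                                  // 64 dwords
 *     doper     = 0x01;                                    // RETURN_PARAMS
 *     dtype     = 0x00;                                    // IDENTIFY
 *     trans_len = MIN(sizeof(NvmeDirectiveIdentify), 64 << 2);
 *
 * so at most the directive identify structure is returned; the data
 * placement bits are reported only when FDP is enabled on the endurance
 * group.
 */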
"ONLINE" : - "OFFLINE"); + le16_to_cpu(sctrl->nvq)); return -1; } if (unlikely(n->cq[0])) { @@ -7380,6 +8154,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) uint8_t *pci_conf = pci_dev->config; uint64_t cap = ldq_le_p(&n->bar.cap); NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); + uint32_t ctratt; id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID)); id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID)); @@ -7390,7 +8165,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) id->cntlid = cpu_to_le16(n->cntlid); id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR); - id->ctratt |= cpu_to_le32(NVME_CTRATT_ELBAS); + ctratt = NVME_CTRATT_ELBAS; id->rab = 6; @@ -7407,7 +8182,8 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) id->mdts = n->params.mdts; id->ver = cpu_to_le32(NVME_SPEC_VER); id->oacs = - cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF); + cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF | + NVME_OACS_DIRECTIVES); id->cntrltype = 0x1; /* @@ -7457,8 +8233,17 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) if (n->subsys) { id->cmic |= NVME_CMIC_MULTI_CTRL; + ctratt |= NVME_CTRATT_ENDGRPS; + + id->endgidmax = cpu_to_le16(0x1); + + if (n->subsys->endgrp.fdp.enabled) { + ctratt |= NVME_CTRATT_FDPS; + } } + id->ctratt = cpu_to_le32(ctratt); + NVME_CAP_SET_MQES(cap, 0x7ff); NVME_CAP_SET_CQR(cap, 1); NVME_CAP_SET_TO(cap, 0xf); diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c index 62a1f97be0..547c0b1543 100644 --- a/hw/nvme/ns.c +++ b/hw/nvme/ns.c @@ -14,8 +14,10 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "qemu/cutils.h" #include "qemu/error-report.h" #include "qapi/error.h" +#include "qemu/bitops.h" #include "sysemu/sysemu.h" #include "sysemu/block-backend.h" @@ -377,6 +379,131 @@ static void nvme_zoned_ns_shutdown(NvmeNamespace *ns) assert(ns->nr_open_zones == 0); } +static NvmeRuHandle *nvme_find_ruh_by_attr(NvmeEnduranceGroup *endgrp, + uint8_t ruha, uint16_t *ruhid) +{ + for (uint16_t i = 0; i < endgrp->fdp.nruh; i++) { + NvmeRuHandle *ruh = &endgrp->fdp.ruhs[i]; + + if (ruh->ruha == ruha) { + *ruhid = i; + return ruh; + } + } + + return NULL; +} + +static bool nvme_ns_init_fdp(NvmeNamespace *ns, Error **errp) +{ + NvmeEnduranceGroup *endgrp = ns->endgrp; + NvmeRuHandle *ruh; + uint8_t lbafi = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas); + g_autofree unsigned int *ruhids = NULL; + unsigned int *ruhid; + char *r, *p, *token; + uint16_t *ph; + + if (!ns->params.fdp.ruhs) { + ns->fdp.nphs = 1; + ph = ns->fdp.phs = g_new(uint16_t, 1); + + ruh = nvme_find_ruh_by_attr(endgrp, NVME_RUHA_CTRL, ph); + if (!ruh) { + ruh = nvme_find_ruh_by_attr(endgrp, NVME_RUHA_UNUSED, ph); + if (!ruh) { + error_setg(errp, "no unused reclaim unit handles left"); + return false; + } + + ruh->ruha = NVME_RUHA_CTRL; + ruh->lbafi = lbafi; + ruh->ruamw = endgrp->fdp.runs >> ns->lbaf.ds; + + for (uint16_t rg = 0; rg < endgrp->fdp.nrg; rg++) { + ruh->rus[rg].ruamw = ruh->ruamw; + } + } else if (ruh->lbafi != lbafi) { + error_setg(errp, "lba format index of controller assigned " + "reclaim unit handle does not match namespace lba " + "format index"); + return false; + } + + return true; + } + + ruhid = ruhids = g_new0(unsigned int, endgrp->fdp.nruh); + r = p = strdup(ns->params.fdp.ruhs); + + /* parse the placement handle identifiers */ + while ((token = qemu_strsep(&p, ";")) != NULL) { + ns->fdp.nphs += 1; + if (ns->fdp.nphs > NVME_FDP_MAXPIDS || + ns->fdp.nphs == endgrp->fdp.nruh) { + error_setg(errp, "too 
many placement handles"); + free(r); + return false; + } + + if (qemu_strtoui(token, NULL, 0, ruhid++) < 0) { + error_setg(errp, "cannot parse reclaim unit handle identifier"); + free(r); + return false; + } + } + + free(r); + + ph = ns->fdp.phs = g_new(uint16_t, ns->fdp.nphs); + + ruhid = ruhids; + + /* verify the identifiers */ + for (unsigned int i = 0; i < ns->fdp.nphs; i++, ruhid++, ph++) { + if (*ruhid >= endgrp->fdp.nruh) { + error_setg(errp, "invalid reclaim unit handle identifier"); + return false; + } + + ruh = &endgrp->fdp.ruhs[*ruhid]; + + switch (ruh->ruha) { + case NVME_RUHA_UNUSED: + ruh->ruha = NVME_RUHA_HOST; + ruh->lbafi = lbafi; + ruh->ruamw = endgrp->fdp.runs >> ns->lbaf.ds; + + for (uint16_t rg = 0; rg < endgrp->fdp.nrg; rg++) { + ruh->rus[rg].ruamw = ruh->ruamw; + } + + break; + + case NVME_RUHA_HOST: + if (ruh->lbafi != lbafi) { + error_setg(errp, "lba format index of host assigned " + "reclaim unit handle does not match namespace " + "lba format index"); + return false; + } + + break; + + case NVME_RUHA_CTRL: + error_setg(errp, "reclaim unit handle is controller assigned"); + return false; + + default: + abort(); + } + + *ph = *ruhid; + } + + return true; +} + static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp) { unsigned int pi_size; @@ -417,6 +544,11 @@ static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp) return -1; } + if (ns->params.zoned && ns->endgrp && ns->endgrp->fdp.enabled) { + error_setg(errp, "cannot be a zoned namespace in an FDP configuration"); + return -1; + } + if (ns->params.zoned) { if (ns->params.max_active_zones) { if (ns->params.max_open_zones > ns->params.max_active_zones) { @@ -502,6 +634,12 @@ int nvme_ns_setup(NvmeNamespace *ns, Error **errp) nvme_ns_init_zoned(ns); } + if (ns->endgrp && ns->endgrp->fdp.enabled) { + if (!nvme_ns_init_fdp(ns, errp)) { + return -1; + } + } + return 0; } @@ -525,6 +663,10 @@ void nvme_ns_cleanup(NvmeNamespace *ns) g_free(ns->zone_array); g_free(ns->zd_extensions); } + + if (ns->endgrp && ns->endgrp->fdp.enabled) { + g_free(ns->fdp.phs); + } } static void nvme_ns_unrealize(DeviceState *dev) @@ -561,6 +703,8 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) { return; } + ns->subsys = subsys; + ns->endgrp = &subsys->endgrp; } if (nvme_ns_setup(ns, errp)) { @@ -591,6 +735,8 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) if (subsys) { subsys->namespaces[nsid] = ns; + ns->id_ns.endgid = cpu_to_le16(0x1); + if (ns->params.detached) { return; } @@ -606,6 +752,7 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) return; } + } nvme_attach_ns(n, ns); @@ -644,6 +791,7 @@ static Property nvme_ns_props[] = { DEFINE_PROP_SIZE("zoned.zrwafg", NvmeNamespace, params.zrwafg, -1), DEFINE_PROP_BOOL("eui64-default", NvmeNamespace, params.eui64_default, false), + DEFINE_PROP_STRING("fdp.ruhs", NvmeNamespace, params.fdp.ruhs), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h index 16da27a69b..209e8f5b4c 100644 --- a/hw/nvme/nvme.h +++ b/hw/nvme/nvme.h @@ -27,6 +27,8 @@ #define NVME_MAX_CONTROLLERS 256 #define NVME_MAX_NAMESPACES 256 #define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000) +#define NVME_FDP_MAX_EVENTS 63 +#define NVME_FDP_MAXPIDS 128 QEMU_BUILD_BUG_ON(NVME_MAX_NAMESPACES > NVME_NSID_BROADCAST - 1); @@ -45,17 +47,68 @@ typedef struct NvmeBus { OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS) #define SUBSYS_SLOT_RSVD (void *)0xFFFF +typedef struct NvmeReclaimUnit
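/*
 * Usage sketch for the fdp.ruhs namespace property parsed by
 * nvme_ns_init_fdp() above; the device and option spellings below are
 * assumed from the QEMU NVMe device family, not taken from this patch:
 *
 *     -device nvme-subsys,id=subsys0,fdp=on,fdp.nruh=8
 *     -device nvme-ns,drive=blk1,fdp.ruhs="0;1;2"
 *
 * The namespace gets three placement handles mapped to reclaim unit
 * handles 0, 1 and 2; each referenced handle must be unused, or already
 * host-assigned with a matching LBA format index.
 */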
{ + uint64_t ruamw; +} NvmeReclaimUnit; + +typedef struct NvmeRuHandle { + uint8_t ruht; + uint8_t ruha; + uint64_t event_filter; + uint8_t lbafi; + uint64_t ruamw; + + /* reclaim units indexed by reclaim group */ + NvmeReclaimUnit *rus; +} NvmeRuHandle; + +typedef struct NvmeFdpEventBuffer { + NvmeFdpEvent events[NVME_FDP_MAX_EVENTS]; + unsigned int nelems; + unsigned int start; + unsigned int next; +} NvmeFdpEventBuffer; + +typedef struct NvmeEnduranceGroup { + uint8_t event_conf; + + struct { + NvmeFdpEventBuffer host_events, ctrl_events; + + uint16_t nruh; + uint16_t nrg; + uint8_t rgif; + uint64_t runs; + + uint64_t hbmw; + uint64_t mbmw; + uint64_t mbe; + + bool enabled; + + NvmeRuHandle *ruhs; + } fdp; +} NvmeEnduranceGroup; + typedef struct NvmeSubsystem { DeviceState parent_obj; NvmeBus bus; uint8_t subnqn[256]; char *serial; - NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS]; - NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1]; + NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS]; + NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1]; + NvmeEnduranceGroup endgrp; struct { char *nqn; + + struct { + bool enabled; + uint64_t runs; + uint16_t nruh; + uint32_t nrg; + } fdp; } params; } NvmeSubsystem; @@ -96,6 +149,21 @@ typedef struct NvmeZone { QTAILQ_ENTRY(NvmeZone) entry; } NvmeZone; +#define FDP_EVT_MAX 0xff +#define NVME_FDP_MAX_NS_RUHS 32u +#define FDPVSS 0 + +static const uint8_t nvme_fdp_evf_shifts[FDP_EVT_MAX] = { + /* Host events */ + [FDP_EVT_RU_NOT_FULLY_WRITTEN] = 0, + [FDP_EVT_RU_ATL_EXCEEDED] = 1, + [FDP_EVT_CTRL_RESET_RUH] = 2, + [FDP_EVT_INVALID_PID] = 3, + /* CTRL events */ + [FDP_EVT_MEDIA_REALLOC] = 32, + [FDP_EVT_RUH_IMPLICIT_RU_CHANGE] = 33, +}; + typedef struct NvmeNamespaceParams { bool detached; bool shared; @@ -125,6 +193,10 @@ typedef struct NvmeNamespaceParams { uint32_t numzrwa; uint64_t zrwas; uint64_t zrwafg; + + struct { + char *ruhs; + } fdp; } NvmeNamespaceParams; typedef struct NvmeNamespace { @@ -167,10 +239,18 @@ typedef struct NvmeNamespace { int32_t nr_active_zones; NvmeNamespaceParams params; + NvmeSubsystem *subsys; + NvmeEnduranceGroup *endgrp; struct { uint32_t err_rec; } features; + + struct { + uint16_t nphs; + /* reclaim unit handle identifiers indexed by placement handle */ + uint16_t *phs; + } fdp; } NvmeNamespace; static inline uint32_t nvme_nsid(NvmeNamespace *ns) @@ -274,6 +354,12 @@ static inline void nvme_aor_dec_active(NvmeNamespace *ns) assert(ns->nr_active_zones >= 0); } +static inline void nvme_fdp_stat_inc(uint64_t *a, uint64_t b) +{ + uint64_t ret = *a + b; + *a = ret < *a ? 
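/*
 * Saturating add: if the sum wraps (ret < *a), the statistic pins at
 * UINT64_MAX instead of wrapping around. Assumed example:
 * *a = UINT64_MAX - 1, b = 5 leaves *a = UINT64_MAX.
 */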
UINT64_MAX : ret; +} + void nvme_ns_init_format(NvmeNamespace *ns); int nvme_ns_setup(NvmeNamespace *ns, Error **errp); void nvme_ns_drain(NvmeNamespace *ns); @@ -340,7 +426,9 @@ static inline const char *nvme_adm_opc_str(uint8_t opc) case NVME_ADM_CMD_GET_FEATURES: return "NVME_ADM_CMD_GET_FEATURES"; case NVME_ADM_CMD_ASYNC_EV_REQ: return "NVME_ADM_CMD_ASYNC_EV_REQ"; case NVME_ADM_CMD_NS_ATTACHMENT: return "NVME_ADM_CMD_NS_ATTACHMENT"; + case NVME_ADM_CMD_DIRECTIVE_SEND: return "NVME_ADM_CMD_DIRECTIVE_SEND"; case NVME_ADM_CMD_VIRT_MNGMT: return "NVME_ADM_CMD_VIRT_MNGMT"; + case NVME_ADM_CMD_DIRECTIVE_RECV: return "NVME_ADM_CMD_DIRECTIVE_RECV"; case NVME_ADM_CMD_DBBUF_CONFIG: return "NVME_ADM_CMD_DBBUF_CONFIG"; case NVME_ADM_CMD_FORMAT_NVM: return "NVME_ADM_CMD_FORMAT_NVM"; default: return "NVME_ADM_CMD_UNKNOWN"; diff --git a/hw/nvme/subsys.c b/hw/nvme/subsys.c index 9d2643678b..24ddec860e 100644 --- a/hw/nvme/subsys.c +++ b/hw/nvme/subsys.c @@ -7,10 +7,13 @@ */ #include "qemu/osdep.h" +#include "qemu/units.h" #include "qapi/error.h" #include "nvme.h" +#define NVME_DEFAULT_RU_SIZE (96 * MiB) + static int nvme_subsys_reserve_cntlids(NvmeCtrl *n, int start, int num) { NvmeSubsystem *subsys = n->subsys; @@ -109,13 +112,95 @@ void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n) n->cntlid = -1; } -static void nvme_subsys_setup(NvmeSubsystem *subsys) +static bool nvme_calc_rgif(uint16_t nruh, uint16_t nrg, uint8_t *rgif) +{ + uint16_t val; + unsigned int i; + + if (unlikely(nrg == 1)) { + /* PIDRG_NORGI scenario, all of pid is used for PHID */ + *rgif = 0; + return true; + } + + val = nrg; + i = 0; + while (val) { + val >>= 1; + i++; + } + *rgif = i; + + /* ensure remaining bits suffice to represent number of phids in a RG */ + if (unlikely((UINT16_MAX >> i) < nruh)) { + *rgif = 0; + return false; + } + + return true; +} + +static bool nvme_subsys_setup_fdp(NvmeSubsystem *subsys, Error **errp) +{ + NvmeEnduranceGroup *endgrp = &subsys->endgrp; + + if (!subsys->params.fdp.runs) { + error_setg(errp, "fdp.runs must be non-zero"); + return false; + } + + endgrp->fdp.runs = subsys->params.fdp.runs; + + if (!subsys->params.fdp.nrg) { + error_setg(errp, "fdp.nrg must be non-zero"); + return false; + } + + endgrp->fdp.nrg = subsys->params.fdp.nrg; + + if (!subsys->params.fdp.nruh) { + error_setg(errp, "fdp.nruh must be non-zero"); + return false; + } + + endgrp->fdp.nruh = subsys->params.fdp.nruh; + + if (!nvme_calc_rgif(endgrp->fdp.nruh, endgrp->fdp.nrg, &endgrp->fdp.rgif)) { + error_setg(errp, + "cannot derive a valid rgif (nruh %"PRIu16" nrg %"PRIu32")", + endgrp->fdp.nruh, endgrp->fdp.nrg); + return false; + } + + endgrp->fdp.ruhs = g_new(NvmeRuHandle, endgrp->fdp.nruh); + + for (uint16_t ruhid = 0; ruhid < endgrp->fdp.nruh; ruhid++) { + endgrp->fdp.ruhs[ruhid] = (NvmeRuHandle) { + .ruht = NVME_RUHT_INITIALLY_ISOLATED, + .ruha = NVME_RUHA_UNUSED, + }; + + endgrp->fdp.ruhs[ruhid].rus = g_new(NvmeReclaimUnit, endgrp->fdp.nrg); + } + + endgrp->fdp.enabled = true; + + return true; +} + +static bool nvme_subsys_setup(NvmeSubsystem *subsys, Error **errp) { const char *nqn = subsys->params.nqn ? 
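/*
 * Worked example for nvme_calc_rgif() above (assumed nrg = 4, nruh = 16):
 * the loop computes the bit length of nrg,
 *
 *     val: 4 -> 2 -> 1 -> 0, so rgif = 3
 *
 * and the guard then requires (UINT16_MAX >> 3) >= nruh, i.e. 8191 >= 16,
 * which holds; PIDs therefore carry the reclaim group in their top 3
 * bits. With nrg = 1, rgif = 0 and the RG field disappears entirely.
 */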
subsys->params.nqn : subsys->parent_obj.id; snprintf((char *)subsys->subnqn, sizeof(subsys->subnqn), "nqn.2019-08.org.qemu:%s", nqn); + + if (subsys->params.fdp.enabled && !nvme_subsys_setup_fdp(subsys, errp)) { + return false; + } + + return true; } static void nvme_subsys_realize(DeviceState *dev, Error **errp) @@ -124,11 +209,16 @@ static void nvme_subsys_realize(DeviceState *dev, Error **errp) qbus_init(&subsys->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id); - nvme_subsys_setup(subsys); + nvme_subsys_setup(subsys, errp); } static Property nvme_subsystem_props[] = { DEFINE_PROP_STRING("nqn", NvmeSubsystem, params.nqn), + DEFINE_PROP_BOOL("fdp", NvmeSubsystem, params.fdp.enabled, false), + DEFINE_PROP_SIZE("fdp.runs", NvmeSubsystem, params.fdp.runs, + NVME_DEFAULT_RU_SIZE), + DEFINE_PROP_UINT32("fdp.nrg", NvmeSubsystem, params.fdp.nrg, 1), + DEFINE_PROP_UINT16("fdp.nruh", NvmeSubsystem, params.fdp.nruh, 0), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/nvme/trace-events b/hw/nvme/trace-events index b16f2260b4..9afddf3b95 100644 --- a/hw/nvme/trace-events +++ b/hw/nvme/trace-events @@ -117,6 +117,7 @@ pci_nvme_clear_ns_reset(uint32_t state, uint64_t slba) "zone state=%"PRIu32", sl pci_nvme_zoned_zrwa_implicit_flush(uint64_t zslba, uint32_t nlb) "zslba 0x%"PRIx64" nlb %"PRIu32"" pci_nvme_pci_reset(void) "PCI Function Level Reset" pci_nvme_virt_mngmt(uint16_t cid, uint16_t act, uint16_t cntlid, const char* rt, uint16_t nr) "cid %"PRIu16", act=0x%"PRIx16", ctrlid=%"PRIu16" %s nr=%"PRIu16"" +pci_nvme_fdp_ruh_change(uint16_t rgid, uint16_t ruhid) "change RU on RUH rgid=%"PRIu16", ruhid=%"PRIu16"" # error conditions pci_nvme_err_mdts(size_t len) "len %zu" @@ -186,7 +187,7 @@ pci_nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the pci_nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero" pci_nvme_err_startfail_zasl_too_small(uint32_t zasl, uint32_t pagesz) "nvme_start_ctrl failed because zone append size limit %"PRIu32" is too small, needs to be >= %"PRIu32"" pci_nvme_err_startfail(void) "setting controller enable bit failed" -pci_nvme_err_startfail_virt_state(uint16_t vq, uint16_t vi, const char *state) "nvme_start_ctrl failed due to ctrl state: vi=%u vq=%u %s" +pci_nvme_err_startfail_virt_state(uint16_t vq, uint16_t vi) "nvme_start_ctrl failed due to ctrl state: vi=%u vq=%u" pci_nvme_err_invalid_mgmt_action(uint8_t action) "action=0x%"PRIx8"" pci_nvme_err_ignored_mmio_vf_offline(uint64_t addr, unsigned size) "addr 0x%"PRIx64" size %d" diff --git a/hw/nvram/eeprom_at24c.c b/hw/nvram/eeprom_at24c.c index 3328c32814..613c4929e3 100644 --- a/hw/nvram/eeprom_at24c.c +++ b/hw/nvram/eeprom_at24c.c @@ -41,6 +41,13 @@ struct EEPROMState { uint16_t cur; /* total size in bytes */ uint32_t rsize; + /* + * number of address bytes: + * 24c01 and 24c02 (size <= 256 bytes) use a single address byte; + * larger devices (size > 256 bytes) use two address bytes + */ + uint8_t asize; + bool writable; /* cells changed since last START?
*/ bool changed; @@ -91,7 +98,11 @@ uint8_t at24c_eeprom_recv(I2CSlave *s) EEPROMState *ee = AT24C_EE(s); uint8_t ret; - if (ee->haveaddr == 1) { + /* + * If only part of the address (fewer than asize bytes) has been + * received, the pointer is not valid yet; return a dummy value. + */ + if (ee->haveaddr > 0 && ee->haveaddr < ee->asize) { return 0xff; } @@ -108,11 +119,11 @@ int at24c_eeprom_send(I2CSlave *s, uint8_t data) { EEPROMState *ee = AT24C_EE(s); - if (ee->haveaddr < 2) { + if (ee->haveaddr < ee->asize) { ee->cur <<= 8; ee->cur |= data; ee->haveaddr++; - if (ee->haveaddr == 2) { + if (ee->haveaddr == ee->asize) { ee->cur %= ee->rsize; DPRINTK("Set pointer %04x\n", ee->cur); } @@ -199,6 +210,18 @@ static void at24c_eeprom_realize(DeviceState *dev, Error **errp) } DPRINTK("Reset read backing file\n"); } + + /* + * If the address size was not set via the property (default 0), + * derive it from the ROM size. + */ + if (ee->asize == 0) { + if (ee->rsize <= 256) { + ee->asize = 1; + } else { + ee->asize = 2; + } + } } static @@ -213,6 +236,7 @@ void at24c_eeprom_reset(DeviceState *state) static Property at24c_eeprom_props[] = { DEFINE_PROP_UINT32("rom-size", EEPROMState, rsize, 0), + DEFINE_PROP_UINT8("address-size", EEPROMState, asize, 0), DEFINE_PROP_BOOL("writable", EEPROMState, writable, true), DEFINE_PROP_DRIVE("drive", EEPROMState, blk), DEFINE_PROP_END_OF_LIST() diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index 432754eda4..29a5bef1d5 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -693,12 +693,12 @@ static const VMStateDescription vmstate_fw_cfg = { } }; -void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key, - FWCfgCallback select_cb, - FWCfgWriteCallback write_cb, - void *callback_opaque, - void *data, size_t len, - bool read_only) +static void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key, + FWCfgCallback select_cb, + FWCfgWriteCallback write_cb, + void *callback_opaque, + void *data, size_t len, + bool read_only) { int arch = !!(key & FW_CFG_ARCH_LOCAL); @@ -741,15 +741,6 @@ void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len) fw_cfg_add_bytes_callback(s, key, NULL, NULL, NULL, data, len, true); } -void *fw_cfg_read_bytes_ptr(FWCfgState *s, uint16_t key) -{ - int arch = !!(key & FW_CFG_ARCH_LOCAL); - - key &= FW_CFG_ENTRY_MASK; - assert(key < fw_cfg_max_entry(s)); - return s->entries[arch][key].data; -} - void fw_cfg_add_string(FWCfgState *s, uint16_t key, const char *value) { size_t sz = strlen(value) + 1; diff --git a/hw/openrisc/boot.c b/hw/openrisc/boot.c index 007e80cd5a..55475aa6d6 100644 --- a/hw/openrisc/boot.c +++ b/hw/openrisc/boot.c @@ -15,6 +15,7 @@ #include "sysemu/device_tree.h" #include "sysemu/qtest.h" #include "sysemu/reset.h" +#include "qemu/error-report.h" #include diff --git a/hw/pci-bridge/Kconfig b/hw/pci-bridge/Kconfig index 02614f49aa..67077366cc 100644 --- a/hw/pci-bridge/Kconfig +++ b/hw/pci-bridge/Kconfig @@ -3,6 +3,11 @@ config PCIE_PORT default y if PCI_DEVICES depends on PCI_EXPRESS && MSI_NONBROKEN +config PCIE_PCI_BRIDGE + bool + default y if PCIE_PORT + depends on PCIE_PORT + config PXB bool default y if Q35 || ARM_VIRT diff --git a/hw/pci-bridge/cxl_downstream.c b/hw/pci-bridge/cxl_downstream.c index 3d4e6b59cd..54f507318f 100644 --- a/hw/pci-bridge/cxl_downstream.c +++ b/hw/pci-bridge/cxl_downstream.c @@ -15,7 +15,7 @@ #include "hw/pci/pcie_port.h" #include "qapi/error.h" -typedef struct CXLDownStreamPort { +typedef struct CXLDownstreamPort { /*< private >*/ PCIESlot parent_obj; diff --git
a/hw/pci-bridge/cxl_root_port.c b/hw/pci-bridge/cxl_root_port.c index 6664783974..7dfd20aa67 100644 --- a/hw/pci-bridge/cxl_root_port.c +++ b/hw/pci-bridge/cxl_root_port.c @@ -22,6 +22,7 @@ #include "qemu/range.h" #include "hw/pci/pci_bridge.h" #include "hw/pci/pcie_port.h" +#include "hw/pci/msi.h" #include "hw/qdev-properties.h" #include "hw/sysbus.h" #include "qapi/error.h" @@ -29,6 +30,10 @@ #define CXL_ROOT_PORT_DID 0x7075 +#define CXL_RP_MSI_OFFSET 0x60 +#define CXL_RP_MSI_SUPPORTED_FLAGS PCI_MSI_FLAGS_MASKBIT +#define CXL_RP_MSI_NR_VECTOR 2 + /* Copied from the gen root port which we derive */ #define GEN_PCIE_ROOT_PORT_AER_OFFSET 0x100 #define GEN_PCIE_ROOT_PORT_ACS_OFFSET \ @@ -47,6 +52,49 @@ typedef struct CXLRootPort { #define TYPE_CXL_ROOT_PORT "cxl-rp" DECLARE_INSTANCE_CHECKER(CXLRootPort, CXL_ROOT_PORT, TYPE_CXL_ROOT_PORT) +/* + * If two MSI vectors are allocated, the Advanced Error Interrupt Message + * Number is 1; otherwise it is 0. + * 17.12.5.10 RPERRSTS, 32:27 bit Advanced Error Interrupt Message Number. + */ +static uint8_t cxl_rp_aer_vector(const PCIDevice *d) +{ + switch (msi_nr_vectors_allocated(d)) { + case 1: + return 0; + case 2: + return 1; + case 4: + case 8: + case 16: + case 32: + default: + break; + } + abort(); + return 0; +} + +static int cxl_rp_interrupts_init(PCIDevice *d, Error **errp) +{ + int rc; + + rc = msi_init(d, CXL_RP_MSI_OFFSET, CXL_RP_MSI_NR_VECTOR, + CXL_RP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT, + CXL_RP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT, + errp); + if (rc < 0) { + assert(rc == -ENOTSUP); + } + + return rc; +} + +static void cxl_rp_interrupts_uninit(PCIDevice *d) +{ + msi_uninit(d); +} + static void latch_registers(CXLRootPort *crp) { uint32_t *reg_state = crp->cxl_cstate.crb.cache_mem_registers; @@ -183,16 +231,29 @@ static void cxl_rp_dvsec_write_config(PCIDevice *dev, uint32_t addr, } } +static void cxl_rp_aer_vector_update(PCIDevice *d) +{ + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(d); + + if (rpc->aer_vector) { + pcie_aer_root_set_vector(d, rpc->aer_vector(d)); + } +} + static void cxl_rp_write_config(PCIDevice *d, uint32_t address, uint32_t val, int len) { uint16_t slt_ctl, slt_sta; + uint32_t root_cmd = + pci_get_long(d->config + d->exp.aer_cap + PCI_ERR_ROOT_COMMAND); pcie_cap_slot_get(d, &slt_ctl, &slt_sta); pci_bridge_write_config(d, address, val, len); + cxl_rp_aer_vector_update(d); pcie_cap_flr_write_config(d, address, val, len); pcie_cap_slot_write_config(d, slt_ctl, slt_sta, address, val, len); pcie_aer_write_config(d, address, val, len); + pcie_aer_root_write_config(d, address, val, len, root_cmd); cxl_rp_dvsec_write_config(d, address, val, len); } @@ -217,6 +278,9 @@ static void cxl_root_port_class_init(ObjectClass *oc, void *data) rpc->aer_offset = GEN_PCIE_ROOT_PORT_AER_OFFSET; rpc->acs_offset = GEN_PCIE_ROOT_PORT_ACS_OFFSET; + rpc->aer_vector = cxl_rp_aer_vector; + rpc->interrupts_init = cxl_rp_interrupts_init; + rpc->interrupts_uninit = cxl_rp_interrupts_uninit; dc->hotpluggable = false; } diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c index 9df436cb73..ef47e5d625 100644 --- a/hw/pci-bridge/cxl_upstream.c +++ b/hw/pci-bridge/cxl_upstream.c @@ -346,6 +346,9 @@ static void cxl_usp_realize(PCIDevice *d, Error **errp) cxl_cstate->cdat.free_cdat_table = free_default_cdat_table; cxl_cstate->cdat.private = d; cxl_doe_cdat_init(cxl_cstate, errp); + if (*errp) { + goto err_cap; + } return; diff --git a/hw/pci-bridge/i82801b11.c b/hw/pci-bridge/i82801b11.c index f3b4a14611..0e83cd11b2 100644 ---
a/hw/pci-bridge/i82801b11.c +++ b/hw/pci-bridge/i82801b11.c @@ -45,7 +45,7 @@ #include "hw/pci/pci_bridge.h" #include "migration/vmstate.h" #include "qemu/module.h" -#include "hw/i386/ich9.h" +#include "hw/southbridge/ich9.h" /*****************************************************************************/ /* ICH9 DMI-to-PCI bridge */ diff --git a/hw/pci-bridge/meson.build b/hw/pci-bridge/meson.build index fe92d43de6..0edc6c7cbf 100644 --- a/hw/pci-bridge/meson.build +++ b/hw/pci-bridge/meson.build @@ -2,7 +2,8 @@ pci_ss = ss.source_set() pci_ss.add(files('pci_bridge_dev.c')) pci_ss.add(when: 'CONFIG_I82801B11', if_true: files('i82801b11.c')) pci_ss.add(when: 'CONFIG_IOH3420', if_true: files('ioh3420.c')) -pci_ss.add(when: 'CONFIG_PCIE_PORT', if_true: files('pcie_root_port.c', 'gen_pcie_root_port.c', 'pcie_pci_bridge.c')) +pci_ss.add(when: 'CONFIG_PCIE_PORT', if_true: files('pcie_root_port.c', 'gen_pcie_root_port.c')) +pci_ss.add(when: 'CONFIG_PCIE_PCI_BRIDGE', if_true: files('pcie_pci_bridge.c')) pci_ss.add(when: 'CONFIG_PXB', if_true: files('pci_expander_bridge.c'), if_false: files('pci_expander_bridge_stubs.c')) pci_ss.add(when: 'CONFIG_XIO3130', if_true: files('xio3130_upstream.c', 'xio3130_downstream.c')) diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c index e752a21292..613857b601 100644 --- a/hw/pci-bridge/pci_expander_bridge.c +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -15,6 +15,7 @@ #include "hw/pci/pci.h" #include "hw/pci/pci_bus.h" #include "hw/pci/pci_host.h" +#include "hw/pci/pcie_port.h" #include "hw/qdev-properties.h" #include "hw/pci/pci_bridge.h" #include "hw/pci-bridge/pci_expander_bridge.h" @@ -49,24 +50,8 @@ struct PXBBus { char bus_path[8]; }; -#define TYPE_PXB_DEVICE "pxb" -DECLARE_INSTANCE_CHECKER(PXBDev, PXB_DEV, - TYPE_PXB_DEVICE) - -#define TYPE_PXB_PCIE_DEVICE "pxb-pcie" -DECLARE_INSTANCE_CHECKER(PXBDev, PXB_PCIE_DEV, - TYPE_PXB_PCIE_DEVICE) - -static PXBDev *convert_to_pxb(PCIDevice *dev) -{ - /* A CXL PXB's parent bus is PCIe, so the normal check won't work */ - if (object_dynamic_cast(OBJECT(dev), TYPE_PXB_CXL_DEVICE)) { - return PXB_CXL_DEV(dev); - } - - return pci_bus_is_express(pci_get_bus(dev)) - ? 
PXB_PCIE_DEV(dev) : PXB_DEV(dev); -} +#define TYPE_PXB_PCIE_DEV "pxb-pcie" +OBJECT_DECLARE_SIMPLE_TYPE(PXBPCIEDev, PXB_PCIE_DEV) static GList *pxb_dev_list; @@ -79,16 +64,23 @@ CXLComponentState *cxl_get_hb_cstate(PCIHostState *hb) return &host->cxl_cstate; } +bool cxl_get_hb_passthrough(PCIHostState *hb) +{ + CXLHost *host = PXB_CXL_HOST(hb); + + return host->passthrough; +} + static int pxb_bus_num(PCIBus *bus) { - PXBDev *pxb = convert_to_pxb(bus->parent_dev); + PXBDev *pxb = PXB_DEV(bus->parent_dev); return pxb->bus_nr; } static uint16_t pxb_bus_numa_node(PCIBus *bus) { - PXBDev *pxb = convert_to_pxb(bus->parent_dev); + PXBDev *pxb = PXB_DEV(bus->parent_dev); return pxb->numa_node; } @@ -146,7 +138,7 @@ static char *pxb_host_ofw_unit_address(const SysBusDevice *dev) pxb_host = PCI_HOST_BRIDGE(dev); pxb_bus = pxb_host->bus; - pxb_dev = convert_to_pxb(pxb_bus->parent_dev); + pxb_dev = PXB_DEV(pxb_bus->parent_dev); position = g_list_index(pxb_dev_list, pxb_dev); assert(position >= 0); @@ -204,8 +196,8 @@ static void pxb_cxl_realize(DeviceState *dev, Error **errp) */ void pxb_cxl_hook_up_registers(CXLState *cxl_state, PCIBus *bus, Error **errp) { - PXBDev *pxb = PXB_CXL_DEV(pci_bridge_get_device(bus)); - CXLHost *cxl = pxb->cxl.cxl_host_bridge; + PXBCXLDev *pxb = PXB_CXL_DEV(pci_bridge_get_device(bus)); + CXLHost *cxl = pxb->cxl_host_bridge; CXLComponentState *cxl_cstate = &cxl->cxl_cstate; struct MemoryRegion *mr = &cxl_cstate->crb.component_registers; hwaddr offset; @@ -289,15 +281,32 @@ static int pxb_map_irq_fn(PCIDevice *pci_dev, int pin) return pin - PCI_SLOT(pxb->devfn); } -static void pxb_dev_reset(DeviceState *dev) +static void pxb_cxl_dev_reset(DeviceState *dev) { - CXLHost *cxl = PXB_CXL_DEV(dev)->cxl.cxl_host_bridge; + CXLHost *cxl = PXB_CXL_DEV(dev)->cxl_host_bridge; CXLComponentState *cxl_cstate = &cxl->cxl_cstate; + PCIHostState *hb = PCI_HOST_BRIDGE(cxl); uint32_t *reg_state = cxl_cstate->crb.cache_mem_registers; uint32_t *write_msk = cxl_cstate->crb.cache_mem_regs_write_mask; + int dsp_count = 0; cxl_component_register_init_common(reg_state, write_msk, CXL2_ROOT_PORT); - ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 8); + /* + * The CXL specification allows for host bridges with no HDM decoders + * if they only have a single root port. 
+ */ + if (!PXB_CXL_DEV(dev)->hdm_for_passthrough) { + dsp_count = pcie_count_ds_ports(hb->bus); + } + /* Initial reset will have 0 dsp so wait until > 0 */ + if (dsp_count == 1) { + cxl->passthrough = true; + /* Set Capability ID in header to NONE */ + ARRAY_FIELD_DP32(reg_state, CXL_HDM_CAPABILITY_HEADER, ID, 0); + } else { + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, + 8); + } } static gint pxb_compare(gconstpointer a, gconstpointer b) @@ -312,7 +321,7 @@ static gint pxb_compare(gconstpointer a, gconstpointer b) static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type, Error **errp) { - PXBDev *pxb = convert_to_pxb(dev); + PXBDev *pxb = PXB_DEV(dev); DeviceState *ds, *bds = NULL; PCIBus *bus; const char *dev_name = NULL; @@ -340,7 +349,7 @@ static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type, } else if (type == CXL) { bus = pci_root_bus_new(ds, dev_name, NULL, NULL, 0, TYPE_PXB_CXL_BUS); bus->flags |= PCI_BUS_CXL; - PXB_CXL_DEV(dev)->cxl.cxl_host_bridge = PXB_CXL_HOST(ds); + PXB_CXL_DEV(dev)->cxl_host_bridge = PXB_CXL_HOST(ds); } else { bus = pci_root_bus_new(ds, "pxb-internal", NULL, NULL, 0, TYPE_PXB_BUS); bds = qdev_new("pci-bridge"); @@ -393,7 +402,7 @@ static void pxb_dev_realize(PCIDevice *dev, Error **errp) static void pxb_dev_exitfn(PCIDevice *pci_dev) { - PXBDev *pxb = convert_to_pxb(pci_dev); + PXBDev *pxb = PXB_DEV(pci_dev); pxb_dev_list = g_list_remove(pxb_dev_list, pxb); } @@ -424,7 +433,7 @@ static void pxb_dev_class_init(ObjectClass *klass, void *data) } static const TypeInfo pxb_dev_info = { - .name = TYPE_PXB_DEVICE, + .name = TYPE_PXB_DEV, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PXBDev), .class_init = pxb_dev_class_init, @@ -456,15 +465,14 @@ static void pxb_pcie_dev_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_BRIDGE_HOST; dc->desc = "PCI Express Expander Bridge"; - device_class_set_props(dc, pxb_dev_properties); dc->hotpluggable = false; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); } static const TypeInfo pxb_pcie_dev_info = { - .name = TYPE_PXB_PCIE_DEVICE, - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(PXBDev), + .name = TYPE_PXB_PCIE_DEV, + .parent = TYPE_PXB_DEV, + .instance_size = sizeof(PXBPCIEDev), .class_init = pxb_pcie_dev_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, @@ -481,9 +489,14 @@ static void pxb_cxl_dev_realize(PCIDevice *dev, Error **errp) } pxb_dev_realize_common(dev, CXL, errp); - pxb_dev_reset(DEVICE(dev)); + pxb_cxl_dev_reset(DEVICE(dev)); } +static Property pxb_cxl_dev_properties[] = { + DEFINE_PROP_BOOL("hdm_for_passthrough", PXBCXLDev, hdm_for_passthrough, false), + DEFINE_PROP_END_OF_LIST(), +}; + static void pxb_cxl_dev_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -497,18 +510,18 @@ static void pxb_cxl_dev_class_init(ObjectClass *klass, void *data) */ dc->desc = "CXL Host Bridge"; - device_class_set_props(dc, pxb_dev_properties); + device_class_set_props(dc, pxb_cxl_dev_properties); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); /* Host bridges aren't hotpluggable. 
FIXME: spec reference */ dc->hotpluggable = false; - dc->reset = pxb_dev_reset; + dc->reset = pxb_cxl_dev_reset; } static const TypeInfo pxb_cxl_dev_info = { - .name = TYPE_PXB_CXL_DEVICE, - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(PXBDev), + .name = TYPE_PXB_CXL_DEV, + .parent = TYPE_PXB_PCIE_DEV, + .instance_size = sizeof(PXBCXLDev), .class_init = pxb_cxl_dev_class_init, .interfaces = (InterfaceInfo[]){ diff --git a/hw/pci-host/gt64120.c b/hw/pci-host/gt64120.c index f226d03420..82c15edb46 100644 --- a/hw/pci-host/gt64120.c +++ b/hw/pci-host/gt64120.c @@ -321,9 +321,6 @@ static void gt64120_isd_mapping(GT64120State *s) static void gt64120_update_pci_cfgdata_mapping(GT64120State *s) { /* Indexed on MByteSwap bit, see Table 158: PCI_0 Command, Offset: 0xc00 */ - static const MemoryRegionOps *pci_host_conf_ops[] = { - &pci_host_conf_be_ops, &pci_host_conf_le_ops - }; static const MemoryRegionOps *pci_host_data_ops[] = { &pci_host_data_be_ops, &pci_host_data_le_ops }; @@ -339,15 +336,6 @@ static void gt64120_update_pci_cfgdata_mapping(GT64120State *s) * - Table 16: 32-bit PCI Transaction Endianess * - Table 158: PCI_0 Command, Offset: 0xc00 */ - if (memory_region_is_mapped(&phb->conf_mem)) { - memory_region_del_subregion(&s->ISD_mem, &phb->conf_mem); - object_unparent(OBJECT(&phb->conf_mem)); - } - memory_region_init_io(&phb->conf_mem, OBJECT(phb), - pci_host_conf_ops[s->regs[GT_PCI0_CMD] & 1], - s, "pci-conf-idx", 4); - memory_region_add_subregion_overlap(&s->ISD_mem, GT_PCI0_CFGADDR << 2, - &phb->conf_mem, 1); if (memory_region_is_mapped(&phb->data_mem)) { memory_region_del_subregion(&s->ISD_mem, &phb->data_mem); @@ -1208,6 +1196,12 @@ static void gt64120_realize(DeviceState *dev, Error **errp) PCI_DEVFN(18, 0), TYPE_PCI_BUS); pci_create_simple(phb->bus, PCI_DEVFN(0, 0), "gt64120_pci"); + memory_region_init_io(&phb->conf_mem, OBJECT(phb), + &pci_host_conf_le_ops, + s, "pci-conf-idx", 4); + memory_region_add_subregion_overlap(&s->ISD_mem, GT_PCI0_CFGADDR << 2, + &phb->conf_mem, 1); + /* * The whole address space decoded by the GT-64120A doesn't generate diff --git a/hw/pci-host/i440fx.c b/hw/pci-host/i440fx.c index 262f82c303..61e7b97ff4 100644 --- a/hw/pci-host/i440fx.c +++ b/hw/pci-host/i440fx.c @@ -27,6 +27,7 @@ #include "qemu/range.h" #include "hw/i386/pc.h" #include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" #include "hw/pci/pci_host.h" #include "hw/pci-host/i440fx.h" #include "hw/qdev-properties.h" @@ -217,10 +218,10 @@ static void i440fx_pcihost_realize(DeviceState *dev, Error **errp) PCIHostState *s = PCI_HOST_BRIDGE(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - sysbus_add_io(sbd, 0xcf8, &s->conf_mem); + memory_region_add_subregion(s->bus->address_space_io, 0xcf8, &s->conf_mem); sysbus_init_ioports(sbd, 0xcf8, 4); - sysbus_add_io(sbd, 0xcfc, &s->data_mem); + memory_region_add_subregion(s->bus->address_space_io, 0xcfc, &s->data_mem); sysbus_init_ioports(sbd, 0xcfc, 4); /* register i440fx 0xcf8 port as coalesced pio */ @@ -291,12 +292,12 @@ PCIBus *i440fx_init(const char *pci_type, object_property_add_const_link(qdev_get_machine(), "smram", OBJECT(&f->smram)); - init_pam(dev, f->ram_memory, f->system_memory, f->pci_address_space, - &f->pam_regions[0], PAM_BIOS_BASE, PAM_BIOS_SIZE); + init_pam(&f->pam_regions[0], OBJECT(d), f->ram_memory, f->system_memory, + f->pci_address_space, PAM_BIOS_BASE, PAM_BIOS_SIZE); for (i = 0; i < ARRAY_SIZE(f->pam_regions) - 1; ++i) { - init_pam(dev, f->ram_memory, f->system_memory, f->pci_address_space, - &f->pam_regions[i+1], 
PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, - PAM_EXPAN_SIZE); + init_pam(&f->pam_regions[i + 1], OBJECT(d), f->ram_memory, + f->system_memory, f->pci_address_space, + PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE); } ram_size = ram_size / 8 / 1024 / 1024; diff --git a/hw/pci-host/mv64361.c b/hw/pci-host/mv64361.c index 298564f1f5..19e8031a3f 100644 --- a/hw/pci-host/mv64361.c +++ b/hw/pci-host/mv64361.c @@ -873,10 +873,6 @@ static void mv64361_realize(DeviceState *dev, Error **errp) } sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cpu_irq); qdev_init_gpio_in_named(dev, mv64361_gpp_irq, "gpp", 32); - /* FIXME: PCI IRQ connections may be board specific */ - for (i = 0; i < PCI_NUM_PINS; i++) { - s->pci[1].irq[i] = qdev_get_gpio_in_named(dev, "gpp", 12 + i); - } } static void mv64361_reset(DeviceState *dev) diff --git a/hw/pci-host/pam.c b/hw/pci-host/pam.c index 454dd120db..68e9884d27 100644 --- a/hw/pci-host/pam.c +++ b/hw/pci-host/pam.c @@ -30,24 +30,24 @@ #include "qemu/osdep.h" #include "hw/pci-host/pam.h" -void init_pam(DeviceState *dev, MemoryRegion *ram_memory, +void init_pam(PAMMemoryRegion *mem, Object *owner, MemoryRegion *ram_memory, MemoryRegion *system_memory, MemoryRegion *pci_address_space, - PAMMemoryRegion *mem, uint32_t start, uint32_t size) + uint32_t start, uint32_t size) { int i; /* RAM */ - memory_region_init_alias(&mem->alias[3], OBJECT(dev), "pam-ram", ram_memory, + memory_region_init_alias(&mem->alias[3], owner, "pam-ram", ram_memory, start, size); /* ROM (XXX: not quite correct) */ - memory_region_init_alias(&mem->alias[1], OBJECT(dev), "pam-rom", ram_memory, + memory_region_init_alias(&mem->alias[1], owner, "pam-rom", ram_memory, start, size); memory_region_set_readonly(&mem->alias[1], true); /* XXX: should distinguish read/write cases */ - memory_region_init_alias(&mem->alias[0], OBJECT(dev), "pam-pci", pci_address_space, + memory_region_init_alias(&mem->alias[0], owner, "pam-pci", pci_address_space, start, size); - memory_region_init_alias(&mem->alias[2], OBJECT(dev), "pam-pci", ram_memory, + memory_region_init_alias(&mem->alias[2], owner, "pam-pci", ram_memory, start, size); memory_region_transaction_begin(); diff --git a/hw/pci-host/pnv_phb.c b/hw/pci-host/pnv_phb.c index c62b08538a..82332d7a05 100644 --- a/hw/pci-host/pnv_phb.c +++ b/hw/pci-host/pnv_phb.c @@ -62,6 +62,15 @@ static bool pnv_parent_fixup(Object *parent, BusState *parent_bus, return true; } +static Object *pnv_phb_user_get_parent(PnvChip *chip, PnvPHB *phb, Error **errp) +{ + if (phb->version == 3) { + return OBJECT(pnv_chip_add_phb(chip, phb)); + } else { + return OBJECT(pnv_pec_add_phb(chip, phb, errp)); + } +} + /* * User created devices won't have the initial setup that default * devices have. 
This setup consists of assigning a parent device @@ -79,7 +88,7 @@ static bool pnv_phb_user_device_init(PnvPHB *phb, Error **errp) return false; } - parent = pnv_chip_add_phb(chip, phb, errp); + parent = pnv_phb_user_get_parent(chip, phb, errp); if (!parent) { return false; } diff --git a/hw/pci-host/pnv_phb4_pec.c b/hw/pci-host/pnv_phb4_pec.c index 43267a428f..3b2850f7a3 100644 --- a/hw/pci-host/pnv_phb4_pec.c +++ b/hw/pci-host/pnv_phb4_pec.c @@ -112,9 +112,50 @@ static const MemoryRegionOps pnv_pec_pci_xscom_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -static void pnv_pec_default_phb_realize(PnvPhb4PecState *pec, - int stack_no, - Error **errp) +PnvPhb4PecState *pnv_pec_add_phb(PnvChip *chip, PnvPHB *phb, Error **errp) +{ + PnvPhb4PecState *pecs = NULL; + int chip_id = phb->chip_id; + int index = phb->phb_id; + int i, j; + + if (phb->version == 4) { + Pnv9Chip *chip9 = PNV9_CHIP(chip); + + pecs = chip9->pecs; + } else if (phb->version == 5) { + Pnv10Chip *chip10 = PNV10_CHIP(chip); + + pecs = chip10->pecs; + } else { + g_assert_not_reached(); + } + + for (i = 0; i < chip->num_pecs; i++) { + /* + * For each PEC, check the amount of phbs it supports + * and see if the given phb4 index matches an index. + */ + PnvPhb4PecState *pec = &pecs[i]; + + for (j = 0; j < pec->num_phbs; j++) { + if (index == pnv_phb4_pec_get_phb_id(pec, j)) { + pec->phbs[j] = phb; + phb->pec = pec; + return pec; + } + } + } + error_setg(errp, + "pnv-phb4 chip-id %d index %d didn't match any existing PEC", + chip_id, index); + + return NULL; +} + +static PnvPHB *pnv_pec_default_phb_realize(PnvPhb4PecState *pec, + int stack_no, + Error **errp) { PnvPHB *phb = PNV_PHB(qdev_new(TYPE_PNV_PHB)); int phb_id = pnv_phb4_pec_get_phb_id(pec, stack_no); @@ -128,8 +169,9 @@ static void pnv_pec_default_phb_realize(PnvPhb4PecState *pec, &error_fatal); if (!sysbus_realize(SYS_BUS_DEVICE(phb), errp)) { - return; + return NULL; } + return phb; } static void pnv_pec_realize(DeviceState *dev, Error **errp) @@ -148,8 +190,9 @@ static void pnv_pec_realize(DeviceState *dev, Error **errp) /* Create PHBs if running with defaults */ if (defaults_enabled()) { + g_assert(pec->num_phbs <= MAX_PHBS_PER_PEC); for (i = 0; i < pec->num_phbs; i++) { - pnv_pec_default_phb_realize(pec, i, errp); + pec->phbs[i] = pnv_pec_default_phb_realize(pec, i, errp); } } @@ -197,9 +240,12 @@ static int pnv_pec_dt_xscom(PnvXScomInterface *dev, void *fdt, pecc->compat_size))); for (i = 0; i < pec->num_phbs; i++) { - int phb_id = pnv_phb4_pec_get_phb_id(pec, i); int stk_offset; + if (!pec->phbs[i]) { + continue; + } + name = g_strdup_printf("stack@%x", i); stk_offset = fdt_add_subnode(fdt, offset, name); _FDT(stk_offset); @@ -207,7 +253,8 @@ static int pnv_pec_dt_xscom(PnvXScomInterface *dev, void *fdt, _FDT((fdt_setprop(fdt, stk_offset, "compatible", pecc->stk_compat, pecc->stk_compat_size))); _FDT((fdt_setprop_cell(fdt, stk_offset, "reg", i))); - _FDT((fdt_setprop_cell(fdt, stk_offset, "ibm,phb-index", phb_id))); + _FDT((fdt_setprop_cell(fdt, stk_offset, "ibm,phb-index", + pec->phbs[i]->phb_id))); } return 0; diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c index 26390863d6..fd18920e7f 100644 --- a/hw/pci-host/q35.c +++ b/hw/pci-host/q35.c @@ -50,10 +50,12 @@ static void q35_host_realize(DeviceState *dev, Error **errp) Q35PCIHost *s = Q35_HOST_DEVICE(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - sysbus_add_io(sbd, MCH_HOST_BRIDGE_CONFIG_ADDR, &pci->conf_mem); + memory_region_add_subregion(s->mch.address_space_io, + MCH_HOST_BRIDGE_CONFIG_ADDR, &pci->conf_mem); 
sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_ADDR, 4); - sysbus_add_io(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, &pci->data_mem); + memory_region_add_subregion(s->mch.address_space_io, + MCH_HOST_BRIDGE_CONFIG_DATA, &pci->data_mem); sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, 4); /* register q35 0xcf8 port as coalesced pio */ @@ -643,12 +645,12 @@ static void mch_realize(PCIDevice *d, Error **errp) object_property_add_const_link(qdev_get_machine(), "smram", OBJECT(&mch->smram)); - init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory, - mch->pci_address_space, &mch->pam_regions[0], + init_pam(&mch->pam_regions[0], OBJECT(mch), mch->ram_memory, + mch->system_memory, mch->pci_address_space, PAM_BIOS_BASE, PAM_BIOS_SIZE); for (i = 0; i < ARRAY_SIZE(mch->pam_regions) - 1; ++i) { - init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory, - mch->pci_address_space, &mch->pam_regions[i+1], + init_pam(&mch->pam_regions[i + 1], OBJECT(mch), mch->ram_memory, + mch->system_memory, mch->pci_address_space, PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE); } } diff --git a/hw/pci-host/raven.c b/hw/pci-host/raven.c index 072ffe3c5e..9a11ac4b2b 100644 --- a/hw/pci-host/raven.c +++ b/hw/pci-host/raven.c @@ -294,6 +294,13 @@ static void raven_pcihost_initfn(Object *obj) memory_region_init(&s->pci_memory, obj, "pci-memory", 0x3f000000); address_space_init(&s->pci_io_as, &s->pci_io, "raven-io"); + /* + * Raven's raven_io_ops use the address-space API to access pci-conf-idx + * (which is also owned by the raven device). As such, mark the + * pci_io_non_contiguous as re-entrancy safe. + */ + s->pci_io_non_contiguous.disable_reentrancy_guard = true; + /* CPU address space */ memory_region_add_subregion(address_space_mem, PCI_IO_BASE_ADDR, &s->pci_io); diff --git a/hw/pci/msi.c b/hw/pci/msi.c index 1cadf150bc..041b0bdbec 100644 --- a/hw/pci/msi.c +++ b/hw/pci/msi.c @@ -24,6 +24,8 @@ #include "qemu/range.h" #include "qapi/error.h" +#include "hw/i386/kvm/xen_evtchn.h" + /* PCI_MSI_ADDRESS_LO */ #define PCI_MSI_ADDRESS_LO_MASK (~0x3) @@ -414,6 +416,15 @@ void msi_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len) fprintf(stderr, "\n"); #endif + if (xen_mode == XEN_EMULATE) { + for (vector = 0; vector < msi_nr_vectors(flags); vector++) { + MSIMessage msg = msi_prepare_message(dev, vector); + + xen_evtchn_snoop_msi(dev, false, vector, msg.address, msg.data, + msi_is_masked(dev, vector)); + } + } + if (!(flags & PCI_MSI_FLAGS_ENABLE)) { return; } diff --git a/hw/pci/msix.c b/hw/pci/msix.c index 9e70fcd6fa..ab8869d9d0 100644 --- a/hw/pci/msix.c +++ b/hw/pci/msix.c @@ -26,6 +26,8 @@ #include "qapi/error.h" #include "trace.h" +#include "hw/i386/kvm/xen_evtchn.h" + /* MSI enable bit and maskall bit are in byte 1 in FLAGS register */ #define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1) #define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8) @@ -124,6 +126,13 @@ static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked) { bool is_masked = msix_is_masked(dev, vector); + if (xen_mode == XEN_EMULATE) { + MSIMessage msg = msix_prepare_message(dev, vector); + + xen_evtchn_snoop_msi(dev, true, vector, msg.address, msg.data, + is_masked); + } + if (is_masked == was_masked) { return; } diff --git a/hw/pci/pci-internal.h b/hw/pci/pci-internal.h index 2ea356bdf5..a7d6d8a732 100644 --- a/hw/pci/pci-internal.h +++ b/hw/pci/pci-internal.h @@ -20,6 +20,5 @@ void pcibus_dev_print(Monitor *mon, DeviceState *dev, int indent); int pcie_aer_parse_error_string(const char *error_name, uint32_t 
*status, bool *correctable);
-int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err);
 
 #endif
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index cc51f98593..1cc7c89036 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -49,6 +49,9 @@
 #include "qemu/cutils.h"
 #include "pci-internal.h"
 
+#include "hw/xen/xen.h"
+#include "hw/i386/kvm/xen_evtchn.h"
+
 //#define DEBUG_PCI
 #ifdef DEBUG_PCI
 # define PCI_DPRINTF(format, ...) printf(format, ## __VA_ARGS__)
@@ -76,6 +79,8 @@ static Property pci_props[] = {
     DEFINE_PROP_STRING("failover_pair_id", PCIDevice, failover_pair_id),
     DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
+    DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
+                    QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
     DEFINE_PROP_END_OF_LIST()
 };
@@ -92,6 +97,21 @@ static const VMStateDescription vmstate_pcibus = {
     }
 };
 
+static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
+{
+    return a - b;
+}
+
+static GSequence *pci_acpi_index_list(void)
+{
+    static GSequence *used_acpi_index_list;
+
+    if (!used_acpi_index_list) {
+        used_acpi_index_list = g_sequence_new(NULL);
+    }
+    return used_acpi_index_list;
+}
+
 static void pci_init_bus_master(PCIDevice *pci_dev)
 {
     AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);
@@ -279,9 +299,13 @@ static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
 {
     PCIBus *bus;
     for (;;) {
+        int dev_irq = irq_num;
         bus = pci_get_bus(pci_dev);
         assert(bus->map_irq);
         irq_num = bus->map_irq(pci_dev, irq_num);
+        trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
+                            pci_bus_is_root(bus) ? "root-complex"
+                            : DEVICE(bus->parent_dev)->canonical_path);
         if (bus->set_irq)
             break;
         pci_dev = bus->parent_dev;
@@ -319,6 +343,17 @@ static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
 {
     MemTxAttrs attrs = {};
 
+    /*
+     * Xen uses the high bits of the address to contain some of the bits
+     * of the PIRQ#. Therefore we can't just send the write cycle and
+     * trust that it's caught by the APIC at 0xfee00000 because the
+     * target of the write might be e.g. 0x1000fee46000 for PIRQ#4166.
+     * So we intercept the delivery here instead of in kvm_send_msi().
+     */
+    if (xen_mode == XEN_EMULATE &&
+        xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
+        return;
+    }
     attrs.requester_id = pci_requester_id(dev);
     address_space_stl_le(&dev->bus_master_as, msg.address, msg.data, attrs, NULL);
@@ -988,6 +1023,9 @@ static void do_pci_unregister_device(PCIDevice *pci_dev)
     pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL;
     pci_config_free(pci_dev);
 
+    if (xen_mode == XEN_EMULATE) {
+        xen_evtchn_remove_pci_device(pci_dev);
+    }
     if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
         memory_region_del_subregion(&pci_dev->bus_master_container_region,
                                     &pci_dev->bus_master_enable_region);
@@ -1080,6 +1118,21 @@ static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn)
     return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn));
 }
 
+uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus)
+{
+    return bus->slot_reserved_mask;
+}
+
+void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask)
+{
+    bus->slot_reserved_mask |= mask;
+}
+
+void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask)
+{
+    bus->slot_reserved_mask &= ~mask;
+}
+
 /* -1 for devfn means auto assign */
 static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
                                          const char *name, int devfn,
@@ -1225,6 +1278,17 @@ static void pci_qdev_unrealize(DeviceState *dev)
 
     do_pci_unregister_device(pci_dev);
     pci_dev->msi_trigger = NULL;
+
+    /*
+     * clean up acpi-index so it can be reused by another device
+     */
+    if (pci_dev->acpi_index) {
+        GSequence *used_indexes = pci_acpi_index_list();
+
+        g_sequence_remove(g_sequence_lookup(used_indexes,
+                          GINT_TO_POINTER(pci_dev->acpi_index),
+                          g_cmp_uint32, NULL));
+    }
 }
 
 void pci_register_bar(PCIDevice *pci_dev, int region_num,
@@ -1600,8 +1664,12 @@ PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
     PCIBus *bus;
 
     do {
+        int dev_irq = pin;
         bus = pci_get_bus(dev);
         pin = bus->map_irq(dev, pin);
+        trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin,
+                            pci_bus_is_root(bus) ? "root-complex"
+                            : DEVICE(bus->parent_dev)->canonical_path);
         dev = bus->parent_dev;
     } while (dev);
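The acpi-index bookkeeping above (the sorted GSequence behind pci_acpi_index_list(), filled at realize time and drained in pci_qdev_unrealize()) is a stock GLib idiom. Below is a small runnable sketch of the same pattern with the QEMU specifics stripped out; the index value is made up, and the comparator uses GPOINTER_TO_UINT instead of the patch's raw pointer subtraction. It builds with gcc `pkg-config --cflags --libs glib-2.0`.

#include <glib.h>

/* Same comparator shape as the patch: compare the pointer-encoded values. */
static gint cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
{
    return GPOINTER_TO_UINT(a) - GPOINTER_TO_UINT(b);
}

int main(void)
{
    GSequence *used = g_sequence_new(NULL);
    guint idx = 42;  /* arbitrary example index */

    /* realize: reject duplicates, then record the index */
    if (g_sequence_lookup(used, GUINT_TO_POINTER(idx), cmp_uint32, NULL)) {
        g_printerr("acpi-index %u already exists\n", idx);
    } else {
        g_sequence_insert_sorted(used, GUINT_TO_POINTER(idx), cmp_uint32, NULL);
    }

    /* unrealize: drop the index so it can be reused */
    g_sequence_remove(g_sequence_lookup(used, GUINT_TO_POINTER(idx),
                                        cmp_uint32, NULL));

    g_sequence_free(used);
    return 0;
}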
@@ -1648,7 +1716,7 @@ void pci_device_set_intx_routing_notifier(PCIDevice *dev,
  * 9.1: Interrupt routing. Table 9-1
  *
  * the PCI Express Base Specification, Revision 2.1
- * 2.2.8.1: INTx interrutp signaling - Rules
+ * 2.2.8.1: INTx interrupt signaling - Rules
  *          the Implementation Note
  *          Table 2-20
  */
@@ -1980,6 +2048,8 @@ PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn)
 {
     return bus->devices[devfn];
 }
 
+#define ONBOARD_INDEX_MAX (16 * 1024 - 1)
+
 static void pci_qdev_realize(DeviceState *qdev, Error **errp)
 {
     PCIDevice *pci_dev = (PCIDevice *)qdev;
@@ -1989,6 +2059,35 @@ static void pci_qdev_realize(DeviceState *qdev, Error **errp)
     bool is_default_rom;
     uint16_t class_id;
 
+    /*
+     * capped by systemd (see: udev-builtin-net_id.c); as it's the only
+     * known user, honor its limit to avoid users misconfiguring QEMU and
+     * then wondering why acpi-index doesn't work
+     */
+    if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) {
+        error_setg(errp, "acpi-index should be less than or equal to %u",
+                   ONBOARD_INDEX_MAX);
+        return;
+    }
+
+    /*
+     * make sure that acpi-index is unique across all present PCI devices
+     */
+    if (pci_dev->acpi_index) {
+        GSequence *used_indexes = pci_acpi_index_list();
+
+        if (g_sequence_lookup(used_indexes,
+                              GINT_TO_POINTER(pci_dev->acpi_index),
+                              g_cmp_uint32, NULL)) {
+            error_setg(errp, "a PCI device with acpi-index = %" PRIu32
+                       " already exists", pci_dev->acpi_index);
+            return;
+        }
+        g_sequence_insert_sorted(used_indexes,
+                                 GINT_TO_POINTER(pci_dev->acpi_index),
+                                 g_cmp_uint32, NULL);
+    }
+
     if (pci_dev->romsize != -1 && !is_power_of_2(pci_dev->romsize)) {
         error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
         return;
@@ -2210,15 +2309,14 @@ static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
                                Error **errp)
 {
     int64_t size;
-    char *path;
+    g_autofree char *path = NULL;
     void *ptr;
     char name[32];
     const VMStateDescription *vmsd;
 
-    if (!pdev->romfile)
-        return;
-    if (strlen(pdev->romfile) == 0)
+    if (!pdev->romfile || !strlen(pdev->romfile)) {
         return;
+    }
 
     if (!pdev->rom_bar) {
         /*
@@ -2253,23 +2351,20 @@ static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
     size = get_image_size(path);
     if (size < 0) {
         error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
-        g_free(path);
         return;
     } else if (size == 0) {
         error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
-        g_free(path);
         return;
     } else if (size > 2 * GiB) {
         error_setg(errp, "romfile \"%s\" too large (size cannot exceed 2 GiB)",
                    pdev->romfile);
-        g_free(path);
         return;
     }
     if (pdev->romsize != -1) {
         if (size > pdev->romsize) {
-            error_setg(errp, "romfile \"%s\" (%u bytes) is too large for ROM size %u",
+            error_setg(errp, "romfile \"%s\" (%u bytes) "
+                       "is too large for ROM size %u",
                        pdev->romfile, (uint32_t)size, pdev->romsize);
-            g_free(path);
             return;
         }
     } else {
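The char *path to g_autofree conversion in pci_add_option_rom() is what lets every g_free(path) call on the error paths above be dropped: g_autofree (GLib >= 2.44, built on __attribute__((cleanup))) frees the pointer automatically when it goes out of scope. A tiny self-contained illustration, with an arbitrary file name:

#include <glib.h>
#include <stdio.h>

static int check_file(const char *name)
{
    g_autofree char *path = g_strdup_printf("/tmp/%s", name);

    if (!g_file_test(path, G_FILE_TEST_EXISTS)) {
        return -1;  /* path is freed automatically here... */
    }
    printf("found %s\n", path);
    return 0;       /* ...and here, with no explicit g_free() anywhere */
}

int main(void)
{
    check_file("rom.bin");
    return 0;
}

@@ -2277,21 +2372,18 @@ static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **errp) } vmsd = qdev_get_vmsd(DEVICE(pdev)); + snprintf(name, sizeof(name), "%s.rom", + vmsd ? 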
vmsd->name : object_get_typename(OBJECT(pdev))); - if (vmsd) { - snprintf(name, sizeof(name), "%s.rom", vmsd->name); - } else { - snprintf(name, sizeof(name), "%s.rom", object_get_typename(OBJECT(pdev))); - } pdev->has_rom = true; - memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, &error_fatal); + memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, + &error_fatal); + ptr = memory_region_get_ram_ptr(&pdev->rom); if (load_image_size(path, ptr, size) < 0) { error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile); - g_free(path); return; } - g_free(path); if (is_default_rom) { /* Only the default rom images will be patched (if needed). */ diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c index dd5af508f9..e7b9345615 100644 --- a/hw/pci/pci_bridge.c +++ b/hw/pci/pci_bridge.c @@ -184,11 +184,11 @@ static void pci_bridge_init_vga_aliases(PCIBridge *br, PCIBus *parent, } } -static PCIBridgeWindows *pci_bridge_region_init(PCIBridge *br) +static void pci_bridge_region_init(PCIBridge *br) { PCIDevice *pd = PCI_DEVICE(br); PCIBus *parent = pci_get_bus(pd); - PCIBridgeWindows *w = g_new(PCIBridgeWindows, 1); + PCIBridgeWindows *w = &br->windows; uint16_t cmd = pci_get_word(pd->config + PCI_COMMAND); pci_bridge_init_alias(br, &w->alias_pref_mem, @@ -211,8 +211,6 @@ static PCIBridgeWindows *pci_bridge_region_init(PCIBridge *br) cmd & PCI_COMMAND_IO); pci_bridge_init_vga_aliases(br, parent, w->alias_vga); - - return w; } static void pci_bridge_region_del(PCIBridge *br, PCIBridgeWindows *w) @@ -234,19 +232,18 @@ static void pci_bridge_region_cleanup(PCIBridge *br, PCIBridgeWindows *w) object_unparent(OBJECT(&w->alias_vga[QEMU_PCI_VGA_IO_LO])); object_unparent(OBJECT(&w->alias_vga[QEMU_PCI_VGA_IO_HI])); object_unparent(OBJECT(&w->alias_vga[QEMU_PCI_VGA_MEM])); - g_free(w); } void pci_bridge_update_mappings(PCIBridge *br) { - PCIBridgeWindows *w = br->windows; + PCIBridgeWindows *w = &br->windows; /* Make updates atomic to: handle the case of one VCPU updating the bridge * while another accesses an unaffected region. */ memory_region_transaction_begin(); - pci_bridge_region_del(br, br->windows); + pci_bridge_region_del(br, w); pci_bridge_region_cleanup(br, w); - br->windows = pci_bridge_region_init(br); + pci_bridge_region_init(br); memory_region_transaction_commit(); } @@ -385,7 +382,7 @@ void pci_bridge_initfn(PCIDevice *dev, const char *typename) sec_bus->address_space_io = &br->address_space_io; memory_region_init(&br->address_space_io, OBJECT(br), "pci_bridge_io", 4 * GiB); - br->windows = pci_bridge_region_init(br); + pci_bridge_region_init(br); QLIST_INIT(&sec_bus->child); QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling); } @@ -396,8 +393,8 @@ void pci_bridge_exitfn(PCIDevice *pci_dev) PCIBridge *s = PCI_BRIDGE(pci_dev); assert(QLIST_EMPTY(&s->sec_bus.child)); QLIST_REMOVE(&s->sec_bus, sibling); - pci_bridge_region_del(s, s->windows); - pci_bridge_region_cleanup(s, s->windows); + pci_bridge_region_del(s, &s->windows); + pci_bridge_region_cleanup(s, &s->windows); /* object_unparent() is called automatically during device deletion */ } diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index 924fdabd15..b8c24cf45f 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -39,6 +39,11 @@ #define PCIE_DEV_PRINTF(dev, fmt, ...) 
\ PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__) +static bool pcie_sltctl_powered_off(uint16_t sltctl) +{ + return (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_OFF + && (sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_OFF; +} /*************************************************************************** * pci express capability helper functions @@ -373,8 +378,8 @@ void pcie_cap_slot_enable_power(PCIDevice *dev) uint32_t sltcap = pci_get_long(exp_cap + PCI_EXP_SLTCAP); if (sltcap & PCI_EXP_SLTCAP_PCP) { - pci_set_word_by_mask(exp_cap + PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_PCC, PCI_EXP_SLTCTL_PWR_ON); + pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL, + PCI_EXP_SLTCTL_PCC); } } @@ -395,6 +400,7 @@ static void pcie_cap_update_power(PCIDevice *hotplug_dev) if (sltcap & PCI_EXP_SLTCAP_PCP) { power = (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_ON; + /* Don't we need to check also (sltctl & PCI_EXP_SLTCTL_PIC) ? */ } pci_for_each_device(sec_bus, pci_bus_num(sec_bus), @@ -579,8 +585,7 @@ void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev, return; } - if (((sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_OFF) && - ((sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_OFF)) { + if (pcie_sltctl_powered_off(sltctl)) { /* slot is powered off -> unplug without round-trip to the guest */ pcie_cap_slot_do_unplug(hotplug_pdev); hotplug_event_notify(hotplug_pdev); @@ -634,8 +639,8 @@ void pcie_cap_slot_init(PCIDevice *dev, PCIESlot *s) PCI_EXP_SLTCTL_PIC | PCI_EXP_SLTCTL_AIC); pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_PIC_OFF | - PCI_EXP_SLTCTL_AIC_OFF); + PCI_EXP_SLTCTL_PWR_IND_OFF | + PCI_EXP_SLTCTL_ATTN_IND_OFF); pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL, PCI_EXP_SLTCTL_PIC | PCI_EXP_SLTCTL_AIC | @@ -679,7 +684,8 @@ void pcie_cap_slot_reset(PCIDevice *dev) PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE); pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_AIC_OFF); + PCI_EXP_SLTCTL_PWR_IND_OFF | + PCI_EXP_SLTCTL_ATTN_IND_OFF); if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) { /* Downstream ports enforce device number 0. */ @@ -694,7 +700,8 @@ void pcie_cap_slot_reset(PCIDevice *dev) PCI_EXP_SLTCTL_PCC); } - pic = populated ? PCI_EXP_SLTCTL_PIC_ON : PCI_EXP_SLTCTL_PIC_OFF; + pic = populated ? + PCI_EXP_SLTCTL_PWR_IND_ON : PCI_EXP_SLTCTL_PWR_IND_OFF; pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL, pic); } @@ -769,10 +776,9 @@ void pcie_cap_slot_write_config(PCIDevice *dev, * this is a work around for guests that overwrite * control of powered off slots before powering them on. 
*/ - if ((sltsta & PCI_EXP_SLTSTA_PDS) && (val & PCI_EXP_SLTCTL_PCC) && - (val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF && - (!(old_slt_ctl & PCI_EXP_SLTCTL_PCC) || - (old_slt_ctl & PCI_EXP_SLTCTL_PIC_OFF) != PCI_EXP_SLTCTL_PIC_OFF)) { + if ((sltsta & PCI_EXP_SLTSTA_PDS) && pcie_sltctl_powered_off(val) && + !pcie_sltctl_powered_off(old_slt_ctl)) + { pcie_cap_slot_do_unplug(dev); } pcie_cap_update_power(dev); diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c index 9a19be44ae..374d593ead 100644 --- a/hw/pci/pcie_aer.c +++ b/hw/pci/pcie_aer.c @@ -113,6 +113,13 @@ int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, uint16_t offset, pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS, PCI_ERR_UNC_SUPPORTED); + if (dev->cap_present & QEMU_PCIE_ERR_UNC_MASK) { + pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK, + PCI_ERR_UNC_MASK_DEFAULT); + pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK, + PCI_ERR_UNC_SUPPORTED); + } + pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER, PCI_ERR_UNC_SEVERITY_DEFAULT); pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_SEVER, @@ -188,8 +195,16 @@ static void pcie_aer_update_uncor_status(PCIDevice *dev) static bool pcie_aer_msg_alldev(PCIDevice *dev, const PCIEAERMsg *msg) { + uint16_t devctl = pci_get_word(dev->config + dev->exp.exp_cap + + PCI_EXP_DEVCTL); if (!(pcie_aer_msg_is_uncor(msg) && - (pci_get_word(dev->config + PCI_COMMAND) & PCI_COMMAND_SERR))) { + (pci_get_word(dev->config + PCI_COMMAND) & PCI_COMMAND_SERR)) && + !((msg->severity == PCI_ERR_ROOT_CMD_NONFATAL_EN) && + (devctl & PCI_EXP_DEVCTL_NFERE)) && + !((msg->severity == PCI_ERR_ROOT_CMD_COR_EN) && + (devctl & PCI_EXP_DEVCTL_CERE)) && + !((msg->severity == PCI_ERR_ROOT_CMD_FATAL_EN) && + (devctl & PCI_EXP_DEVCTL_FERE))) { return false; } diff --git a/hw/pci/pcie_port.c b/hw/pci/pcie_port.c index 65a397ad23..20ff2b39e8 100644 --- a/hw/pci/pcie_port.c +++ b/hw/pci/pcie_port.c @@ -161,6 +161,51 @@ PCIDevice *pcie_find_port_by_pn(PCIBus *bus, uint8_t pn) return NULL; } +/* Find first port in devfn number order */ +PCIDevice *pcie_find_port_first(PCIBus *bus) +{ + int devfn; + + for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { + PCIDevice *d = bus->devices[devfn]; + + if (!d || !pci_is_express(d) || !d->exp.exp_cap) { + continue; + } + + if (object_dynamic_cast(OBJECT(d), TYPE_PCIE_PORT)) { + return d; + } + } + + return NULL; +} + +int pcie_count_ds_ports(PCIBus *bus) +{ + int dsp_count = 0; + int devfn; + + for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { + PCIDevice *d = bus->devices[devfn]; + + if (!d || !pci_is_express(d) || !d->exp.exp_cap) { + continue; + } + if (object_dynamic_cast(OBJECT(d), TYPE_PCIE_PORT)) { + dsp_count++; + } + } + return dsp_count; +} + +static bool pcie_slot_is_hotpluggbale_bus(HotplugHandler *plug_handler, + BusState *bus) +{ + PCIESlot *s = PCIE_SLOT(bus->parent); + return s->hotplug; +} + static const TypeInfo pcie_port_type_info = { .name = TYPE_PCIE_PORT, .parent = TYPE_PCI_BRIDGE, @@ -188,6 +233,7 @@ static void pcie_slot_class_init(ObjectClass *oc, void *data) hc->plug = pcie_cap_slot_plug_cb; hc->unplug = pcie_cap_slot_unplug_cb; hc->unplug_request = pcie_cap_slot_unplug_request_cb; + hc->is_hotpluggable_bus = pcie_slot_is_hotpluggbale_bus; } static const TypeInfo pcie_slot_type_info = { diff --git a/hw/pci/pcie_sriov.c b/hw/pci/pcie_sriov.c index f0bd72e069..aa5a757b11 100644 --- a/hw/pci/pcie_sriov.c +++ b/hw/pci/pcie_sriov.c @@ -300,3 +300,8 @@ PCIDevice *pcie_sriov_get_vf_at_index(PCIDevice *dev, int n) } 
return NULL; } + +uint16_t pcie_sriov_num_vfs(PCIDevice *dev) +{ + return dev->exp.sriov_pf.num_vfs; +} diff --git a/hw/pci/shpc.c b/hw/pci/shpc.c index fca7f6691a..e7bc7192f1 100644 --- a/hw/pci/shpc.c +++ b/hw/pci/shpc.c @@ -123,10 +123,13 @@ #define SHPC_PCI_TO_IDX(pci_slot) ((pci_slot) - 1) #define SHPC_IDX_TO_PHYSICAL(slot) ((slot) + 1) -static uint16_t shpc_get_status(SHPCDevice *shpc, int slot, uint16_t msk) +static uint8_t shpc_get_status(SHPCDevice *shpc, int slot, uint16_t msk) { uint8_t *status = shpc->config + SHPC_SLOT_STATUS(slot); - return (pci_get_word(status) & msk) >> ctz32(msk); + uint16_t result = (pci_get_word(status) & msk) >> ctz32(msk); + + assert(result <= UINT8_MAX); + return result; } static void shpc_set_status(SHPCDevice *shpc, @@ -223,6 +226,7 @@ void shpc_reset(PCIDevice *d) SHPC_SLOT_STATUS_PRSNT_MASK); shpc_set_status(shpc, i, SHPC_LED_OFF, SHPC_SLOT_PWR_LED_MASK); } + shpc_set_status(shpc, i, SHPC_LED_OFF, SHPC_SLOT_ATTN_LED_MASK); shpc_set_status(shpc, i, 0, SHPC_SLOT_STATUS_66); } shpc_set_sec_bus_speed(shpc, SHPC_SEC_BUS_33); @@ -254,60 +258,66 @@ static void shpc_free_devices_in_slot(SHPCDevice *shpc, int slot) } } -static void shpc_slot_command(SHPCDevice *shpc, uint8_t target, +static bool shpc_slot_is_off(uint8_t state, uint8_t power, uint8_t attn) +{ + return state == SHPC_STATE_DISABLED && power == SHPC_LED_OFF; +} + +static void shpc_slot_command(PCIDevice *d, uint8_t target, uint8_t state, uint8_t power, uint8_t attn) { - uint8_t current_state; + SHPCDevice *shpc = d->shpc; int slot = SHPC_LOGICAL_TO_IDX(target); + uint8_t old_state = shpc_get_status(shpc, slot, SHPC_SLOT_STATE_MASK); + uint8_t old_power = shpc_get_status(shpc, slot, SHPC_SLOT_PWR_LED_MASK); + uint8_t old_attn = shpc_get_status(shpc, slot, SHPC_SLOT_ATTN_LED_MASK); + if (target < SHPC_CMD_TRGT_MIN || slot >= shpc->nslots) { shpc_invalid_command(shpc); return; } - current_state = shpc_get_status(shpc, slot, SHPC_SLOT_STATE_MASK); - if (current_state == SHPC_STATE_ENABLED && state == SHPC_STATE_PWRONLY) { + + if (old_state == SHPC_STATE_ENABLED && state == SHPC_STATE_PWRONLY) { shpc_invalid_command(shpc); return; } - switch (power) { - case SHPC_LED_NO: - break; - default: + if (power == SHPC_LED_NO) { + power = old_power; + } else { /* TODO: send event to monitor */ shpc_set_status(shpc, slot, power, SHPC_SLOT_PWR_LED_MASK); } - switch (attn) { - case SHPC_LED_NO: - break; - default: + + if (attn == SHPC_LED_NO) { + attn = old_attn; + } else { /* TODO: send event to monitor */ shpc_set_status(shpc, slot, attn, SHPC_SLOT_ATTN_LED_MASK); } - if ((current_state == SHPC_STATE_DISABLED && state == SHPC_STATE_PWRONLY) || - (current_state == SHPC_STATE_DISABLED && state == SHPC_STATE_ENABLED)) { + if (state == SHPC_STATE_NO) { + state = old_state; + } else { shpc_set_status(shpc, slot, state, SHPC_SLOT_STATE_MASK); - } else if ((current_state == SHPC_STATE_ENABLED || - current_state == SHPC_STATE_PWRONLY) && - state == SHPC_STATE_DISABLED) { - shpc_set_status(shpc, slot, state, SHPC_SLOT_STATE_MASK); - power = shpc_get_status(shpc, slot, SHPC_SLOT_PWR_LED_MASK); - /* TODO: track what monitor requested. */ - /* Look at LED to figure out whether it's ok to remove the device. 
*/ - if (power == SHPC_LED_OFF) { - shpc_free_devices_in_slot(shpc, slot); - shpc_set_status(shpc, slot, 1, SHPC_SLOT_STATUS_MRL_OPEN); - shpc_set_status(shpc, slot, SHPC_SLOT_STATUS_PRSNT_EMPTY, - SHPC_SLOT_STATUS_PRSNT_MASK); - shpc->config[SHPC_SLOT_EVENT_LATCH(slot)] |= - SHPC_SLOT_EVENT_MRL | - SHPC_SLOT_EVENT_PRESENCE; - } + } + + if (!shpc_slot_is_off(old_state, old_power, old_attn) && + shpc_slot_is_off(state, power, attn)) + { + shpc_free_devices_in_slot(shpc, slot); + shpc_set_status(shpc, slot, 1, SHPC_SLOT_STATUS_MRL_OPEN); + shpc_set_status(shpc, slot, SHPC_SLOT_STATUS_PRSNT_EMPTY, + SHPC_SLOT_STATUS_PRSNT_MASK); + shpc->config[SHPC_SLOT_EVENT_LATCH(slot)] |= + SHPC_SLOT_EVENT_MRL | + SHPC_SLOT_EVENT_PRESENCE; } } -static void shpc_command(SHPCDevice *shpc) +static void shpc_command(PCIDevice *d) { + SHPCDevice *shpc = d->shpc; uint8_t code = pci_get_byte(shpc->config + SHPC_CMD_CODE); uint8_t speed; uint8_t target; @@ -328,7 +338,7 @@ static void shpc_command(SHPCDevice *shpc) state = (code & SHPC_SLOT_STATE_MASK) >> SHPC_SLOT_STATE_SHIFT; power = (code & SHPC_SLOT_PWR_LED_MASK) >> SHPC_SLOT_PWR_LED_SHIFT; attn = (code & SHPC_SLOT_ATTN_LED_MASK) >> SHPC_SLOT_ATTN_LED_SHIFT; - shpc_slot_command(shpc, target, state, power, attn); + shpc_slot_command(d, target, state, power, attn); break; case 0x40 ... 0x47: speed = code & SHPC_SEC_BUS_MASK; @@ -346,10 +356,10 @@ static void shpc_command(SHPCDevice *shpc) } for (i = 0; i < shpc->nslots; ++i) { if (!(shpc_get_status(shpc, i, SHPC_SLOT_STATUS_MRL_OPEN))) { - shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN, + shpc_slot_command(d, i + SHPC_CMD_TRGT_MIN, SHPC_STATE_PWRONLY, SHPC_LED_ON, SHPC_LED_NO); } else { - shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN, + shpc_slot_command(d, i + SHPC_CMD_TRGT_MIN, SHPC_STATE_NO, SHPC_LED_OFF, SHPC_LED_NO); } } @@ -367,10 +377,10 @@ static void shpc_command(SHPCDevice *shpc) } for (i = 0; i < shpc->nslots; ++i) { if (!(shpc_get_status(shpc, i, SHPC_SLOT_STATUS_MRL_OPEN))) { - shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN, + shpc_slot_command(d, i + SHPC_CMD_TRGT_MIN, SHPC_STATE_ENABLED, SHPC_LED_ON, SHPC_LED_NO); } else { - shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN, + shpc_slot_command(d, i + SHPC_CMD_TRGT_MIN, SHPC_STATE_NO, SHPC_LED_OFF, SHPC_LED_NO); } } @@ -402,7 +412,7 @@ static void shpc_write(PCIDevice *d, unsigned addr, uint64_t val, int l) shpc->config[a] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */ } if (ranges_overlap(addr, l, SHPC_CMD_CODE, 2)) { - shpc_command(shpc); + shpc_command(d); } shpc_interrupt_update(d); } @@ -486,8 +496,9 @@ static const MemoryRegionOps shpc_mmio_ops = { .max_access_size = 4, }, }; -static void shpc_device_plug_common(PCIDevice *affected_dev, int *slot, - SHPCDevice *shpc, Error **errp) + +static bool shpc_device_get_slot(PCIDevice *affected_dev, int *slot, + SHPCDevice *shpc, Error **errp) { int pci_slot = PCI_SLOT(affected_dev->devfn); *slot = SHPC_PCI_TO_IDX(pci_slot); @@ -497,21 +508,20 @@ static void shpc_device_plug_common(PCIDevice *affected_dev, int *slot, "controller. 
Valid slots are between %d and %d.", pci_slot, SHPC_IDX_TO_PCI(0), SHPC_IDX_TO_PCI(shpc->nslots) - 1); - return; + return false; } + + return true; } void shpc_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { - Error *local_err = NULL; PCIDevice *pci_hotplug_dev = PCI_DEVICE(hotplug_dev); SHPCDevice *shpc = pci_hotplug_dev->shpc; int slot; - shpc_device_plug_common(PCI_DEVICE(dev), &slot, shpc, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!shpc_device_get_slot(PCI_DEVICE(dev), &slot, shpc, errp)) { return; } @@ -553,16 +563,13 @@ void shpc_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, void shpc_device_unplug_request_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { - Error *local_err = NULL; PCIDevice *pci_hotplug_dev = PCI_DEVICE(hotplug_dev); SHPCDevice *shpc = pci_hotplug_dev->shpc; uint8_t state; uint8_t led; int slot; - shpc_device_plug_common(PCI_DEVICE(dev), &slot, shpc, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!shpc_device_get_slot(PCI_DEVICE(dev), &slot, shpc, errp)) { return; } diff --git a/hw/pci/trace-events b/hw/pci/trace-events index aaf46bc92d..42430869ce 100644 --- a/hw/pci/trace-events +++ b/hw/pci/trace-events @@ -3,6 +3,7 @@ # pci.c pci_update_mappings_del(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 pci_update_mappings_add(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 +pci_route_irq(int dev_irq, const char *dev_path, int parent_irq, const char *parent_path) "IRQ %d @%s -> IRQ %d @%s" # pci_host.c pci_cfg_read(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, unsigned offs, unsigned val) "%s %02x:%02x.%x @0x%x -> 0x%x" diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig index c898021b5f..a689d9b219 100644 --- a/hw/ppc/Kconfig +++ b/hw/ppc/Kconfig @@ -3,7 +3,7 @@ config PSERIES imply PCI_DEVICES imply TEST_DEVICES imply VIRTIO_VGA - imply NVDIMM + select NVDIMM select DIMM select PCI select SPAPR_VSCSI diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index 117c9c08ed..b6eb599751 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -898,6 +898,7 @@ void ppce500_init(MachineState *machine) MemoryRegion *address_space_mem = get_system_memory(); PPCE500MachineState *pms = PPCE500_MACHINE(machine); const PPCE500MachineClass *pmc = PPCE500_MACHINE_GET_CLASS(machine); + MachineClass *mc = MACHINE_CLASS(pmc); PCIBus *pci_bus; CPUPPCState *env = NULL; uint64_t loadaddr; @@ -1073,7 +1074,7 @@ void ppce500_init(MachineState *machine) if (pci_bus) { /* Register network interfaces. 
*/ for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, "virtio-net-pci", NULL); + pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); } } diff --git a/hw/ppc/e500plat.c b/hw/ppc/e500plat.c index 3032bd3f6d..7aa2f2107a 100644 --- a/hw/ppc/e500plat.c +++ b/hw/ppc/e500plat.c @@ -99,6 +99,7 @@ static void e500plat_machine_class_init(ObjectClass *oc, void *data) mc->max_cpus = 32; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("e500v2_v30"); mc->default_ram_id = "mpc8544ds.ram"; + mc->default_nic = "virtio-net-pci"; machine_class_allow_dynamic_sysbus_dev(mc, TYPE_ETSEC_COMMON); } diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index 460c14b5e3..535710314a 100644 --- a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -132,6 +132,7 @@ static void ppc_core99_reset(void *opaque) static void ppc_core99_init(MachineState *machine) { Core99MachineState *core99_machine = CORE99_MACHINE(machine); + MachineClass *mc = MACHINE_GET_CLASS(machine); PowerPCCPU *cpu = NULL; CPUPPCState *env = NULL; char *filename; @@ -444,7 +445,7 @@ static void ppc_core99_init(MachineState *machine) } for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, "sungem", NULL); + pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); } /* The NewWorld NVRAM is not located in the MacIO device */ @@ -577,6 +578,7 @@ static void core99_machine_class_init(ObjectClass *oc, void *data) mc->max_cpus = 1; mc->default_boot_order = "cd"; mc->default_display = "std"; + mc->default_nic = "sungem"; mc->kvm_type = core99_kvm_type; #ifdef TARGET_PPC64 mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("970fx_v3.1"); diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c index 2e4cc3fe0b..510ff0eaaf 100644 --- a/hw/ppc/mac_oldworld.c +++ b/hw/ppc/mac_oldworld.c @@ -87,6 +87,7 @@ static void ppc_heathrow_reset(void *opaque) static void ppc_heathrow_init(MachineState *machine) { const char *bios_name = machine->firmware ?: PROM_FILENAME; + MachineClass *mc = MACHINE_GET_CLASS(machine); PowerPCCPU *cpu = NULL; CPUPPCState *env = NULL; char *filename; @@ -276,7 +277,7 @@ static void ppc_heathrow_init(MachineState *machine) pci_vga_init(pci_bus); for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, "ne2k_pci", NULL); + pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, NULL); } /* MacIO IDE */ @@ -424,6 +425,7 @@ static void heathrow_class_init(ObjectClass *oc, void *data) mc->kvm_type = heathrow_kvm_type; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("750_v3.1"); mc->default_display = "std"; + mc->default_nic = "ne2k_pci"; mc->ignore_boot_device_suffixes = true; mc->default_ram_id = "ppc_heathrow.ram"; fwc->get_dev_path = heathrow_fw_dev_path; diff --git a/hw/ppc/mpc8544ds.c b/hw/ppc/mpc8544ds.c index 7dd5219736..b7130903d6 100644 --- a/hw/ppc/mpc8544ds.c +++ b/hw/ppc/mpc8544ds.c @@ -61,6 +61,7 @@ static void mpc8544ds_machine_class_init(ObjectClass *oc, void *data) mc->max_cpus = 15; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("e500v2_v30"); mc->default_ram_id = "mpc8544ds.ram"; + mc->default_nic = "virtio-net-pci"; } #define TYPE_MPC8544DS_MACHINE MACHINE_TYPE_NAME("mpc8544ds") diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c index 7cc375df05..af5489de26 100644 --- a/hw/ppc/pegasos2.c +++ b/hw/ppc/pegasos2.c @@ -73,6 +73,8 @@ struct Pegasos2MachineState { MachineState parent_obj; PowerPCCPU *cpu; DeviceState *mv; + qemu_irq mv_pirq[PCI_NUM_PINS]; + qemu_irq via_pirq[PCI_NUM_PINS]; Vof *vof; void *fdt_blob; uint64_t kernel_addr; @@ 
-95,6 +97,15 @@ static void pegasos2_cpu_reset(void *opaque) } } +static void pegasos2_pci_irq(void *opaque, int n, int level) +{ + Pegasos2MachineState *pm = opaque; + + /* PCI interrupt lines are connected to both MV64361 and VT8231 */ + qemu_set_irq(pm->mv_pirq[n], level); + qemu_set_irq(pm->via_pirq[n], level); +} + static void pegasos2_init(MachineState *machine) { Pegasos2MachineState *pm = PEGASOS2_MACHINE(machine); @@ -106,7 +117,7 @@ static void pegasos2_init(MachineState *machine) I2CBus *i2c_bus; const char *fwname = machine->firmware ?: PROM_FILENAME; char *filename; - int sz; + int i, sz; uint8_t *spd_data; /* init CPU */ @@ -156,11 +167,18 @@ static void pegasos2_init(MachineState *machine) /* Marvell Discovery II system controller */ pm->mv = DEVICE(sysbus_create_simple(TYPE_MV64361, -1, qdev_get_gpio_in(DEVICE(pm->cpu), PPC6xx_INPUT_INT))); + for (i = 0; i < PCI_NUM_PINS; i++) { + pm->mv_pirq[i] = qdev_get_gpio_in_named(pm->mv, "gpp", 12 + i); + } pci_bus = mv64361_get_pci_bus(pm->mv, 1); + pci_bus_irqs(pci_bus, pegasos2_pci_irq, pm, PCI_NUM_PINS); /* VIA VT8231 South Bridge (multifunction PCI device) */ via = OBJECT(pci_create_simple_multifunction(pci_bus, PCI_DEVFN(12, 0), true, TYPE_VT8231_ISA)); + for (i = 0; i < PCI_NUM_PINS; i++) { + pm->via_pirq[i] = qdev_get_gpio_in_named(DEVICE(via), "pirq", i); + } object_property_add_alias(OBJECT(machine), "rtc-time", object_resolve_path_component(via, "rtc"), "date"); @@ -267,6 +285,12 @@ static void pegasos2_machine_reset(MachineState *machine, ShutdownCause reason) PCI_INTERRUPT_LINE, 2, 0x9); pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | 0x50, 1, 0x2); + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | + 0x55, 1, 0x90); + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | + 0x56, 1, 0x99); + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | + 0x57, 1, 0x90); pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | PCI_INTERRUPT_LINE, 2, 0x109); @@ -500,7 +524,7 @@ static void pegasos2_machine_class_init(ObjectClass *oc, void *data) mc->block_default_type = IF_IDE; mc->default_boot_order = "cd"; mc->default_display = "std"; - mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("7400_v2.9"); + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("7457_v1.2"); mc->default_ram_id = "pegasos2.ram"; mc->default_ram_size = 512 * MiB; diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 44b1fbbc93..590fc64b32 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -284,73 +284,19 @@ static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t pir, g_free(reg); } -static PnvPhb4PecState *pnv_phb4_get_pec(PnvChip *chip, PnvPHB4 *phb, - Error **errp) -{ - PnvPHB *phb_base = phb->phb_base; - PnvPhb4PecState *pecs = NULL; - int chip_id = phb->chip_id; - int index = phb->phb_id; - int i, j; - - if (phb_base->version == 4) { - Pnv9Chip *chip9 = PNV9_CHIP(chip); - - pecs = chip9->pecs; - } else if (phb_base->version == 5) { - Pnv10Chip *chip10 = PNV10_CHIP(chip); - - pecs = chip10->pecs; - } else { - g_assert_not_reached(); - } - - for (i = 0; i < chip->num_pecs; i++) { - /* - * For each PEC, check the amount of phbs it supports - * and see if the given phb4 index matches an index. - */ - PnvPhb4PecState *pec = &pecs[i]; - - for (j = 0; j < pec->num_phbs; j++) { - if (index == pnv_phb4_pec_get_phb_id(pec, j)) { - return pec; - } - } - } - error_setg(errp, - "pnv-phb4 chip-id %d index %d didn't match any existing PEC", - chip_id, index); - - return NULL; -} - /* - * Adds a PnvPHB to the chip. 
Returns the parent obj of the
- * PHB which varies with each version (phb version 3 is parented
- * by the chip, version 4 and 5 are parented by the PEC
- * device).
- *
- * TODO: for version 3 we're still parenting the PHB with the
- * chip. We should parent with a (so far not implemented)
- * PHB3 PEC device.
+ * Adds a PnvPHB to the chip on P8.
+ * Implemented here, like for default PHBs.
  */
-Object *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb, Error **errp)
+PnvChip *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb)
 {
-    if (phb->version == 3) {
-        Pnv8Chip *chip8 = PNV8_CHIP(chip);
+    Pnv8Chip *chip8 = PNV8_CHIP(chip);
 
-        phb->chip = chip;
+    phb->chip = chip;
 
-        chip8->phbs[chip8->num_phbs] = phb;
-        chip8->num_phbs++;
-
-        return OBJECT(chip);
-    }
-
-    phb->pec = pnv_phb4_get_pec(chip, PNV_PHB4(phb->backend), errp);
-
-    return OBJECT(phb->pec);
+    chip8->phbs[chip8->num_phbs] = phb;
+    chip8->num_phbs++;
+    return chip;
 }
 
 static void pnv_chip_power8_dt_populate(PnvChip *chip, void *fdt)
@@ -2225,7 +2171,7 @@ static void pnv_machine_power9_class_init(ObjectClass *oc, void *data)
     };
 
     mc->desc = "IBM PowerNV (Non-Virtualized) POWER9";
-    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0");
+    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.2");
     compat_props_add(mc->compat_props, phb_compat, G_N_ELEMENTS(phb_compat));
 
     xfc->match_nvt = pnv_match_nvt;
diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c
index 410f31bdf8..0bc3ad41c8 100644
--- a/hw/ppc/pnv_core.c
+++ b/hw/ppc/pnv_core.c
@@ -348,7 +348,7 @@ static const TypeInfo pnv_core_infos[] = {
     DEFINE_PNV_CORE_TYPE(power8, "power8e_v2.1"),
     DEFINE_PNV_CORE_TYPE(power8, "power8_v2.0"),
     DEFINE_PNV_CORE_TYPE(power8, "power8nvl_v1.0"),
-    DEFINE_PNV_CORE_TYPE(power9, "power9_v2.0"),
+    DEFINE_PNV_CORE_TYPE(power9, "power9_v2.2"),
     DEFINE_PNV_CORE_TYPE(power10, "power10_v2.0"),
 };
 
diff --git a/hw/ppc/pnv_lpc.c b/hw/ppc/pnv_lpc.c
index 71143b7692..605d390861 100644
--- a/hw/ppc/pnv_lpc.c
+++ b/hw/ppc/pnv_lpc.c
@@ -734,10 +734,13 @@ static void pnv_lpc_realize(DeviceState *dev, Error **errp)
     /* Create MMIO regions for LPC HC and OPB registers */
     memory_region_init_io(&lpc->opb_master_regs, OBJECT(dev), &opb_master_ops,
                           lpc, "lpc-opb-master", LPC_OPB_REGS_OPB_SIZE);
+    lpc->opb_master_regs.disable_reentrancy_guard = true;
     memory_region_add_subregion(&lpc->opb_mr, LPC_OPB_REGS_OPB_ADDR,
                                 &lpc->opb_master_regs);
     memory_region_init_io(&lpc->lpc_hc_regs, OBJECT(dev), &lpc_hc_ops, lpc,
                           "lpc-hc", LPC_HC_REGS_OPB_SIZE);
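The disable_reentrancy_guard assignments above opt these regions out of QEMU's memory-region re-entrancy protection, which by default drops an MMIO access that arrives while a handler for the same device is already on the stack; the LPC regions legitimately reach themselves via the xscom path, so the guard has to be lifted. Below is a deliberately over-simplified, runnable toy model of that mechanism; QEMU's real flatview dispatch is far more involved, and the names here are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct region {
    bool in_handler;                /* the guard state */
    bool disable_reentrancy_guard;  /* opt-out, as for lpc-hc above */
};

static void mmio_write(struct region *r, int depth);

static void device_side_effect(struct region *r, int depth)
{
    if (depth < 2) {
        mmio_write(r, depth + 1);   /* device reaches back into itself */
    }
}

static void mmio_write(struct region *r, int depth)
{
    if (r->in_handler && !r->disable_reentrancy_guard) {
        printf("nested access dropped (depth %d)\n", depth);
        return;
    }
    printf("write handled (depth %d)\n", depth);
    r->in_handler = true;
    device_side_effect(r, depth);
    r->in_handler = false;
}

int main(void)
{
    struct region guarded = { false, false };
    struct region trusted = { false, true };

    mmio_write(&guarded, 0);  /* the nested write is blocked */
    mmio_write(&trusted, 0);  /* the nested write goes through */
    return 0;
}

+    /* xscom writes to lpc-hc. 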
As such mark lpc-hc re-entrancy safe */ + lpc->lpc_hc_regs.disable_reentrancy_guard = true; memory_region_add_subregion(&lpc->opb_mr, LPC_HC_REGS_OPB_ADDR, &lpc->lpc_hc_regs); @@ -837,7 +840,7 @@ ISABus *pnv_lpc_isa_create(PnvLpcController *lpc, bool use_cpld, Error **errp) irqs = qemu_allocate_irqs(handler, lpc, ISA_NUM_IRQS); - isa_bus_irqs(isa_bus, irqs); + isa_bus_register_input_irqs(isa_bus, irqs); return isa_bus; } diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c index 2880c81cb1..f969fa3c29 100644 --- a/hw/ppc/ppc440_bamboo.c +++ b/hw/ppc/ppc440_bamboo.c @@ -161,6 +161,7 @@ static void bamboo_init(MachineState *machine) { const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; + MachineClass *mc = MACHINE_GET_CLASS(machine); unsigned int pci_irq_nrs[4] = { 28, 27, 26, 25 }; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *isa = g_new(MemoryRegion, 1); @@ -246,7 +247,7 @@ static void bamboo_init(MachineState *machine) * There are no PCI NICs on the Bamboo board, but there are * PCI slots, so we can pick whatever default model we want. */ - pci_nic_init_nofail(&nd_table[i], pcibus, "e1000", NULL); + pci_nic_init_nofail(&nd_table[i], pcibus, mc->default_nic, NULL); } } @@ -296,6 +297,7 @@ static void bamboo_machine_init(MachineClass *mc) mc->init = bamboo_init; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("440epb"); mc->default_ram_id = "ppc4xx.sdram"; + mc->default_nic = "e1000"; } DEFINE_MACHINE("bamboo", bamboo_machine_init) diff --git a/hw/ppc/ppc4xx_sdram.c b/hw/ppc/ppc4xx_sdram.c index 4501fb28a5..c0c87ff636 100644 --- a/hw/ppc/ppc4xx_sdram.c +++ b/hw/ppc/ppc4xx_sdram.c @@ -33,6 +33,7 @@ #include "qemu/units.h" #include "qapi/error.h" #include "qemu/log.h" +#include "qemu/error-report.h" #include "exec/address-spaces.h" /* get_system_memory() */ #include "hw/irq.h" #include "hw/qdev-properties.h" diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index fcbe4c5837..33bf232f8b 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -212,14 +212,13 @@ static int PPC_NVRAM_set_params (Nvram *nvram, uint16_t NVRAM_size, static int prep_set_cmos_checksum(DeviceState *dev, void *opaque) { uint16_t checksum = *(uint16_t *)opaque; - ISADevice *rtc; if (object_dynamic_cast(OBJECT(dev), TYPE_MC146818_RTC)) { - rtc = ISA_DEVICE(dev); - rtc_set_memory(rtc, 0x2e, checksum & 0xff); - rtc_set_memory(rtc, 0x3e, checksum & 0xff); - rtc_set_memory(rtc, 0x2f, checksum >> 8); - rtc_set_memory(rtc, 0x3f, checksum >> 8); + MC146818RtcState *rtc = MC146818_RTC(dev); + mc146818rtc_set_cmos_data(rtc, 0x2e, checksum & 0xff); + mc146818rtc_set_cmos_data(rtc, 0x3e, checksum & 0xff); + mc146818rtc_set_cmos_data(rtc, 0x2f, checksum >> 8); + mc146818rtc_set_cmos_data(rtc, 0x3f, checksum >> 8); object_property_add_alias(qdev_get_machine(), "rtc-time", OBJECT(rtc), "date"); @@ -230,6 +229,7 @@ static int prep_set_cmos_checksum(DeviceState *dev, void *opaque) static void ibm_40p_init(MachineState *machine) { const char *bios_name = machine->firmware ?: "openbios-ppc"; + MachineClass *mc = MACHINE_GET_CLASS(machine); CPUPPCState *env = NULL; uint16_t cmos_checksum; PowerPCCPU *cpu; @@ -271,9 +271,11 @@ static void ibm_40p_init(MachineState *machine) } /* PCI -> ISA bridge */ - i82378_dev = DEVICE(pci_create_simple(pci_bus, PCI_DEVFN(11, 0), "i82378")); + i82378_dev = DEVICE(pci_new(PCI_DEVFN(11, 0), "i82378")); qdev_connect_gpio_out(i82378_dev, 0, qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT)); + qdev_realize_and_unref(i82378_dev, 
BUS(pci_bus), &error_fatal); + sysbus_connect_irq(pcihost, 0, qdev_get_gpio_in(i82378_dev, 15)); isa_bus = ISA_BUS(qdev_get_child_bus(i82378_dev, "isa.0")); @@ -324,7 +326,7 @@ static void ibm_40p_init(MachineState *machine) pci_vga_init(pci_bus); for (i = 0; i < nb_nics; i++) { - pci_nic_init_nofail(&nd_table[i], pci_bus, "pcnet", + pci_nic_init_nofail(&nd_table[i], pci_bus, mc->default_nic, i == 0 ? "3" : NULL); } } @@ -428,6 +430,7 @@ static void ibm_40p_machine_init(MachineClass *mc) mc->default_boot_order = "c"; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("604"); mc->default_display = "std"; + mc->default_nic = "pcnet"; } DEFINE_MACHINE("40p", ibm_40p_machine_init) diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c index 4a22ce3761..cf065aae0e 100644 --- a/hw/ppc/sam460ex.c +++ b/hw/ppc/sam460ex.c @@ -389,8 +389,8 @@ static void sam460ex_init(MachineState *machine) /* MAL */ dev = qdev_new(TYPE_PPC4xx_MAL); - qdev_prop_set_uint32(dev, "txc-num", 4); - qdev_prop_set_uint32(dev, "rxc-num", 16); + qdev_prop_set_uint8(dev, "txc-num", 4); + qdev_prop_set_uint8(dev, "rxc-num", 16); ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(dev), cpu, &error_fatal); object_unref(OBJECT(dev)); sbdev = SYS_BUS_DEVICE(dev); diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 4921198b9d..dcb7f1c70a 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -2166,7 +2166,7 @@ static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr, break; } } - } while ((index < htabslots) && !qemu_file_rate_limit(f)); + } while ((index < htabslots) && !migration_rate_exceeded(f)); if (index >= htabslots) { assert(index == htabslots); @@ -2237,7 +2237,7 @@ static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr, assert(index == htabslots); index = 0; } - } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final)); + } while ((examined < htabslots) && (!migration_rate_exceeded(f) || final)); if (index >= htabslots) { assert(index == htabslots); @@ -4631,7 +4631,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) smc->dr_lmb_enabled = true; smc->update_dt_enabled = true; - mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0"); + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.2"); mc->has_hotpluggable_cpus = true; mc->nvdimm_supported = true; smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED; @@ -4673,6 +4673,13 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON; smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON; smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF; + + /* + * This cap specifies whether the AIL 3 mode for + * H_SET_MODE is supported. The default is modified + * by default_caps_with_cpu().
+ */ + smc->default_caps.caps[SPAPR_CAP_AIL_MODE_3] = SPAPR_CAP_ON; spapr_caps_add_properties(smc); smc->irq = &spapr_irq_dual; smc->dr_phb_enabled = true; @@ -4735,14 +4742,25 @@ static void spapr_machine_latest_class_options(MachineClass *mc) type_init(spapr_machine_register_##suffix) /* - * pseries-8.0 + * pseries-8.1 */ -static void spapr_machine_8_0_class_options(MachineClass *mc) +static void spapr_machine_8_1_class_options(MachineClass *mc) { /* Defaults for the latest behaviour inherited from the base class */ } -DEFINE_SPAPR_MACHINE(8_0, "8.0", true); +DEFINE_SPAPR_MACHINE(8_1, "8.1", true); + +/* + * pseries-8.0 + */ +static void spapr_machine_8_0_class_options(MachineClass *mc) +{ + spapr_machine_8_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len); +} + +DEFINE_SPAPR_MACHINE(8_0, "8.0", false); /* * pseries-7.2 diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c index b4283055c1..3fd45a6dec 100644 --- a/hw/ppc/spapr_caps.c +++ b/hw/ppc/spapr_caps.c @@ -614,6 +614,33 @@ static void cap_rpt_invalidate_apply(SpaprMachineState *spapr, } } +static void cap_ail_mode_3_apply(SpaprMachineState *spapr, + uint8_t val, Error **errp) +{ + ERRP_GUARD(); + PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + + if (!val) { + return; + } + + if (tcg_enabled()) { + /* AIL-3 is only supported on POWER8 and above CPUs. */ + if (!(pcc->insns_flags2 & PPC2_ISA207S)) { + error_setg(errp, "TCG only supports cap-ail-mode-3 on POWER8 and later CPUs"); + error_append_hint(errp, "Try appending -machine cap-ail-mode-3=off\n"); + return; + } + } else if (kvm_enabled()) { + if (!kvmppc_supports_ail_3()) { + error_setg(errp, "KVM implementation does not support cap-ail-mode-3"); + error_append_hint(errp, "Try appending -machine cap-ail-mode-3=off\n"); + return; + } + } +} + SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = { [SPAPR_CAP_HTM] = { .name = "htm", @@ -731,6 +758,15 @@ SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = { .type = "bool", .apply = cap_rpt_invalidate_apply, }, + [SPAPR_CAP_AIL_MODE_3] = { + .name = "ail-mode-3", + .description = "Alternate Interrupt Location (AIL) mode 3 support", + .index = SPAPR_CAP_AIL_MODE_3, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_ail_mode_3_apply, + }, }; static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr, @@ -750,6 +786,7 @@ static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr, 0, spapr->max_compat_pvr)) { caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF; caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN; + caps.caps[SPAPR_CAP_AIL_MODE_3] = SPAPR_CAP_OFF; } if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_2_06_PLUS, diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 8a4861f45a..9b88dd549a 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -390,6 +390,7 @@ static const TypeInfo spapr_cpu_core_type_infos[] = { DEFINE_SPAPR_CPU_CORE_TYPE("power8nvl_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.2"), DEFINE_SPAPR_CPU_CORE_TYPE("power10_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power10_v2.0"), #ifdef CONFIG_KVM diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 925ff523cc..b904755575 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -8,6 +8,7 @@ #include "qemu/module.h" #include "qemu/error-report.h" #include "exec/exec-all.h" +#include 
"exec/tb-flush.h" #include "helper_regs.h" #include "hw/ppc/ppc.h" #include "hw/ppc/spapr.h" @@ -816,30 +817,32 @@ static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu, } static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu, + SpaprMachineState *spapr, target_ulong mflags, target_ulong value1, target_ulong value2) { - PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); - - if (!(pcc->insns_flags2 & PPC2_ISA207S)) { - return H_P2; - } if (value1) { return H_P3; } + if (value2) { return H_P4; } - if (mflags == 1) { - /* AIL=1 is reserved in POWER8/POWER9/POWER10 */ + /* + * AIL-1 is not architected, and AIL-2 is not supported by QEMU spapr. + * It is supported for faithful emulation of bare metal systems, but for + * compatibility concerns we leave it out of the pseries machine. + */ + if (mflags != 0 && mflags != 3) { return H_UNSUPPORTED_FLAG; } - if (mflags == 2 && (pcc->insns_flags2 & PPC2_ISA310)) { - /* AIL=2 is reserved in POWER10 (ISA v3.1) */ - return H_UNSUPPORTED_FLAG; + if (mflags == 3) { + if (!spapr_get_cap(spapr, SPAPR_CAP_AIL_MODE_3)) { + return H_UNSUPPORTED_FLAG; + } } spapr_set_all_lpcrs(mflags << LPCR_AIL_SHIFT, LPCR_AIL); @@ -858,7 +861,7 @@ static target_ulong h_set_mode(PowerPCCPU *cpu, SpaprMachineState *spapr, ret = h_set_mode_resource_le(cpu, spapr, args[0], args[2], args[3]); break; case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: - ret = h_set_mode_resource_addr_trans_mode(cpu, args[0], + ret = h_set_mode_resource_addr_trans_mode(cpu, spapr, args[0], args[2], args[3]); break; } @@ -1565,8 +1568,6 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu, struct kvmppc_hv_guest_state hv_state; struct kvmppc_pt_regs *regs; hwaddr len; - uint64_t cr; - int i; if (spapr->nested_ptcr == 0) { return H_NOT_AVAILABLE; @@ -1615,12 +1616,7 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu, env->lr = regs->link; env->ctr = regs->ctr; cpu_write_xer(env, regs->xer); - - cr = regs->ccr; - for (i = 7; i >= 0; i--) { - env->crf[i] = cr & 15; - cr >>= 4; - } + ppc_set_cr(env, regs->ccr); env->msr = regs->msr; env->nip = regs->nip; @@ -1697,8 +1693,6 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp) struct kvmppc_hv_guest_state *hvstate; struct kvmppc_pt_regs *regs; hwaddr len; - uint64_t cr; - int i; assert(spapr_cpu->in_nested); @@ -1756,12 +1750,7 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp) regs->link = env->lr; regs->ctr = env->ctr; regs->xer = cpu_read_xer(env); - - cr = 0; - for (i = 0; i < 8; i++) { - cr |= (env->crf[i] & 15) << (4 * (7 - i)); - } - regs->ccr = cr; + regs->ccr = ppc_get_cr(env); if (excp == POWERPC_EXCP_MCHECK || excp == POWERPC_EXCP_RESET || diff --git a/hw/ppc/spapr_nvdimm.c b/hw/ppc/spapr_nvdimm.c index 04a64cada3..a8688243a6 100644 --- a/hw/ppc/spapr_nvdimm.c +++ b/hw/ppc/spapr_nvdimm.c @@ -496,7 +496,6 @@ static int spapr_nvdimm_flush_post_load(void *opaque, int version_id) { SpaprNVDIMMDevice *s_nvdimm = (SpaprNVDIMMDevice *)opaque; SpaprNVDIMMDeviceFlushState *state; - ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context()); HostMemoryBackend *backend = MEMORY_BACKEND(PC_DIMM(s_nvdimm)->hostmem); bool is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL); bool pmem_override = object_property_get_bool(OBJECT(s_nvdimm), @@ -517,7 +516,7 @@ static int spapr_nvdimm_flush_post_load(void *opaque, int version_id) } QLIST_FOREACH(state, &s_nvdimm->pending_nvdimm_flush_states, node) { - thread_pool_submit_aio(pool, flush_worker_cb, state, + thread_pool_submit_aio(flush_worker_cb, state, 
spapr_nvdimm_flush_completion_cb, state); } @@ -664,7 +663,6 @@ static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr, PCDIMMDevice *dimm; HostMemoryBackend *backend = NULL; SpaprNVDIMMDeviceFlushState *state; - ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context()); int fd; if (!drc || !drc->dev || @@ -699,7 +697,7 @@ static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr, state->drcidx = drc_index; - thread_pool_submit_aio(pool, flush_worker_cb, state, + thread_pool_submit_aio(flush_worker_cb, state, spapr_nvdimm_flush_completion_cb, state); continue_token = state->continue_token; diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c index 3f664ea02c..7df21581c2 100644 --- a/hw/ppc/spapr_rtas.c +++ b/hw/ppc/spapr_rtas.c @@ -33,6 +33,7 @@ #include "sysemu/cpus.h" #include "sysemu/hw_accel.h" #include "sysemu/runstate.h" +#include "sysemu/qtest.h" #include "kvm_ppc.h" #include "hw/ppc/spapr.h" @@ -548,6 +549,32 @@ uint64_t qtest_rtas_call(char *cmd, uint32_t nargs, uint64_t args, return H_PARAMETER; } +static bool spapr_qtest_callback(CharBackend *chr, gchar **words) +{ + if (strcmp(words[0], "rtas") == 0) { + uint64_t res, args, ret; + unsigned long nargs, nret; + int rc; + + rc = qemu_strtoul(words[2], NULL, 0, &nargs); + g_assert(rc == 0); + rc = qemu_strtou64(words[3], NULL, 0, &args); + g_assert(rc == 0); + rc = qemu_strtoul(words[4], NULL, 0, &nret); + g_assert(rc == 0); + rc = qemu_strtou64(words[5], NULL, 0, &ret); + g_assert(rc == 0); + res = qtest_rtas_call(words[1], nargs, args, nret, ret); + + qtest_send_prefix(chr); + qtest_sendf(chr, "OK %"PRIu64"\n", res); + + return true; + } + + return false; +} + void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn) { assert((token >= RTAS_TOKEN_BASE) && (token < RTAS_TOKEN_MAX)); @@ -630,6 +657,8 @@ static void core_rtas_register_types(void) rtas_ibm_nmi_register); spapr_rtas_register(RTAS_IBM_NMI_INTERLOCK, "ibm,nmi-interlock", rtas_ibm_nmi_interlock); + + qtest_set_command_cb(spapr_qtest_callback); } type_init(core_rtas_register_types) diff --git a/hw/ppc/spapr_softmmu.c b/hw/ppc/spapr_softmmu.c index 5170a33369..278666317e 100644 --- a/hw/ppc/spapr_softmmu.c +++ b/hw/ppc/spapr_softmmu.c @@ -1,12 +1,14 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" #include "qemu/memalign.h" +#include "qemu/error-report.h" #include "cpu.h" #include "helper_regs.h" #include "hw/ppc/spapr.h" #include "mmu-hash64.h" #include "mmu-book3s-v3.h" + static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex) { /* diff --git a/hw/rdma/Kconfig b/hw/rdma/Kconfig index 8e2211288f..840320bdc0 100644 --- a/hw/rdma/Kconfig +++ b/hw/rdma/Kconfig @@ -1,3 +1,3 @@ config VMW_PVRDMA default y if PCI_DEVICES - depends on PVRDMA && PCI && MSI_NONBROKEN + depends on PVRDMA && MSI_NONBROKEN && VMXNET3_PCI diff --git a/hw/rdma/meson.build b/hw/rdma/meson.build index 7325f40c32..fc7917192f 100644 --- a/hw/rdma/meson.build +++ b/hw/rdma/meson.build @@ -1,10 +1,12 @@ -specific_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files( +softmmu_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files( 'rdma.c', 'rdma_backend.c', - 'rdma_rm.c', 'rdma_utils.c', + 'vmw/pvrdma_qp_ops.c', +)) +specific_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files( + 'rdma_rm.c', 'vmw/pvrdma_cmd.c', 'vmw/pvrdma_dev_ring.c', 'vmw/pvrdma_main.c', - 'vmw/pvrdma_qp_ops.c', )) diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c index cfd85de3e6..038d564433 100644 --- a/hw/rdma/rdma_rm.c +++ b/hw/rdma/rdma_rm.c @@ -23,10 +23,6 @@ #include 
"rdma_backend.h" #include "rdma_rm.h" -/* Page directory and page tables */ -#define PG_DIR_SZ { TARGET_PAGE_SIZE / sizeof(__u64) } -#define PG_TBL_SZ { TARGET_PAGE_SIZE / sizeof(__u64) } - void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf) { g_string_append_printf(buf, "\ttx : %" PRId64 "\n", diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig index 4550b3b938..6528ebfa3a 100644 --- a/hw/riscv/Kconfig +++ b/hw/riscv/Kconfig @@ -44,6 +44,7 @@ config RISCV_VIRT select VIRTIO_MMIO select FW_CFG_DMA select PLATFORM_BUS + select ACPI config SHAKTI_C bool diff --git a/hw/riscv/meson.build b/hw/riscv/meson.build index ab6cae57ea..2f7ee81be3 100644 --- a/hw/riscv/meson.build +++ b/hw/riscv/meson.build @@ -9,5 +9,6 @@ riscv_ss.add(when: 'CONFIG_SIFIVE_E', if_true: files('sifive_e.c')) riscv_ss.add(when: 'CONFIG_SIFIVE_U', if_true: files('sifive_u.c')) riscv_ss.add(when: 'CONFIG_SPIKE', if_true: files('spike.c')) riscv_ss.add(when: 'CONFIG_MICROCHIP_PFSOC', if_true: files('microchip_pfsoc.c')) +riscv_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c')) hw_arch += {'riscv': riscv_ss} diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c index b06944d382..bc678766e7 100644 --- a/hw/riscv/opentitan.c +++ b/hw/riscv/opentitan.c @@ -22,6 +22,7 @@ #include "qemu/cutils.h" #include "hw/riscv/opentitan.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "hw/boards.h" #include "hw/misc/unimp.h" #include "hw/riscv/boot.h" diff --git a/hw/riscv/shakti_c.c b/hw/riscv/shakti_c.c index e43cc9445c..12ea74b032 100644 --- a/hw/riscv/shakti_c.c +++ b/hw/riscv/shakti_c.c @@ -20,6 +20,7 @@ #include "hw/boards.h" #include "hw/riscv/shakti_c.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "hw/intc/sifive_plic.h" #include "hw/intc/riscv_aclint.h" #include "sysemu/sysemu.h" diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c index ad3bb35b34..35a335b8d0 100644 --- a/hw/riscv/sifive_u.c +++ b/hw/riscv/sifive_u.c @@ -99,7 +99,7 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, MachineState *ms = MACHINE(s); uint64_t mem_size = ms->ram_size; void *fdt; - int cpu, fdt_size; + int cpu; uint32_t *cells; char *nodename; uint32_t plic_phandle, prci_phandle, gpio_phandle, phandle = 1; @@ -112,18 +112,10 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, "sifive,plic-1.0.0", "riscv,plic0" }; - if (ms->dtb) { - fdt = ms->fdt = load_device_tree(ms->dtb, &fdt_size); - if (!fdt) { - error_report("load_device_tree() failed"); - exit(1); - } - } else { - fdt = ms->fdt = create_device_tree(&fdt_size); - if (!fdt) { - error_report("create_device_tree() failed"); - exit(1); - } + fdt = ms->fdt = create_device_tree(&s->fdt_size); + if (!fdt) { + error_report("create_device_tree() failed"); + exit(1); } qemu_fdt_setprop_string(fdt, "/", "model", "SiFive HiFive Unleashed A00"); @@ -560,8 +552,16 @@ static void sifive_u_machine_init(MachineState *machine) qdev_connect_gpio_out(DEVICE(&(s->soc.gpio)), 10, qemu_allocate_irq(sifive_u_machine_reset, NULL, 0)); - /* create device tree */ - create_fdt(s, memmap, riscv_is_32bit(&s->soc.u_cpus)); + /* load/create device tree */ + if (machine->dtb) { + machine->fdt = load_device_tree(machine->dtb, &s->fdt_size); + if (!machine->fdt) { + error_report("load_device_tree() failed"); + exit(1); + } + } else { + create_fdt(s, memmap, riscv_is_32bit(&s->soc.u_cpus)); + } if (s->start_in_flash) { /* diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index a584d5b3a2..2c5546560a 100644 --- 
a/hw/riscv/spike.c +++ b/hw/riscv/spike.c @@ -332,6 +332,11 @@ static void spike_board_init(MachineState *machine) htif_custom_base); } +static void spike_set_signature(Object *obj, const char *val, Error **errp) +{ + sig_file = g_strdup(val); +} + static void spike_machine_instance_init(Object *obj) { } @@ -350,6 +355,14 @@ static void spike_machine_class_init(ObjectClass *oc, void *data) mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id; mc->numa_mem_supported = true; mc->default_ram_id = "riscv.spike.ram"; + object_class_property_add_str(oc, "signature", NULL, spike_set_signature); + object_class_property_set_description(oc, "signature", + "File to write ACT test signature"); + object_class_property_add_uint8_ptr(oc, "signature-granularity", + &line_size, OBJ_PROP_FLAG_WRITE); + object_class_property_set_description(oc, "signature-granularity", + "Size of each line in ACT signature " + "file"); } static const TypeInfo spike_machine_typeinfo = { diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c new file mode 100644 index 0000000000..7331248f59 --- /dev/null +++ b/hw/riscv/virt-acpi-build.c @@ -0,0 +1,417 @@ +/* + * Support for generating ACPI tables and passing them to Guests + * + * RISC-V virt ACPI generation + * + * Copyright (C) 2008-2010 Kevin O'Connor + * Copyright (C) 2006 Fabrice Bellard + * Copyright (C) 2013 Red Hat Inc + * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD. + * Copyright (C) 2021-2023 Ventana Micro Systems Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/acpi/acpi-defs.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/utils.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "sysemu/reset.h" +#include "migration/vmstate.h" +#include "hw/riscv/virt.h" +#include "hw/riscv/numa.h" +#include "hw/intc/riscv_aclint.h" + +#define ACPI_BUILD_TABLE_SIZE 0x20000 + +typedef struct AcpiBuildState { + /* Copy of table in RAM (for patching) */ + MemoryRegion *table_mr; + MemoryRegion *rsdp_mr; + MemoryRegion *linker_mr; + /* Is table patched? */ + bool patched; +} AcpiBuildState; + +static void acpi_align_size(GArray *blob, unsigned align) +{ + /* + * Align size to multiple of given size. This reduces the chance + * we need to change size in the future (breaking cross version migration).
+ */ + g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); +} + +static void riscv_acpi_madt_add_rintc(uint32_t uid, + const CPUArchIdList *arch_ids, + GArray *entry) +{ + uint64_t hart_id = arch_ids->cpus[uid].arch_id; + + build_append_int_noprefix(entry, 0x18, 1); /* Type */ + build_append_int_noprefix(entry, 20, 1); /* Length */ + build_append_int_noprefix(entry, 1, 1); /* Version */ + build_append_int_noprefix(entry, 0, 1); /* Reserved */ + build_append_int_noprefix(entry, 0x1, 4); /* Flags */ + build_append_int_noprefix(entry, hart_id, 8); /* Hart ID */ + build_append_int_noprefix(entry, uid, 4); /* ACPI Processor UID */ +} + +static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s) +{ + MachineClass *mc = MACHINE_GET_CLASS(s); + MachineState *ms = MACHINE(s); + const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); + + for (int i = 0; i < arch_ids->len; i++) { + Aml *dev; + GArray *madt_buf = g_array_new(0, 1, 1); + + dev = aml_device("C%.03X", i); + aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007"))); + aml_append(dev, aml_name_decl("_UID", + aml_int(arch_ids->cpus[i].arch_id))); + + /* build _MAT object */ + riscv_acpi_madt_add_rintc(i, arch_ids, madt_buf); + aml_append(dev, aml_name_decl("_MAT", + aml_buffer(madt_buf->len, + (uint8_t *)madt_buf->data))); + g_array_free(madt_buf, true); + + aml_append(scope, dev); + } +} + +static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap) +{ + Aml *dev = aml_device("FWCF"); + aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); + + /* device present, functioning, decoding, not shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base, + fw_cfg_memmap->size, AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} + +/* RHCT Node[N] starts at offset 56 */ +#define RHCT_NODE_ARRAY_OFFSET 56 + +/* + * ACPI spec, Revision 6.5+ + * 5.2.36 RISC-V Hart Capabilities Table (RHCT) + * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/16 + * https://drive.google.com/file/d/1nP3nFiH4jkPMp6COOxP6123DCZKR-tia/view + */ +static void build_rhct(GArray *table_data, + BIOSLinker *linker, + RISCVVirtState *s) +{ + MachineClass *mc = MACHINE_GET_CLASS(s); + MachineState *ms = MACHINE(s); + const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); + size_t len, aligned_len; + uint32_t isa_offset, num_rhct_nodes; + RISCVCPU *cpu; + char *isa; + + AcpiTable table = { .sig = "RHCT", .rev = 1, .oem_id = s->oem_id, + .oem_table_id = s->oem_table_id }; + + acpi_table_begin(&table, table_data); + + build_append_int_noprefix(table_data, 0x0, 4); /* Reserved */ + + /* Time Base Frequency */ + build_append_int_noprefix(table_data, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, 8); + + /* ISA + N hart info */ + num_rhct_nodes = 1 + ms->smp.cpus; + + /* Number of RHCT nodes*/ + build_append_int_noprefix(table_data, num_rhct_nodes, 4); + + /* Offset to the RHCT node array */ + build_append_int_noprefix(table_data, RHCT_NODE_ARRAY_OFFSET, 4); + + /* ISA String Node */ + isa_offset = table_data->len - table.table_offset; + build_append_int_noprefix(table_data, 0, 2); /* Type 0 */ + + cpu = &s->soc[0].harts[0]; + isa = riscv_isa_string(cpu); + len = 8 + strlen(isa) + 1; + aligned_len = (len % 2) ? 
(len + 1) : len; + + build_append_int_noprefix(table_data, aligned_len, 2); /* Length */ + build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ + + /* ISA string length including NUL */ + build_append_int_noprefix(table_data, strlen(isa) + 1, 2); + g_array_append_vals(table_data, isa, strlen(isa) + 1); /* ISA string */ + + if (aligned_len != len) { + build_append_int_noprefix(table_data, 0x0, 1); /* Optional Padding */ + } + + /* Hart Info Node */ + for (int i = 0; i < arch_ids->len; i++) { + build_append_int_noprefix(table_data, 0xFFFF, 2); /* Type */ + build_append_int_noprefix(table_data, 16, 2); /* Length */ + build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ + build_append_int_noprefix(table_data, 1, 2); /* Number of offsets */ + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ + build_append_int_noprefix(table_data, isa_offset, 4); /* Offsets[0] */ + } + + acpi_table_end(linker, &table); +} + +/* FADT */ +static void build_fadt_rev6(GArray *table_data, + BIOSLinker *linker, + RISCVVirtState *s, + unsigned dsdt_tbl_offset) +{ + AcpiFadtData fadt = { + .rev = 6, + .minor_ver = 5, + .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI, + .xdsdt_tbl_offset = &dsdt_tbl_offset, + }; + + build_fadt(table_data, linker, &fadt, s->oem_id, s->oem_table_id); +} + +/* DSDT */ +static void build_dsdt(GArray *table_data, + BIOSLinker *linker, + RISCVVirtState *s) +{ + Aml *scope, *dsdt; + const MemMapEntry *memmap = s->memmap; + AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = s->oem_id, + .oem_table_id = s->oem_table_id }; + + + acpi_table_begin(&table, table_data); + dsdt = init_aml_allocator(); + + /* + * When booting the VM with UEFI, UEFI takes ownership of the RTC hardware. + * While UEFI can use libfdt to disable the RTC device node in the DTB that + * it passes to the OS, it cannot modify AML. Therefore, we won't generate + * the RTC ACPI device at all when using UEFI. 
+ */ + scope = aml_scope("\\_SB"); + acpi_dsdt_add_cpus(scope, s); + + acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]); + + aml_append(dsdt, scope); + + /* copy AML table into ACPI tables blob and patch header there */ + g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); + + acpi_table_end(linker, &table); + free_aml_allocator(); +} + +/* + * ACPI spec, Revision 6.5+ + * 5.2.12 Multiple APIC Description Table (MADT) + * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/15 + * https://drive.google.com/file/d/1R6k4MshhN3WTT-hwqAquu5nX6xSEqK2l/view + */ +static void build_madt(GArray *table_data, + BIOSLinker *linker, + RISCVVirtState *s) +{ + MachineClass *mc = MACHINE_GET_CLASS(s); + MachineState *ms = MACHINE(s); + const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); + + AcpiTable table = { .sig = "APIC", .rev = 6, .oem_id = s->oem_id, + .oem_table_id = s->oem_table_id }; + + acpi_table_begin(&table, table_data); + /* Local Interrupt Controller Address */ + build_append_int_noprefix(table_data, 0, 4); + build_append_int_noprefix(table_data, 0, 4); /* MADT Flags */ + + /* RISC-V Local INTC structures per HART */ + for (int i = 0; i < arch_ids->len; i++) { + riscv_acpi_madt_add_rintc(i, arch_ids, table_data); + } + + acpi_table_end(linker, &table); +} + +static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables) +{ + GArray *table_offsets; + unsigned dsdt, xsdt; + GArray *tables_blob = tables->table_data; + + table_offsets = g_array_new(false, true, + sizeof(uint32_t)); + + bios_linker_loader_alloc(tables->linker, + ACPI_BUILD_TABLE_FILE, tables_blob, + 64, false); + + /* DSDT is pointed to by FADT */ + dsdt = tables_blob->len; + build_dsdt(tables_blob, tables->linker, s); + + /* FADT and others pointed to by XSDT */ + acpi_add_table(table_offsets, tables_blob); + build_fadt_rev6(tables_blob, tables->linker, s, dsdt); + + acpi_add_table(table_offsets, tables_blob); + build_madt(tables_blob, tables->linker, s); + + acpi_add_table(table_offsets, tables_blob); + build_rhct(tables_blob, tables->linker, s); + + /* XSDT is pointed to by RSDP */ + xsdt = tables_blob->len; + build_xsdt(tables_blob, tables->linker, table_offsets, s->oem_id, + s->oem_table_id); + + /* RSDP is in FSEG memory, so allocate it separately */ + { + AcpiRsdpData rsdp_data = { + .revision = 2, + .oem_id = s->oem_id, + .xsdt_tbl_offset = &xsdt, + .rsdt_tbl_offset = NULL, + }; + build_rsdp(tables->rsdp, tables->linker, &rsdp_data); + } + + /* + * The align size is 128, warn if 64k is not enough therefore + * the align size could be resized. + */ + if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) { + warn_report("ACPI table size %u exceeds %d bytes," + " migration may not work", + tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2); + error_printf("Try removing some objects."); + } + + acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE); + + /* Clean up memory that's no longer used */ + g_array_free(table_offsets, true); +} + +static void acpi_ram_update(MemoryRegion *mr, GArray *data) +{ + uint32_t size = acpi_data_len(data); + + /* + * Make sure RAM size is correct - in case it got changed + * e.g. by migration + */ + memory_region_ram_resize(mr, size, &error_abort); + + memcpy(memory_region_get_ram_ptr(mr), data->data, size); + memory_region_set_dirty(mr, 0, size); +} + +static void virt_acpi_build_update(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + AcpiBuildTables tables; + + /* No state to update or already patched? Nothing to do. 
*/ + if (!build_state || build_state->patched) { + return; + } + + build_state->patched = true; + + acpi_build_tables_init(&tables); + + virt_acpi_build(RISCV_VIRT_MACHINE(qdev_get_machine()), &tables); + + acpi_ram_update(build_state->table_mr, tables.table_data); + acpi_ram_update(build_state->rsdp_mr, tables.rsdp); + acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob); + + acpi_build_tables_cleanup(&tables, true); +} + +static void virt_acpi_build_reset(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + build_state->patched = false; +} + +static const VMStateDescription vmstate_virt_acpi_build = { + .name = "virt_acpi_build", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(patched, AcpiBuildState), + VMSTATE_END_OF_LIST() + }, +}; + +void virt_acpi_setup(RISCVVirtState *s) +{ + AcpiBuildTables tables; + AcpiBuildState *build_state; + + build_state = g_malloc0(sizeof *build_state); + + acpi_build_tables_init(&tables); + virt_acpi_build(s, &tables); + + /* Now expose it all to Guest */ + build_state->table_mr = acpi_add_rom_blob(virt_acpi_build_update, + build_state, tables.table_data, + ACPI_BUILD_TABLE_FILE); + assert(build_state->table_mr != NULL); + + build_state->linker_mr = acpi_add_rom_blob(virt_acpi_build_update, + build_state, + tables.linker->cmd_blob, + ACPI_BUILD_LOADER_FILE); + + build_state->rsdp_mr = acpi_add_rom_blob(virt_acpi_build_update, + build_state, tables.rsdp, + ACPI_BUILD_RSDP_FILE); + + qemu_register_reset(virt_acpi_build_reset, build_state); + virt_acpi_build_reset(build_state); + vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state); + + /* + * Clean up tables but don't free the memory: we track it + * in build_state. + */ + acpi_build_tables_cleanup(&tables, false); +} diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index 86c4adc0c9..4e3efbee16 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -49,6 +49,8 @@ #include "hw/pci/pci.h" #include "hw/pci-host/gpex.h" #include "hw/display/ramfb.h" +#include "hw/acpi/aml-build.h" +#include "qapi/qapi-visit-common.h" /* * The virt machine physical address space used by some of the devices @@ -228,26 +230,41 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, int cpu; uint32_t cpu_phandle; MachineState *ms = MACHINE(s); - char *name, *cpu_name, *core_name, *intc_name; + char *name, *cpu_name, *core_name, *intc_name, *sv_name; bool is_32_bit = riscv_is_32bit(&s->soc[0]); + uint8_t satp_mode_max; for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) { + RISCVCPU *cpu_ptr = &s->soc[socket].harts[cpu]; + cpu_phandle = (*phandle)++; cpu_name = g_strdup_printf("/cpus/cpu@%d", s->soc[socket].hartid_base + cpu); qemu_fdt_add_subnode(ms->fdt, cpu_name); - if (riscv_feature(&s->soc[socket].harts[cpu].env, - RISCV_FEATURE_MMU)) { - qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", - (is_32_bit) ? 
"riscv,sv32" : "riscv,sv48"); - } else { - qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", - "riscv,none"); - } - name = riscv_isa_string(&s->soc[socket].harts[cpu]); + + satp_mode_max = satp_mode_max_from_map( + s->soc[socket].harts[cpu].cfg.satp_mode.map); + sv_name = g_strdup_printf("riscv,%s", + satp_mode_str(satp_mode_max, is_32_bit)); + qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", sv_name); + g_free(sv_name); + + + name = riscv_isa_string(cpu_ptr); qemu_fdt_setprop_string(ms->fdt, cpu_name, "riscv,isa", name); g_free(name); + + if (cpu_ptr->cfg.ext_icbom) { + qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbom-block-size", + cpu_ptr->cfg.cbom_blocksize); + } + + if (cpu_ptr->cfg.ext_icboz) { + qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cboz-block-size", + cpu_ptr->cfg.cboz_blocksize); + } + qemu_fdt_setprop_string(ms->fdt, cpu_name, "compatible", "riscv"); qemu_fdt_setprop_string(ms->fdt, cpu_name, "status", "okay"); qemu_fdt_setprop_cell(ms->fdt, cpu_name, "reg", @@ -1008,18 +1025,10 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap) uint32_t irq_pcie_phandle = 1, irq_virtio_phandle = 1; uint8_t rng_seed[32]; - if (ms->dtb) { - ms->fdt = load_device_tree(ms->dtb, &s->fdt_size); - if (!ms->fdt) { - error_report("load_device_tree() failed"); - exit(1); - } - } else { - ms->fdt = create_device_tree(&s->fdt_size); - if (!ms->fdt) { - error_report("create_device_tree() failed"); - exit(1); - } + ms->fdt = create_device_tree(&s->fdt_size); + if (!ms->fdt) { + error_report("create_device_tree() failed"); + exit(1); } qemu_fdt_setprop_string(ms->fdt, "/", "model", "riscv-virtio,qemu"); @@ -1314,6 +1323,10 @@ static void virt_machine_done(Notifier *notifier, void *data) if (kvm_enabled()) { riscv_setup_direct_kernel(kernel_entry, fdt_load_addr); } + + if (virt_is_acpi_enabled(s)) { + virt_acpi_setup(s); + } } static void virt_machine_init(MachineState *machine) @@ -1449,6 +1462,8 @@ static void virt_machine_init(MachineState *machine) ROUND_UP(virt_high_pcie_memmap.base, virt_high_pcie_memmap.size); } + s->memmap = virt_memmap; + /* register system main memory (actual RAM) */ memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base, machine->ram); @@ -1504,8 +1519,16 @@ static void virt_machine_init(MachineState *machine) } virt_flash_map(s, system_memory); - /* create device tree */ - create_fdt(s, memmap); + /* load/create device tree */ + if (machine->dtb) { + machine->fdt = load_device_tree(machine->dtb, &s->fdt_size); + if (!machine->fdt) { + error_report("load_device_tree() failed"); + exit(1); + } + } else { + create_fdt(s, memmap); + } s->machine_done.notify = virt_machine_done; qemu_add_machine_init_done_notifier(&s->machine_done); @@ -1513,6 +1536,11 @@ static void virt_machine_init(MachineState *machine) static void virt_machine_instance_init(Object *obj) { + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + + s->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); + s->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); + s->acpi = ON_OFF_AUTO_AUTO; } static char *virt_get_aia_guests(Object *obj, Error **errp) @@ -1587,6 +1615,28 @@ static void virt_set_aclint(Object *obj, bool value, Error **errp) s->have_aclint = value; } +bool virt_is_acpi_enabled(RISCVVirtState *s) +{ + return s->acpi != ON_OFF_AUTO_OFF; +} + +static void virt_get_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + OnOffAuto acpi = s->acpi; + + visit_type_OnOffAuto(v, name, &acpi, errp); +} + 
+static void virt_set_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &s->acpi, errp); +} + static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, DeviceState *dev) { @@ -1658,6 +1708,11 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) sprintf(str, "Set number of guest MMIO pages for AIA IMSIC. Valid value " "should be between 0 and %d.", VIRT_IRQCHIP_MAX_GUESTS); object_class_property_set_description(oc, "aia-guests", str); + object_class_property_add(oc, "acpi", "OnOffAuto", + virt_get_acpi, virt_set_acpi, + NULL, NULL); + object_class_property_set_description(oc, "acpi", + "Enable ACPI"); } static const TypeInfo virt_machine_typeinfo = { diff --git a/hw/rtc/m48t59-isa.c b/hw/rtc/m48t59-isa.c index e61f7ec370..5bb46f2383 100644 --- a/hw/rtc/m48t59-isa.c +++ b/hw/rtc/m48t59-isa.c @@ -47,7 +47,7 @@ struct M48txxISAState { }; struct M48txxISADeviceClass { - ISADeviceClass parent_class; + DeviceClass parent_class; M48txxInfo info; }; diff --git a/hw/rtc/m48t59.c b/hw/rtc/m48t59.c index 74345d9d90..ec3e56e84f 100644 --- a/hw/rtc/m48t59.c +++ b/hw/rtc/m48t59.c @@ -93,9 +93,9 @@ static void alarm_cb (void *opaque) qemu_set_irq(NVRAM->IRQ, 1); if ((NVRAM->buffer[0x1FF5] & 0x80) == 0 && - (NVRAM->buffer[0x1FF4] & 0x80) == 0 && - (NVRAM->buffer[0x1FF3] & 0x80) == 0 && - (NVRAM->buffer[0x1FF2] & 0x80) == 0) { + (NVRAM->buffer[0x1FF4] & 0x80) == 0 && + (NVRAM->buffer[0x1FF3] & 0x80) == 0 && + (NVRAM->buffer[0x1FF2] & 0x80) == 0) { /* Repeat once a month */ qemu_get_timedate(&tm, NVRAM->time_offset); tm.tm_mon++; @@ -105,21 +105,21 @@ static void alarm_cb (void *opaque) } next_time = qemu_timedate_diff(&tm) - NVRAM->time_offset; } else if ((NVRAM->buffer[0x1FF5] & 0x80) != 0 && - (NVRAM->buffer[0x1FF4] & 0x80) == 0 && - (NVRAM->buffer[0x1FF3] & 0x80) == 0 && - (NVRAM->buffer[0x1FF2] & 0x80) == 0) { + (NVRAM->buffer[0x1FF4] & 0x80) == 0 && + (NVRAM->buffer[0x1FF3] & 0x80) == 0 && + (NVRAM->buffer[0x1FF2] & 0x80) == 0) { /* Repeat once a day */ next_time = 24 * 60 * 60; } else if ((NVRAM->buffer[0x1FF5] & 0x80) != 0 && - (NVRAM->buffer[0x1FF4] & 0x80) != 0 && - (NVRAM->buffer[0x1FF3] & 0x80) == 0 && - (NVRAM->buffer[0x1FF2] & 0x80) == 0) { + (NVRAM->buffer[0x1FF4] & 0x80) != 0 && + (NVRAM->buffer[0x1FF3] & 0x80) == 0 && + (NVRAM->buffer[0x1FF2] & 0x80) == 0) { /* Repeat once an hour */ next_time = 60 * 60; } else if ((NVRAM->buffer[0x1FF5] & 0x80) != 0 && - (NVRAM->buffer[0x1FF4] & 0x80) != 0 && - (NVRAM->buffer[0x1FF3] & 0x80) != 0 && - (NVRAM->buffer[0x1FF2] & 0x80) == 0) { + (NVRAM->buffer[0x1FF4] & 0x80) != 0 && + (NVRAM->buffer[0x1FF3] & 0x80) != 0 && + (NVRAM->buffer[0x1FF2] & 0x80) == 0) { /* Repeat once a minute */ next_time = 60; } else { @@ -161,13 +161,13 @@ static void watchdog_cb (void *opaque) NVRAM->buffer[0x1FF0] |= 0x80; if (NVRAM->buffer[0x1FF7] & 0x80) { - NVRAM->buffer[0x1FF7] = 0x00; - NVRAM->buffer[0x1FFC] &= ~0x40; + NVRAM->buffer[0x1FF7] = 0x00; + NVRAM->buffer[0x1FFC] &= ~0x40; /* May it be a hw CPU Reset instead ? 
*/ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); } else { - qemu_set_irq(NVRAM->IRQ, 1); - qemu_set_irq(NVRAM->IRQ, 0); + qemu_set_irq(NVRAM->IRQ, 1); + qemu_set_irq(NVRAM->IRQ, 0); } } @@ -262,80 +262,80 @@ void m48t59_write(M48t59State *NVRAM, uint32_t addr, uint32_t val) case 0x1FF9: case 0x07F9: /* seconds (BCD) */ - tmp = from_bcd(val & 0x7F); - if (tmp >= 0 && tmp <= 59) { - get_time(NVRAM, &tm); - tm.tm_sec = tmp; - set_time(NVRAM, &tm); - } + tmp = from_bcd(val & 0x7F); + if (tmp >= 0 && tmp <= 59) { + get_time(NVRAM, &tm); + tm.tm_sec = tmp; + set_time(NVRAM, &tm); + } if ((val & 0x80) ^ (NVRAM->buffer[addr] & 0x80)) { - if (val & 0x80) { - NVRAM->stop_time = time(NULL); - } else { - NVRAM->time_offset += NVRAM->stop_time - time(NULL); - NVRAM->stop_time = 0; - } - } + if (val & 0x80) { + NVRAM->stop_time = time(NULL); + } else { + NVRAM->time_offset += NVRAM->stop_time - time(NULL); + NVRAM->stop_time = 0; + } + } NVRAM->buffer[addr] = val & 0x80; break; case 0x1FFA: case 0x07FA: /* minutes (BCD) */ - tmp = from_bcd(val & 0x7F); - if (tmp >= 0 && tmp <= 59) { - get_time(NVRAM, &tm); - tm.tm_min = tmp; - set_time(NVRAM, &tm); - } + tmp = from_bcd(val & 0x7F); + if (tmp >= 0 && tmp <= 59) { + get_time(NVRAM, &tm); + tm.tm_min = tmp; + set_time(NVRAM, &tm); + } break; case 0x1FFB: case 0x07FB: /* hours (BCD) */ - tmp = from_bcd(val & 0x3F); - if (tmp >= 0 && tmp <= 23) { - get_time(NVRAM, &tm); - tm.tm_hour = tmp; - set_time(NVRAM, &tm); - } + tmp = from_bcd(val & 0x3F); + if (tmp >= 0 && tmp <= 23) { + get_time(NVRAM, &tm); + tm.tm_hour = tmp; + set_time(NVRAM, &tm); + } break; case 0x1FFC: case 0x07FC: /* day of the week / century */ - tmp = from_bcd(val & 0x07); - get_time(NVRAM, &tm); - tm.tm_wday = tmp; - set_time(NVRAM, &tm); + tmp = from_bcd(val & 0x07); + get_time(NVRAM, &tm); + tm.tm_wday = tmp; + set_time(NVRAM, &tm); NVRAM->buffer[addr] = val & 0x40; break; case 0x1FFD: case 0x07FD: /* date (BCD) */ - tmp = from_bcd(val & 0x3F); - if (tmp != 0) { - get_time(NVRAM, &tm); - tm.tm_mday = tmp; - set_time(NVRAM, &tm); - } + tmp = from_bcd(val & 0x3F); + if (tmp != 0) { + get_time(NVRAM, &tm); + tm.tm_mday = tmp; + set_time(NVRAM, &tm); + } break; case 0x1FFE: case 0x07FE: /* month */ - tmp = from_bcd(val & 0x1F); - if (tmp >= 1 && tmp <= 12) { - get_time(NVRAM, &tm); - tm.tm_mon = tmp - 1; - set_time(NVRAM, &tm); - } + tmp = from_bcd(val & 0x1F); + if (tmp >= 1 && tmp <= 12) { + get_time(NVRAM, &tm); + tm.tm_mon = tmp - 1; + set_time(NVRAM, &tm); + } break; case 0x1FFF: case 0x07FF: /* year */ - tmp = from_bcd(val); - if (tmp >= 0 && tmp <= 99) { - get_time(NVRAM, &tm); + tmp = from_bcd(val); + if (tmp >= 0 && tmp <= 99) { + get_time(NVRAM, &tm); tm.tm_year = from_bcd(val) + NVRAM->base_year - 1900; - set_time(NVRAM, &tm); - } + set_time(NVRAM, &tm); + } break; default: /* Check lock registers state */ @@ -346,7 +346,7 @@ void m48t59_write(M48t59State *NVRAM, uint32_t addr, uint32_t val) do_write: if (addr < NVRAM->size) { NVRAM->buffer[addr] = val & 0xFF; - } + } break; } } @@ -367,34 +367,34 @@ uint32_t m48t59_read(M48t59State *NVRAM, uint32_t addr) switch (addr) { case 0x1FF0: /* flags register */ - goto do_read; + goto do_read; case 0x1FF1: /* unused */ - retval = 0; + retval = 0; break; case 0x1FF2: /* alarm seconds */ - goto do_read; + goto do_read; case 0x1FF3: /* alarm minutes */ - goto do_read; + goto do_read; case 0x1FF4: /* alarm hours */ - goto do_read; + goto do_read; case 0x1FF5: /* alarm date */ - goto do_read; + goto do_read; case 0x1FF6: /* interrupts 
*/ - goto do_read; + goto do_read; case 0x1FF7: - /* A read resets the watchdog */ - set_up_watchdog(NVRAM, NVRAM->buffer[0x1FF7]); - goto do_read; + /* A read resets the watchdog */ + set_up_watchdog(NVRAM, NVRAM->buffer[0x1FF7]); + goto do_read; case 0x1FF8: case 0x07F8: /* control */ - goto do_read; + goto do_read; case 0x1FF9: case 0x07F9: /* seconds (BCD) */ @@ -446,7 +446,7 @@ uint32_t m48t59_read(M48t59State *NVRAM, uint32_t addr) do_read: if (addr < NVRAM->size) { retval = NVRAM->buffer[addr]; - } + } break; } trace_m48txx_nvram_mem_read(addr, retval); diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c index ba612a151d..c27c362db9 100644 --- a/hw/rtc/mc146818rtc.c +++ b/hw/rtc/mc146818rtc.c @@ -71,19 +71,19 @@ #define RTC_ISA_BASE 0x70 -static void rtc_set_time(RTCState *s); -static void rtc_update_time(RTCState *s); -static void rtc_set_cmos(RTCState *s, const struct tm *tm); -static inline int rtc_from_bcd(RTCState *s, int a); -static uint64_t get_next_alarm(RTCState *s); +static void rtc_set_time(MC146818RtcState *s); +static void rtc_update_time(MC146818RtcState *s); +static void rtc_set_cmos(MC146818RtcState *s, const struct tm *tm); +static inline int rtc_from_bcd(MC146818RtcState *s, int a); +static uint64_t get_next_alarm(MC146818RtcState *s); -static inline bool rtc_running(RTCState *s) +static inline bool rtc_running(MC146818RtcState *s) { return (!(s->cmos_data[RTC_REG_B] & REG_B_SET) && (s->cmos_data[RTC_REG_A] & 0x70) <= 0x20); } -static uint64_t get_guest_rtc_ns(RTCState *s) +static uint64_t get_guest_rtc_ns(MC146818RtcState *s) { uint64_t guest_clock = qemu_clock_get_ns(rtc_clock); @@ -91,7 +91,7 @@ static uint64_t get_guest_rtc_ns(RTCState *s) guest_clock - s->last_update + s->offset; } -static void rtc_coalesced_timer_update(RTCState *s) +static void rtc_coalesced_timer_update(MC146818RtcState *s) { if (s->irq_coalesced == 0) { timer_del(s->coalesced_timer); @@ -104,19 +104,19 @@ static void rtc_coalesced_timer_update(RTCState *s) } } -static QLIST_HEAD(, RTCState) rtc_devices = +static QLIST_HEAD(, MC146818RtcState) rtc_devices = QLIST_HEAD_INITIALIZER(rtc_devices); void qmp_rtc_reset_reinjection(Error **errp) { - RTCState *s; + MC146818RtcState *s; QLIST_FOREACH(s, &rtc_devices, link) { s->irq_coalesced = 0; } } -static bool rtc_policy_slew_deliver_irq(RTCState *s) +static bool rtc_policy_slew_deliver_irq(MC146818RtcState *s) { kvm_reset_irq_delivered(); qemu_irq_raise(s->irq); @@ -125,7 +125,7 @@ static bool rtc_policy_slew_deliver_irq(RTCState *s) static void rtc_coalesced_timer(void *opaque) { - RTCState *s = opaque; + MC146818RtcState *s = opaque; if (s->irq_coalesced != 0) { s->cmos_data[RTC_REG_C] |= 0xc0; @@ -140,7 +140,7 @@ static void rtc_coalesced_timer(void *opaque) rtc_coalesced_timer_update(s); } -static uint32_t rtc_periodic_clock_ticks(RTCState *s) +static uint32_t rtc_periodic_clock_ticks(MC146818RtcState *s) { int period_code; @@ -157,8 +157,8 @@ static uint32_t rtc_periodic_clock_ticks(RTCState *s) * handle periodic timer. @old_period indicates the periodic timer update * is just due to period adjustment. 
*/ -static void -periodic_timer_update(RTCState *s, int64_t current_time, uint32_t old_period, bool period_change) +static void periodic_timer_update(MC146818RtcState *s, int64_t current_time, + uint32_t old_period, bool period_change) { uint32_t period; int64_t cur_clock, next_irq_clock, lost_clock = 0; @@ -234,7 +234,7 @@ periodic_timer_update(RTCState *s, int64_t current_time, uint32_t old_period, bo static void rtc_periodic_timer(void *opaque) { - RTCState *s = opaque; + MC146818RtcState *s = opaque; periodic_timer_update(s, s->next_periodic_time, s->period, false); s->cmos_data[RTC_REG_C] |= REG_C_PF; @@ -255,7 +255,7 @@ static void rtc_periodic_timer(void *opaque) } /* handle update-ended timer */ -static void check_update_timer(RTCState *s) +static void check_update_timer(MC146818RtcState *s) { uint64_t next_update_time; uint64_t guest_nsec; @@ -306,7 +306,7 @@ static void check_update_timer(RTCState *s) } } -static inline uint8_t convert_hour(RTCState *s, uint8_t hour) +static inline uint8_t convert_hour(MC146818RtcState *s, uint8_t hour) { if (!(s->cmos_data[RTC_REG_B] & REG_B_24H)) { hour %= 12; @@ -317,7 +317,7 @@ static inline uint8_t convert_hour(RTCState *s, uint8_t hour) return hour; } -static uint64_t get_next_alarm(RTCState *s) +static uint64_t get_next_alarm(MC146818RtcState *s) { int32_t alarm_sec, alarm_min, alarm_hour, cur_hour, cur_min, cur_sec; int32_t hour, min, sec; @@ -410,7 +410,7 @@ static uint64_t get_next_alarm(RTCState *s) static void rtc_update_timer(void *opaque) { - RTCState *s = opaque; + MC146818RtcState *s = opaque; int32_t irqs = REG_C_UF; int32_t new_irqs; @@ -439,7 +439,7 @@ static void rtc_update_timer(void *opaque) static void cmos_ioport_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) { - RTCState *s = opaque; + MC146818RtcState *s = opaque; uint32_t old_period; bool update_periodic_timer; @@ -557,7 +557,7 @@ static void cmos_ioport_write(void *opaque, hwaddr addr, } } -static inline int rtc_to_bcd(RTCState *s, int a) +static inline int rtc_to_bcd(MC146818RtcState *s, int a) { if (s->cmos_data[RTC_REG_B] & REG_B_DM) { return a; @@ -566,7 +566,7 @@ static inline int rtc_to_bcd(RTCState *s, int a) } } -static inline int rtc_from_bcd(RTCState *s, int a) +static inline int rtc_from_bcd(MC146818RtcState *s, int a) { if ((a & 0xc0) == 0xc0) { return -1; @@ -578,7 +578,7 @@ static inline int rtc_from_bcd(RTCState *s, int a) } } -static void rtc_get_time(RTCState *s, struct tm *tm) +static void rtc_get_time(MC146818RtcState *s, struct tm *tm) { tm->tm_sec = rtc_from_bcd(s, s->cmos_data[RTC_SECONDS]); tm->tm_min = rtc_from_bcd(s, s->cmos_data[RTC_MINUTES]); @@ -597,7 +597,7 @@ static void rtc_get_time(RTCState *s, struct tm *tm) rtc_from_bcd(s, s->cmos_data[RTC_CENTURY]) * 100 - 1900; } -static void rtc_set_time(RTCState *s) +static void rtc_set_time(MC146818RtcState *s) { struct tm tm; g_autofree const char *qom_path = object_get_canonical_path(OBJECT(s)); @@ -609,7 +609,7 @@ static void rtc_set_time(RTCState *s) qapi_event_send_rtc_change(qemu_timedate_diff(&tm), qom_path); } -static void rtc_set_cmos(RTCState *s, const struct tm *tm) +static void rtc_set_cmos(MC146818RtcState *s, const struct tm *tm) { int year; @@ -633,7 +633,7 @@ static void rtc_set_cmos(RTCState *s, const struct tm *tm) s->cmos_data[RTC_CENTURY] = rtc_to_bcd(s, year / 100); } -static void rtc_update_time(RTCState *s) +static void rtc_update_time(MC146818RtcState *s) { struct tm ret; time_t guest_sec; @@ -649,7 +649,7 @@ static void rtc_update_time(RTCState *s) } } 
-static int update_in_progress(RTCState *s) +static int update_in_progress(MC146818RtcState *s) { int64_t guest_nsec; @@ -678,7 +678,7 @@ static int update_in_progress(RTCState *s) static uint64_t cmos_ioport_read(void *opaque, hwaddr addr, unsigned size) { - RTCState *s = opaque; + MC146818RtcState *s = opaque; int ret; if ((addr & 1) == 0) { return 0xff; @@ -739,23 +739,21 @@ static uint64_t cmos_ioport_read(void *opaque, hwaddr addr, } } -void rtc_set_memory(ISADevice *dev, int addr, int val) +void mc146818rtc_set_cmos_data(MC146818RtcState *s, int addr, int val) { - RTCState *s = MC146818_RTC(dev); if (addr >= 0 && addr <= 127) s->cmos_data[addr] = val; } -int rtc_get_memory(ISADevice *dev, int addr) +int mc146818rtc_get_cmos_data(MC146818RtcState *s, int addr) { - RTCState *s = MC146818_RTC(dev); assert(addr >= 0 && addr <= 127); return s->cmos_data[addr]; } static void rtc_set_date_from_host(ISADevice *dev) { - RTCState *s = MC146818_RTC(dev); + MC146818RtcState *s = MC146818_RTC(dev); struct tm tm; qemu_get_timedate(&tm, 0); @@ -770,7 +768,7 @@ static void rtc_set_date_from_host(ISADevice *dev) static int rtc_pre_save(void *opaque) { - RTCState *s = opaque; + MC146818RtcState *s = opaque; rtc_update_time(s); @@ -779,7 +777,7 @@ static int rtc_pre_save(void *opaque) static int rtc_post_load(void *opaque, int version_id) { - RTCState *s = opaque; + MC146818RtcState *s = opaque; if (version_id <= 2 || rtc_clock == QEMU_CLOCK_REALTIME) { rtc_set_time(s); @@ -810,7 +808,7 @@ static int rtc_post_load(void *opaque, int version_id) static bool rtc_irq_reinject_on_ack_count_needed(void *opaque) { - RTCState *s = (RTCState *)opaque; + MC146818RtcState *s = (MC146818RtcState *)opaque; return s->irq_reinject_on_ack_count != 0; } @@ -820,7 +818,7 @@ static const VMStateDescription vmstate_rtc_irq_reinject_on_ack_count = { .minimum_version_id = 1, .needed = rtc_irq_reinject_on_ack_count_needed, .fields = (VMStateField[]) { - VMSTATE_UINT16(irq_reinject_on_ack_count, RTCState), + VMSTATE_UINT16(irq_reinject_on_ack_count, MC146818RtcState), VMSTATE_END_OF_LIST() } }; @@ -832,19 +830,19 @@ static const VMStateDescription vmstate_rtc = { .pre_save = rtc_pre_save, .post_load = rtc_post_load, .fields = (VMStateField[]) { - VMSTATE_BUFFER(cmos_data, RTCState), - VMSTATE_UINT8(cmos_index, RTCState), + VMSTATE_BUFFER(cmos_data, MC146818RtcState), + VMSTATE_UINT8(cmos_index, MC146818RtcState), VMSTATE_UNUSED(7*4), - VMSTATE_TIMER_PTR(periodic_timer, RTCState), - VMSTATE_INT64(next_periodic_time, RTCState), + VMSTATE_TIMER_PTR(periodic_timer, MC146818RtcState), + VMSTATE_INT64(next_periodic_time, MC146818RtcState), VMSTATE_UNUSED(3*8), - VMSTATE_UINT32_V(irq_coalesced, RTCState, 2), - VMSTATE_UINT32_V(period, RTCState, 2), - VMSTATE_UINT64_V(base_rtc, RTCState, 3), - VMSTATE_UINT64_V(last_update, RTCState, 3), - VMSTATE_INT64_V(offset, RTCState, 3), - VMSTATE_TIMER_PTR_V(update_timer, RTCState, 3), - VMSTATE_UINT64_V(next_alarm_time, RTCState, 3), + VMSTATE_UINT32_V(irq_coalesced, MC146818RtcState, 2), + VMSTATE_UINT32_V(period, MC146818RtcState, 2), + VMSTATE_UINT64_V(base_rtc, MC146818RtcState, 3), + VMSTATE_UINT64_V(last_update, MC146818RtcState, 3), + VMSTATE_INT64_V(offset, MC146818RtcState, 3), + VMSTATE_TIMER_PTR_V(update_timer, MC146818RtcState, 3), + VMSTATE_UINT64_V(next_alarm_time, MC146818RtcState, 3), VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription*[]) { @@ -857,8 +855,9 @@ static const VMStateDescription vmstate_rtc = { BIOS will read it and start S3 resume at POST Entry */ 
static void rtc_notify_suspend(Notifier *notifier, void *data) { - RTCState *s = container_of(notifier, RTCState, suspend_notifier); - rtc_set_memory(ISA_DEVICE(s), 0xF, 0xFE); + MC146818RtcState *s = container_of(notifier, MC146818RtcState, + suspend_notifier); + mc146818rtc_set_cmos_data(s, 0xF, 0xFE); } static const MemoryRegionOps cmos_ops = { @@ -873,7 +872,7 @@ static const MemoryRegionOps cmos_ops = { static void rtc_get_date(Object *obj, struct tm *current_tm, Error **errp) { - RTCState *s = MC146818_RTC(obj); + MC146818RtcState *s = MC146818_RTC(obj); rtc_update_time(s); rtc_get_time(s, current_tm); @@ -882,7 +881,7 @@ static void rtc_get_date(Object *obj, struct tm *current_tm, Error **errp) static void rtc_realizefn(DeviceState *dev, Error **errp) { ISADevice *isadev = ISA_DEVICE(dev); - RTCState *s = MC146818_RTC(dev); + MC146818RtcState *s = MC146818_RTC(dev); s->cmos_data[RTC_REG_A] = 0x26; s->cmos_data[RTC_REG_B] = 0x02; @@ -945,11 +944,12 @@ static void rtc_realizefn(DeviceState *dev, Error **errp) QLIST_INSERT_HEAD(&rtc_devices, s, link); } -ISADevice *mc146818_rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq) +MC146818RtcState *mc146818_rtc_init(ISABus *bus, int base_year, + qemu_irq intercept_irq) { DeviceState *dev; ISADevice *isadev; - RTCState *s; + MC146818RtcState *s; isadev = isa_new(TYPE_MC146818_RTC); dev = DEVICE(isadev); @@ -965,21 +965,21 @@ ISADevice *mc146818_rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq) object_property_add_alias(qdev_get_machine(), "rtc-time", OBJECT(isadev), "date"); - return isadev; + return s; } static Property mc146818rtc_properties[] = { - DEFINE_PROP_INT32("base_year", RTCState, base_year, 1980), - DEFINE_PROP_UINT16("iobase", RTCState, io_base, RTC_ISA_BASE), - DEFINE_PROP_UINT8("irq", RTCState, isairq, RTC_ISA_IRQ), - DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", RTCState, + DEFINE_PROP_INT32("base_year", MC146818RtcState, base_year, 1980), + DEFINE_PROP_UINT16("iobase", MC146818RtcState, io_base, RTC_ISA_BASE), + DEFINE_PROP_UINT8("irq", MC146818RtcState, isairq, RTC_ISA_IRQ), + DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", MC146818RtcState, lost_tick_policy, LOST_TICK_POLICY_DISCARD), DEFINE_PROP_END_OF_LIST(), }; static void rtc_reset_enter(Object *obj, ResetType type) { - RTCState *s = MC146818_RTC(obj); + MC146818RtcState *s = MC146818_RTC(obj); /* Reason: VM do suspend self will set 0xfe * Reset any values other than 0xfe(Guest suspend case) */ @@ -1000,14 +1000,14 @@ static void rtc_reset_enter(Object *obj, ResetType type) static void rtc_reset_hold(Object *obj) { - RTCState *s = MC146818_RTC(obj); + MC146818RtcState *s = MC146818_RTC(obj); qemu_irq_lower(s->irq); } static void rtc_build_aml(AcpiDevAmlIf *adev, Aml *scope) { - RTCState *s = MC146818_RTC(adev); + MC146818RtcState *s = MC146818_RTC(adev); Aml *dev; Aml *crs; @@ -1045,7 +1045,7 @@ static void rtc_class_initfn(ObjectClass *klass, void *data) static const TypeInfo mc146818rtc_info = { .name = TYPE_MC146818_RTC, .parent = TYPE_ISA_DEVICE, - .instance_size = sizeof(RTCState), + .instance_size = sizeof(MC146818RtcState), .class_init = rtc_class_initfn, .interfaces = (InterfaceInfo[]) { { TYPE_ACPI_DEV_AML_IF }, diff --git a/hw/rtc/twl92230.c b/hw/rtc/twl92230.c index e8d5eda3fc..d8534dad94 100644 --- a/hw/rtc/twl92230.c +++ b/hw/rtc/twl92230.c @@ -112,19 +112,19 @@ static void menelaus_rtc_hz(void *opaque) s->rtc.alm_sec --; s->rtc.next += 1000; timer_mod(s->rtc.hz_tm, s->rtc.next); - if ((s->rtc.ctrl >> 3) & 3) { /* EVERY */ + if 
((s->rtc.ctrl >> 3) & 3) { /* EVERY */ menelaus_rtc_update(s); if (((s->rtc.ctrl >> 3) & 3) == 1 && !s->rtc.tm.tm_sec) - s->status |= 1 << 8; /* RTCTMR */ + s->status |= 1 << 8; /* RTCTMR */ else if (((s->rtc.ctrl >> 3) & 3) == 2 && !s->rtc.tm.tm_min) - s->status |= 1 << 8; /* RTCTMR */ + s->status |= 1 << 8; /* RTCTMR */ else if (!s->rtc.tm.tm_hour) - s->status |= 1 << 8; /* RTCTMR */ + s->status |= 1 << 8; /* RTCTMR */ } else - s->status |= 1 << 8; /* RTCTMR */ - if ((s->rtc.ctrl >> 1) & 1) { /* RTC_AL_EN */ + s->status |= 1 << 8; /* RTCTMR */ + if ((s->rtc.ctrl >> 1) & 1) { /* RTC_AL_EN */ if (s->rtc.alm_sec == 0) - s->status |= 1 << 9; /* RTCALM */ + s->status |= 1 << 9; /* RTCALM */ /* TODO: wake-up */ } if (s->rtc.next_comp <= 0) { @@ -140,19 +140,19 @@ static void menelaus_reset(I2CSlave *i2c) s->reg = 0x00; - s->vcore[0] = 0x0c; /* XXX: X-loader needs 0x8c? check! */ + s->vcore[0] = 0x0c; /* XXX: X-loader needs 0x8c? check! */ s->vcore[1] = 0x05; s->vcore[2] = 0x02; s->vcore[3] = 0x0c; s->vcore[4] = 0x03; - s->dcdc[0] = 0x33; /* Depends on wiring */ + s->dcdc[0] = 0x33; /* Depends on wiring */ s->dcdc[1] = 0x03; s->dcdc[2] = 0x00; s->ldo[0] = 0x95; s->ldo[1] = 0x7e; s->ldo[2] = 0x00; - s->ldo[3] = 0x00; /* Depends on wiring */ - s->ldo[4] = 0x03; /* Depends on wiring */ + s->ldo[3] = 0x00; /* Depends on wiring */ + s->ldo[4] = 0x03; /* Depends on wiring */ s->ldo[5] = 0x00; s->ldo[6] = 0x00; s->ldo[7] = 0x00; @@ -203,70 +203,70 @@ static void menelaus_gpio_set(void *opaque, int line, int level) } if (!s->pwrbtn_state && level) { - s->status |= 1 << 11; /* PSHBTN */ + s->status |= 1 << 11; /* PSHBTN */ menelaus_update(s); } s->pwrbtn_state = level; } -#define MENELAUS_REV 0x01 -#define MENELAUS_VCORE_CTRL1 0x02 -#define MENELAUS_VCORE_CTRL2 0x03 -#define MENELAUS_VCORE_CTRL3 0x04 -#define MENELAUS_VCORE_CTRL4 0x05 -#define MENELAUS_VCORE_CTRL5 0x06 -#define MENELAUS_DCDC_CTRL1 0x07 -#define MENELAUS_DCDC_CTRL2 0x08 -#define MENELAUS_DCDC_CTRL3 0x09 -#define MENELAUS_LDO_CTRL1 0x0a -#define MENELAUS_LDO_CTRL2 0x0b -#define MENELAUS_LDO_CTRL3 0x0c -#define MENELAUS_LDO_CTRL4 0x0d -#define MENELAUS_LDO_CTRL5 0x0e -#define MENELAUS_LDO_CTRL6 0x0f -#define MENELAUS_LDO_CTRL7 0x10 -#define MENELAUS_LDO_CTRL8 0x11 -#define MENELAUS_SLEEP_CTRL1 0x12 -#define MENELAUS_SLEEP_CTRL2 0x13 -#define MENELAUS_DEVICE_OFF 0x14 -#define MENELAUS_OSC_CTRL 0x15 -#define MENELAUS_DETECT_CTRL 0x16 -#define MENELAUS_INT_MASK1 0x17 -#define MENELAUS_INT_MASK2 0x18 -#define MENELAUS_INT_STATUS1 0x19 -#define MENELAUS_INT_STATUS2 0x1a -#define MENELAUS_INT_ACK1 0x1b -#define MENELAUS_INT_ACK2 0x1c -#define MENELAUS_GPIO_CTRL 0x1d -#define MENELAUS_GPIO_IN 0x1e -#define MENELAUS_GPIO_OUT 0x1f -#define MENELAUS_BBSMS 0x20 -#define MENELAUS_RTC_CTRL 0x21 -#define MENELAUS_RTC_UPDATE 0x22 -#define MENELAUS_RTC_SEC 0x23 -#define MENELAUS_RTC_MIN 0x24 -#define MENELAUS_RTC_HR 0x25 -#define MENELAUS_RTC_DAY 0x26 -#define MENELAUS_RTC_MON 0x27 -#define MENELAUS_RTC_YR 0x28 -#define MENELAUS_RTC_WKDAY 0x29 -#define MENELAUS_RTC_AL_SEC 0x2a -#define MENELAUS_RTC_AL_MIN 0x2b -#define MENELAUS_RTC_AL_HR 0x2c -#define MENELAUS_RTC_AL_DAY 0x2d -#define MENELAUS_RTC_AL_MON 0x2e -#define MENELAUS_RTC_AL_YR 0x2f -#define MENELAUS_RTC_COMP_MSB 0x30 -#define MENELAUS_RTC_COMP_LSB 0x31 -#define MENELAUS_S1_PULL_EN 0x32 -#define MENELAUS_S1_PULL_DIR 0x33 -#define MENELAUS_S2_PULL_EN 0x34 -#define MENELAUS_S2_PULL_DIR 0x35 -#define MENELAUS_MCT_CTRL1 0x36 -#define MENELAUS_MCT_CTRL2 0x37 -#define MENELAUS_MCT_CTRL3 0x38 -#define 
MENELAUS_MCT_PIN_ST 0x39 -#define MENELAUS_DEBOUNCE1 0x3a +#define MENELAUS_REV 0x01 +#define MENELAUS_VCORE_CTRL1 0x02 +#define MENELAUS_VCORE_CTRL2 0x03 +#define MENELAUS_VCORE_CTRL3 0x04 +#define MENELAUS_VCORE_CTRL4 0x05 +#define MENELAUS_VCORE_CTRL5 0x06 +#define MENELAUS_DCDC_CTRL1 0x07 +#define MENELAUS_DCDC_CTRL2 0x08 +#define MENELAUS_DCDC_CTRL3 0x09 +#define MENELAUS_LDO_CTRL1 0x0a +#define MENELAUS_LDO_CTRL2 0x0b +#define MENELAUS_LDO_CTRL3 0x0c +#define MENELAUS_LDO_CTRL4 0x0d +#define MENELAUS_LDO_CTRL5 0x0e +#define MENELAUS_LDO_CTRL6 0x0f +#define MENELAUS_LDO_CTRL7 0x10 +#define MENELAUS_LDO_CTRL8 0x11 +#define MENELAUS_SLEEP_CTRL1 0x12 +#define MENELAUS_SLEEP_CTRL2 0x13 +#define MENELAUS_DEVICE_OFF 0x14 +#define MENELAUS_OSC_CTRL 0x15 +#define MENELAUS_DETECT_CTRL 0x16 +#define MENELAUS_INT_MASK1 0x17 +#define MENELAUS_INT_MASK2 0x18 +#define MENELAUS_INT_STATUS1 0x19 +#define MENELAUS_INT_STATUS2 0x1a +#define MENELAUS_INT_ACK1 0x1b +#define MENELAUS_INT_ACK2 0x1c +#define MENELAUS_GPIO_CTRL 0x1d +#define MENELAUS_GPIO_IN 0x1e +#define MENELAUS_GPIO_OUT 0x1f +#define MENELAUS_BBSMS 0x20 +#define MENELAUS_RTC_CTRL 0x21 +#define MENELAUS_RTC_UPDATE 0x22 +#define MENELAUS_RTC_SEC 0x23 +#define MENELAUS_RTC_MIN 0x24 +#define MENELAUS_RTC_HR 0x25 +#define MENELAUS_RTC_DAY 0x26 +#define MENELAUS_RTC_MON 0x27 +#define MENELAUS_RTC_YR 0x28 +#define MENELAUS_RTC_WKDAY 0x29 +#define MENELAUS_RTC_AL_SEC 0x2a +#define MENELAUS_RTC_AL_MIN 0x2b +#define MENELAUS_RTC_AL_HR 0x2c +#define MENELAUS_RTC_AL_DAY 0x2d +#define MENELAUS_RTC_AL_MON 0x2e +#define MENELAUS_RTC_AL_YR 0x2f +#define MENELAUS_RTC_COMP_MSB 0x30 +#define MENELAUS_RTC_COMP_LSB 0x31 +#define MENELAUS_S1_PULL_EN 0x32 +#define MENELAUS_S1_PULL_DIR 0x33 +#define MENELAUS_S2_PULL_EN 0x34 +#define MENELAUS_S2_PULL_DIR 0x35 +#define MENELAUS_MCT_CTRL1 0x36 +#define MENELAUS_MCT_CTRL2 0x37 +#define MENELAUS_MCT_CTRL3 0x38 +#define MENELAUS_MCT_PIN_ST 0x39 +#define MENELAUS_DEBOUNCE1 0x3a static uint8_t menelaus_read(void *opaque, uint8_t addr) { @@ -293,7 +293,7 @@ static uint8_t menelaus_read(void *opaque, uint8_t addr) return 0; case MENELAUS_OSC_CTRL: - return s->osc | (1 << 7); /* CLK32K_GOOD */ + return s->osc | (1 << 7); /* CLK32K_GOOD */ case MENELAUS_DETECT_CTRL: return s->detect; @@ -334,9 +334,9 @@ static uint8_t menelaus_read(void *opaque, uint8_t addr) return to_bcd(s->rtc.tm.tm_min); case MENELAUS_RTC_HR: menelaus_rtc_update(s); - if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */ + if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */ return to_bcd((s->rtc.tm.tm_hour % 12) + 1) | - (!!(s->rtc.tm.tm_hour >= 12) << 7); /* PM_nAM */ + (!!(s->rtc.tm.tm_hour >= 12) << 7); /* PM_nAM */ else return to_bcd(s->rtc.tm.tm_hour); case MENELAUS_RTC_DAY: @@ -356,7 +356,7 @@ static uint8_t menelaus_read(void *opaque, uint8_t addr) case MENELAUS_RTC_AL_MIN: return to_bcd(s->rtc.alm.tm_min); case MENELAUS_RTC_AL_HR: - if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */ + if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */ return to_bcd((s->rtc.alm.tm_hour % 12) + 1) | (!!(s->rtc.alm.tm_hour >= 12) << 7);/* AL_PM_nAM */ else @@ -541,7 +541,7 @@ static void menelaus_write(void *opaque, uint8_t addr, uint8_t value) break; case MENELAUS_RTC_CTRL: - if ((s->rtc.ctrl ^ value) & 1) { /* RTC_EN */ + if ((s->rtc.ctrl ^ value) & 1) { /* RTC_EN */ if (value & 1) menelaus_rtc_start(s); else @@ -603,7 +603,7 @@ static void menelaus_write(void *opaque, uint8_t addr, uint8_t value) default: fprintf(stderr, "%s: bad RTC_UPDATE value %02x\n", __func__, value); - s->status |= 1 
<< 10; /* RTCERR */ + s->status |= 1 << 10; /* RTCERR */ menelaus_update(s); } s->rtc.sec_offset = qemu_timedate_diff(&tm); @@ -615,7 +615,7 @@ static void menelaus_write(void *opaque, uint8_t addr, uint8_t value) s->rtc.tm.tm_min = from_bcd(value & 0x7f); break; case MENELAUS_RTC_HR: - s->rtc.tm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */ + s->rtc.tm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */ MIN(from_bcd(value & 0x3f), 12) + ((value >> 7) ? 11 : -1) : from_bcd(value & 0x3f); break; @@ -640,7 +640,7 @@ static void menelaus_write(void *opaque, uint8_t addr, uint8_t value) menelaus_alm_update(s); break; case MENELAUS_RTC_AL_HR: - s->rtc.alm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */ + s->rtc.alm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */ MIN(from_bcd(value & 0x3f), 12) + ((value >> 7) ? 11 : -1) : from_bcd(value & 0x3f); menelaus_alm_update(s); @@ -792,14 +792,14 @@ static int menelaus_post_load(void *opaque, int version_id) { MenelausState *s = opaque; - if (s->rtc.ctrl & 1) /* RTC_EN */ + if (s->rtc.ctrl & 1) /* RTC_EN */ menelaus_rtc_stop(s); s->rtc.next = s->rtc_next_vmstate; menelaus_alm_update(s); menelaus_update(s); - if (s->rtc.ctrl & 1) /* RTC_EN */ + if (s->rtc.ctrl & 1) /* RTC_EN */ menelaus_rtc_start(s); return 0; } diff --git a/hw/s390x/pv.c b/hw/s390x/pv.c index 49ea38236c..b63f3784c6 100644 --- a/hw/s390x/pv.c +++ b/hw/s390x/pv.c @@ -13,6 +13,7 @@ #include <linux/kvm.h> +#include "qemu/units.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "sysemu/kvm.h" @@ -115,7 +116,7 @@ static void *s390_pv_do_unprot_async_fn(void *p) return NULL; } -bool s390_pv_vm_try_disable_async(void) +bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) { /* * t is only needed to create the thread; once qemu_thread_create @@ -123,7 +124,12 @@ bool s390_pv_vm_try_disable_async(void) */ QemuThread t; - if (!kvm_check_extension(kvm_state, KVM_CAP_S390_PROTECTED_ASYNC_DISABLE)) { + /* + * If the feature is not present or if the VM is not larger than 2 GiB, + * KVM_PV_ASYNC_CLEANUP_PREPARE will fail; no point in attempting it. + */ + if ((MACHINE(ms)->maxram_size <= 2 * GiB) || + !kvm_check_extension(kvm_state, KVM_CAP_S390_PROTECTED_ASYNC_DISABLE)) { return false; } if (s390_pv_cmd(KVM_PV_ASYNC_CLEANUP_PREPARE, NULL) != 0) { diff --git a/hw/s390x/s390-stattrib.c b/hw/s390x/s390-stattrib.c index aed919ad7d..220e845d12 100644 --- a/hw/s390x/s390-stattrib.c +++ b/hw/s390x/s390-stattrib.c @@ -209,7 +209,7 @@ static int cmma_save(QEMUFile *f, void *opaque, int final) return -ENOMEM; } - while (final ?
1 : migration_rate_exceeded(f) == 0) { reallen = sac->get_stattr(sas, &start_gfn, buflen, buf); if (reallen < 0) { g_free(buf); diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index 503f212a31..2dece8eab8 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -245,6 +245,7 @@ static void s390_create_sclpconsole(const char *type, Chardev *chardev) static void ccw_init(MachineState *machine) { + MachineClass *mc = MACHINE_GET_CLASS(machine); int ret; VirtualCssBus *css_bus; DeviceState *dev; @@ -292,7 +293,7 @@ static void ccw_init(MachineState *machine) } /* Create VirtIO network adapters */ - s390_create_virtio_net(BUS(css_bus), "virtio-net-ccw"); + s390_create_virtio_net(BUS(css_bus), mc->default_nic); /* init consoles */ if (serial_hd(0)) { @@ -330,7 +331,7 @@ static inline void s390_do_cpu_ipl(CPUState *cs, run_on_cpu_data arg) static void s390_machine_unprotect(S390CcwMachineState *ms) { - if (!s390_pv_vm_try_disable_async()) { + if (!s390_pv_vm_try_disable_async(ms)) { s390_pv_vm_disable(); } ms->pv = false; @@ -746,6 +747,7 @@ static void ccw_machine_class_init(ObjectClass *oc, void *data) hc->unplug_request = s390_machine_device_unplug_request; nc->nmi_monitor_handler = s390_nmi; mc->default_ram_id = "s390.ram"; + mc->default_nic = "virtio-net-ccw"; object_class_property_add_bool(oc, "aes-key-wrap", machine_get_aes_key_wrap, @@ -826,14 +828,26 @@ bool css_migration_enabled(void) } \ type_init(ccw_machine_register_##suffix) +static void ccw_machine_8_1_instance_options(MachineState *machine) +{ +} + +static void ccw_machine_8_1_class_options(MachineClass *mc) +{ +} +DEFINE_CCW_MACHINE(8_1, "8.1", true); + static void ccw_machine_8_0_instance_options(MachineState *machine) { + ccw_machine_8_1_instance_options(machine); } static void ccw_machine_8_0_class_options(MachineClass *mc) { + ccw_machine_8_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len); } -DEFINE_CCW_MACHINE(8_0, "8.0", true); +DEFINE_CCW_MACHINE(8_0, "8.0", false); static void ccw_machine_7_2_instance_options(MachineState *machine) { diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index e33e5207ab..f44de1a8c1 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -237,6 +237,7 @@ static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info, return -EINVAL; } virtio_queue_set_num(vdev, index, num); + virtio_init_region_cache(vdev, index); } else if (virtio_queue_get_num(vdev, index) > num) { /* Fail if we don't have a big enough queue. */ return -EINVAL; diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c index af93557a9a..f7d45b0b20 100644 --- a/hw/scsi/lsi53c895a.c +++ b/hw/scsi/lsi53c895a.c @@ -1134,15 +1134,24 @@ static void lsi_execute_script(LSIState *s) uint32_t addr, addr_high; int opcode; int insn_processed = 0; + static int reentrancy_level; + + reentrancy_level++; s->istat1 |= LSI_ISTAT1_SRUN; again: - if (++insn_processed > LSI_MAX_INSN) { - /* Some windows drivers make the device spin waiting for a memory - location to change. If we have been executed a lot of code then - assume this is the case and force an unexpected device disconnect. - This is apparently sufficient to beat the drivers into submission. - */ + /* + * Some windows drivers make the device spin waiting for a memory location + * to change. If we have executed more than LSI_MAX_INSN instructions then + * assume this is the case and force an unexpected device disconnect. 
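The lsi53c895a hunk above pairs the existing instruction cap with a static depth counter. A minimal sketch of that guard pattern, detached from QEMU and with made-up names (run_script, step):

#include <stdbool.h>

#define MAX_REENTRANCY 8   /* arbitrary bound, mirroring the value in the diff */

static int reentrancy_level;

static void run_script(void (*step)(void), int nsteps)
{
    reentrancy_level++;
    /*
     * A step may land back in run_script() via a DMA write that hits the
     * device's own MMIO region; refuse to recurse without bound.
     */
    if (reentrancy_level <= MAX_REENTRANCY) {
        for (int i = 0; i < nsteps; i++) {
            step();
        }
    }
    reentrancy_level--;
}
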
This + * is apparently sufficient to beat the drivers into submission. + * + * Another issue (CVE-2023-0330) can occur if the script is programmed to + * trigger itself again and again. Avoid this problem by stopping after + * being called multiple times in a reentrant way (8 is an arbitrary value + * which should be enough for all valid use cases). + */ + if (++insn_processed > LSI_MAX_INSN || reentrancy_level > 8) { if (!(s->sien0 & LSI_SIST0_UDC)) { qemu_log_mask(LOG_GUEST_ERROR, "lsi_scsi: inf. loop with UDC masked"); @@ -1596,6 +1605,8 @@ again: } } trace_lsi_execute_script_stop(); + + reentrancy_level--; } static uint8_t lsi_reg_readb(LSIState *s, int offset) @@ -2302,6 +2313,13 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp) memory_region_init_io(&s->io_io, OBJECT(s), &lsi_io_ops, s, "lsi-io", 256); + /* + * Since we use the address-space API to interact with ram_io, disable the + * re-entrancy guard. + */ + s->ram_io.disable_reentrancy_guard = true; + s->mmio_io.disable_reentrancy_guard = true; + address_space_init(&s->pci_io_as, pci_address_space_io(dev), "lsi-pci-io"); qdev_init_gpio_out(d, &s->ext_irq, 1); diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c index c485da792c..3de288b454 100644 --- a/hw/scsi/mptsas.c +++ b/hw/scsi/mptsas.c @@ -1322,7 +1322,8 @@ static void mptsas_scsi_realize(PCIDevice *dev, Error **errp) } s->max_devices = MPTSAS_NUM_PORTS; - s->request_bh = qemu_bh_new(mptsas_fetch_requests, s); + s->request_bh = qemu_bh_new_guarded(mptsas_fetch_requests, s, + &DEVICE(dev)->mem_reentrancy_guard); scsi_bus_init(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info); } diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c index ceceafb2cd..f80f4cb4fc 100644 --- a/hw/scsi/scsi-bus.c +++ b/hw/scsi/scsi-bus.c @@ -60,8 +60,7 @@ static SCSIDevice *do_scsi_device_find(SCSIBus *bus, * the user access the device. */ - if (retval && !include_unrealized && - !qatomic_load_acquire(&retval->qdev.realized)) { + if (retval && !include_unrealized && !qdev_is_realized(&retval->qdev)) { retval = NULL; } @@ -193,14 +192,15 @@ static void scsi_dma_restart_cb(void *opaque, bool running, RunState state) AioContext *ctx = blk_get_aio_context(s->conf.blk); /* The reference is dropped in scsi_dma_restart_bh.*/ object_ref(OBJECT(s)); - s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s); + s->bh = aio_bh_new_guarded(ctx, scsi_dma_restart_bh, s, + &DEVICE(s)->mem_reentrancy_guard); qemu_bh_schedule(s->bh); } } static bool scsi_bus_is_address_free(SCSIBus *bus, - int channel, int target, int lun, - SCSIDevice **p_dev) + int channel, int target, int lun, + SCSIDevice **p_dev) { SCSIDevice *d; @@ -487,7 +487,8 @@ static bool scsi_target_emulate_report_luns(SCSITargetReq *r) DeviceState *qdev = kid->child; SCSIDevice *dev = SCSI_DEVICE(qdev); - if (dev->channel == channel && dev->id == id && dev->lun != 0) { + if (dev->channel == channel && dev->id == id && dev->lun != 0 && + qdev_is_realized(&dev->qdev)) { store_lun(tmp, dev->lun); g_byte_array_append(buf, tmp, 8); len += 8; @@ -1668,6 +1669,46 @@ void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense) scsi_device_set_ua(sdev, sense); } +void scsi_device_drained_begin(SCSIDevice *sdev) +{ + SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus); + if (!bus) { + return; + } + + assert(qemu_get_current_aio_context() == qemu_get_aio_context()); + assert(bus->drain_count < INT_MAX); + + /* + * Multiple BlockBackends can be on a SCSIBus and each may begin/end + * draining at any time. 
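Both bottom halves above are now created through the _guarded variants, which tie the BH to the owning device's mem_reentrancy_guard. A hedged sketch of the same idiom for a hypothetical device (MyDevState, MYDEV and mydev_work are placeholders; the QEMU calls are the ones used in the diff):

static void mydev_realize(PCIDevice *pci_dev, Error **errp)
{
    MyDevState *s = MYDEV(pci_dev);

    /*
     * The guard keeps the BH from firing while the device is already
     * executing one of its own MMIO handlers.
     */
    s->worker = qemu_bh_new_guarded(mydev_work, s,
                                    &DEVICE(pci_dev)->mem_reentrancy_guard);
}
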
Keep a counter so HBAs only see begin/end once. + */ + if (bus->drain_count++ == 0) { + trace_scsi_bus_drained_begin(bus, sdev); + if (bus->info->drained_begin) { + bus->info->drained_begin(bus); + } + } +} + +void scsi_device_drained_end(SCSIDevice *sdev) +{ + SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus); + if (!bus) { + return; + } + + assert(qemu_get_current_aio_context() == qemu_get_aio_context()); + assert(bus->drain_count > 0); + + if (bus->drain_count-- == 1) { + trace_scsi_bus_drained_end(bus, sdev); + if (bus->info->drained_end) { + bus->info->drained_end(bus); + } + } +} + static char *scsibus_get_dev_path(DeviceState *dev) { SCSIDevice *d = SCSI_DEVICE(dev); diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index 97c9b1c8cd..e0d79c7966 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -2360,6 +2360,20 @@ static void scsi_disk_reset(DeviceState *dev) s->qdev.scsi_version = s->qdev.default_scsi_version; } +static void scsi_disk_drained_begin(void *opaque) +{ + SCSIDiskState *s = opaque; + + scsi_device_drained_begin(&s->qdev); +} + +static void scsi_disk_drained_end(void *opaque) +{ + SCSIDiskState *s = opaque; + + scsi_device_drained_end(&s->qdev); +} + static void scsi_disk_resize_cb(void *opaque) { SCSIDiskState *s = opaque; @@ -2414,16 +2428,19 @@ static bool scsi_cd_is_medium_locked(void *opaque) } static const BlockDevOps scsi_disk_removable_block_ops = { - .change_media_cb = scsi_cd_change_media_cb, + .change_media_cb = scsi_cd_change_media_cb, + .drained_begin = scsi_disk_drained_begin, + .drained_end = scsi_disk_drained_end, .eject_request_cb = scsi_cd_eject_request_cb, - .is_tray_open = scsi_cd_is_tray_open, .is_medium_locked = scsi_cd_is_medium_locked, - - .resize_cb = scsi_disk_resize_cb, + .is_tray_open = scsi_cd_is_tray_open, + .resize_cb = scsi_disk_resize_cb, }; static const BlockDevOps scsi_disk_block_ops = { - .resize_cb = scsi_disk_resize_cb, + .drained_begin = scsi_disk_drained_begin, + .drained_end = scsi_disk_drained_end, + .resize_cb = scsi_disk_resize_cb, }; static void scsi_disk_unit_attention_reported(SCSIDevice *dev) diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c index ac9fa662b4..2417f0ad84 100644 --- a/hw/scsi/scsi-generic.c +++ b/hw/scsi/scsi-generic.c @@ -191,12 +191,16 @@ static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len) if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) && (r->req.cmd.buf[1] & 0x01)) { page = r->req.cmd.buf[2]; - if (page == 0xb0) { + if (page == 0xb0 && r->buflen >= 8) { + uint8_t buf[16] = {}; + uint8_t buf_used = MIN(r->buflen, 16); uint64_t max_transfer = calculate_max_transfer(s); - stl_be_p(&r->buf[8], max_transfer); - /* Also take care of the opt xfer len. 
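The scsi-generic rewrite in progress here stages the Block Limits patch-up in a 16-byte scratch buffer instead of writing into r->buf unconditionally, so an INQUIRY reply shorter than 16 bytes can no longer be overrun. The idiom in isolation (patch_block_limits and the MIN macro are illustrative; offset 8 is the maximum transfer length field of VPD page B0h):

#include <stdint.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void patch_block_limits(uint8_t *reply, size_t reply_len,
                               uint32_t max_transfer)
{
    uint8_t tmp[16] = { 0 };
    size_t used = MIN(reply_len, sizeof(tmp));

    if (used < 8) {
        return;                  /* too short to hold the field */
    }
    memcpy(tmp, reply, used);
    tmp[8] = max_transfer >> 24; /* big-endian store, cf. stl_be_p() */
    tmp[9] = max_transfer >> 16;
    tmp[10] = max_transfer >> 8;
    tmp[11] = max_transfer;
    /* copy back only the bytes the original reply actually had */
    memcpy(reply + 8, tmp + 8, used - 8);
}
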
*/ - stl_be_p(&r->buf[12], - MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12]))); + + memcpy(buf, r->buf, buf_used); + stl_be_p(&buf[8], max_transfer); + stl_be_p(&buf[12], MIN_NON_ZERO(max_transfer, ldl_be_p(&buf[12]))); + memcpy(r->buf + 8, buf + 8, buf_used - 8); + } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) { /* * Now we're capable of supplying the VPD Block Limits diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events index ab238293f0..bdd4e2c7c7 100644 --- a/hw/scsi/trace-events +++ b/hw/scsi/trace-events @@ -6,6 +6,8 @@ scsi_req_cancel(int target, int lun, int tag) "target %d lun %d tag %d" scsi_req_data(int target, int lun, int tag, int len) "target %d lun %d tag %d len %d" scsi_req_data_canceled(int target, int lun, int tag, int len) "target %d lun %d tag %d len %d" scsi_req_dequeue(int target, int lun, int tag) "target %d lun %d tag %d" +scsi_bus_drained_begin(void *bus, void *sdev) "bus %p sdev %p" +scsi_bus_drained_end(void *bus, void *sdev) "bus %p sdev %p" scsi_req_continue(int target, int lun, int tag) "target %d lun %d tag %d" scsi_req_continue_canceled(int target, int lun, int tag) "target %d lun %d tag %d" scsi_req_parsed(int target, int lun, int tag, int cmd, int mode, int xfer) "target %d lun %d tag %d command %d dir %d length %d" diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c index 20bb91766e..d55de4c8ca 100644 --- a/hw/scsi/virtio-scsi-dataplane.c +++ b/hw/scsi/virtio-scsi-dataplane.c @@ -71,12 +71,26 @@ static void virtio_scsi_dataplane_stop_bh(void *opaque) { VirtIOSCSI *s = opaque; VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); + EventNotifier *host_notifier; int i; virtio_queue_aio_detach_host_notifier(vs->ctrl_vq, s->ctx); + host_notifier = virtio_queue_get_host_notifier(vs->ctrl_vq); + + /* + * Test and clear notifier after disabling event, in case poll callback + * didn't have time to run. 
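Each virtqueue in this stop path now gets the same three-step treatment; factored into a helper it would look roughly like this (stop_one_vq is a hypothetical name, the three calls are the QEMU APIs used in the diff):

static void stop_one_vq(VirtQueue *vq, AioContext *ctx)
{
    EventNotifier *n;

    virtio_queue_aio_detach_host_notifier(vq, ctx);
    n = virtio_queue_get_host_notifier(vq);
    /* consume any final kick so it is not lost while detached */
    virtio_queue_host_notifier_read(n);
}
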
+ */ + virtio_queue_host_notifier_read(host_notifier); + virtio_queue_aio_detach_host_notifier(vs->event_vq, s->ctx); + host_notifier = virtio_queue_get_host_notifier(vs->event_vq); + virtio_queue_host_notifier_read(host_notifier); + for (i = 0; i < vs->conf.num_queues; i++) { virtio_queue_aio_detach_host_notifier(vs->cmd_vqs[i], s->ctx); + host_notifier = virtio_queue_get_host_notifier(vs->cmd_vqs[i]); + virtio_queue_host_notifier_read(host_notifier); } } @@ -144,14 +158,16 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev) s->dataplane_starting = false; s->dataplane_started = true; - aio_context_acquire(s->ctx); - virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx); - virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx); + if (s->bus.drain_count == 0) { + aio_context_acquire(s->ctx); + virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx); + virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx); - for (i = 0; i < vs->conf.num_queues; i++) { - virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx); + for (i = 0; i < vs->conf.num_queues; i++) { + virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx); + } + aio_context_release(s->ctx); } - aio_context_release(s->ctx); return 0; fail_host_notifiers: @@ -197,9 +213,9 @@ void virtio_scsi_dataplane_stop(VirtIODevice *vdev) } s->dataplane_stopping = true; - aio_context_acquire(s->ctx); - aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s); - aio_context_release(s->ctx); + if (s->bus.drain_count == 0) { + aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s); + } blk_drain_all(); /* ensure there are no in-flight requests */ diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 612c525d9d..4a8849cc7e 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -933,13 +933,27 @@ static void virtio_scsi_reset(VirtIODevice *vdev) s->events_dropped = false; } -static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev, - uint32_t event, uint32_t reason) +typedef struct { + uint32_t event; + uint32_t reason; + union { + /* Used by messages specific to a device */ + struct { + uint32_t id; + uint32_t lun; + } address; + }; +} VirtIOSCSIEventInfo; + +static void virtio_scsi_push_event(VirtIOSCSI *s, + const VirtIOSCSIEventInfo *info) { VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); VirtIOSCSIReq *req; VirtIOSCSIEvent *evt; VirtIODevice *vdev = VIRTIO_DEVICE(s); + uint32_t event = info->event; + uint32_t reason = info->reason; if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { return; @@ -965,27 +979,28 @@ static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev, memset(evt, 0, sizeof(VirtIOSCSIEvent)); evt->event = virtio_tswap32(vdev, event); evt->reason = virtio_tswap32(vdev, reason); - if (!dev) { - assert(event == VIRTIO_SCSI_T_EVENTS_MISSED); - } else { + if (event != VIRTIO_SCSI_T_EVENTS_MISSED) { evt->lun[0] = 1; - evt->lun[1] = dev->id; + evt->lun[1] = info->address.id; /* Linux wants us to keep the same encoding we use for REPORT LUNS. 
*/ - if (dev->lun >= 256) { - evt->lun[2] = (dev->lun >> 8) | 0x40; + if (info->address.lun >= 256) { + evt->lun[2] = (info->address.lun >> 8) | 0x40; } - evt->lun[3] = dev->lun & 0xFF; + evt->lun[3] = info->address.lun & 0xFF; } trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason); - + virtio_scsi_complete_req(req); } static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq) { if (s->events_dropped) { - virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); + VirtIOSCSIEventInfo info = { + .event = VIRTIO_SCSI_T_NO_EVENT, + }; + virtio_scsi_push_event(s, &info); } } @@ -1009,9 +1024,17 @@ static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense) if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) && dev->type != TYPE_ROM) { + VirtIOSCSIEventInfo info = { + .event = VIRTIO_SCSI_T_PARAM_CHANGE, + .reason = sense.asc | (sense.ascq << 8), + .address = { + .id = dev->id, + .lun = dev->lun, + }, + }; + virtio_scsi_acquire(s); - virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE, - sense.asc | (sense.ascq << 8)); + virtio_scsi_push_event(s, &info); virtio_scsi_release(s); } } @@ -1046,10 +1069,17 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, } if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { + VirtIOSCSIEventInfo info = { + .event = VIRTIO_SCSI_T_TRANSPORT_RESET, + .reason = VIRTIO_SCSI_EVT_RESET_RESCAN, + .address = { + .id = sd->id, + .lun = sd->lun, + }, + }; + virtio_scsi_acquire(s); - virtio_scsi_push_event(s, sd, - VIRTIO_SCSI_T_TRANSPORT_RESET, - VIRTIO_SCSI_EVT_RESET_RESCAN); + virtio_scsi_push_event(s, &info); scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED)); virtio_scsi_release(s); } @@ -1061,20 +1091,16 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev, VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev); VirtIOSCSI *s = VIRTIO_SCSI(vdev); SCSIDevice *sd = SCSI_DEVICE(dev); - AioContext *ctx = s->ctx ?: qemu_get_aio_context(); + VirtIOSCSIEventInfo info = { + .event = VIRTIO_SCSI_T_TRANSPORT_RESET, + .reason = VIRTIO_SCSI_EVT_RESET_REMOVED, + .address = { + .id = sd->id, + .lun = sd->lun, + }, + }; - if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { - virtio_scsi_acquire(s); - virtio_scsi_push_event(s, sd, - VIRTIO_SCSI_T_TRANSPORT_RESET, - VIRTIO_SCSI_EVT_RESET_REMOVED); - scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED)); - virtio_scsi_release(s); - } - - aio_disable_external(ctx); qdev_simple_device_unplug_cb(hotplug_dev, dev, errp); - aio_enable_external(ctx); if (s->ctx) { virtio_scsi_acquire(s); @@ -1082,6 +1108,49 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev, blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL); virtio_scsi_release(s); } + + if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { + virtio_scsi_acquire(s); + virtio_scsi_push_event(s, &info); + scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED)); + virtio_scsi_release(s); + } +} + +/* Suspend virtqueue ioeventfd processing during drain */ +static void virtio_scsi_drained_begin(SCSIBus *bus) +{ + VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus); + VirtIODevice *vdev = VIRTIO_DEVICE(s); + uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED + + s->parent_obj.conf.num_queues; + + if (!s->dataplane_started) { + return; + } + + for (uint32_t i = 0; i < total_queues; i++) { + VirtQueue *vq = virtio_get_queue(vdev, i); + virtio_queue_aio_detach_host_notifier(vq, s->ctx); + } +} + +/* Resume 
virtqueue ioeventfd processing after drain */ +static void virtio_scsi_drained_end(SCSIBus *bus) +{ + VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus); + VirtIODevice *vdev = VIRTIO_DEVICE(s); + uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED + + s->parent_obj.conf.num_queues; + + if (!s->dataplane_started) { + return; + } + + for (uint32_t i = 0; i < total_queues; i++) { + VirtQueue *vq = virtio_get_queue(vdev, i); + virtio_queue_aio_attach_host_notifier(vq, s->ctx); + } } static struct SCSIBusInfo virtio_scsi_scsi_info = { @@ -1098,6 +1167,8 @@ static struct SCSIBusInfo virtio_scsi_scsi_info = { .get_sg_list = virtio_scsi_get_sg_list, .save_request = virtio_scsi_save_request, .load_request = virtio_scsi_load_request, + .drained_begin = virtio_scsi_drained_begin, + .drained_end = virtio_scsi_drained_end, }; void virtio_scsi_common_realize(DeviceState *dev, diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c index fa76696855..4de34536e9 100644 --- a/hw/scsi/vmw_pvscsi.c +++ b/hw/scsi/vmw_pvscsi.c @@ -1184,7 +1184,8 @@ pvscsi_realizefn(PCIDevice *pci_dev, Error **errp) pcie_endpoint_cap_init(pci_dev, PVSCSI_EXP_EP_OFFSET); } - s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s); + s->completion_worker = qemu_bh_new_guarded(pvscsi_process_completion_queue, s, + &DEVICE(pci_dev)->mem_reentrancy_guard); scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info); /* override default SCSI bus hotplug-handler, with pvscsi's one */ diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c index 51e5e90830..92a0f42708 100644 --- a/hw/sd/allwinner-sdhost.c +++ b/hw/sd/allwinner-sdhost.c @@ -302,6 +302,30 @@ static void allwinner_sdhost_auto_stop(AwSdHostState *s) } } +static void read_descriptor(AwSdHostState *s, hwaddr desc_addr, + TransferDescriptor *desc) +{ + uint32_t desc_words[4]; + dma_memory_read(&s->dma_as, desc_addr, &desc_words, sizeof(desc_words), + MEMTXATTRS_UNSPECIFIED); + desc->status = le32_to_cpu(desc_words[0]); + desc->size = le32_to_cpu(desc_words[1]); + desc->addr = le32_to_cpu(desc_words[2]); + desc->next = le32_to_cpu(desc_words[3]); +} + +static void write_descriptor(AwSdHostState *s, hwaddr desc_addr, + const TransferDescriptor *desc) +{ + uint32_t desc_words[4]; + desc_words[0] = cpu_to_le32(desc->status); + desc_words[1] = cpu_to_le32(desc->size); + desc_words[2] = cpu_to_le32(desc->addr); + desc_words[3] = cpu_to_le32(desc->next); + dma_memory_write(&s->dma_as, desc_addr, &desc_words, sizeof(desc_words), + MEMTXATTRS_UNSPECIFIED); +} + static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, hwaddr desc_addr, TransferDescriptor *desc, @@ -312,9 +336,7 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, uint32_t num_bytes = max_bytes; uint8_t buf[1024]; - /* Read descriptor */ - dma_memory_read(&s->dma_as, desc_addr, desc, sizeof(*desc), - MEMTXATTRS_UNSPECIFIED); + read_descriptor(s, desc_addr, desc); if (desc->size == 0) { desc->size = klass->max_desc_size; } else if (desc->size > klass->max_desc_size) { @@ -356,8 +378,7 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, /* Clear hold flag and flush descriptor */ desc->status &= ~DESC_STATUS_HOLD; - dma_memory_write(&s->dma_as, desc_addr, desc, sizeof(*desc), - MEMTXATTRS_UNSPECIFIED); + write_descriptor(s, desc_addr, desc); return num_done; } diff --git a/hw/sd/sd.c b/hw/sd/sd.c index da5bdd134a..77a717d355 100644 --- a/hw/sd/sd.c +++ b/hw/sd/sd.c @@ -342,39 +342,39 @@ static void sd_set_scr(SDState *sd) sd->scr[7] = 0x00; } -#define 
MID 0xaa -#define OID "XY" -#define PNM "QEMU!" -#define PRV 0x01 -#define MDT_YR 2006 -#define MDT_MON 2 +#define MID 0xaa +#define OID "XY" +#define PNM "QEMU!" +#define PRV 0x01 +#define MDT_YR 2006 +#define MDT_MON 2 static void sd_set_cid(SDState *sd) { - sd->cid[0] = MID; /* Fake card manufacturer ID (MID) */ - sd->cid[1] = OID[0]; /* OEM/Application ID (OID) */ + sd->cid[0] = MID; /* Fake card manufacturer ID (MID) */ + sd->cid[1] = OID[0]; /* OEM/Application ID (OID) */ sd->cid[2] = OID[1]; - sd->cid[3] = PNM[0]; /* Fake product name (PNM) */ + sd->cid[3] = PNM[0]; /* Fake product name (PNM) */ sd->cid[4] = PNM[1]; sd->cid[5] = PNM[2]; sd->cid[6] = PNM[3]; sd->cid[7] = PNM[4]; - sd->cid[8] = PRV; /* Fake product revision (PRV) */ - sd->cid[9] = 0xde; /* Fake serial number (PSN) */ + sd->cid[8] = PRV; /* Fake product revision (PRV) */ + sd->cid[9] = 0xde; /* Fake serial number (PSN) */ sd->cid[10] = 0xad; sd->cid[11] = 0xbe; sd->cid[12] = 0xef; - sd->cid[13] = 0x00 | /* Manufacture date (MDT) */ + sd->cid[13] = 0x00 | /* Manufacture date (MDT) */ ((MDT_YR - 2000) / 10); sd->cid[14] = ((MDT_YR % 10) << 4) | MDT_MON; sd->cid[15] = (sd_crc7(sd->cid, 15) << 1) | 1; } -#define HWBLOCK_SHIFT 9 /* 512 bytes */ -#define SECTOR_SHIFT 5 /* 16 kilobytes */ -#define WPGROUP_SHIFT 7 /* 2 megs */ -#define CMULT_SHIFT 9 /* 512 times HWBLOCK_SIZE */ -#define WPGROUP_SIZE (1 << (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT)) +#define HWBLOCK_SHIFT 9 /* 512 bytes */ +#define SECTOR_SHIFT 5 /* 16 kilobytes */ +#define WPGROUP_SHIFT 7 /* 2 megs */ +#define CMULT_SHIFT 9 /* 512 times HWBLOCK_SIZE */ +#define WPGROUP_SIZE (1 << (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT)) static const uint8_t sd_csd_rw_mask[16] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -395,31 +395,31 @@ static void sd_set_csd(SDState *sd, uint64_t size) csize = (size >> (CMULT_SHIFT + hwblock_shift)) - 1; if (size <= SDSC_MAX_CAPACITY) { /* Standard Capacity SD */ - sd->csd[0] = 0x00; /* CSD structure */ - sd->csd[1] = 0x26; /* Data read access-time-1 */ - sd->csd[2] = 0x00; /* Data read access-time-2 */ + sd->csd[0] = 0x00; /* CSD structure */ + sd->csd[1] = 0x26; /* Data read access-time-1 */ + sd->csd[2] = 0x00; /* Data read access-time-2 */ sd->csd[3] = 0x32; /* Max. data transfer rate: 25 MHz */ - sd->csd[4] = 0x5f; /* Card Command Classes */ - sd->csd[5] = 0x50 | /* Max. read data block length */ + sd->csd[4] = 0x5f; /* Card Command Classes */ + sd->csd[5] = 0x50 | /* Max. read data block length */ hwblock_shift; - sd->csd[6] = 0xe0 | /* Partial block for read allowed */ + sd->csd[6] = 0xe0 | /* Partial block for read allowed */ ((csize >> 10) & 0x03); - sd->csd[7] = 0x00 | /* Device size */ + sd->csd[7] = 0x00 | /* Device size */ ((csize >> 2) & 0xff); - sd->csd[8] = 0x3f | /* Max. read current */ + sd->csd[8] = 0x3f | /* Max. read current */ ((csize << 6) & 0xc0); - sd->csd[9] = 0xfc | /* Max. write current */ + sd->csd[9] = 0xfc | /* Max. write current */ ((CMULT_SHIFT - 2) >> 1); - sd->csd[10] = 0x40 | /* Erase sector size */ + sd->csd[10] = 0x40 | /* Erase sector size */ (((CMULT_SHIFT - 2) << 7) & 0x80) | (sectsize >> 1); - sd->csd[11] = 0x00 | /* Write protect group size */ + sd->csd[11] = 0x00 | /* Write protect group size */ ((sectsize << 7) & 0x80) | wpsize; - sd->csd[12] = 0x90 | /* Write speed factor */ + sd->csd[12] = 0x90 | /* Write speed factor */ (hwblock_shift >> 2); - sd->csd[13] = 0x20 | /* Max. write data block length */ + sd->csd[13] = 0x20 | /* Max. 
write data block length */ ((hwblock_shift << 6) & 0xc0); - sd->csd[14] = 0x00; /* File format group */ - } else { /* SDHC */ + sd->csd[14] = 0x00; /* File format group */ + } else { /* SDHC */ size /= 512 * KiB; size -= 1; sd->csd[0] = 0x40; @@ -513,7 +513,7 @@ static int sd_req_crc_validate(SDRequest *req) buffer[0] = 0x40 | req->cmd; stl_be_p(&buffer[1], req->arg); return 0; - return sd_crc7(buffer, 5) != req->crc; /* TODO */ + return sd_crc7(buffer, 5) != req->crc; /* TODO */ } static void sd_response_r1_make(SDState *sd, uint8_t *response) @@ -851,19 +851,19 @@ static void sd_function_switch(SDState *sd, uint32_t arg) int i, mode, new_func; mode = !!(arg & 0x80000000); - sd->data[0] = 0x00; /* Maximum current consumption */ + sd->data[0] = 0x00; /* Maximum current consumption */ sd->data[1] = 0x01; - sd->data[2] = 0x80; /* Supported group 6 functions */ + sd->data[2] = 0x80; /* Supported group 6 functions */ sd->data[3] = 0x01; - sd->data[4] = 0x80; /* Supported group 5 functions */ + sd->data[4] = 0x80; /* Supported group 5 functions */ sd->data[5] = 0x01; - sd->data[6] = 0x80; /* Supported group 4 functions */ + sd->data[6] = 0x80; /* Supported group 4 functions */ sd->data[7] = 0x01; - sd->data[8] = 0x80; /* Supported group 3 functions */ + sd->data[8] = 0x80; /* Supported group 3 functions */ sd->data[9] = 0x01; - sd->data[10] = 0x80; /* Supported group 2 functions */ + sd->data[10] = 0x80; /* Supported group 2 functions */ sd->data[11] = 0x43; - sd->data[12] = 0x80; /* Supported group 1 functions */ + sd->data[12] = 0x80; /* Supported group 1 functions */ sd->data[13] = 0x03; memset(&sd->data[14], 0, 3); @@ -1001,7 +1001,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) switch (req.cmd) { /* Basic commands (Class 0 and Class 1) */ - case 0: /* CMD0: GO_IDLE_STATE */ + case 0: /* CMD0: GO_IDLE_STATE */ switch (sd->state) { case sd_inactive_state: return sd->spi ? 
sd_r1 : sd_r0; @@ -1013,14 +1013,14 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 1: /* CMD1: SEND_OP_CMD */ + case 1: /* CMD1: SEND_OP_CMD */ if (!sd->spi) goto bad_cmd; sd->state = sd_transfer_state; return sd_r1; - case 2: /* CMD2: ALL_SEND_CID */ + case 2: /* CMD2: ALL_SEND_CID */ if (sd->spi) goto bad_cmd; switch (sd->state) { @@ -1033,7 +1033,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 3: /* CMD3: SEND_RELATIVE_ADDR */ + case 3: /* CMD3: SEND_RELATIVE_ADDR */ if (sd->spi) goto bad_cmd; switch (sd->state) { @@ -1048,7 +1048,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 4: /* CMD4: SEND_DSR */ + case 4: /* CMD4: SEND_DSR */ if (sd->spi) goto bad_cmd; switch (sd->state) { @@ -1063,7 +1063,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) case 5: /* CMD5: reserved for SDIO cards */ return sd_illegal; - case 6: /* CMD6: SWITCH_FUNCTION */ + case 6: /* CMD6: SWITCH_FUNCTION */ switch (sd->mode) { case sd_data_transfer_mode: sd_function_switch(sd, req.arg); @@ -1077,7 +1077,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 7: /* CMD7: SELECT/DESELECT_CARD */ + case 7: /* CMD7: SELECT/DESELECT_CARD */ if (sd->spi) goto bad_cmd; switch (sd->state) { @@ -1115,7 +1115,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 8: /* CMD8: SEND_IF_COND */ + case 8: /* CMD8: SEND_IF_COND */ if (sd->spec_version < SD_PHY_SPECv2_00_VERS) { break; } @@ -1133,7 +1133,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) sd->vhs = req.arg; return sd_r7; - case 9: /* CMD9: SEND_CSD */ + case 9: /* CMD9: SEND_CSD */ switch (sd->state) { case sd_standby_state: if (sd->rca != rca) @@ -1155,7 +1155,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 10: /* CMD10: SEND_CID */ + case 10: /* CMD10: SEND_CID */ switch (sd->state) { case sd_standby_state: if (sd->rca != rca) @@ -1177,7 +1177,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 12: /* CMD12: STOP_TRANSMISSION */ + case 12: /* CMD12: STOP_TRANSMISSION */ switch (sd->state) { case sd_sendingdata_state: sd->state = sd_transfer_state; @@ -1194,7 +1194,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 13: /* CMD13: SEND_STATUS */ + case 13: /* CMD13: SEND_STATUS */ switch (sd->mode) { case sd_data_transfer_mode: if (!sd->spi && sd->rca != rca) { @@ -1208,7 +1208,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 15: /* CMD15: GO_INACTIVE_STATE */ + case 15: /* CMD15: GO_INACTIVE_STATE */ if (sd->spi) goto bad_cmd; switch (sd->mode) { @@ -1225,7 +1225,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) break; /* Block read commands (Classs 2) */ - case 16: /* CMD16: SET_BLOCKLEN */ + case 16: /* CMD16: SET_BLOCKLEN */ switch (sd->state) { case sd_transfer_state: if (req.arg > (1 << HWBLOCK_SHIFT)) { @@ -1242,8 +1242,8 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 17: /* CMD17: READ_SINGLE_BLOCK */ - case 18: /* CMD18: READ_MULTIPLE_BLOCK */ + case 17: /* CMD17: READ_SINGLE_BLOCK */ + case 18: /* CMD18: READ_MULTIPLE_BLOCK */ switch (sd->state) { case sd_transfer_state: @@ -1287,8 +1287,8 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) break; /* Block write commands (Class 4) */ - case 24: /* CMD24: 
WRITE_SINGLE_BLOCK */ - case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */ + case 24: /* CMD24: WRITE_SINGLE_BLOCK */ + case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */ switch (sd->state) { case sd_transfer_state: @@ -1316,7 +1316,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 26: /* CMD26: PROGRAM_CID */ + case 26: /* CMD26: PROGRAM_CID */ if (sd->spi) goto bad_cmd; switch (sd->state) { @@ -1331,7 +1331,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 27: /* CMD27: PROGRAM_CSD */ + case 27: /* CMD27: PROGRAM_CSD */ switch (sd->state) { case sd_transfer_state: sd->state = sd_receivingdata_state; @@ -1345,7 +1345,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) break; /* Write protection (Class 6) */ - case 28: /* CMD28: SET_WRITE_PROT */ + case 28: /* CMD28: SET_WRITE_PROT */ if (sd->size > SDSC_MAX_CAPACITY) { return sd_illegal; } @@ -1367,7 +1367,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 29: /* CMD29: CLR_WRITE_PROT */ + case 29: /* CMD29: CLR_WRITE_PROT */ if (sd->size > SDSC_MAX_CAPACITY) { return sd_illegal; } @@ -1389,7 +1389,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 30: /* CMD30: SEND_WRITE_PROT */ + case 30: /* CMD30: SEND_WRITE_PROT */ if (sd->size > SDSC_MAX_CAPACITY) { return sd_illegal; } @@ -1413,7 +1413,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) break; /* Erase commands (Class 5) */ - case 32: /* CMD32: ERASE_WR_BLK_START */ + case 32: /* CMD32: ERASE_WR_BLK_START */ switch (sd->state) { case sd_transfer_state: sd->erase_start = req.arg; @@ -1424,7 +1424,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 33: /* CMD33: ERASE_WR_BLK_END */ + case 33: /* CMD33: ERASE_WR_BLK_END */ switch (sd->state) { case sd_transfer_state: sd->erase_end = req.arg; @@ -1435,7 +1435,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) } break; - case 38: /* CMD38: ERASE */ + case 38: /* CMD38: ERASE */ switch (sd->state) { case sd_transfer_state: if (sd->csd[14] & 0x30) { @@ -1455,7 +1455,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) break; /* Lock card commands (Class 7) */ - case 42: /* CMD42: LOCK_UNLOCK */ + case 42: /* CMD42: LOCK_UNLOCK */ switch (sd->state) { case sd_transfer_state: sd->state = sd_receivingdata_state; @@ -1478,7 +1478,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) return sd_illegal; /* Application specific commands (Class 8) */ - case 55: /* CMD55: APP_CMD */ + case 55: /* CMD55: APP_CMD */ switch (sd->state) { case sd_ready_state: case sd_identification_state: @@ -1501,7 +1501,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) sd->card_status |= APP_CMD; return sd_r1; - case 56: /* CMD56: GEN_CMD */ + case 56: /* CMD56: GEN_CMD */ switch (sd->state) { case sd_transfer_state: sd->data_offset = 0; @@ -1546,7 +1546,7 @@ static sd_rsp_type_t sd_app_command(SDState *sd, req.cmd, req.arg, sd_state_name(sd->state)); sd->card_status |= APP_CMD; switch (req.cmd) { - case 6: /* ACMD6: SET_BUS_WIDTH */ + case 6: /* ACMD6: SET_BUS_WIDTH */ if (sd->spi) { goto unimplemented_spi_cmd; } @@ -1561,7 +1561,7 @@ static sd_rsp_type_t sd_app_command(SDState *sd, } break; - case 13: /* ACMD13: SD_STATUS */ + case 13: /* ACMD13: SD_STATUS */ switch (sd->state) { case sd_transfer_state: sd->state = sd_sendingdata_state; @@ -1574,7 +1574,7 @@ static sd_rsp_type_t 
sd_app_command(SDState *sd, } break; - case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */ + case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */ switch (sd->state) { case sd_transfer_state: *(uint32_t *) sd->data = sd->blk_written; @@ -1589,7 +1589,7 @@ static sd_rsp_type_t sd_app_command(SDState *sd, } break; - case 23: /* ACMD23: SET_WR_BLK_ERASE_COUNT */ + case 23: /* ACMD23: SET_WR_BLK_ERASE_COUNT */ switch (sd->state) { case sd_transfer_state: return sd_r1; @@ -1599,7 +1599,7 @@ static sd_rsp_type_t sd_app_command(SDState *sd, } break; - case 41: /* ACMD41: SD_APP_OP_COND */ + case 41: /* ACMD41: SD_APP_OP_COND */ if (sd->spi) { /* SEND_OP_CMD */ sd->state = sd_transfer_state; @@ -1641,7 +1641,7 @@ static sd_rsp_type_t sd_app_command(SDState *sd, return sd_r3; - case 42: /* ACMD42: SET_CLR_CARD_DETECT */ + case 42: /* ACMD42: SET_CLR_CARD_DETECT */ switch (sd->state) { case sd_transfer_state: /* Bringing in the 50KOhm pull-up resistor... Done. */ @@ -1652,7 +1652,7 @@ static sd_rsp_type_t sd_app_command(SDState *sd, } break; - case 51: /* ACMD51: SEND_SCR */ + case 51: /* ACMD51: SEND_SCR */ switch (sd->state) { case sd_transfer_state: sd->state = sd_sendingdata_state; @@ -1840,7 +1840,7 @@ void sd_write_byte(SDState *sd, uint8_t value) sd_acmd_name(sd->current_cmd), sd->current_cmd, value); switch (sd->current_cmd) { - case 24: /* CMD24: WRITE_SINGLE_BLOCK */ + case 24: /* CMD24: WRITE_SINGLE_BLOCK */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sd->blk_len) { /* TODO: Check CRC before committing */ @@ -1853,7 +1853,7 @@ void sd_write_byte(SDState *sd, uint8_t value) } break; - case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */ + case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */ if (sd->data_offset == 0) { /* Start of the block - let's check the address is valid */ if (!address_in_range(sd, "WRITE_MULTIPLE_BLOCK", @@ -1890,7 +1890,7 @@ void sd_write_byte(SDState *sd, uint8_t value) } break; - case 26: /* CMD26: PROGRAM_CID */ + case 26: /* CMD26: PROGRAM_CID */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sizeof(sd->cid)) { /* TODO: Check CRC before committing */ @@ -1909,7 +1909,7 @@ void sd_write_byte(SDState *sd, uint8_t value) } break; - case 27: /* CMD27: PROGRAM_CSD */ + case 27: /* CMD27: PROGRAM_CSD */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sizeof(sd->csd)) { /* TODO: Check CRC before committing */ @@ -1933,7 +1933,7 @@ void sd_write_byte(SDState *sd, uint8_t value) } break; - case 42: /* CMD42: LOCK_UNLOCK */ + case 42: /* CMD42: LOCK_UNLOCK */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sd->blk_len) { /* TODO: Check CRC before committing */ @@ -1944,7 +1944,7 @@ void sd_write_byte(SDState *sd, uint8_t value) } break; - case 56: /* CMD56: GEN_CMD */ + case 56: /* CMD56: GEN_CMD */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sd->blk_len) { APP_WRITE_BLOCK(sd->data_start, sd->data_offset); @@ -1996,29 +1996,29 @@ uint8_t sd_read_byte(SDState *sd) sd_acmd_name(sd->current_cmd), sd->current_cmd, io_len); switch (sd->current_cmd) { - case 6: /* CMD6: SWITCH_FUNCTION */ + case 6: /* CMD6: SWITCH_FUNCTION */ ret = sd->data[sd->data_offset ++]; if (sd->data_offset >= 64) sd->state = sd_transfer_state; break; - case 9: /* CMD9: SEND_CSD */ - case 10: /* CMD10: SEND_CID */ + case 9: /* CMD9: SEND_CSD */ + case 10: /* CMD10: SEND_CID */ ret = sd->data[sd->data_offset ++]; if (sd->data_offset >= 16) sd->state = sd_transfer_state; break; - case 13: /* ACMD13: SD_STATUS */ + case 13: /* ACMD13: SD_STATUS */ ret = 
sd->sd_status[sd->data_offset ++]; if (sd->data_offset >= sizeof(sd->sd_status)) sd->state = sd_transfer_state; break; - case 17: /* CMD17: READ_SINGLE_BLOCK */ + case 17: /* CMD17: READ_SINGLE_BLOCK */ if (sd->data_offset == 0) BLK_READ_BLOCK(sd->data_start, io_len); ret = sd->data[sd->data_offset ++]; @@ -2027,7 +2027,7 @@ uint8_t sd_read_byte(SDState *sd) sd->state = sd_transfer_state; break; - case 18: /* CMD18: READ_MULTIPLE_BLOCK */ + case 18: /* CMD18: READ_MULTIPLE_BLOCK */ if (sd->data_offset == 0) { if (!address_in_range(sd, "READ_MULTIPLE_BLOCK", sd->data_start, io_len)) { @@ -2058,28 +2058,28 @@ uint8_t sd_read_byte(SDState *sd) ret = sd_tuning_block_pattern[sd->data_offset++]; break; - case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */ + case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */ ret = sd->data[sd->data_offset ++]; if (sd->data_offset >= 4) sd->state = sd_transfer_state; break; - case 30: /* CMD30: SEND_WRITE_PROT */ + case 30: /* CMD30: SEND_WRITE_PROT */ ret = sd->data[sd->data_offset ++]; if (sd->data_offset >= 4) sd->state = sd_transfer_state; break; - case 51: /* ACMD51: SEND_SCR */ + case 51: /* ACMD51: SEND_SCR */ ret = sd->scr[sd->data_offset ++]; if (sd->data_offset >= sizeof(sd->scr)) sd->state = sd_transfer_state; break; - case 56: /* CMD56: GEN_CMD */ + case 56: /* CMD56: GEN_CMD */ if (sd->data_offset == 0) APP_READ_BLOCK(sd->data_start, sd->blk_len); ret = sd->data[sd->data_offset ++]; diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c index 39fc4f19d9..4944994e9c 100644 --- a/hw/sh4/r2d.c +++ b/hw/sh4/r2d.c @@ -38,7 +38,7 @@ #include "hw/qdev-properties.h" #include "net/net.h" #include "sh7750_regs.h" -#include "hw/ide.h" +#include "hw/ide/mmio.h" #include "hw/irq.h" #include "hw/loader.h" #include "hw/usb.h" @@ -232,6 +232,7 @@ static void r2d_init(MachineState *machine) const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; + MachineClass *mc = MACHINE_GET_CLASS(machine); SuperHCPU *cpu; CPUSH4State *env; ResetData *reset_info; @@ -274,7 +275,7 @@ static void r2d_init(MachineState *machine) dev = qdev_new("sysbus-sm501"); busdev = SYS_BUS_DEVICE(dev); qdev_prop_set_uint32(dev, "vram-size", SM501_VRAM_SIZE); - qdev_prop_set_uint32(dev, "base", 0x10000000); + qdev_prop_set_uint64(dev, "dma-offset", 0x10000000); qdev_prop_set_chr(dev, "chardev", serial_hd(2)); sysbus_realize_and_unref(busdev, &error_fatal); sysbus_mmio_map(busdev, 0, 0x10000000); @@ -310,7 +311,7 @@ static void r2d_init(MachineState *machine) /* NIC: rtl8139 on-board, and 2 slots. */ for (i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], pci_bus, - "rtl8139", i == 0 ? "2" : NULL); + mc->default_nic, i == 0 ? "2" : NULL); /* USB keyboard */ usb_create_simple(usb_bus_find(-1), "usb-kbd"); @@ -375,6 +376,7 @@ static void r2d_machine_init(MachineClass *mc) mc->init = r2d_init; mc->block_default_type = IF_IDE; mc->default_cpu_type = TYPE_SH7751R_CPU; + mc->default_nic = "rtl8139"; } DEFINE_MACHINE("r2d", r2d_machine_init) diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index 4869566cf5..d2007e70fb 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -750,14 +750,16 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance) t->core_count = (ms->smp.cores > 255) ? 0xFF : ms->smp.cores; t->core_enabled = t->core_count; - t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores); - t->thread_count = (ms->smp.threads > 255) ? 
0xFF : ms->smp.threads; - t->thread_count2 = cpu_to_le16(ms->smp.threads); t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */ t->processor_family2 = cpu_to_le16(0x01); /* Other */ + if (tbl_len == SMBIOS_TYPE_4_LEN_V30) { + t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores); + t->thread_count2 = cpu_to_le16(ms->smp.threads); + } + SMBIOS_BUILD_TABLE_POST; smbios_type4_count++; } diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c index 387181ff77..e2858a0331 100644 --- a/hw/sparc64/sun4u.c +++ b/hw/sparc64/sun4u.c @@ -28,9 +28,9 @@ #include "qapi/error.h" #include "qemu/datadir.h" #include "cpu.h" +#include "hw/irq.h" #include "hw/pci/pci.h" #include "hw/pci/pci_bridge.h" -#include "hw/pci/pci_bus.h" #include "hw/pci/pci_host.h" #include "hw/qdev-properties.h" #include "hw/pci-host/sabre.h" @@ -84,7 +84,8 @@ struct EbusState { PCIDevice parent_obj; ISABus *isa_bus; - qemu_irq isa_bus_irqs[ISA_NUM_IRQS]; + qemu_irq *isa_irqs_in; + qemu_irq isa_irqs_out[ISA_NUM_IRQS]; uint64_t console_serial_base; MemoryRegion bar0; MemoryRegion bar1; @@ -287,7 +288,7 @@ static const TypeInfo power_info = { static void ebus_isa_irq_handler(void *opaque, int n, int level) { EbusState *s = EBUS(opaque); - qemu_irq irq = s->isa_bus_irqs[n]; + qemu_irq irq = s->isa_irqs_out[n]; /* Pass ISA bus IRQs onto their gpio equivalent */ trace_ebus_isa_irq_handler(n, level); @@ -303,7 +304,6 @@ static void ebus_realize(PCIDevice *pci_dev, Error **errp) ISADevice *isa_dev; SysBusDevice *sbd; DeviceState *dev; - qemu_irq *isa_irq; DriveInfo *fd[MAX_FD]; int i; @@ -315,9 +315,9 @@ static void ebus_realize(PCIDevice *pci_dev, Error **errp) } /* ISA bus */ - isa_irq = qemu_allocate_irqs(ebus_isa_irq_handler, s, ISA_NUM_IRQS); - isa_bus_irqs(s->isa_bus, isa_irq); - qdev_init_gpio_out_named(DEVICE(s), s->isa_bus_irqs, "isa-irq", + s->isa_irqs_in = qemu_allocate_irqs(ebus_isa_irq_handler, s, ISA_NUM_IRQS); + isa_bus_register_input_irqs(s->isa_bus, s->isa_irqs_in); + qdev_init_gpio_out_named(DEVICE(s), s->isa_irqs_out, "isa-irq", ISA_NUM_IRQS); /* Serial ports */ @@ -553,6 +553,7 @@ static void sun4uv_init(MemoryRegion *address_space_mem, MachineState *machine, const struct hwdef *hwdef) { + MachineClass *mc = MACHINE_GET_CLASS(machine); SPARCCPU *cpu; Nvram *nvram; unsigned int i; @@ -607,9 +608,9 @@ static void sun4uv_init(MemoryRegion *address_space_mem, /* Only in-built Simba APBs can exist on the root bus, slot 0 on busA is reserved (leaving no slots free after on-board devices) however slots 0-3 are free on busB */ - pci_bus->slot_reserved_mask = 0xfffffffc; - pci_busA->slot_reserved_mask = 0xfffffff1; - pci_busB->slot_reserved_mask = 0xfffffff0; + pci_bus_set_slot_reserved_mask(pci_bus, 0xfffffffc); + pci_bus_set_slot_reserved_mask(pci_busA, 0xfffffff1); + pci_bus_set_slot_reserved_mask(pci_busB, 0xfffffff0); ebus = pci_new_multifunction(PCI_DEVFN(1, 0), true, TYPE_EBUS); qdev_prop_set_uint64(DEVICE(ebus), "console-serial-base", @@ -645,15 +646,15 @@ static void sun4uv_init(MemoryRegion *address_space_mem, PCIBus *bus; nd = &nd_table[i]; - if (!nd->model || strcmp(nd->model, "sunhme") == 0) { + if (!nd->model || strcmp(nd->model, mc->default_nic) == 0) { if (!onboard_nic) { pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1), - true, "sunhme"); + true, mc->default_nic); bus = pci_busA; memcpy(&macaddr, &nd->macaddr.a, sizeof(MACAddr)); onboard_nic = true; } else { - pci_dev = pci_new(-1, "sunhme"); + pci_dev = pci_new(-1, mc->default_nic); bus = pci_busB; } } else { @@ -816,6 +817,8 @@ static void 
sun4u_class_init(ObjectClass *oc, void *data) mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-UltraSparc-IIi"); mc->ignore_boot_device_suffixes = true; mc->default_display = "std"; + mc->default_nic = "sunhme"; + mc->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); fwc->get_dev_path = sun4u_fw_dev_path; } @@ -840,6 +843,8 @@ static void sun4v_class_init(ObjectClass *oc, void *data) mc->default_boot_order = "c"; mc->default_cpu_type = SPARC_CPU_TYPE_NAME("Sun-UltraSparc-T1"); mc->default_display = "std"; + mc->default_nic = "sunhme"; + mc->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); } static const TypeInfo sun4v_type = { diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c index 22df4be528..7281169322 100644 --- a/hw/ssi/aspeed_smc.c +++ b/hw/ssi/aspeed_smc.c @@ -1134,10 +1134,7 @@ static void aspeed_smc_realize(DeviceState *dev, Error **errp) /* Setup cs_lines for peripherals */ s->cs_lines = g_new0(qemu_irq, asc->cs_num_max); - - for (i = 0; i < asc->cs_num_max; ++i) { - sysbus_init_irq(sbd, &s->cs_lines[i]); - } + qdev_init_gpio_out_named(DEVICE(s), s->cs_lines, "cs", asc->cs_num_max); /* The memory region for the controller registers */ memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s, diff --git a/hw/ssi/xilinx_spi.c b/hw/ssi/xilinx_spi.c index 552927622f..d4de2e7aab 100644 --- a/hw/ssi/xilinx_spi.c +++ b/hw/ssi/xilinx_spi.c @@ -156,6 +156,7 @@ static void xlx_spi_do_reset(XilinxSPI *s) txfifo_reset(s); s->regs[R_SPISSR] = ~0; + s->regs[R_SPICR] = R_SPICR_MTI; xlx_spi_update_irq(s); xlx_spi_update_cs(s); } diff --git a/hw/timer/exynos4210_mct.c b/hw/timer/exynos4210_mct.c index c17b247da3..446bbd2b96 100644 --- a/hw/timer/exynos4210_mct.c +++ b/hw/timer/exynos4210_mct.c @@ -480,11 +480,14 @@ static int32_t exynos4210_gcomp_find(Exynos4210MCTState *s) res = min_comp_i; } - DPRINTF("found comparator %d: comp 0x%llx distance 0x%llx, gfrc 0x%llx\n", - res, - s->g_timer.reg.comp[res], - distance_min, - gfrc); + if (res >= 0) { + DPRINTF("found comparator %d: " + "comp 0x%llx distance 0x%llx, gfrc 0x%llx\n", + res, + s->g_timer.reg.comp[res], + distance_min, + gfrc); + } return res; } diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c index 9520471be2..6998094233 100644 --- a/hw/timer/hpet.c +++ b/hw/timer/hpet.c @@ -30,6 +30,7 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/timer.h" +#include "hw/qdev-properties.h" #include "hw/timer/hpet.h" #include "hw/sysbus.h" #include "hw/rtc/mc146818rtc.h" @@ -352,6 +353,16 @@ static const VMStateDescription vmstate_hpet = { } }; +static void hpet_arm(HPETTimer *t, uint64_t ticks) +{ + if (ticks < ns_to_ticks(INT64_MAX / 2)) { + timer_mod(t->qemu_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ticks_to_ns(ticks)); + } else { + timer_del(t->qemu_timer); + } +} + /* * timer expiration callback */ @@ -374,13 +385,11 @@ static void hpet_timer(void *opaque) } } diff = hpet_calculate_diff(t, cur_tick); - timer_mod(t->qemu_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff)); + hpet_arm(t, diff); } else if (t->config & HPET_TN_32BIT && !timer_is_periodic(t)) { if (t->wrap_flag) { diff = hpet_calculate_diff(t, cur_tick); - timer_mod(t->qemu_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - (int64_t)ticks_to_ns(diff)); + hpet_arm(t, diff); t->wrap_flag = 0; } } @@ -407,8 +416,7 @@ static void hpet_set_timer(HPETTimer *t) t->wrap_flag = 1; } } - timer_mod(t->qemu_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff)); + hpet_arm(t, diff); } static 
void hpet_del_timer(HPETTimer *t) diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c index 3a869782bc..640e4399c2 100644 --- a/hw/timer/imx_epit.c +++ b/hw/timer/imx_epit.c @@ -179,7 +179,7 @@ static void imx_epit_update_compare_timer(IMXEPITState *s) * the compare value. Otherwise it may fire at most once in the * current round. */ - bool is_oneshot = (limit >= s->cmp); + is_oneshot = (limit < s->cmp); if (counter >= s->cmp) { /* The compare timer fires in the current round. */ counter -= s->cmp; diff --git a/hw/tpm/Kconfig b/hw/tpm/Kconfig index 29e82f3c92..a46663288c 100644 --- a/hw/tpm/Kconfig +++ b/hw/tpm/Kconfig @@ -1,3 +1,10 @@ +config TPM_TIS_I2C + bool + depends on TPM + select TPM_BACKEND + select I2C + select TPM_TIS + config TPM_TIS_ISA bool depends on TPM && ISA_BUS diff --git a/hw/tpm/meson.build b/hw/tpm/meson.build index 7abc2d794a..76fe3cb098 100644 --- a/hw/tpm/meson.build +++ b/hw/tpm/meson.build @@ -1,6 +1,7 @@ softmmu_ss.add(when: 'CONFIG_TPM_TIS', if_true: files('tpm_tis_common.c')) softmmu_ss.add(when: 'CONFIG_TPM_TIS_ISA', if_true: files('tpm_tis_isa.c')) softmmu_ss.add(when: 'CONFIG_TPM_TIS_SYSBUS', if_true: files('tpm_tis_sysbus.c')) +softmmu_ss.add(when: 'CONFIG_TPM_TIS_I2C', if_true: files('tpm_tis_i2c.c')) softmmu_ss.add(when: 'CONFIG_TPM_CRB', if_true: files('tpm_crb.c')) softmmu_ss.add(when: 'CONFIG_TPM_TIS', if_true: files('tpm_ppi.c')) softmmu_ss.add(when: 'CONFIG_TPM_CRB', if_true: files('tpm_ppi.c')) diff --git a/hw/tpm/tpm_tis.h b/hw/tpm/tpm_tis.h index f6b5872ba6..6f29a508dd 100644 --- a/hw/tpm/tpm_tis.h +++ b/hw/tpm/tpm_tis.h @@ -86,5 +86,8 @@ int tpm_tis_pre_save(TPMState *s); void tpm_tis_reset(TPMState *s); enum TPMVersion tpm_tis_get_tpm_version(TPMState *s); void tpm_tis_request_completed(TPMState *s, int ret); +uint32_t tpm_tis_read_data(TPMState *s, hwaddr addr, unsigned size); +void tpm_tis_write_data(TPMState *s, hwaddr addr, uint64_t val, uint32_t size); +uint16_t tpm_tis_get_checksum(TPMState *s); #endif /* TPM_TPM_TIS_H */ diff --git a/hw/tpm/tpm_tis_common.c b/hw/tpm/tpm_tis_common.c index 503be2a541..c07c179dbc 100644 --- a/hw/tpm/tpm_tis_common.c +++ b/hw/tpm/tpm_tis_common.c @@ -26,6 +26,8 @@ #include "hw/irq.h" #include "hw/isa/isa.h" #include "qapi/error.h" +#include "qemu/bswap.h" +#include "qemu/crc-ccitt.h" #include "qemu/module.h" #include "hw/acpi/tpm.h" @@ -447,6 +449,23 @@ static uint64_t tpm_tis_mmio_read(void *opaque, hwaddr addr, return val; } +/* + * A wrapper read function so that it can be directly called without + * mmio. 
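The wrappers declared in tpm_tis.h above let a non-MMIO front end drive the same TIS register machine. A caller might look roughly like the sketch below, assuming the PTP layout where locality regions sit 4 KiB apart (the address math is an assumption for illustration, not the exact conversion the I2C layer performs):

static uint32_t i2c_read_tis(TPMState *s, uint8_t locty, uint16_t reg,
                             unsigned size)
{
    /* assumption: locality regions are 0x1000 bytes apart */
    hwaddr addr = ((hwaddr)locty << 12) | reg;

    return tpm_tis_read_data(s, addr, size);
}
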
+ */ +uint32_t tpm_tis_read_data(TPMState *s, hwaddr addr, unsigned size) +{ + return tpm_tis_mmio_read(s, addr, size); +} + +/* + * Calculate current data buffer checksum + */ +uint16_t tpm_tis_get_checksum(TPMState *s) +{ + return bswap16(crc_ccitt(0, s->buffer, s->rw_offset)); +} + /* * Write a value to a register of the TIS interface * See specs pages 33-63 for description of the registers @@ -588,10 +607,6 @@ static void tpm_tis_mmio_write(void *opaque, hwaddr addr, break; case TPM_TIS_REG_INT_ENABLE: - if (s->active_locty != locty) { - break; - } - s->loc[locty].inte &= mask; s->loc[locty].inte |= (val & (TPM_TIS_INT_ENABLED | TPM_TIS_INT_POLARITY_MASK | @@ -601,10 +616,6 @@ static void tpm_tis_mmio_write(void *opaque, hwaddr addr, /* hard wired -- ignore */ break; case TPM_TIS_REG_INT_STATUS: - if (s->active_locty != locty) { - break; - } - /* clearing of interrupt flags */ if (((val & TPM_TIS_INTERRUPTS_SUPPORTED)) && (s->loc[locty].ints & TPM_TIS_INTERRUPTS_SUPPORTED)) { @@ -767,6 +778,15 @@ static void tpm_tis_mmio_write(void *opaque, hwaddr addr, } } +/* + * A wrapper write function so that it can be directly called without + * mmio. + */ +void tpm_tis_write_data(TPMState *s, hwaddr addr, uint64_t val, uint32_t size) +{ + tpm_tis_mmio_write(s, addr, val, size); +} + const MemoryRegionOps tpm_tis_memory_ops = { .read = tpm_tis_mmio_read, .write = tpm_tis_mmio_write, diff --git a/hw/tpm/tpm_tis_i2c.c b/hw/tpm/tpm_tis_i2c.c new file mode 100644 index 0000000000..b695fd3a46 --- /dev/null +++ b/hw/tpm/tpm_tis_i2c.c @@ -0,0 +1,571 @@ +/* + * tpm_tis_i2c.c - QEMU's TPM TIS I2C Device + * + * Copyright (c) 2023 IBM Corporation + * + * Authors: + * Ninad Palsule + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + * TPM I2C implementation follows the TCG TPM I2C Interface Specification, + * Family 2.0, Level 00, Revision 1.00 + * + * TPM TIS for TPM 2 implementation following TCG PC Client Platform + * TPM Profile (PTP) Specification, Family 2.0, Revision 00.43 + * + */ + +#include "qemu/osdep.h" +#include "hw/i2c/i2c.h" +#include "hw/sysbus.h" +#include "hw/acpi/tpm.h" +#include "migration/vmstate.h" +#include "tpm_prop.h" +#include "qemu/log.h" +#include "trace.h" +#include "tpm_tis.h" + +/* Operations */ +#define OP_SEND 1 +#define OP_RECV 2 + +/* Is locality valid */ +#define TPM_TIS_I2C_IS_VALID_LOCTY(x) TPM_TIS_IS_VALID_LOCTY(x) + +typedef struct TPMStateI2C { + /*< private >*/ + I2CSlave parent_obj; + + uint8_t offset; /* offset into data[] */ + uint8_t operation; /* OP_SEND & OP_RECV */ + uint8_t data[5]; /* Data */ + + /* i2c registers */ + uint8_t loc_sel; /* Current locality */ + uint8_t csum_enable; /* Is checksum enabled */ + + /* Derived from the above */ + const char *reg_name; /* Register name */ + uint32_t tis_addr; /* Converted tis address including locty */ + + /*< public >*/ + TPMState state; /* not a QOM object */ + +} TPMStateI2C; + +DECLARE_INSTANCE_CHECKER(TPMStateI2C, TPM_TIS_I2C, + TYPE_TPM_TIS_I2C) + +/* Prototype */ +static inline void tpm_tis_i2c_to_tis_reg(TPMStateI2C *i2cst, uint8_t i2c_reg); + +/* Register map */ +typedef struct regMap { + uint8_t i2c_reg; /* I2C register */ + uint16_t tis_reg; /* TIS register */ + const char *reg_name; /* Register name */ +} I2CRegMap; + +/* + * The register values in the common code differ from the latest + * register numbers in the spec, hence add this conversion map + */ +static const I2CRegMap tpm_tis_reg_map[] = { + /* + * These registers are sent to the TIS layer. Registers with an UNKNOWN + * mapping are not sent to the TIS layer and are handled in the I2C layer.
+ * NOTE: Adding frequently used registers at the start + */ + { TPM_I2C_REG_DATA_FIFO, TPM_TIS_REG_DATA_FIFO, "FIFO", }, + { TPM_I2C_REG_STS, TPM_TIS_REG_STS, "STS", }, + { TPM_I2C_REG_DATA_CSUM_GET, TPM_I2C_REG_UNKNOWN, "CSUM_GET", }, + { TPM_I2C_REG_LOC_SEL, TPM_I2C_REG_UNKNOWN, "LOC_SEL", }, + { TPM_I2C_REG_ACCESS, TPM_TIS_REG_ACCESS, "ACCESS", }, + { TPM_I2C_REG_INT_ENABLE, TPM_TIS_REG_INT_ENABLE, "INTR_ENABLE",}, + { TPM_I2C_REG_INT_CAPABILITY, TPM_I2C_REG_UNKNOWN, "INTR_CAP", }, + { TPM_I2C_REG_INTF_CAPABILITY, TPM_TIS_REG_INTF_CAPABILITY, "INTF_CAP", }, + { TPM_I2C_REG_DID_VID, TPM_TIS_REG_DID_VID, "DID_VID", }, + { TPM_I2C_REG_RID, TPM_TIS_REG_RID, "RID", }, + { TPM_I2C_REG_I2C_DEV_ADDRESS, TPM_I2C_REG_UNKNOWN, "DEV_ADDRESS",}, + { TPM_I2C_REG_DATA_CSUM_ENABLE, TPM_I2C_REG_UNKNOWN, "CSUM_ENABLE",}, +}; + +static int tpm_tis_i2c_pre_save(void *opaque) +{ + TPMStateI2C *i2cst = opaque; + + return tpm_tis_pre_save(&i2cst->state); +} + +static int tpm_tis_i2c_post_load(void *opaque, int version_id) +{ + TPMStateI2C *i2cst = opaque; + + if (i2cst->offset >= 1) { + tpm_tis_i2c_to_tis_reg(i2cst, i2cst->data[0]); + } + + return 0; +} + +static const VMStateDescription vmstate_tpm_tis_i2c = { + .name = "tpm-tis-i2c", + .version_id = 0, + .pre_save = tpm_tis_i2c_pre_save, + .post_load = tpm_tis_i2c_post_load, + .fields = (VMStateField[]) { + VMSTATE_BUFFER(state.buffer, TPMStateI2C), + VMSTATE_UINT16(state.rw_offset, TPMStateI2C), + VMSTATE_UINT8(state.active_locty, TPMStateI2C), + VMSTATE_UINT8(state.aborting_locty, TPMStateI2C), + VMSTATE_UINT8(state.next_locty, TPMStateI2C), + + VMSTATE_STRUCT_ARRAY(state.loc, TPMStateI2C, TPM_TIS_NUM_LOCALITIES, 0, + vmstate_locty, TPMLocality), + + /* i2c specifics */ + VMSTATE_UINT8(offset, TPMStateI2C), + VMSTATE_UINT8(operation, TPMStateI2C), + VMSTATE_BUFFER(data, TPMStateI2C), + VMSTATE_UINT8(loc_sel, TPMStateI2C), + VMSTATE_UINT8(csum_enable, TPMStateI2C), + + VMSTATE_END_OF_LIST() + } +}; + +/* + * Set data value. The i2cst->offset is not updated as called in + * the read path. + */ +static void tpm_tis_i2c_set_data(TPMStateI2C *i2cst, uint32_t data) +{ + i2cst->data[1] = data; + i2cst->data[2] = data >> 8; + i2cst->data[3] = data >> 16; + i2cst->data[4] = data >> 24; +} +/* + * Generate interface capability based on what is returned by TIS and what is + * expected by I2C. Save the capability in the data array overwriting the TIS + * capability. + */ +static uint32_t tpm_tis_i2c_interface_capability(TPMStateI2C *i2cst, + uint32_t tis_cap) +{ + uint32_t i2c_cap; + + /* Now generate i2c capability */ + i2c_cap = (TPM_I2C_CAP_INTERFACE_TYPE | + TPM_I2C_CAP_INTERFACE_VER | + TPM_I2C_CAP_TPM2_FAMILY | + TPM_I2C_CAP_LOCALITY_CAP | + TPM_I2C_CAP_BUS_SPEED | + TPM_I2C_CAP_DEV_ADDR_CHANGE); + + /* Now check the TIS and set some capabilities */ + + /* Static burst count set */ + if (tis_cap & TPM_TIS_CAP_BURST_COUNT_STATIC) { + i2c_cap |= TPM_I2C_CAP_BURST_COUNT_STATIC; + } + + return i2c_cap; +} + +/* Convert I2C register to TIS address and returns the name of the register */ +static inline void tpm_tis_i2c_to_tis_reg(TPMStateI2C *i2cst, uint8_t i2c_reg) +{ + const I2CRegMap *reg_map; + int i; + + i2cst->tis_addr = 0xffffffff; + + /* Special case for the STS register. 
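As a worked example of the conversion this function performs (its body continues below; register values assumed from hw/acpi/tpm.h, where TPM_TIS_REG_STS is 0x18 and TPM_TIS_LOCALITY_SHIFT is 12): an access to I2C register TPM_I2C_REG_STS + 2 in locality 1 is folded to the STS base register, mapped through the table, and then tagged with the locality. The byte offset within STS is recovered later from the unmodified register number kept in data[0].

/* illustrative only */
i2cst->loc_sel = 1;
tpm_tis_i2c_to_tis_reg(i2cst, TPM_I2C_REG_STS + 2);
/* now i2cst->reg_name == "STS" and
 * i2cst->tis_addr == 0x18 + (1 << 12) == 0x1018 */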
*/ + if (i2c_reg >= TPM_I2C_REG_STS && i2c_reg <= TPM_I2C_REG_STS + 3) { + i2c_reg = TPM_I2C_REG_STS; + } + + for (i = 0; i < ARRAY_SIZE(tpm_tis_reg_map); i++) { + reg_map = &tpm_tis_reg_map[i]; + if (reg_map->i2c_reg == i2c_reg) { + i2cst->reg_name = reg_map->reg_name; + i2cst->tis_addr = reg_map->tis_reg; + + /* Include the locality in the address. */ + assert(TPM_TIS_I2C_IS_VALID_LOCTY(i2cst->loc_sel)); + i2cst->tis_addr += (i2cst->loc_sel << TPM_TIS_LOCALITY_SHIFT); + break; + } + } +} + +/* Clear some fields from the structure. */ +static inline void tpm_tis_i2c_clear_data(TPMStateI2C *i2cst) +{ + /* Clear operation and offset */ + i2cst->operation = 0; + i2cst->offset = 0; + i2cst->tis_addr = 0xffffffff; + i2cst->reg_name = NULL; + memset(i2cst->data, 0, sizeof(i2cst->data)); + + return; +} + +/* Send data to TPM */ +static inline void tpm_tis_i2c_tpm_send(TPMStateI2C *i2cst) +{ + uint32_t data; + size_t offset = 0; + uint32_t sz = 4; + + if ((i2cst->operation == OP_SEND) && (i2cst->offset > 1)) { + + switch (i2cst->data[0]) { + case TPM_I2C_REG_DATA_CSUM_ENABLE: + /* + * Checksum is not handled by TIS code hence we will consume the + * register here. + */ + i2cst->csum_enable = i2cst->data[1] & TPM_DATA_CSUM_ENABLED; + break; + case TPM_I2C_REG_DATA_FIFO: + /* Handled in the main i2c_send function */ + break; + case TPM_I2C_REG_LOC_SEL: + /* + * This register is not handled by TIS so save the locality + * locally + */ + if (TPM_TIS_I2C_IS_VALID_LOCTY(i2cst->data[1])) { + i2cst->loc_sel = i2cst->data[1]; + } + break; + default: + /* We handle non-FIFO here */ + + /* Index 0 is a register. Convert byte stream to uint32_t */ + data = i2cst->data[1]; + data |= i2cst->data[2] << 8; + data |= i2cst->data[3] << 16; + data |= i2cst->data[4] << 24; + + /* Add register specific masking */ + switch (i2cst->data[0]) { + case TPM_I2C_REG_INT_ENABLE: + data &= TPM_I2C_INT_ENABLE_MASK; + break; + case TPM_I2C_REG_STS ... TPM_I2C_REG_STS + 3: + /* + * STS register has 4 bytes data. + * As per the specs following writes must be allowed. + * - From base address 1 to 4 bytes are allowed. + * - Single byte write to first or last byte must + * be allowed. + */ + offset = i2cst->data[0] - TPM_I2C_REG_STS; + if (offset > 0) { + sz = 1; + } + data &= (TPM_I2C_STS_WRITE_MASK >> (offset * 8)); + break; + } + + tpm_tis_write_data(&i2cst->state, i2cst->tis_addr + offset, data, + sz); + break; + } + + tpm_tis_i2c_clear_data(i2cst); + } + + return; +} + +/* Callback from TPM to indicate that response is copied */ +static void tpm_tis_i2c_request_completed(TPMIf *ti, int ret) +{ + TPMStateI2C *i2cst = TPM_TIS_I2C(ti); + TPMState *s = &i2cst->state; + + /* Inform the common code. 
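The STS case above implements the partial-access rules: a full 4-byte write is only accepted at the base address, while non-zero offsets are reduced to single-byte writes with a shifted mask. For instance, a one-byte guest write to TPM_I2C_REG_STS + 1 ends up as (a sketch reusing the variables of tpm_tis_i2c_tpm_send() above):

offset = 1;                            /* i2cst->data[0] - TPM_I2C_REG_STS */
sz = 1;                                /* non-base offsets are 1-byte only */
data &= TPM_I2C_STS_WRITE_MASK >> 8;   /* keep the writable bits of byte 1 */
tpm_tis_write_data(&i2cst->state, i2cst->tis_addr + 1, data, 1);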
*/ + tpm_tis_request_completed(s, ret); +} + +static enum TPMVersion tpm_tis_i2c_get_tpm_version(TPMIf *ti) +{ + TPMStateI2C *i2cst = TPM_TIS_I2C(ti); + TPMState *s = &i2cst->state; + + return tpm_tis_get_tpm_version(s); +} + +static int tpm_tis_i2c_event(I2CSlave *i2c, enum i2c_event event) +{ + TPMStateI2C *i2cst = TPM_TIS_I2C(i2c); + int ret = 0; + + switch (event) { + case I2C_START_RECV: + trace_tpm_tis_i2c_event("START_RECV"); + break; + case I2C_START_SEND: + trace_tpm_tis_i2c_event("START_SEND"); + tpm_tis_i2c_clear_data(i2cst); + break; + case I2C_FINISH: + trace_tpm_tis_i2c_event("FINISH"); + if (i2cst->operation == OP_SEND) { + tpm_tis_i2c_tpm_send(i2cst); + } else { + tpm_tis_i2c_clear_data(i2cst); + } + break; + default: + break; + } + + return ret; +} + +/* + * If the data is for the FIFO then it is received from the tpm_tis_common + * buffer; otherwise it is handled with a single call to common code and + * cached in the local buffer. + */ +static uint8_t tpm_tis_i2c_recv(I2CSlave *i2c) +{ + int ret = 0; + uint32_t data_read; + TPMStateI2C *i2cst = TPM_TIS_I2C(i2c); + TPMState *s = &i2cst->state; + uint16_t i2c_reg = i2cst->data[0]; + size_t offset; + + if (i2cst->operation == OP_RECV) { + + /* Do not cache FIFO data. */ + if (i2cst->data[0] == TPM_I2C_REG_DATA_FIFO) { + data_read = tpm_tis_read_data(s, i2cst->tis_addr, 1); + ret = (data_read & 0xff); + } else if (i2cst->offset < sizeof(i2cst->data)) { + ret = i2cst->data[i2cst->offset++]; + } + + } else if ((i2cst->operation == OP_SEND) && (i2cst->offset < 2)) { + /* First receive call after send */ + + i2cst->operation = OP_RECV; + + switch (i2c_reg) { + case TPM_I2C_REG_LOC_SEL: + /* Locality selection register is managed by the I2C layer */ + tpm_tis_i2c_set_data(i2cst, i2cst->loc_sel); + break; + case TPM_I2C_REG_DATA_FIFO: + /* FIFO data is directly read from TPM TIS */ + data_read = tpm_tis_read_data(s, i2cst->tis_addr, 1); + tpm_tis_i2c_set_data(i2cst, (data_read & 0xff)); + break; + case TPM_I2C_REG_DATA_CSUM_ENABLE: + tpm_tis_i2c_set_data(i2cst, i2cst->csum_enable); + break; + case TPM_I2C_REG_INT_CAPABILITY: + /* + * Interrupts are not supported by the Linux kernel, hence we + * cannot test this model with interrupts. + */ + tpm_tis_i2c_set_data(i2cst, TPM_I2C_INT_ENABLE_MASK); + break; + case TPM_I2C_REG_DATA_CSUM_GET: + /* + * Checksum registers are not supported by common code, hence + * call common code to get the checksum. + */ + data_read = tpm_tis_get_checksum(s); + + /* Save the byte stream in the data field */ + tpm_tis_i2c_set_data(i2cst, data_read); + break; + default: + data_read = tpm_tis_read_data(s, i2cst->tis_addr, 4); + + switch (i2c_reg) { + case TPM_I2C_REG_INTF_CAPABILITY: + /* Prepare the capabilities as per the I2C interface */ + data_read = tpm_tis_i2c_interface_capability(i2cst, + data_read); + break; + case TPM_I2C_REG_STS ... TPM_I2C_REG_STS + 3: + offset = i2c_reg - TPM_I2C_REG_STS; + /* + * As per the spec, STS bits 31:26 are reserved and must + * be set to 0 + */ + data_read &= TPM_I2C_STS_READ_MASK; + /* + * The STS register has 4 bytes of data. + * As per the spec the following reads must be allowed: + * - From the base address, reads of 1 to 4 bytes. + * - The last byte must be readable as a single byte. + * - The second and third bytes must be readable together + * as two bytes.
+ */ + data_read >>= (offset * 8); + break; + } + + /* Save byte stream in data[] */ + tpm_tis_i2c_set_data(i2cst, data_read); + break; + } + + /* Return first byte with this call */ + i2cst->offset = 1; /* keep the register value intact for debug */ + ret = i2cst->data[i2cst->offset++]; + } else { + i2cst->operation = OP_RECV; + } + + trace_tpm_tis_i2c_recv(ret); + + return ret; +} + +/* + * Send function only remembers data in the buffer and then calls + * TPM TIS common code during FINISH event. + */ +static int tpm_tis_i2c_send(I2CSlave *i2c, uint8_t data) +{ + TPMStateI2C *i2cst = TPM_TIS_I2C(i2c); + + /* Reject non-supported registers. */ + if (i2cst->offset == 0) { + /* Convert I2C register to TIS register */ + tpm_tis_i2c_to_tis_reg(i2cst, data); + if (i2cst->tis_addr == 0xffffffff) { + return 0xffffffff; + } + + trace_tpm_tis_i2c_send_reg(i2cst->reg_name, data); + + /* We do not support device address change */ + if (data == TPM_I2C_REG_I2C_DEV_ADDRESS) { + qemu_log_mask(LOG_UNIMP, "%s: Device address change " + "is not supported.\n", __func__); + return 0xffffffff; + } + } else { + trace_tpm_tis_i2c_send(data); + } + + if (i2cst->offset < sizeof(i2cst->data)) { + i2cst->operation = OP_SEND; + + /* + * In two cases, we save values in the local buffer. + * 1) The first value is always a register. + * 2) In case of non-FIFO multibyte registers, TIS expects full + * register value hence I2C layer cache the register value and send + * to TIS during FINISH event. + */ + if ((i2cst->offset == 0) || + (i2cst->data[0] != TPM_I2C_REG_DATA_FIFO)) { + i2cst->data[i2cst->offset++] = data; + } else { + /* + * The TIS can process FIFO data one byte at a time hence the FIFO + * data is sent to TIS directly. + */ + tpm_tis_write_data(&i2cst->state, i2cst->tis_addr, data, 1); + } + + return 0; + } + + /* Return non-zero to indicate NAK */ + return 1; +} + +static Property tpm_tis_i2c_properties[] = { + DEFINE_PROP_TPMBE("tpmdev", TPMStateI2C, state.be_driver), + DEFINE_PROP_END_OF_LIST(), +}; + +static void tpm_tis_i2c_realizefn(DeviceState *dev, Error **errp) +{ + TPMStateI2C *i2cst = TPM_TIS_I2C(dev); + TPMState *s = &i2cst->state; + + if (!tpm_find()) { + error_setg(errp, "at most one TPM device is permitted"); + return; + } + + /* + * Get the backend pointer. 
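To make the receive protocol above concrete, here is an illustrative call sequence for fetching the 4-byte DID_VID register. In the real device these callbacks are invoked by the I2C bus core, not called directly, so this is only a sketch:

static uint32_t example_read_did_vid(I2CSlave *i2c)
{
    uint8_t b0, b1, b2, b3;

    tpm_tis_i2c_event(i2c, I2C_START_SEND);
    tpm_tis_i2c_send(i2c, TPM_I2C_REG_DID_VID);  /* select the register */
    tpm_tis_i2c_event(i2c, I2C_START_RECV);
    b0 = tpm_tis_i2c_recv(i2c);  /* one 4-byte TIS read, cached in data[] */
    b1 = tpm_tis_i2c_recv(i2c);  /* served from the cache */
    b2 = tpm_tis_i2c_recv(i2c);
    b3 = tpm_tis_i2c_recv(i2c);
    tpm_tis_i2c_event(i2c, I2C_FINISH);          /* clears the cached state */

    return b0 | (b1 << 8) | (b2 << 16) | ((uint32_t)b3 << 24);
}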
It is not initialized properly during + * device_class_set_props + */ + s->be_driver = qemu_find_tpm_be("tpm0"); + + if (!s->be_driver) { + error_setg(errp, "'tpmdev' property is required"); + return; + } +} + +static void tpm_tis_i2c_reset(DeviceState *dev) +{ + TPMStateI2C *i2cst = TPM_TIS_I2C(dev); + TPMState *s = &i2cst->state; + + tpm_tis_i2c_clear_data(i2cst); + + i2cst->csum_enable = 0; + i2cst->loc_sel = 0x00; + + return tpm_tis_reset(s); +} + +static void tpm_tis_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + TPMIfClass *tc = TPM_IF_CLASS(klass); + + dc->realize = tpm_tis_i2c_realizefn; + dc->reset = tpm_tis_i2c_reset; + dc->vmsd = &vmstate_tpm_tis_i2c; + device_class_set_props(dc, tpm_tis_i2c_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + + k->event = tpm_tis_i2c_event; + k->recv = tpm_tis_i2c_recv; + k->send = tpm_tis_i2c_send; + + tc->model = TPM_MODEL_TPM_TIS; + tc->request_completed = tpm_tis_i2c_request_completed; + tc->get_version = tpm_tis_i2c_get_tpm_version; +} + +static const TypeInfo tpm_tis_i2c_info = { + .name = TYPE_TPM_TIS_I2C, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(TPMStateI2C), + .class_init = tpm_tis_i2c_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_TPM_IF }, + { } + } +}; + +static void tpm_tis_i2c_register_types(void) +{ + type_register_static(&tpm_tis_i2c_info); +} + +type_init(tpm_tis_i2c_register_types) diff --git a/hw/tpm/trace-events b/hw/tpm/trace-events index f17110458e..fa882dfefe 100644 --- a/hw/tpm/trace-events +++ b/hw/tpm/trace-events @@ -36,3 +36,9 @@ tpm_spapr_do_crq_unknown_msg_type(uint8_t type) "Unknown message type 0x%02x" tpm_spapr_do_crq_unknown_crq(uint8_t raw1, uint8_t raw2) "unknown CRQ 0x%02x 0x%02x ..."
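With the backend resolved at realize time, the device can be plugged into any machine that exposes an I2C bus. A hypothetical invocation, shown for an ASPEED BMC machine (the bus name is board-specific and the socket path is only an example; 0x2e is the slave address suggested by the TCG I2C spec):

/*
 * qemu-system-arm -M rainier-bmc \
 *     -chardev socket,id=chrtpm,path=swtpm-sock \
 *     -tpmdev emulator,id=tpm0,chardev=chrtpm \
 *     -device tpm-tis-i2c,tpmdev=tpm0,bus=aspeed.i2c.bus.12,address=0x2e
 */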
tpm_spapr_post_load(void) "Delivering TPM response after resume" tpm_spapr_caught_response(uint32_t v) "Caught response to deliver after resume: %u bytes" + +# tpm_tis_i2c.c +tpm_tis_i2c_recv(uint8_t data) "TPM I2C read: 0x%X" +tpm_tis_i2c_send(uint8_t data) "TPM I2C write: 0x%X" +tpm_tis_i2c_event(const char *event) "TPM I2C event: %s" +tpm_tis_i2c_send_reg(const char *name, int reg) "TPM I2C write register: %s(0x%X)" diff --git a/hw/usb/Kconfig b/hw/usb/Kconfig index ce4f433976..0ec6def4b8 100644 --- a/hw/usb/Kconfig +++ b/hw/usb/Kconfig @@ -136,5 +136,4 @@ config USB_DWC3 config XLNX_USB_SUBSYS bool - default y if XLNX_VERSAL select USB_DWC3 diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c index 28164d89be..be0a4fc3bc 100644 --- a/hw/usb/dev-smartcard-reader.c +++ b/hw/usb/dev-smartcard-reader.c @@ -278,7 +278,9 @@ typedef struct BulkIn { struct CCIDBus { BusState qbus; }; -typedef struct CCIDBus CCIDBus; + +#define TYPE_CCID_BUS "ccid-bus" +OBJECT_DECLARE_SIMPLE_TYPE(CCIDBus, CCID_BUS) /* * powered - defaults to true, changed by PowerOn/PowerOff messages @@ -1174,9 +1176,6 @@ static Property ccid_props[] = { DEFINE_PROP_END_OF_LIST(), }; -#define TYPE_CCID_BUS "ccid-bus" -OBJECT_DECLARE_SIMPLE_TYPE(CCIDBus, CCID_BUS) - static const TypeInfo ccid_bus_info = { .name = TYPE_CCID_BUS, .parent = TYPE_BUS, diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c index 88f99c05d5..f013ded91e 100644 --- a/hw/usb/dev-uas.c +++ b/hw/usb/dev-uas.c @@ -937,7 +937,8 @@ static void usb_uas_realize(USBDevice *dev, Error **errp) QTAILQ_INIT(&uas->results); QTAILQ_INIT(&uas->requests); - uas->status_bh = qemu_bh_new(usb_uas_send_status_bh, uas); + uas->status_bh = qemu_bh_new_guarded(usb_uas_send_status_bh, uas, + &d->mem_reentrancy_guard); dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE); scsi_bus_init(&uas->bus, sizeof(uas->bus), DEVICE(dev), &usb_uas_scsi_info); diff --git a/hw/usb/hcd-dwc2.c b/hw/usb/hcd-dwc2.c index 8755e9cbb0..a0c4e782b2 100644 --- a/hw/usb/hcd-dwc2.c +++ b/hw/usb/hcd-dwc2.c @@ -1364,7 +1364,8 @@ static void dwc2_realize(DeviceState *dev, Error **errp) s->fi = USB_FRMINTVL - 1; s->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_frame_boundary, s); s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_work_timer, s); - s->async_bh = qemu_bh_new(dwc2_work_bh, s); + s->async_bh = qemu_bh_new_guarded(dwc2_work_bh, s, + &dev->mem_reentrancy_guard); sysbus_init_irq(sbd, &s->irq); } diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c index d4da8dcb8d..c930c60921 100644 --- a/hw/usb/hcd-ehci.c +++ b/hw/usb/hcd-ehci.c @@ -2533,7 +2533,8 @@ void usb_ehci_realize(EHCIState *s, DeviceState *dev, Error **errp) } s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ehci_work_timer, s); - s->async_bh = qemu_bh_new(ehci_work_bh, s); + s->async_bh = qemu_bh_new_guarded(ehci_work_bh, s, + &dev->mem_reentrancy_guard); s->device = dev; s->vmstate = qemu_add_vm_change_state_handler(usb_ehci_vm_state_change, s); diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c index 9d68036d23..cc5cde6983 100644 --- a/hw/usb/hcd-ohci.c +++ b/hw/usb/hcd-ohci.c @@ -23,7 +23,7 @@ * o Disable timers when nothing needs to be done, or remove timer usage * all together. 
* o BIOS work to boot from USB storage -*/ + */ #include "qemu/osdep.h" #include "hw/irq.h" @@ -39,7 +39,7 @@ #include "hcd-ohci.h" /* This causes frames to occur 1000x slower */ -//#define OHCI_TIME_WARP 1 +/*#define OHCI_TIME_WARP 1*/ #define ED_LINK_LIMIT 32 @@ -58,48 +58,48 @@ struct ohci_hcca { #define ED_WBACK_OFFSET offsetof(struct ohci_ed, head) #define ED_WBACK_SIZE 4 -/* Bitfields for the first word of an Endpoint Desciptor. */ +/* Bitfields for the first word of an Endpoint Descriptor. */ #define OHCI_ED_FA_SHIFT 0 -#define OHCI_ED_FA_MASK (0x7f<> 2 < ARRAY_SIZE(ohci_reg_names)) { + return ohci_reg_names[addr >> 2]; + } else { + return ""; + } +} + static void ohci_die(OHCIState *ohci) { ohci->ohci_die(ohci); @@ -335,8 +353,8 @@ static void ohci_soft_reset(OHCIState *ohci) ohci->per_cur = 0; ohci->done = 0; ohci->done_count = 7; - - /* FSMPS is marked TBD in OCHI 1.0, what gives ffs? + /* + * FSMPS is marked TBD in OCHI 1.0, what gives ffs? * I took the value linux sets ... */ ohci->fsmps = 0x2778; @@ -460,10 +478,10 @@ static inline int ohci_read_hcca(OHCIState *ohci, static inline int ohci_put_ed(OHCIState *ohci, dma_addr_t addr, struct ohci_ed *ed) { - /* ed->tail is under control of the HCD. + /* + * ed->tail is under control of the HCD. * Since just ed->head is changed by HC, just write back this */ - return put_dwords(ohci, addr + ED_WBACK_OFFSET, (uint32_t *)((char *)ed + ED_WBACK_OFFSET), ED_WBACK_SIZE >> 2); @@ -499,9 +517,9 @@ static int ohci_copy_td(OHCIState *ohci, struct ohci_td *td, ptr = td->cbp; n = 0x1000 - (ptr & 0xfff); - if (n > len) + if (n > len) { n = len; - + } if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, n, dir, MEMTXATTRS_UNSPECIFIED)) { return -1; @@ -527,9 +545,9 @@ static int ohci_copy_iso_td(OHCIState *ohci, ptr = start_addr; n = 0x1000 - (ptr & 0xfff); - if (n > len) + if (n > len) { n = len; - + } if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, n, dir, MEMTXATTRS_UNSPECIFIED)) { return -1; @@ -584,7 +602,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed) starting_frame = OHCI_BM(iso_td.flags, TD_SF); frame_count = OHCI_BM(iso_td.flags, TD_FC); - relative_frame_number = USUB(ohci->frame_number, starting_frame); + relative_frame_number = USUB(ohci->frame_number, starting_frame); trace_usb_ohci_iso_td_head( ed->head & OHCI_DPTR_MASK, ed->tail & OHCI_DPTR_MASK, @@ -601,8 +619,10 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed) trace_usb_ohci_iso_td_relative_frame_number_neg(relative_frame_number); return 1; } else if (relative_frame_number > frame_count) { - /* ISO TD expired - retire the TD to the Done Queue and continue with - the next ISO TD of the same ED */ + /* + * ISO TD expired - retire the TD to the Done Queue and continue with + * the next ISO TD of the same ED + */ trace_usb_ohci_iso_td_relative_frame_number_big(relative_frame_number, frame_count); if (OHCI_CC_DATAOVERRUN == OHCI_BM(iso_td.flags, TD_CC)) { @@ -615,8 +635,9 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed) iso_td.next = ohci->done; ohci->done = addr; i = OHCI_BM(iso_td.flags, TD_DI); - if (i < ohci->done_count) + if (i < ohci->done_count) { ohci->done_count = i; + } if (ohci_put_iso_td(ohci, addr, &iso_td)) { ohci_die(ohci); return 1; @@ -655,8 +676,8 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed) next_offset = iso_td.be; } - if (!(OHCI_BM(start_offset, TD_PSW_CC) & 0xe) || - ((relative_frame_number < frame_count) && + if (!(OHCI_BM(start_offset, TD_PSW_CC) & 
0xe) || + ((relative_frame_number < frame_count) && !(OHCI_BM(next_offset, TD_PSW_CC) & 0xe))) { trace_usb_ohci_iso_td_bad_cc_not_accessed(start_offset, next_offset); return 1; @@ -801,8 +822,9 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed) iso_td.next = ohci->done; ohci->done = addr; i = OHCI_BM(iso_td.flags, TD_DI); - if (i < ohci->done_count) + if (i < ohci->done_count) { ohci->done_count = i; + } } if (ohci_put_iso_td(ohci, addr, &iso_td)) { ohci_die(ohci); @@ -845,9 +867,10 @@ static void ohci_td_pkt(const char *msg, const uint8_t *buf, size_t len) } } -/* Service a transport descriptor. - Returns nonzero to terminate processing of this endpoint. */ - +/* + * Service a transport descriptor. + * Returns nonzero to terminate processing of this endpoint. + */ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) { int dir; @@ -869,7 +892,7 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) return 1; } - /* See if this TD has already been submitted to the device. */ + /* See if this TD has already been submitted to the device. */ completion = (addr == ohci->async_td); if (completion && !ohci->async_complete) { trace_usb_ohci_td_skip_async(); @@ -885,7 +908,7 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) switch (dir) { case OHCI_TD_DIR_OUT: case OHCI_TD_DIR_IN: - /* Same value. */ + /* Same value. */ break; default: dir = OHCI_BM(td.flags, TD_DP); @@ -956,11 +979,12 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) } ep = usb_ep_get(dev, pid, OHCI_BM(ed->flags, ED_EN)); if (ohci->async_td) { - /* ??? The hardware should allow one active packet per - endpoint. We only allow one active packet per controller. - This should be sufficient as long as devices respond in a - timely manner. - */ + /* + * ??? The hardware should allow one active packet per + * endpoint. We only allow one active packet per controller. + * This should be sufficient as long as devices respond in a + * timely manner. + */ trace_usb_ohci_td_too_many_pending(ep->nr); return 1; } @@ -996,7 +1020,7 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) /* Writeback */ if (ret == pktlen || (dir == OHCI_TD_DIR_IN && ret >= 0 && flag_r)) { - /* Transmission succeeded. */ + /* Transmission succeeded. */ if (ret == len) { td.cbp = 0; } else { @@ -1018,8 +1042,9 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) /* Setting ED_C is part of the TD retirement process */ ed->head &= ~OHCI_ED_C; - if (td.flags & OHCI_TD_T0) + if (td.flags & OHCI_TD_T0) { ed->head |= OHCI_ED_C; + } } else { if (ret >= 0) { trace_usb_ohci_td_underrun(); @@ -1048,8 +1073,10 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) OHCI_SET_BM(td.flags, TD_EC, 3); break; } - /* An error occurred so we have to clear the interrupt counter. See - * spec at 6.4.4 on page 104 */ + /* + * An error occurred so we have to clear the interrupt counter. + * See spec at 6.4.4 on page 104 + */ ohci->done_count = 0; } ed->head |= OHCI_ED_H; @@ -1061,8 +1088,9 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) td.next = ohci->done; ohci->done = addr; i = OHCI_BM(td.flags, TD_DI); - if (i < ohci->done_count) + if (i < ohci->done_count) { ohci->done_count = i; + } exit_no_retire: if (ohci_put_td(ohci, addr, &td)) { ohci_die(ohci); @@ -1071,7 +1099,7 @@ exit_no_retire: return OHCI_BM(td.flags, TD_CC) != OHCI_CC_NOERROR; } -/* Service an endpoint list. Returns nonzero if active TD were found. */ +/* Service an endpoint list. 
Returns nonzero if active TD were found. */ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head) { struct ohci_ed ed; @@ -1081,9 +1109,9 @@ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head) uint32_t link_cnt = 0; active = 0; - if (head == 0) + if (head == 0) { return 0; - + } for (cur = head; cur && link_cnt++ < ED_LINK_LIMIT; cur = next_ed) { if (ohci_read_ed(ohci, cur, &ed)) { trace_usb_ohci_ed_read_error(cur); @@ -1095,7 +1123,7 @@ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head) if ((ed.head & OHCI_ED_H) || (ed.flags & OHCI_ED_K)) { uint32_t addr; - /* Cancel pending packets for ED that have been paused. */ + /* Cancel pending packets for ED that have been paused. */ addr = ed.head & OHCI_DPTR_MASK; if (ohci->async_td && addr == ohci->async_td) { usb_cancel_packet(&ohci->usb_packet); @@ -1112,15 +1140,16 @@ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head) ed.tail & OHCI_DPTR_MASK, ed.next & OHCI_DPTR_MASK); trace_usb_ohci_ed_pkt_flags( OHCI_BM(ed.flags, ED_FA), OHCI_BM(ed.flags, ED_EN), - OHCI_BM(ed.flags, ED_D), (ed.flags & OHCI_ED_S)!= 0, + OHCI_BM(ed.flags, ED_D), (ed.flags & OHCI_ED_S) != 0, (ed.flags & OHCI_ED_K) != 0, (ed.flags & OHCI_ED_F) != 0, OHCI_BM(ed.flags, ED_MPS)); active = 1; if ((ed.flags & OHCI_ED_F) == 0) { - if (ohci_service_td(ohci, &ed)) + if (ohci_service_td(ohci, &ed)) { break; + } } else { /* Handle isochronous endpoints */ if (ohci_service_iso_td(ohci, &ed)) { @@ -1151,7 +1180,7 @@ static void ohci_sof(OHCIState *ohci) ohci_set_interrupt(ohci, OHCI_INTR_SF); } -/* Process Control and Bulk lists. */ +/* Process Control and Bulk lists. */ static void ohci_process_lists(OHCIState *ohci) { if ((ohci->ctl & OHCI_CTL_CLE) && (ohci->status & OHCI_STATUS_CLF)) { @@ -1192,7 +1221,7 @@ static void ohci_frame_boundary(void *opaque) ohci_service_ed_list(ohci, le32_to_cpu(hcca.intr[n])); } - /* Cancel all pending packets if either of the lists has been disabled. */ + /* Cancel all pending packets if either of the lists has been disabled. */ if (ohci->old_ctl & (~ohci->ctl) & (OHCI_CTL_BLE | OHCI_CTL_CLE)) { ohci_stop_endpoints(ohci); } @@ -1210,21 +1239,25 @@ static void ohci_frame_boundary(void *opaque) /* Increment frame number and take care of endianness. */ ohci->frame_number = (ohci->frame_number + 1) & 0xffff; hcca.frame = cpu_to_le16(ohci->frame_number); + /* When the HC updates frame number, set pad to 0. 
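The new pad write introduced here keeps the HCCA contract: HccaFrameNumber is a 16-bit counter that wraps from 0xffff back to 0, and OHCI spec 4.4.1 requires the HC to write 0 to the pad word whenever it updates the frame number. Restated as a stand-alone helper, for clarity only:

/* per-frame HCCA update, as performed in the surrounding code (sketch) */
static void hcca_update_frame(struct ohci_hcca *hcca, uint16_t frame_number)
{
    hcca->frame = cpu_to_le16(frame_number);  /* wraps 0xffff -> 0x0000 */
    hcca->pad = 0;  /* spec 4.4.1: HC zeroes the pad on each update */
}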
Ref OHCI Spec 4.4.1*/ + hcca.pad = 0; if (ohci->done_count == 0 && !(ohci->intr_status & OHCI_INTR_WD)) { - if (!ohci->done) + if (!ohci->done) { abort(); - if (ohci->intr & ohci->intr_status) + } + if (ohci->intr & ohci->intr_status) { ohci->done |= 1; + } hcca.done = cpu_to_le32(ohci->done); ohci->done = 0; ohci->done_count = 7; ohci_set_interrupt(ohci, OHCI_INTR_WD); } - if (ohci->done_count != 7 && ohci->done_count != 0) + if (ohci->done_count != 7 && ohci->done_count != 0) { ohci->done_count--; - + } /* Do SOF stuff here */ ohci_sof(ohci); @@ -1234,18 +1267,17 @@ static void ohci_frame_boundary(void *opaque) } } -/* Start sending SOF tokens across the USB bus, lists are processed in +/* + * Start sending SOF tokens across the USB bus, lists are processed in * next frame */ static int ohci_bus_start(OHCIState *ohci) { trace_usb_ohci_start(ohci->name); - - /* Delay the first SOF event by one frame time as - * linux driver is not ready to receive it and - * can meet some race conditions + /* + * Delay the first SOF event by one frame time as linux driver is + * not ready to receive it and can meet some race conditions */ - ohci->sof_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); ohci_eof_timer(ohci); @@ -1259,39 +1291,7 @@ void ohci_bus_stop(OHCIState *ohci) timer_del(ohci->eof_timer); } -/* Sets a flag in a port status register but only set it if the port is - * connected, if not set ConnectStatusChange flag. If flag is enabled - * return 1. - */ -static int ohci_port_set_if_connected(OHCIState *ohci, int i, uint32_t val) -{ - int ret = 1; - - /* writing a 0 has no effect */ - if (val == 0) - return 0; - - /* If CurrentConnectStatus is cleared we set - * ConnectStatusChange - */ - if (!(ohci->rhport[i].ctrl & OHCI_PORT_CCS)) { - ohci->rhport[i].ctrl |= OHCI_PORT_CSC; - if (ohci->rhstatus & OHCI_RHS_DRWE) { - /* TODO: CSC is a wakeup event */ - } - return 0; - } - - if (ohci->rhport[i].ctrl & val) - ret = 0; - - /* set the bit */ - ohci->rhport[i].ctrl |= val; - - return ret; -} - -/* Set the frame interval - frame interval toggle is manipulated by the hcd only */ +/* Frame interval toggle is manipulated by the hcd only */ static void ohci_set_frame_interval(OHCIState *ohci, uint16_t val) { val &= OHCI_FMI_FI; @@ -1308,10 +1308,8 @@ static void ohci_port_power(OHCIState *ohci, int i, int p) if (p) { ohci->rhport[i].ctrl |= OHCI_PORT_PPS; } else { - ohci->rhport[i].ctrl &= ~(OHCI_PORT_PPS| - OHCI_PORT_CCS| - OHCI_PORT_PSS| - OHCI_PORT_PRS); + ohci->rhport[i].ctrl &= ~(OHCI_PORT_PPS | OHCI_PORT_CCS | + OHCI_PORT_PSS | OHCI_PORT_PRS); } } @@ -1326,9 +1324,9 @@ static void ohci_set_ctl(OHCIState *ohci, uint32_t val) new_state = ohci->ctl & OHCI_CTL_HCFS; /* no state change */ - if (old_state == new_state) + if (old_state == new_state) { return; - + } trace_usb_ohci_set_ctl(ohci->name, new_state); switch (new_state) { case OHCI_USB_OPERATIONAL: @@ -1354,21 +1352,19 @@ static uint32_t ohci_get_frame_remaining(OHCIState *ohci) uint16_t fr; int64_t tks; - if ((ohci->ctl & OHCI_CTL_HCFS) != OHCI_USB_OPERATIONAL) - return (ohci->frt << 31); - - /* Being in USB operational state guarnatees sof_time was - * set already. - */ + if ((ohci->ctl & OHCI_CTL_HCFS) != OHCI_USB_OPERATIONAL) { + return ohci->frt << 31; + } + /* Being in USB operational state guarnatees sof_time was set already. 
*/ tks = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - ohci->sof_time; if (tks < 0) { tks = 0; } /* avoid muldiv if possible */ - if (tks >= usb_frame_time) - return (ohci->frt << 31); - + if (tks >= usb_frame_time) { + return ohci->frt << 31; + } tks = tks / usb_bit_time; fr = (uint16_t)(ohci->fi - tks); @@ -1384,33 +1380,81 @@ static void ohci_set_hub_status(OHCIState *ohci, uint32_t val) old_state = ohci->rhstatus; /* write 1 to clear OCIC */ - if (val & OHCI_RHS_OCIC) + if (val & OHCI_RHS_OCIC) { ohci->rhstatus &= ~OHCI_RHS_OCIC; - + } if (val & OHCI_RHS_LPS) { int i; - for (i = 0; i < ohci->num_ports; i++) + for (i = 0; i < ohci->num_ports; i++) { ohci_port_power(ohci, i, 0); + } trace_usb_ohci_hub_power_down(); } if (val & OHCI_RHS_LPSC) { int i; - for (i = 0; i < ohci->num_ports; i++) + for (i = 0; i < ohci->num_ports; i++) { ohci_port_power(ohci, i, 1); + } trace_usb_ohci_hub_power_up(); } - if (val & OHCI_RHS_DRWE) + if (val & OHCI_RHS_DRWE) { ohci->rhstatus |= OHCI_RHS_DRWE; - - if (val & OHCI_RHS_CRWE) + } + if (val & OHCI_RHS_CRWE) { ohci->rhstatus &= ~OHCI_RHS_DRWE; - - if (old_state != ohci->rhstatus) + } + if (old_state != ohci->rhstatus) { ohci_set_interrupt(ohci, OHCI_INTR_RHSC); + } +} + +/* This is the one state transition the controller can do by itself */ +static bool ohci_resume(OHCIState *s) +{ + if ((s->ctl & OHCI_CTL_HCFS) == OHCI_USB_SUSPEND) { + trace_usb_ohci_remote_wakeup(s->name); + s->ctl &= ~OHCI_CTL_HCFS; + s->ctl |= OHCI_USB_RESUME; + return true; + } + return false; +} + +/* + * Sets a flag in a port status reg but only set it if the port is connected. + * If not set ConnectStatusChange flag. If flag is enabled return 1. + */ +static int ohci_port_set_if_connected(OHCIState *ohci, int i, uint32_t val) +{ + int ret = 1; + + /* writing a 0 has no effect */ + if (val == 0) { + return 0; + } + /* If CurrentConnectStatus is cleared we set ConnectStatusChange */ + if (!(ohci->rhport[i].ctrl & OHCI_PORT_CCS)) { + ohci->rhport[i].ctrl |= OHCI_PORT_CSC; + if (ohci->rhstatus & OHCI_RHS_DRWE) { + /* CSC is a wakeup event */ + if (ohci_resume(ohci)) { + ohci_set_interrupt(ohci, OHCI_INTR_RD); + } + } + return 0; + } + + if (ohci->rhport[i].ctrl & val) { + ret = 0; + } + /* set the bit */ + ohci->rhport[i].ctrl |= val; + + return ret; } /* Set root hub port status */ @@ -1423,12 +1467,12 @@ static void ohci_port_set_status(OHCIState *ohci, int portnum, uint32_t val) old_state = port->ctrl; /* Write to clear CSC, PESC, PSSC, OCIC, PRSC */ - if (val & OHCI_PORT_WTC) + if (val & OHCI_PORT_WTC) { port->ctrl &= ~(val & OHCI_PORT_WTC); - - if (val & OHCI_PORT_CCS) + } + if (val & OHCI_PORT_CCS) { port->ctrl &= ~OHCI_PORT_PES; - + } ohci_port_set_if_connected(ohci, portnum, val & OHCI_PORT_PES); if (ohci_port_set_if_connected(ohci, portnum, val & OHCI_PORT_PSS)) { @@ -1439,20 +1483,20 @@ static void ohci_port_set_status(OHCIState *ohci, int portnum, uint32_t val) trace_usb_ohci_port_reset(portnum); usb_device_reset(port->port.dev); port->ctrl &= ~OHCI_PORT_PRS; - /* ??? Should this also set OHCI_PORT_PESC. */ + /* ??? Should this also set OHCI_PORT_PESC. */ port->ctrl |= OHCI_PORT_PES | OHCI_PORT_PRSC; } - /* Invert order here to ensure in ambiguous case, device is - * powered up... - */ - if (val & OHCI_PORT_LSDA) + /* Invert order here to ensure in ambiguous case, device is powered up. 
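Note the behavioral fix hidden in the move of ohci_port_set_if_connected(): the old copy left "/* TODO: CSC is a wakeup event */" unimplemented, while the version above actually performs the wakeup when the HCD has armed remote wakeup via DRWE. Condensed, the new connect-status-change path is:

/* condensed sketch of the CSC wakeup path added above */
if (ohci->rhstatus & OHCI_RHS_DRWE) {
    if (ohci_resume(ohci)) {                     /* SUSPEND -> RESUME */
        ohci_set_interrupt(ohci, OHCI_INTR_RD);  /* ResumeDetected */
    }
}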
*/ + if (val & OHCI_PORT_LSDA) { ohci_port_power(ohci, portnum, 0); - if (val & OHCI_PORT_PPS) + } + if (val & OHCI_PORT_PPS) { ohci_port_power(ohci, portnum, 1); - - if (old_state != port->ctrl) + } + if (old_state != port->ctrl) { ohci_set_interrupt(ohci, OHCI_INTR_RHSC); + } } static uint64_t ohci_mem_read(void *opaque, @@ -1469,6 +1513,8 @@ static uint64_t ohci_mem_read(void *opaque, } else if (addr >= 0x54 && addr < 0x54 + ohci->num_ports * 4) { /* HcRhPortStatus */ retval = ohci->rhport[(addr - 0x54) >> 2].ctrl | OHCI_PORT_PPS; + trace_usb_ohci_mem_port_read(size, "HcRhPortStatus", (addr - 0x50) >> 2, + addr, addr >> 2, retval); } else { switch (addr >> 2) { case 0: /* HcRevision */ @@ -1573,6 +1619,10 @@ static uint64_t ohci_mem_read(void *opaque, trace_usb_ohci_mem_read_bad_offset(addr); retval = 0xffffffff; } + if (addr != 0xc || retval) { + trace_usb_ohci_mem_read(size, ohci_reg_name(addr), addr, addr >> 2, + retval); + } } return retval; @@ -1593,10 +1643,13 @@ static void ohci_mem_write(void *opaque, if (addr >= 0x54 && addr < 0x54 + ohci->num_ports * 4) { /* HcRhPortStatus */ + trace_usb_ohci_mem_port_write(size, "HcRhPortStatus", + (addr - 0x50) >> 2, addr, addr >> 2, val); ohci_port_set_status(ohci, (addr - 0x54) >> 2, val); return; } + trace_usb_ohci_mem_write(size, ohci_reg_name(addr), addr, addr >> 2, val); switch (addr >> 2) { case 1: /* HcControl */ ohci_set_ctl(ohci, val); @@ -1609,8 +1662,9 @@ static void ohci_mem_write(void *opaque, /* Bits written as '0' remain unchanged in the register */ ohci->status |= val; - if (ohci->status & OHCI_STATUS_HCR) + if (ohci->status & OHCI_STATUS_HCR) { ohci_soft_reset(ohci); + } break; case 3: /* HcInterruptStatus */ @@ -1688,8 +1742,9 @@ static void ohci_mem_write(void *opaque, case 25: /* HcHReset */ ohci->hreset = val & ~OHCI_HRESET_FSBIR; - if (val & OHCI_HRESET_FSBIR) + if (val & OHCI_HRESET_FSBIR) { ohci_hard_reset(ohci); + } break; case 26: /* HcHInterruptEnable */ @@ -1790,11 +1845,7 @@ static void ohci_wakeup(USBPort *port1) intr = OHCI_INTR_RHSC; } /* Note that the controller can be suspended even if this port is not */ - if ((s->ctl & OHCI_CTL_HCFS) == OHCI_USB_SUSPEND) { - trace_usb_ohci_remote_wakeup(s->name); - /* This is the one state transition the controller can do by itself */ - s->ctl &= ~OHCI_CTL_HCFS; - s->ctl |= OHCI_USB_RESUME; + if (ohci_resume(s)) { /* * In suspend mode only ResumeDetected is possible, not RHSC: * see the OHCI spec 5.1.2.3. @@ -1827,7 +1878,7 @@ static USBBusOps ohci_bus_ops = { void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports, dma_addr_t localmem_base, char *masterbus, uint32_t firstport, AddressSpace *as, - void (*ohci_die_fn)(struct OHCIState *), Error **errp) + void (*ohci_die_fn)(OHCIState *), Error **errp) { Error *err = NULL; int i; @@ -1859,7 +1910,7 @@ void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports, ohci->num_ports = num_ports; if (masterbus) { USBPort *ports[OHCI_MAX_PORTS]; - for(i = 0; i < num_ports; i++) { + for (i = 0; i < num_ports; i++) { ports[i] = &ohci->rhport[i].port; } usb_register_companion(masterbus, ports, num_ports, @@ -1892,7 +1943,7 @@ void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports, ohci_frame_boundary, ohci); } -/** +/* * A typical OHCI will stop operating and set itself into error state * (which can be queried by MMIO) to signal that it got an error. 
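Alongside the OHCI cleanups, this series converts USB bottom halves (dev-uas, hcd-dwc2 and hcd-ehci above; hcd-uhci, host-libusb, usbredir and xen-usb below) from qemu_bh_new() to qemu_bh_new_guarded(), tying each bottom half to its device's memory re-entrancy guard so that a BH scheduled from the device's own MMIO handler cannot re-enter device code. A minimal sketch of the pattern, with MyDevState/MYDEV as hypothetical names:

static void mydev_bh(void *opaque)
{
    /* deferred work runs here, protected by the re-entrancy guard */
}

static void mydev_realize(DeviceState *dev, Error **errp)
{
    MyDevState *s = MYDEV(dev);  /* hypothetical QOM cast */

    s->bh = qemu_bh_new_guarded(mydev_bh, s, &dev->mem_reentrancy_guard);
}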
*/ diff --git a/hw/usb/hcd-ohci.h b/hw/usb/hcd-ohci.h index 11ac57058d..e1827227ac 100644 --- a/hw/usb/hcd-ohci.h +++ b/hw/usb/hcd-ohci.h @@ -21,6 +21,7 @@ #ifndef HCD_OHCI_H #define HCD_OHCI_H +#include "hw/sysbus.h" #include "sysemu/dma.h" #include "hw/usb.h" #include "qom/object.h" @@ -33,7 +34,9 @@ typedef struct OHCIPort { uint32_t ctrl; } OHCIPort; -typedef struct OHCIState { +typedef struct OHCIState OHCIState; + +struct OHCIState { USBBus bus; qemu_irq irq; MemoryRegion mem; @@ -89,8 +92,8 @@ typedef struct OHCIState { uint32_t async_td; bool async_complete; - void (*ohci_die)(struct OHCIState *ohci); -} OHCIState; + void (*ohci_die)(OHCIState *ohci); +}; #define TYPE_SYSBUS_OHCI "sysbus-ohci" OBJECT_DECLARE_SIMPLE_TYPE(OHCISysBusState, SYSBUS_OHCI) @@ -112,7 +115,7 @@ extern const VMStateDescription vmstate_ohci_state; void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports, dma_addr_t localmem_base, char *masterbus, uint32_t firstport, AddressSpace *as, - void (*ohci_die_fn)(struct OHCIState *), Error **errp); + void (*ohci_die_fn)(OHCIState *), Error **errp); void ohci_bus_stop(OHCIState *ohci); void ohci_stop_endpoints(OHCIState *ohci); void ohci_hard_reset(OHCIState *ohci); diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c index 30ae0104bb..77baaa7a6b 100644 --- a/hw/usb/hcd-uhci.c +++ b/hw/usb/hcd-uhci.c @@ -60,9 +60,7 @@ enum { TD_RESULT_ASYNC_CONT, }; -typedef struct UHCIState UHCIState; typedef struct UHCIAsync UHCIAsync; -typedef struct UHCIPCIDeviceClass UHCIPCIDeviceClass; struct UHCIPCIDeviceClass { PCIDeviceClass parent_class; @@ -1161,8 +1159,7 @@ static USBBusOps uhci_bus_ops = { void usb_uhci_common_realize(PCIDevice *dev, Error **errp) { Error *err = NULL; - PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); - UHCIPCIDeviceClass *u = container_of(pc, UHCIPCIDeviceClass, parent_class); + UHCIPCIDeviceClass *u = UHCI_GET_CLASS(dev); UHCIState *s = UHCI(dev); uint8_t *pci_conf = s->dev.config; int i; @@ -1193,7 +1190,7 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp) USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL); } } - s->bh = qemu_bh_new(uhci_bh, s); + s->bh = qemu_bh_new_guarded(uhci_bh, s, &DEVICE(dev)->mem_reentrancy_guard); s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, uhci_frame_timer, s); s->num_ports_vmstate = NB_PORTS; QTAILQ_INIT(&s->queues); @@ -1269,7 +1266,7 @@ void uhci_data_class_init(ObjectClass *klass, void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); - UHCIPCIDeviceClass *u = container_of(k, UHCIPCIDeviceClass, parent_class); + UHCIPCIDeviceClass *u = UHCI_CLASS(klass); UHCIInfo *info = data; k->realize = info->realize ? 
info->realize : usb_uhci_common_realize; diff --git a/hw/usb/hcd-uhci.h b/hw/usb/hcd-uhci.h index e0fdb98ef1..69f8b40c49 100644 --- a/hw/usb/hcd-uhci.h +++ b/hw/usb/hcd-uhci.h @@ -75,7 +75,7 @@ typedef struct UHCIState { } UHCIState; #define TYPE_UHCI "pci-uhci-usb" -DECLARE_INSTANCE_CHECKER(UHCIState, UHCI, TYPE_UHCI) +OBJECT_DECLARE_TYPE(UHCIState, UHCIPCIDeviceClass, UHCI) typedef struct UHCIInfo { const char *name; diff --git a/hw/usb/hcd-xhci-nec.c b/hw/usb/hcd-xhci-nec.c index 13c9ac5dbd..328e5bfe7c 100644 --- a/hw/usb/hcd-xhci-nec.c +++ b/hw/usb/hcd-xhci-nec.c @@ -27,14 +27,16 @@ #include "hcd-xhci-pci.h" -typedef struct XHCINecState { +OBJECT_DECLARE_SIMPLE_TYPE(XHCINecState, NEC_XHCI) + +struct XHCINecState { /*< private >*/ XHCIPciState parent_obj; /*< public >*/ uint32_t flags; uint32_t intrs; uint32_t slots; -} XHCINecState; +}; static Property nec_xhci_properties[] = { DEFINE_PROP_ON_OFF_AUTO("msi", XHCIPciState, msi, ON_OFF_AUTO_AUTO), @@ -51,7 +53,7 @@ static Property nec_xhci_properties[] = { static void nec_xhci_instance_init(Object *obj) { XHCIPciState *pci = XHCI_PCI(obj); - XHCINecState *nec = container_of(pci, XHCINecState, parent_obj); + XHCINecState *nec = NEC_XHCI(obj); pci->xhci.flags = nec->flags; pci->xhci.numintrs = nec->intrs; diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c index 176868d345..f500db85ab 100644 --- a/hw/usb/host-libusb.c +++ b/hw/usb/host-libusb.c @@ -1141,7 +1141,8 @@ static void usb_host_nodev_bh(void *opaque) static void usb_host_nodev(USBHostDevice *s) { if (!s->bh_nodev) { - s->bh_nodev = qemu_bh_new(usb_host_nodev_bh, s); + s->bh_nodev = qemu_bh_new_guarded(usb_host_nodev_bh, s, + &DEVICE(s)->mem_reentrancy_guard); } qemu_bh_schedule(s->bh_nodev); } @@ -1739,7 +1740,8 @@ static int usb_host_post_load(void *opaque, int version_id) USBHostDevice *dev = opaque; if (!dev->bh_postld) { - dev->bh_postld = qemu_bh_new(usb_host_post_load_bh, dev); + dev->bh_postld = qemu_bh_new_guarded(usb_host_post_load_bh, dev, + &DEVICE(dev)->mem_reentrancy_guard); } qemu_bh_schedule(dev->bh_postld); dev->bh_postld_pending = true; diff --git a/hw/usb/imx-usb-phy.c b/hw/usb/imx-usb-phy.c index 5d7a549e34..1a97b36a11 100644 --- a/hw/usb/imx-usb-phy.c +++ b/hw/usb/imx-usb-phy.c @@ -13,6 +13,7 @@ #include "qemu/osdep.h" #include "hw/usb/imx-usb-phy.h" #include "migration/vmstate.h" +#include "qemu/log.h" #include "qemu/module.h" static const VMStateDescription vmstate_imx_usbphy = { @@ -90,7 +91,15 @@ static uint64_t imx_usbphy_read(void *opaque, hwaddr offset, unsigned size) value = s->usbphy[index - 3]; break; default: - value = s->usbphy[index]; + if (index < USBPHY_MAX) { + value = s->usbphy[index]; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Read from non-existing USB PHY register 0x%" + HWADDR_PRIx "\n", + __func__, offset); + value = 0; + } break; } return (uint64_t)value; @@ -168,7 +177,13 @@ static void imx_usbphy_write(void *opaque, hwaddr offset, uint64_t value, s->usbphy[index - 3] ^= value; break; default: - /* Other registers are read-only */ + /* Other registers are read-only or do not exist */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write to %s USB PHY register 0x%" + HWADDR_PRIx "\n", + __func__, + index >= USBPHY_MAX ? 
"non-existing" : "read-only", + offset); break; } } diff --git a/hw/usb/meson.build b/hw/usb/meson.build index bdf34cbd3e..599dc24f0d 100644 --- a/hw/usb/meson.build +++ b/hw/usb/meson.build @@ -84,6 +84,6 @@ if libusb.found() hw_usb_modules += {'host': usbhost_ss} endif -softmmu_ss.add(when: ['CONFIG_USB', 'CONFIG_XEN', libusb], if_true: files('xen-usb.c')) +softmmu_ss.add(when: ['CONFIG_USB', 'CONFIG_XEN_BUS', libusb], if_true: files('xen-usb.c')) modules += { 'hw-usb': hw_usb_modules } diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c index fd7df599bc..39fbaaab16 100644 --- a/hw/usb/redirect.c +++ b/hw/usb/redirect.c @@ -1441,8 +1441,10 @@ static void usbredir_realize(USBDevice *udev, Error **errp) } } - dev->chardev_close_bh = qemu_bh_new(usbredir_chardev_close_bh, dev); - dev->device_reject_bh = qemu_bh_new(usbredir_device_reject_bh, dev); + dev->chardev_close_bh = qemu_bh_new_guarded(usbredir_chardev_close_bh, dev, + &DEVICE(dev)->mem_reentrancy_guard); + dev->device_reject_bh = qemu_bh_new_guarded(usbredir_device_reject_bh, dev, + &DEVICE(dev)->mem_reentrancy_guard); dev->attach_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, usbredir_do_attach, dev); packet_id_queue_init(&dev->cancelled, dev, "cancelled"); diff --git a/hw/usb/trace-events b/hw/usb/trace-events index b65269892c..6bb9655c8d 100644 --- a/hw/usb/trace-events +++ b/hw/usb/trace-events @@ -57,8 +57,12 @@ usb_ohci_ed_read_error(uint32_t addr) "ED read error at 0x%x" usb_ohci_ed_pkt(uint32_t cur, int h, int c, uint32_t head, uint32_t tail, uint32_t next) "ED @ 0x%.8x h=%u c=%u\n head=0x%.8x tailp=0x%.8x next=0x%.8x" usb_ohci_ed_pkt_flags(uint32_t fa, uint32_t en, uint32_t d, int s, int k, int f, uint32_t mps) "fa=%u en=%u d=%u s=%u k=%u f=%u mps=%u" usb_ohci_hcca_read_error(uint32_t addr) "HCCA read error at 0x%x" +usb_ohci_mem_read(uint32_t size, const char *name, uint32_t addr, uint32_t offs, uint32_t val) "%d %s 0x%x %d -> 0x%x" +usb_ohci_mem_port_read(uint32_t size, const char *name, uint32_t port, uint32_t addr, uint32_t offs, uint32_t val) "%d %s[%d] 0x%x %d -> 0x%x" usb_ohci_mem_read_unaligned(uint32_t addr) "at 0x%x" usb_ohci_mem_read_bad_offset(uint32_t addr) "0x%x" +usb_ohci_mem_write(uint32_t size, const char *name, uint32_t addr, uint32_t offs, uint32_t val) "%d %s 0x%x %d <- 0x%x" +usb_ohci_mem_port_write(uint32_t size, const char *name, uint32_t port, uint32_t addr, uint32_t offs, uint32_t val) "%d %s[%d] 0x%x %d <- 0x%x" usb_ohci_mem_write_unaligned(uint32_t addr) "at 0x%x" usb_ohci_mem_write_bad_offset(uint32_t addr) "0x%x" usb_ohci_process_lists(uint32_t head, uint32_t cur) "head 0x%x, cur 0x%x" diff --git a/hw/usb/u2f.h b/hw/usb/u2f.h index a408a82927..8bff13141a 100644 --- a/hw/usb/u2f.h +++ b/hw/usb/u2f.h @@ -31,22 +31,16 @@ #define U2FHID_PACKET_SIZE 64 #define U2FHID_PENDING_IN_NUM 32 -typedef struct U2FKeyState U2FKeyState; typedef struct U2FKeyInfo U2FKeyInfo; #define TYPE_U2F_KEY "u2f-key" -#define U2F_KEY(obj) \ - OBJECT_CHECK(U2FKeyState, (obj), TYPE_U2F_KEY) -#define U2F_KEY_CLASS(klass) \ - OBJECT_CLASS_CHECK(U2FKeyClass, (klass), TYPE_U2F_KEY) -#define U2F_KEY_GET_CLASS(obj) \ - OBJECT_GET_CLASS(U2FKeyClass, (obj), TYPE_U2F_KEY) +OBJECT_DECLARE_TYPE(U2FKeyState, U2FKeyClass, U2F_KEY) /* * Callbacks to be used by the U2F key base device (i.e. hw/u2f.c) * to interact with its variants (i.e. 
hw/u2f-*.c) */ -typedef struct U2FKeyClass { +struct U2FKeyClass { /*< private >*/ USBDeviceClass parent_class; @@ -55,12 +49,12 @@ typedef struct U2FKeyClass { const uint8_t packet[U2FHID_PACKET_SIZE]); void (*realize)(U2FKeyState *key, Error **errp); void (*unrealize)(U2FKeyState *key); -} U2FKeyClass; +}; /* * State of the U2F key base device (i.e. hw/u2f.c) */ -typedef struct U2FKeyState { +struct U2FKeyState { USBDevice dev; USBEndpoint *ep; uint8_t idle; @@ -70,7 +64,7 @@ typedef struct U2FKeyState { uint8_t pending_in_start; uint8_t pending_in_end; uint8_t pending_in_num; -} U2FKeyState; +}; /* * API to be used by the U2F key device variants (i.e. hw/u2f-*.c) diff --git a/hw/usb/vt82c686-uhci-pci.c b/hw/usb/vt82c686-uhci-pci.c index 46a901f56f..b4884c9011 100644 --- a/hw/usb/vt82c686-uhci-pci.c +++ b/hw/usb/vt82c686-uhci-pci.c @@ -1,17 +1,7 @@ #include "qemu/osdep.h" -#include "hw/irq.h" #include "hw/isa/vt82c686.h" #include "hcd-uhci.h" -static void uhci_isa_set_irq(void *opaque, int irq_num, int level) -{ - UHCIState *s = opaque; - uint8_t irq = pci_get_byte(s->dev.config + PCI_INTERRUPT_LINE); - if (irq > 0 && irq < 15) { - via_isa_set_irq(pci_get_function_0(&s->dev), irq, level); - } -} - static void usb_uhci_vt82c686b_realize(PCIDevice *dev, Error **errp) { UHCIState *s = UHCI(dev); @@ -25,8 +15,6 @@ static void usb_uhci_vt82c686b_realize(PCIDevice *dev, Error **errp) pci_set_long(pci_conf + 0xc0, 0x00002000); usb_uhci_common_realize(dev, errp); - object_unref(s->irq); - s->irq = qemu_allocate_irq(uhci_isa_set_irq, s, 0); } static UHCIInfo uhci_info[] = { diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c index 0f7369e7ed..38ee660a30 100644 --- a/hw/usb/xen-usb.c +++ b/hw/usb/xen-usb.c @@ -101,6 +101,8 @@ struct usbback_hotplug { struct usbback_info { struct XenLegacyDevice xendev; /* must be first */ USBBus bus; + uint32_t urb_ring_ref; + uint32_t conn_ring_ref; void *urb_sring; void *conn_sring; struct usbif_urb_back_ring urb_ring; @@ -159,7 +161,7 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req) for (i = 0; i < nr_segs; i++) { if ((unsigned)usbback_req->req.seg[i].offset + - (unsigned)usbback_req->req.seg[i].length > XC_PAGE_SIZE) { + (unsigned)usbback_req->req.seg[i].length > XEN_PAGE_SIZE) { xen_pv_printf(xendev, 0, "segment crosses page boundary\n"); return -EINVAL; } @@ -183,7 +185,7 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req) for (i = 0; i < usbback_req->nr_buffer_segs; i++) { seg = usbback_req->req.seg + i; - addr = usbback_req->buffer + i * XC_PAGE_SIZE + seg->offset; + addr = usbback_req->buffer + i * XEN_PAGE_SIZE + seg->offset; qemu_iovec_add(&usbback_req->packet.iov, addr, seg->length); } } @@ -277,10 +279,11 @@ static int usbback_init_packet(struct usbback_req *usbback_req) static void usbback_do_response(struct usbback_req *usbback_req, int32_t status, int32_t actual_length, int32_t error_count) { + uint32_t ref[USBIF_MAX_SEGMENTS_PER_REQUEST]; struct usbback_info *usbif; struct usbif_urb_response *res; struct XenLegacyDevice *xendev; - unsigned int notify; + unsigned int notify, i; usbif = usbback_req->usbif; xendev = &usbif->xendev; @@ -293,13 +296,19 @@ static void usbback_do_response(struct usbback_req *usbback_req, int32_t status, } if (usbback_req->buffer) { - xen_be_unmap_grant_refs(xendev, usbback_req->buffer, + for (i = 0; i < usbback_req->nr_buffer_segs; i++) { + ref[i] = usbback_req->req.seg[i].gref; + } + xen_be_unmap_grant_refs(xendev, usbback_req->buffer, ref, usbback_req->nr_buffer_segs); usbback_req->buffer = 
NULL; } if (usbback_req->isoc_buffer) { - xen_be_unmap_grant_refs(xendev, usbback_req->isoc_buffer, + for (i = 0; i < usbback_req->nr_extra_segs; i++) { + ref[i] = usbback_req->req.seg[i + usbback_req->req.nr_buffer_segs].gref; + } + xen_be_unmap_grant_refs(xendev, usbback_req->isoc_buffer, ref, usbback_req->nr_extra_segs); usbback_req->isoc_buffer = NULL; } @@ -832,11 +841,11 @@ static void usbback_disconnect(struct XenLegacyDevice *xendev) xen_pv_unbind_evtchn(xendev); if (usbif->urb_sring) { - xen_be_unmap_grant_ref(xendev, usbif->urb_sring); + xen_be_unmap_grant_ref(xendev, usbif->urb_sring, usbif->urb_ring_ref); usbif->urb_sring = NULL; } if (usbif->conn_sring) { - xen_be_unmap_grant_ref(xendev, usbif->conn_sring); + xen_be_unmap_grant_ref(xendev, usbif->conn_sring, usbif->conn_ring_ref); usbif->conn_sring = NULL; } @@ -889,10 +898,12 @@ static int usbback_connect(struct XenLegacyDevice *xendev) return -1; } + usbif->urb_ring_ref = urb_ring_ref; + usbif->conn_ring_ref = conn_ring_ref; urb_sring = usbif->urb_sring; conn_sring = usbif->conn_sring; - BACK_RING_INIT(&usbif->urb_ring, urb_sring, XC_PAGE_SIZE); - BACK_RING_INIT(&usbif->conn_ring, conn_sring, XC_PAGE_SIZE); + BACK_RING_INIT(&usbif->urb_ring, urb_sring, XEN_PAGE_SIZE); + BACK_RING_INIT(&usbif->conn_ring, conn_sring, XEN_PAGE_SIZE); xen_be_bind_evtchn(xendev); @@ -1021,7 +1032,8 @@ static void usbback_alloc(struct XenLegacyDevice *xendev) QTAILQ_INIT(&usbif->req_free_q); QSIMPLEQ_INIT(&usbif->hotplug_q); - usbif->bh = qemu_bh_new(usbback_bh, usbif); + usbif->bh = qemu_bh_new_guarded(usbback_bh, usbif, + &DEVICE(xendev)->mem_reentrancy_guard); } static int usbback_free(struct XenLegacyDevice *xendev) diff --git a/hw/vfio/common.c b/hw/vfio/common.c index bab83c0e55..78358ede27 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -42,6 +42,7 @@ #include "migration/migration.h" #include "migration/misc.h" #include "migration/blocker.h" +#include "migration/qemu-file.h" #include "sysemu/tpm.h" VFIOGroupList vfio_group_list = @@ -319,6 +320,28 @@ const MemoryRegionOps vfio_region_ops = { * Device state interfaces */ +typedef struct { + unsigned long *bitmap; + hwaddr size; + hwaddr pages; +} VFIOBitmap; + +static int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size) +{ + vbmap->pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size(); + vbmap->size = ROUND_UP(vbmap->pages, sizeof(__u64) * BITS_PER_BYTE) / + BITS_PER_BYTE; + vbmap->bitmap = g_try_malloc0(vbmap->size); + if (!vbmap->bitmap) { + return -ENOMEM; + } + + return 0; +} + +static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, + uint64_t size, ram_addr_t ram_addr); + bool vfio_mig_active(void) { VFIOGroup *group; @@ -339,6 +362,7 @@ bool vfio_mig_active(void) } static Error *multiple_devices_migration_blocker; +static Error *giommu_migration_blocker; static unsigned int vfio_migratable_device_num(void) { @@ -390,13 +414,72 @@ void vfio_unblock_multiple_devices_migration(void) multiple_devices_migration_blocker = NULL; } +static bool vfio_viommu_preset(void) +{ + VFIOAddressSpace *space; + + QLIST_FOREACH(space, &vfio_address_spaces, list) { + if (space->as != &address_space_memory) { + return true; + } + } + + return false; +} + +int vfio_block_giommu_migration(Error **errp) +{ + int ret; + + if (giommu_migration_blocker || + !vfio_viommu_preset()) { + return 0; + } + + error_setg(&giommu_migration_blocker, + "Migration is currently not supported with vIOMMU enabled"); + ret = migrate_add_blocker(giommu_migration_blocker, errp); + if (ret < 0) 
{ + error_free(giommu_migration_blocker); + giommu_migration_blocker = NULL; + } + + return ret; +} + +void vfio_migration_finalize(void) +{ + if (!giommu_migration_blocker || + vfio_viommu_preset()) { + return; + } + + migrate_del_blocker(giommu_migration_blocker); + error_free(giommu_migration_blocker); + giommu_migration_blocker = NULL; +} + +static void vfio_set_migration_error(int err) +{ + MigrationState *ms = migrate_get_current(); + + if (migration_is_setup_or_active(ms->state)) { + WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { + if (ms->to_dst_file) { + qemu_file_set_error(ms->to_dst_file, err); + } + } + } +} + static bool vfio_devices_all_dirty_tracking(VFIOContainer *container) { VFIOGroup *group; VFIODevice *vbasedev; MigrationState *ms = migrate_get_current(); - if (!migration_is_setup_or_active(ms->state)) { + if (ms->state != MIGRATION_STATUS_ACTIVE && + ms->state != MIGRATION_STATUS_DEVICE) { return false; } @@ -417,6 +500,22 @@ static bool vfio_devices_all_dirty_tracking(VFIOContainer *container) return true; } +static bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container) +{ + VFIOGroup *group; + VFIODevice *vbasedev; + + QLIST_FOREACH(group, &container->group_list, container_next) { + QLIST_FOREACH(vbasedev, &group->device_list, next) { + if (!vbasedev->dirty_pages_supported) { + return false; + } + } + } + + return true; +} + /* * Check if all VFIO devices are running and migration is active, which is * essentially equivalent to the migration being in pre-copy phase. @@ -454,9 +553,14 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container, { struct vfio_iommu_type1_dma_unmap *unmap; struct vfio_bitmap *bitmap; - uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size(); + VFIOBitmap vbmap; int ret; + ret = vfio_bitmap_alloc(&vbmap, size); + if (ret) { + return ret; + } + unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap)); unmap->argsz = sizeof(*unmap) + sizeof(*bitmap); @@ -470,35 +574,28 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container, * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize * to qemu_real_host_page_size. 
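For a feel of what vfio_bitmap_alloc() above produces, take a 1 GiB range on a host with 4 KiB pages (illustrative numbers only):

/* illustrative: vfio_bitmap_alloc() sizing for 1 GiB with 4 KiB pages */
hwaddr size = 1 * GiB;                   /* range to be tracked */
hwaddr pages = size / 4096;              /* 262144 pages to cover */
/* one bit per page, rounded up to a whole number of 64-bit words */
hwaddr bytes = ROUND_UP(pages, 64) / 8;  /* 32768 bytes of bitmap */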
*/ - bitmap->pgsize = qemu_real_host_page_size(); - bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / - BITS_PER_BYTE; + bitmap->size = vbmap.size; + bitmap->data = (__u64 *)vbmap.bitmap; - if (bitmap->size > container->max_dirty_bitmap_size) { - error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, - (uint64_t)bitmap->size); + if (vbmap.size > container->max_dirty_bitmap_size) { + error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size); ret = -E2BIG; goto unmap_exit; } - bitmap->data = g_try_malloc0(bitmap->size); - if (!bitmap->data) { - ret = -ENOMEM; - goto unmap_exit; - } - ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); if (!ret) { - cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data, - iotlb->translated_addr, pages); + cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, + iotlb->translated_addr, vbmap.pages); } else { error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m"); } - g_free(bitmap->data); unmap_exit: g_free(unmap); + g_free(vbmap.bitmap); + return ret; } @@ -515,10 +612,16 @@ static int vfio_dma_unmap(VFIOContainer *container, .iova = iova, .size = size, }; + bool need_dirty_sync = false; + int ret; - if (iotlb && container->dirty_pages_supported && - vfio_devices_all_running_and_mig_active(container)) { - return vfio_dma_unmap_bitmap(container, iova, size, iotlb); + if (iotlb && vfio_devices_all_running_and_mig_active(container)) { + if (!vfio_devices_all_device_dirty_tracking(container) && + container->dirty_pages_supported) { + return vfio_dma_unmap_bitmap(container, iova, size, iotlb); + } + + need_dirty_sync = true; } while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { @@ -544,10 +647,12 @@ static int vfio_dma_unmap(VFIOContainer *container, return -errno; } - if (iotlb && vfio_devices_all_running_and_mig_active(container)) { - cpu_physical_memory_set_dirty_range(iotlb->translated_addr, size, - tcg_enabled() ? DIRTY_CLIENTS_ALL : - DIRTY_CLIENTS_NOCODE); + if (need_dirty_sync) { + ret = vfio_get_dirty_bitmap(container, iova, size, + iotlb->translated_addr); + if (ret) { + return ret; + } } return 0; @@ -680,6 +785,7 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) if (iotlb->target_as != &address_space_memory) { error_report("Wrong target AS \"%s\", only system memory is allowed", iotlb->target_as->name ? 
iotlb->target_as->name : "none"); + vfio_set_migration_error(-EINVAL); return; } @@ -703,17 +809,18 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) read_only); if (ret) { error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", " - "0x%"HWADDR_PRIx", %p) = %d (%m)", + "0x%"HWADDR_PRIx", %p) = %d (%s)", container, iova, - iotlb->addr_mask + 1, vaddr, ret); + iotlb->addr_mask + 1, vaddr, ret, strerror(-ret)); } } else { ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb); if (ret) { error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " - "0x%"HWADDR_PRIx") = %d (%m)", + "0x%"HWADDR_PRIx") = %d (%s)", container, iova, - iotlb->addr_mask + 1, ret); + iotlb->addr_mask + 1, ret, strerror(-ret)); + vfio_set_migration_error(ret); } } out: @@ -868,6 +975,22 @@ static void vfio_unregister_ram_discard_listener(VFIOContainer *container, g_free(vrdl); } +static VFIOHostDMAWindow *vfio_find_hostwin(VFIOContainer *container, + hwaddr iova, hwaddr end) +{ + VFIOHostDMAWindow *hostwin; + bool hostwin_found = false; + + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { + hostwin_found = true; + break; + } + } + + return hostwin_found ? hostwin : NULL; +} + static bool vfio_known_safe_misalignment(MemoryRegionSection *section) { MemoryRegion *mr = section->mr; @@ -884,24 +1007,15 @@ static bool vfio_known_safe_misalignment(MemoryRegionSection *section) return true; } -static void vfio_listener_region_add(MemoryListener *listener, - MemoryRegionSection *section) +static bool vfio_listener_valid_section(MemoryRegionSection *section, + const char *name) { - VFIOContainer *container = container_of(listener, VFIOContainer, listener); - hwaddr iova, end; - Int128 llend, llsize; - void *vaddr; - int ret; - VFIOHostDMAWindow *hostwin; - bool hostwin_found; - Error *err = NULL; - if (vfio_listener_skipped_section(section)) { - trace_vfio_listener_region_add_skip( + trace_vfio_listener_region_skip(name, section->offset_within_address_space, section->offset_within_address_space + int128_get64(int128_sub(section->size, int128_one()))); - return; + return false; } if (unlikely((section->offset_within_address_space & @@ -916,15 +1030,53 @@ static void vfio_listener_region_add(MemoryListener *listener, section->offset_within_region, qemu_real_host_page_size()); } - return; + return false; } + return true; +} + +static bool vfio_get_section_iova_range(VFIOContainer *container, + MemoryRegionSection *section, + hwaddr *out_iova, hwaddr *out_end, + Int128 *out_llend) +{ + Int128 llend; + hwaddr iova; + iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space); llend = int128_make64(section->offset_within_address_space); llend = int128_add(llend, section->size); llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask())); if (int128_ge(int128_make64(iova), llend)) { + return false; + } + + *out_iova = iova; + *out_end = int128_get64(int128_sub(llend, int128_one())); + if (out_llend) { + *out_llend = llend; + } + return true; +} + +static void vfio_listener_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIOContainer *container = container_of(listener, VFIOContainer, listener); + hwaddr iova, end; + Int128 llend, llsize; + void *vaddr; + int ret; + VFIOHostDMAWindow *hostwin; + Error *err = NULL; + + if (!vfio_listener_valid_section(section, "region_add")) { + return; + } + + if (!vfio_get_section_iova_range(container, section, &iova, &end, &llend)) { if 
(memory_region_is_ram_device(section->mr)) { trace_vfio_listener_region_add_no_dma_map( memory_region_name(section->mr), @@ -934,7 +1086,6 @@ static void vfio_listener_region_add(MemoryListener *listener, } return; } - end = int128_get64(int128_sub(llend, int128_one())); if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { hwaddr pgsize = 0; @@ -994,15 +1145,8 @@ static void vfio_listener_region_add(MemoryListener *listener, #endif } - hostwin_found = false; - QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { - if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { - hostwin_found = true; - break; - } - } - - if (!hostwin_found) { + hostwin = vfio_find_hostwin(container, iova, end); + if (!hostwin) { error_setg(&err, "Container %p can't map guest IOVA region" " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end); goto fail; @@ -1095,8 +1239,9 @@ static void vfio_listener_region_add(MemoryListener *listener, vaddr, section->readonly); if (ret) { error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", " - "0x%"HWADDR_PRIx", %p) = %d (%m)", - container, iova, int128_get64(llsize), vaddr, ret); + "0x%"HWADDR_PRIx", %p) = %d (%s)", + container, iova, int128_get64(llsize), vaddr, ret, + strerror(-ret)); if (memory_region_is_ram_device(section->mr)) { /* Allow unexpected mappings not to be fatal for RAM devices */ error_report_err(err); @@ -1140,26 +1285,7 @@ static void vfio_listener_region_del(MemoryListener *listener, int ret; bool try_unmap = true; - if (vfio_listener_skipped_section(section)) { - trace_vfio_listener_region_del_skip( - section->offset_within_address_space, - section->offset_within_address_space + - int128_get64(int128_sub(section->size, int128_one()))); - return; - } - - if (unlikely((section->offset_within_address_space & - ~qemu_real_host_page_mask()) != - (section->offset_within_region & ~qemu_real_host_page_mask()))) { - if (!vfio_known_safe_misalignment(section)) { - error_report("%s received unaligned region %s iova=0x%"PRIx64 - " offset_within_region=0x%"PRIx64 - " qemu_real_host_page_size=0x%"PRIxPTR, - __func__, memory_region_name(section->mr), - section->offset_within_address_space, - section->offset_within_region, - qemu_real_host_page_size()); - } + if (!vfio_listener_valid_section(section, "region_del")) { return; } @@ -1186,15 +1312,9 @@ static void vfio_listener_region_del(MemoryListener *listener, */ } - iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space); - llend = int128_make64(section->offset_within_address_space); - llend = int128_add(llend, section->size); - llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask())); - - if (int128_ge(int128_make64(iova), llend)) { + if (!vfio_get_section_iova_range(container, section, &iova, &end, &llend)) { return; } - end = int128_get64(int128_sub(llend, int128_one())); llsize = int128_sub(llend, int128_make64(iova)); @@ -1203,15 +1323,9 @@ static void vfio_listener_region_del(MemoryListener *listener, if (memory_region_is_ram_device(section->mr)) { hwaddr pgmask; VFIOHostDMAWindow *hostwin; - bool hostwin_found = false; - QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { - if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { - hostwin_found = true; - break; - } - } - assert(hostwin_found); /* or region_add() would have failed */ + hostwin = vfio_find_hostwin(container, iova, end); + assert(hostwin); /* or region_add() would have failed */ pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1; try_unmap = !((iova & pgmask) || (int128_get64(llsize) & 
pgmask)); @@ -1228,16 +1342,18 @@ static void vfio_listener_region_del(MemoryListener *listener, ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); if (ret) { error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " - "0x%"HWADDR_PRIx") = %d (%m)", - container, iova, int128_get64(llsize), ret); + "0x%"HWADDR_PRIx") = %d (%s)", + container, iova, int128_get64(llsize), ret, + strerror(-ret)); } iova += int128_get64(llsize); } ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); if (ret) { error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " - "0x%"HWADDR_PRIx") = %d (%m)", - container, iova, int128_get64(llsize), ret); + "0x%"HWADDR_PRIx") = %d (%s)", + container, iova, int128_get64(llsize), ret, + strerror(-ret)); } } @@ -1256,7 +1372,7 @@ static void vfio_listener_region_del(MemoryListener *listener, } } -static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start) +static int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start) { int ret; struct vfio_iommu_type1_dirty_bitmap dirty = { @@ -1264,7 +1380,7 @@ static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start) }; if (!container->dirty_pages_supported) { - return; + return 0; } if (start) { @@ -1275,40 +1391,327 @@ static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start) ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty); if (ret) { + ret = -errno; error_report("Failed to set dirty tracking flag 0x%x errno: %d", dirty.flags, errno); } + + return ret; +} + +typedef struct VFIODirtyRanges { + hwaddr min32; + hwaddr max32; + hwaddr min64; + hwaddr max64; +} VFIODirtyRanges; + +typedef struct VFIODirtyRangesListener { + VFIOContainer *container; + VFIODirtyRanges ranges; + MemoryListener listener; +} VFIODirtyRangesListener; + +static void vfio_dirty_tracking_update(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIODirtyRangesListener *dirty = container_of(listener, + VFIODirtyRangesListener, + listener); + VFIODirtyRanges *range = &dirty->ranges; + hwaddr iova, end, *min, *max; + + if (!vfio_listener_valid_section(section, "tracking_update") || + !vfio_get_section_iova_range(dirty->container, section, + &iova, &end, NULL)) { + return; + } + + /* + * The address space passed to the dirty tracker is reduced to two ranges: + * one for 32-bit DMA ranges, and another one for 64-bit DMA ranges. + * The underlying reports of dirty will query a sub-interval of each of + * these ranges. + * + * The purpose of the dual range handling is to handle known cases of big + * holes in the address space, like the x86 AMD 1T hole. The alternative + * would be an IOVATree but that has a much bigger runtime overhead and + * unnecessary complexity. + */ + min = (end <= UINT32_MAX) ? &range->min32 : &range->min64; + max = (end <= UINT32_MAX) ? 
&range->max32 : &range->max64; + + if (*min > iova) { + *min = iova; + } + if (*max < end) { + *max = end; + } + + trace_vfio_device_dirty_tracking_update(iova, end, *min, *max); + return; +} + +static const MemoryListener vfio_dirty_tracking_listener = { + .name = "vfio-tracking", + .region_add = vfio_dirty_tracking_update, +}; + +static void vfio_dirty_tracking_init(VFIOContainer *container, + VFIODirtyRanges *ranges) +{ + VFIODirtyRangesListener dirty; + + memset(&dirty, 0, sizeof(dirty)); + dirty.ranges.min32 = UINT32_MAX; + dirty.ranges.min64 = UINT64_MAX; + dirty.listener = vfio_dirty_tracking_listener; + dirty.container = container; + + memory_listener_register(&dirty.listener, + container->space->as); + + *ranges = dirty.ranges; + + /* + * The memory listener is synchronous and is only used to calculate the + * range for dirty tracking. Unregister it after we are done as we are not + * interested in any follow-up updates. + */ + memory_listener_unregister(&dirty.listener); +} + +static void vfio_devices_dma_logging_stop(VFIOContainer *container) +{ + uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature), + sizeof(uint64_t))] = {}; + struct vfio_device_feature *feature = (struct vfio_device_feature *)buf; + VFIODevice *vbasedev; + VFIOGroup *group; + + feature->argsz = sizeof(buf); + feature->flags = VFIO_DEVICE_FEATURE_SET | + VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP; + + QLIST_FOREACH(group, &container->group_list, container_next) { + QLIST_FOREACH(vbasedev, &group->device_list, next) { + if (!vbasedev->dirty_tracking) { + continue; + } + + if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) { + warn_report("%s: Failed to stop DMA logging, err %d (%s)", + vbasedev->name, -errno, strerror(errno)); + } + vbasedev->dirty_tracking = false; + } + } +} + +static struct vfio_device_feature * +vfio_device_feature_dma_logging_start_create(VFIOContainer *container, + VFIODirtyRanges *tracking) +{ + struct vfio_device_feature *feature; + size_t feature_size; + struct vfio_device_feature_dma_logging_control *control; + struct vfio_device_feature_dma_logging_range *ranges; + + feature_size = sizeof(struct vfio_device_feature) + + sizeof(struct vfio_device_feature_dma_logging_control); + feature = g_try_malloc0(feature_size); + if (!feature) { + errno = ENOMEM; + return NULL; + } + feature->argsz = feature_size; + feature->flags = VFIO_DEVICE_FEATURE_SET | + VFIO_DEVICE_FEATURE_DMA_LOGGING_START; + + control = (struct vfio_device_feature_dma_logging_control *)feature->data; + control->page_size = qemu_real_host_page_size(); + + /* + * DMA logging uAPI guarantees support for at least as many ranges as + * fit into a single host kernel base page.
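+ * With a 4 KiB base page and 16-byte {iova, length} range entries that
+ * is at least 256 ranges, while only two are ever needed here: the
+ * 32-bit and the 64-bit window.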
+ */ + control->num_ranges = !!tracking->max32 + !!tracking->max64; + ranges = g_try_new0(struct vfio_device_feature_dma_logging_range, + control->num_ranges); + if (!ranges) { + g_free(feature); + errno = ENOMEM; + + return NULL; + } + + control->ranges = (__u64)(uintptr_t)ranges; + if (tracking->max32) { + ranges->iova = tracking->min32; + ranges->length = (tracking->max32 - tracking->min32) + 1; + ranges++; + } + if (tracking->max64) { + ranges->iova = tracking->min64; + ranges->length = (tracking->max64 - tracking->min64) + 1; + } + + trace_vfio_device_dirty_tracking_start(control->num_ranges, + tracking->min32, tracking->max32, + tracking->min64, tracking->max64); + + return feature; +} + +static void vfio_device_feature_dma_logging_start_destroy( + struct vfio_device_feature *feature) +{ + struct vfio_device_feature_dma_logging_control *control = + (struct vfio_device_feature_dma_logging_control *)feature->data; + struct vfio_device_feature_dma_logging_range *ranges = + (struct vfio_device_feature_dma_logging_range *)(uintptr_t)control->ranges; + + g_free(ranges); + g_free(feature); +} + +static int vfio_devices_dma_logging_start(VFIOContainer *container) +{ + struct vfio_device_feature *feature; + VFIODirtyRanges ranges; + VFIODevice *vbasedev; + VFIOGroup *group; + int ret = 0; + + vfio_dirty_tracking_init(container, &ranges); + feature = vfio_device_feature_dma_logging_start_create(container, + &ranges); + if (!feature) { + return -errno; + } + + QLIST_FOREACH(group, &container->group_list, container_next) { + QLIST_FOREACH(vbasedev, &group->device_list, next) { + if (vbasedev->dirty_tracking) { + continue; + } + + ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature); + if (ret) { + ret = -errno; + error_report("%s: Failed to start DMA logging, err %d (%s)", + vbasedev->name, ret, strerror(errno)); + goto out; + } + vbasedev->dirty_tracking = true; + } + } + +out: + if (ret) { + vfio_devices_dma_logging_stop(container); + } + + vfio_device_feature_dma_logging_start_destroy(feature); + + return ret; } static void vfio_listener_log_global_start(MemoryListener *listener) { VFIOContainer *container = container_of(listener, VFIOContainer, listener); + int ret; - vfio_set_dirty_page_tracking(container, true); + if (vfio_devices_all_device_dirty_tracking(container)) { + ret = vfio_devices_dma_logging_start(container); + } else { + ret = vfio_set_dirty_page_tracking(container, true); + } + + if (ret) { + error_report("vfio: Could not start dirty page tracking, err: %d (%s)", + ret, strerror(-ret)); + vfio_set_migration_error(ret); + } } static void vfio_listener_log_global_stop(MemoryListener *listener) { VFIOContainer *container = container_of(listener, VFIOContainer, listener); + int ret = 0; - vfio_set_dirty_page_tracking(container, false); + if (vfio_devices_all_device_dirty_tracking(container)) { + vfio_devices_dma_logging_stop(container); + } else { + ret = vfio_set_dirty_page_tracking(container, false); + } + + if (ret) { + error_report("vfio: Could not stop dirty page tracking, err: %d (%s)", + ret, strerror(-ret)); + vfio_set_migration_error(ret); + } } -static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, - uint64_t size, ram_addr_t ram_addr) +static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova, + hwaddr size, void *bitmap) +{ + uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) + + sizeof(struct vfio_device_feature_dma_logging_report), + sizeof(__u64))] = {}; + struct vfio_device_feature *feature = (struct 
vfio_device_feature *)buf; + struct vfio_device_feature_dma_logging_report *report = + (struct vfio_device_feature_dma_logging_report *)feature->data; + + report->iova = iova; + report->length = size; + report->page_size = qemu_real_host_page_size(); + report->bitmap = (__u64)(uintptr_t)bitmap; + + feature->argsz = sizeof(buf); + feature->flags = VFIO_DEVICE_FEATURE_GET | + VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT; + + if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) { + return -errno; + } + + return 0; +} + +static int vfio_devices_query_dirty_bitmap(VFIOContainer *container, + VFIOBitmap *vbmap, hwaddr iova, + hwaddr size) +{ + VFIODevice *vbasedev; + VFIOGroup *group; + int ret; + + QLIST_FOREACH(group, &container->group_list, container_next) { + QLIST_FOREACH(vbasedev, &group->device_list, next) { + ret = vfio_device_dma_logging_report(vbasedev, iova, size, + vbmap->bitmap); + if (ret) { + error_report("%s: Failed to get DMA logging report, iova: " + "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx + ", err: %d (%s)", + vbasedev->name, iova, size, ret, strerror(-ret)); + + return ret; + } + } + } + + return 0; +} + +static int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap, + hwaddr iova, hwaddr size) { struct vfio_iommu_type1_dirty_bitmap *dbitmap; struct vfio_iommu_type1_dirty_bitmap_get *range; - uint64_t pages; int ret; - if (!container->dirty_pages_supported) { - cpu_physical_memory_set_dirty_range(ram_addr, size, - tcg_enabled() ? DIRTY_CLIENTS_ALL : - DIRTY_CLIENTS_NOCODE); - return 0; - } - dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range)); dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range); @@ -1323,36 +1726,63 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, * to qemu_real_host_page_size. */ range->bitmap.pgsize = qemu_real_host_page_size(); - - pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size(); - range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / - BITS_PER_BYTE; - range->bitmap.data = g_try_malloc0(range->bitmap.size); - if (!range->bitmap.data) { - ret = -ENOMEM; - goto err_out; - } + range->bitmap.size = vbmap->size; + range->bitmap.data = (__u64 *)vbmap->bitmap; ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap); if (ret) { + ret = -errno; error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64 " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova, (uint64_t)range->size, errno); - goto err_out; } - cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data, - ram_addr, pages); - - trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size, - range->bitmap.size, ram_addr); -err_out: - g_free(range->bitmap.data); g_free(dbitmap); return ret; } +static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, + uint64_t size, ram_addr_t ram_addr) +{ + bool all_device_dirty_tracking = + vfio_devices_all_device_dirty_tracking(container); + VFIOBitmap vbmap; + int ret; + + if (!container->dirty_pages_supported && !all_device_dirty_tracking) { + cpu_physical_memory_set_dirty_range(ram_addr, size, + tcg_enabled() ? 
DIRTY_CLIENTS_ALL : + DIRTY_CLIENTS_NOCODE); + return 0; + } + + ret = vfio_bitmap_alloc(&vbmap, size); + if (ret) { + return ret; + } + + if (all_device_dirty_tracking) { + ret = vfio_devices_query_dirty_bitmap(container, &vbmap, iova, size); + } else { + ret = vfio_query_dirty_bitmap(container, &vbmap, iova, size); + } + + if (ret) { + goto out; + } + + cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr, + vbmap.pages); + + trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size, + ram_addr); +out: + g_free(vbmap.bitmap); + + return ret; +} + typedef struct { IOMMUNotifier n; VFIOGuestIOMMU *giommu; @@ -1366,29 +1796,33 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) VFIOContainer *container = giommu->container; hwaddr iova = iotlb->iova + giommu->iommu_offset; ram_addr_t translated_addr; + int ret = -EINVAL; trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask); if (iotlb->target_as != &address_space_memory) { error_report("Wrong target AS \"%s\", only system memory is allowed", iotlb->target_as->name ? iotlb->target_as->name : "none"); - return; + goto out; } rcu_read_lock(); if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) { - int ret; - ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1, translated_addr); if (ret) { error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", " - "0x%"HWADDR_PRIx") = %d (%m)", - container, iova, - iotlb->addr_mask + 1, ret); + "0x%"HWADDR_PRIx") = %d (%s)", + container, iova, iotlb->addr_mask + 1, ret, + strerror(-ret)); } } rcu_read_unlock(); + +out: + if (ret) { + vfio_set_migration_error(ret); + } } static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section, @@ -1481,13 +1915,19 @@ static void vfio_listener_log_sync(MemoryListener *listener, MemoryRegionSection *section) { VFIOContainer *container = container_of(listener, VFIOContainer, listener); + int ret; if (vfio_listener_skipped_section(section)) { return; } if (vfio_devices_all_dirty_tracking(container)) { - vfio_sync_dirty_bitmap(container, section); + ret = vfio_sync_dirty_bitmap(container, section); + if (ret) { + error_report("vfio: Failed to sync dirty bitmap, err: %d (%s)", ret, + strerror(-ret)); + vfio_set_migration_error(ret); + } } } diff --git a/hw/vfio/display.c b/hw/vfio/display.c index 78f4d82c1c..bec864f482 100644 --- a/hw/vfio/display.c +++ b/hw/vfio/display.c @@ -14,6 +14,7 @@ #include #include +#include "qemu/error-report.h" #include "hw/display/edid.h" #include "ui/console.h" #include "qapi/error.h" diff --git a/hw/vfio/igd.c b/hw/vfio/igd.c index afe3fe7efc..b31ee79c60 100644 --- a/hw/vfio/igd.c +++ b/hw/vfio/igd.c @@ -12,6 +12,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "qemu/error-report.h" #include "qapi/error.h" #include "hw/hw.h" #include "hw/nvram/fw_cfg.h" diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c index a2c3d9bade..6b58dddb88 100644 --- a/hw/vfio/migration.c +++ b/hw/vfio/migration.c @@ -11,6 +11,7 @@ #include "qemu/main-loop.h" #include "qemu/cutils.h" #include "qemu/units.h" +#include "qemu/error-report.h" #include #include @@ -521,7 +522,7 @@ static void vfio_migration_state_notifier(Notifier *notifier, void *data) } } -static void vfio_migration_exit(VFIODevice *vbasedev) +static void vfio_migration_free(VFIODevice *vbasedev) { g_free(vbasedev->migration); vbasedev->migration = NULL; @@ -555,6 +556,19 @@ static int vfio_migration_query_flags(VFIODevice *vbasedev, uint64_t *mig_flags) return 0; } +static bool 
vfio_dma_logging_supported(VFIODevice *vbasedev) +{ + uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature), + sizeof(uint64_t))] = {}; + struct vfio_device_feature *feature = (struct vfio_device_feature *)buf; + + feature->argsz = sizeof(buf); + feature->flags = VFIO_DEVICE_FEATURE_PROBE | + VFIO_DEVICE_FEATURE_DMA_LOGGING_START; + + return !ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature); +} + static int vfio_migration_init(VFIODevice *vbasedev) { int ret; @@ -589,6 +603,8 @@ static int vfio_migration_init(VFIODevice *vbasedev) migration->device_state = VFIO_DEVICE_STATE_RUNNING; migration->data_fd = -1; + vbasedev->dirty_pages_supported = vfio_dma_logging_supported(vbasedev); + oid = vmstate_if_get_id(VMSTATE_IF(DEVICE(obj))); if (oid) { path = g_strdup_printf("%s/vfio", oid); @@ -616,7 +632,7 @@ int64_t vfio_mig_bytes_transferred(void) return bytes_transferred; } -int vfio_migration_probe(VFIODevice *vbasedev, Error **errp) +int vfio_migration_realize(VFIODevice *vbasedev, Error **errp) { int ret = -ENOTSUP; @@ -634,6 +650,11 @@ int vfio_migration_probe(VFIODevice *vbasedev, Error **errp) return ret; } + ret = vfio_block_giommu_migration(errp); + if (ret) { + return ret; + } + trace_vfio_migration_probe(vbasedev->name); return 0; @@ -649,7 +670,7 @@ add_blocker: return ret; } -void vfio_migration_finalize(VFIODevice *vbasedev) +void vfio_migration_exit(VFIODevice *vbasedev) { if (vbasedev->migration) { VFIOMigration *migration = vbasedev->migration; @@ -657,7 +678,7 @@ void vfio_migration_finalize(VFIODevice *vbasedev) remove_migration_state_change_notifier(&migration->migration_state); qemu_del_vm_change_state_handler(migration->vm_state); unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev); - vfio_migration_exit(vbasedev); + vfio_migration_free(vbasedev); vfio_unblock_multiple_devices_migration(); } diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index 939dcc3d4a..73874a94de 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -2066,6 +2066,54 @@ static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp) return 0; } +static int vfio_setup_rebar_ecap(VFIOPCIDevice *vdev, uint16_t pos) +{ + uint32_t ctrl; + int i, nbar; + + ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL); + nbar = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; + + for (i = 0; i < nbar; i++) { + uint32_t cap; + int size; + + ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL + (i * 8)); + size = (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT; + + /* The cap register reports sizes 1MB to 128TB, with 4 reserved bits */ + cap = size <= 27 ? 1U << (size + 4) : 0; + + /* + * The PCIe spec (v6.0.1, 7.8.6) requires HW to support at least one + * size in the range 1MB to 512GB. We intend to mask all sizes except + * the one currently enabled in the size field, therefore if it's + * outside the range, hide the whole capability as this virtualization + * trick won't work. If >512GB resizable BARs start to appear, we + * might need an opt-in or reservation scheme in the kernel. + */ + if (!(cap & PCI_REBAR_CAP_SIZES)) { + return -EINVAL; + } + + /* Hide all sizes reported in the ctrl reg per above requirement. */ + ctrl &= (PCI_REBAR_CTRL_BAR_SIZE | + PCI_REBAR_CTRL_NBAR_MASK | + PCI_REBAR_CTRL_BAR_IDX); + + /* + * The BAR size field is RW, however we've mangled the capability + * register such that we only report a single size, ie. the current + * BAR size. 
A write of an unsupported value is undefined, therefore + * the register field is essentially RO. + */ + vfio_add_emulated_long(vdev, pos + PCI_REBAR_CAP + (i * 8), cap, ~0); + vfio_add_emulated_long(vdev, pos + PCI_REBAR_CTRL + (i * 8), ctrl, ~0); + } + + return 0; +} + static void vfio_add_ext_cap(VFIOPCIDevice *vdev) { PCIDevice *pdev = &vdev->pdev; @@ -2139,9 +2187,13 @@ static void vfio_add_ext_cap(VFIOPCIDevice *vdev) case 0: /* kernel masked capability */ case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */ case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */ - case PCI_EXT_CAP_ID_REBAR: /* Can't expose read-only */ trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next); break; + case PCI_EXT_CAP_ID_REBAR: + if (!vfio_setup_rebar_ecap(vdev, next)) { + pcie_add_capability(pdev, cap_id, cap_ver, next, size); + } + break; default: pcie_add_capability(pdev, cap_id, cap_ver, next, size); } @@ -2856,6 +2908,8 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) int groupid; int i, ret; bool is_mdev; + char uuid[UUID_FMT_LEN]; + char *name; if (!vbasedev->sysfsdev) { if (!(~vdev->host.domain || ~vdev->host.bus || @@ -2936,7 +2990,15 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) goto error; } - ret = vfio_get_device(group, vbasedev->name, vbasedev, errp); + if (!qemu_uuid_is_null(&vdev->vf_token)) { + qemu_uuid_unparse(&vdev->vf_token, uuid); + name = g_strdup_printf("%s vf_token=%s", vbasedev->name, uuid); + } else { + name = g_strdup(vbasedev->name); + } + + ret = vfio_get_device(group, name, vbasedev, errp); + g_free(name); if (ret) { vfio_put_group(group); goto error; @@ -3145,7 +3207,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) } if (!pdev->failover_pair_id) { - ret = vfio_migration_probe(vbasedev, errp); + ret = vfio_migration_realize(vbasedev, errp); if (ret) { error_report("%s: Migration disabled", vbasedev->name); } @@ -3185,6 +3247,7 @@ static void vfio_instance_finalize(Object *obj) */ vfio_put_device(vdev); vfio_put_group(group); + vfio_migration_finalize(); } static void vfio_exitfn(PCIDevice *pdev) @@ -3203,7 +3266,7 @@ static void vfio_exitfn(PCIDevice *pdev) } vfio_teardown_msi(vdev); vfio_bars_exit(vdev); - vfio_migration_finalize(&vdev->vbasedev); + vfio_migration_exit(&vdev->vbasedev); } static void vfio_pci_reset(DeviceState *dev) @@ -3267,6 +3330,7 @@ static void vfio_instance_init(Object *obj) static Property vfio_pci_dev_properties[] = { DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host), + DEFINE_PROP_UUID_NODEFAULT("vf-token", VFIOPCIDevice, vf_token), DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev), DEFINE_PROP_ON_OFF_AUTO("x-pre-copy-dirty-page-tracking", VFIOPCIDevice, vbasedev.pre_copy_dirty_page_tracking, diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h index 177abcc8fb..2674476d6c 100644 --- a/hw/vfio/pci.h +++ b/hw/vfio/pci.h @@ -137,6 +137,7 @@ struct VFIOPCIDevice { VFIOVGA *vga; /* 0xa0000, 0x3b0, 0x3c0 */ void *igd_opregion; PCIHostDeviceAddress host; + QemuUUID vf_token; EventNotifier err_notifier; EventNotifier req_notifier; int (*resetfn)(struct VFIOPCIDevice *); diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index 669d9fe07c..646e42fd27 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -96,14 +96,15 @@ vfio_pci_igd_lpc_bridge_enabled(const char *name) "%s" vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)" vfio_region_read(char *name, int index, 
uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64 vfio_iommu_map_notify(const char *op, uint64_t iova_start, uint64_t iova_end) "iommu %s @ 0x%"PRIx64" - 0x%"PRIx64 -vfio_listener_region_add_skip(uint64_t start, uint64_t end) "SKIPPING region_add 0x%"PRIx64" - 0x%"PRIx64 +vfio_listener_region_skip(const char *name, uint64_t start, uint64_t end) "SKIPPING %s 0x%"PRIx64" - 0x%"PRIx64 vfio_spapr_group_attach(int groupfd, int tablefd) "Attached groupfd %d to liobn fd %d" vfio_listener_region_add_iommu(uint64_t start, uint64_t end) "region_add [iommu] 0x%"PRIx64" - 0x%"PRIx64 vfio_listener_region_add_ram(uint64_t iova_start, uint64_t iova_end, void *vaddr) "region_add [ram] 0x%"PRIx64" - 0x%"PRIx64" [%p]" vfio_known_safe_misalignment(const char *name, uint64_t iova, uint64_t offset_within_region, uintptr_t page_size) "Region \"%s\" iova=0x%"PRIx64" offset_within_region=0x%"PRIx64" qemu_real_host_page_size=0x%"PRIxPTR vfio_listener_region_add_no_dma_map(const char *name, uint64_t iova, uint64_t size, uint64_t page_size) "Region \"%s\" 0x%"PRIx64" size=0x%"PRIx64" is not aligned to 0x%"PRIx64" and cannot be mapped for DMA" -vfio_listener_region_del_skip(uint64_t start, uint64_t end) "SKIPPING region_del 0x%"PRIx64" - 0x%"PRIx64 vfio_listener_region_del(uint64_t start, uint64_t end) "region_del 0x%"PRIx64" - 0x%"PRIx64 +vfio_device_dirty_tracking_update(uint64_t start, uint64_t end, uint64_t min, uint64_t max) "section 0x%"PRIx64" - 0x%"PRIx64" -> update [0x%"PRIx64" - 0x%"PRIx64"]" +vfio_device_dirty_tracking_start(int nr_ranges, uint64_t min32, uint64_t max32, uint64_t min64, uint64_t max64) "nr_ranges %d 32:[0x%"PRIx64" - 0x%"PRIx64"], 64:[0x%"PRIx64" - 0x%"PRIx64"]" vfio_disconnect_container(int fd) "close container->fd=%d" vfio_put_group(int fd) "close group->fd=%d" vfio_get_device(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u" @@ -117,7 +118,7 @@ vfio_region_mmaps_set_enabled(const char *name, bool enabled) "Region %s mmaps e vfio_region_unmap(const char *name, unsigned long offset, unsigned long end) "Region %s unmap [0x%lx - 0x%lx]" vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries" vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]" -vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%0x8" +vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x" vfio_dma_unmap_overflow_workaround(void) "" vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64 vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64 diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index a87c5f39a2..8f8d05cf9b 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -50,6 +50,7 @@ vhost_vdpa_set_vring_ready(void *dev) "dev: %p" vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s" vhost_vdpa_set_config(void *dev, uint32_t offset, uint32_t size, uint32_t flags) "dev: %p offset: %"PRIu32" size: %"PRIu32" flags: 0x%"PRIx32 vhost_vdpa_get_config(void *dev, void *config, uint32_t config_len) "dev: %p config: %p config_len: %"PRIu32 +vhost_vdpa_suspend(void *dev) 
"dev: %p" vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d" vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size: %llu refcnt: %d fd: %d log: %p" vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags: 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64 diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index 4307296358..bd7c12b6d3 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -68,7 +68,7 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp) */ static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq) { - return svq->vring.num - (svq->shadow_avail_idx - svq->shadow_used_idx); + return svq->num_free; } /** @@ -263,6 +263,7 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, return -EINVAL; } + svq->num_free -= ndescs; svq->desc_state[qemu_head].elem = elem; svq->desc_state[qemu_head].ndescs = ndescs; vhost_svq_kick(svq); @@ -449,6 +450,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id); svq->desc_next[last_used_chain] = svq->free_head; svq->free_head = used_elem.id; + svq->num_free += num; *len = used_elem.len; return g_steal_pointer(&svq->desc_state[used_elem.id].elem); @@ -522,7 +524,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq, size_t vhost_svq_poll(VhostShadowVirtqueue *svq) { int64_t start_us = g_get_monotonic_time(); - uint32_t len; + uint32_t len = 0; do { if (vhost_svq_more_used(svq)) { @@ -659,6 +661,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, svq->iova_tree = iova_tree; svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq)); + svq->num_free = svq->vring.num; driver_size = vhost_svq_driver_area_size(svq); device_size = vhost_svq_device_area_size(svq); svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size); @@ -694,13 +697,17 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq) g_autofree VirtQueueElement *elem = NULL; elem = g_steal_pointer(&svq->desc_state[i].elem); if (elem) { - virtqueue_detach_element(svq->vq, elem, 0); + /* + * TODO: This is ok for networking, but other kinds of devices + * might have problems with just unpop these. 
+ */ + virtqueue_unpop(svq->vq, elem, 0); } } next_avail_elem = g_steal_pointer(&svq->next_guest_avail_elem); if (next_avail_elem) { - virtqueue_detach_element(svq->vq, next_avail_elem, 0); + virtqueue_unpop(svq->vq, next_avail_elem, 0); } svq->vq = NULL; g_free(svq->desc_next); diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h index 926a4897b1..6efe051a70 100644 --- a/hw/virtio/vhost-shadow-virtqueue.h +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -107,6 +107,9 @@ typedef struct VhostShadowVirtqueue { /* Next head to consume from the device */ uint16_t last_used_idx; + + /* Size of SVQ vring free descriptors */ + uint16_t num_free; } VhostShadowVirtqueue; bool vhost_svq_valid_features(uint64_t features, Error **errp); diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c index fe3da32c74..d6927b610a 100644 --- a/hw/virtio/vhost-user-gpio.c +++ b/hw/virtio/vhost-user-gpio.c @@ -16,6 +16,7 @@ #include "trace.h" #define REALIZE_CONNECTION_RETRIES 3 +#define VHOST_NVQS 2 /* Features required from VirtIO */ static const int feature_bits[] = { @@ -208,8 +209,7 @@ static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserGPIO *gpio) { virtio_delete_queue(gpio->command_vq); virtio_delete_queue(gpio->interrupt_vq); - g_free(gpio->vhost_dev.vqs); - gpio->vhost_dev.vqs = NULL; + g_free(gpio->vhost_vqs); virtio_cleanup(vdev); vhost_user_cleanup(&gpio->vhost_user); } @@ -229,6 +229,9 @@ static int vu_gpio_connect(DeviceState *dev, Error **errp) vhost_dev_set_config_notifier(vhost_dev, &gpio_ops); gpio->vhost_user.supports_config = true; + gpio->vhost_dev.nvqs = VHOST_NVQS; + gpio->vhost_dev.vqs = gpio->vhost_vqs; + ret = vhost_dev_init(vhost_dev, &gpio->vhost_user, VHOST_BACKEND_TYPE_USER, 0, errp); if (ret < 0) { @@ -347,10 +350,9 @@ static void vu_gpio_device_realize(DeviceState *dev, Error **errp) virtio_init(vdev, VIRTIO_ID_GPIO, sizeof(gpio->config)); - gpio->vhost_dev.nvqs = 2; gpio->command_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output); gpio->interrupt_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output); - gpio->vhost_dev.vqs = g_new0(struct vhost_virtqueue, gpio->vhost_dev.nvqs); + gpio->vhost_vqs = g_new0(struct vhost_virtqueue, VHOST_NVQS); gpio->connected = false; diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c index dc5c828ba6..4eef3f0633 100644 --- a/hw/virtio/vhost-user-i2c.c +++ b/hw/virtio/vhost-user-i2c.c @@ -128,6 +128,14 @@ static void vu_i2c_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) { VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + /* + * We don't support interrupts, return early if index is set to + * VIRTIO_CONFIG_IRQ_IDX. + */ + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return; + } + vhost_virtqueue_mask(&i2c->vhost_dev, vdev, idx, mask); } @@ -135,6 +143,14 @@ static bool vu_i2c_guest_notifier_pending(VirtIODevice *vdev, int idx) { VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + /* + * We don't support interrupts, return early if index is set to + * VIRTIO_CONFIG_IRQ_IDX. 
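+ * (VIRTIO_CONFIG_IRQ_IDX is -1, so it must not be used to index the
+ * per-virtqueue array.)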
+ */ + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return false; + } + return vhost_virtqueue_pending(&i2c->vhost_dev, idx); } @@ -143,8 +159,6 @@ static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserI2C *i2c) vhost_user_cleanup(&i2c->vhost_user); virtio_delete_queue(i2c->vq); virtio_cleanup(vdev); - g_free(i2c->vhost_dev.vqs); - i2c->vhost_dev.vqs = NULL; } static int vu_i2c_connect(DeviceState *dev) @@ -228,6 +242,7 @@ static void vu_i2c_device_realize(DeviceState *dev, Error **errp) ret = vhost_dev_init(&i2c->vhost_dev, &i2c->vhost_user, VHOST_BACKEND_TYPE_USER, 0, errp); if (ret < 0) { + g_free(i2c->vhost_dev.vqs); do_vhost_user_cleanup(vdev, i2c); } @@ -239,10 +254,12 @@ static void vu_i2c_device_unrealize(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostUserI2C *i2c = VHOST_USER_I2C(dev); + struct vhost_virtqueue *vhost_vqs = i2c->vhost_dev.vqs; /* This will stop vhost backend if appropriate. */ vu_i2c_set_status(vdev, 0); vhost_dev_cleanup(&i2c->vhost_dev); + g_free(vhost_vqs); do_vhost_user_cleanup(vdev, i2c); } diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c index 201a39e220..efc54cd3fb 100644 --- a/hw/virtio/vhost-user-rng.c +++ b/hw/virtio/vhost-user-rng.c @@ -229,6 +229,7 @@ static void vu_rng_device_realize(DeviceState *dev, Error **errp) return; vhost_dev_init_failed: + g_free(rng->vhost_dev.vqs); virtio_delete_queue(rng->req_vq); virtio_add_queue_failed: virtio_cleanup(vdev); @@ -239,12 +240,12 @@ static void vu_rng_device_unrealize(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostUserRNG *rng = VHOST_USER_RNG(dev); + struct vhost_virtqueue *vhost_vqs = rng->vhost_dev.vqs; vu_rng_set_status(vdev, 0); vhost_dev_cleanup(&rng->vhost_dev); - g_free(rng->vhost_dev.vqs); - rng->vhost_dev.vqs = NULL; + g_free(vhost_vqs); virtio_delete_queue(rng->req_vq); virtio_cleanup(vdev); vhost_user_cleanup(&rng->vhost_user); diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index e68daa35d4..74a2a28663 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -40,19 +40,9 @@ #define VHOST_MEMORY_BASELINE_NREGIONS 8 #define VHOST_USER_F_PROTOCOL_FEATURES 30 -#define VHOST_USER_SLAVE_MAX_FDS 8 +#define VHOST_USER_BACKEND_MAX_FDS 8 -/* - * Set maximum number of RAM slots supported to - * the maximum number supported by the target - * hardware plaform. 
- */ -#if defined(TARGET_X86) || defined(TARGET_X86_64) || \ - defined(TARGET_ARM) || defined(TARGET_AARCH64) -#include "hw/acpi/acpi.h" -#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS - -#elif defined(TARGET_PPC) || defined(TARGET_PPC64) +#if defined(TARGET_PPC) || defined(TARGET_PPC64) #include "hw/ppc/spapr.h" #define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS @@ -71,12 +61,12 @@ enum VhostUserProtocolFeature { VHOST_USER_PROTOCOL_F_RARP = 2, VHOST_USER_PROTOCOL_F_REPLY_ACK = 3, VHOST_USER_PROTOCOL_F_NET_MTU = 4, - VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5, + VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5, VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6, VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7, VHOST_USER_PROTOCOL_F_PAGEFAULT = 8, VHOST_USER_PROTOCOL_F_CONFIG = 9, - VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10, + VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10, VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12, VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13, @@ -110,7 +100,7 @@ typedef enum VhostUserRequest { VHOST_USER_SET_VRING_ENABLE = 18, VHOST_USER_SEND_RARP = 19, VHOST_USER_NET_SET_MTU = 20, - VHOST_USER_SET_SLAVE_REQ_FD = 21, + VHOST_USER_SET_BACKEND_REQ_FD = 21, VHOST_USER_IOTLB_MSG = 22, VHOST_USER_SET_VRING_ENDIAN = 23, VHOST_USER_GET_CONFIG = 24, @@ -134,11 +124,11 @@ typedef enum VhostUserRequest { } VhostUserRequest; typedef enum VhostUserSlaveRequest { - VHOST_USER_SLAVE_NONE = 0, - VHOST_USER_SLAVE_IOTLB_MSG = 1, - VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2, - VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3, - VHOST_USER_SLAVE_MAX + VHOST_USER_BACKEND_NONE = 0, + VHOST_USER_BACKEND_IOTLB_MSG = 1, + VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2, + VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3, + VHOST_USER_BACKEND_MAX } VhostUserSlaveRequest; typedef struct VhostUserMemoryRegion { @@ -483,6 +473,7 @@ static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset, assert((uintptr_t)addr == addr); mr = memory_region_from_host((void *)(uintptr_t)addr, offset); *fd = memory_region_get_fd(mr); + *offset += mr->ram_block->fd_offset; return mr; } @@ -1638,13 +1629,13 @@ static gboolean slave_read(QIOChannel *ioc, GIOCondition condition, } switch (hdr.request) { - case VHOST_USER_SLAVE_IOTLB_MSG: + case VHOST_USER_BACKEND_IOTLB_MSG: ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb); break; - case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG : + case VHOST_USER_BACKEND_CONFIG_CHANGE_MSG: ret = vhost_user_slave_handle_config_change(dev); break; - case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG: + case VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG: ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area, fd ? 
fd[0] : -1); break; @@ -1696,7 +1687,7 @@ fdcleanup: static int vhost_setup_slave_channel(struct vhost_dev *dev) { VhostUserMsg msg = { - .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD, + .hdr.request = VHOST_USER_SET_BACKEND_REQ_FD, .hdr.flags = VHOST_USER_VERSION, }; struct vhost_user *u = dev->opaque; @@ -1707,7 +1698,7 @@ static int vhost_setup_slave_channel(struct vhost_dev *dev) QIOChannel *ioc; if (!virtio_has_feature(dev->protocol_features, - VHOST_USER_PROTOCOL_F_SLAVE_REQ)) { + VHOST_USER_PROTOCOL_F_BACKEND_REQ)) { return 0; } @@ -2031,8 +2022,8 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, } else { if (virtio_has_feature(protocol_features, VHOST_USER_PROTOCOL_F_CONFIG)) { - warn_reportf_err(*errp, "vhost-user backend supports " - "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not."); + warn_report("vhost-user backend supports " + "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not."); protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG); } } @@ -2065,7 +2056,7 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) && !(virtio_has_feature(dev->protocol_features, - VHOST_USER_PROTOCOL_F_SLAVE_REQ) && + VHOST_USER_PROTOCOL_F_BACKEND_REQ) && virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_REPLY_ACK))) { error_setg(errp, "IOMMU support requires reply-ack and " @@ -2677,7 +2668,20 @@ static int vhost_user_dev_start(struct vhost_dev *dev, bool started) VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK); } else { - return vhost_user_set_status(dev, 0); + return 0; + } +} + +static void vhost_user_reset_status(struct vhost_dev *dev) +{ + /* Set device status only for last queue pair */ + if (dev->vq_index + dev->nvqs != dev->vq_index_end) { + return; + } + + if (virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_STATUS)) { + vhost_user_set_status(dev, 0); } } @@ -2716,4 +2720,5 @@ const VhostOps user_ops = { .vhost_get_inflight_fd = vhost_user_get_inflight_fd, .vhost_set_inflight_fd = vhost_user_set_inflight_fd, .vhost_dev_start = vhost_user_dev_start, + .vhost_reset_status = vhost_user_reset_status, }; diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 542e003101..b3094e8a8b 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -26,6 +26,7 @@ #include "cpu.h" #include "trace.h" #include "qapi/error.h" +#include "hw/virtio/virtio-access.h" /* * Return one past the end of the end of section. Be careful with uint64_t @@ -60,13 +61,21 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, iova_min, section->offset_within_address_space); return true; } + /* + * While using vIOMMU, sometimes the section will be larger than iova_max, + * but the memory that actually maps is smaller, so move the check to + * function vhost_vdpa_iommu_map_notify(). 
That function will use the actual + * size that maps to the kernel + */ - llend = vhost_vdpa_section_end(section); - if (int128_gt(llend, int128_make64(iova_max))) { - error_report("RAM section out of device range (max=0x%" PRIx64 - ", end addr=0x%" PRIx64 ")", - iova_max, int128_get64(llend)); - return true; + if (!memory_region_is_iommu(section->mr)) { + llend = vhost_vdpa_section_end(section); + if (int128_gt(llend, int128_make64(iova_max))) { + error_report("RAM section out of device range (max=0x%" PRIx64 + ", end addr=0x%" PRIx64 ")", + iova_max, int128_get64(llend)); + return true; + } } return false; @@ -185,6 +194,115 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener) v->iotlb_batch_begin_sent = false; } +static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) +{ + struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n); + + hwaddr iova = iotlb->iova + iommu->iommu_offset; + struct vhost_vdpa *v = iommu->dev; + void *vaddr; + int ret; + Int128 llend; + + if (iotlb->target_as != &address_space_memory) { + error_report("Wrong target AS \"%s\", only system memory is allowed", + iotlb->target_as->name ? iotlb->target_as->name : "none"); + return; + } + RCU_READ_LOCK_GUARD(); + /* check if RAM section out of device range */ + llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova)); + if (int128_gt(llend, int128_make64(v->iova_range.last))) { + error_report("RAM section out of device range (max=0x%" PRIx64 + ", end addr=0x%" PRIx64 ")", + v->iova_range.last, int128_get64(llend)); + return; + } + + if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { + bool read_only; + + if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) { + return; + } + ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, + iotlb->addr_mask + 1, vaddr, read_only); + if (ret) { + error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", " + "0x%" HWADDR_PRIx ", %p) = %d (%m)", + v, iova, iotlb->addr_mask + 1, vaddr, ret); + } + } else { + ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, + iotlb->addr_mask + 1); + if (ret) { + error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " + "0x%" HWADDR_PRIx ") = %d (%m)", + v, iova, iotlb->addr_mask + 1, ret); + } + } +} + +static void vhost_vdpa_iommu_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); + + struct vdpa_iommu *iommu; + Int128 end; + int iommu_idx; + IOMMUMemoryRegion *iommu_mr; + int ret; + + iommu_mr = IOMMU_MEMORY_REGION(section->mr); + + iommu = g_malloc0(sizeof(*iommu)); + end = int128_add(int128_make64(section->offset_within_region), + section->size); + end = int128_sub(end, int128_one()); + iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr, + MEMTXATTRS_UNSPECIFIED); + iommu->iommu_mr = iommu_mr; + iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify, + IOMMU_NOTIFIER_IOTLB_EVENTS, + section->offset_within_region, + int128_get64(end), + iommu_idx); + iommu->iommu_offset = section->offset_within_address_space - + section->offset_within_region; + iommu->dev = v; + + ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL); + if (ret) { + g_free(iommu); + return; + } + + QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next); + memory_region_iommu_replay(iommu->iommu_mr, &iommu->n); + + return; +} + +static void vhost_vdpa_iommu_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + struct vhost_vdpa *v = 
container_of(listener, struct vhost_vdpa, listener); + + struct vdpa_iommu *iommu; + + QLIST_FOREACH(iommu, &v->iommu_list, iommu_next) + { + if (MEMORY_REGION(iommu->iommu_mr) == section->mr && + iommu->n.start == section->offset_within_region) { + memory_region_unregister_iommu_notifier(section->mr, &iommu->n); + QLIST_REMOVE(iommu, iommu_next); + g_free(iommu); + break; + } + } +} + static void vhost_vdpa_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { @@ -199,6 +317,10 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, v->iova_range.last)) { return; } + if (memory_region_is_iommu(section->mr)) { + vhost_vdpa_iommu_region_add(listener, section); + return; + } if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != (section->offset_within_region & ~TARGET_PAGE_MASK))) { @@ -278,6 +400,9 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, v->iova_range.last)) { return; } + if (memory_region_is_iommu(section->mr)) { + vhost_vdpa_iommu_region_del(listener, section); + } if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != (section->offset_within_region & ~TARGET_PAGE_MASK))) { @@ -288,7 +413,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); llend = vhost_vdpa_section_end(section); - trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend)); + trace_vhost_vdpa_listener_region_del(v, iova, + int128_get64(int128_sub(llend, int128_one()))); if (int128_ge(int128_make64(iova), llend)) { return; @@ -315,10 +441,28 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, vhost_iova_tree_remove(v->iova_tree, *result); } vhost_vdpa_iotlb_batch_begin_once(v); + /* + * The unmap ioctl doesn't accept a full 64-bit span; split the request + * in two when it covers the whole range. + */ + if (int128_eq(llsize, int128_2_64())) { + llsize = int128_rshift(llsize, 1); + ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, + int128_get64(llsize)); + + if (ret) { + error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " + "0x%" HWADDR_PRIx ") = %d (%m)", + v, iova, int128_get64(llsize), ret); + } + iova += int128_get64(llsize); + } ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, int128_get64(llsize)); + if (ret) { - error_report("vhost_vdpa dma unmap error!"); + error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " + "0x%" HWADDR_PRIx ") = %d (%m)", + v, iova, int128_get64(llsize), ret); } memory_region_unref(section->mr); @@ -431,6 +575,33 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) trace_vhost_vdpa_init(dev, opaque); int ret; + v = opaque; + v->dev = dev; + dev->opaque = opaque ; + v->listener = vhost_vdpa_memory_listener; + v->msg_type = VHOST_IOTLB_MSG_V2; + vhost_vdpa_init_svq(dev, v); + + error_propagate(&dev->migration_blocker, v->migration_blocker); + if (!vhost_vdpa_first_dev(dev)) { + return 0; + } + + /* + * If dev->shadow_vqs_enabled at initialization, the device has been + * started with x-svq=on, so don't block migration + */ + if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) { + /* We don't have dev->features yet */ + uint64_t features; + ret = vhost_vdpa_get_dev_features(dev, &features); + if (unlikely(ret)) { + error_setg_errno(errp, -ret, "Could not get device features"); + return ret; + } + vhost_svq_valid_features(features, &dev->migration_blocker); + } + /* * Similar to VFIO, we end up pinning all guest memory and have to * disable discarding of RAM. @@ -441,17 +612,6 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) return ret; } - v = opaque; - v->dev = dev; - dev->opaque = opaque ; - v->listener = vhost_vdpa_memory_listener; - v->msg_type = VHOST_IOTLB_MSG_V2; - vhost_vdpa_init_svq(dev, v); - - if (!vhost_vdpa_first_dev(dev)) { - return 0; - } - vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER); @@ -577,12 +737,15 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev) assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); v = dev->opaque; trace_vhost_vdpa_cleanup(dev, v); + if (vhost_vdpa_first_dev(dev)) { + ram_block_discard_disable(false); + } + vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); memory_listener_unregister(&v->listener); vhost_vdpa_svq_cleanup(dev); dev->opaque = NULL; - ram_block_discard_disable(false); return 0; } @@ -659,7 +822,8 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) uint64_t features; uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 | 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH | - 0x1ULL << VHOST_BACKEND_F_IOTLB_ASID; + 0x1ULL << VHOST_BACKEND_F_IOTLB_ASID | + 0x1ULL << VHOST_BACKEND_F_SUSPEND; int r; if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) { @@ -689,28 +853,15 @@ static int vhost_vdpa_get_device_id(struct vhost_dev *dev, return ret; } -static void vhost_vdpa_reset_svq(struct vhost_vdpa *v) -{ - if (!v->shadow_vqs_enabled) { - return; - } - - for (unsigned i = 0; i < v->shadow_vqs->len; ++i) { - VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); - vhost_svq_stop(svq); - } -} - static int vhost_vdpa_reset_device(struct vhost_dev *dev) { struct vhost_vdpa *v = dev->opaque; int ret; uint8_t status = 0; - vhost_vdpa_reset_svq(v); - ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS,
@@ -1100,6 +1251,8 @@ static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
 
     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
+
+        vhost_svq_stop(svq);
         vhost_vdpa_svq_unmap_rings(dev, svq);
 
         event_notifier_cleanup(&svq->hdev_kick);
@@ -1107,6 +1260,29 @@ static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
     }
 }
 
+static void vhost_vdpa_suspend(struct vhost_dev *dev)
+{
+    struct vhost_vdpa *v = dev->opaque;
+    int r;
+
+    if (!vhost_vdpa_first_dev(dev)) {
+        return;
+    }
+
+    if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
+        trace_vhost_vdpa_suspend(dev);
+        r = ioctl(v->device_fd, VHOST_VDPA_SUSPEND);
+        if (unlikely(r)) {
+            error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
+        } else {
+            v->suspended = true;
+            return;
+        }
+    }
+
+    vhost_vdpa_reset_device(dev);
+}
+
 static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
 {
     struct vhost_vdpa *v = dev->opaque;
@@ -1121,6 +1297,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
         }
         vhost_vdpa_set_vring_ready(dev);
     } else {
+        vhost_vdpa_suspend(dev);
         vhost_vdpa_svqs_stop(dev);
         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
     }
@@ -1130,16 +1307,31 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
     }
 
     if (started) {
-        memory_listener_register(&v->listener, &address_space_memory);
-        return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
-    } else {
-        vhost_vdpa_reset_device(dev);
-        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
-                              VIRTIO_CONFIG_S_DRIVER);
-        memory_listener_unregister(&v->listener);
+        if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) {
+            error_report("SVQ cannot work while the IOMMU is enabled; "
+                         "please disable the IOMMU and try again");
+            return -1;
+        }
+        memory_listener_register(&v->listener, dev->vdev->dma_as);
 
-        return 0;
+        return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
     }
+
+    return 0;
+}
+
+static void vhost_vdpa_reset_status(struct vhost_dev *dev)
+{
+    struct vhost_vdpa *v = dev->opaque;
+
+    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
+        return;
+    }
+
+    vhost_vdpa_reset_device(dev);
+    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
+                               VIRTIO_CONFIG_S_DRIVER);
+    memory_listener_unregister(&v->listener);
 }
 
 static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
@@ -1182,18 +1374,7 @@ static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                        struct vhost_vring_state *ring)
 {
     struct vhost_vdpa *v = dev->opaque;
-    VirtQueue *vq = virtio_get_queue(dev->vdev, ring->index);
-    /*
-     * vhost-vdpa devices does not support in-flight requests. Set all of them
-     * as available.
-     *
-     * TODO: This is ok for networking, but other kinds of devices might
-     * have problems with these retransmissions.
-     */
-    while (virtqueue_rewind(vq, 1)) {
-        continue;
-    }
 
     if (v->shadow_vqs_enabled) {
         /*
          * Device vring base was set at device start. SVQ base is handled by
@@ -1216,6 +1397,14 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
         return 0;
     }
 
+    if (!v->suspended) {
+        /*
+         * Cannot trust the value returned by the device; let vhost
+         * recover the used idx from the guest.
+ */ + return -1; + } + ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring); trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num); return ret; @@ -1240,25 +1429,24 @@ static int vhost_vdpa_set_vring_call(struct vhost_dev *dev, struct vhost_vring_file *file) { struct vhost_vdpa *v = dev->opaque; + int vdpa_idx = file->index - dev->vq_index; + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx); + /* Remember last call fd because we can switch to SVQ anytime. */ + vhost_svq_set_svq_call_fd(svq, file->fd); if (v->shadow_vqs_enabled) { - int vdpa_idx = file->index - dev->vq_index; - VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx); - - vhost_svq_set_svq_call_fd(svq, file->fd); return 0; - } else { - return vhost_vdpa_set_vring_dev_call(dev, file); } + + return vhost_vdpa_set_vring_dev_call(dev, file); } static int vhost_vdpa_get_features(struct vhost_dev *dev, uint64_t *features) { - struct vhost_vdpa *v = dev->opaque; int ret = vhost_vdpa_get_dev_features(dev, features); - if (ret == 0 && v->shadow_vqs_enabled) { + if (ret == 0) { /* Add SVQ logging capabilities */ *features |= BIT_ULL(VHOST_F_LOG_ALL); } @@ -1326,4 +1514,5 @@ const VhostOps vdpa_ops = { .vhost_vq_get_addr = vhost_vdpa_vq_get_addr, .vhost_force_iommu = vhost_vdpa_force_iommu, .vhost_set_config_call = vhost_vdpa_set_config_call, + .vhost_reset_status = vhost_vdpa_reset_status, }; diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index eb8c4c378c..23da579ce2 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -107,7 +107,7 @@ static void vhost_dev_sync_region(struct vhost_dev *dev, } } -static bool vhost_dev_has_iommu(struct vhost_dev *dev) +bool vhost_dev_has_iommu(struct vhost_dev *dev) { VirtIODevice *vdev = dev->vdev; @@ -1291,18 +1291,6 @@ void vhost_virtqueue_stop(struct vhost_dev *dev, 0, virtio_queue_get_desc_size(vdev, idx)); } -static void vhost_eventfd_add(MemoryListener *listener, - MemoryRegionSection *section, - bool match_data, uint64_t data, EventNotifier *e) -{ -} - -static void vhost_eventfd_del(MemoryListener *listener, - MemoryRegionSection *section, - bool match_data, uint64_t data, EventNotifier *e) -{ -} - static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev, int n, uint32_t timeout) { @@ -1457,8 +1445,6 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, .log_sync = vhost_log_sync, .log_global_start = vhost_log_global_start, .log_global_stop = vhost_log_global_stop, - .eventfd_add = vhost_eventfd_add, - .eventfd_del = vhost_eventfd_del, .priority = 10 }; @@ -2049,6 +2035,9 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) hdev->vqs + i, hdev->vq_index + i); } + if (hdev->vhost_ops->vhost_reset_status) { + hdev->vhost_ops->vhost_reset_status(hdev); + } if (vhost_dev_has_iommu(hdev)) { if (hdev->vhost_ops->vhost_set_iotlb_callback) { diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index 746f07c4d2..d004cf29d2 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -32,6 +32,7 @@ #include "qemu/error-report.h" #include "migration/misc.h" #include "migration/migration.h" +#include "migration/options.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" @@ -729,37 +730,14 @@ static void virtio_balloon_get_config(VirtIODevice *vdev, uint8_t *config_data) memcpy(config_data, &config, virtio_balloon_config_size(dev)); } -static int build_dimm_list(Object *obj, void *opaque) -{ - GSList **list = opaque; - - if (object_dynamic_cast(obj, 
TYPE_PC_DIMM)) { - DeviceState *dev = DEVICE(obj); - if (dev->realized) { /* only realized DIMMs matter */ - *list = g_slist_prepend(*list, dev); - } - } - - object_child_foreach(obj, build_dimm_list, opaque); - return 0; -} - static ram_addr_t get_current_ram_size(void) { - GSList *list = NULL, *item; - ram_addr_t size = current_machine->ram_size; - - build_dimm_list(qdev_get_machine(), &list); - for (item = list; item; item = g_slist_next(item)) { - Object *obj = OBJECT(item->data); - if (!strcmp(object_get_typename(obj), TYPE_PC_DIMM)) { - size += object_property_get_int(obj, PC_DIMM_SIZE_PROP, - &error_abort); - } + MachineState *machine = MACHINE(qdev_get_machine()); + if (machine->device_memory) { + return machine->ram_size + machine->device_memory->dimm_size; + } else { + return machine->ram_size; } - g_slist_free(list); - - return size; } static bool virtio_balloon_page_poison_support(void *opaque) @@ -908,8 +886,9 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp) precopy_add_notifier(&s->free_page_hint_notify); object_ref(OBJECT(s->iothread)); - s->free_page_bh = aio_bh_new(iothread_get_aio_context(s->iothread), - virtio_ballloon_get_free_page_hints, s); + s->free_page_bh = aio_bh_new_guarded(iothread_get_aio_context(s->iothread), + virtio_ballloon_get_free_page_hints, s, + &dev->mem_reentrancy_guard); } if (virtio_has_feature(s->host_features, VIRTIO_BALLOON_F_REPORTING)) { diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c index 516425e26a..c729a1f79e 100644 --- a/hw/virtio/virtio-crypto.c +++ b/hw/virtio/virtio-crypto.c @@ -462,7 +462,7 @@ static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq, req->in_iov = NULL; req->in_num = 0; req->in_len = 0; - req->flags = CRYPTODEV_BACKEND_ALG__MAX; + req->flags = QCRYPTODEV_BACKEND_ALG__MAX; memset(&req->op_info, 0x00, sizeof(req->op_info)); } @@ -472,20 +472,22 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req) return; } - if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) { + if (req->flags == QCRYPTODEV_BACKEND_ALG_SYM) { size_t max_len; CryptoDevBackendSymOpInfo *op_info = req->op_info.u.sym_op_info; - max_len = op_info->iv_len + - op_info->aad_len + - op_info->src_len + - op_info->dst_len + - op_info->digest_result_len; + if (op_info) { + max_len = op_info->iv_len + + op_info->aad_len + + op_info->src_len + + op_info->dst_len + + op_info->digest_result_len; - /* Zeroize and free request data structure */ - memset(op_info, 0, sizeof(*op_info) + max_len); - g_free(op_info); - } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) { + /* Zeroize and free request data structure */ + memset(op_info, 0, sizeof(*op_info) + max_len); + g_free(op_info); + } + } else if (req->flags == QCRYPTODEV_BACKEND_ALG_ASYM) { CryptoDevBackendAsymOpInfo *op_info = req->op_info.u.asym_op_info; if (op_info) { g_free(op_info->src); @@ -570,10 +572,10 @@ static void virtio_crypto_req_complete(void *opaque, int ret) VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); uint8_t status = -ret; - if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) { + if (req->flags == QCRYPTODEV_BACKEND_ALG_SYM) { virtio_crypto_sym_input_data_helper(vdev, req, status, req->op_info.u.sym_op_info); - } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) { + } else if (req->flags == QCRYPTODEV_BACKEND_ALG_ASYM) { virtio_crypto_akcipher_input_data_helper(vdev, req, status, req->op_info.u.asym_op_info); } @@ -871,11 +873,14 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request) opcode = ldl_le_p(&req.header.opcode); 
op_info->session_id = ldq_le_p(&req.header.session_id); op_info->op_code = opcode; + op_info->queue_index = queue_index; + op_info->cb = virtio_crypto_req_complete; + op_info->opaque = request; switch (opcode) { case VIRTIO_CRYPTO_CIPHER_ENCRYPT: case VIRTIO_CRYPTO_CIPHER_DECRYPT: - op_info->algtype = request->flags = CRYPTODEV_BACKEND_ALG_SYM; + op_info->algtype = request->flags = QCRYPTODEV_BACKEND_ALG_SYM; ret = virtio_crypto_handle_sym_req(vcrypto, &req.u.sym_req, op_info, out_iov, out_num); @@ -885,7 +890,7 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request) case VIRTIO_CRYPTO_AKCIPHER_DECRYPT: case VIRTIO_CRYPTO_AKCIPHER_SIGN: case VIRTIO_CRYPTO_AKCIPHER_VERIFY: - op_info->algtype = request->flags = CRYPTODEV_BACKEND_ALG_ASYM; + op_info->algtype = request->flags = QCRYPTODEV_BACKEND_ALG_ASYM; ret = virtio_crypto_handle_asym_req(vcrypto, &req.u.akcipher_req, op_info, out_iov, out_num); @@ -898,9 +903,7 @@ check_result: virtio_crypto_req_complete(request, -VIRTIO_CRYPTO_NOTSUPP); } else { ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev, - request, queue_index, - virtio_crypto_req_complete, - request); + op_info); if (ret < 0) { virtio_crypto_req_complete(request, ret); } @@ -997,12 +1000,35 @@ static void virtio_crypto_reset(VirtIODevice *vdev) } } +static uint32_t virtio_crypto_init_services(uint32_t qservices) +{ + uint32_t vservices = 0; + + if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_CIPHER)) { + vservices |= (1 << VIRTIO_CRYPTO_SERVICE_CIPHER); + } + if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_HASH)) { + vservices |= (1 << VIRTIO_CRYPTO_SERVICE_HASH); + } + if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_MAC)) { + vservices |= (1 << VIRTIO_CRYPTO_SERVICE_MAC); + } + if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_AEAD)) { + vservices |= (1 << VIRTIO_CRYPTO_SERVICE_AEAD); + } + if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER)) { + vservices |= (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER); + } + + return vservices; +} + static void virtio_crypto_init_config(VirtIODevice *vdev) { VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); - vcrypto->conf.crypto_services = - vcrypto->conf.cryptodev->conf.crypto_services; + vcrypto->conf.crypto_services = virtio_crypto_init_services( + vcrypto->conf.cryptodev->conf.crypto_services); vcrypto->conf.cipher_algo_l = vcrypto->conf.cryptodev->conf.cipher_algo_l; vcrypto->conf.cipher_algo_h = @@ -1050,7 +1076,8 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp) vcrypto->vqs[i].dataq = virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh); vcrypto->vqs[i].dataq_bh = - qemu_bh_new(virtio_crypto_dataq_bh, &vcrypto->vqs[i]); + qemu_bh_new_guarded(virtio_crypto_dataq_bh, &vcrypto->vqs[i], + &dev->mem_reentrancy_guard); vcrypto->vqs[i].vcrypto = vcrypto; } diff --git a/hw/virtio/virtio-input-pci.c b/hw/virtio/virtio-input-pci.c index a9d0992389..a53edf46c4 100644 --- a/hw/virtio/virtio-input-pci.c +++ b/hw/virtio/virtio-input-pci.c @@ -25,10 +25,11 @@ struct VirtIOInputPCI { VirtIOInput vdev; }; -#define TYPE_VIRTIO_INPUT_HID_PCI "virtio-input-hid-pci" -#define TYPE_VIRTIO_KEYBOARD_PCI "virtio-keyboard-pci" -#define TYPE_VIRTIO_MOUSE_PCI "virtio-mouse-pci" -#define TYPE_VIRTIO_TABLET_PCI "virtio-tablet-pci" +#define TYPE_VIRTIO_INPUT_HID_PCI "virtio-input-hid-pci" +#define TYPE_VIRTIO_KEYBOARD_PCI "virtio-keyboard-pci" +#define TYPE_VIRTIO_MOUSE_PCI "virtio-mouse-pci" +#define TYPE_VIRTIO_TABLET_PCI "virtio-tablet-pci" +#define TYPE_VIRTIO_MULTITOUCH_PCI "virtio-multitouch-pci" 
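The if-chain in virtio_crypto_init_services() above performs a bit-for-bit translation between the QAPI-level QCRYPTODEV_BACKEND_SERVICE_* namespace and the wire-level VIRTIO_CRYPTO_SERVICE_* bits; keeping the two namespaces decoupled is what lets either side renumber without breaking the other. The same mapping can be written table-driven, as in this sketch; the bit positions below are placeholders for illustration, not the real header values.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch: table-driven remap of backend service bits to virtio service
 * bits. The entries stand in for QCRYPTODEV_BACKEND_SERVICE_* and
 * VIRTIO_CRYPTO_SERVICE_* values from the QEMU headers.
 */
static const struct {
    unsigned backend_bit;   /* bit position in the qcryptodev mask */
    unsigned virtio_bit;    /* corresponding virtio-crypto bit */
} service_map[] = {
    { 0 /* CIPHER */,   0 /* VIRTIO CIPHER */ },
    { 1 /* HASH */,     1 /* VIRTIO HASH */ },
    { 2 /* MAC */,      2 /* VIRTIO MAC */ },
    { 3 /* AEAD */,     3 /* VIRTIO AEAD */ },
    { 4 /* AKCIPHER */, 4 /* VIRTIO AKCIPHER */ },
};

static uint32_t remap_services(uint32_t qservices)
{
    uint32_t vservices = 0;

    for (size_t i = 0; i < sizeof(service_map) / sizeof(service_map[0]); i++) {
        if (qservices & (1u << service_map[i].backend_bit)) {
            vservices |= 1u << service_map[i].virtio_bit;
        }
    }
    return vservices;
}

int main(void)
{
    /* CIPHER and AKCIPHER enabled in the backend mask */
    printf("0x%x\n", remap_services((1u << 0) | (1u << 4)));
    return 0;
}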
OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputHIDPCI, VIRTIO_INPUT_HID_PCI) struct VirtIOInputHIDPCI { @@ -102,6 +103,14 @@ static void virtio_tablet_initfn(Object *obj) TYPE_VIRTIO_TABLET); } +static void virtio_multitouch_initfn(Object *obj) +{ + VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_MULTITOUCH); +} + static const TypeInfo virtio_input_pci_info = { .name = TYPE_VIRTIO_INPUT_PCI, .parent = TYPE_VIRTIO_PCI, @@ -140,6 +149,13 @@ static const VirtioPCIDeviceTypeInfo virtio_tablet_pci_info = { .instance_init = virtio_tablet_initfn, }; +static const VirtioPCIDeviceTypeInfo virtio_multitouch_pci_info = { + .generic_name = TYPE_VIRTIO_MULTITOUCH_PCI, + .parent = TYPE_VIRTIO_INPUT_HID_PCI, + .instance_size = sizeof(VirtIOInputHIDPCI), + .instance_init = virtio_multitouch_initfn, +}; + static void virtio_pci_input_register(void) { /* Base types: */ @@ -150,6 +166,7 @@ static void virtio_pci_input_register(void) virtio_pci_types_register(&virtio_keyboard_pci_info); virtio_pci_types_register(&virtio_mouse_pci_info); virtio_pci_types_register(&virtio_tablet_pci_info); + virtio_pci_types_register(&virtio_multitouch_pci_info); } type_init(virtio_pci_input_register) diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index 957fe77dc0..538b695c29 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -1341,7 +1341,7 @@ static Property virtio_mem_properties[] = { TYPE_MEMORY_BACKEND, HostMemoryBackend *), #if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS) DEFINE_PROP_ON_OFF_AUTO(VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP, VirtIOMEM, - unplugged_inaccessible, ON_OFF_AUTO_AUTO), + unplugged_inaccessible, ON_OFF_AUTO_ON), #endif DEFINE_PROP_BOOL(VIRTIO_MEM_EARLY_MIGRATION_PROP, VirtIOMEM, early_migration, true), diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index 23ba625eb6..c2c6d85475 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -354,6 +354,7 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, if (proxy->legacy) { virtio_queue_update_rings(vdev, vdev->queue_sel); } else { + virtio_init_region_cache(vdev, vdev->queue_sel); proxy->vqs[vdev->queue_sel].num = value; } break; diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 247325c193..edbc0daa18 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -716,6 +716,38 @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, } } +static void virtio_pci_ats_ctrl_trigger(PCIDevice *pci_dev, bool enable) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + + vdev->device_iotlb_enabled = enable; + + if (k->toggle_device_iotlb) { + k->toggle_device_iotlb(vdev); + } +} + +static void pcie_ats_config_write(PCIDevice *dev, uint32_t address, + uint32_t val, int len) +{ + uint32_t off; + uint16_t ats_cap = dev->exp.ats_cap; + + if (!ats_cap || address < ats_cap) { + return; + } + off = address - ats_cap; + if (off >= PCI_EXT_CAP_ATS_SIZEOF) { + return; + } + + if (range_covers_byte(off, len, PCI_ATS_CTRL + 1)) { + virtio_pci_ats_ctrl_trigger(dev, !!(val & PCI_ATS_CTRL_ENABLE)); + } +} + static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, uint32_t val, int len) { @@ -729,6 +761,10 @@ static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, pcie_cap_flr_write_config(pci_dev, address, val, len); } + if (proxy->flags & 
VIRTIO_PCI_FLAG_ATS) { + pcie_ats_config_write(pci_dev, address, val, len); + } + if (range_covers_byte(address, len, PCI_COMMAND)) { if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { virtio_set_disabled(vdev, true); @@ -1554,6 +1590,7 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr, proxy->vqs[vdev->queue_sel].num = val; virtio_queue_set_num(vdev, vdev->queue_sel, proxy->vqs[vdev->queue_sel].num); + virtio_init_region_cache(vdev, vdev->queue_sel); break; case VIRTIO_PCI_COMMON_Q_MSIX: vector = virtio_queue_vector(vdev, vdev->queue_sel); diff --git a/hw/virtio/virtio-pmem.c b/hw/virtio/virtio-pmem.c index dff402f08f..c3512c2dae 100644 --- a/hw/virtio/virtio-pmem.c +++ b/hw/virtio/virtio-pmem.c @@ -70,7 +70,6 @@ static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq) VirtIODeviceRequest *req_data; VirtIOPMEM *pmem = VIRTIO_PMEM(vdev); HostMemoryBackend *backend = MEMORY_BACKEND(pmem->memdev); - ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context()); trace_virtio_pmem_flush_request(); req_data = virtqueue_pop(vq, sizeof(VirtIODeviceRequest)); @@ -88,7 +87,7 @@ static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq) req_data->fd = memory_region_get_fd(&backend->mr); req_data->pmem = pmem; req_data->vdev = vdev; - thread_pool_submit_aio(pool, worker_cb, req_data, done_cb, req_data); + thread_pool_submit_aio(worker_cb, req_data, done_cb, req_data); } static void virtio_pmem_get_config(VirtIODevice *vdev, uint8_t *config) diff --git a/hw/virtio/virtio-qmp.c b/hw/virtio/virtio-qmp.c index e4d4bece2d..b5e1835299 100644 --- a/hw/virtio/virtio-qmp.c +++ b/hw/virtio/virtio-qmp.c @@ -42,12 +42,12 @@ enum VhostUserProtocolFeature { VHOST_USER_PROTOCOL_F_RARP = 2, VHOST_USER_PROTOCOL_F_REPLY_ACK = 3, VHOST_USER_PROTOCOL_F_NET_MTU = 4, - VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5, + VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5, VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6, VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7, VHOST_USER_PROTOCOL_F_PAGEFAULT = 8, VHOST_USER_PROTOCOL_F_CONFIG = 9, - VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10, + VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10, VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12, VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13, @@ -101,8 +101,8 @@ static const qmp_virtio_feature_map_t vhost_user_protocol_map[] = { "supported"), FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_NET_MTU, \ "VHOST_USER_PROTOCOL_F_NET_MTU: Expose host MTU to guest supported"), - FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_REQ, \ - "VHOST_USER_PROTOCOL_F_SLAVE_REQ: Socket fd for back-end initiated " + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_BACKEND_REQ, \ + "VHOST_USER_PROTOCOL_F_BACKEND_REQ: Socket fd for back-end initiated " "requests supported"), FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CROSS_ENDIAN, \ "VHOST_USER_PROTOCOL_F_CROSS_ENDIAN: Endianness of VQs for legacy " @@ -116,8 +116,8 @@ static const qmp_virtio_feature_map_t vhost_user_protocol_map[] = { FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIG, \ "VHOST_USER_PROTOCOL_F_CONFIG: Vhost-user messaging for virtio " "device configuration space supported"), - FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD, \ - "VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD: Slave fd communication " + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD, \ + "VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD: Slave fd communication " "channel supported"), FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_HOST_NOTIFIER, \ "VHOST_USER_PROTOCOL_F_HOST_NOTIFIER: Host notifiers for specified " @@ -176,6 +176,8 @@ static const qmp_virtio_feature_map_t 
virtio_blk_feature_map[] = { "VIRTIO_BLK_F_DISCARD: Discard command supported"), FEATURE_ENTRY(VIRTIO_BLK_F_WRITE_ZEROES, \ "VIRTIO_BLK_F_WRITE_ZEROES: Write zeroes command supported"), + FEATURE_ENTRY(VIRTIO_BLK_F_ZONED, \ + "VIRTIO_BLK_F_ZONED: Zoned block devices"), #ifndef VIRTIO_BLK_NO_LEGACY FEATURE_ENTRY(VIRTIO_BLK_F_BARRIER, \ "VIRTIO_BLK_F_BARRIER: Request barriers supported"), @@ -666,7 +668,7 @@ VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id, uint64_t bitmap) VirtioInfoList *qmp_x_query_virtio(Error **errp) { VirtioInfoList *list = NULL; - VirtioInfoList *node; + VirtioInfo *node; VirtIODevice *vdev; QTAILQ_FOREACH(vdev, &virtio_list, next) { @@ -680,11 +682,10 @@ VirtioInfoList *qmp_x_query_virtio(Error **errp) if (!strncmp(is_realized->str, "false", 4)) { QTAILQ_REMOVE(&virtio_list, vdev, next); } else { - node = g_new0(VirtioInfoList, 1); - node->value = g_new(VirtioInfo, 1); - node->value->path = g_strdup(dev->canonical_path); - node->value->name = g_strdup(vdev->name); - QAPI_LIST_PREPEND(list, node->value); + node = g_new(VirtioInfo, 1); + node->path = g_strdup(dev->canonical_path); + node->name = g_strdup(vdev->name); + QAPI_LIST_PREPEND(list, node); } g_string_free(is_realized, true); } diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index f35178f5fc..295a603e58 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -226,7 +226,7 @@ static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq) } } -static void virtio_init_region_cache(VirtIODevice *vdev, int n) +void virtio_init_region_cache(VirtIODevice *vdev, int n) { VirtQueue *vq = &vdev->vq[n]; VRingMemoryRegionCaches *old = vq->vring.caches; @@ -1069,7 +1069,7 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq, VRingMemoryRegionCaches *caches) { VirtIODevice *vdev = vq->vdev; - unsigned int max, idx; + unsigned int idx; unsigned int total_bufs, in_total, out_total; MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; int64_t len = 0; @@ -1078,13 +1078,12 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq, idx = vq->last_avail_idx; total_bufs = in_total = out_total = 0; - max = vq->vring.num; - while ((rc = virtqueue_num_heads(vq, idx)) > 0) { MemoryRegionCache *desc_cache = &caches->desc; unsigned int num_bufs; VRingDesc desc; unsigned int i; + unsigned int max = vq->vring.num; num_bufs = total_bufs; @@ -1206,7 +1205,7 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq, VRingMemoryRegionCaches *caches) { VirtIODevice *vdev = vq->vdev; - unsigned int max, idx; + unsigned int idx; unsigned int total_bufs, in_total, out_total; MemoryRegionCache *desc_cache; MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; @@ -1218,14 +1217,14 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq, wrap_counter = vq->last_avail_wrap_counter; total_bufs = in_total = out_total = 0; - max = vq->vring.num; - for (;;) { unsigned int num_bufs = total_bufs; unsigned int i = idx; int rc; + unsigned int max = vq->vring.num; desc_cache = &caches->desc; + vring_packed_desc_read(vdev, &desc, desc_cache, idx, true); if (!is_desc_avail(desc.flags, wrap_counter)) { break; @@ -3492,7 +3491,7 @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n) void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx) { - aio_set_event_notifier(ctx, &vq->host_notifier, true, + aio_set_event_notifier(ctx, &vq->host_notifier, virtio_queue_host_notifier_read, virtio_queue_host_notifier_aio_poll, 
virtio_queue_host_notifier_aio_poll_ready); @@ -3509,17 +3508,14 @@ void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx) */ void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx) { - aio_set_event_notifier(ctx, &vq->host_notifier, true, + aio_set_event_notifier(ctx, &vq->host_notifier, virtio_queue_host_notifier_read, NULL, NULL); } void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx) { - aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL); - /* Test and clear notifier before after disabling event, - * in case poll callback didn't have time to run. */ - virtio_queue_host_notifier_read(&vq->host_notifier); + aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL); } void virtio_queue_host_notifier_read(EventNotifier *n) diff --git a/hw/watchdog/Kconfig b/hw/watchdog/Kconfig index 66e1d029e3..861fd00334 100644 --- a/hw/watchdog/Kconfig +++ b/hw/watchdog/Kconfig @@ -20,3 +20,7 @@ config WDT_IMX2 config WDT_SBSA bool + +config ALLWINNER_WDT + bool + select PTIMER diff --git a/hw/watchdog/allwinner-wdt.c b/hw/watchdog/allwinner-wdt.c new file mode 100644 index 0000000000..6205765efe --- /dev/null +++ b/hw/watchdog/allwinner-wdt.c @@ -0,0 +1,416 @@ +/* + * Allwinner Watchdog emulation + * + * Copyright (C) 2023 Strahinja Jankovic + * + * This file is derived from Allwinner RTC, + * by Niek Linnenbank. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/units.h" +#include "qemu/module.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "hw/registerfields.h" +#include "hw/watchdog/allwinner-wdt.h" +#include "sysemu/watchdog.h" +#include "migration/vmstate.h" + +/* WDT registers */ +enum { + REG_IRQ_EN = 0, /* Watchdog interrupt enable */ + REG_IRQ_STA, /* Watchdog interrupt status */ + REG_CTRL, /* Watchdog control register */ + REG_CFG, /* Watchdog configuration register */ + REG_MODE, /* Watchdog mode register */ +}; + +/* Universal WDT register flags */ +#define WDT_RESTART_MASK (1 << 0) +#define WDT_EN_MASK (1 << 0) + +/* sun4i specific WDT register flags */ +#define RST_EN_SUN4I_MASK (1 << 1) +#define INTV_VALUE_SUN4I_SHIFT (3) +#define INTV_VALUE_SUN4I_MASK (0xfu << INTV_VALUE_SUN4I_SHIFT) + +/* sun6i specific WDT register flags */ +#define RST_EN_SUN6I_MASK (1 << 0) +#define KEY_FIELD_SUN6I_SHIFT (1) +#define KEY_FIELD_SUN6I_MASK (0xfffu << KEY_FIELD_SUN6I_SHIFT) +#define KEY_FIELD_SUN6I (0xA57u) +#define INTV_VALUE_SUN6I_SHIFT (4) +#define INTV_VALUE_SUN6I_MASK (0xfu << INTV_VALUE_SUN6I_SHIFT) + +/* Map of INTV_VALUE to 0.5s units. 
*/ +static const uint8_t allwinner_wdt_count_map[] = { + 1, + 2, + 4, + 6, + 8, + 10, + 12, + 16, + 20, + 24, + 28, + 32 +}; + +/* WDT sun4i register map (offset to name) */ +const uint8_t allwinner_wdt_sun4i_regmap[] = { + [0x0000] = REG_CTRL, + [0x0004] = REG_MODE, +}; + +/* WDT sun6i register map (offset to name) */ +const uint8_t allwinner_wdt_sun6i_regmap[] = { + [0x0000] = REG_IRQ_EN, + [0x0004] = REG_IRQ_STA, + [0x0010] = REG_CTRL, + [0x0014] = REG_CFG, + [0x0018] = REG_MODE, +}; + +static bool allwinner_wdt_sun4i_read(AwWdtState *s, uint32_t offset) +{ + /* no sun4i specific registers currently implemented */ + return false; +} + +static bool allwinner_wdt_sun4i_write(AwWdtState *s, uint32_t offset, + uint32_t data) +{ + /* no sun4i specific registers currently implemented */ + return false; +} + +static bool allwinner_wdt_sun4i_can_reset_system(AwWdtState *s) +{ + if (s->regs[REG_MODE] & RST_EN_SUN4I_MASK) { + return true; + } else { + return false; + } +} + +static bool allwinner_wdt_sun4i_is_key_valid(AwWdtState *s, uint32_t val) +{ + /* sun4i has no key */ + return true; +} + +static uint8_t allwinner_wdt_sun4i_get_intv_value(AwWdtState *s) +{ + return ((s->regs[REG_MODE] & INTV_VALUE_SUN4I_MASK) >> + INTV_VALUE_SUN4I_SHIFT); +} + +static bool allwinner_wdt_sun6i_read(AwWdtState *s, uint32_t offset) +{ + const AwWdtClass *c = AW_WDT_GET_CLASS(s); + + switch (c->regmap[offset]) { + case REG_IRQ_EN: + case REG_IRQ_STA: + case REG_CFG: + return true; + default: + break; + } + return false; +} + +static bool allwinner_wdt_sun6i_write(AwWdtState *s, uint32_t offset, + uint32_t data) +{ + const AwWdtClass *c = AW_WDT_GET_CLASS(s); + + switch (c->regmap[offset]) { + case REG_IRQ_EN: + case REG_IRQ_STA: + case REG_CFG: + return true; + default: + break; + } + return false; +} + +static bool allwinner_wdt_sun6i_can_reset_system(AwWdtState *s) +{ + if (s->regs[REG_CFG] & RST_EN_SUN6I_MASK) { + return true; + } else { + return false; + } +} + +static bool allwinner_wdt_sun6i_is_key_valid(AwWdtState *s, uint32_t val) +{ + uint16_t key = (val & KEY_FIELD_SUN6I_MASK) >> KEY_FIELD_SUN6I_SHIFT; + return (key == KEY_FIELD_SUN6I); +} + +static uint8_t allwinner_wdt_sun6i_get_intv_value(AwWdtState *s) +{ + return ((s->regs[REG_MODE] & INTV_VALUE_SUN6I_MASK) >> + INTV_VALUE_SUN6I_SHIFT); +} + +static void allwinner_wdt_update_timer(AwWdtState *s) +{ + const AwWdtClass *c = AW_WDT_GET_CLASS(s); + uint8_t count = c->get_intv_value(s); + + ptimer_transaction_begin(s->timer); + ptimer_stop(s->timer); + + /* Use map to convert. 
*/ + if (count < sizeof(allwinner_wdt_count_map)) { + ptimer_set_count(s->timer, allwinner_wdt_count_map[count]); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "%s: incorrect INTV_VALUE 0x%02x\n", + __func__, count); + } + + ptimer_run(s->timer, 1); + ptimer_transaction_commit(s->timer); + + trace_allwinner_wdt_update_timer(count); +} + +static uint64_t allwinner_wdt_read(void *opaque, hwaddr offset, + unsigned size) +{ + AwWdtState *s = AW_WDT(opaque); + const AwWdtClass *c = AW_WDT_GET_CLASS(s); + uint64_t r; + + if (offset >= c->regmap_size) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + switch (c->regmap[offset]) { + case REG_CTRL: + case REG_MODE: + r = s->regs[c->regmap[offset]]; + break; + default: + if (!c->read(s, offset)) { + qemu_log_mask(LOG_UNIMP, "%s: unimplemented register 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + r = s->regs[c->regmap[offset]]; + break; + } + + trace_allwinner_wdt_read(offset, r, size); + + return r; +} + +static void allwinner_wdt_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwWdtState *s = AW_WDT(opaque); + const AwWdtClass *c = AW_WDT_GET_CLASS(s); + uint32_t old_val; + + if (offset >= c->regmap_size) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return; + } + + trace_allwinner_wdt_write(offset, val, size); + + switch (c->regmap[offset]) { + case REG_CTRL: + if (c->is_key_valid(s, val)) { + if (val & WDT_RESTART_MASK) { + /* Kick timer */ + allwinner_wdt_update_timer(s); + } + } + break; + case REG_MODE: + old_val = s->regs[REG_MODE]; + s->regs[REG_MODE] = (uint32_t)val; + + /* Check for rising edge on WDOG_MODE_EN */ + if ((s->regs[REG_MODE] & ~old_val) & WDT_EN_MASK) { + allwinner_wdt_update_timer(s); + } + break; + default: + if (!c->write(s, offset, val)) { + qemu_log_mask(LOG_UNIMP, "%s: unimplemented register 0x%04x\n", + __func__, (uint32_t)offset); + } + s->regs[c->regmap[offset]] = (uint32_t)val; + break; + } +} + +static const MemoryRegionOps allwinner_wdt_ops = { + .read = allwinner_wdt_read, + .write = allwinner_wdt_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static void allwinner_wdt_expired(void *opaque) +{ + AwWdtState *s = AW_WDT(opaque); + const AwWdtClass *c = AW_WDT_GET_CLASS(s); + + bool enabled = s->regs[REG_MODE] & WDT_EN_MASK; + bool reset_enabled = c->can_reset_system(s); + + trace_allwinner_wdt_expired(enabled, reset_enabled); + + /* Perform watchdog action if watchdog is enabled and can trigger reset */ + if (enabled && reset_enabled) { + watchdog_perform_action(); + } +} + +static void allwinner_wdt_reset_enter(Object *obj, ResetType type) +{ + AwWdtState *s = AW_WDT(obj); + + trace_allwinner_wdt_reset_enter(); + + /* Clear registers */ + memset(s->regs, 0, sizeof(s->regs)); +} + +static const VMStateDescription allwinner_wdt_vmstate = { + .name = "allwinner-wdt", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PTIMER(timer, AwWdtState), + VMSTATE_UINT32_ARRAY(regs, AwWdtState, AW_WDT_REGS_NUM), + VMSTATE_END_OF_LIST() + } +}; + +static void allwinner_wdt_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + AwWdtState *s = AW_WDT(obj); + const AwWdtClass *c = AW_WDT_GET_CLASS(s); + + /* Memory mapping */ + memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_wdt_ops, s, + TYPE_AW_WDT, c->regmap_size 
* 4); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void allwinner_wdt_realize(DeviceState *dev, Error **errp) +{ + AwWdtState *s = AW_WDT(dev); + + s->timer = ptimer_init(allwinner_wdt_expired, s, + PTIMER_POLICY_NO_IMMEDIATE_TRIGGER | + PTIMER_POLICY_NO_IMMEDIATE_RELOAD | + PTIMER_POLICY_NO_COUNTER_ROUND_DOWN); + + ptimer_transaction_begin(s->timer); + /* Set to 2Hz (0.5s period); other periods are multiples of 0.5s. */ + ptimer_set_freq(s->timer, 2); + ptimer_set_limit(s->timer, 0xff, 1); + ptimer_transaction_commit(s->timer); +} + +static void allwinner_wdt_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.enter = allwinner_wdt_reset_enter; + dc->realize = allwinner_wdt_realize; + dc->vmsd = &allwinner_wdt_vmstate; +} + +static void allwinner_wdt_sun4i_class_init(ObjectClass *klass, void *data) +{ + AwWdtClass *awc = AW_WDT_CLASS(klass); + + awc->regmap = allwinner_wdt_sun4i_regmap; + awc->regmap_size = sizeof(allwinner_wdt_sun4i_regmap); + awc->read = allwinner_wdt_sun4i_read; + awc->write = allwinner_wdt_sun4i_write; + awc->can_reset_system = allwinner_wdt_sun4i_can_reset_system; + awc->is_key_valid = allwinner_wdt_sun4i_is_key_valid; + awc->get_intv_value = allwinner_wdt_sun4i_get_intv_value; +} + +static void allwinner_wdt_sun6i_class_init(ObjectClass *klass, void *data) +{ + AwWdtClass *awc = AW_WDT_CLASS(klass); + + awc->regmap = allwinner_wdt_sun6i_regmap; + awc->regmap_size = sizeof(allwinner_wdt_sun6i_regmap); + awc->read = allwinner_wdt_sun6i_read; + awc->write = allwinner_wdt_sun6i_write; + awc->can_reset_system = allwinner_wdt_sun6i_can_reset_system; + awc->is_key_valid = allwinner_wdt_sun6i_is_key_valid; + awc->get_intv_value = allwinner_wdt_sun6i_get_intv_value; +} + +static const TypeInfo allwinner_wdt_info = { + .name = TYPE_AW_WDT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = allwinner_wdt_init, + .instance_size = sizeof(AwWdtState), + .class_init = allwinner_wdt_class_init, + .class_size = sizeof(AwWdtClass), + .abstract = true, +}; + +static const TypeInfo allwinner_wdt_sun4i_info = { + .name = TYPE_AW_WDT_SUN4I, + .parent = TYPE_AW_WDT, + .class_init = allwinner_wdt_sun4i_class_init, +}; + +static const TypeInfo allwinner_wdt_sun6i_info = { + .name = TYPE_AW_WDT_SUN6I, + .parent = TYPE_AW_WDT, + .class_init = allwinner_wdt_sun6i_class_init, +}; + +static void allwinner_wdt_register(void) +{ + type_register_static(&allwinner_wdt_info); + type_register_static(&allwinner_wdt_sun4i_info); + type_register_static(&allwinner_wdt_sun6i_info); +} + +type_init(allwinner_wdt_register) diff --git a/hw/watchdog/meson.build b/hw/watchdog/meson.build index 8974b5cf4c..5dcd4fbe2f 100644 --- a/hw/watchdog/meson.build +++ b/hw/watchdog/meson.build @@ -1,4 +1,5 @@ softmmu_ss.add(files('watchdog.c')) +softmmu_ss.add(when: 'CONFIG_ALLWINNER_WDT', if_true: files('allwinner-wdt.c')) softmmu_ss.add(when: 'CONFIG_CMSDK_APB_WATCHDOG', if_true: files('cmsdk-apb-watchdog.c')) softmmu_ss.add(when: 'CONFIG_WDT_IB6300ESB', if_true: files('wdt_i6300esb.c')) softmmu_ss.add(when: 'CONFIG_WDT_IB700', if_true: files('wdt_ib700.c')) diff --git a/hw/watchdog/trace-events b/hw/watchdog/trace-events index 54371ae075..2739570652 100644 --- a/hw/watchdog/trace-events +++ b/hw/watchdog/trace-events @@ -1,5 +1,12 @@ # See docs/devel/tracing.rst for syntax documentation. 
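To make the timeout arithmetic in allwinner_wdt_update_timer() above concrete: allwinner_wdt_realize() fixes the ptimer frequency at 2 Hz, so each entry of allwinner_wdt_count_map is one 0.5 s tick and the guest-visible timeout is simply count * 500 ms. A small standalone sketch that tabulates the resulting timeout per INTV_VALUE, using the same count map as the patch:

#include <stdint.h>
#include <stdio.h>

/* Copy of allwinner_wdt_count_map from the patch above (0.5 s units). */
static const uint8_t count_map[] = {
    1, 2, 4, 6, 8, 10, 12, 16, 20, 24, 28, 32
};

int main(void)
{
    for (unsigned intv = 0; intv < sizeof(count_map); intv++) {
        /* timeout in milliseconds = ticks * 500 ms (ptimer runs at 2 Hz) */
        printf("INTV_VALUE %2u -> %2u ticks -> %5u ms\n",
               intv, count_map[intv], count_map[intv] * 500u);
    }
    return 0;
}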
+# allwinner-wdt.c +allwinner_wdt_read(uint64_t offset, uint64_t data, unsigned size) "Allwinner watchdog read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +allwinner_wdt_write(uint64_t offset, uint64_t data, unsigned size) "Allwinner watchdog write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +allwinner_wdt_reset_enter(void) "Allwinner watchdog: reset" +allwinner_wdt_update_timer(uint8_t count) "Allwinner watchdog: count %" PRIu8 +allwinner_wdt_expired(bool enabled, bool reset_enabled) "Allwinner watchdog: enabled %u reset_enabled %u" + # cmsdk-apb-watchdog.c cmsdk_apb_watchdog_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_watchdog_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" diff --git a/hw/watchdog/wdt_ib700.c b/hw/watchdog/wdt_ib700.c index b116c3a3aa..a1750a4957 100644 --- a/hw/watchdog/wdt_ib700.c +++ b/hw/watchdog/wdt_ib700.c @@ -30,7 +30,7 @@ /*#define IB700_DEBUG 1*/ #ifdef IB700_DEBUG -#define ib700_debug(fs,...) \ +#define ib700_debug(fs,...) \ fprintf(stderr,"ib700: %s: "fs,__func__,##__VA_ARGS__) #else #define ib700_debug(fs,...) diff --git a/hw/xen/Kconfig b/hw/xen/Kconfig new file mode 100644 index 0000000000..3467efb986 --- /dev/null +++ b/hw/xen/Kconfig @@ -0,0 +1,3 @@ +config XEN_BUS + bool + default y if (XEN || XEN_EMU) diff --git a/hw/xen/meson.build b/hw/xen/meson.build index ae0ace3046..19c6aabc7c 100644 --- a/hw/xen/meson.build +++ b/hw/xen/meson.build @@ -1,4 +1,4 @@ -softmmu_ss.add(when: ['CONFIG_XEN', xen], if_true: files( +softmmu_ss.add(when: ['CONFIG_XEN_BUS'], if_true: files( 'xen-backend.c', 'xen-bus-helper.c', 'xen-bus.c', @@ -7,6 +7,10 @@ softmmu_ss.add(when: ['CONFIG_XEN', xen], if_true: files( 'xen_pvdev.c', )) +softmmu_ss.add(when: ['CONFIG_XEN', xen], if_true: files( + 'xen-operations.c', +)) + xen_specific_ss = ss.source_set() if have_xen_pci_passthrough xen_specific_ss.add(files( diff --git a/hw/xen/trace-events b/hw/xen/trace-events index 3da3fd8348..55c9e1df68 100644 --- a/hw/xen/trace-events +++ b/hw/xen/trace-events @@ -1,6 +1,6 @@ # See docs/devel/tracing.rst for syntax documentation. 
-# ../../include/hw/xen/xen_common.h +# ../../include/hw/xen/xen_native.h xen_default_ioreq_server(void) "" xen_ioreq_server_create(uint32_t id) "id: %u" xen_ioreq_server_destroy(uint32_t id) "id: %u" diff --git a/hw/xen/xen-bus-helper.c b/hw/xen/xen-bus-helper.c index 5a1e12b374..b2b2cc9c5d 100644 --- a/hw/xen/xen-bus-helper.c +++ b/hw/xen/xen-bus-helper.c @@ -10,6 +10,7 @@ #include "hw/xen/xen-bus.h" #include "hw/xen/xen-bus-helper.h" #include "qapi/error.h" +#include "trace.h" #include @@ -46,34 +47,28 @@ const char *xs_strstate(enum xenbus_state state) return "INVALID"; } -void xs_node_create(struct xs_handle *xsh, xs_transaction_t tid, - const char *node, struct xs_permissions perms[], - unsigned int nr_perms, Error **errp) +void xs_node_create(struct qemu_xs_handle *h, xs_transaction_t tid, + const char *node, unsigned int owner, unsigned int domid, + unsigned int perms, Error **errp) { trace_xs_node_create(node); - if (!xs_write(xsh, tid, node, "", 0)) { + if (!qemu_xen_xs_create(h, tid, owner, domid, perms, node)) { error_setg_errno(errp, errno, "failed to create node '%s'", node); - return; - } - - if (!xs_set_permissions(xsh, tid, node, perms, nr_perms)) { - error_setg_errno(errp, errno, "failed to set node '%s' permissions", - node); } } -void xs_node_destroy(struct xs_handle *xsh, xs_transaction_t tid, +void xs_node_destroy(struct qemu_xs_handle *h, xs_transaction_t tid, const char *node, Error **errp) { trace_xs_node_destroy(node); - if (!xs_rm(xsh, tid, node)) { + if (!qemu_xen_xs_destroy(h, tid, node)) { error_setg_errno(errp, errno, "failed to destroy node '%s'", node); } } -void xs_node_vprintf(struct xs_handle *xsh, xs_transaction_t tid, +void xs_node_vprintf(struct qemu_xs_handle *h, xs_transaction_t tid, const char *node, const char *key, Error **errp, const char *fmt, va_list ap) { @@ -86,7 +81,7 @@ void xs_node_vprintf(struct xs_handle *xsh, xs_transaction_t tid, trace_xs_node_vprintf(path, value); - if (!xs_write(xsh, tid, path, value, len)) { + if (!qemu_xen_xs_write(h, tid, path, value, len)) { error_setg_errno(errp, errno, "failed to write '%s' to '%s'", value, path); } @@ -95,18 +90,18 @@ void xs_node_vprintf(struct xs_handle *xsh, xs_transaction_t tid, g_free(path); } -void xs_node_printf(struct xs_handle *xsh, xs_transaction_t tid, +void xs_node_printf(struct qemu_xs_handle *h, xs_transaction_t tid, const char *node, const char *key, Error **errp, const char *fmt, ...) { va_list ap; va_start(ap, fmt); - xs_node_vprintf(xsh, tid, node, key, errp, fmt, ap); + xs_node_vprintf(h, tid, node, key, errp, fmt, ap); va_end(ap); } -int xs_node_vscanf(struct xs_handle *xsh, xs_transaction_t tid, +int xs_node_vscanf(struct qemu_xs_handle *h, xs_transaction_t tid, const char *node, const char *key, Error **errp, const char *fmt, va_list ap) { @@ -115,7 +110,7 @@ int xs_node_vscanf(struct xs_handle *xsh, xs_transaction_t tid, path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) : g_strdup(key); - value = xs_read(xsh, tid, path, NULL); + value = qemu_xen_xs_read(h, tid, path, NULL); trace_xs_node_vscanf(path, value); @@ -133,7 +128,7 @@ int xs_node_vscanf(struct xs_handle *xsh, xs_transaction_t tid, return rc; } -int xs_node_scanf(struct xs_handle *xsh, xs_transaction_t tid, +int xs_node_scanf(struct qemu_xs_handle *h, xs_transaction_t tid, const char *node, const char *key, Error **errp, const char *fmt, ...) 
{ @@ -141,42 +136,35 @@ int xs_node_scanf(struct xs_handle *xsh, xs_transaction_t tid, int rc; va_start(ap, fmt); - rc = xs_node_vscanf(xsh, tid, node, key, errp, fmt, ap); + rc = xs_node_vscanf(h, tid, node, key, errp, fmt, ap); va_end(ap); return rc; } -void xs_node_watch(struct xs_handle *xsh, const char *node, const char *key, - char *token, Error **errp) +struct qemu_xs_watch *xs_node_watch(struct qemu_xs_handle *h, const char *node, + const char *key, xs_watch_fn fn, + void *opaque, Error **errp) { char *path; + struct qemu_xs_watch *w; path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) : g_strdup(key); trace_xs_node_watch(path); - if (!xs_watch(xsh, path, token)) { + w = qemu_xen_xs_watch(h, path, fn, opaque); + if (!w) { error_setg_errno(errp, errno, "failed to watch node '%s'", path); } g_free(path); + + return w; } -void xs_node_unwatch(struct xs_handle *xsh, const char *node, - const char *key, const char *token, Error **errp) +void xs_node_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w) { - char *path; - - path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) : - g_strdup(key); - - trace_xs_node_unwatch(path); - - if (!xs_unwatch(xsh, path, token)) { - error_setg_errno(errp, errno, "failed to unwatch node '%s'", path); - } - - g_free(path); + qemu_xen_xs_unwatch(h, w); } diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c index df3f6b9ae0..1e08cf027a 100644 --- a/hw/xen/xen-bus.c +++ b/hw/xen/xen-bus.c @@ -62,7 +62,7 @@ static void xen_device_unplug(XenDevice *xendev, Error **errp) /* Mimic the way the Xen toolstack does an unplug */ again: - tid = xs_transaction_start(xenbus->xsh); + tid = qemu_xen_xs_transaction_start(xenbus->xsh); if (tid == XBT_NULL) { error_setg_errno(errp, errno, "failed xs_transaction_start"); return; @@ -80,7 +80,7 @@ again: goto abort; } - if (!xs_transaction_end(xenbus->xsh, tid, false)) { + if (!qemu_xen_xs_transaction_end(xenbus->xsh, tid, false)) { if (errno == EAGAIN) { goto again; } @@ -95,7 +95,7 @@ abort: * We only abort if there is already a failure so ignore any error * from ending the transaction. 
*/ - xs_transaction_end(xenbus->xsh, tid, true); + qemu_xen_xs_transaction_end(xenbus->xsh, tid, true); } static void xen_bus_print_dev(Monitor *mon, DeviceState *dev, int indent) @@ -111,143 +111,6 @@ static char *xen_bus_get_dev_path(DeviceState *dev) return xen_device_get_backend_path(XEN_DEVICE(dev)); } -struct XenWatch { - char *node, *key; - char *token; - XenWatchHandler handler; - void *opaque; - Notifier notifier; -}; - -static void watch_notify(Notifier *n, void *data) -{ - XenWatch *watch = container_of(n, XenWatch, notifier); - const char *token = data; - - if (!strcmp(watch->token, token)) { - watch->handler(watch->opaque); - } -} - -static XenWatch *new_watch(const char *node, const char *key, - XenWatchHandler handler, void *opaque) -{ - XenWatch *watch = g_new0(XenWatch, 1); - QemuUUID uuid; - - qemu_uuid_generate(&uuid); - - watch->token = qemu_uuid_unparse_strdup(&uuid); - watch->node = g_strdup(node); - watch->key = g_strdup(key); - watch->handler = handler; - watch->opaque = opaque; - watch->notifier.notify = watch_notify; - - return watch; -} - -static void free_watch(XenWatch *watch) -{ - g_free(watch->token); - g_free(watch->key); - g_free(watch->node); - - g_free(watch); -} - -struct XenWatchList { - struct xs_handle *xsh; - NotifierList notifiers; -}; - -static void watch_list_event(void *opaque) -{ - XenWatchList *watch_list = opaque; - char **v; - const char *token; - - v = xs_check_watch(watch_list->xsh); - if (!v) { - return; - } - - token = v[XS_WATCH_TOKEN]; - - notifier_list_notify(&watch_list->notifiers, (void *)token); - - free(v); -} - -static XenWatchList *watch_list_create(struct xs_handle *xsh) -{ - XenWatchList *watch_list = g_new0(XenWatchList, 1); - - g_assert(xsh); - - watch_list->xsh = xsh; - notifier_list_init(&watch_list->notifiers); - qemu_set_fd_handler(xs_fileno(watch_list->xsh), watch_list_event, NULL, - watch_list); - - return watch_list; -} - -static void watch_list_destroy(XenWatchList *watch_list) -{ - g_assert(notifier_list_empty(&watch_list->notifiers)); - qemu_set_fd_handler(xs_fileno(watch_list->xsh), NULL, NULL, NULL); - g_free(watch_list); -} - -static XenWatch *watch_list_add(XenWatchList *watch_list, const char *node, - const char *key, XenWatchHandler handler, - void *opaque, Error **errp) -{ - ERRP_GUARD(); - XenWatch *watch = new_watch(node, key, handler, opaque); - - notifier_list_add(&watch_list->notifiers, &watch->notifier); - - xs_node_watch(watch_list->xsh, node, key, watch->token, errp); - if (*errp) { - notifier_remove(&watch->notifier); - free_watch(watch); - - return NULL; - } - - return watch; -} - -static void watch_list_remove(XenWatchList *watch_list, XenWatch *watch, - Error **errp) -{ - xs_node_unwatch(watch_list->xsh, watch->node, watch->key, watch->token, - errp); - - notifier_remove(&watch->notifier); - free_watch(watch); -} - -static XenWatch *xen_bus_add_watch(XenBus *xenbus, const char *node, - const char *key, XenWatchHandler handler, - Error **errp) -{ - trace_xen_bus_add_watch(node, key); - - return watch_list_add(xenbus->watch_list, node, key, handler, xenbus, - errp); -} - -static void xen_bus_remove_watch(XenBus *xenbus, XenWatch *watch, - Error **errp) -{ - trace_xen_bus_remove_watch(watch->node, watch->key); - - watch_list_remove(xenbus->watch_list, watch, errp); -} - static void xen_bus_backend_create(XenBus *xenbus, const char *type, const char *name, char *path, Error **errp) @@ -261,15 +124,15 @@ static void xen_bus_backend_create(XenBus *xenbus, const char *type, 
trace_xen_bus_backend_create(type, path); again: - tid = xs_transaction_start(xenbus->xsh); + tid = qemu_xen_xs_transaction_start(xenbus->xsh); if (tid == XBT_NULL) { error_setg(errp, "failed xs_transaction_start"); return; } - key = xs_directory(xenbus->xsh, tid, path, &n); + key = qemu_xen_xs_directory(xenbus->xsh, tid, path, &n); if (!key) { - if (!xs_transaction_end(xenbus->xsh, tid, true)) { + if (!qemu_xen_xs_transaction_end(xenbus->xsh, tid, true)) { error_setg_errno(errp, errno, "failed xs_transaction_end"); } return; @@ -300,7 +163,7 @@ again: free(key); - if (!xs_transaction_end(xenbus->xsh, tid, false)) { + if (!qemu_xen_xs_transaction_end(xenbus->xsh, tid, false)) { qobject_unref(opts); if (errno == EAGAIN) { @@ -327,7 +190,7 @@ static void xen_bus_type_enumerate(XenBus *xenbus, const char *type) trace_xen_bus_type_enumerate(type); - backend = xs_directory(xenbus->xsh, XBT_NULL, domain_path, &n); + backend = qemu_xen_xs_directory(xenbus->xsh, XBT_NULL, domain_path, &n); if (!backend) { goto out; } @@ -372,7 +235,7 @@ static void xen_bus_enumerate(XenBus *xenbus) trace_xen_bus_enumerate(); - type = xs_directory(xenbus->xsh, XBT_NULL, "backend", &n); + type = qemu_xen_xs_directory(xenbus->xsh, XBT_NULL, "backend", &n); if (!type) { return; } @@ -415,7 +278,7 @@ static void xen_bus_cleanup(XenBus *xenbus) } } -static void xen_bus_backend_changed(void *opaque) +static void xen_bus_backend_changed(void *opaque, const char *path) { XenBus *xenbus = opaque; @@ -434,7 +297,7 @@ static void xen_bus_unrealize(BusState *bus) for (i = 0; i < xenbus->backend_types; i++) { if (xenbus->backend_watch[i]) { - xen_bus_remove_watch(xenbus, xenbus->backend_watch[i], NULL); + xs_node_unwatch(xenbus->xsh, xenbus->backend_watch[i]); } } @@ -442,13 +305,8 @@ static void xen_bus_unrealize(BusState *bus) xenbus->backend_watch = NULL; } - if (xenbus->watch_list) { - watch_list_destroy(xenbus->watch_list); - xenbus->watch_list = NULL; - } - if (xenbus->xsh) { - xs_close(xenbus->xsh); + qemu_xen_xs_close(xenbus->xsh); } } @@ -463,7 +321,7 @@ static void xen_bus_realize(BusState *bus, Error **errp) trace_xen_bus_realize(); - xenbus->xsh = xs_open(0); + xenbus->xsh = qemu_xen_xs_open(); if (!xenbus->xsh) { error_setg_errno(errp, errno, "failed xs_open"); goto fail; @@ -476,19 +334,18 @@ static void xen_bus_realize(BusState *bus, Error **errp) xenbus->backend_id = 0; /* Assume lack of node means dom0 */ } - xenbus->watch_list = watch_list_create(xenbus->xsh); - module_call_init(MODULE_INIT_XEN_BACKEND); type = xen_backend_get_types(&xenbus->backend_types); - xenbus->backend_watch = g_new(XenWatch *, xenbus->backend_types); + xenbus->backend_watch = g_new(struct qemu_xs_watch *, + xenbus->backend_types); for (i = 0; i < xenbus->backend_types; i++) { char *node = g_strdup_printf("backend/%s", type[i]); xenbus->backend_watch[i] = - xen_bus_add_watch(xenbus, node, key, xen_bus_backend_changed, - &local_err); + xs_node_watch(xenbus->xsh, node, key, xen_bus_backend_changed, + xenbus, &local_err); if (local_err) { /* This need not be treated as a hard error so don't propagate */ error_reportf_err(local_err, @@ -631,7 +488,7 @@ static bool xen_device_frontend_is_active(XenDevice *xendev) } } -static void xen_device_backend_changed(void *opaque) +static void xen_device_backend_changed(void *opaque, const char *path) { XenDevice *xendev = opaque; const char *type = object_get_typename(OBJECT(xendev)); @@ -685,66 +542,35 @@ static void xen_device_backend_changed(void *opaque) } } -static XenWatch 
*xen_device_add_watch(XenDevice *xendev, const char *node, - const char *key, - XenWatchHandler handler, - Error **errp) -{ - const char *type = object_get_typename(OBJECT(xendev)); - - trace_xen_device_add_watch(type, xendev->name, node, key); - - return watch_list_add(xendev->watch_list, node, key, handler, xendev, - errp); -} - -static void xen_device_remove_watch(XenDevice *xendev, XenWatch *watch, - Error **errp) -{ - const char *type = object_get_typename(OBJECT(xendev)); - - trace_xen_device_remove_watch(type, xendev->name, watch->node, - watch->key); - - watch_list_remove(xendev->watch_list, watch, errp); -} - - static void xen_device_backend_create(XenDevice *xendev, Error **errp) { ERRP_GUARD(); XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); - struct xs_permissions perms[2]; xendev->backend_path = xen_device_get_backend_path(xendev); - perms[0].id = xenbus->backend_id; - perms[0].perms = XS_PERM_NONE; - perms[1].id = xendev->frontend_id; - perms[1].perms = XS_PERM_READ; - g_assert(xenbus->xsh); - xs_node_create(xenbus->xsh, XBT_NULL, xendev->backend_path, perms, - ARRAY_SIZE(perms), errp); + xs_node_create(xenbus->xsh, XBT_NULL, xendev->backend_path, + xenbus->backend_id, xendev->frontend_id, XS_PERM_READ, errp); if (*errp) { error_prepend(errp, "failed to create backend: "); return; } xendev->backend_state_watch = - xen_device_add_watch(xendev, xendev->backend_path, - "state", xen_device_backend_changed, - errp); + xs_node_watch(xendev->xsh, xendev->backend_path, + "state", xen_device_backend_changed, xendev, + errp); if (*errp) { error_prepend(errp, "failed to watch backend state: "); return; } xendev->backend_online_watch = - xen_device_add_watch(xendev, xendev->backend_path, - "online", xen_device_backend_changed, - errp); + xs_node_watch(xendev->xsh, xendev->backend_path, + "online", xen_device_backend_changed, xendev, + errp); if (*errp) { error_prepend(errp, "failed to watch backend online: "); return; @@ -757,12 +583,12 @@ static void xen_device_backend_destroy(XenDevice *xendev) Error *local_err = NULL; if (xendev->backend_online_watch) { - xen_device_remove_watch(xendev, xendev->backend_online_watch, NULL); + xs_node_unwatch(xendev->xsh, xendev->backend_online_watch); xendev->backend_online_watch = NULL; } if (xendev->backend_state_watch) { - xen_device_remove_watch(xendev, xendev->backend_state_watch, NULL); + xs_node_unwatch(xendev->xsh, xendev->backend_state_watch); xendev->backend_state_watch = NULL; } @@ -837,7 +663,7 @@ static void xen_device_frontend_set_state(XenDevice *xendev, } } -static void xen_device_frontend_changed(void *opaque) +static void xen_device_frontend_changed(void *opaque, const char *path) { XenDevice *xendev = opaque; XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev); @@ -885,7 +711,6 @@ static void xen_device_frontend_create(XenDevice *xendev, Error **errp) { ERRP_GUARD(); XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); - struct xs_permissions perms[2]; xendev->frontend_path = xen_device_get_frontend_path(xendev); @@ -894,15 +719,11 @@ static void xen_device_frontend_create(XenDevice *xendev, Error **errp) * toolstack. 
*/ if (!xen_device_frontend_exists(xendev)) { - perms[0].id = xendev->frontend_id; - perms[0].perms = XS_PERM_NONE; - perms[1].id = xenbus->backend_id; - perms[1].perms = XS_PERM_READ | XS_PERM_WRITE; - g_assert(xenbus->xsh); - xs_node_create(xenbus->xsh, XBT_NULL, xendev->frontend_path, perms, - ARRAY_SIZE(perms), errp); + xs_node_create(xenbus->xsh, XBT_NULL, xendev->frontend_path, + xendev->frontend_id, xenbus->backend_id, + XS_PERM_READ | XS_PERM_WRITE, errp); if (*errp) { error_prepend(errp, "failed to create frontend: "); return; @@ -910,8 +731,8 @@ static void xen_device_frontend_create(XenDevice *xendev, Error **errp) } xendev->frontend_state_watch = - xen_device_add_watch(xendev, xendev->frontend_path, "state", - xen_device_frontend_changed, errp); + xs_node_watch(xendev->xsh, xendev->frontend_path, "state", + xen_device_frontend_changed, xendev, errp); if (*errp) { error_prepend(errp, "failed to watch frontend state: "); } @@ -923,8 +744,7 @@ static void xen_device_frontend_destroy(XenDevice *xendev) Error *local_err = NULL; if (xendev->frontend_state_watch) { - xen_device_remove_watch(xendev, xendev->frontend_state_watch, - NULL); + xs_node_unwatch(xendev->xsh, xendev->frontend_state_watch); xendev->frontend_state_watch = NULL; } @@ -947,7 +767,7 @@ static void xen_device_frontend_destroy(XenDevice *xendev) void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs, Error **errp) { - if (xengnttab_set_max_grants(xendev->xgth, nr_refs)) { + if (qemu_xen_gnttab_set_max_grants(xendev->xgth, nr_refs)) { error_setg_errno(errp, errno, "xengnttab_set_max_grants failed"); } } @@ -956,9 +776,8 @@ void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs, unsigned int nr_refs, int prot, Error **errp) { - void *map = xengnttab_map_domain_grant_refs(xendev->xgth, nr_refs, - xendev->frontend_id, refs, - prot); + void *map = qemu_xen_gnttab_map_refs(xendev->xgth, nr_refs, + xendev->frontend_id, refs, prot); if (!map) { error_setg_errno(errp, errno, @@ -968,112 +787,20 @@ void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs, return map; } -void xen_device_unmap_grant_refs(XenDevice *xendev, void *map, +void xen_device_unmap_grant_refs(XenDevice *xendev, void *map, uint32_t *refs, unsigned int nr_refs, Error **errp) { - if (xengnttab_unmap(xendev->xgth, map, nr_refs)) { + if (qemu_xen_gnttab_unmap(xendev->xgth, map, refs, nr_refs)) { error_setg_errno(errp, errno, "xengnttab_unmap failed"); } } -static void compat_copy_grant_refs(XenDevice *xendev, bool to_domain, - XenDeviceGrantCopySegment segs[], - unsigned int nr_segs, Error **errp) -{ - uint32_t *refs = g_new(uint32_t, nr_segs); - int prot = to_domain ? PROT_WRITE : PROT_READ; - void *map; - unsigned int i; - - for (i = 0; i < nr_segs; i++) { - XenDeviceGrantCopySegment *seg = &segs[i]; - - refs[i] = to_domain ? 
seg->dest.foreign.ref : - seg->source.foreign.ref; - } - - map = xengnttab_map_domain_grant_refs(xendev->xgth, nr_segs, - xendev->frontend_id, refs, - prot); - if (!map) { - error_setg_errno(errp, errno, - "xengnttab_map_domain_grant_refs failed"); - goto done; - } - - for (i = 0; i < nr_segs; i++) { - XenDeviceGrantCopySegment *seg = &segs[i]; - void *page = map + (i * XC_PAGE_SIZE); - - if (to_domain) { - memcpy(page + seg->dest.foreign.offset, seg->source.virt, - seg->len); - } else { - memcpy(seg->dest.virt, page + seg->source.foreign.offset, - seg->len); - } - } - - if (xengnttab_unmap(xendev->xgth, map, nr_segs)) { - error_setg_errno(errp, errno, "xengnttab_unmap failed"); - } - -done: - g_free(refs); -} - void xen_device_copy_grant_refs(XenDevice *xendev, bool to_domain, XenDeviceGrantCopySegment segs[], unsigned int nr_segs, Error **errp) { - xengnttab_grant_copy_segment_t *xengnttab_segs; - unsigned int i; - - if (!xendev->feature_grant_copy) { - compat_copy_grant_refs(xendev, to_domain, segs, nr_segs, errp); - return; - } - - xengnttab_segs = g_new0(xengnttab_grant_copy_segment_t, nr_segs); - - for (i = 0; i < nr_segs; i++) { - XenDeviceGrantCopySegment *seg = &segs[i]; - xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i]; - - if (to_domain) { - xengnttab_seg->flags = GNTCOPY_dest_gref; - xengnttab_seg->dest.foreign.domid = xendev->frontend_id; - xengnttab_seg->dest.foreign.ref = seg->dest.foreign.ref; - xengnttab_seg->dest.foreign.offset = seg->dest.foreign.offset; - xengnttab_seg->source.virt = seg->source.virt; - } else { - xengnttab_seg->flags = GNTCOPY_source_gref; - xengnttab_seg->source.foreign.domid = xendev->frontend_id; - xengnttab_seg->source.foreign.ref = seg->source.foreign.ref; - xengnttab_seg->source.foreign.offset = - seg->source.foreign.offset; - xengnttab_seg->dest.virt = seg->dest.virt; - } - - xengnttab_seg->len = seg->len; - } - - if (xengnttab_grant_copy(xendev->xgth, nr_segs, xengnttab_segs)) { - error_setg_errno(errp, errno, "xengnttab_grant_copy failed"); - goto done; - } - - for (i = 0; i < nr_segs; i++) { - xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i]; - - if (xengnttab_seg->status != GNTST_okay) { - error_setg(errp, "xengnttab_grant_copy seg[%u] failed", i); - break; - } - } - -done: - g_free(xengnttab_segs); + qemu_xen_gnttab_grant_copy(xendev->xgth, to_domain, xendev->frontend_id, + (XenGrantCopySegment *)segs, nr_segs, errp); } struct XenEventChannel { @@ -1095,12 +822,12 @@ static bool xen_device_poll(void *opaque) static void xen_device_event(void *opaque) { XenEventChannel *channel = opaque; - unsigned long port = xenevtchn_pending(channel->xeh); + unsigned long port = qemu_xen_evtchn_pending(channel->xeh); if (port == channel->local_port) { xen_device_poll(channel); - xenevtchn_unmask(channel->xeh, port); + qemu_xen_evtchn_unmask(channel->xeh, port); } } @@ -1115,12 +842,15 @@ void xen_device_set_event_channel_context(XenDevice *xendev, } if (channel->ctx) - aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true, + aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), NULL, NULL, NULL, NULL, NULL); channel->ctx = ctx; - aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true, - xen_device_event, NULL, xen_device_poll, NULL, channel); + if (ctx) { + aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), + xen_device_event, NULL, xen_device_poll, NULL, + channel); + } } XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev, @@ -1131,13 +861,13 @@ 
XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev, XenEventChannel *channel = g_new0(XenEventChannel, 1); xenevtchn_port_or_error_t local_port; - channel->xeh = xenevtchn_open(NULL, 0); + channel->xeh = qemu_xen_evtchn_open(); if (!channel->xeh) { error_setg_errno(errp, errno, "failed xenevtchn_open"); goto fail; } - local_port = xenevtchn_bind_interdomain(channel->xeh, + local_port = qemu_xen_evtchn_bind_interdomain(channel->xeh, xendev->frontend_id, port); if (local_port < 0) { @@ -1160,7 +890,7 @@ XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev, fail: if (channel->xeh) { - xenevtchn_close(channel->xeh); + qemu_xen_evtchn_close(channel->xeh); } g_free(channel); @@ -1177,7 +907,7 @@ void xen_device_notify_event_channel(XenDevice *xendev, return; } - if (xenevtchn_notify(channel->xeh, channel->local_port) < 0) { + if (qemu_xen_evtchn_notify(channel->xeh, channel->local_port) < 0) { error_setg_errno(errp, errno, "xenevtchn_notify failed"); } } @@ -1193,14 +923,14 @@ void xen_device_unbind_event_channel(XenDevice *xendev, QLIST_REMOVE(channel, list); - aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true, + aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), NULL, NULL, NULL, NULL, NULL); - if (xenevtchn_unbind(channel->xeh, channel->local_port) < 0) { + if (qemu_xen_evtchn_unbind(channel->xeh, channel->local_port) < 0) { error_setg_errno(errp, errno, "xenevtchn_unbind failed"); } - xenevtchn_close(channel->xeh); + qemu_xen_evtchn_close(channel->xeh); g_free(channel); } @@ -1235,17 +965,12 @@ static void xen_device_unrealize(DeviceState *dev) xen_device_backend_destroy(xendev); if (xendev->xgth) { - xengnttab_close(xendev->xgth); + qemu_xen_gnttab_close(xendev->xgth); xendev->xgth = NULL; } - if (xendev->watch_list) { - watch_list_destroy(xendev->watch_list); - xendev->watch_list = NULL; - } - if (xendev->xsh) { - xs_close(xendev->xsh); + qemu_xen_xs_close(xendev->xsh); xendev->xsh = NULL; } @@ -1290,23 +1015,18 @@ static void xen_device_realize(DeviceState *dev, Error **errp) trace_xen_device_realize(type, xendev->name); - xendev->xsh = xs_open(0); + xendev->xsh = qemu_xen_xs_open(); if (!xendev->xsh) { error_setg_errno(errp, errno, "failed xs_open"); goto unrealize; } - xendev->watch_list = watch_list_create(xendev->xsh); - - xendev->xgth = xengnttab_open(NULL, 0); + xendev->xgth = qemu_xen_gnttab_open(); if (!xendev->xgth) { error_setg_errno(errp, errno, "failed xengnttab_open"); goto unrealize; } - xendev->feature_grant_copy = - (xengnttab_grant_copy(xendev->xgth, 0, NULL) == 0); - xen_device_backend_create(xendev, errp); if (*errp) { goto unrealize; @@ -1317,13 +1037,6 @@ static void xen_device_realize(DeviceState *dev, Error **errp) goto unrealize; } - if (xendev_class->realize) { - xendev_class->realize(xendev, errp); - if (*errp) { - goto unrealize; - } - } - xen_device_backend_printf(xendev, "frontend", "%s", xendev->frontend_path); xen_device_backend_printf(xendev, "frontend-id", "%u", @@ -1342,6 +1055,13 @@ static void xen_device_realize(DeviceState *dev, Error **errp) xen_device_frontend_set_state(xendev, XenbusStateInitialising, true); } + if (xendev_class->realize) { + xendev_class->realize(xendev, errp); + if (*errp) { + goto unrealize; + } + } + xendev->exit.notify = xen_device_exit; qemu_add_exit_notifier(&xendev->exit); return; diff --git a/hw/xen/xen-legacy-backend.c b/hw/xen/xen-legacy-backend.c index 085fd31ef7..4ded3cec23 100644 --- a/hw/xen/xen-legacy-backend.c +++ b/hw/xen/xen-legacy-backend.c @@ -39,11 
+39,10 @@ BusState *xen_sysbus; /* ------------------------------------------------------------- */ /* public */ -struct xs_handle *xenstore; +struct qemu_xs_handle *xenstore; const char *xen_protocol; /* private */ -static bool xen_feature_grant_copy; static int debug; int xenstore_write_be_str(struct XenLegacyDevice *xendev, const char *node, @@ -113,7 +112,7 @@ void xen_be_set_max_grant_refs(struct XenLegacyDevice *xendev, { assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV); - if (xengnttab_set_max_grants(xendev->gnttabdev, nr_refs)) { + if (qemu_xen_gnttab_set_max_grants(xendev->gnttabdev, nr_refs)) { xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n", strerror(errno)); } @@ -126,8 +125,8 @@ void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs, assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV); - ptr = xengnttab_map_domain_grant_refs(xendev->gnttabdev, nr_refs, - xen_domid, refs, prot); + ptr = qemu_xen_gnttab_map_refs(xendev->gnttabdev, nr_refs, xen_domid, refs, + prot); if (!ptr) { xen_pv_printf(xendev, 0, "xengnttab_map_domain_grant_refs failed: %s\n", @@ -138,123 +137,31 @@ void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs, } void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr, - unsigned int nr_refs) + uint32_t *refs, unsigned int nr_refs) { assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV); - if (xengnttab_unmap(xendev->gnttabdev, ptr, nr_refs)) { + if (qemu_xen_gnttab_unmap(xendev->gnttabdev, ptr, refs, nr_refs)) { xen_pv_printf(xendev, 0, "xengnttab_unmap failed: %s\n", strerror(errno)); } } -static int compat_copy_grant_refs(struct XenLegacyDevice *xendev, - bool to_domain, - XenGrantCopySegment segs[], - unsigned int nr_segs) -{ - uint32_t *refs = g_new(uint32_t, nr_segs); - int prot = to_domain ? PROT_WRITE : PROT_READ; - void *pages; - unsigned int i; - - for (i = 0; i < nr_segs; i++) { - XenGrantCopySegment *seg = &segs[i]; - - refs[i] = to_domain ? 
- seg->dest.foreign.ref : seg->source.foreign.ref; - } - - pages = xengnttab_map_domain_grant_refs(xendev->gnttabdev, nr_segs, - xen_domid, refs, prot); - if (!pages) { - xen_pv_printf(xendev, 0, - "xengnttab_map_domain_grant_refs failed: %s\n", - strerror(errno)); - g_free(refs); - return -1; - } - - for (i = 0; i < nr_segs; i++) { - XenGrantCopySegment *seg = &segs[i]; - void *page = pages + (i * XC_PAGE_SIZE); - - if (to_domain) { - memcpy(page + seg->dest.foreign.offset, seg->source.virt, - seg->len); - } else { - memcpy(seg->dest.virt, page + seg->source.foreign.offset, - seg->len); - } - } - - if (xengnttab_unmap(xendev->gnttabdev, pages, nr_segs)) { - xen_pv_printf(xendev, 0, "xengnttab_unmap failed: %s\n", - strerror(errno)); - } - - g_free(refs); - return 0; -} - int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev, bool to_domain, XenGrantCopySegment segs[], unsigned int nr_segs) { - xengnttab_grant_copy_segment_t *xengnttab_segs; - unsigned int i; int rc; assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV); - if (!xen_feature_grant_copy) { - return compat_copy_grant_refs(xendev, to_domain, segs, nr_segs); - } - - xengnttab_segs = g_new0(xengnttab_grant_copy_segment_t, nr_segs); - - for (i = 0; i < nr_segs; i++) { - XenGrantCopySegment *seg = &segs[i]; - xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i]; - - if (to_domain) { - xengnttab_seg->flags = GNTCOPY_dest_gref; - xengnttab_seg->dest.foreign.domid = xen_domid; - xengnttab_seg->dest.foreign.ref = seg->dest.foreign.ref; - xengnttab_seg->dest.foreign.offset = seg->dest.foreign.offset; - xengnttab_seg->source.virt = seg->source.virt; - } else { - xengnttab_seg->flags = GNTCOPY_source_gref; - xengnttab_seg->source.foreign.domid = xen_domid; - xengnttab_seg->source.foreign.ref = seg->source.foreign.ref; - xengnttab_seg->source.foreign.offset = - seg->source.foreign.offset; - xengnttab_seg->dest.virt = seg->dest.virt; - } - - xengnttab_seg->len = seg->len; - } - - rc = xengnttab_grant_copy(xendev->gnttabdev, nr_segs, xengnttab_segs); - + rc = qemu_xen_gnttab_grant_copy(xendev->gnttabdev, to_domain, xen_domid, + segs, nr_segs, NULL); if (rc) { - xen_pv_printf(xendev, 0, "xengnttab_copy failed: %s\n", - strerror(errno)); + xen_pv_printf(xendev, 0, "xengnttab_grant_copy failed: %s\n", + strerror(-rc)); } - - for (i = 0; i < nr_segs; i++) { - xengnttab_grant_copy_segment_t *xengnttab_seg = - &xengnttab_segs[i]; - - if (xengnttab_seg->status != GNTST_okay) { - xen_pv_printf(xendev, 0, "segment[%u] status: %d\n", i, - xengnttab_seg->status); - rc = -1; - } - } - - g_free(xengnttab_segs); return rc; } @@ -294,13 +201,13 @@ static struct XenLegacyDevice *xen_be_get_xendev(const char *type, int dom, xendev->debug = debug; xendev->local_port = -1; - xendev->evtchndev = xenevtchn_open(NULL, 0); + xendev->evtchndev = qemu_xen_evtchn_open(); if (xendev->evtchndev == NULL) { xen_pv_printf(NULL, 0, "can't open evtchn device\n"); qdev_unplug(DEVICE(xendev), NULL); return NULL; } - qemu_set_cloexec(xenevtchn_fd(xendev->evtchndev)); + qemu_set_cloexec(qemu_xen_evtchn_fd(xendev->evtchndev)); xen_pv_insert_xendev(xendev); @@ -367,6 +274,25 @@ static void xen_be_frontend_changed(struct XenLegacyDevice *xendev, } } +static void xenstore_update_fe(void *opaque, const char *watch) +{ + struct XenLegacyDevice *xendev = opaque; + const char *node; + unsigned int len; + + len = strlen(xendev->fe); + if (strncmp(xendev->fe, watch, len) != 0) { + return; + } + if (watch[len] != '/') { + return; + } + node = watch + len + 1; + + 
xen_be_frontend_changed(xendev, node); + xen_be_check_state(xendev); +} + /* ------------------------------------------------------------- */ /* Check for possible state transitions and perform them. */ @@ -380,7 +306,6 @@ static void xen_be_frontend_changed(struct XenLegacyDevice *xendev, */ static int xen_be_try_setup(struct XenLegacyDevice *xendev) { - char token[XEN_BUFSIZE]; int be_state; if (xenstore_read_be_int(xendev, "state", &be_state) == -1) { @@ -401,8 +326,9 @@ static int xen_be_try_setup(struct XenLegacyDevice *xendev) } /* setup frontend watch */ - snprintf(token, sizeof(token), "fe:%p", xendev); - if (!xs_watch(xenstore, xendev->fe, token)) { + xendev->watch = qemu_xen_xs_watch(xenstore, xendev->fe, xenstore_update_fe, + xendev); + if (!xendev->watch) { xen_pv_printf(xendev, 0, "watching frontend path (%s) failed\n", xendev->fe); return -1; @@ -466,7 +392,7 @@ static int xen_be_try_initialise(struct XenLegacyDevice *xendev) } if (xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV) { - xendev->gnttabdev = xengnttab_open(NULL, 0); + xendev->gnttabdev = qemu_xen_gnttab_open(); if (xendev->gnttabdev == NULL) { xen_pv_printf(NULL, 0, "can't open gnttab device\n"); return -1; @@ -524,7 +450,7 @@ static void xen_be_disconnect(struct XenLegacyDevice *xendev, xendev->ops->disconnect(xendev); } if (xendev->gnttabdev) { - xengnttab_close(xendev->gnttabdev); + qemu_xen_gnttab_close(xendev->gnttabdev); xendev->gnttabdev = NULL; } if (xendev->be_state != state) { @@ -591,24 +517,67 @@ void xen_be_check_state(struct XenLegacyDevice *xendev) /* ------------------------------------------------------------- */ +struct xenstore_be { + const char *type; + int dom; + struct XenDevOps *ops; +}; + +static void xenstore_update_be(void *opaque, const char *watch) +{ + struct xenstore_be *be = opaque; + struct XenLegacyDevice *xendev; + char path[XEN_BUFSIZE], *bepath; + unsigned int len, dev; + + len = snprintf(path, sizeof(path), "backend/%s/%d", be->type, be->dom); + if (strncmp(path, watch, len) != 0) { + return; + } + if (sscanf(watch + len, "/%u/%255s", &dev, path) != 2) { + strcpy(path, ""); + if (sscanf(watch + len, "/%u", &dev) != 1) { + dev = -1; + } + } + if (dev == -1) { + return; + } + + xendev = xen_be_get_xendev(be->type, be->dom, dev, be->ops); + if (xendev != NULL) { + bepath = qemu_xen_xs_read(xenstore, 0, xendev->be, &len); + if (bepath == NULL) { + xen_pv_del_xendev(xendev); + } else { + free(bepath); + xen_be_backend_changed(xendev, path); + xen_be_check_state(xendev); + } + } +} + static int xenstore_scan(const char *type, int dom, struct XenDevOps *ops) { struct XenLegacyDevice *xendev; - char path[XEN_BUFSIZE], token[XEN_BUFSIZE]; + char path[XEN_BUFSIZE]; + struct xenstore_be *be = g_new0(struct xenstore_be, 1); char **dev = NULL; unsigned int cdev, j; /* setup watch */ - snprintf(token, sizeof(token), "be:%p:%d:%p", type, dom, ops); + be->type = type; + be->dom = dom; + be->ops = ops; snprintf(path, sizeof(path), "backend/%s/%d", type, dom); - if (!xs_watch(xenstore, path, token)) { + if (!qemu_xen_xs_watch(xenstore, path, xenstore_update_be, be)) { xen_pv_printf(NULL, 0, "xen be: watching backend path (%s) failed\n", path); return -1; } /* look for backends */ - dev = xs_directory(xenstore, 0, path, &cdev); + dev = qemu_xen_xs_directory(xenstore, 0, path, &cdev); if (!dev) { return 0; } @@ -623,99 +592,8 @@ static int xenstore_scan(const char *type, int dom, struct XenDevOps *ops) return 0; } -void xenstore_update_be(char *watch, char *type, int dom, - struct XenDevOps *ops) -{ 
- struct XenLegacyDevice *xendev; - char path[XEN_BUFSIZE], *bepath; - unsigned int len, dev; - - len = snprintf(path, sizeof(path), "backend/%s/%d", type, dom); - if (strncmp(path, watch, len) != 0) { - return; - } - if (sscanf(watch + len, "/%u/%255s", &dev, path) != 2) { - strcpy(path, ""); - if (sscanf(watch + len, "/%u", &dev) != 1) { - dev = -1; - } - } - if (dev == -1) { - return; - } - - xendev = xen_be_get_xendev(type, dom, dev, ops); - if (xendev != NULL) { - bepath = xs_read(xenstore, 0, xendev->be, &len); - if (bepath == NULL) { - xen_pv_del_xendev(xendev); - } else { - free(bepath); - xen_be_backend_changed(xendev, path); - xen_be_check_state(xendev); - } - } -} - -void xenstore_update_fe(char *watch, struct XenLegacyDevice *xendev) -{ - char *node; - unsigned int len; - - len = strlen(xendev->fe); - if (strncmp(xendev->fe, watch, len) != 0) { - return; - } - if (watch[len] != '/') { - return; - } - node = watch + len + 1; - - xen_be_frontend_changed(xendev, node); - xen_be_check_state(xendev); -} /* -------------------------------------------------------------------- */ -int xen_be_init(void) -{ - xengnttab_handle *gnttabdev; - - xenstore = xs_daemon_open(); - if (!xenstore) { - xen_pv_printf(NULL, 0, "can't connect to xenstored\n"); - return -1; - } - - qemu_set_fd_handler(xs_fileno(xenstore), xenstore_update, NULL, NULL); - - if (xen_xc == NULL || xen_fmem == NULL) { - /* Check if xen_init() have been called */ - goto err; - } - - gnttabdev = xengnttab_open(NULL, 0); - if (gnttabdev != NULL) { - if (xengnttab_grant_copy(gnttabdev, 0, NULL) == 0) { - xen_feature_grant_copy = true; - } - xengnttab_close(gnttabdev); - } - - xen_sysdev = qdev_new(TYPE_XENSYSDEV); - sysbus_realize_and_unref(SYS_BUS_DEVICE(xen_sysdev), &error_fatal); - xen_sysbus = qbus_new(TYPE_XENSYSBUS, xen_sysdev, "xen-sysbus"); - qbus_set_bus_hotplug_handler(xen_sysbus); - - return 0; - -err: - qemu_set_fd_handler(xs_fileno(xenstore), NULL, NULL, NULL); - xs_daemon_close(xenstore); - xenstore = NULL; - - return -1; -} - static void xen_set_dynamic_sysbus(void) { Object *machine = qdev_get_machine(); @@ -725,6 +603,36 @@ static void xen_set_dynamic_sysbus(void) machine_class_allow_dynamic_sysbus_dev(mc, TYPE_XENSYSDEV); } +void xen_be_init(void) +{ + xenstore = qemu_xen_xs_open(); + if (!xenstore) { + xen_pv_printf(NULL, 0, "can't connect to xenstored\n"); + exit(1); + } + + if (xen_evtchn_ops == NULL || xen_gnttab_ops == NULL) { + xen_pv_printf(NULL, 0, "Xen operations not set up\n"); + exit(1); + } + + xen_sysdev = qdev_new(TYPE_XENSYSDEV); + sysbus_realize_and_unref(SYS_BUS_DEVICE(xen_sysdev), &error_fatal); + xen_sysbus = qbus_new(TYPE_XENSYSBUS, xen_sysdev, "xen-sysbus"); + qbus_set_bus_hotplug_handler(xen_sysbus); + + xen_set_dynamic_sysbus(); + + xen_be_register("console", &xen_console_ops); + xen_be_register("vkbd", &xen_kbdmouse_ops); +#ifdef CONFIG_VIRTFS + xen_be_register("9pfs", &xen_9pfs_ops); +#endif +#ifdef CONFIG_USB_LIBUSB + xen_be_register("qusb", &xen_usb_ops); +#endif +} + int xen_be_register(const char *type, struct XenDevOps *ops) { char path[50]; @@ -744,33 +652,19 @@ int xen_be_register(const char *type, struct XenDevOps *ops) return xenstore_scan(type, xen_domid, ops); } -void xen_be_register_common(void) -{ - xen_set_dynamic_sysbus(); - - xen_be_register("console", &xen_console_ops); - xen_be_register("vkbd", &xen_kbdmouse_ops); -#ifdef CONFIG_VIRTFS - xen_be_register("9pfs", &xen_9pfs_ops); -#endif -#ifdef CONFIG_USB_LIBUSB - xen_be_register("qusb", &xen_usb_ops); -#endif -} - int 
xen_be_bind_evtchn(struct XenLegacyDevice *xendev) { if (xendev->local_port != -1) { return 0; } - xendev->local_port = xenevtchn_bind_interdomain + xendev->local_port = qemu_xen_evtchn_bind_interdomain (xendev->evtchndev, xendev->dom, xendev->remote_port); if (xendev->local_port == -1) { xen_pv_printf(xendev, 0, "xenevtchn_bind_interdomain failed\n"); return -1; } xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port); - qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev), + qemu_set_fd_handler(qemu_xen_evtchn_fd(xendev->evtchndev), xen_pv_evtchn_event, NULL, xendev); return 0; } diff --git a/hw/xen/xen-operations.c b/hw/xen/xen-operations.c new file mode 100644 index 0000000000..4b78fbf4bd --- /dev/null +++ b/hw/xen/xen-operations.c @@ -0,0 +1,478 @@ +/* + * QEMU Xen backend support: Operations for true Xen + * + * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Authors: David Woodhouse + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/uuid.h" +#include "qapi/error.h" + +#include "hw/xen/xen_native.h" +#include "hw/xen/xen_backend_ops.h" + +/* + * If we have new enough libxenctrl then we do not want/need these compat + * interfaces, despite what the user supplied cflags might say. They + * must be undefined before including xenctrl.h + */ +#undef XC_WANT_COMPAT_EVTCHN_API +#undef XC_WANT_COMPAT_GNTTAB_API +#undef XC_WANT_COMPAT_MAP_FOREIGN_API + +#include + +/* + * We don't support Xen prior to 4.2.0. + */ + +/* Xen 4.2 through 4.6 */ +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701 + +typedef xc_evtchn xenevtchn_handle; +typedef evtchn_port_or_error_t xenevtchn_port_or_error_t; + +#define xenevtchn_open(l, f) xc_evtchn_open(l, f); +#define xenevtchn_close(h) xc_evtchn_close(h) +#define xenevtchn_fd(h) xc_evtchn_fd(h) +#define xenevtchn_pending(h) xc_evtchn_pending(h) +#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p) +#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p) +#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p) +#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p) + +typedef xc_gnttab xengnttab_handle; + +#define xengnttab_open(l, f) xc_gnttab_open(l, f) +#define xengnttab_close(h) xc_gnttab_close(h) +#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n) +#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p) +#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n) +#define xengnttab_map_grant_refs(h, c, d, r, p) \ + xc_gnttab_map_grant_refs(h, c, d, r, p) +#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \ + xc_gnttab_map_domain_grant_refs(h, c, d, r, p) + +typedef xc_interface xenforeignmemory_handle; + +#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */ + +#include +#include +#include + +#endif + +/* Xen before 4.8 */ + +static int libxengnttab_fallback_grant_copy(xengnttab_handle *xgt, + bool to_domain, uint32_t domid, + XenGrantCopySegment segs[], + unsigned int nr_segs, Error **errp) +{ + uint32_t *refs = g_new(uint32_t, nr_segs); + int prot = to_domain ? PROT_WRITE : PROT_READ; + void *map; + unsigned int i; + int rc = 0; + + for (i = 0; i < nr_segs; i++) { + XenGrantCopySegment *seg = &segs[i]; + + refs[i] = to_domain ? 
seg->dest.foreign.ref : + seg->source.foreign.ref; + } + map = xengnttab_map_domain_grant_refs(xgt, nr_segs, domid, refs, prot); + if (!map) { + if (errp) { + error_setg_errno(errp, errno, + "xengnttab_map_domain_grant_refs failed"); + } + rc = -errno; + goto done; + } + + for (i = 0; i < nr_segs; i++) { + XenGrantCopySegment *seg = &segs[i]; + void *page = map + (i * XEN_PAGE_SIZE); + + if (to_domain) { + memcpy(page + seg->dest.foreign.offset, seg->source.virt, + seg->len); + } else { + memcpy(seg->dest.virt, page + seg->source.foreign.offset, + seg->len); + } + } + + if (xengnttab_unmap(xgt, map, nr_segs)) { + if (errp) { + error_setg_errno(errp, errno, "xengnttab_unmap failed"); + } + rc = -errno; + } + +done: + g_free(refs); + return rc; +} + +#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800 + +static int libxengnttab_backend_grant_copy(xengnttab_handle *xgt, + bool to_domain, uint32_t domid, + XenGrantCopySegment *segs, + uint32_t nr_segs, Error **errp) +{ + xengnttab_grant_copy_segment_t *xengnttab_segs; + unsigned int i; + int rc; + + xengnttab_segs = g_new0(xengnttab_grant_copy_segment_t, nr_segs); + + for (i = 0; i < nr_segs; i++) { + XenGrantCopySegment *seg = &segs[i]; + xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i]; + + if (to_domain) { + xengnttab_seg->flags = GNTCOPY_dest_gref; + xengnttab_seg->dest.foreign.domid = domid; + xengnttab_seg->dest.foreign.ref = seg->dest.foreign.ref; + xengnttab_seg->dest.foreign.offset = seg->dest.foreign.offset; + xengnttab_seg->source.virt = seg->source.virt; + } else { + xengnttab_seg->flags = GNTCOPY_source_gref; + xengnttab_seg->source.foreign.domid = domid; + xengnttab_seg->source.foreign.ref = seg->source.foreign.ref; + xengnttab_seg->source.foreign.offset = + seg->source.foreign.offset; + xengnttab_seg->dest.virt = seg->dest.virt; + } + + xengnttab_seg->len = seg->len; + } + + if (xengnttab_grant_copy(xgt, nr_segs, xengnttab_segs)) { + if (errp) { + error_setg_errno(errp, errno, "xengnttab_grant_copy failed"); + } + rc = -errno; + goto done; + } + + rc = 0; + for (i = 0; i < nr_segs; i++) { + xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i]; + + if (xengnttab_seg->status != GNTST_okay) { + if (errp) { + error_setg(errp, "xengnttab_grant_copy seg[%u] failed", i); + } + rc = -EIO; + break; + } + } + +done: + g_free(xengnttab_segs); + return rc; +} +#endif + +static xenevtchn_handle *libxenevtchn_backend_open(void) +{ + return xenevtchn_open(NULL, 0); +} + +struct evtchn_backend_ops libxenevtchn_backend_ops = { + .open = libxenevtchn_backend_open, + .close = xenevtchn_close, + .bind_interdomain = xenevtchn_bind_interdomain, + .unbind = xenevtchn_unbind, + .get_fd = xenevtchn_fd, + .notify = xenevtchn_notify, + .unmask = xenevtchn_unmask, + .pending = xenevtchn_pending, +}; + +static xengnttab_handle *libxengnttab_backend_open(void) +{ + return xengnttab_open(NULL, 0); +} + +static int libxengnttab_backend_unmap(xengnttab_handle *xgt, + void *start_address, uint32_t *refs, + uint32_t count) +{ + return xengnttab_unmap(xgt, start_address, count); +} + + +static struct gnttab_backend_ops libxengnttab_backend_ops = { + .features = XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE, + .open = libxengnttab_backend_open, + .close = xengnttab_close, + .grant_copy = libxengnttab_fallback_grant_copy, + .set_max_grants = xengnttab_set_max_grants, + .map_refs = xengnttab_map_domain_grant_refs, + .unmap = libxengnttab_backend_unmap, +}; + +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701 + +static void 
*libxenforeignmem_backend_map(uint32_t dom, void *addr, int prot, + size_t pages, xen_pfn_t *pfns, + int *errs) +{ + if (errs) { + return xc_map_foreign_bulk(xen_xc, dom, prot, pfns, errs, pages); + } else { + return xc_map_foreign_pages(xen_xc, dom, prot, pfns, pages); + } +} + +static int libxenforeignmem_backend_unmap(void *addr, size_t pages) +{ + return munmap(addr, pages * XC_PAGE_SIZE); +} + +#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */ + +static void *libxenforeignmem_backend_map(uint32_t dom, void *addr, int prot, + size_t pages, xen_pfn_t *pfns, + int *errs) +{ + return xenforeignmemory_map2(xen_fmem, dom, addr, prot, 0, pages, pfns, + errs); +} + +static int libxenforeignmem_backend_unmap(void *addr, size_t pages) +{ + return xenforeignmemory_unmap(xen_fmem, addr, pages); +} + +#endif + +struct foreignmem_backend_ops libxenforeignmem_backend_ops = { + .map = libxenforeignmem_backend_map, + .unmap = libxenforeignmem_backend_unmap, +}; + +struct qemu_xs_handle { + struct xs_handle *xsh; + NotifierList notifiers; +}; + +static void watch_event(void *opaque) +{ + struct qemu_xs_handle *h = opaque; + + for (;;) { + char **v = xs_check_watch(h->xsh); + + if (!v) { + break; + } + + notifier_list_notify(&h->notifiers, v); + free(v); + } +} + +static struct qemu_xs_handle *libxenstore_open(void) +{ + struct xs_handle *xsh = xs_open(0); + struct qemu_xs_handle *h; + + if (!xsh) { + return NULL; + } + + h = g_new0(struct qemu_xs_handle, 1); + h->xsh = xsh; + + notifier_list_init(&h->notifiers); + qemu_set_fd_handler(xs_fileno(h->xsh), watch_event, NULL, h); + + return h; +} + +static void libxenstore_close(struct qemu_xs_handle *h) +{ + g_assert(notifier_list_empty(&h->notifiers)); + qemu_set_fd_handler(xs_fileno(h->xsh), NULL, NULL, NULL); + xs_close(h->xsh); + g_free(h); +} + +static char *libxenstore_get_domain_path(struct qemu_xs_handle *h, + unsigned int domid) +{ + return xs_get_domain_path(h->xsh, domid); +} + +static char **libxenstore_directory(struct qemu_xs_handle *h, + xs_transaction_t t, const char *path, + unsigned int *num) +{ + return xs_directory(h->xsh, t, path, num); +} + +static void *libxenstore_read(struct qemu_xs_handle *h, xs_transaction_t t, + const char *path, unsigned int *len) +{ + return xs_read(h->xsh, t, path, len); +} + +static bool libxenstore_write(struct qemu_xs_handle *h, xs_transaction_t t, + const char *path, const void *data, + unsigned int len) +{ + return xs_write(h->xsh, t, path, data, len); +} + +static bool libxenstore_create(struct qemu_xs_handle *h, xs_transaction_t t, + unsigned int owner, unsigned int domid, + unsigned int perms, const char *path) +{ + struct xs_permissions perms_list[] = { + { + .id = owner, + .perms = XS_PERM_NONE, + }, + { + .id = domid, + .perms = perms, + }, + }; + + if (!xs_mkdir(h->xsh, t, path)) { + return false; + } + + return xs_set_permissions(h->xsh, t, path, perms_list, + ARRAY_SIZE(perms_list)); +} + +static bool libxenstore_destroy(struct qemu_xs_handle *h, xs_transaction_t t, + const char *path) +{ + return xs_rm(h->xsh, t, path); +} + +struct qemu_xs_watch { + char *path; + char *token; + xs_watch_fn fn; + void *opaque; + Notifier notifier; +}; + +static void watch_notify(Notifier *n, void *data) +{ + struct qemu_xs_watch *w = container_of(n, struct qemu_xs_watch, notifier); + const char **v = data; + + if (!strcmp(w->token, v[XS_WATCH_TOKEN])) { + w->fn(w->opaque, v[XS_WATCH_PATH]); + } +} + +static struct qemu_xs_watch *new_watch(const char *path, xs_watch_fn fn,
+ void *opaque) +{ + struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1); + QemuUUID uuid; + + qemu_uuid_generate(&uuid); + + w->token = qemu_uuid_unparse_strdup(&uuid); + w->path = g_strdup(path); + w->fn = fn; + w->opaque = opaque; + w->notifier.notify = watch_notify; + + return w; +} + +static void free_watch(struct qemu_xs_watch *w) +{ + g_free(w->token); + g_free(w->path); + + g_free(w); +} + +static struct qemu_xs_watch *libxenstore_watch(struct qemu_xs_handle *h, + const char *path, xs_watch_fn fn, + void *opaque) +{ + struct qemu_xs_watch *w = new_watch(path, fn, opaque); + + notifier_list_add(&h->notifiers, &w->notifier); + + if (!xs_watch(h->xsh, path, w->token)) { + notifier_remove(&w->notifier); + free_watch(w); + return NULL; + } + + return w; +} + +static void libxenstore_unwatch(struct qemu_xs_handle *h, + struct qemu_xs_watch *w) +{ + xs_unwatch(h->xsh, w->path, w->token); + notifier_remove(&w->notifier); + free_watch(w); +} + +static xs_transaction_t libxenstore_transaction_start(struct qemu_xs_handle *h) +{ + return xs_transaction_start(h->xsh); +} + +static bool libxenstore_transaction_end(struct qemu_xs_handle *h, + xs_transaction_t t, bool abort) +{ + return xs_transaction_end(h->xsh, t, abort); +} + +struct xenstore_backend_ops libxenstore_backend_ops = { + .open = libxenstore_open, + .close = libxenstore_close, + .get_domain_path = libxenstore_get_domain_path, + .directory = libxenstore_directory, + .read = libxenstore_read, + .write = libxenstore_write, + .create = libxenstore_create, + .destroy = libxenstore_destroy, + .watch = libxenstore_watch, + .unwatch = libxenstore_unwatch, + .transaction_start = libxenstore_transaction_start, + .transaction_end = libxenstore_transaction_end, +}; + +void setup_xen_backend_ops(void) +{ +#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800 + xengnttab_handle *xgt = xengnttab_open(NULL, 0); + + if (xgt) { + if (xengnttab_grant_copy(xgt, 0, NULL) == 0) { + libxengnttab_backend_ops.grant_copy = libxengnttab_backend_grant_copy; + } + xengnttab_close(xgt); + } +#endif + xen_evtchn_ops = &libxenevtchn_backend_ops; + xen_gnttab_ops = &libxengnttab_backend_ops; + xen_foreignmem_ops = &libxenforeignmem_backend_ops; + xen_xenstore_ops = &libxenstore_backend_ops; +} diff --git a/hw/xen/xen_devconfig.c b/hw/xen/xen_devconfig.c index 46ee4a7f02..9b7304e544 100644 --- a/hw/xen/xen_devconfig.c +++ b/hw/xen/xen_devconfig.c @@ -11,11 +11,11 @@ static int xen_config_dev_dirs(const char *ftype, const char *btype, int vdev, { char *dom; - dom = xs_get_domain_path(xenstore, xen_domid); + dom = qemu_xen_xs_get_domain_path(xenstore, xen_domid); snprintf(fe, len, "%s/device/%s/%d", dom, ftype, vdev); free(dom); - dom = xs_get_domain_path(xenstore, 0); + dom = qemu_xen_xs_get_domain_path(xenstore, 0); snprintf(be, len, "%s/backend/%s/%d/%d", dom, btype, xen_domid, vdev); free(dom); diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c index 8db0532632..a540149639 100644 --- a/hw/xen/xen_pt.c +++ b/hw/xen/xen_pt.c @@ -59,9 +59,9 @@ #include "hw/pci/pci.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" +#include "xen_pt.h" #include "hw/xen/xen.h" #include "hw/xen/xen-legacy-backend.h" -#include "xen_pt.h" #include "qemu/range.h" static bool has_igd_gfx_passthru; @@ -780,15 +780,6 @@ static void xen_pt_realize(PCIDevice *d, Error **errp) s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function, s->dev.devfn); - xen_host_pci_device_get(&s->real_device, - s->hostaddr.domain, s->hostaddr.bus, - s->hostaddr.slot, s->hostaddr.function, - errp); 
- if (*errp) { - error_append_hint(errp, "Failed to \"open\" the real pci device"); - return; - } - s->is_virtfn = s->real_device.is_virtfn; if (s->is_virtfn) { XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n", @@ -803,8 +794,10 @@ static void xen_pt_realize(PCIDevice *d, Error **errp) s->io_listener = xen_pt_io_listener; /* Setup VGA bios for passthrough GFX */ - if ((s->real_device.domain == 0) && (s->real_device.bus == 0) && - (s->real_device.dev == 2) && (s->real_device.func == 0)) { + if ((s->real_device.domain == XEN_PCI_IGD_DOMAIN) && + (s->real_device.bus == XEN_PCI_IGD_BUS) && + (s->real_device.dev == XEN_PCI_IGD_DEV) && + (s->real_device.func == XEN_PCI_IGD_FN)) { if (!is_igd_vga_passthrough(&s->real_device)) { error_setg(errp, "Need to enable igd-passthru if you're trying" " to passthrough IGD GFX"); @@ -950,11 +943,58 @@ static void xen_pci_passthrough_instance_init(Object *obj) PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS; } +void xen_igd_reserve_slot(PCIBus *pci_bus) +{ + if (!xen_igd_gfx_pt_enabled()) { + return; + } + + XEN_PT_LOG(0, "Reserving PCI slot 2 for IGD\n"); + pci_bus_set_slot_reserved_mask(pci_bus, XEN_PCI_IGD_SLOT_MASK); +} + +static void xen_igd_clear_slot(DeviceState *qdev, Error **errp) +{ + ERRP_GUARD(); + PCIDevice *pci_dev = (PCIDevice *)qdev; + XenPCIPassthroughState *s = XEN_PT_DEVICE(pci_dev); + XenPTDeviceClass *xpdc = XEN_PT_DEVICE_GET_CLASS(s); + PCIBus *pci_bus = pci_get_bus(pci_dev); + + xen_host_pci_device_get(&s->real_device, + s->hostaddr.domain, s->hostaddr.bus, + s->hostaddr.slot, s->hostaddr.function, + errp); + if (*errp) { + error_append_hint(errp, "Failed to \"open\" the real pci device"); + return; + } + + if (!(pci_bus_get_slot_reserved_mask(pci_bus) & XEN_PCI_IGD_SLOT_MASK)) { + xpdc->pci_qdev_realize(qdev, errp); + return; + } + + if (is_igd_vga_passthrough(&s->real_device) && + s->real_device.domain == XEN_PCI_IGD_DOMAIN && + s->real_device.bus == XEN_PCI_IGD_BUS && + s->real_device.dev == XEN_PCI_IGD_DEV && + s->real_device.func == XEN_PCI_IGD_FN && + s->real_device.vendor_id == PCI_VENDOR_ID_INTEL) { + pci_bus_clear_slot_reserved_mask(pci_bus, XEN_PCI_IGD_SLOT_MASK); + XEN_PT_LOG(pci_dev, "Intel IGD found, using slot 2\n"); + } + xpdc->pci_qdev_realize(qdev, errp); +} + static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + XenPTDeviceClass *xpdc = XEN_PT_DEVICE_CLASS(klass); + xpdc->pci_qdev_realize = dc->realize; + dc->realize = xen_igd_clear_slot; k->realize = xen_pt_realize; k->exit = xen_pt_unregister_device; k->config_read = xen_pt_pci_read_config; @@ -977,6 +1017,7 @@ static const TypeInfo xen_pci_passthrough_info = { .instance_size = sizeof(XenPCIPassthroughState), .instance_finalize = xen_pci_passthrough_finalize, .class_init = xen_pci_passthrough_class_init, + .class_size = sizeof(XenPTDeviceClass), .instance_init = xen_pci_passthrough_instance_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, diff --git a/hw/xen/xen_pt.h b/hw/xen/xen_pt.h index cf10fc7bbf..b20744f7c7 100644 --- a/hw/xen/xen_pt.h +++ b/hw/xen/xen_pt.h @@ -1,7 +1,7 @@ #ifndef XEN_PT_H #define XEN_PT_H -#include "hw/xen/xen_common.h" +#include "hw/xen/xen_native.h" #include "xen-host-pci-device.h" #include "qom/object.h" @@ -40,7 +40,20 @@ typedef struct XenPTReg XenPTReg; #define TYPE_XEN_PT_DEVICE "xen-pci-passthrough" OBJECT_DECLARE_SIMPLE_TYPE(XenPCIPassthroughState, XEN_PT_DEVICE) +#define 
XEN_PT_DEVICE_CLASS(klass) \ + OBJECT_CLASS_CHECK(XenPTDeviceClass, klass, TYPE_XEN_PT_DEVICE) +#define XEN_PT_DEVICE_GET_CLASS(obj) \ + OBJECT_GET_CLASS(XenPTDeviceClass, obj, TYPE_XEN_PT_DEVICE) + +typedef void (*XenPTQdevRealize)(DeviceState *qdev, Error **errp); + +typedef struct XenPTDeviceClass { + PCIDeviceClass parent_class; + XenPTQdevRealize pci_qdev_realize; +} XenPTDeviceClass; + uint32_t igd_read_opregion(XenPCIPassthroughState *s); +void xen_igd_reserve_slot(PCIBus *pci_bus); void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val); void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s, XenHostPCIDevice *dev); @@ -75,6 +88,13 @@ typedef int (*xen_pt_conf_byte_read) #define XEN_PCI_INTEL_OPREGION 0xfc +#define XEN_PCI_IGD_DOMAIN 0 +#define XEN_PCI_IGD_BUS 0 +#define XEN_PCI_IGD_DEV 2 +#define XEN_PCI_IGD_FN 0 +#define XEN_PCI_IGD_SLOT_MASK \ + (1UL << PCI_SLOT(PCI_DEVFN(XEN_PCI_IGD_DEV, XEN_PCI_IGD_FN))) + typedef enum { XEN_PT_GRP_TYPE_HARDWIRED = 0, /* 0 Hardwired reg group */ XEN_PT_GRP_TYPE_EMU, /* emul reg group */ diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c index cde898b744..2b8680b112 100644 --- a/hw/xen/xen_pt_config_init.c +++ b/hw/xen/xen_pt_config_init.c @@ -15,8 +15,8 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/timer.h" -#include "hw/xen/xen-legacy-backend.h" #include "xen_pt.h" +#include "hw/xen/xen-legacy-backend.h" #define XEN_PT_MERGE_VALUE(value, data, val_mask) \ (((value) & (val_mask)) | ((data) & ~(val_mask))) @@ -1924,7 +1924,7 @@ static void xen_pt_config_reg_init(XenPCIPassthroughState *s, if (reg->init) { uint32_t host_mask, size_mask; unsigned int offset; - uint32_t val; + uint32_t val = 0; /* initialize emulate register */ rc = reg->init(s, reg_entry->reg, diff --git a/hw/xen/xen_pt_graphics.c b/hw/xen/xen_pt_graphics.c index f303f67c9c..0aed3bb6fd 100644 --- a/hw/xen/xen_pt_graphics.c +++ b/hw/xen/xen_pt_graphics.c @@ -5,7 +5,6 @@ #include "qapi/error.h" #include "xen_pt.h" #include "xen-host-pci-device.h" -#include "hw/xen/xen-legacy-backend.h" static unsigned long igd_guest_opregion; static unsigned long igd_host_opregion; diff --git a/hw/xen/xen_pt_msi.c b/hw/xen/xen_pt_msi.c index b71563f98a..09cca4eecb 100644 --- a/hw/xen/xen_pt_msi.c +++ b/hw/xen/xen_pt_msi.c @@ -11,9 +11,9 @@ #include "qemu/osdep.h" -#include "hw/xen/xen-legacy-backend.h" -#include "xen_pt.h" #include "hw/i386/apic-msidef.h" +#include "xen_pt.h" +#include "hw/xen/xen-legacy-backend.h" #define XEN_PT_AUTO_ASSIGN -1 diff --git a/hw/xen/xen_pt_stub.c b/hw/xen/xen_pt_stub.c index 2d8cac8d54..5c108446a8 100644 --- a/hw/xen/xen_pt_stub.c +++ b/hw/xen/xen_pt_stub.c @@ -20,3 +20,7 @@ void xen_igd_gfx_pt_set(bool value, Error **errp) error_setg(errp, "Xen PCI passthrough support not built in"); } } + +void xen_igd_reserve_slot(PCIBus *pci_bus) +{ +} diff --git a/hw/xen/xen_pvdev.c b/hw/xen/xen_pvdev.c index 1a5177b354..be1504b82c 100644 --- a/hw/xen/xen_pvdev.c +++ b/hw/xen/xen_pvdev.c @@ -54,31 +54,17 @@ void xen_config_cleanup(void) struct xs_dirs *d; QTAILQ_FOREACH(d, &xs_cleanup, list) { - xs_rm(xenstore, 0, d->xs_dir); + qemu_xen_xs_destroy(xenstore, 0, d->xs_dir); } } int xenstore_mkdir(char *path, int p) { - struct xs_permissions perms[2] = { - { - .id = 0, /* set owner: dom0 */ - }, { - .id = xen_domid, - .perms = p, - } - }; - - if (!xs_mkdir(xenstore, 0, path)) { + if (!qemu_xen_xs_create(xenstore, 0, 0, xen_domid, p, path)) { xen_pv_printf(NULL, 0, "xs_mkdir %s: failed\n", path); return -1; } 
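[The qemu_xen_xs_create() call above folds the old xs_mkdir()/xs_set_permissions() pair into one backend operation: owner domain, reader domain and permission bits travel through a single call. A minimal sketch of the dispatch wrapper this implies, assuming the xen_backend_ops.h pattern of a global ops pointer filled in by setup_xen_backend_ops() seen earlier; the guard clause is an assumption, not quoted from this patch:

/* Hypothetical sketch: forward qemu_xen_xs_create() to whichever
 * xenstore backend was registered at startup via xen_xenstore_ops. */
static inline bool qemu_xen_xs_create(struct qemu_xs_handle *h,
                                      xs_transaction_t t, unsigned int owner,
                                      unsigned int domid, unsigned int perms,
                                      const char *path)
{
    if (!xen_xenstore_ops || !xen_xenstore_ops->create) {
        return false; /* assumed guard: no backend registered yet */
    }
    return xen_xenstore_ops->create(h, t, owner, domid, perms, path);
}

The signature mirrors the .create member of xenstore_backend_ops that libxenstore_create() implements above.]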
xenstore_cleanup_dir(g_strdup(path)); - - if (!xs_set_permissions(xenstore, 0, path, perms, 2)) { - xen_pv_printf(NULL, 0, "xs_set_permissions %s: failed\n", path); - return -1; - } return 0; } @@ -87,7 +73,7 @@ int xenstore_write_str(const char *base, const char *node, const char *val) char abspath[XEN_BUFSIZE]; snprintf(abspath, sizeof(abspath), "%s/%s", base, node); - if (!xs_write(xenstore, 0, abspath, val, strlen(val))) { + if (!qemu_xen_xs_write(xenstore, 0, abspath, val, strlen(val))) { return -1; } return 0; @@ -100,7 +86,7 @@ char *xenstore_read_str(const char *base, const char *node) char *str, *ret = NULL; snprintf(abspath, sizeof(abspath), "%s/%s", base, node); - str = xs_read(xenstore, 0, abspath, &len); + str = qemu_xen_xs_read(xenstore, 0, abspath, &len); if (str != NULL) { /* move to qemu-allocated memory to make sure * callers can savely g_free() stuff. */ @@ -152,29 +138,6 @@ int xenstore_read_uint64(const char *base, const char *node, uint64_t *uval) return rc; } -void xenstore_update(void *unused) -{ - char **vec = NULL; - intptr_t type, ops, ptr; - unsigned int dom, count; - - vec = xs_read_watch(xenstore, &count); - if (vec == NULL) { - goto cleanup; - } - - if (sscanf(vec[XS_WATCH_TOKEN], "be:%" PRIxPTR ":%d:%" PRIxPTR, - &type, &dom, &ops) == 3) { - xenstore_update_be(vec[XS_WATCH_PATH], (void *)type, dom, (void*)ops); - } - if (sscanf(vec[XS_WATCH_TOKEN], "fe:%" PRIxPTR, &ptr) == 1) { - xenstore_update_fe(vec[XS_WATCH_PATH], (void *)ptr); - } - -cleanup: - free(vec); -} - const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { @@ -238,14 +201,14 @@ void xen_pv_evtchn_event(void *opaque) struct XenLegacyDevice *xendev = opaque; evtchn_port_t port; - port = xenevtchn_pending(xendev->evtchndev); + port = qemu_xen_evtchn_pending(xendev->evtchndev); if (port != xendev->local_port) { xen_pv_printf(xendev, 0, "xenevtchn_pending returned %d (expected %d)\n", port, xendev->local_port); return; } - xenevtchn_unmask(xendev->evtchndev, port); + qemu_xen_evtchn_unmask(xendev->evtchndev, port); if (xendev->ops->event) { xendev->ops->event(xendev); @@ -257,15 +220,15 @@ void xen_pv_unbind_evtchn(struct XenLegacyDevice *xendev) if (xendev->local_port == -1) { return; } - qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev), NULL, NULL, NULL); - xenevtchn_unbind(xendev->evtchndev, xendev->local_port); + qemu_set_fd_handler(qemu_xen_evtchn_fd(xendev->evtchndev), NULL, NULL, NULL); + qemu_xen_evtchn_unbind(xendev->evtchndev, xendev->local_port); xen_pv_printf(xendev, 2, "unbind evtchn port %d\n", xendev->local_port); xendev->local_port = -1; } int xen_pv_send_notify(struct XenLegacyDevice *xendev) { - return xenevtchn_notify(xendev->evtchndev, xendev->local_port); + return qemu_xen_evtchn_notify(xendev->evtchndev, xendev->local_port); } /* ------------------------------------------------------------- */ @@ -299,17 +262,15 @@ void xen_pv_del_xendev(struct XenLegacyDevice *xendev) } if (xendev->fe) { - char token[XEN_BUFSIZE]; - snprintf(token, sizeof(token), "fe:%p", xendev); - xs_unwatch(xenstore, xendev->fe, token); + qemu_xen_xs_unwatch(xenstore, xendev->watch); g_free(xendev->fe); } if (xendev->evtchndev != NULL) { - xenevtchn_close(xendev->evtchndev); + qemu_xen_evtchn_close(xendev->evtchndev); } if (xendev->gnttabdev != NULL) { - xengnttab_close(xendev->gnttabdev); + qemu_xen_gnttab_close(xendev->gnttabdev); } QTAILQ_REMOVE(&xendevs, xendev, next); diff --git a/hw/xenpv/xen_machine_pv.c b/hw/xenpv/xen_machine_pv.c index 20c9611d71..17cda5ec13 
100644 --- a/hw/xenpv/xen_machine_pv.c +++ b/hw/xenpv/xen_machine_pv.c @@ -35,11 +35,10 @@ static void xen_init_pv(MachineState *machine) DriveInfo *dinfo; int i; + setup_xen_backend_ops(); + /* Initialize backend core & drivers */ - if (xen_be_init() != 0) { - error_report("%s: xen backend core setup failed", __func__); - exit(1); - } + xen_be_init(); switch (xen_mode) { case XEN_ATTACH: @@ -55,7 +54,6 @@ static void xen_init_pv(MachineState *machine) break; } - xen_be_register_common(); xen_be_register("vfb", &xen_framebuffer_ops); xen_be_register("qnic", &xen_netdev_ops); diff --git a/hw/xtensa/virt.c b/hw/xtensa/virt.c index a18e3fc910..b87f842e74 100644 --- a/hw/xtensa/virt.c +++ b/hw/xtensa/virt.c @@ -38,7 +38,8 @@ #include "xtensa_memory.h" #include "xtensa_sim.h" -static void create_pcie(CPUXtensaState *env, int irq_base, hwaddr addr_base) +static void create_pcie(MachineState *ms, CPUXtensaState *env, int irq_base, + hwaddr addr_base) { hwaddr base_ecam = addr_base + 0x00100000; hwaddr size_ecam = 0x03f00000; @@ -54,6 +55,7 @@ static void create_pcie(CPUXtensaState *env, int irq_base, hwaddr addr_base) MemoryRegion *mmio_alias; MemoryRegion *mmio_reg; + MachineClass *mc = MACHINE_GET_CLASS(ms); DeviceState *dev; PCIHostState *pci; qemu_irq *extints; @@ -104,7 +106,7 @@ static void create_pcie(CPUXtensaState *env, int irq_base, hwaddr addr_base) NICInfo *nd = &nd_table[i]; if (!nd->model) { - nd->model = g_strdup("virtio"); + nd->model = g_strdup(mc->default_nic); } pci_nic_init_nofail(nd, pci->bus, nd->model, NULL); @@ -117,7 +119,7 @@ static void xtensa_virt_init(MachineState *machine) XtensaCPU *cpu = xtensa_sim_common_init(machine); CPUXtensaState *env = &cpu->env; - create_pcie(env, 0, 0xf0000000); + create_pcie(machine, env, 0, 0xf0000000); xtensa_sim_load_kernel(cpu, machine); } @@ -127,6 +129,7 @@ static void xtensa_virt_machine_init(MachineClass *mc) mc->init = xtensa_virt_init; mc->max_cpus = 32; mc->default_cpu_type = XTENSA_DEFAULT_CPU_TYPE; + mc->default_nic = "virtio-net-pci"; } DEFINE_MACHINE("virt", xtensa_virt_machine_init) diff --git a/include/block/accounting.h b/include/block/accounting.h index b9caad60d5..a59e39f49d 100644 --- a/include/block/accounting.h +++ b/include/block/accounting.h @@ -37,6 +37,7 @@ enum BlockAcctType { BLOCK_ACCT_READ, BLOCK_ACCT_WRITE, BLOCK_ACCT_FLUSH, + BLOCK_ACCT_ZONE_APPEND, BLOCK_ACCT_UNMAP, BLOCK_MAX_IOTYPE, }; diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h index dd9a7f6461..5449b6d742 100644 --- a/include/block/aio-wait.h +++ b/include/block/aio-wait.h @@ -63,7 +63,7 @@ extern AioWait global_aio_wait; * @ctx: the aio context, or NULL if multiple aio contexts (for which the * caller does not hold a lock) are involved in the polling condition. * @cond: wait while this conditional expression is true - * @unlock: whether to unlock and then lock again @ctx. This apples + * @unlock: whether to unlock and then lock again @ctx. This applies * only when waiting for another AioContext from the main loop. * Otherwise it's ignored. * @@ -85,7 +85,7 @@ extern AioWait global_aio_wait; /* Increment wait_->num_waiters before evaluating cond. */ \ qatomic_inc(&wait_->num_waiters); \ /* Paired with smp_mb in aio_wait_kick(). */ \ - smp_mb(); \ + smp_mb__after_rmw(); \ if (ctx_ && in_aio_context_home_thread(ctx_)) { \ while ((cond)) { \ aio_poll(ctx_, true); \ @@ -131,7 +131,7 @@ void aio_wait_kick(void); * * Run a BH in @ctx and wait for it to complete. 
* - * Must be called from the main loop thread with @ctx acquired exactly once. + * Must be called from the main loop thread without @ctx acquired. * Note that main loop event processing may occur. */ void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque); diff --git a/include/block/aio.h b/include/block/aio.h index 8fba6a3584..32042e8905 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -23,6 +23,8 @@ #include "qemu/thread.h" #include "qemu/timer.h" #include "block/graph-lock.h" +#include "hw/qdev-core.h" + typedef struct BlockAIOCB BlockAIOCB; typedef void BlockCompletionFunc(void *opaque, int ret); @@ -208,17 +210,9 @@ struct AioContext { struct ThreadPool *thread_pool; #ifdef CONFIG_LINUX_AIO - /* - * State for native Linux AIO. Uses aio_context_acquire/release for - * locking. - */ struct LinuxAioState *linux_aio; #endif #ifdef CONFIG_LINUX_IO_URING - /* - * State for Linux io_uring. Uses aio_context_acquire/release for - * locking. - */ struct LuringState *linux_io_uring; /* State for file descriptor monitoring using Linux io_uring */ @@ -231,8 +225,6 @@ struct AioContext { */ QEMUTimerListGroup tlg; - int external_disable_cnt; - /* Number of AioHandlers without .io_poll() */ int poll_disable_cnt; @@ -331,9 +323,11 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque, * is opaque and must be allocated prior to its use. * * @name: A human-readable identifier for debugging purposes. + * @reentrancy_guard: A guard set when entering a cb to prevent + * device-reentrancy issues */ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque, - const char *name); + const char *name, MemReentrancyGuard *reentrancy_guard); /** * aio_bh_new: Allocate a new bottom half structure @@ -342,7 +336,17 @@ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque, * string. */ #define aio_bh_new(ctx, cb, opaque) \ - aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb))) + aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL) + +/** + * aio_bh_new_guarded: Allocate a new bottom half structure with a + * reentrancy_guard + * + * A convenience wrapper for aio_bh_new_full() that uses the cb as the name + * string. + */ +#define aio_bh_new_guarded(ctx, cb, opaque, guard) \ + aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard) /** * aio_notify: Force processing of pending events. @@ -475,21 +479,12 @@ bool aio_poll(AioContext *ctx, bool blocking); */ void aio_set_fd_handler(AioContext *ctx, int fd, - bool is_external, IOHandler *io_read, IOHandler *io_write, AioPollFn *io_poll, IOHandler *io_poll_ready, void *opaque); -/* Set polling begin/end callbacks for a file descriptor that has already been - * registered with aio_set_fd_handler. Do nothing if the file descriptor is - * not registered. - */ -void aio_set_fd_poll(AioContext *ctx, int fd, - IOHandler *io_poll_begin, - IOHandler *io_poll_end); - /* Register an event notifier and associated callbacks. Behaves very similarly * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks * will be invoked when using aio_poll(). 
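[The reentrancy_guard argument threaded through aio_bh_new_full() above lets a bottom half refuse to re-enter device code that is already on the stack; aio_bh_new_guarded() is the convenience spelling for device users. A hedged sketch of how a device might use it, assuming DeviceState gains a MemReentrancyGuard member named mem_reentrancy_guard (that member is not part of the hunks shown here, and MyDeviceState/my_device_bh_cb are illustrative):

typedef struct MyDeviceState {
    DeviceState parent_obj; /* illustrative device type */
    QEMUBH *bh;
} MyDeviceState;

static void my_device_bh_cb(void *opaque)
{
    MyDeviceState *s = opaque;
    /* Work here cannot re-enter the device while the guard is held. */
    (void)s;
}

static void my_device_attach_bh(MyDeviceState *s, AioContext *ctx)
{
    /* Tie the BH to the device's guard instead of passing NULL. */
    s->bh = aio_bh_new_guarded(ctx, my_device_bh_cb, s,
                               &DEVICE(s)->mem_reentrancy_guard);
}]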
@@ -499,7 +494,6 @@ void aio_set_fd_poll(AioContext *ctx, int fd, */ void aio_set_event_notifier(AioContext *ctx, EventNotifier *notifier, - bool is_external, EventNotifierHandler *io_read, AioPollFn *io_poll, EventNotifierHandler *io_poll_ready); @@ -628,59 +622,6 @@ static inline void aio_timer_init(AioContext *ctx, */ int64_t aio_compute_timeout(AioContext *ctx); -/** - * aio_disable_external: - * @ctx: the aio context - * - * Disable the further processing of external clients. - */ -static inline void aio_disable_external(AioContext *ctx) -{ - qatomic_inc(&ctx->external_disable_cnt); -} - -/** - * aio_enable_external: - * @ctx: the aio context - * - * Enable the processing of external clients. - */ -static inline void aio_enable_external(AioContext *ctx) -{ - int old; - - old = qatomic_fetch_dec(&ctx->external_disable_cnt); - assert(old > 0); - if (old == 1) { - /* Kick event loop so it re-arms file descriptors */ - aio_notify(ctx); - } -} - -/** - * aio_external_disabled: - * @ctx: the aio context - * - * Return true if the external clients are disabled. - */ -static inline bool aio_external_disabled(AioContext *ctx) -{ - return qatomic_read(&ctx->external_disable_cnt); -} - -/** - * aio_node_check: - * @ctx: the aio context - * @is_external: Whether or not the checked node is an external event source. - * - * Check if the node's is_external flag is okay to be polled by the ctx at this - * moment. True means green light. - */ -static inline bool aio_node_check(AioContext *ctx, bool is_external) -{ - return !is_external || !qatomic_read(&ctx->external_disable_cnt); -} - /** * aio_co_schedule: * @ctx: the aio context diff --git a/include/block/block-common.h b/include/block/block-common.h index b5122ef8ab..e15395f2cb 100644 --- a/include/block/block-common.h +++ b/include/block/block-common.h @@ -65,6 +65,9 @@ * scheduling a BH in the bottom half that runs the respective non-coroutine * function. The coroutine yields after scheduling the BH and is reentered when * the wrapped function returns. + * + * If the first parameter of the function is a BlockDriverState, BdrvChild or + * BlockBackend pointer, the AioContext lock for it is taken in the wrapper. */ #define no_co_wrapper @@ -75,6 +78,57 @@ typedef struct BlockDriver BlockDriver; typedef struct BdrvChild BdrvChild; typedef struct BdrvChildClass BdrvChildClass; +typedef enum BlockZoneOp { + BLK_ZO_OPEN, + BLK_ZO_CLOSE, + BLK_ZO_FINISH, + BLK_ZO_RESET, +} BlockZoneOp; + +typedef enum BlockZoneModel { + BLK_Z_NONE = 0x0, /* Regular block device */ + BLK_Z_HM = 0x1, /* Host-managed zoned block device */ + BLK_Z_HA = 0x2, /* Host-aware zoned block device */ +} BlockZoneModel; + +typedef enum BlockZoneState { + BLK_ZS_NOT_WP = 0x0, + BLK_ZS_EMPTY = 0x1, + BLK_ZS_IOPEN = 0x2, + BLK_ZS_EOPEN = 0x3, + BLK_ZS_CLOSED = 0x4, + BLK_ZS_RDONLY = 0xD, + BLK_ZS_FULL = 0xE, + BLK_ZS_OFFLINE = 0xF, +} BlockZoneState; + +typedef enum BlockZoneType { + BLK_ZT_CONV = 0x1, /* Conventional random writes supported */ + BLK_ZT_SWR = 0x2, /* Sequential writes required */ + BLK_ZT_SWP = 0x3, /* Sequential writes preferred */ +} BlockZoneType; + +/* + * Zone descriptor data structure. + * Provides information on a zone with all position and size values in bytes. + */ +typedef struct BlockZoneDescriptor { + uint64_t start; + uint64_t length; + uint64_t cap; + uint64_t wp; + BlockZoneType type; + BlockZoneState state; +} BlockZoneDescriptor; + +/* + * Track write pointers of a zone in bytes. 
+ */ +typedef struct BlockZoneWps { + CoMutex colock; + uint64_t wp[]; +} BlockZoneWps; + typedef struct BlockDriverInfo { /* in bytes, 0 if irrelevant */ int cluster_size; @@ -197,6 +251,12 @@ typedef enum { #define BDRV_SECTOR_BITS 9 #define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS) +/* + * Get the first most significant bit of wp. If it is zero, then + * the zone type is SWR. + */ +#define BDRV_ZT_IS_CONV(wp) (wp & (1ULL << 63)) + #define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \ INT_MAX >> BDRV_SECTOR_BITS) #define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h index 399200a9a3..f347199bff 100644 --- a/include/block/block-global-state.h +++ b/include/block/block-global-state.h @@ -58,14 +58,14 @@ BlockDriver *bdrv_find_protocol(const char *filename, Error **errp); BlockDriver *bdrv_find_format(const char *format_name); -int coroutine_fn GRAPH_RDLOCK +int coroutine_fn GRAPH_UNLOCKED bdrv_co_create(BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp); -int co_wrapper_bdrv_rdlock bdrv_create(BlockDriver *drv, const char *filename, - QemuOpts *opts, Error **errp); +int co_wrapper bdrv_create(BlockDriver *drv, const char *filename, + QemuOpts *opts, Error **errp); -int coroutine_fn GRAPH_RDLOCK +int coroutine_fn GRAPH_UNLOCKED bdrv_co_create_file(const char *filename, QemuOpts *opts, Error **errp); BlockDriverState *bdrv_new(void); @@ -133,7 +133,10 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, const char *backing_file); void bdrv_refresh_filename(BlockDriverState *bs); -void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp); + +void GRAPH_RDLOCK +bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp); + int bdrv_commit(BlockDriverState *bs); int bdrv_make_empty(BdrvChild *c, Error **errp); int bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file, @@ -163,10 +166,15 @@ int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts, Error **errp); /* check if a named node can be replaced when doing drive-mirror */ -BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs, - const char *node_name, Error **errp); +BlockDriverState * GRAPH_RDLOCK +check_to_replace_node(BlockDriverState *parent_bs, const char *node_name, + Error **errp); + +int no_coroutine_fn bdrv_activate(BlockDriverState *bs, Error **errp); + +int coroutine_fn no_co_wrapper +bdrv_co_activate(BlockDriverState *bs, Error **errp); -int bdrv_activate(BlockDriverState *bs, Error **errp); void bdrv_activate_all(Error **errp); int bdrv_inactivate_all(void); @@ -214,7 +222,8 @@ void bdrv_img_create(const char *filename, const char *fmt, bool quiet, Error **errp); void bdrv_ref(BlockDriverState *bs); -void bdrv_unref(BlockDriverState *bs); +void no_coroutine_fn bdrv_unref(BlockDriverState *bs); +void coroutine_fn no_co_wrapper bdrv_co_unref(BlockDriverState *bs); void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child); BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, diff --git a/include/block/block-io.h b/include/block/block-io.h index 5da99d4d60..a27e471a87 100644 --- a/include/block/block-io.h +++ b/include/block/block-io.h @@ -79,17 +79,19 @@ bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, PreallocMode prealloc, BdrvRequestFlags flags, Error **errp); int64_t 
coroutine_fn GRAPH_RDLOCK bdrv_co_nb_sectors(BlockDriverState *bs); -int64_t co_wrapper_mixed_bdrv_rdlock bdrv_nb_sectors(BlockDriverState *bs); +int64_t coroutine_mixed_fn bdrv_nb_sectors(BlockDriverState *bs); int64_t coroutine_fn GRAPH_RDLOCK bdrv_co_getlength(BlockDriverState *bs); int64_t co_wrapper_mixed_bdrv_rdlock bdrv_getlength(BlockDriverState *bs); -int64_t coroutine_fn bdrv_co_get_allocated_file_size(BlockDriverState *bs); -int64_t co_wrapper bdrv_get_allocated_file_size(BlockDriverState *bs); +int64_t coroutine_fn GRAPH_RDLOCK +bdrv_co_get_allocated_file_size(BlockDriverState *bs); + +int64_t co_wrapper_bdrv_rdlock +bdrv_get_allocated_file_size(BlockDriverState *bs); BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts, BlockDriverState *in_bs, Error **errp); -void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr); int coroutine_fn GRAPH_RDLOCK bdrv_co_delete_file(BlockDriverState *bs, Error **errp); @@ -112,6 +114,19 @@ int coroutine_fn GRAPH_RDLOCK bdrv_co_flush(BlockDriverState *bs); int coroutine_fn GRAPH_RDLOCK bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes); +/* Report zone information of zone block device. */ +int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_report(BlockDriverState *bs, + int64_t offset, + unsigned int *nr_zones, + BlockZoneDescriptor *zones); +int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_mgmt(BlockDriverState *bs, + BlockZoneOp op, + int64_t offset, int64_t len); +int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_append(BlockDriverState *bs, + int64_t *offset, + QEMUIOVector *qiov, + BdrvRequestFlags flags); + bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs); int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, @@ -165,8 +180,11 @@ const char *bdrv_get_node_name(const BlockDriverState *bs); const char *bdrv_get_device_name(const BlockDriverState *bs); const char *bdrv_get_device_or_node_name(const BlockDriverState *bs); -int coroutine_fn bdrv_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); -int co_wrapper_mixed bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); +int coroutine_fn GRAPH_RDLOCK +bdrv_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); + +int co_wrapper_mixed_bdrv_rdlock +bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs, Error **errp); @@ -200,10 +218,11 @@ void *qemu_try_blockalign0(BlockDriverState *bs, size_t size); void bdrv_enable_copy_on_read(BlockDriverState *bs); void bdrv_disable_copy_on_read(BlockDriverState *bs); -void coroutine_fn bdrv_co_debug_event(BlockDriverState *bs, - BlkdebugEvent event); -void co_wrapper_mixed bdrv_debug_event(BlockDriverState *bs, - BlkdebugEvent event); +void coroutine_fn GRAPH_RDLOCK +bdrv_co_debug_event(BlockDriverState *bs, BlkdebugEvent event); + +void co_wrapper_mixed_bdrv_rdlock +bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event); #define BLKDBG_EVENT(child, evt) \ do { \ diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h index d419017328..b1cbc1e00c 100644 --- a/include/block/block_int-common.h +++ b/include/block/block_int-common.h @@ -137,6 +137,11 @@ struct BlockDriver { */ bool is_format; + /* + * Set to true if the BlockDriver supports zoned children. 
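A caller-side sketch of the new zone-report entry point may help here. This is not part of the patch: example_report_first_zones is a hypothetical helper, and per the GRAPH_RDLOCK annotation the graph reader lock must already be held when it is called from coroutine context.

static int coroutine_fn GRAPH_RDLOCK
example_report_first_zones(BlockDriverState *bs)
{
    unsigned int nr_zones = 16;   /* in: array capacity, out: zones filled */
    g_autofree BlockZoneDescriptor *zones = g_new0(BlockZoneDescriptor,
                                                   nr_zones);
    int ret;

    /* Ask for up to 16 zone descriptors starting at byte offset 0 */
    ret = bdrv_co_zone_report(bs, 0, &nr_zones, zones);
    if (ret < 0) {
        return ret;               /* e.g. -ENOTSUP on non-zoned devices */
    }
    return nr_zones;              /* number of descriptors actually filled */
}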
+ */ + bool supports_zoned_children; + /* * Drivers not implementing bdrv_parse_filename nor bdrv_open should have * this field set to true, except ones that are defined only by their @@ -158,8 +163,6 @@ struct BlockDriver { */ bool supports_backing; - bool has_variable_length; - /* * Drivers setting this field must be able to work with just a plain * filename with ':' as a prefix, and no other options. @@ -206,20 +209,21 @@ struct BlockDriver { * to allow driver-specific initialization code that requires * the BQL, like setting up specific permission flags. */ - int (*bdrv_amend_pre_run)(BlockDriverState *bs, Error **errp); + int GRAPH_RDLOCK_PTR (*bdrv_amend_pre_run)( + BlockDriverState *bs, Error **errp); /* * This function is invoked under BQL after .bdrv_co_amend() * to allow cleaning up what was done in .bdrv_amend_pre_run(). */ - void (*bdrv_amend_clean)(BlockDriverState *bs); + void GRAPH_RDLOCK_PTR (*bdrv_amend_clean)(BlockDriverState *bs); /* * Return true if @to_replace can be replaced by a BDS with the * same data as @bs without it affecting @bs's behavior (that is, * without it being visible to @bs's parents). */ - bool (*bdrv_recurse_can_replace)(BlockDriverState *bs, - BlockDriverState *to_replace); + bool GRAPH_RDLOCK_PTR (*bdrv_recurse_can_replace)( + BlockDriverState *bs, BlockDriverState *to_replace); int (*bdrv_probe_device)(const char *filename); @@ -238,18 +242,18 @@ struct BlockDriver { void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state); void (*bdrv_join_options)(QDict *options, QDict *old_options); - int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags, - Error **errp); + int GRAPH_UNLOCKED_PTR (*bdrv_open)( + BlockDriverState *bs, QDict *options, int flags, Error **errp); /* Protocol drivers should implement this instead of bdrv_open */ - int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags, - Error **errp); + int GRAPH_UNLOCKED_PTR (*bdrv_file_open)( + BlockDriverState *bs, QDict *options, int flags, Error **errp); void (*bdrv_close)(BlockDriverState *bs); - int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_create)( + int coroutine_fn GRAPH_UNLOCKED_PTR (*bdrv_co_create)( BlockdevCreateOptions *opts, Error **errp); - int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_create_opts)( + int coroutine_fn GRAPH_UNLOCKED_PTR (*bdrv_co_create_opts)( BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp); int (*bdrv_amend_options)(BlockDriverState *bs, @@ -335,7 +339,8 @@ struct BlockDriver { int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag); bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag); - void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp); + void GRAPH_RDLOCK_PTR (*bdrv_refresh_limits)( + BlockDriverState *bs, Error **errp); /* * Returns 1 if newly created images are guaranteed to contain only @@ -358,6 +363,21 @@ struct BlockDriver { void (*bdrv_attach_aio_context)(BlockDriverState *bs, AioContext *new_context); + /** + * bdrv_drain_begin is called if implemented in the beginning of a + * drain operation to drain and stop any internal sources of requests in + * the driver. + * bdrv_drain_end is called if implemented at the end of the drain. + * + * They should be used by the driver to e.g. manage scheduled I/O + * requests, or toggle an internal state. After the end of the drain new + * requests will continue normally. + * + * Implementations of both functions must not call aio_poll(). 
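A sketch of a driver honouring this drain contract, with a hypothetical per-driver state and a QEMUTimer standing in for an internal source of requests (none of this is in the patch):

typedef struct ExampleDriverState {
    QEMUTimer *retry_timer;   /* hypothetical internal request source */
} ExampleDriverState;

static void example_drain_begin(BlockDriverState *bs)
{
    ExampleDriverState *s = bs->opaque;
    /* Stop generating new requests; do not aio_poll() or touch the graph */
    timer_del(s->retry_timer);
}

static void example_drain_end(BlockDriverState *bs)
{
    ExampleDriverState *s = bs->opaque;
    /* Drain section over: new requests may be issued again */
    timer_mod(s->retry_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + NANOSECONDS_PER_SECOND);
}

The pair would then be installed as .bdrv_drain_begin/.bdrv_drain_end in the driver's BlockDriver definition.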
+ */ + void (*bdrv_drain_begin)(BlockDriverState *bs); + void (*bdrv_drain_end)(BlockDriverState *bs); + /** * Try to get @bs's logical and physical block size. * On success, store them in @bsz and return zero. @@ -465,10 +485,9 @@ struct BlockDriver { int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename); - int coroutine_fn (*bdrv_co_amend)(BlockDriverState *bs, - BlockdevAmendOptions *opts, - bool force, - Error **errp); + int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_amend)( + BlockDriverState *bs, BlockdevAmendOptions *opts, bool force, + Error **errp); /* aio */ BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_preadv)(BlockDriverState *bs, @@ -687,7 +706,7 @@ struct BlockDriver { int64_t coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_getlength)( BlockDriverState *bs); - int64_t coroutine_fn (*bdrv_co_get_allocated_file_size)( + int64_t coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_get_allocated_file_size)( BlockDriverState *bs); BlockMeasureInfo *(*bdrv_measure)(QemuOpts *opts, BlockDriverState *in_bs, @@ -701,8 +720,8 @@ struct BlockDriver { BlockDriverState *bs, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset); - int coroutine_fn (*bdrv_co_get_info)(BlockDriverState *bs, - BlockDriverInfo *bdi); + int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_get_info)( + BlockDriverState *bs, BlockDriverInfo *bdi); ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs, Error **errp); @@ -714,6 +733,15 @@ struct BlockDriver { int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_load_vmstate)( BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); + int coroutine_fn (*bdrv_co_zone_report)(BlockDriverState *bs, + int64_t offset, unsigned int *nr_zones, + BlockZoneDescriptor *zones); + int coroutine_fn (*bdrv_co_zone_mgmt)(BlockDriverState *bs, BlockZoneOp op, + int64_t offset, int64_t len); + int coroutine_fn (*bdrv_co_zone_append)(BlockDriverState *bs, + int64_t *offset, QEMUIOVector *qiov, + BdrvRequestFlags flags); + /* removable device specific */ bool coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_is_inserted)( BlockDriverState *bs); @@ -737,29 +765,14 @@ struct BlockDriver { int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_check)( BlockDriverState *bs, BdrvCheckResult *result, BdrvCheckMode fix); - void coroutine_fn (*bdrv_co_debug_event)(BlockDriverState *bs, - BlkdebugEvent event); + void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_debug_event)( + BlockDriverState *bs, BlkdebugEvent event); /* io queue for linux-aio */ void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_io_plug)(BlockDriverState *bs); void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_io_unplug)( BlockDriverState *bs); - /** - * bdrv_drain_begin is called if implemented in the beginning of a - * drain operation to drain and stop any internal sources of requests in - * the driver. - * bdrv_drain_end is called if implemented at the end of the drain. - * - * They should be used by the driver to e.g. manage scheduled I/O - * requests, or toggle an internal state. After the end of the drain new - * requests will continue normally. - * - * Implementations of both functions must not call aio_poll(). 
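Together with the bdrv_co_zone_* wrappers in block-io.h, a zoned driver would wire these callbacks up in its BlockDriver definition, roughly as below (driver name and implementations are hypothetical, matching the callback signatures added above):

/* hypothetical zone callback implementations, defined in the driver: */
static int coroutine_fn example_co_zone_report(BlockDriverState *bs,
    int64_t offset, unsigned int *nr_zones, BlockZoneDescriptor *zones);
static int coroutine_fn example_co_zone_mgmt(BlockDriverState *bs,
    BlockZoneOp op, int64_t offset, int64_t len);
static int coroutine_fn example_co_zone_append(BlockDriverState *bs,
    int64_t *offset, QEMUIOVector *qiov, BdrvRequestFlags flags);

static BlockDriver bdrv_example_zoned = {
    .format_name         = "example-zoned",
    .bdrv_co_zone_report = example_co_zone_report,
    .bdrv_co_zone_mgmt   = example_co_zone_mgmt,
    .bdrv_co_zone_append = example_co_zone_append,
};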
- */ - void (*bdrv_drain_begin)(BlockDriverState *bs); - void (*bdrv_drain_end)(BlockDriverState *bs); - bool (*bdrv_supports_persistent_dirty_bitmap)(BlockDriverState *bs); bool coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_can_store_new_dirty_bitmap)( @@ -855,6 +868,34 @@ typedef struct BlockLimits { /* maximum number of iovec elements */ int max_iov; + + /* + * true if the length of the underlying file can change, and QEMU + * is expected to adjust automatically. Mostly for CD-ROM drives, + * whose length is zero when the tray is empty (they don't need + * an explicit monitor command to load the disk inside the guest). + */ + bool has_variable_length; + + /* device zone model */ + BlockZoneModel zoned; + + /* zone size expressed in bytes */ + uint32_t zone_size; + + /* total number of zones */ + uint32_t nr_zones; + + /* maximum sectors of a zone append write operation */ + uint32_t max_append_sectors; + + /* maximum number of open zones */ + uint32_t max_open_zones; + + /* maximum number of active zones */ + uint32_t max_active_zones; + + uint32_t write_granularity; } BlockLimits; typedef struct BdrvOpBlocker BdrvOpBlocker; @@ -914,6 +955,27 @@ struct BdrvChildClass { void GRAPH_WRLOCK_PTR (*attach)(BdrvChild *child); void GRAPH_WRLOCK_PTR (*detach)(BdrvChild *child); + /* + * If this pair of functions is implemented, the parent doesn't issue new + * requests after returning from .drained_begin() until .drained_end() is + * called. + * + * These functions must not change the graph (and therefore also must not + * call aio_poll(), which could change the graph indirectly). + * + * Note that this can be nested. If drained_begin() was called twice, new + * I/O is allowed only after drained_end() was called twice, too. + */ + void (*drained_begin)(BdrvChild *child); + void (*drained_end)(BdrvChild *child); + + /* + * Returns whether the parent has pending requests for the child. This + * callback is polled after .drained_begin() has been called until all + * activity on the child has stopped. + */ + bool (*drained_poll)(BdrvChild *child); + /* * Notifies the parent that the filename of its child has changed (e.g. * because the direct child was removed from the backing chain), so that it @@ -943,27 +1005,6 @@ struct BdrvChildClass { const char *(*get_name)(BdrvChild *child); AioContext *(*get_parent_aio_context)(BdrvChild *child); - - /* - * If this pair of functions is implemented, the parent doesn't issue new - * requests after returning from .drained_begin() until .drained_end() is - * called. - * - * These functions must not change the graph (and therefore also must not - * call aio_poll(), which could change the graph indirectly). - * - * Note that this can be nested. If drained_begin() was called twice, new - * I/O is allowed only after drained_end() was called twice, too. - */ - void (*drained_begin)(BdrvChild *child); - void (*drained_end)(BdrvChild *child); - - /* - * Returns whether the parent has pending requests for the child. This - * callback is polled after .drained_begin() has been called until all - * activity on the child has stopped. - */ - bool (*drained_poll)(BdrvChild *child); }; extern const BdrvChildClass child_of_bds; @@ -1216,6 +1257,9 @@ struct BlockDriverState { CoMutex bsc_modify_lock; /* Always non-NULL, but must only be dereferenced under an RCU read guard */ BdrvBlockStatusCache *block_status_cache; + + /* array of write pointers' location of each zone in the zoned device. 
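A minimal sketch of consulting that array, assuming zone_idx has already been validated against bs->bl.nr_zones (example_zone_wp is a hypothetical helper): the write pointer entry is read under the CoMutex that guards the array, and BDRV_ZT_IS_CONV distinguishes conventional zones.

static uint64_t coroutine_fn example_zone_wp(BlockDriverState *bs,
                                             uint32_t zone_idx)
{
    BlockZoneWps *wps = bs->wps;
    uint64_t wp;

    qemu_co_mutex_lock(&wps->colock);
    wp = wps->wp[zone_idx];
    qemu_co_mutex_unlock(&wps->colock);

    if (BDRV_ZT_IS_CONV(wp)) {
        /* MSB set: conventional zone, no meaningful write pointer */
        return 0;
    }
    return wp;   /* byte offset of the next writable position in the zone */
}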
*/ + BlockZoneWps *wps; }; struct BlockBackendRootState { @@ -1254,7 +1298,7 @@ extern QemuOptsList bdrv_create_opts_simple; /* * Common functions that are neither I/O nor Global State. * - * See include/block/block-commmon.h for more information about + * See include/block/block-common.h for more information about * the Common API. */ diff --git a/include/block/block_int-global-state.h b/include/block/block_int-global-state.h index 902406eb99..da5fb31089 100644 --- a/include/block/block_int-global-state.h +++ b/include/block/block_int-global-state.h @@ -225,8 +225,8 @@ int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared, */ int bdrv_child_refresh_perms(BlockDriverState *bs, BdrvChild *c, Error **errp); -bool bdrv_recurse_can_replace(BlockDriverState *bs, - BlockDriverState *to_replace); +bool GRAPH_RDLOCK bdrv_recurse_can_replace(BlockDriverState *bs, + BlockDriverState *to_replace); /* * Default implementation for BlockDriver.bdrv_child_perm() that can diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h index f008446285..104824040c 100644 --- a/include/block/blockjob_int.h +++ b/include/block/blockjob_int.h @@ -126,12 +126,18 @@ void block_job_user_resume(Job *job); */ /** - * block_job_ratelimit_get_delay: + * block_job_ratelimit_processed_bytes: * - * Calculate and return delay for the next request in ns. See the documentation - * of ratelimit_calculate_delay() for details. + * To be called after some work has been done. Adjusts the delay for the next + * request. See the documentation of ratelimit_calculate_delay() for details. */ -int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n); +void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n); + +/** + * Put the job to sleep (assuming that it wasn't canceled) to throttle it to the + * right speed according to its rate limiting. + */ +void block_job_ratelimit_sleep(BlockJob *job); /** * block_job_error_action: diff --git a/include/block/export.h b/include/block/export.h index 7feb02e10d..f2fe0f8078 100644 --- a/include/block/export.h +++ b/include/block/export.h @@ -57,6 +57,8 @@ struct BlockExport { * Reference count for this block export. This includes strong references * both from the owner (qemu-nbd or the monitor) and clients connected to * the export. + * + * Use atomics to access this field. */ int refcount; diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h index 18cc14de22..7574a2de5b 100644 --- a/include/block/graph-lock.h +++ b/include/block/graph-lock.h @@ -73,6 +73,7 @@ extern BdrvGraphLock graph_lock; */ #define GRAPH_WRLOCK TSA_REQUIRES(graph_lock) #define GRAPH_RDLOCK TSA_REQUIRES_SHARED(graph_lock) +#define GRAPH_UNLOCKED TSA_EXCLUDES(graph_lock) /* * TSA annotations are not part of function types, so checks are defeated when @@ -83,6 +84,7 @@ extern BdrvGraphLock graph_lock; */ #define GRAPH_RDLOCK_PTR TSA_GUARDED_BY(graph_lock) #define GRAPH_WRLOCK_PTR TSA_GUARDED_BY(graph_lock) +#define GRAPH_UNLOCKED_PTR /* * register_aiocontext: @@ -203,19 +205,19 @@ typedef struct GraphLockable { } GraphLockable; #define GML_OBJ_() (&(GraphLockable) { }) /* - * This is not marked as TSA_ACQUIRE() because TSA doesn't understand the + * This is not marked as TSA_ACQUIRE_SHARED() because TSA doesn't understand the * cleanup attribute and would therefore complain that the graph is never - * unlocked. TSA_ASSERT() makes sure that the following calls know that we - * hold the lock while unlocking is left unchecked. + * unlocked. 
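GRAPH_UNLOCKED complements the existing annotations: it asserts that the caller does not hold the graph lock, so the function is free to acquire it itself. A sketch of that discipline (example_query_length is hypothetical; GRAPH_RDLOCK_GUARD() is the scope guard built on the GraphLockable machinery defined below in this header):

static int64_t coroutine_fn GRAPH_UNLOCKED
example_query_length(BlockDriverState *bs)
{
    /* We enter without the lock, as GRAPH_UNLOCKED promises... */
    GRAPH_RDLOCK_GUARD();           /* ...and hold it until scope exit */
    return bdrv_co_getlength(bs);   /* a GRAPH_RDLOCK callee */
}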
TSA_ASSERT_SHARED() makes sure that the following calls know that + * we hold the lock while unlocking is left unchecked. */ -static inline GraphLockable * TSA_ASSERT(graph_lock) TSA_NO_TSA +static inline GraphLockable * TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA coroutine_fn graph_lockable_auto_lock(GraphLockable *x) { bdrv_graph_co_rdlock(); return x; } -static inline void TSA_NO_TSA +static inline void TSA_NO_TSA coroutine_fn graph_lockable_auto_unlock(GraphLockable *x) { bdrv_graph_co_rdunlock(); @@ -247,12 +249,12 @@ typedef struct GraphLockableMainloop { } GraphLockableMainloop; #define GMLML_OBJ_() (&(GraphLockableMainloop) { }) /* - * This is not marked as TSA_ACQUIRE() because TSA doesn't understand the + * This is not marked as TSA_ACQUIRE_SHARED() because TSA doesn't understand the * cleanup attribute and would therefore complain that the graph is never - * unlocked. TSA_ASSERT() makes sure that the following calls know that we - * hold the lock while unlocking is left unchecked. + * unlocked. TSA_ASSERT_SHARED() makes sure that the following calls know that + * we hold the lock while unlocking is left unchecked. */ -static inline GraphLockableMainloop * TSA_ASSERT(graph_lock) TSA_NO_TSA +static inline GraphLockableMainloop * TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA graph_lockable_auto_lock_mainloop(GraphLockableMainloop *x) { bdrv_graph_rdlock_main_loop(); diff --git a/include/block/nvme.h b/include/block/nvme.h index 8027b7126b..bb231d0b9a 100644 --- a/include/block/nvme.h +++ b/include/block/nvme.h @@ -1,6 +1,8 @@ #ifndef BLOCK_NVME_H #define BLOCK_NVME_H +#include "hw/registerfields.h" + typedef struct QEMU_PACKED NvmeBar { uint64_t cap; uint32_t vs; @@ -58,6 +60,24 @@ enum NvmeBarRegs { NVME_REG_PMRMSCU = offsetof(NvmeBar, pmrmscu), }; +typedef struct QEMU_PACKED NvmeEndGrpLog { + uint8_t critical_warning; + uint8_t rsvd[2]; + uint8_t avail_spare; + uint8_t avail_spare_thres; + uint8_t percet_used; + uint8_t rsvd1[26]; + uint64_t end_estimate[2]; + uint64_t data_units_read[2]; + uint64_t data_units_written[2]; + uint64_t media_units_written[2]; + uint64_t host_read_commands[2]; + uint64_t host_write_commands[2]; + uint64_t media_integrity_errors[2]; + uint64_t no_err_info_log_entries[2]; + uint8_t rsvd2[352]; +} NvmeEndGrpLog; + enum NvmeCapShift { CAP_MQES_SHIFT = 0, CAP_CQR_SHIFT = 16, @@ -595,7 +615,9 @@ enum NvmeAdminCommands { NVME_ADM_CMD_ACTIVATE_FW = 0x10, NVME_ADM_CMD_DOWNLOAD_FW = 0x11, NVME_ADM_CMD_NS_ATTACHMENT = 0x15, + NVME_ADM_CMD_DIRECTIVE_SEND = 0x19, NVME_ADM_CMD_VIRT_MNGMT = 0x1c, + NVME_ADM_CMD_DIRECTIVE_RECV = 0x1a, NVME_ADM_CMD_DBBUF_CONFIG = 0x7c, NVME_ADM_CMD_FORMAT_NVM = 0x80, NVME_ADM_CMD_SECURITY_SEND = 0x81, @@ -611,7 +633,9 @@ enum NvmeIoCommands { NVME_CMD_WRITE_ZEROES = 0x08, NVME_CMD_DSM = 0x09, NVME_CMD_VERIFY = 0x0c, + NVME_CMD_IO_MGMT_RECV = 0x12, NVME_CMD_COPY = 0x19, + NVME_CMD_IO_MGMT_SEND = 0x1d, NVME_CMD_ZONE_MGMT_SEND = 0x79, NVME_CMD_ZONE_MGMT_RECV = 0x7a, NVME_CMD_ZONE_APPEND = 0x7d, @@ -704,7 +728,9 @@ typedef struct QEMU_PACKED NvmeRwCmd { uint64_t slba; uint16_t nlb; uint16_t control; - uint32_t dsmgmt; + uint8_t dsmgmt; + uint8_t rsvd; + uint16_t dspec; uint32_t reftag; uint16_t apptag; uint16_t appmask; @@ -875,6 +901,8 @@ enum NvmeStatusCodes { NVME_INVALID_PRP_OFFSET = 0x0013, NVME_CMD_SET_CMB_REJECTED = 0x002b, NVME_INVALID_CMD_SET = 0x002c, + NVME_FDP_DISABLED = 0x0029, + NVME_INVALID_PHID_LIST = 0x002a, NVME_LBA_RANGE = 0x0080, NVME_CAP_EXCEEDED = 0x0081, NVME_NS_NOT_READY = 0x0082, @@ -1005,11 +1033,16 @@ enum { }; enum 
NvmeLogIdentifier { - NVME_LOG_ERROR_INFO = 0x01, - NVME_LOG_SMART_INFO = 0x02, - NVME_LOG_FW_SLOT_INFO = 0x03, - NVME_LOG_CHANGED_NSLIST = 0x04, - NVME_LOG_CMD_EFFECTS = 0x05, + NVME_LOG_ERROR_INFO = 0x01, + NVME_LOG_SMART_INFO = 0x02, + NVME_LOG_FW_SLOT_INFO = 0x03, + NVME_LOG_CHANGED_NSLIST = 0x04, + NVME_LOG_CMD_EFFECTS = 0x05, + NVME_LOG_ENDGRP = 0x09, + NVME_LOG_FDP_CONFS = 0x20, + NVME_LOG_FDP_RUH_USAGE = 0x21, + NVME_LOG_FDP_STATS = 0x22, + NVME_LOG_FDP_EVENTS = 0x23, }; typedef struct QEMU_PACKED NvmePSD { @@ -1091,7 +1124,10 @@ typedef struct QEMU_PACKED NvmeIdCtrl { uint16_t mntmt; uint16_t mxtmt; uint32_t sanicap; - uint8_t rsvd332[180]; + uint8_t rsvd332[6]; + uint16_t nsetidmax; + uint16_t endgidmax; + uint8_t rsvd342[170]; uint8_t sqes; uint8_t cqes; uint16_t maxcmd; @@ -1134,15 +1170,18 @@ enum NvmeIdCtrlOaes { }; enum NvmeIdCtrlCtratt { + NVME_CTRATT_ENDGRPS = 1 << 4, NVME_CTRATT_ELBAS = 1 << 15, + NVME_CTRATT_FDPS = 1 << 19, }; enum NvmeIdCtrlOacs { - NVME_OACS_SECURITY = 1 << 0, - NVME_OACS_FORMAT = 1 << 1, - NVME_OACS_FW = 1 << 2, - NVME_OACS_NS_MGMT = 1 << 3, - NVME_OACS_DBBUF = 1 << 8, + NVME_OACS_SECURITY = 1 << 0, + NVME_OACS_FORMAT = 1 << 1, + NVME_OACS_FW = 1 << 2, + NVME_OACS_NS_MGMT = 1 << 3, + NVME_OACS_DIRECTIVES = 1 << 5, + NVME_OACS_DBBUF = 1 << 8, }; enum NvmeIdCtrlOncs { @@ -1227,6 +1266,7 @@ enum NvmeNsAttachmentOperation { #define NVME_AEC_SMART(aec) (aec & 0xff) #define NVME_AEC_NS_ATTR(aec) ((aec >> 8) & 0x1) #define NVME_AEC_FW_ACTIVATION(aec) ((aec >> 9) & 0x1) +#define NVME_AEC_ENDGRP_NOTICE(aec) ((aec >> 14) & 0x1) #define NVME_ERR_REC_TLER(err_rec) (err_rec & 0xffff) #define NVME_ERR_REC_DULBE(err_rec) (err_rec & 0x10000) @@ -1246,6 +1286,8 @@ enum NvmeFeatureIds { NVME_TIMESTAMP = 0xe, NVME_HOST_BEHAVIOR_SUPPORT = 0x16, NVME_COMMAND_SET_PROFILE = 0x19, + NVME_FDP_MODE = 0x1d, + NVME_FDP_EVENTS = 0x1e, NVME_SOFTWARE_PROGRESS_MARKER = 0x80, NVME_FID_MAX = 0x100, }; @@ -1338,7 +1380,10 @@ typedef struct QEMU_PACKED NvmeIdNs { uint16_t mssrl; uint32_t mcl; uint8_t msrc; - uint8_t rsvd81[23]; + uint8_t rsvd81[18]; + uint8_t nsattr; + uint16_t nvmsetid; + uint16_t endgid; uint8_t nguid[16]; uint64_t eui64; NvmeLBAF lbaf[NVME_MAX_NLBAF]; @@ -1617,6 +1662,169 @@ typedef enum NvmeVirtualResourceType { NVME_VIRT_RES_INTERRUPT = 0x01, } NvmeVirtualResourceType; +typedef struct NvmeDirectiveIdentify { + uint8_t supported; + uint8_t unused1[31]; + uint8_t enabled; + uint8_t unused33[31]; + uint8_t persistent; + uint8_t unused65[31]; + uint8_t rsvd64[4000]; +} NvmeDirectiveIdentify; + +enum NvmeDirectiveTypes { + NVME_DIRECTIVE_IDENTIFY = 0x0, + NVME_DIRECTIVE_DATA_PLACEMENT = 0x2, +}; + +enum NvmeDirectiveOperations { + NVME_DIRECTIVE_RETURN_PARAMS = 0x1, +}; + +typedef struct QEMU_PACKED NvmeFdpConfsHdr { + uint16_t num_confs; + uint8_t version; + uint8_t rsvd3; + uint32_t size; + uint8_t rsvd8[8]; +} NvmeFdpConfsHdr; + +REG8(FDPA, 0x0) + FIELD(FDPA, RGIF, 0, 4) + FIELD(FDPA, VWC, 4, 1) + FIELD(FDPA, VALID, 7, 1); + +typedef struct QEMU_PACKED NvmeFdpDescrHdr { + uint16_t descr_size; + uint8_t fdpa; + uint8_t vss; + uint32_t nrg; + uint16_t nruh; + uint16_t maxpids; + uint32_t nnss; + uint64_t runs; + uint32_t erutl; + uint8_t rsvd28[36]; +} NvmeFdpDescrHdr; + +enum NvmeRuhType { + NVME_RUHT_INITIALLY_ISOLATED = 1, + NVME_RUHT_PERSISTENTLY_ISOLATED = 2, +}; + +typedef struct QEMU_PACKED NvmeRuhDescr { + uint8_t ruht; + uint8_t rsvd1[3]; +} NvmeRuhDescr; + +typedef struct QEMU_PACKED NvmeRuhuLog { + uint16_t nruh; + uint8_t rsvd2[6]; +} NvmeRuhuLog; + +enum 
NvmeRuhAttributes { + NVME_RUHA_UNUSED = 0, + NVME_RUHA_HOST = 1, + NVME_RUHA_CTRL = 2, +}; + +typedef struct QEMU_PACKED NvmeRuhuDescr { + uint8_t ruha; + uint8_t rsvd1[7]; +} NvmeRuhuDescr; + +typedef struct QEMU_PACKED NvmeFdpStatsLog { + uint64_t hbmw[2]; + uint64_t mbmw[2]; + uint64_t mbe[2]; + uint8_t rsvd48[16]; +} NvmeFdpStatsLog; + +typedef struct QEMU_PACKED NvmeFdpEventsLog { + uint32_t num_events; + uint8_t rsvd4[60]; +} NvmeFdpEventsLog; + +enum NvmeFdpEventType { + FDP_EVT_RU_NOT_FULLY_WRITTEN = 0x0, + FDP_EVT_RU_ATL_EXCEEDED = 0x1, + FDP_EVT_CTRL_RESET_RUH = 0x2, + FDP_EVT_INVALID_PID = 0x3, + FDP_EVT_MEDIA_REALLOC = 0x80, + FDP_EVT_RUH_IMPLICIT_RU_CHANGE = 0x81, +}; + +enum NvmeFdpEventFlags { + FDPEF_PIV = 1 << 0, + FDPEF_NSIDV = 1 << 1, + FDPEF_LV = 1 << 2, +}; + +typedef struct QEMU_PACKED NvmeFdpEvent { + uint8_t type; + uint8_t flags; + uint16_t pid; + uint64_t timestamp; + uint32_t nsid; + uint64_t type_specific[2]; + uint16_t rgid; + uint8_t ruhid; + uint8_t rsvd35[5]; + uint64_t vendor[3]; +} NvmeFdpEvent; + +typedef struct QEMU_PACKED NvmePhidList { + uint16_t nnruhd; + uint8_t rsvd2[6]; +} NvmePhidList; + +typedef struct QEMU_PACKED NvmePhidDescr { + uint8_t ruht; + uint8_t rsvd1; + uint16_t ruhid; +} NvmePhidDescr; + +REG32(FEAT_FDP, 0x0) + FIELD(FEAT_FDP, FDPE, 0, 1) + FIELD(FEAT_FDP, CONF_NDX, 8, 8); + +typedef struct QEMU_PACKED NvmeFdpEventDescr { + uint8_t evt; + uint8_t evta; +} NvmeFdpEventDescr; + +REG32(NVME_IOMR, 0x0) + FIELD(NVME_IOMR, MO, 0, 8) + FIELD(NVME_IOMR, MOS, 16, 16); + +enum NvmeIomr2Mo { + NVME_IOMR_MO_NOP = 0x0, + NVME_IOMR_MO_RUH_STATUS = 0x1, + NVME_IOMR_MO_VENDOR_SPECIFIC = 0x255, +}; + +typedef struct QEMU_PACKED NvmeRuhStatus { + uint8_t rsvd0[14]; + uint16_t nruhsd; +} NvmeRuhStatus; + +typedef struct QEMU_PACKED NvmeRuhStatusDescr { + uint16_t pid; + uint16_t ruhid; + uint32_t earutr; + uint64_t ruamw; + uint8_t rsvd16[16]; +} NvmeRuhStatusDescr; + +REG32(NVME_IOMS, 0x0) + FIELD(NVME_IOMS, MO, 0, 8) + FIELD(NVME_IOMS, MOS, 16, 16); + +enum NvmeIoms2Mo { + NVME_IOMS_MO_NOP = 0x0, + NVME_IOMS_MO_RUH_UPDATE = 0x1, +}; + static inline void _nvme_check_size(void) { QEMU_BUILD_BUG_ON(sizeof(NvmeBar) != 4096); @@ -1655,5 +1863,7 @@ static inline void _nvme_check_size(void) QEMU_BUILD_BUG_ON(sizeof(NvmePriCtrlCap) != 4096); QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlEntry) != 32); QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlList) != 4096); + QEMU_BUILD_BUG_ON(sizeof(NvmeEndGrpLog) != 512); + QEMU_BUILD_BUG_ON(sizeof(NvmeDirectiveIdentify) != 4096); } #endif diff --git a/include/block/qapi.h b/include/block/qapi.h index 8773b9b191..18d48ddb70 100644 --- a/include/block/qapi.h +++ b/include/block/qapi.h @@ -25,6 +25,7 @@ #ifndef BLOCK_QAPI_H #define BLOCK_QAPI_H +#include "block/graph-lock.h" #include "block/snapshot.h" #include "qapi/qapi-types-block-core.h" @@ -43,9 +44,9 @@ void bdrv_query_image_info(BlockDriverState *bs, bool flat, bool skip_implicit_filters, Error **errp); -void bdrv_query_block_graph_info(BlockDriverState *bs, - BlockGraphInfo **p_info, - Error **errp); +void GRAPH_RDLOCK +bdrv_query_block_graph_info(BlockDriverState *bs, BlockGraphInfo **p_info, + Error **errp); void bdrv_snapshot_dump(QEMUSnapshotInfo *sn); void bdrv_image_info_specific_dump(ImageInfoSpecific *info_spec, diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h index f8cda9df91..0fe85ade77 100644 --- a/include/block/raw-aio.h +++ b/include/block/raw-aio.h @@ -28,6 +28,9 @@ #define QEMU_AIO_WRITE_ZEROES 0x0020 #define QEMU_AIO_COPY_RANGE 0x0040 #define 
QEMU_AIO_TRUNCATE 0x0080 +#define QEMU_AIO_ZONE_REPORT 0x0100 +#define QEMU_AIO_ZONE_MGMT 0x0200 +#define QEMU_AIO_ZONE_APPEND 0x0400 #define QEMU_AIO_TYPE_MASK \ (QEMU_AIO_READ | \ QEMU_AIO_WRITE | \ @@ -36,7 +39,10 @@ QEMU_AIO_DISCARD | \ QEMU_AIO_WRITE_ZEROES | \ QEMU_AIO_COPY_RANGE | \ - QEMU_AIO_TRUNCATE) + QEMU_AIO_TRUNCATE | \ + QEMU_AIO_ZONE_REPORT | \ + QEMU_AIO_ZONE_MGMT | \ + QEMU_AIO_ZONE_APPEND) /* AIO flags */ #define QEMU_AIO_MISALIGNED 0x1000 @@ -49,26 +55,39 @@ typedef struct LinuxAioState LinuxAioState; LinuxAioState *laio_init(Error **errp); void laio_cleanup(LinuxAioState *s); -int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd, - uint64_t offset, QEMUIOVector *qiov, int type, - uint64_t dev_max_batch); + +/* laio_co_submit: submit I/O requests in the thread's current AioContext. */ +int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov, + int type, uint64_t dev_max_batch); + void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context); void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context); -void laio_io_plug(BlockDriverState *bs, LinuxAioState *s); -void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s, - uint64_t dev_max_batch); + +/* + * laio_io_plug/unplug work in the thread's current AioContext, therefore the + * caller must ensure that they are paired in the same IOThread. + */ +void laio_io_plug(void); +void laio_io_unplug(uint64_t dev_max_batch); #endif /* io_uring.c - Linux io_uring implementation */ #ifdef CONFIG_LINUX_IO_URING typedef struct LuringState LuringState; LuringState *luring_init(Error **errp); void luring_cleanup(LuringState *s); -int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd, - uint64_t offset, QEMUIOVector *qiov, int type); + +/* luring_co_submit: submit I/O requests in the thread's current AioContext. */ +int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset, + QEMUIOVector *qiov, int type); void luring_detach_aio_context(LuringState *s, AioContext *old_context); void luring_attach_aio_context(LuringState *s, AioContext *new_context); -void luring_io_plug(BlockDriverState *bs, LuringState *s); -void luring_io_unplug(BlockDriverState *bs, LuringState *s); + +/* + * luring_io_plug/unplug work in the thread's current AioContext, therefore the + * caller must ensure that they are paired in the same IOThread. + */ +void luring_io_plug(void); +void luring_io_unplug(void); #endif #ifdef _WIN32 diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h index 95ff2b0bdb..948ff5f30c 100644 --- a/include/block/thread-pool.h +++ b/include/block/thread-pool.h @@ -29,12 +29,15 @@ typedef struct ThreadPool ThreadPool; ThreadPool *thread_pool_new(struct AioContext *ctx); void thread_pool_free(ThreadPool *pool); -BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool, - ThreadPoolFunc *func, void *arg, - BlockCompletionFunc *cb, void *opaque); -int coroutine_fn thread_pool_submit_co(ThreadPool *pool, - ThreadPoolFunc *func, void *arg); -void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg); +/* + * thread_pool_submit* API: submit I/O requests in the thread's + * current AioContext. 
+ */ +BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg, + BlockCompletionFunc *cb, void *opaque); +int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg); +void thread_pool_submit(ThreadPoolFunc *func, void *arg); + void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx); #endif diff --git a/include/crypto/aes.h b/include/crypto/aes.h index ba297d6a73..822d64588c 100644 --- a/include/crypto/aes.h +++ b/include/crypto/aes.h @@ -18,14 +18,14 @@ typedef struct aes_key_st AES_KEY; #define AES_decrypt QEMU_AES_decrypt int AES_set_encrypt_key(const unsigned char *userKey, const int bits, - AES_KEY *key); + AES_KEY *key); int AES_set_decrypt_key(const unsigned char *userKey, const int bits, - AES_KEY *key); + AES_KEY *key); void AES_encrypt(const unsigned char *in, unsigned char *out, - const AES_KEY *key); + const AES_KEY *key); void AES_decrypt(const unsigned char *in, unsigned char *out, - const AES_KEY *key); + const AES_KEY *key); extern const uint8_t AES_sbox[256]; extern const uint8_t AES_isbox[256]; diff --git a/include/crypto/desrfb.h b/include/crypto/desrfb.h index 7ca596c387..af8d12234d 100644 --- a/include/crypto/desrfb.h +++ b/include/crypto/desrfb.h @@ -15,30 +15,30 @@ /* d3des.h - * - * Headers and defines for d3des.c - * Graven Imagery, 1992. + * Headers and defines for d3des.c + * Graven Imagery, 1992. * * Copyright (c) 1988,1989,1990,1991,1992 by Richard Outerbridge - * (GEnie : OUTER; CIS : [71755,204]) + * (GEnie : OUTER; CIS : [71755,204]) */ -#define EN0 0 /* MODE == encrypt */ -#define DE1 1 /* MODE == decrypt */ +#define EN0 0 /* MODE == encrypt */ +#define DE1 1 /* MODE == decrypt */ void deskey(unsigned char *, int); -/* hexkey[8] MODE +/* hexkey[8] MODE * Sets the internal key register according to the hexadecimal * key contained in the 8 bytes of hexkey, according to the DES, * for encryption or decryption according to MODE. */ void usekey(unsigned long *); -/* cookedkey[32] +/* cookedkey[32] * Loads the internal key register with the data in cookedkey. */ void des(unsigned char *, unsigned char *); -/* from[8] to[8] +/* from[8] to[8] * Encrypts/Decrypts (according to the key currently loaded in the * internal key register) one block of eight bytes at address 'from' * into the block at address 'to'. They can be the same. 
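The same "current AioContext" convention now applies across laio, luring and the thread pool, which is why the explicit pool/state parameters disappear from these prototypes; the plug/unplug pairs must likewise be matched within one IOThread. A minimal sketch of the new thread-pool call style (example_worker and example_offload are hypothetical):

static int example_worker(void *opaque)   /* runs in a pool worker thread */
{
    int *value = opaque;
    *value *= 2;
    return 0;
}

static int coroutine_fn example_offload(void)
{
    int value = 21;
    /* The pool is implied by the calling thread's AioContext */
    int ret = thread_pool_submit_co(example_worker, &value);
    return ret < 0 ? ret : value;   /* value == 42 on success */
}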
diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h index 32cda9ef14..2f6f91c2ee 100644 --- a/include/disas/dis-asm.h +++ b/include/disas/dis-asm.h @@ -188,20 +188,20 @@ enum bfd_architecture #define bfd_mach_alpha_ev5 0x20 #define bfd_mach_alpha_ev6 0x30 bfd_arch_arm, /* Advanced Risc Machines ARM */ -#define bfd_mach_arm_unknown 0 -#define bfd_mach_arm_2 1 -#define bfd_mach_arm_2a 2 -#define bfd_mach_arm_3 3 -#define bfd_mach_arm_3M 4 -#define bfd_mach_arm_4 5 -#define bfd_mach_arm_4T 6 -#define bfd_mach_arm_5 7 -#define bfd_mach_arm_5T 8 -#define bfd_mach_arm_5TE 9 -#define bfd_mach_arm_XScale 10 -#define bfd_mach_arm_ep9312 11 -#define bfd_mach_arm_iWMMXt 12 -#define bfd_mach_arm_iWMMXt2 13 +#define bfd_mach_arm_unknown 0 +#define bfd_mach_arm_2 1 +#define bfd_mach_arm_2a 2 +#define bfd_mach_arm_3 3 +#define bfd_mach_arm_3M 4 +#define bfd_mach_arm_4 5 +#define bfd_mach_arm_4T 6 +#define bfd_mach_arm_5 7 +#define bfd_mach_arm_5T 8 +#define bfd_mach_arm_5TE 9 +#define bfd_mach_arm_XScale 10 +#define bfd_mach_arm_ep9312 11 +#define bfd_mach_arm_iWMMXt 12 +#define bfd_mach_arm_iWMMXt2 13 bfd_arch_ns32k, /* National Semiconductors ns32000 */ bfd_arch_w65, /* WDC 65816 */ bfd_arch_tic30, /* Texas Instruments TMS320C30 */ @@ -241,7 +241,7 @@ enum bfd_architecture bfd_arch_ia64, /* HP/Intel ia64 */ #define bfd_mach_ia64_elf64 64 #define bfd_mach_ia64_elf32 32 - bfd_arch_nios2, /* Nios II */ + bfd_arch_nios2, /* Nios II */ #define bfd_mach_nios2 0 #define bfd_mach_nios2r1 1 #define bfd_mach_nios2r2 2 @@ -269,14 +269,14 @@ typedef int (*fprintf_function)(FILE *f, const char *fmt, ...) G_GNUC_PRINTF(2, 3); enum dis_insn_type { - dis_noninsn, /* Not a valid instruction */ - dis_nonbranch, /* Not a branch instruction */ - dis_branch, /* Unconditional branch */ - dis_condbranch, /* Conditional branch */ - dis_jsr, /* Jump to subroutine */ - dis_condjsr, /* Conditional jump to subroutine */ - dis_dref, /* Data reference instruction */ - dis_dref2 /* Two data references in instruction */ + dis_noninsn, /* Not a valid instruction */ + dis_nonbranch, /* Not a branch instruction */ + dis_branch, /* Unconditional branch */ + dis_condbranch, /* Conditional branch */ + dis_jsr, /* Jump to subroutine */ + dis_condjsr, /* Conditional jump to subroutine */ + dis_dref, /* Data reference instruction */ + dis_dref2 /* Two data references in instruction */ }; /* This struct is passed into the instruction decoding routine, @@ -319,8 +319,8 @@ typedef struct disassemble_info { The top 16 bits are reserved for public use (and are documented here). The bottom 16 bits are for the internal use of the disassembler. */ unsigned long flags; -#define INSN_HAS_RELOC 0x80000000 -#define INSN_ARM_BE32 0x00010000 +#define INSN_HAS_RELOC 0x80000000 +#define INSN_ARM_BE32 0x00010000 PTR private_data; /* Function used to get bytes to disassemble. MEMADDR is the @@ -330,7 +330,7 @@ typedef struct disassemble_info { Returns an errno value or 0 for success. */ int (*read_memory_func) (bfd_vma memaddr, bfd_byte *myaddr, int length, - struct disassemble_info *info); + struct disassemble_info *info); /* Function which should be called if we get an error that we can't recover from. STATUS is the errno value from read_memory_func and @@ -384,14 +384,14 @@ typedef struct disassemble_info { To determine whether this decoder supports this information, set insn_info_valid to 0, decode an instruction, then check it. */ - char insn_info_valid; /* Branch info has been set. 
*/ - char branch_delay_insns; /* How many sequential insn's will run before - a branch takes effect. (0 = normal) */ - char data_size; /* Size of data reference in insn, in bytes */ - enum dis_insn_type insn_type; /* Type of instruction */ - bfd_vma target; /* Target address of branch or dref, if known; - zero if unknown. */ - bfd_vma target2; /* Second target address for dref2 */ + char insn_info_valid; /* Branch info has been set. */ + char branch_delay_insns; /* How many sequential insn's will run before + a branch takes effect. (0 = normal) */ + char data_size; /* Size of data reference in insn, in bytes */ + enum dis_insn_type insn_type; /* Type of instruction */ + bfd_vma target; /* Target address of branch or dref, if known; + zero if unknown. */ + bfd_vma target2; /* Second target address for dref2 */ /* Command line options specific to the target disassembler. */ char * disassembler_options; diff --git a/include/disas/disas.h b/include/disas/disas.h index d363e95ede..176775eff7 100644 --- a/include/disas/disas.h +++ b/include/disas/disas.h @@ -1,34 +1,23 @@ #ifndef QEMU_DISAS_H #define QEMU_DISAS_H -#include "exec/hwaddr.h" - -#ifdef NEED_CPU_H -#include "cpu.h" - /* Disassemble this for me please... (debugging). */ -void disas(FILE *out, const void *code, unsigned long size); -void target_disas(FILE *out, CPUState *cpu, target_ulong code, - target_ulong size); +void disas(FILE *out, const void *code, size_t size); +void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size); -void monitor_disas(Monitor *mon, CPUState *cpu, - target_ulong pc, int nb_insn, int is_physical); +void monitor_disas(Monitor *mon, CPUState *cpu, uint64_t pc, + int nb_insn, bool is_physical); char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size); /* Look up symbol for debugging purpose. Returns "" if unknown. */ -const char *lookup_symbol(target_ulong orig_addr); -#endif +const char *lookup_symbol(uint64_t orig_addr); struct syminfo; struct elf32_sym; struct elf64_sym; -#if defined(CONFIG_USER_ONLY) -typedef const char *(*lookup_symbol_t)(struct syminfo *s, target_ulong orig_addr); -#else -typedef const char *(*lookup_symbol_t)(struct syminfo *s, hwaddr orig_addr); -#endif +typedef const char *(*lookup_symbol_t)(struct syminfo *s, uint64_t orig_addr); struct syminfo { lookup_symbol_t lookup_symbol; diff --git a/include/elf.h b/include/elf.h index 8bf1e72720..2f4d0e56d1 100644 --- a/include/elf.h +++ b/include/elf.h @@ -11,25 +11,25 @@ typedef uint32_t Elf32_Word; /* 64-bit ELF base types. 
*/ typedef uint64_t Elf64_Addr; typedef uint16_t Elf64_Half; -typedef int16_t Elf64_SHalf; +typedef int16_t Elf64_SHalf; typedef uint64_t Elf64_Off; -typedef int32_t Elf64_Sword; +typedef int32_t Elf64_Sword; typedef uint32_t Elf64_Word; typedef uint64_t Elf64_Xword; typedef int64_t Elf64_Sxword; /* These constants are for the segment types stored in the image headers */ -#define PT_NULL 0 -#define PT_LOAD 1 -#define PT_DYNAMIC 2 -#define PT_INTERP 3 -#define PT_NOTE 4 -#define PT_SHLIB 5 -#define PT_PHDR 6 -#define PT_LOOS 0x60000000 -#define PT_HIOS 0x6fffffff -#define PT_LOPROC 0x70000000 -#define PT_HIPROC 0x7fffffff +#define PT_NULL 0 +#define PT_LOAD 1 +#define PT_DYNAMIC 2 +#define PT_INTERP 3 +#define PT_NOTE 4 +#define PT_SHLIB 5 +#define PT_PHDR 6 +#define PT_LOOS 0x60000000 +#define PT_HIOS 0x6fffffff +#define PT_LOPROC 0x70000000 +#define PT_HIPROC 0x7fffffff #define PT_GNU_STACK (PT_LOOS + 0x474e551) #define PT_GNU_PROPERTY (PT_LOOS + 0x474e553) @@ -41,34 +41,34 @@ typedef int64_t Elf64_Sxword; /* Flags in the e_flags field of the header */ /* MIPS architecture level. */ -#define EF_MIPS_ARCH 0xf0000000 +#define EF_MIPS_ARCH 0xf0000000 /* Legal values for MIPS architecture level. */ -#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */ -#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */ -#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */ -#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */ -#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */ -#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */ -#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */ -#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32r2 code. */ -#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64r2 code. */ -#define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */ -#define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */ +#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */ +#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */ +#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */ +#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */ +#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */ +#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */ +#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */ +#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32r2 code. */ +#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64r2 code. */ +#define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */ +#define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */ /* The ABI of a file. */ -#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */ -#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */ +#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */ +#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. 
*/ -#define EF_MIPS_NOREORDER 0x00000001 -#define EF_MIPS_PIC 0x00000002 -#define EF_MIPS_CPIC 0x00000004 -#define EF_MIPS_ABI2 0x00000020 -#define EF_MIPS_OPTIONS_FIRST 0x00000080 -#define EF_MIPS_32BITMODE 0x00000100 -#define EF_MIPS_ABI 0x0000f000 -#define EF_MIPS_FP64 0x00000200 -#define EF_MIPS_NAN2008 0x00000400 +#define EF_MIPS_NOREORDER 0x00000001 +#define EF_MIPS_PIC 0x00000002 +#define EF_MIPS_CPIC 0x00000004 +#define EF_MIPS_ABI2 0x00000020 +#define EF_MIPS_OPTIONS_FIRST 0x00000080 +#define EF_MIPS_32BITMODE 0x00000100 +#define EF_MIPS_ABI 0x0000f000 +#define EF_MIPS_FP64 0x00000200 +#define EF_MIPS_NAN2008 0x00000400 /* MIPS machine variant */ #define EF_MIPS_MACH_NONE 0x00000000 /* A standard MIPS implementation */ @@ -129,162 +129,162 @@ typedef struct mips_elf_abiflags_v0 { #define ET_HIPROC 0xffff /* These constants define the various ELF target machines */ -#define EM_NONE 0 -#define EM_M32 1 -#define EM_SPARC 2 -#define EM_386 3 -#define EM_68K 4 -#define EM_88K 5 -#define EM_486 6 /* Perhaps disused */ -#define EM_860 7 +#define EM_NONE 0 +#define EM_M32 1 +#define EM_SPARC 2 +#define EM_386 3 +#define EM_68K 4 +#define EM_88K 5 +#define EM_486 6 /* Perhaps disused */ +#define EM_860 7 -#define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */ +#define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */ -#define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */ +#define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */ -#define EM_PARISC 15 /* HPPA */ +#define EM_PARISC 15 /* HPPA */ -#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */ +#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */ -#define EM_PPC 20 /* PowerPC */ -#define EM_PPC64 21 /* PowerPC64 */ +#define EM_PPC 20 /* PowerPC */ +#define EM_PPC64 21 /* PowerPC64 */ -#define EM_ARM 40 /* ARM */ +#define EM_ARM 40 /* ARM */ -#define EM_SH 42 /* SuperH */ +#define EM_SH 42 /* SuperH */ -#define EM_SPARCV9 43 /* SPARC v9 64-bit */ +#define EM_SPARCV9 43 /* SPARC v9 64-bit */ -#define EM_TRICORE 44 /* Infineon TriCore */ +#define EM_TRICORE 44 /* Infineon TriCore */ -#define EM_IA_64 50 /* HP/Intel IA-64 */ +#define EM_IA_64 50 /* HP/Intel IA-64 */ -#define EM_X86_64 62 /* AMD x86-64 */ +#define EM_X86_64 62 /* AMD x86-64 */ -#define EM_S390 22 /* IBM S/390 */ +#define EM_S390 22 /* IBM S/390 */ -#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ +#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ -#define EM_AVR 83 /* AVR 8-bit microcontroller */ +#define EM_AVR 83 /* AVR 8-bit microcontroller */ -#define EM_V850 87 /* NEC v850 */ +#define EM_V850 87 /* NEC v850 */ -#define EM_H8_300H 47 /* Hitachi H8/300H */ -#define EM_H8S 48 /* Hitachi H8S */ -#define EM_LATTICEMICO32 138 /* LatticeMico32 */ +#define EM_H8_300H 47 /* Hitachi H8/300H */ +#define EM_H8S 48 /* Hitachi H8S */ +#define EM_LATTICEMICO32 138 /* LatticeMico32 */ -#define EM_OPENRISC 92 /* OpenCores OpenRISC */ +#define EM_OPENRISC 92 /* OpenCores OpenRISC */ -#define EM_HEXAGON 164 /* Qualcomm Hexagon */ +#define EM_HEXAGON 164 /* Qualcomm Hexagon */ -#define EM_RX 173 /* Renesas RX family */ +#define EM_RX 173 /* Renesas RX family */ -#define EM_RISCV 243 /* RISC-V */ +#define EM_RISCV 243 /* RISC-V */ -#define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */ +#define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */ -#define EM_LOONGARCH 258 /* LoongArch */ +#define EM_LOONGARCH 258 /* LoongArch */ /* * This is an interim value that we will use until the committee comes * up with a final number. 
*/ -#define EM_ALPHA 0x9026 +#define EM_ALPHA 0x9026 /* Bogus old v850 magic number, used by old tools. */ -#define EM_CYGNUS_V850 0x9080 +#define EM_CYGNUS_V850 0x9080 /* * This is the old interim value for S/390 architecture */ -#define EM_S390_OLD 0xA390 +#define EM_S390_OLD 0xA390 -#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */ +#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */ -#define EM_MICROBLAZE 189 -#define EM_MICROBLAZE_OLD 0xBAAB +#define EM_MICROBLAZE 189 +#define EM_MICROBLAZE_OLD 0xBAAB -#define EM_XTENSA 94 /* Tensilica Xtensa */ +#define EM_XTENSA 94 /* Tensilica Xtensa */ -#define EM_AARCH64 183 +#define EM_AARCH64 183 -#define EF_AVR_MACH 0x7F /* Mask for AVR e_flags to get core type */ +#define EF_AVR_MACH 0x7F /* Mask for AVR e_flags to get core type */ /* This is the info that is needed to parse the dynamic section of the file */ -#define DT_NULL 0 -#define DT_NEEDED 1 -#define DT_PLTRELSZ 2 -#define DT_PLTGOT 3 -#define DT_HASH 4 -#define DT_STRTAB 5 -#define DT_SYMTAB 6 -#define DT_RELA 7 -#define DT_RELASZ 8 -#define DT_RELAENT 9 -#define DT_STRSZ 10 -#define DT_SYMENT 11 -#define DT_INIT 12 -#define DT_FINI 13 -#define DT_SONAME 14 -#define DT_RPATH 15 -#define DT_SYMBOLIC 16 -#define DT_REL 17 -#define DT_RELSZ 18 -#define DT_RELENT 19 -#define DT_PLTREL 20 -#define DT_DEBUG 21 -#define DT_TEXTREL 22 -#define DT_JMPREL 23 -#define DT_BINDNOW 24 -#define DT_INIT_ARRAY 25 -#define DT_FINI_ARRAY 26 -#define DT_INIT_ARRAYSZ 27 -#define DT_FINI_ARRAYSZ 28 -#define DT_RUNPATH 29 -#define DT_FLAGS 30 -#define DT_LOOS 0x6000000d -#define DT_HIOS 0x6ffff000 -#define DT_LOPROC 0x70000000 -#define DT_HIPROC 0x7fffffff +#define DT_NULL 0 +#define DT_NEEDED 1 +#define DT_PLTRELSZ 2 +#define DT_PLTGOT 3 +#define DT_HASH 4 +#define DT_STRTAB 5 +#define DT_SYMTAB 6 +#define DT_RELA 7 +#define DT_RELASZ 8 +#define DT_RELAENT 9 +#define DT_STRSZ 10 +#define DT_SYMENT 11 +#define DT_INIT 12 +#define DT_FINI 13 +#define DT_SONAME 14 +#define DT_RPATH 15 +#define DT_SYMBOLIC 16 +#define DT_REL 17 +#define DT_RELSZ 18 +#define DT_RELENT 19 +#define DT_PLTREL 20 +#define DT_DEBUG 21 +#define DT_TEXTREL 22 +#define DT_JMPREL 23 +#define DT_BINDNOW 24 +#define DT_INIT_ARRAY 25 +#define DT_FINI_ARRAY 26 +#define DT_INIT_ARRAYSZ 27 +#define DT_FINI_ARRAYSZ 28 +#define DT_RUNPATH 29 +#define DT_FLAGS 30 +#define DT_LOOS 0x6000000d +#define DT_HIOS 0x6ffff000 +#define DT_LOPROC 0x70000000 +#define DT_HIPROC 0x7fffffff /* DT_ entries which fall between DT_VALRNGLO and DT_VALRNDHI use the d_val field of the Elf*_Dyn structure. I.e. they contain scalars. */ -#define DT_VALRNGLO 0x6ffffd00 -#define DT_VALRNGHI 0x6ffffdff +#define DT_VALRNGLO 0x6ffffd00 +#define DT_VALRNGHI 0x6ffffdff /* DT_ entries which fall between DT_ADDRRNGLO and DT_ADDRRNGHI use the d_ptr field of the Elf*_Dyn structure. I.e. they contain pointers. 
*/ -#define DT_ADDRRNGLO 0x6ffffe00 -#define DT_ADDRRNGHI 0x6ffffeff +#define DT_ADDRRNGLO 0x6ffffe00 +#define DT_ADDRRNGHI 0x6ffffeff -#define DT_VERSYM 0x6ffffff0 -#define DT_RELACOUNT 0x6ffffff9 -#define DT_RELCOUNT 0x6ffffffa -#define DT_FLAGS_1 0x6ffffffb -#define DT_VERDEF 0x6ffffffc -#define DT_VERDEFNUM 0x6ffffffd -#define DT_VERNEED 0x6ffffffe -#define DT_VERNEEDNUM 0x6fffffff +#define DT_VERSYM 0x6ffffff0 +#define DT_RELACOUNT 0x6ffffff9 +#define DT_RELCOUNT 0x6ffffffa +#define DT_FLAGS_1 0x6ffffffb +#define DT_VERDEF 0x6ffffffc +#define DT_VERDEFNUM 0x6ffffffd +#define DT_VERNEED 0x6ffffffe +#define DT_VERNEEDNUM 0x6fffffff -#define DT_MIPS_RLD_VERSION 0x70000001 -#define DT_MIPS_TIME_STAMP 0x70000002 -#define DT_MIPS_ICHECKSUM 0x70000003 -#define DT_MIPS_IVERSION 0x70000004 -#define DT_MIPS_FLAGS 0x70000005 - #define RHF_NONE 0 - #define RHF_HARDWAY 1 - #define RHF_NOTPOT 2 -#define DT_MIPS_BASE_ADDRESS 0x70000006 -#define DT_MIPS_CONFLICT 0x70000008 -#define DT_MIPS_LIBLIST 0x70000009 -#define DT_MIPS_LOCAL_GOTNO 0x7000000a -#define DT_MIPS_CONFLICTNO 0x7000000b -#define DT_MIPS_LIBLISTNO 0x70000010 -#define DT_MIPS_SYMTABNO 0x70000011 -#define DT_MIPS_UNREFEXTNO 0x70000012 -#define DT_MIPS_GOTSYM 0x70000013 -#define DT_MIPS_HIPAGENO 0x70000014 -#define DT_MIPS_RLD_MAP 0x70000016 +#define DT_MIPS_RLD_VERSION 0x70000001 +#define DT_MIPS_TIME_STAMP 0x70000002 +#define DT_MIPS_ICHECKSUM 0x70000003 +#define DT_MIPS_IVERSION 0x70000004 +#define DT_MIPS_FLAGS 0x70000005 + #define RHF_NONE 0 + #define RHF_HARDWAY 1 + #define RHF_NOTPOT 2 +#define DT_MIPS_BASE_ADDRESS 0x70000006 +#define DT_MIPS_CONFLICT 0x70000008 +#define DT_MIPS_LIBLIST 0x70000009 +#define DT_MIPS_LOCAL_GOTNO 0x7000000a +#define DT_MIPS_CONFLICTNO 0x7000000b +#define DT_MIPS_LIBLISTNO 0x70000010 +#define DT_MIPS_SYMTABNO 0x70000011 +#define DT_MIPS_UNREFEXTNO 0x70000012 +#define DT_MIPS_GOTSYM 0x70000013 +#define DT_MIPS_HIPAGENO 0x70000014 +#define DT_MIPS_RLD_MAP 0x70000016 /* This info is needed when parsing the symbol table */ #define STB_LOCAL 0 @@ -297,61 +297,61 @@ typedef struct mips_elf_abiflags_v0 { #define STT_SECTION 3 #define STT_FILE 4 -#define ELF_ST_BIND(x) ((x) >> 4) -#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf) +#define ELF_ST_BIND(x) ((x) >> 4) +#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf) #define ELF_ST_INFO(bind, type) (((bind) << 4) | ((type) & 0xf)) -#define ELF32_ST_BIND(x) ELF_ST_BIND(x) -#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x) -#define ELF64_ST_BIND(x) ELF_ST_BIND(x) -#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x) +#define ELF32_ST_BIND(x) ELF_ST_BIND(x) +#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x) +#define ELF64_ST_BIND(x) ELF_ST_BIND(x) +#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x) /* Symbolic values for the entries in the auxiliary table put on the initial stack */ -#define AT_NULL 0 /* end of vector */ -#define AT_IGNORE 1 /* entry should be ignored */ -#define AT_EXECFD 2 /* file descriptor of program */ -#define AT_PHDR 3 /* program headers for program */ -#define AT_PHENT 4 /* size of program header entry */ -#define AT_PHNUM 5 /* number of program headers */ -#define AT_PAGESZ 6 /* system page size */ -#define AT_BASE 7 /* base address of interpreter */ -#define AT_FLAGS 8 /* flags */ -#define AT_ENTRY 9 /* entry point of program */ -#define AT_NOTELF 10 /* program is not ELF */ -#define AT_UID 11 /* real uid */ -#define AT_EUID 12 /* effective uid */ -#define AT_GID 13 /* real gid */ -#define AT_EGID 14 /* effective gid */ -#define AT_PLATFORM 15 /* string identifying CPU for 
optimizations */ -#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */ -#define AT_CLKTCK 17 /* frequency at which times() increments */ -#define AT_FPUCW 18 /* info about fpu initialization by kernel */ -#define AT_DCACHEBSIZE 19 /* data cache block size */ -#define AT_ICACHEBSIZE 20 /* instruction cache block size */ -#define AT_UCACHEBSIZE 21 /* unified cache block size */ -#define AT_IGNOREPPC 22 /* ppc only; entry should be ignored */ -#define AT_SECURE 23 /* boolean, was exec suid-like? */ -#define AT_BASE_PLATFORM 24 /* string identifying real platforms */ -#define AT_RANDOM 25 /* address of 16 random bytes */ -#define AT_HWCAP2 26 /* extension of AT_HWCAP */ -#define AT_EXECFN 31 /* filename of the executable */ -#define AT_SYSINFO 32 /* address of kernel entry point */ -#define AT_SYSINFO_EHDR 33 /* address of kernel vdso */ -#define AT_L1I_CACHESHAPE 34 /* shapes of the caches: */ -#define AT_L1D_CACHESHAPE 35 /* bits 0-3: cache associativity. */ -#define AT_L2_CACHESHAPE 36 /* bits 4-7: log2 of line size. */ -#define AT_L3_CACHESHAPE 37 /* val&~255: cache size. */ +#define AT_NULL 0 /* end of vector */ +#define AT_IGNORE 1 /* entry should be ignored */ +#define AT_EXECFD 2 /* file descriptor of program */ +#define AT_PHDR 3 /* program headers for program */ +#define AT_PHENT 4 /* size of program header entry */ +#define AT_PHNUM 5 /* number of program headers */ +#define AT_PAGESZ 6 /* system page size */ +#define AT_BASE 7 /* base address of interpreter */ +#define AT_FLAGS 8 /* flags */ +#define AT_ENTRY 9 /* entry point of program */ +#define AT_NOTELF 10 /* program is not ELF */ +#define AT_UID 11 /* real uid */ +#define AT_EUID 12 /* effective uid */ +#define AT_GID 13 /* real gid */ +#define AT_EGID 14 /* effective gid */ +#define AT_PLATFORM 15 /* string identifying CPU for optimizations */ +#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */ +#define AT_CLKTCK 17 /* frequency at which times() increments */ +#define AT_FPUCW 18 /* info about fpu initialization by kernel */ +#define AT_DCACHEBSIZE 19 /* data cache block size */ +#define AT_ICACHEBSIZE 20 /* instruction cache block size */ +#define AT_UCACHEBSIZE 21 /* unified cache block size */ +#define AT_IGNOREPPC 22 /* ppc only; entry should be ignored */ +#define AT_SECURE 23 /* boolean, was exec suid-like? */ +#define AT_BASE_PLATFORM 24 /* string identifying real platforms */ +#define AT_RANDOM 25 /* address of 16 random bytes */ +#define AT_HWCAP2 26 /* extension of AT_HWCAP */ +#define AT_EXECFN 31 /* filename of the executable */ +#define AT_SYSINFO 32 /* address of kernel entry point */ +#define AT_SYSINFO_EHDR 33 /* address of kernel vdso */ +#define AT_L1I_CACHESHAPE 34 /* shapes of the caches: */ +#define AT_L1D_CACHESHAPE 35 /* bits 0-3: cache associativity. */ +#define AT_L2_CACHESHAPE 36 /* bits 4-7: log2 of line size. */ +#define AT_L3_CACHESHAPE 37 /* val&~255: cache size. 
*/ typedef struct dynamic{ Elf32_Sword d_tag; union{ - Elf32_Sword d_val; - Elf32_Addr d_ptr; + Elf32_Sword d_val; + Elf32_Addr d_ptr; } d_un; } Elf32_Dyn; typedef struct { - Elf64_Sxword d_tag; /* entry tag value */ + Elf64_Sxword d_tag; /* entry tag value */ union { Elf64_Xword d_val; Elf64_Addr d_ptr; @@ -362,72 +362,72 @@ typedef struct { #define ELF32_R_SYM(x) ((x) >> 8) #define ELF32_R_TYPE(x) ((x) & 0xff) -#define ELF64_R_SYM(i) ((i) >> 32) -#define ELF64_R_TYPE(i) ((i) & 0xffffffff) +#define ELF64_R_SYM(i) ((i) >> 32) +#define ELF64_R_TYPE(i) ((i) & 0xffffffff) #define ELF64_R_TYPE_DATA(i) (((ELF64_R_TYPE(i) >> 8) ^ 0x00800000) - 0x00800000) -#define R_386_NONE 0 -#define R_386_32 1 -#define R_386_PC32 2 -#define R_386_GOT32 3 -#define R_386_PLT32 4 -#define R_386_COPY 5 -#define R_386_GLOB_DAT 6 -#define R_386_JMP_SLOT 7 -#define R_386_RELATIVE 8 -#define R_386_GOTOFF 9 -#define R_386_GOTPC 10 -#define R_386_NUM 11 +#define R_386_NONE 0 +#define R_386_32 1 +#define R_386_PC32 2 +#define R_386_GOT32 3 +#define R_386_PLT32 4 +#define R_386_COPY 5 +#define R_386_GLOB_DAT 6 +#define R_386_JMP_SLOT 7 +#define R_386_RELATIVE 8 +#define R_386_GOTOFF 9 +#define R_386_GOTPC 10 +#define R_386_NUM 11 /* Not a dynamic reloc, so not included in R_386_NUM. Used in TCG. */ -#define R_386_PC8 23 +#define R_386_PC8 23 -#define R_MIPS_NONE 0 -#define R_MIPS_16 1 -#define R_MIPS_32 2 -#define R_MIPS_REL32 3 -#define R_MIPS_26 4 -#define R_MIPS_HI16 5 -#define R_MIPS_LO16 6 -#define R_MIPS_GPREL16 7 -#define R_MIPS_LITERAL 8 -#define R_MIPS_GOT16 9 -#define R_MIPS_PC16 10 -#define R_MIPS_CALL16 11 -#define R_MIPS_GPREL32 12 +#define R_MIPS_NONE 0 +#define R_MIPS_16 1 +#define R_MIPS_32 2 +#define R_MIPS_REL32 3 +#define R_MIPS_26 4 +#define R_MIPS_HI16 5 +#define R_MIPS_LO16 6 +#define R_MIPS_GPREL16 7 +#define R_MIPS_LITERAL 8 +#define R_MIPS_GOT16 9 +#define R_MIPS_PC16 10 +#define R_MIPS_CALL16 11 +#define R_MIPS_GPREL32 12 /* The remaining relocs are defined on Irix, although they are not in the MIPS ELF ABI. */ -#define R_MIPS_UNUSED1 13 -#define R_MIPS_UNUSED2 14 -#define R_MIPS_UNUSED3 15 -#define R_MIPS_SHIFT5 16 -#define R_MIPS_SHIFT6 17 -#define R_MIPS_64 18 -#define R_MIPS_GOT_DISP 19 -#define R_MIPS_GOT_PAGE 20 -#define R_MIPS_GOT_OFST 21 +#define R_MIPS_UNUSED1 13 +#define R_MIPS_UNUSED2 14 +#define R_MIPS_UNUSED3 15 +#define R_MIPS_SHIFT5 16 +#define R_MIPS_SHIFT6 17 +#define R_MIPS_64 18 +#define R_MIPS_GOT_DISP 19 +#define R_MIPS_GOT_PAGE 20 +#define R_MIPS_GOT_OFST 21 /* * The following two relocation types are specified in the MIPS ABI * conformance guide version 1.2 but not yet in the psABI. */ -#define R_MIPS_GOTHI16 22 -#define R_MIPS_GOTLO16 23 -#define R_MIPS_SUB 24 -#define R_MIPS_INSERT_A 25 -#define R_MIPS_INSERT_B 26 -#define R_MIPS_DELETE 27 -#define R_MIPS_HIGHER 28 -#define R_MIPS_HIGHEST 29 +#define R_MIPS_GOTHI16 22 +#define R_MIPS_GOTLO16 23 +#define R_MIPS_SUB 24 +#define R_MIPS_INSERT_A 25 +#define R_MIPS_INSERT_B 26 +#define R_MIPS_DELETE 27 +#define R_MIPS_HIGHER 28 +#define R_MIPS_HIGHEST 29 /* * The following two relocation types are specified in the MIPS ABI * conformance guide version 1.2 but not yet in the psABI. */ -#define R_MIPS_CALLHI16 30 -#define R_MIPS_CALLLO16 31 +#define R_MIPS_CALLHI16 30 +#define R_MIPS_CALLLO16 31 /* * This range is reserved for vendor specific relocations. */ -#define R_MIPS_LOVENDOR 100 -#define R_MIPS_HIVENDOR 127 +#define R_MIPS_LOVENDOR 100 +#define R_MIPS_HIVENDOR 127 /* SUN SPARC specific definitions. 
*/ @@ -448,48 +448,48 @@ typedef struct { /* * Sparc ELF relocation types */ -#define R_SPARC_NONE 0 -#define R_SPARC_8 1 -#define R_SPARC_16 2 -#define R_SPARC_32 3 -#define R_SPARC_DISP8 4 -#define R_SPARC_DISP16 5 -#define R_SPARC_DISP32 6 -#define R_SPARC_WDISP30 7 -#define R_SPARC_WDISP22 8 -#define R_SPARC_HI22 9 -#define R_SPARC_22 10 -#define R_SPARC_13 11 -#define R_SPARC_LO10 12 -#define R_SPARC_GOT10 13 -#define R_SPARC_GOT13 14 -#define R_SPARC_GOT22 15 -#define R_SPARC_PC10 16 -#define R_SPARC_PC22 17 -#define R_SPARC_WPLT30 18 -#define R_SPARC_COPY 19 -#define R_SPARC_GLOB_DAT 20 -#define R_SPARC_JMP_SLOT 21 -#define R_SPARC_RELATIVE 22 -#define R_SPARC_UA32 23 -#define R_SPARC_PLT32 24 -#define R_SPARC_HIPLT22 25 -#define R_SPARC_LOPLT10 26 -#define R_SPARC_PCPLT32 27 -#define R_SPARC_PCPLT22 28 -#define R_SPARC_PCPLT10 29 -#define R_SPARC_10 30 -#define R_SPARC_11 31 -#define R_SPARC_64 32 -#define R_SPARC_OLO10 33 -#define R_SPARC_HH22 34 -#define R_SPARC_HM10 35 -#define R_SPARC_LM22 36 -#define R_SPARC_WDISP16 40 -#define R_SPARC_WDISP19 41 -#define R_SPARC_7 43 -#define R_SPARC_5 44 -#define R_SPARC_6 45 +#define R_SPARC_NONE 0 +#define R_SPARC_8 1 +#define R_SPARC_16 2 +#define R_SPARC_32 3 +#define R_SPARC_DISP8 4 +#define R_SPARC_DISP16 5 +#define R_SPARC_DISP32 6 +#define R_SPARC_WDISP30 7 +#define R_SPARC_WDISP22 8 +#define R_SPARC_HI22 9 +#define R_SPARC_22 10 +#define R_SPARC_13 11 +#define R_SPARC_LO10 12 +#define R_SPARC_GOT10 13 +#define R_SPARC_GOT13 14 +#define R_SPARC_GOT22 15 +#define R_SPARC_PC10 16 +#define R_SPARC_PC22 17 +#define R_SPARC_WPLT30 18 +#define R_SPARC_COPY 19 +#define R_SPARC_GLOB_DAT 20 +#define R_SPARC_JMP_SLOT 21 +#define R_SPARC_RELATIVE 22 +#define R_SPARC_UA32 23 +#define R_SPARC_PLT32 24 +#define R_SPARC_HIPLT22 25 +#define R_SPARC_LOPLT10 26 +#define R_SPARC_PCPLT32 27 +#define R_SPARC_PCPLT22 28 +#define R_SPARC_PCPLT10 29 +#define R_SPARC_10 30 +#define R_SPARC_11 31 +#define R_SPARC_64 32 +#define R_SPARC_OLO10 33 +#define R_SPARC_HH22 34 +#define R_SPARC_HM10 35 +#define R_SPARC_LM22 36 +#define R_SPARC_WDISP16 40 +#define R_SPARC_WDISP19 41 +#define R_SPARC_7 43 +#define R_SPARC_5 44 +#define R_SPARC_6 45 /* Bits present in AT_HWCAP for ARM. 
*/ @@ -647,29 +647,29 @@ typedef struct { /* * 68k ELF relocation types */ -#define R_68K_NONE 0 -#define R_68K_32 1 -#define R_68K_16 2 -#define R_68K_8 3 -#define R_68K_PC32 4 -#define R_68K_PC16 5 -#define R_68K_PC8 6 -#define R_68K_GOT32 7 -#define R_68K_GOT16 8 -#define R_68K_GOT8 9 -#define R_68K_GOT32O 10 -#define R_68K_GOT16O 11 -#define R_68K_GOT8O 12 -#define R_68K_PLT32 13 -#define R_68K_PLT16 14 -#define R_68K_PLT8 15 -#define R_68K_PLT32O 16 -#define R_68K_PLT16O 17 -#define R_68K_PLT8O 18 -#define R_68K_COPY 19 -#define R_68K_GLOB_DAT 20 -#define R_68K_JMP_SLOT 21 -#define R_68K_RELATIVE 22 +#define R_68K_NONE 0 +#define R_68K_32 1 +#define R_68K_16 2 +#define R_68K_8 3 +#define R_68K_PC32 4 +#define R_68K_PC16 5 +#define R_68K_PC8 6 +#define R_68K_GOT32 7 +#define R_68K_GOT16 8 +#define R_68K_GOT8 9 +#define R_68K_GOT32O 10 +#define R_68K_GOT16O 11 +#define R_68K_GOT8O 12 +#define R_68K_PLT32 13 +#define R_68K_PLT16 14 +#define R_68K_PLT8 15 +#define R_68K_PLT32O 16 +#define R_68K_PLT16O 17 +#define R_68K_PLT8O 18 +#define R_68K_COPY 19 +#define R_68K_GLOB_DAT 20 +#define R_68K_JMP_SLOT 21 +#define R_68K_RELATIVE 22 /* * Alpha ELF relocation types @@ -693,7 +693,7 @@ typedef struct { #define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */ #define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */ #define R_ALPHA_RELATIVE 27 /* Adjust by program base */ -#define R_ALPHA_BRSGP 28 +#define R_ALPHA_BRSGP 28 #define R_ALPHA_TLSGD 29 #define R_ALPHA_TLS_LDM 30 #define R_ALPHA_DTPMOD64 31 @@ -708,78 +708,78 @@ typedef struct { #define R_ALPHA_TPRELLO 40 #define R_ALPHA_TPREL16 41 -#define SHF_ALPHA_GPREL 0x10000000 +#define SHF_ALPHA_GPREL 0x10000000 /* PowerPC specific definitions. */ /* Processor specific flags for the ELF header e_flags field. */ -#define EF_PPC64_ABI 0x3 +#define EF_PPC64_ABI 0x3 /* PowerPC relocations defined by the ABIs */ -#define R_PPC_NONE 0 -#define R_PPC_ADDR32 1 /* 32bit absolute address */ -#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */ -#define R_PPC_ADDR16 3 /* 16bit absolute address */ -#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */ -#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */ -#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */ -#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */ -#define R_PPC_ADDR14_BRTAKEN 8 -#define R_PPC_ADDR14_BRNTAKEN 9 -#define R_PPC_REL24 10 /* PC relative 26 bit */ -#define R_PPC_REL14 11 /* PC relative 16 bit */ -#define R_PPC_REL14_BRTAKEN 12 -#define R_PPC_REL14_BRNTAKEN 13 -#define R_PPC_GOT16 14 -#define R_PPC_GOT16_LO 15 -#define R_PPC_GOT16_HI 16 -#define R_PPC_GOT16_HA 17 -#define R_PPC_PLTREL24 18 -#define R_PPC_COPY 19 -#define R_PPC_GLOB_DAT 20 -#define R_PPC_JMP_SLOT 21 -#define R_PPC_RELATIVE 22 -#define R_PPC_LOCAL24PC 23 -#define R_PPC_UADDR32 24 -#define R_PPC_UADDR16 25 -#define R_PPC_REL32 26 -#define R_PPC_PLT32 27 -#define R_PPC_PLTREL32 28 -#define R_PPC_PLT16_LO 29 -#define R_PPC_PLT16_HI 30 -#define R_PPC_PLT16_HA 31 -#define R_PPC_SDAREL16 32 -#define R_PPC_SECTOFF 33 -#define R_PPC_SECTOFF_LO 34 -#define R_PPC_SECTOFF_HI 35 -#define R_PPC_SECTOFF_HA 36 +#define R_PPC_NONE 0 +#define R_PPC_ADDR32 1 /* 32bit absolute address */ +#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. 
*/ +#define R_PPC_ADDR16 3 /* 16bit absolute address */ +#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */ +#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */ +#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */ +#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */ +#define R_PPC_ADDR14_BRTAKEN 8 +#define R_PPC_ADDR14_BRNTAKEN 9 +#define R_PPC_REL24 10 /* PC relative 26 bit */ +#define R_PPC_REL14 11 /* PC relative 16 bit */ +#define R_PPC_REL14_BRTAKEN 12 +#define R_PPC_REL14_BRNTAKEN 13 +#define R_PPC_GOT16 14 +#define R_PPC_GOT16_LO 15 +#define R_PPC_GOT16_HI 16 +#define R_PPC_GOT16_HA 17 +#define R_PPC_PLTREL24 18 +#define R_PPC_COPY 19 +#define R_PPC_GLOB_DAT 20 +#define R_PPC_JMP_SLOT 21 +#define R_PPC_RELATIVE 22 +#define R_PPC_LOCAL24PC 23 +#define R_PPC_UADDR32 24 +#define R_PPC_UADDR16 25 +#define R_PPC_REL32 26 +#define R_PPC_PLT32 27 +#define R_PPC_PLTREL32 28 +#define R_PPC_PLT16_LO 29 +#define R_PPC_PLT16_HI 30 +#define R_PPC_PLT16_HA 31 +#define R_PPC_SDAREL16 32 +#define R_PPC_SECTOFF 33 +#define R_PPC_SECTOFF_LO 34 +#define R_PPC_SECTOFF_HI 35 +#define R_PPC_SECTOFF_HA 36 /* Keep this the last entry. */ #ifndef R_PPC_NUM -#define R_PPC_NUM 37 +#define R_PPC_NUM 37 #endif /* ARM specific declarations */ /* Processor specific flags for the ELF header e_flags field. */ -#define EF_ARM_RELEXEC 0x01 -#define EF_ARM_HASENTRY 0x02 -#define EF_ARM_INTERWORK 0x04 -#define EF_ARM_APCS_26 0x08 -#define EF_ARM_APCS_FLOAT 0x10 -#define EF_ARM_PIC 0x20 -#define EF_ALIGN8 0x40 /* 8-bit structure alignment is in use */ -#define EF_NEW_ABI 0x80 -#define EF_OLD_ABI 0x100 -#define EF_ARM_SOFT_FLOAT 0x200 -#define EF_ARM_VFP_FLOAT 0x400 +#define EF_ARM_RELEXEC 0x01 +#define EF_ARM_HASENTRY 0x02 +#define EF_ARM_INTERWORK 0x04 +#define EF_ARM_APCS_26 0x08 +#define EF_ARM_APCS_FLOAT 0x10 +#define EF_ARM_PIC 0x20 +#define EF_ALIGN8 0x40 /* 8-bit structure alignment is in use */ +#define EF_NEW_ABI 0x80 +#define EF_OLD_ABI 0x100 +#define EF_ARM_SOFT_FLOAT 0x200 +#define EF_ARM_VFP_FLOAT 0x400 #define EF_ARM_MAVERICK_FLOAT 0x800 /* Other constants defined in the ARM ELF spec. version B-01. */ -#define EF_ARM_SYMSARESORTED 0x04 /* NB conflicts with EF_INTERWORK */ -#define EF_ARM_DYNSYMSUSESEGIDX 0x08 /* NB conflicts with EF_APCS26 */ -#define EF_ARM_MAPSYMSFIRST 0x10 /* NB conflicts with EF_APCS_FLOAT */ -#define EF_ARM_EABIMASK 0xFF000000 +#define EF_ARM_SYMSARESORTED 0x04 /* NB conflicts with EF_INTERWORK */ +#define EF_ARM_DYNSYMSUSESEGIDX 0x08 /* NB conflicts with EF_APCS26 */ +#define EF_ARM_MAPSYMSFIRST 0x10 /* NB conflicts with EF_APCS_FLOAT */ +#define EF_ARM_EABIMASK 0xFF000000 /* Constants defined in AAELF. */ #define EF_ARM_BE8 0x00800000 @@ -806,46 +806,46 @@ typedef struct { addressed by the static base */ /* ARM relocs. 
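EF_ARM_EABIMASK above reserves the top byte of e_flags for the EABI version. A short sketch of extracting it, with the constant restated locally; the helper name arm_eabi_version is illustrative, not something this header defines:

    #include <stdint.h>
    #include <stdio.h>

    #define EF_ARM_EABIMASK 0xFF000000

    /* Hypothetical helper: the EABI version lives in the top byte of e_flags. */
    static unsigned arm_eabi_version(uint32_t e_flags)
    {
        return (e_flags & EF_ARM_EABIMASK) >> 24;
    }

    int main(void)
    {
        printf("EABI v%u\n", arm_eabi_version(0x05000000));   /* prints "EABI v5" */
        return 0;
    }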
*/ -#define R_ARM_NONE 0 /* No reloc */ -#define R_ARM_PC24 1 /* PC relative 26 bit branch */ -#define R_ARM_ABS32 2 /* Direct 32 bit */ -#define R_ARM_REL32 3 /* PC relative 32 bit */ -#define R_ARM_PC13 4 -#define R_ARM_ABS16 5 /* Direct 16 bit */ -#define R_ARM_ABS12 6 /* Direct 12 bit */ -#define R_ARM_THM_ABS5 7 -#define R_ARM_ABS8 8 /* Direct 8 bit */ -#define R_ARM_SBREL32 9 -#define R_ARM_THM_PC22 10 -#define R_ARM_THM_PC8 11 -#define R_ARM_AMP_VCALL9 12 -#define R_ARM_SWI24 13 -#define R_ARM_THM_SWI8 14 -#define R_ARM_XPC25 15 -#define R_ARM_THM_XPC22 16 -#define R_ARM_COPY 20 /* Copy symbol at runtime */ -#define R_ARM_GLOB_DAT 21 /* Create GOT entry */ -#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */ -#define R_ARM_RELATIVE 23 /* Adjust by program base */ -#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */ -#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */ -#define R_ARM_GOT32 26 /* 32 bit GOT entry */ -#define R_ARM_PLT32 27 /* 32 bit PLT address */ +#define R_ARM_NONE 0 /* No reloc */ +#define R_ARM_PC24 1 /* PC relative 26 bit branch */ +#define R_ARM_ABS32 2 /* Direct 32 bit */ +#define R_ARM_REL32 3 /* PC relative 32 bit */ +#define R_ARM_PC13 4 +#define R_ARM_ABS16 5 /* Direct 16 bit */ +#define R_ARM_ABS12 6 /* Direct 12 bit */ +#define R_ARM_THM_ABS5 7 +#define R_ARM_ABS8 8 /* Direct 8 bit */ +#define R_ARM_SBREL32 9 +#define R_ARM_THM_PC22 10 +#define R_ARM_THM_PC8 11 +#define R_ARM_AMP_VCALL9 12 +#define R_ARM_SWI24 13 +#define R_ARM_THM_SWI8 14 +#define R_ARM_XPC25 15 +#define R_ARM_THM_XPC22 16 +#define R_ARM_COPY 20 /* Copy symbol at runtime */ +#define R_ARM_GLOB_DAT 21 /* Create GOT entry */ +#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */ +#define R_ARM_RELATIVE 23 /* Adjust by program base */ +#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */ +#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */ +#define R_ARM_GOT32 26 /* 32 bit GOT entry */ +#define R_ARM_PLT32 27 /* 32 bit PLT address */ #define R_ARM_CALL 28 #define R_ARM_JUMP24 29 -#define R_ARM_GNU_VTENTRY 100 -#define R_ARM_GNU_VTINHERIT 101 -#define R_ARM_THM_PC11 102 /* thumb unconditional branch */ -#define R_ARM_THM_PC9 103 /* thumb conditional branch */ -#define R_ARM_RXPC25 249 -#define R_ARM_RSBREL32 250 -#define R_ARM_THM_RPC22 251 -#define R_ARM_RREL32 252 -#define R_ARM_RABS22 253 -#define R_ARM_RPC24 254 -#define R_ARM_RBASE 255 +#define R_ARM_GNU_VTENTRY 100 +#define R_ARM_GNU_VTINHERIT 101 +#define R_ARM_THM_PC11 102 /* thumb unconditional branch */ +#define R_ARM_THM_PC9 103 /* thumb conditional branch */ +#define R_ARM_RXPC25 249 +#define R_ARM_RSBREL32 250 +#define R_ARM_THM_RPC22 251 +#define R_ARM_RREL32 252 +#define R_ARM_RABS22 253 +#define R_ARM_RPC24 254 +#define R_ARM_RBASE 255 /* Keep this the last entry. */ -#define R_ARM_NUM 256 +#define R_ARM_NUM 256 /* ARM Aarch64 relocation types */ #define R_AARCH64_NONE 256 /* also accepts R_ARM_NONE (0) */ @@ -975,385 +975,385 @@ typedef struct { #define R_AARCH64_TLS_TPREL32 1033 /* s390 relocations defined by the ABIs */ -#define R_390_NONE 0 /* No reloc. */ -#define R_390_8 1 /* Direct 8 bit. */ -#define R_390_12 2 /* Direct 12 bit. */ -#define R_390_16 3 /* Direct 16 bit. */ -#define R_390_32 4 /* Direct 32 bit. */ -#define R_390_PC32 5 /* PC relative 32 bit. */ -#define R_390_GOT12 6 /* 12 bit GOT offset. */ -#define R_390_GOT32 7 /* 32 bit GOT offset. */ -#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */ -#define R_390_COPY 9 /* Copy symbol at runtime. 
*/ -#define R_390_GLOB_DAT 10 /* Create GOT entry. */ -#define R_390_JMP_SLOT 11 /* Create PLT entry. */ -#define R_390_RELATIVE 12 /* Adjust by program base. */ -#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */ -#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */ -#define R_390_GOT16 15 /* 16 bit GOT offset. */ -#define R_390_PC16 16 /* PC relative 16 bit. */ -#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */ -#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */ -#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */ -#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */ -#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */ -#define R_390_64 22 /* Direct 64 bit. */ -#define R_390_PC64 23 /* PC relative 64 bit. */ -#define R_390_GOT64 24 /* 64 bit GOT offset. */ -#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */ -#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */ -#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */ -#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */ -#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */ -#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */ -#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */ -#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */ -#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */ -#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */ -#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */ -#define R_390_PLTOFF64 36 /* 16 bit offset from GOT to PLT. */ -#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */ -#define R_390_TLS_GDCALL 38 /* Tag for function call in general - dynamic TLS code. */ -#define R_390_TLS_LDCALL 39 /* Tag for function call in local - dynamic TLS code. */ -#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic - thread local data. */ -#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic - thread local data. */ -#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS - block offset. */ -#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS - block offset. */ -#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS - block offset. */ -#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic - thread local data in LD code. */ -#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic - thread local data in LD code. */ -#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for - negated static TLS block offset. */ -#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for - negated static TLS block offset. */ -#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for - negated static TLS block offset. */ -#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to - static TLS block. */ -#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to - static TLS block. */ -#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS - block. */ -#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS - block. */ -#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */ -#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */ -#define R_390_TLS_TPOFF 56 /* Negate offset in static TLS +#define R_390_NONE 0 /* No reloc. */ +#define R_390_8 1 /* Direct 8 bit. */ +#define R_390_12 2 /* Direct 12 bit. */ +#define R_390_16 3 /* Direct 16 bit. */ +#define R_390_32 4 /* Direct 32 bit. */ +#define R_390_PC32 5 /* PC relative 32 bit. */ +#define R_390_GOT12 6 /* 12 bit GOT offset. 
*/ +#define R_390_GOT32 7 /* 32 bit GOT offset. */ +#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */ +#define R_390_COPY 9 /* Copy symbol at runtime. */ +#define R_390_GLOB_DAT 10 /* Create GOT entry. */ +#define R_390_JMP_SLOT 11 /* Create PLT entry. */ +#define R_390_RELATIVE 12 /* Adjust by program base. */ +#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */ +#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */ +#define R_390_GOT16 15 /* 16 bit GOT offset. */ +#define R_390_PC16 16 /* PC relative 16 bit. */ +#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */ +#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */ +#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */ +#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */ +#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */ +#define R_390_64 22 /* Direct 64 bit. */ +#define R_390_PC64 23 /* PC relative 64 bit. */ +#define R_390_GOT64 24 /* 64 bit GOT offset. */ +#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */ +#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */ +#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */ +#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */ +#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */ +#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */ +#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */ +#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */ +#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */ +#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */ +#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */ +#define R_390_PLTOFF64 36 /* 64 bit offset from GOT to PLT. */ +#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */ +#define R_390_TLS_GDCALL 38 /* Tag for function call in general + dynamic TLS code. */ +#define R_390_TLS_LDCALL 39 /* Tag for function call in local + dynamic TLS code. */ +#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic + thread local data. */ +#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic + thread local data. */ +#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS + block offset. */ +#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS + block offset. */ +#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS + block offset. */ +#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic + thread local data in LD code. */ +#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic + thread local data in LD code. */ +#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for + negated static TLS block offset. */ +#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for + negated static TLS block offset. */ +#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for + negated static TLS block offset. */ +#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to + static TLS block. */ +#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to + static TLS block. */ +#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS + block. */ +#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS + block. */ +#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */ +#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */ +#define R_390_TLS_TPOFF 56 /* Negated offset in static TLS block. */ #define R_390_20 57 /* Keep this the last entry.
*/ #define R_390_NUM 58 /* x86-64 relocation types */ -#define R_X86_64_NONE 0 /* No reloc */ -#define R_X86_64_64 1 /* Direct 64 bit */ -#define R_X86_64_PC32 2 /* PC relative 32 bit signed */ -#define R_X86_64_GOT32 3 /* 32 bit GOT entry */ -#define R_X86_64_PLT32 4 /* 32 bit PLT address */ -#define R_X86_64_COPY 5 /* Copy symbol at runtime */ -#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ -#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ -#define R_X86_64_RELATIVE 8 /* Adjust by program base */ -#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative +#define R_X86_64_NONE 0 /* No reloc */ +#define R_X86_64_64 1 /* Direct 64 bit */ +#define R_X86_64_PC32 2 /* PC relative 32 bit signed */ +#define R_X86_64_GOT32 3 /* 32 bit GOT entry */ +#define R_X86_64_PLT32 4 /* 32 bit PLT address */ +#define R_X86_64_COPY 5 /* Copy symbol at runtime */ +#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ +#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ +#define R_X86_64_RELATIVE 8 /* Adjust by program base */ +#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative offset to GOT */ -#define R_X86_64_32 10 /* Direct 32 bit zero extended */ -#define R_X86_64_32S 11 /* Direct 32 bit sign extended */ -#define R_X86_64_16 12 /* Direct 16 bit zero extended */ -#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ -#define R_X86_64_8 14 /* Direct 8 bit sign extended */ -#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ +#define R_X86_64_32 10 /* Direct 32 bit zero extended */ +#define R_X86_64_32S 11 /* Direct 32 bit sign extended */ +#define R_X86_64_16 12 /* Direct 16 bit zero extended */ +#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ +#define R_X86_64_8 14 /* Direct 8 bit sign extended */ +#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ -#define R_X86_64_NUM 16 +#define R_X86_64_NUM 16 /* Legal values for e_flags field of Elf64_Ehdr. */ -#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */ +#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */ /* HPPA specific definitions. */ /* Legal values for e_flags field of Elf32_Ehdr. */ -#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */ -#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */ -#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */ -#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */ -#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch +#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */ +#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */ +#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */ +#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */ +#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch prediction. */ -#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */ -#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */ +#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */ +#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */ /* Defined values for `e_flags & EF_PARISC_ARCH' are: */ -#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */ -#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */ -#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */ +#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */ +#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */ +#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */ /* Additional section indeces. 
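Most dynamic relocations in the x86-64 list above reduce to simple arithmetic on the load address; R_X86_64_RELATIVE, for example, stores the load base plus the addend at r_offset. A hedged sketch of how a loader might apply one such entry, with the two ELF typedefs restated locally rather than taken from this header:

    #include <stdint.h>

    typedef uint64_t Elf64_Addr;
    typedef int64_t  Elf64_Sxword;

    /* Sketch: apply one R_X86_64_RELATIVE relocation at load time.
       The target word becomes load_base + addend. */
    static void apply_relative(uint8_t *load_base, Elf64_Addr r_offset,
                               Elf64_Sxword r_addend)
    {
        *(Elf64_Addr *)(load_base + r_offset) =
            (Elf64_Addr)(uintptr_t)load_base + (Elf64_Addr)r_addend;
    }

    int main(void)
    {
        static uint64_t image[4];                  /* stand-in for a mapped segment */
        apply_relative((uint8_t *)image, 8, 0x10); /* word at offset 8 = base + 0x10 */
        return 0;
    }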
*/ -#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tenatively declared +#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tentatively declared symbols in ANSI C. */ -#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */ +#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */ /* Legal values for sh_type field of Elf32_Shdr. */ -#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */ -#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */ -#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */ +#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */ +#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */ +#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */ /* Legal values for sh_flags field of Elf32_Shdr. */ -#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */ -#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */ -#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */ +#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */ +#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */ +#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */ /* Legal values for ST_TYPE subfield of st_info (symbol type). */ -#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */ +#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */ -#define STT_HP_OPAQUE (STT_LOOS + 0x1) -#define STT_HP_STUB (STT_LOOS + 0x2) +#define STT_HP_OPAQUE (STT_LOOS + 0x1) +#define STT_HP_STUB (STT_LOOS + 0x2) /* HPPA relocs. */ -#define R_PARISC_NONE 0 /* No reloc. */ -#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */ -#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */ -#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */ -#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */ -#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */ -#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */ -#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */ -#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */ -#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */ -#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */ -#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */ -#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */ -#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */ -#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */ -#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */ -#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */ -#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */ -#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */ -#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */ -#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */ -#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */ -#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */ -#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */ -#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */ -#define R_PARISC_FPTR64 64 /* 64 bits function address. */ -#define R_PARISC_PLABEL32 65 /* 32 bits function address. */ -#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */ -#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */ -#define R_PARISC_PCREL14WR 75 /* PC-rel.
address, right 14 bits. */ -#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */ -#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */ -#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */ -#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */ -#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */ -#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */ -#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */ -#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */ -#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */ -#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */ -#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */ -#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */ -#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */ -#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */ -#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */ -#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */ -#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */ -#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */ -#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */ -#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */ -#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */ -#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */ -#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */ -#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */ -#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */ -#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */ -#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */ -#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */ -#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */ -#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */ -#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */ -#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */ -#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */ -#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */ -#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */ -#define R_PARISC_LORESERVE 128 -#define R_PARISC_COPY 128 /* Copy relocation. */ -#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */ -#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */ -#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */ -#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */ -#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */ -#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */ -#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/ -#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */ -#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */ -#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */ -#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */ -#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */ -#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */ -#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */ -#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */ -#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/ -#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. 
address, right 14 bits.*/ -#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */ -#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */ -#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */ -#define R_PARISC_HIRESERVE 255 +#define R_PARISC_NONE 0 /* No reloc. */ +#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */ +#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */ +#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */ +#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */ +#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */ +#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */ +#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */ +#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */ +#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */ +#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */ +#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */ +#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */ +#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */ +#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */ +#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */ +#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */ +#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */ +#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */ +#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */ +#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */ +#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */ +#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */ +#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */ +#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */ +#define R_PARISC_FPTR64 64 /* 64 bits function address. */ +#define R_PARISC_PLABEL32 65 /* 32 bits function address. */ +#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */ +#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */ +#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */ +#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */ +#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */ +#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */ +#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */ +#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */ +#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */ +#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */ +#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */ +#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */ +#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */ +#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */ +#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */ +#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */ +#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */ +#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */ +#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */ +#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */ +#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */ +#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */ +#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */ +#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. 
*/ +#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */ +#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */ +#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */ +#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */ +#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */ +#define R_PARISC_PLTOFF16F 117 /* 16 bits PLT-rel. address. */ +#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */ +#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */ +#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */ +#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */ +#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */ +#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */ +#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */ +#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */ +#define R_PARISC_LORESERVE 128 +#define R_PARISC_COPY 128 /* Copy relocation. */ +#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */ +#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */ +#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */ +#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */ +#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */ +#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */ +#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/ +#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */ +#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */ +#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */ +#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */ +#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */ +#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */ +#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */ +#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */ +#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/ +#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/ +#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */ +#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */ +#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */ +#define R_PARISC_HIRESERVE 255 /* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr.
*/ -#define PT_HP_TLS (PT_LOOS + 0x0) -#define PT_HP_CORE_NONE (PT_LOOS + 0x1) -#define PT_HP_CORE_VERSION (PT_LOOS + 0x2) -#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3) -#define PT_HP_CORE_COMM (PT_LOOS + 0x4) -#define PT_HP_CORE_PROC (PT_LOOS + 0x5) -#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6) -#define PT_HP_CORE_STACK (PT_LOOS + 0x7) -#define PT_HP_CORE_SHM (PT_LOOS + 0x8) -#define PT_HP_CORE_MMF (PT_LOOS + 0x9) -#define PT_HP_PARALLEL (PT_LOOS + 0x10) -#define PT_HP_FASTBIND (PT_LOOS + 0x11) -#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12) -#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13) -#define PT_HP_STACK (PT_LOOS + 0x14) +#define PT_HP_TLS (PT_LOOS + 0x0) +#define PT_HP_CORE_NONE (PT_LOOS + 0x1) +#define PT_HP_CORE_VERSION (PT_LOOS + 0x2) +#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3) +#define PT_HP_CORE_COMM (PT_LOOS + 0x4) +#define PT_HP_CORE_PROC (PT_LOOS + 0x5) +#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6) +#define PT_HP_CORE_STACK (PT_LOOS + 0x7) +#define PT_HP_CORE_SHM (PT_LOOS + 0x8) +#define PT_HP_CORE_MMF (PT_LOOS + 0x9) +#define PT_HP_PARALLEL (PT_LOOS + 0x10) +#define PT_HP_FASTBIND (PT_LOOS + 0x11) +#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12) +#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13) +#define PT_HP_STACK (PT_LOOS + 0x14) -#define PT_PARISC_ARCHEXT 0x70000000 -#define PT_PARISC_UNWIND 0x70000001 +#define PT_PARISC_ARCHEXT 0x70000000 +#define PT_PARISC_UNWIND 0x70000001 /* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */ -#define PF_PARISC_SBP 0x08000000 +#define PF_PARISC_SBP 0x08000000 -#define PF_HP_PAGE_SIZE 0x00100000 -#define PF_HP_FAR_SHARED 0x00200000 -#define PF_HP_NEAR_SHARED 0x00400000 -#define PF_HP_CODE 0x01000000 -#define PF_HP_MODIFY 0x02000000 -#define PF_HP_LAZYSWAP 0x04000000 -#define PF_HP_SBP 0x08000000 +#define PF_HP_PAGE_SIZE 0x00100000 +#define PF_HP_FAR_SHARED 0x00200000 +#define PF_HP_NEAR_SHARED 0x00400000 +#define PF_HP_CODE 0x01000000 +#define PF_HP_MODIFY 0x02000000 +#define PF_HP_LAZYSWAP 0x04000000 +#define PF_HP_SBP 0x08000000 /* IA-64 specific declarations. */ /* Processor specific flags for the Ehdr e_flags field. */ -#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */ -#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */ -#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */ +#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */ +#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */ +#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */ /* Processor specific values for the Phdr p_type field. */ -#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */ -#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */ +#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */ +#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */ /* Processor specific flags for the Phdr p_flags field. */ -#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */ +#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */ /* Processor specific values for the Shdr sh_type field. */ -#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */ -#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */ +#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */ +#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */ /* Processor specific flags for the Shdr sh_flags field. 
*/ -#define SHF_IA_64_SHORT 0x10000000 /* section near gp */ -#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */ +#define SHF_IA_64_SHORT 0x10000000 /* section near gp */ +#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */ /* Processor specific values for the Dyn d_tag field. */ -#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0) -#define DT_IA_64_NUM 1 +#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0) +#define DT_IA_64_NUM 1 /* IA-64 relocations. */ -#define R_IA64_NONE 0x00 /* none */ -#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */ -#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */ -#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */ -#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */ -#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */ -#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */ -#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */ -#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */ -#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */ -#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */ -#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */ -#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */ -#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */ -#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */ -#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */ -#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */ -#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */ -#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */ -#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */ -#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */ -#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */ -#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */ -#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */ -#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */ -#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */ -#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */ -#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */ -#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */ -#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */ -#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */ -#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */ -#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */ -#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */ -#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */ -#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */ -#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */ -#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */ -#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */ -#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */ -#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */ -#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */ -#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */ -#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */ -#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */ -#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */ -#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB 
*/ -#define R_IA64_REL32MSB 0x6c /* data 4 + REL */ -#define R_IA64_REL32LSB 0x6d /* data 4 + REL */ -#define R_IA64_REL64MSB 0x6e /* data 8 + REL */ -#define R_IA64_REL64LSB 0x6f /* data 8 + REL */ -#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */ -#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */ -#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */ -#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */ -#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */ -#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */ -#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */ -#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */ -#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */ -#define R_IA64_COPY 0x84 /* copy relocation */ -#define R_IA64_SUB 0x85 /* Addend and symbol difference */ -#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */ -#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */ -#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */ -#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */ -#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */ -#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */ -#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */ -#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm2 */ -#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */ -#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */ -#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */ -#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */ -#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */ -#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */ -#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */ -#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */ -#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */ -#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */ -#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */ +#define R_IA64_NONE 0x00 /* none */ +#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */ +#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */ +#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */ +#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */ +#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */ +#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */ +#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */ +#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */ +#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */ +#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */ +#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */ +#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */ +#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */ +#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */ +#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */ +#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */ +#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */ +#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */ +#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */ +#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */ +#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */ +#define R_IA64_FPTR32LSB 0x45 
/* @fptr(sym + add), data4 LSB */ +#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */ +#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */ +#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */ +#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */ +#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */ +#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */ +#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */ +#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */ +#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */ +#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */ +#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */ +#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */ +#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */ +#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */ +#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */ +#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */ +#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */ +#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */ +#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */ +#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */ +#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */ +#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */ +#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */ +#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */ +#define R_IA64_REL32MSB 0x6c /* data 4 + REL */ +#define R_IA64_REL32LSB 0x6d /* data 4 + REL */ +#define R_IA64_REL64MSB 0x6e /* data 8 + REL */ +#define R_IA64_REL64LSB 0x6f /* data 8 + REL */ +#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */ +#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */ +#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */ +#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */ +#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */ +#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */ +#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */ +#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */ +#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */ +#define R_IA64_COPY 0x84 /* copy relocation */ +#define R_IA64_SUB 0x85 /* Addend and symbol difference */ +#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */ +#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. 
*/ +#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */ +#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */ +#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */ +#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */ +#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */ +#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm22 */ +#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */ +#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */ +#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */ +#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */ +#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */ +#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */ +#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */ +#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */ +#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */ +#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */ +#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */ /* RISC-V relocations. */ #define R_RISCV_NONE 0 @@ -1421,47 +1421,47 @@ typedef struct { #define EF_RISCV_TSO 0x0010 typedef struct elf32_rel { - Elf32_Addr r_offset; - Elf32_Word r_info; + Elf32_Addr r_offset; + Elf32_Word r_info; } Elf32_Rel; typedef struct elf64_rel { - Elf64_Addr r_offset; /* Location at which to apply the action */ - Elf64_Xword r_info; /* index and type of relocation */ + Elf64_Addr r_offset; /* Location at which to apply the action */ + Elf64_Xword r_info; /* index and type of relocation */ } Elf64_Rel; typedef struct elf32_rela{ - Elf32_Addr r_offset; - Elf32_Word r_info; - Elf32_Sword r_addend; + Elf32_Addr r_offset; + Elf32_Word r_info; + Elf32_Sword r_addend; } Elf32_Rela; typedef struct elf64_rela { - Elf64_Addr r_offset; /* Location at which to apply the action */ - Elf64_Xword r_info; /* index and type of relocation */ - Elf64_Sxword r_addend; /* Constant addend used to compute value */ + Elf64_Addr r_offset; /* Location at which to apply the action */ + Elf64_Xword r_info; /* index and type of relocation */ + Elf64_Sxword r_addend; /* Constant addend used to compute value */ } Elf64_Rela; typedef struct elf32_sym{ - Elf32_Word st_name; - Elf32_Addr st_value; - Elf32_Word st_size; - unsigned char st_info; - unsigned char st_other; - Elf32_Half st_shndx; + Elf32_Word st_name; + Elf32_Addr st_value; + Elf32_Word st_size; + unsigned char st_info; + unsigned char st_other; + Elf32_Half st_shndx; } Elf32_Sym; typedef struct elf64_sym { - Elf64_Word st_name; /* Symbol name, index in string tbl */ - unsigned char st_info; /* Type and binding attributes */ - unsigned char st_other; /* No defined meaning, 0 */ - Elf64_Half st_shndx; /* Associated section index */ - Elf64_Addr st_value; /* Value of the symbol */ - Elf64_Xword st_size; /* Associated symbol size */ + Elf64_Word st_name; /* Symbol name, index in string tbl */ + unsigned char st_info; /* Type and binding attributes */ + unsigned char st_other; /* No defined meaning, 0 */ + Elf64_Half st_shndx; /* Associated section index */ + Elf64_Addr st_value; /* Value of the symbol */ + Elf64_Xword st_size; /* Associated symbol size */ } Elf64_Sym; -#define EI_NIDENT 16 +#define EI_NIDENT 16 /* Special value for e_phnum. This indicates that the real number of program headers is too large to fit into e_phnum.
Instead the real @@ -1469,30 +1469,30 @@ typedef struct elf64_sym { #define PN_XNUM 0xffff typedef struct elf32_hdr{ - unsigned char e_ident[EI_NIDENT]; - Elf32_Half e_type; - Elf32_Half e_machine; - Elf32_Word e_version; - Elf32_Addr e_entry; /* Entry point */ - Elf32_Off e_phoff; - Elf32_Off e_shoff; - Elf32_Word e_flags; - Elf32_Half e_ehsize; - Elf32_Half e_phentsize; - Elf32_Half e_phnum; - Elf32_Half e_shentsize; - Elf32_Half e_shnum; - Elf32_Half e_shstrndx; + unsigned char e_ident[EI_NIDENT]; + Elf32_Half e_type; + Elf32_Half e_machine; + Elf32_Word e_version; + Elf32_Addr e_entry; /* Entry point */ + Elf32_Off e_phoff; + Elf32_Off e_shoff; + Elf32_Word e_flags; + Elf32_Half e_ehsize; + Elf32_Half e_phentsize; + Elf32_Half e_phnum; + Elf32_Half e_shentsize; + Elf32_Half e_shnum; + Elf32_Half e_shstrndx; } Elf32_Ehdr; typedef struct elf64_hdr { - unsigned char e_ident[16]; /* ELF "magic number" */ + unsigned char e_ident[16]; /* ELF "magic number" */ Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; - Elf64_Addr e_entry; /* Entry point virtual address */ - Elf64_Off e_phoff; /* Program header table file offset */ - Elf64_Off e_shoff; /* Section header table file offset */ + Elf64_Addr e_entry; /* Entry point virtual address */ + Elf64_Off e_phoff; /* Program header table file offset */ + Elf64_Off e_shoff; /* Section header table file offset */ Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; @@ -1504,107 +1504,107 @@ typedef struct elf64_hdr { /* These constants define the permissions on sections in the program header, p_flags. */ -#define PF_R 0x4 -#define PF_W 0x2 -#define PF_X 0x1 +#define PF_R 0x4 +#define PF_W 0x2 +#define PF_X 0x1 typedef struct elf32_phdr{ - Elf32_Word p_type; - Elf32_Off p_offset; - Elf32_Addr p_vaddr; - Elf32_Addr p_paddr; - Elf32_Word p_filesz; - Elf32_Word p_memsz; - Elf32_Word p_flags; - Elf32_Word p_align; + Elf32_Word p_type; + Elf32_Off p_offset; + Elf32_Addr p_vaddr; + Elf32_Addr p_paddr; + Elf32_Word p_filesz; + Elf32_Word p_memsz; + Elf32_Word p_flags; + Elf32_Word p_align; } Elf32_Phdr; typedef struct elf64_phdr { Elf64_Word p_type; Elf64_Word p_flags; - Elf64_Off p_offset; /* Segment file offset */ - Elf64_Addr p_vaddr; /* Segment virtual address */ - Elf64_Addr p_paddr; /* Segment physical address */ - Elf64_Xword p_filesz; /* Segment size in file */ - Elf64_Xword p_memsz; /* Segment size in memory */ - Elf64_Xword p_align; /* Segment alignment, file & memory */ + Elf64_Off p_offset; /* Segment file offset */ + Elf64_Addr p_vaddr; /* Segment virtual address */ + Elf64_Addr p_paddr; /* Segment physical address */ + Elf64_Xword p_filesz; /* Segment size in file */ + Elf64_Xword p_memsz; /* Segment size in memory */ + Elf64_Xword p_align; /* Segment alignment, file & memory */ } Elf64_Phdr; /* sh_type */ -#define SHT_NULL 0 -#define SHT_PROGBITS 1 -#define SHT_SYMTAB 2 -#define SHT_STRTAB 3 -#define SHT_RELA 4 -#define SHT_HASH 5 -#define SHT_DYNAMIC 6 -#define SHT_NOTE 7 -#define SHT_NOBITS 8 -#define SHT_REL 9 -#define SHT_SHLIB 10 -#define SHT_DYNSYM 11 -#define SHT_NUM 12 -#define SHT_LOPROC 0x70000000 -#define SHT_HIPROC 0x7fffffff -#define SHT_LOUSER 0x80000000 -#define SHT_HIUSER 0xffffffff -#define SHT_MIPS_LIST 0x70000000 -#define SHT_MIPS_CONFLICT 0x70000002 -#define SHT_MIPS_GPTAB 0x70000003 -#define SHT_MIPS_UCODE 0x70000004 +#define SHT_NULL 0 +#define SHT_PROGBITS 1 +#define SHT_SYMTAB 2 +#define SHT_STRTAB 3 +#define SHT_RELA 4 +#define SHT_HASH 5 +#define SHT_DYNAMIC 6 +#define SHT_NOTE 7 +#define 
SHT_NOBITS 8 +#define SHT_REL 9 +#define SHT_SHLIB 10 +#define SHT_DYNSYM 11 +#define SHT_NUM 12 +#define SHT_LOPROC 0x70000000 +#define SHT_HIPROC 0x7fffffff +#define SHT_LOUSER 0x80000000 +#define SHT_HIUSER 0xffffffff +#define SHT_MIPS_LIST 0x70000000 +#define SHT_MIPS_CONFLICT 0x70000002 +#define SHT_MIPS_GPTAB 0x70000003 +#define SHT_MIPS_UCODE 0x70000004 /* sh_flags */ -#define SHF_WRITE 0x1 -#define SHF_ALLOC 0x2 -#define SHF_EXECINSTR 0x4 -#define SHF_MASKPROC 0xf0000000 -#define SHF_MIPS_GPREL 0x10000000 +#define SHF_WRITE 0x1 +#define SHF_ALLOC 0x2 +#define SHF_EXECINSTR 0x4 +#define SHF_MASKPROC 0xf0000000 +#define SHF_MIPS_GPREL 0x10000000 /* special section indexes */ -#define SHN_UNDEF 0 -#define SHN_LORESERVE 0xff00 -#define SHN_LOPROC 0xff00 -#define SHN_HIPROC 0xff1f -#define SHN_ABS 0xfff1 -#define SHN_COMMON 0xfff2 -#define SHN_HIRESERVE 0xffff -#define SHN_MIPS_ACCOMON 0xff00 +#define SHN_UNDEF 0 +#define SHN_LORESERVE 0xff00 +#define SHN_LOPROC 0xff00 +#define SHN_HIPROC 0xff1f +#define SHN_ABS 0xfff1 +#define SHN_COMMON 0xfff2 +#define SHN_HIRESERVE 0xffff +#define SHN_MIPS_ACCOMON 0xff00 typedef struct elf32_shdr { - Elf32_Word sh_name; - Elf32_Word sh_type; - Elf32_Word sh_flags; - Elf32_Addr sh_addr; - Elf32_Off sh_offset; - Elf32_Word sh_size; - Elf32_Word sh_link; - Elf32_Word sh_info; - Elf32_Word sh_addralign; - Elf32_Word sh_entsize; + Elf32_Word sh_name; + Elf32_Word sh_type; + Elf32_Word sh_flags; + Elf32_Addr sh_addr; + Elf32_Off sh_offset; + Elf32_Word sh_size; + Elf32_Word sh_link; + Elf32_Word sh_info; + Elf32_Word sh_addralign; + Elf32_Word sh_entsize; } Elf32_Shdr; typedef struct elf64_shdr { - Elf64_Word sh_name; /* Section name, index in string tbl */ - Elf64_Word sh_type; /* Type of section */ - Elf64_Xword sh_flags; /* Miscellaneous section attributes */ - Elf64_Addr sh_addr; /* Section virtual addr at execution */ - Elf64_Off sh_offset; /* Section file offset */ - Elf64_Xword sh_size; /* Size of section in bytes */ - Elf64_Word sh_link; /* Index of another section */ - Elf64_Word sh_info; /* Additional section information */ - Elf64_Xword sh_addralign; /* Section alignment */ - Elf64_Xword sh_entsize; /* Entry size if section holds table */ + Elf64_Word sh_name; /* Section name, index in string tbl */ + Elf64_Word sh_type; /* Type of section */ + Elf64_Xword sh_flags; /* Miscellaneous section attributes */ + Elf64_Addr sh_addr; /* Section virtual addr at execution */ + Elf64_Off sh_offset; /* Section file offset */ + Elf64_Xword sh_size; /* Size of section in bytes */ + Elf64_Word sh_link; /* Index of another section */ + Elf64_Word sh_info; /* Additional section information */ + Elf64_Xword sh_addralign; /* Section alignment */ + Elf64_Xword sh_entsize; /* Entry size if section holds table */ } Elf64_Shdr; -#define EI_MAG0 0 /* e_ident[] indexes */ -#define EI_MAG1 1 -#define EI_MAG2 2 -#define EI_MAG3 3 -#define EI_CLASS 4 -#define EI_DATA 5 -#define EI_VERSION 6 -#define EI_OSABI 7 -#define EI_PAD 8 +#define EI_MAG0 0 /* e_ident[] indexes */ +#define EI_MAG1 1 +#define EI_MAG2 2 +#define EI_MAG3 3 +#define EI_CLASS 4 +#define EI_DATA 5 +#define EI_VERSION 6 +#define EI_OSABI 7 +#define EI_PAD 8 #define ELFOSABI_NONE 0 /* UNIX System V ABI */ #define ELFOSABI_SYSV 0 /* Alias. */ @@ -1619,56 +1619,57 @@ typedef struct elf64_shdr { #define ELFOSABI_MODESTO 11 /* Novell Modesto. */ #define ELFOSABI_OPENBSD 12 /* OpenBSD. 
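The e_ident[] indexes above, together with the ELFMAG/ELFCLASS/ELFDATA values just below, are enough for a loader's first sanity check on an image. A minimal sketch that restates the needed constants so the fragment stands alone; accepting only 64-bit little-endian objects is an arbitrary choice for the example:

    #include <string.h>

    #define EI_CLASS    4
    #define EI_DATA     5
    #define ELFMAG      "\177ELF"
    #define SELFMAG     4
    #define ELFCLASS64  2
    #define ELFDATA2LSB 1

    /* Sketch: accept only little-endian 64-bit ELF images. */
    static int ident_ok(const unsigned char *e_ident)
    {
        return memcmp(e_ident, ELFMAG, SELFMAG) == 0 &&
               e_ident[EI_CLASS] == ELFCLASS64 &&
               e_ident[EI_DATA] == ELFDATA2LSB;
    }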
*/ #define ELFOSABI_ARM_FDPIC 65 /* ARM FDPIC */ +#define ELFOSABI_XTENSA_FDPIC 65 /* Xtensa FDPIC */ #define ELFOSABI_ARM 97 /* ARM */ #define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */ -#define ELFMAG0 0x7f /* EI_MAG */ -#define ELFMAG1 'E' -#define ELFMAG2 'L' -#define ELFMAG3 'F' -#define ELFMAG "\177ELF" -#define SELFMAG 4 +#define ELFMAG0 0x7f /* EI_MAG */ +#define ELFMAG1 'E' +#define ELFMAG2 'L' +#define ELFMAG3 'F' +#define ELFMAG "\177ELF" +#define SELFMAG 4 -#define ELFCLASSNONE 0 /* EI_CLASS */ -#define ELFCLASS32 1 -#define ELFCLASS64 2 -#define ELFCLASSNUM 3 +#define ELFCLASSNONE 0 /* EI_CLASS */ +#define ELFCLASS32 1 +#define ELFCLASS64 2 +#define ELFCLASSNUM 3 -#define ELFDATANONE 0 /* e_ident[EI_DATA] */ -#define ELFDATA2LSB 1 -#define ELFDATA2MSB 2 +#define ELFDATANONE 0 /* e_ident[EI_DATA] */ +#define ELFDATA2LSB 1 +#define ELFDATA2MSB 2 -#define EV_NONE 0 /* e_version, EI_VERSION */ -#define EV_CURRENT 1 -#define EV_NUM 2 +#define EV_NONE 0 /* e_version, EI_VERSION */ +#define EV_CURRENT 1 +#define EV_NUM 2 /* Notes used in ET_CORE */ -#define NT_PRSTATUS 1 -#define NT_FPREGSET 2 -#define NT_PRFPREG 2 -#define NT_PRPSINFO 3 -#define NT_TASKSTRUCT 4 -#define NT_AUXV 6 -#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */ -#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */ -#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */ -#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */ -#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */ -#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 (lower half) */ -#define NT_S390_PREFIX 0x305 /* s390 prefix register */ -#define NT_S390_CTRS 0x304 /* s390 control registers */ -#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */ -#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */ -#define NT_S390_TIMER 0x301 /* s390 timer register */ -#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */ -#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */ -#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ -#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ -#define NT_ARM_TLS 0x401 /* ARM TLS register */ -#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ -#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ -#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */ -#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension regs */ +#define NT_PRSTATUS 1 +#define NT_FPREGSET 2 +#define NT_PRFPREG 2 +#define NT_PRPSINFO 3 +#define NT_TASKSTRUCT 4 +#define NT_AUXV 6 +#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */ +#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */ +#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */ +#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */ +#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */ +#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 (lower half) */ +#define NT_S390_PREFIX 0x305 /* s390 prefix register */ +#define NT_S390_CTRS 0x304 /* s390 control registers */ +#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */ +#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */ +#define NT_S390_TIMER 0x301 /* s390 timer register */ +#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */ +#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */ +#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ +#define 
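Putting the e_ident[] indexes and magic constants above to work, an image check reads like the following minimal sketch (constants restated so the fragment stands alone; the function name is illustrative):

    #include <string.h>

    #define ELFMAG      "\177ELF"   /* restated from the header above */
    #define SELFMAG     4
    #define EI_CLASS    4
    #define EI_DATA     5
    #define EI_VERSION  6
    #define ELFCLASS64  2
    #define ELFDATA2LSB 1
    #define EV_CURRENT  1

    /* Return non-zero if 'ident' begins a little-endian 64-bit ELF image. */
    static int is_elf64_le(const unsigned char *ident)
    {
        return memcmp(ident, ELFMAG, SELFMAG) == 0 &&
               ident[EI_CLASS] == ELFCLASS64 &&
               ident[EI_DATA] == ELFDATA2LSB &&
               ident[EI_VERSION] == EV_CURRENT;
    }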
NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ +#define NT_ARM_TLS 0x401 /* ARM TLS register */ +#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ +#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ +#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */ +#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension regs */ /* Defined note types for GNU systems. */ @@ -1701,16 +1702,16 @@ typedef struct elf64_shdr { /* Note header in a PT_NOTE section */ typedef struct elf32_note { - Elf32_Word n_namesz; /* Name size */ - Elf32_Word n_descsz; /* Content size */ - Elf32_Word n_type; /* Content type */ + Elf32_Word n_namesz; /* Name size */ + Elf32_Word n_descsz; /* Content size */ + Elf32_Word n_type; /* Content type */ } Elf32_Nhdr; /* Note header in a PT_NOTE section */ typedef struct elf64_note { - Elf64_Word n_namesz; /* Name size */ - Elf64_Word n_descsz; /* Content size */ - Elf64_Word n_type; /* Content type */ + Elf64_Word n_namesz; /* Name size */ + Elf64_Word n_descsz; /* Content size */ + Elf64_Word n_type; /* Content type */ } Elf64_Nhdr; @@ -1735,13 +1736,13 @@ struct elf32_fdpic_loadmap { #ifdef ELF_CLASS #if ELF_CLASS == ELFCLASS32 -#define elfhdr elf32_hdr -#define elf_phdr elf32_phdr -#define elf_note elf32_note -#define elf_shdr elf32_shdr -#define elf_sym elf32_sym -#define elf_addr_t Elf32_Off -#define elf_rela elf32_rela +#define elfhdr elf32_hdr +#define elf_phdr elf32_phdr +#define elf_note elf32_note +#define elf_shdr elf32_shdr +#define elf_sym elf32_sym +#define elf_addr_t Elf32_Off +#define elf_rela elf32_rela #ifdef ELF_USES_RELOCA # define ELF_RELOC Elf32_Rela @@ -1751,13 +1752,13 @@ struct elf32_fdpic_loadmap { #else -#define elfhdr elf64_hdr -#define elf_phdr elf64_phdr -#define elf_note elf64_note -#define elf_shdr elf64_shdr -#define elf_sym elf64_sym -#define elf_addr_t Elf64_Off -#define elf_rela elf64_rela +#define elfhdr elf64_hdr +#define elf_phdr elf64_phdr +#define elf_note elf64_note +#define elf_shdr elf64_shdr +#define elf_sym elf64_sym +#define elf_addr_t Elf64_Off +#define elf_rela elf64_rela #ifdef ELF_USES_RELOCA # define ELF_RELOC Elf64_Rela diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 2eb1176538..78d258af44 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -21,11 +21,12 @@ #include "exec/cpu-common.h" #include "exec/memory.h" +#include "exec/tswap.h" #include "qemu/thread.h" #include "hw/core/cpu.h" #include "qemu/rcu.h" -#define EXCP_INTERRUPT 0x10000 /* async interruption */ +#define EXCP_INTERRUPT 0x10000 /* async interruption */ #define EXCP_HLT 0x10001 /* hlt instruction reached */ #define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */ #define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */ @@ -44,69 +45,6 @@ #define BSWAP_NEEDED #endif -#ifdef BSWAP_NEEDED - -static inline uint16_t tswap16(uint16_t s) -{ - return bswap16(s); -} - -static inline uint32_t tswap32(uint32_t s) -{ - return bswap32(s); -} - -static inline uint64_t tswap64(uint64_t s) -{ - return bswap64(s); -} - -static inline void tswap16s(uint16_t *s) -{ - *s = bswap16(*s); -} - -static inline void tswap32s(uint32_t *s) -{ - *s = bswap32(*s); -} - -static inline void tswap64s(uint64_t *s) -{ - *s = bswap64(*s); -} - -#else - -static inline uint16_t tswap16(uint16_t s) -{ - return s; -} - -static inline uint32_t tswap32(uint32_t s) -{ - return s; -} - -static inline uint64_t tswap64(uint64_t s) -{ - return s; -} - -static inline void tswap16s(uint16_t 
*s) -{ -} - -static inline void tswap32s(uint32_t *s) -{ -} - -static inline void tswap64s(uint64_t *s) -{ -} - -#endif - #if TARGET_LONG_SIZE == 4 #define tswapl(s) tswap32(s) #define tswapls(s) tswap32s((uint32_t *)(s)) @@ -146,12 +84,18 @@ static inline void tswap64s(uint64_t *s) #if defined(CONFIG_USER_ONLY) #include "exec/user/abitypes.h" +#include "exec/user/guest-base.h" -/* On some host systems the guest address space is reserved on the host. - * This allows the guest address space to be offset to a convenient location. - */ -extern uintptr_t guest_base; extern bool have_guest_base; + +/* + * If non-zero, the guest virtual address space is a contiguous subset + * of the host virtual address space, i.e. '-R reserved_va' is in effect + * either from the command-line or by default. The value is the last + * byte of the guest address space e.g. UINT32_MAX. + * + * If zero, the host and guest virtual address spaces are intermingled. + */ extern unsigned long reserved_va; /* @@ -171,7 +115,7 @@ extern unsigned long reserved_va; #define GUEST_ADDR_MAX_ \ ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \ UINT32_MAX : ~0ul) -#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_) +#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_) #else @@ -276,8 +220,8 @@ typedef int (*walk_memory_regions_fn)(void *, target_ulong, int walk_memory_regions(void *, walk_memory_regions_fn); int page_get_flags(target_ulong address); -void page_set_flags(target_ulong start, target_ulong end, int flags); -void page_reset_target_data(target_ulong start, target_ulong end); +void page_set_flags(target_ulong start, target_ulong last, int flags); +void page_reset_target_data(target_ulong start, target_ulong last); int page_check_range(target_ulong start, target_ulong len, int flags); /** diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h index 6feaa40ca7..e5a55ede5f 100644 --- a/include/exec/cpu-common.h +++ b/include/exec/cpu-common.h @@ -32,6 +32,7 @@ extern intptr_t qemu_host_page_mask; #define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size()) /* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */ +extern QemuMutex qemu_cpu_list_lock; void qemu_init_cpu_list(void); void cpu_list_lock(void); void cpu_list_unlock(void); @@ -163,8 +164,6 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, void *ptr, size_t len, bool is_write); /* vl.c */ -extern int singlestep; - -void list_cpus(const char *optarg); +void list_cpus(void); #endif /* CPU_COMMON_H */ diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h index d5a4f30717..a6e0cf1812 100644 --- a/include/exec/cpu-defs.h +++ b/include/exec/cpu-defs.h @@ -36,9 +36,6 @@ #ifndef TARGET_LONG_BITS # error TARGET_LONG_BITS must be defined in cpu-param.h #endif -#ifndef NB_MMU_MODES -# error NB_MMU_MODES must be defined in cpu-param.h -#endif #ifndef TARGET_PHYS_ADDR_SPACE_BITS # error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h #endif @@ -54,28 +51,14 @@ # error TARGET_PAGE_BITS must be defined in cpu-param.h # endif #endif -#ifndef TARGET_TB_PCREL -# define TARGET_TB_PCREL 0 -#endif -#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8) +#include "exec/target_long.h" -/* target_ulong is the type of a virtual address */ -#if TARGET_LONG_SIZE == 4 -typedef int32_t target_long; -typedef uint32_t target_ulong; -#define TARGET_FMT_lx "%08x" -#define TARGET_FMT_ld "%d" -#define TARGET_FMT_lu "%u" -#elif TARGET_LONG_SIZE == 8 -typedef int64_t target_long; -typedef uint64_t 
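The new GUEST_ADDR_MAX spelling uses the GNU C conditional with an omitted middle operand: 'x ? : y' means 'x ? x : y', with 'x' evaluated only once. The '- 1' disappears because reserved_va is now documented as the last byte of the guest address space rather than its size. A sketch of the equivalent expansion:

    /* '(reserved_va ? : GUEST_ADDR_MAX_)' without the GNU extension;
     * 'reserved_va' is itself the last valid guest byte when non-zero. */
    unsigned long guest_addr_max = reserved_va ? reserved_va : GUEST_ADDR_MAX_;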
target_ulong; -#define TARGET_FMT_lx "%016" PRIx64 -#define TARGET_FMT_ld "%" PRId64 -#define TARGET_FMT_lu "%" PRIu64 -#else -#error TARGET_LONG_SIZE undefined -#endif +/* + * Fix the number of mmu modes to 16, which is also the maximum + * supported by the softmmu tlb api. + */ +#define NB_MMU_MODES 16 #if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG) @@ -128,8 +111,11 @@ typedef struct CPUTLBEntry { use the corresponding iotlb value. */ uintptr_t addend; }; - /* padding to get a power of two size */ - uint8_t dummy[1 << CPU_TLB_ENTRY_BITS]; + /* + * Padding to get a power of two size, as well as index + * access to addr_{read,write,code}. + */ + target_ulong addr_idx[(1 << CPU_TLB_ENTRY_BITS) / TARGET_LONG_SIZE]; }; } CPUTLBEntry; diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h index 09b55cc0ee..5939688f69 100644 --- a/include/exec/cpu_ldst.h +++ b/include/exec/cpu_ldst.h @@ -207,43 +207,21 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val, int mmu_idx, uintptr_t ra); uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra); -uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr ptr, - MemOpIdx oi, uintptr_t ra); -uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr ptr, - MemOpIdx oi, uintptr_t ra); -uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr ptr, - MemOpIdx oi, uintptr_t ra); -uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr ptr, - MemOpIdx oi, uintptr_t ra); -uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr ptr, - MemOpIdx oi, uintptr_t ra); -uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr ptr, - MemOpIdx oi, uintptr_t ra); - -Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra); -Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra); +uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra); +uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra); +uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra); +Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra); void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val, MemOpIdx oi, uintptr_t ra); -void cpu_stw_be_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val, - MemOpIdx oi, uintptr_t ra); -void cpu_stl_be_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val, - MemOpIdx oi, uintptr_t ra); -void cpu_stq_be_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val, - MemOpIdx oi, uintptr_t ra); -void cpu_stw_le_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val, - MemOpIdx oi, uintptr_t ra); -void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val, - MemOpIdx oi, uintptr_t ra); -void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val, - MemOpIdx oi, uintptr_t ra); - -void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val, - MemOpIdx oi, uintptr_t ra); -void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val, - MemOpIdx oi, uintptr_t ra); +void cpu_stw_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stl_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stq_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val, + MemOpIdx oi, uintptr_t ra); uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr, uint32_t cmpv, uint32_t newv, @@ -322,15 +300,6 @@ Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, 
target_ulong addr, Int128 cmpv, Int128 newv, MemOpIdx oi, uintptr_t retaddr); -Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val, - MemOpIdx oi, uintptr_t retaddr); -void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val, - MemOpIdx oi, uintptr_t retaddr); - #if defined(CONFIG_USER_ONLY) extern __thread uintptr_t helper_retaddr; @@ -360,13 +329,29 @@ static inline void clear_helper_retaddr(void) /* Needed for TCG_OVERSIZED_GUEST */ #include "tcg/tcg.h" +static inline target_ulong tlb_read_idx(const CPUTLBEntry *entry, + MMUAccessType access_type) +{ + /* Do not rearrange the CPUTLBEntry structure members. */ + QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) != + MMU_DATA_LOAD * TARGET_LONG_SIZE); + QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) != + MMU_DATA_STORE * TARGET_LONG_SIZE); + QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) != + MMU_INST_FETCH * TARGET_LONG_SIZE); + + const target_ulong *ptr = &entry->addr_idx[access_type]; +#if TCG_OVERSIZED_GUEST + return *ptr; +#else + /* ofs might correspond to .addr_write, so use qatomic_read */ + return qatomic_read(ptr); +#endif +} + static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry) { -#if TCG_OVERSIZED_GUEST - return entry->addr_write; -#else - return qatomic_read(&entry->addr_write); -#endif + return tlb_read_idx(entry, MMU_DATA_STORE); } /* Find the TLB index corresponding to the mmu_idx + address pair. */ @@ -400,9 +385,6 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx, # define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra # define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra # define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra -# define cpu_ldw_mmu cpu_ldw_be_mmu -# define cpu_ldl_mmu cpu_ldl_be_mmu -# define cpu_ldq_mmu cpu_ldq_be_mmu # define cpu_stw_data cpu_stw_be_data # define cpu_stl_data cpu_stl_be_data # define cpu_stq_data cpu_stq_be_data @@ -412,9 +394,6 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx, # define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra # define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra # define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra -# define cpu_stw_mmu cpu_stw_be_mmu -# define cpu_stl_mmu cpu_stl_be_mmu -# define cpu_stq_mmu cpu_stq_be_mmu #else # define cpu_lduw_data cpu_lduw_le_data # define cpu_ldsw_data cpu_ldsw_le_data @@ -428,9 +407,6 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx, # define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra # define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra # define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra -# define cpu_ldw_mmu cpu_ldw_le_mmu -# define cpu_ldl_mmu cpu_ldl_le_mmu -# define cpu_ldq_mmu cpu_ldq_le_mmu # define cpu_stw_data cpu_stw_le_data # define cpu_stl_data cpu_stl_le_data # define cpu_stq_data cpu_stq_le_data @@ -440,11 +416,17 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx, # define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra # define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra # define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra -# define cpu_stw_mmu cpu_stw_le_mmu -# define cpu_stl_mmu cpu_stl_le_mmu -# define cpu_stq_mmu cpu_stq_le_mmu #endif +uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra); +uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, 
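The QEMU_BUILD_BUG_ON checks in tlb_read_idx() above pin down why the addr_idx[] member was added to CPUTLBEntry: with the three comparators first in the struct, an MMUAccessType selects the right one by plain array indexing. A simplified stand-alone model of the layout trick (the types are stand-ins, not the real QEMU definitions):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint32_t target_ulong_t;   /* stand-in for target_ulong */
    enum { MMU_DATA_LOAD, MMU_DATA_STORE, MMU_INST_FETCH };

    typedef union {
        struct {
            target_ulong_t addr_read;
            target_ulong_t addr_write;
            target_ulong_t addr_code;
            uintptr_t addend;
        };
        /* The same storage viewed as an array, so the comparator for an
         * access type is an indexed load rather than a switch. */
        target_ulong_t addr_idx[4];
    } TlbEntryModel;

    static target_ulong_t model_read_idx(const TlbEntryModel *e, int access_type)
    {
        _Static_assert(offsetof(TlbEntryModel, addr_write) ==
                       MMU_DATA_STORE * sizeof(target_ulong_t),
                       "field order must match the access-type index");
        return e->addr_idx[access_type];
    }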
uintptr_t ra); +uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra); +uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra); + uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr); uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr); uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr); diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 0e36f4d063..4d2b151986 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -27,9 +27,6 @@ #include "qemu/interval-tree.h" #include "qemu/clang-tsa.h" -/* allow to see translation results - the slowdown should be negligible, so we leave it */ -#define DEBUG_DISAS - /* Page tracking code uses ram addresses in system mode, and virtual addresses in userspace mode. Define tb_page_addr_t to be an appropriate type. */ @@ -447,6 +444,7 @@ static inline void *probe_read(CPUArchState *env, target_ulong addr, int size, * probe_access_flags: * @env: CPUArchState * @addr: guest virtual address to look up + * @size: size of the access * @access_type: read, write or execute permission * @mmu_idx: MMU index to use for lookup * @nonfault: suppress the fault @@ -461,7 +459,7 @@ static inline void *probe_read(CPUArchState *env, target_ulong addr, int size, * Do handle clean pages, so exclude TLB_NOTDIRY from the returned flags. * For simplicity, all "mmio-like" flags are folded to TLB_MMIO. */ -int probe_access_flags(CPUArchState *env, target_ulong addr, +int probe_access_flags(CPUArchState *env, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, uintptr_t retaddr); @@ -474,7 +472,7 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, * and must be consumed or copied immediately, before any further * access or changes to TLB @mmu_idx. */ -int probe_access_full(CPUArchState *env, target_ulong addr, +int probe_access_full(CPUArchState *env, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, CPUTLBEntryFull **pfull, uintptr_t retaddr); @@ -505,22 +503,20 @@ struct tb_tc { }; struct TranslationBlock { -#if !TARGET_TB_PCREL /* * Guest PC corresponding to this block. This must be the true * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or * privilege, must store those bits elsewhere. * - * If TARGET_TB_PCREL, the opcodes for the TranslationBlock are - * written such that the TB is associated only with the physical - * page and may be run in any virtual address context. In this case, - * PC must always be taken from ENV in a target-specific manner. + * If CF_PCREL, the opcodes for the TranslationBlock are written + * such that the TB is associated only with the physical page and + * may be run in any virtual address context. In this case, PC + * must always be taken from ENV in a target-specific manner. * Unwind information is taken as offsets from the page, to be * deposited into the "current" PC. */ target_ulong pc; -#endif /* * Target-specific data associated with the TranslationBlock, e.g.: @@ -545,6 +541,7 @@ struct TranslationBlock { #define CF_INVALID 0x00040000 /* TB is stale. 
Set with @jmp_lock held */ #define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */ #define CF_NOIRQ 0x00100000 /* Generate an uninterruptible TB */ +#define CF_PCREL 0x00200000 /* Opcodes in TB are PC-relative */ #define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */ #define CF_CLUSTER_SHIFT 24 @@ -613,16 +610,6 @@ struct TranslationBlock { uintptr_t jmp_dest[2]; }; -/* Hide the read to avoid ifdefs for TARGET_TB_PCREL. */ -static inline target_ulong tb_pc(const TranslationBlock *tb) -{ -#if TARGET_TB_PCREL - qemu_build_not_reached(); -#else - return tb->pc; -#endif -} - /* Hide the qatomic_read to make code a little easier on the eyes */ static inline uint32_t tb_cflags(const TranslationBlock *tb) { @@ -687,9 +674,8 @@ void tb_invalidate_phys_addr(target_ulong addr); #else void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs); #endif -void tb_flush(CPUState *cpu); void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); -void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end); +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last); void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); /* GETPC is the true target of the return instruction that we'll execute. */ diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h index f667014888..7d743fe1e9 100644 --- a/include/exec/gdbstub.h +++ b/include/exec/gdbstub.h @@ -10,122 +10,7 @@ #define GDB_WATCHPOINT_READ 3 #define GDB_WATCHPOINT_ACCESS 4 -/* For gdb file i/o remote protocol open flags. */ -#define GDB_O_RDONLY 0 -#define GDB_O_WRONLY 1 -#define GDB_O_RDWR 2 -#define GDB_O_APPEND 8 -#define GDB_O_CREAT 0x200 -#define GDB_O_TRUNC 0x400 -#define GDB_O_EXCL 0x800 -/* For gdb file i/o remote protocol errno values */ -#define GDB_EPERM 1 -#define GDB_ENOENT 2 -#define GDB_EINTR 4 -#define GDB_EBADF 9 -#define GDB_EACCES 13 -#define GDB_EFAULT 14 -#define GDB_EBUSY 16 -#define GDB_EEXIST 17 -#define GDB_ENODEV 19 -#define GDB_ENOTDIR 20 -#define GDB_EISDIR 21 -#define GDB_EINVAL 22 -#define GDB_ENFILE 23 -#define GDB_EMFILE 24 -#define GDB_EFBIG 27 -#define GDB_ENOSPC 28 -#define GDB_ESPIPE 29 -#define GDB_EROFS 30 -#define GDB_ENAMETOOLONG 91 -#define GDB_EUNKNOWN 9999 - -/* For gdb file i/o remote protocol lseek whence. */ -#define GDB_SEEK_SET 0 -#define GDB_SEEK_CUR 1 -#define GDB_SEEK_END 2 - -/* For gdb file i/o stat/fstat. 
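With tb_pc() removed and the compile-time TARGET_TB_PCREL knob replaced by the run-time CF_PCREL cflag, code that used to read tb->pc must branch on the flag and recover the PC from CPU state in the PC-relative case. A stand-alone sketch with stand-in types (the pc fields are illustrative):

    #include <stdint.h>

    #define CF_PCREL 0x00200000      /* same bit as the cflag above */

    struct tb_model  { uint64_t pc; uint32_t cflags; };   /* stand-ins */
    struct env_model { uint64_t pc; };

    static uint64_t current_pc(const struct tb_model *tb,
                               const struct env_model *env)
    {
        /* A CF_PCREL block may run at any virtual address, so its 'pc'
         * field is not meaningful; the CPU state is the source of truth. */
        return (tb->cflags & CF_PCREL) ? env->pc : tb->pc;
    }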
*/ -typedef uint32_t gdb_mode_t; -typedef uint32_t gdb_time_t; - -struct gdb_stat { - uint32_t gdb_st_dev; /* device */ - uint32_t gdb_st_ino; /* inode */ - gdb_mode_t gdb_st_mode; /* protection */ - uint32_t gdb_st_nlink; /* number of hard links */ - uint32_t gdb_st_uid; /* user ID of owner */ - uint32_t gdb_st_gid; /* group ID of owner */ - uint32_t gdb_st_rdev; /* device type (if inode device) */ - uint64_t gdb_st_size; /* total size, in bytes */ - uint64_t gdb_st_blksize; /* blocksize for filesystem I/O */ - uint64_t gdb_st_blocks; /* number of blocks allocated */ - gdb_time_t gdb_st_atime; /* time of last access */ - gdb_time_t gdb_st_mtime; /* time of last modification */ - gdb_time_t gdb_st_ctime; /* time of last change */ -} QEMU_PACKED; - -struct gdb_timeval { - gdb_time_t tv_sec; /* second */ - uint64_t tv_usec; /* microsecond */ -} QEMU_PACKED; - -#ifdef NEED_CPU_H -#include "cpu.h" - -typedef void (*gdb_syscall_complete_cb)(CPUState *cpu, uint64_t ret, int err); - -/** - * gdb_do_syscall: - * @cb: function to call when the system call has completed - * @fmt: gdb syscall format string - * ...: list of arguments to interpolate into @fmt - * - * Send a GDB syscall request. This function will return immediately; - * the callback function will be called later when the remote system - * call has completed. - * - * @fmt should be in the 'call-id,parameter,parameter...' format documented - * for the F request packet in the GDB remote protocol. A limited set of - * printf-style format specifiers is supported: - * %x - target_ulong argument printed in hex - * %lx - 64-bit argument printed in hex - * %s - string pointer (target_ulong) and length (int) pair - */ -void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...); -/** - * gdb_do_syscallv: - * @cb: function to call when the system call has completed - * @fmt: gdb syscall format string - * @va: arguments to interpolate into @fmt - * - * As gdb_do_syscall, but taking a va_list rather than a variable - * argument list. - */ -void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va); -int use_gdb_syscalls(void); - -#ifdef CONFIG_USER_ONLY -/** - * gdb_handlesig: yield control to gdb - * @cpu: CPU - * @sig: if non-zero, the signal number which caused us to stop - * - * This function yields control to gdb, when a user-mode-only target - * needs to stop execution. If @sig is non-zero, then we will send a - * stop packet to tell gdb that we have stopped because of this signal. - * - * This function will block (handling protocol requests from gdb) - * until gdb tells us to continue target execution. When it does - * return, the return value is a signal to deliver to the target, - * or 0 if no signal should be delivered, ie the signal that caused - * us to stop should be ignored. - */ -int gdb_handlesig(CPUState *, int); -void gdb_signalled(CPUArchState *, int); -void gdbserver_fork(CPUState *); -#endif /* Get or set a register. Returns the size of the register. */ typedef int (*gdb_get_reg_cb)(CPUArchState *env, GByteArray *buf, int reg); typedef int (*gdb_set_reg_cb)(CPUArchState *env, uint8_t *buf, int reg); @@ -133,89 +18,6 @@ void gdb_register_coprocessor(CPUState *cpu, gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg, int num_regs, const char *xml, int g_pos); -/* - * The GDB remote protocol transfers values in target byte order. As - * the gdbstub may be batching up several register values we always - * append to the array. 
- */ - -static inline int gdb_get_reg8(GByteArray *buf, uint8_t val) -{ - g_byte_array_append(buf, &val, 1); - return 1; -} - -static inline int gdb_get_reg16(GByteArray *buf, uint16_t val) -{ - uint16_t to_word = tswap16(val); - g_byte_array_append(buf, (uint8_t *) &to_word, 2); - return 2; -} - -static inline int gdb_get_reg32(GByteArray *buf, uint32_t val) -{ - uint32_t to_long = tswap32(val); - g_byte_array_append(buf, (uint8_t *) &to_long, 4); - return 4; -} - -static inline int gdb_get_reg64(GByteArray *buf, uint64_t val) -{ - uint64_t to_quad = tswap64(val); - g_byte_array_append(buf, (uint8_t *) &to_quad, 8); - return 8; -} - -static inline int gdb_get_reg128(GByteArray *buf, uint64_t val_hi, - uint64_t val_lo) -{ - uint64_t to_quad; -#if TARGET_BIG_ENDIAN - to_quad = tswap64(val_hi); - g_byte_array_append(buf, (uint8_t *) &to_quad, 8); - to_quad = tswap64(val_lo); - g_byte_array_append(buf, (uint8_t *) &to_quad, 8); -#else - to_quad = tswap64(val_lo); - g_byte_array_append(buf, (uint8_t *) &to_quad, 8); - to_quad = tswap64(val_hi); - g_byte_array_append(buf, (uint8_t *) &to_quad, 8); -#endif - return 16; -} - -static inline int gdb_get_zeroes(GByteArray *array, size_t len) -{ - guint oldlen = array->len; - g_byte_array_set_size(array, oldlen + len); - memset(array->data + oldlen, 0, len); - - return len; -} - -/** - * gdb_get_reg_ptr: get pointer to start of last element - * @len: length of element - * - * This is a helper function to extract the pointer to the last - * element for additional processing. Some front-ends do additional - * dynamic swapping of the elements based on CPU state. - */ -static inline uint8_t * gdb_get_reg_ptr(GByteArray *buf, int len) -{ - return buf->data + buf->len - len; -} - -#if TARGET_LONG_BITS == 64 -#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val) -#define ldtul_p(addr) ldq_p(addr) -#else -#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val) -#define ldtul_p(addr) ldl_p(addr) -#endif - -#endif /* NEED_CPU_H */ - /** * gdbserver_start: start the gdb server * @port_or_device: connection spec for gdb @@ -226,16 +28,6 @@ static inline uint8_t * gdb_get_reg_ptr(GByteArray *buf, int len) */ int gdbserver_start(const char *port_or_device); -/** - * gdb_exit: exit gdb session, reporting inferior status - * @code: exit code reported - * - * This closes the session and sends a final packet to GDB reporting - * the exit status of the program. It also cleans up any connections - * detritus before returning. - */ -void gdb_exit(int code); - void gdb_set_stop_cpu(CPUState *cpu); /** diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h index c57204ddad..f6de79a6b4 100644 --- a/include/exec/gen-icount.h +++ b/include/exec/gen-icount.h @@ -2,7 +2,6 @@ #define GEN_ICOUNT_H #include "exec/exec-all.h" -#include "qemu/timer.h" /* Helpers for instruction counting code generation. 
*/ @@ -10,22 +9,14 @@ static TCGOp *icount_start_insn; static inline void gen_io_start(void) { - TCGv_i32 tmp = tcg_const_i32(1); - tcg_gen_st_i32(tmp, cpu_env, + tcg_gen_st_i32(tcg_constant_i32(1), cpu_env, offsetof(ArchCPU, parent_obj.can_do_io) - offsetof(ArchCPU, env)); - tcg_temp_free_i32(tmp); } static inline void gen_tb_start(const TranslationBlock *tb) { - TCGv_i32 count; - - if (tb_cflags(tb) & CF_USE_ICOUNT) { - count = tcg_temp_local_new_i32(); - } else { - count = tcg_temp_new_i32(); - } + TCGv_i32 count = tcg_temp_new_i32(); tcg_gen_ld_i32(count, cpu_env, offsetof(ArchCPU, neg.icount_decr.u32) - @@ -70,8 +61,6 @@ static inline void gen_tb_start(const TranslationBlock *tb) offsetof(ArchCPU, parent_obj.can_do_io) - offsetof(ArchCPU, env)); } - - tcg_temp_free_i32(count); } static inline void gen_tb_end(const TranslationBlock *tb, int num_insns) diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h index b8d1140dc7..f863a6ef5d 100644 --- a/include/exec/helper-head.h +++ b/include/exec/helper-head.h @@ -18,6 +18,8 @@ #ifndef EXEC_HELPER_HEAD_H #define EXEC_HELPER_HEAD_H +#include "fpu/softfloat-types.h" + #define HELPER(name) glue(helper_, name) /* Some types that make sense in C, but not for TCG. */ diff --git a/include/exec/memop.h b/include/exec/memop.h index 25d027434a..a86dc6743a 100644 --- a/include/exec/memop.h +++ b/include/exec/memop.h @@ -47,8 +47,6 @@ typedef enum MemOp { * MO_UNALN accesses are never checked for alignment. * MO_ALIGN accesses will result in a call to the CPU's * do_unaligned_access hook if the guest address is not aligned. - * The default depends on whether the target CPU defines - * TARGET_ALIGNED_ONLY. * * Some architectures (e.g. ARMv8) need the address which is aligned * to a size more than the size of the memory access. @@ -65,21 +63,51 @@ typedef enum MemOp { */ MO_ASHIFT = 5, MO_AMASK = 0x7 << MO_ASHIFT, -#ifdef NEED_CPU_H -#ifdef TARGET_ALIGNED_ONLY - MO_ALIGN = 0, - MO_UNALN = MO_AMASK, -#else - MO_ALIGN = MO_AMASK, - MO_UNALN = 0, -#endif -#endif + MO_UNALN = 0, MO_ALIGN_2 = 1 << MO_ASHIFT, MO_ALIGN_4 = 2 << MO_ASHIFT, MO_ALIGN_8 = 3 << MO_ASHIFT, MO_ALIGN_16 = 4 << MO_ASHIFT, MO_ALIGN_32 = 5 << MO_ASHIFT, MO_ALIGN_64 = 6 << MO_ASHIFT, + MO_ALIGN = MO_AMASK, + + /* + * MO_ATOM_* describes the atomicity requirements of the operation: + * MO_ATOM_IFALIGN: the operation must be single-copy atomic if it + * is aligned; if unaligned there is no atomicity. + * MO_ATOM_IFALIGN_PAIR: the entire operation may be considered to + * be a pair of half-sized operations which are packed together + * for convenience, with single-copy atomicity on each half if + * the half is aligned. + * This is the atomicity e.g. of Arm pre-FEAT_LSE2 LDP. + * MO_ATOM_WITHIN16: the operation is single-copy atomic, even if it + * is unaligned, so long as it does not cross a 16-byte boundary; + * if it crosses a 16-byte boundary there is no atomicity. + * This is the atomicity e.g. of Arm FEAT_LSE2 LDR. + * MO_ATOM_WITHIN16_PAIR: the entire operation is single-copy atomic, + * if it happens to be within a 16-byte boundary, otherwise it + * devolves to a pair of half-sized MO_ATOM_WITHIN16 operations. + * Depending on alignment, one or both will be single-copy atomic. + * This is the atomicity e.g. of Arm FEAT_LSE2 LDP. + * MO_ATOM_SUBALIGN: the operation is single-copy atomic by parts + * by the alignment. E.g. if the address is 0 mod 4, then each + * 4-byte subobject is single-copy atomic. + * This is the atomicity e.g. of IBM Power. 
+ * MO_ATOM_NONE: the operation has no atomicity requirements. + * + * Note the default (i.e. 0) value is single-copy atomic to the + * size of the operation, if aligned. This retains the behaviour + * from before this field was introduced. + */ + MO_ATOM_SHIFT = 8, + MO_ATOM_IFALIGN = 0 << MO_ATOM_SHIFT, + MO_ATOM_IFALIGN_PAIR = 1 << MO_ATOM_SHIFT, + MO_ATOM_WITHIN16 = 2 << MO_ATOM_SHIFT, + MO_ATOM_WITHIN16_PAIR = 3 << MO_ATOM_SHIFT, + MO_ATOM_SUBALIGN = 4 << MO_ATOM_SHIFT, + MO_ATOM_NONE = 5 << MO_ATOM_SHIFT, + MO_ATOM_MASK = 7 << MO_ATOM_SHIFT, /* Combinations of the above, for ease of use. */ MO_UB = MO_8, diff --git a/include/exec/memory.h b/include/exec/memory.h index 2e602a2fad..c3661b2276 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -767,6 +767,8 @@ struct MemoryRegion { bool is_iommu; RAMBlock *ram_block; Object *owner; + /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */ + DeviceState *dev; const MemoryRegionOps *ops; void *opaque; @@ -791,6 +793,9 @@ struct MemoryRegion { unsigned ioeventfd_nb; MemoryRegionIoeventfd *ioeventfds; RamDiscardManager *rdm; /* Only for RAM */ + + /* For devices designed to perform re-entrant IO into their own IO MRs */ + bool disable_reentrancy_guard; }; struct IOMMUMemoryRegion { @@ -929,8 +934,11 @@ struct MemoryListener { * its @log_sync must be NULL. Vice versa. * * @listener: The #MemoryListener. + * @last_stage: The last stage to synchronize the log during migration. + * The caller should guarantee that the synchronization with true for + * @last_stage is triggered once after all VCPUs have been stopped. */ - void (*log_sync_global)(MemoryListener *listener); + void (*log_sync_global)(MemoryListener *listener, bool last_stage); /** * @log_clear: @@ -1318,6 +1326,7 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr, * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM, * RAM_NORESERVE, * @path: the path in which to allocate the RAM. + * @offset: offset within the file referenced by path * @readonly: true to open @path for reading, false for read/write. * @errp: pointer to Error*, to store an error if it happens. * @@ -1331,6 +1340,7 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, uint64_t align, uint32_t ram_flags, const char *path, + ram_addr_t offset, bool readonly, Error **errp); @@ -1731,6 +1741,16 @@ void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr, void memory_region_notify_iommu_one(IOMMUNotifier *notifier, IOMMUTLBEvent *event); +/** + * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU + * translation that covers the + * range of a notifier + * + * @notifier: the notifier to be notified + */ +void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier); + + /** * memory_region_register_iommu_notifier: register a notifier for changes to * IOMMU translation entries. @@ -2407,8 +2427,10 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr, * memory_global_dirty_log_sync: synchronize the dirty log for all memory * * Synchronizes the dirty page log for all address spaces.
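To make the new atomicity field concrete: a MemOp ORs together a size, an alignment constraint, and an atomicity class, and consumers mask the groups back out. A hedged fragment following the FEAT_LSE2 LDP example from the comment above (MO_128 and MO_SIZE are defined elsewhere in memop.h):

    /* A 16-byte load/store pair with LSE2-style semantics: each half is
     * single-copy atomic while the pair stays within a 16-byte window. */
    MemOp ldp_op = MO_128 | MO_ATOM_WITHIN16_PAIR;

    MemOp size = ldp_op & MO_SIZE;        /* == MO_128 */
    MemOp atom = ldp_op & MO_ATOM_MASK;   /* == MO_ATOM_WITHIN16_PAIR */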
+ * + * @last_stage: whether this is the last stage of live migration */ -void memory_global_dirty_log_sync(void); +void memory_global_dirty_log_sync(bool last_stage); /** * memory_global_dirty_log_sync: synchronize the dirty log for all memory diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h index 5f5506f1cc..3af0168e65 100644 --- a/include/exec/plugin-gen.h +++ b/include/exec/plugin-gen.h @@ -27,7 +27,7 @@ void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db); void plugin_gen_insn_end(void); void plugin_gen_disable_mem_helpers(void); -void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info); +void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info); static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size) { @@ -69,7 +69,7 @@ static inline void plugin_gen_tb_end(CPUState *cpu) static inline void plugin_gen_disable_mem_helpers(void) { } -static inline void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) +static inline void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info) { } static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size) diff --git a/include/exec/poison.h b/include/exec/poison.h index 140daa4a85..256736e11a 100644 --- a/include/exec/poison.h +++ b/include/exec/poison.h @@ -35,7 +35,6 @@ #pragma GCC poison TARGET_TRICORE #pragma GCC poison TARGET_XTENSA -#pragma GCC poison TARGET_ALIGNED_ONLY #pragma GCC poison TARGET_HAS_BFLT #pragma GCC poison TARGET_NAME #pragma GCC poison TARGET_SUPPORTS_MTTCG diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index f4fb6a2111..90a8269290 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -110,6 +110,7 @@ long qemu_maxrampagesize(void); * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM, * RAM_NORESERVE. * @mem_path or @fd: specify the backing file or device + * @offset: Offset into target file * @readonly: true to open @path for reading, false for read/write. * @errp: pointer to Error*, to store an error if it happens * @@ -119,7 +120,7 @@ long qemu_maxrampagesize(void); */ RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, uint32_t ram_flags, const char *mem_path, - bool readonly, Error **errp); + off_t offset, bool readonly, Error **errp); RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, uint32_t ram_flags, int fd, off_t offset, bool readonly, Error **errp); diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h index adc03df59c..69c6a53902 100644 --- a/include/exec/ramblock.h +++ b/include/exec/ramblock.h @@ -40,6 +40,7 @@ struct RAMBlock { QLIST_ENTRY(RAMBlock) next; QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers; int fd; + uint64_t fd_offset; size_t page_size; /* dirty bitmap used during migration */ unsigned long *bmap; diff --git a/include/exec/replay-core.h b/include/exec/replay-core.h new file mode 100644 index 0000000000..244c77acce --- /dev/null +++ b/include/exec/replay-core.h @@ -0,0 +1,80 @@ +/* + * QEMU replay core API + * + * Copyright (c) 2010-2015 Institute for System Programming + * of the Russian Academy of Sciences. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
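The offset plumbing added here means a RAMBlock may begin partway into its backing file. A hedged usage sketch of the extended qemu_ram_alloc_from_file() (path, size, and flags are illustrative; MiB and error_report_err() come from the usual QEMU utility headers):

    Error *err = NULL;
    /* Back 'mr' with 64 MiB of the file, starting 1 MiB in. */
    RAMBlock *rb = qemu_ram_alloc_from_file(64 * MiB, mr, RAM_SHARED,
                                            "/dev/shm/guest-mem", 1 * MiB,
                                            false, &err);
    if (!rb) {
        error_report_err(err);   /* err describes why allocation failed */
    }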
+ */ + +#ifndef EXEC_REPLAY_H +#define EXEC_REPLAY_H + +#include "qapi/qapi-types-replay.h" + +extern ReplayMode replay_mode; + +/* Replay process control functions */ + +/* Enables recording or saving of the event log with the specified parameters */ +void replay_configure(struct QemuOpts *opts); +/* Initializes timers used for snapshotting and enables event recording */ +void replay_start(void); +/* Closes the replay log file and frees other resources. */ +void replay_finish(void); +/* Adds a replay blocker with the specified error description */ +void replay_add_blocker(const char *feature); +/* Returns the name of the replay log file */ +const char *replay_get_filename(void); + +/* + * Start making one step in the backward direction. + * Used by the gdbstub for backwards debugging. + * Returns true on success. + */ +bool replay_reverse_step(void); +/* + * Start searching for the last breakpoint/watchpoint. + * Used by the gdbstub for backwards debugging. + * Returns true if the process started successfully. + */ +bool replay_reverse_continue(void); +/* + * Returns true if the replay module is processing + * a reverse_continue or reverse_step request + */ +bool replay_running_debug(void); +/* Called in reverse debugging mode to collect breakpoint information */ +void replay_breakpoint(void); +/* Called when gdb is attached to the gdbstub */ +void replay_gdb_attached(void); + +/* Interrupts and exceptions */ + +/* Called by the exception handler to write or read exception processing events */ +bool replay_exception(void); +/* + * Used to determine whether an exception is pending. + * Does not proceed to the next event in the log. + */ +bool replay_has_exception(void); +/* + * Called by interrupt handlers to write or read interrupt processing events. + * Returns true if the interrupt should be processed. + */ +bool replay_interrupt(void); +/* + * Tries to read an interrupt event from the file. + * Returns true when an interrupt request is pending. + */ +bool replay_has_interrupt(void); + +/* Processing data from random generators */ + +/* Saves the values from the random number generator */ +void replay_save_random(int ret, void *buf, size_t len); +/* Loads the saved values for the random number generator */ +int replay_read_random(void *buf, size_t len); + +#endif diff --git a/include/exec/target_long.h b/include/exec/target_long.h new file mode 100644 index 0000000000..93c9472971 --- /dev/null +++ b/include/exec/target_long.h @@ -0,0 +1,42 @@ +/* + * Target Long Definitions + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2023 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef _TARGET_LONG_H_ +#define _TARGET_LONG_H_ + +/* + * Usually this should only be included via cpu-defs.h; however, for + * certain cases where we want to build only two versions of a binary + * object we can include it directly. In that case the build system must + * ensure TARGET_LONG_BITS is defined directly.
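The replay_save_random()/replay_read_random() pair follows the general record/replay pattern: record mode logs every nondeterministic input, play mode substitutes the logged value. A sketch assuming the usual REPLAY_MODE_RECORD/REPLAY_MODE_PLAY members of ReplayMode (fill_entropy() is a placeholder, not a real API):

    static void guest_random_bytes(void *buf, size_t len)
    {
        if (replay_mode == REPLAY_MODE_PLAY) {
            replay_read_random(buf, len);          /* consume logged bytes */
        } else {
            fill_entropy(buf, len);                /* placeholder source */
            if (replay_mode == REPLAY_MODE_RECORD) {
                replay_save_random(0, buf, len);   /* log for replay */
            }
        }
    }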
+ */ +#ifndef TARGET_LONG_BITS +#error TARGET_LONG_BITS not defined +#endif + +#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8) + +/* target_ulong is the type of a virtual address */ +#if TARGET_LONG_SIZE == 4 +typedef int32_t target_long; +typedef uint32_t target_ulong; +#define TARGET_FMT_lx "%08x" +#define TARGET_FMT_ld "%d" +#define TARGET_FMT_lu "%u" +#elif TARGET_LONG_SIZE == 8 +typedef int64_t target_long; +typedef uint64_t target_ulong; +#define TARGET_FMT_lx "%016" PRIx64 +#define TARGET_FMT_ld "%" PRId64 +#define TARGET_FMT_lu "%" PRIu64 +#else +#error TARGET_LONG_SIZE undefined +#endif + +#endif /* _TARGET_LONG_H_ */ diff --git a/include/exec/target_page.h b/include/exec/target_page.h index 96726c36a4..bbf37aea17 100644 --- a/include/exec/target_page.h +++ b/include/exec/target_page.h @@ -18,4 +18,5 @@ size_t qemu_target_page_size(void); int qemu_target_page_bits(void); int qemu_target_page_bits_min(void); +size_t qemu_target_pages_to_MiB(size_t pages); #endif diff --git a/include/exec/tb-flush.h b/include/exec/tb-flush.h new file mode 100644 index 0000000000..d92d06565b --- /dev/null +++ b/include/exec/tb-flush.h @@ -0,0 +1,26 @@ +/* + * tb-flush prototype for use by the rest of the system. + * + * Copyright (c) 2022 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef _TB_FLUSH_H_ +#define _TB_FLUSH_H_ + +/** + * tb_flush() - flush all translation blocks + * @cs: CPUState (must be valid, but treated as anonymous pointer) + * + * Used to flush all the translation blocks in the system. Sometimes + * it is simpler to flush everything than work out which individual + * translations are now invalid and ensure they are not called + * anymore. + * + * tb_flush() takes care of running the flush in an exclusive context + * if it is not already running in one. This means no guest code will + * run until this completes. + */ +void tb_flush(CPUState *cs); + +#endif /* _TB_FLUSH_H_ */ diff --git a/include/exec/translator.h b/include/exec/translator.h index af2ff95cd5..797fef7515 100644 --- a/include/exec/translator.h +++ b/include/exec/translator.h @@ -37,7 +37,7 @@ * This function must be provided by the target, which should create * the target-specific DisasContext, and then invoke translator_loop. */ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc); /** @@ -146,12 +146,10 @@ typedef struct TranslatorOps { * - When single-stepping is enabled (system-wide or on the current vCPU). * - When too many instructions have been translated. */ -void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns, +void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc, const TranslatorOps *ops, DisasContextBase *db); -void translator_loop_temp_check(DisasContextBase *db); - /** * translator_use_goto_tb * @db: Disassembly context diff --git a/include/exec/tswap.h b/include/exec/tswap.h new file mode 100644 index 0000000000..68944a880b --- /dev/null +++ b/include/exec/tswap.h @@ -0,0 +1,72 @@ +/* + * Macros for swapping a value if the endianness is different + * between the target and the host. + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef TSWAP_H +#define TSWAP_H + +#include "hw/core/cpu.h" +#include "qemu/bswap.h" + +/* + * If we're in target-specific code, we can hard-code the swapping + * condition; otherwise we have to do (slower) run-time checks.
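The TARGET_FMT_* macros exist so format strings track TARGET_LONG_BITS automatically; string-literal concatenation splices them into an ordinary printf-style format. A small sketch (qemu_log() is the usual sink; the function and field names are illustrative):

    static void log_mmu_fault(target_ulong pc, target_ulong badaddr)
    {
        /* Expands to "%08x" or "%016" PRIx64 depending on the target. */
        qemu_log("mmu fault: pc=" TARGET_FMT_lx " addr=" TARGET_FMT_lx "\n",
                 pc, badaddr);
    }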
+ */ +#ifdef NEED_CPU_H +#define target_needs_bswap() (HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN) +#else +#define target_needs_bswap() (target_words_bigendian() != HOST_BIG_ENDIAN) +#endif + +static inline uint16_t tswap16(uint16_t s) +{ + if (target_needs_bswap()) { + return bswap16(s); + } else { + return s; + } +} + +static inline uint32_t tswap32(uint32_t s) +{ + if (target_needs_bswap()) { + return bswap32(s); + } else { + return s; + } +} + +static inline uint64_t tswap64(uint64_t s) +{ + if (target_needs_bswap()) { + return bswap64(s); + } else { + return s; + } +} + +static inline void tswap16s(uint16_t *s) +{ + if (target_needs_bswap()) { + *s = bswap16(*s); + } +} + +static inline void tswap32s(uint32_t *s) +{ + if (target_needs_bswap()) { + *s = bswap32(*s); + } +} + +static inline void tswap64s(uint64_t *s) +{ + if (target_needs_bswap()) { + *s = bswap64(*s); + } +} + +#endif /* TSWAP_H */ diff --git a/include/exec/user/guest-base.h b/include/exec/user/guest-base.h new file mode 100644 index 0000000000..afe2ab7fbb --- /dev/null +++ b/include/exec/user/guest-base.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Declaration of guest_base. + * Copyright (c) 2003 Fabrice Bellard + */ + +#ifndef EXEC_USER_GUEST_BASE_H +#define EXEC_USER_GUEST_BASE_H + +extern uintptr_t guest_base; + +#endif diff --git a/include/gdbstub/helpers.h b/include/gdbstub/helpers.h new file mode 100644 index 0000000000..c573aef2dc --- /dev/null +++ b/include/gdbstub/helpers.h @@ -0,0 +1,103 @@ +/* + * gdbstub helpers + * + * These are all used by the various frontends and have to be host + * aware to ensure things are stored in target order. + * + * Copyright (c) 2022 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef _GDBSTUB_HELPERS_H_ +#define _GDBSTUB_HELPERS_H_ + +#ifdef NEED_CPU_H +#include "cpu.h" + +/* + * The GDB remote protocol transfers values in target byte order. As + * the gdbstub may be batching up several register values we always + * append to the array.
+ */ + +static inline int gdb_get_reg8(GByteArray *buf, uint8_t val) +{ + g_byte_array_append(buf, &val, 1); + return 1; +} + +static inline int gdb_get_reg16(GByteArray *buf, uint16_t val) +{ + uint16_t to_word = tswap16(val); + g_byte_array_append(buf, (uint8_t *) &to_word, 2); + return 2; +} + +static inline int gdb_get_reg32(GByteArray *buf, uint32_t val) +{ + uint32_t to_long = tswap32(val); + g_byte_array_append(buf, (uint8_t *) &to_long, 4); + return 4; +} + +static inline int gdb_get_reg64(GByteArray *buf, uint64_t val) +{ + uint64_t to_quad = tswap64(val); + g_byte_array_append(buf, (uint8_t *) &to_quad, 8); + return 8; +} + +static inline int gdb_get_reg128(GByteArray *buf, uint64_t val_hi, + uint64_t val_lo) +{ + uint64_t to_quad; +#if TARGET_BIG_ENDIAN + to_quad = tswap64(val_hi); + g_byte_array_append(buf, (uint8_t *) &to_quad, 8); + to_quad = tswap64(val_lo); + g_byte_array_append(buf, (uint8_t *) &to_quad, 8); +#else + to_quad = tswap64(val_lo); + g_byte_array_append(buf, (uint8_t *) &to_quad, 8); + to_quad = tswap64(val_hi); + g_byte_array_append(buf, (uint8_t *) &to_quad, 8); +#endif + return 16; +} + +static inline int gdb_get_zeroes(GByteArray *array, size_t len) +{ + guint oldlen = array->len; + g_byte_array_set_size(array, oldlen + len); + memset(array->data + oldlen, 0, len); + + return len; +} + +/** + * gdb_get_reg_ptr: get pointer to start of last element + * @len: length of element + * + * This is a helper function to extract the pointer to the last + * element for additional processing. Some front-ends do additional + * dynamic swapping of the elements based on CPU state. + */ +static inline uint8_t *gdb_get_reg_ptr(GByteArray *buf, int len) +{ + return buf->data + buf->len - len; +} + +#if TARGET_LONG_BITS == 64 +#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val) +#define ldtul_p(addr) ldq_p(addr) +#else +#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val) +#define ldtul_p(addr) ldl_p(addr) +#endif + +#else +#error "gdbstub helpers should only be included by target specific code" +#endif + +#endif /* _GDBSTUB_HELPERS_H_ */ diff --git a/include/gdbstub/syscalls.h b/include/gdbstub/syscalls.h new file mode 100644 index 0000000000..243eaf8ce4 --- /dev/null +++ b/include/gdbstub/syscalls.h @@ -0,0 +1,113 @@ +/* + * GDB Syscall support + * + * Copyright (c) 2023 Linaro Ltd + * + * SPDX-License-Identifier: LGPL-2.0+ + */ + +#ifndef _SYSCALLS_H_ +#define _SYSCALLS_H_ + +/* For gdb file i/o remote protocol open flags. */ +#define GDB_O_RDONLY 0 +#define GDB_O_WRONLY 1 +#define GDB_O_RDWR 2 +#define GDB_O_APPEND 8 +#define GDB_O_CREAT 0x200 +#define GDB_O_TRUNC 0x400 +#define GDB_O_EXCL 0x800 + +/* For gdb file i/o remote protocol errno values */ +#define GDB_EPERM 1 +#define GDB_ENOENT 2 +#define GDB_EINTR 4 +#define GDB_EBADF 9 +#define GDB_EACCES 13 +#define GDB_EFAULT 14 +#define GDB_EBUSY 16 +#define GDB_EEXIST 17 +#define GDB_ENODEV 19 +#define GDB_ENOTDIR 20 +#define GDB_EISDIR 21 +#define GDB_EINVAL 22 +#define GDB_ENFILE 23 +#define GDB_EMFILE 24 +#define GDB_EFBIG 27 +#define GDB_ENOSPC 28 +#define GDB_ESPIPE 29 +#define GDB_EROFS 30 +#define GDB_ENAMETOOLONG 91 +#define GDB_EUNKNOWN 9999 + +/* For gdb file i/o remote protocol lseek whence. */ +#define GDB_SEEK_SET 0 +#define GDB_SEEK_CUR 1 +#define GDB_SEEK_END 2 + +/* For gdb file i/o stat/fstat. 
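These helpers are intended to be called from a target's gdb_get_reg_cb hook, appending one register in target byte order and returning the number of bytes contributed. A hedged sketch (the regs[] field and register count are illustrative, not a real CPU state layout):

    static int demo_gdb_read_register(CPUArchState *env, GByteArray *buf, int n)
    {
        if (n < 16) {                                /* illustrative count */
            return gdb_get_regl(buf, env->regs[n]);  /* 32/64-bit per target */
        }
        return 0;                                    /* unknown: no bytes */
    }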
*/ +typedef uint32_t gdb_mode_t; +typedef uint32_t gdb_time_t; + +struct gdb_stat { + uint32_t gdb_st_dev; /* device */ + uint32_t gdb_st_ino; /* inode */ + gdb_mode_t gdb_st_mode; /* protection */ + uint32_t gdb_st_nlink; /* number of hard links */ + uint32_t gdb_st_uid; /* user ID of owner */ + uint32_t gdb_st_gid; /* group ID of owner */ + uint32_t gdb_st_rdev; /* device type (if inode device) */ + uint64_t gdb_st_size; /* total size, in bytes */ + uint64_t gdb_st_blksize; /* blocksize for filesystem I/O */ + uint64_t gdb_st_blocks; /* number of blocks allocated */ + gdb_time_t gdb_st_atime; /* time of last access */ + gdb_time_t gdb_st_mtime; /* time of last modification */ + gdb_time_t gdb_st_ctime; /* time of last change */ +} QEMU_PACKED; + +struct gdb_timeval { + gdb_time_t tv_sec; /* second */ + uint64_t tv_usec; /* microsecond */ +} QEMU_PACKED; + +typedef void (*gdb_syscall_complete_cb)(CPUState *cpu, uint64_t ret, int err); + +/** + * gdb_do_syscall: + * @cb: function to call when the system call has completed + * @fmt: gdb syscall format string + * ...: list of arguments to interpolate into @fmt + * + * Send a GDB syscall request. This function will return immediately; + * the callback function will be called later when the remote system + * call has completed. + * + * @fmt should be in the 'call-id,parameter,parameter...' format documented + * for the F request packet in the GDB remote protocol. A limited set of + * printf-style format specifiers is supported: + * %x - target_ulong argument printed in hex + * %lx - 64-bit argument printed in hex + * %s - string pointer (target_ulong) and length (int) pair + */ +void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...); + +/** + * use_gdb_syscalls() - report if GDB should be used for syscalls + * + * This is mostly driven by the semihosting mode the user configures + * but assuming GDB is allowed by that we report true if GDB is + * connected to the stub. + */ +int use_gdb_syscalls(void); + +/** + * gdb_exit: exit gdb session, reporting inferior status + * @code: exit code reported + * + * This closes the session and sends a final packet to GDB reporting + * the exit status of the program. It also cleans up any connections + * detritus before returning. + */ +void gdb_exit(int code); + +#endif /* _SYSCALLS_H_ */ diff --git a/include/gdbstub/user.h b/include/gdbstub/user.h new file mode 100644 index 0000000000..d392e510c5 --- /dev/null +++ b/include/gdbstub/user.h @@ -0,0 +1,43 @@ +/* + * gdbstub user-mode only APIs + * + * Copyright (c) 2022 Linaro Ltd + * + * SPDX-License-Identifier: LGPL-2.0+ + */ + +#ifndef GDBSTUB_USER_H +#define GDBSTUB_USER_H + +/** + * gdb_handlesig() - yield control to gdb + * @cpu: CPU + * @sig: if non-zero, the signal number which caused us to stop + * + * This function yields control to gdb, when a user-mode-only target + * needs to stop execution. If @sig is non-zero, then we will send a + * stop packet to tell gdb that we have stopped because of this signal. + * + * This function will block (handling protocol requests from gdb) + * until gdb tells us to continue target execution. When it does + * return, the return value is a signal to deliver to the target, + * or 0 if no signal should be delivered, ie the signal that caused + * us to stop should be ignored. 
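Tying the pieces together, a semihosted file open forwards the guest's path and flags through the F-packet format documented above. A hedged sketch (argument names are illustrative; GDB_O_RDONLY comes from the constants above):

    static void remote_open_readonly(gdb_syscall_complete_cb complete_cb,
                                     target_ulong path_addr, int path_len)
    {
        /* "%s" emits a (pointer, length) pair; "%x" a target_ulong in hex. */
        gdb_do_syscall(complete_cb, "open,%s,%x,%x",
                       path_addr, path_len,
                       (target_ulong)GDB_O_RDONLY, (target_ulong)0);
    }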
+ */ +int gdb_handlesig(CPUState *, int); + +/** + * gdb_signalled() - inform remote gdb of sig exit + * @as: current CPUArchState + * @sig: signal number + */ +void gdb_signalled(CPUArchState *as, int sig); + +/** + * gdbserver_fork() - disable gdb stub for child processes. + * @cs: CPU + */ +void gdbserver_fork(CPUState *cs); + + +#endif /* GDBSTUB_USER_H */ diff --git a/include/hw/acpi/acpi.h b/include/hw/acpi/acpi.h index cc0d370745..e0e51e85b4 100644 --- a/include/hw/acpi/acpi.h +++ b/include/hw/acpi/acpi.h @@ -66,7 +66,7 @@ #define ACPI_BITMASK_POWER_BUTTON_STATUS 0x0100 #define ACPI_BITMASK_SLEEP_BUTTON_STATUS 0x0200 #define ACPI_BITMASK_RT_CLOCK_STATUS 0x0400 -#define ACPI_BITMASK_PCIEXP_WAKE_STATUS 0x4000 /* ACPI 3.0 */ +#define ACPI_BITMASK_PCIEXP_WAKE_STATUS 0x4000 /* ACPI 3.0 */ #define ACPI_BITMASK_WAKE_STATUS 0x8000 #define ACPI_BITMASK_ALL_FIXED_STATUS (\ @@ -84,7 +84,7 @@ #define ACPI_BITMASK_POWER_BUTTON_ENABLE 0x0100 #define ACPI_BITMASK_SLEEP_BUTTON_ENABLE 0x0200 #define ACPI_BITMASK_RT_CLOCK_ENABLE 0x0400 -#define ACPI_BITMASK_PCIEXP_WAKE_DISABLE 0x4000 /* ACPI 3.0 */ +#define ACPI_BITMASK_PCIEXP_WAKE_DISABLE 0x4000 /* ACPI 3.0 */ #define ACPI_BITMASK_PM1_COMMON_ENABLED ( \ ACPI_BITMASK_RT_CLOCK_ENABLE | \ diff --git a/include/hw/acpi/ich9.h b/include/hw/acpi/ich9.h index d41866a229..2faf7f0cae 100644 --- a/include/hw/acpi/ich9.h +++ b/include/hw/acpi/ich9.h @@ -64,7 +64,7 @@ typedef struct ICH9LPCPMRegs { uint8_t disable_s3; uint8_t disable_s4; uint8_t s4_val; - uint8_t smm_enabled; + bool smm_enabled; bool smm_compat; bool enable_tco; TCOIORegs tco_regs; @@ -72,9 +72,7 @@ typedef struct ICH9LPCPMRegs { #define ACPI_PM_PROP_TCO_ENABLED "enable_tco" -void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, - bool smm_enabled, - qemu_irq sci_irq); +void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, qemu_irq sci_irq); void ich9_pm_iospace_update(ICH9LPCPMRegs *pm, uint32_t pm_io_base); extern const VMStateDescription vmstate_ich9_pm; @@ -89,6 +87,7 @@ void ich9_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); void ich9_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); +bool ich9_pm_is_hotpluggable_bus(HotplugHandler *hotplug_dev, BusState *bus); void ich9_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list); #endif /* HW_ACPI_ICH9_H */ diff --git a/include/hw/acpi/pcihp.h b/include/hw/acpi/pcihp.h index 7e268c2c9c..ef59810c17 100644 --- a/include/hw/acpi/pcihp.h +++ b/include/hw/acpi/pcihp.h @@ -49,15 +49,16 @@ typedef struct AcpiPciHpState { uint32_t acpi_index; PCIBus *root; MemoryRegion io; - bool legacy_piix; uint16_t io_base; uint16_t io_len; + bool use_acpi_hotplug_bridge; + bool use_acpi_root_pci_hotplug; } AcpiPciHpState; void acpi_pcihp_init(Object *owner, AcpiPciHpState *, PCIBus *root, - MemoryRegion *address_space_io, bool bridges_enabled, - uint16_t io_base); + MemoryRegion *address_space_io, uint16_t io_base); +bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus); void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, @@ -69,7 +70,9 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev, Error **errp); /* Called on reset */ -void acpi_pcihp_reset(AcpiPciHpState *s, bool acpihp_root_off); +void acpi_pcihp_reset(AcpiPciHpState *s); + +void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus); extern const 
VMStateDescription vmstate_acpi_pcihp_pci_status; diff --git a/include/hw/acpi/piix4.h b/include/hw/acpi/piix4.h index be1f8ea80e..eb1c122d80 100644 --- a/include/hw/acpi/piix4.h +++ b/include/hw/acpi/piix4.h @@ -57,8 +57,6 @@ struct PIIX4PMState { Notifier powerdown_notifier; AcpiPciHpState acpi_pci_hotplug; - bool use_acpi_hotplug_bridge; - bool use_acpi_root_pci_hotplug; bool not_migrate_acpi_index; uint8_t disable_s3; diff --git a/include/hw/acpi/tpm.h b/include/hw/acpi/tpm.h index 559ba6906c..579c45f5ba 100644 --- a/include/hw/acpi/tpm.h +++ b/include/hw/acpi/tpm.h @@ -93,6 +93,7 @@ #define TPM_TIS_CAP_DATA_TRANSFER_64B (3 << 9) #define TPM_TIS_CAP_DATA_TRANSFER_LEGACY (0 << 9) #define TPM_TIS_CAP_BURST_COUNT_DYNAMIC (0 << 8) +#define TPM_TIS_CAP_BURST_COUNT_STATIC (1 << 8) #define TPM_TIS_CAP_INTERRUPT_LOW_LEVEL (1 << 4) /* support is mandatory */ #define TPM_TIS_CAPABILITIES_SUPPORTED1_3 \ (TPM_TIS_CAP_INTERRUPT_LOW_LEVEL | \ @@ -209,6 +210,46 @@ REG32(CRB_DATA_BUFFER, 0x80) #define TPM_PPI_FUNC_ALLOWED_USR_NOT_REQ (4 << 0) #define TPM_PPI_FUNC_MASK (7 << 0) +/* TPM TIS I2C registers */ +#define TPM_I2C_REG_LOC_SEL 0x00 +#define TPM_I2C_REG_ACCESS 0x04 +#define TPM_I2C_REG_INT_ENABLE 0x08 +#define TPM_I2C_REG_INT_CAPABILITY 0x14 +#define TPM_I2C_REG_STS 0x18 +#define TPM_I2C_REG_DATA_FIFO 0x24 +#define TPM_I2C_REG_INTF_CAPABILITY 0x30 +#define TPM_I2C_REG_I2C_DEV_ADDRESS 0x38 +#define TPM_I2C_REG_DATA_CSUM_ENABLE 0x40 +#define TPM_I2C_REG_DATA_CSUM_GET 0x44 +#define TPM_I2C_REG_DID_VID 0x48 +#define TPM_I2C_REG_RID 0x4c +#define TPM_I2C_REG_UNKNOWN 0xff + +/* I2C specific interface capabilities */ +#define TPM_I2C_CAP_INTERFACE_TYPE (0x2 << 0) /* FIFO interface */ +#define TPM_I2C_CAP_INTERFACE_VER (0x0 << 4) /* TCG I2C intf 1.0 */ +#define TPM_I2C_CAP_TPM2_FAMILY (0x1 << 7) /* TPM 2.0 family. */ +#define TPM_I2C_CAP_DEV_ADDR_CHANGE (0x0 << 27) /* No dev addr chng */ +#define TPM_I2C_CAP_BURST_COUNT_STATIC (0x1 << 29) /* Burst count static */ +#define TPM_I2C_CAP_LOCALITY_CAP (0x1 << 25) /* 0-5 locality */ +#define TPM_I2C_CAP_BUS_SPEED (3 << 21) /* std and fast mode */ + +/* + * TPM_I2C_STS masks for read/writing bits from/to TIS + * TPM_STS mask for read bits 31:26 must be zero + */ +#define TPM_I2C_STS_READ_MASK 0x00ffffdd +#define TPM_I2C_STS_WRITE_MASK 0x03000062 + +/* Checksum enabled. */ +#define TPM_DATA_CSUM_ENABLED 0x1 + +/* + * TPM_I2C_INT_ENABLE mask. Linux kernel does not support + * interrupts hence setting it to 0. 
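+ *
+ * As an illustration (a sketch only; the s->int_enable field below is
+ * hypothetical), a write handler masking with the value defined here
+ * always stores zero:
+ *
+ *     s->int_enable = value & TPM_I2C_INT_ENABLE_MASK;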
+ */ +#define TPM_I2C_INT_ENABLE_MASK 0x0 + void tpm_build_ppi_acpi(TPMIf *tpm, Aml *dev); #endif /* CONFIG_TPM */ diff --git a/include/hw/arm/allwinner-a10.h b/include/hw/arm/allwinner-a10.h index 095afb225d..cd1465c613 100644 --- a/include/hw/arm/allwinner-a10.h +++ b/include/hw/arm/allwinner-a10.h @@ -13,6 +13,7 @@ #include "hw/misc/allwinner-a10-ccm.h" #include "hw/misc/allwinner-a10-dramc.h" #include "hw/i2c/allwinner-i2c.h" +#include "hw/watchdog/allwinner-wdt.h" #include "sysemu/block-backend.h" #include "target/arm/cpu.h" @@ -41,6 +42,7 @@ struct AwA10State { AwSdHostState mmc0; AWI2CState i2c0; AwRtcState rtc; + AwWdtState wdt; MemoryRegion sram_a; EHCISysBusState ehci[AW_A10_NUM_USB]; OHCISysBusState ohci[AW_A10_NUM_USB]; diff --git a/include/hw/arm/allwinner-h3.h b/include/hw/arm/allwinner-h3.h index 1d7ce20589..f15d6d7cc7 100644 --- a/include/hw/arm/allwinner-h3.h +++ b/include/hw/arm/allwinner-h3.h @@ -48,6 +48,7 @@ #include "hw/net/allwinner-sun8i-emac.h" #include "hw/rtc/allwinner-rtc.h" #include "hw/i2c/allwinner-i2c.h" +#include "hw/watchdog/allwinner-wdt.h" #include "target/arm/cpu.h" #include "sysemu/block-backend.h" @@ -84,6 +85,8 @@ enum { AW_H3_DEV_UART3, AW_H3_DEV_EMAC, AW_H3_DEV_TWI0, + AW_H3_DEV_TWI1, + AW_H3_DEV_TWI2, AW_H3_DEV_DRAMCOM, AW_H3_DEV_DRAMCTL, AW_H3_DEV_DRAMPHY, @@ -93,7 +96,9 @@ enum { AW_H3_DEV_GIC_VCPU, AW_H3_DEV_RTC, AW_H3_DEV_CPUCFG, - AW_H3_DEV_SDRAM + AW_H3_DEV_R_TWI, + AW_H3_DEV_SDRAM, + AW_H3_DEV_WDT }; /** Total number of CPU cores in the H3 SoC */ @@ -133,8 +138,12 @@ struct AwH3State { AwSidState sid; AwSdHostState mmc0; AWI2CState i2c0; + AWI2CState i2c1; + AWI2CState i2c2; + AWI2CState r_twi; AwSun8iEmacState emac; AwRtcState rtc; + AwWdtState wdt; GICState gic; MemoryRegion sram_a1; MemoryRegion sram_a2; diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h index bd1e03e78a..8adff70072 100644 --- a/include/hw/arm/aspeed_soc.h +++ b/include/hw/arm/aspeed_soc.h @@ -58,6 +58,8 @@ struct AspeedSoCState { MemoryRegion *dram_mr; MemoryRegion dram_container; MemoryRegion sram; + MemoryRegion spi_boot_container; + MemoryRegion spi_boot; AspeedVICState vic; AspeedRtcState rtc; AspeedTimerCtrlState timerctrl; @@ -120,6 +122,7 @@ struct AspeedSoCClass { enum { + ASPEED_DEV_SPI_BOOT, ASPEED_DEV_IOMEM, ASPEED_DEV_UART1, ASPEED_DEV_UART2, @@ -190,6 +193,8 @@ enum { ASPEED_DEV_JTAG1, }; +#define ASPEED_SOC_SPI_BOOT_ADDR 0x0 + qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev); bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp); void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr); diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h index f18cc3064f..80c492d742 100644 --- a/include/hw/arm/boot.h +++ b/include/hw/arm/boot.h @@ -183,4 +183,53 @@ void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu, const struct arm_boot_info *info, hwaddr mvbar_addr); +typedef enum { + FIXUP_NONE = 0, /* do nothing */ + FIXUP_TERMINATOR, /* end of insns */ + FIXUP_BOARDID, /* overwrite with board ID number */ + FIXUP_BOARD_SETUP, /* overwrite with board specific setup code address */ + FIXUP_ARGPTR_LO, /* overwrite with pointer to kernel args */ + FIXUP_ARGPTR_HI, /* overwrite with pointer to kernel args (high half) */ + FIXUP_ENTRYPOINT_LO, /* overwrite with kernel entry point */ + FIXUP_ENTRYPOINT_HI, /* overwrite with kernel entry point (high half) */ + FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */ + FIXUP_BOOTREG, /* overwrite with boot register address */ + FIXUP_DSB, /* overwrite with 
correct DSB insn for cpu */ + FIXUP_MAX, +} FixupType; + +typedef struct ARMInsnFixup { + uint32_t insn; + FixupType fixup; +} ARMInsnFixup; + +/** + * arm_write_bootloader - write a bootloader to guest memory + * @name: name of the bootloader blob + * @as: AddressSpace to write the bootloader + * @addr: guest address to write it + * @insns: the blob to be loaded + * @fixupcontext: context to be used for any fixups in @insns + * + * Write a bootloader to guest memory at address @addr in the address + * space @as. @name is the name to use for the resulting ROM blob, so + * it should be unique in the system and reasonably identifiable for debugging. + * + * @insns must be an array of ARMInsnFixup structs, each of which has + * one 32-bit value to be written to the guest memory, and a fixup to be + * applied to the value. FIXUP_NONE (do nothing) is value 0, so effectively + * the fixup is optional when writing a struct initializer. + * The final entry in the array must be { 0, FIXUP_TERMINATOR }. + * + * All other supported fixup types have the semantics "ignore insn + * and instead use the value from the array element @fixupcontext[fixup]". + * The caller should therefore provide @fixupcontext as an array of + * size FIXUP_MAX whose elements have been initialized for at least + * the entries that @insns refers to. + */ +void arm_write_bootloader(const char *name, + AddressSpace *as, hwaddr addr, + const ARMInsnFixup *insns, + const uint32_t *fixupcontext); + #endif /* HW_ARM_BOOT_H */ diff --git a/include/hw/arm/fsl-imx6.h b/include/hw/arm/fsl-imx6.h index 83291457cf..5b4d48da08 100644 --- a/include/hw/arm/fsl-imx6.h +++ b/include/hw/arm/fsl-imx6.h @@ -21,6 +21,7 @@ #include "hw/cpu/a9mpcore.h" #include "hw/misc/imx6_ccm.h" #include "hw/misc/imx6_src.h" +#include "hw/misc/imx7_snvs.h" #include "hw/watchdog/wdt_imx2.h" #include "hw/char/imx_serial.h" #include "hw/timer/imx_gpt.h" @@ -59,6 +60,7 @@ struct FslIMX6State { A9MPPrivState a9mpcore; IMX6CCMState ccm; IMX6SRCState src; + IMX7SNVSState snvs; IMXSerialState uart[FSL_IMX6_NUM_UARTS]; IMXGPTState gpt; IMXEPITState epit[FSL_IMX6_NUM_EPITS]; diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h index 1952cb984d..9ee15ae38d 100644 --- a/include/hw/arm/fsl-imx6ul.h +++ b/include/hw/arm/fsl-imx6ul.h @@ -89,6 +89,7 @@ struct FslIMX6ULState { MemoryRegion ocram_alias; uint32_t phy_num[FSL_IMX6UL_NUM_ETHS]; + bool phy_connected[FSL_IMX6UL_NUM_ETHS]; }; enum FslIMX6ULMemoryMap { diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h index 355bd8ea83..54ea2f0890 100644 --- a/include/hw/arm/fsl-imx7.h +++ b/include/hw/arm/fsl-imx7.h @@ -82,6 +82,7 @@ struct FslIMX7State { ChipideaState usb[FSL_IMX7_NUM_USBS]; DesignwarePCIEHost pcie; uint32_t phy_num[FSL_IMX7_NUM_ETHS]; + bool phy_connected[FSL_IMX7_NUM_ETHS]; }; enum FslIMX7MemoryMap { diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h index 9fcff26357..fd8d772da1 100644 --- a/include/hw/arm/smmu-common.h +++ b/include/hw/arm/smmu-common.h @@ -23,9 +23,19 @@ #include "hw/pci/pci.h" #include "qom/object.h" -#define SMMU_PCI_BUS_MAX 256 -#define SMMU_PCI_DEVFN_MAX 256 -#define SMMU_PCI_DEVFN(sid) (sid & 0xFF) +#define SMMU_PCI_BUS_MAX 256 +#define SMMU_PCI_DEVFN_MAX 256 +#define SMMU_PCI_DEVFN(sid) (sid & 0xFF) + +/* VMSAv8-64 Translation constants and functions */ +#define VMSA_LEVELS 4 +#define VMSA_MAX_S2_CONCAT 16 + +#define VMSA_STRIDE(gran) ((gran) - VMSA_LEVELS + 1) +#define VMSA_BIT_LVL(isz, strd, lvl) ((isz) - (strd) * \ + (VMSA_LEVELS - 
(lvl))) +#define VMSA_IDXMSK(isz, strd, lvl) ((1ULL << \ + VMSA_BIT_LVL(isz, strd, lvl)) - 1) /* * Page table walk error types @@ -40,6 +50,7 @@ typedef enum { } SMMUPTWEventType; typedef struct SMMUPTWEventInfo { + int stage; SMMUPTWEventType type; dma_addr_t addr; /* fetched address that induced an abort, if any */ } SMMUPTWEventInfo; @@ -58,25 +69,41 @@ typedef struct SMMUTLBEntry { uint8_t granule; } SMMUTLBEntry; +/* Stage-2 configuration. */ +typedef struct SMMUS2Cfg { + uint8_t tsz; /* Size of IPA input region (S2T0SZ) */ + uint8_t sl0; /* Start level of translation (S2SL0) */ + bool affd; /* AF Fault Disable (S2AFFD) */ + bool record_faults; /* Record fault events (S2R) */ + uint8_t granule_sz; /* Granule page shift (based on S2TG) */ + uint8_t eff_ps; /* Effective PA output range (based on S2PS) */ + uint16_t vmid; /* Virtual Machine ID (S2VMID) */ + uint64_t vttb; /* Address of translation table base (S2TTB) */ +} SMMUS2Cfg; + /* * Generic structure populated by derived SMMU devices * after decoding the configuration information and used as * input to the page table walk */ typedef struct SMMUTransCfg { + /* Shared fields between stage-1 and stage-2. */ int stage; /* translation stage */ - bool aa64; /* arch64 or aarch32 translation table */ bool disabled; /* smmu is disabled */ bool bypassed; /* translation is bypassed */ bool aborted; /* translation is aborted */ + uint32_t iotlb_hits; /* counts IOTLB hits */ + uint32_t iotlb_misses; /* counts IOTLB misses*/ + /* Used by stage-1 only. */ + bool aa64; /* arch64 or aarch32 translation table */ bool record_faults; /* record fault events */ uint64_t ttb; /* TT base address */ uint8_t oas; /* output address width */ uint8_t tbi; /* Top Byte Ignore */ uint16_t asid; SMMUTransTableInfo tt[2]; - uint32_t iotlb_hits; /* counts IOTLB hits for this asid */ - uint32_t iotlb_misses; /* counts IOTLB misses for this asid */ + /* Used by stage-2 only. 
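+ * As a worked example of the VMSA macros above: with a 4KB stage-2
+ * granule (s2cfg.granule_sz == 12), VMSA_STRIDE(12) evaluates to
+ * 12 - 4 + 1 == 9, i.e. nine IPA bits are resolved per table level.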
*/ + struct SMMUS2Cfg s2cfg; } SMMUTransCfg; typedef struct SMMUDevice { @@ -98,6 +125,7 @@ typedef struct SMMUPciBus { typedef struct SMMUIOTLBKey { uint64_t iova; uint16_t asid; + uint16_t vmid; uint8_t tg; uint8_t level; } SMMUIOTLBKey; @@ -161,11 +189,12 @@ IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid); SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg, SMMUTransTableInfo *tt, hwaddr iova); void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *entry); -SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova, +SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova, uint8_t tg, uint8_t level); void smmu_iotlb_inv_all(SMMUState *s); void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid); -void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova, +void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid); +void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova, uint8_t tg, uint64_t num_pages, uint8_t ttl); /* Unmap the range of all the notifiers registered to any IOMMU mr */ diff --git a/include/hw/arm/smmuv3.h b/include/hw/arm/smmuv3.h index a0c026402e..d183a62766 100644 --- a/include/hw/arm/smmuv3.h +++ b/include/hw/arm/smmuv3.h @@ -62,6 +62,7 @@ struct SMMUv3State { qemu_irq irq[4]; QemuMutex mutex; + char *stage; }; typedef enum { @@ -83,4 +84,7 @@ struct SMMUv3Class { #define TYPE_ARM_SMMUV3 "arm-smmuv3" OBJECT_DECLARE_TYPE(SMMUv3State, SMMUv3Class, ARM_SMMUV3) +#define STAGE1_SUPPORTED(s) FIELD_EX32(s->idr[0], IDR0, S1P) +#define STAGE2_SUPPORTED(s) FIELD_EX32(s->idr[0], IDR0, S2P) + #endif diff --git a/include/hw/block/flash.h b/include/hw/block/flash.h index 86d8363bb0..7198953702 100644 --- a/include/hw/block/flash.h +++ b/include/hw/block/flash.h @@ -53,22 +53,22 @@ void nand_setio(DeviceState *dev, uint32_t value); uint32_t nand_getio(DeviceState *dev); uint32_t nand_getbuswidth(DeviceState *dev); -#define NAND_MFR_TOSHIBA 0x98 -#define NAND_MFR_SAMSUNG 0xec -#define NAND_MFR_FUJITSU 0x04 -#define NAND_MFR_NATIONAL 0x8f -#define NAND_MFR_RENESAS 0x07 -#define NAND_MFR_STMICRO 0x20 -#define NAND_MFR_HYNIX 0xad -#define NAND_MFR_MICRON 0x2c +#define NAND_MFR_TOSHIBA 0x98 +#define NAND_MFR_SAMSUNG 0xec +#define NAND_MFR_FUJITSU 0x04 +#define NAND_MFR_NATIONAL 0x8f +#define NAND_MFR_RENESAS 0x07 +#define NAND_MFR_STMICRO 0x20 +#define NAND_MFR_HYNIX 0xad +#define NAND_MFR_MICRON 0x2c /* onenand.c */ void *onenand_raw_otp(DeviceState *onenand_device); /* ecc.c */ typedef struct { - uint8_t cp; /* Column parity */ - uint16_t lp[2]; /* Line parity */ + uint8_t cp; /* Column parity */ + uint16_t lp[2]; /* Line parity */ uint16_t count; } ECCState; diff --git a/include/hw/boards.h b/include/hw/boards.h index 6fbbfd56c8..a385010909 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -253,6 +253,7 @@ struct MachineClass { const char *default_machine_opts; const char *default_boot_order; const char *default_display; + const char *default_nic; GPtrArray *compat_props; const char *hw_version; ram_addr_t default_ram_size; @@ -292,10 +293,12 @@ struct MachineClass { * @base: address in guest physical address space where the memory * address space for memory devices starts * @mr: address space container for memory devices + * @dimm_size: the sum of plugged DIMMs' sizes */ typedef struct DeviceMemoryState { hwaddr base; MemoryRegion mr; + uint64_t dimm_size; } DeviceMemoryState; /** @@ -381,6 +384,9 @@ struct MachineState { } \ type_init(machine_initfn##_register_types) +extern GlobalProperty 
hw_compat_8_0[]; +extern const size_t hw_compat_8_0_len; + extern GlobalProperty hw_compat_7_2[]; extern const size_t hw_compat_7_2_len; diff --git a/include/hw/char/parallel.h b/include/hw/char/parallel.h index 0a23c0f57e..29d2876d00 100644 --- a/include/hw/char/parallel.h +++ b/include/hw/char/parallel.h @@ -4,6 +4,8 @@ #include "hw/isa/isa.h" #include "chardev/char.h" +#define TYPE_ISA_PARALLEL "isa-parallel" + void parallel_hds_isa_init(ISABus *bus, int n); bool parallel_mm_init(MemoryRegion *address_space, diff --git a/include/hw/char/riscv_htif.h b/include/hw/char/riscv_htif.h index 5958c5b986..df493fdf6b 100644 --- a/include/hw/char/riscv_htif.h +++ b/include/hw/char/riscv_htif.h @@ -40,6 +40,9 @@ typedef struct HTIFState { uint64_t pending_read; } HTIFState; +extern const char *sig_file; +extern uint8_t line_size; + /* HTIF symbol callback */ void htif_symbol_callback(const char *st_name, int st_info, uint64_t st_value, uint64_t st_size); diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 671f041bec..39150cf8f8 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -30,7 +30,7 @@ #include "qemu/rcu_queue.h" #include "qemu/queue.h" #include "qemu/thread.h" -#include "qemu/plugin.h" +#include "qemu/plugin-event.h" #include "qom/object.h" typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size, @@ -106,6 +106,9 @@ struct SysemuCPUOps; * @has_work: Callback for checking if there is work to do. * @memory_rw_debug: Callback for GDB memory access. * @dump_state: Callback for dumping state. + * @query_cpu_fast: + * Fill in target specific information for the "query-cpus-fast" + * QAPI call. * @get_arch_id: Callback for getting architecture-dependent CPU ID. * @set_pc: Callback for setting the Program Counter register. This * should have the semantics used by the target architecture when @@ -151,6 +154,7 @@ struct CPUClass { int (*memory_rw_debug)(CPUState *cpu, vaddr addr, uint8_t *buf, int len, bool is_write); void (*dump_state)(CPUState *cpu, FILE *, int flags); + void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value); int64_t (*get_arch_id)(CPUState *cpu); void (*set_pc)(CPUState *cpu, vaddr value); vaddr (*get_pc)(CPUState *cpu); @@ -272,6 +276,8 @@ struct qemu_work_item; * to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will * be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER * QOM parent. + * Under TCG this value is propagated to @tcg_cflags. + * See TranslationBlock::TCG CF_CLUSTER_MASK. * @tcg_cflags: Pre-computed cflags for this cpu. * @nr_cores: Number of cores within this CPU package. * @nr_threads: Number of threads within this CPU. 
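(The @query_cpu_fast hook added above fills in the architecture-specific members of the "query-cpus-fast" QAPI reply. A rough sketch of how a target wires this up, modelled on the s390x implementation; details may differ:

    static void s390_query_cpu_fast(CPUState *cpu, CpuInfoFast *value)
    {
        S390CPU *s390_cpu = S390_CPU(cpu);

        /* expose the architecture-specific run state via QMP */
        value->u.s390x.cpu_state = s390_cpu->env.cpu_state;
    }

    /* in the CPUClass initialisation: */
    cc->query_cpu_fast = s390_query_cpu_fast;
)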
@@ -921,9 +927,10 @@ void cpu_single_step(CPUState *cpu, int enabled); #define BP_GDB 0x10 #define BP_CPU 0x20 #define BP_ANY (BP_GDB | BP_CPU) -#define BP_WATCHPOINT_HIT_READ 0x40 -#define BP_WATCHPOINT_HIT_WRITE 0x80 -#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE) +#define BP_HIT_SHIFT 6 +#define BP_WATCHPOINT_HIT_READ (BP_MEM_READ << BP_HIT_SHIFT) +#define BP_WATCHPOINT_HIT_WRITE (BP_MEM_WRITE << BP_HIT_SHIFT) +#define BP_WATCHPOINT_HIT (BP_MEM_ACCESS << BP_HIT_SHIFT) int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, CPUBreakpoint **breakpoint); @@ -946,7 +953,7 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask) return false; } -#ifdef CONFIG_USER_ONLY +#if defined(CONFIG_USER_ONLY) static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint) { @@ -967,17 +974,6 @@ static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu, static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask) { } - -static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, - MemTxAttrs atr, int fl, uintptr_t ra) -{ -} - -static inline int cpu_watchpoint_address_matches(CPUState *cpu, - vaddr addr, vaddr len) -{ - return 0; -} #else int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint); @@ -985,32 +981,6 @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, int flags); void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); void cpu_watchpoint_remove_all(CPUState *cpu, int mask); - -/** - * cpu_check_watchpoint: - * @cpu: cpu context - * @addr: guest virtual address - * @len: access length - * @attrs: memory access attributes - * @flags: watchpoint access type - * @ra: unwind return address - * - * Check for a watchpoint hit in [addr, addr+len) of the type - * specified by @flags. Exit via exception with a hit. - */ -void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, - MemTxAttrs attrs, int flags, uintptr_t ra); - -/** - * cpu_watchpoint_address_matches: - * @cpu: cpu context - * @addr: guest virtual address - * @len: access length - * - * Return the watchpoint flags that apply to [addr, addr+len). - * If no watchpoint is registered for the range, the result is 0. - */ -int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len); #endif /** @@ -1043,6 +1013,8 @@ void cpu_exec_unrealizefn(CPUState *cpu); */ bool target_words_bigendian(void); +const char *target_name(void); + void page_size_init(void); #ifdef NEED_CPU_H diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h index 20e3c0ffbb..0ae08df47e 100644 --- a/include/hw/core/tcg-cpu-ops.h +++ b/include/hw/core/tcg-cpu-ops.h @@ -175,4 +175,47 @@ struct TCGCPUOps { }; +#if defined(CONFIG_USER_ONLY) + +static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs atr, int fl, uintptr_t ra) +{ +} + +static inline int cpu_watchpoint_address_matches(CPUState *cpu, + vaddr addr, vaddr len) +{ + return 0; +} + +#else + +/** + * cpu_check_watchpoint: + * @cpu: cpu context + * @addr: guest virtual address + * @len: access length + * @attrs: memory access attributes + * @flags: watchpoint access type + * @ra: unwind return address + * + * Check for a watchpoint hit in [addr, addr+len) of the type + * specified by @flags. Exit via exception with a hit. 
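+ *
+ * A typical call from a target's store path might look like (a sketch;
+ * @gva, @size and @ra are whatever the caller has at hand):
+ *
+ *     cpu_check_watchpoint(cs, gva, size, MEMTXATTRS_UNSPECIFIED,
+ *                          BP_MEM_WRITE, ra);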
+ */ +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs attrs, int flags, uintptr_t ra); + +/** + * cpu_watchpoint_address_matches: + * @cpu: cpu context + * @addr: guest virtual address + * @len: access length + * + * Return the watchpoint flags that apply to [addr, addr+len). + * If no watchpoint is registered for the range, the result is 0. + */ +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len); + +#endif + #endif /* TCG_CPU_OPS_H */ diff --git a/include/hw/cxl/cxl.h b/include/hw/cxl/cxl.h index b161be59b7..c453983e83 100644 --- a/include/hw/cxl/cxl.h +++ b/include/hw/cxl/cxl.h @@ -23,12 +23,12 @@ #define CXL_WINDOW_MAX 10 -typedef struct PXBDev PXBDev; +typedef struct PXBCXLDev PXBCXLDev; typedef struct CXLFixedWindow { uint64_t size; char **targets; - PXBDev *target_hbs[8]; + PXBCXLDev *target_hbs[8]; uint8_t num_targets; uint8_t enc_int_ways; uint8_t enc_int_gran; @@ -49,6 +49,7 @@ struct CXLHost { PCIHostState parent_obj; CXLComponentState cxl_cstate; + bool passthrough; }; #define TYPE_PXB_CXL_HOST "pxb-cxl-host" diff --git a/include/hw/cxl/cxl_component.h b/include/hw/cxl/cxl_component.h index 692d7a5507..42c7e581a7 100644 --- a/include/hw/cxl/cxl_component.h +++ b/include/hw/cxl/cxl_component.h @@ -65,11 +65,37 @@ CXLx_CAPABILITY_HEADER(SNOOP, 0x14) #define CXL_RAS_REGISTERS_OFFSET 0x80 #define CXL_RAS_REGISTERS_SIZE 0x58 REG32(CXL_RAS_UNC_ERR_STATUS, CXL_RAS_REGISTERS_OFFSET) +#define CXL_RAS_UNC_ERR_CACHE_DATA_PARITY 0 +#define CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY 1 +#define CXL_RAS_UNC_ERR_CACHE_BE_PARITY 2 +#define CXL_RAS_UNC_ERR_CACHE_DATA_ECC 3 +#define CXL_RAS_UNC_ERR_MEM_DATA_PARITY 4 +#define CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY 5 +#define CXL_RAS_UNC_ERR_MEM_BE_PARITY 6 +#define CXL_RAS_UNC_ERR_MEM_DATA_ECC 7 +#define CXL_RAS_UNC_ERR_REINIT_THRESHOLD 8 +#define CXL_RAS_UNC_ERR_RSVD_ENCODING 9 +#define CXL_RAS_UNC_ERR_POISON_RECEIVED 10 +#define CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW 11 +#define CXL_RAS_UNC_ERR_INTERNAL 14 +#define CXL_RAS_UNC_ERR_CXL_IDE_TX 15 +#define CXL_RAS_UNC_ERR_CXL_IDE_RX 16 +#define CXL_RAS_UNC_ERR_CXL_UNUSED 63 /* Magic value */ REG32(CXL_RAS_UNC_ERR_MASK, CXL_RAS_REGISTERS_OFFSET + 0x4) REG32(CXL_RAS_UNC_ERR_SEVERITY, CXL_RAS_REGISTERS_OFFSET + 0x8) REG32(CXL_RAS_COR_ERR_STATUS, CXL_RAS_REGISTERS_OFFSET + 0xc) +#define CXL_RAS_COR_ERR_CACHE_DATA_ECC 0 +#define CXL_RAS_COR_ERR_MEM_DATA_ECC 1 +#define CXL_RAS_COR_ERR_CRC_THRESHOLD 2 +#define CXL_RAS_COR_ERR_RETRY_THRESHOLD 3 +#define CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED 4 +#define CXL_RAS_COR_ERR_MEM_POISON_RECEIVED 5 +#define CXL_RAS_COR_ERR_PHYSICAL 6 REG32(CXL_RAS_COR_ERR_MASK, CXL_RAS_REGISTERS_OFFSET + 0x10) REG32(CXL_RAS_ERR_CAP_CTRL, CXL_RAS_REGISTERS_OFFSET + 0x14) + FIELD(CXL_RAS_ERR_CAP_CTRL, FIRST_ERROR_POINTER, 0, 6) +REG32(CXL_RAS_ERR_HEADER0, CXL_RAS_REGISTERS_OFFSET + 0x18) +#define CXL_RAS_ERR_HEADER_NUM 32 /* Offset 0x18 - 0x58 reserved for RAS logs */ /* 8.2.5.10 - CXL Security Capability Structure */ @@ -221,6 +247,7 @@ static inline hwaddr cxl_decode_ig(int ig) } CXLComponentState *cxl_get_hb_cstate(PCIHostState *hb); +bool cxl_get_hb_passthrough(PCIHostState *hb); void cxl_doe_cdat_init(CXLComponentState *cxl_cstate, Error **errp); void cxl_doe_cdat_release(CXLComponentState *cxl_cstate); diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h index 250adf18b2..02befda0f6 100644 --- a/include/hw/cxl/cxl_device.h +++ b/include/hw/cxl/cxl_device.h @@ -119,8 +119,10 @@ typedef struct cxl_device_state { uint64_t 
host_set; } timestamp; - /* memory region for persistent memory, HDM */ + /* memory region size, HDM */ + uint64_t mem_size; uint64_t pmem_size; + uint64_t vmem_size; } CXLDeviceState; /* Initialize the register block for a device */ @@ -170,7 +172,7 @@ CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MEMORY_DEVICE, CXL_DEVICE_CAP_HDR1_OFFSET + CXL_DEVICE_CAP_REG_SIZE * 2) -int cxl_initialize_mailbox(CXLDeviceState *cxl_dstate); +void cxl_initialize_mailbox(CXLDeviceState *cxl_dstate); void cxl_process_mailbox(CXLDeviceState *cxl_dstate); #define cxl_device_cap_init(dstate, reg, cap_id) \ @@ -232,22 +234,36 @@ REG64(CXL_MEM_DEV_STS, 0) FIELD(CXL_MEM_DEV_STS, MBOX_READY, 4, 1) FIELD(CXL_MEM_DEV_STS, RESET_NEEDED, 5, 3) +typedef struct CXLError { + QTAILQ_ENTRY(CXLError) node; + int type; /* Error code as per FE definition */ + uint32_t header[32]; +} CXLError; + +typedef QTAILQ_HEAD(, CXLError) CXLErrorList; + struct CXLType3Dev { /* Private */ PCIDevice parent_obj; /* Properties */ - HostMemoryBackend *hostmem; + HostMemoryBackend *hostmem; /* deprecated */ + HostMemoryBackend *hostvmem; + HostMemoryBackend *hostpmem; HostMemoryBackend *lsa; uint64_t sn; /* State */ - AddressSpace hostmem_as; + AddressSpace hostvmem_as; + AddressSpace hostpmem_as; CXLComponentState cxl_cstate; CXLDeviceState cxl_dstate; /* DOE */ DOECap doe_cdat; + + /* Error injection */ + CXLErrorList error_list; }; #define TYPE_CXL_TYPE3 "cxl-type3" @@ -271,4 +287,6 @@ MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data, MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data, unsigned size, MemTxAttrs attrs); +uint64_t cxl_device_get_timestamp(CXLDeviceState *cxlds); + #endif diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h index fbe0b1e956..dffb0e73d2 100644 --- a/include/hw/elf_ops.h +++ b/include/hw/elf_ops.h @@ -1,30 +1,30 @@ static void glue(bswap_ehdr, SZ)(struct elfhdr *ehdr) { - bswap16s(&ehdr->e_type); /* Object file type */ - bswap16s(&ehdr->e_machine); /* Architecture */ - bswap32s(&ehdr->e_version); /* Object file version */ - bswapSZs(&ehdr->e_entry); /* Entry point virtual address */ - bswapSZs(&ehdr->e_phoff); /* Program header table file offset */ - bswapSZs(&ehdr->e_shoff); /* Section header table file offset */ - bswap32s(&ehdr->e_flags); /* Processor-specific flags */ - bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */ - bswap16s(&ehdr->e_phentsize); /* Program header table entry size */ - bswap16s(&ehdr->e_phnum); /* Program header table entry count */ - bswap16s(&ehdr->e_shentsize); /* Section header table entry size */ - bswap16s(&ehdr->e_shnum); /* Section header table entry count */ - bswap16s(&ehdr->e_shstrndx); /* Section header string table index */ + bswap16s(&ehdr->e_type); /* Object file type */ + bswap16s(&ehdr->e_machine); /* Architecture */ + bswap32s(&ehdr->e_version); /* Object file version */ + bswapSZs(&ehdr->e_entry); /* Entry point virtual address */ + bswapSZs(&ehdr->e_phoff); /* Program header table file offset */ + bswapSZs(&ehdr->e_shoff); /* Section header table file offset */ + bswap32s(&ehdr->e_flags); /* Processor-specific flags */ + bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */ + bswap16s(&ehdr->e_phentsize); /* Program header table entry size */ + bswap16s(&ehdr->e_phnum); /* Program header table entry count */ + bswap16s(&ehdr->e_shentsize); /* Section header table entry size */ + bswap16s(&ehdr->e_shnum); /* Section header table entry count */ + bswap16s(&ehdr->e_shstrndx); /* Section header string table index */ 
} static void glue(bswap_phdr, SZ)(struct elf_phdr *phdr) { - bswap32s(&phdr->p_type); /* Segment type */ - bswapSZs(&phdr->p_offset); /* Segment file offset */ - bswapSZs(&phdr->p_vaddr); /* Segment virtual address */ - bswapSZs(&phdr->p_paddr); /* Segment physical address */ - bswapSZs(&phdr->p_filesz); /* Segment size in file */ - bswapSZs(&phdr->p_memsz); /* Segment size in memory */ - bswap32s(&phdr->p_flags); /* Segment flags */ - bswapSZs(&phdr->p_align); /* Segment alignment */ + bswap32s(&phdr->p_type); /* Segment type */ + bswapSZs(&phdr->p_offset); /* Segment file offset */ + bswapSZs(&phdr->p_vaddr); /* Segment virtual address */ + bswapSZs(&phdr->p_paddr); /* Segment physical address */ + bswapSZs(&phdr->p_filesz); /* Segment size in file */ + bswapSZs(&phdr->p_memsz); /* Segment size in memory */ + bswap32s(&phdr->p_flags); /* Segment flags */ + bswapSZs(&phdr->p_align); /* Segment alignment */ } static void glue(bswap_shdr, SZ)(struct elf_shdr *shdr) diff --git a/include/hw/hotplug.h b/include/hw/hotplug.h index e15f59c8b3..a9840ed485 100644 --- a/include/hw/hotplug.h +++ b/include/hw/hotplug.h @@ -48,6 +48,7 @@ typedef void (*hotplug_fn)(HotplugHandler *plug_handler, * @unplug: unplug callback. * Used for device removal with devices that implement * asynchronous and synchronous (surprise) removal. + * @is_hotpluggable_bus: called to check if bus/its parent allow hotplug on bus */ struct HotplugHandlerClass { /* */ @@ -58,6 +59,7 @@ struct HotplugHandlerClass { hotplug_fn plug; hotplug_fn unplug_request; hotplug_fn unplug; + bool (*is_hotpluggable_bus)(HotplugHandler *plug_handler, BusState *bus); }; /** diff --git a/include/hw/i2c/allwinner-i2c.h b/include/hw/i2c/allwinner-i2c.h index 4f378b86ba..0e325d265e 100644 --- a/include/hw/i2c/allwinner-i2c.h +++ b/include/hw/i2c/allwinner-i2c.h @@ -28,6 +28,10 @@ #include "qom/object.h" #define TYPE_AW_I2C "allwinner.i2c" + +/** Allwinner I2C sun6i family and newer (A31, H2+, H3, etc) */ +#define TYPE_AW_I2C_SUN6I TYPE_AW_I2C "-sun6i" + OBJECT_DECLARE_SIMPLE_TYPE(AWI2CState, AW_I2C) #define AW_I2C_MEM_SIZE 0x24 @@ -50,6 +54,8 @@ struct AWI2CState { uint8_t srst; uint8_t efr; uint8_t lcr; + + bool irq_clear_inverted; }; #endif /* ALLWINNER_I2C_H */ diff --git a/include/hw/i2c/aspeed_i2c.h b/include/hw/i2c/aspeed_i2c.h index adc904d6c1..51c944efea 100644 --- a/include/hw/i2c/aspeed_i2c.h +++ b/include/hw/i2c/aspeed_i2c.h @@ -38,6 +38,13 @@ OBJECT_DECLARE_TYPE(AspeedI2CState, AspeedI2CClass, ASPEED_I2C) #define ASPEED_I2C_OLD_NUM_REG 11 #define ASPEED_I2C_NEW_NUM_REG 22 +#define A_I2CD_M_STOP_CMD BIT(5) +#define A_I2CD_M_RX_CMD BIT(3) +#define A_I2CD_M_TX_CMD BIT(1) +#define A_I2CD_M_START_CMD BIT(0) + +#define A_I2CD_MASTER_EN BIT(0) + /* Tx State Machine */ #define I2CD_TX_STATE_MASK 0xf #define I2CD_IDLE 0x0 diff --git a/include/hw/i2c/i2c.h b/include/hw/i2c/i2c.h index 9b9581d230..2a3abacd1b 100644 --- a/include/hw/i2c/i2c.h +++ b/include/hw/i2c/i2c.h @@ -141,6 +141,8 @@ int i2c_start_send(I2CBus *bus, uint8_t address); */ int i2c_start_send_async(I2CBus *bus, uint8_t address); +void i2c_schedule_pending_master(I2CBus *bus); + void i2c_end_transfer(I2CBus *bus); void i2c_nack(I2CBus *bus); void i2c_ack(I2CBus *bus); diff --git a/include/hw/i386/microvm.h b/include/hw/i386/microvm.h index e8af61f194..fad97a891d 100644 --- a/include/hw/i386/microvm.h +++ b/include/hw/i386/microvm.h @@ -50,9 +50,8 @@ */ /* Platform virtio definitions */ -#define VIRTIO_MMIO_BASE 0xfeb00000 -#define VIRTIO_CMDLINE_MAXLEN 64 -#define 
VIRTIO_CMDLINE_TOTAL_MAX_LEN ((VIRTIO_CMDLINE_MAXLEN + 1) * 16) +#define VIRTIO_MMIO_BASE 0xfeb00000 +#define VIRTIO_CMDLINE_MAXLEN 64 #define GED_MMIO_BASE 0xfea00000 #define GED_MMIO_BASE_MEMHP (GED_MMIO_BASE + 0x100) diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index 66e3d059ef..c661e9cc80 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -93,7 +93,6 @@ struct PCMachineClass { /* Device configuration: */ bool pci_enabled; bool kvmclock_enabled; - const char *default_nic_model; /* Compat options: */ @@ -128,8 +127,8 @@ struct PCMachineClass { /* create kvmclock device even when KVM PV features are not exposed */ bool kvmclock_create_always; - /* skip passing an rng seed for legacy machines */ - bool legacy_no_rng_seed; + /* resizable acpi blob compat */ + bool resizable_acpi_blob; }; #define TYPE_PC_MACHINE "generic-pc-machine" @@ -162,13 +161,12 @@ void xen_load_linux(PCMachineState *pcms); void pc_memory_init(PCMachineState *pcms, MemoryRegion *system_memory, MemoryRegion *rom_memory, - MemoryRegion **ram_memory, uint64_t pci_hole64_size); uint64_t pc_pci_hole64_start(void); DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus); void pc_basic_device_init(struct PCMachineState *pcms, ISABus *isa_bus, qemu_irq *gsi, - ISADevice **rtc_state, + ISADevice *rtc_state, bool create_fdctrl, uint32_t hpet_irqs); void pc_cmos_init(PCMachineState *pcms, @@ -198,6 +196,9 @@ void pc_madt_cpu_entry(int uid, const CPUArchIdList *apic_ids, /* sgx.c */ void pc_machine_init_sgx_epc(PCMachineState *pcms); +extern GlobalProperty pc_compat_8_0[]; +extern const size_t pc_compat_8_0_len; + extern GlobalProperty pc_compat_7_2[]; extern const size_t pc_compat_7_2_len; @@ -291,12 +292,15 @@ extern const size_t pc_compat_1_5_len; extern GlobalProperty pc_compat_1_4[]; extern const size_t pc_compat_1_4_len; +int pc_machine_kvm_type(MachineState *machine, const char *vm_type); + #define DEFINE_PC_MACHINE(suffix, namestr, initfn, optsfn) \ static void pc_machine_##suffix##_class_init(ObjectClass *oc, void *data) \ { \ MachineClass *mc = MACHINE_CLASS(oc); \ optsfn(mc); \ mc->init = initfn; \ + mc->kvm_type = pc_machine_kvm_type; \ } \ static const TypeInfo pc_machine_type_##suffix = { \ .name = namestr TYPE_MACHINE_SUFFIX, \ diff --git a/include/hw/i386/x86.h b/include/hw/i386/x86.h index 62fa5774f8..da19ae1546 100644 --- a/include/hw/i386/x86.h +++ b/include/hw/i386/x86.h @@ -18,13 +18,10 @@ #define HW_I386_X86_H #include "exec/hwaddr.h" -#include "qemu/notify.h" -#include "hw/i386/topology.h" #include "hw/boards.h" -#include "hw/nmi.h" +#include "hw/intc/ioapic.h" #include "hw/isa/isa.h" -#include "hw/i386/ioapic.h" #include "qom/object.h" struct X86MachineClass { @@ -98,8 +95,6 @@ struct X86MachineState { #define TYPE_X86_MACHINE MACHINE_TYPE_NAME("x86") OBJECT_DECLARE_TYPE(X86MachineState, X86MachineClass, X86_MACHINE) -void init_topo_info(X86CPUTopoInfo *topo_info, const X86MachineState *x86ms); - uint32_t x86_cpu_apic_id_from_index(X86MachineState *pcms, unsigned int cpu_index); @@ -126,15 +121,13 @@ void x86_bios_rom_init(MachineState *ms, const char *default_firmware, void x86_load_linux(X86MachineState *x86ms, FWCfgState *fw_cfg, int acpi_data_size, - bool pvh_enabled, - bool legacy_no_rng_seed); + bool pvh_enabled); bool x86_machine_is_smm_enabled(const X86MachineState *x86ms); bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms); /* Global System Interrupts */ -#define GSI_NUM_PINS IOAPIC_NUM_PINS #define ACPI_BUILD_PCI_IRQS ((1<<5) | (1<<9) | (1<<10) | (1<<11)) 
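(For reference, the DEFINE_PC_MACHINE macro amended above, whose generated class_init now also sets mc->kvm_type = pc_machine_kvm_type, is used along these lines; the machine name and callbacks are purely illustrative:

    static void pc_example_machine_options(MachineClass *m)
    {
        m->desc = "Example PC machine (illustrative only)";
    }

    /* pc_init_example would be the board's init function */
    DEFINE_PC_MACHINE(example, "pc-example", pc_init_example,
                      pc_example_machine_options)
)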
typedef struct GSIState { diff --git a/include/hw/ide.h b/include/hw/ide.h index 60f1f4f714..db963bdb77 100644 --- a/include/hw/ide.h +++ b/include/hw/ide.h @@ -1,20 +1,8 @@ #ifndef HW_IDE_H #define HW_IDE_H -#include "hw/isa/isa.h" #include "exec/memory.h" -/* ide-isa.c */ -ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq, - DriveInfo *hd0, DriveInfo *hd1); - -/* ide-mmio.c */ -void mmio_ide_init_drives(DeviceState *dev, DriveInfo *hd0, DriveInfo *hd1); - -int ide_get_geometry(BusState *bus, int unit, - int16_t *cyls, int8_t *heads, int8_t *secs); -int ide_get_bios_chs_trans(BusState *bus, int unit); - /* ide/core.c */ void ide_drive_get(DriveInfo **hd, int max_bus); diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h index fc0aa81a88..2bfa7533d6 100644 --- a/include/hw/ide/internal.h +++ b/include/hw/ide/internal.h @@ -7,13 +7,10 @@ * non-internal declarations are in hw/ide.h */ -#include "qapi/qapi-types-run-state.h" #include "hw/ide.h" -#include "hw/irq.h" -#include "hw/isa/isa.h" #include "sysemu/dma.h" #include "hw/block/block.h" -#include "scsi/constants.h" +#include "exec/ioport.h" /* debug IDE devices */ #define USE_DMA_CDROM @@ -41,32 +38,32 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) /* Bits of HD_STATUS */ -#define ERR_STAT 0x01 -#define INDEX_STAT 0x02 -#define ECC_STAT 0x04 /* Corrected error */ -#define DRQ_STAT 0x08 -#define SEEK_STAT 0x10 -#define SRV_STAT 0x10 -#define WRERR_STAT 0x20 -#define READY_STAT 0x40 -#define BUSY_STAT 0x80 +#define ERR_STAT 0x01 +#define INDEX_STAT 0x02 +#define ECC_STAT 0x04 /* Corrected error */ +#define DRQ_STAT 0x08 +#define SEEK_STAT 0x10 +#define SRV_STAT 0x10 +#define WRERR_STAT 0x20 +#define READY_STAT 0x40 +#define BUSY_STAT 0x80 /* Bits for HD_ERROR */ -#define MARK_ERR 0x01 /* Bad address mark */ -#define TRK0_ERR 0x02 /* couldn't find track 0 */ -#define ABRT_ERR 0x04 /* Command aborted */ -#define MCR_ERR 0x08 /* media change request */ -#define ID_ERR 0x10 /* ID field not found */ -#define MC_ERR 0x20 /* media changed */ -#define ECC_ERR 0x40 /* Uncorrectable ECC error */ -#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ -#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ +#define MARK_ERR 0x01 /* Bad address mark */ +#define TRK0_ERR 0x02 /* couldn't find track 0 */ +#define ABRT_ERR 0x04 /* Command aborted */ +#define MCR_ERR 0x08 /* media change request */ +#define ID_ERR 0x10 /* ID field not found */ +#define MC_ERR 0x20 /* media changed */ +#define ECC_ERR 0x40 /* Uncorrectable ECC error */ +#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ +#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ /* Bits of HD_NSECTOR */ -#define CD 0x01 -#define IO 0x02 -#define REL 0x04 -#define TAG_MASK 0xf8 +#define CD 0x01 +#define IO 0x02 +#define REL 0x04 +#define TAG_MASK 0xf8 /* Bits of Device Control register */ #define IDE_CTRL_HOB 0x80 @@ -74,50 +71,50 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) #define IDE_CTRL_DISABLE_IRQ 0x02 /* ACS-2 T13/2015-D Table B.2 Command codes */ -#define WIN_NOP 0x00 +#define WIN_NOP 0x00 /* reserved 0x01..0x02 */ -#define CFA_REQ_EXT_ERROR_CODE 0x03 /* CFA Request Extended Error Code */ +#define CFA_REQ_EXT_ERROR_CODE 0x03 /* CFA Request Extended Error Code */ /* reserved 0x04..0x05 */ #define WIN_DSM 0x06 /* reserved 0x07 */ -#define WIN_DEVICE_RESET 0x08 +#define WIN_DEVICE_RESET 0x08 /* reserved 0x09..0x0a */ /* REQUEST SENSE DATA EXT 0x0B */ /* reserved 0x0C..0x0F */ #define WIN_RECAL 0x10 /* obsolete 
since ATA4 */ /* obsolete since ATA3, retired in ATA4 0x11..0x1F */ -#define WIN_READ 0x20 /* 28-Bit */ +#define WIN_READ 0x20 /* 28-Bit */ #define WIN_READ_ONCE 0x21 /* 28-Bit w/o retries, obsolete since ATA5 */ /* obsolete since ATA4 0x22..0x23 */ -#define WIN_READ_EXT 0x24 /* 48-Bit */ -#define WIN_READDMA_EXT 0x25 /* 48-Bit */ +#define WIN_READ_EXT 0x24 /* 48-Bit */ +#define WIN_READDMA_EXT 0x25 /* 48-Bit */ #define WIN_READDMA_QUEUED_EXT 0x26 /* 48-Bit, obsolete since ACS2 */ -#define WIN_READ_NATIVE_MAX_EXT 0x27 /* 48-Bit */ +#define WIN_READ_NATIVE_MAX_EXT 0x27 /* 48-Bit */ /* reserved 0x28 */ -#define WIN_MULTREAD_EXT 0x29 /* 48-Bit */ +#define WIN_MULTREAD_EXT 0x29 /* 48-Bit */ /* READ STREAM DMA EXT 0x2A */ /* READ STREAM EXT 0x2B */ /* reserved 0x2C..0x2E */ /* READ LOG EXT 0x2F */ -#define WIN_WRITE 0x30 /* 28-Bit */ +#define WIN_WRITE 0x30 /* 28-Bit */ #define WIN_WRITE_ONCE 0x31 /* 28-Bit w/o retries, obsolete since ATA5 */ /* obsolete since ATA4 0x32..0x33 */ -#define WIN_WRITE_EXT 0x34 /* 48-Bit */ -#define WIN_WRITEDMA_EXT 0x35 /* 48-Bit */ -#define WIN_WRITEDMA_QUEUED_EXT 0x36 /* 48-Bit */ +#define WIN_WRITE_EXT 0x34 /* 48-Bit */ +#define WIN_WRITEDMA_EXT 0x35 /* 48-Bit */ +#define WIN_WRITEDMA_QUEUED_EXT 0x36 /* 48-Bit */ #define WIN_SET_MAX_EXT 0x37 /* 48-Bit, obsolete since ACS2 */ -#define WIN_SET_MAX_EXT 0x37 /* 48-Bit */ -#define CFA_WRITE_SECT_WO_ERASE 0x38 /* CFA Write Sectors without erase */ -#define WIN_MULTWRITE_EXT 0x39 /* 48-Bit */ +#define WIN_SET_MAX_EXT 0x37 /* 48-Bit */ +#define CFA_WRITE_SECT_WO_ERASE 0x38 /* CFA Write Sectors without erase */ +#define WIN_MULTWRITE_EXT 0x39 /* 48-Bit */ /* WRITE STREAM DMA EXT 0x3A */ /* WRITE STREAM EXT 0x3B */ #define WIN_WRITE_VERIFY 0x3C /* 28-Bit, obsolete since ATA4 */ /* WRITE DMA FUA EXT 0x3D */ /* obsolete since ACS2 0x3E */ /* WRITE LOG EXT 0x3F */ -#define WIN_VERIFY 0x40 /* 28-Bit - Read Verify Sectors */ +#define WIN_VERIFY 0x40 /* 28-Bit - Read Verify Sectors */ #define WIN_VERIFY_ONCE 0x41 /* 28-Bit - w/o retries, obsolete since ATA5 */ -#define WIN_VERIFY_EXT 0x42 /* 48-Bit */ +#define WIN_VERIFY_EXT 0x42 /* 48-Bit */ /* reserved 0x43..0x44 */ /* WRITE UNCORRECTABLE EXT 0x45 */ /* reserved 0x46 */ @@ -139,11 +136,11 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) #define WIN_SEEK 0x70 /* obsolete since ATA7 */ /* reserved 0x71-0x7F */ /* vendor specific 0x80-0x86 */ -#define CFA_TRANSLATE_SECTOR 0x87 /* CFA Translate Sector */ +#define CFA_TRANSLATE_SECTOR 0x87 /* CFA Translate Sector */ /* vendor specific 0x88-0x8F */ -#define WIN_DIAGNOSE 0x90 +#define WIN_DIAGNOSE 0x90 #define WIN_SPECIFY 0x91 /* set drive geometry translation, obsolete since ATA6 */ -#define WIN_DOWNLOAD_MICROCODE 0x92 +#define WIN_DOWNLOAD_MICROCODE 0x92 /* DOWNLOAD MICROCODE DMA 0x93 */ #define WIN_STANDBYNOW2 0x94 /* retired in ATA4 */ #define WIN_IDLEIMMEDIATE2 0x95 /* force drive to become "ready", retired in ATA4 */ @@ -153,31 +150,31 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) #define WIN_SLEEPNOW2 0x99 /* retired in ATA4 */ /* vendor specific 0x9A */ /* reserved 0x9B..0x9F */ -#define WIN_PACKETCMD 0xA0 /* Send a packet command. */ -#define WIN_PIDENTIFY 0xA1 /* identify ATAPI device */ +#define WIN_PACKETCMD 0xA0 /* Send a packet command. 
*/ +#define WIN_PIDENTIFY 0xA1 /* identify ATAPI device */ #define WIN_QUEUED_SERVICE 0xA2 /* obsolete since ACS2 */ /* reserved 0xA3..0xAF */ -#define WIN_SMART 0xB0 /* self-monitoring and reporting */ +#define WIN_SMART 0xB0 /* self-monitoring and reporting */ /* Device Configuration Overlay 0xB1 */ /* reserved 0xB2..0xB3 */ /* Sanitize Device 0xB4 */ /* reserved 0xB5 */ /* NV Cache 0xB6 */ /* reserved for CFA 0xB7..0xBB */ -#define CFA_ACCESS_METADATA_STORAGE 0xB8 +#define CFA_ACCESS_METADATA_STORAGE 0xB8 /* reserved 0xBC..0xBF */ -#define CFA_ERASE_SECTORS 0xC0 /* microdrives implement as NOP */ +#define CFA_ERASE_SECTORS 0xC0 /* microdrives implement as NOP */ /* vendor specific 0xC1..0xC3 */ -#define WIN_MULTREAD 0xC4 /* read sectors using multiple mode*/ -#define WIN_MULTWRITE 0xC5 /* write sectors using multiple mode */ -#define WIN_SETMULT 0xC6 /* enable/disable multiple mode */ +#define WIN_MULTREAD 0xC4 /* read sectors using multiple mode*/ +#define WIN_MULTWRITE 0xC5 /* write sectors using multiple mode */ +#define WIN_SETMULT 0xC6 /* enable/disable multiple mode */ #define WIN_READDMA_QUEUED 0xC7 /* read sectors using Queued DMA transfers, obsolete since ACS2 */ -#define WIN_READDMA 0xC8 /* read sectors using DMA transfers */ +#define WIN_READDMA 0xC8 /* read sectors using DMA transfers */ #define WIN_READDMA_ONCE 0xC9 /* 28-Bit - w/o retries, obsolete since ATA5 */ -#define WIN_WRITEDMA 0xCA /* write sectors using DMA transfers */ +#define WIN_WRITEDMA 0xCA /* write sectors using DMA transfers */ #define WIN_WRITEDMA_ONCE 0xCB /* 28-Bit - w/o retries, obsolete since ATA5 */ -#define WIN_WRITEDMA_QUEUED 0xCC /* write sectors using Queued DMA transfers, obsolete since ACS2 */ -#define CFA_WRITE_MULTI_WO_ERASE 0xCD /* CFA Write multiple without erase */ +#define WIN_WRITEDMA_QUEUED 0xCC /* write sectors using Queued DMA transfers, obsolete since ACS2 */ +#define CFA_WRITE_MULTI_WO_ERASE 0xCD /* CFA Write multiple without erase */ /* WRITE MULTIPLE FUA EXT 0xCE */ /* reserved 0xCF..0xDO */ /* CHECK MEDIA CARD TYPE 0xD1 */ @@ -187,33 +184,33 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) /* obsolete since ATA3, retired in ATA4 0xDB..0xDD */ #define WIN_DOORLOCK 0xDE /* lock door on removable drives, obsolete since ATA8 */ #define WIN_DOORUNLOCK 0xDF /* unlock door on removable drives, obsolete since ATA8 */ -#define WIN_STANDBYNOW1 0xE0 -#define WIN_IDLEIMMEDIATE 0xE1 /* force drive to become "ready" */ -#define WIN_STANDBY 0xE2 /* Set device in Standby Mode */ -#define WIN_SETIDLE1 0xE3 -#define WIN_READ_BUFFER 0xE4 /* force read only 1 sector */ -#define WIN_CHECKPOWERMODE1 0xE5 -#define WIN_SLEEPNOW1 0xE6 -#define WIN_FLUSH_CACHE 0xE7 -#define WIN_WRITE_BUFFER 0xE8 /* force write only 1 sector */ +#define WIN_STANDBYNOW1 0xE0 +#define WIN_IDLEIMMEDIATE 0xE1 /* force drive to become "ready" */ +#define WIN_STANDBY 0xE2 /* Set device in Standby Mode */ +#define WIN_SETIDLE1 0xE3 +#define WIN_READ_BUFFER 0xE4 /* force read only 1 sector */ +#define WIN_CHECKPOWERMODE1 0xE5 +#define WIN_SLEEPNOW1 0xE6 +#define WIN_FLUSH_CACHE 0xE7 +#define WIN_WRITE_BUFFER 0xE8 /* force write only 1 sector */ /* READ BUFFER DMA 0xE9 */ -#define WIN_FLUSH_CACHE_EXT 0xEA /* 48-Bit */ +#define WIN_FLUSH_CACHE_EXT 0xEA /* 48-Bit */ /* WRITE BUFFER DMA 0xEB */ -#define WIN_IDENTIFY 0xEC /* ask drive to identify itself */ +#define WIN_IDENTIFY 0xEC /* ask drive to identify itself */ #define WIN_MEDIAEJECT 0xED /* obsolete since ATA8 */ /* obsolete since ATA4 0xEE */ -#define WIN_SETFEATURES 0xEF /* set 
special drive features */ +#define WIN_SETFEATURES 0xEF /* set special drive features */ #define IBM_SENSE_CONDITION 0xF0 /* measure disk temperature, vendor specific */ -#define WIN_SECURITY_SET_PASS 0xF1 -#define WIN_SECURITY_UNLOCK 0xF2 -#define WIN_SECURITY_ERASE_PREPARE 0xF3 -#define WIN_SECURITY_ERASE_UNIT 0xF4 -#define WIN_SECURITY_FREEZE_LOCK 0xF5 +#define WIN_SECURITY_SET_PASS 0xF1 +#define WIN_SECURITY_UNLOCK 0xF2 +#define WIN_SECURITY_ERASE_PREPARE 0xF3 +#define WIN_SECURITY_ERASE_UNIT 0xF4 +#define WIN_SECURITY_FREEZE_LOCK 0xF5 #define CFA_WEAR_LEVEL 0xF5 /* microdrives implement as NOP; not specified in T13! */ -#define WIN_SECURITY_DISABLE 0xF6 +#define WIN_SECURITY_DISABLE 0xF6 /* vendor specific 0xF7 */ -#define WIN_READ_NATIVE_MAX 0xF8 /* return the native maximum address */ -#define WIN_SET_MAX 0xF9 +#define WIN_READ_NATIVE_MAX 0xF8 /* return the native maximum address */ +#define WIN_SET_MAX 0xF9 /* vendor specific 0xFA..0xFF */ /* set to 1 set disable mult support */ @@ -234,68 +231,68 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) /* The generic packet command opcodes for CD/DVD Logical Units, * From Table 57 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */ -#define GPCMD_BLANK 0xa1 -#define GPCMD_CLOSE_TRACK 0x5b -#define GPCMD_FLUSH_CACHE 0x35 -#define GPCMD_FORMAT_UNIT 0x04 -#define GPCMD_GET_CONFIGURATION 0x46 +#define GPCMD_BLANK 0xa1 +#define GPCMD_CLOSE_TRACK 0x5b +#define GPCMD_FLUSH_CACHE 0x35 +#define GPCMD_FORMAT_UNIT 0x04 +#define GPCMD_GET_CONFIGURATION 0x46 #define GPCMD_GET_EVENT_STATUS_NOTIFICATION 0x4a -#define GPCMD_GET_PERFORMANCE 0xac -#define GPCMD_INQUIRY 0x12 -#define GPCMD_LOAD_UNLOAD 0xa6 -#define GPCMD_MECHANISM_STATUS 0xbd -#define GPCMD_MODE_SELECT_10 0x55 -#define GPCMD_MODE_SENSE_10 0x5a -#define GPCMD_PAUSE_RESUME 0x4b -#define GPCMD_PLAY_AUDIO_10 0x45 -#define GPCMD_PLAY_AUDIO_MSF 0x47 -#define GPCMD_PLAY_AUDIO_TI 0x48 -#define GPCMD_PLAY_CD 0xbc +#define GPCMD_GET_PERFORMANCE 0xac +#define GPCMD_INQUIRY 0x12 +#define GPCMD_LOAD_UNLOAD 0xa6 +#define GPCMD_MECHANISM_STATUS 0xbd +#define GPCMD_MODE_SELECT_10 0x55 +#define GPCMD_MODE_SENSE_10 0x5a +#define GPCMD_PAUSE_RESUME 0x4b +#define GPCMD_PLAY_AUDIO_10 0x45 +#define GPCMD_PLAY_AUDIO_MSF 0x47 +#define GPCMD_PLAY_AUDIO_TI 0x48 +#define GPCMD_PLAY_CD 0xbc #define GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e -#define GPCMD_READ_10 0x28 -#define GPCMD_READ_12 0xa8 -#define GPCMD_READ_CDVD_CAPACITY 0x25 -#define GPCMD_READ_CD 0xbe -#define GPCMD_READ_CD_MSF 0xb9 -#define GPCMD_READ_DISC_INFO 0x51 -#define GPCMD_READ_DVD_STRUCTURE 0xad -#define GPCMD_READ_FORMAT_CAPACITIES 0x23 -#define GPCMD_READ_HEADER 0x44 -#define GPCMD_READ_TRACK_RZONE_INFO 0x52 -#define GPCMD_READ_SUBCHANNEL 0x42 -#define GPCMD_READ_TOC_PMA_ATIP 0x43 -#define GPCMD_REPAIR_RZONE_TRACK 0x58 -#define GPCMD_REPORT_KEY 0xa4 -#define GPCMD_REQUEST_SENSE 0x03 -#define GPCMD_RESERVE_RZONE_TRACK 0x53 -#define GPCMD_SCAN 0xba -#define GPCMD_SEEK 0x2b -#define GPCMD_SEND_DVD_STRUCTURE 0xad -#define GPCMD_SEND_EVENT 0xa2 -#define GPCMD_SEND_KEY 0xa3 -#define GPCMD_SEND_OPC 0x54 -#define GPCMD_SET_READ_AHEAD 0xa7 -#define GPCMD_SET_STREAMING 0xb6 -#define GPCMD_START_STOP_UNIT 0x1b -#define GPCMD_STOP_PLAY_SCAN 0x4e -#define GPCMD_TEST_UNIT_READY 0x00 -#define GPCMD_VERIFY_10 0x2f -#define GPCMD_WRITE_10 0x2a -#define GPCMD_WRITE_AND_VERIFY_10 0x2e +#define GPCMD_READ_10 0x28 +#define GPCMD_READ_12 0xa8 +#define GPCMD_READ_CDVD_CAPACITY 0x25 +#define GPCMD_READ_CD 0xbe +#define GPCMD_READ_CD_MSF 0xb9 +#define GPCMD_READ_DISC_INFO 
0x51 +#define GPCMD_READ_DVD_STRUCTURE 0xad +#define GPCMD_READ_FORMAT_CAPACITIES 0x23 +#define GPCMD_READ_HEADER 0x44 +#define GPCMD_READ_TRACK_RZONE_INFO 0x52 +#define GPCMD_READ_SUBCHANNEL 0x42 +#define GPCMD_READ_TOC_PMA_ATIP 0x43 +#define GPCMD_REPAIR_RZONE_TRACK 0x58 +#define GPCMD_REPORT_KEY 0xa4 +#define GPCMD_REQUEST_SENSE 0x03 +#define GPCMD_RESERVE_RZONE_TRACK 0x53 +#define GPCMD_SCAN 0xba +#define GPCMD_SEEK 0x2b +#define GPCMD_SEND_DVD_STRUCTURE 0xad +#define GPCMD_SEND_EVENT 0xa2 +#define GPCMD_SEND_KEY 0xa3 +#define GPCMD_SEND_OPC 0x54 +#define GPCMD_SET_READ_AHEAD 0xa7 +#define GPCMD_SET_STREAMING 0xb6 +#define GPCMD_START_STOP_UNIT 0x1b +#define GPCMD_STOP_PLAY_SCAN 0x4e +#define GPCMD_TEST_UNIT_READY 0x00 +#define GPCMD_VERIFY_10 0x2f +#define GPCMD_WRITE_10 0x2a +#define GPCMD_WRITE_AND_VERIFY_10 0x2e /* This is listed as optional in ATAPI 2.6, but is (curiously) * missing from Mt. Fuji, Table 57. It _is_ mentioned in Mt. Fuji * Table 377 as an MMC command for SCSi devices though... Most ATAPI * drives support it. */ -#define GPCMD_SET_SPEED 0xbb +#define GPCMD_SET_SPEED 0xbb /* This seems to be a SCSI specific CD-ROM opcode * to play data at track/index */ -#define GPCMD_PLAYAUDIO_TI 0x48 +#define GPCMD_PLAYAUDIO_TI 0x48 /* * From MS Media Status Notification Support Specification. For * older drives only. */ -#define GPCMD_GET_MEDIA_STATUS 0xda -#define GPCMD_MODE_SENSE_6 0x1a +#define GPCMD_GET_MEDIA_STATUS 0xda +#define GPCMD_MODE_SENSE_6 0x1a #define ATAPI_INT_REASON_CD 0x01 /* 0 = data transfer */ #define ATAPI_INT_REASON_IO 0x02 /* 1 = transfer to the host */ @@ -491,7 +488,7 @@ struct IDEBus { IDEDMA *dma; uint8_t unit; uint8_t cmd; - qemu_irq irq; + qemu_irq irq; /* bus output */ int error_status; uint8_t retry_unit; @@ -569,18 +566,11 @@ static inline uint8_t ide_dma_cmd_to_retry(uint8_t dma_cmd) return 0; } -static inline IDEState *idebus_active_if(IDEBus *bus) +static inline IDEState *ide_bus_active_if(IDEBus *bus) { return bus->ifs + bus->unit; } -static inline void ide_set_irq(IDEBus *bus) -{ - if (!(bus->cmd & IDE_CTRL_DISABLE_IRQ)) { - qemu_irq_raise(bus->irq); - } -} - /* hw/ide/core.c */ extern const VMStateDescription vmstate_ide_bus; @@ -626,12 +616,13 @@ int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind, uint64_t wwn, uint32_t cylinders, uint32_t heads, uint32_t secs, int chs_trans, Error **errp); -void ide_init2(IDEBus *bus, qemu_irq irq); void ide_exit(IDEState *s); +void ide_bus_init_output_irq(IDEBus *bus, qemu_irq irq_out); int ide_init_ioport(IDEBus *bus, ISADevice *isa, int iobase, int iobase2); -void ide_register_restart_cb(IDEBus *bus); +void ide_bus_set_irq(IDEBus *bus); +void ide_bus_register_restart_cb(IDEBus *bus); -void ide_exec_cmd(IDEBus *bus, uint32_t val); +void ide_bus_exec_cmd(IDEBus *bus, uint32_t val); void ide_transfer_start(IDEState *s, uint8_t *buf, int size, EndTransferFunc *end_transfer_func); @@ -654,7 +645,11 @@ void ide_atapi_cmd_reply_end(IDEState *s); /* hw/ide/qdev.c */ void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev, int bus_id, int max_units); -IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive); +IDEDevice *ide_bus_create_drive(IDEBus *bus, int unit, DriveInfo *drive); + +int ide_get_geometry(BusState *bus, int unit, + int16_t *cyls, int8_t *heads, int8_t *secs); +int ide_get_bios_chs_trans(BusState *bus, int unit); int ide_handle_rw_error(IDEState *s, int error, int op); diff --git a/include/hw/ide/isa.h b/include/hw/ide/isa.h new file mode 100644 index 
0000000000..1cd0ff1fa6 --- /dev/null +++ b/include/hw/ide/isa.h @@ -0,0 +1,20 @@ +/* + * QEMU IDE Emulation: ISA Bus support. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * SPDX-License-Identifier: MIT + */ +#ifndef HW_IDE_ISA_H +#define HW_IDE_ISA_H + +#include "qom/object.h" + +#define TYPE_ISA_IDE "isa-ide" +OBJECT_DECLARE_SIMPLE_TYPE(ISAIDEState, ISA_IDE) + +ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int irqnum, + DriveInfo *hd0, DriveInfo *hd1); + +#endif diff --git a/include/hw/ide/mmio.h b/include/hw/ide/mmio.h new file mode 100644 index 0000000000..d726a49848 --- /dev/null +++ b/include/hw/ide/mmio.h @@ -0,0 +1,26 @@ +/* + * QEMU IDE Emulation: mmio support (for embedded). + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * SPDX-License-Identifier: MIT + */ + +#ifndef HW_IDE_MMIO_H +#define HW_IDE_MMIO_H + +#include "qom/object.h" + +/* + * QEMU interface: + * + sysbus IRQ 0: asserted by the IDE channel + * + sysbus MMIO region 0: data registers + * + sysbus MMIO region 1: status & control registers + */ +#define TYPE_MMIO_IDE "mmio-ide" +OBJECT_DECLARE_SIMPLE_TYPE(MMIOIDEState, MMIO_IDE) + +void mmio_ide_init_drives(DeviceState *dev, DriveInfo *hd0, DriveInfo *hd1); + +#endif diff --git a/include/hw/ide/pci.h b/include/hw/ide/pci.h index 2a6284acac..74c127e32f 100644 --- a/include/hw/ide/pci.h +++ b/include/hw/ide/pci.h @@ -49,18 +49,13 @@ struct PCIIDEState { IDEBus bus[2]; BMDMAState bmdma[2]; + qemu_irq isa_irq[2]; uint32_t secondary; /* used only for cmd646 */ MemoryRegion bmdma_bar; MemoryRegion cmd_bar[2]; MemoryRegion data_bar[2]; }; -static inline IDEState *bmdma_active_if(BMDMAState *bmdma) -{ - assert(bmdma->bus->retry_unit != (uint8_t)-1); - return bmdma->bus->ifs + bmdma->bus->retry_unit; -} - void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d); void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val); extern MemoryRegionOps bmdma_addr_ioport_ops; diff --git a/include/hw/intc/i8259.h b/include/hw/intc/i8259.h index a0e34dd990..c412575775 100644 --- a/include/hw/intc/i8259.h +++ b/include/hw/intc/i8259.h @@ -4,7 +4,15 @@ /* i8259.c */ extern PICCommonState *isa_pic; -qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq); + +/* + * i8259_init() + * + * Create an i8259 device on an ISA @bus, + * connect its output to @parent_irq_in, + * return an (allocated) array of 16 input IRQs. + */ +qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq_in); qemu_irq *kvm_i8259_init(ISABus *bus); int pic_get_output(PICCommonState *s); int pic_read_irq(PICCommonState *s); diff --git a/include/hw/i386/ioapic.h b/include/hw/intc/ioapic.h similarity index 93% rename from include/hw/i386/ioapic.h rename to include/hw/intc/ioapic.h index ef37b8a9fd..aa122e25e3 100644 --- a/include/hw/i386/ioapic.h +++ b/include/hw/intc/ioapic.h @@ -17,8 +17,8 @@ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ -#ifndef HW_IOAPIC_H -#define HW_IOAPIC_H +#ifndef HW_INTC_IOAPIC_H +#define HW_INTC_IOAPIC_H #define IOAPIC_NUM_PINS 24 #define IO_APIC_DEFAULT_ADDRESS 0xfec00000 @@ -30,4 +30,4 @@ void ioapic_eoi_broadcast(int vector); -#endif /* HW_IOAPIC_H */ +#endif /* HW_INTC_IOAPIC_H */ diff --git a/include/hw/intc/loongarch_extioi.h b/include/hw/intc/loongarch_extioi.h index 15b8c999f6..fbdef9a7b3 100644 --- a/include/hw/intc/loongarch_extioi.h +++ b/include/hw/intc/loongarch_extioi.h @@ -14,6 +14,8 @@ #define LS3A_INTC_IP 8 #define EXTIOI_IRQS (256) #define EXTIOI_IRQS_BITMAP_SIZE (256 / 8) +/* irq from EXTIOI is routed to no more than 4 cpus */ +#define EXTIOI_CPUS (4) /* map to ipnum per 32 irqs */ #define EXTIOI_IRQS_IPMAP_SIZE (256 / 32) #define EXTIOI_IRQS_COREMAP_SIZE 256 @@ -46,17 +48,17 @@ struct LoongArchExtIOI { uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2]; uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT]; uint32_t isr[EXTIOI_IRQS / 32]; - uint32_t coreisr[LOONGARCH_MAX_VCPUS][EXTIOI_IRQS_GROUP_COUNT]; + uint32_t coreisr[EXTIOI_CPUS][EXTIOI_IRQS_GROUP_COUNT]; uint32_t enable[EXTIOI_IRQS / 32]; uint32_t ipmap[EXTIOI_IRQS_IPMAP_SIZE / 4]; uint32_t coremap[EXTIOI_IRQS / 4]; uint32_t sw_pending[EXTIOI_IRQS / 32]; - DECLARE_BITMAP(sw_isr[LOONGARCH_MAX_VCPUS][LS3A_INTC_IP], EXTIOI_IRQS); + DECLARE_BITMAP(sw_isr[EXTIOI_CPUS][LS3A_INTC_IP], EXTIOI_IRQS); uint8_t sw_ipmap[EXTIOI_IRQS_IPMAP_SIZE]; uint8_t sw_coremap[EXTIOI_IRQS]; - qemu_irq parent_irq[LOONGARCH_MAX_VCPUS][LS3A_INTC_IP]; + qemu_irq parent_irq[EXTIOI_CPUS][LS3A_INTC_IP]; qemu_irq irq[EXTIOI_IRQS]; - MemoryRegion extioi_iocsr_mem[LOONGARCH_MAX_VCPUS]; + MemoryRegion extioi_iocsr_mem[EXTIOI_CPUS]; MemoryRegion extioi_system_mem; }; #endif /* LOONGARCH_EXTIOI_H */ diff --git a/include/hw/intc/loongarch_ipi.h b/include/hw/intc/loongarch_ipi.h index 0ee48fca55..664e050b92 100644 --- a/include/hw/intc/loongarch_ipi.h +++ b/include/hw/intc/loongarch_ipi.h @@ -28,9 +28,6 @@ #define MAIL_SEND_OFFSET 0 #define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND) -#define MAX_IPI_CORE_NUM 4 -#define MAX_IPI_MBX_NUM 4 - #define TYPE_LOONGARCH_IPI "loongarch_ipi" OBJECT_DECLARE_SIMPLE_TYPE(LoongArchIPI, LOONGARCH_IPI) @@ -40,14 +37,15 @@ typedef struct IPICore { uint32_t set; uint32_t clear; /* 64bit buf divide into 2 32bit buf */ - uint32_t buf[MAX_IPI_MBX_NUM * 2]; + uint32_t buf[2]; qemu_irq irq; } IPICore; struct LoongArchIPI { SysBusDevice parent_obj; - MemoryRegion ipi_iocsr_mem[MAX_IPI_CORE_NUM]; - MemoryRegion ipi64_iocsr_mem[MAX_IPI_CORE_NUM]; + MemoryRegion ipi_iocsr_mem; + MemoryRegion ipi64_iocsr_mem; + IPICore ipi_core; }; #endif diff --git a/include/hw/intc/mips_gic.h b/include/hw/intc/mips_gic.h index eeb136e261..5e4c71edd4 100644 --- a/include/hw/intc/mips_gic.h +++ b/include/hw/intc/mips_gic.h @@ -211,8 +211,8 @@ struct MIPSGICState { /* GIC VP Timer */ MIPSGICTimerState *gic_timer; - int32_t num_vps; - int32_t num_irq; + uint32_t num_vps; + uint32_t num_irq; }; #endif /* MIPS_GIC_H */ diff --git a/include/hw/isa/i8259_internal.h b/include/hw/isa/i8259_internal.h index d272d879fb..f9dcc4163e 100644 --- a/include/hw/isa/i8259_internal.h +++ b/include/hw/isa/i8259_internal.h @@ -35,7 +35,7 @@ OBJECT_DECLARE_TYPE(PICCommonState, PICCommonClass, PIC_COMMON) struct PICCommonClass { - ISADeviceClass parent_class; + DeviceClass parent_class; void (*pre_save)(PICCommonState *s); void (*post_load)(PICCommonState *s); @@ -61,6 +61,7 @@ struct PICCommonState { uint8_t single_mode; /* true if slave pic is not initialized */ uint8_t elcr; /* 
PIIX edge/trigger selection*/ uint8_t elcr_mask; + uint8_t ltim; /* Edge/Level Bank Select (pre-PIIX, chip-wide) */ qemu_irq int_out[1]; uint32_t master; /* reflects /SP input pin */ uint32_t iobase; diff --git a/include/hw/isa/isa.h b/include/hw/isa/isa.h index 25acd5c34c..40d6224a4e 100644 --- a/include/hw/isa/isa.h +++ b/include/hw/isa/isa.h @@ -11,7 +11,7 @@ #define ISA_NUM_IRQS 16 #define TYPE_ISA_DEVICE "isa-device" -OBJECT_DECLARE_TYPE(ISADevice, ISADeviceClass, ISA_DEVICE) +OBJECT_DECLARE_SIMPLE_TYPE(ISADevice, ISA_DEVICE) #define TYPE_ISA_BUS "ISA" OBJECT_DECLARE_SIMPLE_TYPE(ISABus, ISA_BUS) @@ -48,10 +48,6 @@ struct IsaDmaClass { void *opaque); }; -struct ISADeviceClass { - DeviceClass parent_class; -}; - struct ISABus { /*< private >*/ BusState parent_obj; @@ -59,7 +55,7 @@ struct ISABus { MemoryRegion *address_space; MemoryRegion *address_space_io; - qemu_irq *irqs; + qemu_irq *irqs_in; IsaDma *dma[2]; }; @@ -73,13 +69,17 @@ struct ISADevice { ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space, MemoryRegion *address_space_io, Error **errp); -void isa_bus_irqs(ISABus *bus, qemu_irq *irqs); -qemu_irq isa_get_irq(ISADevice *dev, unsigned isairq); -void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, unsigned isairq); +void isa_bus_register_input_irqs(ISABus *bus, qemu_irq *irqs_in); void isa_bus_dma(ISABus *bus, IsaDma *dma8, IsaDma *dma16); -IsaDma *isa_get_dma(ISABus *bus, int nchan); -MemoryRegion *isa_address_space(ISADevice *dev); -MemoryRegion *isa_address_space_io(ISADevice *dev); +IsaDma *isa_bus_get_dma(ISABus *bus, int nchan); +/** + * isa_bus_get_irq: Return input IRQ on ISA bus. + * @bus: the #ISABus to plug ISA devices on. + * @irqnum: the ISA IRQ number. + * + * Return IRQ @irqnum from the PIC associated with ISA @bus. + */ +qemu_irq isa_bus_get_irq(ISABus *bus, unsigned irqnum); ISADevice *isa_new(const char *name); ISADevice *isa_try_new(const char *name); bool isa_realize_and_unref(ISADevice *dev, ISABus *bus, Error **errp); @@ -87,6 +87,12 @@ ISADevice *isa_create_simple(ISABus *bus, const char *name); ISADevice *isa_vga_init(ISABus *bus); +qemu_irq isa_get_irq(ISADevice *dev, unsigned isairq); +void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, unsigned isairq); +MemoryRegion *isa_address_space(ISADevice *dev); +MemoryRegion *isa_address_space_io(ISADevice *dev); +ISABus *isa_bus_from_device(ISADevice *dev); + /** * isa_register_ioport: Install an I/O port region on the ISA bus.
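A hedged sketch of the reshuffled ISA bus API above; pic_irqs stands in for the IRQ array returned by an interrupt-controller init function and is an assumption for illustration:

    /* Board code registers the PIC outputs as the bus input IRQs... */
    isa_bus_register_input_irqs(isa_bus, pic_irqs);   /* formerly isa_bus_irqs() */
    /* ...after which an IRQ can be fetched without an ISADevice at hand. */
    qemu_irq parallel_irq = isa_bus_get_irq(isa_bus, 7);
    IsaDma *dma16 = isa_bus_get_dma(isa_bus, 5);      /* formerly isa_get_dma() */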
* @@ -123,9 +129,4 @@ int isa_register_portio_list(ISADevice *dev, const MemoryRegionPortio *portio, void *opaque, const char *name); -static inline ISABus *isa_bus_from_device(ISADevice *d) -{ - return ISA_BUS(qdev_get_parent_bus(DEVICE(d))); -} - #endif diff --git a/include/hw/isa/superio.h b/include/hw/isa/superio.h index b9f5c19155..0dc45104d4 100644 --- a/include/hw/isa/superio.h +++ b/include/hw/isa/superio.h @@ -44,7 +44,7 @@ typedef struct ISASuperIOFuncs { struct ISASuperIOClass { /*< private >*/ - ISADeviceClass parent_class; + DeviceClass parent_class; /*< public >*/ DeviceRealize parent_realize; diff --git a/include/hw/isa/vt82c686.h b/include/hw/isa/vt82c686.h index e273cd38dc..da1722daf2 100644 --- a/include/hw/isa/vt82c686.h +++ b/include/hw/isa/vt82c686.h @@ -1,6 +1,8 @@ #ifndef HW_VT82C686_H #define HW_VT82C686_H +#include "hw/pci/pci_device.h" +#include "audio/audio.h" #define TYPE_VT82C686B_ISA "vt82c686b-isa" #define TYPE_VT82C686B_USB_UHCI "vt82c686b-usb-uhci" @@ -9,6 +11,29 @@ #define TYPE_VIA_IDE "via-ide" #define TYPE_VIA_MC97 "via-mc97" +typedef struct { + uint8_t stat; + uint8_t type; + uint32_t base; + uint32_t curr; + uint32_t addr; + uint32_t clen; +} ViaAC97SGDChannel; + +OBJECT_DECLARE_SIMPLE_TYPE(ViaAC97State, VIA_AC97); + +struct ViaAC97State { + PCIDevice dev; + QEMUSoundCard card; + MemoryRegion sgd; + MemoryRegion fm; + MemoryRegion midi; + SWVoiceOut *vo; + ViaAC97SGDChannel aur; + uint16_t codec_regs[128]; + uint32_t ac97_cmd; +}; + void via_isa_set_irq(PCIDevice *d, int n, int level); #endif diff --git a/include/hw/loader.h b/include/hw/loader.h index 1384796a4b..c4c14170ea 100644 --- a/include/hw/loader.h +++ b/include/hw/loader.h @@ -86,6 +86,25 @@ ssize_t load_image_gzipped_buffer(const char *filename, uint64_t max_sz, uint8_t **buffer); ssize_t load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz); +/** + * unpack_efi_zboot_image: + * @buffer: pointer to a variable holding the address of a buffer containing the + * image + * @size: pointer to a variable holding the size of the buffer + * + * Check whether the buffer contains an EFI zboot image, and if it does, extract + * the compressed payload and decompress it into a new buffer. If successful, + * the old buffer is freed, and the *buffer and size variables pointed to by the + * function arguments are updated to refer to the newly populated buffer. + * + * Returns 0 if the image could not be identified as an EFI zboot image. + * Returns -1 if the buffer contents were identified as an EFI zboot image, but + * unpacking failed for any reason. + * Returns the size of the decompressed payload if decompression was performed + * successfully.
+ */ +ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size); + #define ELF_LOAD_FAILED -1 #define ELF_LOAD_NOT_ELF -2 #define ELF_LOAD_WRONG_ARCH -3 diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h index f5f818894e..f1659655c6 100644 --- a/include/hw/loongarch/virt.h +++ b/include/hw/loongarch/virt.h @@ -14,7 +14,7 @@ #include "hw/intc/loongarch_ipi.h" #include "hw/block/flash.h" -#define LOONGARCH_MAX_VCPUS 4 +#define LOONGARCH_MAX_CPUS 256 #define VIRT_ISA_IO_BASE 0x18000000UL #define VIRT_ISA_IO_SIZE 0x0004000 @@ -36,7 +36,6 @@ struct LoongArchMachineState { /*< private >*/ MachineState parent_obj; - IPICore ipi_core[MAX_IPI_CORE_NUM]; MemoryRegion lowmem; MemoryRegion highmem; MemoryRegion isa_io; @@ -45,6 +44,7 @@ struct LoongArchMachineState { /* State for other subsystems/APIs: */ FWCfgState *fw_cfg; Notifier machine_done; + Notifier powerdown_notifier; OnOffAuto acpi; char *oem_id; char *oem_table_id; diff --git a/include/hw/misc/bcm2835_property.h b/include/hw/misc/bcm2835_property.h index 712b76b7a3..ba8896610c 100644 --- a/include/hw/misc/bcm2835_property.h +++ b/include/hw/misc/bcm2835_property.h @@ -30,6 +30,7 @@ struct BCM2835PropertyState { MACAddr macaddr; uint32_t board_rev; uint32_t addr; + char *command_line; bool pending; }; diff --git a/include/hw/misc/lasi.h b/include/hw/misc/lasi.h index ecc7065ce8..0a8c7352be 100644 --- a/include/hw/misc/lasi.h +++ b/include/hw/misc/lasi.h @@ -69,8 +69,7 @@ struct LasiState { uint32_t errlog; uint32_t amr; - uint32_t rtc; - time_t rtc_ref; + uint32_t rtc_ref; MemoryRegion this_mem; }; diff --git a/include/hw/misc/mips_cmgcr.h b/include/hw/misc/mips_cmgcr.h index 9fa58942d7..db4bf5f449 100644 --- a/include/hw/misc/mips_cmgcr.h +++ b/include/hw/misc/mips_cmgcr.h @@ -75,7 +75,7 @@ struct MIPSGCRState { SysBusDevice parent_obj; int32_t gcr_rev; - int32_t num_vps; + uint32_t num_vps; hwaddr gcr_base; MemoryRegion iomem; MemoryRegion *cpc_mr; diff --git a/include/hw/misc/mips_itu.h b/include/hw/misc/mips_itu.h index 50d961106d..35218b2d14 100644 --- a/include/hw/misc/mips_itu.h +++ b/include/hw/misc/mips_itu.h @@ -57,8 +57,8 @@ struct MIPSITUState { SysBusDevice parent_obj; /*< public >*/ - int32_t num_fifo; - int32_t num_semaphores; + uint32_t num_fifo; + uint32_t num_semaphores; /* ITC Storage */ ITCStorageCell *cell; @@ -72,9 +72,8 @@ struct MIPSITUState { uint64_t icr0; /* SAAR */ - bool saar_present; - void *saar; - + uint64_t *saar; + MIPSCPU *cpu0; }; /* Get ITC Configuration Tag memory region. 
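A hedged sketch of a caller honoring the unpack_efi_zboot_image() return convention documented in the loader.h hunk above; the wrapper function is an assumption for illustration:

    static void try_unpack_zboot(uint8_t **buffer, int *size)
    {
        ssize_t payload = unpack_efi_zboot_image(buffer, size);

        if (payload < 0) {
            /* recognized as an EFI zboot image, but unpacking failed */
        } else if (payload > 0) {
            /* *buffer was replaced and now holds the decompressed payload */
        }
        /* payload == 0: not a zboot image, *buffer and *size are untouched */
    }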
*/ diff --git a/include/hw/net/imx_fec.h b/include/hw/net/imx_fec.h index e3a8755db9..2d13290c78 100644 --- a/include/hw/net/imx_fec.h +++ b/include/hw/net/imx_fec.h @@ -270,6 +270,8 @@ struct IMXFECState { uint32_t phy_int; uint32_t phy_int_mask; uint32_t phy_num; + bool phy_connected; + struct IMXFECState *phy_consumer; bool is_fec; diff --git a/include/hw/net/mii.h b/include/hw/net/mii.h index 4ae4dcce7e..ed1bb52b0f 100644 --- a/include/hw/net/mii.h +++ b/include/hw/net/mii.h @@ -55,6 +55,7 @@ #define MII_BMCR_CTST (1 << 7) /* Collision test */ #define MII_BMCR_SPEED1000 (1 << 6) /* MSB of Speed (1000) */ +#define MII_BMSR_100T4 (1 << 15) /* Can do 100mbps T4 */ #define MII_BMSR_100TX_FD (1 << 14) /* Can do 100mbps, full-duplex */ #define MII_BMSR_100TX_HD (1 << 13) /* Can do 100mbps, half-duplex */ #define MII_BMSR_10T_FD (1 << 12) /* Can do 10mbps, full-duplex */ @@ -81,20 +82,31 @@ #define MII_ANLPAR_ACK (1 << 14) #define MII_ANLPAR_PAUSEASY (1 << 11) /* can pause asymmetrically */ #define MII_ANLPAR_PAUSE (1 << 10) /* can pause */ +#define MII_ANLPAR_T4 (1 << 9) #define MII_ANLPAR_TXFD (1 << 8) #define MII_ANLPAR_TX (1 << 7) #define MII_ANLPAR_10FD (1 << 6) #define MII_ANLPAR_10 (1 << 5) #define MII_ANLPAR_CSMACD (1 << 0) -#define MII_ANER_NWAY (1 << 0) /* Can do N-way auto-nego */ +#define MII_ANER_NP (1 << 2) /* Next Page Able */ +#define MII_ANER_NWAY (1 << 0) /* Can do N-way auto-nego */ +#define MII_ANNP_MP (1 << 13) /* Message Page */ + +#define MII_CTRL1000_MASTER (1 << 11) /* MASTER-SLAVE Manual Configuration Value */ +#define MII_CTRL1000_PORT (1 << 10) /* T2_Repeater/DTE bit */ #define MII_CTRL1000_FULL (1 << 9) /* 1000BASE-T full duplex */ #define MII_CTRL1000_HALF (1 << 8) /* 1000BASE-T half duplex */ +#define MII_STAT1000_LOK (1 << 13) /* Local Receiver Status */ +#define MII_STAT1000_ROK (1 << 12) /* Remote Receiver Status */ #define MII_STAT1000_FULL (1 << 11) /* 1000BASE-T full duplex */ #define MII_STAT1000_HALF (1 << 10) /* 1000BASE-T half duplex */ +#define MII_EXTSTAT_1000T_FD (1 << 13) /* 1000BASE-T Full Duplex */ +#define MII_EXTSTAT_1000T_HD (1 << 12) /* 1000BASE-T Half Duplex */ + /* List of vendor identifiers */ /* RealTek 8201 */ #define RTL8201CP_PHYID1 0x0000 diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h index 990dcdbb2e..c1f81a5f13 100644 --- a/include/hw/nvram/fw_cfg.h +++ b/include/hw/nvram/fw_cfg.h @@ -117,37 +117,6 @@ struct FWCfgMemState { */ void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len); -/** - * fw_cfg_add_bytes_callback: - * @s: fw_cfg device being modified - * @key: selector key value for new fw_cfg item - * @select_cb: callback function when selecting - * @write_cb: callback function after a write - * @callback_opaque: argument to be passed into callback function - * @data: pointer to start of item data - * @len: size of item data - * @read_only: is file read only - * - * Add a new fw_cfg item, available by selecting the given key, as a raw - * "blob" of the given size. The data referenced by the starting pointer - * is only linked, NOT copied, into the data structure of the fw_cfg device. - */ -void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key, - FWCfgCallback select_cb, - FWCfgWriteCallback write_cb, - void *callback_opaque, - void *data, size_t len, - bool read_only); - -/** - * fw_cfg_read_bytes_ptr: - * @s: fw_cfg device being modified - * @key: selector key value for new fw_cfg item - * - * Reads an existing fw_cfg data pointer. 
- */ -void *fw_cfg_read_bytes_ptr(FWCfgState *s, uint16_t key); - /** * fw_cfg_add_string: * @s: fw_cfg device being modified diff --git a/include/hw/pci-host/ls7a.h b/include/hw/pci-host/ls7a.h index ff4b979912..e753449593 100644 --- a/include/hw/pci-host/ls7a.h +++ b/include/hw/pci-host/ls7a.h @@ -26,24 +26,25 @@ #define VIRT_PCH_MSI_ADDR_LOW 0x2FF00000UL /* - * According to the kernel pch irq start from 64 offset - * 0 ~ 16 irqs used for non-pci device while 16 ~ 64 irqs - * used for pci device. + * GSI_BASE is hard-coded to 64 in the Linux kernel; the kernel fails to boot otherwise. + * 0 - 15: GSIs for ISA devices, even if there are no ISA devices + * 16 - 63: GSIs for CPU devices such as timers, performance monitors, etc. + * 64 and up: GSIs for external devices */ #define VIRT_PCH_PIC_IRQ_NUM 32 -#define PCH_PIC_IRQ_OFFSET 64 +#define VIRT_GSI_BASE 64 #define VIRT_DEVICE_IRQS 16 -#define VIRT_UART_IRQ (PCH_PIC_IRQ_OFFSET + 2) +#define VIRT_UART_IRQ (VIRT_GSI_BASE + 2) #define VIRT_UART_BASE 0x1fe001e0 #define VIRT_UART_SIZE 0X100 -#define VIRT_RTC_IRQ (PCH_PIC_IRQ_OFFSET + 3) +#define VIRT_RTC_IRQ (VIRT_GSI_BASE + 3) #define VIRT_MISC_REG_BASE (VIRT_PCH_REG_BASE + 0x00080000) #define VIRT_RTC_REG_BASE (VIRT_MISC_REG_BASE + 0x00050100) #define VIRT_RTC_LEN 0x100 -#define VIRT_SCI_IRQ (PCH_PIC_IRQ_OFFSET + 4) +#define VIRT_SCI_IRQ (VIRT_GSI_BASE + 4) #define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000 #define VIRT_PLATFORM_BUS_SIZE 0x2000000 #define VIRT_PLATFORM_BUS_NUM_IRQS 2 -#define VIRT_PLATFORM_BUS_IRQ 69 +#define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 5) #endif diff --git a/include/hw/pci-host/pam.h b/include/hw/pci-host/pam.h index c1fd06ba2a..005916f826 100644 --- a/include/hw/pci-host/pam.h +++ b/include/hw/pci-host/pam.h @@ -87,8 +87,9 @@ typedef struct PAMMemoryRegion { unsigned current; } PAMMemoryRegion; -void init_pam(DeviceState *dev, MemoryRegion *ram, MemoryRegion *system, - MemoryRegion *pci, PAMMemoryRegion *mem, uint32_t start, uint32_t size); +void init_pam(PAMMemoryRegion *mem, Object *owner, MemoryRegion *ram, + MemoryRegion *system, MemoryRegion *pci, + uint32_t start, uint32_t size); void pam_update(PAMMemoryRegion *mem, int idx, uint8_t val); #endif /* QEMU_PAM_H */ diff --git a/include/hw/pci-host/pnv_phb4.h b/include/hw/pci-host/pnv_phb4.h index 28d61b96c7..2d026db9a3 100644 --- a/include/hw/pci-host/pnv_phb4.h +++ b/include/hw/pci-host/pnv_phb4.h @@ -157,6 +157,7 @@ struct PnvPHB4 { void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon); int pnv_phb4_pec_get_phb_id(PnvPhb4PecState *pec, int stack_index); +PnvPhb4PecState *pnv_pec_add_phb(PnvChip *chip, PnvPHB *phb, Error **errp); void pnv_phb4_bus_init(DeviceState *dev, PnvPHB4 *phb); extern const MemoryRegionOps pnv_phb4_xscom_ops; @@ -185,6 +186,8 @@ struct PnvPhb4PecState { /* PHBs */ uint32_t num_phbs; +#define MAX_PHBS_PER_PEC 3 + PnvPHB *phbs[MAX_PHBS_PER_PEC]; PnvChip *chip; }; diff --git a/include/hw/pci/msi.h b/include/hw/pci/msi.h index ee8ee469a6..abcfd13925 100644 --- a/include/hw/pci/msi.h +++ b/include/hw/pci/msi.h @@ -33,6 +33,7 @@ extern bool msi_nonbroken; void msi_set_message(PCIDevice *dev, MSIMessage msg); MSIMessage msi_get_message(PCIDevice *dev, unsigned int vector); bool msi_enabled(const PCIDevice *dev); +void msi_set_enabled(PCIDevice *dev); int msi_init(struct PCIDevice *dev, uint8_t offset, unsigned int nr_vectors, bool msi64bit, bool msi_per_vector_mask, Error **errp); diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index d5a40cd058..e6d0574a29 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h
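A hedged sketch of a call site updated for the reordered init_pam() prototype above; the mch-style field names and the PAM_EXPAN_* region macros are assumptions for illustration, not code from this patch:

    /* The PAM region now comes first and the owner is a plain Object. */
    for (unsigned i = 0; i < ARRAY_SIZE(mch->pam_regions); i++) {
        init_pam(&mch->pam_regions[i], OBJECT(mch), ram_memory,
                 system_memory, pci_address_space,
                 PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE);
    }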
@@ -207,6 +207,8 @@ enum { QEMU_PCIE_EXTCAP_INIT = (1 << QEMU_PCIE_EXTCAP_INIT_BITNR), #define QEMU_PCIE_CXL_BITNR 10 QEMU_PCIE_CAP_CXL = (1 << QEMU_PCIE_CXL_BITNR), +#define QEMU_PCIE_ERR_UNC_MASK_BITNR 11 + QEMU_PCIE_ERR_UNC_MASK = (1 << QEMU_PCIE_ERR_UNC_MASK_BITNR), }; typedef struct PCIINTxRoute { @@ -287,6 +289,9 @@ void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq); void pci_bus_irqs_cleanup(PCIBus *bus); int pci_bus_get_irq_level(PCIBus *bus, int irq_num); +uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus); +void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask); +void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask); /* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD */ static inline int pci_swizzle(int slot, int pin) { diff --git a/include/hw/pci/pci_bridge.h b/include/hw/pci/pci_bridge.h index 63a7521567..ea54a81a15 100644 --- a/include/hw/pci/pci_bridge.h +++ b/include/hw/pci/pci_bridge.h @@ -73,7 +73,7 @@ struct PCIBridge { MemoryRegion address_space_mem; MemoryRegion address_space_io; - PCIBridgeWindows *windows; + PCIBridgeWindows windows; pci_map_irq_fn map_irq; const char *bus_name; @@ -84,7 +84,7 @@ struct PCIBridge { #define PCI_BRIDGE_DEV_PROP_SHPC "shpc" typedef struct CXLHost CXLHost; -struct PXBDev { +typedef struct PXBDev { /*< private >*/ PCIDevice parent_obj; /*< public >*/ @@ -92,14 +92,27 @@ struct PXBDev { uint8_t bus_nr; uint16_t numa_node; bool bypass_iommu; - struct cxl_dev { - CXLHost *cxl_host_bridge; /* Pointer to a CXLHost */ - } cxl; -}; +} PXBDev; -#define TYPE_PXB_CXL_DEVICE "pxb-cxl" -DECLARE_INSTANCE_CHECKER(PXBDev, PXB_CXL_DEV, - TYPE_PXB_CXL_DEVICE) +typedef struct PXBPCIEDev { + /*< private >*/ + PXBDev parent_obj; +} PXBPCIEDev; + +#define TYPE_PXB_DEV "pxb" +OBJECT_DECLARE_SIMPLE_TYPE(PXBDev, PXB_DEV) + +typedef struct PXBCXLDev { + /*< private >*/ + PXBPCIEDev parent_obj; + /*< public >*/ + + bool hdm_for_passthrough; + CXLHost *cxl_host_bridge; /* Pointer to a CXLHost */ +} PXBCXLDev; + +#define TYPE_PXB_CXL_DEV "pxb-cxl" +OBJECT_DECLARE_SIMPLE_TYPE(PXBCXLDev, PXB_CXL_DEV) int pci_bridge_ssvid_init(PCIDevice *dev, uint8_t offset, uint16_t svid, uint16_t ssid, @@ -136,11 +149,11 @@ void pci_bridge_map_irq(PCIBridge *br, const char* bus_name, pci_map_irq_fn map_irq); /* TODO: add this define to pci_regs.h in linux and then in qemu. 
*/ -#define PCI_BRIDGE_CTL_VGA_16BIT 0x10 /* VGA 16-bit decode */ -#define PCI_BRIDGE_CTL_DISCARD 0x100 /* Primary discard timer */ -#define PCI_BRIDGE_CTL_SEC_DISCARD 0x200 /* Secondary discard timer */ -#define PCI_BRIDGE_CTL_DISCARD_STATUS 0x400 /* Discard timer status */ -#define PCI_BRIDGE_CTL_DISCARD_SERR 0x800 /* Discard timer SERR# enable */ +#define PCI_BRIDGE_CTL_VGA_16BIT 0x10 /* VGA 16-bit decode */ +#define PCI_BRIDGE_CTL_DISCARD 0x100 /* Primary discard timer */ +#define PCI_BRIDGE_CTL_SEC_DISCARD 0x200 /* Secondary discard timer */ +#define PCI_BRIDGE_CTL_DISCARD_STATUS 0x400 /* Discard timer status */ +#define PCI_BRIDGE_CTL_DISCARD_SERR 0x800 /* Discard timer SERR# enable */ typedef struct PCIBridgeQemuCap { uint8_t id; /* Standard PCI capability header field */ diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h index 798a262a0a..3cc2b15957 100644 --- a/include/hw/pci/pcie.h +++ b/include/hw/pci/pcie.h @@ -27,14 +27,6 @@ #include "hw/pci/pcie_sriov.h" #include "hw/hotplug.h" -typedef enum { - /* for attention and power indicator */ - PCI_EXP_HP_IND_RESERVED = PCI_EXP_SLTCTL_IND_RESERVED, - PCI_EXP_HP_IND_ON = PCI_EXP_SLTCTL_IND_ON, - PCI_EXP_HP_IND_BLINK = PCI_EXP_SLTCTL_IND_BLINK, - PCI_EXP_HP_IND_OFF = PCI_EXP_SLTCTL_IND_OFF, -} PCIExpressIndicator; - typedef enum { /* these bits must match the bits in Slot Control/Status registers. * PCI_EXP_HP_EV_xxx = PCI_EXP_SLTCTL_xxxE = PCI_EXP_SLTSTA_xxx diff --git a/include/hw/pci/pcie_aer.h b/include/hw/pci/pcie_aer.h index 65e71d98fe..1234fdc4e2 100644 --- a/include/hw/pci/pcie_aer.h +++ b/include/hw/pci/pcie_aer.h @@ -100,4 +100,5 @@ void pcie_aer_root_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len, uint32_t root_cmd_prev); +int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err); #endif /* QEMU_PCIE_AER_H */ diff --git a/include/hw/pci/pcie_port.h b/include/hw/pci/pcie_port.h index 6c40e3733f..90e6cf45b8 100644 --- a/include/hw/pci/pcie_port.h +++ b/include/hw/pci/pcie_port.h @@ -41,6 +41,8 @@ struct PCIEPort { void pcie_port_init_reg(PCIDevice *d); PCIDevice *pcie_find_port_by_pn(PCIBus *bus, uint8_t pn); +PCIDevice *pcie_find_port_first(PCIBus *bus); +int pcie_count_ds_ports(PCIBus *bus); #define TYPE_PCIE_SLOT "pcie-slot" OBJECT_DECLARE_SIMPLE_TYPE(PCIESlot, PCIE_SLOT) diff --git a/include/hw/pci/pcie_regs.h b/include/hw/pci/pcie_regs.h index 963dc2e170..4972106c42 100644 --- a/include/hw/pci/pcie_regs.h +++ b/include/hw/pci/pcie_regs.h @@ -66,20 +66,6 @@ typedef enum PCIExpLinkWidth { #define PCI_EXP_SLTCAP_PSN_SHIFT ctz32(PCI_EXP_SLTCAP_PSN) -#define PCI_EXP_SLTCTL_IND_RESERVED 0x0 -#define PCI_EXP_SLTCTL_IND_ON 0x1 -#define PCI_EXP_SLTCTL_IND_BLINK 0x2 -#define PCI_EXP_SLTCTL_IND_OFF 0x3 -#define PCI_EXP_SLTCTL_AIC_SHIFT ctz32(PCI_EXP_SLTCTL_AIC) -#define PCI_EXP_SLTCTL_AIC_OFF \ - (PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_AIC_SHIFT) - -#define PCI_EXP_SLTCTL_PIC_SHIFT ctz32(PCI_EXP_SLTCTL_PIC) -#define PCI_EXP_SLTCTL_PIC_OFF \ - (PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_PIC_SHIFT) -#define PCI_EXP_SLTCTL_PIC_ON \ - (PCI_EXP_SLTCTL_IND_ON << PCI_EXP_SLTCTL_PIC_SHIFT) - #define PCI_EXP_SLTCTL_SUPPORTED \ (PCI_EXP_SLTCTL_ABPE | \ PCI_EXP_SLTCTL_PDCE | \ @@ -155,6 +141,9 @@ typedef enum PCIExpLinkWidth { PCI_ERR_UNC_ATOP_EBLOCKED | \ PCI_ERR_UNC_TLP_PRF_BLOCKED) +#define PCI_ERR_UNC_MASK_DEFAULT (PCI_ERR_UNC_INTN | \ + PCI_ERR_UNC_TLP_PRF_BLOCKED) + #define PCI_ERR_UNC_SEVERITY_DEFAULT (PCI_ERR_UNC_DLP | \ PCI_ERR_UNC_SDN | \ PCI_ERR_UNC_FCP | \ diff --git a/include/hw/pci/pcie_sriov.h 
b/include/hw/pci/pcie_sriov.h index 96cc743309..095fb0c9ed 100644 --- a/include/hw/pci/pcie_sriov.h +++ b/include/hw/pci/pcie_sriov.h @@ -76,4 +76,7 @@ PCIDevice *pcie_sriov_get_pf(PCIDevice *dev); */ PCIDevice *pcie_sriov_get_vf_at_index(PCIDevice *dev, int n); +/* Returns the current number of virtual functions. */ +uint16_t pcie_sriov_num_vfs(PCIDevice *dev); + #endif /* QEMU_PCIE_SRIOV_H */ diff --git a/include/hw/pcmcia.h b/include/hw/pcmcia.h index e3ba44e0bf..ab26802751 100644 --- a/include/hw/pcmcia.h +++ b/include/hw/pcmcia.h @@ -43,22 +43,22 @@ struct PCMCIACardClass { void (*io_write)(PCMCIACardState *card, uint32_t address, uint16_t value); }; -#define CISTPL_DEVICE 0x01 /* 5V Device Information Tuple */ -#define CISTPL_NO_LINK 0x14 /* No Link Tuple */ -#define CISTPL_VERS_1 0x15 /* Level 1 Version Tuple */ -#define CISTPL_JEDEC_C 0x18 /* JEDEC ID Tuple */ -#define CISTPL_JEDEC_A 0x19 /* JEDEC ID Tuple */ -#define CISTPL_CONFIG 0x1a /* Configuration Tuple */ -#define CISTPL_CFTABLE_ENTRY 0x1b /* 16-bit PCCard Configuration */ -#define CISTPL_DEVICE_OC 0x1c /* Additional Device Information */ -#define CISTPL_DEVICE_OA 0x1d /* Additional Device Information */ -#define CISTPL_DEVICE_GEO 0x1e /* Additional Device Information */ -#define CISTPL_DEVICE_GEO_A 0x1f /* Additional Device Information */ -#define CISTPL_MANFID 0x20 /* Manufacture ID Tuple */ -#define CISTPL_FUNCID 0x21 /* Function ID Tuple */ -#define CISTPL_FUNCE 0x22 /* Function Extension Tuple */ -#define CISTPL_END 0xff /* Tuple End */ -#define CISTPL_ENDMARK 0xff +#define CISTPL_DEVICE 0x01 /* 5V Device Information Tuple */ +#define CISTPL_NO_LINK 0x14 /* No Link Tuple */ +#define CISTPL_VERS_1 0x15 /* Level 1 Version Tuple */ +#define CISTPL_JEDEC_C 0x18 /* JEDEC ID Tuple */ +#define CISTPL_JEDEC_A 0x19 /* JEDEC ID Tuple */ +#define CISTPL_CONFIG 0x1a /* Configuration Tuple */ +#define CISTPL_CFTABLE_ENTRY 0x1b /* 16-bit PCCard Configuration */ +#define CISTPL_DEVICE_OC 0x1c /* Additional Device Information */ +#define CISTPL_DEVICE_OA 0x1d /* Additional Device Information */ +#define CISTPL_DEVICE_GEO 0x1e /* Additional Device Information */ +#define CISTPL_DEVICE_GEO_A 0x1f /* Additional Device Information */ +#define CISTPL_MANFID 0x20 /* Manufacturer ID Tuple */ +#define CISTPL_FUNCID 0x21 /* Function ID Tuple */ +#define CISTPL_FUNCE 0x22 /* Function Extension Tuple */ +#define CISTPL_END 0xff /* Tuple End */ +#define CISTPL_ENDMARK 0xff /* dscm1xxxx.c */ PCMCIACardState *dscm1xxxx_init(DriveInfo *bdrv); diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h index 96fb850419..7e5fef7c43 100644 --- a/include/hw/ppc/pnv.h +++ b/include/hw/ppc/pnv.h @@ -48,7 +48,7 @@ DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER8, DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER8NVL, TYPE_PNV_CHIP_POWER8NVL) -#define TYPE_PNV_CHIP_POWER9 PNV_CHIP_TYPE_NAME("power9_v2.0") +#define TYPE_PNV_CHIP_POWER9 PNV_CHIP_TYPE_NAME("power9_v2.2") DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER9, TYPE_PNV_CHIP_POWER9) @@ -100,7 +100,7 @@ struct PnvMachineState { }; PnvChip *pnv_get_chip(PnvMachineState *pnv, uint32_t chip_id); -Object *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb, Error **errp); +PnvChip *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb); #define PNV_FDT_ADDR 0x01000000 #define PNV_TIMEBASE_FREQ 512000000ULL diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index 5c8aabd444..bd5a6c4780 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -78,8 +78,10 @@ typedef enum { #define SPAPR_CAP_FWNMI 0x0A /*
Support H_RPT_INVALIDATE */ #define SPAPR_CAP_RPT_INVALIDATE 0x0B +/* Support for AIL modes */ +#define SPAPR_CAP_AIL_MODE_3 0x0C /* Num Caps */ -#define SPAPR_CAP_NUM (SPAPR_CAP_RPT_INVALIDATE + 1) +#define SPAPR_CAP_NUM (SPAPR_CAP_AIL_MODE_3 + 1) /* * Capability Values diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index 35fddb19a6..f1070d6dc7 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -1,6 +1,7 @@ #ifndef QDEV_CORE_H #define QDEV_CORE_H +#include "qemu/atomic.h" #include "qemu/queue.h" #include "qemu/bitmap.h" #include "qemu/rcu.h" @@ -162,11 +163,12 @@ struct NamedClockList { QLIST_ENTRY(NamedClockList) node; }; +typedef struct { + bool engaged_in_io; +} MemReentrancyGuard; + /** * DeviceState: - * @realized: Indicates whether the device has been fully constructed. - * When accessed outside big qemu lock, must be accessed with - * qatomic_load_acquire() * @reset: ResettableState for the device; handled by Resettable interface. * * This structure should not be accessed directly. We declare it here @@ -194,6 +196,9 @@ struct DeviceState { int alias_required_for_version; ResettableState reset; GSList *unplug_blockers; + + /* Is the device currently in mmio/pio/dma? Used to prevent re-entrancy */ + MemReentrancyGuard mem_reentrancy_guard; }; struct DeviceListener { @@ -332,6 +337,19 @@ DeviceState *qdev_new(const char *name); */ DeviceState *qdev_try_new(const char *name); +/** + * qdev_is_realized: + * @dev: The device to check. + * + * May be called outside big qemu lock. + * + * Returns: %true if the device has been fully constructed, %false otherwise. + */ +static inline bool qdev_is_realized(DeviceState *dev) +{ + return qatomic_load_acquire(&dev->realized); +} + /** * qdev_realize: Realize @dev. * @dev: device to realize @@ -715,7 +733,7 @@ static inline void qdev_init_gpio_in_named(DeviceState *dev, void qdev_pass_gpios(DeviceState *dev, DeviceState *container, const char *name); -BusState *qdev_get_parent_bus(DeviceState *dev); +BusState *qdev_get_parent_bus(const DeviceState *dev); /*** BUS API.
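A hedged sketch of the intent behind the new mem_reentrancy_guard field; in practice the check lives in shared memory-dispatch code rather than in each device, so this handler is purely illustrative:

    static void mydev_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                                 unsigned size)
    {
        DeviceState *dev = DEVICE(opaque);

        if (dev->mem_reentrancy_guard.engaged_in_io) {
            return;                  /* re-entered via DMA: drop the access */
        }
        dev->mem_reentrancy_guard.engaged_in_io = true;
        /* ...register side effects that may trigger DMA back into this device... */
        dev->mem_reentrancy_guard.engaged_in_io = false;
    }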
***/ @@ -812,7 +830,18 @@ void qbus_set_bus_hotplug_handler(BusState *bus); static inline bool qbus_is_hotpluggable(BusState *bus) { - return bus->hotplug_handler; + HotplugHandler *plug_handler = bus->hotplug_handler; + bool ret = !!plug_handler; + + if (plug_handler) { + HotplugHandlerClass *hdc; + + hdc = HOTPLUG_HANDLER_GET_CLASS(plug_handler); + if (hdc->is_hotpluggable_bus) { + ret = hdc->is_hotpluggable_bus(plug_handler, bus); + } + } + return ret; } /** diff --git a/include/hw/riscv/sifive_u.h b/include/hw/riscv/sifive_u.h index 65af306963..0696f85942 100644 --- a/include/hw/riscv/sifive_u.h +++ b/include/hw/riscv/sifive_u.h @@ -68,6 +68,7 @@ typedef struct SiFiveUState { /*< public >*/ SiFiveUSoCState soc; + int fdt_size; bool start_in_flash; uint32_t msel; diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h index b3d26135c0..e5c474b26e 100644 --- a/include/hw/riscv/virt.h +++ b/include/hw/riscv/virt.h @@ -56,6 +56,10 @@ struct RISCVVirtState { bool have_aclint; RISCVVirtAIAType aia_type; int aia_guests; + char *oem_id; + char *oem_table_id; + OnOffAuto acpi; + const MemMapEntry *memmap; }; enum { @@ -121,4 +125,6 @@ enum { #define FDT_APLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \ 1 + FDT_APLIC_INT_CELLS) +bool virt_is_acpi_enabled(RISCVVirtState *s); +void virt_acpi_setup(RISCVVirtState *vms); #endif diff --git a/include/hw/rtc/mc146818rtc.h b/include/hw/rtc/mc146818rtc.h index 45bcd6f040..97cec0b3e8 100644 --- a/include/hw/rtc/mc146818rtc.h +++ b/include/hw/rtc/mc146818rtc.h @@ -16,9 +16,9 @@ #include "qom/object.h" #define TYPE_MC146818_RTC "mc146818rtc" -OBJECT_DECLARE_SIMPLE_TYPE(RTCState, MC146818_RTC) +OBJECT_DECLARE_SIMPLE_TYPE(MC146818RtcState, MC146818_RTC) -struct RTCState { +struct MC146818RtcState { ISADevice parent_obj; MemoryRegion io; @@ -46,15 +46,15 @@ struct RTCState { Notifier clock_reset_notifier; LostTickPolicy lost_tick_policy; Notifier suspend_notifier; - QLIST_ENTRY(RTCState) link; + QLIST_ENTRY(MC146818RtcState) link; }; #define RTC_ISA_IRQ 8 -ISADevice *mc146818_rtc_init(ISABus *bus, int base_year, - qemu_irq intercept_irq); -void rtc_set_memory(ISADevice *dev, int addr, int val); -int rtc_get_memory(ISADevice *dev, int addr); +MC146818RtcState *mc146818_rtc_init(ISABus *bus, int base_year, + qemu_irq intercept_irq); +void mc146818rtc_set_cmos_data(MC146818RtcState *s, int addr, int val); +int mc146818rtc_get_cmos_data(MC146818RtcState *s, int addr); void qmp_rtc_reset_reinjection(Error **errp); #endif /* HW_RTC_MC146818RTC_H */ diff --git a/include/hw/s390x/pv.h b/include/hw/s390x/pv.h index 966306a9db..7b935e2246 100644 --- a/include/hw/s390x/pv.h +++ b/include/hw/s390x/pv.h @@ -14,10 +14,10 @@ #include "qapi/error.h" #include "sysemu/kvm.h" +#include "hw/s390x/s390-virtio-ccw.h" #ifdef CONFIG_KVM #include "cpu.h" -#include "hw/s390x/s390-virtio-ccw.h" static inline bool s390_is_pv(void) { @@ -41,7 +41,7 @@ static inline bool s390_is_pv(void) int s390_pv_query_info(void); int s390_pv_vm_enable(void); void s390_pv_vm_disable(void); -bool s390_pv_vm_try_disable_async(void); +bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms); int s390_pv_set_sec_parms(uint64_t origin, uint64_t length); int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak); void s390_pv_prep_reset(void); @@ -61,7 +61,7 @@ static inline bool s390_is_pv(void) { return false; } static inline int s390_pv_query_info(void) { return 0; } static inline int s390_pv_vm_enable(void) { return 0; } static inline void s390_pv_vm_disable(void) {} -static 
inline bool s390_pv_vm_try_disable_async(void) { return false; } +static inline bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) { return false; } static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length) { return 0; } static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) { return 0; } static inline void s390_pv_prep_reset(void) {} diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h index 6ea4b64fe7..e2bb1a2fbf 100644 --- a/include/hw/scsi/scsi.h +++ b/include/hw/scsi/scsi.h @@ -8,7 +8,7 @@ #include "qemu/notify.h" #include "qom/object.h" -#define MAX_SCSI_DEVS 255 +#define MAX_SCSI_DEVS 255 typedef struct SCSIBus SCSIBus; typedef struct SCSIBusInfo SCSIBusInfo; @@ -133,6 +133,16 @@ struct SCSIBusInfo { void (*save_request)(QEMUFile *f, SCSIRequest *req); void *(*load_request)(QEMUFile *f, SCSIRequest *req); void (*free_request)(SCSIBus *bus, void *priv); + + /* + * Temporarily stop submitting new requests between drained_begin() and + * drained_end(). Called from the main loop thread with the BQL held. + * + * Implement these callbacks if request processing is triggered by a file + * descriptor like an EventNotifier. Otherwise set them to NULL. + */ + void (*drained_begin)(SCSIBus *bus); + void (*drained_end)(SCSIBus *bus); }; #define TYPE_SCSI_BUS "SCSI" @@ -144,6 +154,8 @@ struct SCSIBus { SCSISense unit_attention; const SCSIBusInfo *info; + + int drain_count; /* protected by BQL */ }; /** @@ -213,6 +225,8 @@ void scsi_req_cancel_complete(SCSIRequest *req); void scsi_req_cancel(SCSIRequest *req); void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier); void scsi_req_retry(SCSIRequest *req); +void scsi_device_drained_begin(SCSIDevice *sdev); +void scsi_device_drained_end(SCSIDevice *sdev); void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense); void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense); void scsi_device_report_change(SCSIDevice *dev, SCSISense sense); diff --git a/include/hw/sd/sd.h b/include/hw/sd/sd.h index 47360ba4ee..3047adb2fc 100644 --- a/include/hw/sd/sd.h +++ b/include/hw/sd/sd.h @@ -77,10 +77,10 @@ typedef enum { typedef enum { sd_none = -1, - sd_bc = 0, /* broadcast -- no response */ - sd_bcr, /* broadcast with response */ - sd_ac, /* addressed -- no data transfer */ - sd_adtc, /* addressed with data transfer */ + sd_bc = 0, /* broadcast -- no response */ + sd_bcr, /* broadcast with response */ + sd_ac, /* addressed -- no data transfer */ + sd_adtc, /* addressed with data transfer */ } sd_cmd_type_t; typedef struct { diff --git a/include/hw/i386/ich9.h b/include/hw/southbridge/ich9.h similarity index 91% rename from include/hw/i386/ich9.h rename to include/hw/southbridge/ich9.h index 222781e8b9..fd01649d04 100644 --- a/include/hw/i386/ich9.h +++ b/include/hw/southbridge/ich9.h @@ -1,20 +1,16 @@ -#ifndef HW_ICH9_H -#define HW_ICH9_H +#ifndef HW_SOUTHBRIDGE_ICH9_H +#define HW_SOUTHBRIDGE_ICH9_H -#include "hw/isa/isa.h" -#include "hw/sysbus.h" -#include "hw/i386/pc.h" #include "hw/isa/apm.h" -#include "hw/acpi/acpi.h" #include "hw/acpi/ich9.h" +#include "hw/intc/ioapic.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_device.h" +#include "hw/rtc/mc146818rtc.h" +#include "exec/memory.h" +#include "qemu/notify.h" #include "qom/object.h" -void ich9_lpc_set_irq(void *opaque, int irq_num, int level); -int ich9_lpc_map_irq(PCIDevice *pci_dev, int intx); -PCIINTxRoute ich9_route_intx_pin_to_irq(void *opaque, int pirq_pin); -void ich9_lpc_pm_init(PCIDevice *pci_lpc, bool smm_enabled); -I2CBus 
*ich9_smb_init(PCIBus *bus, int devfn, uint32_t smb_io_base); - void ich9_generate_smi(void); #define ICH9_CC_SIZE (16 * 1024) /* 16KB. Chipset configuration registers */ @@ -35,6 +31,7 @@ struct ICH9LPCState { */ uint8_t irr[PCI_SLOT_MAX][PCI_NUM_PINS]; + MC146818RtcState rtc; APMState apm; ICH9LPCPMRegs pm; uint32_t sci_level; /* track sci level */ @@ -67,15 +64,13 @@ struct ICH9LPCState { * triggers feature lockdown */ uint64_t smi_negotiated_features; /* guest-invisible, host endian */ - /* isa bus */ - ISABus *isa_bus; MemoryRegion rcrb_mem; /* root complex register block */ Notifier machine_ready; - qemu_irq gsi[GSI_NUM_PINS]; + qemu_irq gsi[IOAPIC_NUM_PINS]; }; -#define Q35_MASK(bit, ms_bit, ls_bit) \ +#define ICH9_MASK(bit, ms_bit, ls_bit) \ ((uint##bit##_t)(((1ULL << ((ms_bit) + 1)) - 1) & ~((1ULL << ls_bit) - 1))) /* ICH9: Chipset Configuration Registers */ @@ -137,13 +132,13 @@ struct ICH9LPCState { #define ICH9_LPC_NB_PIRQS 8 /* PCI A-H */ #define ICH9_LPC_PMBASE 0x40 -#define ICH9_LPC_PMBASE_BASE_ADDRESS_MASK Q35_MASK(32, 15, 7) +#define ICH9_LPC_PMBASE_BASE_ADDRESS_MASK ICH9_MASK(32, 15, 7) #define ICH9_LPC_PMBASE_RTE 0x1 #define ICH9_LPC_PMBASE_DEFAULT 0x1 #define ICH9_LPC_ACPI_CTRL 0x44 #define ICH9_LPC_ACPI_CTRL_ACPI_EN 0x80 -#define ICH9_LPC_ACPI_CTRL_SCI_IRQ_SEL_MASK Q35_MASK(8, 2, 0) +#define ICH9_LPC_ACPI_CTRL_SCI_IRQ_SEL_MASK ICH9_MASK(8, 2, 0) #define ICH9_LPC_ACPI_CTRL_9 0x0 #define ICH9_LPC_ACPI_CTRL_10 0x1 #define ICH9_LPC_ACPI_CTRL_11 0x2 @@ -162,7 +157,7 @@ struct ICH9LPCState { #define ICH9_LPC_PIRQH_ROUT 0x6b #define ICH9_LPC_PIRQ_ROUT_IRQEN 0x80 -#define ICH9_LPC_PIRQ_ROUT_MASK Q35_MASK(8, 3, 0) +#define ICH9_LPC_PIRQ_ROUT_MASK ICH9_MASK(8, 3, 0) #define ICH9_LPC_PIRQ_ROUT_DEFAULT 0x80 #define ICH9_LPC_GEN_PMCON_1 0xa0 @@ -172,7 +167,7 @@ struct ICH9LPCState { #define ICH9_LPC_GEN_PMCON_LOCK 0xa6 #define ICH9_LPC_RCBA 0xf0 -#define ICH9_LPC_RCBA_BA_MASK Q35_MASK(32, 31, 14) +#define ICH9_LPC_RCBA_BA_MASK ICH9_MASK(32, 31, 14) #define ICH9_LPC_RCBA_EN 0x1 #define ICH9_LPC_RCBA_DEFAULT 0x0 @@ -249,4 +244,4 @@ struct ICH9LPCState { #define ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT 1 #define ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT 2 -#endif /* HW_ICH9_H */ +#endif /* HW_SOUTHBRIDGE_ICH9_H */ diff --git a/include/hw/southbridge/piix.h b/include/hw/southbridge/piix.h index 0bf48e936d..a840340308 100644 --- a/include/hw/southbridge/piix.h +++ b/include/hw/southbridge/piix.h @@ -13,6 +13,7 @@ #define HW_SOUTHBRIDGE_PIIX_H #include "hw/pci/pci_device.h" +#include "hw/rtc/mc146818rtc.h" /* PIRQRC[A:D]: PIRQx Route Control Registers */ #define PIIX_PIRQCA 0x60 @@ -51,6 +52,8 @@ struct PIIXState { /* This member isn't used. Just for save/load compatibility */ int32_t pci_irq_levels_vmstate[PIIX_NUM_PIRQS]; + MC146818RtcState rtc; + /* Reset Control Register contents */ uint8_t rcr; diff --git a/include/hw/timer/i8254.h b/include/hw/timer/i8254.h index 3e569f42b6..8402caad30 100644 --- a/include/hw/timer/i8254.h +++ b/include/hw/timer/i8254.h @@ -56,7 +56,8 @@ static inline ISADevice *i8254_pit_init(ISABus *bus, int base, int isa_irq, qdev_prop_set_uint32(dev, "iobase", base); isa_realize_and_unref(d, bus, &error_fatal); qdev_connect_gpio_out(dev, 0, - isa_irq >= 0 ? isa_get_irq(d, isa_irq) : alt_irq); + isa_irq >= 0 ? 
isa_bus_get_irq(bus, isa_irq) + : alt_irq); return d; } diff --git a/include/hw/timer/i8254_internal.h b/include/hw/timer/i8254_internal.h index a9a600d941..1761deb4cf 100644 --- a/include/hw/timer/i8254_internal.h +++ b/include/hw/timer/i8254_internal.h @@ -58,7 +58,7 @@ struct PITCommonState { }; struct PITCommonClass { - ISADeviceClass parent_class; + DeviceClass parent_class; void (*set_channel_gate)(PITCommonState *s, PITChannelState *sc, int val); void (*get_channel_info)(PITCommonState *s, PITChannelState *sc, diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h index 87524c64a4..eed244f25f 100644 --- a/include/hw/vfio/vfio-common.h +++ b/include/hw/vfio/vfio-common.h @@ -143,6 +143,8 @@ typedef struct VFIODevice { VFIOMigration *migration; Error *migration_blocker; OnOffAuto pre_copy_dirty_page_tracking; + bool dirty_pages_supported; + bool dirty_tracking; } VFIODevice; struct VFIODeviceOps { @@ -220,6 +222,7 @@ extern VFIOGroupList vfio_group_list; bool vfio_mig_active(void); int vfio_block_multiple_devices_migration(Error **errp); void vfio_unblock_multiple_devices_migration(void); +int vfio_block_giommu_migration(Error **errp); int64_t vfio_mig_bytes_transferred(void); #ifdef CONFIG_LINUX @@ -243,7 +246,8 @@ int vfio_spapr_create_window(VFIOContainer *container, int vfio_spapr_remove_window(VFIOContainer *container, hwaddr offset_within_address_space); -int vfio_migration_probe(VFIODevice *vbasedev, Error **errp); -void vfio_migration_finalize(VFIODevice *vbasedev); +int vfio_migration_realize(VFIODevice *vbasedev, Error **errp); +void vfio_migration_exit(VFIODevice *vbasedev); +void vfio_migration_finalize(void); #endif /* HW_VFIO_VFIO_COMMON_H */ diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h index c5ab49051e..ec3fbae58d 100644 --- a/include/hw/virtio/vhost-backend.h +++ b/include/hw/virtio/vhost-backend.h @@ -130,6 +130,9 @@ typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev); typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev, int fd); + +typedef void (*vhost_reset_status_op)(struct vhost_dev *dev); + typedef struct VhostOps { VhostBackendType backend_type; vhost_backend_init vhost_backend_init; @@ -177,6 +180,7 @@ typedef struct VhostOps { vhost_get_device_id_op vhost_get_device_id; vhost_force_iommu_op vhost_force_iommu; vhost_set_config_call_op vhost_set_config_call; + vhost_reset_status_op vhost_reset_status; } VhostOps; int vhost_backend_update_device_iotlb(struct vhost_dev *dev, diff --git a/include/hw/virtio/vhost-user-gpio.h b/include/hw/virtio/vhost-user-gpio.h index a9305c5e6c..a9d3f9b049 100644 --- a/include/hw/virtio/vhost-user-gpio.h +++ b/include/hw/virtio/vhost-user-gpio.h @@ -23,7 +23,7 @@ struct VHostUserGPIO { VirtIODevice parent_obj; CharBackend chardev; struct virtio_gpio_config config; - struct vhost_virtqueue *vhost_vq; + struct vhost_virtqueue *vhost_vqs; struct vhost_dev vhost_dev; VhostUserState vhost_user; VirtQueue *command_vq; diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h index 7997f09a8d..e64bfc7f98 100644 --- a/include/hw/virtio/vhost-vdpa.h +++ b/include/hw/virtio/vhost-vdpa.h @@ -42,13 +42,18 @@ typedef struct vhost_vdpa { bool shadow_vqs_enabled; /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */ bool shadow_data; + /* Device suspended successfully */ + bool suspended; /* IOVA mapping used by the Shadow Virtqueue */ VhostIOVATree *iova_tree; GPtrArray *shadow_vqs; const VhostShadowVirtqueueOps *shadow_vq_ops; 
void *shadow_vq_ops_opaque; struct vhost_dev *dev; + Error *migration_blocker; VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX]; + QLIST_HEAD(, vdpa_iommu) iommu_list; + IOMMUNotifier n; } VhostVDPA; int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range); @@ -58,4 +63,13 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, hwaddr size); +typedef struct vdpa_iommu { + struct vhost_vdpa *dev; + IOMMUMemoryRegion *iommu_mr; + hwaddr iommu_offset; + IOMMUNotifier n; + QLIST_ENTRY(vdpa_iommu) iommu_next; +} VDPAIOMMUState; + + #endif diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index a52f273347..f7f10c8fb7 100644 --- a/include/hw/virtio/vhost.h +++ b/include/hw/virtio/vhost.h @@ -336,4 +336,5 @@ int vhost_dev_set_inflight(struct vhost_dev *dev, struct vhost_inflight *inflight); int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, struct vhost_inflight *inflight); +bool vhost_dev_has_iommu(struct vhost_dev *dev); #endif diff --git a/include/hw/virtio/virtio-input.h b/include/hw/virtio/virtio-input.h index f2da63d309..08f1591424 100644 --- a/include/hw/virtio/virtio-input.h +++ b/include/hw/virtio/virtio-input.h @@ -24,10 +24,11 @@ OBJECT_DECLARE_TYPE(VirtIOInput, VirtIOInputClass, #define VIRTIO_INPUT_GET_PARENT_CLASS(obj) \ OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_INPUT) -#define TYPE_VIRTIO_INPUT_HID "virtio-input-hid-device" -#define TYPE_VIRTIO_KEYBOARD "virtio-keyboard-device" -#define TYPE_VIRTIO_MOUSE "virtio-mouse-device" -#define TYPE_VIRTIO_TABLET "virtio-tablet-device" +#define TYPE_VIRTIO_INPUT_HID "virtio-input-hid-device" +#define TYPE_VIRTIO_KEYBOARD "virtio-keyboard-device" +#define TYPE_VIRTIO_MOUSE "virtio-mouse-device" +#define TYPE_VIRTIO_TABLET "virtio-tablet-device" +#define TYPE_VIRTIO_MULTITOUCH "virtio-multitouch-device" OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputHID, VIRTIO_INPUT_HID) #define VIRTIO_INPUT_HID_GET_PARENT_CLASS(obj) \ diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h index 77c6c55929..af86ed7249 100644 --- a/include/hw/virtio/virtio.h +++ b/include/hw/virtio/virtio.h @@ -29,7 +29,7 @@ * vhost-user to advertise VHOST_USER_F_PROTOCOL_FEATURES between QEMU * and a vhost-user backend. 
*/ -#define VIRTIO_F_BAD_FEATURE 30 +#define VIRTIO_F_BAD_FEATURE 30 #define VIRTIO_LEGACY_FEATURES ((0x1ULL << VIRTIO_F_BAD_FEATURE) | \ (0x1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \ @@ -155,6 +155,7 @@ struct VirtIODevice QLIST_HEAD(, VirtQueue) *vector_queues; QTAILQ_ENTRY(VirtIODevice) next; EventNotifier config_notifier; + bool device_iotlb_enabled; }; struct VirtioDeviceClass { @@ -212,6 +213,7 @@ struct VirtioDeviceClass { const VMStateDescription *vmsd; bool (*primary_unplug_pending)(void *opaque); struct vhost_dev *(*get_vhost)(VirtIODevice *vdev); + void (*toggle_device_iotlb)(VirtIODevice *vdev); }; void virtio_instance_init_common(Object *proxy_obj, void *data, @@ -309,6 +311,7 @@ int virtio_get_num_queues(VirtIODevice *vdev); void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc, hwaddr avail, hwaddr used); void virtio_queue_update_rings(VirtIODevice *vdev, int n); +void virtio_init_region_cache(VirtIODevice *vdev, int n); void virtio_queue_set_align(VirtIODevice *vdev, int n, int align); void virtio_queue_notify(VirtIODevice *vdev, int n); uint16_t virtio_queue_vector(VirtIODevice *vdev, int n); diff --git a/include/hw/watchdog/allwinner-wdt.h b/include/hw/watchdog/allwinner-wdt.h new file mode 100644 index 0000000000..7fe41e20f2 --- /dev/null +++ b/include/hw/watchdog/allwinner-wdt.h @@ -0,0 +1,123 @@ +/* + * Allwinner Watchdog emulation + * + * Copyright (C) 2023 Strahinja Jankovic + * + * This file is derived from Allwinner RTC, + * by Niek Linnenbank. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef HW_WATCHDOG_ALLWINNER_WDT_H +#define HW_WATCHDOG_ALLWINNER_WDT_H + +#include "qom/object.h" +#include "hw/ptimer.h" +#include "hw/sysbus.h" + +/* + * This is a model of the Allwinner watchdog. + * Since watchdog registers belong to the timer module (and are shared with the + * RTC module), the interrupt line from the watchdog is not handled right now. + * In QEMU, we just wire up the watchdog reset to watchdog_perform_action(), + * at least for the moment. + */ + +#define TYPE_AW_WDT "allwinner-wdt" + +/** Allwinner WDT sun4i family (A10, A12), also sun7i (A20) */ +#define TYPE_AW_WDT_SUN4I TYPE_AW_WDT "-sun4i" + +/** Allwinner WDT sun6i family and newer (A31, H2+, H3, etc) */ +#define TYPE_AW_WDT_SUN6I TYPE_AW_WDT "-sun6i" + +/** Number of WDT registers */ +#define AW_WDT_REGS_NUM (5) + +OBJECT_DECLARE_TYPE(AwWdtState, AwWdtClass, AW_WDT) + +/** + * Allwinner WDT object instance state. + */ +struct AwWdtState { + /*< private >*/ + SysBusDevice parent_obj; + + /*< public >*/ + MemoryRegion iomem; + struct ptimer_state *timer; + + uint32_t regs[AW_WDT_REGS_NUM]; +}; + +/** + * Allwinner WDT class-level struct. + * + * This struct is filled in by the device-specific code of each + * sunxi variant so that the generic code can use it to support + * all devices.
+ */ +struct AwWdtClass { + /*< private >*/ + SysBusDeviceClass parent_class; + /*< public >*/ + + /** Defines the device-specific register map */ + const uint8_t *regmap; + + /** Size of the regmap in bytes */ + size_t regmap_size; + + /** + * Read a device-specific register + * + * @offset: register offset to read + * @return true if the register read was successful, false otherwise + */ + bool (*read)(AwWdtState *s, uint32_t offset); + + /** + * Write a device-specific register + * + * @offset: register offset to write + * @data: value to set in register + * @return true if the register write was successful, false otherwise + */ + bool (*write)(AwWdtState *s, uint32_t offset, uint32_t data); + + /** + * Check if the watchdog can generate a system reset + * + * @return true if the watchdog can generate a system reset + */ + bool (*can_reset_system)(AwWdtState *s); + + /** + * Check if the provided key is valid + * + * @value: value written to register + * @return true if the key is valid, false otherwise + */ + bool (*is_key_valid)(AwWdtState *s, uint32_t val); + + /** + * Get current INTV_VALUE setting + * + * @return current INTV_VALUE (0-15) + */ + uint8_t (*get_intv_value)(AwWdtState *s); +}; + +#endif /* HW_WATCHDOG_ALLWINNER_WDT_H */ diff --git a/include/hw/xen/interface/arch-arm.h b/include/hw/xen/interface/arch-arm.h new file mode 100644 index 0000000000..94b31511dd --- /dev/null +++ b/include/hw/xen/interface/arch-arm.h @@ -0,0 +1,510 @@ +/****************************************************************************** + * arch-arm.h + * + * Guest OS interface to ARM Xen. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright 2011 (C) Citrix Systems + */ + +#ifndef __XEN_PUBLIC_ARCH_ARM_H__ +#define __XEN_PUBLIC_ARCH_ARM_H__ + +/* + * `incontents 50 arm_abi Hypercall Calling Convention + * + * A hypercall is issued using the ARM HVC instruction. + * + * A hypercall can take up to 5 arguments. These are passed in + * registers, the first argument in x0/r0 (for arm64/arm32 guests + * respectively irrespective of whether the underlying hypervisor is + * 32- or 64-bit), the second argument in x1/r1, the third in x2/r2, + * the fourth in x3/r3 and the fifth in x4/r4. + * + * The hypercall number is passed in r12 (arm) or x16 (arm64). In both + * cases the relevant ARM procedure calling convention specifies this + * is an inter-procedure-call scratch register (e.g. for use in linker + * stubs). This use does not conflict with use during a hypercall.
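Looking back at the AwWdtClass hooks above, a hedged sketch of how a variant might implement is_key_valid(); the key value and bit layout are assumptions for illustration, not taken from this patch:

    static bool aw_wdt_sun6i_is_key_valid(AwWdtState *s, uint32_t val)
    {
        const uint32_t key = 0xa57;              /* hypothetical magic key */

        return ((val >> 1) & 0xfff) == key;      /* assume key in bits [12:1] */
    }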
+ * + * The HVC ISS must contain a Xen specific TAG: XEN_HYPERCALL_TAG. + * + * The return value is in x0/r0. + * + * The hypercall will clobber x16/r12 and the argument registers used + * by that hypercall (except r0 which is the return value) i.e. in + * addition to x16/r12 a 2 argument hypercall will clobber x1/r1 and a + * 4 argument hypercall will clobber x1/r1, x2/r2 and x3/r3. + * + * Parameter structs passed to hypercalls are laid out according to + * the Procedure Call Standard for the ARM Architecture (AAPCS, AKA + * EABI) and Procedure Call Standard for the ARM 64-bit Architecture + * (AAPCS64). Where there is a conflict the 64-bit standard should be + * used regardless of guest type. Structures which are passed as + * hypercall arguments are always little endian. + * + * All memory which is shared with other entities in the system + * (including the hypervisor and other guests) must reside in memory + * which is mapped as Normal Inner Write-Back Outer Write-Back Inner-Shareable. + * This applies to: + * - hypercall arguments passed via a pointer to guest memory. + * - memory shared via the grant table mechanism (including PV I/O + * rings etc). + * - memory shared with the hypervisor (struct shared_info, struct + * vcpu_info, the grant table, etc). + * + * Any cache allocation hints are acceptable. + */ + +/* + * `incontents 55 arm_hcall Supported Hypercalls + * + * Xen on ARM makes extensive use of hardware facilities and therefore + * only a subset of the potential hypercalls are required. + * + * Since ARM uses second stage paging any machine/physical addresses + * passed to hypercalls are Guest Physical Addresses (Intermediate + * Physical Addresses) unless otherwise noted. + * + * The following hypercalls (and sub operations) are supported on the + * ARM platform. Other hypercalls should be considered + * unavailable/unsupported. + * + * HYPERVISOR_memory_op + * All generic sub-operations + * + * HYPERVISOR_domctl + * All generic sub-operations, with the exception of: + * * XEN_DOMCTL_irq_permission (not yet implemented) + * + * HYPERVISOR_sched_op + * All generic sub-operations, with the exception of: + * * SCHEDOP_block -- prefer wfi hardware instruction + * + * HYPERVISOR_console_io + * All generic sub-operations + * + * HYPERVISOR_xen_version + * All generic sub-operations + * + * HYPERVISOR_event_channel_op + * All generic sub-operations + * + * HYPERVISOR_physdev_op + * Exactly these sub-operations are supported: + * PHYSDEVOP_pci_device_add + * PHYSDEVOP_pci_device_remove + * + * HYPERVISOR_sysctl + * All generic sub-operations, with the exception of: + * * XEN_SYSCTL_page_offline_op + * * XEN_SYSCTL_get_pmstat + * * XEN_SYSCTL_pm_op + * + * HYPERVISOR_hvm_op + * Exactly these sub-operations are supported: + * * HVMOP_set_param + * * HVMOP_get_param + * + * HYPERVISOR_grant_table_op + * All generic sub-operations + * + * HYPERVISOR_vcpu_op + * Exactly these sub-operations are supported: + * * VCPUOP_register_vcpu_info + * * VCPUOP_register_runstate_memory_area + * + * HYPERVISOR_argo_op + * All generic sub-operations + * + * Other notes on the ARM ABI: + * + * - struct start_info is not exported to ARM guests. + * + * - struct shared_info is mapped by ARM guests using the + * HYPERVISOR_memory_op sub-op XENMEM_add_to_physmap, passing + * XENMAPSPACE_shared_info as space parameter. + * + * - All the per-cpu struct vcpu_info are mapped by ARM guests using the + * HYPERVISOR_vcpu_op sub-op VCPUOP_register_vcpu_info, including cpu0 + * struct vcpu_info. 
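A hedged illustration of the arm64 convention described above (hypercall number in x16, arguments from x0 upward, result in x0, XEN_HYPERCALL_TAG as the HVC immediate); this helper is a sketch, not code from this patch:

    static inline uint64_t xen_hypercall2(uint64_t nr, uint64_t a0, uint64_t a1)
    {
        register uint64_t r16 asm("x16") = nr;   /* hypercall number */
        register uint64_t r0 asm("x0") = a0;     /* argument 1, also the result */
        register uint64_t r1 asm("x1") = a1;     /* argument 2, clobbered */

        asm volatile("hvc 0xea1"                 /* XEN_HYPERCALL_TAG in the ISS */
                     : "+r" (r0), "+r" (r1), "+r" (r16)
                     :
                     : "memory");
        return r0;
    }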
+ * + * - The grant table is mapped using the HYPERVISOR_memory_op sub-op + * XENMEM_add_to_physmap, passing XENMAPSPACE_grant_table as space + * parameter. The memory range specified under the Xen compatible + * hypervisor node on device tree can be used as target gpfn for the + * mapping. + * + * - Xenstore is initialized by using the two hvm_params + * HVM_PARAM_STORE_PFN and HVM_PARAM_STORE_EVTCHN. They can be read + * with the HYPERVISOR_hvm_op sub-op HVMOP_get_param. + * + * - The paravirtualized console is initialized by using the two + * hvm_params HVM_PARAM_CONSOLE_PFN and HVM_PARAM_CONSOLE_EVTCHN. They + * can be read with the HYPERVISOR_hvm_op sub-op HVMOP_get_param. + * + * - Event channel notifications are delivered using the percpu GIC + * interrupt specified under the Xen compatible hypervisor node on + * device tree. + * + * - The device tree Xen compatible node is fully described under Linux + * at Documentation/devicetree/bindings/arm/xen.txt. + */ + +#define XEN_HYPERCALL_TAG 0XEA1 + +#define int64_aligned_t int64_t __attribute__((aligned(8))) +#define uint64_aligned_t uint64_t __attribute__((aligned(8))) + +#ifndef __ASSEMBLY__ +#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ + typedef union { type *p; unsigned long q; } \ + __guest_handle_ ## name; \ + typedef union { type *p; uint64_aligned_t q; } \ + __guest_handle_64_ ## name + +/* + * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field + * in a struct in memory. On ARM it is always 8 bytes in size and 8 bytes + * aligned. + * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a + * hypercall argument. It is 4 bytes on aarch32 and 8 bytes on aarch64. + */ +#define __DEFINE_XEN_GUEST_HANDLE(name, type) \ + ___DEFINE_XEN_GUEST_HANDLE(name, type); \ + ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) +#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) +#define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name +#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) +#define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name +#define set_xen_guest_handle_raw(hnd, val) \ + do { \ + __typeof__(&(hnd)) _sxghr_tmp = &(hnd); \ + _sxghr_tmp->q = 0; \ + _sxghr_tmp->p = val; \ + } while ( 0 ) +#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val) + +typedef uint64_t xen_pfn_t; +#define PRI_xen_pfn PRIx64 +#define PRIu_xen_pfn PRIu64 + +/* + * Maximum number of virtual CPUs in legacy multi-processor guests. + * Only one. All other VCPUs must use VCPUOP_register_vcpu_info. + */ +#define XEN_LEGACY_MAX_VCPUS 1 + +typedef uint64_t xen_ulong_t; +#define PRI_xen_ulong PRIx64 + +#if defined(__XEN__) || defined(__XEN_TOOLS__) +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */ +# define __DECL_REG(n64, n32) union { \ + uint64_t n64; \ + uint32_t n32; \ + } +#else +/* Non-gcc sources must always use the proper 64-bit name (e.g., x0).
*/ +#define __DECL_REG(n64, n32) uint64_t n64 +#endif + +struct vcpu_guest_core_regs +{ + /* Aarch64 Aarch32 */ + __DECL_REG(x0, r0_usr); + __DECL_REG(x1, r1_usr); + __DECL_REG(x2, r2_usr); + __DECL_REG(x3, r3_usr); + __DECL_REG(x4, r4_usr); + __DECL_REG(x5, r5_usr); + __DECL_REG(x6, r6_usr); + __DECL_REG(x7, r7_usr); + __DECL_REG(x8, r8_usr); + __DECL_REG(x9, r9_usr); + __DECL_REG(x10, r10_usr); + __DECL_REG(x11, r11_usr); + __DECL_REG(x12, r12_usr); + + __DECL_REG(x13, sp_usr); + __DECL_REG(x14, lr_usr); + + __DECL_REG(x15, __unused_sp_hyp); + + __DECL_REG(x16, lr_irq); + __DECL_REG(x17, sp_irq); + + __DECL_REG(x18, lr_svc); + __DECL_REG(x19, sp_svc); + + __DECL_REG(x20, lr_abt); + __DECL_REG(x21, sp_abt); + + __DECL_REG(x22, lr_und); + __DECL_REG(x23, sp_und); + + __DECL_REG(x24, r8_fiq); + __DECL_REG(x25, r9_fiq); + __DECL_REG(x26, r10_fiq); + __DECL_REG(x27, r11_fiq); + __DECL_REG(x28, r12_fiq); + + __DECL_REG(x29, sp_fiq); + __DECL_REG(x30, lr_fiq); + + /* Return address and mode */ + __DECL_REG(pc64, pc32); /* ELR_EL2 */ + uint64_t cpsr; /* SPSR_EL2 */ + + union { + uint64_t spsr_el1; /* AArch64 */ + uint32_t spsr_svc; /* AArch32 */ + }; + + /* AArch32 guests only */ + uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt; + + /* AArch64 guests only */ + uint64_t sp_el0; + uint64_t sp_el1, elr_el1; +}; +typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t; +DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t); + +#undef __DECL_REG + +struct vcpu_guest_context { +#define _VGCF_online 0 +#define VGCF_online (1<<_VGCF_online) + uint32_t flags; /* VGCF_* */ + + struct vcpu_guest_core_regs user_regs; /* Core CPU registers */ + + uint64_t sctlr; + uint64_t ttbcr, ttbr0, ttbr1; +}; +typedef struct vcpu_guest_context vcpu_guest_context_t; +DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); + +/* + * struct xen_arch_domainconfig's ABI is covered by + * XEN_DOMCTL_INTERFACE_VERSION. + */ +#define XEN_DOMCTL_CONFIG_GIC_NATIVE 0 +#define XEN_DOMCTL_CONFIG_GIC_V2 1 +#define XEN_DOMCTL_CONFIG_GIC_V3 2 + +#define XEN_DOMCTL_CONFIG_TEE_NONE 0 +#define XEN_DOMCTL_CONFIG_TEE_OPTEE 1 + +struct xen_arch_domainconfig { + /* IN/OUT */ + uint8_t gic_version; + /* IN */ + uint16_t tee_type; + /* IN */ + uint32_t nr_spis; + /* + * OUT + * Based on the property clock-frequency in the DT timer node. + * The property may be present when the bootloader/firmware doesn't + * correctly set CNTFRQ, the register that holds the timer frequency. + * + * As it's not possible to trap this register, we have to replicate + * the value in the guest DT.
+ * + * = 0 => property not present + * > 0 => Value of the property + * + */ + uint32_t clock_frequency; +}; +#endif /* __XEN__ || __XEN_TOOLS__ */ + +struct arch_vcpu_info { +}; +typedef struct arch_vcpu_info arch_vcpu_info_t; + +struct arch_shared_info { +}; +typedef struct arch_shared_info arch_shared_info_t; +typedef uint64_t xen_callback_t; + +#endif + +#if defined(__XEN__) || defined(__XEN_TOOLS__) + +/* PSR bits (CPSR, SPSR) */ + +#define PSR_THUMB (1<<5) /* Thumb Mode enable */ +#define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */ +#define PSR_IRQ_MASK (1<<7) /* Interrupt mask */ +#define PSR_ABT_MASK (1<<8) /* Asynchronous Abort mask */ +#define PSR_BIG_ENDIAN (1<<9) /* arm32: Big Endian Mode */ +#define PSR_DBG_MASK (1<<9) /* arm64: Debug Exception mask */ +#define PSR_IT_MASK (0x0600fc00) /* Thumb If-Then Mask */ +#define PSR_JAZELLE (1<<24) /* Jazelle Mode */ + +/* 32 bit modes */ +#define PSR_MODE_USR 0x10 +#define PSR_MODE_FIQ 0x11 +#define PSR_MODE_IRQ 0x12 +#define PSR_MODE_SVC 0x13 +#define PSR_MODE_MON 0x16 +#define PSR_MODE_ABT 0x17 +#define PSR_MODE_HYP 0x1a +#define PSR_MODE_UND 0x1b +#define PSR_MODE_SYS 0x1f + +/* 64 bit modes */ +#define PSR_MODE_BIT 0x10 /* Set iff AArch32 */ +#define PSR_MODE_EL3h 0x0d +#define PSR_MODE_EL3t 0x0c +#define PSR_MODE_EL2h 0x09 +#define PSR_MODE_EL2t 0x08 +#define PSR_MODE_EL1h 0x05 +#define PSR_MODE_EL1t 0x04 +#define PSR_MODE_EL0t 0x00 + +#define PSR_GUEST32_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC) +#define PSR_GUEST64_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h) + +#define SCTLR_GUEST_INIT xen_mk_ullong(0x00c50078) + +/* + * Virtual machine platform (memory layout, interrupts) + * + * These are defined for consistency between the tools and the + * hypervisor. Guests must not rely on these hardcoded values but + * should instead use the FDT. + */ + +/* Physical Address Space */ + +/* + * vGIC mappings: Only one set of mapping is used by the guest. + * Therefore they can overlap. + */ + +/* vGIC v2 mappings */ +#define GUEST_GICD_BASE xen_mk_ullong(0x03001000) +#define GUEST_GICD_SIZE xen_mk_ullong(0x00001000) +#define GUEST_GICC_BASE xen_mk_ullong(0x03002000) +#define GUEST_GICC_SIZE xen_mk_ullong(0x00002000) + +/* vGIC v3 mappings */ +#define GUEST_GICV3_GICD_BASE xen_mk_ullong(0x03001000) +#define GUEST_GICV3_GICD_SIZE xen_mk_ullong(0x00010000) + +#define GUEST_GICV3_RDIST_REGIONS 1 + +#define GUEST_GICV3_GICR0_BASE xen_mk_ullong(0x03020000) /* vCPU0..127 */ +#define GUEST_GICV3_GICR0_SIZE xen_mk_ullong(0x01000000) + +/* + * 256 MB is reserved for VPCI configuration space based on calculation + * 256 buses x 32 devices x 8 functions x 4 KB = 256 MB + */ +#define GUEST_VPCI_ECAM_BASE xen_mk_ullong(0x10000000) +#define GUEST_VPCI_ECAM_SIZE xen_mk_ullong(0x10000000) + +/* ACPI tables physical address */ +#define GUEST_ACPI_BASE xen_mk_ullong(0x20000000) +#define GUEST_ACPI_SIZE xen_mk_ullong(0x02000000) + +/* PL011 mappings */ +#define GUEST_PL011_BASE xen_mk_ullong(0x22000000) +#define GUEST_PL011_SIZE xen_mk_ullong(0x00001000) + +/* Guest PCI-PCIe memory space where config space and BAR will be available.*/ +#define GUEST_VPCI_ADDR_TYPE_MEM xen_mk_ullong(0x02000000) +#define GUEST_VPCI_MEM_ADDR xen_mk_ullong(0x23000000) +#define GUEST_VPCI_MEM_SIZE xen_mk_ullong(0x10000000) + +/* + * 16MB == 4096 pages reserved for guest to use as a region to map its + * grant table in. 
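+ * As an illustrative sketch (not part of this header), an Arm guest
+ * could map frame 0 of its grant table at the base of this region,
+ * using the XENMEM_add_to_physmap mechanism described at the top of
+ * this file (4KB pages assumed, hence the shift by 12):
+ *
+ *     struct xen_add_to_physmap xatp = {
+ *         .domid = DOMID_SELF,
+ *         .space = XENMAPSPACE_grant_table,
+ *         .idx   = 0,
+ *         .gpfn  = GUEST_GNTTAB_BASE >> 12,
+ *     };
+ *     rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);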
+ */ +#define GUEST_GNTTAB_BASE xen_mk_ullong(0x38000000) +#define GUEST_GNTTAB_SIZE xen_mk_ullong(0x01000000) + +#define GUEST_MAGIC_BASE xen_mk_ullong(0x39000000) +#define GUEST_MAGIC_SIZE xen_mk_ullong(0x01000000) + +#define GUEST_RAM_BANKS 2 + +/* + * The way to find the extended regions (to be exposed to the guest as unused + * address space) relies on the fact that the regions reserved for the RAM + * below are big enough to also accommodate such regions. + */ +#define GUEST_RAM0_BASE xen_mk_ullong(0x40000000) /* 3GB of low RAM @ 1GB */ +#define GUEST_RAM0_SIZE xen_mk_ullong(0xc0000000) + +/* 4GB @ 4GB Prefetch Memory for VPCI */ +#define GUEST_VPCI_ADDR_TYPE_PREFETCH_MEM xen_mk_ullong(0x42000000) +#define GUEST_VPCI_PREFETCH_MEM_ADDR xen_mk_ullong(0x100000000) +#define GUEST_VPCI_PREFETCH_MEM_SIZE xen_mk_ullong(0x100000000) + +#define GUEST_RAM1_BASE xen_mk_ullong(0x0200000000) /* 1016GB of RAM @ 8GB */ +#define GUEST_RAM1_SIZE xen_mk_ullong(0xfe00000000) + +#define GUEST_RAM_BASE GUEST_RAM0_BASE /* Lowest RAM address */ +/* Largest amount of actual RAM, not including holes */ +#define GUEST_RAM_MAX (GUEST_RAM0_SIZE + GUEST_RAM1_SIZE) +/* Suitable for e.g. const uint64_t ramfoo[] = GUEST_RAM_BANK_FOOS; */ +#define GUEST_RAM_BANK_BASES { GUEST_RAM0_BASE, GUEST_RAM1_BASE } +#define GUEST_RAM_BANK_SIZES { GUEST_RAM0_SIZE, GUEST_RAM1_SIZE } + +/* Current supported guest VCPUs */ +#define GUEST_MAX_VCPUS 128 + +/* Interrupts */ +#define GUEST_TIMER_VIRT_PPI 27 +#define GUEST_TIMER_PHYS_S_PPI 29 +#define GUEST_TIMER_PHYS_NS_PPI 30 +#define GUEST_EVTCHN_PPI 31 + +#define GUEST_VPL011_SPI 32 + +/* PSCI functions */ +#define PSCI_cpu_suspend 0 +#define PSCI_cpu_off 1 +#define PSCI_cpu_on 2 +#define PSCI_migrate 3 + +#endif + +#ifndef __ASSEMBLY__ +/* Stub definition of PMU structure */ +typedef struct xen_pmu_arch { uint8_t dummy; } xen_pmu_arch_t; +#endif + +#endif /* __XEN_PUBLIC_ARCH_ARM_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/interface/arch-x86/cpuid.h b/include/hw/xen/interface/arch-x86/cpuid.h new file mode 100644 index 0000000000..ce46305bee --- /dev/null +++ b/include/hw/xen/interface/arch-x86/cpuid.h @@ -0,0 +1,118 @@ +/****************************************************************************** + * arch-x86/cpuid.h + * + * CPUID interface to Xen. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ *
+ * Copyright (c) 2007 Citrix Systems, Inc.
+ *
+ * Authors:
+ *    Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__
+#define __XEN_PUBLIC_ARCH_X86_CPUID_H__
+
+/*
+ * For compatibility with other hypervisor interfaces, the Xen cpuid leaves
+ * can be found at the first otherwise unused 0x100 aligned boundary starting
+ * from 0x40000000.
+ *
+ * e.g. if viridian extensions are enabled for an HVM domain, the Xen cpuid
+ * leaves will start at 0x40000100.
+ */
+
+#define XEN_CPUID_FIRST_LEAF 0x40000000
+#define XEN_CPUID_LEAF(i)    (XEN_CPUID_FIRST_LEAF + (i))
+
+/*
+ * Leaf 1 (0x40000x00)
+ * EAX: Largest Xen-information leaf. All leaves up to and including @EAX
+ *      are supported by the Xen host.
+ * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification
+ *      of a Xen host.
+ */
+#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */
+#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */
+#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */
+
+/*
+ * Leaf 2 (0x40000x01)
+ * EAX[31:16]: Xen major version.
+ * EAX[15: 0]: Xen minor version.
+ * EBX-EDX: Reserved (currently all zeroes).
+ */
+
+/*
+ * Leaf 3 (0x40000x02)
+ * EAX: Number of hypercall transfer pages. This register is always guaranteed
+ *      to specify one hypercall page.
+ * EBX: Base address of Xen-specific MSRs.
+ * ECX: Features 1. Unused bits are set to zero.
+ * EDX: Features 2. Unused bits are set to zero.
+ */
+
+/* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */
+#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
+#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD  (1u<<0)
+
+/*
+ * Leaf 4 (0x40000x03)
+ * Sub-leaf 0: EAX: bit 0: emulated tsc
+ *                  bit 1: host tsc is known to be reliable
+ *                  bit 2: RDTSCP instruction available
+ *             EBX: tsc_mode: 0=default (emulate if necessary), 1=emulate,
+ *                            2=no emulation, 3=no emulation + TSC_AUX support
+ *             ECX: guest tsc frequency in kHz
+ *             EDX: guest tsc incarnation (migration count)
+ * Sub-leaf 1: EAX: tsc offset low part
+ *             EBX: tsc offset high part
+ *             ECX: multiplier for tsc->ns conversion
+ *             EDX: shift amount for tsc->ns conversion
+ * Sub-leaf 2: EAX: host tsc frequency in kHz
+ */
+
+/*
+ * Leaf 5 (0x40000x04)
+ * HVM-specific features
+ * Sub-leaf 0: EAX: Features
+ * Sub-leaf 0: EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
+ * Sub-leaf 0: ECX: domain id (iff EAX has XEN_HVM_CPUID_DOMID_PRESENT flag)
+ */
+#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC registers */
+#define XEN_HVM_CPUID_X2APIC_VIRT      (1u << 1) /* Virtualized x2APIC accesses */
+/* Memory mapped from other domains has valid IOMMU entries */
+#define XEN_HVM_CPUID_IOMMU_MAPPINGS   (1u << 2)
+#define XEN_HVM_CPUID_VCPU_ID_PRESENT  (1u << 3) /* vcpu id is present in EBX */
+#define XEN_HVM_CPUID_DOMID_PRESENT    (1u << 4) /* domid is present in ECX */
+
+/*
+ * Leaf 6 (0x40000x05)
+ * PV-specific parameters
+ * Sub-leaf 0: EAX: max available sub-leaf
+ * Sub-leaf 0: EBX: bits 0-7: max machine address width
+ */
+
+/* Max. address width in bits taking memory hotplug into account.
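+ * E.g. (illustrative): a value of 39 in EBX[7:0] of sub-leaf 0 means
+ * the host may address up to 2^39 bytes (512 GiB) of physical memory,
+ * hotplug included.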
*/ +#define XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK (0xffu << 0) + +#define XEN_CPUID_MAX_NUM_LEAVES 5 + +#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */ diff --git a/include/hw/xen/interface/arch-x86/xen-x86_32.h b/include/hw/xen/interface/arch-x86/xen-x86_32.h new file mode 100644 index 0000000000..19d7388633 --- /dev/null +++ b/include/hw/xen/interface/arch-x86/xen-x86_32.h @@ -0,0 +1,194 @@ +/****************************************************************************** + * xen-x86_32.h + * + * Guest OS interface to x86 32-bit Xen. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2004-2007, K A Fraser + */ + +#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ +#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ + +/* + * Hypercall interface: + * Input: %ebx, %ecx, %edx, %esi, %edi, %ebp (arguments 1-6) + * Output: %eax + * Access is via hypercall page (set up by guest loader or via a Xen MSR): + * call hypercall_page + hypercall-number * 32 + * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx) + */ + +/* + * These flat segments are in the Xen-private section of every GDT. Since these + * are also present in the initial GDT, many OSes will be able to avoid + * installing their own GDT. + */ +#define FLAT_RING1_CS 0xe019 /* GDT index 259 */ +#define FLAT_RING1_DS 0xe021 /* GDT index 260 */ +#define FLAT_RING1_SS 0xe021 /* GDT index 260 */ +#define FLAT_RING3_CS 0xe02b /* GDT index 261 */ +#define FLAT_RING3_DS 0xe033 /* GDT index 262 */ +#define FLAT_RING3_SS 0xe033 /* GDT index 262 */ + +#define FLAT_KERNEL_CS FLAT_RING1_CS +#define FLAT_KERNEL_DS FLAT_RING1_DS +#define FLAT_KERNEL_SS FLAT_RING1_SS +#define FLAT_USER_CS FLAT_RING3_CS +#define FLAT_USER_DS FLAT_RING3_DS +#define FLAT_USER_SS FLAT_RING3_SS + +#define __HYPERVISOR_VIRT_START_PAE 0xF5800000 +#define __MACH2PHYS_VIRT_START_PAE 0xF5800000 +#define __MACH2PHYS_VIRT_END_PAE 0xF6800000 +#define HYPERVISOR_VIRT_START_PAE xen_mk_ulong(__HYPERVISOR_VIRT_START_PAE) +#define MACH2PHYS_VIRT_START_PAE xen_mk_ulong(__MACH2PHYS_VIRT_START_PAE) +#define MACH2PHYS_VIRT_END_PAE xen_mk_ulong(__MACH2PHYS_VIRT_END_PAE) + +/* Non-PAE bounds are obsolete. 
*/ +#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000 +#define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000 +#define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000 +#define HYPERVISOR_VIRT_START_NONPAE \ + xen_mk_ulong(__HYPERVISOR_VIRT_START_NONPAE) +#define MACH2PHYS_VIRT_START_NONPAE \ + xen_mk_ulong(__MACH2PHYS_VIRT_START_NONPAE) +#define MACH2PHYS_VIRT_END_NONPAE \ + xen_mk_ulong(__MACH2PHYS_VIRT_END_NONPAE) + +#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE +#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE +#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE + +#ifndef HYPERVISOR_VIRT_START +#define HYPERVISOR_VIRT_START xen_mk_ulong(__HYPERVISOR_VIRT_START) +#endif + +#define MACH2PHYS_VIRT_START xen_mk_ulong(__MACH2PHYS_VIRT_START) +#define MACH2PHYS_VIRT_END xen_mk_ulong(__MACH2PHYS_VIRT_END) +#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2) +#ifndef machine_to_phys_mapping +#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START) +#endif + +/* 32-/64-bit invariability for control interfaces (domctl/sysctl). */ +#if defined(__XEN__) || defined(__XEN_TOOLS__) +#undef ___DEFINE_XEN_GUEST_HANDLE +#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ + typedef struct { type *p; } \ + __guest_handle_ ## name; \ + typedef struct { union { type *p; uint64_aligned_t q; }; } \ + __guest_handle_64_ ## name +#undef set_xen_guest_handle_raw +#define set_xen_guest_handle_raw(hnd, val) \ + do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ + (hnd).p = val; \ + } while ( 0 ) +#define int64_aligned_t int64_t __attribute__((aligned(8))) +#define uint64_aligned_t uint64_t __attribute__((aligned(8))) +#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name +#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name) +#endif + +#ifndef __ASSEMBLY__ + +#if defined(XEN_GENERATING_COMPAT_HEADERS) +/* nothing */ +#elif defined(__XEN__) || defined(__XEN_TOOLS__) +/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax). */ +#define __DECL_REG_LO8(which) union { \ + uint32_t e ## which ## x; \ + uint16_t which ## x; \ + struct { \ + uint8_t which ## l; \ + uint8_t which ## h; \ + }; \ +} +#define __DECL_REG_LO16(name) union { \ + uint32_t e ## name, _e ## name; \ + uint16_t name; \ +} +#else +/* Other sources must always use the proper 32-bit name (e.g., eax). */ +#define __DECL_REG_LO8(which) uint32_t e ## which ## x +#define __DECL_REG_LO16(name) uint32_t e ## name +#endif + +struct cpu_user_regs { + __DECL_REG_LO8(b); + __DECL_REG_LO8(c); + __DECL_REG_LO8(d); + __DECL_REG_LO16(si); + __DECL_REG_LO16(di); + __DECL_REG_LO16(bp); + __DECL_REG_LO8(a); + uint16_t error_code; /* private */ + uint16_t entry_vector; /* private */ + __DECL_REG_LO16(ip); + uint16_t cs; + uint8_t saved_upcall_mask; + uint8_t _pad0; + __DECL_REG_LO16(flags); /* eflags.IF == !saved_upcall_mask */ + __DECL_REG_LO16(sp); + uint16_t ss, _pad1; + uint16_t es, _pad2; + uint16_t ds, _pad3; + uint16_t fs, _pad4; + uint16_t gs, _pad5; +}; +typedef struct cpu_user_regs cpu_user_regs_t; +DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); + +#undef __DECL_REG_LO8 +#undef __DECL_REG_LO16 + +/* + * Page-directory addresses above 4GB do not fit into architectural %cr3. + * When accessing %cr3, or equivalent field in vcpu_guest_context, guests + * must use the following accessor macros to pack/unpack valid MFNs. 
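+ *
+ * E.g. (illustrative): for pfn 0x123456, xen_pfn_to_cr3() rotates the
+ * value left by 12 bits within 32 bits, giving
+ * (0x123456 << 12) | (0x123456 >> 20) == 0x23456001, so frame-number
+ * bits that would not fit above bit 31 land in the low 12 bits;
+ * xen_cr3_to_pfn() is the inverse rotation.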
+ */ +#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) +#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) + +struct arch_vcpu_info { + unsigned long cr2; + unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */ +}; +typedef struct arch_vcpu_info arch_vcpu_info_t; + +struct xen_callback { + unsigned long cs; + unsigned long eip; +}; +typedef struct xen_callback xen_callback_t; + +#endif /* !__ASSEMBLY__ */ + +#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/interface/arch-x86/xen-x86_64.h b/include/hw/xen/interface/arch-x86/xen-x86_64.h new file mode 100644 index 0000000000..40aed14366 --- /dev/null +++ b/include/hw/xen/interface/arch-x86/xen-x86_64.h @@ -0,0 +1,241 @@ +/****************************************************************************** + * xen-x86_64.h + * + * Guest OS interface to x86 64-bit Xen. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2004-2006, K A Fraser + */ + +#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ +#define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ + +/* + * Hypercall interface: + * Input: %rdi, %rsi, %rdx, %r10, %r8, %r9 (arguments 1-6) + * Output: %rax + * Access is via hypercall page (set up by guest loader or via a Xen MSR): + * call hypercall_page + hypercall-number * 32 + * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi) + */ + +/* + * 64-bit segment selectors + * These flat segments are in the Xen-private section of every GDT. Since these + * are also present in the initial GDT, many OSes will be able to avoid + * installing their own GDT. 
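+ *
+ * (Illustrative note: the low bits of a selector encode TI and RPL,
+ * e.g. FLAT_RING3_CS64 (0xe033) has bit 2 clear, i.e. TI = 0, a GDT
+ * selector, and (0xe033 & 3) == 3, requested privilege level 3.)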
+ */
+
+#define FLAT_RING3_CS32 0xe023  /* GDT index 260 */
+#define FLAT_RING3_CS64 0xe033  /* GDT index 262 */
+#define FLAT_RING3_DS32 0xe02b  /* GDT index 261 */
+#define FLAT_RING3_DS64 0x0000  /* NULL selector  */
+#define FLAT_RING3_SS32 0xe02b  /* GDT index 261 */
+#define FLAT_RING3_SS64 0xe02b  /* GDT index 261 */
+
+#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
+#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
+#define FLAT_KERNEL_DS   FLAT_KERNEL_DS64
+#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
+#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
+#define FLAT_KERNEL_CS   FLAT_KERNEL_CS64
+#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
+#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
+#define FLAT_KERNEL_SS   FLAT_KERNEL_SS64
+
+#define FLAT_USER_DS64 FLAT_RING3_DS64
+#define FLAT_USER_DS32 FLAT_RING3_DS32
+#define FLAT_USER_DS   FLAT_USER_DS64
+#define FLAT_USER_CS64 FLAT_RING3_CS64
+#define FLAT_USER_CS32 FLAT_RING3_CS32
+#define FLAT_USER_CS   FLAT_USER_CS64
+#define FLAT_USER_SS64 FLAT_RING3_SS64
+#define FLAT_USER_SS32 FLAT_RING3_SS32
+#define FLAT_USER_SS   FLAT_USER_SS64
+
+#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
+#define __HYPERVISOR_VIRT_END   0xFFFF880000000000
+#define __MACH2PHYS_VIRT_START  0xFFFF800000000000
+#define __MACH2PHYS_VIRT_END    0xFFFF804000000000
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START xen_mk_ulong(__HYPERVISOR_VIRT_START)
+#define HYPERVISOR_VIRT_END   xen_mk_ulong(__HYPERVISOR_VIRT_END)
+#endif
+
+#define MACH2PHYS_VIRT_START  xen_mk_ulong(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END    xen_mk_ulong(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES  ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#endif
+
+/*
+ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
+ * @which == SEGBASE_*  ;  @base == 64-bit base address
+ * Returns 0 on success.
+ */
+#define SEGBASE_FS          0
+#define SEGBASE_GS_USER     1
+#define SEGBASE_GS_KERNEL   2
+#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
+
+/*
+ * int HYPERVISOR_iret(void)
+ * All arguments are on the kernel stack, in the following format.
+ * Never returns if successful. Current kernel context is lost.
+ * The saved CS is mapped as follows:
+ *   RING0 -> RING3 kernel mode.
+ *   RING1 -> RING3 kernel mode.
+ *   RING2 -> RING3 kernel mode.
+ *   RING3 -> RING3 user mode.
+ * However RING0 indicates that the guest kernel should return to itself
+ * directly with
+ *      orb   $3,1*8(%rsp)
+ *      iretq
+ * If flags contains VGCF_in_syscall:
+ *   Restore RAX, RIP, RFLAGS, RSP.
+ *   Discard R11, RCX, CS, SS.
+ * Otherwise:
+ *   Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
+ * All other registers are saved on hypercall entry and restored to user.
+ */
+/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
+#define _VGCF_in_syscall 8
+#define VGCF_in_syscall  (1<<_VGCF_in_syscall)
+#define VGCF_IN_SYSCALL  VGCF_in_syscall
+
+#ifndef __ASSEMBLY__
+
+struct iret_context {
+    /* Top of stack (%rsp at point of hypercall). */
+    uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
+    /* Bottom of iret stack frame. */
+};
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax/rax).
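+ * Illustrative sketch (not part of the ABI): with __DECL_REG_LOHI(a)
+ * below, the same storage is visible as rax/eax/ax/al/ah, so
+ *
+ *     struct cpu_user_regs regs;
+ *     regs.rax = 0x1234;
+ *     assert(regs.al == 0x34 && regs.ah == 0x12);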
*/ +#define __DECL_REG_LOHI(which) union { \ + uint64_t r ## which ## x; \ + uint32_t e ## which ## x; \ + uint16_t which ## x; \ + struct { \ + uint8_t which ## l; \ + uint8_t which ## h; \ + }; \ +} +#define __DECL_REG_LO8(name) union { \ + uint64_t r ## name; \ + uint32_t e ## name; \ + uint16_t name; \ + uint8_t name ## l; \ +} +#define __DECL_REG_LO16(name) union { \ + uint64_t r ## name; \ + uint32_t e ## name; \ + uint16_t name; \ +} +#define __DECL_REG_HI(num) union { \ + uint64_t r ## num; \ + uint32_t r ## num ## d; \ + uint16_t r ## num ## w; \ + uint8_t r ## num ## b; \ +} +#elif defined(__GNUC__) && !defined(__STRICT_ANSI__) +/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ +#define __DECL_REG(name) union { \ + uint64_t r ## name, e ## name; \ + uint32_t _e ## name; \ +} +#else +/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */ +#define __DECL_REG(name) uint64_t r ## name +#endif + +#ifndef __DECL_REG_LOHI +#define __DECL_REG_LOHI(name) __DECL_REG(name ## x) +#define __DECL_REG_LO8 __DECL_REG +#define __DECL_REG_LO16 __DECL_REG +#define __DECL_REG_HI(num) uint64_t r ## num +#endif + +struct cpu_user_regs { + __DECL_REG_HI(15); + __DECL_REG_HI(14); + __DECL_REG_HI(13); + __DECL_REG_HI(12); + __DECL_REG_LO8(bp); + __DECL_REG_LOHI(b); + __DECL_REG_HI(11); + __DECL_REG_HI(10); + __DECL_REG_HI(9); + __DECL_REG_HI(8); + __DECL_REG_LOHI(a); + __DECL_REG_LOHI(c); + __DECL_REG_LOHI(d); + __DECL_REG_LO8(si); + __DECL_REG_LO8(di); + uint32_t error_code; /* private */ + uint32_t entry_vector; /* private */ + __DECL_REG_LO16(ip); + uint16_t cs, _pad0[1]; + uint8_t saved_upcall_mask; + uint8_t _pad1[3]; + __DECL_REG_LO16(flags); /* rflags.IF == !saved_upcall_mask */ + __DECL_REG_LO8(sp); + uint16_t ss, _pad2[3]; + uint16_t es, _pad3[3]; + uint16_t ds, _pad4[3]; + uint16_t fs, _pad5[3]; + uint16_t gs, _pad6[3]; +}; +typedef struct cpu_user_regs cpu_user_regs_t; +DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); + +#undef __DECL_REG +#undef __DECL_REG_LOHI +#undef __DECL_REG_LO8 +#undef __DECL_REG_LO16 +#undef __DECL_REG_HI + +#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) +#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) + +struct arch_vcpu_info { + unsigned long cr2; + unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ +}; +typedef struct arch_vcpu_info arch_vcpu_info_t; + +typedef unsigned long xen_callback_t; + +#endif /* !__ASSEMBLY__ */ + +#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/interface/arch-x86/xen.h b/include/hw/xen/interface/arch-x86/xen.h new file mode 100644 index 0000000000..7acd94c8eb --- /dev/null +++ b/include/hw/xen/interface/arch-x86/xen.h @@ -0,0 +1,398 @@ +/****************************************************************************** + * arch-x86/xen.h + * + * Guest OS interface to x86 Xen. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2004-2006, K A Fraser
+ */
+
+#include "../xen.h"
+
+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
+#define __XEN_PUBLIC_ARCH_X86_XEN_H__
+
+/* Structural guest handles introduced in 0x00030201. */
+#if __XEN_INTERFACE_VERSION__ >= 0x00030201
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+    typedef struct { type *p; } __guest_handle_ ## name
+#else
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+    typedef type * __guest_handle_ ## name
+#endif
+
+/*
+ * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
+ * in a struct in memory.
+ * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a
+ * hypercall argument.
+ * XEN_GUEST_HANDLE_PARAM and XEN_GUEST_HANDLE are the same on X86 but
+ * they might not be on other architectures.
+ */
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+    ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
+    ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
+#define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define __XEN_GUEST_HANDLE(name)        __guest_handle_ ## name
+#define XEN_GUEST_HANDLE(name)          __XEN_GUEST_HANDLE(name)
+#define XEN_GUEST_HANDLE_PARAM(name)    XEN_GUEST_HANDLE(name)
+#define set_xen_guest_handle_raw(hnd, val)  do { (hnd).p = val; } while (0)
+#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
+
+#if defined(__i386__)
+# ifdef __XEN__
+__DeFiNe__ __DECL_REG_LO8(which) uint32_t e ## which ## x
+__DeFiNe__ __DECL_REG_LO16(name) union { uint32_t e ## name; }
+# endif
+#include "xen-x86_32.h"
+# ifdef __XEN__
+__UnDeF__ __DECL_REG_LO8
+__UnDeF__ __DECL_REG_LO16
+__DeFiNe__ __DECL_REG_LO8(which) e ## which ## x
+__DeFiNe__ __DECL_REG_LO16(name) e ## name
+# endif
+#elif defined(__x86_64__)
+#include "xen-x86_64.h"
+#endif
+
+#ifndef __ASSEMBLY__
+typedef unsigned long xen_pfn_t;
+#define PRI_xen_pfn  "lx"
+#define PRIu_xen_pfn "lu"
+#endif
+
+#define XEN_HAVE_PV_GUEST_ENTRY 1
+
+#define XEN_HAVE_PV_UPCALL_MASK 1
+
+/*
+ * `incontents 200 segdesc Segment Descriptor Tables
+ */
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_set_gdt(const xen_pfn_t frames[], unsigned int entries);
+ * `
+ */
+/*
+ * A number of GDT entries are reserved by Xen. These are not situated at the
+ * start of the GDT because some stupid OSes export hard-coded selector values
+ * in their ABI. These hard-coded values are always near the start of the GDT,
+ * so Xen places itself out of the way, at the far end of the GDT.
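+ * E.g.: with FIRST_RESERVED_GDT_PAGE == 14 (below), the reserved range
+ * starts at byte 14 * 4096 == 57344 of the GDT, i.e. at entry
+ * 57344 / 8 == 7168.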
+ *
+ * NB The LDT is set using the MMUEXT_SET_LDT op of HYPERVISOR_mmuext_op
+ */
+#define FIRST_RESERVED_GDT_PAGE  14
+#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
+
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_descriptor(u64 pa, u64 desc);
+ * `
+ * ` @pa   The machine physical address of the descriptor to
+ * `       update. Must be either a descriptor page or writable.
+ * ` @desc The descriptor value to update, in the same format as a
+ * `       native descriptor table entry.
+ */
+
+/* Maximum number of virtual CPUs in legacy multi-processor guests. */
+#define XEN_LEGACY_MAX_VCPUS 32
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long xen_ulong_t;
+#define PRI_xen_ulong "lx"
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp);
+ * `
+ * Sets the stack segment and pointer for the current vcpu.
+ */
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_set_trap_table(const struct trap_info traps[]);
+ * `
+ */
+/*
+ * Send an array of these to HYPERVISOR_set_trap_table().
+ * Terminate the array with a sentinel entry, with traps[].address==0.
+ * The privilege level specifies which modes may enter a trap via a software
+ * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
+ * privilege levels as follows:
+ *  Level == 0: No one may enter
+ *  Level == 1: Kernel may enter
+ *  Level == 2: Kernel may enter
+ *  Level == 3: Everyone may enter
+ *
+ * Note: For compatibility with kernels not setting up exception handlers
+ *       early enough, Xen will avoid trying to inject #GP (and hence crash
+ *       the domain) when an RDMSR would require this, but no handler was
+ *       set yet. The precise conditions are implementation specific, and
+ *       new code may not rely on such behavior anyway.
+ */
+#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
+#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
+#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
+struct trap_info {
+    uint8_t       vector;  /* exception vector                             */
+    uint8_t       flags;   /* 0-3: privilege level; 4: clear event enable? */
+    uint16_t      cs;      /* code selector                                */
+    unsigned long address; /* code offset                                  */
+};
+typedef struct trap_info trap_info_t;
+DEFINE_XEN_GUEST_HANDLE(trap_info_t);
+
+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
+
+/*
+ * The following is all CPU context. Note that the fpu_ctxt block is filled
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ *
+ * Also note that when calling DOMCTL_setvcpucontext for HVM guests, not all
+ * information in this structure is updated, the fields read include: fpu_ctxt
+ * (if VGCF_I387_VALID is set), flags, user_regs and debugreg[*].
+ *
+ * Note: VCPUOP_initialise for HVM guests is non-symmetric with
+ * DOMCTL_setvcpucontext, and uses struct vcpu_hvm_context from hvm/hvm_vcpu.h
+ */
+struct vcpu_guest_context {
+    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR.
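+     * (The 512 bytes match the size of the FXSAVE/FXRSTOR save area,
+     * which the CPU requires to be 16-byte aligned.)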
*/ + struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ +#define VGCF_I387_VALID (1<<0) +#define VGCF_IN_KERNEL (1<<2) +#define _VGCF_i387_valid 0 +#define VGCF_i387_valid (1<<_VGCF_i387_valid) +#define _VGCF_in_kernel 2 +#define VGCF_in_kernel (1<<_VGCF_in_kernel) +#define _VGCF_failsafe_disables_events 3 +#define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events) +#define _VGCF_syscall_disables_events 4 +#define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events) +#define _VGCF_online 5 +#define VGCF_online (1<<_VGCF_online) + unsigned long flags; /* VGCF_* flags */ + struct cpu_user_regs user_regs; /* User-level CPU registers */ + struct trap_info trap_ctxt[256]; /* Virtual IDT */ + unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ + unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ + unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ + /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */ + unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ + unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ +#ifdef __i386__ + unsigned long event_callback_cs; /* CS:EIP of event callback */ + unsigned long event_callback_eip; + unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ + unsigned long failsafe_callback_eip; +#else + unsigned long event_callback_eip; + unsigned long failsafe_callback_eip; +#ifdef __XEN__ + union { + unsigned long syscall_callback_eip; + struct { + unsigned int event_callback_cs; /* compat CS of event cb */ + unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */ + }; + }; +#else + unsigned long syscall_callback_eip; +#endif +#endif + unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ +#ifdef __x86_64__ + /* Segment base addresses. */ + uint64_t fs_base; + uint64_t gs_base_kernel; + uint64_t gs_base_user; +#endif +}; +typedef struct vcpu_guest_context vcpu_guest_context_t; +DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); + +struct arch_shared_info { + /* + * Number of valid entries in the p2m table(s) anchored at + * pfn_to_mfn_frame_list_list and/or p2m_vaddr. + */ + unsigned long max_pfn; + /* + * Frame containing list of mfns containing list of mfns containing p2m. + * A value of 0 indicates it has not yet been set up, ~0 indicates it has + * been set to invalid e.g. due to the p2m being too large for the 3-level + * p2m tree. In this case the linear mapper p2m list anchored at p2m_vaddr + * is to be used. + */ + xen_pfn_t pfn_to_mfn_frame_list_list; + unsigned long nmi_reason; + /* + * Following three fields are valid if p2m_cr3 contains a value different + * from 0. + * p2m_cr3 is the root of the address space where p2m_vaddr is valid. + * p2m_cr3 is in the same format as a cr3 value in the vcpu register state + * and holds the folded machine frame number (via xen_pfn_to_cr3) of a + * L3 or L4 page table. + * p2m_vaddr holds the virtual address of the linear p2m list. All entries + * in the range [0...max_pfn[ are accessible via this pointer. + * p2m_generation will be incremented by the guest before and after each + * change of the mappings of the p2m list. p2m_generation starts at 0 and + * a value with the least significant bit set indicates that a mapping + * update is in progress. This allows guest external software (e.g. in Dom0) + * to verify that read mappings are consistent and whether they have changed + * since the last check. 
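+ * A reader can check consistency seqlock-style; illustrative sketch
+ * only, where gen/pfn/mfn are locals and rmb() stands for a read
+ * memory barrier:
+ *
+ *     do {
+ *         gen = p2m_generation;
+ *         rmb();
+ *         mfn = ((xen_pfn_t *)p2m_vaddr)[pfn];
+ *         rmb();
+ *     } while ( (gen & 1) || gen != p2m_generation );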
+ * Modifying a p2m element in the linear p2m list is allowed via an atomic + * write only. + */ + unsigned long p2m_cr3; /* cr3 value of the p2m address space */ + unsigned long p2m_vaddr; /* virtual address of the p2m list */ + unsigned long p2m_generation; /* generation count of p2m mapping */ +#ifdef __i386__ + /* There's no room for this field in the generic structure. */ + uint32_t wc_sec_hi; +#endif +}; +typedef struct arch_shared_info arch_shared_info_t; + +#if defined(__XEN__) || defined(__XEN_TOOLS__) +/* + * struct xen_arch_domainconfig's ABI is covered by + * XEN_DOMCTL_INTERFACE_VERSION. + */ +struct xen_arch_domainconfig { +#define _XEN_X86_EMU_LAPIC 0 +#define XEN_X86_EMU_LAPIC (1U<<_XEN_X86_EMU_LAPIC) +#define _XEN_X86_EMU_HPET 1 +#define XEN_X86_EMU_HPET (1U<<_XEN_X86_EMU_HPET) +#define _XEN_X86_EMU_PM 2 +#define XEN_X86_EMU_PM (1U<<_XEN_X86_EMU_PM) +#define _XEN_X86_EMU_RTC 3 +#define XEN_X86_EMU_RTC (1U<<_XEN_X86_EMU_RTC) +#define _XEN_X86_EMU_IOAPIC 4 +#define XEN_X86_EMU_IOAPIC (1U<<_XEN_X86_EMU_IOAPIC) +#define _XEN_X86_EMU_PIC 5 +#define XEN_X86_EMU_PIC (1U<<_XEN_X86_EMU_PIC) +#define _XEN_X86_EMU_VGA 6 +#define XEN_X86_EMU_VGA (1U<<_XEN_X86_EMU_VGA) +#define _XEN_X86_EMU_IOMMU 7 +#define XEN_X86_EMU_IOMMU (1U<<_XEN_X86_EMU_IOMMU) +#define _XEN_X86_EMU_PIT 8 +#define XEN_X86_EMU_PIT (1U<<_XEN_X86_EMU_PIT) +#define _XEN_X86_EMU_USE_PIRQ 9 +#define XEN_X86_EMU_USE_PIRQ (1U<<_XEN_X86_EMU_USE_PIRQ) +#define _XEN_X86_EMU_VPCI 10 +#define XEN_X86_EMU_VPCI (1U<<_XEN_X86_EMU_VPCI) + +#define XEN_X86_EMU_ALL (XEN_X86_EMU_LAPIC | XEN_X86_EMU_HPET | \ + XEN_X86_EMU_PM | XEN_X86_EMU_RTC | \ + XEN_X86_EMU_IOAPIC | XEN_X86_EMU_PIC | \ + XEN_X86_EMU_VGA | XEN_X86_EMU_IOMMU | \ + XEN_X86_EMU_PIT | XEN_X86_EMU_USE_PIRQ |\ + XEN_X86_EMU_VPCI) + uint32_t emulation_flags; + +/* + * Select whether to use a relaxed behavior for accesses to MSRs not explicitly + * handled by Xen instead of injecting a #GP to the guest. Note this option + * doesn't allow the guest to read or write to the underlying MSR. + */ +#define XEN_X86_MSR_RELAXED (1u << 0) + uint32_t misc_flags; +}; + +/* Location of online VCPU bitmap. */ +#define XEN_ACPI_CPU_MAP 0xaf00 +#define XEN_ACPI_CPU_MAP_LEN ((HVM_MAX_VCPUS + 7) / 8) + +/* GPE0 bit set during CPU hotplug */ +#define XEN_ACPI_GPE0_CPUHP_BIT 2 +#endif + +/* + * Representations of architectural CPUID and MSR information. Used as the + * serialised version of Xen's internal representation. + */ +typedef struct xen_cpuid_leaf { +#define XEN_CPUID_NO_SUBLEAF 0xffffffffu + uint32_t leaf, subleaf; + uint32_t a, b, c, d; +} xen_cpuid_leaf_t; +DEFINE_XEN_GUEST_HANDLE(xen_cpuid_leaf_t); + +typedef struct xen_msr_entry { + uint32_t idx; + uint32_t flags; /* Reserved MBZ. */ + uint64_t val; +} xen_msr_entry_t; +DEFINE_XEN_GUEST_HANDLE(xen_msr_entry_t); + +#endif /* !__ASSEMBLY__ */ + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_fpu_taskswitch(int set); + * ` + * Sets (if set!=0) or clears (if set==0) CR0.TS. + */ + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_set_debugreg(int regno, unsigned long value); + * + * ` unsigned long + * ` HYPERVISOR_get_debugreg(int regno); + * For 0<=reg<=7, returns the debug register value. + * For other values of reg, returns ((unsigned long)-EINVAL). + * (Unfortunately, this interface is defective.) + */ + +/* + * Prefix forces emulation of some non-trapping instructions. + * Currently only CPUID. 
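+ *
+ * E.g. (illustrative), from C a guest can force Xen to handle CPUID
+ * for the first Xen leaf (0x40000000):
+ *
+ *     uint32_t eax, ebx, ecx, edx;
+ *     asm volatile ( XEN_CPUID
+ *                    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ *                    : "0" (0x40000000) );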
+ */ +#ifdef __ASSEMBLY__ +#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; +#define XEN_CPUID XEN_EMULATE_PREFIX cpuid +#else +#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " +#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" +#endif + +/* + * Debug console IO port, also called "port E9 hack". Each character written + * to this IO port will be printed on the hypervisor console, subject to log + * level restrictions. + */ +#define XEN_HVM_DEBUGCONS_IOPORT 0xe9 + +#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/interface/event_channel.h b/include/hw/xen/interface/event_channel.h new file mode 100644 index 0000000000..73c9f38ce1 --- /dev/null +++ b/include/hw/xen/interface/event_channel.h @@ -0,0 +1,388 @@ +/****************************************************************************** + * event_channel.h + * + * Event channels between domains. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2003-2004, K A Fraser. + */ + +#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ +#define __XEN_PUBLIC_EVENT_CHANNEL_H__ + +#include "xen.h" + +/* + * `incontents 150 evtchn Event Channels + * + * Event channels are the basic primitive provided by Xen for event + * notifications. An event is the Xen equivalent of a hardware + * interrupt. They essentially store one bit of information, the event + * of interest is signalled by transitioning this bit from 0 to 1. + * + * Notifications are received by a guest via an upcall from Xen, + * indicating when an event arrives (setting the bit). Further + * notifications are masked until the bit is cleared again (therefore, + * guests must check the value of the bit after re-enabling event + * delivery to ensure no missed notifications). + * + * Event notifications can be masked by setting a flag; this is + * equivalent to disabling interrupts and can be used to ensure + * atomicity of certain operations in the guest kernel. + * + * Event channels are represented by the evtchn_* fields in + * struct shared_info and struct vcpu_info. + */ + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_event_channel_op(enum event_channel_op cmd, void *args) + * ` + * @cmd == EVTCHNOP_* (event-channel operation). + * @args == struct evtchn_* Operation-specific extra arguments (NULL if none). 
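+ *
+ * E.g. (illustrative): signalling the remote end of an already-bound
+ * channel, where local_port is a previously returned evtchn_port_t:
+ *
+ *     struct evtchn_send send = { .port = local_port };
+ *     rc = HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);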
+ */
+
+/* ` enum event_channel_op { // EVTCHNOP_* => struct evtchn_* */
+#define EVTCHNOP_bind_interdomain 0
+#define EVTCHNOP_bind_virq        1
+#define EVTCHNOP_bind_pirq        2
+#define EVTCHNOP_close            3
+#define EVTCHNOP_send             4
+#define EVTCHNOP_status           5
+#define EVTCHNOP_alloc_unbound    6
+#define EVTCHNOP_bind_ipi         7
+#define EVTCHNOP_bind_vcpu        8
+#define EVTCHNOP_unmask           9
+#define EVTCHNOP_reset           10
+#define EVTCHNOP_init_control    11
+#define EVTCHNOP_expand_array    12
+#define EVTCHNOP_set_priority    13
+#ifdef __XEN__
+#define EVTCHNOP_reset_cont      14
+#endif
+/* ` } */
+
+typedef uint32_t evtchn_port_t;
+DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
+
+/*
+ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
+ * accepting interdomain bindings from domain <remote_dom>. A fresh port
+ * is allocated in <dom> and returned as <port>.
+ * NOTES:
+ *  1. If the caller is unprivileged then <dom> must be DOMID_SELF.
+ *  2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
+ */
+struct evtchn_alloc_unbound {
+    /* IN parameters */
+    domid_t dom, remote_dom;
+    /* OUT parameters */
+    evtchn_port_t port;
+};
+typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
+
+/*
+ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
+ * the calling domain and <remote_dom>. <remote_port> must identify a port
+ * that is unbound and marked as accepting bindings from the calling
+ * domain. A fresh port is allocated in the calling domain and returned as
+ * <local_port>.
+ *
+ * In case the peer domain has already tried to set our event channel
+ * pending, before it was bound, EVTCHNOP_bind_interdomain always sets
+ * the local event channel pending.
+ *
+ * The usual pattern of use, in the guest's upcall (or subsequent
+ * handler) is as follows: (Re-enable the event channel for subsequent
+ * signalling and then) check for the existence of whatever condition
+ * is being waited for by other means, and take whatever action is
+ * needed (if any).
+ *
+ * NOTES:
+ *  1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
+ */
+struct evtchn_bind_interdomain {
+    /* IN parameters. */
+    domid_t remote_dom;
+    evtchn_port_t remote_port;
+    /* OUT parameters. */
+    evtchn_port_t local_port;
+};
+typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
+
+/*
+ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <virq> on specified
+ * vcpu.
+ * NOTES:
+ *  1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
+ *     in xen.h for the classification of each VIRQ.
+ *  2. Global VIRQs must be allocated on VCPU0 but can subsequently be
+ *     re-bound via EVTCHNOP_bind_vcpu.
+ *  3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
+ *     The allocated event channel is bound to the specified vcpu and the
+ *     binding cannot be changed.
+ */
+struct evtchn_bind_virq {
+    /* IN parameters. */
+    uint32_t virq; /* enum virq */
+    uint32_t vcpu;
+    /* OUT parameters. */
+    evtchn_port_t port;
+};
+typedef struct evtchn_bind_virq evtchn_bind_virq_t;
+
+/*
+ * EVTCHNOP_bind_pirq: Bind a local event channel to a real IRQ (PIRQ <irq>).
+ * NOTES:
+ *  1. A physical IRQ may be bound to at most one event channel per domain.
+ *  2. Only a sufficiently-privileged domain may bind to a physical IRQ.
+ */
+struct evtchn_bind_pirq {
+    /* IN parameters. */
+    uint32_t pirq;
+#define BIND_PIRQ__WILL_SHARE 1
+    uint32_t flags; /* BIND_PIRQ__* */
+    /* OUT parameters. */
+    evtchn_port_t port;
+};
+typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
+
+/*
+ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
+ * NOTES:
+ *  1. The allocated event channel is bound to the specified vcpu. The
+ *     binding may not be changed.
+ */
+struct evtchn_bind_ipi {
+    uint32_t vcpu;
+    /* OUT parameters. */
+    evtchn_port_t port;
+};
+typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
+
+/*
+ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
+ * interdomain then the remote end is placed in the unbound state
+ * (EVTCHNSTAT_unbound), awaiting a new connection.
+ */
+struct evtchn_close {
+    /* IN parameters. */
+    evtchn_port_t port;
+};
+typedef struct evtchn_close evtchn_close_t;
+
+/*
+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
+ * endpoint is <port>.
+ */
+struct evtchn_send {
+    /* IN parameters. */
+    evtchn_port_t port;
+};
+typedef struct evtchn_send evtchn_send_t;
+
+/*
+ * EVTCHNOP_status: Get the current status of the communication channel which
+ * has an endpoint at <port>.
+ * NOTES:
+ *  1. <dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may obtain the status of an event
+ *     channel for which <dom> is not DOMID_SELF.
+ */
+struct evtchn_status {
+    /* IN parameters */
+    domid_t  dom;
+    evtchn_port_t port;
+    /* OUT parameters */
+#define EVTCHNSTAT_closed      0 /* Channel is not in use.                  */
+#define EVTCHNSTAT_unbound     1 /* Channel is waiting for an interdomain
+                                    connection.                             */
+#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain.  */
+#define EVTCHNSTAT_pirq        3 /* Channel is bound to a phys IRQ line.    */
+#define EVTCHNSTAT_virq        4 /* Channel is bound to a virtual IRQ line  */
+#define EVTCHNSTAT_ipi         5 /* Channel is bound to a virtual IPI line  */
+    uint32_t status;
+    uint32_t vcpu;               /* VCPU to which this channel is bound.    */
+    union {
+        struct {
+            domid_t dom;
+        } unbound;               /* EVTCHNSTAT_unbound */
+        struct {
+            domid_t dom;
+            evtchn_port_t port;
+        } interdomain;           /* EVTCHNSTAT_interdomain */
+        uint32_t pirq;           /* EVTCHNSTAT_pirq        */
+        uint32_t virq;           /* EVTCHNSTAT_virq        */
+    } u;
+};
+typedef struct evtchn_status evtchn_status_t;
+
+/*
+ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
+ * event is pending.
+ * NOTES:
+ *  1. IPI-bound channels always notify the vcpu specified at bind time.
+ *     This binding cannot be changed.
+ *  2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
+ *     This binding cannot be changed.
+ *  3. All other channels notify vcpu0 by default. This default is set when
+ *     the channel is allocated (a port that is freed and subsequently reused
+ *     has its binding reset to vcpu0).
+ */
+struct evtchn_bind_vcpu {
+    /* IN parameters. */
+    evtchn_port_t port;
+    uint32_t vcpu;
+};
+typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
+
+/*
+ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
+ * a notification to the appropriate VCPU if an event is pending.
+ */
+struct evtchn_unmask {
+    /* IN parameters. */
+    evtchn_port_t port;
+};
+typedef struct evtchn_unmask evtchn_unmask_t;
+
+/*
+ * EVTCHNOP_reset: Close all event channels associated with specified domain.
+ * NOTES:
+ *  1. <dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may specify <dom> other than
+ *     DOMID_SELF.
+ *  3. Destroys all control blocks and event array, resets event channel
+ *     operations to 2-level ABI if called with <dom> == DOMID_SELF and FIFO
+ *     ABI was used. Guests should not bind events during EVTCHNOP_reset call
+ *     as these events are likely to be lost.
+ */
+struct evtchn_reset {
+    /* IN parameters. */
+    domid_t dom;
+};
+typedef struct evtchn_reset evtchn_reset_t;
+
+/*
+ * EVTCHNOP_init_control: initialize the control block for the FIFO ABI.
+ *
+ * Note: any events that are currently pending will not be resent and
+ * will be lost. Guests should call this before binding any event to
+ * avoid losing any events.
+ */
+struct evtchn_init_control {
+    /* IN parameters. */
+    uint64_t control_gfn;
+    uint32_t offset;
+    uint32_t vcpu;
+    /* OUT parameters. */
+    uint8_t link_bits;
+    uint8_t _pad[7];
+};
+typedef struct evtchn_init_control evtchn_init_control_t;
+
+/*
+ * EVTCHNOP_expand_array: add an additional page to the event array.
+ */
+struct evtchn_expand_array {
+    /* IN parameters. */
+    uint64_t array_gfn;
+};
+typedef struct evtchn_expand_array evtchn_expand_array_t;
+
+/*
+ * EVTCHNOP_set_priority: set the priority for an event channel.
+ */
+struct evtchn_set_priority {
+    /* IN parameters. */
+    evtchn_port_t port;
+    uint32_t priority;
+};
+typedef struct evtchn_set_priority evtchn_set_priority_t;
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_event_channel_op_compat(struct evtchn_op *op)
+ * `
+ * Superseded by the new event_channel_op() hypercall since 0x00030202.
+ */
+struct evtchn_op {
+    uint32_t cmd; /* enum event_channel_op */
+    union {
+        evtchn_alloc_unbound_t    alloc_unbound;
+        evtchn_bind_interdomain_t bind_interdomain;
+        evtchn_bind_virq_t        bind_virq;
+        evtchn_bind_pirq_t        bind_pirq;
+        evtchn_bind_ipi_t         bind_ipi;
+        evtchn_close_t            close;
+        evtchn_send_t             send;
+        evtchn_status_t           status;
+        evtchn_bind_vcpu_t        bind_vcpu;
+        evtchn_unmask_t           unmask;
+    } u;
+};
+typedef struct evtchn_op evtchn_op_t;
+DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
+
+/*
+ * 2-level ABI
+ */
+
+#define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)
+
+/*
+ * FIFO ABI
+ */
+
+/* Events may have priorities from 0 (highest) to 15 (lowest). */
+#define EVTCHN_FIFO_PRIORITY_MAX     0
+#define EVTCHN_FIFO_PRIORITY_DEFAULT 7
+#define EVTCHN_FIFO_PRIORITY_MIN     15
+
+#define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1)
+
+typedef uint32_t event_word_t;
+
+#define EVTCHN_FIFO_PENDING 31
+#define EVTCHN_FIFO_MASKED  30
+#define EVTCHN_FIFO_LINKED  29
+#define EVTCHN_FIFO_BUSY    28
+
+#define EVTCHN_FIFO_LINK_BITS 17
+#define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1)
+
+#define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS)
+
+struct evtchn_fifo_control_block {
+    uint32_t ready;
+    uint32_t _rsvd;
+    uint32_t head[EVTCHN_FIFO_MAX_QUEUES];
+};
+typedef struct evtchn_fifo_control_block evtchn_fifo_control_block_t;
+
+#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/features.h b/include/hw/xen/interface/features.h
new file mode 100644
index 0000000000..9ee2f760ef
--- /dev/null
+++ b/include/hw/xen/interface/features.h
@@ -0,0 +1,143 @@
+/******************************************************************************
+ * features.h
+ *
+ * Feature flags, reported by XENVER_get_features.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2006, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_FEATURES_H__
+#define __XEN_PUBLIC_FEATURES_H__
+
+/*
+ * `incontents 200 elfnotes_features XEN_ELFNOTE_FEATURES
+ *
+ * The list of all the features the guest supports. They are set by
+ * parsing the XEN_ELFNOTE_FEATURES and XEN_ELFNOTE_SUPPORTED_FEATURES
+ * string. The format is the feature names (as given here without the
+ * "XENFEAT_" prefix) separated by '|' characters.
+ * If a feature is required for the kernel to function then the feature name
+ * must be preceded by a '!' character.
+ *
+ * Note that if XEN_ELFNOTE_SUPPORTED_FEATURES is used, then XENFEAT_dom0
+ * MUST be set if the guest is to be booted as dom0.
+ */
+
+/*
+ * If set, the guest does not need to write-protect its pagetables, and can
+ * update them via direct writes.
+ */
+#define XENFEAT_writable_page_tables       0
+
+/*
+ * If set, the guest does not need to write-protect its segment descriptor
+ * tables, and can update them via direct writes.
+ */
+#define XENFEAT_writable_descriptor_tables 1
+
+/*
+ * If set, translation between the guest's 'pseudo-physical' address space
+ * and the host's machine address space is handled by the hypervisor. In this
+ * mode the guest does not need to perform phys-to/from-machine translations
+ * when performing page table operations.
+ */
+#define XENFEAT_auto_translated_physmap    2
+
+/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
+#define XENFEAT_supervisor_mode_kernel     3
+
+/*
+ * If set, the guest does not need to allocate x86 PAE page directories
+ * below 4GB. This flag is usually implied by auto_translated_physmap.
+ */
+#define XENFEAT_pae_pgdir_above_4gb        4
+
+/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
+#define XENFEAT_mmu_pt_update_preserve_ad  5
+
+/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */
+#define XENFEAT_highmem_assist             6
+
+/*
+ * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
+ * available pte bits.
+ */
+#define XENFEAT_gnttab_map_avail_bits      7
+
+/* x86: Does this Xen host support the HVM callback vector type?
+ */
+#define XENFEAT_hvm_callback_vector        8
+
+/* x86: pvclock algorithm is safe to use on HVM */
+#define XENFEAT_hvm_safe_pvclock           9
+
+/* x86: pirq can be used by HVM guests */
+#define XENFEAT_hvm_pirqs                  10
+
+/* operation as Dom0 is supported */
+#define XENFEAT_dom0                       11
+
+/* Xen also maps grant references at pfn = mfn.
+ * This feature flag is deprecated and should not be used.
+#define XENFEAT_grant_map_identity        12
+ */
+
+/* Guest can use XENMEMF_vnode to specify virtual node for memory op. */
+#define XENFEAT_memory_op_vnode_supported 13
+
+/* arm: Hypervisor supports ARM SMC calling convention. */
+#define XENFEAT_ARM_SMCCC_supported       14
+
+/*
+ * x86/PVH: If set, ACPI RSDP can be placed at any address. Otherwise RSDP
+ * must be located in lower 1MB, as required by ACPI Specification for IA-PC
+ * systems.
+ * This feature flag is only consulted if XEN_ELFNOTE_GUEST_OS contains
+ * the "linux" string.
+ */
+#define XENFEAT_linux_rsdp_unrestricted   15
+
+/*
+ * A direct-mapped (or 1:1 mapped) domain is a domain for which its
+ * local pages have gfn == mfn. If a domain is direct-mapped,
+ * XENFEAT_direct_mapped is set; otherwise XENFEAT_not_direct_mapped
+ * is set.
+ *
+ * If neither flag is set (e.g. older Xen releases) the assumptions are:
+ * - non-auto_translated domains (x86 only) are always direct-mapped
+ * - on x86, auto_translated domains are not direct-mapped
+ * - on ARM, Dom0 is direct-mapped, DomUs are not
+ */
+#define XENFEAT_not_direct_mapped         16
+#define XENFEAT_direct_mapped             17
+
+#define XENFEAT_NR_SUBMAPS 1
+
+#endif /* __XEN_PUBLIC_FEATURES_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/grant_table.h b/include/hw/xen/interface/grant_table.h
index 2af0cbdde3..7934d7b718 100644
--- a/include/hw/xen/interface/grant_table.h
+++ b/include/hw/xen/interface/grant_table.h
@@ -28,9 +28,659 @@
 #ifndef __XEN_PUBLIC_GRANT_TABLE_H__
 #define __XEN_PUBLIC_GRANT_TABLE_H__
 
+#include "xen.h"
+
+/*
+ * `incontents 150 gnttab Grant Tables
+ *
+ * Xen's grant tables provide a generic mechanism for memory sharing
+ * between domains. This shared memory interface underpins the split
+ * device drivers for block and network IO.
+ *
+ * Each domain has its own grant table. This is a data structure that
+ * is shared with Xen; it allows the domain to tell Xen what kind of
+ * permissions other domains have on its pages. Entries in the grant
+ * table are identified by grant references. A grant reference is an
+ * integer, which indexes into the grant table. It acts as a
+ * capability which the grantee can use to perform operations on the
+ * granter's memory.
+ *
+ * This capability-based system allows shared-memory communications
+ * between unprivileged domains. A grant reference also encapsulates
+ * the details of a shared page, removing the need for a domain to
+ * know the real machine address of a page it is sharing. This makes
+ * it possible to share memory correctly with domains running in
+ * fully virtualised memory.
+ */
+
+/***********************************
+ * GRANT TABLE REPRESENTATION
+ */
+
+/* Some rough guidelines on accessing and updating grant-table entries
+ * in a concurrency-safe manner. For more information, Linux contains a
+ * reference implementation for guest OSes (drivers/xen/grant_table.c, see
+ * http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob;f=drivers/xen/grant-table.c;hb=HEAD
+ *
+ * NB.
WMB is a no-op on current-generation x86 processors. However, a + * compiler barrier will still be required. + * + * Introducing a valid entry into the grant table: + * 1. Write ent->domid. + * 2. Write ent->frame: + * GTF_permit_access: Frame to which access is permitted. + * GTF_accept_transfer: Pseudo-phys frame slot being filled by new + * frame, or zero if none. + * 3. Write memory barrier (WMB). + * 4. Write ent->flags, inc. valid type. + * + * Invalidating an unused GTF_permit_access entry: + * 1. flags = ent->flags. + * 2. Observe that !(flags & (GTF_reading|GTF_writing)). + * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). + * NB. No need for WMB as reuse of entry is control-dependent on success of + * step 3, and all architectures guarantee ordering of ctrl-dep writes. + * + * Invalidating an in-use GTF_permit_access entry: + * This cannot be done directly. Request assistance from the domain controller + * which can set a timeout on the use of a grant entry and take necessary + * action. (NB. This is not yet implemented!). + * + * Invalidating an unused GTF_accept_transfer entry: + * 1. flags = ent->flags. + * 2. Observe that !(flags & GTF_transfer_committed). [*] + * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). + * NB. No need for WMB as reuse of entry is control-dependent on success of + * step 3, and all architectures guarantee ordering of ctrl-dep writes. + * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. + * The guest must /not/ modify the grant entry until the address of the + * transferred frame is written. It is safe for the guest to spin waiting + * for this to occur (detect by observing GTF_transfer_completed in + * ent->flags). + * + * Invalidating a committed GTF_accept_transfer entry: + * 1. Wait for (ent->flags & GTF_transfer_completed). + * + * Changing a GTF_permit_access from writable to read-only: + * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. + * + * Changing a GTF_permit_access from read-only to writable: + * Use SMP-safe bit-setting instruction. + */ + /* * Reference to a grant entry in a specified domain's grant table. */ typedef uint32_t grant_ref_t; +/* + * A grant table comprises a packed array of grant entries in one or more + * page frames shared between Xen and a guest. + * [XEN]: This field is written by Xen and read by the sharing guest. + * [GST]: This field is written by the guest and read by Xen. + */ + +/* + * Version 1 of the grant table entry structure is maintained largely for + * backwards compatibility. New guests are recommended to support using + * version 2 to overcome version 1 limitations, but to default to version 1. + */ +#if __XEN_INTERFACE_VERSION__ < 0x0003020a +#define grant_entry_v1 grant_entry +#define grant_entry_v1_t grant_entry_t +#endif +struct grant_entry_v1 { + /* GTF_xxx: various type and flag information. [XEN,GST] */ + uint16_t flags; + /* The domain being granted foreign privileges. [GST] */ + domid_t domid; + /* + * GTF_permit_access: GFN that @domid is allowed to map and access. [GST] + * GTF_accept_transfer: GFN that @domid is allowed to transfer into. [GST] + * GTF_transfer_completed: MFN whose ownership transferred by @domid + * (non-translated guests only). [XEN] + */ + uint32_t frame; +}; +typedef struct grant_entry_v1 grant_entry_v1_t; + +/* The first few grant table entries will be preserved across grant table + * version changes and may be pre-populated at domain creation by tools. 
+ */ +#define GNTTAB_NR_RESERVED_ENTRIES 8 +#define GNTTAB_RESERVED_CONSOLE 0 +#define GNTTAB_RESERVED_XENSTORE 1 + +/* + * Type of grant entry. + * GTF_invalid: This grant entry grants no privileges. + * GTF_permit_access: Allow @domid to map/access @frame. + * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame + * to this guest. Xen writes the page number to @frame. + * GTF_transitive: Allow @domid to transitively access a subrange of + * @trans_grant in @trans_domid. No mappings are allowed. + */ +#define GTF_invalid (0U<<0) +#define GTF_permit_access (1U<<0) +#define GTF_accept_transfer (2U<<0) +#define GTF_transitive (3U<<0) +#define GTF_type_mask (3U<<0) + +/* + * Subflags for GTF_permit_access and GTF_transitive. + * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] + * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] + * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] + * Further subflags for GTF_permit_access only. + * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags to be used for + * mappings of the grant [GST] + * GTF_sub_page: Grant access to only a subrange of the page. @domid + * will only be allowed to copy from the grant, and not + * map it. [GST] + */ +#define _GTF_readonly (2) +#define GTF_readonly (1U<<_GTF_readonly) +#define _GTF_reading (3) +#define GTF_reading (1U<<_GTF_reading) +#define _GTF_writing (4) +#define GTF_writing (1U<<_GTF_writing) +#define _GTF_PWT (5) +#define GTF_PWT (1U<<_GTF_PWT) +#define _GTF_PCD (6) +#define GTF_PCD (1U<<_GTF_PCD) +#define _GTF_PAT (7) +#define GTF_PAT (1U<<_GTF_PAT) +#define _GTF_sub_page (8) +#define GTF_sub_page (1U<<_GTF_sub_page) + +/* + * Subflags for GTF_accept_transfer: + * GTF_transfer_committed: Xen sets this flag to indicate that it is committed + * to transferring ownership of a page frame. When a guest sees this flag + * it must /not/ modify the grant entry until GTF_transfer_completed is + * set by Xen. + * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag + * after reading GTF_transfer_committed. Xen will always write the frame + * address, followed by ORing this flag, in a timely manner. + */ +#define _GTF_transfer_committed (2) +#define GTF_transfer_committed (1U<<_GTF_transfer_committed) +#define _GTF_transfer_completed (3) +#define GTF_transfer_completed (1U<<_GTF_transfer_completed) + +/* + * Version 2 grant table entries. These fulfil the same role as + * version 1 entries, but can represent more complicated operations. + * Any given domain will have either a version 1 or a version 2 table, + * and every entry in the table will be the same version. + * + * The interface by which domains use grant references does not depend + * on the grant table version in use by the other domain. + */ +#if __XEN_INTERFACE_VERSION__ >= 0x0003020a +/* + * Version 1 and version 2 grant entries share a common prefix. The + * fields of the prefix are documented as part of struct + * grant_entry_v1. + */ +struct grant_entry_header { + uint16_t flags; + domid_t domid; +}; +typedef struct grant_entry_header grant_entry_header_t; + +/* + * Version 2 of the grant entry structure. + */ +union grant_entry_v2 { + grant_entry_header_t hdr; + + /* + * This member is used for V1-style full page grants, where either: + * + * -- hdr.type is GTF_accept_transfer, or + * -- hdr.type is GTF_permit_access and GTF_sub_page is not set. 
+ *
+ * In that case, the frame field has the same semantics as the
+ * field of the same name in the V1 entry structure.
+ */
+    struct {
+        grant_entry_header_t hdr;
+        uint32_t pad0;
+        uint64_t frame;
+    } full_page;
+
+    /*
+     * If the grant type is GTF_permit_access and GTF_sub_page is set,
+     * @domid is allowed to access bytes [@page_off,@page_off+@length)
+     * in frame @frame.
+     */
+    struct {
+        grant_entry_header_t hdr;
+        uint16_t page_off;
+        uint16_t length;
+        uint64_t frame;
+    } sub_page;
+
+    /*
+     * If the grant is GTF_transitive, @domid is allowed to use the
+     * grant @gref in domain @trans_domid, as if it was the local
+     * domain. Obviously, the transitive access must be compatible
+     * with the original grant.
+     *
+     * The current version of Xen does not allow transitive grants
+     * to be mapped.
+     */
+    struct {
+        grant_entry_header_t hdr;
+        domid_t trans_domid;
+        uint16_t pad0;
+        grant_ref_t gref;
+    } transitive;
+
+    uint32_t __spacer[4]; /* Pad to a power of two */
+};
+typedef union grant_entry_v2 grant_entry_v2_t;
+
+typedef uint16_t grant_status_t;
+
+#endif /* __XEN_INTERFACE_VERSION__ */
+
+/***********************************
+ * GRANT TABLE QUERIES AND USES
+ */
+
+/* ` enum neg_errnoval
+ * ` HYPERVISOR_grant_table_op(enum grant_table_op cmd,
+ * `                           void *args,
+ * `                           unsigned int count)
+ * `
+ *
+ * @args points to an array of a per-command data structure. The array
+ * has @count members.
+ */
+
+/* ` enum grant_table_op { // GNTTABOP_* => struct gnttab_* */
+#define GNTTABOP_map_grant_ref        0
+#define GNTTABOP_unmap_grant_ref      1
+#define GNTTABOP_setup_table          2
+#define GNTTABOP_dump_table           3
+#define GNTTABOP_transfer             4
+#define GNTTABOP_copy                 5
+#define GNTTABOP_query_size           6
+#define GNTTABOP_unmap_and_replace    7
+#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
+#define GNTTABOP_set_version          8
+#define GNTTABOP_get_status_frames    9
+#define GNTTABOP_get_version          10
+#define GNTTABOP_swap_grant_ref       11
+#define GNTTABOP_cache_flush          12
+#endif /* __XEN_INTERFACE_VERSION__ */
+/* ` } */
+
+/*
+ * Handle to track a mapping created via a grant reference.
+ */
+typedef uint32_t grant_handle_t;
+
+/*
+ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
+ * by devices and/or host CPUs. If successful, <handle> is a tracking number
+ * that must be presented later to destroy the mapping(s). On error, <status>
+ * is a negative status code.
+ * NOTES:
+ *  1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
+ *     via which I/O devices may access the granted frame.
+ *  2. If GNTMAP_host_map is specified then a mapping will be added at
+ *     either a host virtual address in the current address space, or at
+ *     a PTE at the specified machine address. The type of mapping to
+ *     perform is selected through the GNTMAP_contains_pte flag, and the
+ *     address is specified in <host_addr>.
+ *  3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
+ *     host mapping is destroyed by other means then it is *NOT* guaranteed
+ *     to be accounted to the correct grant reference!
+ */
+struct gnttab_map_grant_ref {
+    /* IN parameters. */
+    uint64_t host_addr;
+    uint32_t flags;               /* GNTMAP_* */
+    grant_ref_t ref;
+    domid_t  dom;
+    /* OUT parameters. */
+    int16_t  status;              /* => enum grant_status */
+    grant_handle_t handle;
+    uint64_t dev_bus_addr;
+};
+typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
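As an illustration of the NOTES above (a sketch, not part of the header): mapping a foreign grant for CPU access, assuming a `HYPERVISOR_grant_table_op()` hypercall wrapper as provided by the guest environment:

```c
/* Map grant <ref> of domain <dom> at host_addr; returns 0 or a GNTST_* code. */
static int map_foreign_grant(domid_t dom, grant_ref_t ref,
                             uint64_t host_addr, grant_handle_t *handle)
{
    struct gnttab_map_grant_ref op = {
        .host_addr = host_addr,       /* host virtual address to map at */
        .flags     = GNTMAP_host_map, /* CPU-accessible mapping */
        .ref       = ref,
        .dom       = dom,
    };

    if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
        return -1;                    /* the hypercall itself failed */
    if (op.status != GNTST_okay)
        return op.status;             /* negative GNTST_* value */

    *handle = op.handle;              /* needed later for the unmap */
    return 0;
}
```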
+/*
+ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
+ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
+ * field is ignored. If non-zero, they must refer to a device/host mapping
+ * that is tracked by <handle>.
+ * NOTES:
+ *  1. The call may fail in an undefined manner if either mapping is not
+ *     tracked by <handle>.
+ *  2. After executing a batch of unmaps, it is guaranteed that no stale
+ *     mappings will remain in the device or host TLBs.
+ */
+struct gnttab_unmap_grant_ref {
+    /* IN parameters. */
+    uint64_t host_addr;
+    uint64_t dev_bus_addr;
+    grant_handle_t handle;
+    /* OUT parameters. */
+    int16_t  status;              /* => enum grant_status */
+};
+typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
+
+/*
+ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
+ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
+ * Only <nr_frames> addresses are written, even if the table is larger.
+ * NOTES:
+ *  1. <dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ *  3. Xen may not support more than a single grant-table page per domain.
+ */
+struct gnttab_setup_table {
+    /* IN parameters. */
+    domid_t  dom;
+    uint32_t nr_frames;
+    /* OUT parameters. */
+    int16_t  status;              /* => enum grant_status */
+#if __XEN_INTERFACE_VERSION__ < 0x00040300
+    XEN_GUEST_HANDLE(ulong) frame_list;
+#else
+    XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
+#endif
+};
+typedef struct gnttab_setup_table gnttab_setup_table_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
+
+/*
+ * GNTTABOP_dump_table: Dump the contents of the grant table to the
+ * xen console. Debugging use only.
+ */
+struct gnttab_dump_table {
+    /* IN parameters. */
+    domid_t dom;
+    /* OUT parameters. */
+    int16_t status;               /* => enum grant_status */
+};
+typedef struct gnttab_dump_table gnttab_dump_table_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
+
+/*
+ * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The foreign domain
+ * has previously registered its interest in the transfer via <domid, ref>.
+ *
+ * Note that, even if the transfer fails, the specified page no longer belongs
+ * to the calling domain *unless* the error is GNTST_bad_page.
+ *
+ * Note further that only PV guests can use this operation.
+ */
+struct gnttab_transfer {
+    /* IN parameters. */
+    xen_pfn_t   mfn;
+    domid_t     domid;
+    grant_ref_t ref;
+    /* OUT parameters. */
+    int16_t     status;
+};
+typedef struct gnttab_transfer gnttab_transfer_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
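A sketch (not part of the header) of GNTTABOP_setup_table in use: a domain asking for the frames of its own grant table. `set_xen_guest_handle()` is assumed to be the usual guest-handle initialisation macro from the environment:

```c
static int get_gnttab_frames(xen_pfn_t *frames, uint32_t nr_frames)
{
    struct gnttab_setup_table setup = {
        .dom       = DOMID_SELF,   /* our own grant table */
        .nr_frames = nr_frames,
    };

    /* Point the guest handle at the caller-supplied output array. */
    set_xen_guest_handle(setup.frame_list, frames);

    if (HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1))
        return -1;
    return setup.status == GNTST_okay ? 0 : setup.status;
}
```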
+/*
+ * GNTTABOP_copy: Hypervisor-based copy.
+ * Source and destination can each be either an MFN or, for foreign
+ * domains, a grant reference. A foreign domain has to grant read/write
+ * access in its grant table.
+ *
+ * The flags specify whether the source and destination are MFNs or
+ * grant references.
+ *
+ * Note that this can also be used to copy data between two domains
+ * via a third party, if the source and destination domains had previously
+ * granted appropriate access to their pages to the third party.
+ *
+ * source_offset specifies an offset in the source frame, dest_offset
+ * the offset in the target frame, and len specifies the number of
+ * bytes to be copied.
+ */
+
+#define _GNTCOPY_source_gref      (0)
+#define GNTCOPY_source_gref       (1<<_GNTCOPY_source_gref)
+#define _GNTCOPY_dest_gref        (1)
+#define GNTCOPY_dest_gref         (1<<_GNTCOPY_dest_gref)
+
+struct gnttab_copy {
+    /* IN parameters. */
+    struct gnttab_copy_ptr {
+        union {
+            grant_ref_t ref;
+            xen_pfn_t   gmfn;
+        } u;
+        domid_t  domid;
+        uint16_t offset;
+    } source, dest;
+    uint16_t      len;
+    uint16_t      flags;          /* GNTCOPY_* */
+    /* OUT parameters. */
+    int16_t       status;
+};
+typedef struct gnttab_copy gnttab_copy_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
+
+/*
+ * GNTTABOP_query_size: Query the current and maximum sizes of the shared
+ * grant table.
+ * NOTES:
+ *  1. <dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+struct gnttab_query_size {
+    /* IN parameters. */
+    domid_t  dom;
+    /* OUT parameters. */
+    uint32_t nr_frames;
+    uint32_t max_nr_frames;
+    int16_t  status;              /* => enum grant_status */
+};
+typedef struct gnttab_query_size gnttab_query_size_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
+
+/*
+ * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
+ * tracked by <handle> but atomically replace the page table entry with one
+ * pointing to the machine address under <new_addr>. <new_addr> will be
+ * redirected to the null entry.
+ * NOTES:
+ *  1. The call may fail in an undefined manner if either mapping is not
+ *     tracked by <handle>.
+ *  2. After executing a batch of unmaps, it is guaranteed that no stale
+ *     mappings will remain in the device or host TLBs.
+ */
+struct gnttab_unmap_and_replace {
+    /* IN parameters. */
+    uint64_t host_addr;
+    uint64_t new_addr;
+    grant_handle_t handle;
+    /* OUT parameters. */
+    int16_t  status;              /* => enum grant_status */
+};
+typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
+
+#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
+/*
+ * GNTTABOP_set_version: Request a particular version of the grant
+ * table shared table structure. This operation may be used to toggle
+ * between different versions, but must be performed while no grants
+ * are active. The only defined versions are 1 and 2.
+ */
+struct gnttab_set_version {
+    /* IN/OUT parameters */
+    uint32_t version;
+};
+typedef struct gnttab_set_version gnttab_set_version_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_set_version_t);
+
+/*
+ * GNTTABOP_get_status_frames: Get the list of frames used to store grant
+ * status for <dom>. In grant format version 2, the status is separated
+ * from the other shared grant fields to allow more efficient synchronization
+ * using barriers instead of atomic cmpexch operations.
+ * <nr_frames> specifies the size of vector <frame_list>.
+ * The frame addresses are returned in the <frame_list>.
+ * Only <nr_frames> addresses are returned, even if the table is larger.
+ * NOTES:
+ *  1. <dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+struct gnttab_get_status_frames {
+    /* IN parameters. */
+    uint32_t nr_frames;
+    domid_t  dom;
+    /* OUT parameters. */
+    int16_t  status;              /* => enum grant_status */
+    XEN_GUEST_HANDLE(uint64_t) frame_list;
+};
+typedef struct gnttab_get_status_frames gnttab_get_status_frames_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_get_status_frames_t);
+
+/*
+ * GNTTABOP_get_version: Get the grant table version which is in
+ * effect for domain <dom>.
+ */
+struct gnttab_get_version {
+    /* IN parameters */
+    domid_t dom;
+    uint16_t pad;
+    /* OUT parameters */
+    uint32_t version;
+};
+typedef struct gnttab_get_version gnttab_get_version_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_get_version_t);
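A sketch (not part of the header) of GNTTABOP_copy: pulling bytes from a foreign grant into a local frame. Offsets and length must stay within a single page, or Xen returns GNTST_bad_copy_arg:

```c
static int copy_from_grant(domid_t src_dom, grant_ref_t src_ref,
                           uint16_t src_off, xen_pfn_t dst_gmfn,
                           uint16_t dst_off, uint16_t len)
{
    struct gnttab_copy op = {
        .source.u.ref  = src_ref,
        .source.domid  = src_dom,
        .source.offset = src_off,
        .dest.u.gmfn   = dst_gmfn,
        .dest.domid    = DOMID_SELF,
        .dest.offset   = dst_off,
        .len           = len,
        /* Source is a grant reference; dest is a local frame number. */
        .flags         = GNTCOPY_source_gref,
    };

    if (HYPERVISOR_grant_table_op(GNTTABOP_copy, &op, 1))
        return -1;
    return op.status == GNTST_okay ? 0 : op.status;
}
```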
+/*
+ * GNTTABOP_swap_grant_ref: Swap the contents of two grant entries.
+ */
+struct gnttab_swap_grant_ref {
+    /* IN parameters */
+    grant_ref_t ref_a;
+    grant_ref_t ref_b;
+    /* OUT parameters */
+    int16_t status;               /* => enum grant_status */
+};
+typedef struct gnttab_swap_grant_ref gnttab_swap_grant_ref_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_swap_grant_ref_t);
+
+/*
+ * Issue one or more cache maintenance operations on a portion of a
+ * page granted to the calling domain by a foreign domain.
+ */
+struct gnttab_cache_flush {
+    union {
+        uint64_t dev_bus_addr;
+        grant_ref_t ref;
+    } a;
+    uint16_t offset;              /* offset from start of grant */
+    uint16_t length;              /* size within the grant */
+#define GNTTAB_CACHE_CLEAN          (1u<<0)
+#define GNTTAB_CACHE_INVAL          (1u<<1)
+#define GNTTAB_CACHE_SOURCE_GREF    (1u<<31)
+    uint32_t op;
+};
+typedef struct gnttab_cache_flush gnttab_cache_flush_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_cache_flush_t);
+
+#endif /* __XEN_INTERFACE_VERSION__ */
+
+/*
+ * Bitfield values for gnttab_map_grant_ref.flags.
+ */
+ /* Map the grant entry for access by I/O devices. */
+#define _GNTMAP_device_map      (0)
+#define GNTMAP_device_map       (1<<_GNTMAP_device_map)
+ /* Map the grant entry for access by host CPUs. */
+#define _GNTMAP_host_map        (1)
+#define GNTMAP_host_map         (1<<_GNTMAP_host_map)
+ /* Accesses to the granted frame will be restricted to read-only access. */
+#define _GNTMAP_readonly        (2)
+#define GNTMAP_readonly         (1<<_GNTMAP_readonly)
+ /*
+  * GNTMAP_host_map subflag:
+  *  0 => The host mapping is usable only by the guest OS.
+  *  1 => The host mapping is usable by guest OS + current application.
+  */
+#define _GNTMAP_application_map (3)
+#define GNTMAP_application_map  (1<<_GNTMAP_application_map)
+
+ /*
+  * GNTMAP_contains_pte subflag:
+  *  0 => This map request contains a host virtual address.
+  *  1 => This map request contains the machine address of the PTE to update.
+  */
+#define _GNTMAP_contains_pte    (4)
+#define GNTMAP_contains_pte     (1<<_GNTMAP_contains_pte)
+
+/*
+ * Bits to be placed in guest kernel available PTE bits (architecture
+ * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
+ */
+#define _GNTMAP_guest_avail0    (16)
+#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
+
+/*
+ * Values for error status returns. All errors are -ve.
+ */
+/* ` enum grant_status { */
+#define GNTST_okay              (0)  /* Normal return.                        */
+#define GNTST_general_error     (-1) /* General undefined error.              */
+#define GNTST_bad_domain        (-2) /* Unrecognised domain id.               */
+#define GNTST_bad_gntref        (-3) /* Unrecognised or inappropriate gntref. */
+#define GNTST_bad_handle        (-4) /* Unrecognised or inappropriate handle. */
+#define GNTST_bad_virt_addr     (-5) /* Inappropriate virtual address to map. */
+#define GNTST_bad_dev_addr      (-6) /* Inappropriate device address to unmap.*/
+#define GNTST_no_device_space   (-7) /* Out of space in I/O MMU.              */
+#define GNTST_permission_denied (-8) /* Not enough privilege for operation.   */
+#define GNTST_bad_page          (-9) /* Specified page was invalid for op.    */
+#define GNTST_bad_copy_arg     (-10) /* copy arguments cross page boundary.   */
+#define GNTST_address_too_big  (-11) /* transfer page address too large.      */
+#define GNTST_eagain           (-12) /* Operation not done; try again.        */
+#define GNTST_no_space         (-13) /* Out of space (handles etc).
*/ +/* ` } */ + +#define GNTTABOP_error_msgs { \ + "okay", \ + "undefined error", \ + "unrecognised domain id", \ + "invalid grant reference", \ + "invalid mapping handle", \ + "invalid virtual address", \ + "invalid device address", \ + "no spare translation slot in the I/O MMU", \ + "permission denied", \ + "bad page", \ + "copy arguments cross page boundary", \ + "page address size too large", \ + "operation not done; try again", \ + "out of space", \ +} + #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/interface/hvm/hvm_op.h b/include/hw/xen/interface/hvm/hvm_op.h new file mode 100644 index 0000000000..870ec52060 --- /dev/null +++ b/include/hw/xen/interface/hvm/hvm_op.h @@ -0,0 +1,395 @@ +/* + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2007, Keir Fraser + */ + +#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ +#define __XEN_PUBLIC_HVM_HVM_OP_H__ + +#include "../xen.h" +#include "../trace.h" +#include "../event_channel.h" + +/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */ +#define HVMOP_set_param 0 +#define HVMOP_get_param 1 +struct xen_hvm_param { + domid_t domid; /* IN */ + uint16_t pad; + uint32_t index; /* IN */ + uint64_t value; /* IN/OUT */ +}; +typedef struct xen_hvm_param xen_hvm_param_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); + +struct xen_hvm_altp2m_suppress_ve { + uint16_t view; + uint8_t suppress_ve; /* Boolean type. */ + uint8_t pad1; + uint32_t pad2; + uint64_t gfn; +}; + +struct xen_hvm_altp2m_suppress_ve_multi { + uint16_t view; + uint8_t suppress_ve; /* Boolean type. */ + uint8_t pad1; + int32_t first_error; /* Should be set to 0. */ + uint64_t first_gfn; /* Value may be updated. */ + uint64_t last_gfn; + uint64_t first_error_gfn; /* Gfn of the first error. */ +}; + +#if __XEN_INTERFACE_VERSION__ < 0x00040900 + +/* Set the logical level of one of a domain's PCI INTx wires. */ +#define HVMOP_set_pci_intx_level 2 +struct xen_hvm_set_pci_intx_level { + /* Domain to be updated. */ + domid_t domid; + /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ + uint8_t domain, bus, device, intx; + /* Assertion level (0 = unasserted, 1 = asserted). 
*/
+    uint8_t  level;
+};
+typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);
+
+/* Set the logical level of one of a domain's ISA IRQ wires. */
+#define HVMOP_set_isa_irq_level 3
+struct xen_hvm_set_isa_irq_level {
+    /* Domain to be updated. */
+    domid_t  domid;
+    /* ISA device identification, by ISA IRQ (0-15). */
+    uint8_t  isa_irq;
+    /* Assertion level (0 = unasserted, 1 = asserted). */
+    uint8_t  level;
+};
+typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
+
+#define HVMOP_set_pci_link_route 4
+struct xen_hvm_set_pci_link_route {
+    /* Domain to be updated. */
+    domid_t  domid;
+    /* PCI link identifier (0-3). */
+    uint8_t  link;
+    /* ISA IRQ (1-15), or 0 (disable link). */
+    uint8_t  isa_irq;
+};
+typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
+
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
+
+/* Flushes all VCPU TLBs: @arg must be NULL. */
+#define HVMOP_flush_tlbs 5
+
+/*
+ * hvmmem_type_t should not be defined when generating the corresponding
+ * compat header. This will ensure that the improperly named HVMMEM_(*)
+ * values are defined only once.
+ */
+#ifndef XEN_GENERATING_COMPAT_HEADERS
+
+typedef enum {
+    HVMMEM_ram_rw,             /* Normal read/write guest RAM */
+    HVMMEM_ram_ro,             /* Read-only; writes are discarded */
+    HVMMEM_mmio_dm,            /* Reads and writes go to the device model */
+#if __XEN_INTERFACE_VERSION__ < 0x00040700
+    HVMMEM_mmio_write_dm,      /* Read-only; writes go to the device model */
+#else
+    HVMMEM_unused,             /* Placeholder; setting memory to this type
+                                  will fail for code after 4.7.0 */
+#endif
+    HVMMEM_ioreq_server        /* Memory type claimed by an ioreq server;
+                                  type changes to this value are only allowed
+                                  after an ioreq server has claimed its
+                                  ownership. Only pages with HVMMEM_ram_rw are
+                                  allowed to change to this type; conversely,
+                                  pages with this type are only allowed to be
+                                  changed back to HVMMEM_ram_rw. */
+} hvmmem_type_t;
+
+#endif /* XEN_GENERATING_COMPAT_HEADERS */
+
+/* Hint from PV drivers for pagetable destruction. */
+#define HVMOP_pagetable_dying 9
+struct xen_hvm_pagetable_dying {
+    /* Domain with a pagetable about to be destroyed. */
+    domid_t  domid;
+    uint16_t pad[3]; /* align next field on 8-byte boundary */
+    /* guest physical address of the toplevel pagetable dying */
+    uint64_t gpa;
+};
+typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_pagetable_dying_t);
+
+/* Get the current Xen time, in nanoseconds since system boot. */
+#define HVMOP_get_time 10
+struct xen_hvm_get_time {
+    uint64_t now;      /* OUT */
+};
+typedef struct xen_hvm_get_time xen_hvm_get_time_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_time_t);
+
+#define HVMOP_xentrace 11
+struct xen_hvm_xentrace {
+    uint16_t event, extra_bytes;
+    uint8_t extra[TRACE_EXTRA_MAX * sizeof(uint32_t)];
+};
+typedef struct xen_hvm_xentrace xen_hvm_xentrace_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
+
+/* Following tools-only interfaces may change in future. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+/* Deprecated by XENMEM_access_op_set_access */
+#define HVMOP_set_mem_access 12
+
+/* Deprecated by XENMEM_access_op_get_access */
+#define HVMOP_get_mem_access 13
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
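A sketch (not part of the header) of the get/set parameter subcommands defined at the top of this file, assuming the usual single-argument `HYPERVISOR_hvm_op()` hypercall wrapper from the guest environment:

```c
static int hvm_get_param(domid_t domid, uint32_t index, uint64_t *value)
{
    struct xen_hvm_param p = {
        .domid = domid,   /* DOMID_SELF from within a guest */
        .index = index,   /* one of the HVM_PARAM_* values */
    };
    int rc = HYPERVISOR_hvm_op(HVMOP_get_param, &p);

    if (rc == 0)
        *value = p.value;
    return rc;
}
```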
+#define HVMOP_get_mem_type 15
+/* Return hvmmem_type_t for the specified pfn. */
+struct xen_hvm_get_mem_type {
+    /* Domain to be queried. */
+    domid_t domid;
+    /* OUT variable. */
+    uint16_t mem_type;
+    uint16_t pad[2]; /* align next field on 8-byte boundary */
+    /* IN variable. */
+    uint64_t pfn;
+};
+typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
+
+/* Following tools-only interfaces may change in future. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+/*
+ * Definitions relating to DMOP_create_ioreq_server. (Defined here for
+ * backwards compatibility).
+ */
+
+#define HVM_IOREQSRV_BUFIOREQ_OFF    0
+#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
+/*
+ * Use this when read_pointer gets updated atomically and
+ * the pointer pair gets read atomically:
+ */
+#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for event
+ * channel upcalls on the specified <vcpu>. If set, this vector will be used in
+ * preference to the domain global callback (see HVM_PARAM_CALLBACK_IRQ).
+ */
+#define HVMOP_set_evtchn_upcall_vector 23
+struct xen_hvm_evtchn_upcall_vector {
+    uint32_t vcpu;
+    uint8_t vector;
+};
+typedef struct xen_hvm_evtchn_upcall_vector xen_hvm_evtchn_upcall_vector_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_evtchn_upcall_vector_t);
+
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+#define HVMOP_guest_request_vm_event 24
+
+/* HVMOP_altp2m: perform altp2m state operations */
+#define HVMOP_altp2m 25
+
+#define HVMOP_ALTP2M_INTERFACE_VERSION 0x00000001
+
+struct xen_hvm_altp2m_domain_state {
+    /* IN or OUT variable on/off */
+    uint8_t state;
+};
+typedef struct xen_hvm_altp2m_domain_state xen_hvm_altp2m_domain_state_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_domain_state_t);
+
+struct xen_hvm_altp2m_vcpu_enable_notify {
+    uint32_t vcpu_id;
+    uint32_t pad;
+    /* #VE info area gfn */
+    uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_vcpu_enable_notify xen_hvm_altp2m_vcpu_enable_notify_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_enable_notify_t);
+
+struct xen_hvm_altp2m_vcpu_disable_notify {
+    uint32_t vcpu_id;
+};
+typedef struct xen_hvm_altp2m_vcpu_disable_notify xen_hvm_altp2m_vcpu_disable_notify_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_disable_notify_t);
+
+struct xen_hvm_altp2m_view {
+    /* IN/OUT variable */
+    uint16_t view;
+    uint16_t hvmmem_default_access; /* xenmem_access_t */
+};
+typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_view_t);
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040a00
+struct xen_hvm_altp2m_set_mem_access {
+    /* view */
+    uint16_t view;
+    /* Memory type */
+    uint16_t access; /* xenmem_access_t */
+    uint32_t pad;
+    /* gfn */
+    uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040a00 */
+
+struct xen_hvm_altp2m_mem_access {
+    /* view */
+    uint16_t view;
+    /* Memory type */
+    uint16_t access; /* xenmem_access_t */
+    uint32_t pad;
+    /* gfn */
+    uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_mem_access xen_hvm_altp2m_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_mem_access_t);
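All of the altp2m sub-operations are funnelled through the `xen_hvm_altp2m_op` container defined at the end of this section. A sketch (not part of the header), again assuming a `HYPERVISOR_hvm_op()` wrapper:

```c
static int altp2m_get_domain_state(domid_t domid, uint8_t *state)
{
    struct xen_hvm_altp2m_op op = {
        .version = HVMOP_ALTP2M_INTERFACE_VERSION,
        .cmd     = HVMOP_altp2m_get_domain_state,
        .domain  = domid,
    };
    int rc = HYPERVISOR_hvm_op(HVMOP_altp2m, &op);

    if (rc == 0)
        *state = op.u.domain_state.state;   /* on/off */
    return rc;
}
```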
+struct xen_hvm_altp2m_set_mem_access_multi {
+    /* view */
+    uint16_t view;
+    uint16_t pad;
+    /* Number of pages */
+    uint32_t nr;
+    /*
+     * Used for continuation purposes.
+     * Must be set to zero upon initial invocation.
+     */
+    uint64_t opaque;
+    /* List of pfns to set access for */
+    XEN_GUEST_HANDLE(const_uint64) pfn_list;
+    /* Corresponding list of access settings for pfn_list */
+    XEN_GUEST_HANDLE(const_uint8) access_list;
+};
+
+struct xen_hvm_altp2m_change_gfn {
+    /* view */
+    uint16_t view;
+    uint16_t pad1;
+    uint32_t pad2;
+    /* old gfn */
+    uint64_t old_gfn;
+    /* new gfn, INVALID_GFN (~0UL) means revert */
+    uint64_t new_gfn;
+};
+typedef struct xen_hvm_altp2m_change_gfn xen_hvm_altp2m_change_gfn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_change_gfn_t);
+
+struct xen_hvm_altp2m_get_vcpu_p2m_idx {
+    uint32_t vcpu_id;
+    uint16_t altp2m_idx;
+};
+
+struct xen_hvm_altp2m_set_visibility {
+    uint16_t altp2m_idx;
+    uint8_t visible;
+    uint8_t pad;
+};
+
+struct xen_hvm_altp2m_op {
+    uint32_t version;   /* HVMOP_ALTP2M_INTERFACE_VERSION */
+    uint32_t cmd;
+/* Get/set the altp2m state for a domain */
+#define HVMOP_altp2m_get_domain_state     1
+#define HVMOP_altp2m_set_domain_state     2
+/* Set a given VCPU to receive altp2m event notifications */
+#define HVMOP_altp2m_vcpu_enable_notify   3
+/* Create a new view */
+#define HVMOP_altp2m_create_p2m           4
+/* Destroy a view */
+#define HVMOP_altp2m_destroy_p2m          5
+/* Switch view for an entire domain */
+#define HVMOP_altp2m_switch_p2m           6
+/* Notify that a page of memory is to have specific access types */
+#define HVMOP_altp2m_set_mem_access       7
+/* Change a p2m entry to have a different gfn->mfn mapping */
+#define HVMOP_altp2m_change_gfn           8
+/* Set access for an array of pages */
+#define HVMOP_altp2m_set_mem_access_multi 9
+/* Set the "Suppress #VE" bit on a page */
+#define HVMOP_altp2m_set_suppress_ve      10
+/* Get the "Suppress #VE" bit of a page */
+#define HVMOP_altp2m_get_suppress_ve      11
+/* Get the access of a page of memory from a certain view */
+#define HVMOP_altp2m_get_mem_access       12
+/* Disable altp2m event notifications for a given VCPU */
+#define HVMOP_altp2m_vcpu_disable_notify  13
+/* Get the active vcpu p2m index */
+#define HVMOP_altp2m_get_p2m_idx          14
+/* Set the "Suppress #VE" bit for a range of pages */
+#define HVMOP_altp2m_set_suppress_ve_multi 15
+/* Set visibility for a given altp2m view */
+#define HVMOP_altp2m_set_visibility       16
+    domid_t domain;
+    uint16_t pad1;
+    uint32_t pad2;
+    union {
+        struct xen_hvm_altp2m_domain_state         domain_state;
+        struct xen_hvm_altp2m_vcpu_enable_notify   enable_notify;
+        struct xen_hvm_altp2m_view                 view;
+#if __XEN_INTERFACE_VERSION__ < 0x00040a00
+        struct xen_hvm_altp2m_set_mem_access       set_mem_access;
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040a00 */
+        struct xen_hvm_altp2m_mem_access           mem_access;
+        struct xen_hvm_altp2m_change_gfn           change_gfn;
+        struct xen_hvm_altp2m_set_mem_access_multi set_mem_access_multi;
+        struct xen_hvm_altp2m_suppress_ve          suppress_ve;
+        struct xen_hvm_altp2m_suppress_ve_multi    suppress_ve_multi;
+        struct xen_hvm_altp2m_vcpu_disable_notify  disable_notify;
+        struct xen_hvm_altp2m_get_vcpu_p2m_idx     get_vcpu_p2m_idx;
+        struct xen_hvm_altp2m_set_visibility       set_visibility;
+        uint8_t pad[64];
+    } u;
+};
+typedef struct xen_hvm_altp2m_op xen_hvm_altp2m_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_op_t);
+
+#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/hvm/params.h b/include/hw/xen/interface/hvm/params.h
new file mode 100644
index 0000000000..c9d6e70d7b
--- /dev/null
+++ b/include/hw/xen/interface/hvm/params.h
@@ -0,0 +1,318 @@
+/*
+ *
Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2007, Keir Fraser + */ + +#ifndef __XEN_PUBLIC_HVM_PARAMS_H__ +#define __XEN_PUBLIC_HVM_PARAMS_H__ + +#include "hvm_op.h" + +/* These parameters are deprecated and their meaning is undefined. */ +#if defined(__XEN__) || defined(__XEN_TOOLS__) + +#define HVM_PARAM_PAE_ENABLED 4 +#define HVM_PARAM_DM_DOMAIN 13 +#define HVM_PARAM_MEMORY_EVENT_CR0 20 +#define HVM_PARAM_MEMORY_EVENT_CR3 21 +#define HVM_PARAM_MEMORY_EVENT_CR4 22 +#define HVM_PARAM_MEMORY_EVENT_INT3 23 +#define HVM_PARAM_NESTEDHVM 24 +#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25 +#define HVM_PARAM_BUFIOREQ_EVTCHN 26 +#define HVM_PARAM_MEMORY_EVENT_MSR 30 + +#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ + +/* + * Parameter space for HVMOP_{set,get}_param. + */ + +#define HVM_PARAM_CALLBACK_IRQ 0 +#define HVM_PARAM_CALLBACK_IRQ_TYPE_MASK xen_mk_ullong(0xFF00000000000000) +/* + * How should CPU0 event-channel notifications be delivered? + * + * If val == 0 then CPU0 event-channel notifications are not delivered. + * If val != 0, val[63:56] encodes the type, as follows: + */ + +#define HVM_PARAM_CALLBACK_TYPE_GSI 0 +/* + * val[55:0] is a delivery GSI. GSI 0 cannot be used, as it aliases val == 0, + * and disables all notifications. + */ + +#define HVM_PARAM_CALLBACK_TYPE_PCI_INTX 1 +/* + * val[55:0] is a delivery PCI INTx line: + * Domain = val[47:32], Bus = val[31:16] DevFn = val[15:8], IntX = val[1:0] + */ + +#if defined(__i386__) || defined(__x86_64__) +#define HVM_PARAM_CALLBACK_TYPE_VECTOR 2 +/* + * val[7:0] is a vector number. Check for XENFEAT_hvm_callback_vector to know + * if this delivery method is available. + */ +#elif defined(__arm__) || defined(__aarch64__) +#define HVM_PARAM_CALLBACK_TYPE_PPI 2 +/* + * val[55:16] needs to be zero. + * val[15:8] is interrupt flag of the PPI used by event-channel: + * bit 8: the PPI is edge(1) or level(0) triggered + * bit 9: the PPI is active low(1) or high(0) + * val[7:0] is a PPI number used by event-channel. + * This is only used by ARM/ARM64 and masking/eoi the interrupt associated to + * the notification is handled by the interrupt controller. + */ +#define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_MASK 0xFF00 +#define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_LOW_LEVEL 2 +#endif + +/* + * These are not used by Xen. They are here for convenience of HVM-guest + * xenbus implementations. 
+ */ +#define HVM_PARAM_STORE_PFN 1 +#define HVM_PARAM_STORE_EVTCHN 2 + +#define HVM_PARAM_IOREQ_PFN 5 + +#define HVM_PARAM_BUFIOREQ_PFN 6 + +#if defined(__i386__) || defined(__x86_64__) + +/* + * Viridian enlightenments + * + * (See http://download.microsoft.com/download/A/B/4/AB43A34E-BDD0-4FA6-BDEF-79EEF16E880B/Hypervisor%20Top%20Level%20Functional%20Specification%20v4.0.docx) + * + * To expose viridian enlightenments to the guest set this parameter + * to the desired feature mask. The base feature set must be present + * in any valid feature mask. + */ +#define HVM_PARAM_VIRIDIAN 9 + +/* Base+Freq viridian feature sets: + * + * - Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) + * - APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR) + * - Virtual Processor index MSR (HV_X64_MSR_VP_INDEX) + * - Timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and + * HV_X64_MSR_APIC_FREQUENCY) + */ +#define _HVMPV_base_freq 0 +#define HVMPV_base_freq (1 << _HVMPV_base_freq) + +/* Feature set modifications */ + +/* Disable timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and + * HV_X64_MSR_APIC_FREQUENCY). + * This modification restores the viridian feature set to the + * original 'base' set exposed in releases prior to Xen 4.4. + */ +#define _HVMPV_no_freq 1 +#define HVMPV_no_freq (1 << _HVMPV_no_freq) + +/* Enable Partition Time Reference Counter (HV_X64_MSR_TIME_REF_COUNT) */ +#define _HVMPV_time_ref_count 2 +#define HVMPV_time_ref_count (1 << _HVMPV_time_ref_count) + +/* Enable Reference TSC Page (HV_X64_MSR_REFERENCE_TSC) */ +#define _HVMPV_reference_tsc 3 +#define HVMPV_reference_tsc (1 << _HVMPV_reference_tsc) + +/* Use Hypercall for remote TLB flush */ +#define _HVMPV_hcall_remote_tlb_flush 4 +#define HVMPV_hcall_remote_tlb_flush (1 << _HVMPV_hcall_remote_tlb_flush) + +/* Use APIC assist */ +#define _HVMPV_apic_assist 5 +#define HVMPV_apic_assist (1 << _HVMPV_apic_assist) + +/* Enable crash MSRs */ +#define _HVMPV_crash_ctl 6 +#define HVMPV_crash_ctl (1 << _HVMPV_crash_ctl) + +/* Enable SYNIC MSRs */ +#define _HVMPV_synic 7 +#define HVMPV_synic (1 << _HVMPV_synic) + +/* Enable STIMER MSRs */ +#define _HVMPV_stimer 8 +#define HVMPV_stimer (1 << _HVMPV_stimer) + +/* Use Synthetic Cluster IPI Hypercall */ +#define _HVMPV_hcall_ipi 9 +#define HVMPV_hcall_ipi (1 << _HVMPV_hcall_ipi) + +/* Enable ExProcessorMasks */ +#define _HVMPV_ex_processor_masks 10 +#define HVMPV_ex_processor_masks (1 << _HVMPV_ex_processor_masks) + +/* Allow more than 64 VPs */ +#define _HVMPV_no_vp_limit 11 +#define HVMPV_no_vp_limit (1 << _HVMPV_no_vp_limit) + +/* Enable vCPU hotplug */ +#define _HVMPV_cpu_hotplug 12 +#define HVMPV_cpu_hotplug (1 << _HVMPV_cpu_hotplug) + +#define HVMPV_feature_mask \ + (HVMPV_base_freq | \ + HVMPV_no_freq | \ + HVMPV_time_ref_count | \ + HVMPV_reference_tsc | \ + HVMPV_hcall_remote_tlb_flush | \ + HVMPV_apic_assist | \ + HVMPV_crash_ctl | \ + HVMPV_synic | \ + HVMPV_stimer | \ + HVMPV_hcall_ipi | \ + HVMPV_ex_processor_masks | \ + HVMPV_no_vp_limit | \ + HVMPV_cpu_hotplug) + +#endif + +/* + * Set mode for virtual timers (currently x86 only): + * delay_for_missed_ticks (default): + * Do not advance a vcpu's time beyond the correct delivery time for + * interrupts that have been missed due to preemption. Deliver missed + * interrupts when the vcpu is rescheduled and advance the vcpu's virtual + * time stepwise for each one. 
+ * no_delay_for_missed_ticks:
+ *  As above, missed interrupts are delivered, but guest time always tracks
+ *  wallclock (i.e., real) time while doing so.
+ * no_missed_ticks_pending:
+ *  No missed interrupts are held pending. Instead, to ensure ticks are
+ *  delivered at some non-zero rate, if we detect missed ticks then the
+ *  internal tick alarm is not disabled if the VCPU is preempted during the
+ *  next tick period.
+ * one_missed_tick_pending:
+ *  Missed interrupts are collapsed together and delivered as one 'late tick'.
+ *  Guest time always tracks wallclock (i.e., real) time.
+ */
+#define HVM_PARAM_TIMER_MODE   10
+#define HVMPTM_delay_for_missed_ticks    0
+#define HVMPTM_no_delay_for_missed_ticks 1
+#define HVMPTM_no_missed_ticks_pending   2
+#define HVMPTM_one_missed_tick_pending   3
+
+/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */
+#define HVM_PARAM_HPET_ENABLED 11
+
+/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
+#define HVM_PARAM_IDENT_PT 12
+
+/* ACPI S state: currently support S0 and S3 on x86. */
+#define HVM_PARAM_ACPI_S_STATE 14
+
+/* TSS used on Intel when CR0.PE=0. */
+#define HVM_PARAM_VM86_TSS 15
+
+/* Boolean: Enable aligning all periodic vpts to reduce interrupts */
+#define HVM_PARAM_VPT_ALIGN 16
+
+/* Console debug shared memory ring and event channel */
+#define HVM_PARAM_CONSOLE_PFN 17
+#define HVM_PARAM_CONSOLE_EVTCHN 18
+
+/*
+ * Select location of ACPI PM1a and TMR control blocks. Currently two locations
+ * are supported, specified by version 0 or 1 in this parameter:
+ *   - 0: default, use the old addresses
+ *        PM1A_EVT == 0x1f40; PM1A_CNT == 0x1f44; PM_TMR == 0x1f48
+ *   - 1: use the new default qemu addresses
+ *        PM1A_EVT == 0xb000; PM1A_CNT == 0xb004; PM_TMR == 0xb008
+ * You can find these address definitions in <hvm/ioreq.h>.
+ */
+#define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
+
+/* Params for the mem event rings */
+#define HVM_PARAM_PAGING_RING_PFN   27
+#define HVM_PARAM_MONITOR_RING_PFN  28
+#define HVM_PARAM_SHARING_RING_PFN  29
+
+/* SHUTDOWN_* action in case of a triple fault */
+#define HVM_PARAM_TRIPLE_FAULT_REASON 31
+
+#define HVM_PARAM_IOREQ_SERVER_PFN 32
+#define HVM_PARAM_NR_IOREQ_SERVER_PAGES 33
+
+/* Location of the VM Generation ID in guest physical address space. */
+#define HVM_PARAM_VM_GENERATION_ID_ADDR 34
+
+/*
+ * Set mode for altp2m:
+ *  disabled: don't activate altp2m (default)
+ *  mixed: allow access to all altp2m ops for both in-guest and external tools
+ *  external: allow access to external privileged tools only
+ *  limited: guest only has limited access (i.e., control VMFUNC and #VE)
+ *
+ * Note that 'mixed' mode has not been evaluated for safety from a
+ * security perspective. Before using this mode in a
+ * security-critical environment, each subop should be evaluated for
+ * safety, with unsafe subops blacklisted in XSM.
+ */
+#define HVM_PARAM_ALTP2M 35
+#define XEN_ALTP2M_disabled 0
+#define XEN_ALTP2M_mixed    1
+#define XEN_ALTP2M_external 2
+#define XEN_ALTP2M_limited  3
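The parameters in this file are read and written with the HVMOP_{set,get}_param subcommands from hvm_op.h. A sketch (not part of the header) of a privileged domain selecting a timer mode for a guest, assuming a `HYPERVISOR_hvm_op()` wrapper:

```c
static int set_timer_mode(domid_t domid, uint64_t mode)
{
    struct xen_hvm_param p = {
        .domid = domid,                /* target domain */
        .index = HVM_PARAM_TIMER_MODE,
        .value = mode,                 /* e.g. HVMPTM_one_missed_tick_pending */
    };

    return HYPERVISOR_hvm_op(HVMOP_set_param, &p);
}
```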
+/*
+ * Size of the x87 FPU FIP/FDP registers that the hypervisor needs to
+ * save/restore. This is a workaround for a hardware limitation that
+ * does not allow the full FIP/FDP and FCS/FDS to be restored.
+ *
+ * Valid values are:
+ *
+ * 8: save/restore 64-bit FIP/FDP and clear FCS/FDS (default if CPU
+ *    has FPCSDS feature).
+ *
+ * 4: save/restore 32-bit FIP/FDP, FCS/FDS, and clear upper 32-bits of
+ *    FIP/FDP.
+ *
+ * 0: allow hypervisor to choose based on the value of FIP/FDP
+ *    (default if CPU does not have FPCSDS).
+ *
+ * If FPCSDS (bit 13 in CPUID leaf 0x7, subleaf 0x0) is set, the CPU
+ * never saves FCS/FDS and this parameter should be left at the
+ * default of 8.
+ */
+#define HVM_PARAM_X87_FIP_WIDTH 36
+
+/*
+ * TSS (and its size) used on Intel when CR0.PE=0. The address occupies
+ * the low 32 bits, while the size is in the high 32 ones.
+ */
+#define HVM_PARAM_VM86_TSS_SIZED 37
+
+/* Enable MCA capabilities. */
+#define HVM_PARAM_MCA_CAP 38
+#define XEN_HVM_MCA_CAP_LMCE   (xen_mk_ullong(1) << 0)
+#define XEN_HVM_MCA_CAP_MASK   XEN_HVM_MCA_CAP_LMCE
+
+#define HVM_NR_PARAMS 39
+
+#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
diff --git a/include/hw/xen/interface/io/blkif.h b/include/hw/xen/interface/io/blkif.h
index d07fa1e078..4cdba79aba 100644
--- a/include/hw/xen/interface/io/blkif.h
+++ b/include/hw/xen/interface/io/blkif.h
@@ -118,7 +118,7 @@
 *
 *      The underlying storage is not affected by the direct IO memory
 *      lifetime bug. See:
- *        http://lists.xen.org/archives/html/xen-devel/2012-12/msg01154.html
+ *        https://lists.xen.org/archives/html/xen-devel/2012-12/msg01154.html
 *
 *      Therefore this option gives the backend permission to use
 *      O_DIRECT, notwithstanding that bug.
@@ -341,7 +341,7 @@
 * access (even when it should be read-only). If the frontend hits the
 * maximum number of allowed persistently mapped grants, it can fallback
 * to non persistent mode. This will cause a performance degradation,
- * since the the backend driver will still try to map those grants
+ * since the backend driver will still try to map those grants
 * persistently. Since the persistent grants protocol is compatible with
 * the previous protocol, a frontend driver can choose to work in
 * persistent mode even when the backend doesn't support it.
@@ -710,3 +710,13 @@ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
 #define VDISK_READONLY 0x4
 
 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/console.h b/include/hw/xen/interface/io/console.h
index e2155d1cf5..4811f47220 100644
--- a/include/hw/xen/interface/io/console.h
+++ b/include/hw/xen/interface/io/console.h
@@ -44,3 +44,13 @@ DEFINE_XEN_FLEX_RING(xencons);
 #endif
 
 #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/fbif.h b/include/hw/xen/interface/io/fbif.h
index ea87ebec0a..cc25aab32e 100644
--- a/include/hw/xen/interface/io/fbif.h
+++ b/include/hw/xen/interface/io/fbif.h
@@ -153,4 +153,24 @@ struct xenfb_page
     unsigned long pd[256];
 };
 
+/*
+ * Wart: xenkbd needs to know default resolution. Put it here until a
+ * better solution is found, but don't leak it to the backend.
+ */
+#ifdef __KERNEL__
+#define XENFB_WIDTH  800
+#define XENFB_HEIGHT 600
+#define XENFB_DEPTH  32
+#endif
+
 #endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/kbdif.h b/include/hw/xen/interface/io/kbdif.h
index 1d68cd458e..a6b01c52c7 100644
--- a/include/hw/xen/interface/io/kbdif.h
+++ b/include/hw/xen/interface/io/kbdif.h
@@ -564,3 +564,13 @@ struct xenkbd_page
 };
 
 #endif /* __XEN_PUBLIC_IO_KBDIF_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/netif.h b/include/hw/xen/interface/io/netif.h
index 48fa530950..00dd258712 100644
--- a/include/hw/xen/interface/io/netif.h
+++ b/include/hw/xen/interface/io/netif.h
@@ -171,7 +171,7 @@
 * The ability of the backend to use a control ring is advertised by
 * setting:
 *
- * /local/domain/X/backend/<domid>/<vif>/feature-ctrl-ring = "1"
+ * /local/domain/X/backend/vif/<domid>/<vif>/feature-ctrl-ring = "1"
 *
 * The frontend provides a control ring to the backend by setting:
 *
@@ -190,6 +190,32 @@
 * order as requests.
 */
 
+/*
+ * Link state
+ * ==========
+ *
+ * The backend can advertise its current link (carrier) state to the
+ * frontend using the /local/domain/X/backend/vif/<domid>/<vif>/carrier
+ * node. If this node is not present, then the frontend should assume that
+ * the link is up (for compatibility with backends that do not implement
+ * this feature). If this node is present, then a value of "0" should be
+ * interpreted by the frontend as the link being down (no carrier) and a
+ * value of "1" should be interpreted as the link being up (carrier
+ * present).
+ */
+
+/*
+ * MTU
+ * ===
+ *
+ * The toolstack may set a value of MTU for the frontend by setting the
+ * /local/domain/<domid>/device/vif/<vif>/mtu node with the MTU value in
+ * octets. If this node is absent the frontend should assume an MTU value
+ * of 1500 octets. A frontend is also at liberty to ignore this value so
+ * it is only suitable for informing the frontend that a packet payload
+ * >1500 octets is permitted.
+ */
+
 /*
  * Hash types
  * ==========
@@ -267,6 +293,62 @@
 
 #define XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ 1
 
+/*
+ * This algorithm uses a 'key' as well as the data buffer itself.
+ * (Buffer[] and Key[] are treated as shift-registers where the MSB of
+ * Buffer/Key[0] is considered 'left-most' and the LSB of Buffer/Key[N-1]
+ * is the 'right-most').
+ *
+ * Value = 0
+ * For number of bits in Buffer[]
+ *    If (left-most bit of Buffer[] is 1)
+ *        Value ^= left-most 32 bits of Key[]
+ *    Key[] << 1
+ *    Buffer[] << 1
+ *
+ * The code below is provided for convenience where an operating system
+ * does not already provide an implementation.
+ */
+#ifdef XEN_NETIF_DEFINE_TOEPLITZ
+static uint32_t xen_netif_toeplitz_hash(const uint8_t *key,
+                                        unsigned int keylen,
+                                        const uint8_t *buf,
+                                        unsigned int buflen)
+{
+    unsigned int keyi, bufi;
+    uint64_t prefix = 0;
+    uint64_t hash = 0;
+
+    /* Pre-load prefix with the first 8 bytes of the key */
+    for (keyi = 0; keyi < 8; keyi++) {
+        prefix <<= 8;
+        prefix |= (keyi < keylen) ? key[keyi] : 0;
+    }
+
+    for (bufi = 0; bufi < buflen; bufi++) {
+        uint8_t byte = buf[bufi];
+        unsigned int bit;
+
+        for (bit = 0; bit < 8; bit++) {
+            if (byte & 0x80)
+                hash ^= prefix;
+            prefix <<= 1;
+            byte <<= 1;
+        }
+
+        /*
+         * 'prefix' has now been left-shifted by 8, so
+         * OR in the next byte.
+         */
+        prefix |= (keyi < keylen) ?
key[keyi] : 0; + keyi++; + } + + /* The valid part of the hash is in the upper 32 bits. */ + return hash >> 32; +} +#endif /* XEN_NETIF_DEFINE_TOEPLITZ */ + /* * Control requests (struct xen_netif_ctrl_request) * ================================================ @@ -1008,3 +1090,13 @@ DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response); #define NETIF_RSP_NULL 1 #endif + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/interface/io/ring.h b/include/hw/xen/interface/io/ring.h index 115705f3f4..c486c457e0 100644 --- a/include/hw/xen/interface/io/ring.h +++ b/include/hw/xen/interface/io/ring.h @@ -1,6 +1,6 @@ /****************************************************************************** * ring.h - * + * * Shared producer-consumer ring macros. * * Permission is hereby granted, free of charge, to any person obtaining a copy @@ -33,13 +33,6 @@ * - standard integers types (uint8_t, uint16_t, etc) * They are provided by stdint.h of the standard headers. * - * Before using the different macros, you need to provide the following - * macros: - * - xen_mb() a memory barrier - * - xen_rmb() a read memory barrier - * - xen_wmb() a write memory barrier - * Example of those can be found in xenctrl.h. - * * In addition, if you intend to use the FLEX macros, you also need to * provide the following, before invoking the FLEX macros: * - size_t @@ -49,6 +42,14 @@ * and grant_table.h from the Xen public headers. */ +#include "../xen-compat.h" + +#if __XEN_INTERFACE_VERSION__ < 0x00030208 +#define xen_mb() mb() +#define xen_rmb() rmb() +#define xen_wmb() wmb() +#endif + typedef unsigned int RING_IDX; /* Round a 32-bit unsigned constant down to the nearest power of two. */ @@ -61,12 +62,12 @@ typedef unsigned int RING_IDX; /* * Calculate size of a shared ring, given the total available space for the * ring and indexes (_sz), and the name tag of the request/response structure. - * A ring contains as many entries as will fit, rounded down to the nearest + * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ #define __CONST_RING_SIZE(_s, _sz) \ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ - sizeof_field(struct _s##_sring, ring[0]))) + sizeof(((struct _s##_sring *)0)->ring[0]))) /* * The same for passing in an actual pointer instead of a name tag. */ @@ -75,7 +76,7 @@ typedef unsigned int RING_IDX; /* * Macros to make the correct C datatypes for a new kind of ring. - * + * * To make a new ring datatype, you need to have two message structures, * let's say request_t, and response_t already defined. * @@ -85,7 +86,7 @@ typedef unsigned int RING_IDX; * * These expand out to give you a set of types, as you can see below. * The most important of these are: - * + * * mytag_sring_t - The shared ring. * mytag_front_ring_t - The 'front' half of the ring. * mytag_back_ring_t - The 'back' half of the ring. @@ -153,15 +154,15 @@ typedef struct __name##_back_ring __name##_back_ring_t /* * Macros for manipulating rings. - * - * FRONT_RING_whatever works on the "front end" of a ring: here + * + * FRONT_RING_whatever works on the "front end" of a ring: here * requests are pushed on to the ring and responses taken off it. 
- * - * BACK_RING_whatever works on the "back end" of a ring: here + * + * BACK_RING_whatever works on the "back end" of a ring: here * requests are taken off the ring and responses put on. - * - * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. - * This is OK in 1-for-1 request-response situations where the + * + * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. + * This is OK in 1-for-1 request-response situations where the * requestor (front end) never has more than RING_SIZE()-1 * outstanding requests. */ @@ -174,20 +175,24 @@ typedef struct __name##_back_ring __name##_back_ring_t (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \ } while(0) -#define FRONT_RING_INIT(_r, _s, __size) do { \ - (_r)->req_prod_pvt = 0; \ - (_r)->rsp_cons = 0; \ +#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \ + (_r)->req_prod_pvt = (_i); \ + (_r)->rsp_cons = (_i); \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) -#define BACK_RING_INIT(_r, _s, __size) do { \ - (_r)->rsp_prod_pvt = 0; \ - (_r)->req_cons = 0; \ +#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size) + +#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \ + (_r)->rsp_prod_pvt = (_i); \ + (_r)->req_cons = (_i); \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) +#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size) + /* How big is this ring? */ #define RING_SIZE(_r) \ ((_r)->nr_ents) @@ -206,33 +211,45 @@ typedef struct __name##_back_ring __name##_back_ring_t #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) +#ifdef __GNUC__ #define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ unsigned int rsp = RING_SIZE(_r) - \ ((_r)->req_cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) +#else +/* Same as above, but without the nice GCC ({ ... }) syntax. */ +#define RING_HAS_UNCONSUMED_REQUESTS(_r) \ + ((((_r)->sring->req_prod - (_r)->req_cons) < \ + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ + ((_r)->sring->req_prod - (_r)->req_cons) : \ + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) +#endif /* Direct access to individual ring elements, by index. */ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) +#define RING_GET_RESPONSE(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) + /* - * Get a local copy of a request. + * Get a local copy of a request/response. * - * Use this in preference to RING_GET_REQUEST() so all processing is + * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is * done on a local copy that cannot be modified by the other end. * * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this - * to be ineffective where _req is a struct which consists of only bitfields. + * to be ineffective where dest is a struct which consists of only bitfields. */ -#define RING_COPY_REQUEST(_r, _idx, _req) do { \ - /* Use volatile to force the copy into _req. */ \ - *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \ +#define RING_COPY_(type, r, idx, dest) do { \ + /* Use volatile to force the copy into dest. 
*/ \ + *(dest) = *(volatile __typeof__(dest))RING_GET_##type(r, idx); \ } while (0) -#define RING_GET_RESPONSE(_r, _idx) \ - (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) +#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req) +#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp) /* Loop termination condition: Would the specified index overflow the ring? */ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ @@ -242,6 +259,10 @@ typedef struct __name##_back_ring __name##_back_ring_t #define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \ (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r)) +/* Ill-behaved backend determination: Can there be this many responses? */ +#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \ + (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r)) + #define RING_PUSH_REQUESTS(_r) do { \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ @@ -254,26 +275,26 @@ typedef struct __name##_back_ring __name##_back_ring_t /* * Notification hold-off (req_event and rsp_event): - * + * * When queueing requests or responses on a shared ring, it may not always be * necessary to notify the remote end. For example, if requests are in flight * in a backend, the front may be able to queue further requests without * notifying the back (if the back checks for new requests when it queues * responses). - * + * * When enqueuing requests or responses: - * + * * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument * is a boolean return value. True indicates that the receiver requires an * asynchronous notification. - * + * * After dequeuing requests or responses (before sleeping the connection): - * + * * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). * The second argument is a boolean return value. True indicates that there * are pending messages on the ring (i.e., the connection should not be put * to sleep). - * + * * These macros will set the req_event/rsp_event field to trigger a * notification on the very next message that is enqueued. If you want to * create batches of work (i.e., only receive a notification after several diff --git a/include/hw/xen/interface/io/usbif.h b/include/hw/xen/interface/io/usbif.h index c6a58639d6..c0a552e195 100644 --- a/include/hw/xen/interface/io/usbif.h +++ b/include/hw/xen/interface/io/usbif.h @@ -32,6 +32,34 @@ #include "../grant_table.h" /* + * Detailed Interface Description + * ============================== + * The pvUSB interface is using a split driver design: a frontend driver in + * the guest and a backend driver in a driver domain (normally dom0) having + * access to the physical USB device(s) being passed to the guest. + * + * The frontend and backend drivers use XenStore to initiate the connection + * between them, the I/O activity is handled via two shared ring pages and an + * event channel. As the interface between frontend and backend is at the USB + * host connector level, multiple (up to 31) physical USB devices can be + * handled by a single connection. + * + * The Xen pvUSB device name is "qusb", so the frontend's XenStore entries are + * to be found under "device/qusb", while the backend's XenStore entries are + * under "backend//qusb". + * + * When a new pvUSB connection is established, the frontend needs to setup the + * two shared ring pages for communication and the event channel. The ring + * pages need to be made available to the backend via the grant table + * interface. 
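Before the pvUSB details, a hedged sketch of how a frontend might drive such a shared ring with the macros above; the mytag types follow the DEFINE_RING_TYPES() naming scheme described earlier, and the notify_remote() event-channel kick is an assumed helper:

    /* Sketch, assuming DEFINE_RING_TYPES(mytag, mytag_request_t,
     * mytag_response_t) has been instantiated. */
    void mytag_submit(mytag_front_ring_t *front, const mytag_request_t *src,
                      void (*notify_remote)(void))
    {
        int notify;
        mytag_request_t *req = RING_GET_REQUEST(front, front->req_prod_pvt);

        *req = *src;                /* fill the private producer slot */
        front->req_prod_pvt++;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(front, notify);
        if (notify)
            notify_remote();        /* kick only when the backend asked for it */
    }

    void mytag_poll_responses(mytag_front_ring_t *front,
                              void (*handle)(const mytag_response_t *))
    {
        while (RING_HAS_UNCONSUMED_RESPONSES(front)) {
            mytag_response_t rsp;

            /* local copy, immune to concurrent modification by the backend */
            RING_COPY_RESPONSE(front, front->rsp_cons, &rsp);
            front->rsp_cons++;
            handle(&rsp);
        }
        /* a consumer about to sleep would also run RING_FINAL_CHECK_FOR_RESPONSES() */
    }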
+ * + * One of the shared ring pages is used by the backend to inform the frontend + * about USB device plug events (device to be added or removed). This is the + * "conn-ring". + * + * The other ring page is used for USB I/O communication (requests and + * responses). This is the "urb-ring". + * * Feature and Parameter Negotiation * ================================= * The two halves of a Xen pvUSB driver utilize nodes within the XenStore to @@ -99,130 +127,273 @@ * The machine ABI rules governing the format of all ring request and * response structures. * + * Protocol Description + * ==================== + * + *-------------------------- USB device plug events -------------------------- + * + * USB device plug events are sent via the "conn-ring" shared page. As only + * events are being sent, the respective requests from the frontend to the + * backend are just dummy ones. + * The events sent to the frontend have the following layout: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | id | portnum | speed | 4 + * +----------------+----------------+----------------+----------------+ + * id - uint16_t, event id (taken from the actual frontend dummy request) + * portnum - uint8_t, port number (1 ... 31) + * speed - uint8_t, device USBIF_SPEED_*, USBIF_SPEED_NONE == unplug + * + * The dummy request: + * 0 1 octet + * +----------------+----------------+ + * | id | 2 + * +----------------+----------------+ + * id - uint16_t, guest supplied value (no need for being unique) + * + *-------------------------- USB I/O request --------------------------------- + * + * A single USB I/O request on the "urb-ring" has the following layout: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | id | nr_buffer_segs | 4 + * +----------------+----------------+----------------+----------------+ + * | pipe | 8 + * +----------------+----------------+----------------+----------------+ + * | transfer_flags | buffer_length | 12 + * +----------------+----------------+----------------+----------------+ + * | request type specific | 16 + * | data | 20 + * +----------------+----------------+----------------+----------------+ + * | seg[0] | 24 + * | data | 28 + * +----------------+----------------+----------------+----------------+ + * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| + * +----------------+----------------+----------------+----------------+ + * | seg[USBIF_MAX_SEGMENTS_PER_REQUEST - 1] | 144 + * | data | 148 + * +----------------+----------------+----------------+----------------+ + * Bit field bit number 0 is always the least significant bit, undefined bits must + * be zero.
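As a concrete reading of the conn-ring event layout just described, a hedged sketch; little-endian byte order is assumed (as on x86), and the raw[] slot buffer and struct plug_event are illustrative, not part of the interface:

    #include <stdint.h>

    struct plug_event { uint16_t id; uint8_t portnum; uint8_t speed; };

    /* unpack one 4-octet conn-ring event per the documented layout */
    static struct plug_event decode_plug_event(const uint8_t raw[4])
    {
        struct plug_event ev = {
            .id      = (uint16_t)(raw[0] | (raw[1] << 8)), /* event id */
            .portnum = raw[2],                             /* port 1 ... 31 */
            .speed   = raw[3],   /* USBIF_SPEED_*, _NONE means unplug */
        };
        return ev;
    }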
+ * id - uint16_t, guest supplied value + * nr_buffer_segs - uint16_t, number of segment entries in seg[] array + * pipe - uint32_t, bit field with multiple information: + * bits 0-4: port request to send to + * bit 5: unlink request with specified id (cancel I/O) if set (see below) + * bit 7: direction (1 = read from device) + * bits 8-14: device number on port + * bits 15-18: endpoint of device + * bits 30-31: request type: 00 = isochronous, 01 = interrupt, + * 10 = control, 11 = bulk + * transfer_flags - uint16_t, bit field with processing flags: + * bit 0: less data than specified allowed + * buffer_length - uint16_t, total length of data + * request type specific data - 8 bytes, see below + * seg[] - array with 8 byte elements, see below + * + * Request type specific data for isochronous request: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | interval | start_frame | 4 + * +----------------+----------------+----------------+----------------+ + * | number_of_packets | nr_frame_desc_segs | 8 + * +----------------+----------------+----------------+----------------+ + * interval - uint16_t, time interval in msecs between frames + * start_frame - uint16_t, start frame number + * number_of_packets - uint16_t, number of packets to transfer + * nr_frame_desc_segs - uint16_t number of seg[] frame descriptors elements + * + * Request type specific data for interrupt request: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | interval | 0 | 4 + * +----------------+----------------+----------------+----------------+ + * | 0 | 8 + * +----------------+----------------+----------------+----------------+ + * interval - uint16_t, time in msecs until interruption + * + * Request type specific data for control request: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | data of setup packet | 4 + * | | 8 + * +----------------+----------------+----------------+----------------+ + * + * Request type specific data for bulk request: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | 0 | 4 + * | 0 | 8 + * +----------------+----------------+----------------+----------------+ + * + * Request type specific data for unlink request: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | unlink_id | 0 | 4 + * +----------------+----------------+----------------+----------------+ + * | 0 | 8 + * +----------------+----------------+----------------+----------------+ + * unlink_id - uint16_t, request id of request to terminate + * + * seg[] array element layout: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | gref | 4 + * +----------------+----------------+----------------+----------------+ + * | offset | length | 8 + * +----------------+----------------+----------------+----------------+ + * gref - uint32_t, grant reference of buffer page + * offset - uint16_t, offset of buffer start in page + * length - uint16_t, length of buffer in page + * + *-------------------------- USB I/O response -------------------------------- + * + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | id | start_frame | 4 + * +----------------+----------------+----------------+----------------+ + * | status | 8 + * +----------------+----------------+----------------+----------------+ + * | actual_length | 12 + * 
+----------------+----------------+----------------+----------------+ + * | error_count | 16 + * +----------------+----------------+----------------+----------------+ + * id - uint16_t, id of the request this response belongs to + * start_frame - uint16_t, start_frame this response (iso requests only) + * status - int32_t, USBIF_STATUS_* (non-iso requests) + * actual_length - uint32_t, actual size of data transferred + * error_count - uint32_t, number of errors (iso requests) */ enum usb_spec_version { - USB_VER_UNKNOWN = 0, - USB_VER_USB11, - USB_VER_USB20, - USB_VER_USB30, /* not supported yet */ + USB_VER_UNKNOWN = 0, + USB_VER_USB11, + USB_VER_USB20, + USB_VER_USB30, /* not supported yet */ }; /* * USB pipe in usbif_request * - * - port number: bits 0-4 - * (USB_MAXCHILDREN is 31) + * - port number: bits 0-4 + * (USB_MAXCHILDREN is 31) * - * - operation flag: bit 5 - * (0 = submit urb, - * 1 = unlink urb) + * - operation flag: bit 5 + * (0 = submit urb, + * 1 = unlink urb) * - * - direction: bit 7 - * (0 = Host-to-Device [Out] - * 1 = Device-to-Host [In]) + * - direction: bit 7 + * (0 = Host-to-Device [Out] + * 1 = Device-to-Host [In]) * - * - device address: bits 8-14 + * - device address: bits 8-14 * - * - endpoint: bits 15-18 + * - endpoint: bits 15-18 * - * - pipe type: bits 30-31 - * (00 = isochronous, 01 = interrupt, - * 10 = control, 11 = bulk) + * - pipe type: bits 30-31 + * (00 = isochronous, 01 = interrupt, + * 10 = control, 11 = bulk) */ -#define USBIF_PIPE_PORT_MASK 0x0000001f -#define USBIF_PIPE_UNLINK 0x00000020 -#define USBIF_PIPE_DIR 0x00000080 -#define USBIF_PIPE_DEV_MASK 0x0000007f -#define USBIF_PIPE_DEV_SHIFT 8 -#define USBIF_PIPE_EP_MASK 0x0000000f -#define USBIF_PIPE_EP_SHIFT 15 -#define USBIF_PIPE_TYPE_MASK 0x00000003 -#define USBIF_PIPE_TYPE_SHIFT 30 -#define USBIF_PIPE_TYPE_ISOC 0 -#define USBIF_PIPE_TYPE_INT 1 -#define USBIF_PIPE_TYPE_CTRL 2 -#define USBIF_PIPE_TYPE_BULK 3 +#define USBIF_PIPE_PORT_MASK 0x0000001f +#define USBIF_PIPE_UNLINK 0x00000020 +#define USBIF_PIPE_DIR 0x00000080 +#define USBIF_PIPE_DEV_MASK 0x0000007f +#define USBIF_PIPE_DEV_SHIFT 8 +#define USBIF_PIPE_EP_MASK 0x0000000f +#define USBIF_PIPE_EP_SHIFT 15 +#define USBIF_PIPE_TYPE_MASK 0x00000003 +#define USBIF_PIPE_TYPE_SHIFT 30 +#define USBIF_PIPE_TYPE_ISOC 0 +#define USBIF_PIPE_TYPE_INT 1 +#define USBIF_PIPE_TYPE_CTRL 2 +#define USBIF_PIPE_TYPE_BULK 3 -#define usbif_pipeportnum(pipe) ((pipe) & USBIF_PIPE_PORT_MASK) -#define usbif_setportnum_pipe(pipe, portnum) ((pipe) | (portnum)) +#define usbif_pipeportnum(pipe) ((pipe) & USBIF_PIPE_PORT_MASK) +#define usbif_setportnum_pipe(pipe, portnum) ((pipe) | (portnum)) -#define usbif_pipeunlink(pipe) ((pipe) & USBIF_PIPE_UNLINK) -#define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe)) -#define usbif_setunlink_pipe(pipe) ((pipe) | USBIF_PIPE_UNLINK) +#define usbif_pipeunlink(pipe) ((pipe) & USBIF_PIPE_UNLINK) +#define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe)) +#define usbif_setunlink_pipe(pipe) ((pipe) | USBIF_PIPE_UNLINK) -#define usbif_pipein(pipe) ((pipe) & USBIF_PIPE_DIR) -#define usbif_pipeout(pipe) (!usbif_pipein(pipe)) +#define usbif_pipein(pipe) ((pipe) & USBIF_PIPE_DIR) +#define usbif_pipeout(pipe) (!usbif_pipein(pipe)) -#define usbif_pipedevice(pipe) \ - (((pipe) >> USBIF_PIPE_DEV_SHIFT) & USBIF_PIPE_DEV_MASK) +#define usbif_pipedevice(pipe) \ + (((pipe) >> USBIF_PIPE_DEV_SHIFT) & USBIF_PIPE_DEV_MASK) -#define usbif_pipeendpoint(pipe) \ - (((pipe) >> USBIF_PIPE_EP_SHIFT) & USBIF_PIPE_EP_MASK) +#define usbif_pipeendpoint(pipe) \ + 
(((pipe) >> USBIF_PIPE_EP_SHIFT) & USBIF_PIPE_EP_MASK) -#define usbif_pipetype(pipe) \ - (((pipe) >> USBIF_PIPE_TYPE_SHIFT) & USBIF_PIPE_TYPE_MASK) -#define usbif_pipeisoc(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_ISOC) -#define usbif_pipeint(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_INT) -#define usbif_pipectrl(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_CTRL) -#define usbif_pipebulk(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_BULK) +#define usbif_pipetype(pipe) \ + (((pipe) >> USBIF_PIPE_TYPE_SHIFT) & USBIF_PIPE_TYPE_MASK) +#define usbif_pipeisoc(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_ISOC) +#define usbif_pipeint(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_INT) +#define usbif_pipectrl(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_CTRL) +#define usbif_pipebulk(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_BULK) #define USBIF_MAX_SEGMENTS_PER_REQUEST (16) -#define USBIF_MAX_PORTNR 31 -#define USBIF_RING_SIZE 4096 +#define USBIF_MAX_PORTNR 31 +#define USBIF_RING_SIZE 4096 /* * RING for transferring urbs. */ struct usbif_request_segment { - grant_ref_t gref; - uint16_t offset; - uint16_t length; + grant_ref_t gref; + uint16_t offset; + uint16_t length; }; struct usbif_urb_request { - uint16_t id; /* request id */ - uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */ + uint16_t id; /* request id */ + uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */ - /* basic urb parameter */ - uint32_t pipe; - uint16_t transfer_flags; -#define USBIF_SHORT_NOT_OK 0x0001 - uint16_t buffer_length; - union { - uint8_t ctrl[8]; /* setup_packet (Ctrl) */ + /* basic urb parameter */ + uint32_t pipe; + uint16_t transfer_flags; +#define USBIF_SHORT_NOT_OK 0x0001 + uint16_t buffer_length; + union { + uint8_t ctrl[8]; /* setup_packet (Ctrl) */ - struct { - uint16_t interval; /* maximum (1024*8) in usb core */ - uint16_t start_frame; /* start frame */ - uint16_t number_of_packets; /* number of ISO packet */ - uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */ - } isoc; + struct { + uint16_t interval; /* maximum (1024*8) in usb core */ + uint16_t start_frame; /* start frame */ + uint16_t number_of_packets; /* number of ISO packet */ + uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */ + } isoc; - struct { - uint16_t interval; /* maximum (1024*8) in usb core */ - uint16_t pad[3]; - } intr; + struct { + uint16_t interval; /* maximum (1024*8) in usb core */ + uint16_t pad[3]; + } intr; - struct { - uint16_t unlink_id; /* unlink request id */ - uint16_t pad[3]; - } unlink; + struct { + uint16_t unlink_id; /* unlink request id */ + uint16_t pad[3]; + } unlink; - } u; + } u; - /* urb data segments */ - struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST]; + /* urb data segments */ + struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST]; }; typedef struct usbif_urb_request usbif_urb_request_t; struct usbif_urb_response { - uint16_t id; /* request id */ - uint16_t start_frame; /* start frame (ISO) */ - int32_t status; /* status (non-ISO) */ - int32_t actual_length; /* actual transfer length */ - int32_t error_count; /* number of ISO errors */ + uint16_t id; /* request id */ + uint16_t start_frame; /* start frame (ISO) */ + int32_t status; /* status (non-ISO) */ +#define USBIF_STATUS_OK 0 +#define USBIF_STATUS_NODEV (-19) +#define USBIF_STATUS_INVAL (-22) +#define USBIF_STATUS_STALL (-32) +#define USBIF_STATUS_IOERROR (-71) +#define USBIF_STATUS_BABBLE (-75) +#define USBIF_STATUS_SHUTDOWN (-108) + 
int32_t actual_length; /* actual transfer length */ + int32_t error_count; /* number of ISO errors */ }; typedef struct usbif_urb_response usbif_urb_response_t; @@ -233,18 +404,18 @@ DEFINE_RING_TYPES(usbif_urb, struct usbif_urb_request, struct usbif_urb_response * RING for notifying connect/disconnect events to frontend */ struct usbif_conn_request { - uint16_t id; + uint16_t id; }; typedef struct usbif_conn_request usbif_conn_request_t; struct usbif_conn_response { - uint16_t id; /* request id */ - uint8_t portnum; /* port number */ - uint8_t speed; /* usb_device_speed */ -#define USBIF_SPEED_NONE 0 -#define USBIF_SPEED_LOW 1 -#define USBIF_SPEED_FULL 2 -#define USBIF_SPEED_HIGH 3 + uint16_t id; /* request id */ + uint8_t portnum; /* port number */ + uint8_t speed; /* usb_device_speed */ +#define USBIF_SPEED_NONE 0 +#define USBIF_SPEED_LOW 1 +#define USBIF_SPEED_FULL 2 +#define USBIF_SPEED_HIGH 3 }; typedef struct usbif_conn_response usbif_conn_response_t; diff --git a/include/hw/xen/interface/io/xenbus.h b/include/hw/xen/interface/io/xenbus.h index 2fbf2a7fdc..927f9db552 100644 --- a/include/hw/xen/interface/io/xenbus.h +++ b/include/hw/xen/interface/io/xenbus.h @@ -68,3 +68,13 @@ enum xenbus_state { typedef enum xenbus_state XenbusState; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/interface/io/xs_wire.h b/include/hw/xen/interface/io/xs_wire.h new file mode 100644 index 0000000000..4dd6632669 --- /dev/null +++ b/include/hw/xen/interface/io/xs_wire.h @@ -0,0 +1,153 @@ +/* + * Details of the "wire" protocol between Xen Store Daemon and client + * library or guest kernel. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (C) 2005 Rusty Russell IBM Corporation + */ + +#ifndef _XS_WIRE_H +#define _XS_WIRE_H + +enum xsd_sockmsg_type +{ + XS_CONTROL, +#define XS_DEBUG XS_CONTROL + XS_DIRECTORY, + XS_READ, + XS_GET_PERMS, + XS_WATCH, + XS_UNWATCH, + XS_TRANSACTION_START, + XS_TRANSACTION_END, + XS_INTRODUCE, + XS_RELEASE, + XS_GET_DOMAIN_PATH, + XS_WRITE, + XS_MKDIR, + XS_RM, + XS_SET_PERMS, + XS_WATCH_EVENT, + XS_ERROR, + XS_IS_DOMAIN_INTRODUCED, + XS_RESUME, + XS_SET_TARGET, + /* XS_RESTRICT has been removed */ + XS_RESET_WATCHES = XS_SET_TARGET + 2, + XS_DIRECTORY_PART, + + XS_TYPE_COUNT, /* Number of valid types. 
*/ + + XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */ +}; + +#define XS_WRITE_NONE "NONE" +#define XS_WRITE_CREATE "CREATE" +#define XS_WRITE_CREATE_EXCL "CREATE|EXCL" + +/* We hand errors as strings, for portability. */ +struct xsd_errors +{ + int errnum; + const char *errstring; +}; +#ifdef EINVAL +#define XSD_ERROR(x) { x, #x } +/* LINTED: static unused */ +static struct xsd_errors xsd_errors[] +#if defined(__GNUC__) +__attribute__((unused)) +#endif + = { + XSD_ERROR(EINVAL), + XSD_ERROR(EACCES), + XSD_ERROR(EEXIST), + XSD_ERROR(EISDIR), + XSD_ERROR(ENOENT), + XSD_ERROR(ENOMEM), + XSD_ERROR(ENOSPC), + XSD_ERROR(EIO), + XSD_ERROR(ENOTEMPTY), + XSD_ERROR(ENOSYS), + XSD_ERROR(EROFS), + XSD_ERROR(EBUSY), + XSD_ERROR(EAGAIN), + XSD_ERROR(EISCONN), + XSD_ERROR(E2BIG) +}; +#endif + +struct xsd_sockmsg +{ + uint32_t type; /* XS_??? */ + uint32_t req_id;/* Request identifier, echoed in daemon's response. */ + uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ + uint32_t len; /* Length of data following this. */ + + /* Generally followed by nul-terminated string(s). */ +}; + +enum xs_watch_type +{ + XS_WATCH_PATH = 0, + XS_WATCH_TOKEN +}; + +/* + * `incontents 150 xenstore_struct XenStore wire protocol. + * + * Inter-domain shared memory communications. */ +#define XENSTORE_RING_SIZE 1024 +typedef uint32_t XENSTORE_RING_IDX; +#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) +struct xenstore_domain_interface { + char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ + char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ + XENSTORE_RING_IDX req_cons, req_prod; + XENSTORE_RING_IDX rsp_cons, rsp_prod; + uint32_t server_features; /* Bitmap of features supported by the server */ + uint32_t connection; +}; + +/* Violating this is very bad. See docs/misc/xenstore.txt. */ +#define XENSTORE_PAYLOAD_MAX 4096 + +/* Violating these just gets you an error back */ +#define XENSTORE_ABS_PATH_MAX 3072 +#define XENSTORE_REL_PATH_MAX 2048 + +/* The ability to reconnect a ring */ +#define XENSTORE_SERVER_FEATURE_RECONNECTION 1 + +/* Valid values for the connection field */ +#define XENSTORE_CONNECTED 0 /* the steady-state */ +#define XENSTORE_RECONNECT 1 /* guest has initiated a reconnect */ + +#endif /* _XS_WIRE_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/interface/memory.h b/include/hw/xen/interface/memory.h new file mode 100644 index 0000000000..383a9468c3 --- /dev/null +++ b/include/hw/xen/interface/memory.h @@ -0,0 +1,754 @@ +/****************************************************************************** + * memory.h + * + * Memory reservation and information. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
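To illustrate the wire framing, a hedged sketch of building an XS_READ message as it would be copied into the req[] ring described above; the flat output buffer is an assumption of the example:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Frame an XS_READ request: the payload of a read is the nul-terminated
     * path, and the header's len field counts only the payload bytes. */
    size_t frame_xs_read(uint8_t *buf, uint32_t req_id, const char *path)
    {
        struct xsd_sockmsg hdr = {
            .type   = XS_READ,
            .req_id = req_id,
            .tx_id  = 0,                           /* not inside a transaction */
            .len    = (uint32_t)(strlen(path) + 1),
        };

        memcpy(buf, &hdr, sizeof(hdr));
        memcpy(buf + sizeof(hdr), path, hdr.len);
        return sizeof(hdr) + hdr.len;              /* bytes to copy into req[] */
    }

Each byte would then be stored at req[MASK_XENSTORE_IDX(req_prod++)], with a write barrier before the new req_prod is published to the daemon.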
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2005, Keir Fraser + */ + +#ifndef __XEN_PUBLIC_MEMORY_H__ +#define __XEN_PUBLIC_MEMORY_H__ + +#include "xen.h" +#include "physdev.h" + +/* + * Increase or decrease the specified domain's memory reservation. Returns the + * number of extents successfully allocated or freed. + * arg == addr of struct xen_memory_reservation. + */ +#define XENMEM_increase_reservation 0 +#define XENMEM_decrease_reservation 1 +#define XENMEM_populate_physmap 6 + +#if __XEN_INTERFACE_VERSION__ >= 0x00030209 +/* + * Maximum # bits addressable by the user of the allocated region (e.g., I/O + * devices often have a 32-bit limitation even in 64-bit systems). If zero + * then the user has no addressing restriction. This field is not used by + * XENMEM_decrease_reservation. + */ +#define XENMEMF_address_bits(x) (x) +#define XENMEMF_get_address_bits(x) ((x) & 0xffu) +/* NUMA node to allocate from. */ +#define XENMEMF_node(x) (((x) + 1) << 8) +#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu) +/* Flag to populate physmap with populate-on-demand entries */ +#define XENMEMF_populate_on_demand (1<<16) +/* Flag to request allocation only from the node specified */ +#define XENMEMF_exact_node_request (1<<17) +#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request) +/* Flag to indicate the node specified is virtual node */ +#define XENMEMF_vnode (1<<18) +#endif + +struct xen_memory_reservation { + + /* + * XENMEM_increase_reservation: + * OUT: MFN (*not* GMFN) bases of extents that were allocated + * XENMEM_decrease_reservation: + * IN: GMFN bases of extents to free + * XENMEM_populate_physmap: + * IN: GPFN bases of extents to populate with memory + * OUT: GMFN bases of extents that were allocated + * (NB. This command also updates the mach_to_phys translation table) + * XENMEM_claim_pages: + * IN: must be zero + */ + XEN_GUEST_HANDLE(xen_pfn_t) extent_start; + + /* Number of extents, and size/alignment of each (2^extent_order pages). */ + xen_ulong_t nr_extents; + unsigned int extent_order; + +#if __XEN_INTERFACE_VERSION__ >= 0x00030209 + /* XENMEMF flags. */ + unsigned int mem_flags; +#else + unsigned int address_bits; +#endif + + /* + * Domain whose reservation is being changed. + * Unprivileged domains can specify only DOMID_SELF. + */ + domid_t domid; +}; +typedef struct xen_memory_reservation xen_memory_reservation_t; +DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); + +/* + * An atomic exchange of memory pages. If return code is zero then + * @out.extent_list provides GMFNs of the newly-allocated memory. + * Returns zero on complete success, otherwise a negative error code. + * On complete success then always @nr_exchanged == @in.nr_extents. + * On partial success @nr_exchanged indicates how much work was done. + * + * Note that only PV guests can use this operation. + */ +#define XENMEM_exchange 11 +struct xen_memory_exchange { + /* + * [IN] Details of memory extents to be exchanged (GMFN bases). + * Note that @in.address_bits is ignored and unused. 
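As a usage illustration of the reservation structure above (not from the patch), a guest or toolstack might populate a single page like this; HYPERVISOR_memory_op() and set_xen_guest_handle() are assumed wrappers from the surrounding environment, and a recent __XEN_INTERFACE_VERSION__ is assumed for the mem_flags field:

    /* Hedged sketch: populate one 4k page at gpfn for the calling domain. */
    long populate_one_page(xen_pfn_t gpfn)
    {
        struct xen_memory_reservation rsv = {
            .nr_extents   = 1,
            .extent_order = 0,          /* 2^0 = one 4k page per extent */
            .mem_flags    = 0,
            .domid        = DOMID_SELF,
        };

        set_xen_guest_handle(rsv.extent_start, &gpfn); /* IN: GPFN to populate */

        /* returns the number of extents populated (1 on success) */
        return HYPERVISOR_memory_op(XENMEM_populate_physmap, &rsv);
    }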
+ */ + struct xen_memory_reservation in; + + /* + * [IN/OUT] Details of new memory extents. + * We require that: + * 1. @in.domid == @out.domid + * 2. @in.nr_extents << @in.extent_order == + * @out.nr_extents << @out.extent_order + * 3. @in.extent_start and @out.extent_start lists must not overlap + * 4. @out.extent_start lists GPFN bases to be populated + * 5. @out.extent_start is overwritten with allocated GMFN bases + */ + struct xen_memory_reservation out; + + /* + * [OUT] Number of input extents that were successfully exchanged: + * 1. The first @nr_exchanged input extents were successfully + * deallocated. + * 2. The corresponding first entries in the output extent list correctly + * indicate the GMFNs that were successfully exchanged. + * 3. All other input and output extents are untouched. + * 4. If not all input extents are exchanged then the return code of this + * command will be non-zero. + * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! + */ + xen_ulong_t nr_exchanged; +}; +typedef struct xen_memory_exchange xen_memory_exchange_t; +DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); + +/* + * Returns the maximum machine frame number of mapped RAM in this system. + * This command always succeeds (it never returns an error code). + * arg == NULL. + */ +#define XENMEM_maximum_ram_page 2 + +struct xen_memory_domain { + /* [IN] Domain information is being queried for. */ + domid_t domid; +}; + +/* + * Returns the current or maximum memory reservation, in pages, of the + * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. + * arg == addr of struct xen_memory_domain. + */ +#define XENMEM_current_reservation 3 +#define XENMEM_maximum_reservation 4 + +/* + * Returns the maximum GFN in use by the specified domain (may be DOMID_SELF). + * Returns -ve errcode on failure. + * arg == addr of struct xen_memory_domain. + */ +#define XENMEM_maximum_gpfn 14 + +/* + * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys + * mapping table. Architectures which do not have a m2p table do not implement + * this command. + * arg == addr of xen_machphys_mfn_list_t. + */ +#define XENMEM_machphys_mfn_list 5 +struct xen_machphys_mfn_list { + /* + * Size of the 'extent_start' array. Fewer entries will be filled if the + * machphys table is smaller than max_extents * 2MB. + */ + unsigned int max_extents; + + /* + * Pointer to buffer to fill with list of extent starts. If there are + * any large discontiguities in the machine address space, 2MB gaps in + * the machphys table will be represented by an MFN base of zero. + */ + XEN_GUEST_HANDLE(xen_pfn_t) extent_start; + + /* + * Number of extents written to the above array. This will be smaller + * than 'max_extents' if the machphys table is smaller than max_e * 2MB. + */ + unsigned int nr_extents; +}; +typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); + +/* + * For a compat caller, this is identical to XENMEM_machphys_mfn_list. + * + * For a non compat caller, this functions similarly to + * XENMEM_machphys_mfn_list, but returns the mfns making up the compatibility + * m2p table. + */ +#define XENMEM_machphys_compat_mfn_list 25 + +/* + * Returns the location in virtual address space of the machine_to_phys + * mapping table. Architectures which do not have a m2p table, or which do not + * map it by default into guest address space, do not implement this command. + * arg == addr of xen_machphys_mapping_t.
+ */ +#define XENMEM_machphys_mapping 12 +struct xen_machphys_mapping { + xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ + xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */ +}; +typedef struct xen_machphys_mapping xen_machphys_mapping_t; +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); + +/* Source mapping space. */ +/* ` enum phys_map_space { */ +#define XENMAPSPACE_shared_info 0 /* shared info page */ +#define XENMAPSPACE_grant_table 1 /* grant table page */ +#define XENMAPSPACE_gmfn 2 /* GMFN */ +#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */ +#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom, + * XENMEM_add_to_physmap_batch only. */ +#define XENMAPSPACE_dev_mmio 5 /* device mmio region + ARM only; the region is mapped in + Stage-2 using the Normal Memory + Inner/Outer Write-Back Cacheable + memory attribute. */ +/* ` } */ + +/* + * Sets the GPFN at which a particular page appears in the specified guest's + * physical address space (translated guests only). + * arg == addr of xen_add_to_physmap_t. + */ +#define XENMEM_add_to_physmap 7 +struct xen_add_to_physmap { + /* Which domain to change the mapping for. */ + domid_t domid; + + /* Number of pages to go through for gmfn_range */ + uint16_t size; + + unsigned int space; /* => enum phys_map_space */ + +#define XENMAPIDX_grant_table_status 0x80000000 + + /* Index into space being mapped. */ + xen_ulong_t idx; + + /* GPFN in domid where the source mapping page should appear. */ + xen_pfn_t gpfn; +}; +typedef struct xen_add_to_physmap xen_add_to_physmap_t; +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); + +/* A batched version of add_to_physmap. */ +#define XENMEM_add_to_physmap_batch 23 +struct xen_add_to_physmap_batch { + /* IN */ + /* Which domain to change the mapping for. */ + domid_t domid; + uint16_t space; /* => enum phys_map_space */ + + /* Number of pages to go through */ + uint16_t size; + +#if __XEN_INTERFACE_VERSION__ < 0x00040700 + domid_t foreign_domid; /* IFF gmfn_foreign. Should be 0 for other spaces. */ +#else + union xen_add_to_physmap_batch_extra { + domid_t foreign_domid; /* gmfn_foreign */ + uint16_t res0; /* All the other spaces. Should be 0 */ + } u; +#endif + + /* Indexes into space being mapped. */ + XEN_GUEST_HANDLE(xen_ulong_t) idxs; + + /* GPFN in domid where the source mapping page should appear. */ + XEN_GUEST_HANDLE(xen_pfn_t) gpfns; + + /* OUT */ + + /* Per index error code. */ + XEN_GUEST_HANDLE(int) errs; +}; +typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t; +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t); + +#if __XEN_INTERFACE_VERSION__ < 0x00040400 +#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch +#define xen_add_to_physmap_range xen_add_to_physmap_batch +typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t; +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t); +#endif + +/* + * Unmaps the page appearing at a particular GPFN from the specified guest's + * physical address space (translated guests only). + * arg == addr of xen_remove_from_physmap_t. + */ +#define XENMEM_remove_from_physmap 15 +struct xen_remove_from_physmap { + /* Which domain to change the mapping for. */ + domid_t domid; + + /* GPFN of the current mapping of the page. 
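A classic use of XENMEM_add_to_physmap is mapping the domain's shared-info page; a hedged sketch, with the same assumed hypercall wrapper as above:

    /* Ask Xen to place the shared-info page at gpfn in our physmap. */
    long map_shared_info(xen_pfn_t gpfn)
    {
        struct xen_add_to_physmap xatp = {
            .domid = DOMID_SELF,
            .space = XENMAPSPACE_shared_info,
            .idx   = 0,           /* index into the space; 0 for shared info */
            .gpfn  = gpfn,        /* where the page should appear */
        };

        return HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
    }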
*/ + xen_pfn_t gpfn; +}; +typedef struct xen_remove_from_physmap xen_remove_from_physmap_t; +DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t); + +/*** REMOVED ***/ +/*#define XENMEM_translate_gpfn_list 8*/ + +/* + * Returns the pseudo-physical memory map as it was when the domain + * was started (specified by XENMEM_set_memory_map). + * arg == addr of xen_memory_map_t. + */ +#define XENMEM_memory_map 9 +struct xen_memory_map { + /* + * On call the number of entries which can be stored in buffer. On + * return the number of entries which have been stored in + * buffer. + */ + unsigned int nr_entries; + + /* + * Entries in the buffer are in the same format as returned by the + * BIOS INT 0x15 EAX=0xE820 call. + */ + XEN_GUEST_HANDLE(void) buffer; +}; +typedef struct xen_memory_map xen_memory_map_t; +DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); + +/* + * Returns the real physical memory map. Passes the same structure as + * XENMEM_memory_map. + * Specifying buffer as NULL will return the number of entries required + * to store the complete memory map. + * arg == addr of xen_memory_map_t. + */ +#define XENMEM_machine_memory_map 10 + +/* + * Set the pseudo-physical memory map of a domain, as returned by + * XENMEM_memory_map. + * arg == addr of xen_foreign_memory_map_t. + */ +#define XENMEM_set_memory_map 13 +struct xen_foreign_memory_map { + domid_t domid; + struct xen_memory_map map; +}; +typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; +DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); + +#define XENMEM_set_pod_target 16 +#define XENMEM_get_pod_target 17 +struct xen_pod_target { + /* IN */ + uint64_t target_pages; + /* OUT */ + uint64_t tot_pages; + uint64_t pod_cache_pages; + uint64_t pod_entries; + /* IN */ + domid_t domid; +}; +typedef struct xen_pod_target xen_pod_target_t; + +#if defined(__XEN__) || defined(__XEN_TOOLS__) + +#ifndef uint64_aligned_t +#define uint64_aligned_t uint64_t +#endif + +/* + * Get the number of MFNs saved through memory sharing. + * The call never fails. + */ +#define XENMEM_get_sharing_freed_pages 18 +#define XENMEM_get_sharing_shared_pages 19 + +#define XENMEM_paging_op 20 +#define XENMEM_paging_op_nominate 0 +#define XENMEM_paging_op_evict 1 +#define XENMEM_paging_op_prep 2 + +struct xen_mem_paging_op { + uint8_t op; /* XENMEM_paging_op_* */ + domid_t domain; + + /* IN: (XENMEM_paging_op_prep) buffer to immediately fill page from */ + XEN_GUEST_HANDLE_64(const_uint8) buffer; + /* IN: gfn of page being operated on */ + uint64_aligned_t gfn; +}; +typedef struct xen_mem_paging_op xen_mem_paging_op_t; +DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t); + +#define XENMEM_access_op 21 +#define XENMEM_access_op_set_access 0 +#define XENMEM_access_op_get_access 1 +/* + * XENMEM_access_op_enable_emulate and XENMEM_access_op_disable_emulate are + * currently unused, but since they have been in use please do not reuse them. 
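A hedged sketch of the XENMEM_memory_map call pattern follows; the packed e820entry layout is an assumption for illustration (the interface only promises the BIOS INT 0x15 EAX=0xE820 format):

    #include <stdint.h>

    struct e820entry {
        uint64_t addr;   /* start of region */
        uint64_t size;   /* length of region */
        uint32_t type;   /* E820 type code */
    } __attribute__((packed));

    /* Fetch the guest's pseudo-physical memory map. *nr is the buffer
     * capacity on entry and the number of entries stored on success. */
    int fetch_memory_map(struct e820entry *ents, unsigned int *nr)
    {
        struct xen_memory_map map = { .nr_entries = *nr };
        long rc;

        set_xen_guest_handle(map.buffer, ents);
        rc = HYPERVISOR_memory_op(XENMEM_memory_map, &map);
        if (rc == 0)
            *nr = map.nr_entries;   /* OUT: entries actually stored */
        return (int)rc;
    }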
+ * + * #define XENMEM_access_op_enable_emulate 2 + * #define XENMEM_access_op_disable_emulate 3 + */ +#define XENMEM_access_op_set_access_multi 4 + +typedef enum { + XENMEM_access_n, + XENMEM_access_r, + XENMEM_access_w, + XENMEM_access_rw, + XENMEM_access_x, + XENMEM_access_rx, + XENMEM_access_wx, + XENMEM_access_rwx, + /* + * Page starts off as r-x, but automatically + * change to r-w on a write + */ + XENMEM_access_rx2rw, + /* + * Log access: starts off as n, automatically + * goes to rwx, generating an event without + * pausing the vcpu + */ + XENMEM_access_n2rwx, + /* Take the domain default */ + XENMEM_access_default +} xenmem_access_t; + +struct xen_mem_access_op { + /* XENMEM_access_op_* */ + uint8_t op; + /* xenmem_access_t */ + uint8_t access; + domid_t domid; + /* + * Number of pages for set op (or size of pfn_list for + * XENMEM_access_op_set_access_multi) + * Ignored on setting default access and other ops + */ + uint32_t nr; + /* + * First pfn for set op + * pfn for get op + * ~0ull is used to set and get the default access for pages + */ + uint64_aligned_t pfn; + /* + * List of pfns to set access for + * Used only with XENMEM_access_op_set_access_multi + */ + XEN_GUEST_HANDLE(const_uint64) pfn_list; + /* + * Corresponding list of access settings for pfn_list + * Used only with XENMEM_access_op_set_access_multi + */ + XEN_GUEST_HANDLE(const_uint8) access_list; +}; +typedef struct xen_mem_access_op xen_mem_access_op_t; +DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t); + +#define XENMEM_sharing_op 22 +#define XENMEM_sharing_op_nominate_gfn 0 +#define XENMEM_sharing_op_nominate_gref 1 +#define XENMEM_sharing_op_share 2 +#define XENMEM_sharing_op_debug_gfn 3 +#define XENMEM_sharing_op_debug_mfn 4 +#define XENMEM_sharing_op_debug_gref 5 +#define XENMEM_sharing_op_add_physmap 6 +#define XENMEM_sharing_op_audit 7 +#define XENMEM_sharing_op_range_share 8 +#define XENMEM_sharing_op_fork 9 +#define XENMEM_sharing_op_fork_reset 10 + +#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10) +#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9) + +/* The following allows sharing of grant refs. This is useful + * for sharing utilities sitting as "filters" in IO backends + * (e.g. memshr + blktap(2)). 
The IO backend is only exposed + * to grant references, and this allows sharing of the grefs */ +#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (xen_mk_ullong(1) << 62) + +#define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \ + (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | val) +#define XENMEM_SHARING_OP_FIELD_IS_GREF(field) \ + ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG) +#define XENMEM_SHARING_OP_FIELD_GET_GREF(field) \ + ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)) + +struct xen_mem_sharing_op { + uint8_t op; /* XENMEM_sharing_op_* */ + domid_t domain; + + union { + struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */ + union { + uint64_aligned_t gfn; /* IN: gfn to nominate */ + uint32_t grant_ref; /* IN: grant ref to nominate */ + } u; + uint64_aligned_t handle; /* OUT: the handle */ + } nominate; + struct mem_sharing_op_share { /* OP_SHARE/ADD_PHYSMAP */ + uint64_aligned_t source_gfn; /* IN: the gfn of the source page */ + uint64_aligned_t source_handle; /* IN: handle to the source page */ + uint64_aligned_t client_gfn; /* IN: the client gfn */ + uint64_aligned_t client_handle; /* IN: handle to the client page */ + domid_t client_domain; /* IN: the client domain id */ + } share; + struct mem_sharing_op_range { /* OP_RANGE_SHARE */ + uint64_aligned_t first_gfn; /* IN: the first gfn */ + uint64_aligned_t last_gfn; /* IN: the last gfn */ + uint64_aligned_t opaque; /* Must be set to 0 */ + domid_t client_domain; /* IN: the client domain id */ + uint16_t _pad[3]; /* Must be set to 0 */ + } range; + struct mem_sharing_op_debug { /* OP_DEBUG_xxx */ + union { + uint64_aligned_t gfn; /* IN: gfn to debug */ + uint64_aligned_t mfn; /* IN: mfn to debug */ + uint32_t gref; /* IN: gref to debug */ + } u; + } debug; + struct mem_sharing_op_fork { /* OP_FORK */ + domid_t parent_domain; /* IN: parent's domain id */ +/* Only makes sense for short-lived forks */ +#define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0) +/* Only makes sense for short-lived forks */ +#define XENMEM_FORK_BLOCK_INTERRUPTS (1u << 1) + uint16_t flags; /* IN: optional settings */ + uint32_t pad; /* Must be set to 0 */ + } fork; + } u; +}; +typedef struct xen_mem_sharing_op xen_mem_sharing_op_t; +DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t); + +/* + * Attempt to stake a claim for a domain on a quantity of pages + * of system RAM, but _not_ assign specific pageframes. Only + * arithmetic is performed so the hypercall is very fast and need + * not be preemptible, thus sidestepping time-of-check-time-of-use + * races for memory allocation. Returns 0 if the hypervisor page + * allocator has atomically and successfully claimed the requested + * number of pages, else non-zero. + * + * Any domain may have only one active claim. When sufficient memory + * has been allocated to resolve the claim, the claim silently expires. + * Claiming zero pages effectively resets any outstanding claim and + * is always successful. + * + * Note that a valid claim may be staked even after memory has been + * allocated for a domain. In this case, the claim is not incremental, + * i.e. if the domain's total page count is 3, and a claim is staked + * for 10, only 7 additional pages are claimed. + * + * Caller must be privileged or the hypercall fails. + */ +#define XENMEM_claim_pages 24 + +/* + * XENMEM_claim_pages flags - there are no flags at this time. + * The zero value is appropriate. + */ + +/* + * With some legacy devices, certain guest-physical addresses cannot safely + * be used for other purposes, e.g. to map guest RAM.
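A hedged sketch of the claim life cycle described above; note that extent_start stays zero for XENMEM_claim_pages, and that claiming zero pages cancels an outstanding claim:

    /* Stake a claim for `pages` 4k pages before building a domain. */
    long claim_pages(domid_t domid, unsigned long pages)
    {
        struct xen_memory_reservation rsv = {
            .nr_extents   = pages,   /* number of pages to claim */
            .extent_order = 0,
            .domid        = domid,
            /* extent_start is left zero: it must be zero for claim_pages */
        };

        return HYPERVISOR_memory_op(XENMEM_claim_pages, &rsv);
    }

    /* Claiming zero pages resets any outstanding claim and always succeeds. */
    static inline long cancel_claim(domid_t domid)
    {
        return claim_pages(domid, 0);
    }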
This hypercall + * enumerates those regions so the toolstack can avoid using them. + */ +#define XENMEM_reserved_device_memory_map 27 +struct xen_reserved_device_memory { + xen_pfn_t start_pfn; + xen_ulong_t nr_pages; +}; +typedef struct xen_reserved_device_memory xen_reserved_device_memory_t; +DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t); + +struct xen_reserved_device_memory_map { +#define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */ + /* IN */ + uint32_t flags; + /* + * IN/OUT + * + * Gets set to the required number of entries when too low, + * signaled by error code -ERANGE. + */ + unsigned int nr_entries; + /* OUT */ + XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer; + /* IN */ + union { + physdev_pci_device_t pci; + } dev; +}; +typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t; +DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t); + +#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ + +/* + * Get the pages for a particular guest resource, so that they can be + * mapped directly by a tools domain. + */ +#define XENMEM_acquire_resource 28 +struct xen_mem_acquire_resource { + /* IN - The domain whose resource is to be mapped */ + domid_t domid; + /* IN - the type of resource */ + uint16_t type; + +#define XENMEM_resource_ioreq_server 0 +#define XENMEM_resource_grant_table 1 +#define XENMEM_resource_vmtrace_buf 2 + + /* + * IN - a type-specific resource identifier, which must be zero + * unless stated otherwise. + * + * type == XENMEM_resource_ioreq_server -> id == ioreq server id + * type == XENMEM_resource_grant_table -> id defined below + */ + uint32_t id; + +#define XENMEM_resource_grant_table_id_shared 0 +#define XENMEM_resource_grant_table_id_status 1 + + /* + * IN/OUT + * + * As an IN parameter number of frames of the resource to be mapped. + * This value may be updated over the course of the operation. + * + * When frame_list is NULL and nr_frames is 0, this is interpreted as a + * request for the size of the resource, which shall be returned in the + * nr_frames field. + * + * The size of a resource will never be zero, but a nonzero result doesn't + * guarantee that a subsequent mapping request will be successful. There + * are further type/id specific constraints which may change between the + * two calls. + */ + uint32_t nr_frames; + uint32_t pad; + /* + * IN - the index of the initial frame to be mapped. This parameter + * is ignored if nr_frames is 0. This value may be updated + * over the course of the operation. + */ + uint64_t frame; + +#define XENMEM_resource_ioreq_server_frame_bufioreq 0 +#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n)) + + /* + * IN/OUT - If the tools domain is PV then, upon return, frame_list + * will be populated with the MFNs of the resource. + * If the tools domain is HVM then it is expected that, on + * entry, frame_list will be populated with a list of GFNs + * that will be mapped to the MFNs of the resource. + * If -EIO is returned then the frame_list has only been + * partially mapped and it is up to the caller to unmap all + * the GFNs. + * This parameter may be NULL if nr_frames is 0. This + * value may be updated over the course of the operation. + */ + XEN_GUEST_HANDLE(xen_pfn_t) frame_list; +}; +typedef struct xen_mem_acquire_resource xen_mem_acquire_resource_t; +DEFINE_XEN_GUEST_HANDLE(xen_mem_acquire_resource_t); + +/* + * XENMEM_get_vnumainfo used by guest to get + * vNUMA topology from hypervisor. 
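The size-probe convention documented above (nr_frames == 0 with a NULL frame_list) lends itself to a two-call pattern: query the size, then map. A hedged sketch of the first call, with the usual assumed wrappers:

    /* Ask how many frames back a domain's shared grant table. */
    long grant_table_frame_count(domid_t domid, uint32_t *nr_frames)
    {
        struct xen_mem_acquire_resource xmar = {
            .domid     = domid,
            .type      = XENMEM_resource_grant_table,
            .id        = XENMEM_resource_grant_table_id_shared,
            .frame     = 0,
            .nr_frames = 0,      /* 0 + NULL list => return resource size */
        };
        long rc;

        set_xen_guest_handle(xmar.frame_list, NULL);
        rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xmar);
        if (rc == 0)
            *nr_frames = xmar.nr_frames;   /* OUT: size of the resource */
        return rc;
    }

A second call would then supply a real frame_list of that length (GFNs for an HVM caller, or a buffer to receive MFNs for a PV caller).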
+ */ +#define XENMEM_get_vnumainfo 26 + +/* vNUMA node memory ranges */ +struct xen_vmemrange { + uint64_t start, end; + unsigned int flags; + unsigned int nid; +}; +typedef struct xen_vmemrange xen_vmemrange_t; +DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t); + +/* + * vNUMA topology specifies vNUMA node number, distance table, + * memory ranges and vcpu mapping provided for guests. + * XENMEM_get_vnumainfo hypercall expects to see from guest + * nr_vnodes, nr_vmemranges and nr_vcpus to indicate available memory. + * After filling the guest's structures, nr_vnodes, nr_vmemranges and nr_vcpus + * are copied back to the guest. The domain returns the expected values of + * nr_vnodes, nr_vmemranges and nr_vcpus to the guest if the values were + * incorrect. + */ +struct xen_vnuma_topology_info { + /* IN */ + domid_t domid; + uint16_t pad; + /* IN/OUT */ + unsigned int nr_vnodes; + unsigned int nr_vcpus; + unsigned int nr_vmemranges; + /* OUT */ + union { + XEN_GUEST_HANDLE(uint) h; + uint64_t pad; + } vdistance; + union { + XEN_GUEST_HANDLE(uint) h; + uint64_t pad; + } vcpu_to_vnode; + union { + XEN_GUEST_HANDLE(xen_vmemrange_t) h; + uint64_t pad; + } vmemrange; +}; +typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t; +DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t); + +/* Next available subop number is 29 */ + +#endif /* __XEN_PUBLIC_MEMORY_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/interface/physdev.h b/include/hw/xen/interface/physdev.h new file mode 100644 index 0000000000..d271766ad0 --- /dev/null +++ b/include/hw/xen/interface/physdev.h @@ -0,0 +1,383 @@ +/* + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2006, Keir Fraser + */ + +#ifndef __XEN_PUBLIC_PHYSDEV_H__ +#define __XEN_PUBLIC_PHYSDEV_H__ + +#include "xen.h" + +/* + * Prototype for this hypercall is: + * int physdev_op(int cmd, void *args) + * @cmd == PHYSDEVOP_??? (physdev operation). + * @args == Operation-specific extra arguments (NULL if none). + */ + +/* + * Notify end-of-interrupt (EOI) for the specified IRQ. + * @arg == pointer to physdev_eoi structure. + */ +#define PHYSDEVOP_eoi 12 +struct physdev_eoi { + /* IN */ + uint32_t irq; +}; +typedef struct physdev_eoi physdev_eoi_t; +DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t); + +/* + * Register a shared page for the hypervisor to indicate whether the guest + * must issue PHYSDEVOP_eoi.
The semantics of PHYSDEVOP_eoi change slightly + * once the guest has used this function in that the associated event channel + * will automatically get unmasked. The page registered is used as a bit + * array indexed by Xen's PIRQ value. + */ +#define PHYSDEVOP_pirq_eoi_gmfn_v1 17 +/* + * Register a shared page for the hypervisor to indicate whether the + * guest must issue PHYSDEVOP_eoi. This hypercall is very similar to + * PHYSDEVOP_pirq_eoi_gmfn_v1 but it doesn't change the semantics of + * PHYSDEVOP_eoi. The page registered is used as a bit array indexed by + * Xen's PIRQ value. + */ +#define PHYSDEVOP_pirq_eoi_gmfn_v2 28 +struct physdev_pirq_eoi_gmfn { + /* IN */ + xen_pfn_t gmfn; +}; +typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t; +DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t); + +/* + * Query the status of an IRQ line. + * @arg == pointer to physdev_irq_status_query structure. + */ +#define PHYSDEVOP_irq_status_query 5 +struct physdev_irq_status_query { + /* IN */ + uint32_t irq; + /* OUT */ + uint32_t flags; /* XENIRQSTAT_* */ +}; +typedef struct physdev_irq_status_query physdev_irq_status_query_t; +DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t); + +/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ +#define _XENIRQSTAT_needs_eoi (0) +#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) + +/* IRQ shared by multiple guests? */ +#define _XENIRQSTAT_shared (1) +#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) + +/* + * Set the current VCPU's I/O privilege level. + * @arg == pointer to physdev_set_iopl structure. + */ +#define PHYSDEVOP_set_iopl 6 +struct physdev_set_iopl { + /* IN */ + uint32_t iopl; +}; +typedef struct physdev_set_iopl physdev_set_iopl_t; +DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t); + +/* + * Set the current VCPU's I/O-port permissions bitmap. + * @arg == pointer to physdev_set_iobitmap structure. + */ +#define PHYSDEVOP_set_iobitmap 7 +struct physdev_set_iobitmap { + /* IN */ +#if __XEN_INTERFACE_VERSION__ >= 0x00030205 + XEN_GUEST_HANDLE(uint8) bitmap; +#else + uint8_t *bitmap; +#endif + uint32_t nr_ports; +}; +typedef struct physdev_set_iobitmap physdev_set_iobitmap_t; +DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t); + +/* + * Read or write an IO-APIC register. + * @arg == pointer to physdev_apic structure. + */ +#define PHYSDEVOP_apic_read 8 +#define PHYSDEVOP_apic_write 9 +struct physdev_apic { + /* IN */ + unsigned long apic_physbase; + uint32_t reg; + /* IN or OUT */ + uint32_t value; +}; +typedef struct physdev_apic physdev_apic_t; +DEFINE_XEN_GUEST_HANDLE(physdev_apic_t); + +/* + * Allocate or free a physical upcall vector for the specified IRQ line. + * @arg == pointer to physdev_irq structure.
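Putting PHYSDEVOP_irq_status_query and PHYSDEVOP_eoi together, a hedged sketch of the EOI decision an interrupt handler might make; HYPERVISOR_physdev_op() is the assumed wrapper:

    /* After servicing an IRQ, issue PHYSDEVOP_eoi only when Xen reports
     * that the line needs it. */
    void maybe_eoi(uint32_t irq)
    {
        struct physdev_irq_status_query q = { .irq = irq };

        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &q) == 0 &&
            (q.flags & XENIRQSTAT_needs_eoi)) {
            struct physdev_eoi eoi = { .irq = irq };
            HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
        }
    }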
+ */ +#define PHYSDEVOP_alloc_irq_vector 10 +#define PHYSDEVOP_free_irq_vector 11 +struct physdev_irq { + /* IN */ + uint32_t irq; + /* IN or OUT */ + uint32_t vector; +}; +typedef struct physdev_irq physdev_irq_t; +DEFINE_XEN_GUEST_HANDLE(physdev_irq_t); + +#define MAP_PIRQ_TYPE_MSI 0x0 +#define MAP_PIRQ_TYPE_GSI 0x1 +#define MAP_PIRQ_TYPE_UNKNOWN 0x2 +#define MAP_PIRQ_TYPE_MSI_SEG 0x3 +#define MAP_PIRQ_TYPE_MULTI_MSI 0x4 + +#define PHYSDEVOP_map_pirq 13 +struct physdev_map_pirq { + domid_t domid; + /* IN */ + int type; + /* IN (ignored for ..._MULTI_MSI) */ + int index; + /* IN or OUT */ + int pirq; + /* IN - high 16 bits hold segment for ..._MSI_SEG and ..._MULTI_MSI */ + int bus; + /* IN */ + int devfn; + /* IN (also OUT for ..._MULTI_MSI) */ + int entry_nr; + /* IN */ + uint64_t table_base; +}; +typedef struct physdev_map_pirq physdev_map_pirq_t; +DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t); + +#define PHYSDEVOP_unmap_pirq 14 +struct physdev_unmap_pirq { + domid_t domid; + /* IN */ + int pirq; +}; + +typedef struct physdev_unmap_pirq physdev_unmap_pirq_t; +DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t); + +#define PHYSDEVOP_manage_pci_add 15 +#define PHYSDEVOP_manage_pci_remove 16 +struct physdev_manage_pci { + /* IN */ + uint8_t bus; + uint8_t devfn; +}; + +typedef struct physdev_manage_pci physdev_manage_pci_t; +DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t); + +#define PHYSDEVOP_restore_msi 19 +struct physdev_restore_msi { + /* IN */ + uint8_t bus; + uint8_t devfn; +}; +typedef struct physdev_restore_msi physdev_restore_msi_t; +DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t); + +#define PHYSDEVOP_manage_pci_add_ext 20 +struct physdev_manage_pci_ext { + /* IN */ + uint8_t bus; + uint8_t devfn; + unsigned is_extfn; + unsigned is_virtfn; + struct { + uint8_t bus; + uint8_t devfn; + } physfn; +}; + +typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t; +DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t); + +/* + * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op() + * hypercall since 0x00030202.
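A hedged sketch of mapping a legacy GSI with PHYSDEVOP_map_pirq; treating pirq == -1 as a request for auto-allocation is a common caller convention, not something this header itself guarantees:

    /* Map GSI `gsi` to a pirq for the calling domain; returns the pirq
     * chosen by Xen, or a negative error code. */
    int map_gsi(int gsi)
    {
        struct physdev_map_pirq map = {
            .domid = DOMID_SELF,
            .type  = MAP_PIRQ_TYPE_GSI,
            .index = gsi,    /* IN: the GSI to map */
            .pirq  = -1,     /* IN/OUT: -1 asks Xen to pick (assumed) */
        };
        int rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map);

        return rc ? rc : map.pirq;
    }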
+ */ +struct physdev_op { + uint32_t cmd; + union { + physdev_irq_status_query_t irq_status_query; + physdev_set_iopl_t set_iopl; + physdev_set_iobitmap_t set_iobitmap; + physdev_apic_t apic_op; + physdev_irq_t irq_op; + } u; +}; +typedef struct physdev_op physdev_op_t; +DEFINE_XEN_GUEST_HANDLE(physdev_op_t); + +#define PHYSDEVOP_setup_gsi 21 +struct physdev_setup_gsi { + int gsi; + /* IN */ + uint8_t triggering; + /* IN */ + uint8_t polarity; + /* IN */ +}; + +typedef struct physdev_setup_gsi physdev_setup_gsi_t; +DEFINE_XEN_GUEST_HANDLE(physdev_setup_gsi_t); + +/* leave PHYSDEVOP 22 free */ + +/* type is MAP_PIRQ_TYPE_GSI or MAP_PIRQ_TYPE_MSI + * the hypercall returns a free pirq */ +#define PHYSDEVOP_get_free_pirq 23 +struct physdev_get_free_pirq { + /* IN */ + int type; + /* OUT */ + uint32_t pirq; +}; + +typedef struct physdev_get_free_pirq physdev_get_free_pirq_t; +DEFINE_XEN_GUEST_HANDLE(physdev_get_free_pirq_t); + +#define XEN_PCI_MMCFG_RESERVED 0x1 + +#define PHYSDEVOP_pci_mmcfg_reserved 24 +struct physdev_pci_mmcfg_reserved { + uint64_t address; + uint16_t segment; + uint8_t start_bus; + uint8_t end_bus; + uint32_t flags; +}; +typedef struct physdev_pci_mmcfg_reserved physdev_pci_mmcfg_reserved_t; +DEFINE_XEN_GUEST_HANDLE(physdev_pci_mmcfg_reserved_t); + +#define XEN_PCI_DEV_EXTFN 0x1 +#define XEN_PCI_DEV_VIRTFN 0x2 +#define XEN_PCI_DEV_PXM 0x4 + +#define PHYSDEVOP_pci_device_add 25 +struct physdev_pci_device_add { + /* IN */ + uint16_t seg; + uint8_t bus; + uint8_t devfn; + uint32_t flags; + struct { + uint8_t bus; + uint8_t devfn; + } physfn; + /* + * Optional parameters array. + * First element ([0]) is PXM domain associated with the device (if + * XEN_PCI_DEV_PXM is set) + */ + uint32_t optarr[XEN_FLEX_ARRAY_DIM]; +}; +typedef struct physdev_pci_device_add physdev_pci_device_add_t; +DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_add_t); + +#define PHYSDEVOP_pci_device_remove 26 +#define PHYSDEVOP_restore_msi_ext 27 +/* + * Dom0 should use these two to announce MMIO resources assigned to + * MSI-X capable devices won't (prepare) or may (release) change. + */ +#define PHYSDEVOP_prepare_msix 30 +#define PHYSDEVOP_release_msix 31 +struct physdev_pci_device { + /* IN */ + uint16_t seg; + uint8_t bus; + uint8_t devfn; +}; +typedef struct physdev_pci_device physdev_pci_device_t; +DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_t); + +#define PHYSDEVOP_DBGP_RESET_PREPARE 1 +#define PHYSDEVOP_DBGP_RESET_DONE 2 + +#define PHYSDEVOP_DBGP_BUS_UNKNOWN 0 +#define PHYSDEVOP_DBGP_BUS_PCI 1 + +#define PHYSDEVOP_dbgp_op 29 +struct physdev_dbgp_op { + /* IN */ + uint8_t op; + uint8_t bus; + union { + physdev_pci_device_t pci; + } u; +}; +typedef struct physdev_dbgp_op physdev_dbgp_op_t; +DEFINE_XEN_GUEST_HANDLE(physdev_dbgp_op_t); + +/* + * Notify that some PIRQ-bound event channels have been unmasked. + * ** This command is obsolete since interface version 0x00030202 and is ** + * ** unsupported by newer versions of Xen. ** + */ +#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 + +#if __XEN_INTERFACE_VERSION__ < 0x00040600 +/* + * These all-capitals physdev operation names are superseded by the new names + * (defined above) since interface version 0x00030202. The guard above was + * added post-4.5 only though and hence shouldn't check for 0x00030202.
+ */ +#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query +#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl +#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap +#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read +#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write +#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector +#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector +#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi +#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared +#endif + +#if __XEN_INTERFACE_VERSION__ < 0x00040200 +#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v1 +#else +#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v2 +#endif + +#endif /* __XEN_PUBLIC_PHYSDEV_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/interface/sched.h b/include/hw/xen/interface/sched.h new file mode 100644 index 0000000000..811bd87c82 --- /dev/null +++ b/include/hw/xen/interface/sched.h @@ -0,0 +1,202 @@ +/****************************************************************************** + * sched.h + * + * Scheduler state interactions + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2005, Keir Fraser + */ + +#ifndef __XEN_PUBLIC_SCHED_H__ +#define __XEN_PUBLIC_SCHED_H__ + +#include "event_channel.h" + +/* + * `incontents 150 sched Guest Scheduler Operations + * + * The SCHEDOP interface provides mechanisms for a guest to interact + * with the scheduler, including yield, blocking and shutting itself + * down. + */ + +/* + * The prototype for this hypercall is: + * ` long HYPERVISOR_sched_op(enum sched_op cmd, void *arg, ...) + * + * @cmd == SCHEDOP_??? (scheduler operation). + * @arg == Operation-specific extra argument(s), as described below. + * ... == Additional Operation-specific extra arguments, described below. + * + * Versions of Xen prior to 3.0.2 provided only the following legacy version + * of this hypercall, supporting only the commands yield, block and shutdown: + * long sched_op(int cmd, unsigned long arg) + * @cmd == SCHEDOP_??? (scheduler operation). + * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) + * == SHUTDOWN_* code (SCHEDOP_shutdown) + * + * This legacy version is available to new guests as: + * ` long HYPERVISOR_sched_op_compat(enum sched_op cmd, unsigned long arg) + */ + +/* ` enum sched_op { // SCHEDOP_* => struct sched_* */ +/* + * Voluntarily yield the CPU. 
+ * @arg == NULL.
+ */
+#define SCHEDOP_yield       0
+
+/*
+ * Block execution of this VCPU until an event is received for processing.
+ * If called with event upcalls masked, this operation will atomically
+ * reenable event delivery and check for pending events before blocking the
+ * VCPU. This avoids a "wakeup waiting" race.
+ * @arg == NULL.
+ */
+#define SCHEDOP_block       1
+
+/*
+ * Halt execution of this domain (all VCPUs) and notify the system controller.
+ * @arg == pointer to sched_shutdown_t structure.
+ *
+ * If the sched_shutdown_t reason is SHUTDOWN_suspend then
+ * x86 PV guests must also set RDX (EDX for 32-bit guests) to the MFN
+ * of the guest's start info page. RDX/EDX is the third hypercall
+ * argument.
+ *
+ * In addition, when the reason is SHUTDOWN_suspend, this hypercall
+ * returns 1 if suspend was cancelled or the domain was merely
+ * checkpointed, and 0 if it is resuming in a new domain.
+ */
+#define SCHEDOP_shutdown    2
+
+/*
+ * Poll a set of event-channel ports. Return when one or more are pending. An
+ * optional timeout may be specified.
+ * @arg == pointer to sched_poll_t structure.
+ */
+#define SCHEDOP_poll        3
+
+/*
+ * Declare a shutdown for another domain. The main use of this function is
+ * in interpreting shutdown requests and reasons for fully-virtualized
+ * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
+ * @arg == pointer to sched_remote_shutdown_t structure.
+ */
+#define SCHEDOP_remote_shutdown    4
+
+/*
+ * Latch a shutdown code, so that when the domain later shuts down it
+ * reports this code to the control tools.
+ * @arg == sched_shutdown_t, as for SCHEDOP_shutdown.
+ */
+#define SCHEDOP_shutdown_code      5
+
+/*
+ * Setup, poke and destroy a domain watchdog timer.
+ * @arg == pointer to sched_watchdog_t structure.
+ * With id == 0, setup a domain watchdog timer to cause domain shutdown
+ *               after timeout, returns watchdog id.
+ * With id != 0 and timeout == 0, destroy domain watchdog timer.
+ * With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
+ */
+#define SCHEDOP_watchdog           6
+
+/*
+ * Override the current vcpu affinity by pinning it to one physical cpu or
+ * undo this override restoring the previous affinity.
+ * @arg == pointer to sched_pin_override_t structure.
+ *
+ * A negative pcpu value will undo a previous pin override and restore the
+ * previous cpu affinity.
+ * This call is allowed for the hardware domain only and requires the cpu
+ * to be part of the domain's cpupool.
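As a sketch of how these commands are invoked, a PV guest asking for its own reboot might look as follows; the sched_shutdown structure is defined just below, and HYPERVISOR_sched_op() is assumed to be the guest's wrapper for the hypercall whose prototype is quoted above (these headers do not provide one):

    /* Sketch: a guest requesting its own reboot. */
    struct sched_shutdown arg = { .reason = SHUTDOWN_reboot };
    (void)HYPERVISOR_sched_op(SCHEDOP_shutdown, &arg);
    /* For SHUTDOWN_suspend the same call instead returns 1 if the suspend
     * was cancelled (or the domain was checkpointed) and 0 on resume. */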
+ */
+#define SCHEDOP_pin_override 7
+/* ` } */
+
+struct sched_shutdown {
+    unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
+};
+typedef struct sched_shutdown sched_shutdown_t;
+DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
+
+struct sched_poll {
+    XEN_GUEST_HANDLE(evtchn_port_t) ports;
+    unsigned int nr_ports;
+    uint64_t timeout;
+};
+typedef struct sched_poll sched_poll_t;
+DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
+
+struct sched_remote_shutdown {
+    domid_t domain_id;         /* Remote domain ID */
+    unsigned int reason;       /* SHUTDOWN_* => enum sched_shutdown_reason */
+};
+typedef struct sched_remote_shutdown sched_remote_shutdown_t;
+DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
+
+struct sched_watchdog {
+    uint32_t id;               /* watchdog ID */
+    uint32_t timeout;          /* timeout */
+};
+typedef struct sched_watchdog sched_watchdog_t;
+DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t);
+
+struct sched_pin_override {
+    int32_t pcpu;
+};
+typedef struct sched_pin_override sched_pin_override_t;
+DEFINE_XEN_GUEST_HANDLE(sched_pin_override_t);
+
+/*
+ * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
+ * software to determine the appropriate action. For the most part, Xen does
+ * not care about the shutdown code.
+ */
+/* ` enum sched_shutdown_reason { */
+#define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
+#define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
+#define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
+#define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
+#define SHUTDOWN_watchdog   4  /* Restart because watchdog time expired.     */
+
+/*
+ * The domain has asked to perform a 'soft reset'. The expected behavior is to
+ * reset internal Xen state for the domain, returning it to the point where it
+ * was created but leaving the domain's memory contents and vCPU contexts
+ * intact. This will allow the domain to start over and set up all Xen specific
+ * interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
+#define SHUTDOWN_MAX        5  /* Maximum valid shutdown reason.             */
+/* ` } */
+
+#endif /* __XEN_PUBLIC_SCHED_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/trace.h b/include/hw/xen/interface/trace.h
new file mode 100644
index 0000000000..d5fa4aea8d
--- /dev/null
+++ b/include/hw/xen/interface/trace.h
@@ -0,0 +1,341 @@
+/******************************************************************************
+ * include/public/trace.h
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Mark Williamson, (C) 2004 Intel Research Cambridge
+ * Copyright (C) 2005 Bin Ren
+ */
+
+#ifndef __XEN_PUBLIC_TRACE_H__
+#define __XEN_PUBLIC_TRACE_H__
+
+#define TRACE_EXTRA_MAX    7
+#define TRACE_EXTRA_SHIFT 28
+
+/* Trace classes */
+#define TRC_CLS_SHIFT 16
+#define TRC_GEN      0x0001f000    /* General trace            */
+#define TRC_SCHED    0x0002f000    /* Xen Scheduler trace      */
+#define TRC_DOM0OP   0x0004f000    /* Xen DOM0 operation trace */
+#define TRC_HVM      0x0008f000    /* Xen HVM trace            */
+#define TRC_MEM      0x0010f000    /* Xen memory trace         */
+#define TRC_PV       0x0020f000    /* Xen PV traces            */
+#define TRC_SHADOW   0x0040f000    /* Xen shadow tracing       */
+#define TRC_HW       0x0080f000    /* Xen hardware-related traces */
+#define TRC_GUEST    0x0800f000    /* Guest-generated traces   */
+#define TRC_ALL      0x0ffff000
+#define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff)
+#define TRC_HD_CYCLE_FLAG (1UL<<31)
+#define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) )
+#define TRC_HD_EXTRA(x)    (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX)
+
+/* Trace subclasses */
+#define TRC_SUBCLS_SHIFT 12
+
+/* trace subclasses for SVM */
+#define TRC_HVM_ENTRYEXIT   0x00081000   /* VMENTRY and #VMEXIT  */
+#define TRC_HVM_HANDLER     0x00082000   /* various HVM handlers */
+#define TRC_HVM_EMUL        0x00084000   /* emulated devices     */
+
+#define TRC_SCHED_MIN       0x00021000   /* Just runstate changes */
+#define TRC_SCHED_CLASS     0x00022000   /* Scheduler-specific    */
+#define TRC_SCHED_VERBOSE   0x00028000   /* More inclusive scheduling */
+
+/*
+ * The highest 3 bits of the last 12 bits of TRC_SCHED_CLASS above are
+ * reserved for encoding what scheduler produced the information. The
+ * actual event is encoded in the last 9 bits.
+ *
+ * This means we have 8 scheduling IDs available (which means at most 8
+ * schedulers generating events) and, in each scheduler, up to 512
+ * different events.
+ */
+#define TRC_SCHED_ID_BITS 3
+#define TRC_SCHED_ID_SHIFT (TRC_SUBCLS_SHIFT - TRC_SCHED_ID_BITS)
+#define TRC_SCHED_ID_MASK (((1UL<<TRC_SCHED_ID_BITS) - 1) << TRC_SCHED_ID_SHIFT)
+
+/*
+ * Structure used to pass MFNs to the trace buffers back to trace consumers.
+ * Offset is an offset into the mapped structure where the mfn list will be
+ * held. MFNs will be at ((unsigned long *)(t_info))+(t_info->cpu_offset[cpu]).
+ */
+struct t_info {
+    uint16_t tbuf_size;    /* Size in pages of each trace buffer */
+    uint16_t mfn_offset[]; /* Offset within t_info structure of the page list per cpu */
+    /* MFN lists immediately after the header */
+};
+
+#endif /* __XEN_PUBLIC_TRACE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
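For illustration, a trace consumer decodes the 32-bit record header word with the accessors defined above. This is a hedged sketch; the value of hdr is invented and stands in for the first word of a record read out of a mapped trace buffer:

    uint32_t hdr = 0x0002f203;                            /* made-up record header */
    uint32_t event    = TRC_HD_TO_EVENT(hdr);             /* class|subclass|event  */
    unsigned nr_extra = TRC_HD_EXTRA(hdr);                /* extra payload words   */
    int      has_tsc  = TRC_HD_INCLUDES_CYCLE_COUNT(hdr); /* TSC word present?     */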
diff --git a/include/hw/xen/interface/vcpu.h b/include/hw/xen/interface/vcpu.h
new file mode 100644
index 0000000000..3623af932f
--- /dev/null
+++ b/include/hw/xen/interface/vcpu.h
@@ -0,0 +1,248 @@
+/******************************************************************************
+ * vcpu.h
+ *
+ * VCPU initialisation, query, and hotplug.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2005, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_VCPU_H__
+#define __XEN_PUBLIC_VCPU_H__
+
+#include "xen.h"
+
+/*
+ * Prototype for this hypercall is:
+ *  long vcpu_op(int cmd, unsigned int vcpuid, void *extra_args)
+ * @cmd        == VCPUOP_??? (VCPU operation).
+ * @vcpuid     == VCPU to operate on.
+ * @extra_args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Initialise a VCPU. Each VCPU can be initialised only once. A
+ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
+ *
+ * @extra_arg == For PV or ARM guests this is a pointer to a vcpu_guest_context
+ *               structure containing the initial state for the VCPU. For x86
+ *               HVM based guests this is a pointer to a vcpu_hvm_context
+ *               structure.
+ */
+#define VCPUOP_initialise            0
+
+/*
+ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
+ * if the VCPU has not been initialised (VCPUOP_initialise).
+ */
+#define VCPUOP_up                    1
+
+/*
+ * Bring down a VCPU (i.e., make it non-runnable).
+ * There are a few caveats that callers should observe:
+ *  1. This operation may return, and VCPU_is_up may return false, before the
+ *     VCPU stops running (i.e., the command is asynchronous). It is a good
+ *     idea to ensure that the VCPU has entered a non-critical loop before
+ *     bringing it down. Alternatively, this operation is guaranteed
+ *     synchronous if invoked by the VCPU itself.
+ *  2. After a VCPU is initialised, there is currently no way to drop all its
+ *     references to domain memory. Even a VCPU that is down still holds
+ *     memory references via its pagetable base pointer and GDT. It is good
+ *     practice to move a VCPU onto an 'idle' or default page table, LDT and
+ *     GDT before bringing it down.
+ */
+#define VCPUOP_down                  2
+
+/* Returns 1 if the given VCPU is up. */
+#define VCPUOP_is_up                 3
+
+/*
+ * Return information about the state and running time of a VCPU.
+ * @extra_arg == pointer to vcpu_runstate_info structure.
+ */
+#define VCPUOP_get_runstate_info     4
+struct vcpu_runstate_info {
+    /* VCPU's current state (RUNSTATE_*). */
+    int      state;
+    /* When was current state entered (system time, ns)? */
+    uint64_t state_entry_time;
+    /*
+     * Update indicator set in state_entry_time:
+     * When activated via VMASST_TYPE_runstate_update_flag, set during
+     * updates in guest memory mapped copy of vcpu_runstate_info.
+ */ +#define XEN_RUNSTATE_UPDATE (xen_mk_ullong(1) << 63) + /* + * Time spent in each RUNSTATE_* (ns). The sum of these times is + * guaranteed not to drift from system time. + */ + uint64_t time[4]; +}; +typedef struct vcpu_runstate_info vcpu_runstate_info_t; +DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t); + +/* VCPU is currently running on a physical CPU. */ +#define RUNSTATE_running 0 + +/* VCPU is runnable, but not currently scheduled on any physical CPU. */ +#define RUNSTATE_runnable 1 + +/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */ +#define RUNSTATE_blocked 2 + +/* + * VCPU is not runnable, but it is not blocked. + * This is a 'catch all' state for things like hotplug and pauses by the + * system administrator (or for critical sections in the hypervisor). + * RUNSTATE_blocked dominates this state (it is the preferred state). + */ +#define RUNSTATE_offline 3 + +/* + * Register a shared memory area from which the guest may obtain its own + * runstate information without needing to execute a hypercall. + * Notes: + * 1. The registered address may be virtual or physical or guest handle, + * depending on the platform. Virtual address or guest handle should be + * registered on x86 systems. + * 2. Only one shared area may be registered per VCPU. The shared area is + * updated by the hypervisor each time the VCPU is scheduled. Thus + * runstate.state will always be RUNSTATE_running and + * runstate.state_entry_time will indicate the system time at which the + * VCPU was last scheduled to run. + * @extra_arg == pointer to vcpu_register_runstate_memory_area structure. + */ +#define VCPUOP_register_runstate_memory_area 5 +struct vcpu_register_runstate_memory_area { + union { + XEN_GUEST_HANDLE(vcpu_runstate_info_t) h; + struct vcpu_runstate_info *v; + uint64_t p; + } addr; +}; +typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t; +DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t); + +/* + * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer + * which can be set via these commands. Periods smaller than one millisecond + * may not be supported. + */ +#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */ +#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */ +struct vcpu_set_periodic_timer { + uint64_t period_ns; +}; +typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t; +DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t); + +/* + * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot + * timer which can be set via these commands. + */ +#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */ +#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */ +struct vcpu_set_singleshot_timer { + uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */ + uint32_t flags; /* VCPU_SSHOTTMR_??? */ +}; +typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t; +DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t); + +/* Flags to VCPUOP_set_singleshot_timer. */ + /* Require the timeout to be in the future (return -ETIME if it's passed). */ +#define _VCPU_SSHOTTMR_future (0) +#define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future) + +/* + * Register a memory location in the guest address space for the + * vcpu_info structure. This allows the guest to place the vcpu_info + * structure in a convenient place, such as in a per-cpu data area. 
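To make the registration note above concrete, here is a hedged sketch of how a guest might take a consistent snapshot of a runstate area it registered via VCPUOP_register_runstate_memory_area, when VMASST_TYPE_runstate_update_flag (see xen.h) is active. The retry loop is illustrative only; a real guest would also need memory barriers around the reads:

    /* Sketch: snapshot a guest-mapped vcpu_runstate_info consistently. */
    static struct vcpu_runstate_info
    runstate_snapshot(volatile struct vcpu_runstate_info *rs)
    {
        struct vcpu_runstate_info snap;
        uint64_t t;
        do {
            t = rs->state_entry_time;  /* XEN_RUNSTATE_UPDATE set while updating */
            snap.state = rs->state;
            snap.state_entry_time = t;
            for (int i = 0; i < 4; i++)
                snap.time[i] = rs->time[i];
            /* retry if an update was in flight or raced with the copy */
        } while ((t & XEN_RUNSTATE_UPDATE) || t != rs->state_entry_time);
        snap.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
        return snap;
    }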
+ * The pointer need not be page aligned, but the structure must not
+ * cross a page boundary.
+ *
+ * This may be called only once per vcpu.
+ */
+#define VCPUOP_register_vcpu_info   10  /* arg == vcpu_register_vcpu_info_t */
+struct vcpu_register_vcpu_info {
+    uint64_t mfn;    /* mfn of page to place vcpu_info */
+    uint32_t offset; /* offset within page */
+    uint32_t rsvd;   /* unused */
+};
+typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
+
+/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
+#define VCPUOP_send_nmi             11
+
+/*
+ * Get the physical ID information for a pinned vcpu's underlying physical
+ * processor. The physical ID information is architecture-specific.
+ * On x86: id[31:0]=apic_id, id[63:32]=acpi_id.
+ * This command returns -EINVAL if it is not a valid operation for this VCPU.
+ */
+#define VCPUOP_get_physid           12  /* arg == vcpu_get_physid_t */
+struct vcpu_get_physid {
+    uint64_t phys_id;
+};
+typedef struct vcpu_get_physid vcpu_get_physid_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
+#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid))
+#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32))
+
+/*
+ * Register a memory location to get a secondary copy of the vcpu time
+ * parameters. The master copy still exists as part of the vcpu shared
+ * memory area, and this secondary copy is updated whenever the master copy
+ * is updated (and using the same versioning scheme for synchronisation).
+ *
+ * The intent is that this copy may be mapped (RO) into userspace so
+ * that usermode can compute system time using the time info and the
+ * tsc. Usermode will see an array of vcpu_time_info structures, one
+ * for each vcpu, and choose the right one by an existing mechanism
+ * which allows it to get the current vcpu number (such as via a
+ * segment limit). It can then apply the normal algorithm to compute
+ * system time from the tsc.
+ *
+ * @extra_arg == pointer to vcpu_register_time_info_memory_area structure.
+ */
+#define VCPUOP_register_vcpu_time_memory_area   13
+DEFINE_XEN_GUEST_HANDLE(vcpu_time_info_t);
+struct vcpu_register_time_memory_area {
+    union {
+        XEN_GUEST_HANDLE(vcpu_time_info_t) h;
+        struct vcpu_time_info *v;
+        uint64_t p;
+    } addr;
+};
+typedef struct vcpu_register_time_memory_area vcpu_register_time_memory_area_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t);
+
+#endif /* __XEN_PUBLIC_VCPU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
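As a small usage sketch of the x86 helper macros above (HYPERVISOR_vcpu_op() being an assumed wrapper for the vcpu_op hypercall, which these headers do not define):

    struct vcpu_get_physid physid;
    if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, 0, &physid) == 0) {
        uint32_t apic_id = xen_vcpu_physid_to_x86_apicid(physid.phys_id);
        uint32_t acpi_id = xen_vcpu_physid_to_x86_acpiid(physid.phys_id);
        /* apic_id/acpi_id now identify vcpu 0's underlying physical CPU */
    }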
diff --git a/include/hw/xen/interface/version.h b/include/hw/xen/interface/version.h
new file mode 100644
index 0000000000..17a81e23cd
--- /dev/null
+++ b/include/hw/xen/interface/version.h
@@ -0,0 +1,113 @@
+/******************************************************************************
+ * version.h
+ *
+ * Xen version, type, and compile information.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2005, Nguyen Anh Quynh
+ * Copyright (c) 2005, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_VERSION_H__
+#define __XEN_PUBLIC_VERSION_H__
+
+#include "xen.h"
+
+/* NB. All ops return zero on success, except XENVER_{version,pagesize,build_id}. */
+
+/* arg == NULL; returns major:minor (16:16). */
+#define XENVER_version      0
+
+/* arg == xen_extraversion_t. */
+#define XENVER_extraversion 1
+typedef char xen_extraversion_t[16];
+#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
+
+/* arg == xen_compile_info_t. */
+#define XENVER_compile_info 2
+struct xen_compile_info {
+    char compiler[64];
+    char compile_by[16];
+    char compile_domain[32];
+    char compile_date[32];
+};
+typedef struct xen_compile_info xen_compile_info_t;
+
+#define XENVER_capabilities 3
+typedef char xen_capabilities_info_t[1024];
+#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
+
+#define XENVER_changeset 4
+typedef char xen_changeset_info_t[64];
+#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
+
+#define XENVER_platform_parameters 5
+struct xen_platform_parameters {
+    xen_ulong_t virt_start;
+};
+typedef struct xen_platform_parameters xen_platform_parameters_t;
+
+#define XENVER_get_features 6
+struct xen_feature_info {
+    unsigned int submap_idx;    /* IN: which 32-bit submap to return */
+    uint32_t     submap;        /* OUT: 32-bit submap */
+};
+typedef struct xen_feature_info xen_feature_info_t;
+
+/* Declares the features reported by XENVER_get_features. */
+#include "features.h"
+
+/* arg == NULL; returns host memory page size. */
+#define XENVER_pagesize 7
+
+/* arg == xen_domain_handle_t.
+ *
+ * The toolstack fills it out for guest consumption. It is intended to hold
+ * the UUID of the guest.
+ */
+#define XENVER_guest_handle 8
+
+#define XENVER_commandline 9
+typedef char xen_commandline_t[1024];
+
+/*
+ * Return value is the number of bytes written, or XEN_Exx on error.
+ * Calling with empty parameter returns the size of build_id.
+ */
+#define XENVER_build_id 10
+struct xen_build_id {
+    uint32_t        len; /* IN: size of buf[]. */
+    unsigned char   buf[XEN_FLEX_ARRAY_DIM];
+                         /* OUT: Variable length buffer with build_id.
*/ +}; +typedef struct xen_build_id xen_build_id_t; + +#endif /* __XEN_PUBLIC_VERSION_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/interface/xen-compat.h b/include/hw/xen/interface/xen-compat.h new file mode 100644 index 0000000000..e1c027a95c --- /dev/null +++ b/include/hw/xen/interface/xen-compat.h @@ -0,0 +1,46 @@ +/****************************************************************************** + * xen-compat.h + * + * Guest OS interface to Xen. Compatibility layer. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2006, Christian Limpach + */ + +#ifndef __XEN_PUBLIC_XEN_COMPAT_H__ +#define __XEN_PUBLIC_XEN_COMPAT_H__ + +#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040e00 + +#if defined(__XEN__) || defined(__XEN_TOOLS__) +/* Xen is built with matching headers and implements the latest interface. */ +#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__ +#elif !defined(__XEN_INTERFACE_VERSION__) +/* Guests which do not specify a version get the legacy interface. */ +#define __XEN_INTERFACE_VERSION__ 0x00000000 +#endif + +#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__ +#error "These header files do not support the requested interface version." +#endif + +#define COMPAT_FLEX_ARRAY_DIM XEN_FLEX_ARRAY_DIM + +#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ diff --git a/include/hw/xen/interface/xen.h b/include/hw/xen/interface/xen.h new file mode 100644 index 0000000000..e373592c33 --- /dev/null +++ b/include/hw/xen/interface/xen.h @@ -0,0 +1,1049 @@ +/****************************************************************************** + * xen.h + * + * Guest OS interface to Xen. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
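As a sketch of how the version machinery in xen-compat.h above is meant to be used: a guest build that wants a fixed ABI defines the interface version before including any public header; without this it gets the legacy (0x00000000) interface, while Xen and its tools always build against __XEN_LATEST_INTERFACE_VERSION__. The version number here is only an example:

    /* Sketch: pin the 4.4 interface before pulling in the public headers. */
    #define __XEN_INTERFACE_VERSION__ 0x00040400
    #include "xen.h"   /* xen.h includes xen-compat.h, which checks this */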
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2004, K A Fraser + */ + +#ifndef __XEN_PUBLIC_XEN_H__ +#define __XEN_PUBLIC_XEN_H__ + +#include "xen-compat.h" + +#if defined(__i386__) || defined(__x86_64__) +#include "arch-x86/xen.h" +#elif defined(__arm__) || defined (__aarch64__) +#include "arch-arm.h" +#else +#error "Unsupported architecture" +#endif + +#ifndef __ASSEMBLY__ +/* Guest handles for primitive C types. */ +DEFINE_XEN_GUEST_HANDLE(char); +__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); +DEFINE_XEN_GUEST_HANDLE(int); +__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); +#if __XEN_INTERFACE_VERSION__ < 0x00040300 +DEFINE_XEN_GUEST_HANDLE(long); +__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); +#endif +DEFINE_XEN_GUEST_HANDLE(void); + +DEFINE_XEN_GUEST_HANDLE(uint64_t); +DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); +DEFINE_XEN_GUEST_HANDLE(xen_ulong_t); + +/* Define a variable length array (depends on compiler). */ +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +#define XEN_FLEX_ARRAY_DIM +#elif defined(__GNUC__) +#define XEN_FLEX_ARRAY_DIM 0 +#else +#define XEN_FLEX_ARRAY_DIM 1 /* variable size */ +#endif + +/* Turn a plain number into a C unsigned (long (long)) constant. */ +#define __xen_mk_uint(x) x ## U +#define __xen_mk_ulong(x) x ## UL +#ifndef __xen_mk_ullong +# define __xen_mk_ullong(x) x ## ULL +#endif +#define xen_mk_uint(x) __xen_mk_uint(x) +#define xen_mk_ulong(x) __xen_mk_ulong(x) +#define xen_mk_ullong(x) __xen_mk_ullong(x) + +#else + +/* In assembly code we cannot use C numeric constant suffixes. 
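For illustration, XEN_FLEX_ARRAY_DIM (defined above) is what lets callers size trailing arrays such as physdev_pci_device_add.optarr. A hedged, userspace-style sketch, assuming the C99 branch where the flexible member is excluded from sizeof(); the calloc() allocation is illustrative only:

    #include <stdlib.h>

    /* Sketch: allocate a physdev_pci_device_add with one optional entry. */
    struct physdev_pci_device_add *add =
        calloc(1, sizeof(*add) + 1 * sizeof(uint32_t));
    if (add) {
        add->flags = XEN_PCI_DEV_PXM;
        add->optarr[0] = 0;   /* PXM (NUMA) node of the device */
    }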
*/ +#define xen_mk_uint(x) x +#define xen_mk_ulong(x) x +#define xen_mk_ullong(x) x + +#endif + +/* + * HYPERCALLS + */ + +/* `incontents 100 hcalls List of hypercalls + * ` enum hypercall_num { // __HYPERVISOR_* => HYPERVISOR_*() + */ + +#define __HYPERVISOR_set_trap_table 0 +#define __HYPERVISOR_mmu_update 1 +#define __HYPERVISOR_set_gdt 2 +#define __HYPERVISOR_stack_switch 3 +#define __HYPERVISOR_set_callbacks 4 +#define __HYPERVISOR_fpu_taskswitch 5 +#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */ +#define __HYPERVISOR_platform_op 7 +#define __HYPERVISOR_set_debugreg 8 +#define __HYPERVISOR_get_debugreg 9 +#define __HYPERVISOR_update_descriptor 10 +#define __HYPERVISOR_memory_op 12 +#define __HYPERVISOR_multicall 13 +#define __HYPERVISOR_update_va_mapping 14 +#define __HYPERVISOR_set_timer_op 15 +#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */ +#define __HYPERVISOR_xen_version 17 +#define __HYPERVISOR_console_io 18 +#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */ +#define __HYPERVISOR_grant_table_op 20 +#define __HYPERVISOR_vm_assist 21 +#define __HYPERVISOR_update_va_mapping_otherdomain 22 +#define __HYPERVISOR_iret 23 /* x86 only */ +#define __HYPERVISOR_vcpu_op 24 +#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ +#define __HYPERVISOR_mmuext_op 26 +#define __HYPERVISOR_xsm_op 27 +#define __HYPERVISOR_nmi_op 28 +#define __HYPERVISOR_sched_op 29 +#define __HYPERVISOR_callback_op 30 +#define __HYPERVISOR_xenoprof_op 31 +#define __HYPERVISOR_event_channel_op 32 +#define __HYPERVISOR_physdev_op 33 +#define __HYPERVISOR_hvm_op 34 +#define __HYPERVISOR_sysctl 35 +#define __HYPERVISOR_domctl 36 +#define __HYPERVISOR_kexec_op 37 +#define __HYPERVISOR_tmem_op 38 +#define __HYPERVISOR_argo_op 39 +#define __HYPERVISOR_xenpmu_op 40 +#define __HYPERVISOR_dm_op 41 +#define __HYPERVISOR_hypfs_op 42 + +/* Architecture-specific hypercall definitions. */ +#define __HYPERVISOR_arch_0 48 +#define __HYPERVISOR_arch_1 49 +#define __HYPERVISOR_arch_2 50 +#define __HYPERVISOR_arch_3 51 +#define __HYPERVISOR_arch_4 52 +#define __HYPERVISOR_arch_5 53 +#define __HYPERVISOR_arch_6 54 +#define __HYPERVISOR_arch_7 55 + +/* ` } */ + +/* + * HYPERCALL COMPATIBILITY. + */ + +/* New sched_op hypercall introduced in 0x00030101. */ +#if __XEN_INTERFACE_VERSION__ < 0x00030101 +#undef __HYPERVISOR_sched_op +#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat +#endif + +/* New event-channel and physdev hypercalls introduced in 0x00030202. */ +#if __XEN_INTERFACE_VERSION__ < 0x00030202 +#undef __HYPERVISOR_event_channel_op +#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat +#undef __HYPERVISOR_physdev_op +#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat +#endif + +/* New platform_op hypercall introduced in 0x00030204. */ +#if __XEN_INTERFACE_VERSION__ < 0x00030204 +#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op +#endif + +/* + * VIRTUAL INTERRUPTS + * + * Virtual interrupts that a guest OS may receive from Xen. + * + * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a + * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. + * The latter can be allocated only once per guest: they must initially be + * allocated to VCPU0 but can subsequently be re-bound. + */ +/* ` enum virq { */ +#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ +#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. 
 */
+#define VIRQ_CONSOLE    2  /* G. (DOM0) Bytes received on emergency console. */
+#define VIRQ_DOM_EXC    3  /* G. (DOM0) Exceptional event for some domain.   */
+#define VIRQ_TBUF       4  /* G. (DOM0) Trace buffer has records available.  */
+#define VIRQ_DEBUGGER   6  /* G. (DOM0) A domain has paused for debugging.   */
+#define VIRQ_XENOPROF   7  /* V. XenOprofile interrupt: new sample available */
+#define VIRQ_CON_RING   8  /* G. (DOM0) Bytes received on console            */
+#define VIRQ_PCPU_STATE 9  /* G. (DOM0) PCPU state changed                   */
+#define VIRQ_MEM_EVENT  10 /* G. (DOM0) A memory event has occurred          */
+#define VIRQ_ARGO       11 /* G. Argo interdomain message notification       */
+#define VIRQ_ENOMEM     12 /* G. (DOM0) Low on heap memory                   */
+#define VIRQ_XENPMU     13 /* V. PMC interrupt                               */
+
+/* Architecture-specific VIRQ definitions. */
+#define VIRQ_ARCH_0    16
+#define VIRQ_ARCH_1    17
+#define VIRQ_ARCH_2    18
+#define VIRQ_ARCH_3    19
+#define VIRQ_ARCH_4    20
+#define VIRQ_ARCH_5    21
+#define VIRQ_ARCH_6    22
+#define VIRQ_ARCH_7    23
+/* ` } */
+
+#define NR_VIRQS       24
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_mmu_update(const struct mmu_update reqs[],
+ * `                       unsigned count, unsigned *done_out,
+ * `                       unsigned foreigndom)
+ * `
+ * @reqs is an array of mmu_update_t structures ((ptr, val) pairs).
+ * @count is the length of the above array.
+ * @pdone is an output parameter indicating number of completed operations
+ * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this
+ *                    hypercall invocation. Can be DOMID_SELF.
+ * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced
+ *                     in this hypercall invocation. The value of this field
+ *                     (x) encodes the PFD as follows:
+ *                     x == 0 => PFD == DOMID_SELF
+ *                     x != 0 => PFD == x - 1
+ *
+ * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command.
+ * -------------
+ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
+ * Updates an entry in a page table belonging to PFD. If updating an L1 table,
+ * and the new table entry is valid/present, the mapped frame must belong to
+ * FD. If attempting to map an I/O page then the caller assumes the privilege
+ * of the FD.
+ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
+ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
+ * ptr[:2]  -- Machine address of the page-table entry to modify.
+ * val      -- Value to write.
+ *
+ * There are also certain implicit requirements when using this hypercall. The
+ * pages that make up a pagetable must be mapped read-only in the guest.
+ * This prevents uncontrolled guest updates to the pagetable. Xen strictly
+ * enforces this, and will disallow any pagetable update which will end up
+ * mapping a pagetable page RW, and will disallow using any writable page as a
+ * pagetable. In practice it means that when constructing a page table for a
+ * process, thread, etc, we MUST be very diligent in following these rules:
+ *  1). Start with top-level page (PGD or in Xen language: L4). Fill out
+ *      the entries.
+ *  2). Keep on going, filling out the upper (PUD or L3), and middle (PMD
+ *      or L2).
+ *  3). Start filling out the PTE table (L1) with the PTE entries. Once
+ *      done, make sure to set each of those entries to RO (so writeable bit
+ *      is unset). Once that has been completed, set the PMD (L2) for this
+ *      PTE table as RO.
+ *  4). When completed with all of the PMD (L2) entries, and all of them have
+ *      been set to RO, make sure to set RO the PUD (L3). Do the same
+ *      operation on PGD (L4) pagetable entries that have a PUD (L3) entry.
+ *  5). Now before you can use those pages (so setting the cr3), you MUST also
+ *      pin them so that the hypervisor can verify the entries. This is done
+ *      via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame
+ *      number of the PGD (L4)). At this point the HYPERVISOR_mmuext_op(
+ *      MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be
+ *      issued.
+ * For 32-bit guests, the L4 is not used (as there are fewer pagetables), so
+ * instead use L3.
+ * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE
+ * hypercall. If so desired, the OS can also try to write to the PTE
+ * and be trapped by the hypervisor (as the PTE entry is RO).
+ *
+ * To deallocate the pages, the operations are the reverse of the steps
+ * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the
+ * pagetable MUST not be in use (meaning that the cr3 is not set to it).
+ *
+ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
+ * Updates an entry in the machine->pseudo-physical mapping table.
+ * ptr[:2]  -- Machine address within the frame whose mapping to modify.
+ *             The frame must belong to the FD, if one is specified.
+ * val      -- Value to write into the mapping entry.
+ *
+ * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
+ * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
+ * with those in @val.
+ *
+ * ptr[1:0] == MMU_PT_UPDATE_NO_TRANSLATE:
+ * As MMU_NORMAL_PT_UPDATE above, but @val is not translated through FD
+ * page tables.
+ *
+ * @val is usually the machine frame number along with some attributes.
+ * The attributes by default follow the architecture defined bits. Meaning that
+ * if this is an x86_64 machine and a four-level page table layout is used,
+ * the layout of val is:
+ * - 63 if set means No execute (NX)
+ * - 46-13 the machine frame number
+ * - 12 available for guest
+ * - 11 available for guest
+ * - 10 available for guest
+ * - 9 available for guest
+ * - 8 global
+ * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages)
+ * - 6 dirty
+ * - 5 accessed
+ * - 4 page cached disabled
+ * - 3 page write through
+ * - 2 userspace accessible
+ * - 1 writeable
+ * - 0 present
+ *
+ * The one bit that does not fit with the default layout is PAGE_PSE (also
+ * called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER arguments to the
+ * HYPERVISOR_mmuext_op serve as a mechanism to set a pagetable to be 4MB
+ * (or 2MB) instead of using the PAGE_PSE bit.
+ *
+ * The reason that the PAGE_PSE (bit 7) is not being utilized is due to Xen
+ * using it as the Page Attribute Table (PAT) bit - for details on it please
+ * refer to Intel SDM 10.12. The PAT allows setting the caching attributes of
+ * pages instead of using MTRRs.
+ *
+ * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits):
+ *                    PAT4                 PAT0
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC  | UC- | WC | WB | UC | UC- | WC | WB |  <= Linux
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC  | UC- | WT | WB | UC | UC- | WT | WB |  <= BIOS (default when machine boots)
+ * +-----+-----+----+----+----+-----+----+----+
+ * | rsv | rsv | WP | WC | UC | UC- | WT | WB |  <= Xen
+ * +-----+-----+----+----+----+-----+----+----+
+ *
+ * The lookup of this index table translates to looking up
+ * Bit 7, Bit 4, and Bit 3 of val entry:
+ *
+ * PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3).
+ *
+ * If all bits are off, then we are using PAT0. If bit 3 turned on,
+ * then we are using PAT1, if bit 3 and bit 4, then PAT2..
+ *
+ * As you can see, the Linux PAT1 translates to PAT4 under Xen. This means
+ * that a guest which follows Linux's PAT setup and wants to set Write
+ * Combined on pages MUST use the PAT4 entry, i.e. Bit 7 (PAGE_PAT) must be
+ * set. For example, under Linux it only uses PAT0, PAT1, and PAT2 for the
+ * caching as:
+ *
+ *    WB = none (so PAT0)
+ *    WC = PWT (bit 3 on)
+ *    UC = PWT | PCD (bit 3 and 4 are on).
+ *
+ * To make it work with Xen, it needs to translate the WC bit as so:
+ *
+ *    PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3
+ *
+ * And to translate back it would:
+ *
+ *    PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
+ */
+#define MMU_NORMAL_PT_UPDATE       0 /* checked '*ptr = val'. ptr is MA.      */
+#define MMU_MACHPHYS_UPDATE        1 /* ptr = MA of frame to modify entry for */
+#define MMU_PT_UPDATE_PRESERVE_AD  2 /* atomically: *ptr = val | (*ptr&(A|D)) */
+#define MMU_PT_UPDATE_NO_TRANSLATE 3 /* checked '*ptr = val'. ptr is MA.      */
+                                     /* val never translated.                 */
+
+/*
+ * MMU EXTENDED OPERATIONS
+ *
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_mmuext_op(mmuext_op_t uops[],
+ * `                      unsigned int count,
+ * `                      unsigned int *pdone,
+ * `                      unsigned int foreigndom)
+ */
+/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
+ * Where the FD has some effect, it is described below.
+ *
+ * cmd: MMUEXT_(UN)PIN_*_TABLE
+ * mfn: Machine frame number to be (un)pinned as a p.t. page.
+ *      The frame must belong to the FD, if one is specified.
+ *
+ * cmd: MMUEXT_NEW_BASEPTR
+ * mfn: Machine frame number of new page-table base to install in MMU.
+ *
+ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
+ * mfn: Machine frame number of new page-table base to install in MMU
+ *      when in user space.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_LOCAL
+ * No additional arguments. Flushes local TLB.
+ *
+ * cmd: MMUEXT_INVLPG_LOCAL
+ * linear_addr: Linear address to be flushed from the local TLB.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_MULTI
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
+ *
+ * cmd: MMUEXT_INVLPG_MULTI
+ * linear_addr: Linear address to be flushed.
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_ALL
+ * No additional arguments. Flushes all VCPUs' TLBs.
+ *
+ * cmd: MMUEXT_INVLPG_ALL
+ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE
+ * No additional arguments. Writes back and flushes cache contents.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
+ * No additional arguments. Writes back and flushes cache contents
+ * on all CPUs in the system.
+ *
+ * cmd: MMUEXT_SET_LDT
+ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
+ * nr_ents: Number of entries in LDT.
+ *
+ * cmd: MMUEXT_CLEAR_PAGE
+ * mfn: Machine frame number to be cleared.
+ *
+ * cmd: MMUEXT_COPY_PAGE
+ * mfn: Machine frame number of the destination page.
+ * src_mfn: Machine frame number of the source page.
+ *
+ * cmd: MMUEXT_[UN]MARK_SUPER
+ * mfn: Machine frame number of head of superpage to be [un]marked.
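The Write-Combined translation spelled out in the PAT discussion above reduces to moving one bit. A hedged sketch; the _PAGE_* masks are local illustrations matching the bit layout documented above, not definitions from this header:

    #define _PAGE_PWT (1ULL << 3)
    #define _PAGE_PCD (1ULL << 4)
    #define _PAGE_PAT (1ULL << 7)

    /* Sketch: re-encode a Linux-style Write-Combined PTE for Xen. */
    static uint64_t linux_wc_to_xen(uint64_t pte)
    {
        if ((pte & (_PAGE_PWT | _PAGE_PCD)) == _PAGE_PWT)  /* WC under Linux */
            pte = (pte & ~_PAGE_PWT) | _PAGE_PAT;          /* WC under Xen (PAT4) */
        return pte;
    }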
+ */ +/* ` enum mmuext_cmd { */ +#define MMUEXT_PIN_L1_TABLE 0 +#define MMUEXT_PIN_L2_TABLE 1 +#define MMUEXT_PIN_L3_TABLE 2 +#define MMUEXT_PIN_L4_TABLE 3 +#define MMUEXT_UNPIN_TABLE 4 +#define MMUEXT_NEW_BASEPTR 5 +#define MMUEXT_TLB_FLUSH_LOCAL 6 +#define MMUEXT_INVLPG_LOCAL 7 +#define MMUEXT_TLB_FLUSH_MULTI 8 +#define MMUEXT_INVLPG_MULTI 9 +#define MMUEXT_TLB_FLUSH_ALL 10 +#define MMUEXT_INVLPG_ALL 11 +#define MMUEXT_FLUSH_CACHE 12 +#define MMUEXT_SET_LDT 13 +#define MMUEXT_NEW_USER_BASEPTR 15 +#define MMUEXT_CLEAR_PAGE 16 +#define MMUEXT_COPY_PAGE 17 +#define MMUEXT_FLUSH_CACHE_GLOBAL 18 +#define MMUEXT_MARK_SUPER 19 +#define MMUEXT_UNMARK_SUPER 20 +/* ` } */ + +#ifndef __ASSEMBLY__ +struct mmuext_op { + unsigned int cmd; /* => enum mmuext_cmd */ + union { + /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR + * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */ + xen_pfn_t mfn; + /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ + unsigned long linear_addr; + } arg1; + union { + /* SET_LDT */ + unsigned int nr_ents; + /* TLB_FLUSH_MULTI, INVLPG_MULTI */ +#if __XEN_INTERFACE_VERSION__ >= 0x00030205 + XEN_GUEST_HANDLE(const_void) vcpumask; +#else + const void *vcpumask; +#endif + /* COPY_PAGE */ + xen_pfn_t src_mfn; + } arg2; +}; +typedef struct mmuext_op mmuext_op_t; +DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); +#endif + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_update_va_mapping(unsigned long va, u64 val, + * ` enum uvm_flags flags) + * ` + * ` enum neg_errnoval + * ` HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, u64 val, + * ` enum uvm_flags flags, + * ` domid_t domid) + * ` + * ` @va: The virtual address whose mapping we want to change + * ` @val: The new page table entry, must contain a machine address + * ` @flags: Control TLB flushes + */ +/* These are passed as 'flags' to update_va_mapping. They can be ORed. */ +/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ +/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ +/* ` enum uvm_flags { */ +#define UVMF_NONE (xen_mk_ulong(0)<<0) /* No flushing at all. */ +#define UVMF_TLB_FLUSH (xen_mk_ulong(1)<<0) /* Flush entire TLB(s). */ +#define UVMF_INVLPG (xen_mk_ulong(2)<<0) /* Flush only one entry. */ +#define UVMF_FLUSHTYPE_MASK (xen_mk_ulong(3)<<0) +#define UVMF_MULTI (xen_mk_ulong(0)<<2) /* Flush subset of TLBs. */ +#define UVMF_LOCAL (xen_mk_ulong(0)<<2) /* Flush local TLB. */ +#define UVMF_ALL (xen_mk_ulong(1)<<2) /* Flush all TLBs. */ +/* ` } */ + +/* + * ` int + * ` HYPERVISOR_console_io(unsigned int cmd, + * ` unsigned int count, + * ` char buffer[]); + * + * @cmd: Command (see below) + * @count: Size of the buffer to read/write + * @buffer: Pointer in the guest memory + * + * List of commands: + * + * * CONSOLEIO_write: Write the buffer to Xen console. + * For the hardware domain, all the characters in the buffer will + * be written. Characters will be printed directly to the console. + * For all the other domains, only the printable characters will be + * written. Characters may be buffered until a newline (i.e '\n') is + * found. + * @return 0 on success, otherwise return an error code. + * * CONSOLEIO_read: Attempts to read up to @count characters from Xen + * console. The maximum buffer size (i.e. @count) supported is 2GB. + * @return the number of characters read on success, otherwise return + * an error code. + */ +#define CONSOLEIO_write 0 +#define CONSOLEIO_read 1 + +/* + * Commands to HYPERVISOR_vm_assist(). 
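As a usage sketch for CONSOLEIO_write above, with HYPERVISOR_console_io() standing in for an assumed wrapper around the console_io hypercall:

    /* Sketch: print a buffer on the Xen console. Non-hardware domains only
     * get printable characters through, possibly buffered until '\n'. */
    static void xen_console_puts(const char *s, unsigned int len)
    {
        (void)HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)s);
    }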
+ */ +#define VMASST_CMD_enable 0 +#define VMASST_CMD_disable 1 + +/* x86/32 guests: simulate full 4GB segment limits. */ +#define VMASST_TYPE_4gb_segments 0 + +/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ +#define VMASST_TYPE_4gb_segments_notify 1 + +/* + * x86 guests: support writes to bottom-level PTEs. + * NB1. Page-directory entries cannot be written. + * NB2. Guest must continue to remove all writable mappings of PTEs. + */ +#define VMASST_TYPE_writable_pagetables 2 + +/* x86/PAE guests: support PDPTs above 4GB. */ +#define VMASST_TYPE_pae_extended_cr3 3 + +/* + * x86 guests: Sane behaviour for virtual iopl + * - virtual iopl updated from do_iret() hypercalls. + * - virtual iopl reported in bounce frames. + * - guest kernels assumed to be level 0 for the purpose of iopl checks. + */ +#define VMASST_TYPE_architectural_iopl 4 + +/* + * All guests: activate update indicator in vcpu_runstate_info + * Enable setting the XEN_RUNSTATE_UPDATE flag in guest memory mapped + * vcpu_runstate_info during updates of the runstate information. + */ +#define VMASST_TYPE_runstate_update_flag 5 + +/* + * x86/64 guests: strictly hide M2P from user mode. + * This allows the guest to control respective hypervisor behavior: + * - when not set, L4 tables get created with the respective slot blank, + * and whenever the L4 table gets used as a kernel one the missing + * mapping gets inserted, + * - when set, L4 tables get created with the respective slot initialized + * as before, and whenever the L4 table gets used as a user one the + * mapping gets zapped. + */ +#define VMASST_TYPE_m2p_strict 32 + +#if __XEN_INTERFACE_VERSION__ < 0x00040600 +#define MAX_VMASST_TYPE 3 +#endif + +/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ +#define DOMID_FIRST_RESERVED xen_mk_uint(0x7FF0) + +/* DOMID_SELF is used in certain contexts to refer to oneself. */ +#define DOMID_SELF xen_mk_uint(0x7FF0) + +/* + * DOMID_IO is used to restrict page-table updates to mapping I/O memory. + * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO + * is useful to ensure that no mappings to the OS's own heap are accidentally + * installed. (e.g., in Linux this could cause havoc as reference counts + * aren't adjusted on the I/O-mapping code path). + * This only makes sense as HYPERVISOR_mmu_update()'s and + * HYPERVISOR_update_va_mapping_otherdomain()'s "foreigndom" argument. For + * HYPERVISOR_mmu_update() context it can be specified by any calling domain, + * otherwise it's only permitted if the caller is privileged. + */ +#define DOMID_IO xen_mk_uint(0x7FF1) + +/* + * DOMID_XEN is used to allow privileged domains to map restricted parts of + * Xen's heap space (e.g., the machine_to_phys table). + * This only makes sense as + * - HYPERVISOR_mmu_update()'s, HYPERVISOR_mmuext_op()'s, or + * HYPERVISOR_update_va_mapping_otherdomain()'s "foreigndom" argument, + * - with XENMAPSPACE_gmfn_foreign, + * and is only permitted if the caller is privileged. + */ +#define DOMID_XEN xen_mk_uint(0x7FF2) + +/* + * DOMID_COW is used as the owner of sharable pages */ +#define DOMID_COW xen_mk_uint(0x7FF3) + +/* DOMID_INVALID is used to identify pages with unknown owner. */ +#define DOMID_INVALID xen_mk_uint(0x7FF4) + +/* Idle domain. */ +#define DOMID_IDLE xen_mk_uint(0x7FFF) + +/* Mask for valid domain id values */ +#define DOMID_MASK xen_mk_uint(0x7FFF) + +#ifndef __ASSEMBLY__ + +typedef uint16_t domid_t; + +/* + * Send an array of these to HYPERVISOR_mmu_update(). + * NB. 
The fields are natural pointer/address size for this architecture. + */ +struct mmu_update { + uint64_t ptr; /* Machine address of PTE. */ + uint64_t val; /* New contents of PTE. */ +}; +typedef struct mmu_update mmu_update_t; +DEFINE_XEN_GUEST_HANDLE(mmu_update_t); + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_multicall(multicall_entry_t call_list[], + * ` uint32_t nr_calls); + * + * NB. The fields are logically the natural register size for this + * architecture. In cases where xen_ulong_t is larger than this then + * any unused bits in the upper portion must be zero. + */ +struct multicall_entry { + xen_ulong_t op, result; + xen_ulong_t args[6]; +}; +typedef struct multicall_entry multicall_entry_t; +DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); + +#if __XEN_INTERFACE_VERSION__ < 0x00040400 +/* + * Event channel endpoints per domain (when using the 2-level ABI): + * 1024 if a long is 32 bits; 4096 if a long is 64 bits. + */ +#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS +#endif + +struct vcpu_time_info { + /* + * Updates to the following values are preceded and followed by an + * increment of 'version'. The guest can therefore detect updates by + * looking for changes to 'version'. If the least-significant bit of + * the version number is set then an update is in progress and the guest + * must wait to read a consistent set of values. + * The correct way to interact with the version number is similar to + * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry. + */ + uint32_t version; + uint32_t pad0; + uint64_t tsc_timestamp; /* TSC at last update of time vals. */ + uint64_t system_time; /* Time, in nanosecs, since boot. */ + /* + * Current system time: + * system_time + + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32) + * CPU frequency (Hz): + * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift + */ + uint32_t tsc_to_system_mul; + int8_t tsc_shift; +#if __XEN_INTERFACE_VERSION__ > 0x040600 + uint8_t flags; + uint8_t pad1[2]; +#else + int8_t pad1[3]; +#endif +}; /* 32 bytes */ +typedef struct vcpu_time_info vcpu_time_info_t; + +#define XEN_PVCLOCK_TSC_STABLE_BIT (1 << 0) +#define XEN_PVCLOCK_GUEST_STOPPED (1 << 1) + +struct vcpu_info { + /* + * 'evtchn_upcall_pending' is written non-zero by Xen to indicate + * a pending notification for a particular VCPU. It is then cleared + * by the guest OS /before/ checking for pending work, thus avoiding + * a set-and-check race. Note that the mask is only accessed by Xen + * on the CPU that is currently hosting the VCPU. This means that the + * pending and mask flags can be updated by the guest without special + * synchronisation (i.e., no need for the x86 LOCK prefix). + * This may seem suboptimal because if the pending flag is set by + * a different CPU then an IPI may be scheduled even when the mask + * is set. However, note: + * 1. The task of 'interrupt holdoff' is covered by the per-event- + * channel mask bits. A 'noisy' event that is continually being + * triggered can be masked at source at this very precise + * granularity. + * 2. The main purpose of the per-VCPU mask is therefore to restrict + * reentrant execution: whether for concurrency control, or to + * prevent unbounded stack usage. Whatever the purpose, we expect + * that the mask will be asserted only for short periods at a time, + * and so the likelihood of a 'spurious' IPI is suitably small. 
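A hedged sketch of the seqlock-style read that the vcpu_time_info comment above prescribes, together with the documented system-time formula. Memory barriers are elided and the 128-bit multiply is a GCC extension, so treat this as illustrative only:

    static uint64_t xen_system_time(volatile struct vcpu_time_info *t,
                                    uint64_t tsc)
    {
        uint32_t ver, mul;
        uint64_t stamp, sys;
        int8_t shift;

        do {  /* retry while an update is in flight (odd version) */
            ver   = t->version;
            stamp = t->tsc_timestamp;
            sys   = t->system_time;
            mul   = t->tsc_to_system_mul;
            shift = t->tsc_shift;
        } while ((ver & 1) || ver != t->version);

        uint64_t delta = tsc - stamp;
        delta = (shift >= 0) ? (delta << shift) : (delta >> -shift);
        return sys + (uint64_t)(((unsigned __int128)delta * mul) >> 32);
    }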
+ * The mask is read before making an event upcall to the guest: a
+ * non-zero mask therefore guarantees that the VCPU will not receive
+ * an upcall activation. The mask is cleared when the VCPU requests
+ * to block: this avoids wakeup-waiting races.
+ */
+    uint8_t evtchn_upcall_pending;
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+    uint8_t evtchn_upcall_mask;
+#else /* XEN_HAVE_PV_UPCALL_MASK */
+    uint8_t pad0;
+#endif /* XEN_HAVE_PV_UPCALL_MASK */
+    xen_ulong_t evtchn_pending_sel;
+    struct arch_vcpu_info arch;
+    vcpu_time_info_t time;
+}; /* 64 bytes (x86) */
+#ifndef __XEN__
+typedef struct vcpu_info vcpu_info_t;
+#endif
+
+/*
+ * `incontents 200 startofday_shared Start-of-day shared data structure
+ * Xen/kernel shared data -- pointer provided in start_info.
+ *
+ * This structure is defined to be both smaller than a page, and the
+ * only data on the shared page, but may vary in actual size even within
+ * compatible Xen versions; guests should not rely on the size
+ * of this structure remaining constant.
+ */
+struct shared_info {
+    struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
+
+    /*
+     * A domain can create "event channels" on which it can send and receive
+     * asynchronous event notifications. There are three classes of event that
+     * are delivered by this mechanism:
+     *  1. Bi-directional inter- and intra-domain connections. Domains must
+     *     arrange out-of-band to set up a connection (usually by allocating
+     *     an unbound 'listener' port and advertising that via a storage
+     *     service such as xenstore).
+     *  2. Physical interrupts. A domain with suitable hardware-access
+     *     privileges can bind an event-channel port to a physical interrupt
+     *     source.
+     *  3. Virtual interrupts ('events'). A domain can bind an event-channel
+     *     port to a virtual interrupt source, such as the virtual-timer
+     *     device or the emergency console.
+     *
+     * Event channels are addressed by a "port index". Each channel is
+     * associated with two bits of information:
+     *  1. PENDING -- notifies the domain that there is a pending notification
+     *     to be processed. This bit is cleared by the guest.
+     *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
+     *     will cause an asynchronous upcall to be scheduled. This bit is only
+     *     updated by the guest. It is read-only within Xen. If a channel
+     *     becomes pending while the channel is masked then the 'edge' is lost
+     *     (i.e., when the channel is unmasked, the guest must manually handle
+     *     pending notifications as no upcall will be scheduled by Xen).
+     *
+     * To expedite scanning of pending notifications, any 0->1 pending
+     * transition on an unmasked channel causes a corresponding bit in a
+     * per-vcpu selector word to be set. Each bit in the selector covers a
+     * 'C long' in the PENDING bitfield array.
+     */
+    xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
+    xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
+
+    /*
+     * Wallclock time: updated by control software or RTC emulation.
+     * Guests should base their gettimeofday() syscall on this
+     * wallclock-base value.
+     * The values of wc_sec and wc_nsec are offsets from the Unix epoch
+     * adjusted by the domain's 'time offset' (in seconds) as set either
+     * by XEN_DOMCTL_settimeoffset, or adjusted via a guest write to the
+     * emulated RTC.
+     */
+    uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
*/ + uint32_t wc_sec; + uint32_t wc_nsec; +#if !defined(__i386__) + uint32_t wc_sec_hi; +# define xen_wc_sec_hi wc_sec_hi +#elif !defined(__XEN__) && !defined(__XEN_TOOLS__) +# define xen_wc_sec_hi arch.wc_sec_hi +#endif + + struct arch_shared_info arch; + +}; +#ifndef __XEN__ +typedef struct shared_info shared_info_t; +#endif + +/* + * `incontents 200 startofday Start-of-day memory layout + * + * 1. The domain is started within a contiguous virtual-memory region. + * 2. The contiguous region ends on an aligned 4MB boundary. + * 3. This is the order of bootstrap elements in the initial virtual region: + * a. relocated kernel image + * b. initial ram disk [mod_start, mod_len] + * (may be omitted) + * c. list of allocated page frames [mfn_list, nr_pages] + * (unless relocated due to XEN_ELFNOTE_INIT_P2M) + * d. start_info_t structure [register rSI (x86)] + * in case of dom0 this page contains the console info, too + * e. unless dom0: xenstore ring page + * f. unless dom0: console ring page + * g. bootstrap page tables [pt_base and CR3 (x86)] + * h. bootstrap stack [register ESP (x86)] + * 4. Bootstrap elements are packed together, but each is 4kB-aligned. + * 5. The list of page frames forms a contiguous 'pseudo-physical' memory + * layout for the domain. In particular, the bootstrap virtual-memory + * region is a 1:1 mapping to the first section of the pseudo-physical map. + * 6. All bootstrap elements are mapped read-writable for the guest OS. The + * only exception is the bootstrap page table, which is mapped read-only. + * 7. There is guaranteed to be at least 512kB padding after the final + * bootstrap element. If necessary, the bootstrap virtual region is + * extended by an extra 4MB to ensure this. + * + * Note: Prior to 25833:bb85bbccb1c9 ("x86/32-on-64 adjust Dom0 initial page + * table layout"), a bug caused the pt_base (3.g above) and cr3 to not point + * to the start of the guest page tables (they were offset by two pages). + * This only manifested itself on 32-on-64 dom0 kernels and not 32-on-64 domU + * or 64-bit kernels of any colour. The page tables for a 32-on-64 dom0 got + * allocated in the order 'first L1', 'first L2', 'first L3', so the page + * table base ended up offset back by two pages. An initial domain that is + * 32-bit and runs under a 64-bit hypervisor should _NOT_ use the two pages + * preceding pt_base, and should mark them as reserved/unused. + */ +#ifdef XEN_HAVE_PV_GUEST_ENTRY +struct start_info { + /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ + char magic[32]; /* "xen--". */ + unsigned long nr_pages; /* Total pages allocated to this domain. */ + unsigned long shared_info; /* MACHINE address of shared info struct. */ + uint32_t flags; /* SIF_xxx flags. */ + xen_pfn_t store_mfn; /* MACHINE page number of shared page. */ + uint32_t store_evtchn; /* Event channel for store communication. */ + union { + struct { + xen_pfn_t mfn; /* MACHINE page number of console page. */ + uint32_t evtchn; /* Event channel for console page. */ + } domU; + struct { + uint32_t info_off; /* Offset of console_info struct. */ + uint32_t info_size; /* Size of console_info struct from start.*/ + } dom0; + } console; + /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ + unsigned long pt_base; /* VIRTUAL address of page directory. */ + unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ + unsigned long mfn_list; /* VIRTUAL address of page-frame list.
*/ + unsigned long mod_start; /* VIRTUAL address of pre-loaded module */ + /* (PFN of pre-loaded module if */ + /* SIF_MOD_START_PFN set in flags). */ + unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ +#define MAX_GUEST_CMDLINE 1024 + int8_t cmd_line[MAX_GUEST_CMDLINE]; + /* The pfn range here covers both page table and p->m table frames. */ + unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */ + unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */ +}; +typedef struct start_info start_info_t; + +/* New console union for dom0 introduced in 0x00030203. */ +#if __XEN_INTERFACE_VERSION__ < 0x00030203 +#define console_mfn console.domU.mfn +#define console_evtchn console.domU.evtchn +#endif +#endif /* XEN_HAVE_PV_GUEST_ENTRY */ + +/* These flags are passed in the 'flags' field of start_info_t. */ +#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ +#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ +#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */ +#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */ +#define SIF_VIRT_P2M_4TOOLS (1<<4) /* Do Xen tools understand a virt. mapped */ + /* P->M making the 3 level tree obsolete? */ +#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ + +/* + * A multiboot module is a package containing modules very similar to a + * multiboot module array. The only differences are: + * - the array of module descriptors is by convention simply at the beginning + * of the multiboot module, + * - addresses in the module descriptors are based on the beginning of the + * multiboot module, + * - the number of modules is determined by a termination descriptor that has + * mod_start == 0. + * + * This permits both building it statically and referencing it in a + * configuration file, and lets the PV guest easily rebase the addresses to + * virtual addresses and count the number of modules at the same time. + */ +struct xen_multiboot_mod_list +{ + /* Address of first byte of the module */ + uint32_t mod_start; + /* Address of last byte of the module (inclusive) */ + uint32_t mod_end; + /* Address of zero-terminated command line */ + uint32_t cmdline; + /* Unused, must be zero */ + uint32_t pad; +}; +/* + * `incontents 200 startofday_dom0_console Dom0_console + * + * The console structure in start_info.console.dom0 + * + * This structure includes a variety of information required to + * have a working VGA/VESA console. + */ +typedef struct dom0_vga_console_info { + uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ +#define XEN_VGATYPE_TEXT_MODE_3 0x03 +#define XEN_VGATYPE_VESA_LFB 0x23 +#define XEN_VGATYPE_EFI_LFB 0x70 + + union { + struct { + /* Font height, in pixels. */ + uint16_t font_height; + /* Cursor location (column, row). */ + uint16_t cursor_x, cursor_y; + /* Number of rows and columns (dimensions in characters). */ + uint16_t rows, columns; + } text_mode_3; + + struct { + /* Width and height, in pixels. */ + uint16_t width, height; + /* Bytes per scan line. */ + uint16_t bytes_per_line; + /* Bits per pixel. */ + uint16_t bits_per_pixel; + /* LFB physical address, and size (in units of 64kB). */ + uint32_t lfb_base; + uint32_t lfb_size; + /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ + uint8_t red_pos, red_size; + uint8_t green_pos, green_size; + uint8_t blue_pos, blue_size; + uint8_t rsvd_pos, rsvd_size; +#if __XEN_INTERFACE_VERSION__ >= 0x00030206 + /* VESA capabilities (offset 0xa, VESA command 0x4f00).
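As a small illustration of the termination convention just described (an editorial sketch, not in the patch), counting the modules in a package amounts to walking the descriptor array until mod_start == 0; a PV guest would add its own virtual base to mod_start, mod_end and cmdline before using them.

    static unsigned int count_multiboot_mods(const struct xen_multiboot_mod_list *l)
    {
        unsigned int n = 0;

        /* Descriptors sit at the start of the package; a descriptor with
         * mod_start == 0 terminates the list, so no explicit count is
         * carried anywhere. */
        while (l[n].mod_start != 0) {
            n++;
        }
        return n;
    }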
*/ + uint32_t gbl_caps; + /* Mode attributes (offset 0x0, VESA command 0x4f01). */ + uint16_t mode_attrs; + uint16_t pad; +#endif +#if __XEN_INTERFACE_VERSION__ >= 0x00040d00 + /* high 32 bits of lfb_base */ + uint32_t ext_lfb_base; +#endif + } vesa_lfb; + } u; +} dom0_vga_console_info_t; +#define xen_vga_console_info dom0_vga_console_info +#define xen_vga_console_info_t dom0_vga_console_info_t + +typedef uint8_t xen_domain_handle_t[16]; + +__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t); +__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t); +__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t); +__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t); + +typedef struct { + uint8_t a[16]; +} xen_uuid_t; + +/* + * XEN_DEFINE_UUID(0x00112233, 0x4455, 0x6677, 0x8899, + * 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff) + * will construct UUID 00112233-4455-6677-8899-aabbccddeeff presented as + * {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + * 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}; + * + * NB: This is compatible with Linux kernel and with libuuid, but it is not + * compatible with Microsoft, as they use mixed-endian encoding (some + * components are little-endian, some are big-endian). + */ +#define XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6) \ + {{((a) >> 24) & 0xFF, ((a) >> 16) & 0xFF, \ + ((a) >> 8) & 0xFF, ((a) >> 0) & 0xFF, \ + ((b) >> 8) & 0xFF, ((b) >> 0) & 0xFF, \ + ((c) >> 8) & 0xFF, ((c) >> 0) & 0xFF, \ + ((d) >> 8) & 0xFF, ((d) >> 0) & 0xFF, \ + e1, e2, e3, e4, e5, e6}} + +#if defined(__STDC_VERSION__) ? __STDC_VERSION__ >= 199901L : defined(__GNUC__) +#define XEN_DEFINE_UUID(a, b, c, d, e1, e2, e3, e4, e5, e6) \ + ((xen_uuid_t)XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6)) +#else +#define XEN_DEFINE_UUID(a, b, c, d, e1, e2, e3, e4, e5, e6) \ + XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6) +#endif /* __STDC_VERSION__ / __GNUC__ */ + +#endif /* !__ASSEMBLY__ */ + +/* Default definitions for macros used by domctl/sysctl. 
*/ +#if defined(__XEN__) || defined(__XEN_TOOLS__) + +#ifndef int64_aligned_t +#define int64_aligned_t int64_t +#endif +#ifndef uint64_aligned_t +#define uint64_aligned_t uint64_t +#endif +#ifndef XEN_GUEST_HANDLE_64 +#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) +#endif + +#ifndef __ASSEMBLY__ +struct xenctl_bitmap { + XEN_GUEST_HANDLE_64(uint8) bitmap; + uint32_t nr_bits; +}; +typedef struct xenctl_bitmap xenctl_bitmap_t; +#endif + +#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ + +#endif /* __XEN_PUBLIC_XEN_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/xen-bus-helper.h b/include/hw/xen/xen-bus-helper.h index 8782f30550..d8dcc2f010 100644 --- a/include/hw/xen/xen-bus-helper.h +++ b/include/hw/xen/xen-bus-helper.h @@ -8,40 +8,40 @@ #ifndef HW_XEN_BUS_HELPER_H #define HW_XEN_BUS_HELPER_H -#include "hw/xen/xen_common.h" +#include "hw/xen/xen_backend_ops.h" const char *xs_strstate(enum xenbus_state state); -void xs_node_create(struct xs_handle *xsh, xs_transaction_t tid, - const char *node, struct xs_permissions perms[], - unsigned int nr_perms, Error **errp); -void xs_node_destroy(struct xs_handle *xsh, xs_transaction_t tid, +void xs_node_create(struct qemu_xs_handle *h, xs_transaction_t tid, + const char *node, unsigned int owner, unsigned int domid, + unsigned int perms, Error **errp); +void xs_node_destroy(struct qemu_xs_handle *h, xs_transaction_t tid, const char *node, Error **errp); /* Write to node/key unless node is empty, in which case write to key */ -void xs_node_vprintf(struct xs_handle *xsh, xs_transaction_t tid, +void xs_node_vprintf(struct qemu_xs_handle *h, xs_transaction_t tid, const char *node, const char *key, Error **errp, const char *fmt, va_list ap) G_GNUC_PRINTF(6, 0); -void xs_node_printf(struct xs_handle *xsh, xs_transaction_t tid, +void xs_node_printf(struct qemu_xs_handle *h, xs_transaction_t tid, const char *node, const char *key, Error **errp, const char *fmt, ...) G_GNUC_PRINTF(6, 7); /* Read from node/key unless node is empty, in which case read from key */ -int xs_node_vscanf(struct xs_handle *xsh, xs_transaction_t tid, +int xs_node_vscanf(struct qemu_xs_handle *h, xs_transaction_t tid, const char *node, const char *key, Error **errp, const char *fmt, va_list ap) G_GNUC_SCANF(6, 0); -int xs_node_scanf(struct xs_handle *xsh, xs_transaction_t tid, +int xs_node_scanf(struct qemu_xs_handle *h, xs_transaction_t tid, const char *node, const char *key, Error **errp, const char *fmt, ...) 
G_GNUC_SCANF(6, 7); /* Watch node/key unless node is empty, in which case watch key */ -void xs_node_watch(struct xs_handle *xsh, const char *node, const char *key, - char *token, Error **errp); -void xs_node_unwatch(struct xs_handle *xsh, const char *node, const char *key, - const char *token, Error **errp); +struct qemu_xs_watch *xs_node_watch(struct qemu_xs_handle *h, const char *node, + const char *key, xs_watch_fn fn, + void *opaque, Error **errp); +void xs_node_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w); #endif /* HW_XEN_BUS_HELPER_H */ diff --git a/include/hw/xen/xen-bus.h b/include/hw/xen/xen-bus.h index 4d966a2dbb..f435898164 100644 --- a/include/hw/xen/xen-bus.h +++ b/include/hw/xen/xen-bus.h @@ -8,31 +8,25 @@ #ifndef HW_XEN_BUS_H #define HW_XEN_BUS_H -#include "hw/xen/xen_common.h" +#include "hw/xen/xen_backend_ops.h" #include "hw/sysbus.h" #include "qemu/notify.h" #include "qom/object.h" -typedef void (*XenWatchHandler)(void *opaque); - -typedef struct XenWatchList XenWatchList; -typedef struct XenWatch XenWatch; typedef struct XenEventChannel XenEventChannel; struct XenDevice { DeviceState qdev; domid_t frontend_id; char *name; - struct xs_handle *xsh; - XenWatchList *watch_list; + struct qemu_xs_handle *xsh; char *backend_path, *frontend_path; enum xenbus_state backend_state, frontend_state; Notifier exit; - XenWatch *backend_state_watch, *frontend_state_watch; + struct qemu_xs_watch *backend_state_watch, *frontend_state_watch; bool backend_online; - XenWatch *backend_online_watch; + struct qemu_xs_watch *backend_online_watch; xengnttab_handle *xgth; - bool feature_grant_copy; bool inactive; QLIST_HEAD(, XenEventChannel) event_channels; QLIST_ENTRY(XenDevice) list; @@ -64,10 +58,9 @@ OBJECT_DECLARE_TYPE(XenDevice, XenDeviceClass, XEN_DEVICE) struct XenBus { BusState qbus; domid_t backend_id; - struct xs_handle *xsh; - XenWatchList *watch_list; + struct qemu_xs_handle *xsh; unsigned int backend_types; - XenWatch **backend_watch; + struct qemu_xs_watch **backend_watch; QLIST_HEAD(, XenDevice) inactive_devices; }; @@ -102,7 +95,7 @@ void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs, void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs, unsigned int nr_refs, int prot, Error **errp); -void xen_device_unmap_grant_refs(XenDevice *xendev, void *map, +void xen_device_unmap_grant_refs(XenDevice *xendev, void *map, uint32_t *refs, unsigned int nr_refs, Error **errp); typedef struct XenDeviceGrantCopySegment { diff --git a/include/hw/xen/xen-legacy-backend.h b/include/hw/xen/xen-legacy-backend.h index be281e1f38..6c307c5f2c 100644 --- a/include/hw/xen/xen-legacy-backend.h +++ b/include/hw/xen/xen-legacy-backend.h @@ -1,7 +1,7 @@ #ifndef HW_XEN_LEGACY_BACKEND_H #define HW_XEN_LEGACY_BACKEND_H -#include "hw/xen/xen_common.h" +#include "hw/xen/xen_backend_ops.h" #include "hw/xen/xen_pvdev.h" #include "net/net.h" #include "qom/object.h" @@ -15,7 +15,7 @@ DECLARE_INSTANCE_CHECKER(XenLegacyDevice, XENBACKEND, TYPE_XENBACKEND) /* variables */ -extern struct xs_handle *xenstore; +extern struct qemu_xs_handle *xenstore; extern const char *xen_protocol; extern DeviceState *xen_sysdev; extern BusState *xen_sysbus; @@ -30,9 +30,6 @@ int xenstore_write_be_int64(struct XenLegacyDevice *xendev, const char *node, char *xenstore_read_be_str(struct XenLegacyDevice *xendev, const char *node); int xenstore_read_be_int(struct XenLegacyDevice *xendev, const char *node, int *ival); -void xenstore_update_fe(char *watch, struct XenLegacyDevice *xendev); -void 
xenstore_update_be(char *watch, char *type, int dom, - struct XenDevOps *ops); char *xenstore_read_fe_str(struct XenLegacyDevice *xendev, const char *node); int xenstore_read_fe_int(struct XenLegacyDevice *xendev, const char *node, int *ival); @@ -42,8 +39,7 @@ int xenstore_read_fe_uint64(struct XenLegacyDevice *xendev, const char *node, void xen_be_check_state(struct XenLegacyDevice *xendev); /* xen backend driver bits */ -int xen_be_init(void); -void xen_be_register_common(void); +void xen_be_init(void); int xen_be_register(const char *type, struct XenDevOps *ops); int xen_be_set_state(struct XenLegacyDevice *xendev, enum xenbus_state state); int xen_be_bind_evtchn(struct XenLegacyDevice *xendev); @@ -52,18 +48,7 @@ void xen_be_set_max_grant_refs(struct XenLegacyDevice *xendev, void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs, unsigned int nr_refs, int prot); void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr, - unsigned int nr_refs); - -typedef struct XenGrantCopySegment { - union { - void *virt; - struct { - uint32_t ref; - off_t offset; - } foreign; - } source, dest; - size_t len; -} XenGrantCopySegment; + uint32_t *refs, unsigned int nr_refs); int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev, bool to_domain, XenGrantCopySegment segs[], @@ -76,9 +61,9 @@ static inline void *xen_be_map_grant_ref(struct XenLegacyDevice *xendev, } static inline void xen_be_unmap_grant_ref(struct XenLegacyDevice *xendev, - void *ptr) + void *ptr, uint32_t ref) { - return xen_be_unmap_grant_refs(xendev, ptr, 1); + return xen_be_unmap_grant_refs(xendev, ptr, &ref, 1); } /* actual backend drivers */ diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h index afdf9c436a..2bd8ec742d 100644 --- a/include/hw/xen/xen.h +++ b/include/hw/xen/xen.h @@ -1,19 +1,36 @@ -#ifndef QEMU_HW_XEN_H -#define QEMU_HW_XEN_H - /* * public xen header * stuff needed outside xen-*.c, i.e. interfaces to qemu. * must not depend on any xen headers being present in * /usr/include/xen, so it can be included unconditionally. */ +#ifndef QEMU_HW_XEN_H +#define QEMU_HW_XEN_H + +/* + * C files using Xen toolstack libraries will have included those headers + * already via xen_native.h, and having __XEN_TOOLS__ defined will have + * automatically set __XEN_INTERFACE_VERSION__ to the latest supported + * by the *system* Xen headers which were transitively included. + * + * C files which are part of the internal emulation, and which did not + * include xen_native.h, may need this defined so that the Xen headers + * imported to include/hw/xen/interface/ will expose the appropriate API + * version. + * + * This is why there's a rule that xen_native.h must be included first.
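A minimal sketch of the include-order rule stated above (an editorial illustration; the companion header named after xen_native.h is just a placeholder): a file that talks to the real toolstack libraries includes xen_native.h before anything else that drags in Xen headers, while purely emulated code includes only hw/xen/xen.h and picks up the fallback version defined below.

    /* In a Xen "native" file (links against the toolstack libraries): */
    #include "hw/xen/xen_native.h"   /* first: sets __XEN_INTERFACE_VERSION__
                                      * from the system Xen headers */
    #include "hw/xen/xen-bus.h"      /* other Xen-related headers follow */

    /* In an internal-emulation file: xen_native.h is not included, so the
     * fallback definition of __XEN_INTERFACE_VERSION__ below applies. */
    #include "hw/xen/xen.h"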
+ */ +#ifndef __XEN_INTERFACE_VERSION__ +#define __XEN_INTERFACE_VERSION__ 0x00040e00 +#endif #include "exec/cpu-common.h" /* xen-machine.c */ enum xen_mode { - XEN_EMULATE = 0, // xen emulation, using xenner (default) - XEN_ATTACH // attach to xen domain created by libxl + XEN_DISABLED = 0, /* xen support disabled (default) */ + XEN_ATTACH, /* attach to xen domain created by libxl */ + XEN_EMULATE, /* emulate Xen within QEMU */ }; extern uint32_t xen_domid; @@ -28,8 +45,6 @@ int xen_is_pirq_msi(uint32_t msi_data); qemu_irq *xen_interrupt_controller_init(void); -void xenstore_store_pv_console_info(int i, Chardev *chr); - void xen_register_framebuffer(struct MemoryRegion *mr); #endif /* QEMU_HW_XEN_H */ diff --git a/include/hw/xen/xen_backend_ops.h b/include/hw/xen/xen_backend_ops.h new file mode 100644 index 0000000000..90cca85f52 --- /dev/null +++ b/include/hw/xen/xen_backend_ops.h @@ -0,0 +1,408 @@ +/* + * QEMU Xen backend support + * + * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Authors: David Woodhouse + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_XEN_BACKEND_OPS_H +#define QEMU_XEN_BACKEND_OPS_H + +#include "hw/xen/xen.h" +#include "hw/xen/interface/xen.h" +#include "hw/xen/interface/io/xenbus.h" + +/* + * For the time being, these operations map fairly closely to the API of + * the actual Xen libraries, e.g. libxenevtchn. As we complete the migration + * from XenLegacyDevice back ends to the new XenDevice model, they may + * evolve to slightly higher-level APIs. + * + * The internal emulations do not emulate the Xen APIs entirely faithfully; + * only enough to be used by the Xen backend devices. For example, only one + * event channel can be bound to each handle, since that's sufficient for + * the device support (only the true Xen HVM backend uses more). And the + * behaviour of unmask() and pending() is different too because the device + * backends don't care. 
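To show how this indirection is meant to be consumed (an editorial sketch, not part of the patch; backend_bind_channel() is hypothetical), a device backend calls the qemu_xen_evtchn_* wrappers defined below and gets a clean NULL/-ENOSYS failure when no ops table has been installed, instead of a crash.

    static int backend_bind_channel(uint32_t domid, evtchn_port_t remote_port)
    {
        xenevtchn_handle *xeh = qemu_xen_evtchn_open();
        int local_port;

        if (!xeh) {
            return -ENOSYS;         /* no ops table installed: no Xen support */
        }
        local_port = qemu_xen_evtchn_bind_interdomain(xeh, domid, remote_port);
        if (local_port < 0) {
            qemu_xen_evtchn_close(xeh);
            return local_port;
        }
        /* ...poll qemu_xen_evtchn_fd(xeh) and handle notifications... */
        return local_port;
    }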
+ */ + +typedef struct xenevtchn_handle xenevtchn_handle; +typedef int xenevtchn_port_or_error_t; +typedef uint32_t evtchn_port_t; +typedef uint16_t domid_t; +typedef uint32_t grant_ref_t; + +#define XEN_PAGE_SHIFT 12 +#define XEN_PAGE_SIZE (1UL << XEN_PAGE_SHIFT) +#define XEN_PAGE_MASK (~(XEN_PAGE_SIZE - 1)) + +#ifndef xen_rmb +#define xen_rmb() smp_rmb() +#endif +#ifndef xen_wmb +#define xen_wmb() smp_wmb() +#endif +#ifndef xen_mb +#define xen_mb() smp_mb() +#endif + +struct evtchn_backend_ops { + xenevtchn_handle *(*open)(void); + int (*bind_interdomain)(xenevtchn_handle *xc, uint32_t domid, + evtchn_port_t guest_port); + int (*unbind)(xenevtchn_handle *xc, evtchn_port_t port); + int (*close)(struct xenevtchn_handle *xc); + int (*get_fd)(struct xenevtchn_handle *xc); + int (*notify)(struct xenevtchn_handle *xc, evtchn_port_t port); + int (*unmask)(struct xenevtchn_handle *xc, evtchn_port_t port); + int (*pending)(struct xenevtchn_handle *xc); +}; + +extern struct evtchn_backend_ops *xen_evtchn_ops; + +static inline xenevtchn_handle *qemu_xen_evtchn_open(void) +{ + if (!xen_evtchn_ops) { + return NULL; + } + return xen_evtchn_ops->open(); +} + +static inline int qemu_xen_evtchn_bind_interdomain(xenevtchn_handle *xc, + uint32_t domid, + evtchn_port_t guest_port) +{ + if (!xen_evtchn_ops) { + return -ENOSYS; + } + return xen_evtchn_ops->bind_interdomain(xc, domid, guest_port); +} + +static inline int qemu_xen_evtchn_unbind(xenevtchn_handle *xc, + evtchn_port_t port) +{ + if (!xen_evtchn_ops) { + return -ENOSYS; + } + return xen_evtchn_ops->unbind(xc, port); +} + +static inline int qemu_xen_evtchn_close(xenevtchn_handle *xc) +{ + if (!xen_evtchn_ops) { + return -ENOSYS; + } + return xen_evtchn_ops->close(xc); +} + +static inline int qemu_xen_evtchn_fd(xenevtchn_handle *xc) +{ + if (!xen_evtchn_ops) { + return -ENOSYS; + } + return xen_evtchn_ops->get_fd(xc); +} + +static inline int qemu_xen_evtchn_notify(xenevtchn_handle *xc, + evtchn_port_t port) +{ + if (!xen_evtchn_ops) { + return -ENOSYS; + } + return xen_evtchn_ops->notify(xc, port); +} + +static inline int qemu_xen_evtchn_unmask(xenevtchn_handle *xc, + evtchn_port_t port) +{ + if (!xen_evtchn_ops) { + return -ENOSYS; + } + return xen_evtchn_ops->unmask(xc, port); +} + +static inline int qemu_xen_evtchn_pending(xenevtchn_handle *xc) +{ + if (!xen_evtchn_ops) { + return -ENOSYS; + } + return xen_evtchn_ops->pending(xc); +} + +typedef struct xengntdev_handle xengnttab_handle; + +typedef struct XenGrantCopySegment { + union { + void *virt; + struct { + uint32_t ref; + off_t offset; + } foreign; + } source, dest; + size_t len; +} XenGrantCopySegment; + +#define XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE (1U << 0) + +struct gnttab_backend_ops { + uint32_t features; + xengnttab_handle *(*open)(void); + int (*close)(xengnttab_handle *xgt); + int (*grant_copy)(xengnttab_handle *xgt, bool to_domain, uint32_t domid, + XenGrantCopySegment *segs, uint32_t nr_segs, + Error **errp); + int (*set_max_grants)(xengnttab_handle *xgt, uint32_t nr_grants); + void *(*map_refs)(xengnttab_handle *xgt, uint32_t count, uint32_t domid, + uint32_t *refs, int prot); + int (*unmap)(xengnttab_handle *xgt, void *start_address, uint32_t *refs, + uint32_t count); +}; + +extern struct gnttab_backend_ops *xen_gnttab_ops; + +static inline bool qemu_xen_gnttab_can_map_multi(void) +{ + return xen_gnttab_ops && + !!(xen_gnttab_ops->features & XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE); +} + +static inline xengnttab_handle *qemu_xen_gnttab_open(void) +{ + if (!xen_gnttab_ops) { + return 
NULL; + } + return xen_gnttab_ops->open(); +} + +static inline int qemu_xen_gnttab_close(xengnttab_handle *xgt) +{ + if (!xen_gnttab_ops) { + return -ENOSYS; + } + return xen_gnttab_ops->close(xgt); +} + +static inline int qemu_xen_gnttab_grant_copy(xengnttab_handle *xgt, + bool to_domain, uint32_t domid, + XenGrantCopySegment *segs, + uint32_t nr_segs, Error **errp) +{ + if (!xen_gnttab_ops) { + return -ENOSYS; + } + + return xen_gnttab_ops->grant_copy(xgt, to_domain, domid, segs, nr_segs, + errp); +} + +static inline int qemu_xen_gnttab_set_max_grants(xengnttab_handle *xgt, + uint32_t nr_grants) +{ + if (!xen_gnttab_ops) { + return -ENOSYS; + } + return xen_gnttab_ops->set_max_grants(xgt, nr_grants); +} + +static inline void *qemu_xen_gnttab_map_refs(xengnttab_handle *xgt, + uint32_t count, uint32_t domid, + uint32_t *refs, int prot) +{ + if (!xen_gnttab_ops) { + return NULL; + } + return xen_gnttab_ops->map_refs(xgt, count, domid, refs, prot); +} + +static inline int qemu_xen_gnttab_unmap(xengnttab_handle *xgt, + void *start_address, uint32_t *refs, + uint32_t count) +{ + if (!xen_gnttab_ops) { + return -ENOSYS; + } + return xen_gnttab_ops->unmap(xgt, start_address, refs, count); +} + +struct foreignmem_backend_ops { + void *(*map)(uint32_t dom, void *addr, int prot, size_t pages, + xen_pfn_t *pfns, int *errs); + int (*unmap)(void *addr, size_t pages); +}; + +extern struct foreignmem_backend_ops *xen_foreignmem_ops; + +static inline void *qemu_xen_foreignmem_map(uint32_t dom, void *addr, int prot, + size_t pages, xen_pfn_t *pfns, + int *errs) +{ + if (!xen_foreignmem_ops) { + return NULL; + } + return xen_foreignmem_ops->map(dom, addr, prot, pages, pfns, errs); +} + +static inline int qemu_xen_foreignmem_unmap(void *addr, size_t pages) +{ + if (!xen_foreignmem_ops) { + return -ENOSYS; + } + return xen_foreignmem_ops->unmap(addr, pages); +} + +typedef void (*xs_watch_fn)(void *opaque, const char *path); + +struct qemu_xs_handle; +struct qemu_xs_watch; +typedef uint32_t xs_transaction_t; + +#define XBT_NULL 0 + +#define XS_PERM_NONE 0x00 +#define XS_PERM_READ 0x01 +#define XS_PERM_WRITE 0x02 + +struct xenstore_backend_ops { + struct qemu_xs_handle *(*open)(void); + void (*close)(struct qemu_xs_handle *h); + char *(*get_domain_path)(struct qemu_xs_handle *h, unsigned int domid); + char **(*directory)(struct qemu_xs_handle *h, xs_transaction_t t, + const char *path, unsigned int *num); + void *(*read)(struct qemu_xs_handle *h, xs_transaction_t t, + const char *path, unsigned int *len); + bool (*write)(struct qemu_xs_handle *h, xs_transaction_t t, + const char *path, const void *data, unsigned int len); + bool (*create)(struct qemu_xs_handle *h, xs_transaction_t t, + unsigned int owner, unsigned int domid, + unsigned int perms, const char *path); + bool (*destroy)(struct qemu_xs_handle *h, xs_transaction_t t, + const char *path); + struct qemu_xs_watch *(*watch)(struct qemu_xs_handle *h, const char *path, + xs_watch_fn fn, void *opaque); + void (*unwatch)(struct qemu_xs_handle *h, struct qemu_xs_watch *w); + xs_transaction_t (*transaction_start)(struct qemu_xs_handle *h); + bool (*transaction_end)(struct qemu_xs_handle *h, xs_transaction_t t, + bool abort); +}; + +extern struct xenstore_backend_ops *xen_xenstore_ops; + +static inline struct qemu_xs_handle *qemu_xen_xs_open(void) +{ + if (!xen_xenstore_ops) { + return NULL; + } + return xen_xenstore_ops->open(); +} + +static inline void qemu_xen_xs_close(struct qemu_xs_handle *h) +{ + if (!xen_xenstore_ops) { + return; + } + 
xen_xenstore_ops->close(h); +} + +static inline char *qemu_xen_xs_get_domain_path(struct qemu_xs_handle *h, + unsigned int domid) +{ + if (!xen_xenstore_ops) { + return NULL; + } + return xen_xenstore_ops->get_domain_path(h, domid); +} + +static inline char **qemu_xen_xs_directory(struct qemu_xs_handle *h, + xs_transaction_t t, const char *path, + unsigned int *num) +{ + if (!xen_xenstore_ops) { + return NULL; + } + return xen_xenstore_ops->directory(h, t, path, num); +} + +static inline void *qemu_xen_xs_read(struct qemu_xs_handle *h, + xs_transaction_t t, const char *path, + unsigned int *len) +{ + if (!xen_xenstore_ops) { + return NULL; + } + return xen_xenstore_ops->read(h, t, path, len); +} + +static inline bool qemu_xen_xs_write(struct qemu_xs_handle *h, + xs_transaction_t t, const char *path, + const void *data, unsigned int len) +{ + if (!xen_xenstore_ops) { + return false; + } + return xen_xenstore_ops->write(h, t, path, data, len); +} + +static inline bool qemu_xen_xs_create(struct qemu_xs_handle *h, + xs_transaction_t t, unsigned int owner, + unsigned int domid, unsigned int perms, + const char *path) +{ + if (!xen_xenstore_ops) { + return false; + } + return xen_xenstore_ops->create(h, t, owner, domid, perms, path); +} + +static inline bool qemu_xen_xs_destroy(struct qemu_xs_handle *h, + xs_transaction_t t, const char *path) +{ + if (!xen_xenstore_ops) { + return false; + } + return xen_xenstore_ops->destroy(h, t, path); +} + +static inline struct qemu_xs_watch *qemu_xen_xs_watch(struct qemu_xs_handle *h, + const char *path, + xs_watch_fn fn, + void *opaque) +{ + if (!xen_xenstore_ops) { + return NULL; + } + return xen_xenstore_ops->watch(h, path, fn, opaque); +} + +static inline void qemu_xen_xs_unwatch(struct qemu_xs_handle *h, + struct qemu_xs_watch *w) +{ + if (!xen_xenstore_ops) { + return; + } + xen_xenstore_ops->unwatch(h, w); +} + +static inline xs_transaction_t qemu_xen_xs_transaction_start(struct qemu_xs_handle *h) +{ + if (!xen_xenstore_ops) { + return XBT_NULL; + } + return xen_xenstore_ops->transaction_start(h); +} + +static inline bool qemu_xen_xs_transaction_end(struct qemu_xs_handle *h, + xs_transaction_t t, bool abort) +{ + if (!xen_xenstore_ops) { + return false; + } + return xen_xenstore_ops->transaction_end(h, t, abort); +} + +void setup_xen_backend_ops(void); + +#endif /* QEMU_XEN_BACKEND_OPS_H */ diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_native.h similarity index 89% rename from include/hw/xen/xen_common.h rename to include/hw/xen/xen_native.h index 9a13a756ae..6bcc83baf9 100644 --- a/include/hw/xen/xen_common.h +++ b/include/hw/xen/xen_native.h @@ -1,5 +1,9 @@ -#ifndef QEMU_HW_XEN_COMMON_H -#define QEMU_HW_XEN_COMMON_H +#ifndef QEMU_HW_XEN_NATIVE_H +#define QEMU_HW_XEN_NATIVE_H + +#ifdef __XEN_INTERFACE_VERSION__ +#error In Xen native files, include xen_native.h before other Xen headers +#endif /* * If we have new enough libxenctrl then we do not want/need these compat @@ -12,7 +16,6 @@ #include #include -#include "hw/xen/interface/io/xenbus.h" #include "hw/xen/xen.h" #include "hw/pci/pci_device.h" @@ -28,49 +31,12 @@ extern xc_interface *xen_xc; #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701 typedef xc_interface xenforeignmemory_handle; -typedef xc_evtchn xenevtchn_handle; -typedef xc_gnttab xengnttab_handle; -typedef evtchn_port_or_error_t xenevtchn_port_or_error_t; - -#define xenevtchn_open(l, f) xc_evtchn_open(l, f); -#define xenevtchn_close(h) xc_evtchn_close(h) -#define xenevtchn_fd(h) xc_evtchn_fd(h) -#define xenevtchn_pending(h) 
xc_evtchn_pending(h) -#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p) -#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p) -#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p) -#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p) - -#define xengnttab_open(l, f) xc_gnttab_open(l, f) -#define xengnttab_close(h) xc_gnttab_close(h) -#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n) -#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p) -#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n) -#define xengnttab_map_grant_refs(h, c, d, r, p) \ - xc_gnttab_map_grant_refs(h, c, d, r, p) -#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \ - xc_gnttab_map_domain_grant_refs(h, c, d, r, p) #define xenforeignmemory_open(l, f) xen_xc #define xenforeignmemory_close(h) -static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom, - int prot, size_t pages, - const xen_pfn_t arr[/*pages*/], - int err[/*pages*/]) -{ - if (err) - return xc_map_foreign_bulk(h, dom, prot, arr, err, pages); - else - return xc_map_foreign_pages(h, dom, prot, arr, pages); -} - -#define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE) - #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */ -#include -#include #include #endif @@ -660,31 +626,4 @@ static inline int xen_set_ioreq_server_state(domid_t dom, #endif -/* Xen before 4.8 */ - -#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800 - -struct xengnttab_grant_copy_segment { - union xengnttab_copy_ptr { - void *virt; - struct { - uint32_t ref; - uint16_t offset; - uint16_t domid; - } foreign; - } source, dest; - uint16_t len; - uint16_t flags; - int16_t status; -}; - -typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t; - -static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count, - xengnttab_grant_copy_segment_t *segs) -{ - return -ENOSYS; -} -#endif - -#endif /* QEMU_HW_XEN_COMMON_H */ +#endif /* QEMU_HW_XEN_NATIVE_H */ diff --git a/include/hw/xen/xen_pvdev.h b/include/hw/xen/xen_pvdev.h index 7cd4bc2b82..ddad4b9f36 100644 --- a/include/hw/xen/xen_pvdev.h +++ b/include/hw/xen/xen_pvdev.h @@ -1,7 +1,9 @@ #ifndef QEMU_HW_XEN_PVDEV_H #define QEMU_HW_XEN_PVDEV_H -#include "hw/xen/xen_common.h" +#include "hw/qdev-core.h" +#include "hw/xen/xen_backend_ops.h" + /* ------------------------------------------------------------- */ #define XEN_BUFSIZE 1024 @@ -38,6 +40,7 @@ struct XenLegacyDevice { char name[64]; int debug; + struct qemu_xs_watch *watch; enum xenbus_state be_state; enum xenbus_state fe_state; int online; @@ -63,7 +66,6 @@ int xenstore_write_int64(const char *base, const char *node, int64_t ival); char *xenstore_read_str(const char *base, const char *node); int xenstore_read_int(const char *base, const char *node, int *ival); int xenstore_read_uint64(const char *base, const char *node, uint64_t *uval); -void xenstore_update(void *unused); const char *xenbus_strstate(enum xenbus_state state); diff --git a/include/io/channel.h b/include/io/channel.h index 153fbd2904..229bf36910 100644 --- a/include/io/channel.h +++ b/include/io/channel.h @@ -301,10 +301,10 @@ ssize_t qio_channel_writev_full(QIOChannel *ioc, * Returns: 1 if all bytes were read, 0 if end-of-file * occurs without data, or -1 on error */ -int qio_channel_readv_all_eof(QIOChannel *ioc, - const struct iovec *iov, - size_t niov, - Error **errp); +int coroutine_mixed_fn qio_channel_readv_all_eof(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + Error **errp); 
/** * qio_channel_readv_all: @@ -328,10 +328,10 @@ int qio_channel_readv_all_eof(QIOChannel *ioc, * * Returns: 0 if all bytes were read, or -1 on error */ -int qio_channel_readv_all(QIOChannel *ioc, - const struct iovec *iov, - size_t niov, - Error **errp); +int coroutine_mixed_fn qio_channel_readv_all(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + Error **errp); /** @@ -353,10 +353,10 @@ int qio_channel_readv_all(QIOChannel *ioc, * * Returns: 0 if all bytes were written, or -1 on error */ -int qio_channel_writev_all(QIOChannel *ioc, - const struct iovec *iov, - size_t niov, - Error **errp); +int coroutine_mixed_fn qio_channel_writev_all(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + Error **errp); /** * qio_channel_readv: @@ -437,10 +437,10 @@ ssize_t qio_channel_write(QIOChannel *ioc, * Returns: 1 if all bytes were read, 0 if end-of-file occurs * without data, or -1 on error */ -int qio_channel_read_all_eof(QIOChannel *ioc, - char *buf, - size_t buflen, - Error **errp); +int coroutine_mixed_fn qio_channel_read_all_eof(QIOChannel *ioc, + char *buf, + size_t buflen, + Error **errp); /** * qio_channel_read_all: @@ -457,10 +457,10 @@ int qio_channel_read_all_eof(QIOChannel *ioc, * * Returns: 0 if all bytes were read, or -1 on error */ -int qio_channel_read_all(QIOChannel *ioc, - char *buf, - size_t buflen, - Error **errp); +int coroutine_mixed_fn qio_channel_read_all(QIOChannel *ioc, + char *buf, + size_t buflen, + Error **errp); /** * qio_channel_write_all: @@ -476,10 +476,10 @@ int qio_channel_read_all(QIOChannel *ioc, * * Returns: 0 if all bytes were written, or -1 on error */ -int qio_channel_write_all(QIOChannel *ioc, - const char *buf, - size_t buflen, - Error **errp); +int coroutine_mixed_fn qio_channel_write_all(QIOChannel *ioc, + const char *buf, + size_t buflen, + Error **errp); /** * qio_channel_set_blocking: @@ -757,6 +757,16 @@ void qio_channel_detach_aio_context(QIOChannel *ioc); void coroutine_fn qio_channel_yield(QIOChannel *ioc, GIOCondition condition); +/** + * qio_channel_wake_read: + * @ioc: the channel object + * + * If qio_channel_yield() is currently waiting for the channel to become + * readable, interrupt it and reenter immediately. This function is safe to call + * from any thread. 
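An editorial sketch of the intended interaction (not from the patch; shutdown_requested and read_available_data() are hypothetical): a reader coroutine parks in qio_channel_yield(), and another thread can kick it with qio_channel_wake_read() so that it re-checks its exit condition.

    /* Hypothetical flag, set by whichever thread requests shutdown. */
    static bool shutdown_requested;

    static void coroutine_fn reader_co(void *opaque)
    {
        QIOChannel *ioc = opaque;

        while (!qatomic_read(&shutdown_requested)) {
            /* Sleeps until the channel is readable, or until another
             * thread calls qio_channel_wake_read(ioc). */
            qio_channel_yield(ioc, G_IO_IN);
            read_available_data(ioc);
        }
    }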
+ */ +void qio_channel_wake_read(QIOChannel *ioc); + /** * qio_channel_wait: * @ioc: the channel object @@ -812,11 +822,11 @@ void qio_channel_set_aio_fd_handler(QIOChannel *ioc, * occurs without data, or -1 on error */ -int qio_channel_readv_full_all_eof(QIOChannel *ioc, - const struct iovec *iov, - size_t niov, - int **fds, size_t *nfds, - Error **errp); +int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + int **fds, size_t *nfds, + Error **errp); /** * qio_channel_readv_full_all: @@ -838,11 +848,11 @@ int qio_channel_readv_full_all_eof(QIOChannel *ioc, * Returns: 0 if all bytes were read, or -1 on error */ -int qio_channel_readv_full_all(QIOChannel *ioc, - const struct iovec *iov, - size_t niov, - int **fds, size_t *nfds, - Error **errp); +int coroutine_mixed_fn qio_channel_readv_full_all(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + int **fds, size_t *nfds, + Error **errp); /** * qio_channel_writev_full_all: @@ -872,11 +882,11 @@ int qio_channel_readv_full_all(QIOChannel *ioc, * Returns: 0 if all bytes were written, or -1 on error */ -int qio_channel_writev_full_all(QIOChannel *ioc, - const struct iovec *iov, - size_t niov, - int *fds, size_t nfds, - int flags, Error **errp); +int coroutine_mixed_fn qio_channel_writev_full_all(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + int *fds, size_t nfds, + int flags, Error **errp); /** * qio_channel_flush: diff --git a/include/migration/colo.h b/include/migration/colo.h index 5fbe1a6d5d..eaac07f26d 100644 --- a/include/migration/colo.h +++ b/include/migration/colo.h @@ -28,7 +28,6 @@ bool migration_in_colo_state(void); int migration_incoming_enable_colo(void); void migration_incoming_disable_colo(void); bool migration_incoming_colo_enabled(void); -void *colo_process_incoming_thread(void *opaque); bool migration_incoming_in_colo_state(void); COLOMode get_colo_mode(void); @@ -36,6 +35,21 @@ COLOMode get_colo_mode(void); /* failover */ void colo_do_failover(void); -void colo_checkpoint_notify(void *opaque); +/* + * colo_checkpoint_delay_set + * + * Handles a change of the x-checkpoint-delay migration parameter; called from + * migrate_params_apply() to notify the COLO module about the change. + */ +void colo_checkpoint_delay_set(void); + +/* + * Starts the COLO incoming process. Called from process_incoming_migration_co() + * after loading the state. + * + * Called with the BQL locked; may temporarily release the BQL.
+ */ +int coroutine_fn colo_incoming_co(void); + void colo_shutdown(void); #endif diff --git a/include/migration/misc.h b/include/migration/misc.h index 8b49841016..5ebe13b4b9 100644 --- a/include/migration/misc.h +++ b/include/migration/misc.h @@ -66,7 +66,6 @@ bool migration_has_finished(MigrationState *); bool migration_has_failed(MigrationState *); /* ...and after the device transmission */ bool migration_in_postcopy_after_devices(MigrationState *); -void migration_global_dump(Monitor *mon); /* True if incoming migration entered POSTCOPY_INCOMING_DISCARD */ bool migration_in_incoming_postcopy(void); /* True if incoming migration entered POSTCOPY_INCOMING_ADVISE */ diff --git a/include/migration/qemu-file-types.h b/include/migration/qemu-file-types.h index 2867e3da84..9ba163f333 100644 --- a/include/migration/qemu-file-types.h +++ b/include/migration/qemu-file-types.h @@ -35,7 +35,7 @@ void qemu_put_byte(QEMUFile *f, int v); void qemu_put_be16(QEMUFile *f, unsigned int v); void qemu_put_be32(QEMUFile *f, unsigned int v); void qemu_put_be64(QEMUFile *f, uint64_t v); -size_t qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size); +size_t coroutine_mixed_fn qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size); int qemu_get_byte(QEMUFile *f); @@ -161,10 +161,20 @@ static inline void qemu_get_sbe64s(QEMUFile *f, int64_t *pv) qemu_get_be64s(f, (uint64_t *)pv); } -size_t qemu_get_counted_string(QEMUFile *f, char buf[256]); +size_t coroutine_mixed_fn qemu_get_counted_string(QEMUFile *f, char buf[256]); void qemu_put_counted_string(QEMUFile *f, const char *name); -int qemu_file_rate_limit(QEMUFile *f); +/** + * migration_rate_exceeded: Check if we have exceeded rate for this interval + * + * Checks if we have already transferred more data than we are allowed + * in the current interval. + * + * @f: QEMUFile used for main migration channel + * + * Returns true if we should stop sending data for this interval.
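An editorial sketch of how a sender loop might use the predicate whose declaration follows (save_more_state() is a hypothetical stand-in for the iterative save handlers; this is not code from the patch):

    static void send_until_rate_limited(QEMUFile *f)
    {
        while (!migration_rate_exceeded(f)) {
            if (save_more_state(f) == 0) {  /* hypothetical iterator */
                break;                      /* nothing left to send */
            }
        }
        /* Budget for this interval is exhausted; retry on the next tick. */
    }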
+ */ +bool migration_rate_exceeded(QEMUFile *f); #endif diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h index 2220f14fc9..13f9a2dedb 100644 --- a/include/monitor/hmp.h +++ b/include/monitor/hmp.h @@ -114,6 +114,8 @@ void hmp_virtio_status(Monitor *mon, const QDict *qdict); void hmp_virtio_queue_status(Monitor *mon, const QDict *qdict); void hmp_vhost_queue_status(Monitor *mon, const QDict *qdict); void hmp_virtio_queue_element(Monitor *mon, const QDict *qdict); +void hmp_xen_event_inject(Monitor *mon, const QDict *qdict); +void hmp_xen_event_list(Monitor *mon, const QDict *qdict); void object_add_completion(ReadLineState *rs, int nb_args, const char *str); void object_del_completion(ReadLineState *rs, int nb_args, const char *str); void device_add_completion(ReadLineState *rs, int nb_args, const char *str); @@ -156,7 +158,7 @@ void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict); void hmp_human_readable_text_helper(Monitor *mon, HumanReadableText *(*qmp_handler)(Error **)); void hmp_info_stats(Monitor *mon, const QDict *qdict); -void hmp_singlestep(Monitor *mon, const QDict *qdict); +void hmp_one_insn_per_tb(Monitor *mon, const QDict *qdict); void hmp_watchdog_action(Monitor *mon, const QDict *qdict); void hmp_pcie_aer_inject_error(Monitor *mon, const QDict *qdict); void hmp_info_capture(Monitor *mon, const QDict *qdict); @@ -178,5 +180,6 @@ void hmp_ioport_read(Monitor *mon, const QDict *qdict); void hmp_ioport_write(Monitor *mon, const QDict *qdict); void hmp_boot_set(Monitor *mon, const QDict *qdict); void hmp_info_mtree(Monitor *mon, const QDict *qdict); +void hmp_info_cryptodev(Monitor *mon, const QDict *qdict); #endif diff --git a/include/monitor/monitor.h b/include/monitor/monitor.h index 033390f699..965f5d5450 100644 --- a/include/monitor/monitor.h +++ b/include/monitor/monitor.h @@ -40,6 +40,9 @@ void monitor_flush(Monitor *mon); int monitor_set_cpu(Monitor *mon, int cpu_index); int monitor_get_cpu_index(Monitor *mon); +int monitor_puts_locked(Monitor *mon, const char *str); +void monitor_flush_locked(Monitor *mon); + void *gpa2hva(MemoryRegion **p_mr, hwaddr addr, uint64_t size, Error **errp); void monitor_read_command(MonitorHMP *mon, int show_prompt); diff --git a/include/net/eth.h b/include/net/eth.h index 6e699b0d7a..3b80b6e07f 100644 --- a/include/net/eth.h +++ b/include/net/eth.h @@ -32,6 +32,8 @@ #define ETH_ALEN 6 #define ETH_HLEN 14 #define ETH_ZLEN 60 /* Min. 
octets in frame without FCS */ +#define ETH_FCS_LEN 4 +#define ETH_MTU 1500 struct eth_header { uint8_t h_dest[ETH_ALEN]; /* destination eth addr */ @@ -222,6 +224,7 @@ struct tcp_hdr { #define IP_HEADER_VERSION_6 (6) #define IP_PROTO_TCP (6) #define IP_PROTO_UDP (17) +#define IP_PROTO_SCTP (132) #define IPTOS_ECN_MASK 0x03 #define IPTOS_ECN(x) ((x) & IPTOS_ECN_MASK) #define IPTOS_ECN_CE 0x03 @@ -312,10 +315,10 @@ eth_get_l2_hdr_length(const void *p) } static inline uint32_t -eth_get_l2_hdr_length_iov(const struct iovec *iov, int iovcnt) +eth_get_l2_hdr_length_iov(const struct iovec *iov, size_t iovcnt, size_t iovoff) { uint8_t p[sizeof(struct eth_header) + sizeof(struct vlan_header)]; - size_t copied = iov_to_buf(iov, iovcnt, 0, p, ARRAY_SIZE(p)); + size_t copied = iov_to_buf(iov, iovcnt, iovoff, p, ARRAY_SIZE(p)); if (copied < ARRAY_SIZE(p)) { return copied; @@ -340,26 +343,19 @@ eth_get_pkt_tci(const void *p) size_t eth_strip_vlan(const struct iovec *iov, int iovcnt, size_t iovoff, - uint8_t *new_ehdr_buf, + void *new_ehdr_buf, uint16_t *payload_offset, uint16_t *tci); size_t -eth_strip_vlan_ex(const struct iovec *iov, int iovcnt, size_t iovoff, - uint16_t vet, uint8_t *new_ehdr_buf, +eth_strip_vlan_ex(const struct iovec *iov, int iovcnt, size_t iovoff, int index, + uint16_t vet, uint16_t vet_ext, void *new_ehdr_buf, uint16_t *payload_offset, uint16_t *tci); uint16_t eth_get_l3_proto(const struct iovec *l2hdr_iov, int iovcnt, size_t l2hdr_len); -void eth_setup_vlan_headers_ex(struct eth_header *ehdr, uint16_t vlan_tag, - uint16_t vlan_ethtype, bool *is_new); - -static inline void -eth_setup_vlan_headers(struct eth_header *ehdr, uint16_t vlan_tag, - bool *is_new) -{ - eth_setup_vlan_headers_ex(ehdr, vlan_tag, ETH_P_VLAN, is_new); -} +void eth_setup_vlan_headers(struct eth_header *ehdr, size_t *ehdr_size, + uint16_t vlan_tag, uint16_t vlan_ethtype); uint8_t eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto); @@ -381,18 +377,25 @@ typedef struct eth_ip4_hdr_info_st { bool fragment; } eth_ip4_hdr_info; +typedef enum EthL4HdrProto { + ETH_L4_HDR_PROTO_INVALID, + ETH_L4_HDR_PROTO_TCP, + ETH_L4_HDR_PROTO_UDP, + ETH_L4_HDR_PROTO_SCTP +} EthL4HdrProto; + typedef struct eth_l4_hdr_info_st { union { struct tcp_header tcp; struct udp_header udp; } hdr; + EthL4HdrProto proto; bool has_tcp_data; } eth_l4_hdr_info; -void eth_get_protocols(const struct iovec *iov, int iovcnt, - bool *isip4, bool *isip6, - bool *isudp, bool *istcp, +void eth_get_protocols(const struct iovec *iov, size_t iovcnt, size_t iovoff, + bool *hasip4, bool *hasip6, size_t *l3hdr_off, size_t *l4hdr_off, size_t *l5hdr_off, @@ -400,11 +403,6 @@ void eth_get_protocols(const struct iovec *iov, int iovcnt, eth_ip4_hdr_info *ip4hdr_info, eth_l4_hdr_info *l4hdr_info); -void eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len, - void *l3hdr, size_t l3hdr_len, - size_t l3payload_len, - size_t frag_offset, bool more_frags); - void eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len); diff --git a/include/net/net.h b/include/net/net.h index 1d88621c12..1448d00afb 100644 --- a/include/net/net.h +++ b/include/net/net.h @@ -56,8 +56,10 @@ typedef RxFilterInfo *(QueryRxFilter)(NetClientState *); typedef bool (HasUfo)(NetClientState *); typedef bool (HasVnetHdr)(NetClientState *); typedef bool (HasVnetHdrLen)(NetClientState *, int); +typedef bool (GetUsingVnetHdr)(NetClientState *); typedef void (UsingVnetHdr)(NetClientState *, bool); typedef void (SetOffload)(NetClientState *, int, int, int, int, int); +typedef int 
(GetVnetHdrLen)(NetClientState *); typedef void (SetVnetHdrLen)(NetClientState *, int); typedef int (SetVnetLE)(NetClientState *, bool); typedef int (SetVnetBE)(NetClientState *, bool); @@ -84,8 +86,10 @@ typedef struct NetClientInfo { HasUfo *has_ufo; HasVnetHdr *has_vnet_hdr; HasVnetHdrLen *has_vnet_hdr_len; + GetUsingVnetHdr *get_using_vnet_hdr; UsingVnetHdr *using_vnet_hdr; SetOffload *set_offload; + GetVnetHdrLen *get_vnet_hdr_len; SetVnetHdrLen *set_vnet_hdr_len; SetVnetLE *set_vnet_le; SetVnetBE *set_vnet_be; @@ -185,9 +189,11 @@ void qemu_format_nic_info_str(NetClientState *nc, uint8_t macaddr[6]); bool qemu_has_ufo(NetClientState *nc); bool qemu_has_vnet_hdr(NetClientState *nc); bool qemu_has_vnet_hdr_len(NetClientState *nc, int len); +bool qemu_get_using_vnet_hdr(NetClientState *nc); void qemu_using_vnet_hdr(NetClientState *nc, bool enable); void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6, int ecn, int ufo); +int qemu_get_vnet_hdr_len(NetClientState *nc); void qemu_set_vnet_hdr_len(NetClientState *nc, int len); int qemu_set_vnet_le(NetClientState *nc, bool is_le); int qemu_set_vnet_be(NetClientState *nc, bool is_be); diff --git a/include/qapi/error.h b/include/qapi/error.h index d798faeec3..f21a231bb1 100644 --- a/include/qapi/error.h +++ b/include/qapi/error.h @@ -519,6 +519,12 @@ static inline void error_propagator_cleanup(ErrorPropagator *prop) G_DEFINE_AUTO_CLEANUP_CLEAR_FUNC(ErrorPropagator, error_propagator_cleanup); +/* + * Special error destination to warn on error. + * See error_setg() and error_propagate() for details. + */ +extern Error *error_warn; + /* * Special error destination to abort on error. * See error_setg() and error_propagate() for details. diff --git a/include/qapi/qmp/dispatch.h b/include/qapi/qmp/dispatch.h index 1e4240fd0d..f2e956813a 100644 --- a/include/qapi/qmp/dispatch.h +++ b/include/qapi/qmp/dispatch.h @@ -55,8 +55,8 @@ bool qmp_command_available(const QmpCommand *cmd, Error **errp); const char *qmp_command_name(const QmpCommand *cmd); bool qmp_has_success_response(const QmpCommand *cmd); QDict *qmp_error_response(Error *err); -QDict *qmp_dispatch(const QmpCommandList *cmds, QObject *request, - bool allow_oob, Monitor *cur_mon); +QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *request, + bool allow_oob, Monitor *cur_mon); bool qmp_is_oob(const QDict *dict); typedef void (*qmp_cmd_callback_fn)(const QmpCommand *cmd, void *opaque); diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h index 874134fd19..f85834ee8b 100644 --- a/include/qemu/atomic.h +++ b/include/qemu/atomic.h @@ -245,6 +245,20 @@ #define smp_wmb() smp_mb_release() #define smp_rmb() smp_mb_acquire() +/* + * SEQ_CST is weaker than the older __sync_* builtins and Linux + * kernel read-modify-write atomics. Provide a macro to obtain + * the same semantics. + */ +#if !defined(QEMU_SANITIZE_THREAD) && \ + (defined(__i386__) || defined(__x86_64__) || defined(__s390x__)) +# define smp_mb__before_rmw() signal_barrier() +# define smp_mb__after_rmw() signal_barrier() +#else +# define smp_mb__before_rmw() smp_mb() +# define smp_mb__after_rmw() smp_mb() +#endif + /* qatomic_mb_read/set semantics map Java volatile variables. They are * less expensive on some platforms (notably POWER) than fully * sequentially consistent operations. @@ -259,7 +273,8 @@ #if !defined(QEMU_SANITIZE_THREAD) && \ (defined(__i386__) || defined(__x86_64__) || defined(__s390x__)) /* This is more efficient than a store plus a fence. 
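An editorial sketch of the pattern the new smp_mb__before_rmw()/smp_mb__after_rmw() macros above enable (not from the patch): pairing a read-modify-write with smp_mb__after_rmw() restores the full ordering of a Linux-style RMW atomic, for example in a flag-then-check handshake between two threads.

    static bool set_flag_then_check(int *my_flag, int *other_flag)
    {
        qatomic_or(my_flag, 1);   /* read-modify-write, SEQ_CST */
        smp_mb__after_rmw();      /* order the RMW before the load below */
        return qatomic_read(other_flag) != 0;
    }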
*/ -# define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i)) +# define qatomic_mb_set(ptr, i) \ + ({ (void)qatomic_xchg(ptr, i); smp_mb__after_rmw(); }) #else # define qatomic_mb_set(ptr, i) \ ({ qatomic_store_release(ptr, i); smp_mb(); }) diff --git a/include/qemu/atomic128.h b/include/qemu/atomic128.h index d0ba0b9c65..34554bf0ac 100644 --- a/include/qemu/atomic128.h +++ b/include/qemu/atomic128.h @@ -15,6 +15,23 @@ #include "qemu/int128.h" +/* + * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics + * that are supported by the host, e.g. s390x. We can force the pointer to + * have our known alignment with __builtin_assume_aligned, however prior to + * GCC 13 that was only reliable with optimization enabled. See + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389 + */ +#if defined(CONFIG_ATOMIC128_OPT) +# if !defined(__OPTIMIZE__) +# define ATTRIBUTE_ATOMIC128_OPT __attribute__((optimize("O1"))) +# endif +# define CONFIG_ATOMIC128 +#endif +#ifndef ATTRIBUTE_ATOMIC128_OPT +# define ATTRIBUTE_ATOMIC128_OPT +#endif + /* * GCC is a house divided about supporting large atomic operations. * @@ -41,132 +58,7 @@ * Therefore, special case each platform. */ -#if defined(CONFIG_ATOMIC128) -static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) -{ - Int128Alias r, c, n; - - c.s = cmp; - n.s = new; - r.i = qatomic_cmpxchg__nocheck((__int128_t *)ptr, c.i, n.i); - return r.s; -} -# define HAVE_CMPXCHG128 1 -#elif defined(CONFIG_CMPXCHG128) -static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) -{ - Int128Alias r, c, n; - - c.s = cmp; - n.s = new; - r.i = __sync_val_compare_and_swap_16((__int128_t *)ptr, c.i, n.i); - return r.s; -} -# define HAVE_CMPXCHG128 1 -#elif defined(__aarch64__) -/* Through gcc 8, aarch64 has no support for 128-bit at all. */ -static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) -{ - uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp); - uint64_t newl = int128_getlo(new), newh = int128_gethi(new); - uint64_t oldl, oldh; - uint32_t tmp; - - asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t" - "cmp %[oldl], %[cmpl]\n\t" - "ccmp %[oldh], %[cmph], #0, eq\n\t" - "b.ne 1f\n\t" - "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t" - "cbnz %w[tmp], 0b\n" - "1:" - : [mem] "+m"(*ptr), [tmp] "=&r"(tmp), - [oldl] "=&r"(oldl), [oldh] "=&r"(oldh) - : [cmpl] "r"(cmpl), [cmph] "r"(cmph), - [newl] "r"(newl), [newh] "r"(newh) - : "memory", "cc"); - - return int128_make128(oldl, oldh); -} -# define HAVE_CMPXCHG128 1 -#else -/* Fallback definition that must be optimized away, or error. */ -Int128 QEMU_ERROR("unsupported atomic") - atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new); -# define HAVE_CMPXCHG128 0 -#endif /* Some definition for HAVE_CMPXCHG128 */ - - -#if defined(CONFIG_ATOMIC128) -static inline Int128 atomic16_read(Int128 *ptr) -{ - Int128Alias r; - - r.i = qatomic_read__nocheck((__int128_t *)ptr); - return r.s; -} - -static inline void atomic16_set(Int128 *ptr, Int128 val) -{ - Int128Alias v; - - v.s = val; - qatomic_set__nocheck((__int128_t *)ptr, v.i); -} - -# define HAVE_ATOMIC128 1 -#elif !defined(CONFIG_USER_ONLY) && defined(__aarch64__) -/* We can do better than cmpxchg for AArch64. */ -static inline Int128 atomic16_read(Int128 *ptr) -{ - uint64_t l, h; - uint32_t tmp; - - /* The load must be paired with the store to guarantee not tearing. 
*/ - asm("0: ldxp %[l], %[h], %[mem]\n\t" - "stxp %w[tmp], %[l], %[h], %[mem]\n\t" - "cbnz %w[tmp], 0b" - : [mem] "+m"(*ptr), [tmp] "=r"(tmp), [l] "=r"(l), [h] "=r"(h)); - - return int128_make128(l, h); -} - -static inline void atomic16_set(Int128 *ptr, Int128 val) -{ - uint64_t l = int128_getlo(val), h = int128_gethi(val); - uint64_t t1, t2; - - /* Load into temporaries to acquire the exclusive access lock. */ - asm("0: ldxp %[t1], %[t2], %[mem]\n\t" - "stxp %w[t1], %[l], %[h], %[mem]\n\t" - "cbnz %w[t1], 0b" - : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2) - : [l] "r"(l), [h] "r"(h)); -} - -# define HAVE_ATOMIC128 1 -#elif !defined(CONFIG_USER_ONLY) && HAVE_CMPXCHG128 -static inline Int128 atomic16_read(Int128 *ptr) -{ - /* Maybe replace 0 with 0, returning the old value. */ - Int128 z = int128_make64(0); - return atomic16_cmpxchg(ptr, z, z); -} - -static inline void atomic16_set(Int128 *ptr, Int128 val) -{ - Int128 old = *ptr, cmp; - do { - cmp = old; - old = atomic16_cmpxchg(ptr, cmp, val); - } while (int128_ne(old, cmp)); -} - -# define HAVE_ATOMIC128 1 -#else -/* Fallback definitions that must be optimized away, or error. */ -Int128 QEMU_ERROR("unsupported atomic") atomic16_read(Int128 *ptr); -void QEMU_ERROR("unsupported atomic") atomic16_set(Int128 *ptr, Int128 val); -# define HAVE_ATOMIC128 0 -#endif /* Some definition for HAVE_ATOMIC128 */ +#include "host/atomic128-cas.h" +#include "host/atomic128-ldst.h" #endif /* QEMU_ATOMIC128_H */ diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h index 3ccb00865f..97806811ee 100644 --- a/include/qemu/bitmap.h +++ b/include/qemu/bitmap.h @@ -22,23 +22,23 @@ * Note that nbits should be always a compile time evaluable constant. * Otherwise many inlines will generate horrible code. * - * bitmap_zero(dst, nbits) *dst = 0UL - * bitmap_fill(dst, nbits) *dst = ~0UL - * bitmap_copy(dst, src, nbits) *dst = *src - * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 - * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 - * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 - * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) - * bitmap_complement(dst, src, nbits) *dst = ~(*src) - * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? + * bitmap_zero(dst, nbits) *dst = 0UL + * bitmap_fill(dst, nbits) *dst = ~0UL + * bitmap_copy(dst, src, nbits) *dst = *src + * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 + * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 + * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 + * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) + * bitmap_complement(dst, src, nbits) *dst = ~(*src) + * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? - * bitmap_empty(src, nbits) Are all bits zero in *src? - * bitmap_full(src, nbits) Are all bits set in *src? - * bitmap_set(dst, pos, nbits) Set specified bit area - * bitmap_set_atomic(dst, pos, nbits) Set specified bit area with atomic ops - * bitmap_clear(dst, pos, nbits) Clear specified bit area + * bitmap_empty(src, nbits) Are all bits zero in *src? + * bitmap_full(src, nbits) Are all bits set in *src? 
+ * bitmap_set(dst, pos, nbits) Set specified bit area + * bitmap_set_atomic(dst, pos, nbits) Set specified bit area with atomic ops + * bitmap_clear(dst, pos, nbits) Clear specified bit area * bitmap_test_and_clear_atomic(dst, pos, nbits) Test and clear area - * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area + * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area * bitmap_to_le(dst, src, nbits) Convert bitmap to little endian * bitmap_from_le(dst, src, nbits) Convert bitmap from little endian * bitmap_copy_with_src_offset(dst, src, offset, nbits) @@ -50,17 +50,17 @@ /* * Also the following operations apply to bitmaps. * - * set_bit(bit, addr) *addr |= bit - * clear_bit(bit, addr) *addr &= ~bit - * change_bit(bit, addr) *addr ^= bit - * test_bit(bit, addr) Is bit set in *addr? - * test_and_set_bit(bit, addr) Set bit and return old value - * test_and_clear_bit(bit, addr) Clear bit and return old value - * test_and_change_bit(bit, addr) Change bit and return old value - * find_first_zero_bit(addr, nbits) Position first zero bit in *addr - * find_first_bit(addr, nbits) Position first set bit in *addr - * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit - * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit + * set_bit(bit, addr) *addr |= bit + * clear_bit(bit, addr) *addr &= ~bit + * change_bit(bit, addr) *addr ^= bit + * test_bit(bit, addr) Is bit set in *addr? + * test_and_set_bit(bit, addr) Set bit and return old value + * test_and_clear_bit(bit, addr) Clear bit and return old value + * test_and_change_bit(bit, addr) Change bit and return old value + * find_first_zero_bit(addr, nbits) Position first zero bit in *addr + * find_first_bit(addr, nbits) Position first set bit in *addr + * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit + * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit */ #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h index 03213ce952..cb3526d1f4 100644 --- a/include/qemu/bitops.h +++ b/include/qemu/bitops.h @@ -218,7 +218,7 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr, */ static inline uint8_t rol8(uint8_t word, unsigned int shift) { - return (word << shift) | (word >> ((8 - shift) & 7)); + return (word << (shift & 7)) | (word >> (-shift & 7)); } /** @@ -228,7 +228,7 @@ static inline uint8_t rol8(uint8_t word, unsigned int shift) */ static inline uint8_t ror8(uint8_t word, unsigned int shift) { - return (word >> shift) | (word << ((8 - shift) & 7)); + return (word >> (shift & 7)) | (word << (-shift & 7)); } /** @@ -238,7 +238,7 @@ static inline uint8_t ror8(uint8_t word, unsigned int shift) */ static inline uint16_t rol16(uint16_t word, unsigned int shift) { - return (word << shift) | (word >> ((16 - shift) & 15)); + return (word << (shift & 15)) | (word >> (-shift & 15)); } /** @@ -248,7 +248,7 @@ static inline uint16_t rol16(uint16_t word, unsigned int shift) */ static inline uint16_t ror16(uint16_t word, unsigned int shift) { - return (word >> shift) | (word << ((16 - shift) & 15)); + return (word >> (shift & 15)) | (word << (-shift & 15)); } /** @@ -258,7 +258,7 @@ static inline uint16_t ror16(uint16_t word, unsigned int shift) */ static inline uint32_t rol32(uint32_t word, unsigned int shift) { - return (word << shift) | (word >> ((32 - shift) & 31)); + return (word << (shift & 31)) | (word >> (-shift & 31)); } /** 
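The replacement is the standard portable rotate idiom: masking the count keeps both shift amounts inside [0, 31], so counts of 0 or >= 32 no longer trigger an out-of-range shift, and since `-shift & 31` equals `(32 - shift) & 31` for every count, compilers still collapse the pattern into a single rotate instruction. A minimal standalone check of the idiom (rol32_demo is a local copy for illustration, not the header's function):

#include <assert.h>
#include <stdint.h>

static inline uint32_t rol32_demo(uint32_t word, unsigned int shift)
{
    return (word << (shift & 31)) | (word >> (-shift & 31));
}

int main(void)
{
    assert(rol32_demo(0x80000001u, 4) == 0x00000018u);  /* high bit wraps around */
    assert(rol32_demo(0x80000001u, 0) == 0x80000001u);  /* no 32-bit right shift */
    assert(rol32_demo(0x80000001u, 32) == 0x80000001u); /* count reduced mod 32 */
    return 0;
}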
@@ -268,7 +268,7 @@ static inline uint32_t rol32(uint32_t word, unsigned int shift) */ static inline uint32_t ror32(uint32_t word, unsigned int shift) { - return (word >> shift) | (word << ((32 - shift) & 31)); + return (word >> (shift & 31)) | (word << (-shift & 31)); } /** @@ -278,7 +278,7 @@ static inline uint32_t ror32(uint32_t word, unsigned int shift) */ static inline uint64_t rol64(uint64_t word, unsigned int shift) { - return (word << shift) | (word >> ((64 - shift) & 63)); + return (word << (shift & 63)) | (word >> (-shift & 63)); } /** @@ -288,7 +288,7 @@ static inline uint64_t rol64(uint64_t word, unsigned int shift) */ static inline uint64_t ror64(uint64_t word, unsigned int shift) { - return (word >> shift) | (word << ((64 - shift) & 63)); + return (word >> (shift & 63)) | (word << (-shift & 63)); } /** diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h index b1650daedf..15a78c0db5 100644 --- a/include/qemu/bswap.h +++ b/include/qemu/bswap.h @@ -125,11 +125,20 @@ CPU_CONVERT(le, 32, uint32_t) CPU_CONVERT(le, 64, uint64_t) /* - * Same as cpu_to_le{16,32}, except that gcc will figure the result is + * Same as cpu_to_le{16,32,64}, except that gcc will figure the result is * a compile-time constant if you pass in a constant. So this can be * used to initialize static variables. */ #if HOST_BIG_ENDIAN +# define const_le64(_x) \ + ((((_x) & 0x00000000000000ffU) << 56) | \ + (((_x) & 0x000000000000ff00U) << 40) | \ + (((_x) & 0x0000000000ff0000U) << 24) | \ + (((_x) & 0x00000000ff000000U) << 8) | \ + (((_x) & 0x000000ff00000000U) >> 8) | \ + (((_x) & 0x0000ff0000000000U) >> 24) | \ + (((_x) & 0x00ff000000000000U) >> 40) | \ + (((_x) & 0xff00000000000000U) >> 56)) # define const_le32(_x) \ ((((_x) & 0x000000ffU) << 24) | \ (((_x) & 0x0000ff00U) << 8) | \ @@ -139,6 +148,7 @@ CPU_CONVERT(le, 64, uint64_t) ((((_x) & 0x00ff) << 8) | \ (((_x) & 0xff00) >> 8)) #else +# define const_le64(_x) (_x) # define const_le32(_x) (_x) # define const_le16(_x) (_x) #endif diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h index f20a76e4a2..c2f49df1f9 100644 --- a/include/qemu/compiler.h +++ b/include/qemu/compiler.h @@ -33,8 +33,8 @@ #ifndef glue #define xglue(x, y) x ## y #define glue(x, y) xglue(x, y) -#define stringify(s) tostring(s) -#define tostring(s) #s +#define stringify(s) tostring(s) +#define tostring(s) #s #endif #ifndef likely diff --git a/include/qemu/cpuid.h b/include/qemu/cpuid.h index 7adb12d320..35325f1995 100644 --- a/include/qemu/cpuid.h +++ b/include/qemu/cpuid.h @@ -71,4 +71,29 @@ #define bit_LZCNT (1 << 5) #endif +/* + * Signatures for different CPU implementations as returned from Leaf 0. 
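As a usage sketch, hedged because cpu_is_amd() is a hypothetical helper rather than part of this header: the signature macros are meant to be compared against the registers filled in by the compiler's __get_cpuid() from <cpuid.h>.

#include <cpuid.h>
#include <stdbool.h>

static bool cpu_is_amd(void)
{
    unsigned int a, b, c, d;

    if (!__get_cpuid(0, &a, &b, &c, &d)) {
        return false;               /* CPUID leaf 0 unavailable */
    }
    /* Leaf 0 spreads the vendor string across EBX, EDX, ECX. */
    return b == signature_AMD_ebx &&
           d == signature_AMD_edx &&
           c == signature_AMD_ecx;
}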
+ */ + +#ifndef signature_INTEL_ecx +/* "Genu" "ineI" "ntel" */ +#define signature_INTEL_ebx 0x756e6547 +#define signature_INTEL_edx 0x49656e69 +#define signature_INTEL_ecx 0x6c65746e +#endif + +#ifndef signature_AMD_ecx +/* "Auth" "enti" "cAMD" */ +#define signature_AMD_ebx 0x68747541 +#define signature_AMD_edx 0x69746e65 +#define signature_AMD_ecx 0x444d4163 +#endif + +static inline unsigned xgetbv_low(unsigned c) +{ + unsigned a, d; + asm("xgetbv" : "=a"(a), "=d"(d) : "c"(c)); + return a; +} + #endif /* QEMU_CPUID_H */ diff --git a/include/qemu/crc32c.h b/include/qemu/crc32c.h index 5b78884c38..88b4d2b3b3 100644 --- a/include/qemu/crc32c.h +++ b/include/qemu/crc32c.h @@ -30,5 +30,6 @@ uint32_t crc32c(uint32_t crc, const uint8_t *data, unsigned int length); +uint32_t iov_crc32c(uint32_t crc, const struct iovec *iov, size_t iov_cnt); #endif diff --git a/include/qemu/help-texts.h b/include/qemu/help-texts.h index 4f265fed8d..d0359f82e0 100644 --- a/include/qemu/help-texts.h +++ b/include/qemu/help-texts.h @@ -2,7 +2,7 @@ #define QEMU_HELP_TEXTS_H /* Copyright string for -version arguments, About dialogs, etc */ -#define QEMU_COPYRIGHT "Copyright (c) 2003-2022 " \ +#define QEMU_COPYRIGHT "Copyright (c) 2003-2023 " \ "Fabrice Bellard and the QEMU Project developers" /* Bug reporting information for --help arguments, About dialogs, etc */ diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h index 3ce62bf4a5..d3b4dce6a9 100644 --- a/include/qemu/host-utils.h +++ b/include/qemu/host-utils.h @@ -107,6 +107,36 @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) } #endif +/** + * clz8 - count leading zeros in an 8-bit value. + * @val: The value to search + * + * Returns 8 if the value is zero. Note that the GCC builtin is + * undefined if the value is zero. + * + * Note that the GCC builtin will upcast its argument to an `unsigned int` + * so this function subtracts off the number of prepended zeroes. + */ +static inline int clz8(uint8_t val) +{ + return val ? __builtin_clz(val) - 24 : 8; +} + +/** + * clz16 - count leading zeros in a 16-bit value. + * @val: The value to search + * + * Returns 16 if the value is zero. Note that the GCC builtin is + * undefined if the value is zero. + * + * Note that the GCC builtin will upcast its argument to an `unsigned int` + * so this function subtracts off the number of prepended zeroes. + */ +static inline int clz16(uint16_t val) +{ + return val ? __builtin_clz(val) - 16 : 16; +} + /** * clz32 - count leading zeros in a 32-bit value. * @val: The value to search @@ -153,6 +183,30 @@ static inline int clo64(uint64_t val) return clz64(~val); } +/** + * ctz8 - count trailing zeros in an 8-bit value. + * @val: The value to search + * + * Returns 8 if the value is zero. Note that the GCC builtin is + * undefined if the value is zero. + */ +static inline int ctz8(uint8_t val) +{ + return val ? __builtin_ctz(val) : 8; +} + +/** + * ctz16 - count trailing zeros in a 16-bit value. + * @val: The value to search + * + * Returns 16 if the value is zero. Note that the GCC builtin is + * undefined if the value is zero. + */ +static inline int ctz16(uint16_t val) +{ + return val ? __builtin_ctz(val) : 16; +} + /** * ctz32 - count trailing zeros in a 32-bit value. * @val: The value to search diff --git a/include/qemu/int128.h b/include/qemu/int128.h index f62a46b48c..73624e8be7 100644 --- a/include/qemu/int128.h +++ b/include/qemu/int128.h @@ -481,14 +481,14 @@ static inline void bswap128s(Int128 *s) * a possible structure and the native types.
Ease parameter passing * via use of the transparent union extension. */ -#ifdef CONFIG_INT128 +#ifdef CONFIG_INT128_TYPE typedef union { - Int128 s; - __int128_t i; __uint128_t u; + __int128_t i; + Int128 s; } Int128Alias __attribute__((transparent_union)); #else typedef Int128 Int128Alias; -#endif /* CONFIG_INT128 */ +#endif /* CONFIG_INT128_TYPE */ #endif /* INT128_H */ diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h index c25f390696..68e70e61aa 100644 --- a/include/qemu/main-loop.h +++ b/include/qemu/main-loop.h @@ -387,11 +387,12 @@ void qemu_cond_timedwait_iothread(QemuCond *cond, int ms); /* internal interfaces */ -void qemu_fd_register(int fd); - +#define qemu_bh_new_guarded(cb, opaque, guard) \ + qemu_bh_new_full((cb), (opaque), (stringify(cb)), guard) #define qemu_bh_new(cb, opaque) \ - qemu_bh_new_full((cb), (opaque), (stringify(cb))) -QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name); + qemu_bh_new_full((cb), (opaque), (stringify(cb)), NULL) +QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name, + MemReentrancyGuard *reentrancy_guard); void qemu_bh_schedule_idle(QEMUBH *bh); enum { diff --git a/include/qemu/mmap-alloc.h b/include/qemu/mmap-alloc.h index 2825e231a7..8344daaa03 100644 --- a/include/qemu/mmap-alloc.h +++ b/include/qemu/mmap-alloc.h @@ -1,8 +1,15 @@ #ifndef QEMU_MMAP_ALLOC_H #define QEMU_MMAP_ALLOC_H +typedef enum { + QEMU_FS_TYPE_UNKNOWN = 0, + QEMU_FS_TYPE_TMPFS, + QEMU_FS_TYPE_HUGETLBFS, + QEMU_FS_TYPE_NUM, +} QemuFsType; size_t qemu_fd_getpagesize(int fd); +QemuFsType qemu_fd_getfs(int fd); /** * qemu_ram_mmap: mmap anonymous memory, the specified file or device. diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h index 88c9facbf2..cc61b00ba9 100644 --- a/include/qemu/osdep.h +++ b/include/qemu/osdep.h @@ -75,7 +75,7 @@ QEMU_EXTERN_C int daemon(int, int); #ifdef _WIN32 /* as defined in sdkddkver.h */ #ifndef _WIN32_WINNT -#define _WIN32_WINNT 0x0601 /* Windows 7 API (should be in sync with glib) */ +#define _WIN32_WINNT 0x0602 /* Windows 8 API (should be >= the one from glib) */ #endif /* reduces the number of implicitly included headers */ #ifndef WIN32_LEAN_AND_MEAN @@ -237,7 +237,7 @@ extern "C" { * supports QEMU_ERROR, this will be reported at compile time; otherwise * this will be reported at link time due to the missing symbol. */ -extern G_NORETURN +G_NORETURN extern void QEMU_ERROR("code path is reachable") qemu_build_not_reached_always(void); #if defined(__OPTIMIZE__) && !defined(__NO_INLINE__) @@ -665,20 +665,6 @@ void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, */ char *qemu_get_pid_name(pid_t pid); -/** - * qemu_fork: - * - * A version of fork that avoids signal handler race - * conditions that can lead to child process getting - * signals that are otherwise only expected by the - * parent. It also resets all signal handlers to the - * default settings. - * - * Returns 0 to child process, pid number to parent - * or -1 on failure. - */ -pid_t qemu_fork(Error **errp); - /* Using intptr_t ensures that qemu_*_page_mask is sign-extended even * when intptr_t is 32-bit and we are aligning a long long. */ diff --git a/include/qemu/plugin-event.h b/include/qemu/plugin-event.h new file mode 100644 index 0000000000..7056d8427b --- /dev/null +++ b/include/qemu/plugin-event.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2017, Emilio G. Cota + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ +#ifndef QEMU_PLUGIN_EVENT_H +#define QEMU_PLUGIN_EVENT_H + +/* + * Events that plugins can subscribe to. + */ +enum qemu_plugin_event { + QEMU_PLUGIN_EV_VCPU_INIT, + QEMU_PLUGIN_EV_VCPU_EXIT, + QEMU_PLUGIN_EV_VCPU_TB_TRANS, + QEMU_PLUGIN_EV_VCPU_IDLE, + QEMU_PLUGIN_EV_VCPU_RESUME, + QEMU_PLUGIN_EV_VCPU_SYSCALL, + QEMU_PLUGIN_EV_VCPU_SYSCALL_RET, + QEMU_PLUGIN_EV_FLUSH, + QEMU_PLUGIN_EV_ATEXIT, + QEMU_PLUGIN_EV_MAX, /* total number of plugin events we support */ +}; + +#endif /* QEMU_PLUGIN_EVENT_H */ diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h index fb338ba576..bc0781cab8 100644 --- a/include/qemu/plugin.h +++ b/include/qemu/plugin.h @@ -12,23 +12,9 @@ #include "qemu/error-report.h" #include "qemu/queue.h" #include "qemu/option.h" +#include "qemu/plugin-event.h" #include "exec/memopidx.h" - -/* - * Events that plugins can subscribe to. - */ -enum qemu_plugin_event { - QEMU_PLUGIN_EV_VCPU_INIT, - QEMU_PLUGIN_EV_VCPU_EXIT, - QEMU_PLUGIN_EV_VCPU_TB_TRANS, - QEMU_PLUGIN_EV_VCPU_IDLE, - QEMU_PLUGIN_EV_VCPU_RESUME, - QEMU_PLUGIN_EV_VCPU_SYSCALL, - QEMU_PLUGIN_EV_VCPU_SYSCALL_RET, - QEMU_PLUGIN_EV_FLUSH, - QEMU_PLUGIN_EV_ATEXIT, - QEMU_PLUGIN_EV_MAX, /* total number of plugin events we support */ -}; +#include "hw/core/cpu.h" /* * Option parsing/processing. @@ -59,8 +45,6 @@ get_plugin_meminfo_rw(qemu_plugin_meminfo_t i) #ifdef CONFIG_PLUGIN extern QemuOptsList qemu_plugin_opts; -#define QEMU_PLUGIN_ASSERT(cond) g_assert(cond) - static inline void qemu_plugin_add_opts(void) { qemu_add_opts(&qemu_plugin_opts); @@ -221,7 +205,10 @@ void qemu_plugin_atexit_cb(void); void qemu_plugin_add_dyn_cb_arr(GArray *arr); -void qemu_plugin_disable_mem_helpers(CPUState *cpu); +static inline void qemu_plugin_disable_mem_helpers(CPUState *cpu) +{ + cpu->plugin_mem_cbs = NULL; +} /** * qemu_plugin_user_exit(): clean-up callbacks before calling exit callbacks @@ -252,8 +239,6 @@ void qemu_plugin_user_postfork(bool is_child); #else /* !CONFIG_PLUGIN */ -#define QEMU_PLUGIN_ASSERT(cond) - static inline void qemu_plugin_add_opts(void) { } diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h index d0e9d03adf..50a9957279 100644 --- a/include/qemu/qemu-plugin.h +++ b/include/qemu/qemu-plugin.h @@ -481,17 +481,56 @@ uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr); */ const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h); -typedef void -(*qemu_plugin_vcpu_mem_cb_t)(unsigned int vcpu_index, - qemu_plugin_meminfo_t info, uint64_t vaddr, - void *userdata); +/** + * typedef qemu_plugin_vcpu_mem_cb_t - memory callback function type + * @vcpu_index: the executing vCPU + * @info: an opaque handle for further queries about the memory + * @vaddr: the virtual address of the transaction + * @userdata: any user data attached to the callback + */ +typedef void (*qemu_plugin_vcpu_mem_cb_t) (unsigned int vcpu_index, + qemu_plugin_meminfo_t info, + uint64_t vaddr, + void *userdata); +/** + * qemu_plugin_register_vcpu_mem_cb() - register memory access callback + * @insn: handle for instruction to instrument + * @cb: callback of type qemu_plugin_vcpu_mem_cb_t + * @flags: (currently unused) callback flags + * @rw: monitor reads, writes or both + * @userdata: opaque pointer for userdata + * + * This registers a full callback for every memory access generated by + * an instruction. If the instruction doesn't access memory no + * callback will be made. 
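For illustration, a hedged sketch of a counting plugin (mem_count, insn and the registration site are assumed names; the flag and rw constants are the ones declared in this header):

static uint64_t mem_count;

static void vcpu_mem_cb(unsigned int vcpu_index,
                        qemu_plugin_meminfo_t info,
                        uint64_t vaddr, void *userdata)
{
    /* Atomic because other vCPUs may run concurrently; see the note below. */
    __atomic_fetch_add(&mem_count, 1, __ATOMIC_RELAXED);
}

/* At translation time, once per instrumented instruction: */
qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem_cb,
                                 QEMU_PLUGIN_CB_NO_REGS,
                                 QEMU_PLUGIN_MEM_RW, NULL);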
+ * + * The callback reports the vCPU the access took place on, the virtual + * address of the access and a handle for further queries. The user + * can attach some userdata to the callback for additional purposes. + * + * Other execution threads will continue to execute during the + * callback so the plugin is responsible for ensuring it doesn't get + * confused by making appropriate use of locking if required. + */ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn, qemu_plugin_vcpu_mem_cb_t cb, enum qemu_plugin_cb_flags flags, enum qemu_plugin_mem_rw rw, void *userdata); +/** + * qemu_plugin_register_vcpu_mem_inline() - register an inline op for any memory access + * @insn: handle for instruction to instrument + * @rw: apply to reads, writes or both + * @op: the op, of type qemu_plugin_op + * @ptr: pointer memory for the op + * @imm: immediate data for @op + * + * This registers an inline op for every memory access generated by the + * instruction. This provides a lightweight but not thread-safe + * way of counting the number of operations done. + */ void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn, enum qemu_plugin_mem_rw rw, enum qemu_plugin_op op, void *ptr, diff --git a/include/qemu/qtree.h b/include/qemu/qtree.h new file mode 100644 index 0000000000..69fe74b50d --- /dev/null +++ b/include/qemu/qtree.h @@ -0,0 +1,201 @@ +/* + * GLIB - Library of useful routines for C programming + * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald + * + * SPDX-License-Identifier: LGPL-2.1-or-later + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* + * Modified by the GLib Team and others 1997-2000. See the AUTHORS + * file for a list of people on the GLib Team. See the ChangeLog + * files for a list of changes. These files are distributed with + * GLib at ftp://ftp.gtk.org/pub/gtk/. + */ + +/* + * QTree is a partial import of Glib's GTree. The parts excluded correspond + * to API calls either deprecated (e.g. g_tree_traverse) or recently added + * (e.g. g_tree_search_node, added in 2.68); neither has callers in QEMU. + * + * The reason for this import is to allow us to control the memory allocator + * used by the tree implementation. Until Glib 2.75.3, GTree uses Glib's + * slice allocator, which causes problems when forking in user-mode; + * see https://gitlab.com/qemu-project/qemu/-/issues/285 and glib's + * "45b5a6c1e gslice: Remove slice allocator and use malloc() instead". + * + * TODO: remove QTree when QEMU's minimum Glib version is >= 2.75.3.
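As a hedged sketch of how drop-in the API is (each q_tree_* call forwards to, or reimplements, the identically shaped g_tree_* call; compare_ints and example are illustrative names):

static gint compare_ints(gconstpointer a, gconstpointer b)
{
    return GPOINTER_TO_INT(a) - GPOINTER_TO_INT(b);
}

static void example(void)
{
    QTree *t = q_tree_new(compare_ints);

    q_tree_insert(t, GINT_TO_POINTER(42), (gpointer)"forty-two");
    g_assert(q_tree_nnodes(t) == 1);
    g_assert_cmpstr(q_tree_lookup(t, GINT_TO_POINTER(42)), ==, "forty-two");
    q_tree_destroy(t);
}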
+ */ + +#ifndef QEMU_QTREE_H +#define QEMU_QTREE_H + +#include "qemu/osdep.h" + +#ifdef HAVE_GLIB_WITH_SLICE_ALLOCATOR + +typedef struct _QTree QTree; + +typedef struct _QTreeNode QTreeNode; + +typedef gboolean (*QTraverseNodeFunc)(QTreeNode *node, + gpointer user_data); + +/* + * Balanced binary trees + */ +QTree *q_tree_new(GCompareFunc key_compare_func); +QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func, + gpointer key_compare_data); +QTree *q_tree_new_full(GCompareDataFunc key_compare_func, + gpointer key_compare_data, + GDestroyNotify key_destroy_func, + GDestroyNotify value_destroy_func); +QTree *q_tree_ref(QTree *tree); +void q_tree_unref(QTree *tree); +void q_tree_destroy(QTree *tree); +void q_tree_insert(QTree *tree, + gpointer key, + gpointer value); +void q_tree_replace(QTree *tree, + gpointer key, + gpointer value); +gboolean q_tree_remove(QTree *tree, + gconstpointer key); +gboolean q_tree_steal(QTree *tree, + gconstpointer key); +gpointer q_tree_lookup(QTree *tree, + gconstpointer key); +gboolean q_tree_lookup_extended(QTree *tree, + gconstpointer lookup_key, + gpointer *orig_key, + gpointer *value); +void q_tree_foreach(QTree *tree, + GTraverseFunc func, + gpointer user_data); +gpointer q_tree_search(QTree *tree, + GCompareFunc search_func, + gconstpointer user_data); +gint q_tree_height(QTree *tree); +gint q_tree_nnodes(QTree *tree); + +#else /* !HAVE_GLIB_WITH_SLICE_ALLOCATOR */ + +typedef GTree QTree; +typedef GTreeNode QTreeNode; +typedef GTraverseNodeFunc QTraverseNodeFunc; + +static inline QTree *q_tree_new(GCompareFunc key_compare_func) +{ + return g_tree_new(key_compare_func); +} + +static inline QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func, + gpointer key_compare_data) +{ + return g_tree_new_with_data(key_compare_func, key_compare_data); +} + +static inline QTree *q_tree_new_full(GCompareDataFunc key_compare_func, + gpointer key_compare_data, + GDestroyNotify key_destroy_func, + GDestroyNotify value_destroy_func) +{ + return g_tree_new_full(key_compare_func, key_compare_data, + key_destroy_func, value_destroy_func); +} + +static inline QTree *q_tree_ref(QTree *tree) +{ + return g_tree_ref(tree); +} + +static inline void q_tree_unref(QTree *tree) +{ + g_tree_unref(tree); +} + +static inline void q_tree_destroy(QTree *tree) +{ + g_tree_destroy(tree); +} + +static inline void q_tree_insert(QTree *tree, + gpointer key, + gpointer value) +{ + g_tree_insert(tree, key, value); +} + +static inline void q_tree_replace(QTree *tree, + gpointer key, + gpointer value) +{ + g_tree_replace(tree, key, value); +} + +static inline gboolean q_tree_remove(QTree *tree, + gconstpointer key) +{ + return g_tree_remove(tree, key); +} + +static inline gboolean q_tree_steal(QTree *tree, + gconstpointer key) +{ + return g_tree_steal(tree, key); +} + +static inline gpointer q_tree_lookup(QTree *tree, + gconstpointer key) +{ + return g_tree_lookup(tree, key); +} + +static inline gboolean q_tree_lookup_extended(QTree *tree, + gconstpointer lookup_key, + gpointer *orig_key, + gpointer *value) +{ + return g_tree_lookup_extended(tree, lookup_key, orig_key, value); +} + +static inline void q_tree_foreach(QTree *tree, + GTraverseFunc func, + gpointer user_data) +{ + return g_tree_foreach(tree, func, user_data); +} + +static inline gpointer q_tree_search(QTree *tree, + GCompareFunc search_func, + gconstpointer user_data) +{ + return g_tree_search(tree, search_func, user_data); +} + +static inline gint q_tree_height(QTree *tree) +{ + return g_tree_height(tree); +} + +static 
inline gint q_tree_nnodes(QTree *tree) +{ + return g_tree_nnodes(tree); +} + +#endif /* HAVE_GLIB_WITH_SLICE_ALLOCATOR */ + +#endif /* QEMU_QTREE_H */ diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h index 313fc414bc..661c1a1468 100644 --- a/include/qemu/rcu.h +++ b/include/qemu/rcu.h @@ -87,7 +87,10 @@ static inline void rcu_read_lock(void) ctr = qatomic_read(&rcu_gp_ctr); qatomic_set(&p_rcu_reader->ctr, ctr); - /* Write p_rcu_reader->ctr before reading RCU-protected pointers. */ + /* + * Read rcu_gp_ptr and write p_rcu_reader->ctr before reading + * RCU-protected pointers. + */ smp_mb_placeholder(); } diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h index 2b0698a7c9..d935fd80da 100644 --- a/include/qemu/sockets.h +++ b/include/qemu/sockets.h @@ -15,7 +15,6 @@ int inet_aton(const char *cp, struct in_addr *ia); bool fd_is_socket(int fd); int qemu_socket(int domain, int type, int protocol); -#ifndef WIN32 /** * qemu_socketpair: * @domain: specifies a communication domain, such as PF_UNIX @@ -30,7 +29,6 @@ int qemu_socket(int domain, int type, int protocol); * Return 0 on success. */ int qemu_socketpair(int domain, int type, int protocol, int sv[2]); -#endif int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen); /* diff --git a/include/qemu/stats64.h b/include/qemu/stats64.h index 802402254b..99b5cb724a 100644 --- a/include/qemu/stats64.h +++ b/include/qemu/stats64.h @@ -40,6 +40,11 @@ static inline uint64_t stat64_get(const Stat64 *s) return qatomic_read__nocheck(&s->value); } +static inline void stat64_set(Stat64 *s, uint64_t value) +{ + qatomic_set__nocheck(&s->value, value); +} + static inline void stat64_add(Stat64 *s, uint64_t value) { qatomic_add(&s->value, value); @@ -62,6 +67,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value) } #else uint64_t stat64_get(const Stat64 *s); +void stat64_set(Stat64 *s, uint64_t value); bool stat64_min_slow(Stat64 *s, uint64_t value); bool stat64_max_slow(Stat64 *s, uint64_t value); bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high); diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h index c7c8a85315..8e9ef252f5 100644 --- a/include/qemu/typedefs.h +++ b/include/qemu/typedefs.h @@ -41,6 +41,7 @@ typedef struct CompatProperty CompatProperty; typedef struct ConfidentialGuestSupport ConfidentialGuestSupport; typedef struct CPUAddressSpace CPUAddressSpace; typedef struct CPUArchState CPUArchState; +typedef struct CpuInfoFast CpuInfoFast; typedef struct CPUJumpCache CPUJumpCache; typedef struct CPUState CPUState; typedef struct CPUTLBEntryFull CPUTLBEntryFull; @@ -49,6 +50,7 @@ typedef struct DeviceState DeviceState; typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot; typedef struct DisplayChangeListener DisplayChangeListener; typedef struct DriveInfo DriveInfo; +typedef struct DumpState DumpState; typedef struct Error Error; typedef struct EventNotifier EventNotifier; typedef struct FlatView FlatView; @@ -56,6 +58,7 @@ typedef struct FWCfgEntry FWCfgEntry; typedef struct FWCfgIoState FWCfgIoState; typedef struct FWCfgMemState FWCfgMemState; typedef struct FWCfgState FWCfgState; +typedef struct GraphicHwOps GraphicHwOps; typedef struct HostMemoryBackend HostMemoryBackend; typedef struct I2CBus I2CBus; typedef struct I2SCodec I2SCodec; @@ -90,10 +93,10 @@ typedef struct PCIDevice PCIDevice; typedef struct PCIEAERErr PCIEAERErr; typedef struct PCIEAERLog PCIEAERLog; typedef struct PCIEAERMsg PCIEAERMsg; -typedef struct PCIESriovPF PCIESriovPF; -typedef struct PCIESriovVF PCIESriovVF; typedef 
struct PCIEPort PCIEPort; typedef struct PCIESlot PCIESlot; +typedef struct PCIESriovPF PCIESriovPF; +typedef struct PCIESriovVF PCIESriovVF; typedef struct PCIExpressDevice PCIExpressDevice; typedef struct PCIExpressHost PCIExpressHost; typedef struct PCIHostDeviceAddress PCIHostDeviceAddress; @@ -106,6 +109,7 @@ typedef struct QBool QBool; typedef struct QDict QDict; typedef struct QEMUBH QEMUBH; typedef struct QemuConsole QemuConsole; +typedef struct QEMUCursor QEMUCursor; typedef struct QEMUFile QEMUFile; typedef struct QemuLockable QemuLockable; typedef struct QemuMutex QemuMutex; @@ -132,9 +136,6 @@ typedef struct VirtIODevice VirtIODevice; typedef struct Visitor Visitor; typedef struct VMChangeStateEntry VMChangeStateEntry; typedef struct VMStateDescription VMStateDescription; -typedef struct DumpState DumpState; -typedef struct GraphicHwOps GraphicHwOps; -typedef struct QEMUCursor QEMUCursor; /* * Pointer types diff --git a/include/qemu/uri.h b/include/qemu/uri.h index 3ad211d676..b43f35a6a6 100644 --- a/include/qemu/uri.h +++ b/include/qemu/uri.h @@ -59,16 +59,16 @@ * as described in RFC 2396 but separated for further processing. */ typedef struct URI { - char *scheme; /* the URI scheme */ - char *opaque; /* opaque part */ - char *authority; /* the authority part */ - char *server; /* the server part */ - char *user; /* the user part */ - int port; /* the port number */ - char *path; /* the path string */ - char *fragment; /* the fragment identifier */ - int cleanup; /* parsing potentially unclean URI */ - char *query; /* the query string (as it appears in the URI) */ + char *scheme; /* the URI scheme */ + char *opaque; /* opaque part */ + char *authority; /* the authority part */ + char *server; /* the server part */ + char *user; /* the user part */ + int port; /* the port number */ + char *path; /* the path string */ + char *fragment; /* the fragment identifier */ + int cleanup; /* parsing potentially unclean URI */ + char *query; /* the query string (as it appears in the URI) */ } URI; URI *uri_new(void); @@ -84,16 +84,16 @@ void uri_free(URI *uri); /* Single web service query parameter 'name=value'. */ typedef struct QueryParam { - char *name; /* Name (unescaped). */ - char *value; /* Value (unescaped). */ - int ignore; /* Ignore this field in qparam_get_query */ + char *name; /* Name (unescaped). */ + char *value; /* Value (unescaped). */ + int ignore; /* Ignore this field in qparam_get_query */ } QueryParam; /* Set of parameters. 
*/ typedef struct QueryParams { - int n; /* number of parameters used */ - int alloc; /* allocated space */ - QueryParam *p; /* array of parameters */ + int n; /* number of parameters used */ + int alloc; /* allocated space */ + QueryParam *p; /* array of parameters */ } QueryParams; struct QueryParams *query_params_new (int init_alloc); diff --git a/include/qemu/uuid.h b/include/qemu/uuid.h index 9925febfa5..dc40ee1fc9 100644 --- a/include/qemu/uuid.h +++ b/include/qemu/uuid.h @@ -61,6 +61,18 @@ typedef struct { (clock_seq_hi_and_reserved), (clock_seq_low), (node0), (node1), (node2),\ (node3), (node4), (node5) } +/* Normal (network byte order) UUID */ +#define UUID(time_low, time_mid, time_hi_and_version, \ + clock_seq_hi_and_reserved, clock_seq_low, node0, node1, node2, \ + node3, node4, node5) \ + { ((time_low) >> 24) & 0xff, ((time_low) >> 16) & 0xff, \ + ((time_low) >> 8) & 0xff, (time_low) & 0xff, \ + ((time_mid) >> 8) & 0xff, (time_mid) & 0xff, \ + ((time_hi_and_version) >> 8) & 0xff, (time_hi_and_version) & 0xff, \ + (clock_seq_hi_and_reserved), (clock_seq_low), \ + (node0), (node1), (node2), (node3), (node4), (node5) \ + } + #define UUID_FMT "%02hhx%02hhx%02hhx%02hhx-" \ "%02hhx%02hhx-%02hhx%02hhx-" \ "%02hhx%02hhx-" \ diff --git a/include/qemu/vhost-user-server.h b/include/qemu/vhost-user-server.h index 25c72433ca..b1c1cda886 100644 --- a/include/qemu/vhost-user-server.h +++ b/include/qemu/vhost-user-server.h @@ -40,8 +40,9 @@ typedef struct { int max_queues; const VuDevIface *vu_iface; + unsigned int in_flight; /* atomic */ + /* Protected by ctx lock */ - unsigned int refcount; bool wait_idle; VuDev vu_dev; QIOChannel *ioc; /* The I/O channel with the client */ @@ -60,8 +61,9 @@ bool vhost_user_server_start(VuServer *server, void vhost_user_server_stop(VuServer *server); -void vhost_user_server_ref(VuServer *server); -void vhost_user_server_unref(VuServer *server); +void vhost_user_server_inc_in_flight(VuServer *server); +void vhost_user_server_dec_in_flight(VuServer *server); +bool vhost_user_server_has_in_flight(VuServer *server); void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx); void vhost_user_server_detach_aio_context(VuServer *server); diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h index 69cab17b38..dc3e6112c1 100644 --- a/include/standard-headers/drm/drm_fourcc.h +++ b/include/standard-headers/drm/drm_fourcc.h @@ -87,6 +87,18 @@ extern "C" { * * The authoritative list of format modifier codes is found in * `include/uapi/drm/drm_fourcc.h` + * + * Open Source User Waiver + * ----------------------- + * + * Because this is the authoritative source for pixel formats and modifiers + * referenced by GL, Vulkan extensions and other standards and hence used both + * by open source and closed source driver stacks, the usual requirement for an + * upstream in-kernel or open source userspace user does not apply. + * + * To ensure, as much as feasible, compatibility across stacks and avoid + * confusion with incompatible enumerations stakeholders for all relevant driver + * stacks should approve additions. 
*/ #define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \ diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h index 87176ab075..99fcddf04f 100644 --- a/include/standard-headers/linux/ethtool.h +++ b/include/standard-headers/linux/ethtool.h @@ -711,6 +711,24 @@ enum ethtool_stringset { ETH_SS_COUNT }; +/** + * enum ethtool_mac_stats_src - source of ethtool MAC statistics + * @ETHTOOL_MAC_STATS_SRC_AGGREGATE: + * if device supports a MAC merge layer, this retrieves the aggregate + * statistics of the eMAC and pMAC. Otherwise, it retrieves just the + * statistics of the single (express) MAC. + * @ETHTOOL_MAC_STATS_SRC_EMAC: + * if device supports a MM layer, this retrieves the eMAC statistics. + * Otherwise, it retrieves the statistics of the single (express) MAC. + * @ETHTOOL_MAC_STATS_SRC_PMAC: + * if device supports a MM layer, this retrieves the pMAC statistics. + */ +enum ethtool_mac_stats_src { + ETHTOOL_MAC_STATS_SRC_AGGREGATE, + ETHTOOL_MAC_STATS_SRC_EMAC, + ETHTOOL_MAC_STATS_SRC_PMAC, +}; + /** * enum ethtool_module_power_mode_policy - plug-in module power mode policy * @ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH: Module is always in high power mode. @@ -779,6 +797,31 @@ enum ethtool_podl_pse_pw_d_status { ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR, }; +/** + * enum ethtool_mm_verify_status - status of MAC Merge Verify function + * @ETHTOOL_MM_VERIFY_STATUS_UNKNOWN: + * verification status is unknown + * @ETHTOOL_MM_VERIFY_STATUS_INITIAL: + * the 802.3 Verify State diagram is in the state INIT_VERIFICATION + * @ETHTOOL_MM_VERIFY_STATUS_VERIFYING: + * the Verify State diagram is in the state VERIFICATION_IDLE, + * SEND_VERIFY or WAIT_FOR_RESPONSE + * @ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED: + * indicates that the Verify State diagram is in the state VERIFIED + * @ETHTOOL_MM_VERIFY_STATUS_FAILED: + * the Verify State diagram is in the state VERIFY_FAIL + * @ETHTOOL_MM_VERIFY_STATUS_DISABLED: + * verification of preemption operation is disabled + */ +enum ethtool_mm_verify_status { + ETHTOOL_MM_VERIFY_STATUS_UNKNOWN, + ETHTOOL_MM_VERIFY_STATUS_INITIAL, + ETHTOOL_MM_VERIFY_STATUS_VERIFYING, + ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED, + ETHTOOL_MM_VERIFY_STATUS_FAILED, + ETHTOOL_MM_VERIFY_STATUS_DISABLED, +}; + /** * struct ethtool_gstrings - string set for data tagging * @cmd: Command number = %ETHTOOL_GSTRINGS @@ -1183,7 +1226,7 @@ struct ethtool_rxnfc { uint32_t rule_cnt; uint32_t rss_context; }; - uint32_t rule_locs[0]; + uint32_t rule_locs[]; }; @@ -1741,6 +1784,9 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96, ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97, ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98, + ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99, + ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100, + ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101, /* must be last entry */ __ETHTOOL_LINK_MODE_MASK_NBITS diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h index a1af78d989..35c131a107 100644 --- a/include/standard-headers/linux/fuse.h +++ b/include/standard-headers/linux/fuse.h @@ -201,6 +201,11 @@ * 7.38 * - add FUSE_EXPIRE_ONLY flag to fuse_notify_inval_entry * - add FOPEN_PARALLEL_DIRECT_WRITES + * - add total_extlen to fuse_in_header + * - add FUSE_MAX_NR_SECCTX + * - add extension header + * - add FUSE_EXT_GROUPS + * - add FUSE_CREATE_SUPP_GROUP */ #ifndef _LINUX_FUSE_H @@ -358,6 +363,8 @@ struct fuse_file_lock { * FUSE_SECURITY_CTX: add security context to create, 
mkdir, symlink, and * mknod * FUSE_HAS_INODE_DAX: use per inode DAX + * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir, + * symlink and mknod (single group that matches parent) */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -394,6 +401,7 @@ struct fuse_file_lock { /* bits 32..63 get shifted down 32 bits into the flags2 field */ #define FUSE_SECURITY_CTX (1ULL << 32) #define FUSE_HAS_INODE_DAX (1ULL << 33) +#define FUSE_CREATE_SUPP_GROUP (1ULL << 34) /** * CUSE INIT request/reply flags @@ -499,6 +507,17 @@ struct fuse_file_lock { */ #define FUSE_EXPIRE_ONLY (1 << 0) +/** + * extension type + * FUSE_MAX_NR_SECCTX: maximum value of &fuse_secctx_header.nr_secctx + * FUSE_EXT_GROUPS: &fuse_supp_groups extension + */ +enum fuse_ext_type { + /* Types 0..31 are reserved for fuse_secctx_header */ + FUSE_MAX_NR_SECCTX = 31, + FUSE_EXT_GROUPS = 32, +}; + enum fuse_opcode { FUSE_LOOKUP = 1, FUSE_FORGET = 2, /* no reply */ @@ -882,7 +901,8 @@ struct fuse_in_header { uint32_t uid; uint32_t gid; uint32_t pid; - uint32_t padding; + uint16_t total_extlen; /* length of extensions in 8byte units */ + uint16_t padding; }; struct fuse_out_header { @@ -1043,4 +1063,27 @@ struct fuse_secctx_header { uint32_t nr_secctx; }; +/** + * struct fuse_ext_header - extension header + * @size: total size of this extension including this header + * @type: type of extension + * + * This is made compatible with fuse_secctx_header by using type values > + * FUSE_MAX_NR_SECCTX + */ +struct fuse_ext_header { + uint32_t size; + uint32_t type; +}; + +/** + * struct fuse_supp_groups - Supplementary group extension + * @nr_groups: number of supplementary groups + * @groups: flexible array of group IDs + */ +struct fuse_supp_groups { + uint32_t nr_groups; + uint32_t groups[]; +}; + #endif /* _LINUX_FUSE_H */ diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h index 85ab127881..dc2000e0fe 100644 --- a/include/standard-headers/linux/pci_regs.h +++ b/include/standard-headers/linux/pci_regs.h @@ -693,6 +693,7 @@ #define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */ #define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */ #define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */ +#define PCI_EXP_LNKSTA2_FLIT 0x0400 /* Flit Mode Status */ #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */ #define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */ #define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */ diff --git a/include/standard-headers/linux/vhost_types.h b/include/standard-headers/linux/vhost_types.h index c41a73fe36..88600e2d9f 100644 --- a/include/standard-headers/linux/vhost_types.h +++ b/include/standard-headers/linux/vhost_types.h @@ -163,5 +163,7 @@ struct vhost_vdpa_iova_range { #define VHOST_BACKEND_F_IOTLB_ASID 0x3 /* Device can be suspended */ #define VHOST_BACKEND_F_SUSPEND 0x4 +/* Device can be resumed */ +#define VHOST_BACKEND_F_RESUME 0x5 #endif diff --git a/include/standard-headers/linux/virtio_blk.h b/include/standard-headers/linux/virtio_blk.h index e81715cd70..7155b1a470 100644 --- a/include/standard-headers/linux/virtio_blk.h +++ b/include/standard-headers/linux/virtio_blk.h @@ -41,6 +41,7 @@ #define VIRTIO_BLK_F_DISCARD 13 /* DISCARD is supported */ #define VIRTIO_BLK_F_WRITE_ZEROES 14 /* WRITE ZEROES is supported */ #define VIRTIO_BLK_F_SECURE_ERASE 16 /* Secure Erase is supported */ +#define VIRTIO_BLK_F_ZONED 17 /* Zoned block device */ /* Legacy feature bits */ #ifndef 
VIRTIO_BLK_NO_LEGACY @@ -135,6 +136,16 @@ struct virtio_blk_config { /* Secure erase commands must be aligned to this number of sectors. */ __virtio32 secure_erase_sector_alignment; + /* Zoned block device characteristics (if VIRTIO_BLK_F_ZONED) */ + struct virtio_blk_zoned_characteristics { + uint32_t zone_sectors; + uint32_t max_open_zones; + uint32_t max_active_zones; + uint32_t max_append_sectors; + uint32_t write_granularity; + uint8_t model; + uint8_t unused2[3]; + } zoned; } QEMU_PACKED; /* @@ -172,6 +183,27 @@ struct virtio_blk_config { /* Secure erase command */ #define VIRTIO_BLK_T_SECURE_ERASE 14 +/* Zone append command */ +#define VIRTIO_BLK_T_ZONE_APPEND 15 + +/* Report zones command */ +#define VIRTIO_BLK_T_ZONE_REPORT 16 + +/* Open zone command */ +#define VIRTIO_BLK_T_ZONE_OPEN 18 + +/* Close zone command */ +#define VIRTIO_BLK_T_ZONE_CLOSE 20 + +/* Finish zone command */ +#define VIRTIO_BLK_T_ZONE_FINISH 22 + +/* Reset zone command */ +#define VIRTIO_BLK_T_ZONE_RESET 24 + +/* Reset All zones command */ +#define VIRTIO_BLK_T_ZONE_RESET_ALL 26 + #ifndef VIRTIO_BLK_NO_LEGACY /* Barrier before this op. */ #define VIRTIO_BLK_T_BARRIER 0x80000000 @@ -191,6 +223,72 @@ struct virtio_blk_outhdr { __virtio64 sector; }; +/* + * Supported zoned device models. + */ + +/* Regular block device */ +#define VIRTIO_BLK_Z_NONE 0 +/* Host-managed zoned device */ +#define VIRTIO_BLK_Z_HM 1 +/* Host-aware zoned device */ +#define VIRTIO_BLK_Z_HA 2 + +/* + * Zone descriptor. A part of VIRTIO_BLK_T_ZONE_REPORT command reply. + */ +struct virtio_blk_zone_descriptor { + /* Zone capacity */ + uint64_t z_cap; + /* The starting sector of the zone */ + uint64_t z_start; + /* Zone write pointer position in sectors */ + uint64_t z_wp; + /* Zone type */ + uint8_t z_type; + /* Zone state */ + uint8_t z_state; + uint8_t reserved[38]; +}; + +struct virtio_blk_zone_report { + uint64_t nr_zones; + uint8_t reserved[56]; + struct virtio_blk_zone_descriptor zones[]; +}; + +/* + * Supported zone types. + */ + +/* Conventional zone */ +#define VIRTIO_BLK_ZT_CONV 1 +/* Sequential Write Required zone */ +#define VIRTIO_BLK_ZT_SWR 2 +/* Sequential Write Preferred zone */ +#define VIRTIO_BLK_ZT_SWP 3 + +/* + * Zone states that are available for zones of all types. 
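For orientation, a hedged driver-side sketch of walking a VIRTIO_BLK_T_ZONE_REPORT reply (it assumes the caller sized the buffer for the descriptors it requested; walk_zone_report is an illustrative name):

#include <inttypes.h>
#include <stdio.h>

static void walk_zone_report(const struct virtio_blk_zone_report *rep)
{
    for (uint64_t i = 0; i < rep->nr_zones; i++) {
        const struct virtio_blk_zone_descriptor *z = &rep->zones[i];

        printf("zone @%" PRIu64 ": wp=%" PRIu64 " type=%u state=%u\n",
               z->z_start, z->z_wp, z->z_type, z->z_state);
    }
}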
+ */ + +/* Not a write pointer (conventional zones only) */ +#define VIRTIO_BLK_ZS_NOT_WP 0 +/* Empty */ +#define VIRTIO_BLK_ZS_EMPTY 1 +/* Implicitly Open */ +#define VIRTIO_BLK_ZS_IOPEN 2 +/* Explicitly Open */ +#define VIRTIO_BLK_ZS_EOPEN 3 +/* Closed */ +#define VIRTIO_BLK_ZS_CLOSED 4 +/* Read-Only */ +#define VIRTIO_BLK_ZS_RDONLY 13 +/* Full */ +#define VIRTIO_BLK_ZS_FULL 14 +/* Offline */ +#define VIRTIO_BLK_ZS_OFFLINE 15 + /* Unmap this range (only valid for write zeroes command) */ #define VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP 0x00000001 @@ -217,4 +315,11 @@ struct virtio_scsi_inhdr { #define VIRTIO_BLK_S_OK 0 #define VIRTIO_BLK_S_IOERR 1 #define VIRTIO_BLK_S_UNSUPP 2 + +/* Error codes that are specific to zoned block devices */ +#define VIRTIO_BLK_S_ZONE_INVALID_CMD 3 +#define VIRTIO_BLK_S_ZONE_UNALIGNED_WP 4 +#define VIRTIO_BLK_S_ZONE_OPEN_RESOURCE 5 +#define VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE 6 + #endif /* _LINUX_VIRTIO_BLK_H */ diff --git a/include/sysemu/accel-ops.h b/include/sysemu/accel-ops.h index 8cc7996def..3c1fab4b1e 100644 --- a/include/sysemu/accel-ops.h +++ b/include/sysemu/accel-ops.h @@ -10,7 +10,7 @@ #ifndef ACCEL_OPS_H #define ACCEL_OPS_H -#include "exec/hwaddr.h" +#include "exec/cpu-common.h" #include "qom/object.h" #define ACCEL_OPS_SUFFIX "-ops" @@ -48,8 +48,9 @@ struct AccelOpsClass { /* gdbstub hooks */ bool (*supports_guest_debug)(void); - int (*insert_breakpoint)(CPUState *cpu, int type, hwaddr addr, hwaddr len); - int (*remove_breakpoint)(CPUState *cpu, int type, hwaddr addr, hwaddr len); + int (*update_guest_debug)(CPUState *cpu); + int (*insert_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len); + int (*remove_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len); void (*remove_all_breakpoints)(CPUState *cpu); }; diff --git a/include/sysemu/block-backend-common.h b/include/sysemu/block-backend-common.h index 2391679c56..780cea7305 100644 --- a/include/sysemu/block-backend-common.h +++ b/include/sysemu/block-backend-common.h @@ -59,6 +59,19 @@ typedef struct BlockDevOps { */ bool (*is_medium_locked)(void *opaque); + /* + * Runs when the backend receives a drain request. + */ + void (*drained_begin)(void *opaque); + /* + * Runs when the backend's last drain request ends. + */ + void (*drained_end)(void *opaque); + /* + * Is the device still busy? + */ + bool (*drained_poll)(void *opaque); + /* * I/O API functions. These functions are thread-safe. * @@ -76,18 +89,6 @@ typedef struct BlockDevOps { * Runs when the size changed (e.g. monitor command block_resize) */ void (*resize_cb)(void *opaque); - /* - * Runs when the backend receives a drain request. - */ - void (*drained_begin)(void *opaque); - /* - * Runs when the backend's last drain request ends. - */ - void (*drained_end)(void *opaque); - /* - * Is the device still busy? 
- */ - bool (*drained_poll)(void *opaque); } BlockDevOps; /* diff --git a/include/sysemu/block-backend-global-state.h b/include/sysemu/block-backend-global-state.h index 2b6d27db7c..184e667ebd 100644 --- a/include/sysemu/block-backend-global-state.h +++ b/include/sysemu/block-backend-global-state.h @@ -42,7 +42,10 @@ blk_co_new_open(const char *filename, const char *reference, QDict *options, int blk_get_refcnt(BlockBackend *blk); void blk_ref(BlockBackend *blk); -void blk_unref(BlockBackend *blk); + +void no_coroutine_fn blk_unref(BlockBackend *blk); +void coroutine_fn no_co_wrapper blk_co_unref(BlockBackend *blk); + void blk_remove_all_bs(void); BlockBackend *blk_by_name(const char *name); BlockBackend *blk_next(BlockBackend *blk); @@ -78,6 +81,7 @@ void blk_activate(BlockBackend *blk, Error **errp); int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags); void blk_aio_cancel(BlockAIOCB *acb); int blk_commit_all(void); +bool blk_in_drain(BlockBackend *blk); void blk_drain(BlockBackend *blk); void blk_drain_all(void); void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error, diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h index 40ab178719..d62a7ee773 100644 --- a/include/sysemu/block-backend-io.h +++ b/include/sysemu/block-backend-io.h @@ -46,6 +46,16 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset, BlockCompletionFunc *cb, void *opaque); BlockAIOCB *blk_aio_flush(BlockBackend *blk, BlockCompletionFunc *cb, void *opaque); +BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset, + unsigned int *nr_zones, + BlockZoneDescriptor *zones, + BlockCompletionFunc *cb, void *opaque); +BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op, + int64_t offset, int64_t len, + BlockCompletionFunc *cb, void *opaque); +BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset, + QEMUIOVector *qiov, BdrvRequestFlags flags, + BlockCompletionFunc *cb, void *opaque); BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes, BlockCompletionFunc *cb, void *opaque); void blk_aio_cancel_async(BlockAIOCB *acb); @@ -70,10 +80,12 @@ void co_wrapper blk_eject(BlockBackend *blk, bool eject_flag); int64_t coroutine_fn blk_co_getlength(BlockBackend *blk); int64_t co_wrapper_mixed blk_getlength(BlockBackend *blk); +void coroutine_fn blk_co_get_geometry(BlockBackend *blk, + uint64_t *nb_sectors_ptr); void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr); int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk); -int64_t co_wrapper_mixed blk_nb_sectors(BlockBackend *blk); +int64_t blk_nb_sectors(BlockBackend *blk); void *blk_try_blockalign(BlockBackend *blk, size_t size); void *blk_blockalign(BlockBackend *blk, size_t size); @@ -88,6 +100,11 @@ void blk_iostatus_set_err(BlockBackend *blk, int error); int blk_get_max_iov(BlockBackend *blk); int blk_get_max_hw_iov(BlockBackend *blk); +/* + * blk_io_plug/unplug are thread-local operations. This means that multiple + * IOThreads can simultaneously call plug/unplug, but the caller must ensure + * that each unplug() is called in the same IOThread as the matching plug().
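A hedged sketch of the intended pairing, assuming the matching blk_io_unplug() wrapper and illustrative request bookkeeping (reqs, n_reqs, write_done_cb):

/* All from one IOThread: batch the submissions, then release them. */
blk_io_plug(blk);
for (int i = 0; i < n_reqs; i++) {
    blk_aio_pwritev(blk, reqs[i].offset, &reqs[i].qiov, 0,
                    write_done_cb, &reqs[i]);
}
blk_io_unplug(blk);   /* same IOThread as the matching plug() */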
+ */ void coroutine_fn blk_co_io_plug(BlockBackend *blk); void co_wrapper blk_io_plug(BlockBackend *blk); @@ -184,6 +201,23 @@ int co_wrapper_mixed blk_pwrite_zeroes(BlockBackend *blk, int64_t offset, int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, int64_t bytes, BdrvRequestFlags flags); +int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset, + unsigned int *nr_zones, + BlockZoneDescriptor *zones); +int co_wrapper_mixed blk_zone_report(BlockBackend *blk, int64_t offset, + unsigned int *nr_zones, + BlockZoneDescriptor *zones); +int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op, + int64_t offset, int64_t len); +int co_wrapper_mixed blk_zone_mgmt(BlockBackend *blk, BlockZoneOp op, + int64_t offset, int64_t len); +int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset, + QEMUIOVector *qiov, + BdrvRequestFlags flags); +int co_wrapper_mixed blk_zone_append(BlockBackend *blk, int64_t *offset, + QEMUIOVector *qiov, + BdrvRequestFlags flags); + int co_wrapper_mixed blk_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes); int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h index 1bace3379b..0535a4c68a 100644 --- a/include/sysemu/cpus.h +++ b/include/sysemu/cpus.h @@ -1,7 +1,6 @@ #ifndef QEMU_CPUS_H #define QEMU_CPUS_H -#include "qemu/timer.h" #include "sysemu/accel-ops.h" /* register accel-specific operations */ diff --git a/include/sysemu/cryptodev.h b/include/sysemu/cryptodev.h index cf9b3f07fe..bc021ce847 100644 --- a/include/sysemu/cryptodev.h +++ b/include/sysemu/cryptodev.h @@ -24,7 +24,9 @@ #define CRYPTODEV_H #include "qemu/queue.h" +#include "qemu/throttle.h" #include "qom/object.h" +#include "qapi/qapi-types-cryptodev.h" /** * CryptoDevBackend: @@ -48,12 +50,6 @@ typedef struct CryptoDevBackendPeers CryptoDevBackendPeers; typedef struct CryptoDevBackendClient CryptoDevBackendClient; -enum CryptoDevBackendAlgType { - CRYPTODEV_BACKEND_ALG_SYM, - CRYPTODEV_BACKEND_ALG_ASYM, - CRYPTODEV_BACKEND_ALG__MAX, -}; - /** * CryptoDevBackendSymSessionInfo: * @@ -179,17 +175,22 @@ typedef struct CryptoDevBackendAsymOpInfo { uint8_t *dst; } CryptoDevBackendAsymOpInfo; +typedef void (*CryptoDevCompletionFunc) (void *opaque, int ret); + typedef struct CryptoDevBackendOpInfo { - enum CryptoDevBackendAlgType algtype; + QCryptodevBackendAlgType algtype; uint32_t op_code; + uint32_t queue_index; + CryptoDevCompletionFunc cb; + void *opaque; /* argument for cb */ uint64_t session_id; union { CryptoDevBackendSymOpInfo *sym_op_info; CryptoDevBackendAsymOpInfo *asym_op_info; } u; + QTAILQ_ENTRY(CryptoDevBackendOpInfo) next; } CryptoDevBackendOpInfo; -typedef void (*CryptoDevCompletionFunc) (void *opaque, int ret); struct CryptoDevBackendClass { ObjectClass parent_class; @@ -209,24 +210,11 @@ struct CryptoDevBackendClass { void *opaque); int (*do_op)(CryptoDevBackend *backend, - CryptoDevBackendOpInfo *op_info, - uint32_t queue_index, - CryptoDevCompletionFunc cb, - void *opaque); + CryptoDevBackendOpInfo *op_info); }; -typedef enum CryptoDevBackendOptionsType { - CRYPTODEV_BACKEND_TYPE_NONE = 0, - CRYPTODEV_BACKEND_TYPE_BUILTIN = 1, - CRYPTODEV_BACKEND_TYPE_VHOST_USER = 2, - CRYPTODEV_BACKEND_TYPE_LKCF = 3, - CRYPTODEV_BACKEND_TYPE__MAX, -} CryptoDevBackendOptionsType; - struct CryptoDevBackendClient { - CryptoDevBackendOptionsType type; - char *model; - char *name; + QCryptodevBackendType type; char *info_str; unsigned int queue_index; int vring_enable; @@ 
-260,6 +248,24 @@ struct CryptoDevBackendConf { uint64_t max_size; }; +typedef struct CryptodevBackendSymStat { + int64_t encrypt_ops; + int64_t decrypt_ops; + int64_t encrypt_bytes; + int64_t decrypt_bytes; +} CryptodevBackendSymStat; + +typedef struct CryptodevBackendAsymStat { + int64_t encrypt_ops; + int64_t decrypt_ops; + int64_t sign_ops; + int64_t verify_ops; + int64_t encrypt_bytes; + int64_t decrypt_bytes; + int64_t sign_bytes; + int64_t verify_bytes; +} CryptodevBackendAsymStat; + struct CryptoDevBackend { Object parent_obj; @@ -267,15 +273,48 @@ struct CryptoDevBackend { /* Tag the cryptodev backend is used by virtio-crypto or not */ bool is_used; CryptoDevBackendConf conf; + CryptodevBackendSymStat *sym_stat; + CryptodevBackendAsymStat *asym_stat; + + ThrottleState ts; + ThrottleTimers tt; + ThrottleConfig tc; + QTAILQ_HEAD(, CryptoDevBackendOpInfo) opinfos; }; +#define CryptodevSymStatInc(be, op, bytes) do { \ + be->sym_stat->op##_bytes += (bytes); \ + be->sym_stat->op##_ops += 1; \ +} while (/*CONSTCOND*/0) + +#define CryptodevSymStatIncEncrypt(be, bytes) \ + CryptodevSymStatInc(be, encrypt, bytes) + +#define CryptodevSymStatIncDecrypt(be, bytes) \ + CryptodevSymStatInc(be, decrypt, bytes) + +#define CryptodevAsymStatInc(be, op, bytes) do { \ + be->asym_stat->op##_bytes += (bytes); \ + be->asym_stat->op##_ops += 1; \ +} while (/*CONSTCOND*/0) + +#define CryptodevAsymStatIncEncrypt(be, bytes) \ + CryptodevAsymStatInc(be, encrypt, bytes) + +#define CryptodevAsymStatIncDecrypt(be, bytes) \ + CryptodevAsymStatInc(be, decrypt, bytes) + +#define CryptodevAsymStatIncSign(be, bytes) \ + CryptodevAsymStatInc(be, sign, bytes) + +#define CryptodevAsymStatIncVerify(be, bytes) \ + CryptodevAsymStatInc(be, verify, bytes) + + /** * cryptodev_backend_new_client: - * @model: the cryptodev backend model - * @name: the cryptodev backend name, can be NULL * - * Creates a new cryptodev backend client object - * with the @name in the model @model. + * Creates a new cryptodev backend client object. * * The returned object must be released with * cryptodev_backend_free_client() when no @@ -283,9 +322,8 @@ struct CryptoDevBackend { * * Returns: a new cryptodev backend client object */ -CryptoDevBackendClient * -cryptodev_backend_new_client(const char *model, - const char *name); +CryptoDevBackendClient *cryptodev_backend_new_client(void); + /** * cryptodev_backend_free_client: * @cc: the cryptodev backend client object @@ -354,24 +392,17 @@ int cryptodev_backend_close_session( /** * cryptodev_backend_crypto_operation: * @backend: the cryptodev backend object - * @opaque1: pointer to a VirtIOCryptoReq object - * @queue_index: queue index of cryptodev backend client - * @errp: pointer to a NULL-initialized error object - * @cb: callbacks when operation is completed - * @opaque2: parameter passed to cb + * @op_info: pointer to a CryptoDevBackendOpInfo object * - * Do crypto operation, such as encryption and - * decryption + * Do crypto operation, such as encryption, decryption, signature and + * verification * * Returns: 0 for success and cb will be called when creation is completed, * negative value for error, and cb will not be called. 
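A hedged sketch of the new calling convention: the completion callback and its argument now travel inside the request itself. QCRYPTODEV_BACKEND_ALG_SYM is assumed to be the QAPI-generated constant, VIRTIO_CRYPTO_CIPHER_ENCRYPT comes from the virtio-crypto headers, and the request variables are illustrative:

CryptoDevBackendOpInfo *op_info = g_new0(CryptoDevBackendOpInfo, 1);

op_info->algtype = QCRYPTODEV_BACKEND_ALG_SYM;
op_info->op_code = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
op_info->queue_index = queue_index;
op_info->session_id = session_id;
op_info->u.sym_op_info = sym_op_info;
op_info->cb = request_done_cb;      /* CryptoDevCompletionFunc */
op_info->opaque = request;          /* handed back to request_done_cb */

if (cryptodev_backend_crypto_operation(backend, op_info) < 0) {
    /* negative return: request_done_cb will not be invoked */
}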
*/ int cryptodev_backend_crypto_operation( CryptoDevBackend *backend, - void *opaque1, - uint32_t queue_index, - CryptoDevCompletionFunc cb, - void *opaque2); + CryptoDevBackendOpInfo *op_info); /** * cryptodev_backend_set_used: diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index e9a97eda8c..88f5ccfbce 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -14,9 +14,6 @@ #ifndef QEMU_KVM_H #define QEMU_KVM_H -#include "qemu/queue.h" -#include "hw/core/cpu.h" -#include "exec/memattrs.h" #include "qemu/accel.h" #include "qom/object.h" @@ -471,10 +468,8 @@ uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index); void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len); -#if !defined(CONFIG_USER_ONLY) int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr, hwaddr *phys_addr); -#endif #endif /* NEED_CPU_H */ diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h index 60b520a13e..511b42bde5 100644 --- a/include/sysemu/kvm_int.h +++ b/include/sysemu/kvm_int.h @@ -115,9 +115,14 @@ struct KVMState } *as; uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */ uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */ + bool kvm_dirty_ring_with_bitmap; struct KVMDirtyRingReaper reaper; NotifyVmexitOption notify_vmexit; uint32_t notify_window; + uint32_t xen_version; + uint32_t xen_caps; + uint16_t xen_gnttab_max_frames; + uint16_t xen_evtchn_max_pirq; }; void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, diff --git a/include/sysemu/kvm_xen.h b/include/sysemu/kvm_xen.h new file mode 100644 index 0000000000..595abfbe40 --- /dev/null +++ b/include/sysemu/kvm_xen.h @@ -0,0 +1,43 @@ +/* + * Xen HVM emulation support in KVM + * + * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. + * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_SYSEMU_KVM_XEN_H +#define QEMU_SYSEMU_KVM_XEN_H + +/* The KVM API uses these to indicate "no GPA" or "no GFN" */ +#define INVALID_GPA UINT64_MAX +#define INVALID_GFN UINT64_MAX + +/* QEMU plays the rôle of dom0 for "interdomain" communication. 
*/ +#define DOMID_QEMU 0 + +int kvm_xen_soft_reset(void); +uint32_t kvm_xen_get_caps(void); +void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id); +void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type); +void kvm_xen_set_callback_asserted(void); +int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port); +uint16_t kvm_xen_get_gnttab_max_frames(void); +uint16_t kvm_xen_get_evtchn_max_pirq(void); + +#define kvm_xen_has_cap(cap) (!!(kvm_xen_get_caps() & \ + KVM_XEN_HVM_CONFIG_ ## cap)) + +#define XEN_SPECIAL_AREA_ADDR 0xfeff8000UL +#define XEN_SPECIAL_AREA_SIZE 0x4000UL + +#define XEN_SPECIALPAGE_CONSOLE 0 +#define XEN_SPECIALPAGE_XENSTORE 1 + +#define XEN_SPECIAL_PFN(x) ((XEN_SPECIAL_AREA_ADDR >> TARGET_PAGE_BITS) + \ + XEN_SPECIALPAGE_##x) + +#endif /* QEMU_SYSEMU_KVM_XEN_H */ diff --git a/include/sysemu/os-posix.h b/include/sysemu/os-posix.h index 58de7c994d..1030d39904 100644 --- a/include/sysemu/os-posix.h +++ b/include/sysemu/os-posix.h @@ -51,9 +51,6 @@ void os_daemonize(void); void os_setup_post(void); int os_mlock(void); -#define closesocket(s) close(s) -#define ioctlsocket(s, r, v) ioctl(s, r, v) - int os_set_daemonize(bool d); bool is_daemonized(void); diff --git a/include/sysemu/os-win32.h b/include/sysemu/os-win32.h index 97d0243aee..65f6c9ea57 100644 --- a/include/sysemu/os-win32.h +++ b/include/sysemu/os-win32.h @@ -29,6 +29,7 @@ #include #include #include +#include "qemu/typedefs.h" #ifdef HAVE_AFUNIX_H #include @@ -164,10 +165,31 @@ static inline void qemu_funlockfile(FILE *f) #endif } -/* We wrap all the sockets functions so that we can - * set errno based on WSAGetLastError() +/* Helper for WSAEventSelect, to report errors */ +bool qemu_socket_select(int sockfd, WSAEVENT hEventObject, + long lNetworkEvents, Error **errp); + +bool qemu_socket_unselect(int sockfd, Error **errp); + +/* We wrap all the sockets functions so that we can set errno based on + * WSAGetLastError(), and use file-descriptors instead of SOCKET. */ +/* + * qemu_close_socket_osfhandle: + * @fd: a file descriptor associated with a SOCKET + * + * Close only the C run-time file descriptor, leave the SOCKET opened. + * + * Returns zero on success. On error, -1 is returned, and errno is set to + * indicate the error. 
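A usage sketch for the function documented above (hypothetical caller; assumes `fd` wraps a live SOCKET): only the C run-time descriptor is freed, so the native handle survives and must be released separately:

    SOCKET s = (SOCKET)_get_osfhandle(fd);   /* recover the native handle */
    if (qemu_close_socket_osfhandle(fd) == 0) {
        /* fd is closed, but `s` remains open and must eventually
         * be released with closesocket(s). */
    }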
+ */ +int qemu_close_socket_osfhandle(int fd); + +#undef close +#define close qemu_close_wrap +int qemu_close_wrap(int fd); + #undef connect #define connect qemu_connect_wrap int qemu_connect_wrap(int sockfd, const struct sockaddr *addr, @@ -199,10 +221,6 @@ int qemu_shutdown_wrap(int sockfd, int how); #define ioctlsocket qemu_ioctlsocket_wrap int qemu_ioctlsocket_wrap(int fd, int req, void *val); -#undef closesocket -#define closesocket qemu_closesocket_wrap -int qemu_closesocket_wrap(int fd); - #undef getsockopt #define getsockopt qemu_getsockopt_wrap int qemu_getsockopt_wrap(int sockfd, int level, int optname, @@ -241,6 +259,10 @@ ssize_t qemu_recv_wrap(int sockfd, void *buf, size_t len, int flags); ssize_t qemu_recvfrom_wrap(int sockfd, void *buf, size_t len, int flags, struct sockaddr *addr, socklen_t *addrlen); +EXCEPTION_DISPOSITION +win32_close_exception_handler(struct _EXCEPTION_RECORD*, void*, + struct _CONTEXT*, void*); + #ifdef __cplusplus } #endif diff --git a/include/sysemu/qtest.h b/include/sysemu/qtest.h index 4c53537ef3..85f05b0e46 100644 --- a/include/sysemu/qtest.h +++ b/include/sysemu/qtest.h @@ -14,6 +14,7 @@ #ifndef QTEST_H #define QTEST_H +#include "chardev/char.h" extern bool qtest_allowed; @@ -22,6 +23,9 @@ static inline bool qtest_enabled(void) return qtest_allowed; } +void qtest_send_prefix(CharBackend *chr); +void G_GNUC_PRINTF(2, 3) qtest_sendf(CharBackend *chr, const char *fmt, ...); +void qtest_set_command_cb(bool (*pc_cb)(CharBackend *chr, gchar **words)); bool qtest_driver(void); void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **errp); diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h index 6e5ab09f71..08aae5869f 100644 --- a/include/sysemu/replay.h +++ b/include/sysemu/replay.h @@ -1,8 +1,8 @@ -#ifndef REPLAY_H -#define REPLAY_H +#ifndef SYSEMU_REPLAY_H +#define SYSEMU_REPLAY_H /* - * replay.h + * QEMU replay (system interface) * * Copyright (c) 2010-2015 Institute for System Programming * of the Russian Academy of Sciences. @@ -12,9 +12,9 @@ * */ +#include "exec/replay-core.h" #include "qapi/qapi-types-misc.h" #include "qapi/qapi-types-run-state.h" -#include "qapi/qapi-types-replay.h" #include "qapi/qapi-types-ui.h" #include "block/aio.h" @@ -45,8 +45,6 @@ typedef enum ReplayCheckpoint ReplayCheckpoint; typedef struct ReplayNetState ReplayNetState; -extern ReplayMode replay_mode; - /* Name of the initial VM snapshot */ extern char *replay_snapshot; @@ -63,40 +61,6 @@ extern char *replay_snapshot; void replay_mutex_lock(void); void replay_mutex_unlock(void); -/* Replay process control functions */ - -/*! Enables recording or saving event log with specified parameters */ -void replay_configure(struct QemuOpts *opts); -/*! Initializes timers used for snapshotting and enables events recording */ -void replay_start(void); -/*! Closes replay log file and frees other resources. */ -void replay_finish(void); -/*! Adds replay blocker with the specified error description */ -void replay_add_blocker(const char *feature); -/* Returns name of the replay log file */ -const char *replay_get_filename(void); -/* - * Start making one step in backward direction. - * Used by gdbstub for backwards debugging. - * Returns true on success. - */ -bool replay_reverse_step(void); -/* - * Start searching the last breakpoint/watchpoint. - * Used by gdbstub for backwards debugging. - * Returns true if the process successfully started. 
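The qtest_set_command_cb() hook declared above lets board code intercept qtest protocol commands before the generic handler runs. A hypothetical callback (the names and the handled/deferred return convention are assumptions; `words` is the already-split command line):

    static bool my_board_qtest_cb(CharBackend *chr, gchar **words)
    {
        if (g_strcmp0(words[0], "my-cmd") == 0) {
            qtest_sendf(chr, "OK\n");
            return true;    /* command handled here */
        }
        return false;       /* defer to the default handler */
    }

    /* registered once during board init: */
    qtest_set_command_cb(my_board_qtest_cb);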
- */ -bool replay_reverse_continue(void); -/* - * Returns true if replay module is processing - * reverse_continue or reverse_step request - */ -bool replay_running_debug(void); -/* Called in reverse debugging mode to collect breakpoint information */ -void replay_breakpoint(void); -/* Called when gdb is attached to gdbstub */ -void replay_gdb_attached(void); - /* Processing the instructions */ /*! Returns number of executed instructions. */ @@ -106,22 +70,6 @@ int replay_get_instructions(void); /*! Updates instructions counter in replay mode. */ void replay_account_executed_instructions(void); -/* Interrupts and exceptions */ - -/*! Called by exception handler to write or read - exception processing events. */ -bool replay_exception(void); -/*! Used to determine that exception is pending. - Does not proceed to the next event in the log. */ -bool replay_has_exception(void); -/*! Called by interrupt handlers to write or read - interrupt processing events. - \return true if interrupt should be processed */ -bool replay_interrupt(void); -/*! Tries to read interrupt event from the file. - Returns true, when interrupt request is pending */ -bool replay_has_interrupt(void); - /* Processing clocks and other time sources */ /*! Save the specified clock */ @@ -143,13 +91,6 @@ int64_t replay_read_clock(ReplayClockKind kind, int64_t raw_icount); ? replay_save_clock((clock), (value), icount_get_raw_locked()) \ : (value)) -/* Processing data from random generators */ - -/* Saves the values from the random number generator */ -void replay_save_random(int ret, void *buf, size_t len); -/* Loads the saved values for the random number generator */ -int replay_read_random(void *buf, size_t len); - /* Events */ /*! Called when qemu shutdown is requested. */ diff --git a/include/sysemu/tpm.h b/include/sysemu/tpm.h index fb40e30ff6..66e3b45f30 100644 --- a/include/sysemu/tpm.h +++ b/include/sysemu/tpm.h @@ -48,6 +48,7 @@ struct TPMIfClass { #define TYPE_TPM_TIS_SYSBUS "tpm-tis-device" #define TYPE_TPM_CRB "tpm-crb" #define TYPE_TPM_SPAPR "tpm-spapr" +#define TYPE_TPM_TIS_I2C "tpm-tis-i2c" #define TPM_IS_TIS_ISA(chr) \ object_dynamic_cast(OBJECT(chr), TYPE_TPM_TIS_ISA) @@ -57,6 +58,8 @@ struct TPMIfClass { object_dynamic_cast(OBJECT(chr), TYPE_TPM_CRB) #define TPM_IS_SPAPR(chr) \ object_dynamic_cast(OBJECT(chr), TYPE_TPM_SPAPR) +#define TPM_IS_TIS_I2C(chr) \ + object_dynamic_cast(OBJECT(chr), TYPE_TPM_TIS_I2C) /* returns NULL unless there is exactly one TPM device */ static inline TPMIf *tpm_find(void) diff --git a/include/tcg/debug-assert.h b/include/tcg/debug-assert.h new file mode 100644 index 0000000000..596765a3d2 --- /dev/null +++ b/include/tcg/debug-assert.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define tcg_debug_assert + * Copyright (c) 2008 Fabrice Bellard + */ + +#ifndef TCG_DEBUG_ASSERT_H +#define TCG_DEBUG_ASSERT_H + +#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS +# define tcg_debug_assert(X) do { assert(X); } while (0) +#else +# define tcg_debug_assert(X) \ + do { if (!(X)) { __builtin_unreachable(); } } while (0) +#endif + +#endif diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h index 2ba22bd5fe..6ccfe9131d 100644 --- a/include/tcg/tcg-ldst.h +++ b/include/tcg/tcg-ldst.h @@ -25,55 +25,39 @@ #ifndef TCG_LDST_H #define TCG_LDST_H -#ifdef CONFIG_SOFTMMU - /* Value zero-extended to tcg register size. 
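One point worth spelling out about the tcg_debug_assert() definition split out above: in non-debug builds a failed condition feeds __builtin_unreachable(), so no check is emitted at run time, yet the compiler may still optimize on the asserted condition. Illustration (assumed call site):

    tcg_debug_assert(vece <= MO_64);
    /* CONFIG_DEBUG_TCG set:   plain assert(), aborts if false.
     * CONFIG_DEBUG_TCG unset: no runtime check, but the compiler is
     *                         entitled to assume vece <= MO_64 below. */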
*/ -tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t retaddr); +uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t retaddr); +Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t retaddr); /* Value sign-extended to tcg register size. */ -tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr, + MemOpIdx oi, uintptr_t retaddr); -void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, - MemOpIdx oi, uintptr_t retaddr); -void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - MemOpIdx oi, uintptr_t retaddr); -void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr); -void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr); -void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - MemOpIdx oi, uintptr_t retaddr); -void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr); -void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr); +/* + * Value extended to at least uint32_t, so that some ABIs do not require + * zero-extension from uint8_t or uint16_t. 
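With the _ret_/_le_/_be_ helper variants collapsed above, endianness and access size now travel entirely in the MemOpIdx argument, and the guest address is uniformly uint64_t. A sketch of building that argument with the existing make_memop_idx() helper from exec/memopidx.h (mmu_idx and retaddr assumed in scope):

    MemOpIdx oi = make_memop_idx(MO_LEUQ, mmu_idx);  /* little-endian 8-byte op */
    uint64_t val = helper_ldq_mmu(env, addr, oi, retaddr);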
+ */ +void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, + MemOpIdx oi, uintptr_t retaddr); -#else - -G_NORETURN void helper_unaligned_ld(CPUArchState *env, target_ulong addr); -G_NORETURN void helper_unaligned_st(CPUArchState *env, target_ulong addr); - -#endif /* CONFIG_SOFTMMU */ #endif /* TCG_LDST_H */ diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h index 28cafbcc5c..a8183bfeab 100644 --- a/include/tcg/tcg-op-gvec.h +++ b/include/tcg/tcg-op-gvec.h @@ -330,6 +330,8 @@ void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs, void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_andcs(unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i64 c, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs, @@ -369,6 +371,8 @@ void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_rotls(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_rotrs(unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz); /* * Perform vector shift by vector element, modulo the element size. diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h index 839d91c0c7..35c5700183 100644 --- a/include/tcg/tcg-op.h +++ b/include/tcg/tcg-op.h @@ -259,12 +259,7 @@ static inline void gen_set_label(TCGLabel *l) tcg_gen_op1(INDEX_op_set_label, label_arg(l)); } -static inline void tcg_gen_br(TCGLabel *l) -{ - l->refs++; - tcg_gen_op1(INDEX_op_br, label_arg(l)); -} - +void tcg_gen_br(TCGLabel *l); void tcg_gen_mb(TCGBar); /* Helper calls. 
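For reference, the new tcg_gen_gvec_andcs() declared above follows the andc naming: the scalar c is broadcast and each element computes a & ~c. Per-lane reference semantics, as a sketch:

    static inline uint64_t gvec_andcs_ref(uint64_t a, uint64_t c)
    {
        return a & ~c;   /* one lane of dofs = aofs & ~dup(c) */
    }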
*/ @@ -728,48 +723,27 @@ static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi) #endif #if TARGET_INSN_START_WORDS == 1 -# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS static inline void tcg_gen_insn_start(target_ulong pc) { - tcg_gen_op1(INDEX_op_insn_start, pc); + TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 64 / TCG_TARGET_REG_BITS); + tcg_set_insn_start_param(op, 0, pc); } -# else -static inline void tcg_gen_insn_start(target_ulong pc) -{ - tcg_gen_op2(INDEX_op_insn_start, (uint32_t)pc, (uint32_t)(pc >> 32)); -} -# endif #elif TARGET_INSN_START_WORDS == 2 -# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1) { - tcg_gen_op2(INDEX_op_insn_start, pc, a1); + TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 2 * 64 / TCG_TARGET_REG_BITS); + tcg_set_insn_start_param(op, 0, pc); + tcg_set_insn_start_param(op, 1, a1); } -# else -static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1) -{ - tcg_gen_op4(INDEX_op_insn_start, - (uint32_t)pc, (uint32_t)(pc >> 32), - (uint32_t)a1, (uint32_t)(a1 >> 32)); -} -# endif #elif TARGET_INSN_START_WORDS == 3 -# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1, target_ulong a2) { - tcg_gen_op3(INDEX_op_insn_start, pc, a1, a2); + TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 3 * 64 / TCG_TARGET_REG_BITS); + tcg_set_insn_start_param(op, 0, pc); + tcg_set_insn_start_param(op, 1, a1); + tcg_set_insn_start_param(op, 2, a2); } -# else -static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1, - target_ulong a2) -{ - tcg_gen_op6(INDEX_op_insn_start, - (uint32_t)pc, (uint32_t)(pc >> 32), - (uint32_t)a1, (uint32_t)(a1 >> 32), - (uint32_t)a2, (uint32_t)(a2 >> 32)); -} -# endif #else # error "Unhandled number of operands to insn_start" #endif @@ -828,131 +802,204 @@ static inline void tcg_gen_plugin_cb_end(void) #if TARGET_LONG_BITS == 32 #define tcg_temp_new() tcg_temp_new_i32() #define tcg_global_mem_new tcg_global_mem_new_i32 -#define tcg_temp_local_new() tcg_temp_local_new_i32() #define tcg_temp_free tcg_temp_free_i32 +#define tcgv_tl_temp tcgv_i32_temp #define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32 #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32 #else #define tcg_temp_new() tcg_temp_new_i64() #define tcg_global_mem_new tcg_global_mem_new_i64 -#define tcg_temp_local_new() tcg_temp_local_new_i64() #define tcg_temp_free tcg_temp_free_i64 +#define tcgv_tl_temp tcgv_i64_temp #define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64 #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 #endif -void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, MemOp); -void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, MemOp); -void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, MemOp); -void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp); -void tcg_gen_qemu_ld_i128(TCGv_i128, TCGv, TCGArg, MemOp); -void tcg_gen_qemu_st_i128(TCGv_i128, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_ld_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType); +void tcg_gen_qemu_st_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType); +void tcg_gen_qemu_ld_i64_chk(TCGv_i64, TCGTemp *, TCGArg, MemOp, TCGType); +void tcg_gen_qemu_st_i64_chk(TCGv_i64, TCGTemp *, TCGArg, MemOp, TCGType); +void tcg_gen_qemu_ld_i128_chk(TCGv_i128, TCGTemp *, TCGArg, MemOp, TCGType); +void tcg_gen_qemu_st_i128_chk(TCGv_i128, TCGTemp *, TCGArg, MemOp, TCGType); -static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index) +static inline void +tcg_gen_qemu_ld_i32(TCGv_i32 v, 
TCGv a, TCGArg i, MemOp m) { - tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB); + tcg_gen_qemu_ld_i32_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); } -static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index) +static inline void +tcg_gen_qemu_st_i32(TCGv_i32 v, TCGv a, TCGArg i, MemOp m) { - tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_SB); + tcg_gen_qemu_st_i32_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); } -static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index) +static inline void +tcg_gen_qemu_ld_i64(TCGv_i64 v, TCGv a, TCGArg i, MemOp m) { - tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUW); + tcg_gen_qemu_ld_i64_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); } -static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index) +static inline void +tcg_gen_qemu_st_i64(TCGv_i64 v, TCGv a, TCGArg i, MemOp m) { - tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESW); + tcg_gen_qemu_st_i64_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); } -static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index) +static inline void +tcg_gen_qemu_ld_i128(TCGv_i128 v, TCGv a, TCGArg i, MemOp m) { - tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUL); + tcg_gen_qemu_ld_i128_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); } -static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index) +static inline void +tcg_gen_qemu_st_i128(TCGv_i128 v, TCGv a, TCGArg i, MemOp m) { - tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESL); + tcg_gen_qemu_st_i128_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); } -static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index) -{ - tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEUQ); -} +void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128, + TCGv_i128, TCGArg, MemOp, TCGType); -static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index) -{ - tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_UB); -} +void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128, + TCGv_i128, TCGArg, MemOp, TCGType); -static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index) -{ - tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUW); -} +void tcg_gen_atomic_xchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_xchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); -static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index) -{ - tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUL); -} +void tcg_gen_atomic_fetch_add_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_add_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_and_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_and_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_or_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_or_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_xor_i32_chk(TCGv_i32, TCGTemp *, 
TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_xor_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_smin_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_smin_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_umin_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_umin_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_smax_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_smax_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_umax_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_umax_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); -static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index) -{ - tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEUQ); -} +void tcg_gen_atomic_add_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_add_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_and_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_and_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_or_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_or_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_xor_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_xor_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_smin_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_smin_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_umin_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_umin_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_smax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_smax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_umax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_umax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); -void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32, - TCGArg, MemOp); -void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64, - TCGArg, MemOp); -void tcg_gen_atomic_cmpxchg_i128(TCGv_i128, TCGv, TCGv_i128, TCGv_i128, - TCGArg, MemOp); +#define DEF_ATOMIC2(N, S) \ + static inline void N##_##S(TCGv_##S r, TCGv a, TCGv_##S v, \ + TCGArg i, MemOp m) \ + { N##_##S##_chk(r, tcgv_tl_temp(a), v, i, m, TCG_TYPE_TL); } -void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32, - TCGArg, MemOp); -void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64, - TCGArg, MemOp); -void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128, TCGv, TCGv_i128, TCGv_i128, - TCGArg, MemOp); +#define DEF_ATOMIC3(N, S) \ + static inline void N##_##S(TCGv_##S r, TCGv a, TCGv_##S o, \ + TCGv_##S n, TCGArg i, MemOp m) \ + { N##_##S##_chk(r, tcgv_tl_temp(a), o, n, i, m, TCG_TYPE_TL); } -void 
tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +DEF_ATOMIC3(tcg_gen_atomic_cmpxchg, i32) +DEF_ATOMIC3(tcg_gen_atomic_cmpxchg, i64) +DEF_ATOMIC3(tcg_gen_atomic_cmpxchg, i128) -void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i32) +DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i64) +DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i128) -void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +DEF_ATOMIC2(tcg_gen_atomic_xchg, i32) +DEF_ATOMIC2(tcg_gen_atomic_xchg, i64) + +DEF_ATOMIC2(tcg_gen_atomic_fetch_add, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_add, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_xor, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_xor, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_smin, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_smin, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_umin, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_umin, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_smax, i32) 
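Hand-expanding one instance makes the generated API concrete; DEF_ATOMIC2(tcg_gen_atomic_xchg, i32) above produces exactly:

    static inline void tcg_gen_atomic_xchg_i32(TCGv_i32 r, TCGv a, TCGv_i32 v,
                                               TCGArg i, MemOp m)
    {
        tcg_gen_atomic_xchg_i32_chk(r, tcgv_tl_temp(a), v, i, m, TCG_TYPE_TL);
    }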
+DEF_ATOMIC2(tcg_gen_atomic_fetch_smax, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_umax, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_umax, i64) + +DEF_ATOMIC2(tcg_gen_atomic_add_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_add_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_and_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_and_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_or_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_or_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_xor_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_xor_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_smin_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_smin_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_umin_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_umin_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_smax_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_smax_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64) + +#undef DEF_ATOMIC2 +#undef DEF_ATOMIC3 void tcg_gen_mov_vec(TCGv_vec, TCGv_vec); void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32); @@ -1096,9 +1143,7 @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t); #define tcg_gen_extract_tl tcg_gen_extract_i64 #define tcg_gen_sextract_tl tcg_gen_sextract_i64 #define tcg_gen_extract2_tl tcg_gen_extract2_i64 -#define tcg_const_tl tcg_const_i64 #define tcg_constant_tl tcg_constant_i64 -#define tcg_const_local_tl tcg_const_local_i64 #define tcg_gen_movcond_tl tcg_gen_movcond_i64 #define tcg_gen_add2_tl tcg_gen_add2_i64 #define tcg_gen_sub2_tl tcg_gen_sub2_i64 @@ -1212,9 +1257,7 @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t); #define tcg_gen_extract_tl tcg_gen_extract_i32 #define tcg_gen_sextract_tl tcg_gen_sextract_i32 #define tcg_gen_extract2_tl tcg_gen_extract2_i32 -#define tcg_const_tl tcg_const_i32 #define tcg_constant_tl tcg_constant_i32 -#define tcg_const_local_tl tcg_const_local_i32 #define tcg_gen_movcond_tl tcg_gen_movcond_i32 #define tcg_gen_add2_tl tcg_gen_add2_i32 #define tcg_gen_sub2_tl tcg_gen_sub2_i32 @@ -1285,6 +1328,11 @@ static inline void tcg_gen_mov_ptr(TCGv_ptr d, TCGv_ptr s) glue(tcg_gen_mov_,PTR)((NAT)d, (NAT)s); } +static inline void tcg_gen_movi_ptr(TCGv_ptr d, intptr_t s) +{ + glue(tcg_gen_movi_,PTR)((NAT)d, s); +} + static inline void tcg_gen_brcondi_ptr(TCGCond cond, TCGv_ptr a, intptr_t b, TCGLabel *label) { diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h index dd444734d9..21594c1590 100644 --- a/include/tcg/tcg-opc.h +++ b/include/tcg/tcg-opc.h @@ -186,11 +186,10 @@ DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64)) DEF(muluh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muluh_i64)) DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64)) -#define TLADDR_ARGS (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 1 : 2) #define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2) /* QEMU specific */ -DEF(insn_start, 0, 0, TLADDR_ARGS * TARGET_INSN_START_WORDS, +DEF(insn_start, 0, 0, DATA64_ARGS * TARGET_INSN_START_WORDS, TCG_OPF_NOT_PRESENT) DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) @@ -199,19 +198,46 @@ DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) DEF(plugin_cb_start, 0, 0, 3, TCG_OPF_NOT_PRESENT) DEF(plugin_cb_end, 0, 0, 0, TCG_OPF_NOT_PRESENT) -DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1, +/* Replicate ld/st ops for 32 and 64-bit guest addresses. 
*/ +DEF(qemu_ld_a32_i32, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) -DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 1, +DEF(qemu_st_a32_i32, 0, 1 + 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) -DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 1, +DEF(qemu_ld_a32_i64, DATA64_ARGS, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) -DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1, +DEF(qemu_st_a32_i64, 0, DATA64_ARGS + 1, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) + +DEF(qemu_ld_a64_i32, 1, DATA64_ARGS, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) +DEF(qemu_st_a64_i32, 0, 1 + DATA64_ARGS, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) +DEF(qemu_ld_a64_i64, DATA64_ARGS, DATA64_ARGS, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) +DEF(qemu_st_a64_i64, 0, DATA64_ARGS + DATA64_ARGS, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) /* Only used by i386 to cope with stupid register constraints. */ -DEF(qemu_st8_i32, 0, TLADDR_ARGS + 1, 1, +DEF(qemu_st8_a32_i32, 0, 1 + 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | IMPL(TCG_TARGET_HAS_qemu_st8_i32)) +DEF(qemu_st8_a64_i32, 0, 1 + DATA64_ARGS, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | + IMPL(TCG_TARGET_HAS_qemu_st8_i32)) + +/* Only for 64-bit hosts at the moment. */ +DEF(qemu_ld_a32_i128, 2, 1, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | + IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) +DEF(qemu_ld_a64_i128, 2, 1, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | + IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) +DEF(qemu_st_a32_i128, 0, 3, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | + IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) +DEF(qemu_st_a64_i128, 0, 3, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | + IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) /* Host vector support. */ @@ -283,7 +309,6 @@ DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT) DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT) #endif -#undef TLADDR_ARGS #undef DATA64_ARGS #undef IMPL #undef IMPL64 diff --git a/include/tcg/tcg-temp-internal.h b/include/tcg/tcg-temp-internal.h new file mode 100644 index 0000000000..dded2917e5 --- /dev/null +++ b/include/tcg/tcg-temp-internal.h @@ -0,0 +1,83 @@ +/* + * TCG internals related to TCG temp allocation + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef TCG_TEMP_INTERNAL_H +#define TCG_TEMP_INTERNAL_H + +/* + * Allocation and freeing of EBB temps is reserved to TCG internals + */ + +void tcg_temp_free_internal(TCGTemp *); + +static inline void tcg_temp_free_i32(TCGv_i32 arg) +{ + tcg_temp_free_internal(tcgv_i32_temp(arg)); +} + +static inline void tcg_temp_free_i64(TCGv_i64 arg) +{ + tcg_temp_free_internal(tcgv_i64_temp(arg)); +} + +static inline void tcg_temp_free_i128(TCGv_i128 arg) +{ + tcg_temp_free_internal(tcgv_i128_temp(arg)); +} + +static inline void tcg_temp_free_ptr(TCGv_ptr arg) +{ + tcg_temp_free_internal(tcgv_ptr_temp(arg)); +} + +static inline void tcg_temp_free_vec(TCGv_vec arg) +{ + tcg_temp_free_internal(tcgv_vec_temp(arg)); +} + +static inline TCGv_i32 tcg_temp_ebb_new_i32(void) +{ + TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_EBB); + return temp_tcgv_i32(t); +} + +static inline TCGv_i64 tcg_temp_ebb_new_i64(void) +{ + TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_EBB); + return temp_tcgv_i64(t); +} + +static inline TCGv_i128 tcg_temp_ebb_new_i128(void) +{ + TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB); + return temp_tcgv_i128(t); +} + +static inline TCGv_ptr tcg_temp_ebb_new_ptr(void) +{ + TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_EBB); + return temp_tcgv_ptr(t); +} + +#endif /* TCG_TEMP_FREE_H */ diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index 59854f95b1..072c35f7f5 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -34,6 +34,7 @@ #include "tcg/tcg-mo.h" #include "tcg-target.h" #include "tcg/tcg-cond.h" +#include "tcg/debug-assert.h" /* XXX: make safe guess about sizes */ #define MAX_OP_PER_INSTR 266 @@ -222,14 +223,6 @@ typedef uint64_t tcg_insn_unit; /* The port better have done this. */ #endif - -#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS -# define tcg_debug_assert(X) do { assert(X); } while (0) -#else -# define tcg_debug_assert(X) \ - do { if (!(X)) { __builtin_unreachable(); } } while (0) -#endif - typedef struct TCGRelocation TCGRelocation; struct TCGRelocation { QSIMPLEQ_ENTRY(TCGRelocation) next; @@ -238,16 +231,23 @@ struct TCGRelocation { int type; }; +typedef struct TCGOp TCGOp; +typedef struct TCGLabelUse TCGLabelUse; +struct TCGLabelUse { + QSIMPLEQ_ENTRY(TCGLabelUse) next; + TCGOp *op; +}; + typedef struct TCGLabel TCGLabel; struct TCGLabel { - unsigned present : 1; - unsigned has_value : 1; - unsigned id : 14; - unsigned refs : 16; + bool present; + bool has_value; + uint16_t id; union { uintptr_t value; const tcg_insn_unit *value_ptr; } u; + QSIMPLEQ_HEAD(, TCGLabelUse) branches; QSIMPLEQ_HEAD(, TCGRelocation) relocs; QSIMPLEQ_ENTRY(TCGLabel) next; }; @@ -431,13 +431,15 @@ typedef enum TCGTempVal { } TCGTempVal; typedef enum TCGTempKind { - /* Temp is dead at the end of all basic blocks. */ - TEMP_NORMAL, - /* Temp is live across conditional branch, but dead otherwise. */ + /* + * Temp is dead at the end of the extended basic block (EBB), + * the single-entry multiple-exit region that falls through + * conditional branches. + */ TEMP_EBB, - /* Temp is saved across basic blocks but dead at the end of TBs. */ - TEMP_LOCAL, - /* Temp is saved across both basic blocks and translation blocks. */ + /* Temp is live across the entire translation block, but dead at end. */ + TEMP_TB, + /* Temp is live across the entire translation block, and between them. */ TEMP_GLOBAL, /* Temp is in a fixed register. 
*/ TEMP_FIXED, @@ -485,7 +487,7 @@ typedef struct TCGTempSet { #define SYNC_ARG (1 << 0) typedef uint32_t TCGLifeData; -typedef struct TCGOp { +struct TCGOp { TCGOpcode opc : 8; unsigned nargs : 8; @@ -504,7 +506,7 @@ typedef struct TCGOp { /* Arguments for the opcode. */ TCGArg args[]; -} TCGOp; +}; #define TCGOP_CALLI(X) (X)->param1 #define TCGOP_CALLO(X) (X)->param2 @@ -549,6 +551,13 @@ struct TCGContext { int nb_temps; int nb_indirects; int nb_ops; + TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */ + +#ifdef CONFIG_SOFTMMU + int page_mask; + uint8_t page_bits; + uint8_t tlb_dyn_max_bits; +#endif TCGRegSet reserved_regs; intptr_t current_frame_offset; @@ -565,7 +574,6 @@ struct TCGContext { #endif #ifdef CONFIG_DEBUG_TCG - int temps_in_use; int goto_tb_issue_mask; const TCGOpcode *vecop_list; #endif @@ -610,7 +618,7 @@ struct TCGContext { #endif GHashTable *const_table[TCG_TYPE_COUNT]; - TCGTempSet free_temps[TCG_TYPE_COUNT * 2]; + TCGTempSet free_temps[TCG_TYPE_COUNT]; TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */ QTAILQ_HEAD(, TCGOp) ops, free_ops; @@ -621,7 +629,7 @@ struct TCGContext { TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS]; uint16_t gen_insn_end_off[TCG_MAX_INSNS]; - target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS]; + uint64_t gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS]; /* Exit to translator on overflow. */ sigjmp_buf jmp_trans; @@ -763,24 +771,24 @@ static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v) op->args[arg] = v; } -static inline target_ulong tcg_get_insn_start_param(TCGOp *op, int arg) +static inline uint64_t tcg_get_insn_start_param(TCGOp *op, int arg) { -#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - return tcg_get_insn_param(op, arg); -#else - return tcg_get_insn_param(op, arg * 2) | - ((uint64_t)tcg_get_insn_param(op, arg * 2 + 1) << 32); -#endif + if (TCG_TARGET_REG_BITS == 64) { + return tcg_get_insn_param(op, arg); + } else { + return deposit64(tcg_get_insn_param(op, arg * 2), 32, 32, + tcg_get_insn_param(op, arg * 2 + 1)); + } } -static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v) +static inline void tcg_set_insn_start_param(TCGOp *op, int arg, uint64_t v) { -#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - tcg_set_insn_param(op, arg, v); -#else - tcg_set_insn_param(op, arg * 2, v); - tcg_set_insn_param(op, arg * 2 + 1, v >> 32); -#endif + if (TCG_TARGET_REG_BITS == 64) { + tcg_set_insn_param(op, arg, v); + } else { + tcg_set_insn_param(op, arg * 2, v); + tcg_set_insn_param(op, arg * 2 + 1, v >> 32); + } } /* The last op that was emitted. 
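A worked example of the insn-start accessors above on a 32-bit host: tcg_set_insn_start_param(op, 0, 0x1234abcd5678ULL) stores args[0] = 0xabcd5678 and args[1] = 0x00001234, and the read side reassembles them as

    deposit64(0xabcd5678, 32, 32, 0x00001234)
        /* == (0xabcd5678 & 0xffffffff) | ((uint64_t)0x1234 << 32)
           == 0x1234abcd5678ULL */

On 64-bit hosts both accessors reduce to a single tcg_{get,set}_insn_param() call.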
*/ @@ -844,7 +852,7 @@ void tcg_register_thread(void); void tcg_prologue_init(TCGContext *s); void tcg_func_start(TCGContext *s); -int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start); +int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start); void tb_target_set_jmp_target(const TranslationBlock *, int, uintptr_t, uintptr_t); @@ -853,36 +861,10 @@ void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size); TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr, intptr_t, const char *); -TCGTemp *tcg_temp_new_internal(TCGType, bool); -void tcg_temp_free_internal(TCGTemp *); +TCGTemp *tcg_temp_new_internal(TCGType, TCGTempKind); TCGv_vec tcg_temp_new_vec(TCGType type); TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match); -static inline void tcg_temp_free_i32(TCGv_i32 arg) -{ - tcg_temp_free_internal(tcgv_i32_temp(arg)); -} - -static inline void tcg_temp_free_i64(TCGv_i64 arg) -{ - tcg_temp_free_internal(tcgv_i64_temp(arg)); -} - -static inline void tcg_temp_free_i128(TCGv_i128 arg) -{ - tcg_temp_free_internal(tcgv_i128_temp(arg)); -} - -static inline void tcg_temp_free_ptr(TCGv_ptr arg) -{ - tcg_temp_free_internal(tcgv_ptr_temp(arg)); -} - -static inline void tcg_temp_free_vec(TCGv_vec arg) -{ - tcg_temp_free_internal(tcgv_vec_temp(arg)); -} - static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset, const char *name) { @@ -892,13 +874,7 @@ static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset, static inline TCGv_i32 tcg_temp_new_i32(void) { - TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false); - return temp_tcgv_i32(t); -} - -static inline TCGv_i32 tcg_temp_local_new_i32(void) -{ - TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true); + TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_TB); return temp_tcgv_i32(t); } @@ -911,25 +887,13 @@ static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset, static inline TCGv_i64 tcg_temp_new_i64(void) { - TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false); - return temp_tcgv_i64(t); -} - -static inline TCGv_i64 tcg_temp_local_new_i64(void) -{ - TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true); + TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_TB); return temp_tcgv_i64(t); } static inline TCGv_i128 tcg_temp_new_i128(void) { - TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, false); - return temp_tcgv_i128(t); -} - -static inline TCGv_i128 tcg_temp_local_new_i128(void) -{ - TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, true); + TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_TB); return temp_tcgv_i128(t); } @@ -942,29 +906,10 @@ static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset, static inline TCGv_ptr tcg_temp_new_ptr(void) { - TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false); + TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_TB); return temp_tcgv_ptr(t); } -static inline TCGv_ptr tcg_temp_local_new_ptr(void) -{ - TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true); - return temp_tcgv_ptr(t); -} - -#if defined(CONFIG_DEBUG_TCG) -/* If you call tcg_clear_temp_count() at the start of a section of - * code which is not supposed to leak any TCG temporaries, then - * calling tcg_check_temp_count() at the end of the section will - * return 1 if the section did in fact leak a temporary. 
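Taken together with the tcg_const_* removals elsewhere in this patch, the conversions above amount to a simple before/after for frontends (sketch):

    /* before */
    TCGv_i32 t = tcg_temp_local_new_i32();   /* explicitly branch-safe temp */
    TCGv_i32 c = tcg_const_i32(0x20);        /* mutable temp seeded with 0x20 */
    /* ... use t and c ... */
    tcg_temp_free_i32(c);

    /* after */
    TCGv_i32 t = tcg_temp_new_i32();         /* TEMP_TB: branch-safe by default */
    TCGv_i32 c = tcg_constant_i32(0x20);     /* read-only constant, never freed */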
- */ -void tcg_clear_temp_count(void); -int tcg_check_temp_count(void); -#else -#define tcg_clear_temp_count() do { } while (0) -#define tcg_check_temp_count() 0 -#endif - int64_t tcg_cpu_exec_time(void); void tcg_dump_info(GString *buf); void tcg_dump_op_count(GString *buf); @@ -1022,12 +967,6 @@ typedef struct TCGTargetOpDef { const char *args_ct_str[TCG_MAX_OP_ARGS]; } TCGTargetOpDef; -#define tcg_abort() \ -do {\ - fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\ - abort();\ -} while (0) - bool tcg_op_supported(TCGOpcode op); void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args); @@ -1051,16 +990,6 @@ void tcg_remove_ops_after(TCGOp *op); void tcg_optimize(TCGContext *s); -/* Allocate a new temporary and initialize it with a constant. */ -TCGv_i32 tcg_const_i32(int32_t val); -TCGv_i64 tcg_const_i64(int64_t val); -TCGv_i32 tcg_const_local_i32(int32_t val); -TCGv_i64 tcg_const_local_i64(int64_t val); -TCGv_vec tcg_const_zeros_vec(TCGType); -TCGv_vec tcg_const_ones_vec(TCGType); -TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec); -TCGv_vec tcg_const_ones_vec_matching(TCGv_vec); - /* * Locate or create a read-only temporary that is a constant. * This kind of temporary need not be freed, but for convenience @@ -1082,12 +1011,8 @@ TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val); TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val); #if UINTPTR_MAX == UINT32_MAX -# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i32((intptr_t)(x))) -# define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x))) # define tcg_constant_ptr(x) ((TCGv_ptr)tcg_constant_i32((intptr_t)(x))) #else -# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i64((intptr_t)(x))) -# define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x))) # define tcg_constant_ptr(x) ((TCGv_ptr)tcg_constant_i64((intptr_t)(x))) #endif diff --git a/include/ui/console.h b/include/ui/console.h index 1cb53acc33..ae5ec466c1 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -144,15 +144,15 @@ typedef struct QemuUIInfo { /* cursor data format is 32bit RGBA */ typedef struct QEMUCursor { - int width, height; + uint16_t width, height; int hot_x, hot_y; int refcount; uint32_t data[]; } QEMUCursor; -QEMUCursor *cursor_alloc(int width, int height); -void cursor_get(QEMUCursor *c); -void cursor_put(QEMUCursor *c); +QEMUCursor *cursor_alloc(uint16_t width, uint16_t height); +QEMUCursor *cursor_ref(QEMUCursor *c); +void cursor_unref(QEMUCursor *c); QEMUCursor *cursor_builtin_hidden(void); QEMUCursor *cursor_builtin_left_ptr(void); void cursor_print_ascii_art(QEMUCursor *c, const char *prefix); @@ -459,6 +459,7 @@ QemuConsole *qemu_console_lookup_by_device(DeviceState *dev, uint32_t head); QemuConsole *qemu_console_lookup_by_device_name(const char *device_id, uint32_t head, Error **errp); QemuConsole *qemu_console_lookup_unused(void); +QEMUCursor *qemu_console_get_cursor(QemuConsole *con); bool qemu_console_is_visible(QemuConsole *con); bool qemu_console_is_graphic(QemuConsole *con); bool qemu_console_is_fixedsize(QemuConsole *con); diff --git a/include/ui/egl-helpers.h b/include/ui/egl-helpers.h index 2fb6e0dd6b..53d953ddf4 100644 --- a/include/ui/egl-helpers.h +++ b/include/ui/egl-helpers.h @@ -22,6 +22,8 @@ typedef struct egl_fb { QemuDmaBuf *dmabuf; } egl_fb; +#define EGL_FB_INIT { 0, } + void egl_fb_destroy(egl_fb *fb); void egl_fb_setup_default(egl_fb *fb, int width, int height); void egl_fb_setup_for_tex(egl_fb *fb, int width, int 
height, @@ -63,4 +65,6 @@ int qemu_egl_init_dpy_mesa(EGLNativeDisplayType dpy, DisplayGLMode mode); EGLContext qemu_egl_init_ctx(void); bool qemu_egl_has_dmabuf(void); +bool egl_init(const char *rendernode, DisplayGLMode mode, Error **errp); + #endif /* EGL_HELPERS_H */ diff --git a/include/ui/input.h b/include/ui/input.h index c86219a1c1..c29a730a71 100644 --- a/include/ui/input.h +++ b/include/ui/input.h @@ -8,9 +8,12 @@ #define INPUT_EVENT_MASK_BTN (1<readfd, false, - io_read, NULL, NULL, NULL, opaque); - aio_set_fd_handler(ctx, cioc->writefd, false, - NULL, io_write, NULL, NULL, opaque); + aio_set_fd_handler(ctx, cioc->readfd, io_read, NULL, NULL, NULL, opaque); + aio_set_fd_handler(ctx, cioc->writefd, NULL, io_write, NULL, NULL, opaque); } diff --git a/io/channel-file.c b/io/channel-file.c index d76663e6ae..8b5821f452 100644 --- a/io/channel-file.c +++ b/io/channel-file.c @@ -198,8 +198,7 @@ static void qio_channel_file_set_aio_fd_handler(QIOChannel *ioc, void *opaque) { QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc); - aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write, - NULL, NULL, opaque); + aio_set_fd_handler(ctx, fioc->fd, io_read, io_write, NULL, NULL, opaque); } static GSource *qio_channel_file_create_watch(QIOChannel *ioc, diff --git a/io/channel-socket.c b/io/channel-socket.c index 7aca84f61a..d99945ebec 100644 --- a/io/channel-socket.c +++ b/io/channel-socket.c @@ -442,9 +442,9 @@ static void qio_channel_socket_finalize(Object *obj) } } #ifdef WIN32 - WSAEventSelect(ioc->fd, NULL, 0); + qemu_socket_unselect(ioc->fd, NULL); #endif - closesocket(ioc->fd); + close(ioc->fd); ioc->fd = -1; } } @@ -846,13 +846,13 @@ qio_channel_socket_close(QIOChannel *ioc, if (sioc->fd != -1) { #ifdef WIN32 - WSAEventSelect(sioc->fd, NULL, 0); + qemu_socket_unselect(sioc->fd, NULL); #endif if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_LISTEN)) { socket_listen_cleanup(sioc->fd, errp); } - if (closesocket(sioc->fd) < 0) { + if (close(sioc->fd) < 0) { sioc->fd = -1; error_setg_errno(&err, errno, "Unable to close socket"); error_propagate(errp, err); @@ -899,8 +899,7 @@ static void qio_channel_socket_set_aio_fd_handler(QIOChannel *ioc, void *opaque) { QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); - aio_set_fd_handler(ctx, sioc->fd, false, - io_read, io_write, NULL, NULL, opaque); + aio_set_fd_handler(ctx, sioc->fd, io_read, io_write, NULL, NULL, opaque); } static GSource *qio_channel_socket_create_watch(QIOChannel *ioc, diff --git a/io/channel-tls.c b/io/channel-tls.c index 8052945ba0..9805dd0a3f 100644 --- a/io/channel-tls.c +++ b/io/channel-tls.c @@ -74,6 +74,9 @@ qio_channel_tls_new_server(QIOChannel *master, ioc = QIO_CHANNEL_TLS(object_new(TYPE_QIO_CHANNEL_TLS)); ioc->master = master; + if (qio_channel_has_feature(master, QIO_CHANNEL_FEATURE_SHUTDOWN)) { + qio_channel_set_feature(QIO_CHANNEL(ioc), QIO_CHANNEL_FEATURE_SHUTDOWN); + } object_ref(OBJECT(master)); ioc->session = qcrypto_tls_session_new( @@ -446,6 +449,7 @@ qio_channel_tls_read_watch(QIOChannelTLS *tioc, GSource *source) object_ref(OBJECT(tioc)); g_source_add_child_source(source, child); + g_source_unref(child); } static GSource *qio_channel_tls_create_watch(QIOChannel *ioc, diff --git a/io/channel-watch.c b/io/channel-watch.c index ad7c568a84..64b486e378 100644 --- a/io/channel-watch.c +++ b/io/channel-watch.c @@ -275,15 +275,15 @@ GSource *qio_channel_create_fd_watch(QIOChannel *ioc, #ifdef CONFIG_WIN32 GSource *qio_channel_create_socket_watch(QIOChannel *ioc, - int socket, + int sockfd, GIOCondition condition) { 
GSource *source; QIOChannelSocketSource *ssource; - WSAEventSelect(socket, ioc->event, - FD_READ | FD_ACCEPT | FD_CLOSE | - FD_CONNECT | FD_WRITE | FD_OOB); + qemu_socket_select(sockfd, ioc->event, + FD_READ | FD_ACCEPT | FD_CLOSE | + FD_CONNECT | FD_WRITE | FD_OOB, NULL); source = g_source_new(&qio_channel_socket_source_funcs, sizeof(QIOChannelSocketSource)); @@ -293,7 +293,7 @@ GSource *qio_channel_create_socket_watch(QIOChannel *ioc, object_ref(OBJECT(ioc)); ssource->condition = condition; - ssource->socket = socket; + ssource->socket = _get_osfhandle(sockfd); ssource->revents = 0; ssource->fd.fd = (gintptr)ioc->event; diff --git a/io/channel.c b/io/channel.c index a8c7f11649..72f0066af5 100644 --- a/io/channel.c +++ b/io/channel.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "block/aio-wait.h" #include "io/channel.h" #include "qapi/error.h" #include "qemu/main-loop.h" @@ -109,27 +110,27 @@ ssize_t qio_channel_writev_full(QIOChannel *ioc, } -int qio_channel_readv_all_eof(QIOChannel *ioc, - const struct iovec *iov, - size_t niov, - Error **errp) +int coroutine_mixed_fn qio_channel_readv_all_eof(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + Error **errp) { return qio_channel_readv_full_all_eof(ioc, iov, niov, NULL, NULL, errp); } -int qio_channel_readv_all(QIOChannel *ioc, - const struct iovec *iov, - size_t niov, - Error **errp) +int coroutine_mixed_fn qio_channel_readv_all(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + Error **errp) { return qio_channel_readv_full_all(ioc, iov, niov, NULL, NULL, errp); } -int qio_channel_readv_full_all_eof(QIOChannel *ioc, - const struct iovec *iov, - size_t niov, - int **fds, size_t *nfds, - Error **errp) +int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + int **fds, size_t *nfds, + Error **errp) { int ret = -1; struct iovec *local_iov = g_new(struct iovec, niov); @@ -215,11 +216,11 @@ next_iter: return ret; } -int qio_channel_readv_full_all(QIOChannel *ioc, - const struct iovec *iov, - size_t niov, - int **fds, size_t *nfds, - Error **errp) +int coroutine_mixed_fn qio_channel_readv_full_all(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + int **fds, size_t *nfds, + Error **errp) { int ret = qio_channel_readv_full_all_eof(ioc, iov, niov, fds, nfds, errp); @@ -234,19 +235,19 @@ int qio_channel_readv_full_all(QIOChannel *ioc, return ret; } -int qio_channel_writev_all(QIOChannel *ioc, - const struct iovec *iov, - size_t niov, - Error **errp) +int coroutine_mixed_fn qio_channel_writev_all(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + Error **errp) { return qio_channel_writev_full_all(ioc, iov, niov, NULL, 0, 0, errp); } -int qio_channel_writev_full_all(QIOChannel *ioc, - const struct iovec *iov, - size_t niov, - int *fds, size_t nfds, - int flags, Error **errp) +int coroutine_mixed_fn qio_channel_writev_full_all(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + int *fds, size_t nfds, + int flags, Error **errp) { int ret = -1; struct iovec *local_iov = g_new(struct iovec, niov); @@ -325,30 +326,30 @@ ssize_t qio_channel_write(QIOChannel *ioc, } -int qio_channel_read_all_eof(QIOChannel *ioc, - char *buf, - size_t buflen, - Error **errp) +int coroutine_mixed_fn qio_channel_read_all_eof(QIOChannel *ioc, + char *buf, + size_t buflen, + Error **errp) { struct iovec iov = { .iov_base = buf, .iov_len = buflen }; return qio_channel_readv_all_eof(ioc, &iov, 1, errp); } -int qio_channel_read_all(QIOChannel *ioc, - char *buf, - 
size_t buflen, - Error **errp) +int coroutine_mixed_fn qio_channel_read_all(QIOChannel *ioc, + char *buf, + size_t buflen, + Error **errp) { struct iovec iov = { .iov_base = buf, .iov_len = buflen }; return qio_channel_readv_all(ioc, &iov, 1, errp); } -int qio_channel_write_all(QIOChannel *ioc, - const char *buf, - size_t buflen, - Error **errp) +int coroutine_mixed_fn qio_channel_write_all(QIOChannel *ioc, + const char *buf, + size_t buflen, + Error **errp) { struct iovec iov = { .iov_base = (char *)buf, .iov_len = buflen }; return qio_channel_writev_all(ioc, &iov, 1, errp); @@ -514,7 +515,11 @@ int qio_channel_flush(QIOChannel *ioc, static void qio_channel_restart_read(void *opaque) { QIOChannel *ioc = opaque; - Coroutine *co = ioc->read_coroutine; + Coroutine *co = qatomic_xchg(&ioc->read_coroutine, NULL); + + if (!co) { + return; + } /* Assert that aio_co_wake() reenters the coroutine directly */ assert(qemu_get_current_aio_context() == @@ -525,7 +530,11 @@ static void qio_channel_restart_read(void *opaque) static void qio_channel_restart_write(void *opaque) { QIOChannel *ioc = opaque; - Coroutine *co = ioc->write_coroutine; + Coroutine *co = qatomic_xchg(&ioc->write_coroutine, NULL); + + if (!co) { + return; + } /* Assert that aio_co_wake() reenters the coroutine directly */ assert(qemu_get_current_aio_context() == @@ -568,7 +577,11 @@ void qio_channel_detach_aio_context(QIOChannel *ioc) void coroutine_fn qio_channel_yield(QIOChannel *ioc, GIOCondition condition) { + AioContext *ioc_ctx = ioc->ctx ?: qemu_get_aio_context(); + assert(qemu_in_coroutine()); + assert(in_aio_context_home_thread(ioc_ctx)); + if (condition == G_IO_IN) { assert(!ioc->read_coroutine); ioc->read_coroutine = qemu_coroutine_self(); @@ -580,18 +593,26 @@ void coroutine_fn qio_channel_yield(QIOChannel *ioc, } qio_channel_set_aio_fd_handlers(ioc); qemu_coroutine_yield(); + assert(in_aio_context_home_thread(ioc_ctx)); /* Allow interrupting the operation by reentering the coroutine other than * through the aio_fd_handlers. 
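For context, this is the canonical coroutine I/O pattern that the tightened qio_channel_yield() assertions police (a sketch; QIO_CHANNEL_ERR_BLOCK is the existing would-block return value):

    ssize_t n;
    while ((n = qio_channel_read(ioc, buf, buflen, errp)) ==
           QIO_CHANNEL_ERR_BLOCK) {
        qio_channel_yield(ioc, G_IO_IN);   /* must run in ioc's home AioContext */
    }

A waiter parked this way can now be cancelled safely from another thread with the qio_channel_wake_read() helper added just below, since the coroutine pointer is claimed exactly once via qatomic_xchg().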
*/ - if (condition == G_IO_IN && ioc->read_coroutine) { - ioc->read_coroutine = NULL; + if (condition == G_IO_IN) { + assert(ioc->read_coroutine == NULL); qio_channel_set_aio_fd_handlers(ioc); - } else if (condition == G_IO_OUT && ioc->write_coroutine) { - ioc->write_coroutine = NULL; + } else if (condition == G_IO_OUT) { + assert(ioc->write_coroutine == NULL); qio_channel_set_aio_fd_handlers(ioc); } } +void qio_channel_wake_read(QIOChannel *ioc) +{ + Coroutine *co = qatomic_xchg(&ioc->read_coroutine, NULL); + if (co) { + aio_co_wake(co); + } +} static gboolean qio_channel_wait_complete(QIOChannel *ioc, GIOCondition condition, diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h index a7cfefb3a8..d7e7bb885e 100644 --- a/linux-headers/asm-arm64/kvm.h +++ b/linux-headers/asm-arm64/kvm.h @@ -109,6 +109,7 @@ struct kvm_regs { #define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */ #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ +#define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */ struct kvm_vcpu_init { __u32 target; diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h index 2747d2ce14..2937e7bf69 100644 --- a/linux-headers/asm-x86/kvm.h +++ b/linux-headers/asm-x86/kvm.h @@ -9,6 +9,7 @@ #include #include +#include #define KVM_PIO_PAGE_OFFSET 1 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2 @@ -505,8 +506,8 @@ struct kvm_nested_state { * KVM_{GET,PUT}_NESTED_STATE ioctl values. */ union { - struct kvm_vmx_nested_state_data vmx[0]; - struct kvm_svm_nested_state_data svm[0]; + __DECLARE_FLEX_ARRAY(struct kvm_vmx_nested_state_data, vmx); + __DECLARE_FLEX_ARRAY(struct kvm_svm_nested_state_data, svm); } data; }; @@ -523,6 +524,35 @@ struct kvm_pmu_event_filter { #define KVM_PMU_EVENT_ALLOW 0 #define KVM_PMU_EVENT_DENY 1 +#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS BIT(0) +#define KVM_PMU_EVENT_FLAGS_VALID_MASK (KVM_PMU_EVENT_FLAG_MASKED_EVENTS) + +/* + * Masked event layout. + * Bits Description + * ---- ----------- + * 7:0 event select (low bits) + * 15:8 umask match + * 31:16 unused + * 35:32 event select (high bits) + * 36:54 unused + * 55 exclude bit + * 63:56 umask mask + */ + +#define KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, exclude) \ + (((event_select) & 0xFFULL) | (((event_select) & 0XF00ULL) << 24) | \ + (((mask) & 0xFFULL) << 56) | \ + (((match) & 0xFFULL) << 8) | \ + ((__u64)(!!(exclude)) << 55)) + +#define KVM_PMU_MASKED_ENTRY_EVENT_SELECT \ + (GENMASK_ULL(7, 0) | GENMASK_ULL(35, 32)) +#define KVM_PMU_MASKED_ENTRY_UMASK_MASK (GENMASK_ULL(63, 56)) +#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH (GENMASK_ULL(15, 8)) +#define KVM_PMU_MASKED_ENTRY_EXCLUDE (BIT_ULL(55)) +#define KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT (56) + /* for KVM_{GET,SET,HAS}_DEVICE_ATTR */ #define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */ #define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */ diff --git a/linux-headers/linux/const.h b/linux-headers/linux/const.h new file mode 100644 index 0000000000..5e48987251 --- /dev/null +++ b/linux-headers/linux/const.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* const.h: Macros for dealing with constants. */ + +#ifndef _LINUX_CONST_H +#define _LINUX_CONST_H + +/* Some constant macros are used in both assembler and + * C code. Therefore we cannot annotate them always with + * 'UL' and other type specifiers unilaterally. 
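/*
 * Editor's note: a worked expansion of KVM_PMU_ENCODE_MASKED_ENTRY() from
 * the hunk above, using arbitrary (hypothetical) field values. For
 * event_select 0x1C0, umask mask 0xFF, umask match 0x01, exclude 1:
 *
 *   (0x1C0 & 0xFF)            -> 0x00000000000000C0   bits 7:0   (low select)
 *   (0x1C0 & 0xF00) << 24     -> 0x0000000100000000   bits 35:32 (high select)
 *   (0xFF  & 0xFF)  << 56     -> 0xFF00000000000000   bits 63:56 (umask mask)
 *   (0x01  & 0xFF)  << 8      -> 0x0000000000000100   bits 15:8  (umask match)
 *   ((__u64)!!1)    << 55     -> 0x0080000000000000   bit  55    (exclude)
 *                                ------------------
 *   KVM_PMU_ENCODE_MASKED_ENTRY(0x1C0, 0xFF, 0x01, 1)
 *                             == 0xFF800001000001C0
 */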
We + * use the following macros to deal with this. + * + * Similarly, _AT() will cast an expression with a type in C, but + * leave it unchanged in asm. + */ + +#ifdef __ASSEMBLY__ +#define _AC(X,Y) X +#define _AT(T,X) X +#else +#define __AC(X,Y) (X##Y) +#define _AC(X,Y) __AC(X,Y) +#define _AT(T,X) ((T)(X)) +#endif + +#define _UL(x) (_AC(x, UL)) +#define _ULL(x) (_AC(x, ULL)) + +#define _BITUL(x) (_UL(1) << (x)) +#define _BITULL(x) (_ULL(1) << (x)) + +#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) +#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) + +#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) + +#endif /* _LINUX_CONST_H */ diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index 1e2c16cfe3..599de3c6e3 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -581,6 +581,8 @@ struct kvm_s390_mem_op { struct { __u8 ar; /* the access register number */ __u8 key; /* access key, ignored if flag unset */ + __u8 pad1[6]; /* ignored */ + __u64 old_addr; /* ignored if cmpxchg flag unset */ }; __u32 sida_offset; /* offset into the sida */ __u8 reserved[32]; /* ignored */ @@ -593,11 +595,17 @@ struct kvm_s390_mem_op { #define KVM_S390_MEMOP_SIDA_WRITE 3 #define KVM_S390_MEMOP_ABSOLUTE_READ 4 #define KVM_S390_MEMOP_ABSOLUTE_WRITE 5 +#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6 + /* flags for kvm_s390_mem_op->flags */ #define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) #define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) #define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2) +/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */ +#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0) +#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1) + /* for KVM_INTERRUPT */ struct kvm_interrupt { /* in */ @@ -1173,6 +1181,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223 #define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224 #define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225 +#define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/linux-headers/linux/memfd.h b/linux-headers/linux/memfd.h new file mode 100644 index 0000000000..01c0324e77 --- /dev/null +++ b/linux-headers/linux/memfd.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_MEMFD_H +#define _LINUX_MEMFD_H + +#include + +/* flags for memfd_create(2) (unsigned int) */ +#define MFD_CLOEXEC 0x0001U +#define MFD_ALLOW_SEALING 0x0002U +#define MFD_HUGETLB 0x0004U +/* not executable and sealed to prevent changing to executable. */ +#define MFD_NOEXEC_SEAL 0x0008U +/* executable */ +#define MFD_EXEC 0x0010U + +/* + * Huge page size encoding when MFD_HUGETLB is specified, and a huge page + * size other than the default is desired. See hugetlb_encode.h. + * All known huge page size encodings are provided here. It is the + * responsibility of the application to know which sizes are supported on + * the running system. See mmap(2) man page for details. 
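/*
 * Editor's note: what the const.h helpers above buy. A constant defined
 * once can be consumed from both C and assembler; FOO_BASE and FOO_ENABLE
 * below are hypothetical names for illustration:
 *
 *   #define FOO_BASE   _AC(0xffff000000000000, UL)
 *   #define FOO_ENABLE _BITUL(12)
 *
 * In C, FOO_BASE expands to 0xffff000000000000UL (properly typed) and
 * FOO_ENABLE to (1UL << 12). When __ASSEMBLY__ is defined, the UL suffix
 * is dropped, which the assembler would otherwise reject.
 */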
+ */ +#define MFD_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT +#define MFD_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK + +#define MFD_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB +#define MFD_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB +#define MFD_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB +#define MFD_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB +#define MFD_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB +#define MFD_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB +#define MFD_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB +#define MFD_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB +#define MFD_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB +#define MFD_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB +#define MFD_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB +#define MFD_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB + +#endif /* _LINUX_MEMFD_H */ diff --git a/linux-headers/linux/nvme_ioctl.h b/linux-headers/linux/nvme_ioctl.h new file mode 100644 index 0000000000..f8df31dbc4 --- /dev/null +++ b/linux-headers/linux/nvme_ioctl.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Definitions for the NVM Express ioctl interface + * Copyright (c) 2011-2014, Intel Corporation. + */ + +#ifndef _LINUX_NVME_IOCTL_H +#define _LINUX_NVME_IOCTL_H + +#include + +struct nvme_user_io { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nblocks; + __u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 slba; + __u32 dsmgmt; + __u32 reftag; + __u16 apptag; + __u16 appmask; +}; + +struct nvme_passthru_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 result; +}; + +struct nvme_passthru_cmd64 { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + union { + __u32 data_len; /* for non-vectored io */ + __u32 vec_cnt; /* for vectored io */ + }; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 rsvd2; + __u64 result; +}; + +/* same as struct nvme_passthru_cmd64, minus the 8b result field */ +struct nvme_uring_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 rsvd2; +}; + +#define nvme_admin_cmd nvme_passthru_cmd + +#define NVME_IOCTL_ID _IO('N', 0x40) +#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd) +#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io) +#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd) +#define NVME_IOCTL_RESET _IO('N', 0x44) +#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45) +#define NVME_IOCTL_RESCAN _IO('N', 0x46) +#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64) +#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64) +#define NVME_IOCTL_IO64_CMD_VEC _IOWR('N', 0x49, struct nvme_passthru_cmd64) + +/* io_uring async commands: */ +#define NVME_URING_CMD_IO _IOWR('N', 0x80, struct nvme_uring_cmd) +#define NVME_URING_CMD_IO_VEC _IOWR('N', 0x81, struct nvme_uring_cmd) +#define NVME_URING_CMD_ADMIN _IOWR('N', 0x82, struct nvme_uring_cmd) +#define NVME_URING_CMD_ADMIN_VEC _IOWR('N', 0x83, struct nvme_uring_cmd) + +#endif /* _LINUX_NVME_IOCTL_H */ diff --git a/linux-headers/linux/stddef.h 
b/linux-headers/linux/stddef.h new file mode 100644 index 0000000000..bb6ea517ef --- /dev/null +++ b/linux-headers/linux/stddef.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_STDDEF_H +#define _LINUX_STDDEF_H + + + +#ifndef __always_inline +#define __always_inline __inline__ +#endif + +/** + * __struct_group() - Create a mirrored named and anonyomous struct + * + * @TAG: The tag name for the named sub-struct (usually empty) + * @NAME: The identifier name of the mirrored sub-struct + * @ATTRS: Any struct attributes (usually empty) + * @MEMBERS: The member declarations for the mirrored structs + * + * Used to create an anonymous union of two structs with identical layout + * and size: one anonymous and one named. The former's members can be used + * normally without sub-struct naming, and the latter can be used to + * reason about the start, end, and size of the group of struct members. + * The named struct can also be explicitly tagged for layer reuse, as well + * as both having struct attributes appended. + */ +#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \ + union { \ + struct { MEMBERS } ATTRS; \ + struct TAG { MEMBERS } ATTRS NAME; \ + } + +/** + * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union + * + * @TYPE: The type of each flexible array element + * @NAME: The name of the flexible array member + * + * In order to have a flexible array member in a union or alone in a + * struct, it needs to be wrapped in an anonymous struct with at least 1 + * named member, but that member can be empty. + */ +#define __DECLARE_FLEX_ARRAY(TYPE, NAME) \ + struct { \ + struct { } __empty_ ## NAME; \ + TYPE NAME[]; \ + } +#endif diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h index c59692ce0b..4a534edbdc 100644 --- a/linux-headers/linux/vfio.h +++ b/linux-headers/linux/vfio.h @@ -49,7 +49,11 @@ /* Supports VFIO_DMA_UNMAP_FLAG_ALL */ #define VFIO_UNMAP_ALL 9 -/* Supports the vaddr flag for DMA map and unmap */ +/* + * Supports the vaddr flag for DMA map and unmap. Not supported for mediated + * devices, so this capability is subject to change as groups are added or + * removed. + */ #define VFIO_UPDATE_VADDR 10 /* @@ -1343,8 +1347,7 @@ struct vfio_iommu_type1_info_dma_avail { * Map process virtual addresses to IO virtual addresses using the * provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required. * - * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova, and - * unblock translation of host virtual addresses in the iova range. The vaddr + * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova. The vaddr * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR. To * maintain memory consistency within the user application, the updated vaddr * must address the same memory object as originally mapped. Failure to do so @@ -1395,9 +1398,9 @@ struct vfio_bitmap { * must be 0. This cannot be combined with the get-dirty-bitmap flag. * * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host - * virtual addresses in the iova range. Tasks that attempt to translate an - * iova's vaddr will block. DMA to already-mapped pages continues. This - * cannot be combined with the get-dirty-bitmap flag. + * virtual addresses in the iova range. DMA to already-mapped pages continues. + * Groups may not be added to the container while any addresses are invalid. + * This cannot be combined with the get-dirty-bitmap flag. 
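/*
 * Editor's note: the call sequence the re-documented vaddr flags above
 * are meant for, sketched under stated assumptions (error handling and
 * the container setup are omitted; the function name is hypothetical).
 * The mapping is first invalidated, the backing memory is relocated with
 * identical contents, then the new vaddr is installed. Per the updated
 * comment, no group may be added to the container while addresses are
 * invalid.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static void update_vaddr(int container_fd, __u64 iova, __u64 size,
                         void *new_vaddr)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = VFIO_DMA_UNMAP_FLAG_VADDR,   /* invalidate, do not unmap */
        .iova  = iova,
        .size  = size,
    };
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_VADDR |
                 VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
        .vaddr = (__u64)(uintptr_t)new_vaddr, /* same memory object */
        .iova  = iova,
        .size  = size,
    };

    ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
    /* ... relocate the backing memory, preserving its contents ... */
    ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
}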
*/ struct vfio_iommu_type1_dma_unmap { __u32 argsz; diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h index f9f115a7c7..92e1b700b5 100644 --- a/linux-headers/linux/vhost.h +++ b/linux-headers/linux/vhost.h @@ -180,4 +180,12 @@ */ #define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D) +/* Resume a device so it can resume processing virtqueue requests + * + * After the return of this ioctl the device will have restored all the + * necessary states and it is fully operational to continue processing the + * virtqueue descriptors. + */ +#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E) + #endif diff --git a/linux-user/alpha/target_mman.h b/linux-user/alpha/target_mman.h index cd6e3d70a6..051544f5ab 100644 --- a/linux-user/alpha/target_mman.h +++ b/linux-user/alpha/target_mman.h @@ -3,6 +3,10 @@ #define TARGET_MADV_DONTNEED 6 +#define TARGET_MS_ASYNC 1 +#define TARGET_MS_SYNC 2 +#define TARGET_MS_INVALIDATE 4 + #include "../generic/target_mman.h" #endif diff --git a/linux-user/arm/target_cpu.h b/linux-user/arm/target_cpu.h index 89ba274cfc..f6383a7cd1 100644 --- a/linux-user/arm/target_cpu.h +++ b/linux-user/arm/target_cpu.h @@ -30,7 +30,7 @@ static inline unsigned long arm_max_reserved_va(CPUState *cs) * the high addresses. Restrict linux-user to the * cached write-back RAM in the system map. */ - return 0x80000000ul; + return 0x7ffffffful; } else { /* * We need to be able to map the commpage. diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 5928c14dfc..418ad92598 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -17,7 +17,9 @@ #include "qemu/guest-random.h" #include "qemu/units.h" #include "qemu/selfmap.h" +#include "qemu/lockable.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "target_signal.h" #include "accel/tcg/debuginfo.h" @@ -207,12 +209,12 @@ static bool init_guest_commpage(void) * has specified -R reserved_va, which would trigger an assert(). */ if (reserved_va != 0 && - TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) { + TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) { error_report("Cannot allocate vsyscall page"); exit(EXIT_FAILURE); } page_set_flags(TARGET_VSYSCALL_PAGE, - TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE, + TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK, PAGE_EXEC | PAGE_VALID); return true; } @@ -443,7 +445,7 @@ static bool init_guest_commpage(void) exit(EXIT_FAILURE); } - page_set_flags(commpage, commpage + qemu_host_page_size, + page_set_flags(commpage, commpage | ~qemu_host_page_mask, PAGE_READ | PAGE_EXEC | PAGE_VALID); return true; } @@ -960,9 +962,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en (*regs)[36] = tswapreg(env->lr); (*regs)[37] = tswapreg(cpu_read_xer(env)); - for (i = 0; i < ARRAY_SIZE(env->crf); i++) { - ccr |= env->crf[i] << (32 - ((i + 1) * 4)); - } + ccr = ppc_get_cr(env); (*regs)[38] = tswapreg(ccr); } @@ -1315,7 +1315,7 @@ static bool init_guest_commpage(void) exit(EXIT_FAILURE); } - page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE, + page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK, PAGE_READ | PAGE_EXEC | PAGE_VALID); return true; } @@ -1727,7 +1727,7 @@ static bool init_guest_commpage(void) * and implement syscalls. Here, simply mark the page executable. * Special case the entry points during translation (see do_page_zero). 
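/*
 * Editor's note: background for the __DECLARE_FLEX_ARRAY macro defined in
 * the linux/stddef.h hunk above and used by the kvm_nested_state change
 * earlier in this patch. ISO C forbids a flexible array member directly
 * inside a union, so the macro wraps it in an anonymous struct alongside
 * an empty named member. A hypothetical stand-alone illustration:
 */
struct blob {
    unsigned int len;
    union {
        struct { struct { } __empty_u8;  unsigned char u8[];  };
        struct { struct { } __empty_u32; unsigned int  u32[]; };
    } data;   /* contributes no size, just like the old [0] arrays */
};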
*/ - page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE, + page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK, PAGE_EXEC | PAGE_VALID); return true; } @@ -1748,6 +1748,15 @@ static inline void init_thread(struct target_pt_regs *regs, regs->windowstart = 1; regs->areg[1] = infop->start_stack; regs->pc = infop->entry; + if (info_is_fdpic(infop)) { + regs->areg[4] = infop->loadmap_addr; + regs->areg[5] = infop->interpreter_loadmap_addr; + if (infop->interpreter_loadmap_addr) { + regs->areg[6] = infop->interpreter_pt_dynamic_addr; + } else { + regs->areg[6] = infop->pt_dynamic_addr; + } + } } /* See linux kernel: arch/xtensa/include/asm/elf.h. */ @@ -2199,7 +2208,8 @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot) /* Ensure that the bss page(s) are valid */ if ((page_get_flags(last_bss-1) & prot) != prot) { - page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID); + page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss - 1, + prot | PAGE_VALID); } if (host_start < host_map_start) { @@ -2207,11 +2217,16 @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot) } } -#ifdef TARGET_ARM +#if defined(TARGET_ARM) static int elf_is_fdpic(struct elfhdr *exec) { return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC; } +#elif defined(TARGET_XTENSA) +static int elf_is_fdpic(struct elfhdr *exec) +{ + return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC; +} #else /* Default implementation, always false. */ static int elf_is_fdpic(struct elfhdr *exec) @@ -2496,7 +2511,7 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr, if ((guest_hiaddr - guest_base) > ~(uintptr_t)0) { error_report("%s: requires more virtual address space " "than the host can provide (0x%" PRIx64 ")", - image_name, (uint64_t)guest_hiaddr - guest_base); + image_name, (uint64_t)guest_hiaddr + 1 - guest_base); exit(EXIT_FAILURE); } #endif @@ -2514,13 +2529,13 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr, /* Reserve the address space for the binary, or reserved_va. */ test = g2h_untagged(guest_loaddr); - addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0); + addr = mmap(test, guest_hiaddr - guest_loaddr + 1, PROT_NONE, flags, -1, 0); if (test != addr) { pgb_fail_in_use(image_name); } qemu_log_mask(CPU_LOG_PAGE, - "%s: base @ %p for " TARGET_ABI_FMT_ld " bytes\n", - __func__, addr, guest_hiaddr - guest_loaddr); + "%s: base @ %p for %" PRIu64 " bytes\n", + __func__, addr, (uint64_t)guest_hiaddr - guest_loaddr + 1); } /** @@ -2664,7 +2679,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr, if (hiaddr != orig_hiaddr) { error_report("%s: requires virtual address space that the " "host cannot provide (0x%" PRIx64 ")", - image_name, (uint64_t)orig_hiaddr); + image_name, (uint64_t)orig_hiaddr + 1); exit(EXIT_FAILURE); } @@ -2678,7 +2693,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr, * arithmetic wraps around. 
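/*
 * Editor's note: the recurring "start + len" -> "start + len - 1" and
 * "base | ~PAGE_MASK" edits throughout this patch switch range arguments
 * from an exclusive end address to an inclusive last byte. A 32-bit
 * illustration of why the exclusive form breaks at the top of the
 * address space (values are hypothetical):
 */
#include <stdint.h>

void range_sketch(void)
{
    uint32_t start = 0xFFFFF000u;      /* last page of a 32-bit space  */
    uint32_t len   = 0x1000u;
    uint32_t end   = start + len;      /* wraps to 0: range looks empty */
    uint32_t last  = start + len - 1;  /* 0xFFFFFFFF: still well-formed */
    (void)end;
    (void)last;
}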
*/ if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) { - hiaddr = (uintptr_t) 4 << 30; + hiaddr = UINT32_MAX; } else { offset = -(HI_COMMPAGE & -align); } @@ -2686,7 +2701,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr, loaddr = MIN(loaddr, LO_COMMPAGE & -align); } - addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset); + addr = pgb_find_hole(loaddr, hiaddr - loaddr + 1, align, offset); if (addr == -1) { /* * If HI_COMMPAGE, there *might* be a non-consecutive allocation @@ -2752,17 +2767,17 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr, /* Reserve the memory on the host. */ assert(guest_base != 0); test = g2h_untagged(0); - addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0); + addr = mmap(test, reserved_va + 1, PROT_NONE, flags, -1, 0); if (addr == MAP_FAILED || addr != test) { error_report("Unable to reserve 0x%lx bytes of virtual address " "space at %p (%s) for use as guest address space (check your " "virtual memory ulimit setting, min_mmap_addr or reserve less " - "using -R option)", reserved_va, test, strerror(errno)); + "using -R option)", reserved_va + 1, test, strerror(errno)); exit(EXIT_FAILURE); } qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n", - __func__, addr, reserved_va); + __func__, addr, reserved_va + 1); } void probe_guest_base(const char *image_name, abi_ulong guest_loaddr, @@ -3005,7 +3020,7 @@ static void load_elf_image(const char *image_name, int image_fd, if (a < loaddr) { loaddr = a; } - a = eppnt->p_vaddr + eppnt->p_memsz; + a = eppnt->p_vaddr + eppnt->p_memsz - 1; if (a > hiaddr) { hiaddr = a; } @@ -3096,7 +3111,7 @@ static void load_elf_image(const char *image_name, int image_fd, * In both cases, we will overwrite pages in this range with mappings * from the executable. */ - load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE, + load_addr = target_mmap(loaddr, (size_t)hiaddr - loaddr + 1, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | (ehdr->e_type == ET_EXEC ? 
MAP_FIXED : 0), -1, 0); @@ -3313,9 +3328,10 @@ static void load_elf_interp(const char *filename, struct image_info *info, static int symfind(const void *s0, const void *s1) { - target_ulong addr = *(target_ulong *)s0; struct elf_sym *sym = (struct elf_sym *)s1; + __typeof(sym->st_value) addr = *(uint64_t *)s0; int result = 0; + if (addr < sym->st_value) { result = -1; } else if (addr >= sym->st_value + sym->st_size) { @@ -3324,7 +3340,7 @@ static int symfind(const void *s0, const void *s1) return result; } -static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr) +static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr) { #if ELF_CLASS == ELFCLASS32 struct elf_sym *syms = s->disas_symtab.elf32; @@ -4223,14 +4239,14 @@ static int fill_note_info(struct elf_note_info *info, info->notes_size += note_size(&info->notes[i]); /* read and fill status of all threads */ - cpu_list_lock(); - CPU_FOREACH(cpu) { - if (cpu == thread_cpu) { - continue; + WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) { + CPU_FOREACH(cpu) { + if (cpu == thread_cpu) { + continue; + } + fill_thread_info(info, cpu->env_ptr); } - fill_thread_info(info, cpu->env_ptr); } - cpu_list_unlock(); return (0); } diff --git a/linux-user/exit.c b/linux-user/exit.c index 607b6da9fc..3017d28a3c 100644 --- a/linux-user/exit.c +++ b/linux-user/exit.c @@ -18,9 +18,10 @@ */ #include "qemu/osdep.h" #include "accel/tcg/perf.h" -#include "exec/gdbstub.h" +#include "gdbstub/syscalls.h" #include "qemu.h" #include "user-internals.h" +#include "qemu/plugin.h" #ifdef CONFIG_GPROF #include #endif diff --git a/linux-user/fd-trans.c b/linux-user/fd-trans.c index 7b25468d02..c04a97c73a 100644 --- a/linux-user/fd-trans.c +++ b/linux-user/fd-trans.c @@ -1284,6 +1284,49 @@ static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh, return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route); } +static abi_long target_to_host_for_each_nlattr(struct nlattr *nlattr, + size_t len, + abi_long (*target_to_host_nlattr) + (struct nlattr *)) +{ + unsigned short aligned_nla_len; + abi_long ret; + + while (len > sizeof(struct nlattr)) { + if (tswap16(nlattr->nla_len) < sizeof(struct rtattr) || + tswap16(nlattr->nla_len) > len) { + break; + } + nlattr->nla_len = tswap16(nlattr->nla_len); + nlattr->nla_type = tswap16(nlattr->nla_type); + ret = target_to_host_nlattr(nlattr); + if (ret < 0) { + return ret; + } + + aligned_nla_len = NLA_ALIGN(nlattr->nla_len); + if (aligned_nla_len >= len) { + break; + } + len -= aligned_nla_len; + nlattr = (struct nlattr *)(((char *)nlattr) + aligned_nla_len); + } + return 0; +} + +static abi_long target_to_host_data_inet6_nlattr(struct nlattr *nlattr) +{ + switch (nlattr->nla_type) { + /* uint8_t */ + case QEMU_IFLA_INET6_ADDR_GEN_MODE: + break; + default: + qemu_log_mask(LOG_UNIMP, "Unknown target AF_INET6 type: %d\n", + nlattr->nla_type); + } + return 0; +} + static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr, size_t len, abi_long (*target_to_host_rtattr) @@ -1314,16 +1357,35 @@ static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr, return 0; } +static abi_long target_to_host_data_spec_nlattr(struct nlattr *nlattr) +{ + switch (nlattr->nla_type & NLA_TYPE_MASK) { + case AF_INET6: + return target_to_host_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len, + target_to_host_data_inet6_nlattr); + default: + qemu_log_mask(LOG_UNIMP, "Unknown target AF_SPEC type: %d\n", + nlattr->nla_type); + break; + } + return 0; +} + static abi_long 
target_to_host_data_link_rtattr(struct rtattr *rtattr) { uint32_t *u32; - switch (rtattr->rta_type) { + switch (rtattr->rta_type & NLA_TYPE_MASK) { /* uint32_t */ + case QEMU_IFLA_MTU: + case QEMU_IFLA_TXQLEN: case QEMU_IFLA_EXT_MASK: u32 = RTA_DATA(rtattr); *u32 = tswap32(*u32); break; + case QEMU_IFLA_AF_SPEC: + return target_to_host_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len, + target_to_host_data_spec_nlattr); default: qemu_log_mask(LOG_UNIMP, "Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type); @@ -1622,7 +1684,7 @@ TargetFdTrans target_signalfd_trans = { .host_to_target_data = host_to_target_data_signalfd, }; -static abi_long swap_data_eventfd(void *buf, size_t len) +static abi_long swap_data_u64(void *buf, size_t len) { uint64_t *counter = buf; int i; @@ -1640,8 +1702,12 @@ static abi_long swap_data_eventfd(void *buf, size_t len) } TargetFdTrans target_eventfd_trans = { - .host_to_target_data = swap_data_eventfd, - .target_to_host_data = swap_data_eventfd, + .host_to_target_data = swap_data_u64, + .target_to_host_data = swap_data_u64, +}; + +TargetFdTrans target_timerfd_trans = { + .host_to_target_data = swap_data_u64, }; #if defined(CONFIG_INOTIFY) && (defined(TARGET_NR_inotify_init) || \ diff --git a/linux-user/fd-trans.h b/linux-user/fd-trans.h index 1b9fa2041c..910faaf237 100644 --- a/linux-user/fd-trans.h +++ b/linux-user/fd-trans.h @@ -130,6 +130,7 @@ extern TargetFdTrans target_netlink_route_trans; extern TargetFdTrans target_netlink_audit_trans; extern TargetFdTrans target_signalfd_trans; extern TargetFdTrans target_eventfd_trans; +extern TargetFdTrans target_timerfd_trans; #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \ (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \ defined(__NR_inotify_init1)) diff --git a/linux-user/flatload.c b/linux-user/flatload.c index e99570ca18..5efec2630e 100644 --- a/linux-user/flatload.c +++ b/linux-user/flatload.c @@ -448,7 +448,7 @@ static int load_flat_file(struct linux_binprm * bprm, * Allocate the address space. 
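/*
 * Editor's note: why timerfd can reuse the eventfd translator renamed to
 * swap_data_u64 above. Both file types communicate through a single
 * host-endian 8-byte counter, which must be byte-swapped for a
 * cross-endian guest. A hypothetical host-side sketch:
 */
#include <stdint.h>
#include <unistd.h>

uint64_t read_expirations(int tfd)
{
    uint64_t n;

    /* a timerfd read(2) always yields a u64 expiration count */
    if (read(tfd, &n, sizeof(n)) != sizeof(n)) {
        return 0;
    }
    return n;   /* QEMU applies tswap64() here for a cross-endian guest */
}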
*/ probe_guest_base(bprm->filename, 0, - text_len + data_len + extra + indx_len); + text_len + data_len + extra + indx_len - 1); /* * there are a couple of cases here, the separate code/data diff --git a/linux-user/generic/target_mman.h b/linux-user/generic/target_mman.h index 1436a3c543..32bf1a52d0 100644 --- a/linux-user/generic/target_mman.h +++ b/linux-user/generic/target_mman.h @@ -89,4 +89,17 @@ #define TARGET_MADV_DONTNEED_LOCKED 24 #endif + +#ifndef TARGET_MS_ASYNC +#define TARGET_MS_ASYNC 1 +#endif + +#ifndef TARGET_MS_INVALIDATE +#define TARGET_MS_INVALIDATE 2 +#endif + +#ifndef TARGET_MS_SYNC +#define TARGET_MS_SYNC 4 +#endif + #endif diff --git a/linux-user/generic/target_resource.h b/linux-user/generic/target_resource.h index 539d8c4677..37d3eb09b3 100644 --- a/linux-user/generic/target_resource.h +++ b/linux-user/generic/target_resource.h @@ -12,8 +12,8 @@ struct target_rlimit { }; struct target_rlimit64 { - uint64_t rlim_cur; - uint64_t rlim_max; + abi_ullong rlim_cur; + abi_ullong rlim_max; }; #define TARGET_RLIM_INFINITY ((abi_ulong)-1) diff --git a/linux-user/hexagon/cpu_loop.c b/linux-user/hexagon/cpu_loop.c index b84e25bf71..7f1499ed28 100644 --- a/linux-user/hexagon/cpu_loop.c +++ b/linux-user/hexagon/cpu_loop.c @@ -63,6 +63,9 @@ void cpu_loop(CPUHexagonState *env) case EXCP_ATOMIC: cpu_exec_step_atomic(cs); break; + case EXCP_DEBUG: + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, 0); + break; default: EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n", trapnr); diff --git a/linux-user/hexagon/target_elf.h b/linux-user/hexagon/target_elf.h index b4e9f40527..36056fc9f0 100644 --- a/linux-user/hexagon/target_elf.h +++ b/linux-user/hexagon/target_elf.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -20,7 +20,10 @@ static inline const char *cpu_get_model(uint32_t eflags) { - /* For now, treat anything newer than v5 as a v67 */ + static char buf[32]; + int err; + + /* For now, treat anything newer than v5 as a v73 */ /* FIXME - Disable instructions that are newer than the specified arch */ if (eflags == 0x04 || /* v5 */ eflags == 0x05 || /* v55 */ @@ -30,11 +33,18 @@ static inline const char *cpu_get_model(uint32_t eflags) eflags == 0x65 || /* v65 */ eflags == 0x66 || /* v66 */ eflags == 0x67 || /* v67 */ - eflags == 0x8067 /* v67t */ + eflags == 0x8067 || /* v67t */ + eflags == 0x68 || /* v68 */ + eflags == 0x69 || /* v69 */ + eflags == 0x71 || /* v71 */ + eflags == 0x8071 || /* v71t */ + eflags == 0x73 /* v73 */ ) { - return "v67"; + return "v73"; } - return "unknown"; + + err = snprintf(buf, sizeof(buf), "unknown (0x%x)", eflags); + return err >= 0 && err < sizeof(buf) ? 
buf : "unknown"; } #endif diff --git a/linux-user/hppa/target_mman.h b/linux-user/hppa/target_mman.h index 66dd9f7941..f9b6b97032 100644 --- a/linux-user/hppa/target_mman.h +++ b/linux-user/hppa/target_mman.h @@ -10,6 +10,10 @@ #define TARGET_MADV_WIPEONFORK 71 #define TARGET_MADV_KEEPONFORK 72 +#define TARGET_MS_SYNC 1 +#define TARGET_MS_ASYNC 2 +#define TARGET_MS_INVALIDATE 4 + #include "../generic/target_mman.h" #endif diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c index 15c98cdd67..10c75b8ef6 100644 --- a/linux-user/i386/cpu_loop.c +++ b/linux-user/i386/cpu_loop.c @@ -331,8 +331,17 @@ void cpu_loop(CPUX86State *env) } } +static void target_cpu_free(void *obj) +{ + CPUArchState *env = ((CPUState *)obj)->env_ptr; + target_munmap(env->gdt.base, sizeof(uint64_t) * TARGET_GDT_ENTRIES); + g_free(obj); +} + void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) { + CPUState *cpu = env_cpu(env); + OBJECT(cpu)->free = target_cpu_free; env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK; env->hflags |= HF_PE_MASK | HF_CPL_MASK; if (env->features[FEAT_1_EDX] & CPUID_SSE) { diff --git a/linux-user/loongarch64/signal.c b/linux-user/loongarch64/signal.c index 7c7afb652e..bb8efb1172 100644 --- a/linux-user/loongarch64/signal.c +++ b/linux-user/loongarch64/signal.c @@ -128,7 +128,7 @@ static void setup_sigframe(CPULoongArchState *env, fpu_ctx = (struct target_fpu_context *)(info + 1); for (i = 0; i < 32; ++i) { - __put_user(env->fpr[i], &fpu_ctx->regs[i]); + __put_user(env->fpr[i].vreg.D(0), &fpu_ctx->regs[i]); } __put_user(read_fcc(env), &fpu_ctx->fcc); __put_user(env->fcsr0, &fpu_ctx->fcsr); @@ -193,7 +193,7 @@ static void restore_sigframe(CPULoongArchState *env, uint64_t fcc; for (i = 0; i < 32; ++i) { - __get_user(env->fpr[i], &fpu_ctx->regs[i]); + __get_user(env->fpr[i].vreg.D(0), &fpu_ctx->regs[i]); } __get_user(fcc, &fpu_ctx->fcc); write_fcc(env, fcc); diff --git a/linux-user/main.c b/linux-user/main.c index 8090009afa..4481917662 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -40,6 +40,7 @@ #include "qemu/plugin.h" #include "exec/exec-all.h" #include "exec/gdbstub.h" +#include "gdbstub/user.h" #include "tcg/tcg.h" #include "qemu/timer.h" #include "qemu/envlist.h" @@ -65,8 +66,9 @@ #endif char *exec_path; +char real_exec_path[PATH_MAX]; -int singlestep; +static bool opt_one_insn_per_tb; static const char *argv0; static const char *gdbstub; static envlist_t *envlist; @@ -107,11 +109,9 @@ static const char *last_log_filename; # if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS # if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \ (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32)) -/* There are a number of places where we assign reserved_va to a variable - of type abi_ulong and expect it to fit. Avoid the last page. 
*/ -# define MAX_RESERVED_VA(CPU) (0xfffffffful & TARGET_PAGE_MASK) +# define MAX_RESERVED_VA(CPU) 0xfffffffful # else -# define MAX_RESERVED_VA(CPU) (1ul << TARGET_VIRT_ADDR_SPACE_BITS) +# define MAX_RESERVED_VA(CPU) ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1) # endif # else # define MAX_RESERVED_VA(CPU) 0 @@ -237,6 +237,14 @@ CPUArchState *cpu_copy(CPUArchState *env) new_cpu->tcg_cflags = cpu->tcg_cflags; memcpy(new_env, env, sizeof(CPUArchState)); +#if defined(TARGET_I386) || defined(TARGET_X86_64) + new_env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES, + PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + memcpy(g2h_untagged(new_env->gdt.base), g2h_untagged(env->gdt.base), + sizeof(uint64_t) * TARGET_GDT_ENTRIES); + OBJECT(new_cpu)->free = OBJECT(cpu)->free; +#endif /* Clone all break/watchpoints. Note: Once we support ptrace with hw-debug register access, make sure @@ -351,10 +359,7 @@ static void handle_arg_cpu(const char *arg) { cpu_model = strdup(arg); if (cpu_model == NULL || is_help_option(cpu_model)) { - /* XXX: implement xxx_cpu_list for targets that still miss it */ -#if defined(cpu_list) - cpu_list(); -#endif + list_cpus(); exit(EXIT_FAILURE); } } @@ -369,7 +374,9 @@ static void handle_arg_reserved_va(const char *arg) { char *p; int shift = 0; - reserved_va = strtoul(arg, &p, 0); + unsigned long val; + + val = strtoul(arg, &p, 0); switch (*p) { case 'k': case 'K': @@ -383,10 +390,10 @@ static void handle_arg_reserved_va(const char *arg) break; } if (shift) { - unsigned long unshifted = reserved_va; + unsigned long unshifted = val; p++; - reserved_va <<= shift; - if (reserved_va >> shift != unshifted) { + val <<= shift; + if (val >> shift != unshifted) { fprintf(stderr, "Reserved virtual address too big\n"); exit(EXIT_FAILURE); } @@ -395,11 +402,13 @@ static void handle_arg_reserved_va(const char *arg) fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p); exit(EXIT_FAILURE); } + /* The representation is size - 1, with 0 remaining "default". */ + reserved_va = val ? 
val - 1 : 0; } -static void handle_arg_singlestep(const char *arg) +static void handle_arg_one_insn_per_tb(const char *arg) { - singlestep = 1; + opt_one_insn_per_tb = true; } static void handle_arg_strace(const char *arg) @@ -488,8 +497,11 @@ static const struct qemu_argument arg_table[] = { "logfile", "write logs to 'logfile' (default stderr)"}, {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize, "pagesize", "set the host page size to 'pagesize'"}, - {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep, - "", "run in singlestep mode"}, + {"one-insn-per-tb", + "QEMU_ONE_INSN_PER_TB", false, handle_arg_one_insn_per_tb, + "", "run with one guest instruction per emulated TB"}, + {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_one_insn_per_tb, + "", "deprecated synonym for -one-insn-per-tb"}, {"strace", "QEMU_STRACE", false, handle_arg_strace, "", "log system calls"}, {"seed", "QEMU_RAND_SEED", true, handle_arg_seed, @@ -784,6 +796,11 @@ int main(int argc, char **argv, char **envp) } } + /* Resolve executable file name to full path name */ + if (realpath(exec_path, real_exec_path)) { + exec_path = real_exec_path; + } + /* * get binfmt_misc flags */ @@ -805,9 +822,12 @@ int main(int argc, char **argv, char **envp) /* init tcg before creating CPUs and to get qemu_host_page_size */ { - AccelClass *ac = ACCEL_GET_CLASS(current_accel()); + AccelState *accel = current_accel(); + AccelClass *ac = ACCEL_GET_CLASS(accel); accel_init_interfaces(ac); + object_property_set_bool(OBJECT(accel), "one-insn-per-tb", + opt_one_insn_per_tb, &error_abort); ac->init_machine(NULL); } cpu = cpu_create(cpu_type); @@ -823,16 +843,19 @@ int main(int argc, char **argv, char **envp) */ max_reserved_va = MAX_RESERVED_VA(cpu); if (reserved_va != 0) { + if ((reserved_va + 1) % qemu_host_page_size) { + char *s = size_to_str(qemu_host_page_size); + fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s); + g_free(s); + exit(EXIT_FAILURE); + } if (max_reserved_va && reserved_va > max_reserved_va) { fprintf(stderr, "Reserved virtual address too big\n"); exit(EXIT_FAILURE); } } else if (HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32) { - /* - * reserved_va must be aligned with the host page size - * as it is used with mmap() - */ - reserved_va = max_reserved_va & qemu_host_page_mask; + /* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. 
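/*
 * Editor's note: a worked example of the new reserved_va encoding. The
 * stored value is now the *last* valid address (size - 1), with 0 still
 * meaning "default", so a full 4 GiB reservation fits a 32-bit variable.
 * On a 64-bit host:
 *
 *   -R 4G  -> val = 0x100000000           (would not fit an abi_ulong)
 *             reserved_va = val - 1 = 0xFFFFFFFF   (fits)
 *
 * and the page-alignment check above accordingly becomes
 *   (reserved_va + 1) % qemu_host_page_size == 0
 */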
*/ + reserved_va = max_reserved_va; } { diff --git a/linux-user/mips/cpu_loop.c b/linux-user/mips/cpu_loop.c index 4305f5cf21..bcfcb2279c 100644 --- a/linux-user/mips/cpu_loop.c +++ b/linux-user/mips/cpu_loop.c @@ -307,7 +307,10 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) env->CP0_Status |= (1 << CP0St_FR); env->hflags |= MIPS_HFLAG_F64; } - } else if (!prog_req.fre && !prog_req.frdefault && + } else if (prog_req.fr1) { + env->CP0_Status |= (1 << CP0St_FR); + env->hflags |= MIPS_HFLAG_F64; + } else if (!prog_req.fre && !prog_req.frdefault && !prog_req.fr1 && !prog_req.single && !prog_req.soft) { fprintf(stderr, "qemu: Can't find a matching FPU mode\n"); exit(1); diff --git a/linux-user/mips/target_elf.h b/linux-user/mips/target_elf.h index a98c9bd6ad..b965e86b2b 100644 --- a/linux-user/mips/target_elf.h +++ b/linux-user/mips/target_elf.h @@ -15,6 +15,9 @@ static inline const char *cpu_get_model(uint32_t eflags) if ((eflags & EF_MIPS_MACH) == EF_MIPS_MACH_5900) { return "R5900"; } + if (eflags & EF_MIPS_NAN2008) { + return "P5600"; + } return "24Kf"; } #endif diff --git a/linux-user/mmap.c b/linux-user/mmap.c index 28135c9e6a..0aa8ae7356 100644 --- a/linux-user/mmap.c +++ b/linux-user/mmap.c @@ -181,7 +181,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot) } } - page_set_flags(start, start + len, page_flags); + page_set_flags(start, start + len - 1, page_flags); ret = 0; error: @@ -283,7 +283,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size, end_addr = start + size; if (start > reserved_va - size) { /* Start at the top of the address space. */ - end_addr = ((reserved_va - size) & -align) + size; + end_addr = ((reserved_va + 1 - size) & -align) + size; looped = true; } @@ -297,7 +297,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size, return (abi_ulong)-1; } /* Re-start at the top of the address space. 
*/ - addr = end_addr = ((reserved_va - size) & -align) + size; + addr = end_addr = ((reserved_va + 1 - size) & -align) + size; looped = true; } else { prot = page_get_flags(addr); @@ -640,15 +640,15 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, } page_flags |= PAGE_RESET; if (passthrough_start == passthrough_end) { - page_set_flags(start, start + len, page_flags); + page_set_flags(start, start + len - 1, page_flags); } else { if (start < passthrough_start) { - page_set_flags(start, passthrough_start, page_flags); + page_set_flags(start, passthrough_start - 1, page_flags); } - page_set_flags(passthrough_start, passthrough_end, + page_set_flags(passthrough_start, passthrough_end - 1, page_flags | PAGE_PASSTHROUGH); if (passthrough_end < start + len) { - page_set_flags(passthrough_end, start + len, page_flags); + page_set_flags(passthrough_end, start + len - 1, page_flags); } } the_end: @@ -763,7 +763,7 @@ int target_munmap(abi_ulong start, abi_ulong len) } if (ret == 0) { - page_set_flags(start, start + len, 0); + page_set_flags(start, start + len - 1, 0); } mmap_unlock(); return ret; @@ -849,8 +849,8 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, } else { new_addr = h2g(host_addr); prot = page_get_flags(old_addr); - page_set_flags(old_addr, old_addr + old_size, 0); - page_set_flags(new_addr, new_addr + new_size, + page_set_flags(old_addr, old_addr + old_size - 1, 0); + page_set_flags(new_addr, new_addr + new_size - 1, prot | PAGE_VALID | PAGE_RESET); } mmap_unlock(); @@ -946,7 +946,7 @@ abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice) if (can_passthrough_madvise(start, end)) { ret = get_errno(madvise(g2h_untagged(start), len, advice)); if ((advice == MADV_DONTNEED) && (ret == 0)) { - page_reset_target_data(start, start + len); + page_reset_target_data(start, start + len - 1); } } } diff --git a/linux-user/ppc/signal.c b/linux-user/ppc/signal.c index 07729c1653..a616f20efb 100644 --- a/linux-user/ppc/signal.c +++ b/linux-user/ppc/signal.c @@ -243,9 +243,7 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); __put_user(cpu_read_xer(env), &frame->mc_gregs[TARGET_PT_XER]); - for (i = 0; i < ARRAY_SIZE(env->crf); i++) { - ccr |= env->crf[i] << (32 - ((i + 1) * 4)); - } + ccr = ppc_get_cr(env); __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); /* Save Altivec registers if necessary. 
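/*
 * Editor's note: the open-coded CR-assembly loops removed above are
 * replaced by the ppc_get_cr()/ppc_set_cr() helpers. Judging from the
 * deleted code, they pack and unpack the eight 4-bit condition register
 * fields; a sketch of the equivalent logic, not the authoritative
 * implementation:
 */
#include <stdint.h>

static uint32_t sketch_get_cr(const uint32_t crf[8])
{
    uint32_t cr = 0;

    for (int i = 0; i < 8; i++) {
        cr |= crf[i] << (32 - (i + 1) * 4);   /* crf[0] lands in bits 31:28 */
    }
    return cr;
}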
*/ @@ -335,10 +333,7 @@ static void restore_user_regs(CPUPPCState *env, cpu_write_xer(env, xer); __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); - for (i = 0; i < ARRAY_SIZE(env->crf); i++) { - env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; - } - + ppc_set_cr(env, ccr); if (!sig) { env->gpr[2] = save_r2; } diff --git a/linux-user/qemu.h b/linux-user/qemu.h index e2e93fbd1d..92f9f5af41 100644 --- a/linux-user/qemu.h +++ b/linux-user/qemu.h @@ -168,6 +168,7 @@ abi_long do_brk(abi_ulong new_brk); /* user access */ +#define VERIFY_NONE 0 #define VERIFY_READ PAGE_READ #define VERIFY_WRITE (PAGE_READ | PAGE_WRITE) diff --git a/linux-user/signal.c b/linux-user/signal.c index 2ab3828722..6532ffc70d 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -18,7 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu/bitops.h" -#include "exec/gdbstub.h" +#include "gdbstub/user.h" #include "hw/core/tcg-cpu-ops.h" #include diff --git a/linux-user/sparc/cpu_loop.c b/linux-user/sparc/cpu_loop.c index c120c42278..b36bb2574b 100644 --- a/linux-user/sparc/cpu_loop.c +++ b/linux-user/sparc/cpu_loop.c @@ -149,6 +149,69 @@ static void flush_windows(CPUSPARCState *env) #endif } +static void next_instruction(CPUSPARCState *env) +{ + env->pc = env->npc; + env->npc = env->npc + 4; +} + +static uint32_t do_getcc(CPUSPARCState *env) +{ +#ifdef TARGET_SPARC64 + return cpu_get_ccr(env) & 0xf; +#else + return extract32(cpu_get_psr(env), 20, 4); +#endif +} + +static void do_setcc(CPUSPARCState *env, uint32_t icc) +{ +#ifdef TARGET_SPARC64 + cpu_put_ccr(env, (cpu_get_ccr(env) & 0xf0) | (icc & 0xf)); +#else + cpu_put_psr(env, deposit32(cpu_get_psr(env), 20, 4, icc)); +#endif +} + +static uint32_t do_getpsr(CPUSPARCState *env) +{ +#ifdef TARGET_SPARC64 + const uint64_t TSTATE_CWP = 0x1f; + const uint64_t TSTATE_ICC = 0xfull << 32; + const uint64_t TSTATE_XCC = 0xfull << 36; + const uint32_t PSR_S = 0x00000080u; + const uint32_t PSR_V8PLUS = 0xff000000u; + uint64_t tstate = sparc64_tstate(env); + + /* See , tstate_to_psr. */ + return ((tstate & TSTATE_CWP) | + PSR_S | + ((tstate & TSTATE_ICC) >> 12) | + ((tstate & TSTATE_XCC) >> 20) | + PSR_V8PLUS); +#else + return (cpu_get_psr(env) & (PSR_ICC | PSR_CWP)) | PSR_S; +#endif +} + +/* Avoid ifdefs below for the abi32 and abi64 paths. */ +#ifdef TARGET_ABI32 +#define TARGET_TT_SYSCALL (TT_TRAP + 0x10) /* t_linux */ +#define syscall_cc psr +#else +#define TARGET_TT_SYSCALL (TT_TRAP + 0x6d) /* tl0_linux64 */ +#define syscall_cc xcc +#endif + +/* Avoid ifdefs below for the v9 and pre-v9 hw traps. 
*/ +#ifdef TARGET_SPARC64 +#define TARGET_TT_SPILL TT_SPILL +#define TARGET_TT_FILL TT_FILL +#else +#define TARGET_TT_SPILL TT_WIN_OVF +#define TARGET_TT_FILL TT_WIN_UNF +#endif + void cpu_loop (CPUSPARCState *env) { CPUState *cs = env_cpu(env); @@ -167,13 +230,7 @@ void cpu_loop (CPUSPARCState *env) } switch (trapnr) { -#ifndef TARGET_SPARC64 - case 0x88: - case 0x90: -#else - case 0x110: - case 0x16d: -#endif + case TARGET_TT_SYSCALL: ret = do_syscall (env, env->gregs[1], env->regwptr[0], env->regwptr[1], env->regwptr[2], env->regwptr[3], @@ -183,67 +240,110 @@ void cpu_loop (CPUSPARCState *env) break; } if ((abi_ulong)ret >= (abi_ulong)(-515)) { -#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) - env->xcc |= PSR_CARRY; -#else - env->psr |= PSR_CARRY; -#endif + env->syscall_cc |= PSR_CARRY; ret = -ret; } else { -#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) - env->xcc &= ~PSR_CARRY; -#else - env->psr &= ~PSR_CARRY; -#endif + env->syscall_cc &= ~PSR_CARRY; } env->regwptr[0] = ret; /* next instruction */ env->pc = env->npc; env->npc = env->npc + 4; break; - case 0x83: /* flush windows */ -#ifdef TARGET_ABI32 - case 0x103: -#endif + + case TT_TRAP + 0x01: /* breakpoint */ + case EXCP_DEBUG: + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); + break; + + case TT_TRAP + 0x02: /* div0 */ + case TT_DIV_ZERO: + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, env->pc); + break; + + case TT_TRAP + 0x03: /* flush windows */ flush_windows(env); - /* next instruction */ - env->pc = env->npc; - env->npc = env->npc + 4; + next_instruction(env); break; -#ifndef TARGET_SPARC64 - case TT_WIN_OVF: /* window overflow */ - save_window(env); + + case TT_TRAP + 0x20: /* getcc */ + env->gregs[1] = do_getcc(env); + next_instruction(env); break; - case TT_WIN_UNF: /* window underflow */ - restore_window(env); + case TT_TRAP + 0x21: /* setcc */ + do_setcc(env, env->gregs[1]); + next_instruction(env); break; -#else - case TT_SPILL: /* window overflow */ - save_window(env); + case TT_TRAP + 0x22: /* getpsr */ + env->gregs[1] = do_getpsr(env); + next_instruction(env); break; - case TT_FILL: /* window underflow */ - restore_window(env); - break; -#ifndef TARGET_ABI32 - case 0x16e: + +#ifdef TARGET_SPARC64 + case TT_TRAP + 0x6e: flush_windows(env); sparc64_get_context(env); break; - case 0x16f: + case TT_TRAP + 0x6f: flush_windows(env); sparc64_set_context(env); break; #endif -#endif + + case TARGET_TT_SPILL: /* window overflow */ + save_window(env); + break; + case TARGET_TT_FILL: /* window underflow */ + restore_window(env); + break; + + case TT_FP_EXCP: + { + int code = TARGET_FPE_FLTUNK; + target_ulong fsr = env->fsr; + + if ((fsr & FSR_FTT_MASK) == FSR_FTT_IEEE_EXCP) { + if (fsr & FSR_NVC) { + code = TARGET_FPE_FLTINV; + } else if (fsr & FSR_OFC) { + code = TARGET_FPE_FLTOVF; + } else if (fsr & FSR_UFC) { + code = TARGET_FPE_FLTUND; + } else if (fsr & FSR_DZC) { + code = TARGET_FPE_FLTDIV; + } else if (fsr & FSR_NXC) { + code = TARGET_FPE_FLTRES; + } + } + force_sig_fault(TARGET_SIGFPE, code, env->pc); + } + break; + case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; case TT_ILL_INSN: force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->pc); break; - case EXCP_DEBUG: - force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); + case TT_PRIV_INSN: + force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC, env->pc); break; + case TT_TOVF: + force_sig_fault(TARGET_SIGEMT, TARGET_EMT_TAGOVF, env->pc); + break; +#ifdef TARGET_SPARC64 + case TT_PRIV_ACT: + /* 
Note do_privact defers to do_privop. */ + force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC, env->pc); + break; +#else + case TT_NCP_INSN: + force_sig_fault(TARGET_SIGILL, TARGET_ILL_COPROC, env->pc); + break; + case TT_UNIMP_FLUSH: + next_instruction(env); + break; +#endif case EXCP_ATOMIC: cpu_exec_step_atomic(cs); break; diff --git a/linux-user/sparc/signal.c b/linux-user/sparc/signal.c index b501750fe0..2be9000b9e 100644 --- a/linux-user/sparc/signal.c +++ b/linux-user/sparc/signal.c @@ -503,7 +503,23 @@ long do_rt_sigreturn(CPUSPARCState *env) return -QEMU_ESIGRETURN; } -#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) +#ifdef TARGET_ABI32 +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + install_sigtramp(tramp, TARGET_NR_sigreturn); + + default_rt_sigreturn = sigtramp_page + 8; + install_sigtramp(tramp + 2, TARGET_NR_rt_sigreturn); + + unlock_user(tramp, sigtramp_page, 2 * 8); +} +#endif + +#ifdef TARGET_SPARC64 #define SPARC_MC_TSTATE 0 #define SPARC_MC_PC 1 #define SPARC_MC_NPC 2 @@ -575,7 +591,7 @@ void sparc64_set_context(CPUSPARCState *env) struct target_ucontext *ucp; target_mc_gregset_t *grp; target_mc_fpu_t *fpup; - abi_ulong pc, npc, tstate; + target_ulong pc, npc, tstate; unsigned int i; unsigned char fenab; @@ -773,18 +789,4 @@ do_sigsegv: unlock_user_struct(ucp, ucp_addr, 1); force_sig(TARGET_SIGSEGV); } -#else -void setup_sigtramp(abi_ulong sigtramp_page) -{ - uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0); - assert(tramp != NULL); - - default_sigreturn = sigtramp_page; - install_sigtramp(tramp, TARGET_NR_sigreturn); - - default_rt_sigreturn = sigtramp_page + 8; - install_sigtramp(tramp + 2, TARGET_NR_rt_sigreturn); - - unlock_user(tramp, sigtramp_page, 2 * 8); -} -#endif +#endif /* TARGET_SPARC64 */ diff --git a/linux-user/sparc/target_signal.h b/linux-user/sparc/target_signal.h index 87757f0c4e..f223eb4af6 100644 --- a/linux-user/sparc/target_signal.h +++ b/linux-user/sparc/target_signal.h @@ -8,7 +8,7 @@ #define TARGET_SIGTRAP 5 #define TARGET_SIGABRT 6 #define TARGET_SIGIOT 6 -#define TARGET_SIGSTKFLT 7 /* actually EMT */ +#define TARGET_SIGEMT 7 #define TARGET_SIGFPE 8 #define TARGET_SIGKILL 9 #define TARGET_SIGBUS 10 diff --git a/linux-user/strace.c b/linux-user/strace.c index 340010661c..aad2b62ca4 100644 --- a/linux-user/strace.c +++ b/linux-user/strace.c @@ -81,6 +81,7 @@ UNUSED static void print_syscall_epilogue(const struct syscallname *); UNUSED static void print_string(abi_long, int); UNUSED static void print_buf(abi_long addr, abi_long len, int last); UNUSED static void print_raw_param(const char *, abi_long, int); +UNUSED static void print_raw_param64(const char *, long long, int last); UNUSED static void print_timeval(abi_ulong, int); UNUSED static void print_timespec(abi_ulong, int); UNUSED static void print_timespec64(abi_ulong, int); @@ -1110,11 +1111,16 @@ UNUSED static const struct flags mmap_flags[] = { FLAG_END, }; +#ifndef CLONE_PIDFD +# define CLONE_PIDFD 0x00001000 +#endif + UNUSED static const struct flags clone_flags[] = { FLAG_GENERIC(CLONE_VM), FLAG_GENERIC(CLONE_FS), FLAG_GENERIC(CLONE_FILES), FLAG_GENERIC(CLONE_SIGHAND), + FLAG_GENERIC(CLONE_PIDFD), FLAG_GENERIC(CLONE_PTRACE), FLAG_GENERIC(CLONE_VFORK), FLAG_GENERIC(CLONE_PARENT), @@ -1642,6 +1648,19 @@ print_raw_param(const char *fmt, abi_long param, int last) qemu_log(format, param); } +/* + * Same as print_raw_param() but prints 
out raw 64-bit parameter. + */ +static void +print_raw_param64(const char *fmt, long long param, int last) +{ + char format[64]; + + (void)snprintf(format, sizeof(format), "%s%s", fmt, get_comma(last)); + qemu_log(format, param); +} + + static void print_pointer(abi_long p, int last) { @@ -1718,10 +1737,8 @@ print_timespec64(abi_ulong ts_addr, int last) print_pointer(ts_addr, last); return; } - qemu_log("{tv_sec = %lld" - ",tv_nsec = %lld}%s", - (long long)tswap64(ts->tv_sec), (long long)tswap64(ts->tv_nsec), - get_comma(last)); + print_raw_param64("{tv_sec=%" PRId64, tswap64(ts->tv_sec), 0); + print_raw_param64("tv_nsec=%" PRId64 "}", tswap64(ts->tv_nsec), last); unlock_user(ts, ts_addr, 0); } else { qemu_log("NULL%s", get_comma(last)); @@ -3854,6 +3871,94 @@ print_futex(CPUArchState *cpu_env, const struct syscallname *name, } #endif +#ifdef TARGET_NR_prlimit64 +static const char *target_ressource_string(abi_ulong r) +{ + #define RET_RES_ENTRY(res) case TARGET_##res: return #res; + switch (r) { + RET_RES_ENTRY(RLIMIT_AS); + RET_RES_ENTRY(RLIMIT_CORE); + RET_RES_ENTRY(RLIMIT_CPU); + RET_RES_ENTRY(RLIMIT_DATA); + RET_RES_ENTRY(RLIMIT_FSIZE); + RET_RES_ENTRY(RLIMIT_LOCKS); + RET_RES_ENTRY(RLIMIT_MEMLOCK); + RET_RES_ENTRY(RLIMIT_MSGQUEUE); + RET_RES_ENTRY(RLIMIT_NICE); + RET_RES_ENTRY(RLIMIT_NOFILE); + RET_RES_ENTRY(RLIMIT_NPROC); + RET_RES_ENTRY(RLIMIT_RSS); + RET_RES_ENTRY(RLIMIT_RTPRIO); +#ifdef RLIMIT_RTTIME + RET_RES_ENTRY(RLIMIT_RTTIME); +#endif + RET_RES_ENTRY(RLIMIT_SIGPENDING); + RET_RES_ENTRY(RLIMIT_STACK); + default: + return NULL; + } + #undef RET_RES_ENTRY +} + +static void +print_rlimit64(abi_ulong rlim_addr, int last) +{ + if (rlim_addr) { + struct target_rlimit64 *rl; + + rl = lock_user(VERIFY_READ, rlim_addr, sizeof(*rl), 1); + if (!rl) { + print_pointer(rlim_addr, last); + return; + } + print_raw_param64("{rlim_cur=%" PRId64, tswap64(rl->rlim_cur), 0); + print_raw_param64("rlim_max=%" PRId64 "}", tswap64(rl->rlim_max), + last); + unlock_user(rl, rlim_addr, 0); + } else { + qemu_log("NULL%s", get_comma(last)); + } +} + +static void +print_prlimit64(CPUArchState *cpu_env, const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + const char *rlim_name; + + print_syscall_prologue(name); + print_raw_param("%d", arg0, 0); + rlim_name = target_ressource_string(arg1); + if (rlim_name) { + qemu_log("%s,", rlim_name); + } else { + print_raw_param("%d", arg1, 0); + } + print_rlimit64(arg2, 0); + print_pointer(arg3, 1); + print_syscall_epilogue(name); +} + +static void +print_syscall_ret_prlimit64(CPUArchState *cpu_env, + const struct syscallname *name, + abi_long ret, abi_long arg0, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, + abi_long arg5) +{ + if (!print_syscall_err(ret)) { + qemu_log(TARGET_ABI_FMT_ld, ret); + if (arg3) { + qemu_log(" ("); + print_rlimit64(arg3, 1); + qemu_log(")"); + } + } + qemu_log("\n"); +} +#endif + #ifdef TARGET_NR_kill static void print_kill(CPUArchState *cpu_env, const struct syscallname *name, diff --git a/linux-user/strace.list b/linux-user/strace.list index d8acbeec60..c7808ea118 100644 --- a/linux-user/strace.list +++ b/linux-user/strace.list @@ -656,7 +656,7 @@ { TARGET_NR_msgsnd, "msgsnd" , NULL, NULL, NULL }, #endif #ifdef TARGET_NR_msync -{ TARGET_NR_msync, "msync" , NULL, NULL, NULL }, +{ TARGET_NR_msync, "msync" , "%s(%p,%u,%d)", NULL, NULL }, #endif #ifdef TARGET_NR_multiplexer { TARGET_NR_multiplexer, "multiplexer" , NULL, NULL, NULL }, @@ -1074,7 +1074,8 
@@ { TARGET_NR_preadv, "preadv" , NULL, NULL, NULL }, #endif #ifdef TARGET_NR_prlimit64 -{ TARGET_NR_prlimit64, "prlimit64" , NULL, NULL, NULL }, +{ TARGET_NR_prlimit64, "prlimit64" , NULL, print_prlimit64, + print_syscall_ret_prlimit64 }, #endif #ifdef TARGET_NR_process_vm_readv { TARGET_NR_process_vm_readv, "process_vm_readv" , NULL, NULL, NULL }, diff --git a/linux-user/syscall.c b/linux-user/syscall.c index e6ab4265b1..d260e03513 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -22,6 +22,8 @@ #include "qemu/path.h" #include "qemu/memfd.h" #include "qemu/queue.h" +#include "qemu/plugin.h" +#include "target_mman.h" #include #include #include @@ -168,9 +170,13 @@ #define CLONE_IGNORED_FLAGS \ (CLONE_DETACHED | CLONE_IO) +#ifndef CLONE_PIDFD +# define CLONE_PIDFD 0x00001000 +#endif + /* Flags for fork which we can implement within QEMU itself */ #define CLONE_OPTIONAL_FORK_FLAGS \ - (CLONE_SETTLS | CLONE_PARENT_SETTID | \ + (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \ CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID) /* Flags for thread creation which we can implement within QEMU itself */ @@ -795,12 +801,11 @@ static inline int host_to_target_sock_type(int host_type) } static abi_ulong target_brk; -static abi_ulong target_original_brk; static abi_ulong brk_page; void target_set_brk(abi_ulong new_brk) { - target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk); + target_brk = new_brk; brk_page = HOST_PAGE_ALIGN(target_brk); } @@ -821,40 +826,44 @@ uint64_t libafl_set_brk(uint64_t new_brk) { //// --- End LibAFL code --- -//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0) -#define DEBUGF_BRK(message, args...) - /* do_brk() must return target values and target errnos. */ -abi_long do_brk(abi_ulong new_brk) +abi_long do_brk(abi_ulong brk_val) { abi_long mapped_addr; abi_ulong new_alloc_size; + abi_ulong new_brk, new_host_brk_page; /* brk pointers are always untagged */ - DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk); - - if (!new_brk) { - DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk); - return target_brk; - } - if (new_brk < target_original_brk) { - DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n", - target_brk); + /* return old brk value if brk_val unchanged or zero */ + if (!brk_val || brk_val == target_brk) { return target_brk; } - /* If the new brk is less than the highest page reserved to the - * target heap allocation, set it and we're almost done... */ - if (new_brk <= brk_page) { - /* Heap contents are initialized to zero, as for anonymous - * mapped pages. 
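/*
 * Editor's note: a worked example of the page math in the rewritten
 * do_brk() around this hunk, assuming a 4 KiB TARGET_PAGE_SIZE and a
 * 64 KiB host page (the mismatched case the rewrite cares about). For a
 * hypothetical brk_val = 0x12345:
 *
 *   new_brk           = TARGET_PAGE_ALIGN(0x12345) = 0x13000
 *   new_host_brk_page = HOST_PAGE_ALIGN(0x12345)   = 0x20000
 *
 * If TARGET_PAGE_ALIGN(target_brk) is also 0x13000, the request stays
 * inside the already-mapped page: the bytes up to the host page end are
 * zeroed and target_brk is updated, with no mmap/munmap call at all.
 * Shrinking works symmetrically, munmapping only whole host pages above
 * new_host_brk_page.
 */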
*/ - if (new_brk > target_brk) { - memset(g2h_untagged(target_brk), 0, new_brk - target_brk); + new_brk = TARGET_PAGE_ALIGN(brk_val); + new_host_brk_page = HOST_PAGE_ALIGN(brk_val); + + /* brk_val and old target_brk might be on the same page */ + if (new_brk == TARGET_PAGE_ALIGN(target_brk)) { + if (brk_val > target_brk) { + /* empty remaining bytes in (possibly larger) host page */ + memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk); } - target_brk = new_brk; - DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk); - return target_brk; + target_brk = brk_val; + return target_brk; + } + + /* Release heap if necessary */ + if (new_brk < target_brk) { + /* empty remaining bytes in (possibly larger) host page */ + memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val); + + /* free unused host pages and set new brk_page */ + target_munmap(new_host_brk_page, brk_page - new_host_brk_page); + brk_page = new_host_brk_page; + + target_brk = brk_val; + return target_brk; } /* We need to allocate more memory after the brk... Note that @@ -863,10 +872,14 @@ abi_long do_brk(abi_ulong new_brk) * itself); instead we treat "mapped but at wrong address" as * a failure and unmap again. */ - new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page); - mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size, + new_alloc_size = new_host_brk_page - brk_page; + if (new_alloc_size) { + mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, 0, 0)); + } else { + mapped_addr = brk_page; + } if (mapped_addr == brk_page) { /* Heap contents are initialized to zero, as for anonymous @@ -878,10 +891,8 @@ abi_long do_brk(abi_ulong new_brk) * then shrunken). */ memset(g2h_untagged(target_brk), 0, brk_page - target_brk); - target_brk = new_brk; - brk_page = HOST_PAGE_ALIGN(target_brk); - DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n", - target_brk); + target_brk = brk_val; + brk_page = new_host_brk_page; return target_brk; } else if (mapped_addr != -1) { /* Mapped but at wrong address, meaning there wasn't actually @@ -889,10 +900,6 @@ abi_long do_brk(abi_ulong new_brk) */ target_munmap(mapped_addr, new_alloc_size); mapped_addr = -1; - DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk); - } - else { - DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk); } #if defined(TARGET_ALPHA) @@ -1730,6 +1737,11 @@ static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr, lladdr = (struct target_sockaddr_ll *)addr; lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex); lladdr->sll_hatype = tswap16(lladdr->sll_hatype); + } else if (sa_family == AF_INET6) { + struct sockaddr_in6 *in6addr; + + in6addr = (struct sockaddr_in6 *)addr; + in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id); } unlock_user(target_saddr, target_addr, 0); @@ -4600,7 +4612,7 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env, } raddr=h2g((unsigned long)host_raddr); - page_set_flags(raddr, raddr + shm_info.shm_segsz, + page_set_flags(raddr, raddr + shm_info.shm_segsz - 1, PAGE_VALID | PAGE_RESET | PAGE_READ | (shmflg & SHM_RDONLY ?
0 : PAGE_WRITE)); @@ -4630,7 +4642,7 @@ static inline abi_long do_shmdt(abi_ulong shmaddr) for (i = 0; i < N_SHM_REGIONS; ++i) { if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) { shm_regions[i].in_use = false; - page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); + page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0); break; } } @@ -5752,7 +5764,7 @@ static abi_long do_ioctl(int fd, int cmd, abi_long arg) if (ie->target_cmd == 0) { qemu_log_mask( LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); - return -TARGET_ENOSYS; + return -TARGET_ENOTTY; } if (ie->target_cmd == cmd) break; @@ -5764,7 +5776,7 @@ static abi_long do_ioctl(int fd, int cmd, abi_long arg) } else if (!ie->host_cmd) { /* Some architectures define BSD ioctls in their headers that are not implemented in Linux. */ - return -TARGET_ENOSYS; + return -TARGET_ENOTTY; } switch(arg_type[0]) { @@ -5822,7 +5834,7 @@ static abi_long do_ioctl(int fd, int cmd, abi_long arg) qemu_log_mask(LOG_UNIMP, "Unsupported ioctl type: cmd=0x%04lx type=%d\n", (long)cmd, arg_type[0]); - ret = -TARGET_ENOSYS; + ret = -TARGET_ENOTTY; break; } return ret; @@ -6759,6 +6771,17 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, return -TARGET_EINVAL; } +#if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open) + if (flags & CLONE_PIDFD) { + return -TARGET_EINVAL; + } +#endif + + /* Cannot allow CLONE_PIDFD with CLONE_PARENT_SETTID */ + if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) { + return -TARGET_EINVAL; + } + if (block_signals()) { return -QEMU_ERESTARTSYS; } @@ -6786,6 +6809,20 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, ts->child_tidptr = child_tidptr; } else { cpu_clone_regs_parent(env, flags); + if (flags & CLONE_PIDFD) { + int pid_fd = 0; +#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open) + int pid_child = ret; + pid_fd = pidfd_open(pid_child, 0); + if (pid_fd >= 0) { + fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD) + | FD_CLOEXEC); + } else { + pid_fd = 0; + } +#endif + put_user_u32(pid_fd, parent_tidptr); + } fork_end(0); } g_assert(!cpu_in_exclusive_context(cpu)); @@ -7642,6 +7679,14 @@ static inline int target_to_host_mlockall_arg(int arg) } #endif +static inline int target_to_host_msync_arg(abi_long arg) +{ + return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) | + ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) | + ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) | + (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC)); +} + #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \ defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \ defined(TARGET_NR_newfstatat)) @@ -8115,6 +8160,9 @@ static int open_self_stat(CPUArchState *cpu_env, int fd) gchar *bin = g_strrstr(ts->bprm->argv[0], "/"); bin = bin ?
bin + 1 : ts->bprm->argv[0]; g_string_printf(buf, "(%.15s) ", bin); + } else if (i == 2) { + /* task state */ + g_string_assign(buf, "R "); /* we are running right now */ } else if (i == 3) { /* ppid */ g_string_printf(buf, FMT_pid " ", getppid()); @@ -8219,7 +8267,8 @@ void target_exception_dump(CPUArchState *env, const char *fmt, int code) } #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \ - defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) + defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \ + defined(TARGET_RISCV) static int is_proc(const char *filename, const char *entry) { return strcmp(filename, entry) == 0; @@ -8297,6 +8346,35 @@ static int open_cpuinfo(CPUArchState *cpu_env, int fd) } #endif +#if defined(TARGET_RISCV) +static int open_cpuinfo(CPUArchState *cpu_env, int fd) +{ + int i; + int num_cpus = sysconf(_SC_NPROCESSORS_ONLN); + RISCVCPU *cpu = env_archcpu(cpu_env); + const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env); + char *isa_string = riscv_isa_string(cpu); + const char *mmu; + + if (cfg->mmu) { + mmu = (cpu_env->xl == MXL_RV32) ? "sv32" : "sv48"; + } else { + mmu = "none"; + } + + for (i = 0; i < num_cpus; i++) { + dprintf(fd, "processor\t: %d\n", i); + dprintf(fd, "hart\t\t: %d\n", i); + dprintf(fd, "isa\t\t: %s\n", isa_string); + dprintf(fd, "mmu\t\t: %s\n", mmu); + dprintf(fd, "uarch\t\t: qemu\n\n"); + } + + g_free(isa_string); + return 0; +} +#endif + #if defined(TARGET_M68K) static int open_hardware(CPUArchState *cpu_env, int fd) { @@ -8321,7 +8399,7 @@ static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN { "/proc/net/route", open_net_route, is_proc }, #endif -#if defined(TARGET_SPARC) || defined(TARGET_HPPA) +#if defined(TARGET_SPARC) || defined(TARGET_HPPA) || defined(TARGET_RISCV) { "/proc/cpuinfo", open_cpuinfo, is_proc }, #endif #if defined(TARGET_M68K) @@ -8734,6 +8812,18 @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count) _syscall2(int, pivot_root, const char *, new_root, const char *, put_old) #endif +#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree) +#define __NR_sys_open_tree __NR_open_tree +_syscall3(int, sys_open_tree, int, __dfd, const char *, __filename, + unsigned int, __flags) +#endif + +#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount) +#define __NR_sys_move_mount __NR_move_mount +_syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname, + int, __to_dfd, const char *, __to_pathname, unsigned int, flag) +#endif + /* This is an internal helper for do_syscall so that it is easier * to have a single return point, so that actions, such as logging * of syscall results, can be performed. 
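For readers unfamiliar with the pair of syscalls wrapped just above: open_tree() detaches (or clones) a mount subtree into a file descriptor, and move_mount() attaches it somewhere else. Below is a minimal guest-side sketch, not part of the patch: the /mnt paths are hypothetical, the two flag values are copied from <linux/mount.h>, SYS_open_tree/SYS_move_mount need reasonably recent kernel headers (glibc ships no wrappers), and the calls only succeed with CAP_SYS_ADMIN.

    #include <fcntl.h>          /* AT_FDCWD */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef OPEN_TREE_CLONE
    #define OPEN_TREE_CLONE 1                   /* from <linux/mount.h> */
    #endif
    #ifndef MOVE_MOUNT_F_EMPTY_PATH
    #define MOVE_MOUNT_F_EMPTY_PATH 0x00000004  /* from <linux/mount.h> */
    #endif

    int main(void)
    {
        /* Detach a clone of the mount tree rooted at /mnt/src. */
        int fd = syscall(SYS_open_tree, AT_FDCWD, "/mnt/src", OPEN_TREE_CLONE);
        if (fd < 0) {
            perror("open_tree");
            return 1;
        }
        /* Reattach it at /mnt/dst; the empty source path means "fd itself". */
        if (syscall(SYS_move_mount, fd, "", AT_FDCWD, "/mnt/dst",
                    MOVE_MOUNT_F_EMPTY_PATH) < 0) {
            perror("move_mount");
            return 1;
        }
        return 0;
    }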
@@ -9127,6 +9217,60 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, unlock_user(p, arg1, 0); return ret; #endif +#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount) + case TARGET_NR_move_mount: + { + void *p2, *p4; + + if (!arg2 || !arg4) { + return -TARGET_EFAULT; + } + + p2 = lock_user_string(arg2); + if (!p2) { + return -TARGET_EFAULT; + } + + p4 = lock_user_string(arg4); + if (!p4) { + unlock_user(p2, arg2, 0); + return -TARGET_EFAULT; + } + ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5)); + + unlock_user(p2, arg2, 0); + unlock_user(p4, arg4, 0); + + return ret; + } +#endif +#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree) + case TARGET_NR_open_tree: + { + void *p2; + int host_flags; + + if (!arg2) { + return -TARGET_EFAULT; + } + + p2 = lock_user_string(arg2); + if (!p2) { + return -TARGET_EFAULT; + } + + host_flags = arg3 & ~TARGET_O_CLOEXEC; + if (arg3 & TARGET_O_CLOEXEC) { + host_flags |= O_CLOEXEC; + } + + ret = get_errno(sys_open_tree(arg1, p2, host_flags)); + + unlock_user(p2, arg2, 0); + + return ret; + } +#endif #ifdef TARGET_NR_stime /* not on alpha */ case TARGET_NR_stime: { @@ -10025,18 +10169,13 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, /* Short circuit this for the magic exe check. */ ret = -TARGET_EINVAL; } else if (is_proc_myself((const char *)p, "exe")) { - char real[PATH_MAX], *temp; - temp = realpath(exec_path, real); - /* Return value is # of bytes that we wrote to the buffer. */ - if (temp == NULL) { - ret = get_errno(-1); - } else { - /* Don't worry about sign mismatch as earlier mapping - * logic would have thrown a bad address error. */ - ret = MIN(strlen(real), arg3); - /* We cannot NUL terminate the string. */ - memcpy(p2, real, ret); - } + /* + * Don't worry about sign mismatch as earlier mapping + * logic would have thrown a bad address error. + */ + ret = MIN(strlen(exec_path), arg3); + /* We cannot NUL terminate the string. */ + memcpy(p2, exec_path, ret); } else { ret = get_errno(readlink(path(p), p2, arg3)); } @@ -10057,18 +10196,13 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, /* Short circuit this for the magic exe check. */ ret = -TARGET_EINVAL; } else if (is_proc_myself((const char *)p, "exe")) { - char real[PATH_MAX], *temp; - temp = realpath(exec_path, real); - /* Return value is # of bytes that we wrote to the buffer. */ - if (temp == NULL) { - ret = get_errno(-1); - } else { - /* Don't worry about sign mismatch as earlier mapping - * logic would have thrown a bad address error. */ - ret = MIN(strlen(real), arg4); - /* We cannot NUL terminate the string. */ - memcpy(p2, real, ret); - } + /* + * Don't worry about sign mismatch as earlier mapping + * logic would have thrown a bad address error. + */ + ret = MIN(strlen(exec_path), arg4); + /* We cannot NUL terminate the string. */ + memcpy(p2, exec_path, ret); } else { ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); } @@ -10165,7 +10299,8 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, /* ??? msync/mlock/munlock are broken for softmmu. 
*/ #ifdef TARGET_NR_msync case TARGET_NR_msync: - return get_errno(msync(g2h(cpu, arg1), arg2, arg3)); + return get_errno(msync(g2h(cpu, arg1), arg2, + target_to_host_msync_arg(arg3))); #endif #ifdef TARGET_NR_mlock case TARGET_NR_mlock: @@ -11472,39 +11607,58 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, { int gidsetsize = arg1; target_id *target_grouplist; - gid_t *grouplist; + g_autofree gid_t *grouplist = NULL; int i; - grouplist = alloca(gidsetsize * sizeof(gid_t)); - ret = get_errno(getgroups(gidsetsize, grouplist)); - if (gidsetsize == 0) - return ret; - if (!is_error(ret)) { - target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); - if (!target_grouplist) - return -TARGET_EFAULT; - for(i = 0;i < ret; i++) - target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); - unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); + if (gidsetsize > NGROUPS_MAX) { + return -TARGET_EINVAL; } + if (gidsetsize > 0) { + grouplist = g_try_new(gid_t, gidsetsize); + if (!grouplist) { + return -TARGET_ENOMEM; + } + } + ret = get_errno(getgroups(gidsetsize, grouplist)); + if (!is_error(ret) && gidsetsize > 0) { + target_grouplist = lock_user(VERIFY_WRITE, arg2, + gidsetsize * sizeof(target_id), 0); + if (!target_grouplist) { + return -TARGET_EFAULT; + } + for (i = 0; i < ret; i++) { + target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); + } + unlock_user(target_grouplist, arg2, + gidsetsize * sizeof(target_id)); + } + return ret; } - return ret; case TARGET_NR_setgroups: { int gidsetsize = arg1; target_id *target_grouplist; - gid_t *grouplist = NULL; + g_autofree gid_t *grouplist = NULL; int i; - if (gidsetsize) { - grouplist = alloca(gidsetsize * sizeof(gid_t)); - target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); + + if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) { + return -TARGET_EINVAL; + } + if (gidsetsize > 0) { + grouplist = g_try_new(gid_t, gidsetsize); + if (!grouplist) { + return -TARGET_ENOMEM; + } + target_grouplist = lock_user(VERIFY_READ, arg2, + gidsetsize * sizeof(target_id), 1); if (!target_grouplist) { return -TARGET_EFAULT; } for (i = 0; i < gidsetsize; i++) { grouplist[i] = low2highgid(tswapid(target_grouplist[i])); } - unlock_user(target_grouplist, arg2, 0); + unlock_user(target_grouplist, arg2, + gidsetsize * sizeof(target_id)); } return get_errno(setgroups(gidsetsize, grouplist)); } @@ -11789,41 +11943,59 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, { int gidsetsize = arg1; uint32_t *target_grouplist; - gid_t *grouplist; + g_autofree gid_t *grouplist = NULL; int i; - grouplist = alloca(gidsetsize * sizeof(gid_t)); + if (gidsetsize > NGROUPS_MAX) { + return -TARGET_EINVAL; + } + if (gidsetsize > 0) { + grouplist = g_try_new(gid_t, gidsetsize); + if (!grouplist) { + return -TARGET_ENOMEM; + } + } ret = get_errno(getgroups(gidsetsize, grouplist)); - if (gidsetsize == 0) - return ret; - if (!is_error(ret)) { - target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); + if (!is_error(ret) && gidsetsize > 0) { + target_grouplist = lock_user(VERIFY_WRITE, arg2, + gidsetsize * 4, 0); if (!target_grouplist) { return -TARGET_EFAULT; } - for(i = 0;i < ret; i++) + for (i = 0; i < ret; i++) { target_grouplist[i] = tswap32(grouplist[i]); + } unlock_user(target_grouplist, arg2, gidsetsize * 4); } + return ret; } - return ret; #endif #ifdef TARGET_NR_setgroups32 case TARGET_NR_setgroups32: { int gidsetsize = arg1; uint32_t 
*target_grouplist; - gid_t *grouplist; + g_autofree gid_t *grouplist = NULL; int i; - grouplist = alloca(gidsetsize * sizeof(gid_t)); - target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); - if (!target_grouplist) { - return -TARGET_EFAULT; + if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) { + return -TARGET_EINVAL; + } + if (gidsetsize > 0) { + grouplist = g_try_new(gid_t, gidsetsize); + if (!grouplist) { + return -TARGET_ENOMEM; + } + target_grouplist = lock_user(VERIFY_READ, arg2, + gidsetsize * 4, 1); + if (!target_grouplist) { + return -TARGET_EFAULT; + } + for (i = 0; i < gidsetsize; i++) { + grouplist[i] = tswap32(target_grouplist[i]); + } + unlock_user(target_grouplist, arg2, 0); } - for(i = 0;i < gidsetsize; i++) - grouplist[i] = tswap32(target_grouplist[i]); - unlock_user(target_grouplist, arg2, 0); return get_errno(setgroups(gidsetsize, grouplist)); } #endif @@ -11894,7 +12066,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, #ifdef TARGET_NR_mincore case TARGET_NR_mincore: { - void *a = lock_user(VERIFY_READ, arg1, arg2, 0); + void *a = lock_user(VERIFY_NONE, arg1, arg2, 0); if (!a) { return -TARGET_ENOMEM; } @@ -12922,8 +13094,8 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { return -TARGET_EFAULT; } - rnew.rlim_cur = tswap64(target_rnew->rlim_cur); - rnew.rlim_max = tswap64(target_rnew->rlim_max); + __get_user(rnew.rlim_cur, &target_rnew->rlim_cur); + __get_user(rnew.rlim_max, &target_rnew->rlim_max); unlock_user_struct(target_rnew, arg3, 0); rnewp = &rnew; } @@ -12933,8 +13105,8 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { return -TARGET_EFAULT; } - target_rold->rlim_cur = tswap64(rold.rlim_cur); - target_rold->rlim_max = tswap64(rold.rlim_max); + __put_user(rold.rlim_cur, &target_rold->rlim_cur); + __put_user(rold.rlim_max, &target_rold->rlim_max); unlock_user_struct(target_rold, arg4, 1); } return ret; @@ -13154,8 +13326,12 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) case TARGET_NR_timerfd_create: - return get_errno(timerfd_create(arg1, - target_to_host_bitmask(arg2, fcntl_flags_tbl))); + ret = get_errno(timerfd_create(arg1, + target_to_host_bitmask(arg2, fcntl_flags_tbl))); + if (ret >= 0) { + fd_trans_register(ret, &target_timerfd_trans); + } + return ret; #endif #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h index 77864de57f..cc37054cb5 100644 --- a/linux-user/syscall_defs.h +++ b/linux-user/syscall_defs.h @@ -61,7 +61,7 @@ #if (defined(TARGET_I386) && defined(TARGET_ABI32)) \ || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \ - || defined(TARGET_SPARC) \ + || (defined(TARGET_SPARC) && defined(TARGET_ABI32)) \ || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS) /* 16 bit uid wrappers emulation */ #define USE_UID16 @@ -717,6 +717,11 @@ typedef struct target_siginfo { #define TARGET_TRAP_HWBKPT (4) /* hardware breakpoint/watchpoint */ #define TARGET_TRAP_UNK (5) /* undiagnosed trap */ +/* + * SIGEMT si_codes + */ +#define TARGET_EMT_TAGOVF 1 /* tag overflow */ + #include "target_resource.h" struct target_pollfd { diff --git a/linux-user/user-internals.h b/linux-user/user-internals.h index e63eab8f00..ac21ca1d07 100644 --- 
a/linux-user/user-internals.h +++ b/linux-user/user-internals.h @@ -20,6 +20,7 @@ #include "exec/user/thunk.h" #include "exec/exec-all.h" +#include "exec/tb-flush.h" #include "qemu/log.h" extern char *exec_path; @@ -75,19 +76,19 @@ void fork_end(int child); /** * probe_guest_base: * @image_name: the executable being loaded - * @loaddr: the lowest fixed address in the executable - * @hiaddr: the highest fixed address in the executable + * @loaddr: the lowest fixed address within the executable + * @hiaddr: the highest fixed address within the executable * * Creates the initial guest address space in the host memory space. * - * If @loaddr == 0, then no address in the executable is fixed, - * i.e. it is fully relocatable. In that case @hiaddr is the size - * of the executable. + * If @loaddr == 0, then no address in the executable is fixed, i.e. + * it is fully relocatable. In that case @hiaddr is the size of the + * executable minus one. * * This function will not return if a valid value for guest_base * cannot be chosen. On return, the executable loader can expect * - * target_mmap(loaddr, hiaddr - loaddr, ...) + * target_mmap(loaddr, hiaddr - loaddr + 1, ...) * * to succeed. */ diff --git a/meson.build b/meson.build index 9ceebe273b..937e9eb595 100644 --- a/meson.build +++ b/meson.build @@ -1,4 +1,4 @@ -project('qemu', ['c'], meson_version: '>=0.61.3', +project('qemu', ['c'], meson_version: '>=0.63.0', default_options: ['warning_level=1', 'c_std=gnu11', 'cpp_std=gnu++11', 'b_colorout=auto', 'b_staticpic=false', 'stdsplit=false', 'optimization=2', 'b_pie=true'], version: files('VERSION')) @@ -17,11 +17,6 @@ fs = import('fs') targetos = host_machine.system() sh = find_program('sh') config_host = keyval.load(meson.current_build_dir() / 'config-host.mak') -enable_modules = 'CONFIG_MODULES' in config_host -enable_static = 'CONFIG_STATIC' in config_host - -# Allow both shared and static libraries unless --enable-static -static_kwargs = enable_static ? 
{'static': true} : {} cc = meson.get_compiler('c') all_languages = ['c'] @@ -88,6 +83,12 @@ have_ga = get_option('guest_agent') \ .require(targetos in ['sunos', 'linux', 'windows', 'freebsd', 'netbsd', 'openbsd'], error_message: 'unsupported OS for QEMU guest agent') \ .allowed() +enable_modules = get_option('modules') \ + .require(targetos != 'windows', + error_message: 'Modules are not available for Windows') \ + .require(not get_option('prefer_static'), + error_message: 'Modules are incompatible with static linking') \ + .allowed() have_block = have_system or have_tools python = import('python').find_installation() @@ -189,14 +190,206 @@ endif # Compiler flags # ################## -qemu_cflags = config_host['QEMU_CFLAGS'].split() -qemu_objcflags = config_host['QEMU_OBJCFLAGS'].split() -qemu_ldflags = config_host['QEMU_LDFLAGS'].split() +foreach lang : all_languages + compiler = meson.get_compiler(lang) + if compiler.get_id() == 'gcc' and compiler.version().version_compare('>=7.4') + # ok + elif compiler.get_id() == 'clang' and compiler.compiles(''' + #ifdef __apple_build_version__ + # if __clang_major__ < 12 || (__clang_major__ == 12 && __clang_minor__ < 0) + # error You need at least XCode Clang v12.0 to compile QEMU + # endif + #else + # if __clang_major__ < 10 || (__clang_major__ == 10 && __clang_minor__ < 0) + # error You need at least Clang v10.0 to compile QEMU + # endif + #endif''') + # ok + else + error('You either need GCC v7.4 or Clang v10.0 (or XCode Clang v12.0) to compile QEMU') + endif +endforeach -if enable_static +# default flags for all hosts +# We use -fwrapv to tell the compiler that we require a C dialect where +# left shift of signed integers is well defined and has the expected +# 2s-complement style results. (Both clang and gcc agree that it +# provides these semantics.) + +qemu_common_flags = [ + '-D_GNU_SOURCE', '-D_FILE_OFFSET_BITS=64', '-D_LARGEFILE_SOURCE', + '-fno-strict-aliasing', '-fno-common', '-fwrapv' ] +qemu_cflags = [] +qemu_ldflags = [] + +if targetos == 'darwin' + # Disable attempts to use ObjectiveC features in os/object.h since they + # won't work when we're compiling with gcc as a C compiler. + qemu_common_flags += '-DOS_OBJECT_USE_OBJC=0' +elif targetos == 'solaris' + # needed for CMSG_ macros in sys/socket.h + qemu_common_flags += '-D_XOPEN_SOURCE=600' + # needed for TIOCWIN* defines in termios.h + qemu_common_flags += '-D__EXTENSIONS__' +elif targetos == 'haiku' + qemu_common_flags += ['-DB_USE_POSITIVE_POSIX_ERRORS', '-D_BSD_SOURCE', '-fPIC'] +endif + +# __sync_fetch_and_and requires at least -march=i486. Many toolchains +# use i686 as default anyway, but for those that don't, an explicit +# specification is necessary +if host_arch == 'i386' and not cc.links(''' + static int sfaa(int *ptr) + { + return __sync_fetch_and_and(ptr, 0); + } + + int main(void) + { + int val = 42; + val = __sync_val_compare_and_swap(&val, 0, 1); + sfaa(&val); + return val; + }''') + qemu_common_flags = ['-march=i486'] + qemu_common_flags +endif + +if get_option('gprof') + qemu_common_flags += ['-p'] + qemu_ldflags += ['-p'] +endif + +if get_option('prefer_static') qemu_ldflags += get_option('b_pie') ? '-static-pie' : '-static' endif +# Meson currently only handles pie as a boolean for now, so if the user +# has explicitly disabled PIE we need to extend our cflags. 
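As an aside on the -fwrapv entry added to qemu_common_flags above, here is a sketch (not from the patch) of the dialect it buys: per the comment, gcc and clang both treat signed left shift and signed overflow as well-defined two's-complement operations under the flag, whereas plain ISO C leaves them undefined and an optimizer may fold the second check below to a constant.

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        int x = -2;
        int v = INT_MAX;

        /* Undefined by the letter of ISO C; a well-defined -4 under -fwrapv. */
        printf("%d\n", x << 1);

        /* With -fwrapv the addition wraps to INT_MIN, so this prints 1;
         * without it the compiler may assume no overflow and print 0. */
        printf("%d\n", v + 1 < v);
        return 0;
    }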
+# +# -no-pie is supposedly a linker flag that has no effect on the compiler +# command line, but some distros, that didn't quite know what they were +# doing, made local changes to gcc's specs file that turned it into +# a compiler command-line flag. +# +# What about linker flags? For a static build, no PIE is implied by -static +# which we added above (and if it's not because of the same specs patching, +# there's nothing we can do: compilation will fail, report a bug to your +# distro and do not use --disable-pie in the meanwhile). For dynamic linking, +# instead, we can't add -no-pie because it overrides -shared: the linker then +# tries to build an executable instead of a shared library and fails. So +# don't add -no-pie anywhere and cross fingers. :( +if not get_option('b_pie') + qemu_common_flags += cc.get_supported_arguments('-fno-pie', '-no-pie') +endif + +if not get_option('stack_protector').disabled() + stack_protector_probe = ''' + int main(int argc, char *argv[]) + { + char arr[64], *p = arr, *c = argv[argc - 1]; + while (*c) { + *p++ = *c++; + } + return 0; + }''' + have_stack_protector = false + foreach arg : ['-fstack-protector-strong', '-fstack-protector-all'] + # We need to check both a compile and a link, since some compiler + # setups fail only on a .c->.o compile and some only at link time + if cc.compiles(stack_protector_probe, args: ['-Werror', arg]) and \ + cc.links(stack_protector_probe, args: ['-Werror', arg]) + have_stack_protector = true + qemu_cflags += arg + qemu_ldflags += arg + break + endif + endforeach + get_option('stack_protector') \ + .require(have_stack_protector, error_message: 'Stack protector not supported') +endif + +coroutine_backend = get_option('coroutine_backend') +ucontext_probe = ''' + #include <ucontext.h> + #ifdef __stub_makecontext + #error Ignoring glibc stub makecontext which will always fail + #endif + int main(void) { makecontext(0, 0, 0); return 0; }''' + +# On Windows the only valid backend is the Windows specific one. +# For POSIX prefer ucontext, but it's not always possible. The fallback +# is sigaltstack. +supported_backends = [] +if targetos == 'windows' + supported_backends += ['windows'] +else + if targetos != 'darwin' and cc.links(ucontext_probe) + supported_backends += ['ucontext'] + endif + supported_backends += ['sigaltstack'] +endif + +if coroutine_backend == 'auto' + coroutine_backend = supported_backends[0] +elif coroutine_backend not in supported_backends + error('"@0@" backend requested but not available. Available backends: @1@' \ + .format(coroutine_backend, ', '.join(supported_backends))) +endif + +# Compiles if SafeStack *not* enabled +safe_stack_probe = ''' + int main(void) + { + #if defined(__has_feature) + #if __has_feature(safe_stack) + #error SafeStack Enabled + #endif + #endif + return 0; + }''' +if get_option('safe_stack') != not cc.compiles(safe_stack_probe) + safe_stack_arg = get_option('safe_stack') ? '-fsanitize=safe-stack' : '-fno-sanitize=safe-stack' + if get_option('safe_stack') != not cc.compiles(safe_stack_probe, args: safe_stack_arg) + error(get_option('safe_stack') ?
'SafeStack not supported by your compiler' \ + : 'Cannot disable SafeStack') endif + qemu_cflags += safe_stack_arg + qemu_ldflags += safe_stack_arg +endif +if get_option('safe_stack') and coroutine_backend != 'ucontext' + error('SafeStack is only supported with the ucontext coroutine backend') +endif + +if get_option('sanitizers') + if cc.has_argument('-fsanitize=address') + qemu_cflags = ['-fsanitize=address'] + qemu_cflags + qemu_ldflags = ['-fsanitize=address'] + qemu_ldflags + endif + + # Detect static linking issue with ubsan - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84285 + if cc.links('int main(int argc, char **argv) { return argc + 1; }', + args: [qemu_ldflags, '-fsanitize=undefined']) + qemu_cflags = ['-fsanitize=undefined'] + qemu_cflags + qemu_ldflags = ['-fsanitize=undefined'] + qemu_ldflags + endif +endif + +# Thread sanitizer is, for now, much noisier than the other sanitizers; +# keep it separate until that is not the case. +if get_option('tsan') + if get_option('sanitizers') + error('TSAN is not supported with other sanitizers') + endif + if not cc.has_function('__tsan_create_fiber', + args: '-fsanitize=thread', + prefix: '#include <sanitizer/tsan_interface.h>') + error('Cannot enable TSAN due to missing fiber annotation interface') + endif + qemu_cflags = ['-fsanitize=thread'] + qemu_cflags + qemu_ldflags = ['-fsanitize=thread'] + qemu_ldflags +endif + # Detect support for PT_GNU_RELRO + DT_BIND_NOW. # The combination is known as "full relro", because .got.plt is read-only too. qemu_ldflags += cc.get_supported_link_arguments('-Wl,-z,relro', '-Wl,-z,now') @@ -206,14 +399,11 @@ if targetos == 'windows' qemu_ldflags += cc.get_supported_link_arguments('-Wl,--dynamicbase', '-Wl,--high-entropy-va') endif -if get_option('gprof') - qemu_cflags += ['-p'] - qemu_objcflags += ['-p'] - qemu_ldflags += ['-p'] +# Exclude --warn-common with TSan to suppress warnings from the TSan libraries. +if targetos != 'sunos' and not get_option('tsan') + qemu_ldflags += cc.get_supported_link_arguments('-Wl,--warn-common') endif -# Specify linker-script with add_project_link_arguments so that it is not placed -# within a linker --start-group/--end-group pair if get_option('fuzzing') # Specify a filter to only instrument code that is directly related to # virtual-devices.
They need to be configurable, to support OSS-Fuzz fuzz_exe_ldflags = ['-fsanitize=fuzzer'] @@ -248,20 +435,50 @@ if get_option('fuzzing') endif endif +add_global_arguments(qemu_common_flags, native: false, language: all_languages) +add_global_link_arguments(qemu_ldflags, native: false, language: all_languages) + +# Collect warnings that we want to enable + +warn_flags = [ + '-Wundef', + '-Wwrite-strings', + '-Wmissing-prototypes', + '-Wstrict-prototypes', + '-Wredundant-decls', + '-Wold-style-declaration', + '-Wold-style-definition', + '-Wtype-limits', + '-Wformat-security', + '-Wformat-y2k', + '-Winit-self', + '-Wignored-qualifiers', + '-Wempty-body', + '-Wnested-externs', + '-Wendif-labels', + '-Wexpansion-to-defined', + '-Wimplicit-fallthrough=2', + '-Wmissing-format-attribute', + '-Wno-initializer-overrides', + '-Wno-missing-include-dirs', + '-Wno-shift-negative-value', + '-Wno-string-plus-int', + '-Wno-typedef-redefinition', + '-Wno-tautological-type-limit-compare', + '-Wno-psabi', + '-Wno-gnu-variable-sized-type-not-at-end', +] + +if targetos != 'darwin' + warn_flags += ['-Wthread-safety'] +endif + # Check that the C++ compiler exists and works with the C compiler. link_language = 'c' linker = cc qemu_cxxflags = [] if 'cpp' in all_languages - add_global_arguments(['-D__STDC_LIMIT_MACROS', '-D__STDC_CONSTANT_MACROS', '-D__STDC_FORMAT_MACROS'], - native: false, language: 'cpp') - foreach k: qemu_cflags - if k not in ['-Wstrict-prototypes', '-Wmissing-prototypes', '-Wnested-externs', - '-Wold-style-declaration', '-Wold-style-definition', '-Wredundant-decls'] - qemu_cxxflags += [k] - endif - endforeach - + qemu_cxxflags = ['-D__STDC_LIMIT_MACROS', '-D__STDC_CONSTANT_MACROS', '-D__STDC_FORMAT_MACROS'] + qemu_cflags if cxx.links(files('scripts/main.c'), args: qemu_cflags) link_language = 'cpp' linker = cxx @@ -271,16 +488,26 @@ if 'cpp' in all_languages endif endif -# Exclude --warn-common with TSan to suppress warnings from the TSan libraries. -if targetos != 'sunos' and not config_host.has_key('CONFIG_TSAN') - qemu_ldflags += linker.get_supported_link_arguments('-Wl,--warn-common') +# clang does not support glibc + FORTIFY_SOURCE (is it still true?) +if get_option('optimization') != '0' and targetos == 'linux' + if cc.get_id() == 'gcc' + qemu_cflags += ['-U_FORTIFY_SOURCE', '-D_FORTIFY_SOURCE=2'] + endif + if 'cpp' in all_languages and cxx.get_id() == 'gcc' + qemu_cxxflags += ['-U_FORTIFY_SOURCE', '-D_FORTIFY_SOURCE=2'] + endif endif -add_global_link_arguments(qemu_ldflags, native: false, language: all_languages) - -add_global_arguments(qemu_cflags, native: false, language: 'c') -add_global_arguments(qemu_cxxflags, native: false, language: 'cpp') -add_global_arguments(qemu_objcflags, native: false, language: 'objc') +add_project_arguments(qemu_cflags, native: false, language: 'c') +add_project_arguments(cc.get_supported_arguments(warn_flags), native: false, language: 'c') +if 'cpp' in all_languages + add_project_arguments(qemu_cxxflags, native: false, language: 'cpp') + add_project_arguments(cxx.get_supported_arguments(warn_flags), native: false, language: 'cpp') +endif +if 'objc' in all_languages + # Note sanitizer flags are not applied to Objective-C sources! 
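For background on the coroutine_backend machinery a few hunks up (an illustrative sketch only; QEMU's real backend lives in util/coroutine-ucontext.c): the ucontext backend that the probe links against is built on the POSIX makecontext()/swapcontext() pair, which aims a context at a function running on its own stack and then switches between stacks.

    #include <stdio.h>
    #include <stdlib.h>
    #include <ucontext.h>

    static ucontext_t main_ctx, coro_ctx;

    static void coro_entry(void)
    {
        printf("in coroutine\n");
        swapcontext(&coro_ctx, &main_ctx);   /* yield back to main */
    }

    int main(void)
    {
        char *stack = malloc(64 * 1024);

        /* Initialize coro_ctx, give it a private stack, aim it at coro_entry. */
        getcontext(&coro_ctx);
        coro_ctx.uc_stack.ss_sp = stack;
        coro_ctx.uc_stack.ss_size = 64 * 1024;
        coro_ctx.uc_link = &main_ctx;
        makecontext(&coro_ctx, coro_entry, 0);

        swapcontext(&main_ctx, &coro_ctx);   /* enter the coroutine */
        printf("back in main\n");
        free(stack);
        return 0;
    }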
+ add_project_arguments(objc.get_supported_arguments(warn_flags), native: false, language: 'objc') +endif if targetos == 'linux' add_project_arguments('-isystem', meson.current_source_dir() / 'linux-headers', '-isystem', 'linux-headers', @@ -292,6 +519,16 @@ add_project_arguments('-iquote', '.', '-iquote', meson.current_source_dir() / 'include', language: all_languages) +# If a host-specific include directory exists, list that first... +host_include = meson.current_source_dir() / 'host/include/' +if fs.is_dir(host_include / host_arch) + add_project_arguments('-iquote', host_include / host_arch, + language: all_languages) +endif +# ... followed by the generic fallback. +add_project_arguments('-iquote', host_include / 'generic', + language: all_languages) + sparse = find_program('cgcc', required: get_option('sparse')) if sparse.found() run_target('sparse', @@ -496,25 +733,85 @@ endif # Dependencies # ################ -# The path to glib.h is added to all compilation commands. This was -# grandfathered in from the QEMU Makefiles. -add_project_arguments(config_host['GLIB_CFLAGS'].split(), - native: false, language: all_languages) -glib = declare_dependency(compile_args: config_host['GLIB_CFLAGS'].split(), - link_args: config_host['GLIB_LIBS'].split(), - version: config_host['GLIB_VERSION'], - variables: { - 'bindir': config_host['GLIB_BINDIR'], - }) -# override glib dep with the configure results (for subprojects) +# When bumping glib minimum version, please check also whether to increase +# the _WIN32_WINNT setting in osdep.h according to the value from glib +glib_req_ver = '>=2.56.0' +glib_pc = dependency('glib-2.0', version: glib_req_ver, required: true, + method: 'pkg-config') +glib_cflags = [] +if enable_modules + gmodule = dependency('gmodule-export-2.0', version: glib_req_ver, required: true, + method: 'pkg-config') +elif config_host.has_key('CONFIG_PLUGIN') + gmodule = dependency('gmodule-no-export-2.0', version: glib_req_ver, required: true, + method: 'pkg-config') +else + gmodule = not_found +endif + +# This workaround is required due to a bug in pkg-config file for glib as it +# doesn't define GLIB_STATIC_COMPILATION for pkg-config --static +if targetos == 'windows' and get_option('prefer_static') + glib_cflags += ['-DGLIB_STATIC_COMPILATION'] +endif + +# Sanity check that the current size_t matches the +# size that glib thinks it should be. This catches +# problems on multi-arch where people try to build +# 32-bit QEMU while pointing at 64-bit glib headers + +if not cc.compiles(''' + #include + #include + + #define QEMU_BUILD_BUG_ON(x) \ + typedef char qemu_build_bug_on[(x)?-1:1] __attribute__((unused)); + + int main(void) { + QEMU_BUILD_BUG_ON(sizeof(size_t) != GLIB_SIZEOF_SIZE_T); + return 0; + }''', dependencies: glib_pc, args: glib_cflags) + error('''sizeof(size_t) doesn't match GLIB_SIZEOF_SIZE_T. 
+ You probably need to set PKG_CONFIG_LIBDIR to point + to the right pkg-config files for your build target.''') +endif + +# Silence clang warnings triggered by glib < 2.57.2 +if not cc.compiles(''' + #include <glib.h> + typedef struct Foo { + int i; + } Foo; + static void foo_free(Foo *f) + { + g_free(f); + } + G_DEFINE_AUTOPTR_CLEANUP_FUNC(Foo, foo_free) + int main(void) { return 0; }''', dependencies: glib_pc, args: ['-Wunused-function', '-Werror']) + glib_cflags += cc.get_supported_arguments('-Wno-unused-function') +endif +glib = declare_dependency(dependencies: [glib_pc, gmodule], + compile_args: glib_cflags, + version: glib_pc.version()) + +# Check whether glib has gslice, which we have to avoid for correctness. +# TODO: remove this check and the corresponding workaround (qtree) when +# the minimum supported glib is >= 2.75.3 +glib_has_gslice = glib.version().version_compare('<2.75.3') + +# override glib dep to include the above refinements meson.override_dependency('glib-2.0', glib) +# The path to glib.h is added to all compilation commands. +add_project_dependencies(glib.partial_dependency(compile_args: true, includes: true), + native: false, language: all_languages) + gio = not_found gdbus_codegen = not_found gdbus_codegen_error = '@0@ requires gdbus-codegen, please install libgio' if not get_option('gio').auto() or have_system gio = dependency('gio-2.0', required: get_option('gio'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') if gio.found() and not cc.links(''' #include <gio/gio.h> int main(void) @@ -531,7 +828,7 @@ if not get_option('gio').auto() or have_system gdbus_codegen = find_program(gio.get_variable('gdbus_codegen'), required: get_option('gio')) gio_unix = dependency('gio-unix-2.0', required: get_option('gio'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') gio = declare_dependency(dependencies: [gio, gio_unix], version: gio.version()) endif @@ -544,20 +841,19 @@ endif lttng = not_found if 'ust' in get_option('trace_backends') lttng = dependency('lttng-ust', required: true, version: '>= 2.1', - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif pixman = not_found if have_system or have_tools pixman = dependency('pixman-1', required: have_system, version:'>=0.21.8', - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif -zlib = dependency('zlib', required: true, kwargs: static_kwargs) +zlib = dependency('zlib', required: true) libaio = not_found if not get_option('linux_aio').auto() or have_block libaio = cc.find_library('aio', has_headers: ['libaio.h'], - required: get_option('linux_aio'), - kwargs: static_kwargs) + required: get_option('linux_aio')) endif linux_io_uring_test = ''' @@ -570,7 +866,7 @@ linux_io_uring = not_found if not get_option('linux_io_uring').auto() or have_block linux_io_uring = dependency('liburing', version: '>=0.3', required: get_option('linux_io_uring'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') if not cc.links(linux_io_uring_test) linux_io_uring = not_found endif @@ -580,7 +876,7 @@ libnfs = not_found if not get_option('libnfs').auto() or have_block libnfs = dependency('libnfs', version: '>=1.9.3', required: get_option('libnfs'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif libattr_test = ''' @@ -600,8 +896,7 @@ if get_option('attr').allowed() libattr = declare_dependency() else libattr = cc.find_library('attr', has_headers: ['attr/xattr.h'], - required: get_option('attr'), - kwargs: static_kwargs) + required:
get_option('attr')) if libattr.found() and not \ cc.links(libattr_test, dependencies: libattr, args: '-DCONFIG_LIBATTR') libattr = not_found @@ -636,7 +931,7 @@ seccomp_has_sysrawrc = false if not get_option('seccomp').auto() or have_system or have_tools seccomp = dependency('libseccomp', version: '>=2.3.0', required: get_option('seccomp'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') if seccomp.found() seccomp_has_sysrawrc = cc.has_header_symbol('seccomp.h', 'SCMP_FLTATR_API_SYSRAWRC', @@ -647,8 +942,7 @@ endif libcap_ng = not_found if not get_option('cap_ng').auto() or have_system or have_tools libcap_ng = cc.find_library('cap-ng', has_headers: ['cap-ng.h'], - required: get_option('cap_ng'), - kwargs: static_kwargs) + required: get_option('cap_ng')) endif if libcap_ng.found() and not cc.links(''' #include @@ -669,13 +963,13 @@ if get_option('xkbcommon').auto() and not have_system and not have_tools xkbcommon = not_found else xkbcommon = dependency('xkbcommon', required: get_option('xkbcommon'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif slirp = not_found if not get_option('slirp').auto() or have_system slirp = dependency('slirp', required: get_option('slirp'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') # slirp < 4.7 is incompatible with CFI support in QEMU. This is because # it passes function pointers within libslirp as callbacks for timers. # When using a system-wide shared libslirp, the type information for the @@ -695,8 +989,7 @@ endif vde = not_found if not get_option('vde').auto() or have_system or have_tools vde = cc.find_library('vdeplug', has_headers: ['libvdeplug.h'], - required: get_option('vde'), - kwargs: static_kwargs) + required: get_option('vde')) endif if vde.found() and not cc.links(''' #include @@ -718,35 +1011,41 @@ endif pulse = not_found if not get_option('pa').auto() or (targetos == 'linux' and have_system) pulse = dependency('libpulse', required: get_option('pa'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif alsa = not_found if not get_option('alsa').auto() or (targetos == 'linux' and have_system) alsa = dependency('alsa', required: get_option('alsa'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif jack = not_found if not get_option('jack').auto() or have_system jack = dependency('jack', required: get_option('jack'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') +endif +pipewire = not_found +if not get_option('pipewire').auto() or (targetos == 'linux' and have_system) + pipewire = dependency('libpipewire-0.3', version: '>=0.3.60', + required: get_option('pipewire'), + method: 'pkg-config') endif sndio = not_found if not get_option('sndio').auto() or have_system sndio = dependency('sndio', required: get_option('sndio'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif spice_protocol = not_found if not get_option('spice_protocol').auto() or have_system spice_protocol = dependency('spice-protocol', version: '>=0.14.0', required: get_option('spice_protocol'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif spice = not_found if not get_option('spice').auto() or have_system spice = dependency('spice-server', version: '>=0.14.0', required: get_option('spice'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif spice_headers = spice.partial_dependency(compile_args: true, includes: true) @@ -756,13 +1055,13 @@ libiscsi 
= not_found if not get_option('libiscsi').auto() or have_block libiscsi = dependency('libiscsi', version: '>=1.9.0', required: get_option('libiscsi'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif zstd = not_found if not get_option('zstd').auto() or have_block zstd = dependency('libzstd', version: '>=1.4.0', required: get_option('zstd'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif virgl = not_found @@ -770,29 +1069,25 @@ have_vhost_user_gpu = have_tools and targetos == 'linux' and pixman.found() if not get_option('virglrenderer').auto() or have_system or have_vhost_user_gpu virgl = dependency('virglrenderer', method: 'pkg-config', - required: get_option('virglrenderer'), - kwargs: static_kwargs) + required: get_option('virglrenderer')) endif blkio = not_found if not get_option('blkio').auto() or have_block blkio = dependency('blkio', method: 'pkg-config', - required: get_option('blkio'), - kwargs: static_kwargs) + required: get_option('blkio')) endif curl = not_found if not get_option('curl').auto() or have_block curl = dependency('libcurl', version: '>=7.29.0', method: 'pkg-config', - required: get_option('curl'), - kwargs: static_kwargs) + required: get_option('curl')) endif libudev = not_found if targetos == 'linux' and (have_system or have_tools) libudev = dependency('libudev', method: 'pkg-config', - required: get_option('libudev'), - kwargs: static_kwargs) + required: get_option('libudev')) endif mpathlibs = [libudev] @@ -827,18 +1122,15 @@ if targetos == 'linux' and have_tools and get_option('mpath').allowed() return 0; }''' libmpathpersist = cc.find_library('mpathpersist', - required: get_option('mpath'), - kwargs: static_kwargs) + required: get_option('mpath')) if libmpathpersist.found() mpathlibs += libmpathpersist - if enable_static + if get_option('prefer_static') mpathlibs += cc.find_library('devmapper', - required: get_option('mpath'), - kwargs: static_kwargs) + required: get_option('mpath')) endif mpathlibs += cc.find_library('multipath', - required: get_option('mpath'), - kwargs: static_kwargs) + required: get_option('mpath')) foreach lib: mpathlibs if not lib.found() mpathlibs = [] @@ -888,13 +1180,13 @@ if have_system and get_option('curses').allowed() curses_dep_list = targetos == 'windows' ? ['ncurses', 'ncursesw'] : ['ncursesw'] curses = dependency(curses_dep_list, required: false, - method: 'pkg-config', - kwargs: static_kwargs) + method: 'pkg-config') msg = get_option('curses').enabled() ? 'curses library not found' : '' curses_compile_args = ['-DNCURSES_WIDECHAR=1'] if curses.found() if cc.links(curses_test, args: curses_compile_args, dependencies: [curses]) - curses = declare_dependency(compile_args: curses_compile_args, dependencies: [curses]) + curses = declare_dependency(compile_args: curses_compile_args, dependencies: [curses], + version: curses.version()) else msg = 'curses package not usable' curses = not_found @@ -911,8 +1203,7 @@ if have_system and get_option('curses').allowed() curses_libname_list = (targetos == 'windows' ? 
['pdcurses'] : ['ncursesw', 'cursesw']) foreach curses_libname : curses_libname_list libcurses = cc.find_library(curses_libname, - required: false, - kwargs: static_kwargs) + required: false) if libcurses.found() if cc.links(curses_test, args: curses_compile_args, dependencies: libcurses) curses = declare_dependency(compile_args: curses_compile_args, @@ -937,7 +1228,7 @@ if have_system and get_option('curses').allowed() int main(void) { iconv_t conv = iconv_open("WCHAR_T", "UCS-2"); return conv != (iconv_t) -1; - }''', args: config_host['GLIB_CFLAGS'].split() + config_host['GLIB_LIBS'].split() + link_args) + }''', args: link_args, dependencies: glib) iconv = declare_dependency(link_args: link_args, dependencies: glib) break endif @@ -962,8 +1253,7 @@ endif brlapi = not_found if not get_option('brlapi').auto() or have_system brlapi = cc.find_library('brlapi', has_headers: ['brlapi.h'], - required: get_option('brlapi'), - kwargs: static_kwargs) + required: get_option('brlapi')) if brlapi.found() and not cc.links(''' #include #include @@ -979,15 +1269,16 @@ endif sdl = not_found if not get_option('sdl').auto() or have_system - sdl = dependency('sdl2', required: get_option('sdl'), kwargs: static_kwargs) + sdl = dependency('sdl2', required: get_option('sdl')) sdl_image = not_found endif if sdl.found() # work around 2.0.8 bug sdl = declare_dependency(compile_args: '-Wno-undef', - dependencies: sdl) + dependencies: sdl, + version: sdl.version()) sdl_image = dependency('SDL2_image', required: get_option('sdl_image'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') else if get_option('sdl_image').enabled() error('sdl-image required, but SDL was @0@'.format( @@ -998,11 +1289,9 @@ endif rbd = not_found if not get_option('rbd').auto() or have_block - librados = cc.find_library('rados', required: get_option('rbd'), - kwargs: static_kwargs) + librados = cc.find_library('rados', required: get_option('rbd')) librbd = cc.find_library('rbd', has_headers: ['rbd/librbd.h'], - required: get_option('rbd'), - kwargs: static_kwargs) + required: get_option('rbd')) if librados.found() and librbd.found() if cc.links(''' #include @@ -1030,7 +1319,7 @@ glusterfs_iocb_has_stat = false if not get_option('glusterfs').auto() or have_block glusterfs = dependency('glusterfs-api', version: '>=3', required: get_option('glusterfs'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') if glusterfs.found() glusterfs_ftruncate_has_stat = cc.links(''' #include @@ -1065,15 +1354,13 @@ libssh = not_found if not get_option('libssh').auto() or have_block libssh = dependency('libssh', version: '>=0.8.7', method: 'pkg-config', - required: get_option('libssh'), - kwargs: static_kwargs) + required: get_option('libssh')) endif libbzip2 = not_found if not get_option('bzip2').auto() or have_block libbzip2 = cc.find_library('bz2', has_headers: ['bzlib.h'], - required: get_option('bzip2'), - kwargs: static_kwargs) + required: get_option('bzip2')) if libbzip2.found() and not cc.links(''' #include int main(void) { BZ2_bzlibVersion(); return 0; }''', dependencies: libbzip2) @@ -1089,8 +1376,7 @@ endif liblzfse = not_found if not get_option('lzfse').auto() or have_block liblzfse = cc.find_library('lzfse', has_headers: ['lzfse.h'], - required: get_option('lzfse'), - kwargs: static_kwargs) + required: get_option('lzfse')) endif if liblzfse.found() and not cc.links(''' #include @@ -1108,8 +1394,7 @@ if get_option('oss').allowed() and have_system if not cc.has_header('sys/soundcard.h') # not found elif 
targetos == 'netbsd' - oss = cc.find_library('ossaudio', required: get_option('oss'), - kwargs: static_kwargs) + oss = cc.find_library('ossaudio', required: get_option('oss')) else oss = declare_dependency() endif @@ -1142,7 +1427,7 @@ endif opengl = not_found if not get_option('opengl').auto() or have_system or have_vhost_user_gpu epoxy = dependency('epoxy', method: 'pkg-config', - required: get_option('opengl'), kwargs: static_kwargs) + required: get_option('opengl')) if cc.has_header('epoxy/egl.h', dependencies: epoxy) opengl = epoxy elif get_option('opengl').enabled() @@ -1151,8 +1436,7 @@ if not get_option('opengl').auto() or have_system or have_vhost_user_gpu endif gbm = not_found if (have_system or have_tools) and (virgl.found() or opengl.found()) - gbm = dependency('gbm', method: 'pkg-config', required: false, - kwargs: static_kwargs) + gbm = dependency('gbm', method: 'pkg-config', required: false) endif have_vhost_user_gpu = have_vhost_user_gpu and virgl.found() and opengl.found() and gbm.found() @@ -1174,16 +1458,14 @@ if get_option('gnutls').enabled() or (get_option('gnutls').auto() and have_syste # the platform support requirements gnutls_crypto = dependency('gnutls', version: '>=3.6.14', method: 'pkg-config', - required: false, - kwargs: static_kwargs) + required: false) if gnutls_crypto.found() gnutls = gnutls_crypto else # Our min version if all we need is TLS gnutls = dependency('gnutls', version: '>=3.5.18', method: 'pkg-config', - required: get_option('gnutls'), - kwargs: static_kwargs) + required: get_option('gnutls')) endif endif @@ -1210,34 +1492,32 @@ if not gnutls_crypto.found() if (not get_option('gcrypt').auto() or have_system) and not get_option('nettle').enabled() gcrypt = dependency('libgcrypt', version: '>=1.8', method: 'config-tool', - required: get_option('gcrypt'), - kwargs: static_kwargs) + required: get_option('gcrypt')) # Debian has removed -lgpg-error from libgcrypt-config # as it "spreads unnecessary dependencies" which in # turn breaks static builds... 
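To make the gcrypt/gpg-error workaround described above (and applied just below) concrete, a hedged sketch, with an illustrative program of my own; gcry_check_version() is libgcrypt's standard init/version call: because libgcrypt itself calls into libgpg-error, a static link such as `cc demo.c -static -lgcrypt` can stop resolving once libgcrypt-config no longer emits -lgpg-error, and links again with -lgpg-error appended, which is what the extra cc.find_library('gpg-error') restores.

    #include <gcrypt.h>
    #include <stdio.h>

    int main(void)
    {
        /* NULL = no minimum version; returns the library version string. */
        const char *v = gcry_check_version(NULL);

        printf("libgcrypt %s\n", v ? v : "unavailable");
        return 0;
    }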
- if gcrypt.found() and enable_static - gcrypt = declare_dependency(dependencies: [ - gcrypt, - cc.find_library('gpg-error', required: true, kwargs: static_kwargs)]) + if gcrypt.found() and get_option('prefer_static') + gcrypt = declare_dependency(dependencies: + [gcrypt, + cc.find_library('gpg-error', required: true)], + version: gcrypt.version()) endif endif if (not get_option('nettle').auto() or have_system) and not gcrypt.found() nettle = dependency('nettle', version: '>=3.4', method: 'pkg-config', - required: get_option('nettle'), - kwargs: static_kwargs) + required: get_option('nettle')) if nettle.found() and not cc.has_header('nettle/xts.h', dependencies: nettle) xts = 'private' endif endif endif -gmp = dependency('gmp', required: false, method: 'pkg-config', kwargs: static_kwargs) +gmp = dependency('gmp', required: false, method: 'pkg-config') if nettle.found() and gmp.found() hogweed = dependency('hogweed', version: '>=3.4', method: 'pkg-config', - required: get_option('nettle'), - kwargs: static_kwargs) + required: get_option('nettle')) endif @@ -1249,20 +1529,18 @@ have_gtk_clipboard = get_option('gtk_clipboard').enabled() if not get_option('gtk').auto() or have_system gtk = dependency('gtk+-3.0', version: '>=3.22.0', method: 'pkg-config', - required: get_option('gtk'), - kwargs: static_kwargs) + required: get_option('gtk')) if gtk.found() gtkx11 = dependency('gtk+-x11-3.0', version: '>=3.22.0', method: 'pkg-config', - required: false, - kwargs: static_kwargs) - gtk = declare_dependency(dependencies: [gtk, gtkx11]) + required: false) + gtk = declare_dependency(dependencies: [gtk, gtkx11], + version: gtk.version()) if not get_option('vte').auto() or have_system vte = dependency('vte-2.91', method: 'pkg-config', - required: get_option('vte'), - kwargs: static_kwargs) + required: get_option('vte')) endif elif have_gtk_clipboard error('GTK clipboard requested, but GTK not found') @@ -1271,13 +1549,12 @@ endif x11 = not_found if gtkx11.found() - x11 = dependency('x11', method: 'pkg-config', required: gtkx11.found(), - kwargs: static_kwargs) + x11 = dependency('x11', method: 'pkg-config', required: gtkx11.found()) endif png = not_found if get_option('png').allowed() and have_system png = dependency('libpng', version: '>=1.6.34', required: get_option('png'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif vnc = not_found jpeg = not_found @@ -1285,10 +1562,9 @@ sasl = not_found if get_option('vnc').allowed() and have_system vnc = declare_dependency() # dummy dependency jpeg = dependency('libjpeg', required: get_option('vnc_jpeg'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') sasl = cc.find_library('sasl2', has_headers: ['sasl/sasl.h'], - required: get_option('vnc_sasl'), - kwargs: static_kwargs) + required: get_option('vnc_sasl')) if sasl.found() sasl = declare_dependency(dependencies: sasl, compile_args: '-DSTRUCT_IOVEC_DEFINED') @@ -1298,8 +1574,7 @@ endif pam = not_found if not get_option('auth_pam').auto() or have_system pam = cc.find_library('pam', has_headers: ['security/pam_appl.h'], - required: get_option('auth_pam'), - kwargs: static_kwargs) + required: get_option('auth_pam')) endif if pam.found() and not cc.links(''' #include @@ -1323,8 +1598,7 @@ endif snappy = not_found if not get_option('snappy').auto() or have_system snappy = cc.find_library('snappy', has_headers: ['snappy-c.h'], - required: get_option('snappy'), - kwargs: static_kwargs) + required: get_option('snappy')) endif if snappy.found() and not linker.links(''' 
#include @@ -1340,8 +1614,7 @@ endif lzo = not_found if not get_option('lzo').auto() or have_system lzo = cc.find_library('lzo2', has_headers: ['lzo/lzo1x.h'], - required: get_option('lzo'), - kwargs: static_kwargs) + required: get_option('lzo')) endif if lzo.found() and not cc.links(''' #include @@ -1357,8 +1630,7 @@ endif numa = not_found if not get_option('numa').auto() or have_system or have_tools numa = cc.find_library('numa', has_headers: ['numa.h'], - required: get_option('numa'), - kwargs: static_kwargs) + required: get_option('numa')) endif if numa.found() and not cc.links(''' #include @@ -1376,10 +1648,8 @@ rdma = not_found if not get_option('rdma').auto() or have_system libumad = cc.find_library('ibumad', required: get_option('rdma')) rdma_libs = [cc.find_library('rdmacm', has_headers: ['rdma/rdma_cma.h'], - required: get_option('rdma'), - kwargs: static_kwargs), - cc.find_library('ibverbs', required: get_option('rdma'), - kwargs: static_kwargs), + required: get_option('rdma')), + cc.find_library('ibverbs', required: get_option('rdma')), libumad] rdma = declare_dependency(dependencies: rdma_libs) foreach lib: rdma_libs @@ -1392,30 +1662,30 @@ endif xen = not_found if get_option('xen').enabled() or (get_option('xen').auto() and have_system) xencontrol = dependency('xencontrol', required: false, - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') if xencontrol.found() xen_pc = declare_dependency(version: xencontrol.version(), dependencies: [ xencontrol, # disabler: true makes xen_pc.found() return false if any is not found dependency('xenstore', required: false, - method: 'pkg-config', kwargs: static_kwargs, + method: 'pkg-config', disabler: true), dependency('xenforeignmemory', required: false, - method: 'pkg-config', kwargs: static_kwargs, + method: 'pkg-config', disabler: true), dependency('xengnttab', required: false, - method: 'pkg-config', kwargs: static_kwargs, + method: 'pkg-config', disabler: true), dependency('xenevtchn', required: false, - method: 'pkg-config', kwargs: static_kwargs, + method: 'pkg-config', disabler: true), dependency('xendevicemodel', required: false, - method: 'pkg-config', kwargs: static_kwargs, + method: 'pkg-config', disabler: true), # optional, no "disabler: true" dependency('xentoolcore', required: false, - method: 'pkg-config', kwargs: static_kwargs)]) + method: 'pkg-config')]) if xen_pc.found() xen = xen_pc endif @@ -1473,60 +1743,53 @@ have_xen_pci_passthrough = get_option('xen_pci_passthrough') \ cacard = not_found if not get_option('smartcard').auto() or have_system cacard = dependency('libcacard', required: get_option('smartcard'), - version: '>=2.5.1', method: 'pkg-config', - kwargs: static_kwargs) + version: '>=2.5.1', method: 'pkg-config') endif u2f = not_found if have_system u2f = dependency('u2f-emu', required: get_option('u2f'), - method: 'pkg-config', - kwargs: static_kwargs) + method: 'pkg-config') endif canokey = not_found if have_system canokey = dependency('canokey-qemu', required: get_option('canokey'), - method: 'pkg-config', - kwargs: static_kwargs) + method: 'pkg-config') endif usbredir = not_found if not get_option('usb_redir').auto() or have_system usbredir = dependency('libusbredirparser-0.5', required: get_option('usb_redir'), - version: '>=0.6', method: 'pkg-config', - kwargs: static_kwargs) + version: '>=0.6', method: 'pkg-config') endif libusb = not_found if not get_option('libusb').auto() or have_system libusb = dependency('libusb-1.0', required: get_option('libusb'), - version: '>=1.0.13', method: 
'pkg-config', - kwargs: static_kwargs) + version: '>=1.0.13', method: 'pkg-config') endif libpmem = not_found if not get_option('libpmem').auto() or have_system libpmem = dependency('libpmem', required: get_option('libpmem'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') endif libdaxctl = not_found if not get_option('libdaxctl').auto() or have_system libdaxctl = dependency('libdaxctl', required: get_option('libdaxctl'), - version: '>=57', method: 'pkg-config', - kwargs: static_kwargs) + version: '>=57', method: 'pkg-config') endif tasn1 = not_found if gnutls.found() tasn1 = dependency('libtasn1', - method: 'pkg-config', - kwargs: static_kwargs) + method: 'pkg-config') endif keyutils = dependency('libkeyutils', required: false, - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') has_gettid = cc.has_function('gettid') # libselinux selinux = dependency('libselinux', required: get_option('selinux'), - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') # Malloc tests @@ -1590,8 +1853,7 @@ if get_option('fuse').disabled() and get_option('fuse_lseek').enabled() endif fuse = dependency('fuse3', required: get_option('fuse'), - version: '>=3.1', method: 'pkg-config', - kwargs: static_kwargs) + version: '>=3.1', method: 'pkg-config') fuse_lseek = not_found if get_option('fuse_lseek').allowed() @@ -1647,10 +1909,9 @@ endif # libdw libdw = not_found if not get_option('libdw').auto() or \ - (not enable_static and (have_system or have_user)) + (not get_option('prefer_static') and (have_system or have_user)) libdw = dependency('libdw', method: 'pkg-config', - kwargs: static_kwargs, required: get_option('libdw')) endif @@ -1667,6 +1928,7 @@ if have_system 'jack': jack.found(), 'oss': oss.found(), 'pa': pulse.found(), + 'pipewire': pipewire.found(), 'sdl': sdl.found(), 'sndio': sndio.found(), } @@ -1704,7 +1966,7 @@ if get_option('cfi') if not get_option('b_lto') error('Selected Control-Flow Integrity but LTO is disabled') endif - if config_host.has_key('CONFIG_MODULES') + if enable_modules error('Selected Control-Flow Integrity is not compatible with modules') endif # Check for cfi flags. 
CFI requires LTO so we can't use @@ -1746,8 +2008,8 @@ dbus_display = get_option('dbus_display') \ error_message: '-display dbus requires glib>=2.64') \ .require(gdbus_codegen.found(), error_message: gdbus_codegen_error.format('-display dbus')) \ - .require(opengl.found() and gbm.found(), - error_message: '-display dbus requires epoxy/egl and gbm') \ + .require(targetos != 'windows', + error_message: '-display dbus is not available on Windows') \ .allowed() have_virtfs = get_option('virtfs') \ @@ -1755,12 +2017,17 @@ have_virtfs = get_option('virtfs') \ error_message: 'virtio-9p (virtfs) requires Linux or macOS') \ .require(targetos == 'linux' or cc.has_function('pthread_fchdir_np'), error_message: 'virtio-9p (virtfs) on macOS requires the presence of pthread_fchdir_np') \ - .require(targetos == 'darwin' or (libattr.found() and libcap_ng.found()), - error_message: 'virtio-9p (virtfs) on Linux requires libcap-ng-devel and libattr-devel') \ + .require(targetos == 'darwin' or libattr.found(), + error_message: 'virtio-9p (virtfs) on Linux requires libattr-devel') \ .disable_auto_if(not have_tools and not have_system) \ .allowed() -have_virtfs_proxy_helper = targetos != 'darwin' and have_virtfs and have_tools +have_virtfs_proxy_helper = get_option('virtfs_proxy_helper') \ + .require(targetos != 'darwin', error_message: 'the virtfs proxy helper is incompatible with macOS') \ + .require(have_virtfs, error_message: 'the virtfs proxy helper requires that virtfs is enabled') \ + .disable_auto_if(not have_tools) \ + .require(libcap_ng.found(), error_message: 'the virtfs proxy helper requires libcap-ng') \ + .allowed() if get_option('block_drv_ro_whitelist') == '' config_host_data.set('CONFIG_BDRV_RO_WHITELIST', '') @@ -1802,7 +2069,7 @@ config_host_data.set_quoted('CONFIG_QEMU_LOCALSTATEDIR', get_option('prefix') / config_host_data.set_quoted('CONFIG_QEMU_MODDIR', get_option('prefix') / qemu_moddir) config_host_data.set_quoted('CONFIG_SYSCONFDIR', get_option('prefix') / get_option('sysconfdir')) -if config_host.has_key('CONFIG_MODULES') +if enable_modules config_host_data.set('CONFIG_STAMP', run_command( meson.current_source_dir() / 'scripts/qemu-stamp.py', meson.project_version(), get_option('pkgversion'), '--', @@ -1855,6 +2122,7 @@ endif config_host_data.set('CONFIG_GTK', gtk.found()) config_host_data.set('CONFIG_VTE', vte.found()) config_host_data.set('CONFIG_GTK_CLIPBOARD', have_gtk_clipboard) +config_host_data.set('CONFIG_HEXAGON_IDEF_PARSER', get_option('hexagon_idef_parser')) config_host_data.set('CONFIG_LIBATTR', have_old_libattr) config_host_data.set('CONFIG_LIBCAP_NG', libcap_ng.found()) config_host_data.set('CONFIG_EBPF', libbpf.found()) @@ -1865,6 +2133,7 @@ config_host_data.set('CONFIG_LIBSSH', libssh.found()) config_host_data.set('CONFIG_LINUX_AIO', libaio.found()) config_host_data.set('CONFIG_LINUX_IO_URING', linux_io_uring.found()) config_host_data.set('CONFIG_LIBPMEM', libpmem.found()) +config_host_data.set('CONFIG_MODULES', enable_modules) config_host_data.set('CONFIG_NUMA', numa.found()) if numa.found() config_host_data.set('HAVE_NUMA_HAS_PREFERRED_MANY', @@ -1875,6 +2144,7 @@ config_host_data.set('CONFIG_OPENGL', opengl.found()) config_host_data.set('CONFIG_PROFILER', get_option('profiler')) config_host_data.set('CONFIG_RBD', rbd.found()) config_host_data.set('CONFIG_RDMA', rdma.found()) +config_host_data.set('CONFIG_SAFESTACK', get_option('safe_stack')) config_host_data.set('CONFIG_SDL', sdl.found()) config_host_data.set('CONFIG_SDL_IMAGE', sdl_image.found()) 
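
The config_host_data entries above and below are emitted into a generated configuration header, with Meson turning a true boolean into a #define and a false one into an #undef. A minimal sketch of a consumer, assuming the generated header is named "config-host.h" and using CONFIG_SDL purely as an example macro:

    /*
     * Minimal sketch, not actual QEMU code: how the booleans set via
     * config_host_data are typically consumed from the generated header.
     */
    #include "config-host.h"   /* assumed name of the generated header */
    #include <stdio.h>

    int main(void)
    {
    #ifdef CONFIG_SDL
        puts("SDL display support compiled in");
    #else
        puts("built without SDL");
    #endif
        return 0;
    }
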
config_host_data.set('CONFIG_SECCOMP', seccomp.found()) @@ -1883,6 +2153,7 @@ if seccomp.found() endif config_host_data.set('CONFIG_SNAPPY', snappy.found()) config_host_data.set('CONFIG_TPM', have_tpm) +config_host_data.set('CONFIG_TSAN', get_option('tsan')) config_host_data.set('CONFIG_USB_LIBUSB', libusb.found()) config_host_data.set('CONFIG_VDE', vde.found()) config_host_data.set('CONFIG_VHOST_NET', have_vhost_net) @@ -1952,6 +2223,7 @@ if get_option('debug_stack_usage') and have_coroutine_pool have_coroutine_pool = false endif config_host_data.set10('CONFIG_COROUTINE_POOL', have_coroutine_pool) +config_host_data.set('CONFIG_DEBUG_GRAPH_LOCK', get_option('debug_graph_lock')) config_host_data.set('CONFIG_DEBUG_MUTEX', get_option('debug_mutex')) config_host_data.set('CONFIG_DEBUG_STACK_USAGE', get_option('debug_stack_usage')) config_host_data.set('CONFIG_GPROF', get_option('gprof')) @@ -1996,6 +2268,7 @@ config_host_data.set('CONFIG_SYNC_FILE_RANGE', cc.has_function('sync_file_range' config_host_data.set('CONFIG_TIMERFD', cc.has_function('timerfd_create')) config_host_data.set('HAVE_COPY_FILE_RANGE', cc.has_function('copy_file_range')) config_host_data.set('HAVE_GETIFADDRS', cc.has_function('getifaddrs')) +config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', glib_has_gslice) config_host_data.set('HAVE_OPENPTY', cc.has_function('openpty', dependencies: util)) config_host_data.set('HAVE_STRCHRNUL', cc.has_function('strchrnul')) config_host_data.set('HAVE_SYSTEM_FUNCTION', cc.has_function('system', prefix: '#include ')) @@ -2012,7 +2285,21 @@ if rdma.found() prefix: '#include ')) endif +have_asan_fiber = false +if get_option('sanitizers') and \ + not cc.has_function('__sanitizer_start_switch_fiber', + args: '-fsanitize=address', + prefix: '#include ') + warning('Missing ASAN due to missing fiber annotation interface') + warning('Without code annotation, the report may be inferior.') +else + have_asan_fiber = true +endif +config_host_data.set('CONFIG_ASAN_IFACE_FIBER', have_asan_fiber) + # has_header_symbol +config_host_data.set('CONFIG_BLKZONED', + cc.has_header_symbol('linux/blkzoned.h', 'BLKOPENZONE')) config_host_data.set('CONFIG_EPOLL_CREATE1', cc.has_header_symbol('sys/epoll.h', 'epoll_create1')) config_host_data.set('CONFIG_FALLOCATE_PUNCH_HOLE', @@ -2048,6 +2335,9 @@ config_host_data.set('HAVE_SIGEV_NOTIFY_THREAD_ID', config_host_data.set('HAVE_STRUCT_STAT_ST_ATIM', cc.has_member('struct stat', 'st_atim', prefix: '#include ')) +config_host_data.set('HAVE_BLK_ZONE_REP_CAPACITY', + cc.has_member('struct blk_zone', 'capacity', + prefix: '#include ')) # has_type config_host_data.set('CONFIG_IOVEC', @@ -2237,25 +2527,29 @@ config_host_data.set('HAVE_BROKEN_SIZE_MAX', not cc.compiles(''' return printf("%zu", SIZE_MAX); }''', args: ['-Werror'])) -atomic_test = ''' +# See if 64-bit atomic operations are supported. +# Note that without __atomic builtins, we can only +# assume atomic loads/stores max at pointer size. +config_host_data.set('CONFIG_ATOMIC64', cc.links(''' #include int main(void) { - @0@ x = 0, y = 0; + uint64_t x = 0, y = 0; y = __atomic_load_n(&x, __ATOMIC_RELAXED); __atomic_store_n(&x, y, __ATOMIC_RELAXED); __atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); __atomic_exchange_n(&x, y, __ATOMIC_RELAXED); __atomic_fetch_add(&x, y, __ATOMIC_RELAXED); return 0; - }''' + }''')) -# See if 64-bit atomic operations are supported. -# Note that without __atomic builtins, we can only -# assume atomic loads/stores max at pointer size. 
-config_host_data.set('CONFIG_ATOMIC64', cc.links(atomic_test.format('uint64_t')))
+has_int128_type = cc.compiles('''
+  __int128_t a;
+  __uint128_t b;
+  int main(void) { b = a; }''')
+config_host_data.set('CONFIG_INT128_TYPE', has_int128_type)
 
-has_int128 = cc.links('''
+has_int128 = has_int128_type and cc.links('''
   __int128_t a;
   __uint128_t b;
   int main (void) {
@@ -2264,28 +2558,45 @@ has_int128 = cc.links('''
     a = a * a;
     return 0;
   }''')
-
 config_host_data.set('CONFIG_INT128', has_int128)
 
-if has_int128
+if has_int128_type
   # "do we have 128-bit atomics which are handled inline and specifically not
   # via libatomic".  The reason we can't use libatomic is documented in the
   # comment starting "GCC is a house divided" in include/qemu/atomic128.h.
-  has_atomic128 = cc.links(atomic_test.format('unsigned __int128'))
+  # We only care about these operations on 16-byte aligned pointers, so
+  # force 16-byte alignment of the pointer, which may be greater than
+  # __alignof(unsigned __int128) for the host.
+  atomic_test_128 = '''
+    int main(int ac, char **av) {
+      __uint128_t *p = __builtin_assume_aligned(av[ac - 1], 16);
+      p[1] = __atomic_load_n(&p[0], __ATOMIC_RELAXED);
+      __atomic_store_n(&p[2], p[3], __ATOMIC_RELAXED);
+      __atomic_compare_exchange_n(&p[4], &p[5], p[6], 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+      return 0;
+    }'''
+  has_atomic128 = cc.links(atomic_test_128)
   config_host_data.set('CONFIG_ATOMIC128', has_atomic128)
   if not has_atomic128
-    has_cmpxchg128 = cc.links('''
-      int main(void)
-      {
-        unsigned __int128 x = 0, y = 0;
-        __sync_val_compare_and_swap_16(&x, y, x);
-        return 0;
-      }
-    ''')
+    # Even with __builtin_assume_aligned, the above test may have failed
+    # without optimization enabled.  Try again with optimizations locally
+    # enabled for the function.
See + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389 + has_atomic128_opt = cc.links('__attribute__((optimize("O1")))' + atomic_test_128) + config_host_data.set('CONFIG_ATOMIC128_OPT', has_atomic128_opt) - config_host_data.set('CONFIG_CMPXCHG128', has_cmpxchg128) + if not has_atomic128_opt + config_host_data.set('CONFIG_CMPXCHG128', cc.links(''' + int main(void) + { + __uint128_t x = 0, y = 0; + __sync_val_compare_and_swap_16(&x, y, x); + return 0; + } + ''')) + endif endif endif @@ -2366,12 +2677,9 @@ config_host_data.set('CONFIG_AVX512F_OPT', get_option('avx512f') \ config_host_data.set('CONFIG_AVX512BW_OPT', get_option('avx512bw') \ .require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX512BW') \ .require(cc.links(''' - #pragma GCC push_options - #pragma GCC target("avx512bw") #include #include - static int bar(void *a) { - + static int __attribute__((target("avx512bw"))) bar(void *a) { __m512i *x = a; __m512i res= _mm512_abs_epi8(*x); return res[1]; @@ -2726,7 +3034,7 @@ genh += custom_target('config-poison.h', capstone = not_found if not get_option('capstone').auto() or have_system or have_user capstone = dependency('capstone', version: '>=3.0.5', - kwargs: static_kwargs, method: 'pkg-config', + method: 'pkg-config', required: get_option('capstone')) # Some versions of capstone have broken pkg-config file @@ -2752,19 +3060,18 @@ if have_system and vfio_user_server_allowed libvfio_user_proj = subproject('libvfio-user') - libvfio_user_lib = libvfio_user_proj.get_variable('libvfio_user_dep') - - libvfio_user_dep = declare_dependency(dependencies: [libvfio_user_lib]) + libvfio_user_dep = libvfio_user_proj.get_variable('libvfio_user_dep') endif fdt = not_found -if have_system - fdt_opt = get_option('fdt') +fdt_opt = get_option('fdt') +if fdt_required.length() > 0 or fdt_opt == 'enabled' + if fdt_opt == 'disabled' + error('fdt disabled but required by targets ' + ', '.join(fdt_required)) + endif + if fdt_opt in ['enabled', 'auto', 'system'] - have_internal = fs.exists(meson.current_source_dir() / 'dtc/libfdt/Makefile.libfdt') - fdt = cc.find_library('fdt', kwargs: static_kwargs, - required: fdt_opt == 'system' or - fdt_opt == 'enabled' and not have_internal) + fdt = cc.find_library('fdt', required: fdt_opt == 'system') if fdt.found() and cc.links(''' #include #include @@ -2773,42 +3080,27 @@ if have_system fdt_opt = 'system' elif fdt_opt == 'system' error('system libfdt requested, but it is too old (1.5.1 or newer required)') - elif have_internal - fdt_opt = 'internal' else - fdt_opt = 'disabled' + fdt_opt = 'internal' fdt = not_found endif endif - if fdt_opt == 'internal' - fdt_files = files( - 'dtc/libfdt/fdt.c', - 'dtc/libfdt/fdt_ro.c', - 'dtc/libfdt/fdt_wip.c', - 'dtc/libfdt/fdt_sw.c', - 'dtc/libfdt/fdt_rw.c', - 'dtc/libfdt/fdt_strerror.c', - 'dtc/libfdt/fdt_empty_tree.c', - 'dtc/libfdt/fdt_addresses.c', - 'dtc/libfdt/fdt_overlay.c', - 'dtc/libfdt/fdt_check.c', - ) + if not fdt.found() + assert(fdt_opt == 'internal') + have_internal = fs.exists(meson.current_source_dir() / 'subprojects/dtc/meson.build') - fdt_inc = include_directories('dtc/libfdt') - libfdt = static_library('fdt', - build_by_default: false, - sources: fdt_files, - include_directories: fdt_inc, - pic: 'AS_SHARED_LIB' in config_host) - fdt = declare_dependency(link_with: libfdt, - include_directories: fdt_inc) + if not have_internal + error('libfdt source not found - please pull git submodule') + endif + + libfdt_proj = subproject('dtc', required: true, + default_options: ['tools=false', 
'yaml=disabled', + 'python=disabled', 'default_library=static']) + fdt = libfdt_proj.get_variable('libfdt_dep') endif else fdt_opt = 'disabled' endif -if not fdt.found() and fdt_required.length() > 0 - error('fdt not available but required by targets ' + ', '.join(fdt_required)) -endif config_host_data.set('CONFIG_CAPSTONE', capstone.found()) config_host_data.set('CONFIG_FDT', fdt.found()) @@ -2831,12 +3123,12 @@ qapi_gen_depends = [ meson.current_source_dir() / 'scripts/qapi/__init__.py', meson.current_source_dir() / 'scripts/qapi/expr.py', meson.current_source_dir() / 'scripts/qapi/gen.py', meson.current_source_dir() / 'scripts/qapi/introspect.py', + meson.current_source_dir() / 'scripts/qapi/main.py', meson.current_source_dir() / 'scripts/qapi/parser.py', meson.current_source_dir() / 'scripts/qapi/schema.py', meson.current_source_dir() / 'scripts/qapi/source.py', meson.current_source_dir() / 'scripts/qapi/types.py', meson.current_source_dir() / 'scripts/qapi/visit.py', - meson.current_source_dir() / 'scripts/qapi/common.py', meson.current_source_dir() / 'scripts/qapi-gen.py' ] @@ -2862,7 +3154,6 @@ tracetool_depends = files( 'scripts/tracetool/format/log_stap.py', 'scripts/tracetool/format/stap.py', 'scripts/tracetool/__init__.py', - 'scripts/tracetool/transform.py', 'scripts/tracetool/vcpu.py' ) @@ -2987,6 +3278,7 @@ if have_system 'hw/i2c', 'hw/i386', 'hw/i386/xen', + 'hw/i386/kvm', 'hw/ide', 'hw/input', 'hw/intc', @@ -3073,7 +3365,6 @@ subdir('ui') subdir('hw') subdir('gdbstub') - if enable_modules libmodulecommon = static_library('module-common', files('module-common.c') + genh, pic: true, c_args: '-DBUILD_DSO') modulecommon = declare_dependency(link_whole: libmodulecommon, compile_args: '-DBUILD_DSO') @@ -3148,12 +3439,10 @@ if have_block endif common_ss.add(files('cpus-common.c')) +specific_ss.add(files('cpu.c')) subdir('softmmu') -common_ss.add(capstone) -specific_ss.add(files('cpu.c', 'disas.c'), capstone) - # Work around a gcc bug/misfeature wherein constant propagation looks # through an alias: # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99696 @@ -3217,8 +3506,12 @@ modinfo_files = [] block_mods = [] softmmu_mods = [] foreach d, list : modules + if not (d == 'block' ? 
have_block : have_system) + continue + endif + foreach m, module_ss : list - if enable_modules and targetos != 'windows' + if enable_modules module_ss = module_ss.apply(config_all, strict: false) sl = static_library(d + '-' + m, [genh, module_ss.sources()], dependencies: [modulecommon, module_ss.dependencies()], pic: true) @@ -3251,7 +3544,7 @@ endforeach foreach d, list : target_modules foreach m, module_ss : list - if enable_modules and targetos != 'windows' + if enable_modules foreach target : target_dirs if target.endswith('-softmmu') config_target = config_target_mak[target] @@ -3727,7 +4020,7 @@ if host_machine.system() == 'windows' '@OUTPUT@', get_option('prefix'), meson.current_source_dir(), - config_host['GLIB_BINDIR'], + glib_pc.get_variable('bindir'), host_machine.cpu(), '--', '-DDISPLAYVERSION=' + meson.project_version(), @@ -3778,7 +4071,6 @@ summary(summary_info, bool_yn: true, section: 'Directories') # Host binaries summary_info = {} summary_info += {'git': config_host['GIT']} -summary_info += {'make': config_host['MAKE']} summary_info += {'python': '@0@ (version: @1@)'.format(python.full_path(), python.language_version())} summary_info += {'sphinx-build': sphinx_build} if config_host.has_key('HAVE_GDB_BIN') @@ -3801,8 +4093,8 @@ summary_info += {'system-mode emulation': have_system} summary_info += {'user-mode emulation': have_user} summary_info += {'block layer': have_block} summary_info += {'Install blobs': get_option('install_blobs')} -summary_info += {'module support': config_host.has_key('CONFIG_MODULES')} -if config_host.has_key('CONFIG_MODULES') +summary_info += {'module support': enable_modules} +if enable_modules summary_info += {'alternative module path': get_option('module_upgrades')} endif summary_info += {'fuzzing support': get_option('fuzzing')} @@ -3853,12 +4145,12 @@ link_args = get_option(link_language + '_link_args') if link_args.length() > 0 summary_info += {'LDFLAGS': ' '.join(link_args)} endif -summary_info += {'QEMU_CFLAGS': ' '.join(qemu_cflags)} +summary_info += {'QEMU_CFLAGS': ' '.join(qemu_common_flags + qemu_cflags)} if 'cpp' in all_languages - summary_info += {'QEMU_CXXFLAGS': ' '.join(qemu_cxxflags)} + summary_info += {'QEMU_CXXFLAGS': ' '.join(qemu_common_flags + qemu_cxxflags)} endif if 'objc' in all_languages - summary_info += {'QEMU_OBJCFLAGS': ' '.join(qemu_objcflags)} + summary_info += {'QEMU_OBJCFLAGS': ' '.join(qemu_common_flags)} endif summary_info += {'QEMU_LDFLAGS': ' '.join(qemu_ldflags)} summary_info += {'profiler': get_option('profiler')} @@ -3867,6 +4159,7 @@ summary_info += {'PIE': get_option('b_pie')} summary_info += {'static build': config_host.has_key('CONFIG_STATIC')} summary_info += {'malloc trim support': has_malloc_trim} summary_info += {'membarrier': have_membarrier} +summary_info += {'debug graph lock': get_option('debug_graph_lock')} summary_info += {'debug stack usage': get_option('debug_stack_usage')} summary_info += {'mutex debugging': get_option('debug_mutex')} summary_info += {'memory allocator': get_option('malloc')} @@ -3880,7 +4173,7 @@ else endif summary_info += {'gprof': gprof_info} summary_info += {'gcov': get_option('b_coverage')} -summary_info += {'thread sanitizer': config_host.has_key('CONFIG_TSAN')} +summary_info += {'thread sanitizer': get_option('tsan')} summary_info += {'CFI support': get_option('cfi')} if get_option('cfi') summary_info += {'CFI debug support': get_option('cfi_debug')} @@ -3919,6 +4212,7 @@ if have_system if xen.found() summary_info += {'xen ctrl version': xen.version()} endif + 
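
When enable_modules is set, the loop above builds each module as a position-independent static library with -DBUILD_DSO, so the same sources can later be linked into a loadable object. A minimal sketch, assuming nothing about QEMU's real module-registration API, of the pattern such a compile-time switch enables: a GCC/Clang constructor registers the module when the shared object is loaded, while the static build exposes an init hook that is called explicitly.

    /* Illustrative only; register_module()/register_module_builtin()
     * are invented names, not QEMU's module machinery. */
    #include <stdio.h>

    #ifdef BUILD_DSO
    /* Built as a shared object: run registration at dlopen() time. */
    __attribute__((constructor))
    static void register_module(void)
    {
        puts("module registered at load time");
    }
    #else
    /* Linked statically: the program calls the init hook itself. */
    static void register_module_builtin(void)
    {
        puts("module registered at startup");
    }

    int main(void)
    {
        register_module_builtin();
        return 0;
    }
    #endif
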
summary_info += {'Xen emulation': config_all.has_key('CONFIG_XEN_EMU')} endif summary_info += {'TCG support': config_all.has_key('CONFIG_TCG')} if config_all.has_key('CONFIG_TCG') @@ -3940,13 +4234,14 @@ summary(summary_info, bool_yn: true, section: 'Targets and accelerators') # Block layer summary_info = {} -summary_info += {'coroutine backend': config_host['CONFIG_COROUTINE_BACKEND']} +summary_info += {'coroutine backend': coroutine_backend} summary_info += {'coroutine pool': have_coroutine_pool} if have_block summary_info += {'Block whitelist (rw)': get_option('block_drv_rw_whitelist')} summary_info += {'Block whitelist (ro)': get_option('block_drv_ro_whitelist')} summary_info += {'Use block whitelist in tools': get_option('block_drv_whitelist_in_tools')} - summary_info += {'VirtFS support': have_virtfs} + summary_info += {'VirtFS (9P) support': have_virtfs} + summary_info += {'VirtFS (9P) Proxy Helper support': have_virtfs_proxy_helper} summary_info += {'Live block migration': config_host_data.get('CONFIG_LIVE_BLOCK_MIGRATION')} summary_info += {'replication support': config_host_data.get('CONFIG_REPLICATION')} summary_info += {'bochs support': get_option('bochs').allowed()} @@ -3954,6 +4249,9 @@ if have_block summary_info += {'dmg support': get_option('dmg').allowed()} summary_info += {'qcow v1 support': get_option('qcow1').allowed()} summary_info += {'vdi support': get_option('vdi').allowed()} + summary_info += {'vhdx support': get_option('vhdx').allowed()} + summary_info += {'vmdk support': get_option('vmdk').allowed()} + summary_info += {'vpc support': get_option('vpc').allowed()} summary_info += {'vvfat support': get_option('vvfat').allowed()} summary_info += {'qed support': get_option('qed').allowed()} summary_info += {'parallels support': get_option('parallels').allowed()} @@ -4017,6 +4315,7 @@ if targetos == 'linux' summary_info += {'ALSA support': alsa} summary_info += {'PulseAudio support': pulse} endif +summary_info += {'Pipewire support': pipewire} summary_info += {'JACK support': jack} summary_info += {'brlapi support': brlapi} summary_info += {'vde support': vde} diff --git a/meson_options.txt b/meson_options.txt index 6b0900205e..90237389e2 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -12,8 +12,6 @@ option('pkgversion', type : 'string', value : '', description: 'use specified string as sub-version of the package') option('smbd', type : 'string', value : '', description: 'Path to smbd for slirp networking') -option('sphinx_build', type : 'string', value : '', - description: 'Use specified sphinx-build for building document') option('iasl', type : 'string', value : '', description: 'Path to ACPI disassembler') option('tls_priority', type : 'string', value : 'NORMAL', @@ -21,7 +19,7 @@ option('tls_priority', type : 'string', value : 'NORMAL', option('default_devices', type : 'boolean', value : true, description: 'Include a default selection of devices in emulators') option('audio_drv_list', type: 'array', value: ['default'], - choices: ['alsa', 'coreaudio', 'default', 'dsound', 'jack', 'oss', 'pa', 'sdl', 'sndio'], + choices: ['alsa', 'coreaudio', 'default', 'dsound', 'jack', 'oss', 'pa', 'pipewire', 'sdl', 'sndio'], description: 'Set audio driver list') option('block_drv_rw_whitelist', type : 'string', value : '', description: 'set block driver read-write whitelist (by default affects only QEMU, not tools like qemu-img)') @@ -33,6 +31,9 @@ option('fuzzing_engine', type : 'string', value : '', description: 'fuzzing engine library for OSS-Fuzz') 
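
option('fuzzing_engine') points the build at an external in-process fuzzing engine such as libFuzzer, which is what OSS-Fuzz drives. These engines supply main() themselves and repeatedly call one standard C entry point; a self-contained harness sketch follows (the 4-byte "QEMU" magic check is invented for illustration and is not a real QEMU fuzz target):

    #include <stddef.h>
    #include <stdint.h>

    /* Standard entry point driven by libFuzzer-compatible engines;
     * the engine provides main() and the input bytes. */
    int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
    {
        /* Reject inputs too short to contain the header we parse. */
        if (size < 4) {
            return 0;
        }
        /* Exercise the code under test with engine-provided bytes. */
        if (data[0] == 'Q' && data[1] == 'E' &&
            data[2] == 'M' && data[3] == 'U') {
            /* ... call into the parser being fuzzed ... */
        }
        return 0;
    }
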
option('trace_file', type: 'string', value: 'trace', description: 'Trace file prefix for simple backend') +option('coroutine_backend', type: 'combo', + choices: ['ucontext', 'sigaltstack', 'windows', 'auto'], + value: 'auto', description: 'coroutine backend to use') # Everything else can be set via --enable/--disable-* option # on the configure script command line. After adding an option @@ -44,6 +45,8 @@ option('fuzzing', type : 'boolean', value: false, description: 'build fuzzing targets') option('gettext', type : 'feature', value : 'auto', description: 'Localization of the GTK+ user interface') +option('modules', type : 'feature', value : 'disabled', + description: 'modules support (non Windows)') option('module_upgrades', type : 'boolean', value : false, description: 'try to load modules from alternate paths for upgrades') option('install_blobs', type : 'boolean', value : true, @@ -82,6 +85,14 @@ option('tcg', type: 'feature', value: 'enabled', description: 'TCG support') option('tcg_interpreter', type: 'boolean', value: false, description: 'TCG with bytecode interpreter (slow)') +option('safe_stack', type: 'boolean', value: false, + description: 'SafeStack Stack Smash Protection (requires clang/llvm and coroutine backend ucontext)') +option('sanitizers', type: 'boolean', value: false, + description: 'enable default sanitizers') +option('tsan', type: 'boolean', value: false, + description: 'enable thread sanitizer') +option('stack_protector', type: 'feature', value: 'auto', + description: 'compiler-provided stack protection') option('cfi', type: 'boolean', value: false, description: 'Control-Flow Integrity (CFI)') option('cfi_debug', type: 'boolean', value: false, @@ -255,6 +266,8 @@ option('oss', type: 'feature', value: 'auto', description: 'OSS sound support') option('pa', type: 'feature', value: 'auto', description: 'PulseAudio sound support') +option('pipewire', type: 'feature', value: 'auto', + description: 'Pipewire sound support') option('sndio', type: 'feature', value: 'auto', description: 'sndio sound support') @@ -272,6 +285,8 @@ option('vhost_user_blk_server', type: 'feature', value: 'auto', description: 'build vhost-user-blk server') option('virtfs', type: 'feature', value: 'auto', description: 'virtio-9p support') +option('virtfs_proxy_helper', type: 'feature', value: 'auto', + description: 'virtio-9p proxy helper support') option('libvduse', type: 'feature', value: 'auto', description: 'build VDUSE Library') option('vduse_blk_export', type: 'feature', value: 'auto', @@ -289,6 +304,8 @@ option('live_block_migration', type: 'feature', value: 'auto', description: 'block migration in the main migration stream') option('replication', type: 'feature', value: 'auto', description: 'replication support') +option('colo_proxy', type: 'feature', value: 'auto', + description: 'colo-proxy support') option('bochs', type: 'feature', value: 'auto', description: 'bochs image format support') option('cloop', type: 'feature', value: 'auto', @@ -299,6 +316,12 @@ option('qcow1', type: 'feature', value: 'auto', description: 'qcow1 image format support') option('vdi', type: 'feature', value: 'auto', description: 'vdi image format support') +option('vhdx', type: 'feature', value: 'auto', + description: 'vhdx image format support') +option('vmdk', type: 'feature', value: 'auto', + description: 'vmdk image format support') +option('vpc', type: 'feature', value: 'auto', + description: 'vpc image format support') option('vvfat', type: 'feature', value: 'auto', description: 'vvfat image format 
support') option('qed', type: 'feature', value: 'auto', @@ -311,11 +334,13 @@ option('rng_none', type: 'boolean', value: false, description: 'dummy RNG, avoid using /dev/(u)random and getrandom()') option('coroutine_pool', type: 'boolean', value: true, description: 'coroutine freelist (better performance)') +option('debug_graph_lock', type: 'boolean', value: false, + description: 'graph lock debugging support') option('debug_mutex', type: 'boolean', value: false, description: 'mutex debugging support') option('debug_stack_usage', type: 'boolean', value: false, description: 'measure coroutine stack usage') -option('qom_cast_debug', type: 'boolean', value: false, +option('qom_cast_debug', type: 'boolean', value: true, description: 'cast debugging support') option('gprof', type: 'boolean', value: false, description: 'QEMU profiling with gprof', diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c index fe73aa94b1..032fc5f405 100644 --- a/migration/block-dirty-bitmap.c +++ b/migration/block-dirty-bitmap.c @@ -79,6 +79,7 @@ #include "qapi/qapi-visit-migration.h" #include "qapi/clone-visitor.h" #include "trace.h" +#include "options.h" #define CHUNK_SIZE (1 << 10) @@ -604,11 +605,10 @@ static int init_dirty_bitmap_migration(DBMSaveState *s) SaveBitmapState *dbms; GHashTable *handled_by_blk = g_hash_table_new(NULL, NULL); BlockBackend *blk; - const MigrationParameters *mig_params = &migrate_get_current()->parameters; GHashTable *alias_map = NULL; - if (mig_params->has_block_bitmap_mapping) { - alias_map = construct_alias_map(mig_params->block_bitmap_mapping, true, + if (migrate_has_block_bitmap_mapping()) { + alias_map = construct_alias_map(migrate_block_bitmap_mapping(), true, &error_abort); } @@ -706,7 +706,7 @@ static void bulk_phase(QEMUFile *f, DBMSaveState *s, bool limit) QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) { while (!dbms->bulk_completed) { bulk_phase_send_chunk(f, s, dbms); - if (limit && qemu_file_rate_limit(f)) { + if (limit && migration_rate_exceeded(f)) { return; } } @@ -1157,7 +1157,6 @@ static int dirty_bitmap_load_header(QEMUFile *f, DBMLoadState *s, static int dirty_bitmap_load(QEMUFile *f, void *opaque, int version_id) { GHashTable *alias_map = NULL; - const MigrationParameters *mig_params = &migrate_get_current()->parameters; DBMLoadState *s = &((DBMState *)opaque)->load; int ret = 0; @@ -1169,9 +1168,9 @@ static int dirty_bitmap_load(QEMUFile *f, void *opaque, int version_id) return -EINVAL; } - if (mig_params->has_block_bitmap_mapping) { - alias_map = construct_alias_map(mig_params->block_bitmap_mapping, - false, &error_abort); + if (migrate_has_block_bitmap_mapping()) { + alias_map = construct_alias_map(migrate_block_bitmap_mapping(), false, + &error_abort); } do { diff --git a/migration/block.c b/migration/block.c index 426a25bb19..b9580a6c7e 100644 --- a/migration/block.c +++ b/migration/block.c @@ -23,11 +23,13 @@ #include "block/dirty-bitmap.h" #include "migration/misc.h" #include "migration.h" +#include "migration-stats.h" #include "migration/register.h" #include "qemu-file.h" #include "migration/vmstate.h" #include "sysemu/block-backend.h" #include "trace.h" +#include "options.h" #define BLK_MIG_BLOCK_SIZE (1ULL << 20) #define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS) @@ -195,7 +197,7 @@ static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector) { int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK; - if (sector < blk_nb_sectors(bmds->blk)) { + if (sector < bmds->total_sectors) { return 
!!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] & (1UL << (chunk % (sizeof(unsigned long) * 8)))); } else { @@ -229,10 +231,9 @@ static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num, static void alloc_aio_bitmap(BlkMigDevState *bmds) { - BlockBackend *bb = bmds->blk; int64_t bitmap_size; - bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1; + bitmap_size = bmds->total_sectors + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1; bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8; bmds->aio_bitmap = g_malloc0(bitmap_size); @@ -417,7 +418,7 @@ static int init_blk_migration(QEMUFile *f) bmds->bulk_completed = 0; bmds->total_sectors = sectors; bmds->completed_sectors = 0; - bmds->shared_base = migrate_use_block_incremental(); + bmds->shared_base = migrate_block_incremental(); assert(i < num_bs); bmds_bs[i].bmds = bmds; @@ -625,7 +626,7 @@ static int flush_blks(QEMUFile *f) blk_mig_lock(); while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) { - if (qemu_file_rate_limit(f)) { + if (migration_rate_exceeded(f)) { break; } if (blk->ret < 0) { @@ -747,8 +748,7 @@ static int block_save_setup(QEMUFile *f, void *opaque) static int block_save_iterate(QEMUFile *f, void *opaque) { int ret; - int64_t last_bytes = qemu_file_total_transferred(f); - int64_t delta_bytes; + uint64_t last_bytes = qemu_file_transferred(f); trace_migration_block_save("iterate", block_mig_state.submitted, block_mig_state.transferred); @@ -763,7 +763,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque) /* control the rate of transfer */ blk_mig_lock(); while (block_mig_state.read_done * BLK_MIG_BLOCK_SIZE < - qemu_file_get_rate_limit(f) && + migration_rate_get() && block_mig_state.submitted < MAX_PARALLEL_IO && (block_mig_state.submitted + block_mig_state.read_done) < MAX_IO_BUFFERS) { @@ -800,14 +800,8 @@ static int block_save_iterate(QEMUFile *f, void *opaque) } qemu_put_be64(f, BLK_MIG_FLAG_EOS); - delta_bytes = qemu_file_total_transferred(f) - last_bytes; - if (delta_bytes > 0) { - return 1; - } else if (delta_bytes < 0) { - return -1; - } else { - return 0; - } + uint64_t delta_bytes = qemu_file_transferred(f) - last_bytes; + return (delta_bytes > 0); } /* Called with iothread lock taken. 
*/ @@ -1001,7 +995,7 @@ static int block_load(QEMUFile *f, void *opaque, int version_id) static bool block_is_active(void *opaque) { - return migrate_use_block(); + return migrate_block(); } static SaveVMHandlers savevm_block_handlers = { diff --git a/migration/colo.c b/migration/colo.c index 0716e64689..72f4f7b37e 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -26,9 +26,7 @@ #include "qemu/rcu.h" #include "migration/failover.h" #include "migration/ram.h" -#ifdef CONFIG_REPLICATION #include "block/replication.h" -#endif #include "net/colo-compare.h" #include "net/colo.h" #include "block/block.h" @@ -36,6 +34,7 @@ #include "sysemu/cpus.h" #include "sysemu/runstate.h" #include "net/filter.h" +#include "options.h" static bool vmstate_loading; static Notifier packets_compare_notifier; @@ -64,10 +63,27 @@ static bool colo_runstate_is_stopped(void) return runstate_check(RUN_STATE_COLO) || !runstate_is_running(); } +static void colo_checkpoint_notify(void *opaque) +{ + MigrationState *s = opaque; + int64_t next_notify_time; + + qemu_event_set(&s->colo_checkpoint_event); + s->colo_checkpoint_time = qemu_clock_get_ms(QEMU_CLOCK_HOST); + next_notify_time = s->colo_checkpoint_time + migrate_checkpoint_delay(); + timer_mod(s->colo_delay_timer, next_notify_time); +} + +void colo_checkpoint_delay_set(void) +{ + if (migration_in_colo_state()) { + colo_checkpoint_notify(migrate_get_current()); + } +} + static void secondary_vm_do_failover(void) { /* COLO needs enable block-replication */ -#ifdef CONFIG_REPLICATION int old_state; MigrationIncomingState *mis = migration_incoming_get_current(); Error *local_err = NULL; @@ -129,17 +145,13 @@ static void secondary_vm_do_failover(void) qemu_sem_post(&mis->colo_incoming_sem); /* For Secondary VM, jump to incoming co */ - if (mis->migration_incoming_co) { - qemu_coroutine_enter(mis->migration_incoming_co); + if (mis->colo_incoming_co) { + qemu_coroutine_enter(mis->colo_incoming_co); } -#else - abort(); -#endif } static void primary_vm_do_failover(void) { -#ifdef CONFIG_REPLICATION MigrationState *s = migrate_get_current(); int old_state; Error *local_err = NULL; @@ -180,9 +192,6 @@ static void primary_vm_do_failover(void) /* Notify COLO thread that failover work is finished */ qemu_sem_post(&s->colo_exit_sem); -#else - abort(); -#endif } COLOMode get_colo_mode(void) @@ -216,7 +225,6 @@ void colo_do_failover(void) } } -#ifdef CONFIG_REPLICATION void qmp_xen_set_replication(bool enable, bool primary, bool has_failover, bool failover, Error **errp) @@ -270,7 +278,6 @@ void qmp_xen_colo_do_checkpoint(Error **errp) /* Notify all filters of all NIC to do checkpoint */ colo_notify_filters_event(COLO_EVENT_CHECKPOINT, errp); } -#endif COLOStatus *qmp_query_colo_status(Error **errp) { @@ -434,15 +441,11 @@ static int colo_do_checkpoint_transaction(MigrationState *s, } qemu_mutex_lock_iothread(); -#ifdef CONFIG_REPLICATION replication_do_checkpoint_all(&local_err); if (local_err) { qemu_mutex_unlock_iothread(); goto out; } -#else - abort(); -#endif colo_send_message(s->to_dst_file, COLO_MESSAGE_VMSTATE_SEND, &local_err); if (local_err) { @@ -560,22 +563,18 @@ static void colo_process_checkpoint(MigrationState *s) object_unref(OBJECT(bioc)); qemu_mutex_lock_iothread(); -#ifdef CONFIG_REPLICATION replication_start_all(REPLICATION_MODE_PRIMARY, &local_err); if (local_err) { qemu_mutex_unlock_iothread(); goto out; } -#else - abort(); -#endif vm_start(); qemu_mutex_unlock_iothread(); trace_colo_vm_state_change("stop", "run"); timer_mod(s->colo_delay_timer, 
qemu_clock_get_ms(QEMU_CLOCK_HOST) + - s->parameters.x_checkpoint_delay); + migrate_checkpoint_delay()); while (s->state == MIGRATION_STATUS_COLO) { if (failover_get_state() != FAILOVER_STATUS_NONE) { @@ -643,18 +642,6 @@ out: } } -void colo_checkpoint_notify(void *opaque) -{ - MigrationState *s = opaque; - int64_t next_notify_time; - - qemu_event_set(&s->colo_checkpoint_event); - s->colo_checkpoint_time = qemu_clock_get_ms(QEMU_CLOCK_HOST); - next_notify_time = s->colo_checkpoint_time + - s->parameters.x_checkpoint_delay; - timer_mod(s->colo_delay_timer, next_notify_time); -} - void migrate_start_colo_process(MigrationState *s) { qemu_mutex_unlock_iothread(); @@ -748,7 +735,6 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, return; } -#ifdef CONFIG_REPLICATION replication_get_error_all(&local_err); if (local_err) { error_propagate(errp, local_err); @@ -765,9 +751,6 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, qemu_mutex_unlock_iothread(); return; } -#else - abort(); -#endif /* Notify all filters of all NIC to do checkpoint */ colo_notify_filters_event(COLO_EVENT_CHECKPOINT, &local_err); @@ -834,7 +817,7 @@ void colo_shutdown(void) } } -void *colo_process_incoming_thread(void *opaque) +static void *colo_process_incoming_thread(void *opaque) { MigrationIncomingState *mis = opaque; QEMUFile *fb = NULL; @@ -874,15 +857,11 @@ void *colo_process_incoming_thread(void *opaque) object_unref(OBJECT(bioc)); qemu_mutex_lock_iothread(); -#ifdef CONFIG_REPLICATION replication_start_all(REPLICATION_MODE_SECONDARY, &local_err); if (local_err) { qemu_mutex_unlock_iothread(); goto out; } -#else - abort(); -#endif vm_start(); qemu_mutex_unlock_iothread(); trace_colo_vm_state_change("stop", "run"); @@ -939,3 +918,40 @@ out: rcu_unregister_thread(); return NULL; } + +int coroutine_fn colo_incoming_co(void) +{ + MigrationIncomingState *mis = migration_incoming_get_current(); + Error *local_err = NULL; + QemuThread th; + + assert(qemu_mutex_iothread_locked()); + + if (!migration_incoming_colo_enabled()) { + return 0; + } + + /* Make sure all file formats throw away their mutable metadata */ + bdrv_activate_all(&local_err); + if (local_err) { + error_report_err(local_err); + return -EINVAL; + } + + qemu_thread_create(&th, "COLO incoming", colo_process_incoming_thread, + mis, QEMU_THREAD_JOINABLE); + + mis->colo_incoming_co = qemu_coroutine_self(); + qemu_coroutine_yield(); + mis->colo_incoming_co = NULL; + + qemu_mutex_unlock_iothread(); + /* Wait checkpoint incoming thread exit before free resource */ + qemu_thread_join(&th); + qemu_mutex_lock_iothread(); + + /* We hold the global iothread lock, so it is safe here */ + colo_release_ram_cache(); + + return 0; +} diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c index 575d48c397..84f1b0fb20 100644 --- a/migration/dirtyrate.c +++ b/migration/dirtyrate.c @@ -11,11 +11,12 @@ */ #include "qemu/osdep.h" +#include "qemu/error-report.h" #include +#include "hw/core/cpu.h" #include "qapi/error.h" -#include "cpu.h" #include "exec/ramblock.h" -#include "exec/ram_addr.h" +#include "exec/target_page.h" #include "qemu/rcu_queue.h" #include "qemu/main-loop.h" #include "qapi/qapi-commands-migration.h" @@ -28,6 +29,7 @@ #include "sysemu/kvm.h" #include "sysemu/runstate.h" #include "exec/memory.h" +#include "qemu/xxhash.h" /* * total_dirty_pages is procted by BQL and is used @@ -73,13 +75,11 @@ static inline void record_dirtypages(DirtyPageRecord *dirty_pages, static int64_t do_calculate_dirtyrate(DirtyPageRecord 
dirty_pages, int64_t calc_time_ms) { - uint64_t memory_size_MB; uint64_t increased_dirty_pages = dirty_pages.end_pages - dirty_pages.start_pages; + uint64_t memory_size_MiB = qemu_target_pages_to_MiB(increased_dirty_pages); - memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20; - - return memory_size_MB * 1000 / calc_time_ms; + return memory_size_MiB * 1000 / calc_time_ms; } void global_dirty_log_change(unsigned int flag, bool start) @@ -101,7 +101,7 @@ void global_dirty_log_change(unsigned int flag, bool start) static void global_dirty_log_sync(unsigned int flag, bool one_shot) { qemu_mutex_lock_iothread(); - memory_global_dirty_log_sync(); + memory_global_dirty_log_sync(false); if (one_shot) { memory_global_dirty_log_stop(flag); } @@ -149,25 +149,25 @@ int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms, retry: init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); - cpu_list_lock(); - gen_id = cpu_list_generation_id_get(); - records = vcpu_dirty_stat_alloc(stat); - vcpu_dirty_stat_collect(stat, records, true); - cpu_list_unlock(); + WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) { + gen_id = cpu_list_generation_id_get(); + records = vcpu_dirty_stat_alloc(stat); + vcpu_dirty_stat_collect(stat, records, true); + } duration = dirty_stat_wait(calc_time_ms, init_time_ms); global_dirty_log_sync(flag, one_shot); - cpu_list_lock(); - if (gen_id != cpu_list_generation_id_get()) { - g_free(records); - g_free(stat->rates); - cpu_list_unlock(); - goto retry; + WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) { + if (gen_id != cpu_list_generation_id_get()) { + g_free(records); + g_free(stat->rates); + cpu_list_unlock(); + goto retry; + } + vcpu_dirty_stat_collect(stat, records, false); } - vcpu_dirty_stat_collect(stat, records, false); - cpu_list_unlock(); for (i = 0; i < stat->nvcpu; i++) { dirtyrate = do_calculate_dirtyrate(records[i], duration); @@ -290,8 +290,8 @@ static void update_dirtyrate_stat(struct RamblockDirtyInfo *info) DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count; DirtyStat.page_sampling.total_sample_count += info->sample_pages_count; /* size of total pages in MB */ - DirtyStat.page_sampling.total_block_mem_MB += (info->ramblock_pages * - TARGET_PAGE_SIZE) >> 20; + DirtyStat.page_sampling.total_block_mem_MB += + qemu_target_pages_to_MiB(info->ramblock_pages); } static void update_dirtyrate(uint64_t msec) @@ -307,6 +307,34 @@ static void update_dirtyrate(uint64_t msec) DirtyStat.dirty_rate = dirtyrate; } +/* + * Compute hash of a single page of size TARGET_PAGE_SIZE. + */ +static uint32_t compute_page_hash(void *ptr) +{ + size_t page_size = qemu_target_page_size(); + uint32_t i; + uint64_t v1, v2, v3, v4; + uint64_t res; + const uint64_t *p = ptr; + + v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2; + v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2; + v3 = QEMU_XXHASH_SEED + 0; + v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1; + for (i = 0; i < page_size / 8; i += 4) { + v1 = XXH64_round(v1, p[i + 0]); + v2 = XXH64_round(v2, p[i + 1]); + v3 = XXH64_round(v3, p[i + 2]); + v4 = XXH64_round(v4, p[i + 3]); + } + res = XXH64_mergerounds(v1, v2, v3, v4); + res += page_size; + res = XXH64_avalanche(res); + return (uint32_t)(res & UINT32_MAX); +} + + /* * get hash result for the sampled memory with length of TARGET_PAGE_SIZE * in ramblock, which starts from ramblock base address. 
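
compute_page_hash() above replaces the old CRC32 with an inline XXH64-style hash, and the sampling code decides a page is dirty by re-hashing it and comparing against the stored result. A toy demonstration of that idea, with fnv1a() as a deliberately simple stand-in for the real hash and a plain 4 KiB buffer standing in for a sampled guest page:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simple stand-in hash; the patch uses XXH64 rounds instead. */
    static uint32_t fnv1a(const uint8_t *p, size_t len)
    {
        uint32_t h = 0x811c9dc5u;
        for (size_t i = 0; i < len; i++) {
            h = (h ^ p[i]) * 0x01000193u;
        }
        return h;
    }

    int main(void)
    {
        uint8_t page[4096];
        memset(page, 0, sizeof(page));

        uint32_t before = fnv1a(page, sizeof(page));
        page[123] = 0xff;                 /* the guest writes to the page */
        uint32_t after = fnv1a(page, sizeof(page));

        printf("page is %s\n", before == after ? "clean" : "dirty");
        return 0;
    }

A mismatch proves the page changed; a collision can only make a dirty page look clean, so the sampled estimate may under-count but never over-count dirtying.
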
@@ -314,13 +342,13 @@ static void update_dirtyrate(uint64_t msec) static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info, uint64_t vfn) { - uint32_t crc; + uint32_t hash; - crc = crc32(0, (info->ramblock_addr + - vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE); + hash = compute_page_hash(info->ramblock_addr + + vfn * qemu_target_page_size()); - trace_get_ramblock_vfn_hash(info->idstr, vfn, crc); - return crc; + trace_get_ramblock_vfn_hash(info->idstr, vfn, hash); + return hash; } static bool save_ramblock_hash(struct RamblockDirtyInfo *info) @@ -372,7 +400,7 @@ static void get_ramblock_dirty_info(RAMBlock *block, sample_pages_per_gigabytes) >> 30; /* Right shift TARGET_PAGE_BITS to calc page count */ info->ramblock_pages = qemu_ram_get_used_length(block) >> - TARGET_PAGE_BITS; + qemu_target_page_bits(); info->ramblock_addr = qemu_ram_get_host_addr(block); strcpy(info->idstr, qemu_ram_get_idstr(block)); } @@ -453,13 +481,13 @@ out: static void calc_page_dirty_rate(struct RamblockDirtyInfo *info) { - uint32_t crc; + uint32_t hash; int i; for (i = 0; i < info->sample_pages_count; i++) { - crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]); - if (crc != info->hash_result[i]) { - trace_calc_page_dirty_rate(info->idstr, crc, info->hash_result[i]); + hash = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]); + if (hash != info->hash_result[i]) { + trace_calc_page_dirty_rate(info->idstr, hash, info->hash_result[i]); info->sample_dirty_count++; } } @@ -483,7 +511,7 @@ find_block_matched(RAMBlock *block, int count, if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) || infos[i].ramblock_pages != - (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) { + (qemu_ram_get_used_length(block) >> qemu_target_page_bits())) { trace_find_page_matched(block->idstr); return NULL; } @@ -553,7 +581,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config) * skip it unconditionally and start dirty tracking * from 2'round of log sync */ - memory_global_dirty_log_sync(); + memory_global_dirty_log_sync(false); /* * reset page protect manually and unconditionally. 
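
do_calculate_dirtyrate() above converts the pages dirtied during the measurement window into MiB (via qemu_target_pages_to_MiB()) and normalizes that to a per-second rate. A worked example of the arithmetic, with assumed numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t dirty_pages = 25600;   /* pages dirtied in the window */
        int page_bits = 12;             /* assuming 4 KiB target pages */
        int64_t calc_time_ms = 500;     /* measurement window */

        /* Pages -> MiB, as qemu_target_pages_to_MiB() does for 4 KiB pages. */
        uint64_t mib = (dirty_pages << page_bits) >> 20;      /* 100 MiB */

        /* Normalize to a per-second rate, as in do_calculate_dirtyrate(). */
        int64_t rate = mib * 1000 / calc_time_ms;             /* 200 MiB/s */

        printf("dirty rate: %lld MiB/s\n", (long long)rate);
        return 0;
    }
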
diff --git a/migration/exec.c b/migration/exec.c index 375d2e1b54..2bf882bbe1 100644 --- a/migration/exec.c +++ b/migration/exec.c @@ -18,17 +18,37 @@ */ #include "qemu/osdep.h" +#include "qemu/error-report.h" #include "channel.h" #include "exec.h" #include "migration.h" #include "io/channel-command.h" #include "trace.h" +#include "qemu/cutils.h" +#ifdef WIN32 +const char *exec_get_cmd_path(void); +const char *exec_get_cmd_path(void) +{ + g_autofree char *detected_path = g_new(char, MAX_PATH); + if (GetSystemDirectoryA(detected_path, MAX_PATH) == 0) { + warn_report("Could not detect cmd.exe path, using default."); + return "C:\\Windows\\System32\\cmd.exe"; + } + pstrcat(detected_path, MAX_PATH, "\\cmd.exe"); + return g_steal_pointer(&detected_path); +} +#endif void exec_start_outgoing_migration(MigrationState *s, const char *command, Error **errp) { QIOChannel *ioc; + +#ifdef WIN32 + const char *argv[] = { exec_get_cmd_path(), "/c", command, NULL }; +#else const char *argv[] = { "/bin/sh", "-c", command, NULL }; +#endif trace_migration_exec_outgoing(command); ioc = QIO_CHANNEL(qio_channel_command_new_spawn(argv, @@ -55,7 +75,12 @@ static gboolean exec_accept_incoming_migration(QIOChannel *ioc, void exec_start_incoming_migration(const char *command, Error **errp) { QIOChannel *ioc; + +#ifdef WIN32 + const char *argv[] = { exec_get_cmd_path(), "/c", command, NULL }; +#else const char *argv[] = { "/bin/sh", "-c", command, NULL }; +#endif trace_migration_exec_incoming(command); ioc = QIO_CHANNEL(qio_channel_command_new_spawn(argv, diff --git a/migration/meson.build b/migration/meson.build index 0d1bb9f96e..8ba6e420fe 100644 --- a/migration/meson.build +++ b/migration/meson.build @@ -1,5 +1,6 @@ # Files needed by unit tests migration_files = files( + 'migration-stats.c', 'page_cache.c', 'xbzrle.c', 'vmstate-types.c', @@ -7,14 +8,12 @@ migration_files = files( 'qemu-file.c', 'yank_functions.c', ) -softmmu_ss.add(migration_files) softmmu_ss.add(files( 'block-dirty-bitmap.c', 'channel.c', 'channel-block.c', - 'colo-failover.c', - 'colo.c', + 'dirtyrate.c', 'exec.c', 'fd.c', 'global_state.c', @@ -22,6 +21,8 @@ softmmu_ss.add(files( 'migration.c', 'multifd.c', 'multifd-zlib.c', + 'ram-compress.c', + 'options.c', 'postcopy-ram.c', 'savevm.c', 'socket.c', @@ -29,6 +30,10 @@ softmmu_ss.add(files( 'threadinfo.c', ), gnutls) +if get_option('replication').allowed() + softmmu_ss.add(files('colo-failover.c', 'colo.c')) +endif + softmmu_ss.add(when: rdma, if_true: files('rdma.c')) if get_option('live_block_migration').allowed() softmmu_ss.add(files('block.c')) @@ -36,4 +41,5 @@ endif softmmu_ss.add(when: zstd, if_true: files('multifd-zstd.c')) specific_ss.add(when: 'CONFIG_SOFTMMU', - if_true: files('dirtyrate.c', 'ram.c', 'target.c')) + if_true: files('ram.c', + 'target.c')) diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c index 72519ea99f..9885d7c9f7 100644 --- a/migration/migration-hmp-cmds.c +++ b/migration/migration-hmp-cmds.c @@ -15,7 +15,6 @@ #include "qemu/osdep.h" #include "block/qapi.h" -#include "migration/misc.h" #include "migration/snapshot.h" #include "monitor/hmp.h" #include "monitor/monitor.h" @@ -30,6 +29,27 @@ #include "qemu/sockets.h" #include "sysemu/runstate.h" #include "ui/qemu-spice.h" +#include "sysemu/sysemu.h" +#include "migration.h" + +static void migration_global_dump(Monitor *mon) +{ + MigrationState *ms = migrate_get_current(); + + monitor_printf(mon, "globals:\n"); + monitor_printf(mon, "store-global-state: %s\n", + ms->store_global_state ? 
"on" : "off"); + monitor_printf(mon, "only-migratable: %s\n", + only_migratable ? "on" : "off"); + monitor_printf(mon, "send-configuration: %s\n", + ms->send_configuration ? "on" : "off"); + monitor_printf(mon, "send-section-footer: %s\n", + ms->send_section_footer ? "on" : "off"); + monitor_printf(mon, "decompress-error-check: %s\n", + ms->decompress_error_check ? "on" : "off"); + monitor_printf(mon, "clear-bitmap-shift: %u\n", + ms->clear_bitmap_shift); +} void hmp_info_migrate(Monitor *mon, const QDict *qdict) { @@ -616,23 +636,6 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) hmp_handle_error(mon, err); } -void hmp_client_migrate_info(Monitor *mon, const QDict *qdict) -{ - Error *err = NULL; - const char *protocol = qdict_get_str(qdict, "protocol"); - const char *hostname = qdict_get_str(qdict, "hostname"); - bool has_port = qdict_haskey(qdict, "port"); - int port = qdict_get_try_int(qdict, "port", -1); - bool has_tls_port = qdict_haskey(qdict, "tls-port"); - int tls_port = qdict_get_try_int(qdict, "tls-port", -1); - const char *cert_subject = qdict_get_try_str(qdict, "cert-subject"); - - qmp_client_migrate_info(protocol, hostname, - has_port, port, has_tls_port, tls_port, - cert_subject, &err); - hmp_handle_error(mon, err); -} - void hmp_migrate_start_postcopy(Monitor *mon, const QDict *qdict) { Error *err = NULL; @@ -640,6 +643,7 @@ void hmp_migrate_start_postcopy(Monitor *mon, const QDict *qdict) hmp_handle_error(mon, err); } +#ifdef CONFIG_REPLICATION void hmp_x_colo_lost_heartbeat(Monitor *mon, const QDict *qdict) { Error *err = NULL; @@ -647,6 +651,7 @@ void hmp_x_colo_lost_heartbeat(Monitor *mon, const QDict *qdict) qmp_x_colo_lost_heartbeat(&err); hmp_handle_error(mon, err); } +#endif typedef struct HMPMigrationStatus { QEMUTimer *timer; diff --git a/migration/migration-stats.c b/migration/migration-stats.c new file mode 100644 index 0000000000..f98c8260be --- /dev/null +++ b/migration/migration-stats.c @@ -0,0 +1,68 @@ +/* + * Migration stats + * + * Copyright (c) 2012-2023 Red Hat Inc + * + * Authors: + * Juan Quintela + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/stats64.h" +#include "qemu-file.h" +#include "trace.h" +#include "migration-stats.h" + +MigrationAtomicStats mig_stats; + +bool migration_rate_exceeded(QEMUFile *f) +{ + if (qemu_file_get_error(f)) { + return true; + } + + uint64_t rate_limit_start = stat64_get(&mig_stats.rate_limit_start); + uint64_t rate_limit_current = migration_transferred_bytes(f); + uint64_t rate_limit_used = rate_limit_current - rate_limit_start; + uint64_t rate_limit_max = stat64_get(&mig_stats.rate_limit_max); + + if (rate_limit_max == RATE_LIMIT_DISABLED) { + return false; + } + if (rate_limit_max > 0 && rate_limit_used > rate_limit_max) { + return true; + } + return false; +} + +uint64_t migration_rate_get(void) +{ + return stat64_get(&mig_stats.rate_limit_max); +} + +#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY) + +void migration_rate_set(uint64_t limit) +{ + /* + * 'limit' is per second. But we check it each BUFER_DELAY miliseconds. 
+ */ + stat64_set(&mig_stats.rate_limit_max, limit / XFER_LIMIT_RATIO); +} + +void migration_rate_reset(QEMUFile *f) +{ + stat64_set(&mig_stats.rate_limit_start, migration_transferred_bytes(f)); +} + +uint64_t migration_transferred_bytes(QEMUFile *f) +{ + uint64_t multifd = stat64_get(&mig_stats.multifd_bytes); + uint64_t qemu_file = qemu_file_transferred(f); + + trace_migration_transferred_bytes(qemu_file, multifd); + return qemu_file + multifd; +} diff --git a/migration/migration-stats.h b/migration/migration-stats.h new file mode 100644 index 0000000000..ac2260e987 --- /dev/null +++ b/migration/migration-stats.h @@ -0,0 +1,139 @@ +/* + * Migration stats + * + * Copyright (c) 2012-2023 Red Hat Inc + * + * Authors: + * Juan Quintela + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_MIGRATION_STATS_H +#define QEMU_MIGRATION_STATS_H + +#include "qemu/stats64.h" + +/* + * Amount of time to allocate to each "chunk" of bandwidth-throttled + * data. + */ +#define BUFFER_DELAY 100 + +/* + * If rate_limit_max is 0, there is special code to remove the rate + * limit. + */ +#define RATE_LIMIT_DISABLED 0 + +/* + * These are the ram migration statistic counters. It is loosely + * based on MigrationStats. We change to Stat64 any counter that + * needs to be updated using atomic ops (can be accessed by more than + * one thread). + */ +typedef struct { + /* + * Number of bytes that were dirty last time that we synced with + * the guest memory. We use that to calculate the downtime. As + * the remaining dirty amounts to what we know that is still dirty + * since last iteration, not counting what the guest has dirtied + * since we synchronized bitmaps. + */ + Stat64 dirty_bytes_last_sync; + /* + * Number of pages dirtied per second. + */ + Stat64 dirty_pages_rate; + /* + * Number of times we have synchronized guest bitmaps. + */ + Stat64 dirty_sync_count; + /* + * Number of times zero copy failed to send any page using zero + * copy. + */ + Stat64 dirty_sync_missed_zero_copy; + /* + * Number of bytes sent at migration completion stage while the + * guest is stopped. + */ + Stat64 downtime_bytes; + /* + * Number of bytes sent through multifd channels. + */ + Stat64 multifd_bytes; + /* + * Number of pages transferred that were not full of zeros. + */ + Stat64 normal_pages; + /* + * Number of bytes sent during postcopy. + */ + Stat64 postcopy_bytes; + /* + * Number of postcopy page faults that we have handled during + * postcopy stage. + */ + Stat64 postcopy_requests; + /* + * Number of bytes sent during precopy stage. + */ + Stat64 precopy_bytes; + /* + * Amount of transferred data at the start of current cycle. + */ + Stat64 rate_limit_start; + /* + * Maximum amount of data we can send in a cycle. + */ + Stat64 rate_limit_max; + /* + * Total number of bytes transferred. + */ + Stat64 transferred; + /* + * Number of pages transferred that were full of zeros. + */ + Stat64 zero_pages; +} MigrationAtomicStats; + +extern MigrationAtomicStats mig_stats; + +/** + * migration_rate_get: Get the maximum amount that can be transferred. + * + * Returns the maximum number of bytes that can be transferred in a cycle. + */ +uint64_t migration_rate_get(void); + +/** + * migration_rate_reset: Reset the rate limit counter. + * + * This is called when we know we start a new transfer cycle. 
+ * + * @f: QEMUFile used for main migration channel + */ +void migration_rate_reset(QEMUFile *f); + +/** + * migration_rate_set: Set the maximum amount that can be transferred. + * + * Sets the maximum amount of bytes that can be transferred in one cycle. + * + * @new_rate: new maximum amount + */ +void migration_rate_set(uint64_t new_rate); + +/** + * migration_transferred_bytes: Return number of bytes transferred + * + * @f: QEMUFile used for main migration channel + * + * Returns how many bytes have we transferred since the beginning of + * the migration. It accounts for bytes sent through any migration + * channel, multifd, qemu_file, rdma, .... + */ +uint64_t migration_transferred_bytes(QEMUFile *f); +#endif diff --git a/migration/migration.c b/migration/migration.c index ae2025d9d8..5de7f734b9 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -26,9 +26,11 @@ #include "sysemu/cpu-throttle.h" #include "rdma.h" #include "ram.h" +#include "ram-compress.h" #include "migration/global_state.h" #include "migration/misc.h" #include "migration.h" +#include "migration-stats.h" #include "savevm.h" #include "qemu-file.h" #include "channel.h" @@ -52,8 +54,6 @@ #include "io/channel-tls.h" #include "migration/colo.h" #include "hw/boards.h" -#include "hw/qdev-properties.h" -#include "hw/qdev-properties-system.h" #include "monitor/monitor.h" #include "net/announce.h" #include "qemu/queue.h" @@ -63,61 +63,7 @@ #include "sysemu/cpus.h" #include "yank_functions.h" #include "sysemu/qtest.h" -#include "ui/qemu-spice.h" - -#define MAX_THROTTLE (128 << 20) /* Migration transfer speed throttling */ - -/* Amount of time to allocate to each "chunk" of bandwidth-throttled - * data. */ -#define BUFFER_DELAY 100 -#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY) - -/* Time in milliseconds we are allowed to stop the source, - * for sending the last part */ -#define DEFAULT_MIGRATE_SET_DOWNTIME 300 - -/* Maximum migrate downtime set to 2000 seconds */ -#define MAX_MIGRATE_DOWNTIME_SECONDS 2000 -#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000) - -/* Default compression thread count */ -#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8 -/* Default decompression thread count, usually decompression is at - * least 4 times as fast as compression.*/ -#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2 -/*0: means nocompress, 1: best speed, ... 9: best compress ratio */ -#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1 -/* Define default autoconverge cpu throttle migration parameters */ -#define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50 -#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20 -#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10 -#define DEFAULT_MIGRATE_MAX_CPU_THROTTLE 99 - -/* Migration XBZRLE default cache size */ -#define DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE (64 * 1024 * 1024) - -/* The delay time (in ms) between two COLO checkpoints */ -#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY (200 * 100) -#define DEFAULT_MIGRATE_MULTIFD_CHANNELS 2 -#define DEFAULT_MIGRATE_MULTIFD_COMPRESSION MULTIFD_COMPRESSION_NONE -/* 0: means nocompress, 1: best speed, ... 9: best compress ratio */ -#define DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL 1 -/* 0: means nocompress, 1: best speed, ... 20: best compress ratio */ -#define DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL 1 - -/* Background transfer rate for postcopy, 0 means unlimited, note - * that page requests can still exceed this limit. 
- */ -#define DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH 0 - -/* - * Parameters for self_announce_delay giving a stream of RARP/ARP - * packets after migration. - */ -#define DEFAULT_MIGRATE_ANNOUNCE_INITIAL 50 -#define DEFAULT_MIGRATE_ANNOUNCE_MAX 550 -#define DEFAULT_MIGRATE_ANNOUNCE_ROUNDS 5 -#define DEFAULT_MIGRATE_ANNOUNCE_STEP 100 +#include "options.h" static NotifierList migration_state_notifiers = NOTIFIER_LIST_INITIALIZER(migration_state_notifiers); @@ -136,39 +82,6 @@ enum mig_rp_message_type { MIG_RP_MSG_MAX }; -/* Migration capabilities set */ -struct MigrateCapsSet { - int size; /* Capability set size */ - MigrationCapability caps[]; /* Variadic array of capabilities */ -}; -typedef struct MigrateCapsSet MigrateCapsSet; - -/* Define and initialize MigrateCapsSet */ -#define INITIALIZE_MIGRATE_CAPS_SET(_name, ...) \ - MigrateCapsSet _name = { \ - .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \ - .caps = { __VA_ARGS__ } \ - } - -/* Background-snapshot compatibility check list */ -static const -INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot, - MIGRATION_CAPABILITY_POSTCOPY_RAM, - MIGRATION_CAPABILITY_DIRTY_BITMAPS, - MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME, - MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE, - MIGRATION_CAPABILITY_RETURN_PATH, - MIGRATION_CAPABILITY_MULTIFD, - MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER, - MIGRATION_CAPABILITY_AUTO_CONVERGE, - MIGRATION_CAPABILITY_RELEASE_RAM, - MIGRATION_CAPABILITY_RDMA_PIN_ALL, - MIGRATION_CAPABILITY_COMPRESS, - MIGRATION_CAPABILITY_XBZRLE, - MIGRATION_CAPABILITY_X_COLO, - MIGRATION_CAPABILITY_VALIDATE_UUID, - MIGRATION_CAPABILITY_ZERO_COPY_SEND); - /* When we add fault tolerance, we could have several migrations at once. For now we don't need to add dynamic creation of migration */ @@ -186,7 +99,7 @@ static void migrate_fd_cancel(MigrationState *s); static bool migration_needs_multiple_sockets(void) { - return migrate_use_multifd() || migrate_postcopy_preempt(); + return migrate_multifd() || migrate_postcopy_preempt(); } static bool uri_supports_multi_channels(const char *uri) @@ -316,6 +229,7 @@ void migration_incoming_state_destroy(void) struct MigrationIncomingState *mis = migration_incoming_get_current(); multifd_load_cleanup(); + compress_threads_load_cleanup(); if (mis->to_src_file) { /* Tell source that we are done */ @@ -353,21 +267,11 @@ void migration_incoming_state_destroy(void) static void migrate_generate_event(int new_state) { - if (migrate_use_events()) { + if (migrate_events()) { qapi_event_send_migration(new_state); } } -static bool migrate_late_block_activate(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[ - MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE]; -} - /* * Send a message on the return channel back to the source * of the migration. 
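Taken together, the deletions above and the new migration-stats.h API define the reworked throttling scheme: migration_rate_set() (top of this excerpt) stores limit / XFER_LIMIT_RATIO in mig_stats.rate_limit_max, i.e. one BUFFER_DELAY (100 ms) worth of the requested bandwidth per cycle, and migration_rate_reset() snapshots migration_transferred_bytes() into rate_limit_start when a cycle starts. Below is a minimal standalone sketch of that arithmetic, not QEMU code; it assumes the check inside migration_rate_exceeded() (used further down in this patch, body not shown here) is "bytes sent since the last reset exceed the per-cycle budget".

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BUFFER_DELAY 100                        /* ms per throttling cycle */
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)  /* cycles per second: 10 */

int main(void)
{
    /* 128 MiB/s, the old MAX_THROTTLE default deleted above */
    uint64_t max_bandwidth = 128u << 20;

    /* Per-cycle budget, what migration_rate_set() keeps in
     * mig_stats.rate_limit_max: ~13.4 MB here */
    uint64_t rate_limit_max = max_bandwidth / XFER_LIMIT_RATIO;

    /* Total bytes already sent when the cycle began, snapshotted by
     * migration_rate_reset() into mig_stats.rate_limit_start */
    uint64_t rate_limit_start = 0;

    /* What migration_transferred_bytes() would report now: main
     * channel bytes plus multifd bytes */
    uint64_t transferred = 20u << 20;

    /* A zero budget is RATE_LIMIT_DISABLED: never throttle */
    bool exceeded = rate_limit_max != 0 &&
                    transferred - rate_limit_start > rate_limit_max;

    printf("budget/cycle: %" PRIu64 " bytes, exceeded: %s\n",
           rate_limit_max, exceeded ? "yes" : "no");
    return 0;
}

Accounting per 100 ms cycle rather than running a continuous token bucket keeps the hot path to one subtraction and one comparison per check.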
@@ -488,6 +392,18 @@ void migration_incoming_disable_colo(void)
 
 int migration_incoming_enable_colo(void)
 {
+#ifndef CONFIG_REPLICATION
+    error_report("ENABLE_COLO command came in migration stream, but COLO "
+                 "module is not built in");
+    return -ENOTSUP;
+#endif
+
+    if (!migrate_colo()) {
+        error_report("ENABLE_COLO command came in migration stream, but c-colo "
+                     "capability is not set");
+        return -EINVAL;
+    }
+
     if (ram_block_discard_disable(true)) {
         error_report("COLO: cannot disable RAM discard");
         return -EBUSY;
@@ -595,15 +511,22 @@ process_incoming_migration_co(void *opaque)
     MigrationIncomingState *mis = migration_incoming_get_current();
     PostcopyState ps;
     int ret;
-    Error *local_err = NULL;
 
     assert(mis->from_src_file);
-    mis->migration_incoming_co = qemu_coroutine_self();
+
+    if (compress_threads_load_setup(mis->from_src_file)) {
+        error_report("Failed to setup decompress threads");
+        goto fail;
+    }
+
     mis->largest_page_size = qemu_ram_pagesize_largest();
     postcopy_state_set(POSTCOPY_INCOMING_NONE);
     migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                       MIGRATION_STATUS_ACTIVE);
+
+    mis->loadvm_co = qemu_coroutine_self();
     ret = qemu_loadvm_state(mis->from_src_file);
+    mis->loadvm_co = NULL;
 
     ps = postcopy_state_get();
     trace_process_incoming_migration_co_end(ret, ps);
@@ -626,43 +549,25 @@ process_incoming_migration_co(void *opaque)
         /* Else if something went wrong then just fall out of the normal exit */
     }
 
-    /* we get COLO info, and know if we are in COLO mode */
-    if (!ret && migration_incoming_colo_enabled()) {
-        /* Make sure all file formats throw away their mutable metadata */
-        bdrv_activate_all(&local_err);
-        if (local_err) {
-            error_report_err(local_err);
-            goto fail;
-        }
-
-        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
-                           colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
-        mis->have_colo_incoming_thread = true;
-        qemu_coroutine_yield();
-
-        qemu_mutex_unlock_iothread();
-        /* Wait checkpoint incoming thread exit before free resource */
-        qemu_thread_join(&mis->colo_incoming_thread);
-        qemu_mutex_lock_iothread();
-        /* We hold the global iothread lock, so it is safe here */
-        colo_release_ram_cache();
-    }
-
     if (ret < 0) {
         error_report("load of migration failed: %s", strerror(-ret));
         goto fail;
     }
+
+    if (colo_incoming_co() < 0) {
+        goto fail;
+    }
+
     mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
     qemu_bh_schedule(mis->bh);
-    mis->migration_incoming_co = NULL;
     return;
 fail:
-    local_err = NULL;
     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                       MIGRATION_STATUS_FAILED);
     qemu_fclose(mis->from_src_file);
     multifd_load_cleanup();
+    compress_threads_load_cleanup();
     exit(EXIT_FAILURE);
 }
@@ -742,7 +647,7 @@ void migration_fd_process_incoming(QEMUFile *f, Error **errp)
 static bool migration_should_start_incoming(bool main_channel)
 {
     /* Multifd doesn't start unless all channels are established */
-    if (migrate_use_multifd()) {
+    if (migrate_multifd()) {
         return migration_has_all_channels();
     }
@@ -769,7 +674,7 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
     uint32_t channel_magic = 0;
     int ret = 0;
 
-    if (migrate_use_multifd() && !migrate_postcopy_ram() &&
+    if (migrate_multifd() && !migrate_postcopy_ram() &&
         qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
         /*
          * With multiple channels, it is possible that we receive channels
@@ -808,7 +713,7 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
     } else {
         /* Multiple connections */
         assert(migration_needs_multiple_sockets());
-        if (migrate_use_multifd()) {
+ if (migrate_multifd()) { multifd_recv_new_channel(ioc, &local_err); } else { assert(migrate_postcopy_preempt()); @@ -844,7 +749,7 @@ bool migration_has_all_channels(void) return false; } - if (migrate_use_multifd()) { + if (migrate_multifd()) { return multifd_recv_all_channels_created(); } @@ -929,139 +834,6 @@ void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value) migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf); } -MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp) -{ - MigrationCapabilityStatusList *head = NULL, **tail = &head; - MigrationCapabilityStatus *caps; - MigrationState *s = migrate_get_current(); - int i; - - for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { -#ifndef CONFIG_LIVE_BLOCK_MIGRATION - if (i == MIGRATION_CAPABILITY_BLOCK) { - continue; - } -#endif - caps = g_malloc0(sizeof(*caps)); - caps->capability = i; - caps->state = s->enabled_capabilities[i]; - QAPI_LIST_APPEND(tail, caps); - } - - return head; -} - -MigrationParameters *qmp_query_migrate_parameters(Error **errp) -{ - MigrationParameters *params; - MigrationState *s = migrate_get_current(); - - /* TODO use QAPI_CLONE() instead of duplicating it inline */ - params = g_malloc0(sizeof(*params)); - params->has_compress_level = true; - params->compress_level = s->parameters.compress_level; - params->has_compress_threads = true; - params->compress_threads = s->parameters.compress_threads; - params->has_compress_wait_thread = true; - params->compress_wait_thread = s->parameters.compress_wait_thread; - params->has_decompress_threads = true; - params->decompress_threads = s->parameters.decompress_threads; - params->has_throttle_trigger_threshold = true; - params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold; - params->has_cpu_throttle_initial = true; - params->cpu_throttle_initial = s->parameters.cpu_throttle_initial; - params->has_cpu_throttle_increment = true; - params->cpu_throttle_increment = s->parameters.cpu_throttle_increment; - params->has_cpu_throttle_tailslow = true; - params->cpu_throttle_tailslow = s->parameters.cpu_throttle_tailslow; - params->tls_creds = g_strdup(s->parameters.tls_creds); - params->tls_hostname = g_strdup(s->parameters.tls_hostname); - params->tls_authz = g_strdup(s->parameters.tls_authz ? 
- s->parameters.tls_authz : ""); - params->has_max_bandwidth = true; - params->max_bandwidth = s->parameters.max_bandwidth; - params->has_downtime_limit = true; - params->downtime_limit = s->parameters.downtime_limit; - params->has_x_checkpoint_delay = true; - params->x_checkpoint_delay = s->parameters.x_checkpoint_delay; - params->has_block_incremental = true; - params->block_incremental = s->parameters.block_incremental; - params->has_multifd_channels = true; - params->multifd_channels = s->parameters.multifd_channels; - params->has_multifd_compression = true; - params->multifd_compression = s->parameters.multifd_compression; - params->has_multifd_zlib_level = true; - params->multifd_zlib_level = s->parameters.multifd_zlib_level; - params->has_multifd_zstd_level = true; - params->multifd_zstd_level = s->parameters.multifd_zstd_level; - params->has_xbzrle_cache_size = true; - params->xbzrle_cache_size = s->parameters.xbzrle_cache_size; - params->has_max_postcopy_bandwidth = true; - params->max_postcopy_bandwidth = s->parameters.max_postcopy_bandwidth; - params->has_max_cpu_throttle = true; - params->max_cpu_throttle = s->parameters.max_cpu_throttle; - params->has_announce_initial = true; - params->announce_initial = s->parameters.announce_initial; - params->has_announce_max = true; - params->announce_max = s->parameters.announce_max; - params->has_announce_rounds = true; - params->announce_rounds = s->parameters.announce_rounds; - params->has_announce_step = true; - params->announce_step = s->parameters.announce_step; - - if (s->parameters.has_block_bitmap_mapping) { - params->has_block_bitmap_mapping = true; - params->block_bitmap_mapping = - QAPI_CLONE(BitmapMigrationNodeAliasList, - s->parameters.block_bitmap_mapping); - } - - return params; -} - -void qmp_client_migrate_info(const char *protocol, const char *hostname, - bool has_port, int64_t port, - bool has_tls_port, int64_t tls_port, - const char *cert_subject, - Error **errp) -{ - if (strcmp(protocol, "spice") == 0) { - if (!qemu_using_spice(errp)) { - return; - } - - if (!has_port && !has_tls_port) { - error_setg(errp, QERR_MISSING_PARAMETER, "port/tls-port"); - return; - } - - if (qemu_spice.migrate_info(hostname, - has_port ? port : -1, - has_tls_port ? tls_port : -1, - cert_subject)) { - error_setg(errp, "Could not set up display for migration"); - return; - } - return; - } - - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "protocol", "'spice'"); -} - -AnnounceParameters *migrate_announce_params(void) -{ - static AnnounceParameters ap; - - MigrationState *s = migrate_get_current(); - - ap.initial = s->parameters.announce_initial; - ap.max = s->parameters.announce_max; - ap.rounds = s->parameters.announce_rounds; - ap.step = s->parameters.announce_step; - - return ≈ -} - /* * Return true if we're already in the middle of a migration * (i.e. any of the active or setup states) @@ -1140,26 +912,28 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) size_t page_size = qemu_target_page_size(); info->ram = g_malloc0(sizeof(*info->ram)); - info->ram->transferred = stat64_get(&ram_atomic_counters.transferred); + info->ram->transferred = stat64_get(&mig_stats.transferred); info->ram->total = ram_bytes_total(); - info->ram->duplicate = stat64_get(&ram_atomic_counters.duplicate); + info->ram->duplicate = stat64_get(&mig_stats.zero_pages); /* legacy value. 
It is not used anymore */ info->ram->skipped = 0; - info->ram->normal = stat64_get(&ram_atomic_counters.normal); + info->ram->normal = stat64_get(&mig_stats.normal_pages); info->ram->normal_bytes = info->ram->normal * page_size; info->ram->mbps = s->mbps; - info->ram->dirty_sync_count = ram_counters.dirty_sync_count; + info->ram->dirty_sync_count = + stat64_get(&mig_stats.dirty_sync_count); info->ram->dirty_sync_missed_zero_copy = - ram_counters.dirty_sync_missed_zero_copy; - info->ram->postcopy_requests = ram_counters.postcopy_requests; + stat64_get(&mig_stats.dirty_sync_missed_zero_copy); + info->ram->postcopy_requests = + stat64_get(&mig_stats.postcopy_requests); info->ram->page_size = page_size; - info->ram->multifd_bytes = ram_counters.multifd_bytes; + info->ram->multifd_bytes = stat64_get(&mig_stats.multifd_bytes); info->ram->pages_per_second = s->pages_per_second; - info->ram->precopy_bytes = ram_counters.precopy_bytes; - info->ram->downtime_bytes = ram_counters.downtime_bytes; - info->ram->postcopy_bytes = stat64_get(&ram_atomic_counters.postcopy_bytes); + info->ram->precopy_bytes = stat64_get(&mig_stats.precopy_bytes); + info->ram->downtime_bytes = stat64_get(&mig_stats.downtime_bytes); + info->ram->postcopy_bytes = stat64_get(&mig_stats.postcopy_bytes); - if (migrate_use_xbzrle()) { + if (migrate_xbzrle()) { info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache)); info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size(); info->xbzrle_cache->bytes = xbzrle_counters.bytes; @@ -1170,7 +944,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) info->xbzrle_cache->overflow = xbzrle_counters.overflow; } - if (migrate_use_compression()) { + if (migrate_compress()) { info->compression = g_malloc0(sizeof(*info->compression)); info->compression->pages = compression_counters.pages; info->compression->busy = compression_counters.busy; @@ -1188,7 +962,8 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) if (s->state != MIGRATION_STATUS_COMPLETED) { info->ram->remaining = ram_bytes_remaining(); - info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate; + info->ram->dirty_pages_rate = + stat64_get(&mig_stats.dirty_pages_rate); } } @@ -1272,172 +1047,6 @@ static void fill_source_migration_info(MigrationInfo *info) info->status = state; } -typedef enum WriteTrackingSupport { - WT_SUPPORT_UNKNOWN = 0, - WT_SUPPORT_ABSENT, - WT_SUPPORT_AVAILABLE, - WT_SUPPORT_COMPATIBLE -} WriteTrackingSupport; - -static -WriteTrackingSupport migrate_query_write_tracking(void) -{ - /* Check if kernel supports required UFFD features */ - if (!ram_write_tracking_available()) { - return WT_SUPPORT_ABSENT; - } - /* - * Check if current memory configuration is - * compatible with required UFFD features. - */ - if (!ram_write_tracking_compatible()) { - return WT_SUPPORT_AVAILABLE; - } - - return WT_SUPPORT_COMPATIBLE; -} - -/** - * @migration_caps_check - check capability validity - * - * @cap_list: old capability list, array of bool - * @params: new capabilities to be applied soon - * @errp: set *errp if the check failed, with reason - * - * Returns true if check passed, otherwise false. 
- */ -static bool migrate_caps_check(bool *cap_list, - MigrationCapabilityStatusList *params, - Error **errp) -{ - MigrationCapabilityStatusList *cap; - bool old_postcopy_cap; - MigrationIncomingState *mis = migration_incoming_get_current(); - - old_postcopy_cap = cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]; - - for (cap = params; cap; cap = cap->next) { - cap_list[cap->value->capability] = cap->value->state; - } - -#ifndef CONFIG_LIVE_BLOCK_MIGRATION - if (cap_list[MIGRATION_CAPABILITY_BLOCK]) { - error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) " - "block migration"); - error_append_hint(errp, "Use drive_mirror+NBD instead.\n"); - return false; - } -#endif - -#ifndef CONFIG_REPLICATION - if (cap_list[MIGRATION_CAPABILITY_X_COLO]) { - error_setg(errp, "QEMU compiled without replication module" - " can't enable COLO"); - error_append_hint(errp, "Please enable replication before COLO.\n"); - return false; - } -#endif - - if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) { - /* This check is reasonably expensive, so only when it's being - * set the first time, also it's only the destination that needs - * special support. - */ - if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) && - !postcopy_ram_supported_by_host(mis)) { - /* postcopy_ram_supported_by_host will have emitted a more - * detailed message - */ - error_setg(errp, "Postcopy is not supported"); - return false; - } - - if (cap_list[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) { - error_setg(errp, "Postcopy is not compatible with ignore-shared"); - return false; - } - } - - if (cap_list[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) { - WriteTrackingSupport wt_support; - int idx; - /* - * Check if 'background-snapshot' capability is supported by - * host kernel and compatible with guest memory configuration. - */ - wt_support = migrate_query_write_tracking(); - if (wt_support < WT_SUPPORT_AVAILABLE) { - error_setg(errp, "Background-snapshot is not supported by host kernel"); - return false; - } - if (wt_support < WT_SUPPORT_COMPATIBLE) { - error_setg(errp, "Background-snapshot is not compatible " - "with guest memory configuration"); - return false; - } - - /* - * Check if there are any migration capabilities - * incompatible with 'background-snapshot'. 
- */ - for (idx = 0; idx < check_caps_background_snapshot.size; idx++) { - int incomp_cap = check_caps_background_snapshot.caps[idx]; - if (cap_list[incomp_cap]) { - error_setg(errp, - "Background-snapshot is not compatible with %s", - MigrationCapability_str(incomp_cap)); - return false; - } - } - } - -#ifdef CONFIG_LINUX - if (cap_list[MIGRATION_CAPABILITY_ZERO_COPY_SEND] && - (!cap_list[MIGRATION_CAPABILITY_MULTIFD] || - cap_list[MIGRATION_CAPABILITY_COMPRESS] || - cap_list[MIGRATION_CAPABILITY_XBZRLE] || - migrate_multifd_compression() || - migrate_use_tls())) { - error_setg(errp, - "Zero copy only available for non-compressed non-TLS multifd migration"); - return false; - } -#else - if (cap_list[MIGRATION_CAPABILITY_ZERO_COPY_SEND]) { - error_setg(errp, - "Zero copy currently only available on Linux"); - return false; - } -#endif - - if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]) { - if (!cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) { - error_setg(errp, "Postcopy preempt requires postcopy-ram"); - return false; - } - - /* - * Preempt mode requires urgent pages to be sent in separate - * channel, OTOH compression logic will disorder all pages into - * different compression channels, which is not compatible with the - * preempt assumptions on channel assignments. - */ - if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) { - error_setg(errp, "Postcopy preempt not compatible with compress"); - return false; - } - } - - if (cap_list[MIGRATION_CAPABILITY_MULTIFD]) { - if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) { - error_setg(errp, "Multifd is not compatible with compress"); - return false; - } - } - - return true; -} - static void fill_destination_migration_info(MigrationInfo *info) { MigrationIncomingState *mis = migration_incoming_get_current(); @@ -1480,439 +1089,6 @@ MigrationInfo *qmp_query_migrate(Error **errp) return info; } -void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params, - Error **errp) -{ - MigrationState *s = migrate_get_current(); - MigrationCapabilityStatusList *cap; - bool cap_list[MIGRATION_CAPABILITY__MAX]; - - if (migration_is_running(s->state)) { - error_setg(errp, QERR_MIGRATION_ACTIVE); - return; - } - - memcpy(cap_list, s->enabled_capabilities, sizeof(cap_list)); - if (!migrate_caps_check(cap_list, params, errp)) { - return; - } - - for (cap = params; cap; cap = cap->next) { - s->enabled_capabilities[cap->value->capability] = cap->value->state; - } -} - -/* - * Check whether the parameters are valid. Error will be put into errp - * (if provided). Return true if valid, otherwise false. 
- */ -static bool migrate_params_check(MigrationParameters *params, Error **errp) -{ - if (params->has_compress_level && - (params->compress_level > 9)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level", - "a value between 0 and 9"); - return false; - } - - if (params->has_compress_threads && (params->compress_threads < 1)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "compress_threads", - "a value between 1 and 255"); - return false; - } - - if (params->has_decompress_threads && (params->decompress_threads < 1)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "decompress_threads", - "a value between 1 and 255"); - return false; - } - - if (params->has_throttle_trigger_threshold && - (params->throttle_trigger_threshold < 1 || - params->throttle_trigger_threshold > 100)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "throttle_trigger_threshold", - "an integer in the range of 1 to 100"); - return false; - } - - if (params->has_cpu_throttle_initial && - (params->cpu_throttle_initial < 1 || - params->cpu_throttle_initial > 99)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "cpu_throttle_initial", - "an integer in the range of 1 to 99"); - return false; - } - - if (params->has_cpu_throttle_increment && - (params->cpu_throttle_increment < 1 || - params->cpu_throttle_increment > 99)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "cpu_throttle_increment", - "an integer in the range of 1 to 99"); - return false; - } - - if (params->has_max_bandwidth && (params->max_bandwidth > SIZE_MAX)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "max_bandwidth", - "an integer in the range of 0 to "stringify(SIZE_MAX) - " bytes/second"); - return false; - } - - if (params->has_downtime_limit && - (params->downtime_limit > MAX_MIGRATE_DOWNTIME)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "downtime_limit", - "an integer in the range of 0 to " - stringify(MAX_MIGRATE_DOWNTIME)" ms"); - return false; - } - - /* x_checkpoint_delay is now always positive */ - - if (params->has_multifd_channels && (params->multifd_channels < 1)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "multifd_channels", - "a value between 1 and 255"); - return false; - } - - if (params->has_multifd_zlib_level && - (params->multifd_zlib_level > 9)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zlib_level", - "a value between 0 and 9"); - return false; - } - - if (params->has_multifd_zstd_level && - (params->multifd_zstd_level > 20)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zstd_level", - "a value between 0 and 20"); - return false; - } - - if (params->has_xbzrle_cache_size && - (params->xbzrle_cache_size < qemu_target_page_size() || - !is_power_of_2(params->xbzrle_cache_size))) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "xbzrle_cache_size", - "a power of two no less than the target page size"); - return false; - } - - if (params->has_max_cpu_throttle && - (params->max_cpu_throttle < params->cpu_throttle_initial || - params->max_cpu_throttle > 99)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "max_cpu_throttle", - "an integer in the range of cpu_throttle_initial to 99"); - return false; - } - - if (params->has_announce_initial && - params->announce_initial > 100000) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "announce_initial", - "a value between 0 and 100000"); - return false; - } - if (params->has_announce_max && - params->announce_max > 100000) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - 
"announce_max", - "a value between 0 and 100000"); - return false; - } - if (params->has_announce_rounds && - params->announce_rounds > 1000) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "announce_rounds", - "a value between 0 and 1000"); - return false; - } - if (params->has_announce_step && - (params->announce_step < 1 || - params->announce_step > 10000)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "announce_step", - "a value between 0 and 10000"); - return false; - } - - if (params->has_block_bitmap_mapping && - !check_dirty_bitmap_mig_alias_map(params->block_bitmap_mapping, errp)) { - error_prepend(errp, "Invalid mapping given for block-bitmap-mapping: "); - return false; - } - -#ifdef CONFIG_LINUX - if (migrate_use_zero_copy_send() && - ((params->has_multifd_compression && params->multifd_compression) || - (params->tls_creds && *params->tls_creds))) { - error_setg(errp, - "Zero copy only available for non-compressed non-TLS multifd migration"); - return false; - } -#endif - - return true; -} - -static void migrate_params_test_apply(MigrateSetParameters *params, - MigrationParameters *dest) -{ - *dest = migrate_get_current()->parameters; - - /* TODO use QAPI_CLONE() instead of duplicating it inline */ - - if (params->has_compress_level) { - dest->compress_level = params->compress_level; - } - - if (params->has_compress_threads) { - dest->compress_threads = params->compress_threads; - } - - if (params->has_compress_wait_thread) { - dest->compress_wait_thread = params->compress_wait_thread; - } - - if (params->has_decompress_threads) { - dest->decompress_threads = params->decompress_threads; - } - - if (params->has_throttle_trigger_threshold) { - dest->throttle_trigger_threshold = params->throttle_trigger_threshold; - } - - if (params->has_cpu_throttle_initial) { - dest->cpu_throttle_initial = params->cpu_throttle_initial; - } - - if (params->has_cpu_throttle_increment) { - dest->cpu_throttle_increment = params->cpu_throttle_increment; - } - - if (params->has_cpu_throttle_tailslow) { - dest->cpu_throttle_tailslow = params->cpu_throttle_tailslow; - } - - if (params->tls_creds) { - assert(params->tls_creds->type == QTYPE_QSTRING); - dest->tls_creds = params->tls_creds->u.s; - } - - if (params->tls_hostname) { - assert(params->tls_hostname->type == QTYPE_QSTRING); - dest->tls_hostname = params->tls_hostname->u.s; - } - - if (params->has_max_bandwidth) { - dest->max_bandwidth = params->max_bandwidth; - } - - if (params->has_downtime_limit) { - dest->downtime_limit = params->downtime_limit; - } - - if (params->has_x_checkpoint_delay) { - dest->x_checkpoint_delay = params->x_checkpoint_delay; - } - - if (params->has_block_incremental) { - dest->block_incremental = params->block_incremental; - } - if (params->has_multifd_channels) { - dest->multifd_channels = params->multifd_channels; - } - if (params->has_multifd_compression) { - dest->multifd_compression = params->multifd_compression; - } - if (params->has_xbzrle_cache_size) { - dest->xbzrle_cache_size = params->xbzrle_cache_size; - } - if (params->has_max_postcopy_bandwidth) { - dest->max_postcopy_bandwidth = params->max_postcopy_bandwidth; - } - if (params->has_max_cpu_throttle) { - dest->max_cpu_throttle = params->max_cpu_throttle; - } - if (params->has_announce_initial) { - dest->announce_initial = params->announce_initial; - } - if (params->has_announce_max) { - dest->announce_max = params->announce_max; - } - if (params->has_announce_rounds) { - dest->announce_rounds = params->announce_rounds; - } - if 
(params->has_announce_step) { - dest->announce_step = params->announce_step; - } - - if (params->has_block_bitmap_mapping) { - dest->has_block_bitmap_mapping = true; - dest->block_bitmap_mapping = params->block_bitmap_mapping; - } -} - -static void migrate_params_apply(MigrateSetParameters *params, Error **errp) -{ - MigrationState *s = migrate_get_current(); - - /* TODO use QAPI_CLONE() instead of duplicating it inline */ - - if (params->has_compress_level) { - s->parameters.compress_level = params->compress_level; - } - - if (params->has_compress_threads) { - s->parameters.compress_threads = params->compress_threads; - } - - if (params->has_compress_wait_thread) { - s->parameters.compress_wait_thread = params->compress_wait_thread; - } - - if (params->has_decompress_threads) { - s->parameters.decompress_threads = params->decompress_threads; - } - - if (params->has_throttle_trigger_threshold) { - s->parameters.throttle_trigger_threshold = params->throttle_trigger_threshold; - } - - if (params->has_cpu_throttle_initial) { - s->parameters.cpu_throttle_initial = params->cpu_throttle_initial; - } - - if (params->has_cpu_throttle_increment) { - s->parameters.cpu_throttle_increment = params->cpu_throttle_increment; - } - - if (params->has_cpu_throttle_tailslow) { - s->parameters.cpu_throttle_tailslow = params->cpu_throttle_tailslow; - } - - if (params->tls_creds) { - g_free(s->parameters.tls_creds); - assert(params->tls_creds->type == QTYPE_QSTRING); - s->parameters.tls_creds = g_strdup(params->tls_creds->u.s); - } - - if (params->tls_hostname) { - g_free(s->parameters.tls_hostname); - assert(params->tls_hostname->type == QTYPE_QSTRING); - s->parameters.tls_hostname = g_strdup(params->tls_hostname->u.s); - } - - if (params->tls_authz) { - g_free(s->parameters.tls_authz); - assert(params->tls_authz->type == QTYPE_QSTRING); - s->parameters.tls_authz = g_strdup(params->tls_authz->u.s); - } - - if (params->has_max_bandwidth) { - s->parameters.max_bandwidth = params->max_bandwidth; - if (s->to_dst_file && !migration_in_postcopy()) { - qemu_file_set_rate_limit(s->to_dst_file, - s->parameters.max_bandwidth / XFER_LIMIT_RATIO); - } - } - - if (params->has_downtime_limit) { - s->parameters.downtime_limit = params->downtime_limit; - } - - if (params->has_x_checkpoint_delay) { - s->parameters.x_checkpoint_delay = params->x_checkpoint_delay; - if (migration_in_colo_state()) { - colo_checkpoint_notify(s); - } - } - - if (params->has_block_incremental) { - s->parameters.block_incremental = params->block_incremental; - } - if (params->has_multifd_channels) { - s->parameters.multifd_channels = params->multifd_channels; - } - if (params->has_multifd_compression) { - s->parameters.multifd_compression = params->multifd_compression; - } - if (params->has_xbzrle_cache_size) { - s->parameters.xbzrle_cache_size = params->xbzrle_cache_size; - xbzrle_cache_resize(params->xbzrle_cache_size, errp); - } - if (params->has_max_postcopy_bandwidth) { - s->parameters.max_postcopy_bandwidth = params->max_postcopy_bandwidth; - if (s->to_dst_file && migration_in_postcopy()) { - qemu_file_set_rate_limit(s->to_dst_file, - s->parameters.max_postcopy_bandwidth / XFER_LIMIT_RATIO); - } - } - if (params->has_max_cpu_throttle) { - s->parameters.max_cpu_throttle = params->max_cpu_throttle; - } - if (params->has_announce_initial) { - s->parameters.announce_initial = params->announce_initial; - } - if (params->has_announce_max) { - s->parameters.announce_max = params->announce_max; - } - if (params->has_announce_rounds) { - 
s->parameters.announce_rounds = params->announce_rounds; - } - if (params->has_announce_step) { - s->parameters.announce_step = params->announce_step; - } - - if (params->has_block_bitmap_mapping) { - qapi_free_BitmapMigrationNodeAliasList( - s->parameters.block_bitmap_mapping); - - s->parameters.has_block_bitmap_mapping = true; - s->parameters.block_bitmap_mapping = - QAPI_CLONE(BitmapMigrationNodeAliasList, - params->block_bitmap_mapping); - } -} - -void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp) -{ - MigrationParameters tmp; - - /* TODO Rewrite "" to null instead */ - if (params->tls_creds - && params->tls_creds->type == QTYPE_QNULL) { - qobject_unref(params->tls_creds->u.n); - params->tls_creds->type = QTYPE_QSTRING; - params->tls_creds->u.s = strdup(""); - } - /* TODO Rewrite "" to null instead */ - if (params->tls_hostname - && params->tls_hostname->type == QTYPE_QNULL) { - qobject_unref(params->tls_hostname->u.n); - params->tls_hostname->type = QTYPE_QSTRING; - params->tls_hostname->u.s = strdup(""); - } - - migrate_params_test_apply(params, &tmp); - - if (!migrate_params_check(&tmp, errp)) { - /* Invalid parameter */ - return; - } - - migrate_params_apply(params, errp); -} - - void qmp_migrate_start_postcopy(Error **errp) { MigrationState *s = migrate_get_current(); @@ -1946,42 +1122,6 @@ void migrate_set_state(int *state, int old_state, int new_state) } } -static MigrationCapabilityStatus *migrate_cap_add(MigrationCapability index, - bool state) -{ - MigrationCapabilityStatus *cap; - - cap = g_new0(MigrationCapabilityStatus, 1); - cap->capability = index; - cap->state = state; - - return cap; -} - -void migrate_set_block_enabled(bool value, Error **errp) -{ - MigrationCapabilityStatusList *cap = NULL; - - QAPI_LIST_PREPEND(cap, migrate_cap_add(MIGRATION_CAPABILITY_BLOCK, value)); - qmp_migrate_set_capabilities(cap, errp); - qapi_free_MigrationCapabilityStatusList(cap); -} - -static void migrate_set_block_incremental(MigrationState *s, bool value) -{ - s->parameters.block_incremental = value; -} - -static void block_cleanup_parameters(MigrationState *s) -{ - if (s->must_remove_block_options) { - /* setting to false can never fail */ - migrate_set_block_enabled(false, &error_abort); - migrate_set_block_incremental(s, false); - s->must_remove_block_options = false; - } -} - static void migrate_fd_cleanup(MigrationState *s) { qemu_bh_delete(s->cleanup_bh); @@ -2036,7 +1176,7 @@ static void migrate_fd_cleanup(MigrationState *s) error_report_err(error_copy(s->error)); } notifier_list_notify(&migration_state_notifiers, s); - block_cleanup_parameters(s); + block_cleanup_parameters(); yank_unregister_instance(MIGRATION_YANK_INSTANCE); } @@ -2454,17 +1594,16 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, } if (blk || blk_inc) { - if (migrate_colo_enabled()) { + if (migrate_colo()) { error_setg(errp, "No disk migration is required in COLO mode"); return false; } - if (migrate_use_block() || migrate_use_block_incremental()) { + if (migrate_block() || migrate_block_incremental()) { error_setg(errp, "Command options are incompatible with " "current migration capabilities"); return false; } - migrate_set_block_enabled(true, &local_err); - if (local_err) { + if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, &local_err)) { error_propagate(errp, local_err); return false; } @@ -2472,15 +1611,15 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, } if (blk_inc) { - migrate_set_block_incremental(s, true); + 
migrate_set_block_incremental(true); } migrate_init(s); /* - * set ram_counters compression_counters memory to zero for a + * set mig_stats compression_counters memory to zero for a * new migration */ - memset(&ram_counters, 0, sizeof(ram_counters)); + memset(&mig_stats, 0, sizeof(mig_stats)); memset(&compression_counters, 0, sizeof(compression_counters)); return true; @@ -2531,7 +1670,7 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk, "a valid migration protocol"); migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED); - block_cleanup_parameters(s); + block_cleanup_parameters(); return; } @@ -2561,285 +1700,6 @@ void qmp_migrate_continue(MigrationStatus state, Error **errp) qemu_sem_post(&s->pause_sem); } -bool migrate_release_ram(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM]; -} - -bool migrate_postcopy_ram(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM]; -} - -bool migrate_postcopy(void) -{ - return migrate_postcopy_ram() || migrate_dirty_bitmaps(); -} - -bool migrate_auto_converge(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE]; -} - -bool migrate_zero_blocks(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS]; -} - -bool migrate_postcopy_blocktime(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME]; -} - -bool migrate_use_compression(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS]; -} - -int migrate_compress_level(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->parameters.compress_level; -} - -int migrate_compress_threads(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->parameters.compress_threads; -} - -int migrate_compress_wait_thread(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->parameters.compress_wait_thread; -} - -int migrate_decompress_threads(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->parameters.decompress_threads; -} - -bool migrate_dirty_bitmaps(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS]; -} - -bool migrate_ignore_shared(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED]; -} - -bool migrate_validate_uuid(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_VALIDATE_UUID]; -} - -bool migrate_use_events(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS]; -} - -bool migrate_use_multifd(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_MULTIFD]; -} - -bool migrate_pause_before_switchover(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[ - MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER]; -} - -int migrate_multifd_channels(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->parameters.multifd_channels; -} - 
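Every accessor deleted in this run (which continues below) had the same three-line body, and the series moves them out of migration.c under the shorter migrate_*() names already used above; the #include "options.h" added earlier suggests migration/options.c as their new home. A standalone model of the pattern, with hypothetical CAP_* names standing in for MIGRATION_CAPABILITY_* and a stub state object in place of MigrationState:

#include <stdbool.h>
#include <stdio.h>

typedef enum {
    CAP_MULTIFD,
    CAP_XBZRLE,
    CAP__MAX,
} Capability;

typedef struct {
    /* mirrors the enabled_capabilities[] -> capabilities[] rename
     * visible in the migration.h hunk further down */
    bool capabilities[CAP__MAX];
} State;

static State current = { .capabilities = { [CAP_MULTIFD] = true } };

static State *get_current(void)
{
    return &current;
}

/* One tiny getter per capability: the shape migrate_multifd(),
 * migrate_xbzrle(), ... presumably keep after the move */
static bool cap_multifd(void)
{
    return get_current()->capabilities[CAP_MULTIFD];
}

static bool cap_xbzrle(void)
{
    return get_current()->capabilities[CAP_XBZRLE];
}

int main(void)
{
    printf("multifd: %d, xbzrle: %d\n", cap_multifd(), cap_xbzrle());
    return 0;
}

One getter per knob keeps call sites grep-able, and the array rename stays local to these helpers instead of leaking into every caller.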
-MultiFDCompression migrate_multifd_compression(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - assert(s->parameters.multifd_compression < MULTIFD_COMPRESSION__MAX); - return s->parameters.multifd_compression; -} - -int migrate_multifd_zlib_level(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->parameters.multifd_zlib_level; -} - -int migrate_multifd_zstd_level(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->parameters.multifd_zstd_level; -} - -#ifdef CONFIG_LINUX -bool migrate_use_zero_copy_send(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_COPY_SEND]; -} -#endif - -int migrate_use_tls(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->parameters.tls_creds && *s->parameters.tls_creds; -} - -int migrate_use_xbzrle(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE]; -} - -uint64_t migrate_xbzrle_cache_size(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->parameters.xbzrle_cache_size; -} - -static int64_t migrate_max_postcopy_bandwidth(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->parameters.max_postcopy_bandwidth; -} - -bool migrate_use_block(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK]; -} - -bool migrate_use_return_path(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_RETURN_PATH]; -} - -bool migrate_use_block_incremental(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->parameters.block_incremental; -} - -bool migrate_background_snapshot(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]; -} - -bool migrate_postcopy_preempt(void) -{ - MigrationState *s; - - s = migrate_get_current(); - - return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]; -} - /* migration thread support */ /* * Something bad happened to the RP stream, mark an error @@ -3199,7 +2059,7 @@ static int postcopy_start(MigrationState *ms) QIOChannelBuffer *bioc; QEMUFile *fb; int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); - int64_t bandwidth = migrate_max_postcopy_bandwidth(); + uint64_t bandwidth = migrate_max_postcopy_bandwidth(); bool restart_block = false; int cur_state = MIGRATION_STATUS_ACTIVE; @@ -3260,12 +2120,7 @@ static int postcopy_start(MigrationState *ms) * will notice we're in POSTCOPY_ACTIVE and not actually * wrap their state up here */ - /* 0 max-postcopy-bandwidth means unlimited */ - if (!bandwidth) { - qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX); - } else { - qemu_file_set_rate_limit(ms->to_dst_file, bandwidth / XFER_LIMIT_RATIO); - } + migration_rate_set(bandwidth); if (migrate_postcopy_ram()) { /* Ping just for debugging, helps line traces up */ qemu_savevm_send_ping(ms->to_dst_file, 2); @@ -3436,7 +2291,6 @@ static void migration_completion(MigrationState *s) ret = global_state_store(); if (!ret) { - bool inactivate = !migrate_colo_enabled(); ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); trace_migration_completion_vm_stop(ret); if (ret >= 0) { @@ -3444,12 +2298,15 @@ static void migration_completion(MigrationState *s) MIGRATION_STATUS_DEVICE); } if (ret >= 0) { - qemu_file_set_rate_limit(s->to_dst_file, 
INT64_MAX); + /* + * Inactivate disks except in COLO, and track that we + * have done so in order to remember to reactivate + * them if migration fails or is cancelled. + */ + s->block_inactive = !migrate_colo(); + migration_rate_set(RATE_LIMIT_DISABLED); ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, - inactivate); - } - if (inactivate && ret >= 0) { - s->block_inactive = true; + s->block_inactive); } } qemu_mutex_unlock_iothread(); @@ -3464,8 +2321,12 @@ static void migration_completion(MigrationState *s) qemu_savevm_state_complete_postcopy(s->to_dst_file); qemu_mutex_unlock_iothread(); - /* Shutdown the postcopy fast path thread */ - if (migrate_postcopy_preempt()) { + /* + * Shutdown the postcopy fast path thread. This is only needed + * when dest QEMU binary is old (7.1/7.2). QEMU 8.0+ doesn't need + * this. + */ + if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { postcopy_preempt_shutdown_file(s); } @@ -3486,16 +2347,16 @@ static void migration_completion(MigrationState *s) rp_error = await_return_path_close_on_source(s); trace_migration_return_path_end_after(rp_error); if (rp_error) { - goto fail_invalidate; + goto fail; } } if (qemu_file_get_error(s->to_dst_file)) { trace_migration_completion_file_err(); - goto fail_invalidate; + goto fail; } - if (migrate_colo_enabled() && s->state == MIGRATION_STATUS_ACTIVE) { + if (migrate_colo() && s->state == MIGRATION_STATUS_ACTIVE) { /* COLO does not support postcopy */ migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_COLO); @@ -3506,12 +2367,13 @@ static void migration_completion(MigrationState *s) return; -fail_invalidate: - /* If not doing postcopy, vm_start() will be called: let's regain - * control on images. - */ - if (s->state == MIGRATION_STATUS_ACTIVE || - s->state == MIGRATION_STATUS_DEVICE) { +fail: + if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE || + s->state == MIGRATION_STATUS_DEVICE)) { + /* + * If not doing postcopy, vm_start() will be called: let's + * regain control on images. + */ Error *local_err = NULL; qemu_mutex_lock_iothread(); @@ -3524,7 +2386,6 @@ fail_invalidate: qemu_mutex_unlock_iothread(); } -fail: migrate_set_state(&s->state, current_active_state, MIGRATION_STATUS_FAILED); } @@ -3573,12 +2434,6 @@ fail: MIGRATION_STATUS_FAILED); } -bool migrate_colo_enabled(void) -{ - MigrationState *s = migrate_get_current(); - return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO]; -} - typedef enum MigThrError { /* No error detected */ MIG_THR_ERR_NONE = 0, @@ -3770,16 +2625,9 @@ static MigThrError migration_detect_error(MigrationState *s) } } -/* How many bytes have we transferred since the beginning of the migration */ -static uint64_t migration_total_bytes(MigrationState *s) -{ - return qemu_file_total_transferred(s->to_dst_file) + - ram_counters.multifd_bytes; -} - static void migration_calculate_complete(MigrationState *s) { - uint64_t bytes = migration_total_bytes(s); + uint64_t bytes = migration_transferred_bytes(s->to_dst_file); int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); int64_t transfer_time; @@ -3805,7 +2653,7 @@ static void update_iteration_initial_status(MigrationState *s) * wrong speed calculation. 
*/ s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); - s->iteration_initial_bytes = migration_total_bytes(s); + s->iteration_initial_bytes = migration_transferred_bytes(s->to_dst_file); s->iteration_initial_pages = ram_get_total_transferred_pages(); } @@ -3820,11 +2668,11 @@ static void migration_update_counters(MigrationState *s, return; } - current_bytes = migration_total_bytes(s); + current_bytes = migration_transferred_bytes(s->to_dst_file); transferred = current_bytes - s->iteration_initial_bytes; time_spent = current_time - s->iteration_start_time; bandwidth = (double)transferred / time_spent; - s->threshold_size = bandwidth * s->parameters.downtime_limit; + s->threshold_size = bandwidth * migrate_downtime_limit(); s->mbps = (((double) transferred * 8.0) / ((double) time_spent / 1000.0)) / 1000.0 / 1000.0; @@ -3838,11 +2686,13 @@ static void migration_update_counters(MigrationState *s, * if we haven't sent anything, we don't want to * recalculate. 10000 is a small enough number for our purposes */ - if (ram_counters.dirty_pages_rate && transferred > 10000) { - s->expected_downtime = ram_counters.remaining / bandwidth; + if (stat64_get(&mig_stats.dirty_pages_rate) && + transferred > 10000) { + s->expected_downtime = + stat64_get(&mig_stats.dirty_bytes_last_sync) / bandwidth; } - qemu_file_reset_rate_limit(s->to_dst_file); + migration_rate_reset(s->to_dst_file); update_iteration_initial_status(s); @@ -3909,10 +2759,7 @@ static void migration_iteration_finish(MigrationState *s) runstate_set(RUN_STATE_POSTMIGRATE); break; case MIGRATION_STATUS_COLO: - if (!migrate_colo_enabled()) { - error_report("%s: critical error: calling COLO code without " - "COLO enabled", __func__); - } + assert(migrate_colo()); migrate_start_colo_process(s); s->vm_was_running = true; /* Fallthrough */ @@ -3998,7 +2845,7 @@ bool migration_rate_limit(void) bool urgent = false; migration_update_counters(s, now); - if (qemu_file_rate_limit(s->to_dst_file)) { + if (migration_rate_exceeded(s->to_dst_file)) { if (qemu_file_get_error(s->to_dst_file)) { return false; @@ -4105,7 +2952,7 @@ static void *migration_thread(void *opaque) qemu_savevm_send_postcopy_advise(s->to_dst_file); } - if (migrate_colo_enabled()) { + if (migrate_colo()) { /* Notify migration destination that we enable COLO */ qemu_savevm_send_colo_enable(s->to_dst_file); } @@ -4120,7 +2967,7 @@ static void *migration_thread(void *opaque) trace_migration_thread_setup_complete(); while (migration_is_active(s)) { - if (urgent || !qemu_file_rate_limit(s->to_dst_file)) { + if (urgent || !migration_rate_exceeded(s->to_dst_file)) { MigIterateState iter_state = migration_iteration_run(s); if (iter_state == MIG_ITERATE_SKIP) { continue; @@ -4194,7 +3041,7 @@ static void *bg_migration_thread(void *opaque) rcu_register_thread(); object_ref(OBJECT(s)); - qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX); + migration_rate_set(RATE_LIMIT_DISABLED); setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); /* @@ -4321,7 +3168,7 @@ fail: void migrate_fd_connect(MigrationState *s, Error *error_in) { Error *local_err = NULL; - int64_t rate_limit; + uint64_t rate_limit; bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED; /* @@ -4331,7 +3178,7 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) */ migrate_error_free(s); - s->expected_downtime = s->parameters.downtime_limit; + s->expected_downtime = migrate_downtime_limit(); if (resume) { assert(s->cleanup_bh); } else { @@ -4357,17 +3204,16 @@ void migrate_fd_connect(MigrationState *s, Error 
*error_in) if (resume) { /* This is a resumed migration */ - rate_limit = s->parameters.max_postcopy_bandwidth / - XFER_LIMIT_RATIO; + rate_limit = migrate_max_postcopy_bandwidth(); } else { /* This is a fresh new migration */ - rate_limit = s->parameters.max_bandwidth / XFER_LIMIT_RATIO; + rate_limit = migrate_max_bandwidth(); /* Notify before starting migration thread */ notifier_list_notify(&migration_state_notifiers, s); } - qemu_file_set_rate_limit(s->to_dst_file, rate_limit); + migration_rate_set(rate_limit); qemu_file_set_blocking(s->to_dst_file, true); /* @@ -4375,7 +3221,7 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) * precopy, only if user specified "return-path" capability would * QEMU uses the return path. */ - if (migrate_postcopy_ram() || migrate_use_return_path()) { + if (migrate_postcopy_ram() || migrate_return_path()) { if (open_return_path_on_source(s, !resume)) { error_report("Unable to open return-path for postcopy"); migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED); @@ -4384,6 +3230,15 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) } } + /* + * This needs to be done before resuming a postcopy. Note: for newer + * QEMUs we will delay the channel creation until postcopy_start(), to + * avoid disorder of channel creations. + */ + if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { + postcopy_preempt_setup(s); + } + if (resume) { /* Wakeup the main migration thread to do the recovery */ migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED, @@ -4410,133 +3265,6 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) s->migration_thread_running = true; } -void migration_global_dump(Monitor *mon) -{ - MigrationState *ms = migrate_get_current(); - - monitor_printf(mon, "globals:\n"); - monitor_printf(mon, "store-global-state: %s\n", - ms->store_global_state ? "on" : "off"); - monitor_printf(mon, "only-migratable: %s\n", - only_migratable ? "on" : "off"); - monitor_printf(mon, "send-configuration: %s\n", - ms->send_configuration ? "on" : "off"); - monitor_printf(mon, "send-section-footer: %s\n", - ms->send_section_footer ? "on" : "off"); - monitor_printf(mon, "decompress-error-check: %s\n", - ms->decompress_error_check ? 
"on" : "off"); - monitor_printf(mon, "clear-bitmap-shift: %u\n", - ms->clear_bitmap_shift); -} - -#define DEFINE_PROP_MIG_CAP(name, x) \ - DEFINE_PROP_BOOL(name, MigrationState, enabled_capabilities[x], false) - -static Property migration_properties[] = { - DEFINE_PROP_BOOL("store-global-state", MigrationState, - store_global_state, true), - DEFINE_PROP_BOOL("send-configuration", MigrationState, - send_configuration, true), - DEFINE_PROP_BOOL("send-section-footer", MigrationState, - send_section_footer, true), - DEFINE_PROP_BOOL("decompress-error-check", MigrationState, - decompress_error_check, true), - DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState, - clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT), - - /* Migration parameters */ - DEFINE_PROP_UINT8("x-compress-level", MigrationState, - parameters.compress_level, - DEFAULT_MIGRATE_COMPRESS_LEVEL), - DEFINE_PROP_UINT8("x-compress-threads", MigrationState, - parameters.compress_threads, - DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT), - DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState, - parameters.compress_wait_thread, true), - DEFINE_PROP_UINT8("x-decompress-threads", MigrationState, - parameters.decompress_threads, - DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT), - DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState, - parameters.throttle_trigger_threshold, - DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD), - DEFINE_PROP_UINT8("x-cpu-throttle-initial", MigrationState, - parameters.cpu_throttle_initial, - DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL), - DEFINE_PROP_UINT8("x-cpu-throttle-increment", MigrationState, - parameters.cpu_throttle_increment, - DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT), - DEFINE_PROP_BOOL("x-cpu-throttle-tailslow", MigrationState, - parameters.cpu_throttle_tailslow, false), - DEFINE_PROP_SIZE("x-max-bandwidth", MigrationState, - parameters.max_bandwidth, MAX_THROTTLE), - DEFINE_PROP_UINT64("x-downtime-limit", MigrationState, - parameters.downtime_limit, - DEFAULT_MIGRATE_SET_DOWNTIME), - DEFINE_PROP_UINT32("x-checkpoint-delay", MigrationState, - parameters.x_checkpoint_delay, - DEFAULT_MIGRATE_X_CHECKPOINT_DELAY), - DEFINE_PROP_UINT8("multifd-channels", MigrationState, - parameters.multifd_channels, - DEFAULT_MIGRATE_MULTIFD_CHANNELS), - DEFINE_PROP_MULTIFD_COMPRESSION("multifd-compression", MigrationState, - parameters.multifd_compression, - DEFAULT_MIGRATE_MULTIFD_COMPRESSION), - DEFINE_PROP_UINT8("multifd-zlib-level", MigrationState, - parameters.multifd_zlib_level, - DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL), - DEFINE_PROP_UINT8("multifd-zstd-level", MigrationState, - parameters.multifd_zstd_level, - DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL), - DEFINE_PROP_SIZE("xbzrle-cache-size", MigrationState, - parameters.xbzrle_cache_size, - DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE), - DEFINE_PROP_SIZE("max-postcopy-bandwidth", MigrationState, - parameters.max_postcopy_bandwidth, - DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH), - DEFINE_PROP_UINT8("max-cpu-throttle", MigrationState, - parameters.max_cpu_throttle, - DEFAULT_MIGRATE_MAX_CPU_THROTTLE), - DEFINE_PROP_SIZE("announce-initial", MigrationState, - parameters.announce_initial, - DEFAULT_MIGRATE_ANNOUNCE_INITIAL), - DEFINE_PROP_SIZE("announce-max", MigrationState, - parameters.announce_max, - DEFAULT_MIGRATE_ANNOUNCE_MAX), - DEFINE_PROP_SIZE("announce-rounds", MigrationState, - parameters.announce_rounds, - DEFAULT_MIGRATE_ANNOUNCE_ROUNDS), - DEFINE_PROP_SIZE("announce-step", MigrationState, - parameters.announce_step, - DEFAULT_MIGRATE_ANNOUNCE_STEP), - 
DEFINE_PROP_STRING("tls-creds", MigrationState, parameters.tls_creds), - DEFINE_PROP_STRING("tls-hostname", MigrationState, parameters.tls_hostname), - DEFINE_PROP_STRING("tls-authz", MigrationState, parameters.tls_authz), - - /* Migration capabilities */ - DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), - DEFINE_PROP_MIG_CAP("x-rdma-pin-all", MIGRATION_CAPABILITY_RDMA_PIN_ALL), - DEFINE_PROP_MIG_CAP("x-auto-converge", MIGRATION_CAPABILITY_AUTO_CONVERGE), - DEFINE_PROP_MIG_CAP("x-zero-blocks", MIGRATION_CAPABILITY_ZERO_BLOCKS), - DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS), - DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS), - DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM), - DEFINE_PROP_MIG_CAP("x-postcopy-preempt", - MIGRATION_CAPABILITY_POSTCOPY_PREEMPT), - DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO), - DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM), - DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK), - DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH), - DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD), - DEFINE_PROP_MIG_CAP("x-background-snapshot", - MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT), -#ifdef CONFIG_LINUX - DEFINE_PROP_MIG_CAP("x-zero-copy-send", - MIGRATION_CAPABILITY_ZERO_COPY_SEND), -#endif - - DEFINE_PROP_END_OF_LIST(), -}; - static void migration_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -4565,7 +3293,6 @@ static void migration_instance_finalize(Object *obj) static void migration_instance_init(Object *obj) { MigrationState *ms = MIGRATION_OBJ(obj); - MigrationParameters *params = &ms->parameters; ms->state = MIGRATION_STATUS_NONE; ms->mbps = -1; @@ -4573,33 +3300,7 @@ static void migration_instance_init(Object *obj) qemu_sem_init(&ms->pause_sem, 0); qemu_mutex_init(&ms->error_mutex); - params->tls_hostname = g_strdup(""); - params->tls_creds = g_strdup(""); - - /* Set has_* up only for parameter checks */ - params->has_compress_level = true; - params->has_compress_threads = true; - params->has_compress_wait_thread = true; - params->has_decompress_threads = true; - params->has_throttle_trigger_threshold = true; - params->has_cpu_throttle_initial = true; - params->has_cpu_throttle_increment = true; - params->has_cpu_throttle_tailslow = true; - params->has_max_bandwidth = true; - params->has_downtime_limit = true; - params->has_x_checkpoint_delay = true; - params->has_block_incremental = true; - params->has_multifd_channels = true; - params->has_multifd_compression = true; - params->has_multifd_zlib_level = true; - params->has_multifd_zstd_level = true; - params->has_xbzrle_cache_size = true; - params->has_max_postcopy_bandwidth = true; - params->has_max_cpu_throttle = true; - params->has_announce_initial = true; - params->has_announce_max = true; - params->has_announce_rounds = true; - params->has_announce_step = true; + migrate_params_init(&ms->parameters); qemu_sem_init(&ms->postcopy_pause_sem, 0); qemu_sem_init(&ms->postcopy_pause_rp_sem, 0); @@ -4617,27 +3318,14 @@ static void migration_instance_init(Object *obj) */ static bool migration_object_check(MigrationState *ms, Error **errp) { - MigrationCapabilityStatusList *head = NULL; /* Assuming all off */ - bool cap_list[MIGRATION_CAPABILITY__MAX] = { 0 }, ret; - int i; + bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 }; if (!migrate_params_check(&ms->parameters, errp)) { return false; } - for (i = 0; i < 
MIGRATION_CAPABILITY__MAX; i++) { - if (ms->enabled_capabilities[i]) { - QAPI_LIST_PREPEND(head, migrate_cap_add(i, true)); - } - } - - ret = migrate_caps_check(cap_list, head, errp); - - /* It works with head == NULL */ - qapi_free_MigrationCapabilityStatusList(head); - - return ret; + return migrate_caps_check(old_caps, ms->capabilities, errp); } static const TypeInfo migration_type = { diff --git a/migration/migration.h b/migration/migration.h index 2da2f8a164..48a46123a0 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -65,6 +65,12 @@ typedef struct { bool all_zero; } PostcopyTmpPage; +typedef enum { + PREEMPT_THREAD_NONE = 0, + PREEMPT_THREAD_CREATED, + PREEMPT_THREAD_QUIT, +} PreemptThreadStatus; + /* State for the incoming migration */ struct MigrationIncomingState { QEMUFile *from_src_file; @@ -124,7 +130,12 @@ struct MigrationIncomingState { QemuSemaphore postcopy_qemufile_dst_done; /* Postcopy priority thread is used to receive postcopy requested pages */ QemuThread postcopy_prio_thread; - bool postcopy_prio_thread_created; + /* + * Always set by the main vm load thread only, but can be read by the + * postcopy preempt thread. "volatile" makes sure all reads will be + * up-to-date across cores. + */ + volatile PreemptThreadStatus preempt_thread_status; /* * Used to sync between the ram load main thread and the fast ram load * thread. It protects postcopy_qemufile_dst, which is the postcopy @@ -151,10 +162,15 @@ struct MigrationIncomingState { int state; - bool have_colo_incoming_thread; - QemuThread colo_incoming_thread; + /* + * The incoming migration coroutine, non-NULL during qemu_loadvm_state(). + * Used to wake the migration incoming coroutine from rdma code. How safe + * this is remains an open question. + */ + Coroutine *loadvm_co; + /* The coroutine we should enter (back) after failover */ - Coroutine *migration_incoming_co; + Coroutine *colo_incoming_co; QemuSemaphore colo_incoming_sem; /* @@ -299,7 +315,7 @@ struct MigrationState { int64_t downtime_start; int64_t downtime; int64_t expected_downtime; - bool enabled_capabilities[MIGRATION_CAPABILITY__MAX]; + bool capabilities[MIGRATION_CAPABILITY__MAX]; int64_t setup_time; /* * Whether guest was running when we enter the completion stage. @@ -364,7 +380,46 @@ struct MigrationState { * do not trigger spurious decompression errors. */ bool decompress_error_check; + /* + * This variable only affects behavior when postcopy preempt mode is + * enabled. + * + * When set: + * + * - postcopy preempt src QEMU instance will generate an EOS message at + * the end of migration to shut the preempt channel on dest side. + * + * - postcopy preempt channel will be created at the setup phase on src + QEMU. + * + * When clear: + * + * - postcopy preempt src QEMU instance will _not_ generate an EOS + * message at the end of migration, the dest qemu will shut down the + * channel itself. + * + * - postcopy preempt channel will be created at the switching phase + * from precopy -> postcopy (to avoid race condition of misordered + * creation of channels). + * + * NOTE: See message-id on qemu-devel + * mailing list for more information on the possible race. Everyone + * should probably just keep this value untouched after it is set by the + * machine type (or the default). + */ + bool preempt_pre_7_2; + /* + * flush every channel after each section sent. + * + * This ensures that we can't mix pages from one iteration through + * ram pages with pages for the following iteration.
We really + * only need to do this flush after we have gone through all the + * dirty pages. For historical reasons, we do that after each + * section. This is suboptimal (we flush too many times). + * Default value is false. (since 8.1) + */ + bool multifd_flush_after_each_section; /* * This decides the size of guest memory chunk that will be used * to track dirty bitmap clearing. The size of memory chunk will @@ -408,50 +463,8 @@ bool migration_is_blocked(Error **errp); bool migration_in_postcopy(void); MigrationState *migrate_get_current(void); -bool migrate_postcopy(void); - -bool migrate_release_ram(void); -bool migrate_postcopy_ram(void); -bool migrate_zero_blocks(void); -bool migrate_dirty_bitmaps(void); -bool migrate_ignore_shared(void); -bool migrate_validate_uuid(void); - -bool migrate_auto_converge(void); -bool migrate_use_multifd(void); -bool migrate_pause_before_switchover(void); -int migrate_multifd_channels(void); -MultiFDCompression migrate_multifd_compression(void); -int migrate_multifd_zlib_level(void); -int migrate_multifd_zstd_level(void); - -#ifdef CONFIG_LINUX -bool migrate_use_zero_copy_send(void); -#else -#define migrate_use_zero_copy_send() (false) -#endif -int migrate_use_tls(void); -int migrate_use_xbzrle(void); -uint64_t migrate_xbzrle_cache_size(void); -bool migrate_colo_enabled(void); - -bool migrate_use_block(void); -bool migrate_use_block_incremental(void); -int migrate_max_cpu_throttle(void); -bool migrate_use_return_path(void); - uint64_t ram_get_total_transferred_pages(void); -bool migrate_use_compression(void); -int migrate_compress_level(void); -int migrate_compress_threads(void); -int migrate_compress_wait_thread(void); -int migrate_decompress_threads(void); -bool migrate_use_events(void); -bool migrate_postcopy_blocktime(void); -bool migrate_background_snapshot(void); -bool migrate_postcopy_preempt(void); - /* Sending on the return path - generic and then for each message type */ void migrate_send_rp_shut(MigrationIncomingState *mis, uint32_t value); diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c index 37770248e1..81701250ad 100644 --- a/migration/multifd-zlib.c +++ b/migration/multifd-zlib.c @@ -18,6 +18,7 @@ #include "qapi/error.h" #include "migration.h" #include "trace.h" +#include "options.h" #include "multifd.h" struct zlib_data { diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c index f4a8e1ed1f..d1d29e76cc 100644 --- a/migration/multifd-zstd.c +++ b/migration/multifd-zstd.c @@ -18,6 +18,7 @@ #include "qapi/error.h" #include "migration.h" #include "trace.h" +#include "options.h" #include "multifd.h" struct zstd_data { diff --git a/migration/multifd.c b/migration/multifd.c index 5e85c3ea9b..0bf5958a9c 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -19,13 +19,14 @@ #include "qapi/error.h" #include "ram.h" #include "migration.h" +#include "migration-stats.h" #include "socket.h" #include "tls.h" #include "qemu-file.h" #include "trace.h" #include "multifd.h" #include "threadinfo.h" - +#include "options.h" #include "qemu/yank.h" #include "io/channel-socket.h" #include "yank_functions.h" @@ -174,6 +175,7 @@ void multifd_register_ops(int method, MultiFDMethods *ops) static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp) { MultiFDInit_t msg = {}; + size_t size = sizeof(msg); int ret; msg.magic = cpu_to_be32(MULTIFD_MAGIC); @@ -181,10 +183,12 @@ static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp) msg.id = p->id; memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid)); -
ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp); + ret = qio_channel_write_all(p->c, (char *)&msg, size, errp); if (ret != 0) { return -1; } + stat64_add(&mig_stats.multifd_bytes, size); + stat64_add(&mig_stats.transferred, size); return 0; } @@ -280,7 +284,6 @@ static void multifd_send_fill_packet(MultiFDSendParams *p) static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) { MultiFDPacket_t *packet = p->packet; - RAMBlock *block; int i; packet->magic = be32_to_cpu(packet->magic); @@ -330,21 +333,21 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) /* make sure that ramblock is 0 terminated */ packet->ramblock[255] = 0; - block = qemu_ram_block_by_name(packet->ramblock); - if (!block) { + p->block = qemu_ram_block_by_name(packet->ramblock); + if (!p->block) { error_setg(errp, "multifd: unknown ram block %s", packet->ramblock); return -1; } - p->host = block->host; + p->host = p->block->host; for (i = 0; i < p->normal_num; i++) { uint64_t offset = be64_to_cpu(packet->offset[i]); - if (offset > (block->used_length - p->page_size)) { + if (offset > (p->block->used_length - p->page_size)) { error_setg(errp, "multifd: offset too long %" PRIu64 " (max " RAM_ADDR_FMT ")", - offset, block->used_length); + offset, p->block->used_length); return -1; } p->normal[i] = offset; @@ -395,7 +398,6 @@ static int multifd_send_pages(QEMUFile *f) static int next_channel; MultiFDSendParams *p = NULL; /* make happy gcc */ MultiFDPages_t *pages = multifd_send_state->pages; - uint64_t transferred; if (qatomic_read(&multifd_send_state->exiting)) { return -1; @@ -430,10 +432,6 @@ static int multifd_send_pages(QEMUFile *f) p->packet_num = multifd_send_state->packet_num++; multifd_send_state->pages = p->pages; p->pages = pages; - transferred = ((uint64_t) pages->num) * p->page_size + p->packet_len; - qemu_file_acct_rate_limit(f, transferred); - ram_counters.multifd_bytes += transferred; - stat64_add(&ram_atomic_counters.transferred, transferred); qemu_mutex_unlock(&p->mutex); qemu_sem_post(&p->sem); @@ -516,7 +514,7 @@ void multifd_save_cleanup(void) { int i; - if (!migrate_use_multifd()) { + if (!migrate_multifd()) { return; } multifd_send_terminate_threads(NULL); @@ -576,7 +574,7 @@ static int multifd_zero_copy_flush(QIOChannel *c) return -1; } if (ret == 1) { - dirty_sync_missed_zero_copy(); + stat64_add(&mig_stats.dirty_sync_missed_zero_copy, 1); } return ret; @@ -587,7 +585,7 @@ int multifd_send_sync_main(QEMUFile *f) int i; bool flush_zero_copy; - if (!migrate_use_multifd()) { + if (!migrate_multifd()) { return 0; } if (multifd_send_state->pages->num) { @@ -608,7 +606,7 @@ int multifd_send_sync_main(QEMUFile *f) * all the dirty bitmaps. 
*/ - flush_zero_copy = migrate_use_zero_copy_send(); + flush_zero_copy = migrate_zero_copy_send(); for (i = 0; i < migrate_multifd_channels(); i++) { MultiFDSendParams *p = &multifd_send_state->params[i]; @@ -626,15 +624,13 @@ int multifd_send_sync_main(QEMUFile *f) p->packet_num = multifd_send_state->packet_num++; p->flags |= MULTIFD_FLAG_SYNC; p->pending_job++; - qemu_file_acct_rate_limit(f, p->packet_len); - ram_counters.multifd_bytes += p->packet_len; - stat64_add(&ram_atomic_counters.transferred, p->packet_len); qemu_mutex_unlock(&p->mutex); qemu_sem_post(&p->sem); } for (i = 0; i < migrate_multifd_channels(); i++) { MultiFDSendParams *p = &multifd_send_state->params[i]; + qemu_sem_wait(&multifd_send_state->channels_ready); trace_multifd_send_sync_main_wait(p->id); qemu_sem_wait(&p->sem_sync); @@ -653,7 +649,7 @@ static void *multifd_send_thread(void *opaque) MigrationThread *thread = NULL; Error *local_err = NULL; int ret = 0; - bool use_zero_copy_send = migrate_use_zero_copy_send(); + bool use_zero_copy_send = migrate_zero_copy_send(); thread = MigrationThreadAdd(p->name, qemu_get_thread_id()); @@ -668,6 +664,7 @@ static void *multifd_send_thread(void *opaque) p->num_packets = 1; while (true) { + qemu_sem_post(&multifd_send_state->channels_ready); qemu_sem_wait(&p->sem); if (qatomic_read(&multifd_send_state->exiting)) { @@ -677,7 +674,7 @@ static void *multifd_send_thread(void *opaque) if (p->pending_job) { uint64_t packet_num = p->packet_num; - uint32_t flags = p->flags; + uint32_t flags; p->normal_num = 0; if (use_zero_copy_send) { @@ -699,6 +696,7 @@ static void *multifd_send_thread(void *opaque) } } multifd_send_fill_packet(p); + flags = p->flags; p->flags = 0; p->num_packets++; p->total_normal_pages += p->normal_num; @@ -716,6 +714,8 @@ static void *multifd_send_thread(void *opaque) if (ret != 0) { break; } + stat64_add(&mig_stats.multifd_bytes, p->packet_len); + stat64_add(&mig_stats.transferred, p->packet_len); } else { /* Send header using the same writev call */ p->iov[0].iov_len = p->packet_len; @@ -728,6 +728,8 @@ static void *multifd_send_thread(void *opaque) break; } + stat64_add(&mig_stats.multifd_bytes, p->next_packet_size); + stat64_add(&mig_stats.transferred, p->next_packet_size); qemu_mutex_lock(&p->mutex); p->pending_job--; qemu_mutex_unlock(&p->mutex); @@ -735,7 +737,6 @@ static void *multifd_send_thread(void *opaque) if (flags & MULTIFD_FLAG_SYNC) { qemu_sem_post(&p->sem_sync); } - qemu_sem_post(&multifd_send_state->channels_ready); } else if (p->quit) { qemu_mutex_unlock(&p->mutex); break; @@ -821,7 +822,7 @@ static void multifd_tls_channel_connect(MultiFDSendParams *p, const char *hostname = s->hostname; QIOChannelTLS *tioc; - tioc = migration_tls_client_create(s, ioc, hostname, errp); + tioc = migration_tls_client_create(ioc, hostname, errp); if (!tioc) { return; } @@ -910,7 +911,7 @@ int multifd_save_setup(Error **errp) uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size(); uint8_t i; - if (!migrate_use_multifd()) { + if (!migrate_multifd()) { return 0; } @@ -944,7 +945,7 @@ int multifd_save_setup(Error **errp) p->page_size = qemu_target_page_size(); p->page_count = page_count; - if (migrate_use_zero_copy_send()) { + if (migrate_zero_copy_send()) { p->write_flags = QIO_CHANNEL_WRITE_FLAG_ZERO_COPY; } else { p->write_flags = 0; @@ -1015,7 +1016,7 @@ static void multifd_recv_terminate_threads(Error *err) void multifd_load_shutdown(void) { - if (migrate_use_multifd()) { + if (migrate_multifd()) { multifd_recv_terminate_threads(NULL); } } @@ 
-1024,7 +1025,7 @@ void multifd_load_cleanup(void) { int i; - if (!migrate_use_multifd()) { + if (!migrate_multifd()) { return; } multifd_recv_terminate_threads(NULL); @@ -1071,7 +1072,7 @@ void multifd_recv_sync_main(void) { int i; - if (!migrate_use_multifd()) { + if (!migrate_multifd()) { return; } for (i = 0; i < migrate_multifd_channels(); i++) { @@ -1169,7 +1170,7 @@ int multifd_load_setup(Error **errp) * Return successfully if multiFD recv state is already initialised * or multiFD is not enabled. */ - if (multifd_recv_state || !migrate_use_multifd()) { + if (multifd_recv_state || !migrate_multifd()) { return 0; } @@ -1215,7 +1216,7 @@ bool multifd_recv_all_channels_created(void) { int thread_count = migrate_multifd_channels(); - if (!migrate_use_multifd()) { + if (!migrate_multifd()) { return true; } diff --git a/migration/multifd.h b/migration/multifd.h index 7cfc265148..a835643b48 100644 --- a/migration/multifd.h +++ b/migration/multifd.h @@ -175,6 +175,8 @@ typedef struct { uint32_t next_packet_size; /* packets sent through this channel */ uint64_t num_packets; + /* ramblock */ + RAMBlock *block; /* ramblock host address */ uint8_t *host; /* non zero pages recv through this channel */ diff --git a/migration/options.c b/migration/options.c new file mode 100644 index 0000000000..b62ab30cd5 --- /dev/null +++ b/migration/options.c @@ -0,0 +1,1332 @@ +/* + * QEMU migration capabilities + * + * Copyright (c) 2012-2023 Red Hat Inc + * + * Authors: + * Orit Wasserman + * Juan Quintela + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "exec/target_page.h" +#include "qapi/clone-visitor.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-migration.h" +#include "qapi/qapi-visit-migration.h" +#include "qapi/qmp/qerror.h" +#include "qapi/qmp/qnull.h" +#include "sysemu/runstate.h" +#include "migration/colo.h" +#include "migration/misc.h" +#include "migration.h" +#include "migration-stats.h" +#include "qemu-file.h" +#include "ram.h" +#include "options.h" + +/* Maximum migrate downtime set to 2000 seconds */ +#define MAX_MIGRATE_DOWNTIME_SECONDS 2000 +#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000) + +#define MAX_THROTTLE (128 << 20) /* Migration transfer speed throttling */ + +/* Time in milliseconds we are allowed to stop the source, + * for sending the last part */ +#define DEFAULT_MIGRATE_SET_DOWNTIME 300 + +/* Default compression thread count */ +#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8 +/* Default decompression thread count, usually decompression is at + * least 4 times as fast as compression. */ +#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2 +/* 0: no compression, 1: best speed, ... 9: best compression ratio */ +#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1 +/* Define default autoconverge cpu throttle migration parameters */ +#define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50 +#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20 +#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10 +#define DEFAULT_MIGRATE_MAX_CPU_THROTTLE 99 + +/* Migration XBZRLE default cache size */ +#define DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE (64 * 1024 * 1024) + +/* The delay time (in ms) between two COLO checkpoints */ +#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY (200 * 100) +#define DEFAULT_MIGRATE_MULTIFD_CHANNELS 2 +#define DEFAULT_MIGRATE_MULTIFD_COMPRESSION MULTIFD_COMPRESSION_NONE +/* 0: no compression, 1: best speed, ...
9: best compression ratio */ +#define DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL 1 +/* 0: no compression, 1: best speed, ... 20: best compression ratio */ +#define DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL 1 + +/* Background transfer rate for postcopy, 0 means unlimited, note + * that page requests can still exceed this limit. + */ +#define DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH 0 + +/* + * Parameters for self_announce_delay giving a stream of RARP/ARP + * packets after migration. + */ +#define DEFAULT_MIGRATE_ANNOUNCE_INITIAL 50 +#define DEFAULT_MIGRATE_ANNOUNCE_MAX 550 +#define DEFAULT_MIGRATE_ANNOUNCE_ROUNDS 5 +#define DEFAULT_MIGRATE_ANNOUNCE_STEP 100 + +#define DEFINE_PROP_MIG_CAP(name, x) \ + DEFINE_PROP_BOOL(name, MigrationState, capabilities[x], false) + +Property migration_properties[] = { + DEFINE_PROP_BOOL("store-global-state", MigrationState, + store_global_state, true), + DEFINE_PROP_BOOL("send-configuration", MigrationState, + send_configuration, true), + DEFINE_PROP_BOOL("send-section-footer", MigrationState, + send_section_footer, true), + DEFINE_PROP_BOOL("decompress-error-check", MigrationState, + decompress_error_check, true), + DEFINE_PROP_BOOL("multifd-flush-after-each-section", MigrationState, + multifd_flush_after_each_section, false), + DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState, + clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT), + DEFINE_PROP_BOOL("x-preempt-pre-7-2", MigrationState, + preempt_pre_7_2, false), + + /* Migration parameters */ + DEFINE_PROP_UINT8("x-compress-level", MigrationState, + parameters.compress_level, + DEFAULT_MIGRATE_COMPRESS_LEVEL), + DEFINE_PROP_UINT8("x-compress-threads", MigrationState, + parameters.compress_threads, + DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT), + DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState, + parameters.compress_wait_thread, true), + DEFINE_PROP_UINT8("x-decompress-threads", MigrationState, + parameters.decompress_threads, + DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT), + DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState, + parameters.throttle_trigger_threshold, + DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD), + DEFINE_PROP_UINT8("x-cpu-throttle-initial", MigrationState, + parameters.cpu_throttle_initial, + DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL), + DEFINE_PROP_UINT8("x-cpu-throttle-increment", MigrationState, + parameters.cpu_throttle_increment, + DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT), + DEFINE_PROP_BOOL("x-cpu-throttle-tailslow", MigrationState, + parameters.cpu_throttle_tailslow, false), + DEFINE_PROP_SIZE("x-max-bandwidth", MigrationState, + parameters.max_bandwidth, MAX_THROTTLE), + DEFINE_PROP_UINT64("x-downtime-limit", MigrationState, + parameters.downtime_limit, + DEFAULT_MIGRATE_SET_DOWNTIME), + DEFINE_PROP_UINT32("x-checkpoint-delay", MigrationState, + parameters.x_checkpoint_delay, + DEFAULT_MIGRATE_X_CHECKPOINT_DELAY), + DEFINE_PROP_UINT8("multifd-channels", MigrationState, + parameters.multifd_channels, + DEFAULT_MIGRATE_MULTIFD_CHANNELS), + DEFINE_PROP_MULTIFD_COMPRESSION("multifd-compression", MigrationState, + parameters.multifd_compression, + DEFAULT_MIGRATE_MULTIFD_COMPRESSION), + DEFINE_PROP_UINT8("multifd-zlib-level", MigrationState, + parameters.multifd_zlib_level, + DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL), + DEFINE_PROP_UINT8("multifd-zstd-level", MigrationState, + parameters.multifd_zstd_level, + DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL), + DEFINE_PROP_SIZE("xbzrle-cache-size", MigrationState, + parameters.xbzrle_cache_size, + DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE), +
DEFINE_PROP_SIZE("max-postcopy-bandwidth", MigrationState, + parameters.max_postcopy_bandwidth, + DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH), + DEFINE_PROP_UINT8("max-cpu-throttle", MigrationState, + parameters.max_cpu_throttle, + DEFAULT_MIGRATE_MAX_CPU_THROTTLE), + DEFINE_PROP_SIZE("announce-initial", MigrationState, + parameters.announce_initial, + DEFAULT_MIGRATE_ANNOUNCE_INITIAL), + DEFINE_PROP_SIZE("announce-max", MigrationState, + parameters.announce_max, + DEFAULT_MIGRATE_ANNOUNCE_MAX), + DEFINE_PROP_SIZE("announce-rounds", MigrationState, + parameters.announce_rounds, + DEFAULT_MIGRATE_ANNOUNCE_ROUNDS), + DEFINE_PROP_SIZE("announce-step", MigrationState, + parameters.announce_step, + DEFAULT_MIGRATE_ANNOUNCE_STEP), + DEFINE_PROP_STRING("tls-creds", MigrationState, parameters.tls_creds), + DEFINE_PROP_STRING("tls-hostname", MigrationState, parameters.tls_hostname), + DEFINE_PROP_STRING("tls-authz", MigrationState, parameters.tls_authz), + + /* Migration capabilities */ + DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), + DEFINE_PROP_MIG_CAP("x-rdma-pin-all", MIGRATION_CAPABILITY_RDMA_PIN_ALL), + DEFINE_PROP_MIG_CAP("x-auto-converge", MIGRATION_CAPABILITY_AUTO_CONVERGE), + DEFINE_PROP_MIG_CAP("x-zero-blocks", MIGRATION_CAPABILITY_ZERO_BLOCKS), + DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS), + DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS), + DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM), + DEFINE_PROP_MIG_CAP("x-postcopy-preempt", + MIGRATION_CAPABILITY_POSTCOPY_PREEMPT), + DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO), + DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM), + DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK), + DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH), + DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD), + DEFINE_PROP_MIG_CAP("x-background-snapshot", + MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT), +#ifdef CONFIG_LINUX + DEFINE_PROP_MIG_CAP("x-zero-copy-send", + MIGRATION_CAPABILITY_ZERO_COPY_SEND), +#endif + + DEFINE_PROP_END_OF_LIST(), +}; + +bool migrate_auto_converge(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE]; +} + +bool migrate_background_snapshot(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]; +} + +bool migrate_block(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_BLOCK]; +} + +bool migrate_colo(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_X_COLO]; +} + +bool migrate_compress(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_COMPRESS]; +} + +bool migrate_dirty_bitmaps(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS]; +} + +bool migrate_events(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_EVENTS]; +} + +bool migrate_ignore_shared(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED]; +} + +bool migrate_late_block_activate(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE]; +} + +bool migrate_multifd(void) +{ + MigrationState *s = 
migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_MULTIFD]; +} + +bool migrate_pause_before_switchover(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER]; +} + +bool migrate_postcopy_blocktime(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME]; +} + +bool migrate_postcopy_preempt(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]; +} + +bool migrate_postcopy_ram(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM]; +} + +bool migrate_rdma_pin_all(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL]; +} + +bool migrate_release_ram(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_RELEASE_RAM]; +} + +bool migrate_return_path(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_RETURN_PATH]; +} + +bool migrate_validate_uuid(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_VALIDATE_UUID]; +} + +bool migrate_xbzrle(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_XBZRLE]; +} + +bool migrate_zero_blocks(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS]; +} + +bool migrate_zero_copy_send(void) +{ + MigrationState *s = migrate_get_current(); + + return s->capabilities[MIGRATION_CAPABILITY_ZERO_COPY_SEND]; +} + +/* pseudo capabilities */ + +bool migrate_multifd_flush_after_each_section(void) +{ + MigrationState *s = migrate_get_current(); + + return s->multifd_flush_after_each_section; +} + +bool migrate_postcopy(void) +{ + return migrate_postcopy_ram() || migrate_dirty_bitmaps(); +} + +bool migrate_tls(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.tls_creds && *s->parameters.tls_creds; +} + +typedef enum WriteTrackingSupport { + WT_SUPPORT_UNKNOWN = 0, + WT_SUPPORT_ABSENT, + WT_SUPPORT_AVAILABLE, + WT_SUPPORT_COMPATIBLE +} WriteTrackingSupport; + +static +WriteTrackingSupport migrate_query_write_tracking(void) +{ + /* Check if kernel supports required UFFD features */ + if (!ram_write_tracking_available()) { + return WT_SUPPORT_ABSENT; + } + /* + * Check if current memory configuration is + * compatible with required UFFD features. + */ + if (!ram_write_tracking_compatible()) { + return WT_SUPPORT_AVAILABLE; + } + + return WT_SUPPORT_COMPATIBLE; +} + +/* Migration capabilities set */ +struct MigrateCapsSet { + int size; /* Capability set size */ + MigrationCapability caps[]; /* Variadic array of capabilities */ +}; +typedef struct MigrateCapsSet MigrateCapsSet; + +/* Define and initialize MigrateCapsSet */ +#define INITIALIZE_MIGRATE_CAPS_SET(_name, ...) 
\ + MigrateCapsSet _name = { \ + .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \ + .caps = { __VA_ARGS__ } \ + } + +/* Background-snapshot compatibility check list */ +static const +INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot, + MIGRATION_CAPABILITY_POSTCOPY_RAM, + MIGRATION_CAPABILITY_DIRTY_BITMAPS, + MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME, + MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE, + MIGRATION_CAPABILITY_RETURN_PATH, + MIGRATION_CAPABILITY_MULTIFD, + MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER, + MIGRATION_CAPABILITY_AUTO_CONVERGE, + MIGRATION_CAPABILITY_RELEASE_RAM, + MIGRATION_CAPABILITY_RDMA_PIN_ALL, + MIGRATION_CAPABILITY_COMPRESS, + MIGRATION_CAPABILITY_XBZRLE, + MIGRATION_CAPABILITY_X_COLO, + MIGRATION_CAPABILITY_VALIDATE_UUID, + MIGRATION_CAPABILITY_ZERO_COPY_SEND); + +/** + * @migrate_caps_check - check capability compatibility + * + * @old_caps: old capability list + * @new_caps: new capability list + * @errp: set *errp if the check failed, with reason + * + * Returns true if check passed, otherwise false. + */ +bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp) +{ + MigrationIncomingState *mis = migration_incoming_get_current(); + + ERRP_GUARD(); +#ifndef CONFIG_LIVE_BLOCK_MIGRATION + if (new_caps[MIGRATION_CAPABILITY_BLOCK]) { + error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) " + "block migration"); + error_append_hint(errp, "Use drive_mirror+NBD instead.\n"); + return false; + } +#endif + +#ifndef CONFIG_REPLICATION + if (new_caps[MIGRATION_CAPABILITY_X_COLO]) { + error_setg(errp, "QEMU compiled without replication module" + " can't enable COLO"); + error_append_hint(errp, "Please enable replication before COLO.\n"); + return false; + } +#endif + + if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) { + /* This check is reasonably expensive, so only run it the first + * time the capability is set; also, it's only the destination that + * needs special support. + */ + if (!old_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM] && + runstate_check(RUN_STATE_INMIGRATE) && + !postcopy_ram_supported_by_host(mis, errp)) { + error_prepend(errp, "Postcopy is not supported: "); + return false; + } + + if (new_caps[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) { + error_setg(errp, "Postcopy is not compatible with ignore-shared"); + return false; + } + + if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) { + error_setg(errp, "Postcopy is not yet compatible with multifd"); + return false; + } + } + + if (new_caps[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) { + WriteTrackingSupport wt_support; + int idx; + /* + * Check if 'background-snapshot' capability is supported by + * host kernel and compatible with guest memory configuration. + */ + wt_support = migrate_query_write_tracking(); + if (wt_support < WT_SUPPORT_AVAILABLE) { + error_setg(errp, "Background-snapshot is not supported by host kernel"); + return false; + } + if (wt_support < WT_SUPPORT_COMPATIBLE) { + error_setg(errp, "Background-snapshot is not compatible " + "with guest memory configuration"); + return false; + } + + /* + * Check if there are any migration capabilities + * incompatible with 'background-snapshot'.
+ */ + for (idx = 0; idx < check_caps_background_snapshot.size; idx++) { + int incomp_cap = check_caps_background_snapshot.caps[idx]; + if (new_caps[incomp_cap]) { + error_setg(errp, + "Background-snapshot is not compatible with %s", + MigrationCapability_str(incomp_cap)); + return false; + } + } + } + +#ifdef CONFIG_LINUX + if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND] && + (!new_caps[MIGRATION_CAPABILITY_MULTIFD] || + new_caps[MIGRATION_CAPABILITY_COMPRESS] || + new_caps[MIGRATION_CAPABILITY_XBZRLE] || + migrate_multifd_compression() || + migrate_tls())) { + error_setg(errp, + "Zero copy only available for non-compressed non-TLS multifd migration"); + return false; + } +#else + if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND]) { + error_setg(errp, + "Zero copy currently only available on Linux"); + return false; + } +#endif + + if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]) { + if (!new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) { + error_setg(errp, "Postcopy preempt requires postcopy-ram"); + return false; + } + + /* + * Preempt mode requires urgent pages to be sent in separate + * channel, OTOH compression logic will disorder all pages into + * different compression channels, which is not compatible with the + * preempt assumptions on channel assignments. + */ + if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) { + error_setg(errp, "Postcopy preempt not compatible with compress"); + return false; + } + } + + if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) { + if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) { + error_setg(errp, "Multifd is not compatible with compress"); + return false; + } + } + + return true; +} + +bool migrate_cap_set(int cap, bool value, Error **errp) +{ + MigrationState *s = migrate_get_current(); + bool new_caps[MIGRATION_CAPABILITY__MAX]; + + if (migration_is_running(s->state)) { + error_setg(errp, QERR_MIGRATION_ACTIVE); + return false; + } + + memcpy(new_caps, s->capabilities, sizeof(new_caps)); + new_caps[cap] = value; + + if (!migrate_caps_check(s->capabilities, new_caps, errp)) { + return false; + } + s->capabilities[cap] = value; + return true; +} + +MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp) +{ + MigrationCapabilityStatusList *head = NULL, **tail = &head; + MigrationCapabilityStatus *caps; + MigrationState *s = migrate_get_current(); + int i; + + for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { +#ifndef CONFIG_LIVE_BLOCK_MIGRATION + if (i == MIGRATION_CAPABILITY_BLOCK) { + continue; + } +#endif + caps = g_malloc0(sizeof(*caps)); + caps->capability = i; + caps->state = s->capabilities[i]; + QAPI_LIST_APPEND(tail, caps); + } + + return head; +} + +void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params, + Error **errp) +{ + MigrationState *s = migrate_get_current(); + MigrationCapabilityStatusList *cap; + bool new_caps[MIGRATION_CAPABILITY__MAX]; + + if (migration_is_running(s->state) || migration_in_colo_state()) { + error_setg(errp, QERR_MIGRATION_ACTIVE); + return; + } + + memcpy(new_caps, s->capabilities, sizeof(new_caps)); + for (cap = params; cap; cap = cap->next) { + new_caps[cap->value->capability] = cap->value->state; + } + + if (!migrate_caps_check(s->capabilities, new_caps, errp)) { + return; + } + + for (cap = params; cap; cap = cap->next) { + s->capabilities[cap->value->capability] = cap->value->state; + } +} + +/* parameters */ + +const BitmapMigrationNodeAliasList *migrate_block_bitmap_mapping(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.block_bitmap_mapping; +} + 
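[Editor's note, not part of the patch: migrate_cap_set() and qmp_migrate_set_capabilities() above become the only entry points for toggling capabilities, and every consumer now reads them back through a per-capability accessor instead of poking MigrationState directly. A minimal sketch of a hypothetical caller, assuming only the declarations in migration/options.h; example_enable_multifd() is an invented name used purely for illustration:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "options.h"    /* migrate_cap_set(), migrate_multifd() */

    static bool example_enable_multifd(Error **errp)
    {
        /* migrate_cap_set() copies the current capability array, flips the
         * one bit, and re-runs migrate_caps_check() against the old state,
         * so an incompatible combination is rejected before anything is
         * committed to MigrationState. */
        if (!migrate_cap_set(MIGRATION_CAPABILITY_MULTIFD, true, errp)) {
            return false;
        }
        /* Consumers query the result through the accessor only. */
        return migrate_multifd();
    }

Passing both the old and new arrays to migrate_caps_check() is what lets expensive probes, like the postcopy-ram host check, run only on the first enablement.]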
+bool migrate_has_block_bitmap_mapping(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.has_block_bitmap_mapping; +} + +bool migrate_block_incremental(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.block_incremental; +} + +uint32_t migrate_checkpoint_delay(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.x_checkpoint_delay; +} + +int migrate_compress_level(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.compress_level; +} + +int migrate_compress_threads(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.compress_threads; +} + +int migrate_compress_wait_thread(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.compress_wait_thread; +} + +uint8_t migrate_cpu_throttle_increment(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.cpu_throttle_increment; +} + +uint8_t migrate_cpu_throttle_initial(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.cpu_throttle_initial; +} + +bool migrate_cpu_throttle_tailslow(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.cpu_throttle_tailslow; +} + +int migrate_decompress_threads(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.decompress_threads; +} + +uint64_t migrate_downtime_limit(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.downtime_limit; +} + +uint8_t migrate_max_cpu_throttle(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.max_cpu_throttle; +} + +uint64_t migrate_max_bandwidth(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.max_bandwidth; +} + +uint64_t migrate_max_postcopy_bandwidth(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.max_postcopy_bandwidth; +} + +int migrate_multifd_channels(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.multifd_channels; +} + +MultiFDCompression migrate_multifd_compression(void) +{ + MigrationState *s = migrate_get_current(); + + assert(s->parameters.multifd_compression < MULTIFD_COMPRESSION__MAX); + return s->parameters.multifd_compression; +} + +int migrate_multifd_zlib_level(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.multifd_zlib_level; +} + +int migrate_multifd_zstd_level(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.multifd_zstd_level; +} + +uint8_t migrate_throttle_trigger_threshold(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.throttle_trigger_threshold; +} + +const char *migrate_tls_authz(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.tls_authz; +} + +const char *migrate_tls_creds(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.tls_creds; +} + +const char *migrate_tls_hostname(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.tls_hostname; +} + +uint64_t migrate_xbzrle_cache_size(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.xbzrle_cache_size; +} + +/* parameter setters */ + +void migrate_set_block_incremental(bool value) +{ + MigrationState *s = migrate_get_current(); + + s->parameters.block_incremental = value; +} + +/* parameters helpers */ + +void block_cleanup_parameters(void) +{ + MigrationState *s = migrate_get_current(); + + if 
(s->must_remove_block_options) { + /* setting to false can never fail */ + migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, false, &error_abort); + migrate_set_block_incremental(false); + s->must_remove_block_options = false; + } +} + +AnnounceParameters *migrate_announce_params(void) +{ + static AnnounceParameters ap; + + MigrationState *s = migrate_get_current(); + + ap.initial = s->parameters.announce_initial; + ap.max = s->parameters.announce_max; + ap.rounds = s->parameters.announce_rounds; + ap.step = s->parameters.announce_step; + + return &ap; +} + +MigrationParameters *qmp_query_migrate_parameters(Error **errp) +{ + MigrationParameters *params; + MigrationState *s = migrate_get_current(); + + /* TODO use QAPI_CLONE() instead of duplicating it inline */ + params = g_malloc0(sizeof(*params)); + params->has_compress_level = true; + params->compress_level = s->parameters.compress_level; + params->has_compress_threads = true; + params->compress_threads = s->parameters.compress_threads; + params->has_compress_wait_thread = true; + params->compress_wait_thread = s->parameters.compress_wait_thread; + params->has_decompress_threads = true; + params->decompress_threads = s->parameters.decompress_threads; + params->has_throttle_trigger_threshold = true; + params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold; + params->has_cpu_throttle_initial = true; + params->cpu_throttle_initial = s->parameters.cpu_throttle_initial; + params->has_cpu_throttle_increment = true; + params->cpu_throttle_increment = s->parameters.cpu_throttle_increment; + params->has_cpu_throttle_tailslow = true; + params->cpu_throttle_tailslow = s->parameters.cpu_throttle_tailslow; + params->tls_creds = g_strdup(s->parameters.tls_creds); + params->tls_hostname = g_strdup(s->parameters.tls_hostname); + params->tls_authz = g_strdup(s->parameters.tls_authz ?
+ s->parameters.tls_authz : ""); + params->has_max_bandwidth = true; + params->max_bandwidth = s->parameters.max_bandwidth; + params->has_downtime_limit = true; + params->downtime_limit = s->parameters.downtime_limit; + params->has_x_checkpoint_delay = true; + params->x_checkpoint_delay = s->parameters.x_checkpoint_delay; + params->has_block_incremental = true; + params->block_incremental = s->parameters.block_incremental; + params->has_multifd_channels = true; + params->multifd_channels = s->parameters.multifd_channels; + params->has_multifd_compression = true; + params->multifd_compression = s->parameters.multifd_compression; + params->has_multifd_zlib_level = true; + params->multifd_zlib_level = s->parameters.multifd_zlib_level; + params->has_multifd_zstd_level = true; + params->multifd_zstd_level = s->parameters.multifd_zstd_level; + params->has_xbzrle_cache_size = true; + params->xbzrle_cache_size = s->parameters.xbzrle_cache_size; + params->has_max_postcopy_bandwidth = true; + params->max_postcopy_bandwidth = s->parameters.max_postcopy_bandwidth; + params->has_max_cpu_throttle = true; + params->max_cpu_throttle = s->parameters.max_cpu_throttle; + params->has_announce_initial = true; + params->announce_initial = s->parameters.announce_initial; + params->has_announce_max = true; + params->announce_max = s->parameters.announce_max; + params->has_announce_rounds = true; + params->announce_rounds = s->parameters.announce_rounds; + params->has_announce_step = true; + params->announce_step = s->parameters.announce_step; + + if (s->parameters.has_block_bitmap_mapping) { + params->has_block_bitmap_mapping = true; + params->block_bitmap_mapping = + QAPI_CLONE(BitmapMigrationNodeAliasList, + s->parameters.block_bitmap_mapping); + } + + return params; +} + +void migrate_params_init(MigrationParameters *params) +{ + params->tls_hostname = g_strdup(""); + params->tls_creds = g_strdup(""); + + /* Set has_* up only for parameter checks */ + params->has_compress_level = true; + params->has_compress_threads = true; + params->has_compress_wait_thread = true; + params->has_decompress_threads = true; + params->has_throttle_trigger_threshold = true; + params->has_cpu_throttle_initial = true; + params->has_cpu_throttle_increment = true; + params->has_cpu_throttle_tailslow = true; + params->has_max_bandwidth = true; + params->has_downtime_limit = true; + params->has_x_checkpoint_delay = true; + params->has_block_incremental = true; + params->has_multifd_channels = true; + params->has_multifd_compression = true; + params->has_multifd_zlib_level = true; + params->has_multifd_zstd_level = true; + params->has_xbzrle_cache_size = true; + params->has_max_postcopy_bandwidth = true; + params->has_max_cpu_throttle = true; + params->has_announce_initial = true; + params->has_announce_max = true; + params->has_announce_rounds = true; + params->has_announce_step = true; +} + +/* + * Check whether the parameters are valid. Error will be put into errp + * (if provided). Return true if valid, otherwise false. 
+ */ +bool migrate_params_check(MigrationParameters *params, Error **errp) +{ + if (params->has_compress_level && + (params->compress_level > 9)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level", + "a value between 0 and 9"); + return false; + } + + if (params->has_compress_threads && (params->compress_threads < 1)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "compress_threads", + "a value between 1 and 255"); + return false; + } + + if (params->has_decompress_threads && (params->decompress_threads < 1)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "decompress_threads", + "a value between 1 and 255"); + return false; + } + + if (params->has_throttle_trigger_threshold && + (params->throttle_trigger_threshold < 1 || + params->throttle_trigger_threshold > 100)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "throttle_trigger_threshold", + "an integer in the range of 1 to 100"); + return false; + } + + if (params->has_cpu_throttle_initial && + (params->cpu_throttle_initial < 1 || + params->cpu_throttle_initial > 99)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "cpu_throttle_initial", + "an integer in the range of 1 to 99"); + return false; + } + + if (params->has_cpu_throttle_increment && + (params->cpu_throttle_increment < 1 || + params->cpu_throttle_increment > 99)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "cpu_throttle_increment", + "an integer in the range of 1 to 99"); + return false; + } + + if (params->has_max_bandwidth && (params->max_bandwidth > SIZE_MAX)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "max_bandwidth", + "an integer in the range of 0 to "stringify(SIZE_MAX) + " bytes/second"); + return false; + } + + if (params->has_downtime_limit && + (params->downtime_limit > MAX_MIGRATE_DOWNTIME)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "downtime_limit", + "an integer in the range of 0 to " + stringify(MAX_MIGRATE_DOWNTIME)" ms"); + return false; + } + + /* x_checkpoint_delay is now always positive */ + + if (params->has_multifd_channels && (params->multifd_channels < 1)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "multifd_channels", + "a value between 1 and 255"); + return false; + } + + if (params->has_multifd_zlib_level && + (params->multifd_zlib_level > 9)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zlib_level", + "a value between 0 and 9"); + return false; + } + + if (params->has_multifd_zstd_level && + (params->multifd_zstd_level > 20)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zstd_level", + "a value between 0 and 20"); + return false; + } + + if (params->has_xbzrle_cache_size && + (params->xbzrle_cache_size < qemu_target_page_size() || + !is_power_of_2(params->xbzrle_cache_size))) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "xbzrle_cache_size", + "a power of two no less than the target page size"); + return false; + } + + if (params->has_max_cpu_throttle && + (params->max_cpu_throttle < params->cpu_throttle_initial || + params->max_cpu_throttle > 99)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "max_cpu_throttle", + "an integer in the range of cpu_throttle_initial to 99"); + return false; + } + + if (params->has_announce_initial && + params->announce_initial > 100000) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "announce_initial", + "a value between 0 and 100000"); + return false; + } + if (params->has_announce_max && + params->announce_max > 100000) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "announce_max", + "a 
value between 0 and 100000"); + return false; + } + if (params->has_announce_rounds && + params->announce_rounds > 1000) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "announce_rounds", + "a value between 0 and 1000"); + return false; + } + if (params->has_announce_step && + (params->announce_step < 1 || + params->announce_step > 10000)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + "announce_step", + "a value between 0 and 10000"); + return false; + } + + if (params->has_block_bitmap_mapping && + !check_dirty_bitmap_mig_alias_map(params->block_bitmap_mapping, errp)) { + error_prepend(errp, "Invalid mapping given for block-bitmap-mapping: "); + return false; + } + +#ifdef CONFIG_LINUX + if (migrate_zero_copy_send() && + ((params->has_multifd_compression && params->multifd_compression) || + (params->tls_creds && *params->tls_creds))) { + error_setg(errp, + "Zero copy only available for non-compressed non-TLS multifd migration"); + return false; + } +#endif + + return true; +} + +static void migrate_params_test_apply(MigrateSetParameters *params, + MigrationParameters *dest) +{ + *dest = migrate_get_current()->parameters; + + /* TODO use QAPI_CLONE() instead of duplicating it inline */ + + if (params->has_compress_level) { + dest->compress_level = params->compress_level; + } + + if (params->has_compress_threads) { + dest->compress_threads = params->compress_threads; + } + + if (params->has_compress_wait_thread) { + dest->compress_wait_thread = params->compress_wait_thread; + } + + if (params->has_decompress_threads) { + dest->decompress_threads = params->decompress_threads; + } + + if (params->has_throttle_trigger_threshold) { + dest->throttle_trigger_threshold = params->throttle_trigger_threshold; + } + + if (params->has_cpu_throttle_initial) { + dest->cpu_throttle_initial = params->cpu_throttle_initial; + } + + if (params->has_cpu_throttle_increment) { + dest->cpu_throttle_increment = params->cpu_throttle_increment; + } + + if (params->has_cpu_throttle_tailslow) { + dest->cpu_throttle_tailslow = params->cpu_throttle_tailslow; + } + + if (params->tls_creds) { + assert(params->tls_creds->type == QTYPE_QSTRING); + dest->tls_creds = params->tls_creds->u.s; + } + + if (params->tls_hostname) { + assert(params->tls_hostname->type == QTYPE_QSTRING); + dest->tls_hostname = params->tls_hostname->u.s; + } + + if (params->has_max_bandwidth) { + dest->max_bandwidth = params->max_bandwidth; + } + + if (params->has_downtime_limit) { + dest->downtime_limit = params->downtime_limit; + } + + if (params->has_x_checkpoint_delay) { + dest->x_checkpoint_delay = params->x_checkpoint_delay; + } + + if (params->has_block_incremental) { + dest->block_incremental = params->block_incremental; + } + if (params->has_multifd_channels) { + dest->multifd_channels = params->multifd_channels; + } + if (params->has_multifd_compression) { + dest->multifd_compression = params->multifd_compression; + } + if (params->has_xbzrle_cache_size) { + dest->xbzrle_cache_size = params->xbzrle_cache_size; + } + if (params->has_max_postcopy_bandwidth) { + dest->max_postcopy_bandwidth = params->max_postcopy_bandwidth; + } + if (params->has_max_cpu_throttle) { + dest->max_cpu_throttle = params->max_cpu_throttle; + } + if (params->has_announce_initial) { + dest->announce_initial = params->announce_initial; + } + if (params->has_announce_max) { + dest->announce_max = params->announce_max; + } + if (params->has_announce_rounds) { + dest->announce_rounds = params->announce_rounds; + } + if (params->has_announce_step) { + 
dest->announce_step = params->announce_step; + } + + if (params->has_block_bitmap_mapping) { + dest->has_block_bitmap_mapping = true; + dest->block_bitmap_mapping = params->block_bitmap_mapping; + } +} + +static void migrate_params_apply(MigrateSetParameters *params, Error **errp) +{ + MigrationState *s = migrate_get_current(); + + /* TODO use QAPI_CLONE() instead of duplicating it inline */ + + if (params->has_compress_level) { + s->parameters.compress_level = params->compress_level; + } + + if (params->has_compress_threads) { + s->parameters.compress_threads = params->compress_threads; + } + + if (params->has_compress_wait_thread) { + s->parameters.compress_wait_thread = params->compress_wait_thread; + } + + if (params->has_decompress_threads) { + s->parameters.decompress_threads = params->decompress_threads; + } + + if (params->has_throttle_trigger_threshold) { + s->parameters.throttle_trigger_threshold = params->throttle_trigger_threshold; + } + + if (params->has_cpu_throttle_initial) { + s->parameters.cpu_throttle_initial = params->cpu_throttle_initial; + } + + if (params->has_cpu_throttle_increment) { + s->parameters.cpu_throttle_increment = params->cpu_throttle_increment; + } + + if (params->has_cpu_throttle_tailslow) { + s->parameters.cpu_throttle_tailslow = params->cpu_throttle_tailslow; + } + + if (params->tls_creds) { + g_free(s->parameters.tls_creds); + assert(params->tls_creds->type == QTYPE_QSTRING); + s->parameters.tls_creds = g_strdup(params->tls_creds->u.s); + } + + if (params->tls_hostname) { + g_free(s->parameters.tls_hostname); + assert(params->tls_hostname->type == QTYPE_QSTRING); + s->parameters.tls_hostname = g_strdup(params->tls_hostname->u.s); + } + + if (params->tls_authz) { + g_free(s->parameters.tls_authz); + assert(params->tls_authz->type == QTYPE_QSTRING); + s->parameters.tls_authz = g_strdup(params->tls_authz->u.s); + } + + if (params->has_max_bandwidth) { + s->parameters.max_bandwidth = params->max_bandwidth; + if (s->to_dst_file && !migration_in_postcopy()) { + migration_rate_set(s->parameters.max_bandwidth); + } + } + + if (params->has_downtime_limit) { + s->parameters.downtime_limit = params->downtime_limit; + } + + if (params->has_x_checkpoint_delay) { + s->parameters.x_checkpoint_delay = params->x_checkpoint_delay; + colo_checkpoint_delay_set(); + } + + if (params->has_block_incremental) { + s->parameters.block_incremental = params->block_incremental; + } + if (params->has_multifd_channels) { + s->parameters.multifd_channels = params->multifd_channels; + } + if (params->has_multifd_compression) { + s->parameters.multifd_compression = params->multifd_compression; + } + if (params->has_xbzrle_cache_size) { + s->parameters.xbzrle_cache_size = params->xbzrle_cache_size; + xbzrle_cache_resize(params->xbzrle_cache_size, errp); + } + if (params->has_max_postcopy_bandwidth) { + s->parameters.max_postcopy_bandwidth = params->max_postcopy_bandwidth; + if (s->to_dst_file && migration_in_postcopy()) { + migration_rate_set(s->parameters.max_postcopy_bandwidth); + } + } + if (params->has_max_cpu_throttle) { + s->parameters.max_cpu_throttle = params->max_cpu_throttle; + } + if (params->has_announce_initial) { + s->parameters.announce_initial = params->announce_initial; + } + if (params->has_announce_max) { + s->parameters.announce_max = params->announce_max; + } + if (params->has_announce_rounds) { + s->parameters.announce_rounds = params->announce_rounds; + } + if (params->has_announce_step) { + s->parameters.announce_step = params->announce_step; + } + + if 
(params->has_block_bitmap_mapping) { + qapi_free_BitmapMigrationNodeAliasList( + s->parameters.block_bitmap_mapping); + + s->parameters.has_block_bitmap_mapping = true; + s->parameters.block_bitmap_mapping = + QAPI_CLONE(BitmapMigrationNodeAliasList, + params->block_bitmap_mapping); + } +} + +void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp) +{ + MigrationParameters tmp; + + /* TODO Rewrite "" to null instead */ + if (params->tls_creds + && params->tls_creds->type == QTYPE_QNULL) { + qobject_unref(params->tls_creds->u.n); + params->tls_creds->type = QTYPE_QSTRING; + params->tls_creds->u.s = strdup(""); + } + /* TODO Rewrite "" to null instead */ + if (params->tls_hostname + && params->tls_hostname->type == QTYPE_QNULL) { + qobject_unref(params->tls_hostname->u.n); + params->tls_hostname->type = QTYPE_QSTRING; + params->tls_hostname->u.s = strdup(""); + } + + migrate_params_test_apply(params, &tmp); + + if (!migrate_params_check(&tmp, errp)) { + /* Invalid parameter */ + return; + } + + migrate_params_apply(params, errp); +} diff --git a/migration/options.h b/migration/options.h new file mode 100644 index 0000000000..45991af3c2 --- /dev/null +++ b/migration/options.h @@ -0,0 +1,102 @@ +/* + * QEMU migration capabilities + * + * Copyright (c) 2012-2023 Red Hat Inc + * + * Authors: + * Orit Wasserman + * Juan Quintela + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_MIGRATION_OPTIONS_H +#define QEMU_MIGRATION_OPTIONS_H + +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" + +/* migration properties */ + +extern Property migration_properties[]; + +/* capabilities */ + +bool migrate_auto_converge(void); +bool migrate_background_snapshot(void); +bool migrate_block(void); +bool migrate_colo(void); +bool migrate_compress(void); +bool migrate_dirty_bitmaps(void); +bool migrate_events(void); +bool migrate_ignore_shared(void); +bool migrate_late_block_activate(void); +bool migrate_multifd(void); +bool migrate_pause_before_switchover(void); +bool migrate_postcopy_blocktime(void); +bool migrate_postcopy_preempt(void); +bool migrate_postcopy_ram(void); +bool migrate_rdma_pin_all(void); +bool migrate_release_ram(void); +bool migrate_return_path(void); +bool migrate_validate_uuid(void); +bool migrate_xbzrle(void); +bool migrate_zero_blocks(void); +bool migrate_zero_copy_send(void); + +/* + * pseudo capabilities + * + * These are functions that are used in a similar way to capability + * checks, but they are not capabilities.
+ */ + +bool migrate_multifd_flush_after_each_section(void); +bool migrate_postcopy(void); +bool migrate_tls(void); + +/* capabilities helpers */ + +bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp); +bool migrate_cap_set(int cap, bool value, Error **errp); + +/* parameters */ + +const BitmapMigrationNodeAliasList *migrate_block_bitmap_mapping(void); +bool migrate_has_block_bitmap_mapping(void); + +bool migrate_block_incremental(void); +uint32_t migrate_checkpoint_delay(void); +int migrate_compress_level(void); +int migrate_compress_threads(void); +int migrate_compress_wait_thread(void); +uint8_t migrate_cpu_throttle_increment(void); +uint8_t migrate_cpu_throttle_initial(void); +bool migrate_cpu_throttle_tailslow(void); +int migrate_decompress_threads(void); +uint64_t migrate_downtime_limit(void); +uint8_t migrate_max_cpu_throttle(void); +uint64_t migrate_max_bandwidth(void); +uint64_t migrate_max_postcopy_bandwidth(void); +int migrate_multifd_channels(void); +MultiFDCompression migrate_multifd_compression(void); +int migrate_multifd_zlib_level(void); +int migrate_multifd_zstd_level(void); +uint8_t migrate_throttle_trigger_threshold(void); +const char *migrate_tls_authz(void); +const char *migrate_tls_creds(void); +const char *migrate_tls_hostname(void); +uint64_t migrate_xbzrle_cache_size(void); + +/* parameters setters */ + +void migrate_set_block_incremental(bool value); + +/* parameters helpers */ + +bool migrate_params_check(MigrationParameters *params, Error **errp); +void migrate_params_init(MigrationParameters *params); +void block_cleanup_parameters(void); + +#endif diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index f54f44d899..5615ec29eb 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -36,6 +36,8 @@ #include "yank_functions.h" #include "tls.h" #include "qemu/userfaultfd.h" +#include "qemu/mmap-alloc.h" +#include "options.h" /* Arbitrary limit on size of each discard command, * keeps them around ~200 bytes @@ -281,11 +283,13 @@ static bool request_ufd_features(int ufd, uint64_t features) return true; } -static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis) +static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis, + Error **errp) { uint64_t asked_features = 0; static uint64_t supported_features; + ERRP_GUARD(); /* * it's not possible to * request UFFD_API twice per one fd @@ -293,7 +297,7 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis) */ if (!supported_features) { if (!receive_ufd_features(&supported_features)) { - error_report("%s failed", __func__); + error_setg(errp, "Userfault feature detection failed"); return false; } } @@ -315,8 +319,7 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis) * userfault file descriptor */ if (!request_ufd_features(ufd, asked_features)) { - error_report("%s failed: features %" PRIu64, __func__, - asked_features); + error_setg(errp, "Failed features %" PRIu64, asked_features); return false; } @@ -327,7 +330,8 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis) have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS; #endif if (!have_hp) { - error_report("Userfault on this host does not support huge pages"); + error_setg(errp, + "Userfault on this host does not support huge pages"); return false; } } @@ -336,18 +340,30 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis) /* Callback from postcopy_ram_supported_by_host block iterator. 
@@ -336,18 +340,30 @@
 /* Callback from postcopy_ram_supported_by_host block iterator.
  */
-static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
+static int test_ramblock_postcopiable(RAMBlock *rb, Error **errp)
 {
     const char *block_name = qemu_ram_get_idstr(rb);
     ram_addr_t length = qemu_ram_get_used_length(rb);
     size_t pagesize = qemu_ram_pagesize(rb);
+    QemuFsType fs;
 
     if (length % pagesize) {
-        error_report("Postcopy requires RAM blocks to be a page size multiple,"
-                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
-                     "page size of 0x%zx", block_name, length, pagesize);
+        error_setg(errp,
+                   "Postcopy requires RAM blocks to be a page size multiple,"
+                   " block %s is 0x" RAM_ADDR_FMT " bytes with a "
+                   "page size of 0x%zx", block_name, length, pagesize);
         return 1;
     }
+
+    if (rb->fd >= 0) {
+        fs = qemu_fd_getfs(rb->fd);
+        if (fs != QEMU_FS_TYPE_TMPFS && fs != QEMU_FS_TYPE_HUGETLBFS) {
+            error_setg(errp,
+                       "Host backend files need to be TMPFS or HUGETLBFS only");
+            return 1;
+        }
+    }
+
     return 0;
 }
 
@@ -356,7 +372,7 @@ static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
  * normally fine since if the postcopy succeeds it gets turned back on at the
  * end.
  */
-bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
+bool postcopy_ram_supported_by_host(MigrationIncomingState *mis, Error **errp)
 {
     long pagesize = qemu_real_host_page_size();
     int ufd = -1;
@@ -365,34 +381,47 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
     struct uffdio_register reg_struct;
     struct uffdio_range range_struct;
     uint64_t feature_mask;
-    Error *local_err = NULL;
+    RAMBlock *block;
 
+    ERRP_GUARD();
     if (qemu_target_page_size() > pagesize) {
-        error_report("Target page size bigger than host page size");
+        error_setg(errp, "Target page size bigger than host page size");
         goto out;
     }
 
     ufd = uffd_open(O_CLOEXEC);
     if (ufd == -1) {
-        error_report("%s: userfaultfd not available: %s", __func__,
-                     strerror(errno));
+        error_setg(errp, "Userfaultfd not available: %s", strerror(errno));
         goto out;
     }
 
     /* Give devices a chance to object */
-    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
-        error_report_err(local_err);
+    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, errp)) {
         goto out;
     }
 
     /* Version and features check */
-    if (!ufd_check_and_apply(ufd, mis)) {
+    if (!ufd_check_and_apply(ufd, mis, errp)) {
         goto out;
     }
 
-    /* We don't support postcopy with shared RAM yet */
-    if (foreach_not_ignored_block(test_ramblock_postcopiable, NULL)) {
-        goto out;
+    /*
+     * We don't support postcopy with some types of ramblocks.
+     *
+     * NOTE: we explicitly ignore ramblock_is_ignored() and instead check
+     * all possible ramblocks.  This is because this function can be called
+     * when creating the migration object, at a phase where RAM_MIGRATABLE
+     * is not yet properly set for all the ramblocks.
+     *
+     * A side effect of this is that we'll also check against RAM_SHARED
+     * ramblocks even if migrate_ignore_shared() is set (in which case
+     * we'll never migrate RAM_SHARED at all), but in practice this
+     * shouldn't matter; we can revisit if it ever does.
+     */
+    RAMBLOCK_FOREACH(block) {
+        if (test_ramblock_postcopiable(block, errp)) {
+            goto out;
+        }
     }
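The new rb->fd check relies on qemu_fd_getfs() from qemu/mmap-alloc.h, included above. The underlying idea is an fstatfs() against well-known superblock magics; a standalone sketch of that mechanism, using the constants from linux/magic.h rather than QEMU's actual helper (illustrative only):

#include <stdio.h>
#include <sys/vfs.h>
#include <linux/magic.h>

/* illustrative stand-in for qemu_fd_getfs(): classify the fs behind an fd */
static const char *fd_fs_kind(int fd)
{
    struct statfs fs;

    if (fstatfs(fd, &fs) != 0) {
        return "unknown";
    }
    switch (fs.f_type) {
    case TMPFS_MAGIC:
        return "tmpfs";
    case HUGETLBFS_MAGIC:
        return "hugetlbfs";
    default:
        return "other";
    }
}

int main(void)
{
    printf("fd 0 lives on: %s\n", fd_fs_kind(0));
    return 0;
}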
 /*
@@ -400,7 +429,7 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
  * it was enabled.
  */
     if (munlockall()) {
-        error_report("%s: munlockall: %s", __func__, strerror(errno));
+        error_setg(errp, "munlockall() failed: %s", strerror(errno));
         goto out;
     }
 
@@ -412,8 +441,7 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
     testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (testarea == MAP_FAILED) {
-        error_report("%s: Failed to map test area: %s", __func__,
-                     strerror(errno));
+        error_setg(errp, "Failed to map test area: %s", strerror(errno));
         goto out;
     }
     g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));
@@ -423,14 +451,14 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
     reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
 
     if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
-        error_report("%s userfault register: %s", __func__, strerror(errno));
+        error_setg(errp, "UFFDIO_REGISTER failed: %s", strerror(errno));
         goto out;
     }
 
     range_struct.start = (uintptr_t)testarea;
     range_struct.len = pagesize;
     if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
-        error_report("%s userfault unregister: %s", __func__, strerror(errno));
+        error_setg(errp, "UFFDIO_UNREGISTER failed: %s", strerror(errno));
         goto out;
     }
 
@@ -438,8 +466,8 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
                    (__u64)1 << _UFFDIO_COPY |
                    (__u64)1 << _UFFDIO_ZEROPAGE;
     if ((reg_struct.ioctls & feature_mask) != feature_mask) {
-        error_report("Missing userfault map features: %" PRIx64,
-                     (uint64_t)(~reg_struct.ioctls & feature_mask));
+        error_setg(errp, "Missing userfault map features: %" PRIx64,
+                   (uint64_t)(~reg_struct.ioctls & feature_mask));
         goto out;
     }
 
@@ -568,9 +596,14 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
 {
     trace_postcopy_ram_incoming_cleanup_entry();
 
-    if (mis->postcopy_prio_thread_created) {
+    if (mis->preempt_thread_status == PREEMPT_THREAD_CREATED) {
+        /* Notify the fast load thread to quit */
+        mis->preempt_thread_status = PREEMPT_THREAD_QUIT;
+        if (mis->postcopy_qemufile_dst) {
+            qemu_file_shutdown(mis->postcopy_qemufile_dst);
+        }
         qemu_thread_join(&mis->postcopy_prio_thread);
-        mis->postcopy_prio_thread_created = false;
+        mis->preempt_thread_status = PREEMPT_THREAD_NONE;
     }
 
     if (mis->have_fault_thread) {
@@ -1156,6 +1189,8 @@ static int postcopy_temp_pages_setup(MigrationIncomingState *mis)
 
 int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
 {
+    Error *local_err = NULL;
+
     /* Open the fd for the kernel to give us userfaults */
     mis->userfault_fd = uffd_open(O_CLOEXEC | O_NONBLOCK);
     if (mis->userfault_fd == -1) {
@@ -1168,7 +1203,8 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
      * Although the host check already tested the API, we need to
      * do the check again as an ABI handshake on the new fd.
      */
-    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
+    if (!ufd_check_and_apply(mis->userfault_fd, mis, &local_err)) {
+        error_report_err(local_err);
         return -1;
     }
 
@@ -1197,18 +1233,13 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
     }
 
     if (migrate_postcopy_preempt()) {
-        /*
-         * The preempt channel is established in asynchronous way. Wait
-         * for its completion.
-         */
-        qemu_sem_wait(&mis->postcopy_qemufile_dst_done);
         /*
          * This thread needs to be created after the temp pages because
         * it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately.
 */
        postcopy_thread_create(mis, &mis->postcopy_prio_thread, "fault-fast",
                               postcopy_preempt_thread, QEMU_THREAD_JOINABLE);
-        mis->postcopy_prio_thread_created = true;
+        mis->preempt_thread_status = PREEMPT_THREAD_CREATED;
     }
 
     trace_postcopy_ram_enable_notify();
@@ -1333,7 +1364,7 @@ void fill_destination_postcopy_migration_info(MigrationInfo *info)
 {
 }
 
-bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
+bool postcopy_ram_supported_by_host(MigrationIncomingState *mis, Error **errp)
 {
     error_report("%s: No OS support", __func__);
     return false;
@@ -1500,7 +1531,7 @@ static PostcopyState incoming_postcopy_state;
 
 PostcopyState postcopy_state_get(void)
 {
-    return qatomic_mb_read(&incoming_postcopy_state);
+    return qatomic_load_acquire(&incoming_postcopy_state);
 }
 
 /* Set the state and return the old state */
@@ -1601,7 +1632,7 @@ postcopy_preempt_send_channel_new(QIOTask *task, gpointer opaque)
     }
 
     if (migrate_channel_requires_tls_upgrade(ioc)) {
-        tioc = migration_tls_client_create(s, ioc, s->hostname, &local_err);
+        tioc = migration_tls_client_create(ioc, s->hostname, &local_err);
         if (!tioc) {
             goto out;
         }
@@ -1630,8 +1661,14 @@ int postcopy_preempt_establish_channel(MigrationState *s)
         return 0;
     }
 
-    /* Kick off async task to establish preempt channel */
-    postcopy_preempt_setup(s);
+    /*
+     * Kick off async task to establish preempt channel.  Only do so with
+     * 8.0+ machines, because 7.1/7.2 require the channel to be created in
+     * the setup phase of migration (even if racy in an unreliable network).
+     */
+    if (!s->preempt_pre_7_2) {
+        postcopy_preempt_setup(s);
+    }
 
     /*
     * We need the postcopy preempt channel to be established before
@@ -1657,6 +1694,11 @@ static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis)
     trace_postcopy_pause_fast_load_continued();
 }
 
+static bool preempt_thread_should_run(MigrationIncomingState *mis)
+{
+    return mis->preempt_thread_status != PREEMPT_THREAD_QUIT;
+}
+
 void *postcopy_preempt_thread(void *opaque)
 {
     MigrationIncomingState *mis = opaque;
@@ -1668,13 +1710,19 @@ void *postcopy_preempt_thread(void *opaque)
 
     qemu_sem_post(&mis->thread_sync_sem);
 
+    /*
+     * The preempt channel is established in an asynchronous way.  Wait
+     * for its completion.
+ */ + qemu_sem_wait(&mis->postcopy_qemufile_dst_done); + /* Sending RAM_SAVE_FLAG_EOS to terminate this thread */ qemu_mutex_lock(&mis->postcopy_prio_thread_mutex); - while (1) { + while (preempt_thread_should_run(mis)) { ret = ram_load_postcopy(mis->postcopy_qemufile_dst, RAM_CHANNEL_POSTCOPY); /* If error happened, go into recovery routine */ - if (ret) { + if (ret && preempt_thread_should_run(mis)) { postcopy_pause_ram_fast_load(mis); } else { /* We're done */ diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h index b4867a32d5..442ab89752 100644 --- a/migration/postcopy-ram.h +++ b/migration/postcopy-ram.h @@ -14,7 +14,8 @@ #define QEMU_POSTCOPY_RAM_H /* Return true if the host supports everything we need to do postcopy-ram */ -bool postcopy_ram_supported_by_host(MigrationIncomingState *mis); +bool postcopy_ram_supported_by_host(MigrationIncomingState *mis, + Error **errp); /* * Make all of RAM sensitive to accesses to areas that haven't yet been written diff --git a/migration/qemu-file.c b/migration/qemu-file.c index 102ab3b439..acc282654a 100644 --- a/migration/qemu-file.c +++ b/migration/qemu-file.c @@ -27,8 +27,10 @@ #include "qemu/error-report.h" #include "qemu/iov.h" #include "migration.h" +#include "migration-stats.h" #include "qemu-file.h" #include "trace.h" +#include "options.h" #include "qapi/error.h" #define IO_BUF_SIZE 32768 @@ -39,19 +41,8 @@ struct QEMUFile { QIOChannel *ioc; bool is_writable; - /* - * Maximum amount of data in bytes to transfer during one - * rate limiting time window - */ - int64_t rate_limit_max; - /* - * Total amount of data in bytes queued for transfer - * during this rate limiting time window - */ - int64_t rate_limit_used; - /* The sum of bytes transferred on the wire */ - int64_t total_transferred; + uint64_t total_transferred; int buf_index; int buf_size; /* 0 when writing */ @@ -63,8 +54,6 @@ struct QEMUFile { int last_error; Error *last_error_obj; - /* has the file has been shutdown */ - bool shutdown; }; /* @@ -78,8 +67,6 @@ int qemu_file_shutdown(QEMUFile *f) { int ret = 0; - f->shutdown = true; - /* * We must set qemufile error before the real shutdown(), otherwise * there can be a race window where we thought IO all went though @@ -294,7 +281,7 @@ void qemu_fflush(QEMUFile *f) return; } - if (f->shutdown) { + if (qemu_file_get_error(f)) { return; } if (f->iovcnt > 0) { @@ -304,7 +291,8 @@ void qemu_fflush(QEMUFile *f) &local_error) < 0) { qemu_file_set_error_obj(f, -EIO, local_error); } else { - f->total_transferred += iov_size(f->iov, f->iovcnt); + uint64_t size = iov_size(f->iov, f->iovcnt); + f->total_transferred += size; } qemu_iovec_release_ram(f); @@ -340,21 +328,11 @@ void ram_control_after_iterate(QEMUFile *f, uint64_t flags) void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data) { - int ret = -EINVAL; - if (f->hooks && f->hooks->hook_ram_load) { - ret = f->hooks->hook_ram_load(f, flags, data); + int ret = f->hooks->hook_ram_load(f, flags, data); if (ret < 0) { qemu_file_set_error(f, ret); } - } else { - /* - * Hook is a hook specifically requested by the source sending a flag - * that expects there to be a hook on the destination. 
- */ - if (flags == RAM_CONTROL_HOOK) { - qemu_file_set_error(f, ret); - } } } @@ -365,9 +343,6 @@ size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset, if (f->hooks && f->hooks->save_page) { int ret = f->hooks->save_page(f, block_offset, offset, size, bytes_sent); - if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { - f->rate_limit_used += size; - } if (ret != RAM_SAVE_CONTROL_DELAYED && ret != RAM_SAVE_CONTROL_NOT_SUPP) { @@ -392,7 +367,7 @@ size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset, * case if the underlying file descriptor gives a short read, and that can * happen even on a blocking fd. */ -static ssize_t qemu_fill_buffer(QEMUFile *f) +static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f) { int len; int pending; @@ -407,7 +382,7 @@ static ssize_t qemu_fill_buffer(QEMUFile *f) f->buf_index = 0; f->buf_size = pending; - if (f->shutdown) { + if (qemu_file_get_error(f)) { return 0; } @@ -496,7 +471,7 @@ static int add_to_iovec(QEMUFile *f, const uint8_t *buf, size_t size, } else { if (f->iovcnt >= MAX_IOV_SIZE) { /* Should only happen if a previous fflush failed */ - assert(f->shutdown || !qemu_file_is_writable(f)); + assert(qemu_file_get_error(f) || !qemu_file_is_writable(f)); return 1; } if (may_free) { @@ -531,7 +506,6 @@ void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, size_t size, return; } - f->rate_limit_used += size; add_to_iovec(f, buf, size, may_free); } @@ -549,7 +523,6 @@ void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size) l = size; } memcpy(f->buf + f->buf_index, buf, l); - f->rate_limit_used += l; add_buf_to_iovec(f, l); if (qemu_file_get_error(f)) { break; @@ -566,7 +539,6 @@ void qemu_put_byte(QEMUFile *f, int v) } f->buf[f->buf_index] = v; - f->rate_limit_used++; add_buf_to_iovec(f, 1); } @@ -585,7 +557,7 @@ void qemu_file_skip(QEMUFile *f, int size) * return as many as it managed to read (assuming blocking fd's which * all current QEMUFile are) */ -size_t qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset) +size_t coroutine_mixed_fn qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset) { ssize_t pending; size_t index; @@ -633,7 +605,7 @@ size_t qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset) * return as many as it managed to read (assuming blocking fd's which * all current QEMUFile are) */ -size_t qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size) +size_t coroutine_mixed_fn qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size) { size_t pending = size; size_t done = 0; @@ -674,7 +646,7 @@ size_t qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size) * Note: Since **buf may get changed, the caller should take care to * keep a pointer to the original buffer if it needs to deallocate it. */ -size_t qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size) +size_t coroutine_mixed_fn qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size) { if (size < IO_BUF_SIZE) { size_t res; @@ -696,7 +668,7 @@ size_t qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size) * Peeks a single byte from the buffer; this isn't guaranteed to work if * offset leaves a gap after the previous read/peeked data. 
*/ -int qemu_peek_byte(QEMUFile *f, int offset) +int coroutine_mixed_fn qemu_peek_byte(QEMUFile *f, int offset) { int index = f->buf_index + offset; @@ -713,7 +685,7 @@ int qemu_peek_byte(QEMUFile *f, int offset) return f->buf[index]; } -int qemu_get_byte(QEMUFile *f) +int coroutine_mixed_fn qemu_get_byte(QEMUFile *f) { int result; @@ -722,9 +694,9 @@ int qemu_get_byte(QEMUFile *f) return result; } -int64_t qemu_file_total_transferred_fast(QEMUFile *f) +uint64_t qemu_file_transferred_fast(QEMUFile *f) { - int64_t ret = f->total_transferred; + uint64_t ret = f->total_transferred; int i; for (i = 0; i < f->iovcnt; i++) { @@ -734,46 +706,12 @@ int64_t qemu_file_total_transferred_fast(QEMUFile *f) return ret; } -int64_t qemu_file_total_transferred(QEMUFile *f) +uint64_t qemu_file_transferred(QEMUFile *f) { qemu_fflush(f); return f->total_transferred; } -int qemu_file_rate_limit(QEMUFile *f) -{ - if (f->shutdown) { - return 1; - } - if (qemu_file_get_error(f)) { - return 1; - } - if (f->rate_limit_max > 0 && f->rate_limit_used > f->rate_limit_max) { - return 1; - } - return 0; -} - -int64_t qemu_file_get_rate_limit(QEMUFile *f) -{ - return f->rate_limit_max; -} - -void qemu_file_set_rate_limit(QEMUFile *f, int64_t limit) -{ - f->rate_limit_max = limit; -} - -void qemu_file_reset_rate_limit(QEMUFile *f) -{ - f->rate_limit_used = 0; -} - -void qemu_file_acct_rate_limit(QEMUFile *f, int64_t len) -{ - f->rate_limit_used += len; -} - void qemu_put_be16(QEMUFile *f, unsigned int v) { qemu_put_byte(f, v >> 8); @@ -887,6 +825,17 @@ int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src) return len; } +/* + * Check if the writable buffer is empty + */ + +bool qemu_file_buffer_empty(QEMUFile *file) +{ + assert(qemu_file_is_writable(file)); + + return !file->iovcnt; +} + /* * Get a string whose length is determined by a single preceding byte * A preallocated 256 byte buffer must be passed in. @@ -894,7 +843,7 @@ int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src) * else 0 * (Note a 0 length string will return 0 either way) */ -size_t qemu_get_counted_string(QEMUFile *f, char buf[256]) +size_t coroutine_fn qemu_get_counted_string(QEMUFile *f, char buf[256]) { size_t len = qemu_get_byte(f); size_t res = qemu_get_buffer(f, (uint8_t *)buf, len); diff --git a/migration/qemu-file.h b/migration/qemu-file.h index 9d0155a2a1..e649718492 100644 --- a/migration/qemu-file.h +++ b/migration/qemu-file.h @@ -68,7 +68,7 @@ void qemu_file_set_hooks(QEMUFile *f, const QEMUFileHooks *hooks); int qemu_fclose(QEMUFile *f); /* - * qemu_file_total_transferred: + * qemu_file_transferred: * * Report the total number of bytes transferred with * this file. @@ -83,19 +83,19 @@ int qemu_fclose(QEMUFile *f); * * Returns: the total bytes transferred */ -int64_t qemu_file_total_transferred(QEMUFile *f); +uint64_t qemu_file_transferred(QEMUFile *f); /* - * qemu_file_total_transferred_fast: + * qemu_file_transferred_fast: * - * As qemu_file_total_transferred except for writable + * As qemu_file_transferred except for writable * files, where no flush is performed and the reported * amount will include the size of any queued buffers, * on top of the amount actually transferred. * * Returns: the total bytes transferred and queued */ -int64_t qemu_file_total_transferred_fast(QEMUFile *f); +uint64_t qemu_file_transferred_fast(QEMUFile *f); /* * put_buffer without copying the buffer. 
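With rate limiting gone from QEMUFile, the two remaining accessors differ only in whether bytes still queued in the iovec array are counted. A usage sketch, assuming QEMU context (the function and variable names here are illustrative):

static void trace_progress(QEMUFile *f)
{
    /* cheap: no flush, also counts bytes still queued in f->iov[] */
    uint64_t queued_inclusive = qemu_file_transferred_fast(f);

    /* precise: flushes the buffer first, then reports wire bytes only */
    uint64_t on_wire = qemu_file_transferred(f);

    fprintf(stderr, "queued+sent=%" PRIu64 " sent=%" PRIu64 "\n",
            queued_inclusive, on_wire);
}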
@@ -108,18 +108,19 @@ bool qemu_file_is_writable(QEMUFile *f);
 
 #include "migration/qemu-file-types.h"
 
-size_t qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset);
-size_t qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size);
+size_t coroutine_mixed_fn qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset);
+size_t coroutine_mixed_fn qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size);
 ssize_t qemu_put_compression_data(QEMUFile *f, z_stream *stream,
                                   const uint8_t *p, size_t size);
 int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src);
+bool qemu_file_buffer_empty(QEMUFile *file);
 
 /*
  * Note that you can only peek continuous bytes from where the current pointer
 * is; you aren't guaranteed to be able to peek to +n bytes unless you've
 * previously peeked +n-1.
 */
-int qemu_peek_byte(QEMUFile *f, int offset);
+int coroutine_mixed_fn qemu_peek_byte(QEMUFile *f, int offset);
 void qemu_file_skip(QEMUFile *f, int size);
 /*
  * qemu_file_credit_transfer:
@@ -129,17 +130,6 @@ void qemu_file_skip(QEMUFile *f, int size);
  * accounting information tracks the total migration traffic.
 */
 void qemu_file_credit_transfer(QEMUFile *f, size_t size);
-void qemu_file_reset_rate_limit(QEMUFile *f);
-/*
- * qemu_file_acct_rate_limit:
- *
- * Report on a number of bytes the have been transferred
- * out of band from the main file object I/O methods, and
- * need to be applied to the rate limiting calcuations
- */
-void qemu_file_acct_rate_limit(QEMUFile *f, int64_t len);
-void qemu_file_set_rate_limit(QEMUFile *f, int64_t new_rate);
-int64_t qemu_file_get_rate_limit(QEMUFile *f);
 int qemu_file_get_error_obj(QEMUFile *f, Error **errp);
 int qemu_file_get_error_obj_any(QEMUFile *f1, QEMUFile *f2, Error **errp);
 void qemu_file_set_error_obj(QEMUFile *f, int ret, Error *err);
diff --git a/migration/ram-compress.c b/migration/ram-compress.c
new file mode 100644
index 0000000000..06254d8c69
--- /dev/null
+++ b/migration/ram-compress.c
@@ -0,0 +1,485 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2011-2015 Red Hat Inc
+ *
+ * Authors:
+ *  Juan Quintela
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
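Before reading the pool code that follows, it may help to see the handshake it implements in miniature: each worker sleeps on its own condition variable until the producer sets a trigger, and completion is reported back under a shared done-lock. A self-contained pthreads sketch of that shape (plain C, not QEMU's qemu-thread wrappers; all names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    bool trigger;          /* producer -> worker: work available */
    bool quit;             /* producer -> worker: shut down */
    int job;               /* the "work item" */
} Worker;

static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;
static bool done;

static void *worker_fn(void *opaque)
{
    Worker *w = opaque;

    pthread_mutex_lock(&w->mutex);
    while (!w->quit) {
        if (w->trigger) {
            int job = w->job;
            w->trigger = false;
            pthread_mutex_unlock(&w->mutex);

            printf("compressing job %d\n", job);   /* the real work */

            pthread_mutex_lock(&done_lock);
            done = true;                           /* report completion */
            pthread_cond_signal(&done_cond);
            pthread_mutex_unlock(&done_lock);

            pthread_mutex_lock(&w->mutex);
        } else {
            pthread_cond_wait(&w->cond, &w->mutex);
        }
    }
    pthread_mutex_unlock(&w->mutex);
    return NULL;
}

int main(void)
{
    Worker w = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
    pthread_t tid;

    pthread_create(&tid, NULL, worker_fn, &w);

    pthread_mutex_lock(&w.mutex);      /* hand one job to the worker */
    w.job = 42;
    w.trigger = true;
    pthread_cond_signal(&w.cond);
    pthread_mutex_unlock(&w.mutex);

    pthread_mutex_lock(&done_lock);    /* wait for it to finish */
    while (!done) {
        pthread_cond_wait(&done_cond, &done_lock);
    }
    pthread_mutex_unlock(&done_lock);

    pthread_mutex_lock(&w.mutex);      /* ask it to exit and join */
    w.quit = true;
    pthread_cond_signal(&w.cond);
    pthread_mutex_unlock(&w.mutex);
    pthread_join(tid, NULL);
    return 0;
}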
+
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+
+#include "ram-compress.h"
+
+#include "qemu/error-report.h"
+#include "migration.h"
+#include "options.h"
+#include "io/channel-null.h"
+#include "exec/target_page.h"
+#include "exec/ramblock.h"
+
+CompressionStats compression_counters;
+
+static CompressParam *comp_param;
+static QemuThread *compress_threads;
+/* comp_done_cond is used to wake up the migration thread when
+ * one of the compression threads has finished the compression.
+ * comp_done_lock is used to co-work with comp_done_cond.
+ */
+static QemuMutex comp_done_lock;
+static QemuCond comp_done_cond;
+
+struct DecompressParam {
+    bool done;
+    bool quit;
+    QemuMutex mutex;
+    QemuCond cond;
+    void *des;
+    uint8_t *compbuf;
+    int len;
+    z_stream stream;
+};
+typedef struct DecompressParam DecompressParam;
+
+static QEMUFile *decomp_file;
+static DecompressParam *decomp_param;
+static QemuThread *decompress_threads;
+static QemuMutex decomp_done_lock;
+static QemuCond decomp_done_cond;
+
+static CompressResult do_compress_ram_page(QEMUFile *f, z_stream *stream,
+                                           RAMBlock *block, ram_addr_t offset,
+                                           uint8_t *source_buf);
+
+static void *do_data_compress(void *opaque)
+{
+    CompressParam *param = opaque;
+    RAMBlock *block;
+    ram_addr_t offset;
+    CompressResult result;
+
+    qemu_mutex_lock(&param->mutex);
+    while (!param->quit) {
+        if (param->trigger) {
+            block = param->block;
+            offset = param->offset;
+            param->trigger = false;
+            qemu_mutex_unlock(&param->mutex);
+
+            result = do_compress_ram_page(param->file, &param->stream,
+                                          block, offset, param->originbuf);
+
+            qemu_mutex_lock(&comp_done_lock);
+            param->done = true;
+            param->result = result;
+            qemu_cond_signal(&comp_done_cond);
+            qemu_mutex_unlock(&comp_done_lock);
+
+            qemu_mutex_lock(&param->mutex);
+        } else {
+            qemu_cond_wait(&param->cond, &param->mutex);
+        }
+    }
+    qemu_mutex_unlock(&param->mutex);
+
+    return NULL;
+}
+
+void compress_threads_save_cleanup(void)
+{
+    int i, thread_count;
+
+    if (!migrate_compress() || !comp_param) {
+        return;
+    }
+
+    thread_count = migrate_compress_threads();
+    for (i = 0; i < thread_count; i++) {
+        /*
+         * we use it as an indicator which shows if the thread is
+         * properly init'd or not
+         */
+        if (!comp_param[i].file) {
+            break;
+        }
+
+        qemu_mutex_lock(&comp_param[i].mutex);
+        comp_param[i].quit = true;
+        qemu_cond_signal(&comp_param[i].cond);
+        qemu_mutex_unlock(&comp_param[i].mutex);
+
+        qemu_thread_join(compress_threads + i);
+        qemu_mutex_destroy(&comp_param[i].mutex);
+        qemu_cond_destroy(&comp_param[i].cond);
+        deflateEnd(&comp_param[i].stream);
+        g_free(comp_param[i].originbuf);
+        qemu_fclose(comp_param[i].file);
+        comp_param[i].file = NULL;
+    }
+    qemu_mutex_destroy(&comp_done_lock);
+    qemu_cond_destroy(&comp_done_cond);
+    g_free(compress_threads);
+    g_free(comp_param);
+    compress_threads = NULL;
+    comp_param = NULL;
+}
+
+int compress_threads_save_setup(void)
+{
+    int i, thread_count;
+
+    if (!migrate_compress()) {
+        return 0;
+    }
+    thread_count = migrate_compress_threads();
+    compress_threads = g_new0(QemuThread, thread_count);
+    comp_param = g_new0(CompressParam, thread_count);
+    qemu_cond_init(&comp_done_cond);
+    qemu_mutex_init(&comp_done_lock);
+    for (i = 0; i < thread_count; i++) {
+        comp_param[i].originbuf = g_try_malloc(qemu_target_page_size());
+        if (!comp_param[i].originbuf) {
+            goto exit;
+        }
+
+        if (deflateInit(&comp_param[i].stream,
+                        migrate_compress_level()) != Z_OK) {
+            g_free(comp_param[i].originbuf);
+            goto exit;
+        }
+
+        /* comp_param[i].file is just used as a dummy buffer to save data,
 * set its ops to empty.
+         */
+        comp_param[i].file = qemu_file_new_output(
+            QIO_CHANNEL(qio_channel_null_new()));
+        comp_param[i].done = true;
+        comp_param[i].quit = false;
+        qemu_mutex_init(&comp_param[i].mutex);
+        qemu_cond_init(&comp_param[i].cond);
+        qemu_thread_create(compress_threads + i, "compress",
+                           do_data_compress, comp_param + i,
+                           QEMU_THREAD_JOINABLE);
+    }
+    return 0;
+
+exit:
+    compress_threads_save_cleanup();
+    return -1;
+}
+
+static CompressResult do_compress_ram_page(QEMUFile *f, z_stream *stream,
+                                           RAMBlock *block, ram_addr_t offset,
+                                           uint8_t *source_buf)
+{
+    uint8_t *p = block->host + offset;
+    size_t page_size = qemu_target_page_size();
+    int ret;
+
+    assert(qemu_file_buffer_empty(f));
+
+    if (buffer_is_zero(p, page_size)) {
+        return RES_ZEROPAGE;
+    }
+
+    /*
+     * copy it to an internal buffer to avoid it being modified by the VM
+     * so that we can catch errors during compression and
+     * decompression
+     */
+    memcpy(source_buf, p, page_size);
+    ret = qemu_put_compression_data(f, stream, source_buf, page_size);
+    if (ret < 0) {
+        qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
+        error_report("compressed data failed!");
+        qemu_fflush(f);
+        return RES_NONE;
+    }
+    return RES_COMPRESS;
+}
+
+static inline void compress_reset_result(CompressParam *param)
+{
+    param->result = RES_NONE;
+    param->block = NULL;
+    param->offset = 0;
+}
+
+void flush_compressed_data(int (send_queued_data(CompressParam *)))
+{
+    int idx, thread_count;
+
+    thread_count = migrate_compress_threads();
+
+    qemu_mutex_lock(&comp_done_lock);
+    for (idx = 0; idx < thread_count; idx++) {
+        while (!comp_param[idx].done) {
+            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
+        }
+    }
+    qemu_mutex_unlock(&comp_done_lock);
+
+    for (idx = 0; idx < thread_count; idx++) {
+        qemu_mutex_lock(&comp_param[idx].mutex);
+        if (!comp_param[idx].quit) {
+            CompressParam *param = &comp_param[idx];
+            send_queued_data(param);
+            assert(qemu_file_buffer_empty(param->file));
+            compress_reset_result(param);
+        }
+        qemu_mutex_unlock(&comp_param[idx].mutex);
+    }
+}
+
+static inline void set_compress_params(CompressParam *param, RAMBlock *block,
+                                       ram_addr_t offset)
+{
+    param->block = block;
+    param->offset = offset;
+    param->trigger = true;
+}
+
+int compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset,
+                                    int (send_queued_data(CompressParam *)))
+{
+    int idx, thread_count, pages = -1;
+    bool wait = migrate_compress_wait_thread();
+
+    thread_count = migrate_compress_threads();
+    qemu_mutex_lock(&comp_done_lock);
+retry:
+    for (idx = 0; idx < thread_count; idx++) {
+        if (comp_param[idx].done) {
+            CompressParam *param = &comp_param[idx];
+            qemu_mutex_lock(&param->mutex);
+            param->done = false;
+            send_queued_data(param);
+            assert(qemu_file_buffer_empty(param->file));
+            compress_reset_result(param);
+            set_compress_params(param, block, offset);
+
+            qemu_cond_signal(&param->cond);
+            qemu_mutex_unlock(&param->mutex);
+            pages = 1;
+            break;
+        }
+    }
+
+    /*
+     * wait for the free thread if the user specifies 'compress-wait-thread',
+     * otherwise we will post the page out in the main thread as a normal page.
+     */
+    if (pages < 0 && wait) {
+        qemu_cond_wait(&comp_done_cond, &comp_done_lock);
+        goto retry;
+    }
+    qemu_mutex_unlock(&comp_done_lock);
+
+    return pages;
+}
+
+/* return the size after decompression, or negative value on error */
+static int
+qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
+                     const uint8_t *source, size_t source_len)
+{
+    int err;
+
+    err = inflateReset(stream);
+    if (err != Z_OK) {
+        return -1;
+    }
+
+    stream->avail_in = source_len;
+    stream->next_in = (uint8_t *)source;
+    stream->avail_out = dest_len;
+    stream->next_out = dest;
+
+    err = inflate(stream, Z_NO_FLUSH);
+    if (err != Z_STREAM_END) {
+        return -1;
+    }
+
+    return stream->total_out;
+}
+
+static void *do_data_decompress(void *opaque)
+{
+    DecompressParam *param = opaque;
+    unsigned long pagesize;
+    uint8_t *des;
+    int len, ret;
+
+    qemu_mutex_lock(&param->mutex);
+    while (!param->quit) {
+        if (param->des) {
+            des = param->des;
+            len = param->len;
+            param->des = 0;
+            qemu_mutex_unlock(&param->mutex);
+
+            pagesize = qemu_target_page_size();
+
+            ret = qemu_uncompress_data(&param->stream, des, pagesize,
+                                       param->compbuf, len);
+            if (ret < 0 && migrate_get_current()->decompress_error_check) {
+                error_report("decompress data failed");
+                qemu_file_set_error(decomp_file, ret);
+            }
+
+            qemu_mutex_lock(&decomp_done_lock);
+            param->done = true;
+            qemu_cond_signal(&decomp_done_cond);
+            qemu_mutex_unlock(&decomp_done_lock);
+
+            qemu_mutex_lock(&param->mutex);
+        } else {
+            qemu_cond_wait(&param->cond, &param->mutex);
+        }
+    }
+    qemu_mutex_unlock(&param->mutex);
+
+    return NULL;
+}
+
+int wait_for_decompress_done(void)
+{
+    int idx, thread_count;
+
+    if (!migrate_compress()) {
+        return 0;
+    }
+
+    thread_count = migrate_decompress_threads();
+    qemu_mutex_lock(&decomp_done_lock);
+    for (idx = 0; idx < thread_count; idx++) {
+        while (!decomp_param[idx].done) {
+            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
+        }
+    }
+    qemu_mutex_unlock(&decomp_done_lock);
+    return qemu_file_get_error(decomp_file);
+}
+
+void compress_threads_load_cleanup(void)
+{
+    int i, thread_count;
+
+    if (!migrate_compress()) {
+        return;
+    }
+    thread_count = migrate_decompress_threads();
+    for (i = 0; i < thread_count; i++) {
+        /*
+         * we use it as an indicator which shows if the thread is
+         * properly init'd or not
+         */
+        if (!decomp_param[i].compbuf) {
+            break;
+        }
+
+        qemu_mutex_lock(&decomp_param[i].mutex);
+        decomp_param[i].quit = true;
+        qemu_cond_signal(&decomp_param[i].cond);
+        qemu_mutex_unlock(&decomp_param[i].mutex);
+    }
+    for (i = 0; i < thread_count; i++) {
+        if (!decomp_param[i].compbuf) {
+            break;
+        }
+
+        qemu_thread_join(decompress_threads + i);
+        qemu_mutex_destroy(&decomp_param[i].mutex);
+        qemu_cond_destroy(&decomp_param[i].cond);
+        inflateEnd(&decomp_param[i].stream);
+        g_free(decomp_param[i].compbuf);
+        decomp_param[i].compbuf = NULL;
+    }
+    g_free(decompress_threads);
+    g_free(decomp_param);
+    decompress_threads = NULL;
+    decomp_param = NULL;
+    decomp_file = NULL;
+}
+
+int compress_threads_load_setup(QEMUFile *f)
+{
+    int i, thread_count;
+
+    if (!migrate_compress()) {
+        return 0;
+    }
+
+    thread_count = migrate_decompress_threads();
+    decompress_threads = g_new0(QemuThread, thread_count);
+    decomp_param = g_new0(DecompressParam, thread_count);
+    qemu_mutex_init(&decomp_done_lock);
+    qemu_cond_init(&decomp_done_cond);
+    decomp_file = f;
+    for (i = 0; i < thread_count; i++) {
+        if (inflateInit(&decomp_param[i].stream) != Z_OK) {
+            goto exit;
+        }
+
+        size_t compbuf_size = compressBound(qemu_target_page_size());
+        decomp_param[i].compbuf =
g_malloc0(compbuf_size); + qemu_mutex_init(&decomp_param[i].mutex); + qemu_cond_init(&decomp_param[i].cond); + decomp_param[i].done = true; + decomp_param[i].quit = false; + qemu_thread_create(decompress_threads + i, "decompress", + do_data_decompress, decomp_param + i, + QEMU_THREAD_JOINABLE); + } + return 0; +exit: + compress_threads_load_cleanup(); + return -1; +} + +void decompress_data_with_multi_threads(QEMUFile *f, void *host, int len) +{ + int idx, thread_count; + + thread_count = migrate_decompress_threads(); + QEMU_LOCK_GUARD(&decomp_done_lock); + while (true) { + for (idx = 0; idx < thread_count; idx++) { + if (decomp_param[idx].done) { + decomp_param[idx].done = false; + qemu_mutex_lock(&decomp_param[idx].mutex); + qemu_get_buffer(f, decomp_param[idx].compbuf, len); + decomp_param[idx].des = host; + decomp_param[idx].len = len; + qemu_cond_signal(&decomp_param[idx].cond); + qemu_mutex_unlock(&decomp_param[idx].mutex); + break; + } + } + if (idx < thread_count) { + break; + } else { + qemu_cond_wait(&decomp_done_cond, &decomp_done_lock); + } + } +} diff --git a/migration/ram-compress.h b/migration/ram-compress.h new file mode 100644 index 0000000000..6f7fe2f472 --- /dev/null +++ b/migration/ram-compress.h @@ -0,0 +1,70 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2011-2015 Red Hat Inc + * + * Authors: + * Juan Quintela + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef QEMU_MIGRATION_COMPRESS_H +#define QEMU_MIGRATION_COMPRESS_H + +#include "qemu-file.h" + +enum CompressResult { + RES_NONE = 0, + RES_ZEROPAGE = 1, + RES_COMPRESS = 2 +}; +typedef enum CompressResult CompressResult; + +struct CompressParam { + bool done; + bool quit; + bool trigger; + CompressResult result; + QEMUFile *file; + QemuMutex mutex; + QemuCond cond; + RAMBlock *block; + ram_addr_t offset; + + /* internally used fields */ + z_stream stream; + uint8_t *originbuf; +}; +typedef struct CompressParam CompressParam; + +void compress_threads_save_cleanup(void); +int compress_threads_save_setup(void); + +void flush_compressed_data(int (send_queued_data(CompressParam *))); +int compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset, + int (send_queued_data(CompressParam *))); + +int wait_for_decompress_done(void); +void compress_threads_load_cleanup(void); +int compress_threads_load_setup(QEMUFile *f); +void decompress_data_with_multi_threads(QEMUFile *f, void *host, int len); + +#endif diff --git a/migration/ram.c b/migration/ram.c index 96e8a19a58..88a6c82e63 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -32,10 +32,11 @@ #include "qemu/bitmap.h" #include "qemu/madvise.h" #include "qemu/main-loop.h" -#include "io/channel-null.h" #include "xbzrle.h" +#include "ram-compress.h" #include "ram.h" #include "migration.h" +#include "migration-stats.h" #include "migration/register.h" #include "migration/misc.h" #include "qemu-file.h" @@ -57,6 +58,7 @@ #include "qemu/iov.h" #include "multifd.h" #include "sysemu/runstate.h" +#include "options.h" #include "hw/boards.h" /* for machine_dump_guest_core() */ @@ -85,36 +87,9 @@ #define RAM_SAVE_FLAG_XBZRLE 0x40 /* 0x80 is reserved in qemu-file.h for RAM_SAVE_FLAG_HOOK */ #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100 +#define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x200 /* We can't use any flag that is bigger than 0x200 */ -int (*xbzrle_encode_buffer_func)(uint8_t *, uint8_t *, int, - uint8_t *, int) = xbzrle_encode_buffer; -#if defined(CONFIG_AVX512BW_OPT) -#include "qemu/cpuid.h" -static void __attribute__((constructor)) init_cpu_flag(void) -{ - unsigned max = __get_cpuid_max(0, NULL); - int a, b, c, d; - if (max >= 1) { - __cpuid(1, a, b, c, d); - /* We must check that AVX is not just available, but usable. */ - if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) { - int bv; - __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0)); - __cpuid_count(7, 0, a, b, c, d); - /* 0xe6: - * XCR0[7:5] = 111b (OPMASK state, upper 256-bit of ZMM0-ZMM15 - * and ZMM16-ZMM31 state are enabled by OS) - * XCR0[2:1] = 11b (XMM state and YMM state are enabled by OS) - */ - if ((bv & 0xe6) == 0xe6 && (b & bit_AVX512BW)) { - xbzrle_encode_buffer_func = xbzrle_encode_buffer_avx512; - } - } - } -} -#endif - XBZRLECacheStats xbzrle_counters; /* used by the search for pages to send */ @@ -155,14 +130,14 @@ static struct { static void XBZRLE_cache_lock(void) { - if (migrate_use_xbzrle()) { + if (migrate_xbzrle()) { qemu_mutex_lock(&XBZRLE.lock); } } static void XBZRLE_cache_unlock(void) { - if (migrate_use_xbzrle()) { + if (migrate_xbzrle()) { qemu_mutex_unlock(&XBZRLE.lock); } } @@ -385,8 +360,8 @@ struct RAMState { uint64_t xbzrle_pages_prev; /* Amount of xbzrle encoded bytes since the beginning of the period */ uint64_t xbzrle_bytes_prev; - /* Start using XBZRLE (e.g., after the first round). */ - bool xbzrle_enabled; + /* Are we really using XBZRLE (e.g., after the first round). 
*/ + bool xbzrle_started; /* Are we on the last stage of migration */ bool last_stage; /* compression statistics since the beginning of the period */ @@ -458,30 +433,16 @@ uint64_t ram_bytes_remaining(void) 0; } -/* - * NOTE: not all stats in ram_counters are used in reality. See comments - * for struct MigrationAtomicStats. The ultimate result of ram migration - * counters will be a merged version with both ram_counters and the atomic - * fields in ram_atomic_counters. - */ -MigrationStats ram_counters; -MigrationAtomicStats ram_atomic_counters; - void ram_transferred_add(uint64_t bytes) { if (runstate_is_running()) { - ram_counters.precopy_bytes += bytes; + stat64_add(&mig_stats.precopy_bytes, bytes); } else if (migration_in_postcopy()) { - stat64_add(&ram_atomic_counters.postcopy_bytes, bytes); + stat64_add(&mig_stats.postcopy_bytes, bytes); } else { - ram_counters.downtime_bytes += bytes; + stat64_add(&mig_stats.downtime_bytes, bytes); } - stat64_add(&ram_atomic_counters.transferred, bytes); -} - -void dirty_sync_missed_zero_copy(void) -{ - ram_counters.dirty_sync_missed_zero_copy++; + stat64_add(&mig_stats.transferred, bytes); } struct MigrationOps { @@ -491,56 +452,8 @@ typedef struct MigrationOps MigrationOps; MigrationOps *migration_ops; -CompressionStats compression_counters; - -struct CompressParam { - bool done; - bool quit; - bool zero_page; - QEMUFile *file; - QemuMutex mutex; - QemuCond cond; - RAMBlock *block; - ram_addr_t offset; - - /* internally used fields */ - z_stream stream; - uint8_t *originbuf; -}; -typedef struct CompressParam CompressParam; - -struct DecompressParam { - bool done; - bool quit; - QemuMutex mutex; - QemuCond cond; - void *des; - uint8_t *compbuf; - int len; - z_stream stream; -}; -typedef struct DecompressParam DecompressParam; - -static CompressParam *comp_param; -static QemuThread *compress_threads; -/* comp_done_cond is used to wake up the migration thread when - * one of the compression threads has finished the compression. - * comp_done_lock is used to co-work with comp_done_cond. - */ -static QemuMutex comp_done_lock; -static QemuCond comp_done_cond; - -static QEMUFile *decomp_file; -static DecompressParam *decomp_param; -static QemuThread *decompress_threads; -static QemuMutex decomp_done_lock; -static QemuCond decomp_done_cond; - static int ram_save_host_page_urgent(PageSearchStatus *pss); -static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block, - ram_addr_t offset, uint8_t *source_buf); - /* NOTE: page is the PFN not real ram_addr_t. 
 */
 static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page)
 {
@@ -559,123 +472,6 @@ static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2)
         (pss1->host_page_start == pss2->host_page_start);
 }
 
-static void *do_data_compress(void *opaque)
-{
-    CompressParam *param = opaque;
-    RAMBlock *block;
-    ram_addr_t offset;
-    bool zero_page;
-
-    qemu_mutex_lock(&param->mutex);
-    while (!param->quit) {
-        if (param->block) {
-            block = param->block;
-            offset = param->offset;
-            param->block = NULL;
-            qemu_mutex_unlock(&param->mutex);
-
-            zero_page = do_compress_ram_page(param->file, &param->stream,
-                                             block, offset, param->originbuf);
-
-            qemu_mutex_lock(&comp_done_lock);
-            param->done = true;
-            param->zero_page = zero_page;
-            qemu_cond_signal(&comp_done_cond);
-            qemu_mutex_unlock(&comp_done_lock);
-
-            qemu_mutex_lock(&param->mutex);
-        } else {
-            qemu_cond_wait(&param->cond, &param->mutex);
-        }
-    }
-    qemu_mutex_unlock(&param->mutex);
-
-    return NULL;
-}
-
-static void compress_threads_save_cleanup(void)
-{
-    int i, thread_count;
-
-    if (!migrate_use_compression() || !comp_param) {
-        return;
-    }
-
-    thread_count = migrate_compress_threads();
-    for (i = 0; i < thread_count; i++) {
-        /*
-         * we use it as a indicator which shows if the thread is
-         * properly init'd or not
-         */
-        if (!comp_param[i].file) {
-            break;
-        }
-
-        qemu_mutex_lock(&comp_param[i].mutex);
-        comp_param[i].quit = true;
-        qemu_cond_signal(&comp_param[i].cond);
-        qemu_mutex_unlock(&comp_param[i].mutex);
-
-        qemu_thread_join(compress_threads + i);
-        qemu_mutex_destroy(&comp_param[i].mutex);
-        qemu_cond_destroy(&comp_param[i].cond);
-        deflateEnd(&comp_param[i].stream);
-        g_free(comp_param[i].originbuf);
-        qemu_fclose(comp_param[i].file);
-        comp_param[i].file = NULL;
-    }
-    qemu_mutex_destroy(&comp_done_lock);
-    qemu_cond_destroy(&comp_done_cond);
-    g_free(compress_threads);
-    g_free(comp_param);
-    compress_threads = NULL;
-    comp_param = NULL;
-}
-
-static int compress_threads_save_setup(void)
-{
-    int i, thread_count;
-
-    if (!migrate_use_compression()) {
-        return 0;
-    }
-    thread_count = migrate_compress_threads();
-    compress_threads = g_new0(QemuThread, thread_count);
-    comp_param = g_new0(CompressParam, thread_count);
-    qemu_cond_init(&comp_done_cond);
-    qemu_mutex_init(&comp_done_lock);
-    for (i = 0; i < thread_count; i++) {
-        comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
-        if (!comp_param[i].originbuf) {
-            goto exit;
-        }
-
-        if (deflateInit(&comp_param[i].stream,
-                        migrate_compress_level()) != Z_OK) {
-            g_free(comp_param[i].originbuf);
-            goto exit;
-        }
-
-        /* comp_param[i].file is just used as a dummy buffer to save data,
- */ - comp_param[i].file = qemu_file_new_output( - QIO_CHANNEL(qio_channel_null_new())); - comp_param[i].done = true; - comp_param[i].quit = false; - qemu_mutex_init(&comp_param[i].mutex); - qemu_cond_init(&comp_param[i].cond); - qemu_thread_create(compress_threads + i, "compress", - do_data_compress, comp_param + i, - QEMU_THREAD_JOINABLE); - } - return 0; - -exit: - compress_threads_save_cleanup(); - return -1; -} - /** * save_page_header: write page header to wire * @@ -688,12 +484,11 @@ exit: * @offset: offset inside the block for the page * in the lower bits, it contains flags */ -static size_t save_page_header(PageSearchStatus *pss, RAMBlock *block, - ram_addr_t offset) +static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f, + RAMBlock *block, ram_addr_t offset) { size_t size, len; bool same_block = (block == pss->last_sent_block); - QEMUFile *f = pss->pss_channel; if (same_block) { offset |= RAM_SAVE_FLAG_CONTINUE; @@ -723,11 +518,10 @@ static size_t save_page_header(PageSearchStatus *pss, RAMBlock *block, static void mig_throttle_guest_down(uint64_t bytes_dirty_period, uint64_t bytes_dirty_threshold) { - MigrationState *s = migrate_get_current(); - uint64_t pct_initial = s->parameters.cpu_throttle_initial; - uint64_t pct_increment = s->parameters.cpu_throttle_increment; - bool pct_tailslow = s->parameters.cpu_throttle_tailslow; - int pct_max = s->parameters.max_cpu_throttle; + uint64_t pct_initial = migrate_cpu_throttle_initial(); + uint64_t pct_increment = migrate_cpu_throttle_increment(); + bool pct_tailslow = migrate_cpu_throttle_tailslow(); + int pct_max = migrate_max_cpu_throttle(); uint64_t throttle_now = cpu_throttle_get_percentage(); uint64_t cpu_now, cpu_ideal, throttle_inc; @@ -757,7 +551,7 @@ void mig_throttle_counter_reset(void) rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); rs->num_dirty_pages_period = 0; - rs->bytes_xfer_prev = stat64_get(&ram_atomic_counters.transferred); + rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred); } /** @@ -777,7 +571,7 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr) /* We don't care if this fails to allocate a new cache page * as long as it updated an old one */ cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page, - ram_counters.dirty_sync_count); + stat64_get(&mig_stats.dirty_sync_count)); } #define ENCODING_FLAG_XBZRLE 0x1 @@ -803,13 +597,13 @@ static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss, int encoded_len = 0, bytes_xbzrle; uint8_t *prev_cached_page; QEMUFile *file = pss->pss_channel; + uint64_t generation = stat64_get(&mig_stats.dirty_sync_count); - if (!cache_is_cached(XBZRLE.cache, current_addr, - ram_counters.dirty_sync_count)) { + if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) { xbzrle_counters.cache_miss++; if (!rs->last_stage) { if (cache_insert(XBZRLE.cache, current_addr, *current_data, - ram_counters.dirty_sync_count) == -1) { + generation) == -1) { return -1; } else { /* update *current_data when the page has been @@ -838,9 +632,9 @@ static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss, memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE); /* XBZRLE encoding (if there is no overflow) */ - encoded_len = xbzrle_encode_buffer_func(prev_cached_page, XBZRLE.current_buf, - TARGET_PAGE_SIZE, XBZRLE.encoded_buf, - TARGET_PAGE_SIZE); + encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf, + TARGET_PAGE_SIZE, XBZRLE.encoded_buf, + TARGET_PAGE_SIZE); /* * Update the cache contents, so 
that it corresponds to the data @@ -867,7 +661,7 @@ static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss, } /* Send XBZRLE based compressed page */ - bytes_xbzrle = save_page_header(pss, block, + bytes_xbzrle = save_page_header(pss, pss->pss_channel, block, offset | RAM_SAVE_FLAG_XBZRLE); qemu_put_byte(file, ENCODING_FLAG_XBZRLE); qemu_put_be16(file, encoded_len); @@ -1131,8 +925,8 @@ uint64_t ram_pagesize_summary(void) uint64_t ram_get_total_transferred_pages(void) { - return stat64_get(&ram_atomic_counters.normal) + - stat64_get(&ram_atomic_counters.duplicate) + + return stat64_get(&mig_stats.normal_pages) + + stat64_get(&mig_stats.zero_pages) + compression_counters.pages + xbzrle_counters.pages; } @@ -1142,14 +936,15 @@ static void migration_update_rates(RAMState *rs, int64_t end_time) double compressed_size; /* calculate period counters */ - ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000 - / (end_time - rs->time_last_bitmap_sync); + stat64_set(&mig_stats.dirty_pages_rate, + rs->num_dirty_pages_period * 1000 / + (end_time - rs->time_last_bitmap_sync)); if (!page_count) { return; } - if (migrate_use_xbzrle()) { + if (migrate_xbzrle()) { double encoded_size, unencoded_size; xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss - @@ -1167,7 +962,7 @@ static void migration_update_rates(RAMState *rs, int64_t end_time) rs->xbzrle_bytes_prev = xbzrle_counters.bytes; } - if (migrate_use_compression()) { + if (migrate_compress()) { compression_counters.busy_rate = (double)(compression_counters.busy - rs->compress_thread_busy_prev) / page_count; rs->compress_thread_busy_prev = compression_counters.busy; @@ -1190,10 +985,9 @@ static void migration_update_rates(RAMState *rs, int64_t end_time) static void migration_trigger_throttle(RAMState *rs) { - MigrationState *s = migrate_get_current(); - uint64_t threshold = s->parameters.throttle_trigger_threshold; + uint64_t threshold = migrate_throttle_trigger_threshold(); uint64_t bytes_xfer_period = - stat64_get(&ram_atomic_counters.transferred) - rs->bytes_xfer_prev; + stat64_get(&mig_stats.transferred) - rs->bytes_xfer_prev; uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE; uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100; @@ -1217,26 +1011,26 @@ static void migration_trigger_throttle(RAMState *rs) } } -static void migration_bitmap_sync(RAMState *rs) +static void migration_bitmap_sync(RAMState *rs, bool last_stage) { RAMBlock *block; int64_t end_time; - ram_counters.dirty_sync_count++; + stat64_add(&mig_stats.dirty_sync_count, 1); if (!rs->time_last_bitmap_sync) { rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); } trace_migration_bitmap_sync_start(); - memory_global_dirty_log_sync(); + memory_global_dirty_log_sync(last_stage); qemu_mutex_lock(&rs->bitmap_mutex); WITH_RCU_READ_LOCK_GUARD() { RAMBLOCK_FOREACH_NOT_IGNORED(block) { ramblock_sync_dirty_bitmap(rs, block); } - ram_counters.remaining = ram_bytes_remaining(); + stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining()); } qemu_mutex_unlock(&rs->bitmap_mutex); @@ -1256,14 +1050,15 @@ static void migration_bitmap_sync(RAMState *rs) /* reset period counters */ rs->time_last_bitmap_sync = end_time; rs->num_dirty_pages_period = 0; - rs->bytes_xfer_prev = stat64_get(&ram_atomic_counters.transferred); + rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred); } - if (migrate_use_events()) { - qapi_event_send_migration_pass(ram_counters.dirty_sync_count); + if (migrate_events()) { + 
uint64_t generation = stat64_get(&mig_stats.dirty_sync_count); + qapi_event_send_migration_pass(generation); } } -static void migration_bitmap_sync_precopy(RAMState *rs) +static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage) { Error *local_err = NULL; @@ -1276,7 +1071,7 @@ static void migration_bitmap_sync_precopy(RAMState *rs) local_err = NULL; } - migration_bitmap_sync(rs); + migration_bitmap_sync(rs, last_stage); if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) { error_report_err(local_err); @@ -1302,15 +1097,14 @@ void ram_release_page(const char *rbname, uint64_t offset) * @block: block that contains the page we want to send * @offset: offset inside the block for the page */ -static int save_zero_page_to_file(PageSearchStatus *pss, +static int save_zero_page_to_file(PageSearchStatus *pss, QEMUFile *file, RAMBlock *block, ram_addr_t offset) { uint8_t *p = block->host + offset; - QEMUFile *file = pss->pss_channel; int len = 0; if (buffer_is_zero(p, TARGET_PAGE_SIZE)) { - len += save_page_header(pss, block, offset | RAM_SAVE_FLAG_ZERO); + len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO); qemu_put_byte(file, 0); len += 1; ram_release_page(block->idstr, offset); @@ -1327,13 +1121,13 @@ static int save_zero_page_to_file(PageSearchStatus *pss, * @block: block that contains the page we want to send * @offset: offset inside the block for the page */ -static int save_zero_page(PageSearchStatus *pss, RAMBlock *block, +static int save_zero_page(PageSearchStatus *pss, QEMUFile *f, RAMBlock *block, ram_addr_t offset) { - int len = save_zero_page_to_file(pss, block, offset); + int len = save_zero_page_to_file(pss, f, block, offset); if (len) { - stat64_add(&ram_atomic_counters.duplicate, 1); + stat64_add(&mig_stats.zero_pages, 1); ram_transferred_add(len); return 1; } @@ -1370,9 +1164,9 @@ static bool control_save_page(PageSearchStatus *pss, RAMBlock *block, } if (bytes_xmit > 0) { - stat64_add(&ram_atomic_counters.normal, 1); + stat64_add(&mig_stats.normal_pages, 1); } else if (bytes_xmit == 0) { - stat64_add(&ram_atomic_counters.duplicate, 1); + stat64_add(&mig_stats.zero_pages, 1); } return true; @@ -1394,7 +1188,7 @@ static int save_normal_page(PageSearchStatus *pss, RAMBlock *block, { QEMUFile *file = pss->pss_channel; - ram_transferred_add(save_page_header(pss, block, + ram_transferred_add(save_page_header(pss, pss->pss_channel, block, offset | RAM_SAVE_FLAG_PAGE)); if (async) { qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE, @@ -1404,7 +1198,7 @@ static int save_normal_page(PageSearchStatus *pss, RAMBlock *block, qemu_put_buffer(file, buf, TARGET_PAGE_SIZE); } ram_transferred_add(TARGET_PAGE_SIZE); - stat64_add(&ram_atomic_counters.normal, 1); + stat64_add(&mig_stats.normal_pages, 1); return 1; } @@ -1433,7 +1227,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss) trace_ram_save_page(block->idstr, (uint64_t)offset, p); XBZRLE_cache_lock(); - if (rs->xbzrle_enabled && !migration_in_postcopy()) { + if (rs->xbzrle_started && !migration_in_postcopy()) { pages = save_xbzrle_page(rs, pss, &p, current_addr, block, offset); if (!rs->last_stage) { @@ -1460,46 +1254,18 @@ static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block, if (multifd_queue_page(file, block, offset) < 0) { return -1; } - stat64_add(&ram_atomic_counters.normal, 1); + stat64_add(&mig_stats.normal_pages, 1); return 1; } -static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block, - ram_addr_t offset, uint8_t *source_buf) -{ - 
RAMState *rs = ram_state; - PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY]; - uint8_t *p = block->host + offset; - int ret; - - if (save_zero_page_to_file(pss, block, offset)) { - return true; - } - - save_page_header(pss, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE); - - /* - * copy it to a internal buffer to avoid it being modified by VM - * so that we can catch up the error during compression and - * decompression - */ - memcpy(source_buf, p, TARGET_PAGE_SIZE); - ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE); - if (ret < 0) { - qemu_file_set_error(migrate_get_current()->to_dst_file, ret); - error_report("compressed data failed!"); - } - return false; -} - static void update_compress_thread_counts(const CompressParam *param, int bytes_xmit) { ram_transferred_add(bytes_xmit); - if (param->zero_page) { - stat64_add(&ram_atomic_counters.duplicate, 1); + if (param->result == RES_ZEROPAGE) { + stat64_add(&mig_stats.zero_pages, 1); return; } @@ -1510,81 +1276,49 @@ update_compress_thread_counts(const CompressParam *param, int bytes_xmit) static bool save_page_use_compression(RAMState *rs); -static void flush_compressed_data(RAMState *rs) +static int send_queued_data(CompressParam *param) { + PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY]; MigrationState *ms = migrate_get_current(); - int idx, len, thread_count; + QEMUFile *file = ms->to_dst_file; + int len = 0; + RAMBlock *block = param->block; + ram_addr_t offset = param->offset; + + if (param->result == RES_NONE) { + return 0; + } + + assert(block == pss->last_sent_block); + + if (param->result == RES_ZEROPAGE) { + assert(qemu_file_buffer_empty(param->file)); + len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO); + qemu_put_byte(file, 0); + len += 1; + ram_release_page(block->idstr, offset); + } else if (param->result == RES_COMPRESS) { + assert(!qemu_file_buffer_empty(param->file)); + len += save_page_header(pss, file, block, + offset | RAM_SAVE_FLAG_COMPRESS_PAGE); + len += qemu_put_qemu_file(file, param->file); + } else { + abort(); + } + + update_compress_thread_counts(param, len); + + return len; +} + +static void ram_flush_compressed_data(RAMState *rs) +{ if (!save_page_use_compression(rs)) { return; } - thread_count = migrate_compress_threads(); - qemu_mutex_lock(&comp_done_lock); - for (idx = 0; idx < thread_count; idx++) { - while (!comp_param[idx].done) { - qemu_cond_wait(&comp_done_cond, &comp_done_lock); - } - } - qemu_mutex_unlock(&comp_done_lock); - - for (idx = 0; idx < thread_count; idx++) { - qemu_mutex_lock(&comp_param[idx].mutex); - if (!comp_param[idx].quit) { - len = qemu_put_qemu_file(ms->to_dst_file, comp_param[idx].file); - /* - * it's safe to fetch zero_page without holding comp_done_lock - * as there is no further request submitted to the thread, - * i.e, the thread should be waiting for a request at this point. 
- */
-            update_compress_thread_counts(&comp_param[idx], len);
-        }
-        qemu_mutex_unlock(&comp_param[idx].mutex);
-    }
-}
-
-static inline void set_compress_params(CompressParam *param, RAMBlock *block,
-                                       ram_addr_t offset)
-{
-    param->block = block;
-    param->offset = offset;
-}
-
-static int compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset)
-{
-    int idx, thread_count, bytes_xmit = -1, pages = -1;
-    bool wait = migrate_compress_wait_thread();
-    MigrationState *ms = migrate_get_current();
-
-    thread_count = migrate_compress_threads();
-    qemu_mutex_lock(&comp_done_lock);
-retry:
-    for (idx = 0; idx < thread_count; idx++) {
-        if (comp_param[idx].done) {
-            comp_param[idx].done = false;
-            bytes_xmit = qemu_put_qemu_file(ms->to_dst_file,
-                                            comp_param[idx].file);
-            qemu_mutex_lock(&comp_param[idx].mutex);
-            set_compress_params(&comp_param[idx], block, offset);
-            qemu_cond_signal(&comp_param[idx].cond);
-            qemu_mutex_unlock(&comp_param[idx].mutex);
-            pages = 1;
-            update_compress_thread_counts(&comp_param[idx], bytes_xmit);
-            break;
-        }
-    }
-
-    /*
-     * wait for the free thread if the user specifies 'compress-wait-thread',
-     * otherwise we will post the page out in the main thread as normal page.
-     */
-    if (pages < 0 && wait) {
-        qemu_cond_wait(&comp_done_cond, &comp_done_lock);
-        goto retry;
-    }
-    qemu_mutex_unlock(&comp_done_lock);
-
-    return pages;
+    flush_compressed_data(send_queued_data);
 }
 
 #define PAGE_ALL_CLEAN 0
@@ -1595,6 +1329,7 @@ retry:
  * associated with the search process.
  *
  * Returns:
+ *         <0: An error happened
  *         PAGE_ALL_CLEAN: no dirty page found, give up
  *         PAGE_TRY_AGAIN: no dirty page found, retry for next block
  *         PAGE_DIRTY_FOUND: dirty page found
@@ -1622,6 +1357,15 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
         pss->page = 0;
         pss->block = QLIST_NEXT_RCU(pss->block, next);
         if (!pss->block) {
+            if (!migrate_multifd_flush_after_each_section()) {
+                QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
+                int ret = multifd_send_sync_main(f);
+                if (ret < 0) {
+                    return ret;
+                }
+                qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
+                qemu_fflush(f);
+            }
             /*
              * If memory migration starts over, we will meet a dirtied page
             * which may still exist in the compression threads' ring, so we
             * should flush the compressed data to make sure the new page
             * is not overwritten by the old one in the destination.
             *
             * Also if xbzrle is on, stop using the data compression at this
             * point. In theory, xbzrle can do better than compression.
*/ - if (rs->xbzrle_enabled) { + if (rs->xbzrle_started) { return false; } @@ -2321,11 +2065,11 @@ static bool save_compress_page(RAMState *rs, PageSearchStatus *pss, * much CPU resource. */ if (block != pss->last_sent_block) { - flush_compressed_data(rs); + ram_flush_compressed_data(rs); return false; } - if (compress_page_with_multi_thread(block, offset) > 0) { + if (compress_page_with_multi_thread(block, offset, send_queued_data) > 0) { return true; } @@ -2355,12 +2099,12 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss) return 1; } - res = save_zero_page(pss, block, offset); + res = save_zero_page(pss, pss->pss_channel, block, offset); if (res > 0) { /* Must let xbzrle know, otherwise a previous (now 0'd) cached * page would be stale */ - if (rs->xbzrle_enabled) { + if (rs->xbzrle_started) { XBZRLE_cache_lock(); xbzrle_cache_zero_page(rs, block->offset + offset); XBZRLE_cache_unlock(); @@ -2374,7 +2118,7 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss) * if host page size == guest page size the dest guest during run may * still see partially copied pages which is data corruption. */ - if (migrate_use_multifd() && !migration_in_postcopy()) { + if (migrate_multifd() && !migration_in_postcopy()) { return ram_save_multifd_page(pss->pss_channel, block, offset); } @@ -2614,6 +2358,9 @@ static int ram_find_and_save_block(RAMState *rs) break; } else if (res == PAGE_TRY_AGAIN) { continue; + } else if (res < 0) { + pages = res; + break; } } } @@ -2629,19 +2376,6 @@ static int ram_find_and_save_block(RAMState *rs) return pages; } -void acct_update_position(QEMUFile *f, size_t size, bool zero) -{ - uint64_t pages = size / TARGET_PAGE_SIZE; - - if (zero) { - stat64_add(&ram_atomic_counters.duplicate, pages); - } else { - stat64_add(&ram_atomic_counters.normal, pages); - ram_transferred_add(size); - qemu_file_credit_transfer(f, size); - } -} - static uint64_t ram_bytes_total_with_ignored(void) { RAMBlock *block; @@ -2751,7 +2485,7 @@ static void ram_state_reset(RAMState *rs) rs->last_seen_block = NULL; rs->last_page = 0; rs->last_version = ram_list.version; - rs->xbzrle_enabled = false; + rs->xbzrle_started = false; } #define MAX_WAIT 50 /* ms, half buffered_file limit */ @@ -2937,7 +2671,7 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms) RCU_READ_LOCK_GUARD(); /* This should be our last sync, the src is now paused */ - migration_bitmap_sync(rs); + migration_bitmap_sync(rs, false); /* Easiest way to make sure we don't resume in the middle of a host-page */ rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL; @@ -2991,7 +2725,7 @@ static int xbzrle_init(void) { Error *local_err = NULL; - if (!migrate_use_xbzrle()) { + if (!migrate_xbzrle()) { return 0; } @@ -3128,7 +2862,7 @@ static void ram_init_bitmaps(RAMState *rs) /* We don't use dirty log with background snapshots */ if (!migrate_background_snapshot()) { memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); - migration_bitmap_sync_precopy(rs); + migration_bitmap_sync_precopy(rs, false); } } qemu_mutex_unlock_ramlist(); @@ -3295,11 +3029,15 @@ static int ram_save_setup(QEMUFile *f, void *opaque) migration_ops = g_malloc0(sizeof(MigrationOps)); migration_ops->ram_save_target_page = ram_save_target_page_legacy; - ret = multifd_send_sync_main(f); + ret = multifd_send_sync_main(f); if (ret < 0) { return ret; } + if (!migrate_multifd_flush_after_each_section()) { + qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); + } + qemu_put_be64(f, RAM_SAVE_FLAG_EOS); qemu_fflush(f); @@ -3350,7 
+3088,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); i = 0; - while ((ret = qemu_file_rate_limit(f)) == 0 || + while ((ret = migration_rate_exceeded(f)) == 0 || postcopy_has_request(rs)) { int pages; @@ -3377,7 +3115,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) * page is sent in one chunk. */ if (migrate_postcopy_ram()) { - flush_compressed_data(rs); + ram_flush_compressed_data(rs); } /* @@ -3408,9 +3146,11 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) out: if (ret >= 0 && migration_is_setup_or_active(migrate_get_current()->state)) { - ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel); - if (ret < 0) { - return ret; + if (migrate_multifd_flush_after_each_section()) { + ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel); + if (ret < 0) { + return ret; + } } qemu_put_be64(f, RAM_SAVE_FLAG_EOS); @@ -3446,7 +3186,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) WITH_RCU_READ_LOCK_GUARD() { if (!migration_in_postcopy()) { - migration_bitmap_sync_precopy(rs); + migration_bitmap_sync_precopy(rs, true); } ram_control_before_iterate(f, RAM_CONTROL_FINISH); @@ -3470,7 +3210,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) } qemu_mutex_unlock(&rs->bitmap_mutex); - flush_compressed_data(rs); + ram_flush_compressed_data(rs); ram_control_after_iterate(f, RAM_CONTROL_FINISH); } @@ -3483,6 +3223,9 @@ static int ram_save_complete(QEMUFile *f, void *opaque) return ret; } + if (!migrate_multifd_flush_after_each_section()) { + qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); + } qemu_put_be64(f, RAM_SAVE_FLAG_EOS); qemu_fflush(f); @@ -3508,15 +3251,16 @@ static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy, static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy, uint64_t *can_postcopy) { + MigrationState *s = migrate_get_current(); RAMState **temp = opaque; RAMState *rs = *temp; uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; - if (!migration_in_postcopy()) { + if (!migration_in_postcopy() && remaining_size < s->threshold_size) { qemu_mutex_lock_iothread(); WITH_RCU_READ_LOCK_GUARD() { - migration_bitmap_sync_precopy(rs); + migration_bitmap_sync_precopy(rs, false); } qemu_mutex_unlock_iothread(); remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; @@ -3636,6 +3380,18 @@ static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block, return ((uintptr_t)block->host + offset) & (block->page_size - 1); } +void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages) +{ + qemu_mutex_lock(&ram_state->bitmap_mutex); + for (int i = 0; i < pages; i++) { + ram_addr_t offset = normal[i]; + ram_state->migration_dirty_pages += !test_and_set_bit( + offset >> TARGET_PAGE_BITS, + block->bmap); + } + qemu_mutex_unlock(&ram_state->bitmap_mutex); +} + static inline void *colo_cache_from_block_offset(RAMBlock *block, ram_addr_t offset, bool record_bitmap) { @@ -3653,9 +3409,8 @@ static inline void *colo_cache_from_block_offset(RAMBlock *block, * It help us to decide which pages in ram cache should be flushed * into VM's RAM later. 
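colo_record_bitmap() above is the batched replacement for the open-coded test_and_set_bit in colo_cache_from_block_offset(): callers take bitmap_mutex once and account an arbitrary number of pages. The counting idiom credits a page only on its 0 -> 1 transition; sketched below with a plain, non-atomic stand-in for QEMU's test_and_set_bit():

    #include <stdint.h>

    #define BITS_PER_WORD (8 * sizeof(unsigned long))

    /* Simplified stand-in; QEMU's version is atomic. */
    static int test_and_set(unsigned long *map, unsigned long nr)
    {
        unsigned long *w = map + nr / BITS_PER_WORD;
        unsigned long mask = 1UL << (nr % BITS_PER_WORD);
        int was_set = !!(*w & mask);
        *w |= mask;
        return was_set;
    }

    /* Under the lock: each page bumps the dirty count at most once. */
    static uint64_t record_pages(unsigned long *bmap, const uint64_t *offsets,
                                 uint32_t n, unsigned page_bits)
    {
        uint64_t newly_dirty = 0;
        for (uint32_t i = 0; i < n; i++) {
            newly_dirty += !test_and_set(bmap, offsets[i] >> page_bits);
        }
        return newly_dirty;   /* added to migration_dirty_pages */
    }

Taking bitmap_mutex here is also what lets colo_flush_ram_cache() grab the same lock and see a consistent bitmap.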
*/ - if (record_bitmap && - !test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) { - ram_state->migration_dirty_pages++; + if (record_bitmap) { + colo_record_bitmap(block, &offset, 1); } return block->colo_cache + offset; } @@ -3677,192 +3432,6 @@ void ram_handle_compressed(void *host, uint8_t ch, uint64_t size) } } -/* return the size after decompression, or negative value on error */ -static int -qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len, - const uint8_t *source, size_t source_len) -{ - int err; - - err = inflateReset(stream); - if (err != Z_OK) { - return -1; - } - - stream->avail_in = source_len; - stream->next_in = (uint8_t *)source; - stream->avail_out = dest_len; - stream->next_out = dest; - - err = inflate(stream, Z_NO_FLUSH); - if (err != Z_STREAM_END) { - return -1; - } - - return stream->total_out; -} - -static void *do_data_decompress(void *opaque) -{ - DecompressParam *param = opaque; - unsigned long pagesize; - uint8_t *des; - int len, ret; - - qemu_mutex_lock(¶m->mutex); - while (!param->quit) { - if (param->des) { - des = param->des; - len = param->len; - param->des = 0; - qemu_mutex_unlock(¶m->mutex); - - pagesize = TARGET_PAGE_SIZE; - - ret = qemu_uncompress_data(¶m->stream, des, pagesize, - param->compbuf, len); - if (ret < 0 && migrate_get_current()->decompress_error_check) { - error_report("decompress data failed"); - qemu_file_set_error(decomp_file, ret); - } - - qemu_mutex_lock(&decomp_done_lock); - param->done = true; - qemu_cond_signal(&decomp_done_cond); - qemu_mutex_unlock(&decomp_done_lock); - - qemu_mutex_lock(¶m->mutex); - } else { - qemu_cond_wait(¶m->cond, ¶m->mutex); - } - } - qemu_mutex_unlock(¶m->mutex); - - return NULL; -} - -static int wait_for_decompress_done(void) -{ - int idx, thread_count; - - if (!migrate_use_compression()) { - return 0; - } - - thread_count = migrate_decompress_threads(); - qemu_mutex_lock(&decomp_done_lock); - for (idx = 0; idx < thread_count; idx++) { - while (!decomp_param[idx].done) { - qemu_cond_wait(&decomp_done_cond, &decomp_done_lock); - } - } - qemu_mutex_unlock(&decomp_done_lock); - return qemu_file_get_error(decomp_file); -} - -static void compress_threads_load_cleanup(void) -{ - int i, thread_count; - - if (!migrate_use_compression()) { - return; - } - thread_count = migrate_decompress_threads(); - for (i = 0; i < thread_count; i++) { - /* - * we use it as a indicator which shows if the thread is - * properly init'd or not - */ - if (!decomp_param[i].compbuf) { - break; - } - - qemu_mutex_lock(&decomp_param[i].mutex); - decomp_param[i].quit = true; - qemu_cond_signal(&decomp_param[i].cond); - qemu_mutex_unlock(&decomp_param[i].mutex); - } - for (i = 0; i < thread_count; i++) { - if (!decomp_param[i].compbuf) { - break; - } - - qemu_thread_join(decompress_threads + i); - qemu_mutex_destroy(&decomp_param[i].mutex); - qemu_cond_destroy(&decomp_param[i].cond); - inflateEnd(&decomp_param[i].stream); - g_free(decomp_param[i].compbuf); - decomp_param[i].compbuf = NULL; - } - g_free(decompress_threads); - g_free(decomp_param); - decompress_threads = NULL; - decomp_param = NULL; - decomp_file = NULL; -} - -static int compress_threads_load_setup(QEMUFile *f) -{ - int i, thread_count; - - if (!migrate_use_compression()) { - return 0; - } - - thread_count = migrate_decompress_threads(); - decompress_threads = g_new0(QemuThread, thread_count); - decomp_param = g_new0(DecompressParam, thread_count); - qemu_mutex_init(&decomp_done_lock); - qemu_cond_init(&decomp_done_cond); - decomp_file = f; - for 
(i = 0; i < thread_count; i++) { - if (inflateInit(&decomp_param[i].stream) != Z_OK) { - goto exit; - } - - decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE)); - qemu_mutex_init(&decomp_param[i].mutex); - qemu_cond_init(&decomp_param[i].cond); - decomp_param[i].done = true; - decomp_param[i].quit = false; - qemu_thread_create(decompress_threads + i, "decompress", - do_data_decompress, decomp_param + i, - QEMU_THREAD_JOINABLE); - } - return 0; -exit: - compress_threads_load_cleanup(); - return -1; -} - -static void decompress_data_with_multi_threads(QEMUFile *f, - void *host, int len) -{ - int idx, thread_count; - - thread_count = migrate_decompress_threads(); - QEMU_LOCK_GUARD(&decomp_done_lock); - while (true) { - for (idx = 0; idx < thread_count; idx++) { - if (decomp_param[idx].done) { - decomp_param[idx].done = false; - qemu_mutex_lock(&decomp_param[idx].mutex); - qemu_get_buffer(f, decomp_param[idx].compbuf, len); - decomp_param[idx].des = host; - decomp_param[idx].len = len; - qemu_cond_signal(&decomp_param[idx].cond); - qemu_mutex_unlock(&decomp_param[idx].mutex); - break; - } - } - if (idx < thread_count) { - break; - } else { - qemu_cond_wait(&decomp_done_cond, &decomp_done_lock); - } - } -} - static void colo_init_ram_state(void) { ram_state_init(&ram_state); @@ -3926,7 +3495,7 @@ void colo_incoming_start_dirty_log(void) qemu_mutex_lock_iothread(); qemu_mutex_lock_ramlist(); - memory_global_dirty_log_sync(); + memory_global_dirty_log_sync(false); WITH_RCU_READ_LOCK_GUARD() { RAMBLOCK_FOREACH_NOT_IGNORED(block) { ramblock_sync_dirty_bitmap(ram_state, block); @@ -3972,10 +3541,6 @@ void colo_release_ram_cache(void) */ static int ram_load_setup(QEMUFile *f, void *opaque) { - if (compress_threads_load_setup(f)) { - return -1; - } - xbzrle_load_setup(); ramblock_recv_map_init(); @@ -3991,7 +3556,6 @@ static int ram_load_cleanup(void *opaque) } xbzrle_load_cleanup(); - compress_threads_load_cleanup(); RAMBLOCK_FOREACH_NOT_IGNORED(rb) { g_free(rb->receivedmap); @@ -4163,10 +3727,14 @@ int ram_load_postcopy(QEMUFile *f, int channel) } decompress_data_with_multi_threads(f, page_buffer, len); break; - + case RAM_SAVE_FLAG_MULTIFD_FLUSH: + multifd_recv_sync_main(); + break; case RAM_SAVE_FLAG_EOS: /* normal exit */ - multifd_recv_sync_main(); + if (migrate_multifd_flush_after_each_section()) { + multifd_recv_sync_main(); + } break; default: error_report("Unknown combination of migration flags: 0x%x" @@ -4217,7 +3785,8 @@ void colo_flush_ram_cache(void) void *src_host; unsigned long offset = 0; - memory_global_dirty_log_sync(); + memory_global_dirty_log_sync(false); + qemu_mutex_lock(&ram_state->bitmap_mutex); WITH_RCU_READ_LOCK_GUARD() { RAMBLOCK_FOREACH_NOT_IGNORED(block) { ramblock_sync_dirty_bitmap(ram_state, block); @@ -4252,6 +3821,7 @@ void colo_flush_ram_cache(void) } } } + qemu_mutex_unlock(&ram_state->bitmap_mutex); trace_colo_flush_ram_cache_end(); } @@ -4271,7 +3841,7 @@ static int ram_load_precopy(QEMUFile *f) int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0; /* ADVISE is earlier, it shows the source has the postcopy capability on */ bool postcopy_advised = migration_incoming_postcopy_advised(); - if (!migrate_use_compression()) { + if (!migrate_compress()) { invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE; } @@ -4435,18 +4005,21 @@ static int ram_load_precopy(QEMUFile *f) break; } break; - case RAM_SAVE_FLAG_EOS: - /* normal exit */ + case RAM_SAVE_FLAG_MULTIFD_FLUSH: multifd_recv_sync_main(); break; - default: - if (flags & RAM_SAVE_FLAG_HOOK) { - 
ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL); - } else { - error_report("Unknown combination of migration flags: 0x%x", - flags); - ret = -EINVAL; + case RAM_SAVE_FLAG_EOS: + /* normal exit */ + if (migrate_multifd_flush_after_each_section()) { + multifd_recv_sync_main(); } + break; + case RAM_SAVE_FLAG_HOOK: + ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL); + break; + default: + error_report("Unknown combination of migration flags: 0x%x", flags); + ret = -EINVAL; } if (!ret) { ret = qemu_file_get_error(f); diff --git a/migration/ram.h b/migration/ram.h index 81cbb0947c..ea1f3c25b5 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -32,28 +32,7 @@ #include "qapi/qapi-types-migration.h" #include "exec/cpu-common.h" #include "io/channel.h" -#include "qemu/stats64.h" -/* - * These are the migration statistic counters that need to be updated using - * atomic ops (can be accessed by more than one thread). Here since we - * cannot modify MigrationStats directly to use Stat64 as it was defined in - * the QAPI scheme, we define an internal structure to hold them, and we - * propagate the real values when QMP queries happen. - * - * IOW, the corresponding fields within ram_counters on these specific - * fields will be always zero and not being used at all; they're just - * placeholders to make it QAPI-compatible. - */ -typedef struct { - Stat64 transferred; - Stat64 duplicate; - Stat64 normal; - Stat64 postcopy_bytes; -} MigrationAtomicStats; - -extern MigrationAtomicStats ram_atomic_counters; -extern MigrationStats ram_counters; extern XBZRLECacheStats xbzrle_counters; extern CompressionStats compression_counters; @@ -74,7 +53,6 @@ void mig_throttle_counter_reset(void); uint64_t ram_pagesize_summary(void); int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len); -void acct_update_position(QEMUFile *f, size_t size, bool zero); void ram_postcopy_migrated_memory_release(MigrationState *ms); /* For outgoing discard bitmap */ void ram_postcopy_send_discard_bitmap(MigrationState *ms); @@ -104,6 +82,7 @@ int colo_init_ram_cache(void); void colo_flush_ram_cache(void); void colo_release_ram_cache(void); void colo_incoming_start_dirty_log(void); +void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages); /* Background snapshot */ bool ram_write_tracking_available(void); @@ -112,6 +91,4 @@ void ram_write_tracking_prepare(void); int ram_write_tracking_start(void); void ram_write_tracking_stop(void); -void dirty_sync_missed_zero_copy(void); - #endif diff --git a/migration/rdma.c b/migration/rdma.c index 288eadc2d2..dd1c039e6c 100644 --- a/migration/rdma.c +++ b/migration/rdma.c @@ -17,8 +17,10 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/cutils.h" +#include "exec/target_page.h" #include "rdma.h" #include "migration.h" +#include "migration-stats.h" #include "qemu-file.h" #include "ram.h" #include "qemu/error-report.h" @@ -35,6 +37,7 @@ #include #include "trace.h" #include "qom/object.h" +#include "options.h" #include /* @@ -2119,7 +2122,8 @@ retry: return -EIO; } - acct_update_position(f, sge.length, true); + stat64_add(&mig_stats.zero_pages, + sge.length / qemu_target_page_size()); return 1; } @@ -2227,7 +2231,9 @@ retry: } set_bit(chunk, block->transit_bitmap); - acct_update_position(f, sge.length, false); + stat64_add(&mig_stats.normal_pages, sge.length / qemu_target_page_size()); + ram_transferred_add(sge.length); + qemu_file_credit_transfer(f, sge.length); rdma->total_writes++; return 0; @@ -3104,15 +3110,15 @@ static void 
qio_channel_rdma_set_aio_fd_handler(QIOChannel *ioc, { QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc); if (io_read) { - aio_set_fd_handler(ctx, rioc->rdmain->recv_comp_channel->fd, - false, io_read, io_write, NULL, NULL, opaque); - aio_set_fd_handler(ctx, rioc->rdmain->send_comp_channel->fd, - false, io_read, io_write, NULL, NULL, opaque); + aio_set_fd_handler(ctx, rioc->rdmain->recv_comp_channel->fd, io_read, + io_write, NULL, NULL, opaque); + aio_set_fd_handler(ctx, rioc->rdmain->send_comp_channel->fd, io_read, + io_write, NULL, NULL, opaque); } else { - aio_set_fd_handler(ctx, rioc->rdmaout->recv_comp_channel->fd, - false, io_read, io_write, NULL, NULL, opaque); - aio_set_fd_handler(ctx, rioc->rdmaout->send_comp_channel->fd, - false, io_read, io_write, NULL, NULL, opaque); + aio_set_fd_handler(ctx, rioc->rdmaout->recv_comp_channel->fd, io_read, + io_write, NULL, NULL, opaque); + aio_set_fd_handler(ctx, rioc->rdmaout->send_comp_channel->fd, io_read, + io_write, NULL, NULL, opaque); } } @@ -3228,6 +3234,10 @@ static size_t qemu_rdma_save_page(QEMUFile *f, RDMAContext *rdma; int ret; + if (migration_in_postcopy()) { + return RAM_SAVE_CONTROL_NOT_SUPP; + } + RCU_READ_LOCK_GUARD(); rdma = qatomic_rcu_read(&rioc->rdmaout); @@ -3237,10 +3247,6 @@ static size_t qemu_rdma_save_page(QEMUFile *f, CHECK_ERROR_STATE(); - if (migration_in_postcopy()) { - return RAM_SAVE_CONTROL_NOT_SUPP; - } - qemu_fflush(f); /* @@ -3336,9 +3342,8 @@ static void rdma_cm_poll_handler(void *opaque) } } rdma_ack_cm_event(cm_event); - - if (mis->migration_incoming_co) { - qemu_coroutine_enter(mis->migration_incoming_co); + if (mis->loadvm_co) { + qemu_coroutine_enter(mis->loadvm_co); } return; } @@ -3373,7 +3378,8 @@ static int qemu_rdma_accept(RDMAContext *rdma) * initialize the RDMAContext for return path for postcopy after first * connection request reached. */ - if (migrate_postcopy() && !rdma->is_return_path) { + if ((migrate_postcopy() || migrate_return_path()) + && !rdma->is_return_path) { rdma_return_path = qemu_rdma_data_init(rdma->host_port, NULL); if (rdma_return_path == NULL) { rdma_ack_cm_event(cm_event); @@ -3455,7 +3461,8 @@ static int qemu_rdma_accept(RDMAContext *rdma) } /* Accept the second connection request for return path */ - if (migrate_postcopy() && !rdma->is_return_path) { + if ((migrate_postcopy() || migrate_return_path()) + && !rdma->is_return_path) { qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration, NULL, (void *)(intptr_t)rdma->return_path); @@ -3519,7 +3526,7 @@ static int dest_ram_sort_func(const void *a, const void *b) * * Keep doing this until the source tells us to stop. */ -static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque) +static int qemu_rdma_registration_handle(QEMUFile *f) { RDMAControlHeader reg_resp = { .len = sizeof(RDMARegisterResult), .type = RDMA_CONTROL_REGISTER_RESULT, @@ -3531,7 +3538,7 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque) }; RDMAControlHeader blocks = { .type = RDMA_CONTROL_RAM_BLOCKS_RESULT, .repeat = 1 }; - QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque); + QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f)); RDMAContext *rdma; RDMALocalBlocks *local; RDMAControlHeader head; @@ -3803,9 +3810,10 @@ out: * the source. 
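Two recurring moves in the rdma.c hunks above: the migration_in_postcopy() early-outs are hoisted ahead of the RCU read section (no point taking the guard for a no-op), and the second queue pair is now created when either postcopy or the return-path capability asks for it. The widened guard is worth a line of its own; a sketch with the capability queries assumed as plain booleans:

    #include <stdbool.h>

    /* Hypothetical condensation of the guard repeated in rdma.c. */
    static bool needs_return_qp(bool postcopy, bool return_path,
                                bool is_return_path)
    {
        /* Either feature needs the extra connection, but never while
         * we are already servicing the return path itself. */
        return (postcopy || return_path) && !is_return_path;
    }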
*/ static int -rdma_block_notification_handle(QIOChannelRDMA *rioc, const char *name) +rdma_block_notification_handle(QEMUFile *f, const char *name) { RDMAContext *rdma; + QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f)); int curr; int found = -1; @@ -3838,13 +3846,12 @@ rdma_block_notification_handle(QIOChannelRDMA *rioc, const char *name) static int rdma_load_hook(QEMUFile *f, uint64_t flags, void *data) { - QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f)); switch (flags) { case RAM_CONTROL_BLOCK_REG: - return rdma_block_notification_handle(rioc, data); + return rdma_block_notification_handle(f, data); case RAM_CONTROL_HOOK: - return qemu_rdma_registration_handle(f, rioc); + return qemu_rdma_registration_handle(f); default: /* Shouldn't be called with any other values */ @@ -3858,6 +3865,10 @@ static int qemu_rdma_registration_start(QEMUFile *f, QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f)); RDMAContext *rdma; + if (migration_in_postcopy()) { + return 0; + } + RCU_READ_LOCK_GUARD(); rdma = qatomic_rcu_read(&rioc->rdmaout); if (!rdma) { @@ -3866,10 +3877,6 @@ static int qemu_rdma_registration_start(QEMUFile *f, CHECK_ERROR_STATE(); - if (migration_in_postcopy()) { - return 0; - } - trace_qemu_rdma_registration_start(flags); qemu_put_be64(f, RAM_SAVE_FLAG_HOOK); qemu_fflush(f); @@ -3889,6 +3896,10 @@ static int qemu_rdma_registration_stop(QEMUFile *f, RDMAControlHeader head = { .len = 0, .repeat = 1 }; int ret = 0; + if (migration_in_postcopy()) { + return 0; + } + RCU_READ_LOCK_GUARD(); rdma = qatomic_rcu_read(&rioc->rdmaout); if (!rdma) { @@ -3897,10 +3908,6 @@ static int qemu_rdma_registration_stop(QEMUFile *f, CHECK_ERROR_STATE(); - if (migration_in_postcopy()) { - return 0; - } - qemu_fflush(f); ret = qemu_rdma_drain_cq(f, rdma); @@ -4109,7 +4116,7 @@ static void rdma_accept_incoming_migration(void *opaque) void rdma_start_incoming_migration(const char *host_port, Error **errp) { int ret; - RDMAContext *rdma, *rdma_return_path = NULL; + RDMAContext *rdma; Error *local_err = NULL; trace_rdma_start_incoming_migration(); @@ -4155,7 +4162,6 @@ err: g_free(rdma->host_port); } g_free(rdma); - g_free(rdma_return_path); } void rdma_start_outgoing_migration(void *opaque, @@ -4177,8 +4183,7 @@ void rdma_start_outgoing_migration(void *opaque, goto err; } - ret = qemu_rdma_source_init(rdma, - s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL], errp); + ret = qemu_rdma_source_init(rdma, migrate_rdma_pin_all(), errp); if (ret) { goto err; @@ -4192,7 +4197,7 @@ void rdma_start_outgoing_migration(void *opaque, } /* RDMA postcopy need a separate queue pair for return path */ - if (migrate_postcopy()) { + if (migrate_postcopy() || migrate_return_path()) { rdma_return_path = qemu_rdma_data_init(host_port, errp); if (rdma_return_path == NULL) { @@ -4200,7 +4205,7 @@ void rdma_start_outgoing_migration(void *opaque, } ret = qemu_rdma_source_init(rdma_return_path, - s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL], errp); + migrate_rdma_pin_all(), errp); if (ret) { goto return_path_err; diff --git a/migration/savevm.c b/migration/savevm.c index f2f345a391..c2b4c81cd2 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -31,6 +31,7 @@ #include "net/net.h" #include "migration.h" #include "migration/snapshot.h" +#include "migration-stats.h" #include "migration/vmstate.h" #include "migration/misc.h" #include "migration/register.h" @@ -67,6 +68,7 @@ #include "qemu/yank.h" #include "yank_functions.h" #include "sysemu/qtest.h" +#include "options.h" 
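Both the rdma.c hunks above (migrate_rdma_pin_all()) and the savevm.c hunks below (s->capabilities[i]) belong to the same cleanup: call sites stop indexing s->enabled_capabilities directly and go through small per-capability helpers. The pattern, reduced to a self-contained sketch with an assumed two-entry capability set:

    #include <stdbool.h>

    typedef enum { CAP_RDMA_PIN_ALL, CAP_RETURN_PATH, CAP__MAX } Cap;

    typedef struct {
        bool capabilities[CAP__MAX];
    } MigState;

    static MigState current_mig_state;

    /* One tiny accessor per capability keeps call sites readable and
     * hides the storage, so renaming the array is a one-file change. */
    static bool cap_rdma_pin_all(void)
    {
        return current_mig_state.capabilities[CAP_RDMA_PIN_ALL];
    }

    static bool cap_return_path(void)
    {
        return current_mig_state.capabilities[CAP_RETURN_PATH];
    }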
const unsigned int postcopy_ram_discard_version; @@ -253,7 +255,7 @@ static uint32_t get_validatable_capabilities_count(void) uint32_t result = 0; int i; for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { - if (should_validate_capability(i) && s->enabled_capabilities[i]) { + if (should_validate_capability(i) && s->capabilities[i]) { result++; } } @@ -275,7 +277,7 @@ static int configuration_pre_save(void *opaque) state->capabilities = g_renew(MigrationCapability, state->capabilities, state->caps_count); for (i = j = 0; i < MIGRATION_CAPABILITY__MAX; i++) { - if (should_validate_capability(i) && s->enabled_capabilities[i]) { + if (should_validate_capability(i) && s->capabilities[i]) { state->capabilities[j++] = i; } } @@ -325,7 +327,7 @@ static bool configuration_validate_capabilities(SaveState *state) continue; } source_state = test_bit(i, source_caps_bm); - target_state = s->enabled_capabilities[i]; + target_state = s->capabilities[i]; if (source_state != target_state) { error_report("Capability %s is %s, but received capability is %s", MigrationCapability_str(i), @@ -535,6 +537,9 @@ static void dump_vmstate_vmsf(FILE *out_file, const VMStateField *field, field->version_id); fprintf(out_file, "%*s\"field_exists\": %s,\n", indent, "", field->field_exists ? "true" : "false"); + if (field->flags & VMS_ARRAY) { + fprintf(out_file, "%*s\"num\": %d,\n", indent, "", field->num); + } fprintf(out_file, "%*s\"size\": %zu", indent, "", field->size); if (field->vmsd != NULL) { fprintf(out_file, ",\n"); @@ -922,11 +927,9 @@ static int vmstate_load(QEMUFile *f, SaveStateEntry *se) static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se, JSONWriter *vmdesc) { - int64_t old_offset, size; - - old_offset = qemu_file_total_transferred_fast(f); + uint64_t old_offset = qemu_file_transferred_fast(f); se->ops->save_state(f, se->opaque); - size = qemu_file_total_transferred_fast(f) - old_offset; + uint64_t size = qemu_file_transferred_fast(f) - old_offset; if (vmdesc) { json_writer_int64(vmdesc, "size", size); @@ -1343,7 +1346,7 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) !(se->ops->has_postcopy && se->ops->has_postcopy(se->opaque))) { continue; } - if (qemu_file_rate_limit(f)) { + if (migration_rate_exceeded(f)) { return 0; } trace_savevm_section_start(se->idstr, se->section_id); @@ -1619,13 +1622,13 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp) return -EINVAL; } - if (migrate_use_block()) { + if (migrate_block()) { error_setg(errp, "Block migration and snapshots are incompatible"); return -EINVAL; } migrate_init(ms); - memset(&ram_counters, 0, sizeof(ram_counters)); + memset(&mig_stats, 0, sizeof(mig_stats)); memset(&compression_counters, 0, sizeof(compression_counters)); ms->to_dst_file = f; @@ -1760,7 +1763,8 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis, return -EINVAL; } - if (!postcopy_ram_supported_by_host(mis)) { + if (!postcopy_ram_supported_by_host(mis, &local_err)) { + error_report_err(local_err); postcopy_state_set(POSTCOPY_INCOMING_NONE); return -1; } @@ -2960,7 +2964,7 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate, goto the_end; } ret = qemu_savevm_state(f, errp); - vm_state_size = qemu_file_total_transferred(f); + vm_state_size = qemu_file_transferred(f); ret2 = qemu_fclose(f); if (ret < 0) { goto the_end; diff --git a/migration/socket.c b/migration/socket.c index e6fdf3c5e1..1b6f5baefb 100644 --- a/migration/socket.c +++ b/migration/socket.c @@ -27,6 +27,7 @@ #include "io/net-listener.h" #include 
"trace.h" #include "postcopy-ram.h" +#include "options.h" struct SocketOutgoingArgs { SocketAddress *saddr; @@ -97,7 +98,7 @@ static void socket_outgoing_migration(QIOTask *task, trace_migration_socket_outgoing_connected(data->hostname); - if (migrate_use_zero_copy_send() && + if (migrate_zero_copy_send() && !qio_channel_has_feature(sioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) { error_setg(&err, "Zero copy send feature not detected in host kernel"); } @@ -182,7 +183,7 @@ socket_start_incoming_migration_internal(SocketAddress *saddr, qio_net_listener_set_name(listener, "migration-socket-listener"); - if (migrate_use_multifd()) { + if (migrate_multifd()) { num = migrate_multifd_channels(); } else if (migrate_postcopy_preempt()) { num = RAM_CHANNEL_MAX; diff --git a/migration/target.c b/migration/target.c index 907ebf0a0a..00ca007f97 100644 --- a/migration/target.c +++ b/migration/target.c @@ -8,6 +8,7 @@ #include "qemu/osdep.h" #include "qapi/qapi-types-migration.h" #include "migration.h" +#include CONFIG_DEVICES #ifdef CONFIG_VFIO #include "hw/vfio/vfio-common.h" @@ -17,7 +18,6 @@ void populate_vfio_info(MigrationInfo *info) { #ifdef CONFIG_VFIO if (vfio_mig_active()) { - info->has_vfio = true; info->vfio = g_malloc0(sizeof(*info->vfio)); info->vfio->transferred = vfio_mig_bytes_transferred(); } diff --git a/migration/tls.c b/migration/tls.c index 4d2166a209..fa03d9136c 100644 --- a/migration/tls.c +++ b/migration/tls.c @@ -22,31 +22,29 @@ #include "channel.h" #include "migration.h" #include "tls.h" +#include "options.h" #include "crypto/tlscreds.h" #include "qemu/error-report.h" #include "qapi/error.h" #include "trace.h" static QCryptoTLSCreds * -migration_tls_get_creds(MigrationState *s, - QCryptoTLSCredsEndpoint endpoint, - Error **errp) +migration_tls_get_creds(QCryptoTLSCredsEndpoint endpoint, Error **errp) { Object *creds; + const char *tls_creds = migrate_tls_creds(); QCryptoTLSCreds *ret; - creds = object_resolve_path_component( - object_get_objects_root(), s->parameters.tls_creds); + creds = object_resolve_path_component(object_get_objects_root(), tls_creds); if (!creds) { - error_setg(errp, "No TLS credentials with id '%s'", - s->parameters.tls_creds); + error_setg(errp, "No TLS credentials with id '%s'", tls_creds); return NULL; } ret = (QCryptoTLSCreds *)object_dynamic_cast( creds, TYPE_QCRYPTO_TLS_CREDS); if (!ret) { error_setg(errp, "Object with id '%s' is not TLS credentials", - s->parameters.tls_creds); + tls_creds); return NULL; } if (!qcrypto_tls_creds_check_endpoint(ret, endpoint, errp)) { @@ -80,16 +78,12 @@ void migration_tls_channel_process_incoming(MigrationState *s, QCryptoTLSCreds *creds; QIOChannelTLS *tioc; - creds = migration_tls_get_creds( - s, QCRYPTO_TLS_CREDS_ENDPOINT_SERVER, errp); + creds = migration_tls_get_creds(QCRYPTO_TLS_CREDS_ENDPOINT_SERVER, errp); if (!creds) { return; } - tioc = qio_channel_tls_new_server( - ioc, creds, - s->parameters.tls_authz, - errp); + tioc = qio_channel_tls_new_server(ioc, creds, migrate_tls_authz(), errp); if (!tioc) { return; } @@ -120,21 +114,20 @@ static void migration_tls_outgoing_handshake(QIOTask *task, object_unref(OBJECT(ioc)); } -QIOChannelTLS *migration_tls_client_create(MigrationState *s, - QIOChannel *ioc, +QIOChannelTLS *migration_tls_client_create(QIOChannel *ioc, const char *hostname, Error **errp) { QCryptoTLSCreds *creds; - creds = migration_tls_get_creds( - s, QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT, errp); + creds = migration_tls_get_creds(QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT, errp); if (!creds) { return NULL; } - 
if (s->parameters.tls_hostname && *s->parameters.tls_hostname) { - hostname = s->parameters.tls_hostname; + const char *tls_hostname = migrate_tls_hostname(); + if (tls_hostname && *tls_hostname) { + hostname = tls_hostname; } return qio_channel_tls_new_client(ioc, creds, hostname, errp); @@ -147,7 +140,7 @@ void migration_tls_channel_connect(MigrationState *s, { QIOChannelTLS *tioc; - tioc = migration_tls_client_create(s, ioc, hostname, errp); + tioc = migration_tls_client_create(ioc, hostname, errp); if (!tioc) { return; } @@ -165,7 +158,7 @@ void migration_tls_channel_connect(MigrationState *s, bool migrate_channel_requires_tls_upgrade(QIOChannel *ioc) { - if (!migrate_use_tls()) { + if (!migrate_tls()) { return false; } diff --git a/migration/tls.h b/migration/tls.h index 98e23c9b0e..5797d153cb 100644 --- a/migration/tls.h +++ b/migration/tls.h @@ -28,8 +28,7 @@ void migration_tls_channel_process_incoming(MigrationState *s, QIOChannel *ioc, Error **errp); -QIOChannelTLS *migration_tls_client_create(MigrationState *s, - QIOChannel *ioc, +QIOChannelTLS *migration_tls_client_create(QIOChannel *ioc, const char *hostname, Error **errp); diff --git a/migration/trace-events b/migration/trace-events index 92161eeac5..cdaef7a1ea 100644 --- a/migration/trace-events +++ b/migration/trace-events @@ -186,6 +186,9 @@ process_incoming_migration_co_end(int ret, int ps) "ret=%d postcopy-state=%d" process_incoming_migration_co_postcopy_end_main(void) "" postcopy_preempt_enabled(bool value) "%d" +# migration-stats +migration_transferred_bytes(uint64_t qemu_file, uint64_t multifd) "qemu_file %" PRIu64 " multifd %" PRIu64 + # channel.c migration_set_incoming_channel(void *ioc, const char *ioctype) "ioc=%p ioctype=%s" migration_set_outgoing_channel(void *ioc, const char *ioctype, const char *hostname, void *err) "ioc=%p ioctype=%s hostname=%s err=%p" @@ -342,8 +345,8 @@ dirty_bitmap_load_success(void) "" # dirtyrate.c dirtyrate_set_state(const char *new_state) "new state %s" query_dirty_rate_info(const char *new_state) "current state %s" -get_ramblock_vfn_hash(const char *idstr, uint64_t vfn, uint32_t crc) "ramblock name: %s, vfn: %"PRIu64 ", crc: %" PRIu32 -calc_page_dirty_rate(const char *idstr, uint32_t new_crc, uint32_t old_crc) "ramblock name: %s, new crc: %" PRIu32 ", old crc: %" PRIu32 +get_ramblock_vfn_hash(const char *idstr, uint64_t vfn, uint32_t hash) "ramblock name: %s, vfn: %"PRIu64 ", hash: %" PRIu32 +calc_page_dirty_rate(const char *idstr, uint32_t new_hash, uint32_t old_hash) "ramblock name: %s, new hash: %" PRIu32 ", old hash: %" PRIu32 skip_sample_ramblock(const char *idstr, uint64_t ramblock_size) "ramblock name: %s, ramblock size: %" PRIu64 find_page_matched(const char *idstr) "ramblock %s addr or size changed" dirtyrate_calculate(int64_t dirtyrate) "dirty rate: %" PRIi64 " MB/s" diff --git a/migration/vmstate.c b/migration/vmstate.c index 83ca4c7d3e..af01d54b6f 100644 --- a/migration/vmstate.c +++ b/migration/vmstate.c @@ -349,7 +349,7 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, void *first_elem = opaque + field->offset; int i, n_elems = vmstate_n_elems(opaque, field); int size = vmstate_size(opaque, field); - int64_t old_offset, written_bytes; + uint64_t old_offset, written_bytes; JSONWriter *vmdesc_loop = vmdesc; trace_vmstate_save_state_loop(vmsd->name, field->name, n_elems); @@ -361,7 +361,7 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, void *curr_elem = first_elem + size * i; vmsd_desc_field_start(vmsd, vmdesc_loop, field, i, 
n_elems); - old_offset = qemu_file_total_transferred_fast(f); + old_offset = qemu_file_transferred_fast(f); if (field->flags & VMS_ARRAY_OF_POINTER) { assert(curr_elem); curr_elem = *(void **)curr_elem; @@ -391,8 +391,7 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, return ret; } - written_bytes = qemu_file_total_transferred_fast(f) - - old_offset; + written_bytes = qemu_file_transferred_fast(f) - old_offset; vmsd_desc_field_end(vmsd, vmdesc_loop, field, written_bytes, i); /* Compressed arrays only care about the first element */ diff --git a/migration/xbzrle.c b/migration/xbzrle.c index 05366e86c0..3eddcf249b 100644 --- a/migration/xbzrle.c +++ b/migration/xbzrle.c @@ -12,8 +12,155 @@ */ #include "qemu/osdep.h" #include "qemu/cutils.h" +#include "qemu/host-utils.h" #include "xbzrle.h" +#if defined(CONFIG_AVX512BW_OPT) +#include +#include "host/cpuinfo.h" + +static int __attribute__((target("avx512bw"))) +xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen, + uint8_t *dst, int dlen) +{ + uint32_t zrun_len = 0, nzrun_len = 0; + int d = 0, i = 0, num = 0; + uint8_t *nzrun_start = NULL; + /* add 1 to include residual part in main loop */ + uint32_t count512s = (slen >> 6) + 1; + /* countResidual is tail of data, i.e., countResidual = slen % 64 */ + uint32_t count_residual = slen & 0b111111; + bool never_same = true; + uint64_t mask_residual = 1; + mask_residual <<= count_residual; + mask_residual -= 1; + __m512i r = _mm512_set1_epi32(0); + + while (count512s) { + int bytes_to_check = 64; + uint64_t mask = 0xffffffffffffffff; + if (count512s == 1) { + bytes_to_check = count_residual; + mask = mask_residual; + } + __m512i old_data = _mm512_mask_loadu_epi8(r, + mask, old_buf + i); + __m512i new_data = _mm512_mask_loadu_epi8(r, + mask, new_buf + i); + uint64_t comp = _mm512_cmpeq_epi8_mask(old_data, new_data); + count512s--; + + bool is_same = (comp & 0x1); + while (bytes_to_check) { + if (d + 2 > dlen) { + return -1; + } + if (is_same) { + if (nzrun_len) { + d += uleb128_encode_small(dst + d, nzrun_len); + if (d + nzrun_len > dlen) { + return -1; + } + nzrun_start = new_buf + i - nzrun_len; + memcpy(dst + d, nzrun_start, nzrun_len); + d += nzrun_len; + nzrun_len = 0; + } + /* 64 data at a time for speed */ + if (count512s && (comp == 0xffffffffffffffff)) { + i += 64; + zrun_len += 64; + break; + } + never_same = false; + num = ctz64(~comp); + num = (num < bytes_to_check) ? num : bytes_to_check; + zrun_len += num; + bytes_to_check -= num; + comp >>= num; + i += num; + if (bytes_to_check) { + /* still has different data after same data */ + d += uleb128_encode_small(dst + d, zrun_len); + zrun_len = 0; + } else { + break; + } + } + if (never_same || zrun_len) { + /* + * never_same only acts if + * data begins with diff in first count512s + */ + d += uleb128_encode_small(dst + d, zrun_len); + zrun_len = 0; + never_same = false; + } + /* has diff, 64 data at a time for speed */ + if ((bytes_to_check == 64) && (comp == 0x0)) { + i += 64; + nzrun_len += 64; + break; + } + num = ctz64(comp); + num = (num < bytes_to_check) ? 
num : bytes_to_check; + nzrun_len += num; + bytes_to_check -= num; + comp >>= num; + i += num; + if (bytes_to_check) { + /* mask like 111000 */ + d += uleb128_encode_small(dst + d, nzrun_len); + /* overflow */ + if (d + nzrun_len > dlen) { + return -1; + } + nzrun_start = new_buf + i - nzrun_len; + memcpy(dst + d, nzrun_start, nzrun_len); + d += nzrun_len; + nzrun_len = 0; + is_same = true; + } + } + } + + if (nzrun_len != 0) { + d += uleb128_encode_small(dst + d, nzrun_len); + /* overflow */ + if (d + nzrun_len > dlen) { + return -1; + } + nzrun_start = new_buf + i - nzrun_len; + memcpy(dst + d, nzrun_start, nzrun_len); + d += nzrun_len; + } + return d; +} + +static int xbzrle_encode_buffer_int(uint8_t *old_buf, uint8_t *new_buf, + int slen, uint8_t *dst, int dlen); + +static int (*accel_func)(uint8_t *, uint8_t *, int, uint8_t *, int); + +static void __attribute__((constructor)) init_accel(void) +{ + unsigned info = cpuinfo_init(); + if (info & CPUINFO_AVX512BW) { + accel_func = xbzrle_encode_buffer_avx512; + } else { + accel_func = xbzrle_encode_buffer_int; + } +} + +int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen, + uint8_t *dst, int dlen) +{ + return accel_func(old_buf, new_buf, slen, dst, dlen); +} + +#define xbzrle_encode_buffer xbzrle_encode_buffer_int +#endif + /* page = zrun nzrun | zrun nzrun page @@ -174,127 +321,3 @@ int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen) return d; } - -#if defined(CONFIG_AVX512BW_OPT) -#pragma GCC push_options -#pragma GCC target("avx512bw") -#include -int xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen, - uint8_t *dst, int dlen) -{ - uint32_t zrun_len = 0, nzrun_len = 0; - int d = 0, i = 0, num = 0; - uint8_t *nzrun_start = NULL; - /* add 1 to include residual part in main loop */ - uint32_t count512s = (slen >> 6) + 1; - /* countResidual is tail of data, i.e., countResidual = slen % 64 */ - uint32_t count_residual = slen & 0b111111; - bool never_same = true; - uint64_t mask_residual = 1; - mask_residual <<= count_residual; - mask_residual -= 1; - __m512i r = _mm512_set1_epi32(0); - - while (count512s) { - if (d + 2 > dlen) { - return -1; - } - - int bytes_to_check = 64; - uint64_t mask = 0xffffffffffffffff; - if (count512s == 1) { - bytes_to_check = count_residual; - mask = mask_residual; - } - __m512i old_data = _mm512_mask_loadu_epi8(r, - mask, old_buf + i); - __m512i new_data = _mm512_mask_loadu_epi8(r, - mask, new_buf + i); - uint64_t comp = _mm512_cmpeq_epi8_mask(old_data, new_data); - count512s--; - - bool is_same = (comp & 0x1); - while (bytes_to_check) { - if (is_same) { - if (nzrun_len) { - d += uleb128_encode_small(dst + d, nzrun_len); - if (d + nzrun_len > dlen) { - return -1; - } - nzrun_start = new_buf + i - nzrun_len; - memcpy(dst + d, nzrun_start, nzrun_len); - d += nzrun_len; - nzrun_len = 0; - } - /* 64 data at a time for speed */ - if (count512s && (comp == 0xffffffffffffffff)) { - i += 64; - zrun_len += 64; - break; - } - never_same = false; - num = __builtin_ctzll(~comp); - num = (num < bytes_to_check) ? 
num : bytes_to_check; - zrun_len += num; - bytes_to_check -= num; - comp >>= num; - i += num; - if (bytes_to_check) { - /* still has different data after same data */ - d += uleb128_encode_small(dst + d, zrun_len); - zrun_len = 0; - } else { - break; - } - } - if (never_same || zrun_len) { - /* - * never_same only acts if - * data begins with diff in first count512s - */ - d += uleb128_encode_small(dst + d, zrun_len); - zrun_len = 0; - never_same = false; - } - /* has diff, 64 data at a time for speed */ - if ((bytes_to_check == 64) && (comp == 0x0)) { - i += 64; - nzrun_len += 64; - break; - } - num = __builtin_ctzll(comp); - num = (num < bytes_to_check) ? num : bytes_to_check; - nzrun_len += num; - bytes_to_check -= num; - comp >>= num; - i += num; - if (bytes_to_check) { - /* mask like 111000 */ - d += uleb128_encode_small(dst + d, nzrun_len); - /* overflow */ - if (d + nzrun_len > dlen) { - return -1; - } - nzrun_start = new_buf + i - nzrun_len; - memcpy(dst + d, nzrun_start, nzrun_len); - d += nzrun_len; - nzrun_len = 0; - is_same = true; - } - } - } - - if (nzrun_len != 0) { - d += uleb128_encode_small(dst + d, nzrun_len); - /* overflow */ - if (d + nzrun_len > dlen) { - return -1; - } - nzrun_start = new_buf + i - nzrun_len; - memcpy(dst + d, nzrun_start, nzrun_len); - d += nzrun_len; - } - return d; -} -#pragma GCC pop_options -#endif diff --git a/migration/xbzrle.h b/migration/xbzrle.h index 6feb49160a..39e651b9ec 100644 --- a/migration/xbzrle.h +++ b/migration/xbzrle.h @@ -18,8 +18,5 @@ int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen, uint8_t *dst, int dlen); int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen); -#if defined(CONFIG_AVX512BW_OPT) -int xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen, - uint8_t *dst, int dlen); -#endif + #endif diff --git a/monitor/fds.c b/monitor/fds.c index 26b39a0ce6..d86c2c674c 100644 --- a/monitor/fds.c +++ b/monitor/fds.c @@ -61,11 +61,48 @@ struct MonFdset { static QemuMutex mon_fdsets_lock; static QLIST_HEAD(, MonFdset) mon_fdsets; +static bool monitor_add_fd(Monitor *mon, int fd, const char *fdname, Error **errp) +{ + mon_fd_t *monfd; + + if (qemu_isdigit(fdname[0])) { + close(fd); + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdname", + "a name not starting with a digit"); + return false; + } + + /* See close() call below. 
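The net effect of the xbzrle.c/xbzrle.h churn above: the AVX512 encoder stops being a separately exported symbol and becomes an internal alternative selected once at startup, via a constructor that probes the CPU and stores a function pointer. The generic dispatch idiom, self-contained, with cpu_has_avx512bw() standing in for QEMU's cpuinfo query:

    #include <stdbool.h>
    #include <stdint.h>

    typedef int (*encode_fn)(uint8_t *, uint8_t *, int, uint8_t *, int);

    extern bool cpu_has_avx512bw(void);                    /* stand-in */
    extern int encode_generic(uint8_t *, uint8_t *, int, uint8_t *, int);
    extern int encode_avx512(uint8_t *, uint8_t *, int, uint8_t *, int);

    static encode_fn encode_impl;

    /* Runs before main(); every later call pays one indirect jump. */
    static void __attribute__((constructor)) pick_encoder(void)
    {
        encode_impl = cpu_has_avx512bw() ? encode_avx512 : encode_generic;
    }

    int encode(uint8_t *old, uint8_t *cur, int slen, uint8_t *dst, int dlen)
    {
        return encode_impl(old, cur, slen, dst, dlen);
    }

Separately, the new monitor_add_fd() (continuing just below) keeps the close() of a displaced descriptor outside the critical section, since close() may block. The same idiom with pthreads standing in for QemuMutex:

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t fd_lock = PTHREAD_MUTEX_INITIALIZER;
    static int current_fd = -1;

    static void replace_fd(int fd)
    {
        pthread_mutex_lock(&fd_lock);
        int old = current_fd;          /* detach under the lock */
        current_fd = fd;
        pthread_mutex_unlock(&fd_lock);
        if (old >= 0) {
            close(old);                /* possibly blocking: lock dropped */
        }
    }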
*/ + qemu_mutex_lock(&mon->mon_lock); + QLIST_FOREACH(monfd, &mon->fds, next) { + int tmp_fd; + + if (strcmp(monfd->name, fdname) != 0) { + continue; + } + + tmp_fd = monfd->fd; + monfd->fd = fd; + qemu_mutex_unlock(&mon->mon_lock); + /* Make sure close() is outside critical section */ + close(tmp_fd); + return true; + } + + monfd = g_new0(mon_fd_t, 1); + monfd->name = g_strdup(fdname); + monfd->fd = fd; + + QLIST_INSERT_HEAD(&mon->fds, monfd, next); + qemu_mutex_unlock(&mon->mon_lock); + return true; +} + +#ifdef CONFIG_POSIX void qmp_getfd(const char *fdname, Error **errp) { Monitor *cur_mon = monitor_cur(); - mon_fd_t *monfd; - int fd, tmp_fd; + int fd; fd = qemu_chr_fe_get_msgfd(&cur_mon->chr); if (fd == -1) { @@ -73,32 +110,9 @@ void qmp_getfd(const char *fdname, Error **errp) return; } - if (qemu_isdigit(fdname[0])) { - close(fd); - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdname", - "a name not starting with a digit"); - return; - } - - QEMU_LOCK_GUARD(&cur_mon->mon_lock); - QLIST_FOREACH(monfd, &cur_mon->fds, next) { - if (strcmp(monfd->name, fdname) != 0) { - continue; - } - - tmp_fd = monfd->fd; - monfd->fd = fd; - /* Make sure close() is outside critical section */ - close(tmp_fd); - return; - } - - monfd = g_new0(mon_fd_t, 1); - monfd->name = g_strdup(fdname); - monfd->fd = fd; - - QLIST_INSERT_HEAD(&cur_mon->fds, monfd, next); + monitor_add_fd(cur_mon, fd, fdname, errp); } +#endif void qmp_closefd(const char *fdname, Error **errp) { @@ -211,6 +225,41 @@ error: return NULL; } +#ifdef WIN32 +void qmp_get_win32_socket(const char *infos, const char *fdname, Error **errp) +{ + g_autofree WSAPROTOCOL_INFOW *info = NULL; + gsize len; + SOCKET sk; + int fd; + + info = (void *)g_base64_decode(infos, &len); + if (len != sizeof(*info)) { + error_setg(errp, "Invalid WSAPROTOCOL_INFOW value"); + return; + } + + sk = WSASocketW(FROM_PROTOCOL_INFO, + FROM_PROTOCOL_INFO, + FROM_PROTOCOL_INFO, + info, 0, 0); + if (sk == INVALID_SOCKET) { + error_setg_win32(errp, WSAGetLastError(), "Couldn't import socket"); + return; + } + + fd = _open_osfhandle(sk, _O_BINARY); + if (fd < 0) { + error_setg_errno(errp, errno, "Failed to associate a FD with the SOCKET"); + closesocket(sk); + return; + } + + monitor_add_fd(monitor_cur(), fd, fdname, errp); +} +#endif + + void qmp_remove_fd(int64_t fdset_id, bool has_fd, int64_t fd, Error **errp) { MonFdset *mon_fdset; diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c index 34bd8c67d7..6c559b48c8 100644 --- a/monitor/hmp-cmds.c +++ b/monitor/hmp-cmds.c @@ -192,6 +192,7 @@ void hmp_change(Monitor *mon, const QDict *qdict) hmp_handle_error(mon, err); } +#ifdef CONFIG_POSIX void hmp_getfd(Monitor *mon, const QDict *qdict) { const char *fdname = qdict_get_str(qdict, "fdname"); @@ -200,6 +201,7 @@ void hmp_getfd(Monitor *mon, const QDict *qdict) qmp_getfd(fdname, &err); hmp_handle_error(mon, err); } +#endif void hmp_closefd(Monitor *mon, const QDict *qdict) { diff --git a/monitor/hmp.c b/monitor/hmp.c index 2aa85d3982..69c1b7e98a 100644 --- a/monitor/hmp.c +++ b/monitor/hmp.c @@ -1167,7 +1167,7 @@ void handle_hmp_command(MonitorHMP *mon, const char *cmdline) Coroutine *co = qemu_coroutine_create(handle_hmp_command_co, &data); monitor_set_cur(co, &mon->common); aio_co_enter(qemu_get_aio_context(), co); - AIO_WAIT_WHILE(qemu_get_aio_context(), !data.done); + AIO_WAIT_WHILE_UNLOCKED(NULL, !data.done); } qobject_unref(qdict); @@ -1189,9 +1189,7 @@ static void cmd_completion(MonitorHMP *mon, const char *name, const char *list) } memcpy(cmd, pstart, len); cmd[len] 
= '\0'; - if (name[0] == '\0') { - readline_add_completion_of(mon->rs, name, cmd); - } + readline_add_completion_of(mon->rs, name, cmd); if (*p == '\0') { break; } @@ -1335,9 +1333,7 @@ static void monitor_find_completion_by_table(MonitorHMP *mon, /* block device name completion */ readline_set_completion_index(mon->rs, strlen(str)); while ((blk = blk_next(blk)) != NULL) { - if (str[0] == '\0') { - readline_add_completion_of(mon->rs, str, blk_name(blk)); - } + readline_add_completion_of(mon->rs, str, blk_name(blk)); } break; case 's': @@ -1405,45 +1401,42 @@ static void monitor_read(void *opaque, const uint8_t *buf, int size) static void monitor_event(void *opaque, QEMUChrEvent event) { Monitor *mon = opaque; - MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common); switch (event) { case CHR_EVENT_MUX_IN: qemu_mutex_lock(&mon->mon_lock); - mon->mux_out = 0; - qemu_mutex_unlock(&mon->mon_lock); - if (mon->reset_seen) { - readline_restart(hmp_mon->rs); + if (mon->mux_out) { + mon->mux_out = 0; monitor_resume(mon); - monitor_flush(mon); - } else { - qatomic_mb_set(&mon->suspend_cnt, 0); } + qemu_mutex_unlock(&mon->mon_lock); break; case CHR_EVENT_MUX_OUT: - if (mon->reset_seen) { - if (qatomic_mb_read(&mon->suspend_cnt) == 0) { - monitor_printf(mon, "\n"); - } - monitor_flush(mon); - monitor_suspend(mon); - } else { - qatomic_inc(&mon->suspend_cnt); - } qemu_mutex_lock(&mon->mon_lock); - mon->mux_out = 1; + if (!mon->mux_out) { + if (mon->reset_seen && !mon->suspend_cnt) { + monitor_puts_locked(mon, "\n"); + } else { + monitor_flush_locked(mon); + } + monitor_suspend(mon); + mon->mux_out = 1; + } qemu_mutex_unlock(&mon->mon_lock); break; case CHR_EVENT_OPENED: monitor_printf(mon, "QEMU %s monitor - type 'help' for more " "information\n", QEMU_VERSION); - if (!mon->mux_out) { - readline_restart(hmp_mon->rs); - readline_show_prompt(hmp_mon->rs); - } + qemu_mutex_lock(&mon->mon_lock); mon->reset_seen = 1; + if (!mon->mux_out) { + /* Suspend-resume forces the prompt to be printed. */ + monitor_suspend(mon); + monitor_resume(mon); + } + qemu_mutex_unlock(&mon->mon_lock); mon_refcount++; break; diff --git a/monitor/monitor-internal.h b/monitor/monitor-internal.h index 53e3808054..252de85681 100644 --- a/monitor/monitor-internal.h +++ b/monitor/monitor-internal.h @@ -94,7 +94,6 @@ typedef struct HMPCommand { struct Monitor { CharBackend chr; - int reset_seen; int suspend_cnt; /* Needs to be accessed atomically */ bool is_qmp; bool skip_flush; @@ -115,8 +114,8 @@ struct Monitor { QLIST_HEAD(, mon_fd_t) fds; GString *outbuf; guint out_watch; - /* Read under either BQL or mon_lock, written with BQL+mon_lock. 
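The monitor_event() rewrite above makes the mux transitions idempotent and puts every access to mux_out and reset_seen under mon_lock, which is why the "BQL or mon_lock" comment in monitor-internal.h below can go away. The shape of such an edge-triggered toggle, with the monitor operations declared as stand-ins:

    #include <stdbool.h>

    extern void lock(void);     /* stand-ins for qemu_mutex_* on mon_lock */
    extern void unlock(void);
    extern void suspend(void);
    extern void resume(void);

    static bool mux_out;

    static void on_mux_event(bool entering)
    {
        lock();
        if (entering && mux_out) {          /* act only on a real edge */
            mux_out = false;
            resume();
        } else if (!entering && !mux_out) {
            suspend();
            mux_out = true;
        }
        unlock();
    }

Spurious duplicate MUX_IN/MUX_OUT events then become harmless no-ops instead of unbalancing the suspend count.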
*/ int mux_out; + int reset_seen; }; struct MonitorHMP { @@ -166,7 +165,6 @@ typedef QTAILQ_HEAD(MonitorList, Monitor) MonitorList; extern IOThread *mon_iothread; extern Coroutine *qmp_dispatcher_co; extern bool qmp_dispatcher_co_shutdown; -extern bool qmp_dispatcher_co_busy; extern QmpCommandList qmp_commands, qmp_cap_negotiation_commands; extern QemuMutex monitor_lock; extern MonitorList mon_list; @@ -184,6 +182,7 @@ void monitor_fdsets_cleanup(void); void qmp_send_response(MonitorQMP *mon, const QDict *rsp); void monitor_data_destroy_qmp(MonitorQMP *mon); void coroutine_fn monitor_qmp_dispatcher_co(void *data); +void qmp_dispatcher_co_wake(void); int get_monitor_def(Monitor *mon, int64_t *pval, const char *name); void handle_hmp_command(MonitorHMP *mon, const char *cmdline); diff --git a/monitor/monitor.c b/monitor/monitor.c index 8dc96f6af9..dc352f9e9d 100644 --- a/monitor/monitor.c +++ b/monitor/monitor.c @@ -56,29 +56,11 @@ IOThread *mon_iothread; /* Coroutine to dispatch the requests received from I/O thread */ Coroutine *qmp_dispatcher_co; -/* Set to true when the dispatcher coroutine should terminate */ -bool qmp_dispatcher_co_shutdown; - /* - * qmp_dispatcher_co_busy is used for synchronisation between the - * monitor thread and the main thread to ensure that the dispatcher - * coroutine never gets scheduled a second time when it's already - * scheduled (scheduling the same coroutine twice is forbidden). - * - * It is true if the coroutine is active and processing requests. - * Additional requests may then be pushed onto mon->qmp_requests, - * and @qmp_dispatcher_co_shutdown may be set without further ado. - * @qmp_dispatcher_co_busy must not be woken up in this case. - * - * If false, you also have to set @qmp_dispatcher_co_busy to true and - * wake up @qmp_dispatcher_co after pushing the new requests. - * - * The coroutine will automatically change this variable back to false - * before it yields. Nobody else may set the variable to false. - * - * Access must be atomic for thread safety. + * Set to true when the dispatcher coroutine should terminate. Protected + * by monitor_lock. 
*/ -bool qmp_dispatcher_co_busy; +bool qmp_dispatcher_co_shutdown; /* * Protects mon_list, monitor_qapi_event_state, coroutine_mon, @@ -154,22 +136,19 @@ static inline bool monitor_is_hmp_non_interactive(const Monitor *mon) return !monitor_uses_readline(container_of(mon, MonitorHMP, common)); } -static void monitor_flush_locked(Monitor *mon); - static gboolean monitor_unblocked(void *do_not_use, GIOCondition cond, void *opaque) { Monitor *mon = opaque; - qemu_mutex_lock(&mon->mon_lock); + QEMU_LOCK_GUARD(&mon->mon_lock); mon->out_watch = 0; monitor_flush_locked(mon); - qemu_mutex_unlock(&mon->mon_lock); return FALSE; } /* Caller must hold mon->mon_lock */ -static void monitor_flush_locked(Monitor *mon) +void monitor_flush_locked(Monitor *mon) { int rc; size_t len; @@ -203,18 +182,16 @@ static void monitor_flush_locked(Monitor *mon) void monitor_flush(Monitor *mon) { - qemu_mutex_lock(&mon->mon_lock); + QEMU_LOCK_GUARD(&mon->mon_lock); monitor_flush_locked(mon); - qemu_mutex_unlock(&mon->mon_lock); } /* flush at every end of line */ -int monitor_puts(Monitor *mon, const char *str) +int monitor_puts_locked(Monitor *mon, const char *str) { int i; char c; - qemu_mutex_lock(&mon->mon_lock); for (i = 0; str[i]; i++) { c = str[i]; if (c == '\n') { @@ -225,11 +202,16 @@ int monitor_puts(Monitor *mon, const char *str) monitor_flush_locked(mon); } } - qemu_mutex_unlock(&mon->mon_lock); return i; } +int monitor_puts(Monitor *mon, const char *str) +{ + QEMU_LOCK_GUARD(&mon->mon_lock); + return monitor_puts_locked(mon, str); +} + int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap) { char *buf; @@ -569,6 +551,17 @@ static void monitor_accept_input(void *opaque) { Monitor *mon = opaque; + qemu_mutex_lock(&mon->mon_lock); + if (!monitor_is_qmp(mon) && mon->reset_seen) { + MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common); + assert(hmp_mon->rs); + readline_restart(hmp_mon->rs); + qemu_mutex_unlock(&mon->mon_lock); + readline_show_prompt(hmp_mon->rs); + } else { + qemu_mutex_unlock(&mon->mon_lock); + } + qemu_chr_fe_accept_input(&mon->chr); } @@ -587,12 +580,6 @@ void monitor_resume(Monitor *mon) ctx = qemu_get_aio_context(); } - if (!monitor_is_qmp(mon)) { - MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common); - assert(hmp_mon->rs); - readline_show_prompt(hmp_mon->rs); - } - aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon); } @@ -603,7 +590,7 @@ int monitor_can_read(void *opaque) { Monitor *mon = opaque; - return !qatomic_mb_read(&mon->suspend_cnt); + return !qatomic_read(&mon->suspend_cnt); } void monitor_list_append(Monitor *mon) @@ -666,7 +653,7 @@ void monitor_cleanup(void) * We need to poll both qemu_aio_context and iohandler_ctx to make * sure that the dispatcher coroutine keeps making progress and * eventually terminates. qemu_aio_context is automatically - * polled by calling AIO_WAIT_WHILE on it, but we must poll + * polled by calling AIO_WAIT_WHILE_UNLOCKED on it, but we must poll * iohandler_ctx manually. * * Letting the iothread continue while shutting down the dispatcher @@ -674,14 +661,14 @@ void monitor_cleanup(void) * we'll just leave them in the queue without sending a response * and monitor_data_destroy() will free them. 
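The monitor.c changes above follow one rule: every function that writes the output buffer gets a _locked variant, and the public wrapper is just QEMU_LOCK_GUARD plus a call to it. That split is what lets monitor_event() print while already holding mon_lock. The pattern in miniature, with a pthread mutex standing in for QemuMutex and the guard spelled out by hand:

    #include <pthread.h>

    static pthread_mutex_t mon_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Caller must hold mon_lock. */
    static int puts_locked(const char *s)
    {
        (void)s;          /* sketch: buffer the string, flush on '\n' */
        return 0;
    }

    /* Public variant: take the lock, delegate. */
    static int my_puts(const char *s)
    {
        pthread_mutex_lock(&mon_lock);
        int r = puts_locked(s);
        pthread_mutex_unlock(&mon_lock);
        return r;
    }

Callers that already hold the lock, such as the MUX_OUT path, call the _locked variant directly and the lock is never taken recursively.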
*/ - qmp_dispatcher_co_shutdown = true; - if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) { - aio_co_wake(qmp_dispatcher_co); + WITH_QEMU_LOCK_GUARD(&monitor_lock) { + qmp_dispatcher_co_shutdown = true; } + qmp_dispatcher_co_wake(); - AIO_WAIT_WHILE(qemu_get_aio_context(), + AIO_WAIT_WHILE_UNLOCKED(NULL, (aio_poll(iohandler_get_aio_context(), false), - qatomic_mb_read(&qmp_dispatcher_co_busy))); + qatomic_read(&qmp_dispatcher_co))); /* * We need to explicitly stop the I/O thread (but not destroy it), @@ -732,7 +719,6 @@ void monitor_init_globals(void) * rid of those assumptions. */ qmp_dispatcher_co = qemu_coroutine_create(monitor_qmp_dispatcher_co, NULL); - qatomic_mb_set(&qmp_dispatcher_co_busy, true); aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co); } diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c index 859012aef4..b0f948d337 100644 --- a/monitor/qmp-cmds.c +++ b/monitor/qmp-cmds.c @@ -14,6 +14,7 @@ */ #include "qemu/osdep.h" +#include "qemu/sockets.h" #include "monitor-internal.h" #include "monitor/qdev.h" #include "monitor/qmp-helpers.h" @@ -139,6 +140,12 @@ void qmp_add_client(const char *protocol, const char *fdname, return; } + if (!fd_is_socket(fd)) { + error_setg(errp, "parameter @fdname must name a socket"); + close(fd); + return; + } + for (i = 0; i < ARRAY_SIZE(protocol_table); i++) { if (!strcmp(protocol, protocol_table[i].name)) { if (!protocol_table[i].add_client(fd, has_skipauth, skipauth, diff --git a/monitor/qmp.c b/monitor/qmp.c index 092c527b6f..c8e0156974 100644 --- a/monitor/qmp.c +++ b/monitor/qmp.c @@ -33,6 +33,30 @@ #include "qapi/qmp/qlist.h" #include "trace.h" +/* + * qmp_dispatcher_co_busy is used for synchronisation between the + * monitor thread and the main thread to ensure that the dispatcher + * coroutine never gets scheduled a second time when it's already + * scheduled (scheduling the same coroutine twice is forbidden). + * + * It is true if the coroutine will process at least one more request + * before going to sleep. Either it has been kicked already, or it + * is active and processing requests. Additional requests may therefore + * be pushed onto mon->qmp_requests, and @qmp_dispatcher_co_shutdown may + * be set without further ado. @qmp_dispatcher_co must not be woken up + * in this case. + * + * If false, you have to wake up @qmp_dispatcher_co after pushing new + * requests. You also have to set @qmp_dispatcher_co_busy to true + * before waking up the coroutine. + * + * The coroutine will automatically change this variable back to false + * before it yields. Nobody else may set the variable to false. + * + * Access must be atomic for thread safety. + */ +static bool qmp_dispatcher_co_busy = true; + struct QMPRequest { /* Owner of the request */ MonitorQMP *mon; @@ -178,8 +202,6 @@ static QMPRequest *monitor_qmp_requests_pop_any_with_lock(void) Monitor *mon; MonitorQMP *qmp_mon; - QEMU_LOCK_GUARD(&monitor_lock); - QTAILQ_FOREACH(mon, &mon_list, entry) { if (!monitor_is_qmp(mon)) { continue; @@ -207,53 +229,56 @@ static QMPRequest *monitor_qmp_requests_pop_any_with_lock(void) return req_obj; } -void coroutine_fn monitor_qmp_dispatcher_co(void *data) +static QMPRequest *monitor_qmp_dispatcher_pop_any(void) { - QMPRequest *req_obj = NULL; - QDict *rsp; - bool oob_enabled; - MonitorQMP *mon; - while (true) { - assert(qatomic_mb_read(&qmp_dispatcher_co_busy) == true); + /* + * To avoid double scheduling, busy is true on entry to + * monitor_qmp_dispatcher_co(), and must be set again before + * aio_co_wake()-ing it. 
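The comment block above describes a wake-once protocol: producers publish their request, then race on an atomic exchange of the busy flag, and only the thread that flips false -> true wakes the coroutine; the coroutine clears the flag before scanning for work, so nothing enqueued after its scan can be lost. A self-contained sketch with C11 atomics standing in for qatomic_xchg()/qatomic_mb_set():

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool busy = true;   /* consumer starts out active */

    extern void wake_consumer(void);  /* stand-in for aio_co_wake() */
    extern bool queue_is_empty(void); /* stand-in for the request scan */

    /* Producer: mirrors qmp_dispatcher_co_wake(). */
    static void kick(void)
    {
        /* the request is enqueued before this point */
        if (!atomic_exchange(&busy, true)) {
            wake_consumer();          /* we won the false -> true flip */
        }
    }

    /* Consumer: clear busy *before* looking at the queue, so any
     * request added after the scan forces a fresh kick(). */
    static bool should_sleep(void)
    {
        atomic_store(&busy, false);
        return queue_is_empty();
    }

The ordering matters on both sides, which is why the real code issues smp_mb__before_rmw() before the exchange and clears with qatomic_mb_set().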
+ */ + assert(qatomic_read(&qmp_dispatcher_co_busy) == true); /* * Mark the dispatcher as not busy already here so that we * don't miss any new requests coming in the middle of our * processing. + * + * Clear qmp_dispatcher_co_busy before reading request. */ qatomic_mb_set(&qmp_dispatcher_co_busy, false); - /* On shutdown, don't take any more requests from the queue */ - if (qmp_dispatcher_co_shutdown) { - return; - } + WITH_QEMU_LOCK_GUARD(&monitor_lock) { + QMPRequest *req_obj; - while (!(req_obj = monitor_qmp_requests_pop_any_with_lock())) { - /* - * No more requests to process. Wait to be reentered from - * handle_qmp_command() when it pushes more requests, or - * from monitor_cleanup() when it requests shutdown. - */ - if (!qmp_dispatcher_co_shutdown) { - qemu_coroutine_yield(); - - /* - * busy must be set to true again by whoever - * rescheduled us to avoid double scheduling - */ - assert(qatomic_xchg(&qmp_dispatcher_co_busy, false) == true); - } - - /* - * qmp_dispatcher_co_shutdown may have changed if we - * yielded and were reentered from monitor_cleanup() - */ + /* On shutdown, don't take any more requests from the queue */ if (qmp_dispatcher_co_shutdown) { - return; + return NULL; + } + + req_obj = monitor_qmp_requests_pop_any_with_lock(); + if (req_obj) { + return req_obj; } } + /* + * No more requests to process. Wait to be reentered from + * handle_qmp_command() when it pushes more requests, or + * from monitor_cleanup() when it requests shutdown. + */ + qemu_coroutine_yield(); + } +} + +void coroutine_fn monitor_qmp_dispatcher_co(void *data) +{ + QMPRequest *req_obj; + QDict *rsp; + bool oob_enabled; + MonitorQMP *mon; + + while ((req_obj = monitor_qmp_dispatcher_pop_any()) != NULL) { trace_monitor_qmp_in_band_dequeue(req_obj, req_obj->mon->qmp_requests->length); @@ -340,6 +365,17 @@ void coroutine_fn monitor_qmp_dispatcher_co(void *data) aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co); qemu_coroutine_yield(); } + qatomic_set(&qmp_dispatcher_co, NULL); +} + +void qmp_dispatcher_co_wake(void) +{ + /* Write request before reading qmp_dispatcher_co_busy. */ + smp_mb__before_rmw(); + + if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) { + aio_co_wake(qmp_dispatcher_co); + } } static void handle_qmp_command(void *opaque, QObject *req, Error *err) @@ -403,9 +439,7 @@ static void handle_qmp_command(void *opaque, QObject *req, Error *err) } /* Kick the dispatcher routine */ - if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) { - aio_co_wake(qmp_dispatcher_co); - } + qmp_dispatcher_co_wake(); } static void monitor_qmp_read(void *opaque, const uint8_t *buf, int size) diff --git a/nbd/server.c b/nbd/server.c index a4750e4188..2664d43bff 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -1409,8 +1409,8 @@ nbd_read_eof(NBDClient *client, void *buffer, size_t size, Error **errp) return 1; } -static int nbd_receive_request(NBDClient *client, NBDRequest *request, - Error **errp) +static int coroutine_fn nbd_receive_request(NBDClient *client, NBDRequest *request, + Error **errp) { uint8_t buf[NBD_REQUEST_SIZE]; uint32_t magic; @@ -1599,8 +1599,7 @@ static bool nbd_drained_poll(void *opaque) * enter it here so we don't depend on the client to wake it up. 
*/ if (client->recv_coroutine != NULL && client->read_yielding) { - qemu_aio_coroutine_enter(exp->common.ctx, - client->recv_coroutine); + qio_channel_wake_read(client->ioc); } return true; @@ -1847,15 +1846,13 @@ static void nbd_export_delete(BlockExport *blk_exp) g_free(exp->description); exp->description = NULL; - if (exp->common.blk) { - if (exp->eject_notifier_blk) { - notifier_remove(&exp->eject_notifier); - blk_unref(exp->eject_notifier_blk); - } - blk_remove_aio_context_notifier(exp->common.blk, blk_aio_attached, - blk_aio_detach, exp); - blk_set_disable_request_queuing(exp->common.blk, false); + if (exp->eject_notifier_blk) { + notifier_remove(&exp->eject_notifier); + blk_unref(exp->eject_notifier_blk); } + blk_remove_aio_context_notifier(exp->common.blk, blk_aio_attached, + blk_aio_detach, exp); + blk_set_disable_request_queuing(exp->common.blk, false); for (i = 0; i < exp->nr_export_bitmaps; i++) { bdrv_dirty_bitmap_set_busy(exp->export_bitmaps[i], false); @@ -1895,12 +1892,12 @@ static inline void set_be_simple_reply(NBDSimpleReply *reply, uint64_t error, stq_be_p(&reply->handle, handle); } -static int nbd_co_send_simple_reply(NBDClient *client, - uint64_t handle, - uint32_t error, - void *data, - size_t len, - Error **errp) +static int coroutine_fn nbd_co_send_simple_reply(NBDClient *client, + uint64_t handle, + uint32_t error, + void *data, + size_t len, + Error **errp) { NBDSimpleReply reply; int nbd_err = system_errno_to_nbd_errno(error); @@ -2038,8 +2035,8 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client, stl_be_p(&chunk.length, pnum); ret = nbd_co_send_iov(client, iov, 1, errp); } else { - ret = blk_pread(exp->common.blk, offset + progress, pnum, - data + progress, 0); + ret = blk_co_pread(exp->common.blk, offset + progress, pnum, + data + progress, 0); if (ret < 0) { error_setg_errno(errp, -ret, "reading from file failed"); break; @@ -2198,9 +2195,9 @@ static int coroutine_fn blockalloc_to_extents(BlockBackend *blk, * @ea is converted to BE by the function * @last controls whether NBD_REPLY_FLAG_DONE is sent. */ -static int nbd_co_send_extents(NBDClient *client, uint64_t handle, - NBDExtentArray *ea, - bool last, uint32_t context_id, Error **errp) +static int coroutine_fn +nbd_co_send_extents(NBDClient *client, uint64_t handle, NBDExtentArray *ea, + bool last, uint32_t context_id, Error **errp) { NBDStructuredMeta chunk; struct iovec iov[] = { @@ -2277,10 +2274,10 @@ static void bitmap_to_extents(BdrvDirtyBitmap *bitmap, bdrv_dirty_bitmap_unlock(bitmap); } -static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle, - BdrvDirtyBitmap *bitmap, uint64_t offset, - uint32_t length, bool dont_fragment, bool last, - uint32_t context_id, Error **errp) +static int coroutine_fn nbd_co_send_bitmap(NBDClient *client, uint64_t handle, + BdrvDirtyBitmap *bitmap, uint64_t offset, + uint32_t length, bool dont_fragment, bool last, + uint32_t context_id, Error **errp) { unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS; g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents); @@ -2297,8 +2294,8 @@ static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle, * to the client (although the caller may still need to disconnect after * reporting the error). 
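Several hunks in nbd/server.c above and below change nothing except adding the coroutine_fn marker. A sketch of why such a marker is useful even though it generates no code: it can expand to an annotation that a clang-based static checker uses to verify that coroutine-only functions are never called from ordinary context. The definition below is a plausible approximation, not necessarily QEMU's exact one:

#ifdef __clang__
#define coroutine_fn __attribute__((annotate("coroutine_fn")))
#else
#define coroutine_fn
#endif

/* May yield, so it is only safe to call from coroutine context. */
static int coroutine_fn example_receive(void)
{
    return 0;
}

int main(void)
{
    /* A checker driven by the annotation would flag this call,
     * since main() is not a coroutine. */
    return example_receive();
}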
*/ -static int nbd_co_receive_request(NBDRequestData *req, NBDRequest *request, - Error **errp) +static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *request, + Error **errp) { NBDClient *client = req->client; int valid_flags; @@ -2446,7 +2443,7 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request, data, request->len, errp); } - ret = blk_pread(exp->common.blk, request->from, request->len, data, 0); + ret = blk_co_pread(exp->common.blk, request->from, request->len, data, 0); if (ret < 0) { return nbd_send_generic_reply(client, request->handle, ret, "reading from file failed", errp); @@ -2513,8 +2510,8 @@ static coroutine_fn int nbd_handle_request(NBDClient *client, if (request->flags & NBD_CMD_FLAG_FUA) { flags |= BDRV_REQ_FUA; } - ret = blk_pwrite(exp->common.blk, request->from, request->len, data, - flags); + ret = blk_co_pwrite(exp->common.blk, request->from, request->len, data, + flags); return nbd_send_generic_reply(client, request->handle, ret, "writing to file failed", errp); @@ -2529,8 +2526,8 @@ static coroutine_fn int nbd_handle_request(NBDClient *client, if (request->flags & NBD_CMD_FLAG_FAST_ZERO) { flags |= BDRV_REQ_NO_FALLBACK; } - ret = blk_pwrite_zeroes(exp->common.blk, request->from, request->len, - flags); + ret = blk_co_pwrite_zeroes(exp->common.blk, request->from, request->len, + flags); return nbd_send_generic_reply(client, request->handle, ret, "writing to file failed", errp); @@ -2667,6 +2664,8 @@ static coroutine_fn void nbd_trip(void *opaque) goto disconnect; } + qio_channel_set_cork(client->ioc, true); + if (ret < 0) { /* It wasn't -EIO, so, according to nbd_co_receive_request() * semantics, we should return the error to the client. */ @@ -2692,6 +2691,7 @@ static coroutine_fn void nbd_trip(void *opaque) goto disconnect; } + qio_channel_set_cork(client->ioc, false); done: nbd_request_put(req); nbd_client_put(client); @@ -2755,6 +2755,7 @@ void nbd_client_new(QIOChannelSocket *sioc, } client->tlsauthz = g_strdup(tlsauthz); client->sioc = sioc; + qio_channel_set_delay(QIO_CHANNEL(sioc), false); object_ref(OBJECT(client->sioc)); client->ioc = QIO_CHANNEL(sioc); object_ref(OBJECT(client->ioc)); diff --git a/net/dgram.c b/net/dgram.c index 9f7bf38376..48f653bceb 100644 --- a/net/dgram.c +++ b/net/dgram.c @@ -230,7 +230,7 @@ static int net_dgram_mcast_create(struct sockaddr_in *mcastaddr, return fd; fail: if (fd >= 0) { - closesocket(fd); + close(fd); } return -1; } @@ -352,7 +352,7 @@ static int net_dgram_mcast_init(NetClientState *peer, if (convert_host_port(saddr, local->u.inet.host, local->u.inet.port, errp) < 0) { g_free(saddr); - closesocket(fd); + close(fd); return -1; } @@ -360,14 +360,14 @@ static int net_dgram_mcast_init(NetClientState *peer, if (saddr->sin_addr.s_addr == 0) { error_setg(errp, "can't setup multicast destination address"); g_free(saddr); - closesocket(fd); + close(fd); return -1; } /* clone dgram socket */ newfd = net_dgram_mcast_create(saddr, NULL, errp); if (newfd < 0) { g_free(saddr); - closesocket(fd); + close(fd); return -1; } /* clone newfd to fd, close newfd */ @@ -494,14 +494,14 @@ int net_init_dgram(const Netdev *netdev, const char *name, if (ret < 0) { error_setg_errno(errp, errno, "can't set socket option SO_REUSEADDR"); - closesocket(fd); + close(fd); return -1; } ret = bind(fd, (struct sockaddr *)&laddr_in, sizeof(laddr_in)); if (ret < 0) { error_setg_errno(errp, errno, "can't bind ip=%s to socket", inet_ntoa(laddr_in.sin_addr)); - closesocket(fd); + close(fd); return -1; } 
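The closesocket() → close() conversions running through net/dgram.c here (and net/socket.c below) work because QEMU now arranges for socket descriptors to be closable like ordinary file descriptors on every host. A hedged sketch of the distinction the old wrapper existed for; the function name is invented, and QEMU's actual portability shim differs in detail:

#ifdef _WIN32
#include <winsock2.h>
static int portable_close(int fd)
{
    /* On Windows a raw SOCKET is not a C-runtime fd, hence closesocket(). */
    return closesocket((SOCKET)fd);
}
#else
#include <unistd.h>
static int portable_close(int fd)
{
    /* On POSIX a socket is just a file descriptor. */
    return close(fd);
}
#endif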
qemu_socket_set_nonblock(fd); @@ -548,7 +548,7 @@ int net_init_dgram(const Netdev *netdev, const char *name, if (ret < 0) { error_setg_errno(errp, errno, "can't bind unix=%s to socket", laddr_un.sun_path); - closesocket(fd); + close(fd); return -1; } qemu_socket_set_nonblock(fd); diff --git a/net/dump.c b/net/dump.c index 6a63b15359..7d05f16ca7 100644 --- a/net/dump.c +++ b/net/dump.c @@ -61,12 +61,13 @@ struct pcap_sf_pkthdr { uint32_t len; }; -static ssize_t dump_receive_iov(DumpState *s, const struct iovec *iov, int cnt) +static ssize_t dump_receive_iov(DumpState *s, const struct iovec *iov, int cnt, + int offset) { struct pcap_sf_pkthdr hdr; int64_t ts; int caplen; - size_t size = iov_size(iov, cnt); + size_t size = iov_size(iov, cnt) - offset; struct iovec dumpiov[cnt + 1]; /* Early return in case of previous error. */ @@ -84,7 +85,7 @@ static ssize_t dump_receive_iov(DumpState *s, const struct iovec *iov, int cnt) dumpiov[0].iov_base = &hdr; dumpiov[0].iov_len = sizeof(hdr); - cnt = iov_copy(&dumpiov[1], cnt, iov, cnt, 0, caplen); + cnt = iov_copy(&dumpiov[1], cnt, iov, cnt, offset, caplen); if (writev(s->fd, dumpiov, cnt + 1) != sizeof(hdr) + caplen) { error_report("network dump write error - stopping dump"); @@ -153,8 +154,10 @@ static ssize_t filter_dump_receive_iov(NetFilterState *nf, NetClientState *sndr, int iovcnt, NetPacketSent *sent_cb) { NetFilterDumpState *nfds = FILTER_DUMP(nf); + int offset = qemu_get_using_vnet_hdr(nf->netdev) ? + qemu_get_vnet_hdr_len(nf->netdev) : 0; - dump_receive_iov(&nfds->ds, iov, iovcnt); + dump_receive_iov(&nfds->ds, iov, iovcnt, offset); return 0; } diff --git a/net/eth.c b/net/eth.c index f074b2f9f3..649e66bb1f 100644 --- a/net/eth.c +++ b/net/eth.c @@ -21,26 +21,16 @@ #include "net/checksum.h" #include "net/tap.h" -void eth_setup_vlan_headers_ex(struct eth_header *ehdr, uint16_t vlan_tag, - uint16_t vlan_ethtype, bool *is_new) +void eth_setup_vlan_headers(struct eth_header *ehdr, size_t *ehdr_size, + uint16_t vlan_tag, uint16_t vlan_ethtype) { struct vlan_header *vhdr = PKT_GET_VLAN_HDR(ehdr); - switch (be16_to_cpu(ehdr->h_proto)) { - case ETH_P_VLAN: - case ETH_P_DVLAN: - /* vlan hdr exists */ - *is_new = false; - break; - - default: - /* No VLAN header, put a new one */ - vhdr->h_proto = ehdr->h_proto; - ehdr->h_proto = cpu_to_be16(vlan_ethtype); - *is_new = true; - break; - } + memmove(vhdr + 1, vhdr, *ehdr_size - ETH_HLEN); vhdr->h_tci = cpu_to_be16(vlan_tag); + vhdr->h_proto = ehdr->h_proto; + ehdr->h_proto = cpu_to_be16(vlan_ethtype); + *ehdr_size += sizeof(*vhdr); } uint8_t @@ -136,9 +126,8 @@ _eth_tcp_has_data(bool is_ip4, return l4len > TCP_HEADER_DATA_OFFSET(tcp); } -void eth_get_protocols(const struct iovec *iov, int iovcnt, - bool *isip4, bool *isip6, - bool *isudp, bool *istcp, +void eth_get_protocols(const struct iovec *iov, size_t iovcnt, size_t iovoff, + bool *hasip4, bool *hasip6, size_t *l3hdr_off, size_t *l4hdr_off, size_t *l5hdr_off, @@ -148,96 +137,94 @@ void eth_get_protocols(const struct iovec *iov, int iovcnt, { int proto; bool fragment = false; - size_t l2hdr_len = eth_get_l2_hdr_length_iov(iov, iovcnt); size_t input_size = iov_size(iov, iovcnt); size_t copied; + uint8_t ip_p; - *isip4 = *isip6 = *isudp = *istcp = false; + *hasip4 = *hasip6 = false; + *l3hdr_off = iovoff + eth_get_l2_hdr_length_iov(iov, iovcnt, iovoff); + l4hdr_info->proto = ETH_L4_HDR_PROTO_INVALID; - proto = eth_get_l3_proto(iov, iovcnt, l2hdr_len); - - *l3hdr_off = l2hdr_len; + proto = eth_get_l3_proto(iov, iovcnt, *l3hdr_off); if (proto == 
ETH_P_IP) { struct ip_header *iphdr = &ip4hdr_info->ip4_hdr; - if (input_size < l2hdr_len) { + if (input_size < *l3hdr_off) { return; } - copied = iov_to_buf(iov, iovcnt, l2hdr_len, iphdr, sizeof(*iphdr)); - - *isip4 = true; - - if (copied < sizeof(*iphdr)) { + copied = iov_to_buf(iov, iovcnt, *l3hdr_off, iphdr, sizeof(*iphdr)); + if (copied < sizeof(*iphdr) || + IP_HEADER_VERSION(iphdr) != IP_HEADER_VERSION_4) { return; } - if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) { - if (iphdr->ip_p == IP_PROTO_TCP) { - *istcp = true; - } else if (iphdr->ip_p == IP_PROTO_UDP) { - *isudp = true; - } - } - + *hasip4 = true; + ip_p = iphdr->ip_p; ip4hdr_info->fragment = IP4_IS_FRAGMENT(iphdr); - *l4hdr_off = l2hdr_len + IP_HDR_GET_LEN(iphdr); + *l4hdr_off = *l3hdr_off + IP_HDR_GET_LEN(iphdr); fragment = ip4hdr_info->fragment; } else if (proto == ETH_P_IPV6) { - - *isip6 = true; - if (eth_parse_ipv6_hdr(iov, iovcnt, l2hdr_len, - ip6hdr_info)) { - if (ip6hdr_info->l4proto == IP_PROTO_TCP) { - *istcp = true; - } else if (ip6hdr_info->l4proto == IP_PROTO_UDP) { - *isudp = true; - } - } else { + if (!eth_parse_ipv6_hdr(iov, iovcnt, *l3hdr_off, ip6hdr_info)) { return; } - *l4hdr_off = l2hdr_len + ip6hdr_info->full_hdr_len; + *hasip6 = true; + ip_p = ip6hdr_info->l4proto; + *l4hdr_off = *l3hdr_off + ip6hdr_info->full_hdr_len; fragment = ip6hdr_info->fragment; + } else { + return; } - if (!fragment) { - if (*istcp) { - *istcp = _eth_copy_chunk(input_size, - iov, iovcnt, - *l4hdr_off, sizeof(l4hdr_info->hdr.tcp), - &l4hdr_info->hdr.tcp); + if (fragment) { + return; + } - if (*istcp) { - *l5hdr_off = *l4hdr_off + - TCP_HEADER_DATA_OFFSET(&l4hdr_info->hdr.tcp); + switch (ip_p) { + case IP_PROTO_TCP: + if (_eth_copy_chunk(input_size, + iov, iovcnt, + *l4hdr_off, sizeof(l4hdr_info->hdr.tcp), + &l4hdr_info->hdr.tcp)) { + l4hdr_info->proto = ETH_L4_HDR_PROTO_TCP; + *l5hdr_off = *l4hdr_off + + TCP_HEADER_DATA_OFFSET(&l4hdr_info->hdr.tcp); - l4hdr_info->has_tcp_data = - _eth_tcp_has_data(proto == ETH_P_IP, - &ip4hdr_info->ip4_hdr, - &ip6hdr_info->ip6_hdr, - *l4hdr_off - *l3hdr_off, - &l4hdr_info->hdr.tcp); - } - } else if (*isudp) { - *isudp = _eth_copy_chunk(input_size, - iov, iovcnt, - *l4hdr_off, sizeof(l4hdr_info->hdr.udp), - &l4hdr_info->hdr.udp); + l4hdr_info->has_tcp_data = + _eth_tcp_has_data(proto == ETH_P_IP, + &ip4hdr_info->ip4_hdr, + &ip6hdr_info->ip6_hdr, + *l4hdr_off - *l3hdr_off, + &l4hdr_info->hdr.tcp); + } + break; + + case IP_PROTO_UDP: + if (_eth_copy_chunk(input_size, + iov, iovcnt, + *l4hdr_off, sizeof(l4hdr_info->hdr.udp), + &l4hdr_info->hdr.udp)) { + l4hdr_info->proto = ETH_L4_HDR_PROTO_UDP; *l5hdr_off = *l4hdr_off + sizeof(l4hdr_info->hdr.udp); } + break; + + case IP_PROTO_SCTP: + l4hdr_info->proto = ETH_L4_HDR_PROTO_SCTP; + break; } } size_t eth_strip_vlan(const struct iovec *iov, int iovcnt, size_t iovoff, - uint8_t *new_ehdr_buf, + void *new_ehdr_buf, uint16_t *payload_offset, uint16_t *tci) { struct vlan_header vlan_hdr; - struct eth_header *new_ehdr = (struct eth_header *) new_ehdr_buf; + struct eth_header *new_ehdr = new_ehdr_buf; size_t copied = iov_to_buf(iov, iovcnt, iovoff, new_ehdr, sizeof(*new_ehdr)); @@ -282,63 +269,50 @@ eth_strip_vlan(const struct iovec *iov, int iovcnt, size_t iovoff, } size_t -eth_strip_vlan_ex(const struct iovec *iov, int iovcnt, size_t iovoff, - uint16_t vet, uint8_t *new_ehdr_buf, +eth_strip_vlan_ex(const struct iovec *iov, int iovcnt, size_t iovoff, int index, + uint16_t vet, uint16_t vet_ext, void *new_ehdr_buf, uint16_t *payload_offset, uint16_t 
*tci) { struct vlan_header vlan_hdr; - struct eth_header *new_ehdr = (struct eth_header *) new_ehdr_buf; + uint16_t *new_ehdr_proto; + size_t new_ehdr_size; + size_t copied; - size_t copied = iov_to_buf(iov, iovcnt, iovoff, - new_ehdr, sizeof(*new_ehdr)); + switch (index) { + case 0: + new_ehdr_proto = &PKT_GET_ETH_HDR(new_ehdr_buf)->h_proto; + new_ehdr_size = sizeof(struct eth_header); + copied = iov_to_buf(iov, iovcnt, iovoff, new_ehdr_buf, new_ehdr_size); + break; - if (copied < sizeof(*new_ehdr)) { + case 1: + new_ehdr_proto = &PKT_GET_VLAN_HDR(new_ehdr_buf)->h_proto; + new_ehdr_size = sizeof(struct eth_header) + sizeof(struct vlan_header); + copied = iov_to_buf(iov, iovcnt, iovoff, new_ehdr_buf, new_ehdr_size); + if (be16_to_cpu(PKT_GET_ETH_HDR(new_ehdr_buf)->h_proto) != vet_ext) { + return 0; + } + break; + + default: return 0; } - if (be16_to_cpu(new_ehdr->h_proto) == vet) { - copied = iov_to_buf(iov, iovcnt, iovoff + sizeof(*new_ehdr), - &vlan_hdr, sizeof(vlan_hdr)); - - if (copied < sizeof(vlan_hdr)) { - return 0; - } - - new_ehdr->h_proto = vlan_hdr.h_proto; - - *tci = be16_to_cpu(vlan_hdr.h_tci); - *payload_offset = iovoff + sizeof(*new_ehdr) + sizeof(vlan_hdr); - return sizeof(struct eth_header); + if (copied < new_ehdr_size || be16_to_cpu(*new_ehdr_proto) != vet) { + return 0; } - return 0; -} - -void -eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len, - void *l3hdr, size_t l3hdr_len, - size_t l3payload_len, - size_t frag_offset, bool more_frags) -{ - const struct iovec l2vec = { - .iov_base = (void *) l2hdr, - .iov_len = l2hdr_len - }; - - if (eth_get_l3_proto(&l2vec, 1, l2hdr_len) == ETH_P_IP) { - uint16_t orig_flags; - struct ip_header *iphdr = (struct ip_header *) l3hdr; - uint16_t frag_off_units = frag_offset / IP_FRAG_UNIT_SIZE; - uint16_t new_ip_off; - - assert(frag_offset % IP_FRAG_UNIT_SIZE == 0); - assert((frag_off_units & ~IP_OFFMASK) == 0); - - orig_flags = be16_to_cpu(iphdr->ip_off) & ~(IP_OFFMASK|IP_MF); - new_ip_off = frag_off_units | orig_flags | (more_frags ? 
IP_MF : 0); - iphdr->ip_off = cpu_to_be16(new_ip_off); - iphdr->ip_len = cpu_to_be16(l3payload_len + l3hdr_len); + copied = iov_to_buf(iov, iovcnt, iovoff + new_ehdr_size, + &vlan_hdr, sizeof(vlan_hdr)); + if (copied < sizeof(vlan_hdr)) { + return 0; } + + *new_ehdr_proto = vlan_hdr.h_proto; + *payload_offset = iovoff + new_ehdr_size + sizeof(vlan_hdr); + *tci = be16_to_cpu(vlan_hdr.h_tci); + + return new_ehdr_size; } void diff --git a/net/meson.build b/net/meson.build index 87afca3e93..6f4ecde57f 100644 --- a/net/meson.build +++ b/net/meson.build @@ -1,13 +1,10 @@ softmmu_ss.add(files( 'announce.c', 'checksum.c', - 'colo-compare.c', - 'colo.c', 'dump.c', 'eth.c', 'filter-buffer.c', 'filter-mirror.c', - 'filter-rewriter.c', 'filter.c', 'hub.c', 'net-hmp-cmds.c', @@ -19,6 +16,16 @@ softmmu_ss.add(files( 'util.c', )) +if get_option('replication').allowed() or \ + get_option('colo_proxy').allowed() + softmmu_ss.add(files('colo-compare.c')) + softmmu_ss.add(files('colo.c')) +endif + +if get_option('colo_proxy').allowed() + softmmu_ss.add(files('filter-rewriter.c')) +endif + softmmu_ss.add(when: 'CONFIG_TCG', if_true: files('filter-replay.c')) if have_l2tpv3 diff --git a/net/net.c b/net/net.c index ebc7ce0231..6492ad530e 100644 --- a/net/net.c +++ b/net/net.c @@ -513,6 +513,15 @@ bool qemu_has_vnet_hdr_len(NetClientState *nc, int len) return nc->info->has_vnet_hdr_len(nc, len); } +bool qemu_get_using_vnet_hdr(NetClientState *nc) +{ + if (!nc || !nc->info->get_using_vnet_hdr) { + return false; + } + + return nc->info->get_using_vnet_hdr(nc); +} + void qemu_using_vnet_hdr(NetClientState *nc, bool enable) { if (!nc || !nc->info->using_vnet_hdr) { @@ -532,6 +541,15 @@ void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6, nc->info->set_offload(nc, csum, tso4, tso6, ecn, ufo); } +int qemu_get_vnet_hdr_len(NetClientState *nc) +{ + if (!nc || !nc->info->get_vnet_hdr_len) { + return 0; + } + + return nc->info->get_vnet_hdr_len(nc); +} + void qemu_set_vnet_hdr_len(NetClientState *nc, int len) { if (!nc || !nc->info->set_vnet_hdr_len) { diff --git a/net/slirp.c b/net/slirp.c index 2ee3f1a0d7..c33b3e02e7 100644 --- a/net/slirp.c +++ b/net/slirp.c @@ -248,12 +248,24 @@ static void net_slirp_timer_mod(void *timer, int64_t expire_timer, static void net_slirp_register_poll_fd(int fd, void *opaque) { - qemu_fd_register(fd); +#ifdef WIN32 + AioContext *ctxt = qemu_get_aio_context(); + + if (WSAEventSelect(fd, event_notifier_get_handle(&ctxt->notifier), + FD_READ | FD_ACCEPT | FD_CLOSE | + FD_CONNECT | FD_WRITE | FD_OOB) != 0) { + error_setg_win32(&error_warn, WSAGetLastError(), "failed to WSAEventSelect()"); + } +#endif } static void net_slirp_unregister_poll_fd(int fd, void *opaque) { - /* no qemu_fd_unregister */ +#ifdef WIN32 + if (WSAEventSelect(fd, NULL, 0) != 0) { + error_setg_win32(&error_warn, WSAGetLastError(), "failed to WSAEventSelect()"); + } +#endif } static void net_slirp_notify(void *opaque) diff --git a/net/socket.c b/net/socket.c index 2fc5696755..ba6e5b0b00 100644 --- a/net/socket.c +++ b/net/socket.c @@ -172,7 +172,7 @@ static void net_socket_send(void *opaque) if (s->listen_fd != -1) { qemu_set_fd_handler(s->listen_fd, net_socket_accept, NULL, s); } - closesocket(s->fd); + close(s->fd); s->fd = -1; net_socket_rs_init(&s->rs, net_socket_rs_finalize, false); @@ -299,7 +299,7 @@ static int net_socket_mcast_create(struct sockaddr_in *mcastaddr, return fd; fail: if (fd >= 0) - closesocket(fd); + close(fd); return -1; } @@ -314,7 +314,7 @@ static void 
net_socket_cleanup(NetClientState *nc) } if (s->listen_fd != -1) { qemu_set_fd_handler(s->listen_fd, NULL, NULL, NULL); - closesocket(s->listen_fd); + close(s->listen_fd); s->listen_fd = -1; } } @@ -399,7 +399,7 @@ static NetSocketState *net_socket_fd_init_dgram(NetClientState *peer, return s; err: - closesocket(fd); + close(fd); return NULL; } @@ -456,7 +456,7 @@ static NetSocketState *net_socket_fd_init(NetClientState *peer, if(getsockopt(fd, SOL_SOCKET, SO_TYPE, (char *)&so_type, (socklen_t *)&optlen)< 0) { error_setg(errp, "can't get socket option SO_TYPE"); - closesocket(fd); + close(fd); return NULL; } switch(so_type) { @@ -468,7 +468,7 @@ static NetSocketState *net_socket_fd_init(NetClientState *peer, default: error_setg(errp, "socket type=%d for fd=%d must be either" " SOCK_DGRAM or SOCK_STREAM", so_type, fd); - closesocket(fd); + close(fd); } return NULL; } @@ -526,13 +526,13 @@ static int net_socket_listen_init(NetClientState *peer, if (ret < 0) { error_setg_errno(errp, errno, "can't bind ip=%s to socket", inet_ntoa(saddr.sin_addr)); - closesocket(fd); + close(fd); return -1; } ret = listen(fd, 0); if (ret < 0) { error_setg_errno(errp, errno, "can't listen on socket"); - closesocket(fd); + close(fd); return -1; } @@ -579,7 +579,7 @@ static int net_socket_connect_init(NetClientState *peer, break; } else { error_setg_errno(errp, errno, "can't connect socket"); - closesocket(fd); + close(fd); return -1; } } else { @@ -671,14 +671,14 @@ static int net_socket_udp_init(NetClientState *peer, if (ret < 0) { error_setg_errno(errp, errno, "can't set socket option SO_REUSEADDR"); - closesocket(fd); + close(fd); return -1; } ret = bind(fd, (struct sockaddr *)&laddr, sizeof(laddr)); if (ret < 0) { error_setg_errno(errp, errno, "can't bind ip=%s to socket", inet_ntoa(laddr.sin_addr)); - closesocket(fd); + close(fd); return -1; } qemu_socket_set_nonblock(fd); diff --git a/net/tap.c b/net/tap.c index 7d7bc1dc5f..1bf085d422 100644 --- a/net/tap.c +++ b/net/tap.c @@ -255,6 +255,13 @@ static bool tap_has_vnet_hdr_len(NetClientState *nc, int len) return !!tap_probe_vnet_hdr_len(s->fd, len); } +static int tap_get_vnet_hdr_len(NetClientState *nc) +{ + TAPState *s = DO_UPCAST(TAPState, nc, nc); + + return s->host_vnet_hdr_len; +} + static void tap_set_vnet_hdr_len(NetClientState *nc, int len) { TAPState *s = DO_UPCAST(TAPState, nc, nc); @@ -268,6 +275,13 @@ static void tap_set_vnet_hdr_len(NetClientState *nc, int len) s->host_vnet_hdr_len = len; } +static bool tap_get_using_vnet_hdr(NetClientState *nc) +{ + TAPState *s = DO_UPCAST(TAPState, nc, nc); + + return s->using_vnet_hdr; +} + static void tap_using_vnet_hdr(NetClientState *nc, bool using_vnet_hdr) { TAPState *s = DO_UPCAST(TAPState, nc, nc); @@ -372,8 +386,10 @@ static NetClientInfo net_tap_info = { .has_ufo = tap_has_ufo, .has_vnet_hdr = tap_has_vnet_hdr, .has_vnet_hdr_len = tap_has_vnet_hdr_len, + .get_using_vnet_hdr = tap_get_using_vnet_hdr, .using_vnet_hdr = tap_using_vnet_hdr, .set_offload = tap_set_offload, + .get_vnet_hdr_len = tap_get_vnet_hdr_len, .set_vnet_hdr_len = tap_set_vnet_hdr_len, .set_vnet_le = tap_set_vnet_le, .set_vnet_be = tap_set_vnet_be, diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index de5ed8ff22..37cdc84562 100644 --- a/net/vhost-vdpa.c +++ b/net/vhost-vdpa.c @@ -26,12 +26,15 @@ #include #include "standard-headers/linux/virtio_net.h" #include "monitor/monitor.h" +#include "migration/migration.h" +#include "migration/misc.h" #include "hw/virtio/vhost.h" /* Todo:need to add the multiqueue support here */ typedef 
struct VhostVDPAState { NetClientState nc; struct vhost_vdpa vhost_vdpa; + Notifier migration_state; VHostNetState *vhost_net; /* Control commands shadow buffers */ @@ -98,8 +101,11 @@ static const uint64_t vdpa_svq_device_features = BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_F_ANY_LAYOUT) | BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | + /* VHOST_F_LOG_ALL is exposed by SVQ */ + BIT_ULL(VHOST_F_LOG_ALL) | BIT_ULL(VIRTIO_NET_F_RSC_EXT) | - BIT_ULL(VIRTIO_NET_F_STANDBY); + BIT_ULL(VIRTIO_NET_F_STANDBY) | + BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX); #define VHOST_VDPA_NET_CVQ_ASID 1 @@ -178,13 +184,9 @@ err_init: static void vhost_vdpa_cleanup(NetClientState *nc) { VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); - struct vhost_dev *dev = &s->vhost_net->dev; qemu_vfree(s->cvq_cmd_out_buffer); qemu_vfree(s->status); - if (dev->vq_index + dev->nvqs == dev->vq_index_end) { - g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete); - } if (s->vhost_net) { vhost_net_cleanup(s->vhost_net); g_free(s->vhost_net); @@ -234,10 +236,126 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf, return size; } +/** From any vdpa net client, get the netclient of the first queue pair */ +static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s) +{ + NICState *nic = qemu_get_nic(s->nc.peer); + NetClientState *nc0 = qemu_get_peer(nic->ncs, 0); + + return DO_UPCAST(VhostVDPAState, nc, nc0); +} + +static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable) +{ + struct vhost_vdpa *v = &s->vhost_vdpa; + VirtIONet *n; + VirtIODevice *vdev; + int data_queue_pairs, cvq, r; + + /* We are only called on the first data vqs and only if x-svq is not set */ + if (s->vhost_vdpa.shadow_vqs_enabled == enable) { + return; + } + + vdev = v->dev->vdev; + n = VIRTIO_NET(vdev); + if (!n->vhost_started) { + return; + } + + data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; + cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ? + n->max_ncs - n->max_queue_pairs : 0; + /* + * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter + * in the future and resume the device if read-only operations between
 + * suspend and reset go wrong.
+ */ + vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq); + + /* Start will check migration setup_or_active to decide whether to configure SVQ */ + r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq); + if (unlikely(r < 0)) { + error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r); + } +} + +static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data) +{ + MigrationState *migration = data; + VhostVDPAState *s = container_of(notifier, VhostVDPAState, + migration_state); + + if (migration_in_setup(migration)) { + vhost_vdpa_net_log_global_enable(s, true); + } else if (migration_has_failed(migration)) { + vhost_vdpa_net_log_global_enable(s, false); + } +} + +static void vhost_vdpa_net_data_start_first(VhostVDPAState *s) +{ + struct vhost_vdpa *v = &s->vhost_vdpa; + + add_migration_state_change_notifier(&s->migration_state); + if (v->shadow_vqs_enabled) { + v->iova_tree = vhost_iova_tree_new(v->iova_range.first, + v->iova_range.last); + } +} + +static int vhost_vdpa_net_data_start(NetClientState *nc) +{ + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); + struct vhost_vdpa *v = &s->vhost_vdpa; + + assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); + + if (s->always_svq || + migration_is_setup_or_active(migrate_get_current()->state)) { + v->shadow_vqs_enabled = true; + v->shadow_data = true; + } else { + v->shadow_vqs_enabled = false; + v->shadow_data = false; + } + + if (v->index == 0) { + vhost_vdpa_net_data_start_first(s); + return 0; + } + + if (v->shadow_vqs_enabled) { + VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s); + v->iova_tree = s0->vhost_vdpa.iova_tree; + } + + return 0; +} + +static void vhost_vdpa_net_client_stop(NetClientState *nc) +{ + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); + struct vhost_dev *dev; + + assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); + + if (s->vhost_vdpa.index == 0) { + remove_migration_state_change_notifier(&s->migration_state); + } + + dev = s->vhost_vdpa.dev; + if (dev->vq_index + dev->nvqs == dev->vq_index_end) { + g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete); + } +} + static NetClientInfo net_vhost_vdpa_info = { .type = NET_CLIENT_DRIVER_VHOST_VDPA, .size = sizeof(VhostVDPAState), .receive = vhost_vdpa_receive, + .start = vhost_vdpa_net_data_start, + .stop = vhost_vdpa_net_client_stop, .cleanup = vhost_vdpa_cleanup, .has_vnet_hdr = vhost_vdpa_has_vnet_hdr, .has_ufo = vhost_vdpa_has_ufo, @@ -351,7 +469,7 @@ dma_map_err: static int vhost_vdpa_net_cvq_start(NetClientState *nc) { - VhostVDPAState *s; + VhostVDPAState *s, *s0; struct vhost_vdpa *v; uint64_t backend_features; int64_t cvq_group; @@ -362,11 +480,12 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc) s = DO_UPCAST(VhostVDPAState, nc, nc); v = &s->vhost_vdpa; - v->shadow_data = s->always_svq; + s0 = vhost_vdpa_net_first_nc_vdpa(s); + v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled; v->shadow_vqs_enabled = s->always_svq; s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID; - if (s->always_svq) { + if (s->vhost_vdpa.shadow_data) { /* SVQ is already configured for all virtqueues */ goto out; } @@ -415,8 +534,6 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc) return r; } - v->iova_tree = vhost_iova_tree_new(v->iova_range.first, - v->iova_range.last); v->shadow_vqs_enabled = true; s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID; @@ -425,6 +542,26 @@ out: return 0; } + if (s0->vhost_vdpa.iova_tree) { + /* + * SVQ is already configured for all virtqueues.
Reuse IOVA tree for + * simplicity, whether CVQ shares ASID with guest or not, because: + * - The memory listener needs access to guest's memory addresses allocated + * in the IOVA tree. + * - There should be plenty of IOVA address space for both ASIDs not to + * worry about collisions between them. Guest's translations are + * still validated with virtio virtqueue_pop so there is no risk for + * the guest to access memory that it shouldn't. + * + * Allocating an IOVA tree per ASID is doable but it complicates the + * code and it is not worth it for the moment. + */ + v->iova_tree = s0->vhost_vdpa.iova_tree; + } else { + v->iova_tree = vhost_iova_tree_new(v->iova_range.first, + v->iova_range.last); + } + r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len(), false); if (unlikely(r < 0)) { @@ -449,15 +586,9 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc) if (s->vhost_vdpa.shadow_vqs_enabled) { vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status); - if (!s->always_svq) { - /* - * If only the CVQ is shadowed we can delete this safely. - * If all the VQs are shadows this will be needed by the time the - * device is started again to register SVQ vrings and similar. - */ - g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete); - } } + + vhost_vdpa_net_client_stop(nc); } static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len, @@ -668,7 +799,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, bool is_datapath, bool svq, struct vhost_vdpa_iova_range iova_range, - VhostIOVATree *iova_tree) + uint64_t features) { NetClientState *nc = NULL; VhostVDPAState *s; @@ -687,11 +818,14 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, s->vhost_vdpa.device_fd = vdpa_device_fd; s->vhost_vdpa.index = queue_pair_index; s->always_svq = svq; + s->migration_state.notify = vdpa_net_migration_state_notifier; s->vhost_vdpa.shadow_vqs_enabled = svq; s->vhost_vdpa.iova_range = iova_range; s->vhost_vdpa.shadow_data = svq; - s->vhost_vdpa.iova_tree = iova_tree; - if (!is_datapath) { + if (queue_pair_index == 0) { + vhost_vdpa_net_valid_svq_features(features, + &s->vhost_vdpa.migration_blocker); + } else if (!is_datapath) { s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(), vhost_vdpa_net_cvq_cmd_page_len()); memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len()); @@ -701,6 +835,15 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops; s->vhost_vdpa.shadow_vq_ops_opaque = s; + + /* + * TODO: We cannot migrate devices with CVQ as there is no way to set + * the device state (MAC, MQ, etc) before starting the datapath. + * + * Migration blocker ownership now belongs to s->vhost_vdpa.
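vdpa_net_migration_state_notifier() above hooks into QEMU's generic Notifier mechanism (include/qemu/notify.h). A self-contained miniature of that pattern, with simplified invented names, shows the shape of the registration and callback flow:

#include <stdio.h>

typedef struct Notifier Notifier;
struct Notifier {
    void (*notify)(Notifier *n, void *data);
    Notifier *next;
};

typedef struct NotifierList {
    Notifier *head;
} NotifierList;

static void notifier_list_add(NotifierList *l, Notifier *n)
{
    n->next = l->head;
    l->head = n;
}

static void notifier_list_notify(NotifierList *l, void *data)
{
    for (Notifier *n = l->head; n; n = n->next) {
        /* The callback can container_of() back to its embedding state,
         * as the vdpa code does with VhostVDPAState. */
        n->notify(n, data);
    }
}

static void on_state_change(Notifier *n, void *data)
{
    printf("migration state changed\n");
}

int main(void)
{
    NotifierList migration_notifiers = { 0 };
    Notifier n = { .notify = on_state_change };

    notifier_list_add(&migration_notifiers, &n);
    notifier_list_notify(&migration_notifiers, NULL);
    return 0;
}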
+ */ + error_setg(&s->vhost_vdpa.migration_blocker, + "net vdpa cannot migrate with CVQ feature"); } ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); if (ret) { @@ -760,7 +903,6 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, uint64_t features; int vdpa_device_fd; g_autofree NetClientState **ncs = NULL; - g_autoptr(VhostIOVATree) iova_tree = NULL; struct vhost_vdpa_iova_range iova_range; NetClientState *nc; int queue_pairs, r, i = 0, has_cvq = 0; @@ -812,12 +954,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, goto err; } - if (opts->x_svq) { - if (!vhost_vdpa_net_valid_svq_features(features, errp)) { - goto err_svq; - } - - iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last); + if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) { + goto err; } ncs = g_malloc0(sizeof(*ncs) * queue_pairs); @@ -825,7 +963,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, for (i = 0; i < queue_pairs; i++) { ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, vdpa_device_fd, i, 2, true, opts->x_svq, - iova_range, iova_tree); + iova_range, features); if (!ncs[i]) goto err; } @@ -833,13 +971,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, if (has_cvq) { nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, vdpa_device_fd, i, 1, false, - opts->x_svq, iova_range, iova_tree); + opts->x_svq, iova_range, features); if (!nc) goto err; } - /* iova_tree ownership belongs to last NetClientState */ - g_steal_pointer(&iova_tree); return 0; err: @@ -849,7 +985,6 @@ err: } } -err_svq: qemu_close(vdpa_device_fd); return -1; diff --git a/os-posix.c b/os-posix.c index 5adc69f560..90ea71725f 100644 --- a/os-posix.c +++ b/os-posix.c @@ -36,6 +36,8 @@ #include "qemu/log.h" #include "sysemu/runstate.h" #include "qemu/cutils.h" +#include "qemu/config-file.h" +#include "qemu/option.h" #ifdef CONFIG_LINUX #include @@ -152,9 +154,21 @@ int os_parse_cmd_args(int index, const char *optarg) daemonize = 1; break; #if defined(CONFIG_LINUX) + /* deprecated */ case QEMU_OPTION_asyncteardown: init_async_teardown(); break; + case QEMU_OPTION_run_with: { + QemuOpts *opts = qemu_opts_parse_noisily(qemu_find_opts("run-with"), + optarg, false); + if (!opts) { + exit(1); + } + if (qemu_opt_get_bool(opts, "async-teardown", false)) { + init_async_teardown(); + } + break; + } #endif default: return -1; diff --git a/pc-bios/README b/pc-bios/README index 3702ed485c..c555dd324e 100644 --- a/pc-bios/README +++ b/pc-bios/README @@ -50,8 +50,8 @@ variable store templates built from the TianoCore community's EFI Development Kit II project . The images - were built at git tag "edk2-stable202008". The firmware binaries bundle parts - of the OpenSSL project, at git tag "OpenSSL_1_1_1g" (the OpenSSL tag is a + were built at git tag "edk2-stable202302". The firmware binaries bundle parts + of the OpenSSL project, at git tag "OpenSSL_1_1_1s" (the OpenSSL tag is a function of the edk2 tag). 
Parts of the Berkeley SoftFloat library are bundled as well, at Release 3e plus a subsequent typo fix (commit b64af41c3276f97f0e181920400ee056b9c88037), as an OpenSSL dependency on 32-bit diff --git a/pc-bios/bios-256k.bin b/pc-bios/bios-256k.bin index 211b2a4da2..f70aa72c60 100644 Binary files a/pc-bios/bios-256k.bin and b/pc-bios/bios-256k.bin differ diff --git a/pc-bios/bios-microvm.bin b/pc-bios/bios-microvm.bin index 6204a714cd..94792cf3f4 100644 Binary files a/pc-bios/bios-microvm.bin and b/pc-bios/bios-microvm.bin differ diff --git a/pc-bios/bios.bin b/pc-bios/bios.bin index 12d6a037be..6a196cf72a 100644 Binary files a/pc-bios/bios.bin and b/pc-bios/bios.bin differ diff --git a/pc-bios/edk2-aarch64-code.fd.bz2 b/pc-bios/edk2-aarch64-code.fd.bz2 index 0262f5bd8f..4bc824e48d 100644 Binary files a/pc-bios/edk2-aarch64-code.fd.bz2 and b/pc-bios/edk2-aarch64-code.fd.bz2 differ diff --git a/pc-bios/edk2-arm-code.fd.bz2 b/pc-bios/edk2-arm-code.fd.bz2 index 4ca97b43ea..7899fca426 100644 Binary files a/pc-bios/edk2-arm-code.fd.bz2 and b/pc-bios/edk2-arm-code.fd.bz2 differ diff --git a/pc-bios/edk2-i386-code.fd.bz2 b/pc-bios/edk2-i386-code.fd.bz2 index 6e02c9b995..a68ae4fa15 100644 Binary files a/pc-bios/edk2-i386-code.fd.bz2 and b/pc-bios/edk2-i386-code.fd.bz2 differ diff --git a/pc-bios/edk2-i386-secure-code.fd.bz2 b/pc-bios/edk2-i386-secure-code.fd.bz2 index a4b1cc92bd..91936ebbc9 100644 Binary files a/pc-bios/edk2-i386-secure-code.fd.bz2 and b/pc-bios/edk2-i386-secure-code.fd.bz2 differ diff --git a/pc-bios/edk2-riscv.fd.bz2 b/pc-bios/edk2-riscv.fd.bz2 new file mode 100644 index 0000000000..ef566b374a Binary files /dev/null and b/pc-bios/edk2-riscv.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-code.fd.bz2 b/pc-bios/edk2-x86_64-code.fd.bz2 index 37bfb0dbed..35c2340e46 100644 Binary files a/pc-bios/edk2-x86_64-code.fd.bz2 and b/pc-bios/edk2-x86_64-code.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-microvm.fd.bz2 b/pc-bios/edk2-x86_64-microvm.fd.bz2 index 1d65c61ded..742abf06c5 100644 Binary files a/pc-bios/edk2-x86_64-microvm.fd.bz2 and b/pc-bios/edk2-x86_64-microvm.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-secure-code.fd.bz2 b/pc-bios/edk2-x86_64-secure-code.fd.bz2 index 76dc6d5aad..311b7b91d7 100644 Binary files a/pc-bios/edk2-x86_64-secure-code.fd.bz2 and b/pc-bios/edk2-x86_64-secure-code.fd.bz2 differ diff --git a/pc-bios/openbios-ppc b/pc-bios/openbios-ppc index d8203a5074..4af60026b4 100644 Binary files a/pc-bios/openbios-ppc and b/pc-bios/openbios-ppc differ diff --git a/pc-bios/openbios-sparc32 b/pc-bios/openbios-sparc32 index 118b1cc1c0..41b6a607fb 100644 Binary files a/pc-bios/openbios-sparc32 and b/pc-bios/openbios-sparc32 differ diff --git a/pc-bios/openbios-sparc64 b/pc-bios/openbios-sparc64 index 846cb4854c..902b4b3508 100644 Binary files a/pc-bios/openbios-sparc64 and b/pc-bios/openbios-sparc64 differ diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin index 81bab1adc9..6a8425885c 100644 Binary files a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin and b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin differ diff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin index 5eb0a74326..80bdbf2170 100644 Binary files a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin and b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin differ diff --git a/pc-bios/optionrom/optionrom.h b/pc-bios/optionrom/optionrom.h index 8d74c0ddf3..7bcdf0eeb2 100644 --- a/pc-bios/optionrom/optionrom.h 
+++ b/pc-bios/optionrom/optionrom.h @@ -34,8 +34,8 @@ #define FW_CFG_SETUP_SIZE 0x17 #define FW_CFG_SETUP_DATA 0x18 -#define BIOS_CFG_IOPORT_CFG 0x510 -#define BIOS_CFG_IOPORT_DATA 0x511 +#define BIOS_CFG_IOPORT_CFG 0x510 +#define BIOS_CFG_IOPORT_DATA 0x511 #define FW_CFG_DMA_CTL_ERROR 0x01 #define FW_CFG_DMA_CTL_READ 0x02 @@ -49,65 +49,65 @@ #define BIOS_CFG_DMA_ADDR_LOW 0x518 /* Break the translation block flow so -d cpu shows us values */ -#define DEBUG_HERE \ - jmp 1f; \ - 1: - +#define DEBUG_HERE \ + jmp 1f; \ + 1: + /* * Read a variable from the fw_cfg device. - * Clobbers: %edx - * Out: %eax + * Clobbers: %edx + * Out: %eax */ .macro read_fw VAR - mov $\VAR, %ax - mov $BIOS_CFG_IOPORT_CFG, %dx - outw %ax, (%dx) - mov $BIOS_CFG_IOPORT_DATA, %dx - inb (%dx), %al - shl $8, %eax - inb (%dx), %al - shl $8, %eax - inb (%dx), %al - shl $8, %eax - inb (%dx), %al - bswap %eax + mov $\VAR, %ax + mov $BIOS_CFG_IOPORT_CFG, %dx + outw %ax, (%dx) + mov $BIOS_CFG_IOPORT_DATA, %dx + inb (%dx), %al + shl $8, %eax + inb (%dx), %al + shl $8, %eax + inb (%dx), %al + shl $8, %eax + inb (%dx), %al + bswap %eax .endm /* * Read data from the fw_cfg device using DMA. - * Clobbers: %edx, %eax, ADDR, SIZE, memory[%esp-16] to memory[%esp] + * Clobbers: %edx, %eax, ADDR, SIZE, memory[%esp-16] to memory[%esp] */ .macro read_fw_dma VAR, SIZE, ADDR /* Address */ - bswapl \ADDR - pushl \ADDR + bswapl \ADDR + pushl \ADDR - /* We only support 32 bit target addresses */ - xorl %eax, %eax - pushl %eax - mov $BIOS_CFG_DMA_ADDR_HIGH, %dx - outl %eax, (%dx) + /* We only support 32 bit target addresses */ + xorl %eax, %eax + pushl %eax + mov $BIOS_CFG_DMA_ADDR_HIGH, %dx + outl %eax, (%dx) - /* Size */ - bswapl \SIZE - pushl \SIZE + /* Size */ + bswapl \SIZE + pushl \SIZE /* Control */ - movl $(\VAR << 16) | (FW_CFG_DMA_CTL_READ | FW_CFG_DMA_CTL_SELECT), %eax - bswapl %eax - pushl %eax + movl $(\VAR << 16) | (FW_CFG_DMA_CTL_READ | FW_CFG_DMA_CTL_SELECT), %eax + bswapl %eax + pushl %eax - movl %esp, %eax /* Address of the struct we generated */ - bswapl %eax - mov $BIOS_CFG_DMA_ADDR_LOW, %dx - outl %eax, (%dx) /* Initiate DMA */ + movl %esp, %eax /* Address of the struct we generated */ + bswapl %eax + mov $BIOS_CFG_DMA_ADDR_LOW, %dx + outl %eax, (%dx) /* Initiate DMA */ -1: mov (%esp), %eax /* Wait for completion */ - bswapl %eax - testl $~FW_CFG_DMA_CTL_ERROR, %eax - jnz 1b - addl $16, %esp +1: mov (%esp), %eax /* Wait for completion */ + bswapl %eax + testl $~FW_CFG_DMA_CTL_ERROR, %eax + jnz 1b + addl $16, %esp .endm @@ -115,116 +115,116 @@ * Read a blob from the fw_cfg device using DMA * Requires _ADDR, _SIZE and _DATA values for the parameter. 
* - * Clobbers: %eax, %edx, %es, %ecx, %edi and adresses %esp-20 to %esp + * Clobbers: %eax, %edx, %es, %ecx, %edi and adresses %esp-20 to %esp */ #ifdef USE_FW_CFG_DMA -#define read_fw_blob_dma(var) \ - read_fw var ## _SIZE; \ - mov %eax, %ecx; \ - read_fw var ## _ADDR; \ - mov %eax, %edi ;\ - read_fw_dma var ## _DATA, %ecx, %edi +#define read_fw_blob_dma(var) \ + read_fw var ## _SIZE; \ + mov %eax, %ecx; \ + read_fw var ## _ADDR; \ + mov %eax, %edi ; \ + read_fw_dma var ## _DATA, %ecx, %edi #else #define read_fw_blob_dma(var) read_fw_blob(var) #endif -#define read_fw_blob_pre(var) \ - read_fw var ## _SIZE; \ - mov %eax, %ecx; \ - mov $var ## _DATA, %ax; \ - mov $BIOS_CFG_IOPORT_CFG, %edx; \ - outw %ax, (%dx); \ - mov $BIOS_CFG_IOPORT_DATA, %dx; \ - cld +#define read_fw_blob_pre(var) \ + read_fw var ## _SIZE; \ + mov %eax, %ecx; \ + mov $var ## _DATA, %ax; \ + mov $BIOS_CFG_IOPORT_CFG, %edx; \ + outw %ax, (%dx); \ + mov $BIOS_CFG_IOPORT_DATA, %dx; \ + cld /* * Read a blob from the fw_cfg device. * Requires _ADDR, _SIZE and _DATA values for the parameter. * - * Clobbers: %eax, %edx, %es, %ecx, %edi + * Clobbers: %eax, %edx, %es, %ecx, %edi */ -#define read_fw_blob(var) \ - read_fw var ## _ADDR; \ - mov %eax, %edi; \ - read_fw_blob_pre(var); \ - /* old as(1) doesn't like this insn so emit the bytes instead: \ - rep insb (%dx), %es:(%edi); \ - */ \ - .dc.b 0xf3,0x6c +#define read_fw_blob(var) \ + read_fw var ## _ADDR; \ + mov %eax, %edi; \ + read_fw_blob_pre(var); \ + /* old as(1) doesn't like this insn so emit the bytes instead: \ + rep insb (%dx), %es:(%edi); \ + */ \ + .dc.b 0xf3,0x6c /* * Read a blob from the fw_cfg device in forced addr32 mode. * Requires _ADDR, _SIZE and _DATA values for the parameter. * - * Clobbers: %eax, %edx, %es, %ecx, %edi + * Clobbers: %eax, %edx, %es, %ecx, %edi */ -#define read_fw_blob_addr32(var) \ - read_fw var ## _ADDR; \ - mov %eax, %edi; \ - read_fw_blob_pre(var); \ - /* old as(1) doesn't like this insn so emit the bytes instead: \ - addr32 rep insb (%dx), %es:(%edi); \ - */ \ - .dc.b 0x67,0xf3,0x6c +#define read_fw_blob_addr32(var) \ + read_fw var ## _ADDR; \ + mov %eax, %edi; \ + read_fw_blob_pre(var); \ + /* old as(1) doesn't like this insn so emit the bytes instead: \ + addr32 rep insb (%dx), %es:(%edi); \ + */ \ + .dc.b 0x67,0xf3,0x6c /* * Read a blob from the fw_cfg device in forced addr32 mode, address is in %edi. * Requires _SIZE and _DATA values for the parameter. 
* - * Clobbers: %eax, %edx, %edi, %es, %ecx + * Clobbers: %eax, %edx, %edi, %es, %ecx */ -#define read_fw_blob_addr32_edi(var) \ - read_fw_blob_pre(var); \ - /* old as(1) doesn't like this insn so emit the bytes instead: \ - addr32 rep insb (%dx), %es:(%edi); \ - */ \ - .dc.b 0x67,0xf3,0x6c +#define read_fw_blob_addr32_edi(var) \ + read_fw_blob_pre(var); \ + /* old as(1) doesn't like this insn so emit the bytes instead: \ + addr32 rep insb (%dx), %es:(%edi); \ + */ \ + .dc.b 0x67,0xf3,0x6c -#define OPTION_ROM_START \ - .code16; \ - .text; \ - .global _start; \ - _start:; \ - .short 0xaa55; \ - .byte (_end - _start) / 512; +#define OPTION_ROM_START \ + .code16; \ + .text; \ + .global _start; \ + _start:; \ + .short 0xaa55; \ + .byte (_end - _start) / 512; -#define BOOT_ROM_START \ - OPTION_ROM_START \ - lret; \ - .org 0x18; \ - .short 0; \ - .short _pnph; \ - _pnph: \ - .ascii "$PnP"; \ - .byte 0x01; \ - .byte ( _pnph_len / 16 ); \ - .short 0x0000; \ - .byte 0x00; \ - .byte 0x00; \ - .long 0x00000000; \ - .short _manufacturer; \ - .short _product; \ - .long 0x00000000; \ - .short 0x0000; \ - .short 0x0000; \ - .short _bev; \ - .short 0x0000; \ - .short 0x0000; \ - .equ _pnph_len, . - _pnph; \ - _bev:; \ - /* DS = CS */ \ - movw %cs, %ax; \ - movw %ax, %ds; +#define BOOT_ROM_START \ + OPTION_ROM_START \ + lret; \ + .org 0x18; \ + .short 0; \ + .short _pnph; \ + _pnph: \ + .ascii "$PnP"; \ + .byte 0x01; \ + .byte ( _pnph_len / 16 ); \ + .short 0x0000; \ + .byte 0x00; \ + .byte 0x00; \ + .long 0x00000000; \ + .short _manufacturer; \ + .short _product; \ + .long 0x00000000; \ + .short 0x0000; \ + .short 0x0000; \ + .short _bev; \ + .short 0x0000; \ + .short 0x0000; \ + .equ _pnph_len, . - _pnph; \ + _bev:; \ + /* DS = CS */ \ + movw %cs, %ax; \ + movw %ax, %ds; -#define OPTION_ROM_END \ - .byte 0; \ - .align 512, 0; \ +#define OPTION_ROM_END \ + .byte 0; \ + .align 512, 0; \ _end: -#define BOOT_ROM_END \ - _manufacturer:; \ - .asciz "QEMU"; \ - _product:; \ - .asciz BOOT_ROM_PRODUCT; \ - OPTION_ROM_END +#define BOOT_ROM_END \ + _manufacturer:; \ + .asciz "QEMU"; \ + _product:; \ + .asciz BOOT_ROM_PRODUCT; \ + OPTION_ROM_END diff --git a/pc-bios/s390-ccw.img b/pc-bios/s390-ccw.img index 554fcbd1b7..c9a5a21c50 100644 Binary files a/pc-bios/s390-ccw.img and b/pc-bios/s390-ccw.img differ diff --git a/pc-bios/s390-ccw/bootmap.c b/pc-bios/s390-ccw/bootmap.c index 994e59c0b0..a2137449dc 100644 --- a/pc-bios/s390-ccw/bootmap.c +++ b/pc-bios/s390-ccw/bootmap.c @@ -72,42 +72,74 @@ static inline void verify_boot_info(BootInfo *bip) "Bad block size in zIPL section of the 1st record."); } -static block_number_t eckd_block_num(EckdCHS *chs) +static void eckd_format_chs(ExtEckdBlockPtr *ptr, bool ldipl, + uint64_t *c, + uint64_t *h, + uint64_t *s) +{ + if (ldipl) { + *c = ptr->ldptr.chs.cylinder; + *h = ptr->ldptr.chs.head; + *s = ptr->ldptr.chs.sector; + } else { + *c = ptr->bptr.chs.cylinder; + *h = ptr->bptr.chs.head; + *s = ptr->bptr.chs.sector; + } +} + +static block_number_t eckd_chs_to_block(uint64_t c, uint64_t h, uint64_t s) { const uint64_t sectors = virtio_get_sectors(); const uint64_t heads = virtio_get_heads(); - const uint64_t cylinder = chs->cylinder - + ((chs->head & 0xfff0) << 12); - const uint64_t head = chs->head & 0x000f; + const uint64_t cylinder = c + ((h & 0xfff0) << 12); + const uint64_t head = h & 0x000f; const block_number_t block = sectors * heads * cylinder + sectors * head - + chs->sector - - 1; /* block nr starts with zero */ + + s - 1; /* block nr starts with zero */ return 
block; } -static bool eckd_valid_address(BootMapPointer *p) +static block_number_t eckd_block_num(EckdCHS *chs) { - const uint64_t head = p->eckd.chs.head & 0x000f; + return eckd_chs_to_block(chs->cylinder, chs->head, chs->sector); +} +static block_number_t gen_eckd_block_num(ExtEckdBlockPtr *ptr, bool ldipl) +{ + uint64_t cyl, head, sec; + eckd_format_chs(ptr, ldipl, &cyl, &head, &sec); + return eckd_chs_to_block(cyl, head, sec); +} + +static bool eckd_valid_chs(uint64_t cyl, uint64_t head, uint64_t sector) +{ if (head >= virtio_get_heads() - || p->eckd.chs.sector > virtio_get_sectors() - || p->eckd.chs.sector <= 0) { + || sector > virtio_get_sectors() + || sector <= 0) { return false; } if (!virtio_guessed_disk_nature() && - eckd_block_num(&p->eckd.chs) >= virtio_get_blocks()) { + eckd_chs_to_block(cyl, head, sector) >= virtio_get_blocks()) { return false; } return true; } -static block_number_t load_eckd_segments(block_number_t blk, uint64_t *address) +static bool eckd_valid_address(ExtEckdBlockPtr *ptr, bool ldipl) +{ + uint64_t cyl, head, sec; + eckd_format_chs(ptr, ldipl, &cyl, &head, &sec); + return eckd_valid_chs(cyl, head, sec); +} + +static block_number_t load_eckd_segments(block_number_t blk, bool ldipl, + uint64_t *address) { block_number_t block_nr; - int j, rc; + int j, rc, count; BootMapPointer *bprs = (void *)_bprs; bool more_data; @@ -117,7 +149,7 @@ static block_number_t load_eckd_segments(block_number_t blk, uint64_t *address) do { more_data = false; for (j = 0;; j++) { - block_nr = eckd_block_num(&bprs[j].xeckd.bptr.chs); + block_nr = gen_eckd_block_num(&bprs[j].xeckd, ldipl); if (is_null_block_number(block_nr)) { /* end of chunk */ break; } @@ -129,11 +161,26 @@ static block_number_t load_eckd_segments(block_number_t blk, uint64_t *address) break; } - IPL_assert(block_size_ok(bprs[j].xeckd.bptr.size), + /* List directed pointer does not store block size */ + IPL_assert(ldipl || block_size_ok(bprs[j].xeckd.bptr.size), "bad chunk block size"); - IPL_assert(eckd_valid_address(&bprs[j]), "bad chunk ECKD addr"); - if ((bprs[j].xeckd.bptr.count == 0) && unused_space(&(bprs[j+1]), + if (!eckd_valid_address(&bprs[j].xeckd, ldipl)) { + /* + * If an invalid address is found during LD-IPL then break and + * retry as CCW + */ + IPL_assert(ldipl, "bad chunk ECKD addr"); + break; + } + + if (ldipl) { + count = bprs[j].xeckd.ldptr.count; + } else { + count = bprs[j].xeckd.bptr.count; + } + + if (count == 0 && unused_space(&bprs[j + 1], sizeof(EckdBlockPtr))) { /* This is a "continue" pointer. * This ptr should be the last one in the current @@ -149,11 +196,10 @@ static block_number_t load_eckd_segments(block_number_t blk, uint64_t *address) /* Load (count+1) blocks of code at (block_nr) * to memory (address). 
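The eckd_chs_to_block() factoring above is pure arithmetic and easy to sanity-check in isolation. A standalone version with a worked example follows; the disk geometry values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

static uint64_t chs_to_block(uint64_t sectors_per_track, uint64_t heads,
                             uint64_t c, uint64_t h, uint64_t s)
{
    /* ECKD stores the high-order cylinder bits in the head field. */
    uint64_t cylinder = c + ((h & 0xfff0) << 12);
    uint64_t head = h & 0x000f;

    return sectors_per_track * heads * cylinder
           + sectors_per_track * head
           + s - 1;                     /* block numbers start at zero */
}

int main(void)
{
    /* 12 sectors/track, 15 heads, CHS = (1, 2, 3): 180 + 24 + 2 */
    printf("%llu\n", (unsigned long long)chs_to_block(12, 15, 1, 2, 3));
    /* prints: 206 */
    return 0;
}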
*/ - rc = virtio_read_many(block_nr, (void *)(*address), - bprs[j].xeckd.bptr.count+1); + rc = virtio_read_many(block_nr, (void *)(*address), count + 1); IPL_assert(rc == 0, "code chunk read failed"); - *address += (bprs[j].xeckd.bptr.count+1) * virtio_get_block_size(); + *address += (count + 1) * virtio_get_block_size(); } } while (more_data); return block_nr; @@ -237,8 +283,10 @@ static void run_eckd_boot_script(block_number_t bmt_block_nr, uint64_t address; BootMapTable *bmt = (void *)sec; BootMapScript *bms = (void *)sec; + /* The S1B block number is NULL_BLOCK_NR if and only if it's an LD-IPL */ + bool ldipl = (s1b_block_nr == NULL_BLOCK_NR); - if (menu_is_enabled_zipl()) { + if (menu_is_enabled_zipl() && !ldipl) { loadparm = eckd_get_boot_menu_index(s1b_block_nr); } @@ -249,7 +297,7 @@ static void run_eckd_boot_script(block_number_t bmt_block_nr, memset(sec, FREE_SPACE_FILLER, sizeof(sec)); read_block(bmt_block_nr, sec, "Cannot read Boot Map Table"); - block_nr = eckd_block_num(&bmt->entry[loadparm].xeckd.bptr.chs); + block_nr = gen_eckd_block_num(&bmt->entry[loadparm].xeckd, ldipl); IPL_assert(block_nr != -1, "Cannot find Boot Map Table Entry"); memset(sec, FREE_SPACE_FILLER, sizeof(sec)); @@ -264,13 +312,18 @@ static void run_eckd_boot_script(block_number_t bmt_block_nr, } address = bms->entry[i].address.load_address; - block_nr = eckd_block_num(&bms->entry[i].blkptr.xeckd.bptr.chs); + block_nr = gen_eckd_block_num(&bms->entry[i].blkptr.xeckd, ldipl); do { - block_nr = load_eckd_segments(block_nr, &address); + block_nr = load_eckd_segments(block_nr, ldipl, &address); } while (block_nr != -1); } + if (ldipl && bms->entry[i].type != BOOT_SCRIPT_EXEC) { + /* Abort LD-IPL and retry as CCW-IPL */ + return; + } + IPL_assert(bms->entry[i].type == BOOT_SCRIPT_EXEC, "Unknown script entry type"); write_reset_psw(bms->entry[i].address.load_address); /* no return */ @@ -380,6 +433,23 @@ static void ipl_eckd_ldl(ECKD_IPL_mode_t mode) /* no return */ } +static block_number_t eckd_find_bmt(ExtEckdBlockPtr *ptr) +{ + block_number_t blockno; + uint8_t tmp_sec[MAX_SECTOR_SIZE]; + BootRecord *br; + + blockno = gen_eckd_block_num(ptr, 0); + read_block(blockno, tmp_sec, "Cannot read boot record"); + br = (BootRecord *)tmp_sec; + if (!magic_match(br->magic, ZIPL_MAGIC)) { + /* If the boot record is invalid, return and try CCW-IPL instead */ + return NULL_BLOCK_NR; + } + + return gen_eckd_block_num(&br->pgt.xeckd, 1); +} + static void print_eckd_msg(void) { char msg[] = "Using ECKD scheme (block size *****), "; @@ -401,28 +471,43 @@ static void print_eckd_msg(void) static void ipl_eckd(void) { - XEckdMbr *mbr = (void *)sec; - LDL_VTOC *vlbl = (void *)sec; + IplVolumeLabel *vlbl = (void *)sec; + LDL_VTOC *vtoc = (void *)sec; + block_number_t ldipl_bmt; /* Boot Map Table for List-Directed IPL */ print_eckd_msg(); - /* Grab the MBR again */ - memset(sec, FREE_SPACE_FILLER, sizeof(sec)); - read_block(0, mbr, "Cannot read block 0 on DASD"); - - if (magic_match(mbr->magic, IPL1_MAGIC)) { - ipl_eckd_cdl(); /* only returns in case of error */ - return; - } - - /* LDL/CMS? */ + /* Block 2 can contain either the CDL VOL1 label or the LDL VTOC */ memset(sec, FREE_SPACE_FILLER, sizeof(sec)); read_block(2, vlbl, "Cannot read block 2"); - if (magic_match(vlbl->magic, CMS1_MAGIC)) { + /* + * First check for a list-directed-format pointer which would + * supersede the CCW pointer. 
+ */ + if (eckd_valid_address((ExtEckdBlockPtr *)&vlbl->f.br, 0)) { + ldipl_bmt = eckd_find_bmt((ExtEckdBlockPtr *)&vlbl->f.br); + if (ldipl_bmt) { + sclp_print("List-Directed\n"); + /* LD-IPL does not use the S1B block, just make it NULL */ + run_eckd_boot_script(ldipl_bmt, NULL_BLOCK_NR); + /* Only returns on error; retry as CCW-IPL */ + sclp_print("Retrying IPL "); + print_eckd_msg(); + } + memset(sec, FREE_SPACE_FILLER, sizeof(sec)); + read_block(2, vtoc, "Cannot read block 2"); + } + + /* Not list-directed */ + if (magic_match(vtoc->magic, VOL1_MAGIC)) { + ipl_eckd_cdl(); /* may return on error */ + } + + if (magic_match(vtoc->magic, CMS1_MAGIC)) { ipl_eckd_ldl(ECKD_CMS); /* no return */ } - if (magic_match(vlbl->magic, LNX1_MAGIC)) { + if (magic_match(vtoc->magic, LNX1_MAGIC)) { ipl_eckd_ldl(ECKD_LDL); /* no return */ } diff --git a/pc-bios/s390-ccw/bootmap.h b/pc-bios/s390-ccw/bootmap.h index 3946aa3f8d..d4690a88c2 100644 --- a/pc-bios/s390-ccw/bootmap.h +++ b/pc-bios/s390-ccw/bootmap.h @@ -45,9 +45,23 @@ typedef struct EckdBlockPtr { * it's 0 for TablePtr, ScriptPtr, and SectionPtr */ } __attribute__ ((packed)) EckdBlockPtr; -typedef struct ExtEckdBlockPtr { +typedef struct LdEckdCHS { + uint32_t cylinder; + uint8_t head; + uint8_t sector; +} __attribute__ ((packed)) LdEckdCHS; + +typedef struct LdEckdBlockPtr { + LdEckdCHS chs; /* cylinder/head/sector is an address of the block */ + uint8_t reserved[4]; + uint16_t count; + uint32_t pad; +} __attribute__ ((packed)) LdEckdBlockPtr; + +/* bptr is used for CCW type IPL, while ldptr is for list-directed IPL */ +typedef union ExtEckdBlockPtr { EckdBlockPtr bptr; - uint8_t reserved[8]; + LdEckdBlockPtr ldptr; } __attribute__ ((packed)) ExtEckdBlockPtr; typedef union BootMapPointer { @@ -57,6 +71,15 @@ typedef union BootMapPointer { ExtEckdBlockPtr xeckd; } __attribute__ ((packed)) BootMapPointer; +typedef struct BootRecord { + uint8_t magic[4]; + uint32_t version; + uint64_t res1; + BootMapPointer pgt; + uint8_t reserved[510 - 32]; + uint16_t os_id; +} __attribute__ ((packed)) BootRecord; + /* aka Program Table */ typedef struct BootMapTable { uint8_t magic[4]; @@ -292,7 +315,8 @@ typedef struct IplVolumeLabel { struct { unsigned char key[4]; /* == "VOL1" */ unsigned char volser[6]; - unsigned char reserved[6]; + unsigned char reserved[64]; + EckdCHS br; /* Location of Boot Record for list-directed IPL */ } f; }; } __attribute__((packed)) IplVolumeLabel; diff --git a/pc-bios/vgabios-ati.bin b/pc-bios/vgabios-ati.bin index 39b2405148..9fb862777f 100644 Binary files a/pc-bios/vgabios-ati.bin and b/pc-bios/vgabios-ati.bin differ diff --git a/pc-bios/vgabios-bochs-display.bin b/pc-bios/vgabios-bochs-display.bin index b20d67ccf5..91969ae270 100644 Binary files a/pc-bios/vgabios-bochs-display.bin and b/pc-bios/vgabios-bochs-display.bin differ diff --git a/pc-bios/vgabios-cirrus.bin b/pc-bios/vgabios-cirrus.bin index ebe53366e4..c429540cde 100644 Binary files a/pc-bios/vgabios-cirrus.bin and b/pc-bios/vgabios-cirrus.bin differ diff --git a/pc-bios/vgabios-qxl.bin b/pc-bios/vgabios-qxl.bin index 4b5573a857..088385f747 100644 Binary files a/pc-bios/vgabios-qxl.bin and b/pc-bios/vgabios-qxl.bin differ diff --git a/pc-bios/vgabios-ramfb.bin b/pc-bios/vgabios-ramfb.bin index d458ec7436..134c751642 100644 Binary files a/pc-bios/vgabios-ramfb.bin and b/pc-bios/vgabios-ramfb.bin differ diff --git a/pc-bios/vgabios-stdvga.bin b/pc-bios/vgabios-stdvga.bin index 797e1036c9..4cd0d52e77 100644 Binary files a/pc-bios/vgabios-stdvga.bin and
b/pc-bios/vgabios-stdvga.bin differ diff --git a/pc-bios/vgabios-virtio.bin b/pc-bios/vgabios-virtio.bin index 3f8fe9de13..976c78667c 100644 Binary files a/pc-bios/vgabios-virtio.bin and b/pc-bios/vgabios-virtio.bin differ diff --git a/pc-bios/vgabios-vmware.bin b/pc-bios/vgabios-vmware.bin index d5f263a9f7..119a2b188b 100644 Binary files a/pc-bios/vgabios-vmware.bin and b/pc-bios/vgabios-vmware.bin differ diff --git a/pc-bios/vgabios.bin b/pc-bios/vgabios.bin index d26af416ce..cac6131e1b 100644 Binary files a/pc-bios/vgabios.bin and b/pc-bios/vgabios.bin differ diff --git a/plugins/core.c b/plugins/core.c index e04ffa1ba4..9912f2cfdb 100644 --- a/plugins/core.c +++ b/plugins/core.c @@ -24,6 +24,7 @@ #include "exec/cpu-common.h" #include "exec/exec-all.h" +#include "exec/tb-flush.h" #include "exec/helper-proto.h" #include "tcg/tcg.h" #include "tcg/tcg-op.h" @@ -552,17 +553,6 @@ void qemu_plugin_user_postfork(bool is_child) } } - -/* - * Call this function after longjmp'ing to the main loop. It's possible that the - * last instruction of a TB might have used helpers, and therefore the - * "disable" instruction will never execute because it ended up as dead code. - */ -void qemu_plugin_disable_mem_helpers(CPUState *cpu) -{ - cpu->plugin_mem_cbs = NULL; -} - static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp) { return ap == bp; diff --git a/plugins/loader.c b/plugins/loader.c index 88c30bde2d..809f3f9b13 100644 --- a/plugins/loader.c +++ b/plugins/loader.c @@ -29,7 +29,7 @@ #include "qemu/plugin.h" #include "qemu/memalign.h" #include "hw/core/cpu.h" -#include "exec/exec-all.h" +#include "exec/tb-flush.h" #ifndef CONFIG_USER_ONLY #include "hw/boards.h" #endif diff --git a/python/Makefile b/python/Makefile index c5bd6ff83a..7c70dcc8d1 100644 --- a/python/Makefile +++ b/python/Makefile @@ -9,14 +9,14 @@ help: @echo "make check-minreqs:" @echo " Run tests in the minreqs virtual environment." @echo " These tests use the oldest dependencies." - @echo " Requires: Python 3.6" - @echo " Hint (Fedora): 'sudo dnf install python3.6'" + @echo " Requires: Python 3.7" + @echo " Hint (Fedora): 'sudo dnf install python3.7'" @echo "" @echo "make check-tox:" @echo " Run tests against multiple python versions." @echo " These tests use the newest dependencies." - @echo " Requires: Python 3.6 - 3.10, and tox." - @echo " Hint (Fedora): 'sudo dnf install python3-tox python3.10'" + @echo " Requires: Python 3.7 - 3.11, and tox." + @echo " Hint (Fedora): 'sudo dnf install python3-tox python3.11'" @echo " The variable QEMU_TOX_EXTRA_ARGS can be used to pass extra" @echo " arguments to tox." @echo "" @@ -54,18 +54,21 @@ pipenv check-pipenv: @echo "pipenv was dropped; try 'make check-minreqs' or 'make min-venv'" @exit 1 +PIP_INSTALL = pip install --disable-pip-version-check .PHONY: min-venv min-venv: $(QEMU_MINVENV_DIR) $(QEMU_MINVENV_DIR)/bin/activate $(QEMU_MINVENV_DIR) $(QEMU_MINVENV_DIR)/bin/activate: setup.cfg tests/minreqs.txt @echo "VENV $(QEMU_MINVENV_DIR)" - @python3.6 -m venv $(QEMU_MINVENV_DIR) + @python3.7 -m venv $(QEMU_MINVENV_DIR) @( \ echo "ACTIVATE $(QEMU_MINVENV_DIR)"; \ . $(QEMU_MINVENV_DIR)/bin/activate; \ + echo "INSTALL wheel $(QEMU_MINVENV_DIR)"; \ + $(PIP_INSTALL) wheel 1>/dev/null; \ echo "INSTALL -r tests/minreqs.txt $(QEMU_MINVENV_DIR)";\ - pip install -r tests/minreqs.txt 1>/dev/null; \ + $(PIP_INSTALL) -r tests/minreqs.txt 1>/dev/null; \ echo "INSTALL -e qemu $(QEMU_MINVENV_DIR)"; \ - pip install -e . 1>/dev/null; \ + $(PIP_INSTALL) -e .
1>/dev/null; \ ) @touch $(QEMU_MINVENV_DIR) @@ -100,7 +103,7 @@ check-dev: dev-venv .PHONY: develop develop: - pip3 install --disable-pip-version-check -e .[devel] + $(PIP_INSTALL) -e .[devel] .PHONY: check check: diff --git a/python/qemu/machine/machine.py b/python/qemu/machine/machine.py index e57c254484..c16a0b6fed 100644 --- a/python/qemu/machine/machine.py +++ b/python/qemu/machine/machine.py @@ -337,18 +337,18 @@ class QEMUMachine: self._remove_files.append(self._console_address) if self._qmp_set: - monitor_address = None - sock = None if self._monitor_address is None: self._sock_pair = socket.socketpair() sock = self._sock_pair[1] if isinstance(self._monitor_address, str): self._remove_files.append(self._monitor_address) - monitor_address = self._monitor_address + + sock_or_addr = self._monitor_address or sock + assert sock_or_addr is not None + self._qmp_connection = QEMUMonitorProtocol( - address=monitor_address, - sock=sock, - server=True, + sock_or_addr, + server=bool(self._monitor_address), nickname=self._name ) @@ -370,7 +370,10 @@ class QEMUMachine: if self._sock_pair: self._sock_pair[0].close() if self._qmp_connection: - self._qmp.accept(self._qmp_timer) + if self._sock_pair: + self._qmp.connect() + else: + self._qmp.accept(self._qmp_timer) def _close_qemu_log_file(self) -> None: if self._qemu_log_file is not None: diff --git a/python/qemu/qmp/legacy.py b/python/qemu/qmp/legacy.py index 8b09ee7dbb..e1e9383978 100644 --- a/python/qemu/qmp/legacy.py +++ b/python/qemu/qmp/legacy.py @@ -68,34 +68,31 @@ class QEMUMonitorProtocol: Provide an API to connect to QEMU via QEMU Monitor Protocol (QMP) and then allow to handle commands and events. - :param address: QEMU address, can be either a unix socket path (string) - or a tuple in the form ( address, port ) for a TCP - connection or None - :param sock: a socket or None + :param address: QEMU address, can be a unix socket path (string), a tuple + in the form ( address, port ) for a TCP connection, or an + existing `socket.socket` object. :param server: Act as the socket server. (See 'accept') + Not applicable when passing a socket directly. :param nickname: Optional nickname used for logging. 
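The machine.py and legacy.py changes above let a caller hand an already-connected socket directly to QEMUMonitorProtocol instead of an address; QEMUMachine now uses a socketpair() this way. A minimal sketch of that pattern, assuming the qemu.qmp package is installed and that the peer descriptor has been handed to a running QEMU process as its QMP channel (the command shown is only an example):

    import socket
    from qemu.qmp.legacy import QEMUMonitorProtocol

    ours, theirs = socket.socketpair()
    # ... pass `theirs` to a QEMU process as the fd backing its QMP chardev ...
    qmp = QEMUMonitorProtocol(ours)  # server must be False when passing a socket
    qmp.connect()                    # greeting + capabilities negotiation
    print(qmp.cmd("query-status"))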
""" def __init__(self, - address: Optional[SocketAddrT] = None, - sock: Optional[socket.socket] = None, + address: Union[SocketAddrT, socket.socket], server: bool = False, nickname: Optional[str] = None): - assert address or sock + if server and isinstance(address, socket.socket): + raise ValueError( + "server argument should be False when passing a socket") + self._qmp = QMPClient(nickname) self._aloop = asyncio.get_event_loop() self._address = address - self._sock = sock self._timeout: Optional[float] = None if server: - if sock: - assert self._sock is not None - self._sync(self._qmp.open_with_socket(self._sock)) - else: - assert self._address is not None - self._sync(self._qmp.start_server(self._address)) + assert not isinstance(self._address, socket.socket) + self._sync(self._qmp.start_server(self._address)) _T = TypeVar('_T') @@ -150,7 +147,6 @@ class QEMUMonitorProtocol: :return: QMP greeting dict, or None if negotiate is false :raise ConnectError: on connection errors """ - assert self._address is not None self._qmp.await_greeting = negotiate self._qmp.negotiate = negotiate diff --git a/python/qemu/qmp/models.py b/python/qemu/qmp/models.py index de87f87804..da52848d5a 100644 --- a/python/qemu/qmp/models.py +++ b/python/qemu/qmp/models.py @@ -54,7 +54,7 @@ class Model: class Greeting(Model): """ - Defined in qmp-spec.txt, section 2.2, "Server Greeting". + Defined in qmp-spec.rst, section "Server Greeting". :param raw: The raw Greeting object. :raise KeyError: If any required fields are absent. @@ -82,7 +82,7 @@ class Greeting(Model): class QMPGreeting(Model): """ - Defined in qmp-spec.txt, section 2.2, "Server Greeting". + Defined in qmp-spec.rst, section "Server Greeting". :param raw: The raw QMPGreeting object. :raise KeyError: If any required fields are absent. @@ -104,7 +104,7 @@ class QMPGreeting(Model): class ErrorResponse(Model): """ - Defined in qmp-spec.txt, section 2.4.2, "error". + Defined in qmp-spec.rst, section "Error". :param raw: The raw ErrorResponse object. :raise KeyError: If any required fields are absent. @@ -126,7 +126,7 @@ class ErrorResponse(Model): class ErrorInfo(Model): """ - Defined in qmp-spec.txt, section 2.4.2, "error". + Defined in qmp-spec.rst, section "Error". :param raw: The raw ErrorInfo object. :raise KeyError: If any required fields are absent. diff --git a/python/qemu/qmp/protocol.py b/python/qemu/qmp/protocol.py index 22e60298d2..753182131f 100644 --- a/python/qemu/qmp/protocol.py +++ b/python/qemu/qmp/protocol.py @@ -297,19 +297,6 @@ class AsyncProtocol(Generic[T]): await self.accept() assert self.runstate == Runstate.RUNNING - @upper_half - @require(Runstate.IDLE) - async def open_with_socket(self, sock: socket.socket) -> None: - """ - Start connection with given socket. - - :param sock: A socket. - - :raise StateError: When the `Runstate` is not `IDLE`. - """ - self._reader, self._writer = await asyncio.open_connection(sock=sock) - self._set_state(Runstate.CONNECTING) - @upper_half @require(Runstate.IDLE) async def start_server(self, address: SocketAddrT, @@ -357,12 +344,11 @@ class AsyncProtocol(Generic[T]): protocol-level failure occurs while establishing a new session, the wrapped error may also be an `QMPError`. 
""" - if not self._reader: - if self._accepted is None: - raise QMPError("Cannot call accept() before start_server().") - await self._session_guard( - self._do_accept(), - 'Failed to establish connection') + if self._accepted is None: + raise QMPError("Cannot call accept() before start_server().") + await self._session_guard( + self._do_accept(), + 'Failed to establish connection') await self._session_guard( self._establish_session(), 'Failed to establish session') @@ -370,7 +356,7 @@ class AsyncProtocol(Generic[T]): @upper_half @require(Runstate.IDLE) - async def connect(self, address: SocketAddrT, + async def connect(self, address: Union[SocketAddrT, socket.socket], ssl: Optional[SSLContext] = None) -> None: """ Connect to the server and begin processing message queues. @@ -615,7 +601,7 @@ class AsyncProtocol(Generic[T]): self.logger.debug("Connection accepted.") @upper_half - async def _do_connect(self, address: SocketAddrT, + async def _do_connect(self, address: Union[SocketAddrT, socket.socket], ssl: Optional[SSLContext] = None) -> None: """ Acting as the transport client, initiate a connection to a server. @@ -634,9 +620,17 @@ class AsyncProtocol(Generic[T]): # otherwise yield. await asyncio.sleep(0) - self.logger.debug("Connecting to %s ...", address) - - if isinstance(address, tuple): + if isinstance(address, socket.socket): + self.logger.debug("Connecting with existing socket: " + "fd=%d, family=%r, type=%r", + address.fileno(), address.family, address.type) + connect = asyncio.open_connection( + limit=self._limit, + ssl=ssl, + sock=address, + ) + elif isinstance(address, tuple): + self.logger.debug("Connecting to %s ...", address) connect = asyncio.open_connection( address[0], address[1], @@ -644,13 +638,14 @@ class AsyncProtocol(Generic[T]): limit=self._limit, ) else: + self.logger.debug("Connecting to file://%s ...", address) connect = asyncio.open_unix_connection( path=address, ssl=ssl, limit=self._limit, ) - self._reader, self._writer = await connect + self._reader, self._writer = await connect self.logger.debug("Connected.") @upper_half diff --git a/python/qemu/qmp/qmp_client.py b/python/qemu/qmp/qmp_client.py index 9d73ae6e7a..2a817f9db3 100644 --- a/python/qemu/qmp/qmp_client.py +++ b/python/qemu/qmp/qmp_client.py @@ -369,7 +369,7 @@ class QMPClient(AsyncProtocol[Message], Events): # This is very likely a server parsing error. # It doesn't inherently belong to any pending execution. # Instead of performing clever recovery, just terminate. - # See "NOTE" in qmp-spec.txt, section 2.4.2 + # See "NOTE" in qmp-spec.rst, section "Error". raise ServerParseError( ("Server sent an error response without an ID, " "but there are no ID-less executions pending. " @@ -377,7 +377,7 @@ class QMPClient(AsyncProtocol[Message], Events): msg ) - # qmp-spec.txt, section 2.4: + # qmp-spec.rst, section "Commands Responses": # 'Clients should drop all the responses # that have an unknown "id" field.' self.logger.log( diff --git a/python/scripts/mkvenv.py b/python/scripts/mkvenv.py new file mode 100644 index 0000000000..3a9aef46a5 --- /dev/null +++ b/python/scripts/mkvenv.py @@ -0,0 +1,927 @@ +""" +mkvenv - QEMU pyvenv bootstrapping utility + +usage: mkvenv [-h] command ... + +QEMU pyvenv bootstrapping utility + +options: + -h, --help show this help message and exit + +Commands: + command Description + create create a venv + post_init + post-venv initialization + ensure Ensure that the specified package is installed. 
+ +-------------------------------------------------- + +usage: mkvenv create [-h] target + +positional arguments: + target Target directory to install virtual environment into. + +options: + -h, --help show this help message and exit + +-------------------------------------------------- + +usage: mkvenv post_init [-h] + +options: + -h, --help show this help message and exit + +-------------------------------------------------- + +usage: mkvenv ensure [-h] [--online] [--dir DIR] dep_spec... + +positional arguments: + dep_spec PEP 508 Dependency specification, e.g. 'meson>=0.61.5' + +options: + -h, --help show this help message and exit + --online Install packages from PyPI, if necessary. + --dir DIR Path to vendored packages where we may install from. + +""" + +# Copyright (C) 2022-2023 Red Hat, Inc. +# +# Authors: +# John Snow +# Paolo Bonzini +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import argparse +from importlib.util import find_spec +import logging +import os +from pathlib import Path +import re +import shutil +import site +import subprocess +import sys +import sysconfig +from types import SimpleNamespace +from typing import ( + Any, + Iterator, + Optional, + Sequence, + Tuple, + Union, +) +import venv + + +# Try to load distlib, with a fallback to pip's vendored version. +# HAVE_DISTLIB is checked below, just-in-time, so that mkvenv does not fail +# outside the venv or before a potential call to ensurepip in checkpip(). +HAVE_DISTLIB = True +try: + import distlib.scripts + import distlib.version +except ImportError: + try: + # Reach into pip's cookie jar. pylint and flake8 don't understand + # that these imports will be used via distlib.xxx. + from pip._vendor import distlib + import pip._vendor.distlib.scripts # noqa, pylint: disable=unused-import + import pip._vendor.distlib.version # noqa, pylint: disable=unused-import + except ImportError: + HAVE_DISTLIB = False + +# Do not add any mandatory dependencies from outside the stdlib: +# This script *must* be usable standalone! + +DirType = Union[str, bytes, "os.PathLike[str]", "os.PathLike[bytes]"] +logger = logging.getLogger("mkvenv") + + +def inside_a_venv() -> bool: + """Returns True if it is executed inside of a virtual environment.""" + return sys.prefix != sys.base_prefix + + +class Ouch(RuntimeError): + """An Exception class we can't confuse with a builtin.""" + + +class QemuEnvBuilder(venv.EnvBuilder): + """ + An extension of venv.EnvBuilder for building QEMU's configure-time venv. + + The primary difference is that it emulates a "nested" virtual + environment when invoked from inside of an existing virtual + environment by including packages from the parent. Also, + "ensurepip" is replaced if possible with just recreating pip's + console_scripts inside the virtual environment. + + Parameters for base class init: + - system_site_packages: bool = False + - clear: bool = False + - symlinks: bool = False + - upgrade: bool = False + - with_pip: bool = False + - prompt: Optional[str] = None + - upgrade_deps: bool = False (Since 3.9) + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + logger.debug("QemuEnvBuilder.__init__(...)") + + # For nested venv emulation: + self.use_parent_packages = False + if inside_a_venv(): + # Include parent packages only if we're in a venv and + # system_site_packages was True. 
+ self.use_parent_packages = kwargs.pop( + "system_site_packages", False + ) + # Include system_site_packages only when the parent, + # the venv we are currently in, also does so. + kwargs["system_site_packages"] = sys.base_prefix in site.PREFIXES + + # ensurepip is slow: venv creation can be very fast for cases where + # we allow the use of system_site_packages. Therefore, ensurepip is + # replaced with our own script generation once the virtual environment + # is set up. + self.want_pip = kwargs.get("with_pip", False) + if self.want_pip: + if ( + kwargs.get("system_site_packages", False) + and not need_ensurepip() + ): + kwargs["with_pip"] = False + else: + check_ensurepip(suggest_remedy=True) + + super().__init__(*args, **kwargs) + + # Make the context available post-creation: + self._context: Optional[SimpleNamespace] = None + + def get_parent_libpath(self) -> Optional[str]: + """Return the libpath of the parent venv, if applicable.""" + if self.use_parent_packages: + return sysconfig.get_path("purelib") + return None + + @staticmethod + def compute_venv_libpath(context: SimpleNamespace) -> str: + """ + Compatibility wrapper for context.lib_path for Python < 3.12 + """ + # Python 3.12+, not strictly necessary because it's documented + # to be the same as 3.10 code below: + if sys.version_info >= (3, 12): + return context.lib_path + + # Python 3.10+ + if "venv" in sysconfig.get_scheme_names(): + lib_path = sysconfig.get_path( + "purelib", scheme="venv", vars={"base": context.env_dir} + ) + assert lib_path is not None + return lib_path + + # For Python <= 3.9 we need to hardcode this. Fortunately the + # code below was the same in Python 3.6-3.10, so there is only + # one case. + if sys.platform == "win32": + return os.path.join(context.env_dir, "Lib", "site-packages") + return os.path.join( + context.env_dir, + "lib", + "python%d.%d" % sys.version_info[:2], + "site-packages", + ) + + def ensure_directories(self, env_dir: DirType) -> SimpleNamespace: + logger.debug("ensure_directories(env_dir=%s)", env_dir) + self._context = super().ensure_directories(env_dir) + return self._context + + def create(self, env_dir: DirType) -> None: + logger.debug("create(env_dir=%s)", env_dir) + super().create(env_dir) + assert self._context is not None + self.post_post_setup(self._context) + + def post_post_setup(self, context: SimpleNamespace) -> None: + """ + The final, final hook. Enter the venv and run commands inside of it. + """ + if self.use_parent_packages: + # We're inside of a venv and we want to include the parent + # venv's packages. + parent_libpath = self.get_parent_libpath() + assert parent_libpath is not None + logger.debug("parent_libpath: %s", parent_libpath) + + our_libpath = self.compute_venv_libpath(context) + logger.debug("our_libpath: %s", our_libpath) + + pth_file = os.path.join(our_libpath, "nested.pth") + with open(pth_file, "w", encoding="UTF-8") as file: + file.write(parent_libpath + os.linesep) + + if self.want_pip: + args = [ + context.env_exe, + __file__, + "post_init", + ] + subprocess.run(args, check=True) + + def get_value(self, field: str) -> str: + """ + Get a string value from the context namespace after a call to build. + + For valid field names, see: + https://docs.python.org/3/library/venv.html#venv.EnvBuilder.ensure_directories + """ + ret = getattr(self._context, field) + assert isinstance(ret, str) + return ret + + +def need_ensurepip() -> bool: + """ + Tests for the presence of setuptools and pip. + + :return: `True` if we do not detect both packages.
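The nested-venv emulation in post_post_setup() above works because Python's site module processes every *.pth file found in site-packages at interpreter startup and appends each listed path to sys.path. A standalone sketch of the same trick, with an assumed child-venv path:

    import os
    import sysconfig

    # site-packages of the currently active (parent) environment:
    parent_purelib = sysconfig.get_path("purelib")

    # assumed location of the newly created child venv's site-packages:
    child_site = "pyvenv/lib/python3.11/site-packages"

    with open(os.path.join(child_site, "nested.pth"), "w", encoding="utf-8") as f:
        f.write(parent_purelib + os.linesep)
    # At the child venv's next startup, the parent's packages are importable too.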
+ """ + # Don't try to actually import them, it's fraught with danger: + # https://github.com/pypa/setuptools/issues/2993 + if find_spec("setuptools") and find_spec("pip"): + return False + return True + + +def check_ensurepip(prefix: str = "", suggest_remedy: bool = False) -> None: + """ + Check that we have ensurepip. + + Raise a fatal exception with a helpful hint if it isn't available. + """ + if not find_spec("ensurepip"): + msg = ( + "Python's ensurepip module is not found.\n" + "It's normally part of the Python standard library, " + "maybe your distribution packages it separately?\n" + "(Debian puts ensurepip in its python3-venv package.)\n" + ) + if suggest_remedy: + msg += ( + "Either install ensurepip, or alleviate the need for it in the" + " first place by installing pip and setuptools for " + f"'{sys.executable}'.\n" + ) + raise Ouch(prefix + msg) + + # ensurepip uses pyexpat, which can also go missing on us: + if not find_spec("pyexpat"): + msg = ( + "Python's pyexpat module is not found.\n" + "It's normally part of the Python standard library, " + "maybe your distribution packages it separately?\n" + "(NetBSD's pkgsrc debundles this to e.g. 'py310-expat'.)\n" + ) + if suggest_remedy: + msg += ( + "Either install pyexpat, or alleviate the need for it in the " + "first place by installing pip and setuptools for " + f"'{sys.executable}'.\n" + ) + raise Ouch(prefix + msg) + + +def make_venv( # pylint: disable=too-many-arguments + env_dir: Union[str, Path], + system_site_packages: bool = False, + clear: bool = True, + symlinks: Optional[bool] = None, + with_pip: bool = True, +) -> None: + """ + Create a venv using `QemuEnvBuilder`. + + This is analogous to the `venv.create` module-level convenience + function that is part of the Python stdblib, except it uses + `QemuEnvBuilder` instead. + + :param env_dir: The directory to create/install to. + :param system_site_packages: + Allow inheriting packages from the system installation. + :param clear: When True, fully remove any prior venv and files. + :param symlinks: + Whether to use symlinks to the target interpreter or not. If + left unspecified, it will use symlinks except on Windows to + match behavior with the "venv" CLI tool. + :param with_pip: + Whether to install "pip" binaries or not. + """ + logger.debug( + "%s: make_venv(env_dir=%s, system_site_packages=%s, " + "clear=%s, symlinks=%s, with_pip=%s)", + __file__, + str(env_dir), + system_site_packages, + clear, + symlinks, + with_pip, + ) + + if symlinks is None: + # Default behavior of standard venv CLI + symlinks = os.name != "nt" + + builder = QemuEnvBuilder( + system_site_packages=system_site_packages, + clear=clear, + symlinks=symlinks, + with_pip=with_pip, + ) + + style = "non-isolated" if builder.system_site_packages else "isolated" + nested = "" + if builder.use_parent_packages: + nested = f"(with packages from '{builder.get_parent_libpath()}') " + print( + f"mkvenv: Creating {style} virtual environment" + f" {nested}at '{str(env_dir)}'", + file=sys.stderr, + ) + + try: + logger.debug("Invoking builder.create()") + try: + builder.create(str(env_dir)) + except SystemExit as exc: + # Some versions of the venv module raise SystemExit; *nasty*! + # We want the exception that prompted it. It might be a subprocess + # error that has output we *really* want to see. 
+ logger.debug("Intercepted SystemExit from EnvBuilder.create()") + raise exc.__cause__ or exc.__context__ or exc + logger.debug("builder.create() finished") + except subprocess.CalledProcessError as exc: + logger.error("mkvenv subprocess failed:") + logger.error("cmd: %s", exc.cmd) + logger.error("returncode: %d", exc.returncode) + + def _stringify(data: Union[str, bytes]) -> str: + if isinstance(data, bytes): + return data.decode() + return data + + lines = [] + if exc.stdout: + lines.append("========== stdout ==========") + lines.append(_stringify(exc.stdout)) + lines.append("============================") + if exc.stderr: + lines.append("========== stderr ==========") + lines.append(_stringify(exc.stderr)) + lines.append("============================") + if lines: + logger.error(os.linesep.join(lines)) + + raise Ouch("VENV creation subprocess failed.") from exc + + # print the python executable to stdout for configure. + print(builder.get_value("env_exe")) + + +def _gen_importlib(packages: Sequence[str]) -> Iterator[str]: + # pylint: disable=import-outside-toplevel + # pylint: disable=no-name-in-module + # pylint: disable=import-error + try: + # First preference: Python 3.8+ stdlib + from importlib.metadata import ( # type: ignore + PackageNotFoundError, + distribution, + ) + except ImportError as exc: + logger.debug("%s", str(exc)) + # Second preference: Commonly available PyPI backport + from importlib_metadata import ( # type: ignore + PackageNotFoundError, + distribution, + ) + + def _generator() -> Iterator[str]: + for package in packages: + try: + entry_points = distribution(package).entry_points + except PackageNotFoundError: + continue + + # The EntryPoints type is only available in 3.10+, + # treat this as a vanilla list and filter it ourselves. + entry_points = filter( + lambda ep: ep.group == "console_scripts", entry_points + ) + + for entry_point in entry_points: + yield f"{entry_point.name} = {entry_point.value}" + + return _generator() + + +def _gen_pkg_resources(packages: Sequence[str]) -> Iterator[str]: + # pylint: disable=import-outside-toplevel + # Bundled with setuptools; has a good chance of being available. + import pkg_resources + + def _generator() -> Iterator[str]: + for package in packages: + try: + eps = pkg_resources.get_entry_map(package, "console_scripts") + except pkg_resources.DistributionNotFound: + continue + + for entry_point in eps.values(): + yield str(entry_point) + + return _generator() + + +def generate_console_scripts( + packages: Sequence[str], + python_path: Optional[str] = None, + bin_path: Optional[str] = None, +) -> None: + """ + Generate script shims for console_script entry points in @packages. + """ + if python_path is None: + python_path = sys.executable + if bin_path is None: + bin_path = sysconfig.get_path("scripts") + assert bin_path is not None + + logger.debug( + "generate_console_scripts(packages=%s, python_path=%s, bin_path=%s)", + packages, + python_path, + bin_path, + ) + + if not packages: + return + + def _get_entry_points() -> Iterator[str]: + """Python 3.7 compatibility shim for iterating entry points.""" + # Python 3.8+, or Python 3.7 with importlib_metadata installed. + try: + return _gen_importlib(packages) + except ImportError as exc: + logger.debug("%s", str(exc)) + + # Python 3.7 with setuptools installed. 
+ try: + return _gen_pkg_resources(packages) + except ImportError as exc: + logger.debug("%s", str(exc)) + raise Ouch( + "Neither importlib.metadata nor pkg_resources found, " + "can't generate console script shims.\n" + "Use Python 3.8+, or install importlib-metadata or setuptools." + ) from exc + + maker = distlib.scripts.ScriptMaker(None, bin_path) + maker.variants = {""} + maker.clobber = False + + for entry_point in _get_entry_points(): + for filename in maker.make(entry_point): + logger.debug("wrote console_script '%s'", filename) + + +def checkpip() -> bool: + """ + Debian10 has a pip that's broken when used inside of a virtual environment. + + We try to detect and correct that case here. + """ + try: + # pylint: disable=import-outside-toplevel,unused-import,import-error + # pylint: disable=redefined-outer-name + import pip._internal # type: ignore # noqa: F401 + + logger.debug("pip appears to be working correctly.") + return False + except ModuleNotFoundError as exc: + if exc.name == "pip._internal": + # Uh, fair enough. They did say "internal". + # Let's just assume it's fine. + return False + logger.warning("pip appears to be malfunctioning: %s", str(exc)) + + check_ensurepip("pip appears to be non-functional, and ") + + logger.debug("Attempting to repair pip ...") + subprocess.run( + (sys.executable, "-m", "ensurepip"), + stdout=subprocess.DEVNULL, + check=True, + ) + logger.debug("Pip is now (hopefully) repaired!") + return True + + +def pkgname_from_depspec(dep_spec: str) -> str: + """ + Parse package name out of a PEP-508 depspec. + + See https://peps.python.org/pep-0508/#names + """ + match = re.match( + r"^([A-Z0-9]([A-Z0-9._-]*[A-Z0-9])?)", dep_spec, re.IGNORECASE + ) + if not match: + raise ValueError( + f"dep_spec '{dep_spec}'" + " does not appear to contain a valid package name" + ) + return match.group(0) + + +def _get_version_importlib(package: str) -> Optional[str]: + # pylint: disable=import-outside-toplevel + # pylint: disable=no-name-in-module + # pylint: disable=import-error + try: + # First preference: Python 3.8+ stdlib + from importlib.metadata import ( # type: ignore + PackageNotFoundError, + distribution, + ) + except ImportError as exc: + logger.debug("%s", str(exc)) + # Second preference: Commonly available PyPI backport + from importlib_metadata import ( # type: ignore + PackageNotFoundError, + distribution, + ) + + try: + return str(distribution(package).version) + except PackageNotFoundError: + return None + + +def _get_version_pkg_resources(package: str) -> Optional[str]: + # pylint: disable=import-outside-toplevel + # Bundled with setuptools; has a good chance of being available. + import pkg_resources + + try: + return str(pkg_resources.get_distribution(package).version) + except pkg_resources.DistributionNotFound: + return None + + +def _get_version(package: str) -> Optional[str]: + try: + return _get_version_importlib(package) + except ImportError as exc: + logger.debug("%s", str(exc)) + + try: + return _get_version_pkg_resources(package) + except ImportError as exc: + logger.debug("%s", str(exc)) + raise Ouch( + "Neither importlib.metadata nor pkg_resources found. " + "Use Python 3.8+, or install importlib-metadata or setuptools." + ) from exc + + +def diagnose( + dep_spec: str, + online: bool, + wheels_dir: Optional[Union[str, Path]], + prog: Optional[str], +) -> Tuple[str, bool]: + """ + Offer a summary to the user as to why a package failed to be installed. + + :param dep_spec: The package we tried to ensure, e.g. 
'meson>=0.61.5' + :param online: Did we allow PyPI access? + :param prog: + Optionally, a shell program name that can be used as a + bellwether to detect if this program is installed elsewhere on + the system. This is used to offer advice when a program is + detected for a different python version. + :param wheels_dir: + Optionally, a directory that was searched for vendored packages. + """ + # pylint: disable=too-many-branches + + # Some errors are not particularly serious + bad = False + + pkg_name = pkgname_from_depspec(dep_spec) + pkg_version = _get_version(pkg_name) + + lines = [] + + if pkg_version: + lines.append( + f"Python package '{pkg_name}' version '{pkg_version}' was found," + " but isn't suitable." + ) + else: + lines.append( + f"Python package '{pkg_name}' was not found nor installed." + ) + + if wheels_dir: + lines.append( + "No suitable version found in, or failed to install from" + f" '{wheels_dir}'." + ) + bad = True + + if online: + lines.append("A suitable version could not be obtained from PyPI.") + bad = True + else: + lines.append( + "mkvenv was configured to operate offline and did not check PyPI." + ) + + if prog and not pkg_version: + which = shutil.which(prog) + if which: + if sys.base_prefix in site.PREFIXES: + pypath = Path(sys.executable).resolve() + lines.append( + f"'{prog}' was detected on your system at '{which}', " + f"but the Python package '{pkg_name}' was not found by " + f"this Python interpreter ('{pypath}'). " + f"Typically this means that '{prog}' has been installed " + "against a different Python interpreter on your system." + ) + else: + lines.append( + f"'{prog}' was detected on your system at '{which}', " + "but the build is using an isolated virtual environment." + ) + bad = True + + lines = [f" • {line}" for line in lines] + if bad: + lines.insert(0, f"Could not provide build dependency '{dep_spec}':") + else: + lines.insert(0, f"'{dep_spec}' not found:") + return os.linesep.join(lines), bad + + +def pip_install( + args: Sequence[str], + online: bool = False, + wheels_dir: Optional[Union[str, Path]] = None, +) -> None: + """ + Use pip to install a package or package(s) as specified in @args. + """ + loud = bool( + os.environ.get("DEBUG") + or os.environ.get("GITLAB_CI") + or os.environ.get("V") + ) + + full_args = [ + sys.executable, + "-m", + "pip", + "install", + "--disable-pip-version-check", + "-v" if loud else "-q", + ] + if not online: + full_args += ["--no-index"] + if wheels_dir: + full_args += ["--find-links", f"file://{str(wheels_dir)}"] + full_args += list(args) + subprocess.run( + full_args, + check=True, + ) + + +def _do_ensure( + dep_specs: Sequence[str], + online: bool = False, + wheels_dir: Optional[Union[str, Path]] = None, + prog: Optional[str] = None, +) -> Optional[Tuple[str, bool]]: + """ + Use pip to ensure we have the package specified by @dep_specs. + + If the package is already installed, do nothing. If online and + wheels_dir are both provided, prefer packages found in wheels_dir + first before connecting to PyPI. + + :param dep_specs: + PEP 508 dependency specifications. e.g. ['meson>=0.61.5']. + :param online: If True, fall back to PyPI. + :param wheels_dir: If specified, search this path for packages. 
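The matching step implemented just below leans on distlib; a minimal sketch of that check in isolation, with an illustrative spec and versions (distlib, or pip's vendored copy, is assumed to be importable):

    import distlib.version

    matcher = distlib.version.LegacyMatcher("meson>=0.61.5")
    print(matcher.name)                                            # meson
    print(matcher.match(distlib.version.LegacyVersion("0.63.3")))  # True
    print(matcher.match(distlib.version.LegacyVersion("0.55.0")))  # False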
+ """ + absent = [] + present = [] + for spec in dep_specs: + matcher = distlib.version.LegacyMatcher(spec) + ver = _get_version(matcher.name) + if ver is None or not matcher.match( + distlib.version.LegacyVersion(ver) + ): + absent.append(spec) + else: + logger.info("found %s %s", matcher.name, ver) + present.append(matcher.name) + + if present: + generate_console_scripts(present) + + if absent: + if online or wheels_dir: + # Some packages are missing or aren't a suitable version, + # install a suitable (possibly vendored) package. + print(f"mkvenv: installing {', '.join(absent)}", file=sys.stderr) + try: + pip_install(args=absent, online=online, wheels_dir=wheels_dir) + return None + except subprocess.CalledProcessError: + pass + + return diagnose( + absent[0], + online, + wheels_dir, + prog if absent[0] == dep_specs[0] else None, + ) + + return None + + +def ensure( + dep_specs: Sequence[str], + online: bool = False, + wheels_dir: Optional[Union[str, Path]] = None, + prog: Optional[str] = None, +) -> None: + """ + Use pip to ensure we have the package specified by @dep_specs. + + If the package is already installed, do nothing. If online and + wheels_dir are both provided, prefer packages found in wheels_dir + first before connecting to PyPI. + + :param dep_specs: + PEP 508 dependency specifications. e.g. ['meson>=0.61.5']. + :param online: If True, fall back to PyPI. + :param wheels_dir: If specified, search this path for packages. + :param prog: + If specified, use this program name for error diagnostics that will + be presented to the user. e.g., 'sphinx-build' can be used as a + bellwether for the presence of 'sphinx'. + """ + print(f"mkvenv: checking for {', '.join(dep_specs)}", file=sys.stderr) + + if not HAVE_DISTLIB: + raise Ouch("a usable distlib could not be found, please install it") + + result = _do_ensure(dep_specs, online, wheels_dir, prog) + if result: + # Well, that's not good. + if result[1]: + raise Ouch(result[0]) + raise SystemExit(f"\n{result[0]}\n\n") + + +def post_venv_setup() -> None: + """ + This is intended to be run *inside the venv* after it is created. + """ + logger.debug("post_venv_setup()") + # Test for a broken pip (Debian 10 or derivative?) and fix it if needed + if not checkpip(): + # Finally, generate a 'pip' script so the venv is usable in a normal + # way from the CLI. This only happens when we inherited pip from a + # parent/system-site and haven't run ensurepip in some way. + generate_console_scripts(["pip"]) + + +def _add_create_subcommand(subparsers: Any) -> None: + subparser = subparsers.add_parser("create", help="create a venv") + subparser.add_argument( + "target", + type=str, + action="store", + help="Target directory to install virtual environment into.", + ) + + +def _add_post_init_subcommand(subparsers: Any) -> None: + subparsers.add_parser("post_init", help="post-venv initialization") + + +def _add_ensure_subcommand(subparsers: Any) -> None: + subparser = subparsers.add_parser( + "ensure", help="Ensure that the specified package is installed." + ) + subparser.add_argument( + "--online", + action="store_true", + help="Install packages from PyPI, if necessary.", + ) + subparser.add_argument( + "--dir", + type=str, + action="store", + help="Path to vendored packages where we may install from.", + ) + subparser.add_argument( + "--diagnose", + type=str, + action="store", + help=( + "Name of a shell utility to use for " + "diagnostics if this command fails." 
+ ), + ) + subparser.add_argument( + "dep_specs", + type=str, + action="store", + help="PEP 508 Dependency specification, e.g. 'meson>=0.61.5'", + nargs="+", + ) + + +def main() -> int: + """CLI interface to make_qemu_venv. See module docstring.""" + if os.environ.get("DEBUG") or os.environ.get("GITLAB_CI"): + # You're welcome. + logging.basicConfig(level=logging.DEBUG) + else: + if os.environ.get("V"): + logging.basicConfig(level=logging.INFO) + + parser = argparse.ArgumentParser( + prog="mkvenv", + description="QEMU pyvenv bootstrapping utility", + ) + subparsers = parser.add_subparsers( + title="Commands", + dest="command", + required=True, + metavar="command", + help="Description", + ) + + _add_create_subcommand(subparsers) + _add_post_init_subcommand(subparsers) + _add_ensure_subcommand(subparsers) + + args = parser.parse_args() + try: + if args.command == "create": + make_venv( + args.target, + system_site_packages=True, + clear=True, + ) + if args.command == "post_init": + post_venv_setup() + if args.command == "ensure": + ensure( + dep_specs=args.dep_specs, + online=args.online, + wheels_dir=args.dir, + prog=args.diagnose, + ) + logger.debug("mkvenv.py %s: exiting", args.command) + except Ouch as exc: + print("\n*** Ouch! ***\n", file=sys.stderr) + print(str(exc), "\n\n", file=sys.stderr) + return 1 + except SystemExit: + raise + except: # pylint: disable=bare-except + logger.exception("mkvenv did not complete successfully:") + return 2 + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/python/scripts/vendor.py b/python/scripts/vendor.py new file mode 100755 index 0000000000..34486a51f4 --- /dev/null +++ b/python/scripts/vendor.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +""" +vendor - QEMU python vendoring utility + +usage: vendor [-h] + +QEMU python vendoring utility + +options: + -h, --help show this help message and exit +""" + +# Copyright (C) 2023 Red Hat, Inc. +# +# Authors: +# John Snow +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import argparse +import os +from pathlib import Path +import subprocess +import sys +import tempfile + + +def main() -> int: + """Run the vendoring utility. See module-level docstring.""" + loud = False + if os.environ.get("DEBUG") or os.environ.get("V"): + loud = True + + # No options or anything for now, but I guess + # you'll figure that out when you run --help. 
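+ # For reference: the temporary requirements file written further below
+ # uses pip's hash-pinning syntax, one line per package, e.g.
+ #   meson==0.63.3 --hash=sha256:d677b809c4895dcbaac9bf6c43703fcb3609a4b24c6057c78f828590049cf43a
+ # so that `pip download --require-hashes` rejects any artifact whose
+ # sha256 digest does not match the pin.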
+ parser = argparse.ArgumentParser( + prog="vendor", + description="QEMU python vendoring utility", + ) + parser.parse_args() + + packages = { + "meson==0.63.3": + "d677b809c4895dcbaac9bf6c43703fcb3609a4b24c6057c78f828590049cf43a", + } + + vendor_dir = Path(__file__, "..", "..", "wheels").resolve() + + with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8") as file: + for dep_spec, checksum in packages.items(): + file.write(f"{dep_spec} --hash=sha256:{checksum}") + file.flush() + + cli_args = [ + "pip", + "download", + "--dest", + str(vendor_dir), + "--require-hashes", + "-r", + file.name, + ] + if loud: + cli_args.append("-v") + + print(" ".join(cli_args)) + subprocess.run(cli_args, check=True) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/python/setup.cfg b/python/setup.cfg index 9e923d9762..42f0b0be07 100644 --- a/python/setup.cfg +++ b/python/setup.cfg @@ -14,7 +14,6 @@ classifiers = Natural Language :: English Operating System :: OS Independent Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 @@ -23,7 +22,7 @@ classifiers = Typing :: Typed [options] -python_requires = >= 3.6 +python_requires = >= 3.7 packages = qemu.qmp qemu.machine @@ -36,11 +35,12 @@ packages = # Remember to update tests/minreqs.txt if changing anything below: devel = avocado-framework >= 90.0 - flake8 >= 3.6.0 + distlib >= 0.3.6 + flake8 >= 5.0.4 fusepy >= 2.0.4 isort >= 5.1.2 mypy >= 0.780 - pylint >= 2.8.0 + pylint >= 2.17.3 tox >= 3.18.0 urwid >= 2.1.2 urwid-readline >= 0.13 @@ -76,7 +76,7 @@ exclude = __pycache__, [mypy] strict = True -python_version = 3.6 +python_version = 3.7 warn_unused_configs = True namespace_packages = True warn_unused_ignores = False @@ -103,6 +103,33 @@ ignore_missing_imports = True [mypy-pygments] ignore_missing_imports = True +[mypy-importlib.metadata] +ignore_missing_imports = True + +[mypy-importlib_metadata] +ignore_missing_imports = True + +[mypy-pkg_resources] +ignore_missing_imports = True + +[mypy-distlib] +ignore_missing_imports = True + +[mypy-distlib.scripts] +ignore_missing_imports = True + +[mypy-distlib.version] +ignore_missing_imports = True + +[mypy-pip._vendor.distlib] +ignore_missing_imports = True + +[mypy-pip._vendor.distlib.scripts] +ignore_missing_imports = True + +[mypy-pip._vendor.distlib.version] +ignore_missing_imports = True + [pylint.messages control] # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifiers separated by comma (,) or put this @@ -132,6 +159,7 @@ good-names=i, fd, # fd = os.open(...) c, # for c in string: ... T, # for TypeVars. See pylint#3401 + SocketAddrT, # Not sure why this is invalid. [pylint.similarities] # Ignore imports when computing similarities. @@ -158,7 +186,7 @@ multi_line_output=3 # of python available on your system to run this test. 
[tox:tox] -envlist = py36, py37, py38, py39, py310, py311 +envlist = py37, py38, py39, py310, py311 skip_missing_interpreters = true [testenv] diff --git a/python/tests/flake8.sh b/python/tests/flake8.sh index 1cd7d40fad..e013699645 100755 --- a/python/tests/flake8.sh +++ b/python/tests/flake8.sh @@ -1,2 +1,3 @@ #!/bin/sh -e python3 -m flake8 qemu/ +python3 -m flake8 scripts/ diff --git a/python/tests/isort.sh b/python/tests/isort.sh index 4480405bfb..66c2f7df0f 100755 --- a/python/tests/isort.sh +++ b/python/tests/isort.sh @@ -1,2 +1,3 @@ #!/bin/sh -e python3 -m isort -c qemu/ +python3 -m isort -c scripts/ diff --git a/python/tests/minreqs.txt b/python/tests/minreqs.txt index dfb8abb155..1ce72cef6d 100644 --- a/python/tests/minreqs.txt +++ b/python/tests/minreqs.txt @@ -1,5 +1,5 @@ # This file lists the ***oldest possible dependencies*** needed to run -# "make check" successfully under ***Python 3.6***. It is used primarily +# "make check" successfully under ***Python 3.7***. It is used primarily # by GitLab CI to ensure that our stated minimum versions in setup.cfg # are truthful and regularly validated. # @@ -16,6 +16,9 @@ urwid==2.1.2 urwid-readline==0.13 Pygments==2.9.0 +# Dependencies for mkvenv +distlib==0.3.6 + # Dependencies for FUSE support for qom-fuse fusepy==2.0.4 @@ -23,23 +26,23 @@ fusepy==2.0.4 avocado-framework==90.0 # Linters -flake8==3.6.0 +flake8==5.0.4 isort==5.1.2 mypy==0.780 -pylint==2.8.0 +pylint==2.17.3 # Transitive flake8 dependencies -mccabe==0.6.0 -pycodestyle==2.4.0 -pyflakes==2.0.0 +mccabe==0.7.0 +pycodestyle==2.9.1 +pyflakes==2.5.0 # Transitive mypy dependencies mypy-extensions==0.4.3 typed-ast==1.4.0 -typing-extensions==3.7.4 +typing-extensions==4.5.0 # Transitive pylint dependencies -astroid==2.5.4 +astroid==2.15.4 lazy-object-proxy==1.4.0 toml==0.10.0 wrapt==1.12.1 diff --git a/python/tests/mypy.sh b/python/tests/mypy.sh index 5f980f563b..a33a3f58ab 100755 --- a/python/tests/mypy.sh +++ b/python/tests/mypy.sh @@ -1,2 +1,3 @@ #!/bin/sh -e python3 -m mypy -p qemu +python3 -m mypy scripts/ diff --git a/python/tests/pylint.sh b/python/tests/pylint.sh index 03d64705a1..2b68da90df 100755 --- a/python/tests/pylint.sh +++ b/python/tests/pylint.sh @@ -1,3 +1,4 @@ #!/bin/sh -e # See commit message for environment variable explainer. SETUPTOOLS_USE_DISTUTILS=stdlib python3 -m pylint qemu/ +SETUPTOOLS_USE_DISTUTILS=stdlib python3 -m pylint scripts/ diff --git a/python/wheels/meson-0.63.3-py3-none-any.whl b/python/wheels/meson-0.63.3-py3-none-any.whl new file mode 100644 index 0000000000..8a191e3a20 Binary files /dev/null and b/python/wheels/meson-0.63.3-py3-none-any.whl differ diff --git a/qapi/acpi.json b/qapi/acpi.json index d148f6db9f..e0739bd6ae 100644 --- a/qapi/acpi.json +++ b/qapi/acpi.json @@ -14,18 +14,19 @@ # # Specify an ACPI table on the command line to load. # -# At most one of @file and @data can be specified. The list of files specified -# by any one of them is loaded and concatenated in order. If both are omitted, -# @data is implied. +# At most one of @file and @data can be specified. The list of files +# specified by any one of them is loaded and concatenated in order. +# If both are omitted, @data is implied. # -# Other fields / optargs can be used to override fields of the generic ACPI -# table header; refer to the ACPI specification 5.0, section 5.2.6 System -# Description Table Header. 
If a header field is not overridden, then the -# corresponding value from the concatenated blob is used (in case of @file), or -# it is filled in with a hard-coded value (in case of @data). +# Other fields / optargs can be used to override fields of the generic +# ACPI table header; refer to the ACPI specification 5.0, section +# 5.2.6 System Description Table Header. If a header field is not +# overridden, then the corresponding value from the concatenated blob +# is used (in case of @file), or it is filled in with a hard-coded +# value (in case of @data). # -# String fields are copied into the matching ACPI member from lowest address -# upwards, and silently truncated / NUL-padded to length. +# String fields are copied into the matching ACPI member from lowest +# address upwards, and silently truncated / NUL-padded to length. # # @sig: table signature / identifier (4 bytes) # @@ -38,20 +39,20 @@ # @oem_rev: OEM-supplied revision number (4 bytes) # # @asl_compiler_id: identifier of the utility that created the table -# (4 bytes) +# (4 bytes) # # @asl_compiler_rev: revision number of the utility that created the -# table (4 bytes) +# table (4 bytes) # -# @file: colon (:) separated list of pathnames to load and -# concatenate as table data. The resultant binary blob is expected to -# have an ACPI table header. At least one file is required. This field -# excludes @data. +# @file: colon (:) separated list of pathnames to load and concatenate +# as table data. The resultant binary blob is expected to have an +# ACPI table header. At least one file is required. This field +# excludes @data. # -# @data: colon (:) separated list of pathnames to load and -# concatenate as table data. The resultant binary blob must not have an -# ACPI table header. At least one file is required. This field excludes -# @file. +# @data: colon (:) separated list of pathnames to load and concatenate +# as table data. The resultant binary blob must not have an ACPI +# table header. At least one file is required. This field +# excludes @file. # # Since: 1.5 ## @@ -71,6 +72,7 @@ # @ACPISlotType: # # @DIMM: memory slot +# # @CPU: logical CPU slot (since 2.7) ## { 'enum': 'ACPISlotType', 'data': [ 'DIMM', 'CPU' ] } @@ -78,9 +80,9 @@ ## # @ACPIOSTInfo: # -# OSPM Status Indication for a device -# For description of possible values of @source and @status fields -# see "_OST (OSPM Status Indication)" chapter of ACPI5.0 spec. +# OSPM Status Indication for a device. For a description of possible +# values of @source and @status fields, see the "_OST (OSPM Status +# Indication)" chapter of the ACPI 5.0 spec. # # @device: device ID associated with slot # @@ -117,7 +119,6 @@ # { "slot": "2", "slot-type": "DIMM", "source": 0, "status": 0}, # { "slot": "3", "slot-type": "DIMM", "source": 0, "status": 0} # ]} -# ## { 'command': 'query-acpi-ospm-status', 'returns': ['ACPIOSTInfo'] } @@ -136,7 +137,6 @@ # "data": { "info": { "device": "d1", "slot": "0", # "slot-type": "DIMM", "source": 1, "status": 0 } }, # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } -# ## { 'event': 'ACPI_DEVICE_OST', 'data': { 'info': 'ACPIOSTInfo' } } diff --git a/qapi/audio.json b/qapi/audio.json index 4e54c00f51..534f10d8b1 100644 --- a/qapi/audio.json +++ b/qapi/audio.json @@ -16,24 +16,24 @@ # General audio backend options that are used for both playback and # recording. # -# @mixing-engine: use QEMU's mixing engine to mix all streams inside QEMU and -# convert audio formats when not supported by the backend.
When -# set to off, fixed-settings must be also off (default on, -# since 4.2) +# @mixing-engine: use QEMU's mixing engine to mix all streams inside +# QEMU and convert audio formats when not supported by the +# backend. When set to off, fixed-settings must be also off +# (default on, since 4.2) # -# @fixed-settings: use fixed settings for host input/output. When off, -# frequency, channels and format must not be -# specified (default true) +# @fixed-settings: use fixed settings for host input/output. When +# off, frequency, channels and format must not be specified +# (default true) # -# @frequency: frequency to use when using fixed settings -# (default 44100) +# @frequency: frequency to use when using fixed settings (default +# 44100) # # @channels: number of channels when using fixed settings (default 2) # # @voices: number of voices to use (default 1) # -# @format: sample format to use when using fixed settings -# (default s16) +# @format: sample format to use when using fixed settings (default +# s16) # # @buffer-length: the buffer length in microseconds # @@ -76,7 +76,7 @@ # @period-length: the period length in microseconds # # @try-poll: attempt to use poll mode, falling back to non-polling -# access on failure (default true) +# access on failure (default true) # # Since: 4.0 ## @@ -131,8 +131,8 @@ ## # @AudiodevCoreaudioPerDirectionOptions: # -# Options of the Core Audio backend that are used for both playback and -# recording. +# Options of the Core Audio backend that are used for both playback +# and recording. # # @buffer-count: number of buffers # @@ -168,8 +168,8 @@ # # @out: options of the playback stream # -# @latency: add extra latency to playback in microseconds -# (default 10000) +# @latency: add extra latency to playback in microseconds (default +# 10000) # # Since: 4.0 ## @@ -185,21 +185,22 @@ # Options of the JACK backend that are used for both playback and # recording. # -# @server-name: select from among several possible concurrent server instances -# (default: environment variable $JACK_DEFAULT_SERVER if set, else "default") +# @server-name: select from among several possible concurrent server +# instances (default: environment variable $JACK_DEFAULT_SERVER if +# set, else "default") # -# @client-name: the client name to use. The server will modify this name to -# create a unique variant, if needed unless @exact-name is true (default: the -# guest's name) +# @client-name: the client name to use. 
The server will modify this +# name to create a unique variant, if needed unless @exact-name is +# true (default: the guest's name) # -# @connect-ports: if set, a regular expression of JACK client port name(s) to -# monitor for and automatically connect to +# @connect-ports: if set, a regular expression of JACK client port +# name(s) to monitor for and automatically connect to # -# @start-server: start a jack server process if one is not already present -# (default: false) +# @start-server: start a jack server process if one is not already +# present (default: false) # -# @exact-name: use the exact name requested otherwise JACK automatically -# generates a unique one, if needed (default: false) +# @exact-name: use the exact name requested otherwise JACK +# automatically generates a unique one, if needed (default: false) # # Since: 5.1 ## @@ -239,7 +240,7 @@ # @buffer-count: number of buffers # # @try-poll: attempt to use poll mode, falling back to non-polling -# access on failure (default true) +# access on failure (default true) # # Since: 4.0 ## @@ -260,15 +261,15 @@ # @out: options of the playback stream # # @try-mmap: try using memory-mapped access, falling back to -# non-memory-mapped access on failure (default true) +# non-memory-mapped access on failure (default true) # -# @exclusive: open device in exclusive mode (vmix won't work) -# (default false) +# @exclusive: open device in exclusive mode (vmix won't work) (default +# false) # # @dsp-policy: set the timing policy of the device (between 0 and 10, -# where smaller number means smaller latency but higher -# CPU usage) or -1 to use fragment mode (option ignored -# on some platforms) (default 5) +# where smaller number means smaller latency but higher CPU usage) +# or -1 to use fragment mode (option ignored on some platforms) +# (default 5) # # Since: 4.0 ## @@ -283,18 +284,18 @@ ## # @AudiodevPaPerDirectionOptions: # -# Options of the Pulseaudio backend that are used for both playback and -# recording. +# Options of the Pulseaudio backend that are used for both playback +# and recording. # # @name: name of the sink/source to use # # @stream-name: name of the PulseAudio stream created by qemu. Can be -# used to identify the stream in PulseAudio when you -# create multiple PulseAudio devices or run multiple qemu -# instances (default: audiodev's id, since 4.2) +# used to identify the stream in PulseAudio when you create +# multiple PulseAudio devices or run multiple qemu instances +# (default: audiodev's id, since 4.2) # # @latency: latency you want PulseAudio to achieve in microseconds -# (default 15000) +# (default 15000) # # Since: 4.0 ## @@ -324,6 +325,47 @@ '*out': 'AudiodevPaPerDirectionOptions', '*server': 'str' } } +## +# @AudiodevPipewirePerDirectionOptions: +# +# Options of the Pipewire backend that are used for both playback and +# recording. +# +# @name: name of the sink/source to use +# +# @stream-name: name of the Pipewire stream created by qemu. Can be +# used to identify the stream in Pipewire when you create multiple +# Pipewire devices or run multiple qemu instances (default: +# audiodev's id) +# +# @latency: latency you want Pipewire to achieve in microseconds +# (default 46000) +# +# Since: 8.1 +## +{ 'struct': 'AudiodevPipewirePerDirectionOptions', + 'base': 'AudiodevPerDirectionOptions', + 'data': { + '*name': 'str', + '*stream-name': 'str', + '*latency': 'uint32' } } + +## +# @AudiodevPipewireOptions: +# +# Options of the Pipewire audio backend. 
+# +# @in: options of the capture stream +# +# @out: options of the playback stream +# +# Since: 8.1 +## +{ 'struct': 'AudiodevPipewireOptions', + 'data': { + '*in': 'AudiodevPipewirePerDirectionOptions', + '*out': 'AudiodevPipewirePerDirectionOptions' } } + ## # @AudiodevSdlPerDirectionOptions: # @@ -416,6 +458,7 @@ { 'name': 'jack', 'if': 'CONFIG_AUDIO_JACK' }, { 'name': 'oss', 'if': 'CONFIG_AUDIO_OSS' }, { 'name': 'pa', 'if': 'CONFIG_AUDIO_PA' }, + { 'name': 'pipewire', 'if': 'CONFIG_AUDIO_PIPEWIRE' }, { 'name': 'sdl', 'if': 'CONFIG_AUDIO_SDL' }, { 'name': 'sndio', 'if': 'CONFIG_AUDIO_SNDIO' }, { 'name': 'spice', 'if': 'CONFIG_SPICE' }, @@ -430,7 +473,8 @@ # # @driver: the backend driver to use # -# @timer-period: timer period (in microseconds, 0: use lowest possible) +# @timer-period: timer period (in microseconds, 0: use lowest +# possible) # # Since: 4.0 ## @@ -456,6 +500,8 @@ 'if': 'CONFIG_AUDIO_OSS' }, 'pa': { 'type': 'AudiodevPaOptions', 'if': 'CONFIG_AUDIO_PA' }, + 'pipewire': { 'type': 'AudiodevPipewireOptions', + 'if': 'CONFIG_AUDIO_PIPEWIRE' }, 'sdl': { 'type': 'AudiodevSdlOptions', 'if': 'CONFIG_AUDIO_SDL' }, 'sndio': { 'type': 'AudiodevSndioOptions', @@ -472,7 +518,6 @@ # Returns: array of @Audiodev # # Since: 8.0 -# ## { 'command': 'query-audiodevs', 'returns': ['Audiodev'] } diff --git a/qapi/authz.json b/qapi/authz.json index 51845e37cc..7fc6e3032e 100644 --- a/qapi/authz.json +++ b/qapi/authz.json @@ -11,6 +11,7 @@ # The authorization policy result # # @deny: deny access +# # @allow: allow access # # Since: 4.0 @@ -25,6 +26,7 @@ # The authorization policy match format # # @exact: an exact string match +# # @glob: string with ? and * shell wildcard support # # Since: 4.0 @@ -39,7 +41,9 @@ # A single authorization rule. # # @match: a string or glob to match against a user identity +# # @policy: the result to return if @match evaluates to true +# # @format: the format of the @match rule (default 'exact') # # Since: 4.0 @@ -54,7 +58,8 @@ # # Properties for authz-list objects. # -# @policy: Default policy to apply when no rule matches (default: deny) +# @policy: Default policy to apply when no rule matches (default: +# deny) # # @rules: Authorization rules based on matching user # @@ -69,14 +74,14 @@ # # Properties for authz-listfile objects. # -# @filename: File name to load the configuration from. The file must -# contain valid JSON for AuthZListProperties. +# @filename: File name to load the configuration from. The file must +# contain valid JSON for AuthZListProperties. # -# @refresh: If true, inotify is used to monitor the file, automatically -# reloading changes. If an error occurs during reloading, all -# authorizations will fail until the file is next successfully -# loaded. (default: true if the binary was built with -# CONFIG_INOTIFY1, false otherwise) +# @refresh: If true, inotify is used to monitor the file, +# automatically reloading changes. If an error occurs during +# reloading, all authorizations will fail until the file is next +# successfully loaded. (default: true if the binary was built +# with CONFIG_INOTIFY1, false otherwise) # # Since: 4.0 ## @@ -101,10 +106,10 @@ # # Properties for authz-simple objects. # -# @identity: Identifies the allowed user. Its format depends on the network -# service that authorization object is associated with. For -# authorizing based on TLS x509 certificates, the identity must be -# the x509 distinguished name. +# @identity: Identifies the allowed user. 
Its format depends on the +# network service that the authorization object is associated with. +# For authorizing based on TLS x509 certificates, the identity +# must be the x509 distinguished name. # # Since: 4.0 ## diff --git a/qapi/block-core.json b/qapi/block-core.json index c05ad0c07e..98d9116dae 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -25,15 +25,16 @@ # # @vm-clock-sec: VM clock relative to boot in seconds # -# @vm-clock-nsec: fractional part in nano seconds to be used with vm-clock-sec +# @vm-clock-nsec: fractional part in nanoseconds to be used with +# vm-clock-sec # -# @icount: Current instruction count. Appears when execution record/replay -# is enabled. Used for "time-traveling" to match the moment -# in the recorded execution with the snapshots. This counter may -# be obtained through @query-replay command (since 5.2) +# @icount: Current instruction count. Appears when execution +# record/replay is enabled. Used for "time-traveling" to match +# the moment in the recorded execution with the snapshots. This +# counter may be obtained through the @query-replay command (since +# 5.2) # # Since: 1.3 -# ## { 'struct': 'SnapshotInfo', 'data': { 'id': 'str', 'name': 'str', 'vm-state-size': 'int', @@ -66,25 +67,26 @@ # # @compat: compatibility level # -# @data-file: the filename of the external data file that is stored in the -# image and used as a default for opening the image (since: 4.0) +# @data-file: the filename of the external data file that is stored in +# the image and used as a default for opening the image +# (since: 4.0) # # @data-file-raw: True if the external data file must stay valid as a -# standalone (read-only) raw image without looking at qcow2 -# metadata (since: 4.0) +# standalone (read-only) raw image without looking at qcow2 +# metadata (since: 4.0) # -# @extended-l2: true if the image has extended L2 entries; only valid for -# compat >= 1.1 (since 5.2) +# @extended-l2: true if the image has extended L2 entries; only valid +# for compat >= 1.1 (since 5.2) # # @lazy-refcounts: on or off; only valid for compat >= 1.1 # # @corrupt: true if the image has been marked corrupt; only valid for -# compat >= 1.1 (since 2.2) +# compat >= 1.1 (since 2.2) # # @refcount-bits: width of a refcount entry in bits (since 2.3) # -# @encrypt: details about encryption parameters; only set if image -# is encrypted (since 2.10) +# @encrypt: details about encryption parameters; only set if image is +# encrypted (since 2.10) # # @bitmaps: A list of qcow2 bitmap details (since 4.0) # @@ -134,7 +136,7 @@ # # @filename: Name of the extent file # -# @format: Extent type (e.g. FLAT or SPARSE) +# @format: Extent type (e.g. FLAT or SPARSE) # # @virtual-size: Number of bytes covered by this extent # @@ -181,7 +183,9 @@ # @ImageInfoSpecificKind: # # @luks: Since 2.7 +# # @rbd: Since 6.1 +# # @file: Since 8.0 # # Since: 1.7 @@ -235,7 +239,8 @@ ## # @ImageInfoSpecific: # -# A discriminated record of image format specific information structures. +# A discriminated record of image format specific information +# structures.
# # Since: 1.7 ## @@ -280,7 +285,7 @@ # @snapshots: list of VM snapshots # # @format-specific: structure supplying additional format-specific -# information (since 1.7) +# information (since 1.7) # # Since: 8.0 ## @@ -295,7 +300,8 @@ ## # @ImageInfo: # -# Information about a QEMU image file, and potentially its backing image +# Information about a QEMU image file, and potentially its backing +# image # # @backing-image: info of the backing image # @@ -310,11 +316,12 @@ ## # @BlockChildInfo: # -# Information about all nodes in the block graph starting at some node, -# annotated with information about that node in relation to its parent. +# Information about all nodes in the block graph starting at some +# node, annotated with information about that node in relation to its +# parent. # -# @name: Child name of the root node in the BlockGraphInfo struct, in its role -# as the child of some undescribed parent node +# @name: Child name of the root node in the BlockGraphInfo struct, in +# its role as the child of some undescribed parent node # # @info: Block graph information starting at this node # @@ -329,10 +336,9 @@ ## # @BlockGraphInfo: # -# Information about all nodes in a block (sub)graph in the form of BlockNodeInfo -# data. -# The base BlockNodeInfo struct contains the information for the (sub)graph's -# root node. +# Information about all nodes in a block (sub)graph in the form of +# BlockNodeInfo data. The base BlockNodeInfo struct contains the +# information for the (sub)graph's root node. # # @children: Array of links to this node's child nodes' information # @@ -354,32 +360,28 @@ # @check-errors: number of unexpected errors occurred during check # # @image-end-offset: offset (in bytes) where the image ends, this -# field is present if the driver for the image format -# supports it +# field is present if the driver for the image format supports it # # @corruptions: number of corruptions found during the check if any # # @leaks: number of leaks found during the check if any # -# @corruptions-fixed: number of corruptions fixed during the check -# if any +# @corruptions-fixed: number of corruptions fixed during the check if +# any # # @leaks-fixed: number of leaks fixed during the check if any # -# @total-clusters: total number of clusters, this field is present -# if the driver for the image format supports it +# @total-clusters: total number of clusters, this field is present if +# the driver for the image format supports it # -# @allocated-clusters: total number of allocated clusters, this -# field is present if the driver for the image format -# supports it +# @allocated-clusters: total number of allocated clusters, this field +# is present if the driver for the image format supports it # # @fragmented-clusters: total number of fragmented clusters, this -# field is present if the driver for the image format -# supports it +# field is present if the driver for the image format supports it # # @compressed-clusters: total number of compressed clusters, this -# field is present if the driver for the image format -# supports it +# field is present if the driver for the image format supports it # # Since: 1.4 ## @@ -396,27 +398,27 @@ # Mapping information from a virtual block range to a host file range # # @start: virtual (guest) offset of the first byte described by this -# entry +# entry # # @length: the number of bytes of the mapped virtual range # # @data: reading the image will actually read data from a file (in -# particular, if @offset is present this means that the sectors -# are 
not simply preallocated, but contain actual data in raw -# format) +# particular, if @offset is present this means that the sectors +# are not simply preallocated, but contain actual data in raw +# format) # # @zero: whether the virtual blocks read as zeroes # # @depth: number of layers (0 = top image, 1 = top image's backing -# file, ..., n - 1 = bottom image (where n is the number of -# images in the chain)) before reaching one for which the -# range is allocated +# file, ..., n - 1 = bottom image (where n is the number of images +# in the chain)) before reaching one for which the range is +# allocated # -# @present: true if this layer provides the data, false if adding a backing -# layer could impact this region (since 6.1) +# @present: true if this layer provides the data, false if adding a +# backing layer could impact this region (since 6.1) # # @offset: if present, the image file stores the data for this range -# in raw format at the given (host) offset +# in raw format at the given (host) offset # # @filename: filename that is referred to by @offset # @@ -433,7 +435,9 @@ # Cache mode information for a block device # # @writeback: true if writeback mode is enabled +# # @direct: true if the host page cache is bypassed (O_DIRECT) +# # @no-flush: true if flush requests are ignored for the device # # Since: 2.3 @@ -454,21 +458,19 @@ # # @ro: true if the backing device was open read-only # -# @drv: the name of the block format used to open the backing device. As of -# 0.14 this can be: 'blkdebug', 'bochs', 'cloop', 'cow', 'dmg', -# 'file', 'file', 'ftp', 'ftps', 'host_cdrom', 'host_device', -# 'http', 'https', 'luks', 'nbd', 'parallels', 'qcow', -# 'qcow2', 'raw', 'vdi', 'vmdk', 'vpc', 'vvfat' -# 2.2: 'archipelago' added, 'cow' dropped -# 2.3: 'host_floppy' deprecated -# 2.5: 'host_floppy' dropped -# 2.6: 'luks' added -# 2.8: 'replication' added, 'tftp' dropped -# 2.9: 'archipelago' dropped +# @drv: the name of the block format used to open the backing device. 
+# As of 0.14 this can be: 'blkdebug', 'bochs', 'cloop', 'cow', +# 'dmg', 'file', 'file', 'ftp', 'ftps', 'host_cdrom', +# 'host_device', 'http', 'https', 'luks', 'nbd', 'parallels', +# 'qcow', 'qcow2', 'raw', 'vdi', 'vmdk', 'vpc', 'vvfat' 2.2: +# 'archipelago' added, 'cow' dropped 2.3: 'host_floppy' deprecated +# 2.5: 'host_floppy' dropped 2.6: 'luks' added 2.8: 'replication' +# added, 'tftp' dropped 2.9: 'archipelago' dropped # # @backing_file: the name of the backing file (for copy-on-write) # -# @backing_file_depth: number of files in the backing file chain (since: 1.2) +# @backing_file_depth: number of files in the backing file chain +# (since: 1.2) # # @encrypted: true if the backing device is encrypted # @@ -488,41 +490,40 @@ # # @image: the info of image used (since: 1.6) # -# @bps_max: total throughput limit during bursts, -# in bytes (Since 1.7) +# @bps_max: total throughput limit during bursts, in bytes (Since 1.7) # -# @bps_rd_max: read throughput limit during bursts, -# in bytes (Since 1.7) +# @bps_rd_max: read throughput limit during bursts, in bytes (Since +# 1.7) # -# @bps_wr_max: write throughput limit during bursts, -# in bytes (Since 1.7) +# @bps_wr_max: write throughput limit during bursts, in bytes (Since +# 1.7) # -# @iops_max: total I/O operations per second during bursts, -# in bytes (Since 1.7) +# @iops_max: total I/O operations per second during bursts, in bytes +# (Since 1.7) # -# @iops_rd_max: read I/O operations per second during bursts, -# in bytes (Since 1.7) +# @iops_rd_max: read I/O operations per second during bursts, in bytes +# (Since 1.7) # -# @iops_wr_max: write I/O operations per second during bursts, -# in bytes (Since 1.7) +# @iops_wr_max: write I/O operations per second during bursts, in +# bytes (Since 1.7) # -# @bps_max_length: maximum length of the @bps_max burst -# period, in seconds. (Since 2.6) +# @bps_max_length: maximum length of the @bps_max burst period, in +# seconds. (Since 2.6) # -# @bps_rd_max_length: maximum length of the @bps_rd_max -# burst period, in seconds. (Since 2.6) +# @bps_rd_max_length: maximum length of the @bps_rd_max burst period, +# in seconds. (Since 2.6) # -# @bps_wr_max_length: maximum length of the @bps_wr_max -# burst period, in seconds. (Since 2.6) +# @bps_wr_max_length: maximum length of the @bps_wr_max burst period, +# in seconds. (Since 2.6) # -# @iops_max_length: maximum length of the @iops burst -# period, in seconds. (Since 2.6) +# @iops_max_length: maximum length of the @iops burst period, in +# seconds. (Since 2.6) # -# @iops_rd_max_length: maximum length of the @iops_rd_max -# burst period, in seconds. (Since 2.6) +# @iops_rd_max_length: maximum length of the @iops_rd_max burst +# period, in seconds. (Since 2.6) # -# @iops_wr_max_length: maximum length of the @iops_wr_max -# burst period, in seconds. (Since 2.6) +# @iops_wr_max_length: maximum length of the @iops_wr_max burst +# period, in seconds. (Since 2.6) # # @iops_size: an I/O size in bytes (Since 1.7) # @@ -530,11 +531,11 @@ # # @cache: the cache mode used for the block device (since: 2.3) # -# @write_threshold: configured write threshold for the device. -# 0 if disabled. (Since 2.3) +# @write_threshold: configured write threshold for the device. 0 if +# disabled. 
(Since 2.3) # -# @dirty-bitmaps: dirty bitmaps information (only present if node -# has one or more dirty bitmaps) (Since 4.2) +# @dirty-bitmaps: dirty bitmaps information (only present if node has +# one or more dirty bitmaps) (Since 4.2) # # Since: 0.14 ## @@ -564,7 +565,8 @@ # # @failed: The last I/O operation has failed # -# @nospace: The last I/O operation has failed due to a no-space condition +# @nospace: The last I/O operation has failed due to a no-space +# condition # # Since: 1.0 ## @@ -581,20 +583,20 @@ # # @granularity: granularity of the dirty bitmap in bytes (since 1.4) # -# @recording: true if the bitmap is recording new writes from the guest. -# Replaces ``active`` and ``disabled`` statuses. (since 4.0) +# @recording: true if the bitmap is recording new writes from the +# guest. (since 4.0) # # @busy: true if the bitmap is in-use by some operation (NBD or jobs) -# and cannot be modified via QMP or used by another operation. -# Replaces ``locked`` and ``frozen`` statuses. (since 4.0) +# and cannot be modified via QMP or used by another operation. +# (since 4.0) # -# @persistent: true if the bitmap was stored on disk, is scheduled to be stored -# on disk, or both. (since 4.0) +# @persistent: true if the bitmap was stored on disk, is scheduled to +# be stored on disk, or both. (since 4.0) # -# @inconsistent: true if this is a persistent bitmap that was improperly -# stored. Implies @persistent to be true; @recording and -# @busy to be false. This bitmap cannot be used. To remove -# it, use @block-dirty-bitmap-remove. (Since 4.0) +# @inconsistent: true if this is a persistent bitmap that was +# improperly stored. Implies @persistent to be true; @recording +# and @busy to be false. This bitmap cannot be used. To remove +# it, use @block-dirty-bitmap-remove. (Since 4.0) # # Since: 1.3 ## @@ -608,14 +610,14 @@ # # An enumeration of flags that a bitmap can report to the user. # -# @in-use: This flag is set by any process actively modifying the qcow2 file, -# and cleared when the updated bitmap is flushed to the qcow2 image. -# The presence of this flag in an offline image means that the bitmap -# was not saved correctly after its last usage, and may contain -# inconsistent data. +# @in-use: This flag is set by any process actively modifying the +# qcow2 file, and cleared when the updated bitmap is flushed to +# the qcow2 image. The presence of this flag in an offline image +# means that the bitmap was not saved correctly after its last +# usage, and may contain inconsistent data. # -# @auto: The bitmap must reflect all changes of the virtual disk by any -# application that would write to this qcow2 file. +# @auto: The bitmap must reflect all changes of the virtual disk by +# any application that would write to this qcow2 file. # # Since: 4.0 ## @@ -644,15 +646,16 @@ # # Block latency histogram. # -# @boundaries: list of interval boundary values in nanoseconds, all greater -# than zero and in ascending order. -# For example, the list [10, 50, 100] produces the following -# histogram intervals: [0, 10), [10, 50), [50, 100), [100, +inf). +# @boundaries: list of interval boundary values in nanoseconds, all +# greater than zero and in ascending order. For example, the list +# [10, 50, 100] produces the following histogram intervals: [0, +# 10), [10, 50), [50, 100), [100, +inf). # -# @bins: list of io request counts corresponding to histogram intervals. 
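The boundaries/bins relationship documented in this hunk can be configured at runtime with the unstable x-block-latency-histogram-set command (x- prefixed, so subject to change); a sketch, with the device id "drive0" and the boundary values invented:

-> { "execute": "x-block-latency-histogram-set",
     "arguments": { "id": "drive0", "boundaries": [ 10, 50, 100 ] } }
<- { "return": {} }

A later query-blockstats would then report four bins per histogram, one for each of the intervals [0, 10), [10, 50), [50, 100) and [100, +inf).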
-# len(@bins) = len(@boundaries) + 1 -# For the example above, @bins may be something like [3, 1, 5, 2], -# and corresponding histogram looks like: +# @bins: list of io request counts corresponding to histogram +# intervals. +# len(@bins) = len(@boundaries) + 1 +# For the example above, @bins may be something like [3, 1, 5, 2], +# and corresponding histogram looks like: # # :: # @@ -672,32 +675,32 @@ ## # @BlockInfo: # -# Block device information. This structure describes a virtual device and -# the backing device associated with it. +# Block device information. This structure describes a virtual device +# and the backing device associated with it. # # @device: The device name associated with the virtual device. # -# @qdev: The qdev ID, or if no ID is assigned, the QOM path of the block -# device. (since 2.10) +# @qdev: The qdev ID, or if no ID is assigned, the QOM path of the +# block device. (since 2.10) # -# @type: This field is returned only for compatibility reasons, it should -# not be used (always returns 'unknown') +# @type: This field is returned only for compatibility reasons, it +# should not be used (always returns 'unknown') # # @removable: True if the device supports removable media. # -# @locked: True if the guest has locked this device from having its media -# removed +# @locked: True if the guest has locked this device from having its +# media removed # -# @tray_open: True if the device's tray is open -# (only present if it has a tray) +# @tray_open: True if the device's tray is open (only present if it +# has a tray) # -# @io-status: @BlockDeviceIoStatus. Only present if the device -# supports it and the VM is configured to stop on errors -# (supported device models: virtio-blk, IDE, SCSI except -# scsi-generic) +# @io-status: @BlockDeviceIoStatus. Only present if the device +# supports it and the VM is configured to stop on errors +# (supported device models: virtio-blk, IDE, SCSI except +# scsi-generic) # # @inserted: @BlockDeviceInfo describing the device if media is -# present +# present # # Since: 0.14 ## @@ -709,28 +712,31 @@ ## # @BlockMeasureInfo: # -# Image file size calculation information. This structure describes the size -# requirements for creating a new image file. +# Image file size calculation information. This structure describes +# the size requirements for creating a new image file. # -# The size requirements depend on the new image file format. File size always -# equals virtual disk size for the 'raw' format, even for sparse POSIX files. -# Compact formats such as 'qcow2' represent unallocated and zero regions -# efficiently so file size may be smaller than virtual disk size. +# The size requirements depend on the new image file format. File +# size always equals virtual disk size for the 'raw' format, even for +# sparse POSIX files. Compact formats such as 'qcow2' represent +# unallocated and zero regions efficiently so file size may be smaller +# than virtual disk size. # -# The values are upper bounds that are guaranteed to fit the new image file. -# Subsequent modification, such as internal snapshot or further bitmap -# creation, may require additional space and is not covered here. +# The values are upper bounds that are guaranteed to fit the new image +# file. Subsequent modification, such as internal snapshot or further +# bitmap creation, may require additional space and is not covered +# here. # -# @required: Size required for a new image file, in bytes, when copying just -# allocated guest-visible contents. 
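As a practical note, this structure is what qemu-img measure reports; assuming its --output=json mode, the members named here print as plain JSON (the numbers below are invented):

{ "required": 393216,
  "fully-allocated": 1074135040 }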
+# @required: Size required for a new image file, in bytes, when +# copying just allocated guest-visible contents. # -# @fully-allocated: Image file size, in bytes, once data has been written -# to all sectors, when copying just guest-visible contents. +# @fully-allocated: Image file size, in bytes, once data has been +# written to all sectors, when copying just guest-visible +# contents. # -# @bitmaps: Additional size required if all the top-level bitmap metadata -# in the source image were to be copied to the destination, -# present only when source and destination both support -# persistent bitmaps. (since 5.1) +# @bitmaps: Additional size required if all the top-level bitmap +# metadata in the source image were to be copied to the +# destination, present only when source and destination both +# support persistent bitmaps. (since 5.1) # # Since: 2.10 ## @@ -742,8 +748,8 @@ # # Get a list of BlockInfo for all virtual block devices. # -# Returns: a list of @BlockInfo describing each virtual block device. Filter -# nodes that were created implicitly are skipped over. +# Returns: a list of @BlockInfo describing each virtual block device. +# Filter nodes that were created implicitly are skipped over. # # Since: 0.14 # @@ -830,7 +836,6 @@ # } # ] # } -# ## { 'command': 'query-block', 'returns': ['BlockInfo'], 'allow-preconfig': true } @@ -840,41 +845,57 @@ # # Statistics of a block device during a given interval of time. # -# @interval_length: Interval used for calculating the statistics, -# in seconds. +# @interval_length: Interval used for calculating the statistics, in +# seconds. # # @min_rd_latency_ns: Minimum latency of read operations in the -# defined interval, in nanoseconds. +# defined interval, in nanoseconds. # # @min_wr_latency_ns: Minimum latency of write operations in the -# defined interval, in nanoseconds. +# defined interval, in nanoseconds. +# +# @min_zone_append_latency_ns: Minimum latency of zone append operations +# in the defined interval, in nanoseconds +# (since 8.1) # # @min_flush_latency_ns: Minimum latency of flush operations in the -# defined interval, in nanoseconds. +# defined interval, in nanoseconds. # # @max_rd_latency_ns: Maximum latency of read operations in the -# defined interval, in nanoseconds. +# defined interval, in nanoseconds. # # @max_wr_latency_ns: Maximum latency of write operations in the -# defined interval, in nanoseconds. +# defined interval, in nanoseconds. +# +# @max_zone_append_latency_ns: Maximum latency of zone append operations +# in the defined interval, in nanoseconds +# (since 8.1) # # @max_flush_latency_ns: Maximum latency of flush operations in the -# defined interval, in nanoseconds. +# defined interval, in nanoseconds. # # @avg_rd_latency_ns: Average latency of read operations in the -# defined interval, in nanoseconds. +# defined interval, in nanoseconds. # # @avg_wr_latency_ns: Average latency of write operations in the -# defined interval, in nanoseconds. +# defined interval, in nanoseconds. +# +# @avg_zone_append_latency_ns: Average latency of zone append operations +# in the defined interval, in nanoseconds +# (since 8.1) # # @avg_flush_latency_ns: Average latency of flush operations in the -# defined interval, in nanoseconds. +# defined interval, in nanoseconds. # -# @avg_rd_queue_depth: Average number of pending read operations -# in the defined interval. +# @avg_rd_queue_depth: Average number of pending read operations in +# the defined interval. 
# -# @avg_wr_queue_depth: Average number of pending write operations -# in the defined interval. +# @avg_wr_queue_depth: Average number of pending write operations in +# the defined interval. +# +# @avg_zone_append_queue_depth: Average number of pending zone append +# operations in the defined interval +# (since 8.1). # # Since: 2.5 ## @@ -882,9 +903,13 @@ 'data': { 'interval_length': 'int', 'min_rd_latency_ns': 'int', 'max_rd_latency_ns': 'int', 'avg_rd_latency_ns': 'int', 'min_wr_latency_ns': 'int', 'max_wr_latency_ns': 'int', - 'avg_wr_latency_ns': 'int', 'min_flush_latency_ns': 'int', - 'max_flush_latency_ns': 'int', 'avg_flush_latency_ns': 'int', - 'avg_rd_queue_depth': 'number', 'avg_wr_queue_depth': 'number' } } + 'avg_wr_latency_ns': 'int', 'min_zone_append_latency_ns': 'int', + 'max_zone_append_latency_ns': 'int', + 'avg_zone_append_latency_ns': 'int', + 'min_flush_latency_ns': 'int', 'max_flush_latency_ns': 'int', + 'avg_flush_latency_ns': 'int', 'avg_rd_queue_depth': 'number', + 'avg_wr_queue_depth': 'number', + 'avg_zone_append_queue_depth': 'number' } } ## # @BlockDeviceStats: @@ -895,104 +920,134 @@ # # @wr_bytes: The number of bytes written by the device. # +# @zone_append_bytes: The number of bytes appended by the zoned devices +# (since 8.1) +# # @unmap_bytes: The number of bytes unmapped by the device (Since 4.2) # -# @rd_operations: The number of read operations performed by the device. +# @rd_operations: The number of read operations performed by the +# device. # -# @wr_operations: The number of write operations performed by the device. +# @wr_operations: The number of write operations performed by the +# device. # -# @flush_operations: The number of cache flush operations performed by the -# device (since 0.15) +# @zone_append_operations: The number of zone append operations performed +# by the zoned devices (since 8.1) # -# @unmap_operations: The number of unmap operations performed by the device -# (Since 4.2) +# @flush_operations: The number of cache flush operations performed by +# the device (since 0.15) # -# @rd_total_time_ns: Total time spent on reads in nanoseconds (since 0.15). +# @unmap_operations: The number of unmap operations performed by the +# device (Since 4.2) # -# @wr_total_time_ns: Total time spent on writes in nanoseconds (since 0.15). +# @rd_total_time_ns: Total time spent on reads in nanoseconds (since +# 0.15). # -# @flush_total_time_ns: Total time spent on cache flushes in nanoseconds -# (since 0.15). +# @wr_total_time_ns: Total time spent on writes in nanoseconds (since +# 0.15). # -# @unmap_total_time_ns: Total time spent on unmap operations in nanoseconds -# (Since 4.2) +# @zone_append_total_time_ns: Total time spent on zone append writes +# in nanoseconds (since 8.1) # -# @wr_highest_offset: The offset after the greatest byte written to the -# device. The intended use of this information is for -# growable sparse files (like qcow2) that are used on top -# of a physical device. +# @flush_total_time_ns: Total time spent on cache flushes in +# nanoseconds (since 0.15). # -# @rd_merged: Number of read requests that have been merged into another -# request (Since 2.3). +# @unmap_total_time_ns: Total time spent on unmap operations in +# nanoseconds (Since 4.2) # -# @wr_merged: Number of write requests that have been merged into another -# request (Since 2.3). +# @wr_highest_offset: The offset after the greatest byte written to +# the device. 
The intended use of this information is for +# growable sparse files (like qcow2) that are used on top of a +# physical device. # -# @unmap_merged: Number of unmap requests that have been merged into another -# request (Since 4.2) +# @rd_merged: Number of read requests that have been merged into +# another request (Since 2.3). # -# @idle_time_ns: Time since the last I/O operation, in -# nanoseconds. If the field is absent it means that -# there haven't been any operations yet (Since 2.5). +# @wr_merged: Number of write requests that have been merged into +# another request (Since 2.3). +# +# @zone_append_merged: Number of zone append requests that have been merged +# into another request (since 8.1) +# +# @unmap_merged: Number of unmap requests that have been merged into +# another request (Since 4.2) +# +# @idle_time_ns: Time since the last I/O operation, in nanoseconds. +# If the field is absent it means that there haven't been any +# operations yet (Since 2.5). # # @failed_rd_operations: The number of failed read operations -# performed by the device (Since 2.5) +# performed by the device (Since 2.5) # # @failed_wr_operations: The number of failed write operations -# performed by the device (Since 2.5) +# performed by the device (Since 2.5) +# +# @failed_zone_append_operations: The number of failed zone append write +# operations performed by the zoned devices +# (since 8.1) # # @failed_flush_operations: The number of failed flush operations -# performed by the device (Since 2.5) +# performed by the device (Since 2.5) # -# @failed_unmap_operations: The number of failed unmap operations performed -# by the device (Since 4.2) +# @failed_unmap_operations: The number of failed unmap operations +# performed by the device (Since 4.2) # # @invalid_rd_operations: The number of invalid read operations -# performed by the device (Since 2.5) +# performed by the device (Since 2.5) # # @invalid_wr_operations: The number of invalid write operations -# performed by the device (Since 2.5) +# performed by the device (Since 2.5) +# +# @invalid_zone_append_operations: The number of invalid zone append operations +# performed by the zoned device (since 8.1) # # @invalid_flush_operations: The number of invalid flush operations -# performed by the device (Since 2.5) +# performed by the device (Since 2.5) # -# @invalid_unmap_operations: The number of invalid unmap operations performed -# by the device (Since 4.2) +# @invalid_unmap_operations: The number of invalid unmap operations +# performed by the device (Since 4.2) # # @account_invalid: Whether invalid operations are included in the -# last access statistics (Since 2.5) +# last access statistics (Since 2.5) # # @account_failed: Whether failed operations are included in the -# latency and last access statistics (Since 2.5) +# latency and last access statistics (Since 2.5) # # @timed_stats: Statistics specific to the set of previously defined -# intervals of time (Since 2.5) +# intervals of time (Since 2.5) # -# @rd_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0) +# @rd_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0) # -# @wr_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0) +# @wr_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0) # -# @flush_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0) +# @zone_append_latency_histogram: @BlockLatencyHistogramInfo. (since 8.1) +# +# @flush_latency_histogram: @BlockLatencyHistogramInfo. 
(Since 4.0) # # Since: 0.14 ## { 'struct': 'BlockDeviceStats', - 'data': {'rd_bytes': 'int', 'wr_bytes': 'int', 'unmap_bytes' : 'int', - 'rd_operations': 'int', 'wr_operations': 'int', + 'data': {'rd_bytes': 'int', 'wr_bytes': 'int', 'zone_append_bytes': 'int', + 'unmap_bytes' : 'int', 'rd_operations': 'int', + 'wr_operations': 'int', 'zone_append_operations': 'int', 'flush_operations': 'int', 'unmap_operations': 'int', 'rd_total_time_ns': 'int', 'wr_total_time_ns': 'int', - 'flush_total_time_ns': 'int', 'unmap_total_time_ns': 'int', - 'wr_highest_offset': 'int', - 'rd_merged': 'int', 'wr_merged': 'int', 'unmap_merged': 'int', - '*idle_time_ns': 'int', + 'zone_append_total_time_ns': 'int', 'flush_total_time_ns': 'int', + 'unmap_total_time_ns': 'int', 'wr_highest_offset': 'int', + 'rd_merged': 'int', 'wr_merged': 'int', 'zone_append_merged': 'int', + 'unmap_merged': 'int', '*idle_time_ns': 'int', 'failed_rd_operations': 'int', 'failed_wr_operations': 'int', - 'failed_flush_operations': 'int', 'failed_unmap_operations': 'int', - 'invalid_rd_operations': 'int', 'invalid_wr_operations': 'int', + 'failed_zone_append_operations': 'int', + 'failed_flush_operations': 'int', + 'failed_unmap_operations': 'int', 'invalid_rd_operations': 'int', + 'invalid_wr_operations': 'int', + 'invalid_zone_append_operations': 'int', 'invalid_flush_operations': 'int', 'invalid_unmap_operations': 'int', 'account_invalid': 'bool', 'account_failed': 'bool', 'timed_stats': ['BlockDeviceTimedStats'], '*rd_latency_histogram': 'BlockLatencyHistogramInfo', '*wr_latency_histogram': 'BlockLatencyHistogramInfo', + '*zone_append_latency_histogram': 'BlockLatencyHistogramInfo', '*flush_latency_histogram': 'BlockLatencyHistogramInfo' } } ## @@ -1000,11 +1055,11 @@ # # File driver statistics # -# @discard-nb-ok: The number of successful discard operations performed by -# the driver. +# @discard-nb-ok: The number of successful discard operations +# performed by the driver. # -# @discard-nb-failed: The number of failed discard operations performed by -# the driver. +# @discard-nb-failed: The number of failed discard operations +# performed by the driver. # # @discard-bytes-ok: The number of bytes discarded by the driver. # @@ -1023,11 +1078,11 @@ # # @completion-errors: The number of completion errors. # -# @aligned-accesses: The number of aligned accesses performed by -# the driver. +# @aligned-accesses: The number of aligned accesses performed by the +# driver. # # @unaligned-accesses: The number of unaligned accesses performed by -# the driver. +# the driver. # # Since: 5.2 ## @@ -1059,24 +1114,24 @@ # Statistics of a virtual block device or a block backing device. # # @device: If the stats are for a virtual block device, the name -# corresponding to the virtual block device. +# corresponding to the virtual block device. # -# @node-name: The node name of the device. (Since 2.3) +# @node-name: The node name of the device. (Since 2.3) # -# @qdev: The qdev ID, or if no ID is assigned, the QOM path of the block -# device. (since 3.0) +# @qdev: The qdev ID, or if no ID is assigned, the QOM path of the +# block device. (since 3.0) # # @stats: A @BlockDeviceStats for the device. # -# @driver-specific: Optional driver-specific stats. (Since 4.2) +# @driver-specific: Optional driver-specific stats. (Since 4.2) # # @parent: This describes the file block device if it has one. -# Contains recursively the statistics of the underlying -# protocol (e.g. the host file for a qcow2 image). 
If there is -# no underlying protocol, this field is omitted +# Contains recursively the statistics of the underlying protocol +# (e.g. the host file for a qcow2 image). If there is no +# underlying protocol, this field is omitted # # @backing: This describes the backing block device if it has one. -# (Since 2.0) +# (Since 2.0) # # Since: 0.14 ## @@ -1093,12 +1148,12 @@ # Query the @BlockStats for all virtual block devices. # # @query-nodes: If true, the command will query all the block nodes -# that have a node name, in a list which will include "parent" -# information, but not "backing". -# If false or omitted, the behavior is as before - query all the -# device backends, recursively including their "parent" and -# "backing". Filter nodes that were created implicitly are -# skipped over in this mode. (Since 2.3) +# that have a node name, in a list which will include "parent" +# information, but not "backing". If false or omitted, the +# behavior is as before - query all the device backends, +# recursively including their "parent" and "backing". Filter nodes +# that were created implicitly are skipped over in this mode. +# (Since 2.3) # # Returns: A list of @BlockStats for each virtual block devices. # @@ -1205,7 +1260,6 @@ # } # ] # } -# ## { 'command': 'query-blockstats', 'data': { '*query-nodes': 'bool' }, @@ -1216,22 +1270,22 @@ # @BlockdevOnError: # # An enumeration of possible behaviors for errors on I/O operations. -# The exact meaning depends on whether the I/O was initiated by a guest -# or by a block job +# The exact meaning depends on whether the I/O was initiated by a +# guest or by a block job # -# @report: for guest operations, report the error to the guest; -# for jobs, cancel the job +# @report: for guest operations, report the error to the guest; for +# jobs, cancel the job # # @ignore: ignore the error, only report a QMP event (BLOCK_IO_ERROR -# or BLOCK_JOB_ERROR). The backup, mirror and commit block jobs retry -# the failing request later and may still complete successfully. The -# stream block job continues to stream and will complete with an -# error. +# or BLOCK_JOB_ERROR). The backup, mirror and commit block jobs +# retry the failing request later and may still complete +# successfully. The stream block job continues to stream and will +# complete with an error. # # @enospc: same as @stop on ENOSPC, same as @report otherwise. # -# @stop: for guest operations, stop the virtual machine; -# for jobs, pause the job +# @stop: for guest operations, stop the virtual machine; for jobs, +# pause the job # # @auto: inherit the error handling policy of the backend (since: 2.7) # @@ -1252,10 +1306,11 @@ # # @none: only copy data written from now on # -# @incremental: only copy data described by the dirty bitmap. (since: 2.4) +# @incremental: only copy data described by the dirty bitmap. +# (since: 2.4) # -# @bitmap: only copy data described by the dirty bitmap. (since: 4.2) -# Behavior on completion is determined by the BitmapSyncMode. +# @bitmap: only copy data described by the dirty bitmap. (since: 4.2) +# Behavior on completion is determined by the BitmapSyncMode. # # Since: 1.3 ## @@ -1265,17 +1320,18 @@ ## # @BitmapSyncMode: # -# An enumeration of possible behaviors for the synchronization of a bitmap -# when used for data copy operations. +# An enumeration of possible behaviors for the synchronization of a +# bitmap when used for data copy operations. # -# @on-success: The bitmap is only synced when the operation is successful. 
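As a sketch of how BitmapSyncMode is used in practice (the job, device, target and bitmap names are all hypothetical), a bitmap-mode backup could be started with:

-> { "execute": "blockdev-backup",
     "arguments": { "job-id": "backup0",
                    "device": "drive0",
                    "target": "target0",
                    "sync": "bitmap",
                    "bitmap": "bitmap0",
                    "bitmap-mode": "on-success" } }
<- { "return": {} }

With "on-success" the bitmap is only synchronized if the job completes, with "never" it is left untouched as a read-only manifest, and with "always" it is synchronized regardless of whether the job succeeds.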
-# This is the behavior always used for 'INCREMENTAL' backups. +# @on-success: The bitmap is only synced when the operation is +# successful. This is the behavior always used for 'INCREMENTAL' +# backups. # # @never: The bitmap is never synchronized with the operation, and is -# treated solely as a read-only manifest of blocks to copy. +# treated solely as a read-only manifest of blocks to copy. # # @always: The bitmap is always synchronized with the operation, -# regardless of whether or not the operation was successful. +# regardless of whether or not the operation was successful. # # Since: 4.2 ## @@ -1291,9 +1347,8 @@ # @background: copy data in background only. # # @write-blocking: when data is written to the source, write it -# (synchronously) to the target as well. In -# addition, data is copied in background just like in -# @background mode. +# (synchronously) to the target as well. In addition, data is +# copied in background just like in @background mode. # # Since: 3.0 ## @@ -1307,21 +1362,22 @@ # # @type: the job type ('stream' for image streaming) # -# @device: The job identifier. Originally the device name but other -# values are allowed since QEMU 2.7 +# @device: The job identifier. Originally the device name but other +# values are allowed since QEMU 2.7 # -# @len: Estimated @offset value at the completion of the job. This value can -# arbitrarily change while the job is running, in both directions. +# @len: Estimated @offset value at the completion of the job. This +# value can arbitrarily change while the job is running, in both +# directions. # -# @offset: Progress made until now. The unit is arbitrary and the value can -# only meaningfully be used for the ratio of @offset to @len. The -# value is monotonically increasing. +# @offset: Progress made until now. The unit is arbitrary and the +# value can only meaningfully be used for the ratio of @offset to +# @len. The value is monotonically increasing. # -# @busy: false if the job is known to be in a quiescent state, with -# no pending I/O. Since 1.3. +# @busy: false if the job is known to be in a quiescent state, with no +# pending I/O. (Since 1.3) # -# @paused: whether the job is paused or, if @busy is true, will -# pause itself as soon as possible. Since 1.3. +# @paused: whether the job is paused or, if @busy is true, will pause +# itself as soon as possible. (Since 1.3) # # @speed: the rate limit, bytes per second # @@ -1331,14 +1387,14 @@ # # @status: Current job state/status (since 2.12) # -# @auto-finalize: Job will finalize itself when PENDING, moving to -# the CONCLUDED state. (since 2.12) +# @auto-finalize: Job will finalize itself when PENDING, moving to the +# CONCLUDED state. (since 2.12) # -# @auto-dismiss: Job will dismiss itself when CONCLUDED, moving to the NULL -# state and disappearing from the query list. (since 2.12) +# @auto-dismiss: Job will dismiss itself when CONCLUDED, moving to the +# NULL state and disappearing from the query list. (since 2.12) # # @error: Error information if the job did not complete successfully. -# Not set if the job completed successfully. (since 2.12.1) +# Not set if the job completed successfully. 
(since 2.12.1) # # Since: 1.1 ## @@ -1375,8 +1431,9 @@ # # @size: new image size in bytes # -# Returns: - nothing on success -# - If @device is not a valid block device, DeviceNotFound +# Returns: +# - nothing on success +# - If @device is not a valid block device, DeviceNotFound # # Since: 0.14 # @@ -1385,7 +1442,6 @@ # -> { "execute": "block_resize", # "arguments": { "device": "scratch", "size": 1073741824 } } # <- { "return": {} } -# ## { 'command': 'block_resize', 'data': { '*device': 'str', @@ -1397,14 +1453,14 @@ ## # @NewImageMode: # -# An enumeration that tells QEMU how to set the backing file path in -# a new image file. +# An enumeration that tells QEMU how to set the backing file path in a +# new image file. # # @existing: QEMU should look for an existing image file. # # @absolute-paths: QEMU should create a new image with absolute paths -# for the backing file. If there is no backing file available, the new -# image will not be backed either. +# for the backing file. If there is no backing file available, +# the new image will not be backed either. # # Since: 1.1 ## @@ -1418,13 +1474,15 @@ # # @device: the name of the device to take a snapshot of. # -# @node-name: graph node name to generate the snapshot from (Since 2.0) +# @node-name: graph node name to generate the snapshot from (Since +# 2.0) # -# @snapshot-file: the target of the new overlay image. If the file -# exists, or if it is a device, the overlay will be created in the -# existing file/device. Otherwise, a new file will be created. +# @snapshot-file: the target of the new overlay image. If the file +# exists, or if it is a device, the overlay will be created in the +# existing file/device. Otherwise, a new file will be created. # -# @snapshot-node-name: the graph node name of the new image (Since 2.0) +# @snapshot-node-name: the graph node name of the new image (Since +# 2.0) # # @format: the format of the overlay image, default is 'qcow2'. # @@ -1442,9 +1500,9 @@ # @node: device or node name that will have a snapshot taken. # # @overlay: reference to the existing block device that will become -# the overlay of @node, as part of taking the snapshot. -# It must not have a current backing file (this can be -# achieved by passing "backing": null to blockdev-add). +# the overlay of @node, as part of taking the snapshot. It must +# not have a current backing file (this can be achieved by passing +# "backing": null to blockdev-add). # # Since: 2.5 ## @@ -1454,20 +1512,20 @@ ## # @BackupPerf: # -# Optional parameters for backup. These parameters don't affect +# Optional parameters for backup. These parameters don't affect # functionality, but may significantly affect performance. # -# @use-copy-range: Use copy offloading. Default false. +# @use-copy-range: Use copy offloading. Default false. # -# @max-workers: Maximum number of parallel requests for the sustained background -# copying process. Doesn't influence copy-before-write operations. -# Default 64. +# @max-workers: Maximum number of parallel requests for the sustained +# background copying process. Doesn't influence copy-before-write +# operations. Default 64. # -# @max-chunk: Maximum request length for the sustained background copying -# process. Doesn't influence copy-before-write operations. -# 0 means unlimited. If max-chunk is non-zero then it should not be -# less than job cluster size which is calculated as maximum of -# target image cluster size and 64k. Default 0. +# @max-chunk: Maximum request length for the sustained background +# copying process. 
Doesn't influence copy-before-write +# operations. 0 means unlimited. If max-chunk is non-zero then +# it should not be less than job cluster size which is calculated +# as maximum of target image cluster size and 64k. Default 0. # # Since: 6.0 ## @@ -1478,66 +1536,65 @@ ## # @BackupCommon: # -# @job-id: identifier for the newly-created block job. If -# omitted, the device name will be used. (Since 2.7) +# @job-id: identifier for the newly-created block job. If omitted, +# the device name will be used. (Since 2.7) # -# @device: the device name or node-name of a root node which should be copied. +# @device: the device name or node-name of a root node which should be +# copied. # -# @sync: what parts of the disk image should be copied to the destination -# (all the disk, only the sectors allocated in the topmost image, from a -# dirty bitmap, or only new I/O). +# @sync: what parts of the disk image should be copied to the +# destination (all the disk, only the sectors allocated in the +# topmost image, from a dirty bitmap, or only new I/O). # -# @speed: the maximum speed, in bytes per second. The default is 0, -# for unlimited. +# @speed: the maximum speed, in bytes per second. The default is 0, +# for unlimited. # -# @bitmap: The name of a dirty bitmap to use. -# Must be present if sync is "bitmap" or "incremental". -# Can be present if sync is "full" or "top". -# Must not be present otherwise. -# (Since 2.4 (drive-backup), 3.1 (blockdev-backup)) +# @bitmap: The name of a dirty bitmap to use. Must be present if sync +# is "bitmap" or "incremental". Can be present if sync is "full" +# or "top". Must not be present otherwise. +# (Since 2.4 (drive-backup), 3.1 (blockdev-backup)) # -# @bitmap-mode: Specifies the type of data the bitmap should contain after -# the operation concludes. -# Must be present if a bitmap was provided, -# Must NOT be present otherwise. (Since 4.2) +# @bitmap-mode: Specifies the type of data the bitmap should contain +# after the operation concludes. Must be present if a bitmap was +# provided, Must NOT be present otherwise. (Since 4.2) # # @compress: true to compress data, if the target format supports it. -# (default: false) (since 2.8) +# (default: false) (since 2.8) # # @on-source-error: the action to take on an error on the source, -# default 'report'. 'stop' and 'enospc' can only be used -# if the block device supports io-status (see BlockInfo). +# default 'report'. 'stop' and 'enospc' can only be used if the +# block device supports io-status (see BlockInfo). # # @on-target-error: the action to take on an error on the target, -# default 'report' (no limitations, since this applies to -# a different block device than @device). +# default 'report' (no limitations, since this applies to a +# different block device than @device). # -# @auto-finalize: When false, this job will wait in a PENDING state after it has -# finished its work, waiting for @block-job-finalize before -# making any block graph changes. -# When true, this job will automatically -# perform its abort or commit actions. -# Defaults to true. (Since 2.12) +# @auto-finalize: When false, this job will wait in a PENDING state +# after it has finished its work, waiting for @block-job-finalize +# before making any block graph changes. When true, this job will +# automatically perform its abort or commit actions. Defaults to +# true. (Since 2.12) # -# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it -# has completely ceased all work, and awaits @block-job-dismiss. 
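For reference, the BackupPerf tuning knobs described above are supplied through the unstable @x-perf member of the backup commands; a sketch with invented names and values:

-> { "execute": "blockdev-backup",
     "arguments": { "job-id": "backup0",
                    "device": "drive0",
                    "target": "target0",
                    "sync": "full",
                    "x-perf": { "max-workers": 8 } } }
<- { "return": {} }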
-# When true, this job will automatically disappear from the query -# list without user intervention. -# Defaults to true. (Since 2.12) +# @auto-dismiss: When false, this job will wait in a CONCLUDED state +# after it has completely ceased all work, and awaits +# @block-job-dismiss. When true, this job will automatically +# disappear from the query list without user intervention. +# Defaults to true. (Since 2.12) # # @filter-node-name: the node name that should be assigned to the -# filter driver that the backup job inserts into the graph -# above node specified by @drive. If this option is not given, -# a node name is autogenerated. (Since: 4.2) +# filter driver that the backup job inserts into the graph above +# node specified by @drive. If this option is not given, a node +# name is autogenerated. (Since: 4.2) # -# @x-perf: Performance options. (Since 6.0) +# @x-perf: Performance options. (Since 6.0) # # Features: +# # @unstable: Member @x-perf is experimental. # # Note: @on-source-error and @on-target-error only affect background -# I/O. If an error occurs during a guest write request, the device's -# rerror/werror actions will be used. +# I/O. If an error occurs during a guest write request, the +# device's rerror/werror actions will be used. # # Since: 4.2 ## @@ -1556,15 +1613,15 @@ ## # @DriveBackup: # -# @target: the target of the new image. If the file exists, or if it -# is a device, the existing file/device will be used as the new -# destination. If it does not exist, a new file will be created. +# @target: the target of the new image. If the file exists, or if it +# is a device, the existing file/device will be used as the new +# destination. If it does not exist, a new file will be created. # -# @format: the format of the new destination, default is to -# probe if @mode is 'existing', else the format of the source +# @format: the format of the new destination, default is to probe if +# @mode is 'existing', else the format of the source # # @mode: whether and how QEMU should create a new image, default is -# 'absolute-paths'. +# 'absolute-paths'. # # Since: 1.6 ## @@ -1592,8 +1649,9 @@ # # For the arguments, see the documentation of BlockdevSnapshotSync. # -# Returns: - nothing on success -# - If @device is not a valid block device, DeviceNotFound +# Returns: +# - nothing on success +# - If @device is not a valid block device, DeviceNotFound # # Since: 0.14 # @@ -1605,7 +1663,6 @@ # "/some/place/my-image", # "format": "qcow2" } } # <- { "return": {} } -# ## { 'command': 'blockdev-snapshot-sync', 'data': 'BlockdevSnapshotSync', @@ -1618,16 +1675,16 @@ # # Take a snapshot, by installing 'node' as the backing image of # 'overlay'. Additionally, if 'node' is associated with a block -# device, the block device changes to using 'overlay' as its new active -# image. +# device, the block device changes to using 'overlay' as its new +# active image. # # For the arguments, see the documentation of BlockdevSnapshot. # # Features: -# @allow-write-only-overlay: If present, the check whether this operation is safe -# was relaxed so that it can be used to change -# backing file of a destination of a blockdev-mirror. -# (since 5.0) +# +# @allow-write-only-overlay: If present, the check whether this +# operation is safe was relaxed so that it can be used to change +# backing file of a destination of a blockdev-mirror. 
(since 5.0) # # Since: 2.5 # @@ -1646,7 +1703,6 @@ # "arguments": { "node": "ide-hd0", # "overlay": "node1534" } } # <- { "return": {} } -# ## { 'command': 'blockdev-snapshot', 'data': 'BlockdevSnapshot', @@ -1658,26 +1714,25 @@ # # Change the backing file in the image file metadata. This does not # cause QEMU to reopen the image file to reparse the backing filename -# (it may, however, perform a reopen to change permissions from -# r/o -> r/w -> r/o, if needed). The new backing file string is written -# into the image file metadata, and the QEMU internal strings are -# updated. +# (it may, however, perform a reopen to change permissions from r/o -> +# r/w -> r/o, if needed). The new backing file string is written into +# the image file metadata, and the QEMU internal strings are updated. # # @image-node-name: The name of the block driver state node of the -# image to modify. The "device" argument is used -# to verify "image-node-name" is in the chain -# described by "device". +# image to modify. The "device" argument is used to verify +# "image-node-name" is in the chain described by "device". # # @device: The device name or node-name of the root node that owns -# image-node-name. +# image-node-name. # -# @backing-file: The string to write as the backing file. This -# string is not validated, so care should be taken -# when specifying the string or the image chain may -# not be able to be reopened again. +# @backing-file: The string to write as the backing file. This string +# is not validated, so care should be taken when specifying the +# string or the image chain may not be able to be reopened again. # -# Returns: - Nothing on success -# - If "device" does not exist or cannot be determined, DeviceNotFound +# Returns: +# - Nothing on success +# - If "device" does not exist or cannot be determined, +# DeviceNotFound # # Since: 2.1 ## @@ -1689,14 +1744,14 @@ ## # @block-commit: # -# Live commit of data from overlay image nodes into backing nodes - i.e., -# writes data between 'top' and 'base' into 'base'. +# Live commit of data from overlay image nodes into backing nodes - +# i.e., writes data between 'top' and 'base' into 'base'. # -# If top == base, that is an error. -# If top has no overlays on top of it, or if it is in use by a writer, -# the job will not be completed by itself. The user needs to complete -# the job with the block-job-complete command after getting the ready -# event. (Since 2.0) +# If top == base, that is an error. If top has no overlays on top of +# it, or if it is in use by a writer, the job will not be completed by +# itself. The user needs to complete the job with the +# block-job-complete command after getting the ready event. (Since +# 2.0) # # If the base image is smaller than top, then the base image will be # resized to be the same size as top. If top is smaller than the base @@ -1704,77 +1759,75 @@ # size to match the size of the smaller top, you can safely truncate # it yourself once the commit operation successfully completes. # -# @job-id: identifier for the newly-created block job. If -# omitted, the device name will be used. (Since 2.7) +# @job-id: identifier for the newly-created block job. If omitted, +# the device name will be used. (Since 2.7) # # @device: the device name or node-name of a root node # # @base-node: The node name of the backing image to write data into. -# If not specified, this is the deepest backing image. -# (since: 3.1) +# If not specified, this is the deepest backing image. 
+# (since: 3.1) # -# @base: Same as @base-node, except that it is a file name rather than a node -# name. This must be the exact filename string that was used to open the -# node; other strings, even if addressing the same file, are not -# accepted +# @base: Same as @base-node, except that it is a file name rather than +# a node name. This must be the exact filename string that was +# used to open the node; other strings, even if addressing the +# same file, are not accepted # # @top-node: The node name of the backing image within the image chain -# which contains the topmost data to be committed down. If -# not specified, this is the active layer. (since: 3.1) +# which contains the topmost data to be committed down. If not +# specified, this is the active layer. (since: 3.1) # -# @top: Same as @top-node, except that it is a file name rather than a node -# name. This must be the exact filename string that was used to open the -# node; other strings, even if addressing the same file, are not -# accepted +# @top: Same as @top-node, except that it is a file name rather than a +# node name. This must be the exact filename string that was used +# to open the node; other strings, even if addressing the same +# file, are not accepted # # @backing-file: The backing file string to write into the overlay -# image of 'top'. If 'top' does not have an overlay -# image, or if 'top' is in use by a writer, specifying -# a backing file string is an error. +# image of 'top'. If 'top' does not have an overlay image, or if +# 'top' is in use by a writer, specifying a backing file string is +# an error. # -# This filename is not validated. If a pathname string -# is such that it cannot be resolved by QEMU, that -# means that subsequent QMP or HMP commands must use -# node-names for the image in question, as filename -# lookup methods will fail. +# This filename is not validated. If a pathname string is such +# that it cannot be resolved by QEMU, that means that subsequent +# QMP or HMP commands must use node-names for the image in +# question, as filename lookup methods will fail. # -# If not specified, QEMU will automatically determine -# the backing file string to use, or error out if -# there is no obvious choice. Care should be taken -# when specifying the string, to specify a valid -# filename or protocol. -# (Since 2.1) +# If not specified, QEMU will automatically determine the backing +# file string to use, or error out if there is no obvious choice. +# Care should be taken when specifying the string, to specify a +# valid filename or protocol. (Since 2.1) # # @speed: the maximum speed, in bytes per second # -# @on-error: the action to take on an error. 'ignore' means that the request -# should be retried. (default: report; Since: 5.0) +# @on-error: the action to take on an error. 'ignore' means that the +# request should be retried. (default: report; Since: 5.0) # # @filter-node-name: the node name that should be assigned to the -# filter driver that the commit job inserts into the graph -# above @top. If this option is not given, a node name is -# autogenerated. (Since: 2.9) +# filter driver that the commit job inserts into the graph above +# @top. If this option is not given, a node name is +# autogenerated. (Since: 2.9) # -# @auto-finalize: When false, this job will wait in a PENDING state after it has -# finished its work, waiting for @block-job-finalize before -# making any block graph changes. -# When true, this job will automatically -# perform its abort or commit actions. -# Defaults to true. 
(Since 3.1) +# @auto-finalize: When false, this job will wait in a PENDING state +# after it has finished its work, waiting for @block-job-finalize +# before making any block graph changes. When true, this job will +# automatically perform its abort or commit actions. Defaults to +# true. (Since 3.1) # -# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it -# has completely ceased all work, and awaits @block-job-dismiss. -# When true, this job will automatically disappear from the query -# list without user intervention. -# Defaults to true. (Since 3.1) +# @auto-dismiss: When false, this job will wait in a CONCLUDED state +# after it has completely ceased all work, and awaits +# @block-job-dismiss. When true, this job will automatically +# disappear from the query list without user intervention. +# Defaults to true. (Since 3.1) # # Features: -# @deprecated: Members @base and @top are deprecated. Use @base-node -# and @top-node instead. # -# Returns: - Nothing on success -# - If @device does not exist, DeviceNotFound -# - Any other error returns a GenericError. +# @deprecated: Members @base and @top are deprecated. Use @base-node +# and @top-node instead. +# +# Returns: +# - Nothing on success +# - If @device does not exist, DeviceNotFound +# - Any other error returns a GenericError. # # Since: 1.3 # @@ -1784,7 +1837,6 @@ # "arguments": { "device": "virtio0", # "top": "/tmp/snap1.qcow2" } } # <- { "return": {} } -# ## { 'command': 'block-commit', 'data': { '*job-id': 'str', 'device': 'str', '*base-node': 'str', @@ -1800,17 +1852,20 @@ ## # @drive-backup: # -# Start a point-in-time copy of a block device to a new destination. The -# status of ongoing drive-backup operations can be checked with -# query-block-jobs where the BlockJobInfo.type field has the value 'backup'. -# The operation can be stopped before it has completed using the -# block-job-cancel command. +# Start a point-in-time copy of a block device to a new destination. +# The status of ongoing drive-backup operations can be checked with +# query-block-jobs where the BlockJobInfo.type field has the value +# 'backup'. The operation can be stopped before it has completed using +# the block-job-cancel command. # # Features: -# @deprecated: This command is deprecated. Use @blockdev-backup instead. # -# Returns: - nothing on success -# - If @device is not a valid block device, GenericError +# @deprecated: This command is deprecated. Use @blockdev-backup +# instead. +# +# Returns: +# - nothing on success +# - If @device is not a valid block device, GenericError # # Since: 1.6 # @@ -1821,7 +1876,6 @@ # "sync": "full", # "target": "backup.img" } } # <- { "return": {} } -# ## { 'command': 'drive-backup', 'boxed': true, 'data': 'DriveBackup', 'features': ['deprecated'], @@ -1830,14 +1884,15 @@ ## # @blockdev-backup: # -# Start a point-in-time copy of a block device to a new destination. The -# status of ongoing blockdev-backup operations can be checked with -# query-block-jobs where the BlockJobInfo.type field has the value 'backup'. -# The operation can be stopped before it has completed using the -# block-job-cancel command. +# Start a point-in-time copy of a block device to a new destination. +# The status of ongoing blockdev-backup operations can be checked with +# query-block-jobs where the BlockJobInfo.type field has the value +# 'backup'. The operation can be stopped before it has completed using +# the block-job-cancel command. 
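+#
+# For instance, progress of a running backup job can be watched with
+# query-block-jobs; a sketch of an (abridged) reply, with
+# illustrative values:
+#
+# -> { "execute": "query-block-jobs" }
+# <- { "return": [ { "type": "backup", "device": "backup0",
+#                    "len": 10737418240, "offset": 536870912,
+#                    "speed": 0, "paused": false, "ready": false } ] }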
# -# Returns: - nothing on success -# - If @device is not a valid block device, DeviceNotFound +# Returns: +# - nothing on success +# - If @device is not a valid block device, DeviceNotFound # # Since: 2.3 # @@ -1848,7 +1903,6 @@ # "sync": "full", # "target": "tgt-id" } } # <- { "return": {} } -# ## { 'command': 'blockdev-backup', 'boxed': true, 'data': 'BlockdevBackup', @@ -1859,8 +1913,8 @@ # # Get the named block driver list # -# @flat: Omit the nested data about backing image ("backing-image" key) if true. -# Default is false (Since 5.0) +# @flat: Omit the nested data about backing image ("backing-image" +# key) if true. Default is false (Since 5.0) # # Returns: the list of BlockDeviceInfo # @@ -1914,7 +1968,6 @@ # "virtual-size":2048000 # } # } } ] } -# ## { 'command': 'query-named-block-nodes', 'returns': [ 'BlockDeviceInfo' ], @@ -1938,16 +1991,16 @@ ## # @XDbgBlockGraphNode: # -# @id: Block graph node identifier. This @id is generated only for -# x-debug-query-block-graph and does not relate to any other identifiers in -# Qemu. +# @id: Block graph node identifier. This @id is generated only for +# x-debug-query-block-graph and does not relate to any other +# identifiers in Qemu. # -# @type: Type of graph node. Can be one of block-backend, block-job or -# block-driver-state. +# @type: Type of graph node. Can be one of block-backend, block-job +# or block-driver-state. # -# @name: Human readable name of the node. Corresponds to node-name for -# block-driver-state nodes; is not guaranteed to be unique in the whole -# graph (with block-jobs and block-backends). +# @name: Human readable name of the node. Corresponds to node-name +# for block-driver-state nodes; is not guaranteed to be unique in +# the whole graph (with block-jobs and block-backends). # # Since: 4.0 ## @@ -1959,25 +2012,26 @@ # # Enum of base block permissions. # -# @consistent-read: A user that has the "permission" of consistent reads is -# guaranteed that their view of the contents of the block -# device is complete and self-consistent, representing the -# contents of a disk at a specific point. -# For most block devices (including their backing files) this -# is true, but the property cannot be maintained in a few -# situations like for intermediate nodes of a commit block -# job. +# @consistent-read: A user that has the "permission" of consistent +# reads is guaranteed that their view of the contents of the block +# device is complete and self-consistent, representing the +# contents of a disk at a specific point. For most block devices +# (including their backing files) this is true, but the property +# cannot be maintained in a few situations like for intermediate +# nodes of a commit block job. # -# @write: This permission is required to change the visible disk contents. +# @write: This permission is required to change the visible disk +# contents. # -# @write-unchanged: This permission (which is weaker than BLK_PERM_WRITE) is -# both enough and required for writes to the block node when -# the caller promises that the visible disk content doesn't -# change. -# As the BLK_PERM_WRITE permission is strictly stronger, -# either is sufficient to perform an unchanging write. +# @write-unchanged: This permission (which is weaker than +# BLK_PERM_WRITE) is both enough and required for writes to the +# block node when the caller promises that the visible disk +# content doesn't change. As the BLK_PERM_WRITE permission is +# strictly stronger, either is sufficient to perform an unchanging +# write. 
# -# @resize: This permission is required to change the size of a block node. +# @resize: This permission is required to change the size of a block +# node. # # Since: 4.0 ## @@ -1997,8 +2051,8 @@ # # @perm: granted permissions for the parent operating on the child # -# @shared-perm: permissions that can still be granted to other users of the -# child while it is still attached to this parent +# @shared-perm: permissions that can still be granted to other users +# of the child while it is still attached to this parent # # Since: 4.0 ## @@ -2023,6 +2077,7 @@ # Get the block graph. # # Features: +# # @unstable: This command is meant for debugging. # # Since: 4.0 @@ -2034,15 +2089,16 @@ ## # @drive-mirror: # -# Start mirroring a block device's writes to a new destination. target -# specifies the target of the new image. If the file exists, or if it -# is a device, it will be used as the new destination for writes. If -# it does not exist, a new file will be created. format specifies the -# format of the mirror image, default is to probe if mode='existing', -# else the format of the source. +# Start mirroring a block device's writes to a new destination. +# target specifies the target of the new image. If the file exists, +# or if it is a device, it will be used as the new destination for +# writes. If it does not exist, a new file will be created. format +# specifies the format of the mirror image, default is to probe if +# mode='existing', else the format of the source. # -# Returns: - nothing on success -# - If @device is not a valid block device, GenericError +# Returns: +# - nothing on success +# - If @device is not a valid block device, GenericError # # Since: 1.3 # @@ -2054,7 +2110,6 @@ # "sync": "full", # "format": "qcow2" } } # <- { "return": {} } -# ## { 'command': 'drive-mirror', 'boxed': true, 'data': 'DriveMirror', @@ -2065,73 +2120,72 @@ # # A set of parameters describing drive mirror setup. # -# @job-id: identifier for the newly-created block job. If -# omitted, the device name will be used. (Since 2.7) +# @job-id: identifier for the newly-created block job. If omitted, +# the device name will be used. (Since 2.7) # -# @device: the device name or node-name of a root node whose writes should be -# mirrored. +# @device: the device name or node-name of a root node whose writes +# should be mirrored. # -# @target: the target of the new image. If the file exists, or if it -# is a device, the existing file/device will be used as the new -# destination. If it does not exist, a new file will be created. +# @target: the target of the new image. If the file exists, or if it +# is a device, the existing file/device will be used as the new +# destination. If it does not exist, a new file will be created. # -# @format: the format of the new destination, default is to -# probe if @mode is 'existing', else the format of the source +# @format: the format of the new destination, default is to probe if +# @mode is 'existing', else the format of the source # -# @node-name: the new block driver state node name in the graph -# (Since 2.1) +# @node-name: the new block driver state node name in the graph (Since +# 2.1) # # @replaces: with sync=full graph node name to be replaced by the new -# image when a whole image copy is done. This can be used to repair -# broken Quorum files. By default, @device is replaced, although -# implicitly created filters on it are kept. (Since 2.1) +# image when a whole image copy is done. This can be used to +# repair broken Quorum files. 
By default, @device is replaced, +# although implicitly created filters on it are kept. (Since 2.1) # # @mode: whether and how QEMU should create a new image, default is -# 'absolute-paths'. +# 'absolute-paths'. # # @speed: the maximum speed, in bytes per second # -# @sync: what parts of the disk image should be copied to the destination -# (all the disk, only the sectors allocated in the topmost image, or -# only new I/O). +# @sync: what parts of the disk image should be copied to the +# destination (all the disk, only the sectors allocated in the +# topmost image, or only new I/O). # -# @granularity: granularity of the dirty bitmap, default is 64K -# if the image format doesn't have clusters, 4K if the clusters -# are smaller than that, else the cluster size. Must be a -# power of 2 between 512 and 64M (since 1.4). +# @granularity: granularity of the dirty bitmap, default is 64K if the +# image format doesn't have clusters, 4K if the clusters are +# smaller than that, else the cluster size. Must be a power of 2 +# between 512 and 64M (since 1.4). # -# @buf-size: maximum amount of data in flight from source to -# target (since 1.4). +# @buf-size: maximum amount of data in flight from source to target +# (since 1.4). # # @on-source-error: the action to take on an error on the source, -# default 'report'. 'stop' and 'enospc' can only be used -# if the block device supports io-status (see BlockInfo). +# default 'report'. 'stop' and 'enospc' can only be used if the +# block device supports io-status (see BlockInfo). # # @on-target-error: the action to take on an error on the target, -# default 'report' (no limitations, since this applies to -# a different block device than @device). +# default 'report' (no limitations, since this applies to a +# different block device than @device). # -# @unmap: Whether to try to unmap target sectors where source has -# only zero. If true, and target unallocated sectors will read as zero, -# target image sectors will be unmapped; otherwise, zeroes will be -# written. Both will result in identical contents. -# Default is true. (Since 2.4) +# @unmap: Whether to try to unmap target sectors where source has only +# zero. If true, and target unallocated sectors will read as +# zero, target image sectors will be unmapped; otherwise, zeroes +# will be written. Both will result in identical contents. +# Default is true. (Since 2.4) # -# @copy-mode: when to copy data to the destination; defaults to 'background' -# (Since: 3.0) +# @copy-mode: when to copy data to the destination; defaults to +# 'background' (Since: 3.0) # -# @auto-finalize: When false, this job will wait in a PENDING state after it has -# finished its work, waiting for @block-job-finalize before -# making any block graph changes. -# When true, this job will automatically -# perform its abort or commit actions. -# Defaults to true. (Since 3.1) +# @auto-finalize: When false, this job will wait in a PENDING state +# after it has finished its work, waiting for @block-job-finalize +# before making any block graph changes. When true, this job will +# automatically perform its abort or commit actions. Defaults to +# true. (Since 3.1) # -# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it -# has completely ceased all work, and awaits @block-job-dismiss. -# When true, this job will automatically disappear from the query -# list without user intervention. -# Defaults to true. 
(Since 3.1) +# @auto-dismiss: When false, this job will wait in a CONCLUDED state +# after it has completely ceased all work, and awaits +# @block-job-dismiss. When true, this job will automatically +# disappear from the query list without user intervention. +# Defaults to true. (Since 3.1) # # Since: 1.3 ## @@ -2165,16 +2219,16 @@ # @name: name of the dirty bitmap (must be less than 1024 bytes) # # @granularity: the bitmap granularity, default is 64k for -# block-dirty-bitmap-add +# block-dirty-bitmap-add # # @persistent: the bitmap is persistent, i.e. it will be saved to the -# corresponding block device image file on its close. For now only -# Qcow2 disks support persistent bitmaps. Default is false for -# block-dirty-bitmap-add. (Since: 2.10) +# corresponding block device image file on its close. For now +# only Qcow2 disks support persistent bitmaps. Default is false +# for block-dirty-bitmap-add. (Since: 2.10) # -# @disabled: the bitmap is created in the disabled state, which means that -# it will not track drive changes. The bitmap may be enabled with -# block-dirty-bitmap-enable. Default is false. (Since: 4.0) +# @disabled: the bitmap is created in the disabled state, which means +# that it will not track drive changes. The bitmap may be enabled +# with block-dirty-bitmap-enable. Default is false. (Since: 4.0) # # Since: 2.4 ## @@ -2185,7 +2239,8 @@ ## # @BlockDirtyBitmapOrStr: # -# @local: name of the bitmap, attached to the same node as target bitmap. +# @local: name of the bitmap, attached to the same node as target +# bitmap. # # @external: bitmap with specified node # @@ -2202,9 +2257,9 @@ # # @target: name of the destination dirty bitmap # -# @bitmaps: name(s) of the source dirty bitmap(s) at @node and/or fully -# specified BlockDirtyBitmap elements. The latter are supported -# since 4.1. +# @bitmaps: name(s) of the source dirty bitmap(s) at @node and/or +# fully specified BlockDirtyBitmap elements. The latter are +# supported since 4.1. # # Since: 4.0 ## @@ -2215,11 +2270,13 @@ ## # @block-dirty-bitmap-add: # -# Create a dirty bitmap with a name on the node, and start tracking the writes. +# Create a dirty bitmap with a name on the node, and start tracking +# the writes. # -# Returns: - nothing on success -# - If @node is not a valid block device or node, DeviceNotFound -# - If @name is already taken, GenericError with an explanation +# Returns: +# - nothing on success +# - If @node is not a valid block device or node, DeviceNotFound +# - If @name is already taken, GenericError with an explanation # # Since: 2.4 # @@ -2228,7 +2285,6 @@ # -> { "execute": "block-dirty-bitmap-add", # "arguments": { "node": "drive0", "name": "bitmap0" } } # <- { "return": {} } -# ## { 'command': 'block-dirty-bitmap-add', 'data': 'BlockDirtyBitmapAdd', @@ -2238,13 +2294,14 @@ # @block-dirty-bitmap-remove: # # Stop write tracking and remove the dirty bitmap that was created -# with block-dirty-bitmap-add. If the bitmap is persistent, remove it from its -# storage too. +# with block-dirty-bitmap-add. If the bitmap is persistent, remove it +# from its storage too. 
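+#
+# A persistent bitmap of this kind might have been created with, for
+# example (node and bitmap names illustrative):
+#
+# -> { "execute": "block-dirty-bitmap-add",
+#      "arguments": { "node": "drive0", "name": "bitmap0",
+#                     "persistent": true } }
+# <- { "return": {} }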
# -# Returns: - nothing on success -# - If @node is not a valid block device or node, DeviceNotFound -# - If @name is not found, GenericError with an explanation -# - if @name is frozen by an operation, GenericError +# Returns: +# - nothing on success +# - If @node is not a valid block device or node, DeviceNotFound +# - If @name is not found, GenericError with an explanation +# - if @name is frozen by an operation, GenericError # # Since: 2.4 # @@ -2253,7 +2310,6 @@ # -> { "execute": "block-dirty-bitmap-remove", # "arguments": { "node": "drive0", "name": "bitmap0" } } # <- { "return": {} } -# ## { 'command': 'block-dirty-bitmap-remove', 'data': 'BlockDirtyBitmap', @@ -2266,9 +2322,10 @@ # backup from this point in time forward will only backup clusters # modified after this clear operation. # -# Returns: - nothing on success -# - If @node is not a valid block device, DeviceNotFound -# - If @name is not found, GenericError with an explanation +# Returns: +# - nothing on success +# - If @node is not a valid block device, DeviceNotFound +# - If @name is not found, GenericError with an explanation # # Since: 2.4 # @@ -2277,7 +2334,6 @@ # -> { "execute": "block-dirty-bitmap-clear", # "arguments": { "node": "drive0", "name": "bitmap0" } } # <- { "return": {} } -# ## { 'command': 'block-dirty-bitmap-clear', 'data': 'BlockDirtyBitmap', @@ -2288,9 +2344,10 @@ # # Enables a dirty bitmap so that it will begin tracking disk changes. # -# Returns: - nothing on success -# - If @node is not a valid block device, DeviceNotFound -# - If @name is not found, GenericError with an explanation +# Returns: +# - nothing on success +# - If @node is not a valid block device, DeviceNotFound +# - If @name is not found, GenericError with an explanation # # Since: 4.0 # @@ -2299,7 +2356,6 @@ # -> { "execute": "block-dirty-bitmap-enable", # "arguments": { "node": "drive0", "name": "bitmap0" } } # <- { "return": {} } -# ## { 'command': 'block-dirty-bitmap-enable', 'data': 'BlockDirtyBitmap', @@ -2310,9 +2366,10 @@ # # Disables a dirty bitmap so that it will stop tracking disk changes. # -# Returns: - nothing on success -# - If @node is not a valid block device, DeviceNotFound -# - If @name is not found, GenericError with an explanation +# Returns: +# - nothing on success +# - If @node is not a valid block device, DeviceNotFound +# - If @name is not found, GenericError with an explanation # # Since: 4.0 # @@ -2321,7 +2378,6 @@ # -> { "execute": "block-dirty-bitmap-disable", # "arguments": { "node": "drive0", "name": "bitmap0" } } # <- { "return": {} } -# ## { 'command': 'block-dirty-bitmap-disable', 'data': 'BlockDirtyBitmap', @@ -2331,20 +2387,22 @@ # @block-dirty-bitmap-merge: # # Merge dirty bitmaps listed in @bitmaps to the @target dirty bitmap. -# Dirty bitmaps in @bitmaps will be unchanged, except if it also appears -# as the @target bitmap. Any bits already set in @target will still be -# set after the merge, i.e., this operation does not clear the target. -# On error, @target is unchanged. +# Dirty bitmaps in @bitmaps will be unchanged, except if it also +# appears as the @target bitmap. Any bits already set in @target will +# still be set after the merge, i.e., this operation does not clear +# the target. On error, @target is unchanged. # -# The resulting bitmap will count as dirty any clusters that were dirty in any -# of the source bitmaps. This can be used to achieve backup checkpoints, or in -# simpler usages, to copy bitmaps. 
+# The resulting bitmap will count as dirty any clusters that were
+# dirty in any of the source bitmaps. This can be used to achieve
+# backup checkpoints, or in simpler usages, to copy bitmaps.
#
-# Returns: - nothing on success
-# - If @node is not a valid block device, DeviceNotFound
-# - If any bitmap in @bitmaps or @target is not found, GenericError
-# - If any of the bitmaps have different sizes or granularities,
-# GenericError
+# Returns:
+# - nothing on success
+# - If @node is not a valid block device, DeviceNotFound
+# - If any bitmap in @bitmaps or @target is not found,
+# GenericError
+# - If any of the bitmaps have different sizes or granularities,
+# GenericError
#
# Since: 4.0
#
@@ -2354,7 +2412,6 @@
# "arguments": { "node": "drive0", "target": "bitmap0",
# "bitmaps": ["bitmap1"] } }
# <- { "return": {} }
-#
##
{ 'command': 'block-dirty-bitmap-merge',
'data': 'BlockDirtyBitmapMerge',
@@ -2378,12 +2435,14 @@
# Get bitmap SHA256.
#
# Features:
+#
# @unstable: This command is meant for debugging.
#
-# Returns: - BlockDirtyBitmapSha256 on success
-# - If @node is not a valid block device, DeviceNotFound
-# - If @name is not found or if hashing has failed, GenericError with an
-# explanation
+# Returns:
+# - BlockDirtyBitmapSha256 on success
+# - If @node is not a valid block device, DeviceNotFound
+# - If @name is not found or if hashing has failed, GenericError
+# with an explanation
#
# Since: 2.10
##
@@ -2397,62 +2456,60 @@
#
# Start mirroring a block device's writes to a new destination.
#
-# @job-id: identifier for the newly-created block job. If
-# omitted, the device name will be used. (Since 2.7)
+# @job-id: identifier for the newly-created block job. If omitted,
+# the device name will be used. (Since 2.7)
#
-# @device: The device name or node-name of a root node whose writes should be
-# mirrored.
+# @device: The device name or node-name of a root node whose writes
+# should be mirrored.
#
-# @target: the id or node-name of the block device to mirror to. This mustn't be
-# attached to guest.
+# @target: the id or node-name of the block device to mirror to. This
+# mustn't be attached to the guest.
#
# @replaces: with sync=full graph node name to be replaced by the new
-# image when a whole image copy is done. This can be used to repair
-# broken Quorum files. By default, @device is replaced, although
-# implicitly created filters on it are kept.
+# image when a whole image copy is done. This can be used to
+# repair broken Quorum files. By default, @device is replaced,
+# although implicitly created filters on it are kept.
#
# @speed: the maximum speed, in bytes per second
#
-# @sync: what parts of the disk image should be copied to the destination
-# (all the disk, only the sectors allocated in the topmost image, or
-# only new I/O).
+# @sync: what parts of the disk image should be copied to the
+# destination (all the disk, only the sectors allocated in the
+# topmost image, or only new I/O).
#
-# @granularity: granularity of the dirty bitmap, default is 64K
-# if the image format doesn't have clusters, 4K if the clusters
-# are smaller than that, else the cluster size. Must be a
-# power of 2 between 512 and 64M
+# @granularity: granularity of the dirty bitmap, default is 64K if the
+# image format doesn't have clusters, 4K if the clusters are
+# smaller than that, else the cluster size.
Must be a power of 2 +# between 512 and 64M # -# @buf-size: maximum amount of data in flight from source to -# target +# @buf-size: maximum amount of data in flight from source to target # # @on-source-error: the action to take on an error on the source, -# default 'report'. 'stop' and 'enospc' can only be used -# if the block device supports io-status (see BlockInfo). +# default 'report'. 'stop' and 'enospc' can only be used if the +# block device supports io-status (see BlockInfo). # # @on-target-error: the action to take on an error on the target, -# default 'report' (no limitations, since this applies to -# a different block device than @device). +# default 'report' (no limitations, since this applies to a +# different block device than @device). # # @filter-node-name: the node name that should be assigned to the -# filter driver that the mirror job inserts into the graph -# above @device. If this option is not given, a node name is -# autogenerated. (Since: 2.9) +# filter driver that the mirror job inserts into the graph above +# @device. If this option is not given, a node name is +# autogenerated. (Since: 2.9) # -# @copy-mode: when to copy data to the destination; defaults to 'background' -# (Since: 3.0) +# @copy-mode: when to copy data to the destination; defaults to +# 'background' (Since: 3.0) # -# @auto-finalize: When false, this job will wait in a PENDING state after it has -# finished its work, waiting for @block-job-finalize before -# making any block graph changes. -# When true, this job will automatically -# perform its abort or commit actions. -# Defaults to true. (Since 3.1) +# @auto-finalize: When false, this job will wait in a PENDING state +# after it has finished its work, waiting for @block-job-finalize +# before making any block graph changes. When true, this job will +# automatically perform its abort or commit actions. Defaults to +# true. (Since 3.1) # -# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it -# has completely ceased all work, and awaits @block-job-dismiss. -# When true, this job will automatically disappear from the query -# list without user intervention. -# Defaults to true. (Since 3.1) +# @auto-dismiss: When false, this job will wait in a CONCLUDED state +# after it has completely ceased all work, and awaits +# @block-job-dismiss. When true, this job will automatically +# disappear from the query list without user intervention. +# Defaults to true. (Since 3.1) # # Returns: nothing on success. 
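+#
+# The mirror target must already exist as a named node; it would
+# typically be created first with blockdev-add, a sketch of which
+# (driver and filename illustrative) is:
+#
+# -> { "execute": "blockdev-add",
+#      "arguments": { "driver": "qcow2", "node-name": "target0",
+#                     "file": { "driver": "file",
+#                               "filename": "target.qcow2" } } }
+# <- { "return": {} }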
# @@ -2465,7 +2522,6 @@ # "target": "target0", # "sync": "full" } } # <- { "return": {} } -# ## { 'command': 'blockdev-mirror', 'data': { '*job-id': 'str', 'device': 'str', 'target': 'str', @@ -2500,59 +2556,53 @@ # # @iops_wr: write I/O operations per second # -# @bps_max: total throughput limit during bursts, -# in bytes (Since 1.7) +# @bps_max: total throughput limit during bursts, in bytes (Since 1.7) # -# @bps_rd_max: read throughput limit during bursts, -# in bytes (Since 1.7) +# @bps_rd_max: read throughput limit during bursts, in bytes (Since +# 1.7) # -# @bps_wr_max: write throughput limit during bursts, -# in bytes (Since 1.7) +# @bps_wr_max: write throughput limit during bursts, in bytes (Since +# 1.7) # -# @iops_max: total I/O operations per second during bursts, -# in bytes (Since 1.7) +# @iops_max: total I/O operations per second during bursts, in bytes +# (Since 1.7) # -# @iops_rd_max: read I/O operations per second during bursts, -# in bytes (Since 1.7) +# @iops_rd_max: read I/O operations per second during bursts, in bytes +# (Since 1.7) # -# @iops_wr_max: write I/O operations per second during bursts, -# in bytes (Since 1.7) +# @iops_wr_max: write I/O operations per second during bursts, in +# bytes (Since 1.7) # -# @bps_max_length: maximum length of the @bps_max burst -# period, in seconds. It must only -# be set if @bps_max is set as well. -# Defaults to 1. (Since 2.6) +# @bps_max_length: maximum length of the @bps_max burst period, in +# seconds. It must only be set if @bps_max is set as well. +# Defaults to 1. (Since 2.6) # -# @bps_rd_max_length: maximum length of the @bps_rd_max -# burst period, in seconds. It must only -# be set if @bps_rd_max is set as well. -# Defaults to 1. (Since 2.6) +# @bps_rd_max_length: maximum length of the @bps_rd_max burst period, +# in seconds. It must only be set if @bps_rd_max is set as well. +# Defaults to 1. (Since 2.6) # -# @bps_wr_max_length: maximum length of the @bps_wr_max -# burst period, in seconds. It must only -# be set if @bps_wr_max is set as well. -# Defaults to 1. (Since 2.6) +# @bps_wr_max_length: maximum length of the @bps_wr_max burst period, +# in seconds. It must only be set if @bps_wr_max is set as well. +# Defaults to 1. (Since 2.6) # -# @iops_max_length: maximum length of the @iops burst -# period, in seconds. It must only -# be set if @iops_max is set as well. -# Defaults to 1. (Since 2.6) +# @iops_max_length: maximum length of the @iops burst period, in +# seconds. It must only be set if @iops_max is set as well. +# Defaults to 1. (Since 2.6) # -# @iops_rd_max_length: maximum length of the @iops_rd_max -# burst period, in seconds. It must only -# be set if @iops_rd_max is set as well. -# Defaults to 1. (Since 2.6) +# @iops_rd_max_length: maximum length of the @iops_rd_max burst +# period, in seconds. It must only be set if @iops_rd_max is set +# as well. Defaults to 1. (Since 2.6) # -# @iops_wr_max_length: maximum length of the @iops_wr_max -# burst period, in seconds. It must only -# be set if @iops_wr_max is set as well. -# Defaults to 1. (Since 2.6) +# @iops_wr_max_length: maximum length of the @iops_wr_max burst +# period, in seconds. It must only be set if @iops_wr_max is set +# as well. Defaults to 1. (Since 2.6) # # @iops_size: an I/O size in bytes (Since 1.7) # # @group: throttle group name (Since 2.4) # # Features: +# # @deprecated: Member @device is deprecated. Use @id instead. # # Since: 1.1 @@ -2572,35 +2622,55 @@ ## # @ThrottleLimits: # -# Limit parameters for throttling. 
-# Since some limit combinations are illegal, limits should always be set in one
-# transaction. All fields are optional. When setting limits, if a field is
-# missing the current value is not changed.
+# Limit parameters for throttling. Since some limit combinations are
+# illegal, limits should always be set in one transaction. All fields
+# are optional. When setting limits, if a field is missing the
+# current value is not changed.
#
# @iops-total: limit total I/O operations per second
+#
# @iops-total-max: I/O operations burst
-# @iops-total-max-length: length of the iops-total-max burst period, in seconds
-# It must only be set if @iops-total-max is set as well.
+#
+# @iops-total-max-length: length of the iops-total-max burst period,
+# in seconds. It must only be set if @iops-total-max is set as
+# well.
+#
# @iops-read: limit read operations per second
+#
# @iops-read-max: I/O operations read burst
-# @iops-read-max-length: length of the iops-read-max burst period, in seconds
-# It must only be set if @iops-read-max is set as well.
+#
+# @iops-read-max-length: length of the iops-read-max burst period, in
+# seconds. It must only be set if @iops-read-max is set as well.
+#
# @iops-write: limit write operations per second
+#
# @iops-write-max: I/O operations write burst
-# @iops-write-max-length: length of the iops-write-max burst period, in seconds
-# It must only be set if @iops-write-max is set as well.
+#
+# @iops-write-max-length: length of the iops-write-max burst period,
+# in seconds. It must only be set if @iops-write-max is set as
+# well.
+#
# @bps-total: limit total bytes per second
+#
# @bps-total-max: total bytes burst
-# @bps-total-max-length: length of the bps-total-max burst period, in seconds.
-# It must only be set if @bps-total-max is set as well.
+#
+# @bps-total-max-length: length of the bps-total-max burst period, in
+# seconds. It must only be set if @bps-total-max is set as well.
+#
# @bps-read: limit read bytes per second
+#
# @bps-read-max: total bytes read burst
-# @bps-read-max-length: length of the bps-read-max burst period, in seconds
-# It must only be set if @bps-read-max is set as well.
+#
+# @bps-read-max-length: length of the bps-read-max burst period, in
+# seconds. It must only be set if @bps-read-max is set as well.
+#
# @bps-write: limit write bytes per second
+#
# @bps-write-max: total bytes write burst
-# @bps-write-max-length: length of the bps-write-max burst period, in seconds
-# It must only be set if @bps-write-max is set as well.
+#
+# @bps-write-max-length: length of the bps-write-max burst period, in
+# seconds. It must only be set if @bps-write-max is set as well.
+#
# @iops-size: when limiting by iops max size of an I/O in bytes
#
# Since: 2.11
@@ -2625,11 +2695,11 @@
# @limits: limits to apply for this throttle group
#
# Features:
+#
# @unstable: All members starting with x- are aliases for the same key
-# without x- in the @limits object. This is not a stable
-# interface and may be removed or changed incompatibly in
-# the future. Use @limits for a supported stable
-# interface.
+# without x- in the @limits object. This is not a stable
+# interface and may be removed or changed incompatibly in the
+# future. Use @limits for a supported stable interface.
#
# Since: 2.11
##
@@ -2679,90 +2749,90 @@
#
# Copy data from a backing file into a block device.
#
-# The block streaming operation is performed in the background until the entire
-# backing file has been copied. This command returns immediately once streaming
-# has started.
The status of ongoing block streaming operations can be checked -# with query-block-jobs. The operation can be stopped before it has completed -# using the block-job-cancel command. +# The block streaming operation is performed in the background until +# the entire backing file has been copied. This command returns +# immediately once streaming has started. The status of ongoing block +# streaming operations can be checked with query-block-jobs. The +# operation can be stopped before it has completed using the +# block-job-cancel command. # -# The node that receives the data is called the top image, can be located in -# any part of the chain (but always above the base image; see below) and can be -# specified using its device or node name. Earlier qemu versions only allowed -# 'device' to name the top level node; presence of the 'base-node' parameter -# during introspection can be used as a witness of the enhanced semantics -# of 'device'. +# The node that receives the data is called the top image, can be +# located in any part of the chain (but always above the base image; +# see below) and can be specified using its device or node name. +# Earlier qemu versions only allowed 'device' to name the top level +# node; presence of the 'base-node' parameter during introspection can +# be used as a witness of the enhanced semantics of 'device'. # -# If a base file is specified then sectors are not copied from that base file and -# its backing chain. This can be used to stream a subset of the backing file -# chain instead of flattening the entire image. -# When streaming completes the image file will have the base file as its backing -# file, unless that node was changed while the job was running. In that case, -# base's parent's backing (or filtered, whichever exists) child (i.e., base at -# the beginning of the job) will be the new backing file. +# If a base file is specified then sectors are not copied from that +# base file and its backing chain. This can be used to stream a +# subset of the backing file chain instead of flattening the entire +# image. When streaming completes the image file will have the base +# file as its backing file, unless that node was changed while the job +# was running. In that case, base's parent's backing (or filtered, +# whichever exists) child (i.e., base at the beginning of the job) +# will be the new backing file. # -# On successful completion the image file is updated to drop the backing file -# and the BLOCK_JOB_COMPLETED event is emitted. +# On successful completion the image file is updated to drop the +# backing file and the BLOCK_JOB_COMPLETED event is emitted. # -# In case @device is a filter node, block-stream modifies the first non-filter -# overlay node below it to point to the new backing node instead of modifying -# @device itself. +# In case @device is a filter node, block-stream modifies the first +# non-filter overlay node below it to point to the new backing node +# instead of modifying @device itself. # -# @job-id: identifier for the newly-created block job. If -# omitted, the device name will be used. (Since 2.7) +# @job-id: identifier for the newly-created block job. If omitted, +# the device name will be used. (Since 2.7) # # @device: the device or node name of the top image # -# @base: the common backing file name. -# It cannot be set if @base-node or @bottom is also set. +# @base: the common backing file name. It cannot be set if @base-node +# or @bottom is also set. # -# @base-node: the node name of the backing file. 
-# It cannot be set if @base or @bottom is also set. (Since 2.8)
+# @base-node: the node name of the backing file. It cannot be set if
+# @base or @bottom is also set. (Since 2.8)
#
# @bottom: the last node in the chain that should be streamed into
-# top. It cannot be set if @base or @base-node is also set.
-# It cannot be filter node. (Since 6.0)
+# top. It cannot be set if @base or @base-node is also set. It
+# cannot be a filter node. (Since 6.0)
#
-# @backing-file: The backing file string to write into the top
-# image. This filename is not validated.
+# @backing-file: The backing file string to write into the top image.
+# This filename is not validated.
#
-# If a pathname string is such that it cannot be
-# resolved by QEMU, that means that subsequent QMP or
-# HMP commands must use node-names for the image in
-# question, as filename lookup methods will fail.
+# If a pathname string is such that it cannot be resolved by QEMU,
+# that means that subsequent QMP or HMP commands must use
+# node-names for the image in question, as filename lookup methods
+# will fail.
#
-# If not specified, QEMU will automatically determine
-# the backing file string to use, or error out if there
-# is no obvious choice. Care should be taken when
-# specifying the string, to specify a valid filename or
-# protocol.
-# (Since 2.1)
+# If not specified, QEMU will automatically determine the backing
+# file string to use, or error out if there is no obvious choice.
+# Care should be taken when specifying the string, to specify a
+# valid filename or protocol. (Since 2.1)
#
# @speed: the maximum speed, in bytes per second
#
-# @on-error: the action to take on an error (default report).
-# 'stop' and 'enospc' can only be used if the block device
-# supports io-status (see BlockInfo). Since 1.3.
+# @on-error: the action to take on an error (default report). 'stop'
+# and 'enospc' can only be used if the block device supports
+# io-status (see BlockInfo). (Since 1.3)
#
# @filter-node-name: the node name that should be assigned to the
-# filter driver that the stream job inserts into the graph
-# above @device. If this option is not given, a node name is
-# autogenerated. (Since: 6.0)
+# filter driver that the stream job inserts into the graph above
+# @device. If this option is not given, a node name is
+# autogenerated. (Since: 6.0)
#
-# @auto-finalize: When false, this job will wait in a PENDING state after it has
-# finished its work, waiting for @block-job-finalize before
-# making any block graph changes.
-# When true, this job will automatically
-# perform its abort or commit actions.
-# Defaults to true. (Since 3.1)
+# @auto-finalize: When false, this job will wait in a PENDING state
+# after it has finished its work, waiting for @block-job-finalize
+# before making any block graph changes. When true, this job will
+# automatically perform its abort or commit actions. Defaults to
+# true. (Since 3.1)
#
-# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
-# has completely ceased all work, and awaits @block-job-dismiss.
-# When true, this job will automatically disappear from the query
-# list without user intervention.
-# Defaults to true. (Since 3.1)
+# @auto-dismiss: When false, this job will wait in a CONCLUDED state
+# after it has completely ceased all work, and awaits
+# @block-job-dismiss. When true, this job will automatically
+# disappear from the query list without user intervention.
+# Defaults to true. (Since 3.1)
#
-# Returns: - Nothing on success.
-# - If @device does not exist, DeviceNotFound. +# Returns: +# - Nothing on success. +# - If @device does not exist, DeviceNotFound. # # Since: 1.1 # @@ -2772,7 +2842,6 @@ # "arguments": { "device": "virtio0", # "base": "/tmp/master.qcow2" } } # <- { "return": {} } -# ## { 'command': 'block-stream', 'data': { '*job-id': 'str', 'device': 'str', '*base': 'str', @@ -2791,15 +2860,17 @@ # # Throttling can be disabled by setting the speed to 0. # -# @device: The job identifier. This used to be a device name (hence -# the name of the parameter), but since QEMU 2.7 it can have -# other values. +# @device: The job identifier. This used to be a device name (hence +# the name of the parameter), but since QEMU 2.7 it can have other +# values. # # @speed: the maximum speed, in bytes per second, or 0 for unlimited. -# Defaults to 0. +# Defaults to 0. # -# Returns: - Nothing on success -# - If no background operation is active on this device, DeviceNotActive +# Returns: +# - Nothing on success +# - If no background operation is active on this device, +# DeviceNotActive # # Since: 1.1 ## @@ -2812,35 +2883,39 @@ # # Stop an active background block operation. # -# This command returns immediately after marking the active background block -# operation for cancellation. It is an error to call this command if no -# operation is in progress. +# This command returns immediately after marking the active background +# block operation for cancellation. It is an error to call this +# command if no operation is in progress. # # The operation will cancel as soon as possible and then emit the -# BLOCK_JOB_CANCELLED event. Before that happens the job is still visible when -# enumerated using query-block-jobs. +# BLOCK_JOB_CANCELLED event. Before that happens the job is still +# visible when enumerated using query-block-jobs. # -# Note that if you issue 'block-job-cancel' after 'drive-mirror' has indicated -# (via the event BLOCK_JOB_READY) that the source and destination are -# synchronized, then the event triggered by this command changes to -# BLOCK_JOB_COMPLETED, to indicate that the mirroring has ended and the -# destination now has a point-in-time copy tied to the time of the cancellation. +# Note that if you issue 'block-job-cancel' after 'drive-mirror' has +# indicated (via the event BLOCK_JOB_READY) that the source and +# destination are synchronized, then the event triggered by this +# command changes to BLOCK_JOB_COMPLETED, to indicate that the +# mirroring has ended and the destination now has a point-in-time copy +# tied to the time of the cancellation. # -# For streaming, the image file retains its backing file unless the streaming -# operation happens to complete just as it is being cancelled. A new streaming -# operation can be started at a later time to finish copying all data from the -# backing file. +# For streaming, the image file retains its backing file unless the +# streaming operation happens to complete just as it is being +# cancelled. A new streaming operation can be started at a later time +# to finish copying all data from the backing file. # -# @device: The job identifier. This used to be a device name (hence -# the name of the parameter), but since QEMU 2.7 it can have -# other values. +# @device: The job identifier. This used to be a device name (hence +# the name of the parameter), but since QEMU 2.7 it can have other +# values. 
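+#
+# For example, a job started with "job-id": "job0" could be cancelled
+# with (identifier illustrative):
+#
+# -> { "execute": "block-job-cancel", "arguments": { "device": "job0" } }
+# <- { "return": {} }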
# -# @force: If true, and the job has already emitted the event BLOCK_JOB_READY, -# abandon the job immediately (even if it is paused) instead of waiting -# for the destination to complete its final synchronization (since 1.3) +# @force: If true, and the job has already emitted the event +# BLOCK_JOB_READY, abandon the job immediately (even if it is +# paused) instead of waiting for the destination to complete its +# final synchronization (since 1.3) # -# Returns: - Nothing on success -# - If no background operation is active on this device, DeviceNotActive +# Returns: +# - Nothing on success +# - If no background operation is active on this device, +# DeviceNotActive # # Since: 1.1 ## @@ -2852,20 +2927,22 @@ # # Pause an active background block operation. # -# This command returns immediately after marking the active background block -# operation for pausing. It is an error to call this command if no -# operation is in progress or if the job is already paused. +# This command returns immediately after marking the active background +# block operation for pausing. It is an error to call this command if +# no operation is in progress or if the job is already paused. # -# The operation will pause as soon as possible. No event is emitted when -# the operation is actually paused. Cancelling a paused job automatically -# resumes it. +# The operation will pause as soon as possible. No event is emitted +# when the operation is actually paused. Cancelling a paused job +# automatically resumes it. # -# @device: The job identifier. This used to be a device name (hence -# the name of the parameter), but since QEMU 2.7 it can have -# other values. +# @device: The job identifier. This used to be a device name (hence +# the name of the parameter), but since QEMU 2.7 it can have other +# values. # -# Returns: - Nothing on success -# - If no background operation is active on this device, DeviceNotActive +# Returns: +# - Nothing on success +# - If no background operation is active on this device, +# DeviceNotActive # # Since: 1.3 ## @@ -2877,18 +2954,20 @@ # # Resume an active background block operation. # -# This command returns immediately after resuming a paused background block -# operation. It is an error to call this command if no operation is in -# progress or if the job is not paused. +# This command returns immediately after resuming a paused background +# block operation. It is an error to call this command if no +# operation is in progress or if the job is not paused. # # This command also clears the error status of the job. # -# @device: The job identifier. This used to be a device name (hence -# the name of the parameter), but since QEMU 2.7 it can have -# other values. +# @device: The job identifier. This used to be a device name (hence +# the name of the parameter), but since QEMU 2.7 it can have other +# values. # -# Returns: - Nothing on success -# - If no background operation is active on this device, DeviceNotActive +# Returns: +# - Nothing on success +# - If no background operation is active on this device, +# DeviceNotActive # # Since: 1.3 ## @@ -2898,26 +2977,29 @@ ## # @block-job-complete: # -# Manually trigger completion of an active background block operation. This -# is supported for drive mirroring, where it also switches the device to -# write to the target path only. The ability to complete is signaled with -# a BLOCK_JOB_READY event. +# Manually trigger completion of an active background block operation. 
+# This is supported for drive mirroring, where it also switches the +# device to write to the target path only. The ability to complete is +# signaled with a BLOCK_JOB_READY event. # -# This command completes an active background block operation synchronously. -# The ordering of this command's return with the BLOCK_JOB_COMPLETED event -# is not defined. Note that if an I/O error occurs during the processing of -# this command: 1) the command itself will fail; 2) the error will be processed -# according to the rerror/werror arguments that were specified when starting -# the operation. +# This command completes an active background block operation +# synchronously. The ordering of this command's return with the +# BLOCK_JOB_COMPLETED event is not defined. Note that if an I/O error +# occurs during the processing of this command: 1) the command itself +# will fail; 2) the error will be processed according to the +# rerror/werror arguments that were specified when starting the +# operation. # # A cancelled or paused job cannot be completed. # -# @device: The job identifier. This used to be a device name (hence -# the name of the parameter), but since QEMU 2.7 it can have -# other values. +# @device: The job identifier. This used to be a device name (hence +# the name of the parameter), but since QEMU 2.7 it can have other +# values. # -# Returns: - Nothing on success -# - If no background operation is active on this device, DeviceNotActive +# Returns: +# - Nothing on success +# - If no background operation is active on this device, +# DeviceNotActive # # Since: 1.3 ## @@ -2927,14 +3009,15 @@ ## # @block-job-dismiss: # -# For jobs that have already concluded, remove them from the block-job-query -# list. This command only needs to be run for jobs which were started with -# QEMU 2.12+ job lifetime management semantics. +# For jobs that have already concluded, remove them from the +# block-job-query list. This command only needs to be run for jobs +# which were started with QEMU 2.12+ job lifetime management +# semantics. # -# This command will refuse to operate on any job that has not yet reached -# its terminal state, JOB_STATUS_CONCLUDED. For jobs that make use of the -# BLOCK_JOB_READY event, block-job-cancel or block-job-complete will still need -# to be used as appropriate. +# This command will refuse to operate on any job that has not yet +# reached its terminal state, JOB_STATUS_CONCLUDED. For jobs that make +# use of the BLOCK_JOB_READY event, block-job-cancel or +# block-job-complete will still need to be used as appropriate. # # @id: The job identifier. # @@ -2949,11 +3032,11 @@ # @block-job-finalize: # # Once a job that has manual=true reaches the pending state, it can be -# instructed to finalize any graph changes and do any necessary cleanup -# via this command. -# For jobs in a transaction, instructing one job to finalize will force -# ALL jobs in the transaction to finalize, so it is only necessary to instruct -# a single member job to finalize. +# instructed to finalize any graph changes and do any necessary +# cleanup via this command. For jobs in a transaction, instructing +# one job to finalize will force ALL jobs in the transaction to +# finalize, so it is only necessary to instruct a single member job to +# finalize. # # @id: The job identifier. # @@ -2970,6 +3053,7 @@ # Determines how to handle discard requests. 
# # @ignore: Ignore the request +# # @unmap: Forward as an unmap request # # Since: 2.9 @@ -2981,12 +3065,16 @@ # @BlockdevDetectZeroesOptions: # # Describes the operation mode for the automatic conversion of plain -# zero writes by the OS to driver specific optimized zero write commands. +# zero writes by the OS to driver specific optimized zero write +# commands. # # @off: Disabled (default) +# # @on: Enabled -# @unmap: Enabled and even try to unmap blocks if possible. This requires -# also that @BlockdevDiscardOptions is set to unmap for this device. +# +# @unmap: Enabled and even try to unmap blocks if possible. This +# requires also that @BlockdevDiscardOptions is set to unmap for +# this device. # # Since: 2.1 ## @@ -2999,7 +3087,9 @@ # Selects the AIO backend to handle I/O requests # # @threads: Use qemu's thread pool +# # @native: Use native AIO backend (only Linux and Windows) +# # @io_uring: Use linux io_uring (since 5.0) # # Since: 2.9 @@ -3014,9 +3104,9 @@ # Includes cache-related options for block devices # # @direct: enables use of O_DIRECT (bypass the host page cache; -# default: false) -# @no-flush: ignore any flush requests for the device (default: -# false) +# default: false) +# +# @no-flush: ignore any flush requests for the device (default: false) # # Since: 2.9 ## @@ -3030,12 +3120,19 @@ # Drivers that are supported in block device operations. # # @throttle: Since 2.11 +# # @nvme: Since 2.12 +# # @copy-on-read: Since 3.0 +# # @blklogwrites: Since 3.0 +# # @blkreplay: Since 4.2 +# # @compress: Since 5.0 +# # @copy-before-write: Since 6.2 +# # @snapshot-access: Since 7.0 # # Since: 2.9 @@ -3066,36 +3163,43 @@ # Driver specific block device options for the file backend. # # @filename: path to the image file -# @pr-manager: the id for the object that will handle persistent reservations -# for this device (default: none, forward the commands via SG_IO; -# since 2.11) +# +# @pr-manager: the id for the object that will handle persistent +# reservations for this device (default: none, forward the +# commands via SG_IO; since 2.11) +# # @aio: AIO backend (default: threads) (since: 2.8) -# @aio-max-batch: maximum number of requests to batch together into a single -# submission in the AIO backend. The smallest value between -# this and the aio-max-batch value of the IOThread object is -# chosen. -# 0 means that the AIO backend will handle it automatically. -# (default: 0, since 6.2) -# @locking: whether to enable file locking. If set to 'auto', only enable -# when Open File Descriptor (OFD) locking API is available -# (default: auto, since 2.10) -# @drop-cache: invalidate page cache during live migration. This prevents -# stale data on the migration destination with cache.direct=off. -# Currently only supported on Linux hosts. -# (default: on, since: 4.0) -# @x-check-cache-dropped: whether to check that page cache was dropped on live -# migration. May cause noticeable delays if the image -# file is large, do not use in production. -# (default: off) (since: 3.0) +# +# @aio-max-batch: maximum number of requests to batch together into a +# single submission in the AIO backend. The smallest value +# between this and the aio-max-batch value of the IOThread object +# is chosen. 0 means that the AIO backend will handle it +# automatically. (default: 0, since 6.2) +# +# @locking: whether to enable file locking. 
If set to 'auto', only
+# enable when Open File Descriptor (OFD) locking API is available
+# (default: auto, since 2.10)
+#
+# @drop-cache: invalidate page cache during live migration. This
+# prevents stale data on the migration destination with
+# cache.direct=off. Currently only supported on Linux hosts.
+# (default: on, since: 4.0)
+#
+# @x-check-cache-dropped: whether to check that page cache was dropped
+# on live migration. May cause noticeable delays if the image
+# file is large, do not use in production. (default: off)
+# (since: 3.0)
#
# Features:
-# @dynamic-auto-read-only: If present, enabled auto-read-only means that the
-# driver will open the image read-only at first,
-# dynamically reopen the image file read-write when
-# the first writer is attached to the node and reopen
-# read-only when the last writer is detached. This
-# allows giving QEMU write permissions only on demand
-# when an operation actually needs write access.
+#
+# @dynamic-auto-read-only: If present, enabled auto-read-only means
+# that the driver will open the image read-only at first,
+# dynamically reopen the image file read-write when the first
+# writer is attached to the node and reopen read-only when the
+# last writer is detached. This allows giving QEMU write
+# permissions only on demand when an operation actually needs
+# write access.
+#
# @unstable: Member x-check-cache-dropped is meant for debugging.
#
# Since: 2.9
@@ -3119,11 +3223,14 @@
# Driver specific block device options for the null backend.
#
# @size: size of the device in bytes.
+#
# @latency-ns: emulated latency (in nanoseconds) in processing
-# requests. Default to zero which completes requests immediately.
-# (Since 2.4)
-# @read-zeroes: if true, reads from the device produce zeroes; if false, the
-# buffer is left unchanged. (default: false; since: 4.1)
+# requests. Defaults to zero, which completes requests immediately.
+# (Since 2.4)
+#
+# @read-zeroes: if true, reads from the device produce zeroes; if
+# false, the buffer is left unchanged.
+# (default: false; since: 4.1)
#
# Since: 2.9
##
@@ -3135,8 +3242,9 @@
#
# Driver specific block device options for the NVMe backend.
#
-# @device: PCI controller address of the NVMe device in
-# format hhhh:bb:ss.f (host:bus:slot.function)
+# @device: PCI controller address of the NVMe device in format
+# hhhh:bb:ss.f (host:bus:slot.function)
+#
# @namespace: namespace number of the device, starting from 1.
#
# Note that the PCI @device must have been unbound from any host
@@ -3153,13 +3261,17 @@
#
# Driver specific block device options for the vvfat protocol.
#
# @dir: directory to be exported as FAT image
+#
# @fat-type: FAT type: 12, 16 or 32
-# @floppy: whether to export a floppy image (true) or
-# partitioned hard disk (false; default)
-# @label: set the volume label, limited to 11 bytes. FAT16 and
-# FAT32 traditionally have some restrictions on labels, which are
-# ignored by most operating systems. Defaults to "QEMU VVFAT".
-# (since 2.4)
+#
+# @floppy: whether to export a floppy image (true) or partitioned hard
+# disk (false; default)
+#
+# @label: set the volume label, limited to 11 bytes. FAT16 and FAT32
+# traditionally have some restrictions on labels, which are
+# ignored by most operating systems. Defaults to "QEMU VVFAT".
+# (since 2.4)
+#
# @rw: whether to allow write operations (default: false)
#
# Since: 2.9
@@ -3171,8 +3283,8 @@
##
# @BlockdevOptionsGenericFormat:
#
-# Driver specific block device options for image format that have no option
-# besides their data source.
+# Driver specific block device options for image formats that have no
+# option besides their data source.
#
# @file: reference to or definition of the data source block device
#
@@ -3186,9 +3298,9 @@
#
# Driver specific block device options for LUKS.
#
-# @key-secret: the ID of a QCryptoSecret object providing
-# the decryption key (since 2.6). Mandatory except when
-# doing a metadata-only probe of the image.
+# @key-secret: the ID of a QCryptoSecret object providing the
+# decryption key (since 2.6). Mandatory except when doing a
+# metadata-only probe of the image.
#
# Since: 2.9
##
@@ -3199,12 +3311,12 @@
##
# @BlockdevOptionsGenericCOWFormat:
#
-# Driver specific block device options for image format that have no option
-# besides their data source and an optional backing file.
+# Driver specific block device options for image formats that have no
+# option besides their data source and an optional backing file.
#
# @backing: reference to or definition of the backing file block
-# device, null disables the backing file entirely.
-# Defaults to the backing file stored the image file.
+# device, null disables the backing file entirely. Defaults to
+# the backing file stored in the image file.
#
# Since: 2.9
##
@@ -3219,11 +3331,11 @@
#
# @none: Do not perform any checks
#
-# @constant: Perform only checks which can be done in constant time and
-# without reading anything from disk
+# @constant: Perform only checks which can be done in constant time
+# and without reading anything from disk
#
-# @cached: Perform only checks which can be done without reading anything
-# from disk
+# @cached: Perform only checks which can be done without reading
+# anything from disk
#
# @all: Perform all available overlap checks
#
@@ -3235,12 +3347,13 @@
##
# @Qcow2OverlapCheckFlags:
#
-# Structure of flags for each metadata structure. Setting a field to 'true'
-# makes qemu guard that structure against unintended overwriting. The default
-# value is chosen according to the template given.
+# Structure of flags for each metadata structure. Setting a field to
+# 'true' makes qemu guard that structure against unintended
+# overwriting. The default value is chosen according to the template
+# given.
#
-# @template: Specifies a template mode which can be adjusted using the other
-# flags, defaults to 'cached'
+# @template: Specifies a template mode which can be adjusted using the
+# other flags, defaults to 'cached'
#
# @bitmap-directory: since 3.0
#
@@ -3261,11 +3374,11 @@
##
# @Qcow2OverlapChecks:
#
-# Specifies which metadata structures should be guarded against unintended
-# overwriting.
+# Specifies which metadata structures should be guarded against
+# unintended overwriting.
#
-# @flags: set of flags for separate specification of each metadata structure
-# type
+# @flags: set of flags for separate specification of each metadata
+# structure type
#
# @mode: named mode which chooses a specific set of flags
#
@@ -3300,9 +3413,8 @@
#
# Driver specific block device options for qcow.
#
-# @encrypt: Image decryption options. Mandatory for
-# encrypted images, except when doing a metadata-only
-# probe of the image.
+# @encrypt: Image decryption options. Mandatory for encrypted images,
+# except when doing a metadata-only probe of the image.
#
# Since: 2.10
##
@@ -3334,11 +3446,11 @@
##
# @BlockdevOptionsPreallocate:
#
-# Filter driver intended to be inserted between format and protocol node
-# and do preallocation in protocol node on write.
+# Filter driver intended to be inserted between format and protocol
+# node, to do preallocation in the protocol node on write.
#
# @prealloc-align: on preallocation, align file length to this number,
-# default 1048576 (1M)
+# default 1048576 (1M)
#
# @prealloc-size: how much to preallocate, default 134217728 (128M)
#
@@ -3353,51 +3465,48 @@
#
# Driver specific block device options for qcow2.
#
-# @lazy-refcounts: whether to enable the lazy refcounts
-# feature (default is taken from the image file)
+# @lazy-refcounts: whether to enable the lazy refcounts feature
+# (default is taken from the image file)
#
-# @pass-discard-request: whether discard requests to the qcow2
-# device should be forwarded to the data source
+# @pass-discard-request: whether discard requests to the qcow2 device
+# should be forwarded to the data source
#
# @pass-discard-snapshot: whether discard requests for the data source
-# should be issued when a snapshot operation (e.g.
-# deleting a snapshot) frees clusters in the qcow2 file
+# should be issued when a snapshot operation (e.g. deleting a
+# snapshot) frees clusters in the qcow2 file
#
# @pass-discard-other: whether discard requests for the data source
-# should be issued on other occasions where a cluster
-# gets freed
+# should be issued on other occasions where a cluster gets freed
#
-# @overlap-check: which overlap checks to perform for writes
-# to the image, defaults to 'cached' (since 2.2)
+# @overlap-check: which overlap checks to perform for writes to the
+# image, defaults to 'cached' (since 2.2)
#
-# @cache-size: the maximum total size of the L2 table and
-# refcount block caches in bytes (since 2.2)
+# @cache-size: the maximum total size of the L2 table and refcount
+# block caches in bytes (since 2.2)
#
-# @l2-cache-size: the maximum size of the L2 table cache in
-# bytes (since 2.2)
+# @l2-cache-size: the maximum size of the L2 table cache in bytes
+# (since 2.2)
#
# @l2-cache-entry-size: the size of each entry in the L2 cache in
-# bytes. It must be a power of two between 512
-# and the cluster size. The default value is
-# the cluster size (since 2.12)
+# bytes. It must be a power of two between 512 and the cluster
+# size. The default value is the cluster size (since 2.12)
#
# @refcount-cache-size: the maximum size of the refcount block cache
-# in bytes (since 2.2)
+# in bytes (since 2.2)
#
# @cache-clean-interval: clean unused entries in the L2 and refcount
-# caches. The interval is in seconds. The default value
-# is 600 on supporting platforms, and 0 on other
-# platforms. 0 disables this feature. (since 2.5)
+# caches. The interval is in seconds. The default value is 600
+# on supporting platforms, and 0 on other platforms. 0 disables
+# this feature. (since 2.5)
#
-# @encrypt: Image decryption options. Mandatory for
-# encrypted images, except when doing a metadata-only
-# probe of the image. (since 2.10)
+# @encrypt: Image decryption options. Mandatory for encrypted images,
+# except when doing a metadata-only probe of the image. (since
+# 2.10)
#
# @data-file: reference to or definition of the external data file.
-# This may only be specified for images that require an
-# external data file. If it is not specified for such
-# an image, the data file name is loaded from the image
-# file.
(since 4.0) +# This may only be specified for images that require an external +# data file. If it is not specified for such an image, the data +# file name is loaded from the image file. (since 4.0) # # Since: 2.9 ## @@ -3420,7 +3529,9 @@ # @SshHostKeyCheckMode: # # @none: Don't check the host key at all +# # @hash: Compare the host key with a given hash +# # @known_hosts: Check the host key against the known_hosts file # # Since: 2.12 @@ -3432,7 +3543,9 @@ # @SshHostKeyCheckHashType: # # @md5: The given hash is an md5 hash +# # @sha1: The given hash is an sha1 hash +# # @sha256: The given hash is an sha256 hash # # Since: 2.12 @@ -3444,6 +3557,7 @@ # @SshHostKeyHash: # # @type: The hash algorithm used for the hash +# # @hash: The expected hash value # # Since: 2.12 @@ -3472,7 +3586,7 @@ # @user: user as which to connect, defaults to current local user name # # @host-key-check: Defines how and what to check the host key against -# (default: known_hosts) +# (default: known_hosts) # # Since: 2.9 ## @@ -3488,13 +3602,14 @@ # Trigger events supported by blkdebug. # # @l1_shrink_write_table: write zeros to the l1 table to shrink image. -# (since 2.11) +# (since 2.11) # -# @l1_shrink_free_l2_clusters: discard the l2 tables. (since 2.11) +# @l1_shrink_free_l2_clusters: discard the l2 tables. (since 2.11) # # @cor_write: a write due to copy-on-read (since 2.11) # -# @cluster_alloc_space: an allocation of file space for a cluster (since 4.1) +# @cluster_alloc_space: an allocation of file space for a cluster +# (since 4.1) # # @none: triggers once at creation of the blkdebug node (since 4.1) # @@ -3548,23 +3663,20 @@ # # @event: trigger event # -# @state: the state identifier blkdebug needs to be in to -# actually trigger the event; defaults to "any" +# @state: the state identifier blkdebug needs to be in to actually +# trigger the event; defaults to "any" # -# @iotype: the type of I/O operations on which this error should -# be injected; defaults to "all read, write, -# write-zeroes, discard, and flush operations" -# (since: 4.1) +# @iotype: the type of I/O operations on which this error should be +# injected; defaults to "all read, write, write-zeroes, discard, +# and flush operations" (since: 4.1) # -# @errno: error identifier (errno) to be returned; defaults to -# EIO +# @errno: error identifier (errno) to be returned; defaults to EIO # -# @sector: specifies the sector index which has to be affected -# in order to actually trigger the event; defaults to "any -# sector" +# @sector: specifies the sector index which has to be affected in +# order to actually trigger the event; defaults to "any sector" # -# @once: disables further events after this one has been -# triggered; defaults to false +# @once: disables further events after this one has been triggered; +# defaults to false # # @immediately: fail immediately; defaults to false # @@ -3587,10 +3699,10 @@ # @event: trigger event # # @state: the current state identifier blkdebug needs to be in; -# defaults to "any" +# defaults to "any" # # @new_state: the state identifier blkdebug is supposed to assume if -# this event is triggered +# this event is triggered # # Since: 2.9 ## @@ -3608,47 +3720,45 @@ # # @config: filename of the configuration file # -# @align: required alignment for requests in bytes, must be -# positive power of 2, or 0 for default +# @align: required alignment for requests in bytes, must be positive +# power of 2, or 0 for default # # @max-transfer: maximum size for I/O transfers in bytes, must be -# positive multiple of @align 
and of the underlying -# file's request alignment (but need not be a power of -# 2), or 0 for default (since 2.10) +# positive multiple of @align and of the underlying file's request +# alignment (but need not be a power of 2), or 0 for default +# (since 2.10) # -# @opt-write-zero: preferred alignment for write zero requests in bytes, -# must be positive multiple of @align and of the -# underlying file's request alignment (but need not be a -# power of 2), or 0 for default (since 2.10) +# @opt-write-zero: preferred alignment for write zero requests in +# bytes, must be positive multiple of @align and of the underlying +# file's request alignment (but need not be a power of 2), or 0 +# for default (since 2.10) # -# @max-write-zero: maximum size for write zero requests in bytes, must be -# positive multiple of @align, of @opt-write-zero, and of -# the underlying file's request alignment (but need not -# be a power of 2), or 0 for default (since 2.10) +# @max-write-zero: maximum size for write zero requests in bytes, must +# be positive multiple of @align, of @opt-write-zero, and of the +# underlying file's request alignment (but need not be a power of +# 2), or 0 for default (since 2.10) # -# @opt-discard: preferred alignment for discard requests in bytes, must -# be positive multiple of @align and of the underlying -# file's request alignment (but need not be a power of -# 2), or 0 for default (since 2.10) +# @opt-discard: preferred alignment for discard requests in bytes, +# must be positive multiple of @align and of the underlying file's +# request alignment (but need not be a power of 2), or 0 for +# default (since 2.10) # # @max-discard: maximum size for discard requests in bytes, must be -# positive multiple of @align, of @opt-discard, and of -# the underlying file's request alignment (but need not -# be a power of 2), or 0 for default (since 2.10) +# positive multiple of @align, of @opt-discard, and of the +# underlying file's request alignment (but need not be a power of +# 2), or 0 for default (since 2.10) # # @inject-error: array of error injection descriptions # # @set-state: array of state-change descriptions # # @take-child-perms: Permissions to take on @image in addition to what -# is necessary anyway (which depends on how the -# blkdebug node is used). Defaults to none. -# (since 5.0) +# is necessary anyway (which depends on how the blkdebug node is +# used). Defaults to none. (since 5.0) # # @unshare-child-perms: Permissions not to share on @image in addition -# to what cannot be shared anyway (which depends -# on how the blkdebug node is used). Defaults -# to none. (since 5.0) +# to what cannot be shared anyway (which depends on how the +# blkdebug node is used). Defaults to none. 
(since 5.0)
#
# Since: 2.9
##
@@ -3672,13 +3782,14 @@
#
# @log: block device used to log writes to @file
#
-# @log-sector-size: sector size used in logging writes to @file, determines
-# granularity of offsets and sizes of writes (default: 512)
+# @log-sector-size: sector size used in logging writes to @file,
+# determines granularity of offsets and sizes of writes
+# (default: 512)
#
# @log-append: append to an existing log (default: false)
#
-# @log-super-update-interval: interval of write requests after which the log
-# super block is updated to disk (default: 4096)
+# @log-super-update-interval: interval of write requests after which
+# the log super block is updated to disk (default: 4096)
#
# Since: 3.0
##
@@ -3734,18 +3845,18 @@
#
# Driver specific block device options for Quorum
#
-# @blkverify: true if the driver must print content mismatch
-# set to false by default
+# @blkverify: true if the driver must print content mismatches; set to
+# false by default
#
# @children: the children block devices to use
#
# @vote-threshold: the vote limit under which a read will fail
#
# @rewrite-corrupted: rewrite corrupted data when quorum is reached
-# (Since 2.1)
+# (Since 2.1)
#
# @read-pattern: choose read pattern and set to quorum by default
-# (Since 2.2)
+# (Since 2.2)
#
# Since: 2.9
##
@@ -3767,8 +3878,7 @@
#
# @server: gluster servers description
#
-# @debug: libgfapi log level (default '4' which is Error)
-# (Since 2.8)
+# @debug: libgfapi log level (default '4' which is Error) (Since 2.8)
#
# @logfile: libgfapi log file (default /dev/stderr) (Since 2.8)
#
@@ -3799,7 +3909,8 @@
#
# Driver specific block device options for the nvme-io_uring backend.
#
-# @path: path to the NVMe namespace's character device (e.g. /dev/ng0n1).
+# @path: path to the NVMe namespace's character device (e.g.
+# /dev/ng0n1).
#
# Since: 7.2
##
@@ -3810,10 +3921,11 @@
##
# @BlockdevOptionsVirtioBlkVfioPci:
#
-# Driver specific block device options for the virtio-blk-vfio-pci backend.
+# Driver specific block device options for the virtio-blk-vfio-pci
+# backend.
#
# @path: path to the PCI device's sysfs directory (e.g.
-# /sys/bus/pci/devices/0000:00:01.0).
+# /sys/bus/pci/devices/0000:00:01.0).
#
# Since: 7.2
##
@@ -3824,7 +3936,8 @@
##
# @BlockdevOptionsVirtioBlkVhostUser:
#
-# Driver specific block device options for the virtio-blk-vhost-user backend.
+# Driver specific block device options for the virtio-blk-vhost-user
+# backend.
#
# @path: path to the vhost-user UNIX domain socket.
#
@@ -3837,7 +3950,8 @@
##
# @BlockdevOptionsVirtioBlkVhostVdpa:
#
-# Driver specific block device options for the virtio-blk-vhost-vdpa backend.
+# Driver specific block device options for the virtio-blk-vhost-vdpa
+# backend.
#
# @path: path to the vhost-vdpa character device.
#
@@ -3877,24 +3991,23 @@
#
# @target: The target iqn name
#
-# @lun: LUN to connect to. Defaults to 0.
+# @lun: LUN to connect to. Defaults to 0.
#
-# @user: User name to log in with. If omitted, no CHAP
-# authentication is performed.
+# @user: User name to log in with. If omitted, no CHAP authentication
+# is performed.
#
-# @password-secret: The ID of a QCryptoSecret object providing
-# the password for the login. This option is required if
-# @user is specified.
+# @password-secret: The ID of a QCryptoSecret object providing the
+# password for the login. This option is required if @user is
+# specified.
#
-# @initiator-name: The iqn name we want to identify to the target
-# as.
If this option is not specified, an initiator name is
-# generated automatically.
+# @initiator-name: The iqn name we want to identify to the target as.
+# If this option is not specified, an initiator name is generated
+# automatically.
#
-# @header-digest: The desired header digest. Defaults to
-# none-crc32c.
+# @header-digest: The desired header digest. Defaults to none-crc32c.
#
-# @timeout: Timeout in seconds after which a request will
-# timeout. 0 means no timeout and is the default.
+# @timeout: Timeout in seconds after which a request will time out. 0
+# means no timeout and is the default.
#
# Driver specific block device options for iscsi
#
# Since: 2.9
@@ -3932,8 +4045,8 @@
##
# @RbdEncryptionOptionsLUKSBase:
#
-# @key-secret: ID of a QCryptoSecret object providing a passphrase
-# for unlocking the encryption
+# @key-secret: ID of a QCryptoSecret object providing a passphrase for
+# unlocking the encryption
#
# Since: 6.1
##
@@ -4001,11 +4114,10 @@
#
# @format: Encryption format.
#
-# @parent: Parent image encryption options (for cloned images).
-# Can be left unspecified if this cloned image is encrypted
-# using the same format and secret as its parent image (i.e.
-# not explicitly formatted) or if its parent image is not
-# encrypted. (Since 8.0)
+# @parent: Parent image encryption options (for cloned images). Can
+# be left unspecified if this cloned image is encrypted using the
+# same format and secret as its parent image (i.e. not explicitly
+# formatted) or if its parent image is not encrypted. (Since 8.0)
#
# Since: 6.1
##
@@ -4033,31 +4145,29 @@
#
# @pool: Ceph pool name.
#
-# @namespace: Rados namespace name in the Ceph pool. (Since 5.0)
+# @namespace: Rados namespace name in the Ceph pool. (Since 5.0)
#
# @image: Image name in the Ceph pool.
#
-# @conf: path to Ceph configuration file. Values
-# in the configuration file will be overridden by
-# options specified via QAPI.
+# @conf: path to Ceph configuration file. Values in the configuration
+# file will be overridden by options specified via QAPI.
#
# @snapshot: Ceph snapshot name.
#
-# @encrypt: Image encryption options. (Since 6.1)
+# @encrypt: Image encryption options. (Since 6.1)
#
# @user: Ceph id name.
#
-# @auth-client-required: Acceptable authentication modes.
-# This maps to Ceph configuration option
-# "auth_client_required". (Since 3.0)
+# @auth-client-required: Acceptable authentication modes. This maps
+# to Ceph configuration option "auth_client_required". (Since
+# 3.0)
#
-# @key-secret: ID of a QCryptoSecret object providing a key
-# for cephx authentication.
-# This maps to Ceph configuration option
-# "key". (Since 3.0)
+# @key-secret: ID of a QCryptoSecret object providing a key for cephx
+# authentication. This maps to Ceph configuration option "key".
+# (Since 3.0)
#
-# @server: Monitor host address and port. This maps
-# to the "mon_host" Ceph option.
+# @server: Monitor host address and port. This maps to the "mon_host"
+# Ceph option.
#
# Since: 2.9
##
@@ -4078,9 +4188,11 @@
#
# An enumeration of replication modes.
#
-# @primary: Primary mode, the vm's state will be sent to secondary QEMU.
+# @primary: Primary mode, the vm's state will be sent to secondary
+# QEMU.
#
-# @secondary: Secondary mode, receive the vm's state from primary QEMU.
+# @secondary: Secondary mode, receive the vm's state from primary
+# QEMU.
#
# Since: 2.9
##
@@ -4094,9 +4206,9 @@
#
# @mode: the replication mode
#
-# @top-id: In secondary mode, node name or device ID of the root
-# node who owns the replication node chain.
Must not be given in
-# primary mode.
+# @top-id: In secondary mode, node name or device ID of the root node
+# that owns the replication node chain. Must not be given in
+# primary mode.
#
# Since: 2.9
##
@@ -4142,25 +4254,22 @@
#
# @path: path of the image on the host
#
-# @user: UID value to use when talking to the
-# server (defaults to 65534 on Windows and getuid()
-# on unix)
+# @user: UID value to use when talking to the server (defaults to
+# 65534 on Windows and getuid() on unix)
#
-# @group: GID value to use when talking to the
-# server (defaults to 65534 on Windows and getgid()
-# in unix)
+# @group: GID value to use when talking to the server (defaults to
+# 65534 on Windows and getgid() on unix)
#
-# @tcp-syn-count: number of SYNs during the session
-# establishment (defaults to libnfs default)
+# @tcp-syn-count: number of SYNs during the session establishment
+# (defaults to libnfs default)
#
-# @readahead-size: set the readahead size in bytes (defaults
-# to libnfs default)
+# @readahead-size: set the readahead size in bytes (defaults to libnfs
+# default)
#
-# @page-cache-size: set the pagecache size in bytes (defaults
-# to libnfs default)
+# @page-cache-size: set the pagecache size in bytes (defaults to
+# libnfs default)
#
-# @debug: set the NFS debug level (max 2) (defaults
-# to libnfs default)
+# @debug: set the NFS debug level (max 2) (defaults to libnfs default)
#
# Since: 2.9
##
@@ -4177,25 +4286,26 @@
##
# @BlockdevOptionsCurlBase:
#
-# Driver specific block device options shared by all protocols supported by the
-# curl backend.
+# Driver specific block device options shared by all protocols
+# supported by the curl backend.
#
# @url: URL of the image file
#
-# @readahead: Size of the read-ahead cache; must be a multiple of
-# 512 (defaults to 256 kB)
+# @readahead: Size of the read-ahead cache; must be a multiple of 512
+# (defaults to 256 kB)
#
# @timeout: Timeout for connections, in seconds (defaults to 5)
#
# @username: Username for authentication (defaults to none)
#
# @password-secret: ID of a QCryptoSecret object providing a password
-# for authentication (defaults to no password)
+# for authentication (defaults to no password)
#
-# @proxy-username: Username for proxy authentication (defaults to none)
+# @proxy-username: Username for proxy authentication (defaults to
+# none)
#
-# @proxy-password-secret: ID of a QCryptoSecret object providing a password
-# for proxy authentication (defaults to no password)
+# @proxy-password-secret: ID of a QCryptoSecret object providing a
+# password for proxy authentication (defaults to no password)
#
# Since: 2.9
##
@@ -4211,15 +4321,15 @@
##
# @BlockdevOptionsCurlHttp:
#
-# Driver specific block device options for HTTP connections over the curl
-# backend. URLs must start with "http://".
+# Driver specific block device options for HTTP connections over the
+# curl backend. URLs must start with "http://".
#
-# @cookie: List of cookies to set; format is
-# "name1=content1; name2=content2;" as explained by
-# CURLOPT_COOKIE(3). Defaults to no cookies.
+# @cookie: List of cookies to set; format is "name1=content1;
+# name2=content2;" as explained by CURLOPT_COOKIE(3). Defaults to
+# no cookies.
#
-# @cookie-secret: ID of a QCryptoSecret object providing the cookie data in a
-# secure way. See @cookie for the format. (since 2.10)
+# @cookie-secret: ID of a QCryptoSecret object providing the cookie
+# data in a secure way. See @cookie for the format.
(since 2.10) # # Since: 2.9 ## @@ -4231,18 +4341,18 @@ ## # @BlockdevOptionsCurlHttps: # -# Driver specific block device options for HTTPS connections over the curl -# backend. URLs must start with "https://". +# Driver specific block device options for HTTPS connections over the +# curl backend. URLs must start with "https://". # -# @cookie: List of cookies to set; format is -# "name1=content1; name2=content2;" as explained by -# CURLOPT_COOKIE(3). Defaults to no cookies. +# @cookie: List of cookies to set; format is "name1=content1; +# name2=content2;" as explained by CURLOPT_COOKIE(3). Defaults to +# no cookies. # -# @sslverify: Whether to verify the SSL certificate's validity (defaults to -# true) +# @sslverify: Whether to verify the SSL certificate's validity +# (defaults to true) # -# @cookie-secret: ID of a QCryptoSecret object providing the cookie data in a -# secure way. See @cookie for the format. (since 2.10) +# @cookie-secret: ID of a QCryptoSecret object providing the cookie +# data in a secure way. See @cookie for the format. (since 2.10) # # Since: 2.9 ## @@ -4255,8 +4365,8 @@ ## # @BlockdevOptionsCurlFtp: # -# Driver specific block device options for FTP connections over the curl -# backend. URLs must start with "ftp://". +# Driver specific block device options for FTP connections over the +# curl backend. URLs must start with "ftp://". # # Since: 2.9 ## @@ -4267,11 +4377,11 @@ ## # @BlockdevOptionsCurlFtps: # -# Driver specific block device options for FTPS connections over the curl -# backend. URLs must start with "ftps://". +# Driver specific block device options for FTPS connections over the +# curl backend. URLs must start with "ftps://". # -# @sslverify: Whether to verify the SSL certificate's validity (defaults to -# true) +# @sslverify: Whether to verify the SSL certificate's validity +# (defaults to true) # # Since: 2.9 ## @@ -4290,30 +4400,31 @@ # # @tls-creds: TLS credentials ID # -# @tls-hostname: TLS hostname override for certificate validation (Since 7.0) +# @tls-hostname: TLS hostname override for certificate validation +# (Since 7.0) # -# @x-dirty-bitmap: A metadata context name such as "qemu:dirty-bitmap:NAME" -# or "qemu:allocation-depth" to query in place of the -# traditional "base:allocation" block status (see -# NBD_OPT_LIST_META_CONTEXT in the NBD protocol; and -# yes, naming this option x-context would have made -# more sense) (since 3.0) +# @x-dirty-bitmap: A metadata context name such as +# "qemu:dirty-bitmap:NAME" or "qemu:allocation-depth" to query in +# place of the traditional "base:allocation" block status (see +# NBD_OPT_LIST_META_CONTEXT in the NBD protocol; and yes, naming +# this option x-context would have made more sense) (since 3.0) # -# @reconnect-delay: On an unexpected disconnect, the nbd client tries to -# connect again until succeeding or encountering a serious -# error. During the first @reconnect-delay seconds, all -# requests are paused and will be rerun on a successful -# reconnect. After that time, any delayed requests and all -# future requests before a successful reconnect will -# immediately fail. Default 0 (Since 4.2) +# @reconnect-delay: On an unexpected disconnect, the nbd client tries +# to connect again until succeeding or encountering a serious +# error. During the first @reconnect-delay seconds, all requests +# are paused and will be rerun on a successful reconnect. After +# that time, any delayed requests and all future requests before a +# successful reconnect will immediately fail. 
Default 0 (Since +# 4.2) # -# @open-timeout: In seconds. If zero, the nbd driver tries the connection -# only once, and fails to open if the connection fails. -# If non-zero, the nbd driver will repeat connection attempts -# until successful or until @open-timeout seconds have elapsed. -# Default 0 (Since 7.0) +# @open-timeout: In seconds. If zero, the nbd driver tries the +# connection only once, and fails to open if the connection fails. +# If non-zero, the nbd driver will repeat connection attempts +# until successful or until @open-timeout seconds have elapsed. +# Default 0 (Since 7.0) # # Features: +# # @unstable: Member @x-dirty-bitmap is experimental. # # Since: 2.9 @@ -4333,6 +4444,7 @@ # Driver specific block device options for the raw driver. # # @offset: position where the block device starts +# # @size: the assumed size of the device # # Since: 2.9 @@ -4346,8 +4458,9 @@ # # Driver specific block device options for the throttle driver # -# @throttle-group: the name of the throttle-group object to use. It -# must already exist. +# @throttle-group: the name of the throttle-group object to use. It +# must already exist. +# # @file: reference to or definition of the data source block device # # Since: 2.11 @@ -4362,11 +4475,11 @@ # # Driver specific block device options for the copy-on-read driver. # -# @bottom: The name of a non-filter node (allocation-bearing layer) that -# limits the COR operations in the backing chain (inclusive), so -# that no data below this node will be copied by this filter. -# If option is absent, the limit is not applied, so that data -# from all backing layers may be copied. +# @bottom: The name of a non-filter node (allocation-bearing layer) +# that limits the COR operations in the backing chain (inclusive), +# so that no data below this node will be copied by this filter. +# If option is absent, the limit is not applied, so that data from +# all backing layers may be copied. # # Since: 6.0 ## @@ -4380,13 +4493,13 @@ # An enumeration of possible behaviors for copy-before-write operation # failures. # -# @break-guest-write: report the error to the guest. This way, the guest -# will not be able to overwrite areas that cannot be -# backed up, so the backup process remains valid. +# @break-guest-write: report the error to the guest. This way, the +# guest will not be able to overwrite areas that cannot be backed +# up, so the backup process remains valid. # -# @break-snapshot: continue guest write. Doing so will make the provided -# snapshot state invalid and any backup or export -# process based on it will finally fail. +# @break-snapshot: continue guest write. Doing so will make the +# provided snapshot state invalid and any backup or export process +# based on it will finally fail. # # Since: 7.1 ## @@ -4396,32 +4509,32 @@ ## # @BlockdevOptionsCbw: # -# Driver specific block device options for the copy-before-write driver, -# which does so called copy-before-write operations: when data is -# written to the filter, the filter first reads corresponding blocks -# from its file child and copies them to @target child. After successfully -# copying, the write request is propagated to file child. If copying -# fails, the original write request is failed too and no data is written -# to file child. +# Driver specific block device options for the copy-before-write +# driver, which does so called copy-before-write operations: when data +# is written to the filter, the filter first reads corresponding +# blocks from its file child and copies them to @target child. 
After
+# successfully copying, the write request is propagated to file child.
+# If copying fails, the original write request is failed too and no
+# data is written to file child.
#
# @target: The target for copy-before-write operations.
#
# @bitmap: If specified, copy-before-write filter will do
-# copy-before-write operations only for dirty regions of the
-# bitmap. Bitmap size must be equal to length of file and
-# target child of the filter. Note also, that bitmap is used
-# only to initialize internal bitmap of the process, so further
-# modifications (or removing) of specified bitmap doesn't
-# influence the filter. (Since 7.0)
+# copy-before-write operations only for dirty regions of the
+# bitmap. The bitmap size must be equal to the length of the file
+# and target child of the filter. Note also that the bitmap is
+# used only to initialize the internal bitmap of the process, so
+# further modification (or removal) of the specified bitmap
+# doesn't influence the filter. (Since 7.0)
#
# @on-cbw-error: Behavior on failure of copy-before-write operation.
-# Default is @break-guest-write. (Since 7.1)
+# Default is @break-guest-write. (Since 7.1)
#
-# @cbw-timeout: Zero means no limit. Non-zero sets the timeout in seconds
-# for copy-before-write operation. When a timeout occurs,
-# the respective copy-before-write operation will fail, and
-# the @on-cbw-error parameter will decide how this failure
-# is handled. Default 0. (Since 7.1)
+# @cbw-timeout: Zero means no limit. Non-zero sets the timeout in
+# seconds for the copy-before-write operation. When a timeout
+# occurs, the respective copy-before-write operation will fail,
+# and the @on-cbw-error parameter will decide how this failure is
+# handled. Default 0. (Since 7.1)
#
# Since: 6.2
##
@@ -4433,32 +4546,39 @@
##
# @BlockdevOptions:
#
-# Options for creating a block device. Many options are available for all
-# block devices, independent of the block driver:
+# Options for creating a block device. Many options are available for
+# all block devices, independent of the block driver:
#
# @driver: block driver name
-# @node-name: the node name of the new node (Since 2.0).
-# This option is required on the top level of blockdev-add.
-# Valid node names start with an alphabetic character and may
-# contain only alphanumeric characters, '-', '.' and '_'. Their
-# maximum length is 31 characters.
+#
+# @node-name: the node name of the new node (Since 2.0). This option
+# is required on the top level of blockdev-add. Valid node names
+# start with an alphabetic character and may contain only
+# alphanumeric characters, '-', '.' and '_'. Their maximum length
+# is 31 characters.
+#
# @discard: discard-related options (default: ignore)
+#
# @cache: cache-related options
-# @read-only: whether the block device should be read-only (default: false).
-# Note that some block drivers support only read-only access,
-# either generally or in certain configurations. In this case,
-# the default value does not work and the option must be
-# specified explicitly.
+#
+# @read-only: whether the block device should be read-only (default:
+# false).
Note that some block drivers support only read-only +# access, either generally or in certain configurations. In this +# case, the default value does not work and the option must be +# specified explicitly. +# +# @auto-read-only: if true and @read-only is false, QEMU may +# automatically decide not to open the image read-write as +# requested, but fall back to read-only instead (and switch +# between the modes later), e.g. depending on whether the image +# file is writable or whether a writing user is attached to the +# node (default: false, since 3.1) +# # @detect-zeroes: detect and optimize zero writes (Since 2.1) -# (default: off) -# @force-share: force share all permission on added nodes. -# Requires read-only=true. (Since 2.10) +# (default: off) +# +# @force-share: force share all permission on added nodes. Requires +# read-only=true. (Since 2.10) # # Remaining options are determined by the block driver. # @@ -4541,6 +4661,7 @@ # Reference to a block device. # # @definition: defines a new block device inline +# # @reference: references the ID of an existing block device # # Since: 2.9 @@ -4555,9 +4676,11 @@ # Reference to a block device. # # @definition: defines a new block device inline -# @reference: references the ID of an existing block device. -# An empty string means that no block device should -# be referenced. Deprecated; use null instead. +# +# @reference: references the ID of an existing block device. An empty +# string means that no block device should be referenced. +# Deprecated; use null instead. +# # @null: No block device should be referenced (since 2.10) # # Since: 2.9 @@ -4574,9 +4697,8 @@ # # Since: 2.9 # -# Example: +# Examples: # -# 1. # -> { "execute": "blockdev-add", # "arguments": { # "driver": "qcow2", @@ -4589,7 +4711,6 @@ # } # <- { "return": {} } # -# 2. # -> { "execute": "blockdev-add", # "arguments": { # "driver": "qcow2", @@ -4613,7 +4734,6 @@ # } # # <- { "return": {} } -# ## { 'command': 'blockdev-add', 'data': 'BlockdevOptions', 'boxed': true, 'allow-preconfig': true } @@ -4622,18 +4742,19 @@ # @blockdev-reopen: # # Reopens one or more block devices using the given set of options. -# Any option not specified will be reset to its default value regardless -# of its previous status. If an option cannot be changed or a particular -# driver does not support reopening then the command will return an -# error. All devices in the list are reopened in one transaction, so -# if one of them fails then the whole transaction is cancelled. +# Any option not specified will be reset to its default value +# regardless of its previous status. If an option cannot be changed +# or a particular driver does not support reopening then the command +# will return an error. All devices in the list are reopened in one +# transaction, so if one of them fails then the whole transaction is +# cancelled. # -# The command receives a list of block devices to reopen. For each one -# of them, the top-level @node-name option (from BlockdevOptions) must be -# specified and is used to select the block device to be reopened. -# Other @node-name options must be either omitted or set to the -# current name of the appropriate node. This command won't change any -# node name and any attempt to do it will result in an error. +# The command receives a list of block devices to reopen. For each +# one of them, the top-level @node-name option (from BlockdevOptions) +# must be specified and is used to select the block device to be +# reopened. 
Other @node-name options must be either omitted or set to +# the current name of the appropriate node. This command won't change +# any node name and any attempt to do it will result in an error. # # In the case of options that refer to child nodes, the behavior of # this command depends on the value: @@ -4649,7 +4770,7 @@ # # 4) NULL: the current child (if any) is detached. # -# Options (1) and (2) are supported in all cases. Option (3) is +# Options (1) and (2) are supported in all cases. Option (3) is # supported for @file and @backing, and option (4) for @backing only. # # Unlike with blockdev-add, the @backing option must always be present @@ -4666,8 +4787,8 @@ ## # @blockdev-del: # -# Deletes a block device that has been added using blockdev-add. -# The command will fail if the node is attached to a device or is +# Deletes a block device that has been added using blockdev-add. The +# command will fail if the node is attached to a device or is # otherwise being used. # # @node-name: Name of the graph node to delete. @@ -4692,7 +4813,6 @@ # "arguments": { "node-name": "node0" } # } # <- { "return": {} } -# ## { 'command': 'blockdev-del', 'data': { 'node-name': 'str' }, 'allow-preconfig': true } @@ -4703,14 +4823,17 @@ # Driver specific image creation options for file. # # @filename: Filename for the new image file +# # @size: Size of the virtual disk in bytes +# # @preallocation: Preallocation mode for the new image (default: off; -# allowed values: off, -# falloc (if CONFIG_POSIX_FALLOCATE), -# full (if CONFIG_POSIX)) +# allowed values: off, falloc (if CONFIG_POSIX_FALLOCATE), full +# (if CONFIG_POSIX)) +# # @nocow: Turn off copy-on-write (valid only on btrfs; default: off) -# @extent-size-hint: Extent size hint to add to the image file; 0 for not -# adding an extent size hint (default: 1 MB, since 5.1) +# +# @extent-size-hint: Extent size hint to add to the image file; 0 for +# not adding an extent size hint (default: 1 MB, since 5.1) # # Since: 2.12 ## @@ -4727,11 +4850,12 @@ # Driver specific image creation options for gluster. # # @location: Where to store the new image file +# # @size: Size of the virtual disk in bytes +# # @preallocation: Preallocation mode for the new image (default: off; -# allowed values: off, -# falloc (if CONFIG_GLUSTERFS_FALLOCATE), -# full (if CONFIG_GLUSTERFS_ZEROFILL)) +# allowed values: off, falloc (if CONFIG_GLUSTERFS_FALLOCATE), +# full (if CONFIG_GLUSTERFS_ZEROFILL)) # # Since: 2.12 ## @@ -4746,10 +4870,11 @@ # Driver specific image creation options for LUKS. # # @file: Node to create the image format on +# # @size: Size of the virtual disk in bytes -# @preallocation: Preallocation mode for the new image -# (since: 4.2) -# (default: off; allowed values: off, metadata, falloc, full) +# +# @preallocation: Preallocation mode for the new image (since: 4.2) +# (default: off; allowed values: off, metadata, falloc, full) # # Since: 2.12 ## @@ -4765,6 +4890,7 @@ # Driver specific image creation options for NFS. # # @location: Where to store the new image file +# # @size: Size of the virtual disk in bytes # # Since: 2.12 @@ -4779,7 +4905,9 @@ # Driver specific image creation options for parallels. # # @file: Node to create the image format on +# # @size: Size of the virtual disk in bytes +# # @cluster-size: Cluster size in bytes (default: 1 MB) # # Since: 2.12 @@ -4795,9 +4923,12 @@ # Driver specific image creation options for qcow. 
#
# @file: Node to create the image format on
+#
# @size: Size of the virtual disk in bytes
+#
# @backing-file: File name of the backing file if a backing file
-# should be used
+# should be used
+#
# @encrypt: Encryption options if the image should be encrypted
#
# Since: 2.12
@@ -4811,7 +4942,9 @@
##
# @BlockdevQcow2Version:
#
-# @v2: The original QCOW2 format as introduced in qemu 0.10 (version 2)
+# @v2: The original QCOW2 format as introduced in qemu 0.10 (version
+# 2)
+#
# @v3: The extended QCOW2 format as introduced in qemu 1.1 (version 3)
#
# Since: 2.12
@@ -4825,6 +4958,7 @@
# Compression type used in qcow2 image file
#
# @zlib: zlib compression, see <http://zlib.net/>
+#
# @zstd: zstd compression, see <http://github.com/facebook/zstd>
#
# Since: 5.1
@@ -4838,27 +4972,41 @@
# Driver specific image creation options for qcow2.
#
# @file: Node to create the image format on
+#
# @data-file: Node to use as an external data file in which all guest
-# data is stored so that only metadata remains in the qcow2
-# file (since: 4.0)
+# data is stored so that only metadata remains in the qcow2 file
+# (since: 4.0)
+#
# @data-file-raw: True if the external data file must stay valid as a
-# standalone (read-only) raw image without looking at qcow2
-# metadata (default: false; since: 4.0)
+# standalone (read-only) raw image without looking at qcow2
+# metadata (default: false; since: 4.0)
+#
# @extended-l2: True to make the image have extended L2 entries
-# (default: false; since 5.2)
+# (default: false; since 5.2)
+#
# @size: Size of the virtual disk in bytes
+#
# @version: Compatibility level (default: v3)
+#
# @backing-file: File name of the backing file if a backing file
-# should be used
+# should be used
+#
# @backing-fmt: Name of the block driver to use for the backing file
+#
# @encrypt: Encryption options if the image should be encrypted
+#
# @cluster-size: qcow2 cluster size in bytes (default: 65536)
+#
# @preallocation: Preallocation mode for the new image (default: off;
-# allowed values: off, falloc, full, metadata)
-# @lazy-refcounts: True if refcounts may be updated lazily (default: off)
+# allowed values: off, falloc, full, metadata)
+#
+# @lazy-refcounts: True if refcounts may be updated lazily
+# (default: off)
+#
# @refcount-bits: Width of reference counts in bits (default: 16)
+#
# @compression-type: The image cluster compression method
-# (default: zlib, since 5.1)
+# (default: zlib, since 5.1)
#
# Since: 2.12
##
@@ -4884,11 +5032,16 @@
# Driver specific image creation options for qed.
#
# @file: Node to create the image format on
+#
# @size: Size of the virtual disk in bytes
+#
# @backing-file: File name of the backing file if a backing file
-# should be used
+# should be used
+#
# @backing-fmt: Name of the block driver to use for the backing file
+#
# @cluster-size: Cluster size in bytes (default: 65536)
+#
# @table-size: L1/L2 table size (in clusters)
#
# Since: 2.12
@@ -4906,11 +5059,14 @@
#
# Driver specific image creation options for rbd/Ceph.
#
-# @location: Where to store the new image file. This location cannot
-# point to a snapshot.
+# @location: Where to store the new image file. This location cannot
+# point to a snapshot.
+#
# @size: Size of the virtual disk in bytes
+#
# @cluster-size: RBD object size
-# @encrypt: Image encryption options. (Since 6.1)
+#
+# @encrypt: Image encryption options.
(Since 6.1)
#
# Since: 2.12
##
@@ -4929,14 +5085,14 @@
#
# @monolithicFlat: Single flat data image and a descriptor file
#
-# @twoGbMaxExtentSparse: Data is split into 2GB (per virtual LBA) sparse extent
-# files, in addition to a descriptor file
+# @twoGbMaxExtentSparse: Data is split into 2GB (per virtual LBA)
+# sparse extent files, in addition to a descriptor file
#
-# @twoGbMaxExtentFlat: Data is split into 2GB (per virtual LBA) flat extent
-# files, in addition to a descriptor file
+# @twoGbMaxExtentFlat: Data is split into 2GB (per virtual LBA) flat
+# extent files, in addition to a descriptor file
#
-# @streamOptimized: Single file image sparse cluster allocation, optimized
-# for streaming over network.
+# @streamOptimized: Single file image sparse cluster allocation,
+# optimized for streaming over the network.
#
# Since: 4.0
##
@@ -4959,25 +5115,36 @@
#
# Driver specific image creation options for VMDK.
#
-# @file: Where to store the new image file. This refers to the image
-# file for monolithcSparse and streamOptimized format, or the
-# descriptor file for other formats.
+# @file: Where to store the new image file. This refers to the image
+# file for monolithicSparse and streamOptimized format, or the
+# descriptor file for other formats.
+#
# @size: Size of the virtual disk in bytes
-# @extents: Where to store the data extents. Required for monolithcFlat,
-# twoGbMaxExtentSparse and twoGbMaxExtentFlat formats. For
-# monolithicFlat, only one entry is required; for
-# twoGbMaxExtent* formats, the number of entries required is
-# calculated as extent_number = virtual_size / 2GB. Providing
-# more extents than will be used is an error.
-# @subformat: The subformat of the VMDK image. Default: "monolithicSparse".
-# @backing-file: The path of backing file. Default: no backing file is used.
-# @adapter-type: The adapter type used to fill in the descriptor. Default: ide.
-# @hwversion: Hardware version. The meaningful options are "4" or "6".
-# Default: "4".
-# @toolsversion: VMware guest tools version.
-# Default: "2147483647" (Since 6.2)
-# @zeroed-grain: Whether to enable zeroed-grain feature for sparse subformats.
-# Default: false.
+#
+# @extents: Where to store the data extents. Required for
+# monolithicFlat, twoGbMaxExtentSparse and twoGbMaxExtentFlat
+# formats. For monolithicFlat, only one entry is required; for
+# twoGbMaxExtent* formats, the number of entries required is
+# calculated as extent_number = virtual_size / 2GB. Providing more
+# extents than will be used is an error.
+#
+# @subformat: The subformat of the VMDK image. Default:
+# "monolithicSparse".
+#
+# @backing-file: The path of the backing file. Default: no backing
+# file is used.
+#
+# @adapter-type: The adapter type used to fill in the descriptor.
+# Default: ide.
+#
+# @hwversion: Hardware version. The meaningful options are "4" or
+# "6". Default: "4".
+#
+# @toolsversion: VMware guest tools version. Default: "2147483647"
+# (Since 6.2)
+#
+# @zeroed-grain: Whether to enable zeroed-grain feature for sparse
+# subformats. Default: false.
#
# Since: 4.0
##
@@ -4998,6 +5165,7 @@
# Driver specific image creation options for SSH.
#
# @location: Where to store the new image file
+#
# @size: Size of the virtual disk in bytes
#
# Since: 2.12
@@ -5012,9 +5180,11 @@
# Driver specific image creation options for VDI.
#
# @file: Node to create the image format on
+#
# @size: Size of the virtual disk in bytes
+#
# @preallocation: Preallocation mode for the new image (default: off;
-# allowed values: off, metadata)
+# allowed values: off, metadata)
#
# Since: 2.12
##
@@ -5027,6 +5197,7 @@
# @BlockdevVhdxSubformat:
#
# @dynamic: Growing image file
+#
# @fixed: Preallocated fixed-size image file
#
# Since: 2.12
@@ -5040,16 +5211,21 @@
# Driver specific image creation options for vhdx.
#
# @file: Node to create the image format on
+#
# @size: Size of the virtual disk in bytes
-# @log-size: Log size in bytes, must be a multiple of 1 MB
-# (default: 1 MB)
+#
+# @log-size: Log size in bytes, must be a multiple of 1 MB (default: 1
+# MB)
+#
# @block-size: Block size in bytes, must be a multiple of 1 MB and not
-# larger than 256 MB (default: automatically choose a block
-# size depending on the image size)
+# larger than 256 MB (default: automatically choose a block size
+# depending on the image size)
+#
# @subformat: vhdx subformat (default: dynamic)
-# @block-state-zero: Force use of payload blocks of type 'ZERO'. Non-standard,
-# but default. Do not set to 'off' when using 'qemu-img
-# convert' with subformat=dynamic.
+#
+# @block-state-zero: Force use of payload blocks of type
+# 'ZERO'. Non-standard, but default. Do not set to 'off' when
+# using 'qemu-img convert' with subformat=dynamic.
#
# Since: 2.12
##
@@ -5065,6 +5241,7 @@
# @BlockdevVpcSubformat:
#
# @dynamic: Growing image file
+#
# @fixed: Preallocated fixed-size image file
#
# Since: 2.12
@@ -5078,11 +5255,14 @@
# Driver specific image creation options for vpc (VHD).
#
# @file: Node to create the image format on
+#
# @size: Size of the virtual disk in bytes
+#
# @subformat: vhdx subformat (default: dynamic)
-# @force-size: Force use of the exact byte size instead of rounding to the
-# next size that can be represented in CHS geometry
-# (default: false)
+#
+# @force-size: Force use of the exact byte size instead of rounding to
+# the next size that can be represented in CHS geometry
+# (default: false)
#
# Since: 2.12
##
@@ -5125,7 +5305,7 @@
##
# @blockdev-create:
#
-# Starts a job to create an image format on a given node. The job is
+# Starts a job to create an image format on a given node. The job is
# automatically finalized, but a manual job-dismiss is required.
#
# @job-id: Identifier for the newly created job.
@@ -5154,10 +5334,10 @@
##
# @BlockdevAmendOptionsQcow2:
#
-# Driver specific image amend options for qcow2.
-# For now, only encryption options can be amended
+# Driver specific image amend options for qcow2. For now, only
+# encryption options can be amended
#
-# @encrypt Encryption options to be amended
+# @encrypt: Encryption options to be amended
#
# Since: 5.1
##
@@ -5184,8 +5364,9 @@
##
# @x-blockdev-amend:
#
-# Starts a job to amend format specific options of an existing open block device
-# The job is automatically finalized, but a manual job-dismiss is required.
+# Starts a job to amend format specific options of an existing open
+# block device. The job is automatically finalized, but a manual
+# job-dismiss is required.
#
# @job-id: Identifier for the newly created job.
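A minimal sketch of invoking the x-blockdev-amend command described above, in the same style as the other QMP examples in this schema. The job id, the node name "luks0", and the keyslot number are hypothetical, and the LUKS amend options (@state, @keyslot) are assumed from QCryptoBlockAmendOptionsLUKS in crypto.json; only the job-id/node-name/options/force argument shape comes from this patch:

-> { "execute": "x-blockdev-amend",
     "arguments": { "job-id": "job0",
                    "node-name": "luks0",
                    "options": { "driver": "luks",
                                 "state": "inactive",
                                 "keyslot": 1 },
                    "force": true } }
<- { "return": {} }

Like blockdev-create, the command returns immediately; the actual amend work runs as a job that is automatically finalized but must be dismissed manually once it has finished.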
#
@@ -5193,13 +5374,13 @@
#
# @options: Options (driver specific)
#
-# @force: Allow unsafe operations, format specific
-# For luks that allows erase of the last active keyslot
-# (permanent loss of data),
-# and replacement of an active keyslot
-# (possible loss of data if IO error happens)
+# @force: Allow unsafe operations, format specific. For luks this
+# allows erase of the last active keyslot (permanent loss of
+# data), and replacement of an active keyslot (possible loss of
+# data if an IO error happens)
#
# Features:
+#
# @unstable: This command is experimental.
#
# Since: 5.1
@@ -5231,33 +5412,33 @@
##
# @BLOCK_IMAGE_CORRUPTED:
#
-# Emitted when a disk image is being marked corrupt. The image can be
-# identified by its device or node name. The 'device' field is always
+# Emitted when a disk image is being marked corrupt. The image can be
+# identified by its device or node name. The 'device' field is always
# present for compatibility reasons, but it can be empty ("") if the
# image does not have a device name associated.
#
-# @device: device name. This is always present for compatibility
-# reasons, but it can be empty ("") if the image does not
-# have a device name associated.
+# @device: device name. This is always present for compatibility
+# reasons, but it can be empty ("") if the image does not have a
+# device name associated.
#
# @node-name: node name (Since: 2.4)
#
# @msg: informative message for human consumption, such as the kind of
-# corruption being detected. It should not be parsed by machine as it is
-# not guaranteed to be stable
+# corruption being detected. It should not be parsed by machine
+# as it is not guaranteed to be stable
#
# @offset: if the corruption resulted from an image access, this is
-# the host's access offset into the image
+# the host's access offset into the image
#
-# @size: if the corruption resulted from an image access, this is
-# the access size
+# @size: if the corruption resulted from an image access, this is the
+# access size
#
-# @fatal: if set, the image is marked corrupt and therefore unusable after this
-# event and must be repaired (Since 2.2; before, every
-# BLOCK_IMAGE_CORRUPTED event was fatal)
+# @fatal: if set, the image is marked corrupt and therefore unusable
+# after this event and must be repaired (Since 2.2; before, every
+# BLOCK_IMAGE_CORRUPTED event was fatal)
#
# Note: If action is "stop", a STOP event will eventually follow the
-# BLOCK_IO_ERROR event.
+# BLOCK_IO_ERROR event.
#
# Example:
#
@@ -5281,30 +5462,30 @@
#
# Emitted when a disk I/O error occurs
#
-# @device: device name. This is always present for compatibility
-# reasons, but it can be empty ("") if the image does not
-# have a device name associated.
+# @device: device name. This is always present for compatibility
+# reasons, but it can be empty ("") if the image does not have a
+# device name associated.
#
-# @node-name: node name. Note that errors may be reported for the root node
-# that is directly attached to a guest device rather than for the
-# node where the error occurred. The node name is not present if
-# the drive is empty. (Since: 2.8)
+# @node-name: node name. Note that errors may be reported for the
+# root node that is directly attached to a guest device rather
+# than for the node where the error occurred. The node name is
+# not present if the drive is empty. (Since: 2.8)
#
# @operation: I/O operation
#
# @action: action that has been taken
#
-# @nospace: true if I/O error was caused due to a no-space
-# condition.
This key is only present if query-block's -# io-status is present, please see query-block documentation -# for more information (since: 2.2) +# @nospace: true if I/O error was caused due to a no-space condition. +# This key is only present if query-block's io-status is present, +# please see query-block documentation for more information +# (since: 2.2) # -# @reason: human readable string describing the error cause. -# (This field is a debugging aid for humans, it should not -# be parsed by applications) (since: 2.2) +# @reason: human readable string describing the error cause. (This +# field is a debugging aid for humans, it should not be parsed by +# applications) (since: 2.2) # # Note: If action is "stop", a STOP event will eventually follow the -# BLOCK_IO_ERROR event +# BLOCK_IO_ERROR event # # Since: 0.13 # @@ -5317,7 +5498,6 @@ # "action": "stop", # "reason": "No space left on device" }, # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } -# ## { 'event': 'BLOCK_IO_ERROR', 'data': { 'device': 'str', '*node-name': 'str', @@ -5332,20 +5512,20 @@ # # @type: job type # -# @device: The job identifier. Originally the device name but other -# values are allowed since QEMU 2.7 +# @device: The job identifier. Originally the device name but other +# values are allowed since QEMU 2.7 # # @len: maximum progress value # -# @offset: current progress value. On success this is equal to len. -# On failure this is less than len +# @offset: current progress value. On success this is equal to len. +# On failure this is less than len # # @speed: rate limit, bytes per second # -# @error: error message. Only present on failure. This field -# contains a human-readable error message. There are no semantics -# other than that streaming has failed and clients should not try to -# interpret the error string +# @error: error message. Only present on failure. This field +# contains a human-readable error message. There are no semantics +# other than that streaming has failed and clients should not try +# to interpret the error string # # Since: 1.1 # @@ -5356,7 +5536,6 @@ # "len": 10737418240, "offset": 10737418240, # "speed": 0 }, # "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } -# ## { 'event': 'BLOCK_JOB_COMPLETED', 'data': { 'type' : 'JobType', @@ -5373,13 +5552,13 @@ # # @type: job type # -# @device: The job identifier. Originally the device name but other -# values are allowed since QEMU 2.7 +# @device: The job identifier. Originally the device name but other +# values are allowed since QEMU 2.7 # # @len: maximum progress value # -# @offset: current progress value. On success this is equal to len. -# On failure this is less than len +# @offset: current progress value. On success this is equal to len. +# On failure this is less than len # # @speed: rate limit, bytes per second # @@ -5392,7 +5571,6 @@ # "len": 10737418240, "offset": 134217728, # "speed": 0 }, # "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } -# ## { 'event': 'BLOCK_JOB_CANCELLED', 'data': { 'type' : 'JobType', @@ -5406,8 +5584,8 @@ # # Emitted when a block job encounters an error # -# @device: The job identifier. Originally the device name but other -# values are allowed since QEMU 2.7 +# @device: The job identifier. 
Originally the device name but other +# values are allowed since QEMU 2.7 # # @operation: I/O operation # @@ -5422,7 +5600,6 @@ # "operation": "write", # "action": "stop" }, # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } -# ## { 'event': 'BLOCK_JOB_ERROR', 'data': { 'device' : 'str', @@ -5436,18 +5613,18 @@ # # @type: job type # -# @device: The job identifier. Originally the device name but other -# values are allowed since QEMU 2.7 +# @device: The job identifier. Originally the device name but other +# values are allowed since QEMU 2.7 # # @len: maximum progress value # -# @offset: current progress value. On success this is equal to len. -# On failure this is less than len +# @offset: current progress value. On success this is equal to len. +# On failure this is less than len # # @speed: rate limit, bytes per second # -# Note: The "ready to complete" status is always reset by a @BLOCK_JOB_ERROR -# event +# Note: The "ready to complete" status is always reset by a +# @BLOCK_JOB_ERROR event # # Since: 1.3 # @@ -5457,7 +5634,6 @@ # "data": { "device": "drive0", "type": "mirror", "speed": 0, # "len": 2097152, "offset": 2097152 }, # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } -# ## { 'event': 'BLOCK_JOB_READY', 'data': { 'type' : 'JobType', @@ -5469,9 +5645,10 @@ ## # @BLOCK_JOB_PENDING: # -# Emitted when a block job is awaiting explicit authorization to finalize graph -# changes via @block-job-finalize. If this job is part of a transaction, it will -# not emit this event until the transaction has converged first. +# Emitted when a block job is awaiting explicit authorization to +# finalize graph changes via @block-job-finalize. If this job is part +# of a transaction, it will not emit this event until the transaction +# has converged first. # # @type: job type # @@ -5484,7 +5661,6 @@ # <- { "event": "BLOCK_JOB_PENDING", # "data": { "type": "mirror", "id": "backup_1" }, # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } -# ## { 'event': 'BLOCK_JOB_PENDING', 'data': { 'type' : 'JobType', @@ -5496,13 +5672,16 @@ # Preallocation mode of QEMU image file # # @off: no preallocation +# # @metadata: preallocate only for metadata +# # @falloc: like @full preallocation but allocate disk space by -# posix_fallocate() rather than writing data. +# posix_fallocate() rather than writing data. +# # @full: preallocate all data by writing it to the device to ensure -# disk space is really available. This data may or may not be -# zero, depending on the image format and storage. -# @full preallocation also sets up metadata correctly. +# disk space is really available. This data may or may not be +# zero, depending on the image format and storage. @full +# preallocation also sets up metadata correctly. # # Since: 2.2 ## @@ -5513,15 +5692,15 @@ # @BLOCK_WRITE_THRESHOLD: # # Emitted when writes on block device reaches or exceeds the -# configured write threshold. For thin-provisioned devices, this -# means the device should be extended to avoid pausing for -# disk exhaustion. -# The event is one shot. Once triggered, it needs to be +# configured write threshold. For thin-provisioned devices, this +# means the device should be extended to avoid pausing for disk +# exhaustion. The event is one shot. Once triggered, it needs to be # re-registered with another block-set-write-threshold command. # # @node-name: graph node name on which the threshold was exceeded. # -# @amount-exceeded: amount of data which exceeded the threshold, in bytes. 
+# @amount-exceeded: amount of data which exceeded the threshold, in +# bytes. # # @write-threshold: last configured threshold, in bytes. # @@ -5535,19 +5714,19 @@ ## # @block-set-write-threshold: # -# Change the write threshold for a block drive. An event will be +# Change the write threshold for a block drive. An event will be # delivered if a write to this block drive crosses the configured -# threshold. The threshold is an offset, thus must be -# non-negative. Default is no write threshold. Setting the threshold -# to zero disables it. +# threshold. The threshold is an offset, thus must be non-negative. +# Default is no write threshold. Setting the threshold to zero +# disables it. # -# This is useful to transparently resize thin-provisioned drives without -# the guest OS noticing. +# This is useful to transparently resize thin-provisioned drives +# without the guest OS noticing. # # @node-name: graph node name on which the threshold must be set. # # @write-threshold: configured threshold for the block device, bytes. -# Use 0 to disable the threshold. +# Use 0 to disable the threshold. # # Since: 2.3 # @@ -5557,7 +5736,6 @@ # "arguments": { "node-name": "mydev", # "write-threshold": 17179869184 } } # <- { "return": {} } -# ## { 'command': 'block-set-write-threshold', 'data': { 'node-name': 'str', 'write-threshold': 'uint64' }, @@ -5566,13 +5744,13 @@ ## # @x-blockdev-change: # -# Dynamically reconfigure the block driver state graph. It can be used -# to add, remove, insert or replace a graph node. Currently only the -# Quorum driver implements this feature to add or remove its child. This -# is useful to fix a broken quorum child. +# Dynamically reconfigure the block driver state graph. It can be +# used to add, remove, insert or replace a graph node. Currently only +# the Quorum driver implements this feature to add or remove its +# child. This is useful to fix a broken quorum child. # -# If @node is specified, it will be inserted under @parent. @child -# may not be specified in this case. If both @parent and @child are +# If @node is specified, it will be inserted under @parent. @child +# may not be specified in this case. If both @parent and @child are # specified but @node is not, @child will be detached from @parent. # # @parent: the id or name of the parent node. @@ -5582,23 +5760,25 @@ # @node: the name of the node that will be added. # # Features: -# @unstable: This command is experimental, and its API is not stable. It -# does not support all kinds of operations, all kinds of -# children, nor all block drivers. # -# FIXME Removing children from a quorum node means introducing -# gaps in the child indices. This cannot be represented in the -# 'children' list of BlockdevOptionsQuorum, as returned by -# .bdrv_refresh_filename(). +# @unstable: This command is experimental, and its API is not stable. +# It does not support all kinds of operations, all kinds of +# children, nor all block drivers. # -# Warning: The data in a new quorum child MUST be consistent -# with that of the rest of the array. +# FIXME Removing children from a quorum node means introducing +# gaps in the child indices. This cannot be represented in the +# 'children' list of BlockdevOptionsQuorum, as returned by +# .bdrv_refresh_filename(). +# +# Warning: The data in a new quorum child MUST be consistent with +# that of the rest of the array. # # Since: 2.7 # -# Example: +# Examples: # # 1. 
Add a new node to a quorum +# # -> { "execute": "blockdev-add", # "arguments": { # "driver": "raw", @@ -5612,11 +5792,11 @@ # <- { "return": {} } # # 2. Delete a quorum's node +# # -> { "execute": "x-blockdev-change", # "arguments": { "parent": "disk1", # "child": "children.1" } } # <- { "return": {} } -# ## { 'command': 'x-blockdev-change', 'data' : { 'parent': 'str', @@ -5628,8 +5808,8 @@ ## # @x-blockdev-set-iothread: # -# Move @node and its children into the @iothread. If @iothread is null then -# move @node and its children into the main loop. +# Move @node and its children into the @iothread. If @iothread is +# null then move @node and its children into the main loop. # # The node must not be attached to a BlockBackend. # @@ -5637,29 +5817,31 @@ # # @iothread: the name of the IOThread object or null for the main loop # -# @force: true if the node and its children should be moved when a BlockBackend -# is already attached +# @force: true if the node and its children should be moved when a +# BlockBackend is already attached # # Features: -# @unstable: This command is experimental and intended for test cases that -# need control over IOThreads only. +# +# @unstable: This command is experimental and intended for test cases +# that need control over IOThreads only. # # Since: 2.12 # -# Example: +# Examples: # # 1. Move a node into an IOThread +# # -> { "execute": "x-blockdev-set-iothread", # "arguments": { "node-name": "disk1", # "iothread": "iothread0" } } # <- { "return": {} } # # 2. Move a node into the main loop +# # -> { "execute": "x-blockdev-set-iothread", # "arguments": { "node-name": "disk1", # "iothread": null } } # <- { "return": {} } -# ## { 'command': 'x-blockdev-set-iothread', 'data' : { 'node-name': 'str', @@ -5704,7 +5886,6 @@ # <- { "event": "QUORUM_FAILURE", # "data": { "reference": "usr1", "sector-num": 345435, "sectors-count": 5 }, # "timestamp": { "seconds": 1344522075, "microseconds": 745528 } } -# ## { 'event': 'QUORUM_FAILURE', 'data': { 'reference': 'str', 'sector-num': 'int', 'sectors-count': 'int' } } @@ -5716,10 +5897,10 @@ # # @type: quorum operation type (Since 2.6) # -# @error: error message. Only present on failure. This field -# contains a human-readable error message. There are no semantics other -# than that the block layer reported an error and clients should not -# try to interpret the error string. +# @error: error message. Only present on failure. This field +# contains a human-readable error message. There are no semantics +# other than that the block layer reported an error and clients +# should not try to interpret the error string. # # @node-name: the graph node name of the block driver state # @@ -5731,22 +5912,21 @@ # # Since: 2.0 # -# Example: +# Examples: # # 1. Read operation # -# { "event": "QUORUM_REPORT_BAD", +# <- { "event": "QUORUM_REPORT_BAD", # "data": { "node-name": "node0", "sector-num": 345435, "sectors-count": 5, # "type": "read" }, # "timestamp": { "seconds": 1344522075, "microseconds": 745528 } } # # 2. 
Flush operation # -# { "event": "QUORUM_REPORT_BAD", +# <- { "event": "QUORUM_REPORT_BAD", # "data": { "node-name": "node0", "sector-num": 0, "sectors-count": 2097120, # "type": "flush", "error": "Broken pipe" }, # "timestamp": { "seconds": 1456406829, "microseconds": 291763 } } -# ## { 'event': 'QUORUM_REPORT_BAD', 'data': { 'type': 'QuorumOpType', '*error': 'str', 'node-name': 'str', @@ -5755,14 +5935,14 @@ ## # @BlockdevSnapshotInternal: # -# @device: the device name or node-name of a root node to generate the snapshot -# from +# @device: the device name or node-name of a root node to generate the +# snapshot from # # @name: the name of the internal snapshot to be created # -# Notes: In transaction, if @name is empty, or any snapshot matching @name -# exists, the operation will fail. Only some image formats support it, -# for example, qcow2, and rbd. +# Notes: In transaction, if @name is empty, or any snapshot matching +# @name exists, the operation will fail. Only some image formats +# support it, for example, qcow2, and rbd. # # Since: 1.7 ## @@ -5773,18 +5953,20 @@ # @blockdev-snapshot-internal-sync: # # Synchronously take an internal snapshot of a block device, when the -# format of the image used supports it. If the name is an empty +# format of the image used supports it. If the name is an empty # string, or a snapshot with name already exists, the operation will # fail. # -# For the arguments, see the documentation of BlockdevSnapshotInternal. +# For the arguments, see the documentation of +# BlockdevSnapshotInternal. # -# Returns: - nothing on success -# - If @device is not a valid block device, GenericError -# - If any snapshot matching @name exists, or @name is empty, -# GenericError -# - If the format of the image used does not support it, -# BlockFormatFeatureNotSupported +# Returns: +# - nothing on success +# - If @device is not a valid block device, GenericError +# - If any snapshot matching @name exists, or @name is empty, +# GenericError +# - If the format of the image used does not support it, +# GenericError # # Since: 1.7 # @@ -5795,7 +5977,6 @@ # "name": "snapshot0" } # } # <- { "return": {} } -# ## { 'command': 'blockdev-snapshot-internal-sync', 'data': 'BlockdevSnapshotInternal', @@ -5804,24 +5985,25 @@ ## # @blockdev-snapshot-delete-internal-sync: # -# Synchronously delete an internal snapshot of a block device, when the format -# of the image used support it. The snapshot is identified by name or id or -# both. One of the name or id is required. Return SnapshotInfo for the -# successfully deleted snapshot. +# Synchronously delete an internal snapshot of a block device, when +# the format of the image used supports it. The snapshot is identified +# by name or id or both. One of the name or id is required. Return +# SnapshotInfo for the successfully deleted snapshot.
# -# @device: the device name or node-name of a root node to delete the snapshot -# from +# @device: the device name or node-name of a root node to delete the +# snapshot from # # @id: optional the snapshot's ID to be deleted # # @name: optional the snapshot's name to be deleted # -# Returns: - SnapshotInfo on success -# - If @device is not a valid block device, GenericError -# - If snapshot not found, GenericError -# - If the format of the image used does not support it, -# BlockFormatFeatureNotSupported -# - If @id and @name are both not specified, GenericError +# Returns: +# - SnapshotInfo on success +# - If @device is not a valid block device, GenericError +# - If snapshot not found, GenericError +# - If the format of the image used does not support it, +# GenericError +# - If @id and @name are both not specified, GenericError # # Since: 1.7 # @@ -5842,7 +6024,6 @@ # "icount": 220414 # } # } -# ## { 'command': 'blockdev-snapshot-delete-internal-sync', 'data': { 'device': 'str', '*id': 'str', '*name': 'str'}, diff --git a/qapi/block-export.json b/qapi/block-export.json index 4627bbc4e6..7874a49ba7 100644 --- a/qapi/block-export.json +++ b/qapi/block-export.json @@ -11,20 +11,24 @@ ## # @NbdServerOptions: # -# Keep this type consistent with the nbd-server-start arguments. The only -# intended difference is using SocketAddress instead of SocketAddressLegacy. +# Keep this type consistent with the nbd-server-start arguments. The +# only intended difference is using SocketAddress instead of +# SocketAddressLegacy. # # @addr: Address on which to listen. +# # @tls-creds: ID of the TLS credentials object (since 2.6). +# # @tls-authz: ID of the QAuthZ authorization object used to validate -# the client's x509 distinguished name. This object is -# is only resolved at time of use, so can be deleted and -# recreated on the fly while the NBD server is active. -# If missing, it will default to denying access (since 4.0). -# @max-connections: The maximum number of connections to allow at the same -# time, 0 for unlimited. Setting this to 1 also stops -# the server from advertising multiple client support -# (since 5.2; default: 0) +# the client's x509 distinguished name. This object is only +# resolved at time of use, so can be deleted and recreated on the +# fly while the NBD server is active. If missing, it will default +# to denying access (since 4.0). +# +# @max-connections: The maximum number of connections to allow at the +# same time, 0 for unlimited. Setting this to 1 also stops the +# server from advertising multiple client support (since 5.2; +# default: 0) # # Since: 4.2 ## @@ -38,24 +42,28 @@ # @nbd-server-start: # # Start an NBD server listening on the given host and port. Block -# devices can then be exported using @nbd-server-add. The NBD -# server will present them as named exports; for example, another -# QEMU instance could refer to them as "nbd:HOST:PORT:exportname=NAME". +# devices can then be exported using @nbd-server-add. The NBD server +# will present them as named exports; for example, another QEMU +# instance could refer to them as "nbd:HOST:PORT:exportname=NAME". # -# Keep this type consistent with the NbdServerOptions type. The only intended -# difference is using SocketAddressLegacy instead of SocketAddress. +# Keep this type consistent with the NbdServerOptions type. The only +# intended difference is using SocketAddressLegacy instead of +# SocketAddress. # # @addr: Address on which to listen. +# # @tls-creds: ID of the TLS credentials object (since 2.6).
+# +# @tls-authz: ID of the QAuthZ authorization object used to validate -# the client's x509 distinguished name. This object is -# is only resolved at time of use, so can be deleted and -# recreated on the fly while the NBD server is active. -# If missing, it will default to denying access (since 4.0). -# @max-connections: The maximum number of connections to allow at the same -# time, 0 for unlimited. Setting this to 1 also stops -# the server from advertising multiple client support -# (since 5.2; default: 0). +# the client's x509 distinguished name. This object is only +# resolved at time of use, so can be deleted and recreated on the +# fly while the NBD server is active. If missing, it will default +# to denying access (since 4.0). +# +# @max-connections: The maximum number of connections to allow at the +# same time, 0 for unlimited. Setting this to 1 also stops the +# server from advertising multiple client support (since 5.2; +# default: 0). # # Returns: error if the server is already running. # @@ -71,14 +79,14 @@ ## # @BlockExportOptionsNbdBase: # -# An NBD block export (common options shared between nbd-server-add and -# the NBD branch of block-export-add). +# An NBD block export (common options shared between nbd-server-add +# and the NBD branch of block-export-add). # -# @name: Export name. If unspecified, the @device parameter is used as the -# export name. (Since 2.12) +# @name: Export name. If unspecified, the @device parameter is used +# as the export name. (Since 2.12) # # @description: Free-form description of the export, up to 4096 bytes. -# (Since 5.0) +# (Since 5.0) # # Since: 5.0 ## @@ -92,15 +100,15 @@ # block-export-add). # # @bitmaps: Also export each of the named dirty bitmaps reachable from -# @device, so the NBD client can use NBD_OPT_SET_META_CONTEXT with -# the metadata context name "qemu:dirty-bitmap:BITMAP" to inspect -# each bitmap. -# Since 7.1 bitmap may be specified by node/name pair. +# @device, so the NBD client can use NBD_OPT_SET_META_CONTEXT with +# the metadata context name "qemu:dirty-bitmap:BITMAP" to inspect +# each bitmap. Since 7.1 bitmap may be specified by node/name +# pair. # -# @allocation-depth: Also export the allocation depth map for @device, so -# the NBD client can use NBD_OPT_SET_META_CONTEXT with -# the metadata context name "qemu:allocation-depth" to -# inspect allocation details. (since 5.2) +# @allocation-depth: Also export the allocation depth map for @device, +# so the NBD client can use NBD_OPT_SET_META_CONTEXT with the +# metadata context name "qemu:allocation-depth" to inspect +# allocation details. (since 5.2) # # Since: 5.2 ## @@ -114,12 +122,15 @@ # # A vhost-user-blk block export. # -# @addr: The vhost-user socket on which to listen. Both 'unix' and 'fd' -# SocketAddress types are supported. Passed fds must be UNIX domain -# sockets. -# @logical-block-size: Logical block size in bytes. Defaults to 512 bytes. -# @num-queues: Number of request virtqueues. Must be greater than 0. Defaults -# to 1. +# @addr: The vhost-user socket on which to listen. Both 'unix' and +# 'fd' SocketAddress types are supported. Passed fds must be UNIX +# domain sockets. +# +# @logical-block-size: Logical block size in bytes. Defaults to 512 +# bytes. +# +# @num-queues: Number of request virtqueues. Must be greater than 0. +# Defaults to 1. # # Since: 5.2 ## @@ -138,7 +149,7 @@ # @on: Pass allow_other as a mount option. # # @auto: Try mounting with allow_other first, and if that fails, retry -# without allow_other. +# without allow_other.
# # Since: 6.1 ## @@ -151,24 +162,21 @@ # Options for exporting a block graph node on some (file) mountpoint # as a raw image. # -# @mountpoint: Path on which to export the block device via FUSE. -# This must point to an existing regular file. +# @mountpoint: Path on which to export the block device via FUSE. This +# must point to an existing regular file. # # @growable: Whether writes beyond the EOF should grow the block node -# accordingly. (default: false) +# accordingly. (default: false) # # @allow-other: If this is off, only qemu's user is allowed access to -# this export. That cannot be changed even with chmod or -# chown. -# Enabling this option will allow other users access to -# the export with the FUSE mount option "allow_other". -# Note that using allow_other as a non-root user requires -# user_allow_other to be enabled in the global fuse.conf -# configuration file. -# In auto mode (the default), the FUSE export driver will -# first attempt to mount the export with allow_other, and -# if that fails, try again without. -# (since 6.1; default: auto) +# this export. That cannot be changed even with chmod or chown. +# Enabling this option will allow other users access to the export +# with the FUSE mount option "allow_other". Note that using +# allow_other as a non-root user requires user_allow_other to be +# enabled in the global fuse.conf configuration file. In auto +# mode (the default), the FUSE export driver will first attempt to +# mount the export with allow_other, and if that fails, try again +# without. (since 6.1; default: auto) # # Since: 6.0 ## @@ -184,11 +192,16 @@ # A vduse-blk block export. # # @name: the name of VDUSE device (must be unique across the host). -# @num-queues: the number of virtqueues. Defaults to 1. -# @queue-size: the size of virtqueue. Defaults to 256. -# @logical-block-size: Logical block size in bytes. Range [512, PAGE_SIZE] -# and must be power of 2. Defaults to 512 bytes. -# @serial: the serial number of virtio block device. Defaults to empty string. +# +# @num-queues: the number of virtqueues. Defaults to 1. +# +# @queue-size: the size of virtqueue. Defaults to 256. +# +# @logical-block-size: Logical block size in bytes. Range [512, +# PAGE_SIZE] and must be a power of 2. Defaults to 512 bytes. +# +# @serial: the serial number of the virtio block device. Defaults to +# empty string. # # Since: 7.1 ## @@ -206,13 +219,13 @@ # # @device: The device name or node name of the node to be exported # -# @writable: Whether clients should be able to write to the device via the -# NBD connection (default false). +# @writable: Whether clients should be able to write to the device via +# the NBD connection (default false). # -# @bitmap: Also export a single dirty bitmap reachable from @device, so the -# NBD client can use NBD_OPT_SET_META_CONTEXT with the metadata -# context name "qemu:dirty-bitmap:BITMAP" to inspect the bitmap -# (since 4.0). +# @bitmap: Also export a single dirty bitmap reachable from @device, +# so the NBD client can use NBD_OPT_SET_META_CONTEXT with the +# metadata context name "qemu:dirty-bitmap:BITMAP" to inspect the +# bitmap (since 4.0). # # Since: 5.0 ## @@ -226,13 +239,16 @@ # # Export a block node to QEMU's embedded NBD server. # -# The export name will be used as the id for the resulting block export. +# The export name will be used as the id for the resulting block +# export. # # Features: -# @deprecated: This command is deprecated. Use @block-export-add instead.
# -# Returns: error if the server is not running, or export with the same name -# already exists. +# @deprecated: This command is deprecated. Use @block-export-add +# instead. +# +# Returns: error if the server is not running, or export with the same +# name already exists. # # Since: 1.3 ## @@ -245,17 +261,18 @@ # # Mode for removing a block export. # -# @safe: Remove export if there are no existing connections, fail otherwise. +# @safe: Remove export if there are no existing connections, fail +# otherwise. # # @hard: Drop all connections immediately and remove export. # -# TODO: Potential additional modes to be added in the future: +# Potential additional modes to be added in the future: # -# hide: Just hide export from new clients, leave existing connections as is. -# Remove export after all clients are disconnected. +# hide: Just hide export from new clients, leave existing connections +# as is. Remove export after all clients are disconnected. # -# soft: Hide export from new clients, answer with ESHUTDOWN for all further -# requests from existing clients. +# soft: Hide export from new clients, answer with ESHUTDOWN for all +# further requests from existing clients. # # Since: 2.12 ## @@ -268,16 +285,19 @@ # # @name: Block export id. # -# @mode: Mode of command operation. See @BlockExportRemoveMode description. -# Default is 'safe'. +# @mode: Mode of command operation. See @BlockExportRemoveMode +# description. Default is 'safe'. # # Features: -# @deprecated: This command is deprecated. Use @block-export-del instead. +# +# @deprecated: This command is deprecated. Use @block-export-del +# instead. # # Returns: error if -# - the server is not running -# - export is not found -# - mode is 'safe' and there are existing connections +# +# - the server is not running +# - export is not found +# - mode is 'safe' and there are existing connections # # Since: 2.12 ## @@ -289,8 +309,8 @@ ## # @nbd-server-stop: # -# Stop QEMU's embedded NBD server, and unregister all devices previously -# added via @nbd-server-add. +# Stop QEMU's embedded NBD server, and unregister all devices +# previously added via @nbd-server-add. # # Since: 1.3 ## @@ -303,8 +323,11 @@ # An enumeration of block export types # # @nbd: NBD export +# # @vhost-user-blk: vhost-user-blk export (since 5.2) +# # @fuse: FUSE export (since: 6.0) +# # @vduse-blk: vduse-blk export (since 7.1) # # Since: 4.2 @@ -319,28 +342,31 @@ ## # @BlockExportOptions: # -# Describes a block export, i.e. how single node should be exported on an -# external interface. +# Describes a block export, i.e. how a single node should be exported +# on an external interface. # -# @id: A unique identifier for the block export (across all export types) +# @id: A unique identifier for the block export (across all export +# types) # -# @node-name: The node name of the block node to be exported (since: 5.2) +# @node-name: The node name of the block node to be exported +# (since: 5.2) # # @writable: True if clients should be able to write to the export -# (default false) +# (default false) # -# @writethrough: If true, caches are flushed after every write request to the -# export before completion is signalled. (since: 5.2; -# default: false) +# @writethrough: If true, caches are flushed after every write request +# to the export before completion is signalled. (since: 5.2; +# default: false) # -# @iothread: The name of the iothread object where the export will run. The -# default is to use the thread currently associated with the -# block node.
(since: 5.2) +# @iothread: The name of the iothread object where the export will +# run. The default is to use the thread currently associated with +# the block node. (since: 5.2) # -# @fixed-iothread: True prevents the block node from being moved to another -# thread while the export is active. If true and @iothread is -# given, export creation fails if the block node cannot be -# moved to the iothread. The default is false. (since: 5.2) +# @fixed-iothread: True prevents the block node from being moved to +# another thread while the export is active. If true and +# @iothread is given, export creation fails if the block node +# cannot be moved to the iothread. The default is false. +# (since: 5.2) # # Since: 4.2 ## @@ -377,17 +403,17 @@ ## # @block-export-del: # -# Request to remove a block export. This drops the user's reference to the -# export, but the export may still stay around after this command returns until -# the shutdown of the export has completed. +# Request to remove a block export. This drops the user's reference +# to the export, but the export may still stay around after this +# command returns until the shutdown of the export has completed. # # @id: Block export id. # -# @mode: Mode of command operation. See @BlockExportRemoveMode description. -# Default is 'safe'. +# @mode: Mode of command operation. See @BlockExportRemoveMode +# description. Default is 'safe'. # -# Returns: Error if the export is not found or @mode is 'safe' and the export -# is still in use (e.g. by existing client connections) +# Returns: Error if the export is not found or @mode is 'safe' and the +# export is still in use (e.g. by existing client connections) # # Since: 5.2 ## @@ -419,8 +445,7 @@ # @node-name: The node name of the block node that is exported # # @shutting-down: True if the export is shutting down (e.g. after a -# block-export-del command, but before the shutdown has -# completed) +# block-export-del command, but before the shutdown has completed) # # Since: 5.2 ## diff --git a/qapi/block.json b/qapi/block.json index 5fe068f903..0f25ce3961 100644 --- a/qapi/block.json +++ b/qapi/block.json @@ -19,26 +19,26 @@ # translate logical CHS to physical; instead, they will use logical # block addressing. # -# @auto: If cylinder/heads/sizes are passed, choose between none and LBA -# depending on the size of the disk. If they are not passed, -# choose none if QEMU can guess that the disk had 16 or fewer -# heads, large if QEMU can guess that the disk had 131072 or -# fewer tracks across all heads (i.e. cylinders*heads<131072), -# otherwise LBA. +# @auto: If cylinder/heads/sizes are passed, choose between none and +# LBA depending on the size of the disk. If they are not passed, +# choose none if QEMU can guess that the disk had 16 or fewer +# heads, large if QEMU can guess that the disk had 131072 or fewer +# tracks across all heads (i.e. cylinders*heads<131072), otherwise +# LBA. # # @none: The physical disk geometry is equal to the logical geometry. # # @lba: Assume 63 sectors per track and one of 16, 32, 64, 128 or 255 -# heads (if fewer than 255 are enough to cover the whole disk -# with 1024 cylinders/head). The number of cylinders/head is -# then computed based on the number of sectors and heads. +# heads (if fewer than 255 are enough to cover the whole disk with +# 1024 cylinders/head). The number of cylinders/head is then +# computed based on the number of sectors and heads. 
# -# @large: The number of cylinders per head is scaled down to 1024 -# by correspondingly scaling up the number of heads. +# @large: The number of cylinders per head is scaled down to 1024 by +# correspondingly scaling up the number of heads. # # @rechs: Same as @large, but first convert a 16-head geometry to -# 15-head, by proportionally scaling up the number of -# cylinders/head. +# 15-head, by proportionally scaling up the number of +# cylinders/head. # # Since: 2.0 ## @@ -51,9 +51,13 @@ # Type of Floppy drive to be emulated by the Floppy Disk Controller. # # @144: 1.44MB 3.5" drive +# # @288: 2.88MB 3.5" drive +# # @120: 1.2MB 5.25" drive +# # @none: No drive connected +# # @auto: Automatically determined by inserted media at boot # # Since: 2.6 @@ -68,8 +72,8 @@ # # @id: the identifier of the persistent reservation manager # -# @connected: true if the persistent reservation manager is connected to -# the underlying storage or helper +# @connected: true if the persistent reservation manager is connected +# to the underlying storage or helper # # Since: 3.0 ## @@ -79,9 +83,11 @@ ## # @query-pr-managers: # -# Returns a list of information about each persistent reservation manager. +# Returns a list of information about each persistent reservation +# manager. # -# Returns: a list of @PRManagerInfo for each persistent reservation manager +# Returns: a list of @PRManagerInfo for each persistent reservation +# manager # # Since: 3.0 ## @@ -98,13 +104,15 @@ # @id: The name or QOM path of the guest device (since: 2.8) # # @force: If true, eject regardless of whether the drive is locked. -# If not specified, the default value is false. +# If not specified, the default value is false. # # Features: +# # @deprecated: Member @device is deprecated. Use @id instead. # -# Returns: - Nothing on success -# - If @device is not a valid block device, DeviceNotFound +# Returns: +# - Nothing on success +# - If @device is not a valid block device, DeviceNotFound # # Notes: Ejecting a device with no media results in success # @@ -123,32 +131,33 @@ ## # @blockdev-open-tray: # -# Opens a block device's tray. If there is a block driver state tree inserted as -# a medium, it will become inaccessible to the guest (but it will remain -# associated to the block device, so closing the tray will make it accessible -# again). +# Opens a block device's tray. If there is a block driver state tree +# inserted as a medium, it will become inaccessible to the guest (but +# it will remain associated to the block device, so closing the tray +# will make it accessible again). # # If the tray was already open before, this will be a no-op. # -# Once the tray opens, a DEVICE_TRAY_MOVED event is emitted. There are cases in -# which no such event will be generated, these include: +# Once the tray opens, a DEVICE_TRAY_MOVED event is emitted. 
There +# are cases in which no such event will be generated; these include: # -# - if the guest has locked the tray, @force is false and the guest does not -# respond to the eject request -# - if the BlockBackend denoted by @device does not have a guest device attached -# to it +# - if the guest has locked the tray, @force is false and the guest +# does not respond to the eject request +# - if the BlockBackend denoted by @device does not have a guest +# device attached to it # - if the guest device does not have an actual tray # # @device: Block device name # # @id: The name or QOM path of the guest device (since: 2.8) # -# @force: if false (the default), an eject request will be sent to -# the guest if it has locked the tray (and the tray will not be opened -# immediately); if true, the tray will be opened regardless of whether -# it is locked +# @force: if false (the default), an eject request will be sent to the +# guest if it has locked the tray (and the tray will not be opened +# immediately); if true, the tray will be opened regardless of +# whether it is locked # # Features: +# # @deprecated: Member @device is deprecated. Use @id instead. # # Since: 2.5 @@ -166,7 +175,6 @@ # "tray-open": true } } # # <- { "return": {} } -# ## { 'command': 'blockdev-open-tray', 'data': { '*device': { 'type': 'str', 'features': [ 'deprecated' ] }, @@ -176,9 +184,9 @@ ## # @blockdev-close-tray: # -# Closes a block device's tray. If there is a block driver state tree associated -# with the block device (which is currently ejected), that tree will be loaded -# as the medium. +# Closes a block device's tray. If there is a block driver state tree +# associated with the block device (which is currently ejected), that +# tree will be loaded as the medium. # # If the tray was already closed before, this will be a no-op. # @@ -187,6 +195,7 @@ # @id: The name or QOM path of the guest device (since: 2.8) # # Features: +# # @deprecated: Member @device is deprecated. Use @id instead. # # Since: 2.5 @@ -204,7 +213,6 @@ # "tray-open": false } } # # <- { "return": {} } -# ## { 'command': 'blockdev-close-tray', 'data': { '*device': { 'type': 'str', 'features': [ 'deprecated' ] }, @@ -213,11 +221,12 @@ ## # @blockdev-remove-medium: # -# Removes a medium (a block driver state tree) from a block device. That block -# device's tray must currently be open (unless there is no attached guest -# device). +# Removes a medium (a block driver state tree) from a block device. +# That block device's tray must currently be open (unless there is no +# attached guest device). # -# If the tray is open and there is no medium inserted, this will be a no-op. +# If the tray is open and there is no medium inserted, this will be a +# no-op. # # @id: The name or QOM path of the guest device # @@ -247,7 +256,6 @@ # "arguments": { "id": "ide0-1-0" } } # # <- { "return": {} } -# ## { 'command': 'blockdev-remove-medium', 'data': { 'id': 'str' } } @@ -255,9 +263,9 @@ ## # @blockdev-insert-medium: # -# Inserts a medium (a block driver state tree) into a block device. That block -# device's tray must currently be open (unless there is no attached guest -# device) and there must be no medium inserted already. +# Inserts a medium (a block driver state tree) into a block device. +# That block device's tray must currently be open (unless there is no +# attached guest device) and there must be no medium inserted already.
# # @id: The name or QOM path of the guest device # @@ -280,7 +288,6 @@ # "node-name": "node0" } } # # <- { "return": {} } -# ## { 'command': 'blockdev-insert-medium', 'data': { 'id': 'str', @@ -306,30 +313,32 @@ ## # @blockdev-change-medium: # -# Changes the medium inserted into a block device by ejecting the current medium -# and loading a new image file which is inserted as the new medium (this command -# combines blockdev-open-tray, blockdev-remove-medium, blockdev-insert-medium -# and blockdev-close-tray). +# Changes the medium inserted into a block device by ejecting the +# current medium and loading a new image file which is inserted as the +# new medium (this command combines blockdev-open-tray, +# blockdev-remove-medium, blockdev-insert-medium and +# blockdev-close-tray). # # @device: Block device name # -# @id: The name or QOM path of the guest device -# (since: 2.8) +# @id: The name or QOM path of the guest device (since: 2.8) # # @filename: filename of the new image to be loaded # -# @format: format to open the new image with (defaults to -# the probed format) +# @format: format to open the new image with (defaults to the probed +# format) # # @read-only-mode: change the read-only mode of the device; defaults -# to 'retain' +# to 'retain' # -# @force: if false (the default), an eject request through blockdev-open-tray -# will be sent to the guest if it has locked the tray (and the tray -# will not be opened immediately); if true, the tray will be opened -# regardless of whether it is locked. (since 7.1) +# @force: if false (the default), an eject request through +# blockdev-open-tray will be sent to the guest if it has locked +# the tray (and the tray will not be opened immediately); if true, +# the tray will be opened regardless of whether it is locked. +# (since 7.1) # # Features: +# # @deprecated: Member @device is deprecated. Use @id instead. # # Since: 2.5 @@ -363,7 +372,6 @@ # "read-only-mode": "read-only" } } # # <- { "return": {} } -# ## { 'command': 'blockdev-change-medium', 'data': { '*device': { 'type': 'str', 'features': [ 'deprecated' ] }, @@ -376,16 +384,17 @@ ## # @DEVICE_TRAY_MOVED: # -# Emitted whenever the tray of a removable device is moved by the guest or by -# HMP/QMP commands +# Emitted whenever the tray of a removable device is moved by the +# guest or by HMP/QMP commands # -# @device: Block device name. This is always present for compatibility -# reasons, but it can be empty ("") if the image does not -# have a device name associated. +# @device: Block device name. This is always present for +# compatibility reasons, but it can be empty ("") if the image +# does not have a device name associated. # # @id: The name or QOM path of the guest device (since 2.8) # -# @tray-open: true if the tray has been opened or false if it has been closed +# @tray-open: true if the tray has been opened or false if it has been +# closed # # Since: 1.1 # @@ -397,7 +406,6 @@ # "tray-open": true # }, # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } -# ## { 'event': 'DEVICE_TRAY_MOVED', 'data': { 'device': 'str', 'id': 'str', 'tray-open': 'bool' } } @@ -421,7 +429,6 @@ # "connected": true # }, # "timestamp": { "seconds": 1519840375, "microseconds": 450486 } } -# ## { 'event': 'PR_MANAGER_STATUS_CHANGED', 'data': { 'id': 'str', 'connected': 'bool' } } @@ -436,28 +443,29 @@ # # If two or more devices are members of the same group, the limits # will apply to the combined I/O of the whole group in a round-robin -# fashion. 
Therefore, setting new I/O limits to a device will affect +# fashion. Therefore, setting new I/O limits to a device will affect # the whole group. # # The name of the group can be specified using the 'group' parameter. # If the parameter is unset, it is assumed to be the current group of -# that device. If it's not in any group yet, the name of the device +# that device. If it's not in any group yet, the name of the device # will be used as the name for its group. # # The 'group' parameter can also be used to move a device to a -# different group. In this case the limits specified in the parameters -# will be applied to the new group only. +# different group. In this case the limits specified in the +# parameters will be applied to the new group only. # # I/O limits can be disabled by setting all of them to 0. In this case # the device will be removed from its group and the rest of its -# members will not be affected. The 'group' parameter is ignored. +# members will not be affected. The 'group' parameter is ignored. # -# Returns: - Nothing on success -# - If @device is not a valid block device, DeviceNotFound +# Returns: +# - Nothing on success +# - If @device is not a valid block device, DeviceNotFound # # Since: 1.1 # -# Example: +# Examples: # # -> { "execute": "block_set_io_throttle", # "arguments": { "id": "virtio-blk-pci0/virtio-backend", @@ -504,37 +512,43 @@ # # Manage read, write and flush latency histograms for the device. # -# If only @id parameter is specified, remove all present latency histograms -# for the device. Otherwise, add/reset some of (or all) latency histograms. +# If only @id parameter is specified, remove all present latency +# histograms for the device. Otherwise, add/reset some of (or all) +# latency histograms. # # @id: The name or QOM path of the guest device. # # @boundaries: list of interval boundary values (see description in -# BlockLatencyHistogramInfo definition). If specified, all -# latency histograms are removed, and empty ones created for all -# io types with intervals corresponding to @boundaries (except for -# io types, for which specific boundaries are set through the -# following parameters). +# BlockLatencyHistogramInfo definition). If specified, all latency +# histograms are removed, and empty ones created for all io types +# with intervals corresponding to @boundaries (except for io +# types, for which specific boundaries are set through the +# following parameters). # # @boundaries-read: list of interval boundary values for read latency -# histogram. If specified, old read latency histogram is -# removed, and empty one created with intervals -# corresponding to @boundaries-read. The parameter has higher -# priority then @boundaries. +# histogram. If specified, old read latency histogram is removed, +# and empty one created with intervals corresponding to +# @boundaries-read. The parameter has higher priority then +# @boundaries. # -# @boundaries-write: list of interval boundary values for write latency -# histogram. +# @boundaries-write: list of interval boundary values for write +# latency histogram. # -# @boundaries-flush: list of interval boundary values for flush latency -# histogram. +# @boundaries-zap: list of interval boundary values for zone append write +# latency histogram. # -# Returns: error if device is not found or any boundary arrays are invalid. +# @boundaries-flush: list of interval boundary values for flush +# latency histogram. +# +# Returns: error if device is not found or any boundary arrays are +# invalid. 
# # Since: 4.0 # # Example: -# set new histograms for all io types with intervals -# [0, 10), [10, 50), [50, 100), [100, +inf): +# +# set new histograms for all io types with intervals [0, 10), [10, +# 50), [50, 100), [100, +inf): # # -> { "execute": "block-latency-histogram-set", # "arguments": { "id": "drive0", @@ -542,8 +556,9 @@ # <- { "return": {} } # # Example: -# set new histogram only for write, other histograms will remain -# not changed (or not created): +# +# set new histogram only for write, other histograms will remain not +# changed (or not created): # # -> { "execute": "block-latency-histogram-set", # "arguments": { "id": "drive0", @@ -551,9 +566,10 @@ # <- { "return": {} } # # Example: -# set new histograms with the following intervals: -# read, flush: [0, 10), [10, 50), [50, 100), [100, +inf) -# write: [0, 1000), [1000, 5000), [5000, +inf) +# +# set new histograms with the following intervals: read, flush: [0, +# 10), [10, 50), [50, 100), [100, +inf); write: [0, 1000), [1000, +# 5000), [5000, +inf) # # -> { "execute": "block-latency-histogram-set", # "arguments": { "id": "drive0", @@ -562,6 +578,7 @@ # <- { "return": {} } # # Example: +# # remove all latency histograms: # # -> { "execute": "block-latency-histogram-set", @@ -573,5 +590,6 @@ '*boundaries': ['uint64'], '*boundaries-read': ['uint64'], '*boundaries-write': ['uint64'], + '*boundaries-zap': ['uint64'], '*boundaries-flush': ['uint64'] }, 'allow-preconfig': true } diff --git a/qapi/char.json b/qapi/char.json index 923dc5056d..e413ac2b70 100644 --- a/qapi/char.json +++ b/qapi/char.json @@ -17,12 +17,12 @@ # # @filename: the filename of the character device # -# @frontend-open: shows whether the frontend device attached to this backend -# (eg. with the chardev=... option) is in open or closed state -# (since 2.1) +# @frontend-open: shows whether the frontend device attached to this +# backend (eg. with the chardev=... option) is in open or closed +# state (since 2.1) # -# Notes: @filename is encoded using the QEMU command line character device -# encoding. See the QEMU man page for details. +# Notes: @filename is encoded using the QEMU command line character +# device encoding. See the QEMU man page for details. # # Since: 0.14 ## @@ -62,7 +62,6 @@ # } # ] # } -# ## { 'command': 'query-chardev', 'returns': ['ChardevInfo'], 'allow-preconfig': true } @@ -106,7 +105,6 @@ # } # ] # } -# ## { 'command': 'query-chardev-backends', 'returns': ['ChardevBackendInfo'] } @@ -135,11 +133,11 @@ # # @format: data encoding (default 'utf8'). # -# - base64: data must be base64 encoded text. Its binary -# decoding gets written. -# - utf8: data's UTF-8 encoding is written -# - data itself is always Unicode regardless of format, like -# any other string. +# - base64: data must be base64 encoded text. Its binary decoding +# gets written. +# - utf8: data's UTF-8 encoding is written +# - data itself is always Unicode regardless of format, like any +# other string. # # Returns: Nothing on success # @@ -152,7 +150,6 @@ # "data": "abcdefgh", # "format": "utf8" } } # <- { "return": {} } -# ## { 'command': 'ringbuf-write', 'data': { 'device': 'str', @@ -170,14 +167,13 @@ # # @format: data encoding (default 'utf8'). # -# - base64: the data read is returned in base64 encoding. -# - utf8: the data read is interpreted as UTF-8. -# Bug: can screw up when the buffer contains invalid UTF-8 -# sequences, NUL characters, after the ring buffer lost -# data, and when reading stops because the size limit is -# reached.
-# - The return value is always Unicode regardless of format, -# like any other string. +# - base64: the data read is returned in base64 encoding. +# - utf8: the data read is interpreted as UTF-8. +# Bug: can screw up when the buffer contains invalid UTF-8 +# sequences, NUL characters, after the ring buffer lost data, +# and when reading stops because the size limit is reached. +# - The return value is always Unicode regardless of format, like +# any other string. # # Returns: data read from the device # @@ -190,7 +186,6 @@ # "size": 1000, # "format": "utf8" } } # <- { "return": "abcdefgh" } -# ## { 'command': 'ringbuf-read', 'data': {'device': 'str', 'size': 'int', '*format': 'DataFormat'}, @@ -202,8 +197,9 @@ # Configuration shared across all chardev backends # # @logfile: The name of a logfile to save output -# @logappend: true to append instead of truncate -# (default to false to truncate) +# +# @logappend: true to append instead of truncate (default to false to +# truncate) # # Since: 2.6 ## @@ -217,9 +213,11 @@ # Configuration info for file chardevs. # # @in: The name of the input file +# # @out: The name of the output file -# @append: Open the file in append mode (default false to -# truncate) (Since 2.6) +# +# @append: Open the file in append mode (default false to truncate) +# (Since 2.6) # # Since: 1.4 ## @@ -234,8 +232,8 @@ # # Configuration info for device and pipe chardevs. # -# @device: The name of the special file for the device, -# i.e. /dev/ttyS0 on Unix or COM1: on Windows +# @device: The name of the special file for the device, i.e. +# /dev/ttyS0 on Unix or COM1: on Windows # # Since: 1.4 ## @@ -248,29 +246,36 @@ # # Configuration info for (stream) socket chardevs. # -# @addr: socket address to listen on (server=true) -# or connect to (server=false) +# @addr: socket address to listen on (server=true) or connect to +# (server=false) +# # @tls-creds: the ID of the TLS credentials object (since 2.6) +# # @tls-authz: the ID of the QAuthZ authorization object against which -# the client's x509 distinguished name will be validated. This -# object is only resolved at time of use, so can be deleted -# and recreated on the fly while the chardev server is active. -# If missing, it will default to denying access (since 4.0) +# the client's x509 distinguished name will be validated. This +# object is only resolved at time of use, so can be deleted and +# recreated on the fly while the chardev server is active. If +# missing, it will default to denying access (since 4.0) +# # @server: create server socket (default: true) -# @wait: wait for incoming connection on server -# sockets (default: false). -# Silently ignored with server: false. This use is deprecated. +# +# @wait: wait for incoming connection on server sockets (default: +# false). Silently ignored with server: false. This use is +# deprecated. +# # @nodelay: set TCP_NODELAY socket option (default: false) -# @telnet: enable telnet protocol on server -# sockets (default: false) -# @tn3270: enable tn3270 protocol on server -# sockets (default: false) (Since: 2.10) -# @websocket: enable websocket protocol on server -# sockets (default: false) (Since: 3.1) -# @reconnect: For a client socket, if a socket is disconnected, -# then attempt a reconnect after the given number of seconds. -# Setting this to zero disables this function. 
(default: 0) -# (Since: 2.2) +# +# @telnet: enable telnet protocol on server sockets (default: false) +# +# @tn3270: enable tn3270 protocol on server sockets (default: false) +# (Since: 2.10) +# +# @websocket: enable websocket protocol on server sockets +# (default: false) (Since: 3.1) +# +# @reconnect: For a client socket, if a socket is disconnected, then +# attempt a reconnect after the given number of seconds. Setting +# this to zero disables this function. (default: 0) (Since: 2.2) # # Since: 1.4 ## @@ -293,6 +298,7 @@ # Configuration info for datagram socket chardevs. # # @remote: remote address +# # @local: local address # # Since: 1.5 @@ -320,8 +326,8 @@ # # Configuration info for stdio chardevs. # -# @signal: Allow signals (such as SIGINT triggered by ^C) -# be delivered to qemu. Default: true. +# @signal: Allow signals (such as SIGINT triggered by ^C) to be +# delivered to qemu. Default: true. # # Since: 1.5 ## @@ -377,8 +383,11 @@ # Configuration info for virtual console chardevs. # # @width: console width, in pixels +# # @height: console height, in pixels +# # @cols: console width, in chars +# # @rows: console height, in chars # # Since: 1.5 @@ -409,6 +418,7 @@ # Configuration info for qemu vdagent implementation. # # @mouse: enable/disable mouse, default is enabled. +# # @clipboard: enable/disable clipboard, default is disabled. # # Since: 6.1 @@ -423,20 +433,35 @@ # @ChardevBackendKind: # # @pipe: Since 1.5 +# # @udp: Since 1.5 +# # @mux: Since 1.5 +# # @msmouse: Since 1.5 +# # @wctablet: Since 2.9 +# # @braille: Since 1.5 +# # @testdev: Since 2.2 +# # @stdio: Since 1.5 +# # @console: Since 1.5 +# # @spicevmc: Since 1.5 +# # @spiceport: Since 1.5 +# # @qemu-vdagent: Since 6.1 +# # @dbus: Since 7.0 +# # @vc: v1.5 +# # @ringbuf: Since 1.6 +# # @memory: Since 1.5 # # Since: 1.4 @@ -617,8 +642,8 @@ # # Return info about the chardev backend just created. # -# @pty: name of the slave pseudoterminal device, present if -# and only if a chardev of type 'pty' was created +# @pty: name of the slave pseudoterminal device, present if and only +# if a chardev of type 'pty' was created # # Since: 1.4 ## @@ -631,13 +656,14 @@ # Add a character device backend # # @id: the chardev's ID, must be unique +# # @backend: backend type and parameters # # Returns: ChardevReturn. # # Since: 1.4 # -# Example: +# Examples: # # -> { "execute" : "chardev-add", # "arguments" : { "id" : "foo", @@ -654,7 +680,6 @@ # "arguments" : { "id" : "baz", # "backend" : { "type" : "pty", "data" : {} } } } # <- { "return": { "pty" : "/dev/pty/42" } } -# ## { 'command': 'chardev-add', 'data': { 'id': 'str', @@ -667,13 +692,14 @@ # # Change a character device backend # # @id: the chardev's ID, must exist +# # @backend: new backend type and parameters # # Returns: ChardevReturn.
# # Since: 2.10 # -# Example: +# Examples: # # -> { "execute" : "chardev-change", # "arguments" : { "id" : "baz", @@ -695,7 +721,6 @@ # "server" : true, # "wait" : false }}}} # <- {"return": {}} -# ## { 'command': 'chardev-change', 'data': { 'id': 'str', @@ -717,7 +742,6 @@ # # -> { "execute": "chardev-remove", "arguments": { "id" : "foo" } } # <- { "return": {} } -# ## { 'command': 'chardev-remove', 'data': { 'id': 'str' } } @@ -737,7 +761,6 @@ # # -> { "execute": "chardev-send-break", "arguments": { "id" : "foo" } } # <- { "return": {} } -# ## { 'command': 'chardev-send-break', 'data': { 'id': 'str' } } @@ -760,7 +783,6 @@ # <- { "event": "VSERPORT_CHANGE", # "data": { "id": "channel0", "open": true }, # "timestamp": { "seconds": 1401385907, "microseconds": 422329 } } -# ## { 'event': 'VSERPORT_CHANGE', 'data': { 'id': 'str', diff --git a/qapi/common.json b/qapi/common.json index 356db3f670..6fed9cde1a 100644 --- a/qapi/common.json +++ b/qapi/common.json @@ -70,6 +70,7 @@ # has a different meaning. # # @s: the string value +# # @n: no string value # # Since: 2.10 @@ -155,11 +156,11 @@ # # @preferred: set the preferred host nodes for allocation # -# @bind: a strict policy that restricts memory allocation to the -# host nodes specified +# @bind: a strict policy that restricts memory allocation to the host +# nodes specified # -# @interleave: memory allocations are interleaved across the set -# of host nodes specified +# @interleave: memory allocations are interleaved across the set of +# host nodes specified # # Since: 2.1 ## @@ -169,17 +170,17 @@ ## # @NetFilterDirection: # -# Indicates whether a netfilter is attached to a netdev's transmit queue or -# receive queue or both. +# Indicates whether a netfilter is attached to a netdev's transmit +# queue or receive queue or both. # # @all: the filter is attached both to the receive and the transmit -# queue of the netdev (default). +# queue of the netdev (default). # # @rx: the filter is attached to the receive queue of the netdev, -# where it will receive packets sent to the netdev. +# where it will receive packets sent to the netdev. # # @tx: the filter is attached to the transmit queue of the netdev, -# where it will receive packets sent by the netdev. +# where it will receive packets sent by the netdev. # # Since: 2.5 ## diff --git a/qapi/compat.json b/qapi/compat.json index 39b52872d5..f4c19837eb 100644 --- a/qapi/compat.json +++ b/qapi/compat.json @@ -11,7 +11,9 @@ # Policy for handling "funny" input. # # @accept: Accept silently +# # @reject: Reject with an error +# # @crash: abort() the process # # Since: 6.0 @@ -25,6 +27,7 @@ # Policy for handling "funny" output. # # @accept: Pass on unchanged +# # @hide: Filter out # # Since: 6.0 @@ -47,11 +50,15 @@ # enumeration values. They behave the same as with policy @accept. # # @deprecated-input: how to handle deprecated input (default 'accept') -# @deprecated-output: how to handle deprecated output (default 'accept') +# +# @deprecated-output: how to handle deprecated output (default +# 'accept') +# # @unstable-input: how to handle unstable input (default 'accept') -# (since 6.2) +# (since 6.2) +# # @unstable-output: how to handle unstable output (default 'accept') -# (since 6.2) +# (since 6.2) # # Since: 6.0 ## diff --git a/qapi/control.json b/qapi/control.json index afca2043af..a91fa33407 100644 --- a/qapi/control.json +++ b/qapi/control.json @@ -14,10 +14,9 @@ # Arguments: # # @enable: An optional list of QMPCapability values to enable. 
The -# client must not enable any capability that is not -# mentioned in the QMP greeting message. If the field is not -# provided, it means no QMP capabilities will be enabled. -# (since 2.12) +# client must not enable any capability that is not mentioned in +# the QMP greeting message. If the field is not provided, it +# means no QMP capabilities will be enabled. (since 2.12) # # Example: # @@ -25,12 +24,14 @@ # "arguments": { "enable": [ "oob" ] } } # <- { "return": {} } # -# Notes: This command is valid exactly when first connecting: it must be -# issued before any other command will be accepted, and will fail once the -# monitor is accepting other commands. (see qemu docs/interop/qmp-spec.txt) +# Notes: This command is valid exactly when first connecting: it must +# be issued before any other command will be accepted, and will +# fail once the monitor is accepting other commands. (see qemu +# docs/interop/qmp-spec.rst) # -# The QMP client needs to explicitly enable QMP capabilities, otherwise -# all the QMP capabilities will be turned off by default. +# The QMP client needs to explicitly enable QMP capabilities, +# otherwise all the QMP capabilities will be turned off by +# default. # # Since: 0.13 ## @@ -44,8 +45,8 @@ # Enumeration of capabilities to be advertised during initial client # connection, used for agreeing on particular QMP extension behaviors. # -# @oob: QMP ability to support out-of-band requests. -# (Please refer to qmp-spec.txt for more information on OOB) +# @oob: QMP ability to support out-of-band requests. (Please refer to +# qmp-spec.rst for more information on OOB) # # Since: 2.12 ## @@ -73,16 +74,16 @@ # # A description of QEMU's version. # -# @qemu: The version of QEMU. By current convention, a micro -# version of 50 signifies a development branch. A micro version -# greater than or equal to 90 signifies a release candidate for -# the next minor version. A micro version of less than 50 -# signifies a stable release. +# @qemu: The version of QEMU. By current convention, a micro version +# of 50 signifies a development branch. A micro version greater +# than or equal to 90 signifies a release candidate for the next +# minor version. A micro version of less than 50 signifies a +# stable release. # -# @package: QEMU will always set this field to an empty string. Downstream -# versions of QEMU should set this to a non-empty string. The -# exact format depends on the downstream however it highly -# recommended that a unique name is used. +# @package: QEMU will always set this field to an empty string. +# Downstream versions of QEMU should set this to a non-empty +# string. The exact format depends on the downstream, however it +# is highly recommended that a unique name is used. # # Since: 0.14 ## @@ -94,7 +95,8 @@ # # Returns the current version of QEMU. # -# Returns: A @VersionInfo object describing the current version of QEMU. +# Returns: A @VersionInfo object describing the current version of +# QEMU. # # Since: 0.14 # @@ -111,7 +113,6 @@ # "package":"" # } # } -# ## { 'command': 'query-version', 'returns': 'VersionInfo', 'allow-preconfig': true } @@ -150,8 +151,8 @@ # ] # } # -# Note: This example has been shortened as the real response is too long. -# +# Note: This example has been shortened as the real response is too +# long. ## { 'command': 'query-commands', 'returns': ['CommandInfo'], 'allow-preconfig': true } @@ -159,10 +160,10 @@ ## # @quit: # -# This command will cause the QEMU process to exit gracefully.
While every -# attempt is made to send the QMP response before terminating, this is not -# guaranteed. When using this interface, a premature EOF would not be -# unexpected. +# This command will cause the QEMU process to exit gracefully. While +# every attempt is made to send the QMP response before terminating, +# this is not guaranteed. When using this interface, a premature EOF +# would not be unexpected. # # Since: 0.14 # @@ -195,7 +196,7 @@ # @id: Name of the monitor # # @mode: Selects the monitor mode (default: readline in the system -# emulator, control in qemu-storage-daemon) +# emulator, control in qemu-storage-daemon) # # @pretty: Enables pretty printing (QMP only) # diff --git a/qapi/crypto.json b/qapi/crypto.json index 653e6e3f3d..fd3d46ebd1 100644 --- a/qapi/crypto.json +++ b/qapi/crypto.json @@ -11,8 +11,7 @@ # # The type of network endpoint that will be using the credentials. # Most types of credential require different setup / structures -# depending on whether they will be used in a server versus a -# client. +# depending on whether they will be used in a server versus a client. # # @client: the network endpoint is acting as the client # @@ -29,7 +28,9 @@ # # The data format that the secret is provided in # -# @raw: raw bytes. When encoded in JSON only valid UTF-8 sequences can be used +# @raw: raw bytes. When encoded in JSON only valid UTF-8 sequences +# can be used +# # @base64: arbitrary base64 encoded binary data # # Since: 2.6 @@ -44,11 +45,17 @@ # The supported algorithms for computing content digests # # @md5: MD5. Should not be used in any new code, legacy compat only +# # @sha1: SHA-1. Should not be used in any new code, legacy compat only +# # @sha224: SHA-224. (since 2.7) +# # @sha256: SHA-256. Current recommended strong hash. +# # @sha384: SHA-384. (since 2.7) +# # @sha512: SHA-512. (since 2.7) +# # @ripemd160: RIPEMD-160. (since 2.7) # # Since: 2.6 @@ -63,16 +70,28 @@ # The supported algorithms for content encryption ciphers # # @aes-128: AES with 128 bit / 16 byte keys +# # @aes-192: AES with 192 bit / 24 byte keys +# # @aes-256: AES with 256 bit / 32 byte keys -# @des: DES with 56 bit / 8 byte keys. Do not use except in VNC. (since 6.1) +# +# @des: DES with 56 bit / 8 byte keys. Do not use except in VNC. +# (since 6.1) +# # @3des: 3DES(EDE) with 192 bit / 24 byte keys (since 2.9) +# # @cast5-128: Cast5 with 128 bit / 16 byte keys +# # @serpent-128: Serpent with 128 bit / 16 byte keys +# # @serpent-192: Serpent with 192 bit / 24 byte keys +# # @serpent-256: Serpent with 256 bit / 32 byte keys +# # @twofish-128: Twofish with 128 bit / 16 byte keys +# # @twofish-192: Twofish with 192 bit / 24 byte keys +# # @twofish-256: Twofish with 256 bit / 32 byte keys # # Since: 2.6 @@ -91,8 +110,11 @@ # The supported modes for content encryption ciphers # # @ecb: Electronic Code Book +# # @cbc: Cipher Block Chaining +# # @xts: XEX with tweaked code book and ciphertext stealing +# # @ctr: Counter (Since 2.8) # # Since: 2.6 @@ -104,15 +126,17 @@ ## # @QCryptoIVGenAlgorithm: # -# The supported algorithms for generating initialization -# vectors for full disk encryption. The 'plain' generator -# should not be used for disks with sector numbers larger -# than 2^32, except where compatibility with pre-existing -# Linux dm-crypt volumes is required. +# The supported algorithms for generating initialization vectors for +# full disk encryption. 
The 'plain' generator should not be used for
+# disks with sector numbers larger than 2^32, except where
+# compatibility with pre-existing Linux dm-crypt volumes is required.
#
# @plain: 64-bit sector number truncated to 32-bits
+#
# @plain64: 64-bit sector number
-# @essiv: 64-bit sector number encrypted with a hash of the encryption key
+#
+# @essiv: 64-bit sector number encrypted with a hash of the encryption
+# key
#
# Since: 2.6
##
@@ -125,9 +149,10 @@
#
# The supported full disk encryption formats
#
-# @qcow: QCow/QCow2 built-in AES-CBC encryption. Use only
-# for liberating data from old images.
-# @luks: LUKS encryption format. Recommended for new images
+# @qcow: QCow/QCow2 built-in AES-CBC encryption. Use only for
+# liberating data from old images.
+#
+# @luks: LUKS encryption format. Recommended for new images
#
# Since: 2.6
##
@@ -138,8 +163,7 @@
##
# @QCryptoBlockOptionsBase:
#
-# The common options that apply to all full disk
-# encryption formats
+# The common options that apply to all full disk encryption formats
#
# @format: the encryption format
#
@@ -154,8 +178,8 @@
# The options that apply to QCow/QCow2 AES-CBC encryption format
#
# @key-secret: the ID of a QCryptoSecret object providing the
-# decryption key. Mandatory except when probing image for
-# metadata only.
+# decryption key. Mandatory except when probing image for
+# metadata only.
#
# Since: 2.6
##
@@ -168,8 +192,8 @@
#
# The options that apply to LUKS encryption format
#
# @key-secret: the ID of a QCryptoSecret object providing the
-# decryption key. Mandatory except when probing image for
-# metadata only.
+# decryption key. Mandatory except when probing image for
+# metadata only.
#
# Since: 2.6
##
@@ -181,19 +205,23 @@
#
# The options that apply to LUKS encryption format initialization
#
-# @cipher-alg: the cipher algorithm for data encryption
-# Currently defaults to 'aes-256'.
-# @cipher-mode: the cipher mode for data encryption
-# Currently defaults to 'xts'
-# @ivgen-alg: the initialization vector generator
-# Currently defaults to 'plain64'
-# @ivgen-hash-alg: the initialization vector generator hash
-# Currently defaults to 'sha256'
-# @hash-alg: the master key hash algorithm
-# Currently defaults to 'sha256'
-# @iter-time: number of milliseconds to spend in
-# PBKDF passphrase processing. Currently defaults
-# to 2000. (since 2.8)
+# @cipher-alg: the cipher algorithm for data encryption. Currently
+# defaults to 'aes-256'.
+#
+# @cipher-mode: the cipher mode for data encryption. Currently defaults
+# to 'xts'
+#
+# @ivgen-alg: the initialization vector generator. Currently defaults
+# to 'plain64'
+#
+# @ivgen-hash-alg: the initialization vector generator hash. Currently
+# defaults to 'sha256'
+#
+# @hash-alg: the master key hash algorithm. Currently defaults to
+# 'sha256'
+#
+# @iter-time: number of milliseconds to spend in PBKDF passphrase
+# processing. Currently defaults to 2000.
(since 2.8)
#
# Since: 2.6
##
@@ -209,8 +237,8 @@
##
# @QCryptoBlockOpenOptions:
#
-# The options that are available for all encryption formats
-# when opening an existing volume
+# The options that are available for all encryption formats when
+# opening an existing volume
#
# Since: 2.6
##
@@ -223,8 +251,8 @@
##
# @QCryptoBlockCreateOptions:
#
-# The options that are available for all encryption formats
-# when initializing a new volume
+# The options that are available for all encryption formats when
+# initializing a new volume
#
# Since: 2.6
##
@@ -237,8 +265,8 @@
##
# @QCryptoBlockInfoBase:
#
-# The common information that applies to all full disk
-# encryption formats
+# The common information that applies to all full disk encryption
+# formats
#
# @format: the encryption format
#
@@ -250,12 +278,14 @@
##
# @QCryptoBlockInfoLUKSSlot:
#
-# Information about the LUKS block encryption key
-# slot options
+# Information about the LUKS block encryption key slot options
#
# @active: whether the key slot is currently in use
+#
# @key-offset: offset to the key material in bytes
+#
# @iters: number of PBKDF2 iterations for key material
+#
# @stripes: number of stripes for splitting key material
#
# Since: 2.7
@@ -272,13 +302,21 @@
# Information about the LUKS block encryption options
#
# @cipher-alg: the cipher algorithm for data encryption
+#
# @cipher-mode: the cipher mode for data encryption
+#
# @ivgen-alg: the initialization vector generator
+#
# @ivgen-hash-alg: the initialization vector generator hash
+#
# @hash-alg: the master key hash algorithm
+#
# @payload-offset: offset to the payload data in bytes
+#
# @master-key-iters: number of PBKDF2 iterations for key material
+#
# @uuid: unique identifier for the volume
+#
# @slots: information about each key slot
#
# Since: 2.7
@@ -312,7 +350,9 @@
# Defines state of keyslots that are affected by the update
#
# @active: The slots contain the given password and marked as active
-# @inactive: The slots are erased (contain garbage) and marked as inactive
+#
+# @inactive: The slots are erased (contain garbage) and marked as
+# inactive
#
# Since: 5.1
##
@@ -322,35 +362,34 @@
##
# @QCryptoBlockAmendOptionsLUKS:
#
-# This struct defines the update parameters that activate/de-activate set
-# of keyslots
+# This struct defines the update parameters that activate/de-activate
+# a set of keyslots
#
# @state: the desired state of the keyslots
#
-# @new-secret: The ID of a QCryptoSecret object providing the password to be
-# written into added active keyslots
+# @new-secret: The ID of a QCryptoSecret object providing the password
+# to be written into added active keyslots
#
-# @old-secret: Optional (for deactivation only)
-# If given will deactivate all keyslots that
-# match password located in QCryptoSecret with this ID
+# @old-secret: Optional (for deactivation only). If given, will
+# deactivate all keyslots that match the password located in the
+# QCryptoSecret with this ID
#
-# @iter-time: Optional (for activation only)
-# Number of milliseconds to spend in
-# PBKDF passphrase processing for the newly activated keyslot.
-# Currently defaults to 2000.
+# @iter-time: Optional (for activation only). Number of milliseconds to
+# spend in PBKDF passphrase processing for the newly activated
+# keyslot. Currently defaults to 2000.
#
-# @keyslot: Optional. ID of the keyslot to activate/deactivate.
-# For keyslot activation, keyslot should not be active already
-# (this is unsafe to update an active keyslot),
-# but possible if 'force' parameter is given.
-# If keyslot is not given, first free keyslot will be written.
+# @keyslot: Optional. ID of the keyslot to activate/deactivate. For
+# keyslot activation, the keyslot should not be active already (it
+# is unsafe to update an active keyslot), but possible if the
+# 'force' parameter is given. If keyslot is not given, the first
+# free keyslot will be written.
#
-# For keyslot deactivation, this parameter specifies the exact
-# keyslot to deactivate
+# For keyslot deactivation, this parameter specifies the exact
+# keyslot to deactivate
#
-# @secret: Optional. The ID of a QCryptoSecret object providing the
-# password to use to retrieve current master key.
-# Defaults to the same secret that was used to open the image
+# @secret: Optional. The ID of a QCryptoSecret object providing the
+# password to use to retrieve the current master key. Defaults to
+# the same secret that was used to open the image
#
# Since: 5.1
##
@@ -365,8 +404,8 @@
##
# @QCryptoBlockAmendOptions:
#
-# The options that are available for all encryption formats
-# when amending encryption settings
+# The options that are available for all encryption formats when
+# amending encryption settings
#
# Since: 5.1
##
@@ -381,22 +420,27 @@
#
# Properties for objects of classes derived from secret-common.
#
-# @loaded: if true, the secret is loaded immediately when applying this option
-# and will probably fail when processing the next option. Don't use;
-# only provided for compatibility. (default: false)
+# @loaded: if true, the secret is loaded immediately when applying
+# this option and will probably fail when processing the next
+# option. Don't use; only provided for compatibility.
+# (default: false)
#
-# @format: the data format that the secret is provided in (default: raw)
+# @format: the data format that the secret is provided in
+# (default: raw)
#
-# @keyid: the name of another secret that should be used to decrypt the
-# provided data. If not present, the data is assumed to be unencrypted.
+# @keyid: the name of another secret that should be used to decrypt
+# the provided data. If not present, the data is assumed to be
+# unencrypted.
#
-# @iv: the random initialization vector used for encryption of this particular
-# secret. Should be a base64 encrypted string of the 16-byte IV. Mandatory
-# if @keyid is given. Ignored if @keyid is absent.
+# @iv: the random initialization vector used for encryption of this
+# particular secret. Should be a base64-encoded string of the
+# 16-byte IV. Mandatory if @keyid is given. Ignored if @keyid is
+# absent.
#
# Features:
-# @deprecated: Member @loaded is deprecated. Setting true doesn't make sense,
-# and false is already the default.
+#
+# @deprecated: Member @loaded is deprecated. Setting true doesn't
+# make sense, and false is already the default.
#
# Since: 2.6
##
@@ -443,16 +487,17 @@
# Properties for objects of classes derived from tls-creds.
#
# @verify-peer: if true the peer credentials will be verified once the
-# handshake is completed. This is a no-op for anonymous
-# credentials.
(default: true) # # @dir: the path of the directory that contains the credential files # -# @endpoint: whether the QEMU network backend that uses the credentials will be -# acting as a client or as a server (default: client) +# @endpoint: whether the QEMU network backend that uses the +# credentials will be acting as a client or as a server +# (default: client) # # @priority: a gnutls priority string as described at -# https://gnutls.org/manual/html_node/Priority-Strings.html +# https://gnutls.org/manual/html_node/Priority-Strings.html # # Since: 2.5 ## @@ -467,13 +512,15 @@ # # Properties for tls-creds-anon objects. # -# @loaded: if true, the credentials are loaded immediately when applying this -# option and will ignore options that are processed later. Don't use; -# only provided for compatibility. (default: false) +# @loaded: if true, the credentials are loaded immediately when +# applying this option and will ignore options that are processed +# later. Don't use; only provided for compatibility. +# (default: false) # # Features: -# @deprecated: Member @loaded is deprecated. Setting true doesn't make sense, -# and false is already the default. +# +# @deprecated: Member @loaded is deprecated. Setting true doesn't +# make sense, and false is already the default. # # Since: 2.5 ## @@ -486,17 +533,19 @@ # # Properties for tls-creds-psk objects. # -# @loaded: if true, the credentials are loaded immediately when applying this -# option and will ignore options that are processed later. Don't use; -# only provided for compatibility. (default: false) +# @loaded: if true, the credentials are loaded immediately when +# applying this option and will ignore options that are processed +# later. Don't use; only provided for compatibility. +# (default: false) # -# @username: the username which will be sent to the server. For clients only. -# If absent, "qemu" is sent and the property will read back as an -# empty string. +# @username: the username which will be sent to the server. For +# clients only. If absent, "qemu" is sent and the property will +# read back as an empty string. # # Features: -# @deprecated: Member @loaded is deprecated. Setting true doesn't make sense, -# and false is already the default. +# +# @deprecated: Member @loaded is deprecated. Setting true doesn't +# make sense, and false is already the default. # # Since: 3.0 ## @@ -510,22 +559,24 @@ # # Properties for tls-creds-x509 objects. # -# @loaded: if true, the credentials are loaded immediately when applying this -# option and will ignore options that are processed later. Don't use; -# only provided for compatibility. (default: false) +# @loaded: if true, the credentials are loaded immediately when +# applying this option and will ignore options that are processed +# later. Don't use; only provided for compatibility. +# (default: false) # # @sanity-check: if true, perform some sanity checks before using the -# credentials (default: true) +# credentials (default: true) # -# @passwordid: For the server-key.pem and client-key.pem files which contain -# sensitive private keys, it is possible to use an encrypted -# version by providing the @passwordid parameter. This provides -# the ID of a previously created secret object containing the -# password for decryption. +# @passwordid: For the server-key.pem and client-key.pem files which +# contain sensitive private keys, it is possible to use an +# encrypted version by providing the @passwordid parameter. 
This
+# provides the ID of a previously created secret object containing
+# the password for decryption.
#
# Features:
-# @deprecated: Member @loaded is deprecated. Setting true doesn't make sense,
-# and false is already the default.
+#
+# @deprecated: Member @loaded is deprecated. Setting true doesn't
+# make sense, and false is already the default.
#
# Since: 2.5
##
@@ -564,6 +615,7 @@
# The padding algorithm for RSA.
#
# @raw: no padding used
+#
# @pkcs1: pkcs1#v1.5
#
# Since: 7.1
@@ -578,6 +630,7 @@
# Specific parameters for RSA algorithm.
#
# @hash-alg: QCryptoHashAlgorithm
+#
# @padding-alg: QCryptoRSAPaddingAlgorithm
#
# Since: 7.1
diff --git a/qapi/cryptodev.json b/qapi/cryptodev.json
new file mode 100644
index 0000000000..77f48a9c21
--- /dev/null
+++ b/qapi/cryptodev.json
@@ -0,0 +1,96 @@
+# -*- Mode: Python -*-
+# vim: filetype=python
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+
+##
+# = Cryptography devices
+##
+
+##
+# @QCryptodevBackendAlgType:
+#
+# The supported algorithm types of a crypto device.
+#
+# @sym: symmetric encryption
+#
+# @asym: asymmetric encryption
+#
+# Since: 8.0
+##
+{ 'enum': 'QCryptodevBackendAlgType',
+ 'prefix': 'QCRYPTODEV_BACKEND_ALG',
+ 'data': ['sym', 'asym']}

+##
+# @QCryptodevBackendServiceType:
+#
+# The supported service types of a crypto device.
+#
+# Since: 8.0
+##
+{ 'enum': 'QCryptodevBackendServiceType',
+ 'prefix': 'QCRYPTODEV_BACKEND_SERVICE',
+ 'data': ['cipher', 'hash', 'mac', 'aead', 'akcipher']}

+##
+# @QCryptodevBackendType:
+#
+# The crypto device backend type
+#
+# @builtin: the QEMU builtin support
+#
+# @vhost-user: vhost-user
+#
+# @lkcf: Linux kernel cryptographic framework
+#
+# Since: 8.0
+##
+{ 'enum': 'QCryptodevBackendType',
+ 'prefix': 'QCRYPTODEV_BACKEND_TYPE',
+ 'data': ['builtin', 'vhost-user', 'lkcf']}

+##
+# @QCryptodevBackendClient:
+#
+# Information about a queue of a crypto device.
+#
+# @queue: the queue index of the crypto device
+#
+# @type: the type of the crypto device
+#
+# Since: 8.0
+##
+{ 'struct': 'QCryptodevBackendClient',
+ 'data': { 'queue': 'uint32',
+ 'type': 'QCryptodevBackendType' } }

+##
+# @QCryptodevInfo:
+#
+# Information about a crypto device.
+#
+# @id: the id of the crypto device
+#
+# @service: supported service types of a crypto device
+#
+# @client: additional information about the crypto device
+#
+# Since: 8.0
+##
+{ 'struct': 'QCryptodevInfo',
+ 'data': { 'id': 'str',
+ 'service': ['QCryptodevBackendServiceType'],
+ 'client': ['QCryptodevBackendClient'] } }

+##
+# @query-cryptodev:
+#
+# Returns information about current crypto devices.
+#
+# Returns: a list of @QCryptodevInfo
+#
+# Since: 8.0
+##
+{ 'command': 'query-cryptodev', 'returns': ['QCryptodevInfo']}
diff --git a/qapi/cxl.json b/qapi/cxl.json
new file mode 100644
index 0000000000..b21c9b4c1c
--- /dev/null
+++ b/qapi/cxl.json
@@ -0,0 +1,154 @@
+# -*- Mode: Python -*-
+# vim: filetype=python

+##
+# = CXL devices
+##

+##
+# @CxlUncorErrorType:
+#
+# Type of uncorrectable CXL error to inject. These errors are
+# reported via an AER uncorrectable internal error with additional
+# information logged at the CXL device.
+#
+# @cache-data-parity: Data error such as data parity or data ECC error
+# on CXL.cache
+#
+# @cache-address-parity: Address parity or other errors associated
+# with the address field on CXL.cache
+#
+# @cache-be-parity: Byte enable parity or other byte enable errors on
+# CXL.cache
+#
+# @cache-data-ecc: ECC error on CXL.cache
+#
+# @mem-data-parity: Data error such as data parity or data ECC error
+# on CXL.mem
+#
+# @mem-address-parity: Address parity or other errors associated with
+# the address field on CXL.mem
+#
+# @mem-be-parity: Byte enable parity or other byte enable errors on
+# CXL.mem.
+#
+# @mem-data-ecc: Data ECC error on CXL.mem.
+#
+# @reinit-threshold: REINIT threshold hit.
+#
+# @rsvd-encoding: Received unrecognized encoding.
+#
+# @poison-received: Received poison from the peer.
+#
+# @receiver-overflow: Buffer overflows (first 3 bits of header log
+# indicate which)
+#
+# @internal: Component specific error
+#
+# @cxl-ide-tx: Integrity and data encryption tx error.
+#
+# @cxl-ide-rx: Integrity and data encryption rx error.
+#
+# Since: 8.0
+##

+{ 'enum': 'CxlUncorErrorType',
+ 'data': ['cache-data-parity',
+ 'cache-address-parity',
+ 'cache-be-parity',
+ 'cache-data-ecc',
+ 'mem-data-parity',
+ 'mem-address-parity',
+ 'mem-be-parity',
+ 'mem-data-ecc',
+ 'reinit-threshold',
+ 'rsvd-encoding',
+ 'poison-received',
+ 'receiver-overflow',
+ 'internal',
+ 'cxl-ide-tx',
+ 'cxl-ide-rx'
+ ]
+ }

+##
+# @CXLUncorErrorRecord:
+#
+# Record of a single error including header log.
+#
+# @type: Type of error
+#
+# @header: 16 DWORD of header.
+#
+# Since: 8.0
+##
+{ 'struct': 'CXLUncorErrorRecord',
+ 'data': {
+ 'type': 'CxlUncorErrorType',
+ 'header': [ 'uint32' ]
+ }
+}

+##
+# @cxl-inject-uncorrectable-errors:
+#
+# Command to allow injection of multiple errors in one go. This
+# allows testing of multiple header log handling in the OS.
+#
+# @path: CXL Type 3 device canonical QOM path
+#
+# @errors: Errors to inject
+#
+# Since: 8.0
+##
+{ 'command': 'cxl-inject-uncorrectable-errors',
+ 'data': { 'path': 'str',
+ 'errors': [ 'CXLUncorErrorRecord' ] }}

+##
+# @CxlCorErrorType:
+#
+# Type of CXL correctable error to inject
+#
+# @cache-data-ecc: Data ECC error on CXL.cache
+#
+# @mem-data-ecc: Data ECC error on CXL.mem
+#
+# @crc-threshold: Component specific and applicable to 68 byte Flit
+# mode only.
+#
+# @cache-poison-received: Received poison from a peer on CXL.cache.
+#
+# @mem-poison-received: Received poison from a peer on CXL.mem
+#
+# @physical: Received error indication from the physical layer.
+#
+# Since: 8.0
+##
+{ 'enum': 'CxlCorErrorType',
+ 'data': ['cache-data-ecc',
+ 'mem-data-ecc',
+ 'crc-threshold',
+ 'retry-threshold',
+ 'cache-poison-received',
+ 'mem-poison-received',
+ 'physical']
+}

+##
+# @cxl-inject-correctable-error:
+#
+# Command to inject a single correctable error. Multiple error
+# injection of this error type is not interesting as there is no
+# associated header log. These errors are reported via AER as a
+# correctable internal error, with additional detail available from
+# the CXL device.
+#
+# @path: CXL Type 3 device canonical QOM path
+#
+# @type: Type of error.
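+#
+# A minimal illustrative exchange (a sketch only, not taken from the
+# QEMU documentation; the QOM path below is hypothetical and depends
+# on how the Type 3 device was created):
+#
+# -> { "execute": "cxl-inject-correctable-error",
+#      "arguments": { "path": "/machine/peripheral/cxl-mem0",
+#                     "type": "mem-data-ecc" } }
+# <- { "return": {} }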
+#
+# Since: 8.0
+##
+{'command': 'cxl-inject-correctable-error',
+ 'data': {'path': 'str', 'type': 'CxlCorErrorType'}}
diff --git a/qapi/dump.json b/qapi/dump.json
index 6fc215dd47..4ae1f722a9 100644
--- a/qapi/dump.json
+++ b/qapi/dump.json
@@ -21,8 +21,8 @@
#
# @kdump-snappy: kdump-compressed format with snappy-compressed
#
-# @win-dmp: Windows full crashdump format,
-# can be used instead of ELF converting (since 2.13)
+# @win-dmp: Windows full crashdump format, can be used instead of ELF
+# conversion (since 2.13)
#
# Since: 2.0
##
@@ -32,47 +32,47 @@
##
# @dump-guest-memory:
#
-# Dump guest's memory to vmcore. It is a synchronous operation that can take
-# very long depending on the amount of guest memory.
+# Dump guest's memory to vmcore. It is a synchronous operation that
+# can take a very long time depending on the amount of guest memory.
#
-# @paging: if true, do paging to get guest's memory mapping. This allows
-# using gdb to process the core file.
+# @paging: if true, do paging to get guest's memory mapping. This
+# allows using gdb to process the core file.
#
-# IMPORTANT: this option can make QEMU allocate several gigabytes
-# of RAM. This can happen for a large guest, or a
-# malicious guest pretending to be large.
+# IMPORTANT: this option can make QEMU allocate several gigabytes
+# of RAM. This can happen for a large guest, or a malicious guest
+# pretending to be large.
#
-# Also, paging=true has the following limitations:
+# Also, paging=true has the following limitations:
#
-# 1. The guest may be in a catastrophic state or can have corrupted
-# memory, which cannot be trusted
-# 2. The guest can be in real-mode even if paging is enabled. For
-# example, the guest uses ACPI to sleep, and ACPI sleep state
-# goes in real-mode
-# 3. Currently only supported on i386 and x86_64.
+# 1. The guest may be in a catastrophic state or can have
+# corrupted memory, which cannot be trusted
+# 2. The guest can be in real-mode even if paging is enabled. For
+# example, the guest uses ACPI to sleep, and ACPI sleep state
+# goes in real-mode
+# 3. Currently only supported on i386 and x86_64.
#
-# @protocol: the filename or file descriptor of the vmcore. The supported
-# protocols are:
+# @protocol: the filename or file descriptor of the vmcore. The
+# supported protocols are:
#
-# 1. file: the protocol starts with "file:", and the following
-# string is the file's path.
-# 2. fd: the protocol starts with "fd:", and the following string
-# is the fd's name.
+# 1. file: the protocol starts with "file:", and the following
+# string is the file's path.
+# 2. fd: the protocol starts with "fd:", and the following string
+# is the fd's name.
#
-# @detach: if true, QMP will return immediately rather than
-# waiting for the dump to finish. The user can track progress
-# using "query-dump". (since 2.6).
+# @detach: if true, QMP will return immediately rather than waiting
+# for the dump to finish. The user can track progress using
+# "query-dump". (since 2.6).
#
# @begin: if specified, the starting physical address.
#
-# @length: if specified, the memory size, in bytes. If you don't
-# want to dump all guest's memory, please specify the start @begin
-# and @length
+# @length: if specified, the memory size, in bytes. If you don't want
+# to dump all guest's memory, please specify the start @begin and
+# @length
#
-# @format: if specified, the format of guest memory dump. But non-elf
-# format is conflict with paging and filter, ie.
@paging, @begin and
-# @length is not allowed to be specified with non-elf @format at the
-# same time (since 2.0)
+# @format: if specified, the format of guest memory dump. But a
+# non-elf format conflicts with paging and filtering, i.e. @paging,
+# @begin and @length are not allowed to be specified with a non-elf
+# @format at the same time (since 2.0)
#
# Note: All boolean arguments default to false
#
@@ -85,7 +85,6 @@
# -> { "execute": "dump-guest-memory",
# "arguments": { "paging": false, "protocol": "fd:dump" } }
# <- { "return": {} }
-#
##
{ 'command': 'dump-guest-memory',
'data': { 'paging': 'bool', 'protocol': 'str', '*detach': 'bool',
@@ -142,7 +141,6 @@
# -> { "execute": "query-dump" }
# <- { "return": { "status": "active", "completed": 1024000,
# "total": 2048000 } }
-#
##
{ 'command': 'query-dump', 'returns': 'DumpQueryResult' }
@@ -153,9 +151,9 @@
#
# @result: final dump status
#
-# @error: human-readable error string that provides
-# hint on why dump failed. Only presents on failure. The
-# user should not try to interpret the error string.
+# @error: human-readable error string that provides a hint on why the
+# dump failed. Only present on failure. The user should not try
+# to interpret the error string.
#
# Since: 2.6
#
@@ -165,7 +163,6 @@
# "data": { "result": { "total": 1090650112, "status": "completed",
# "completed": 1090650112 } },
# "timestamp": { "seconds": 1648244171, "microseconds": 950316 } }
-#
##
{ 'event': 'DUMP_COMPLETED' ,
'data': { 'result': 'DumpQueryResult',
'*error': 'str' } }
@@ -186,8 +183,8 @@
#
# Returns the available formats for dump-guest-memory
#
-# Returns: A @DumpGuestMemoryCapability object listing available formats for
-# dump-guest-memory
+# Returns: A @DumpGuestMemoryCapability object listing available
+# formats for dump-guest-memory
#
# Since: 2.0
#
@@ -196,7 +193,6 @@
# -> { "execute": "query-dump-guest-memory-capability" }
# <- { "return": { "formats":
# ["elf", "kdump-zlib", "kdump-lzo", "kdump-snappy"] } }
-#
##
{ 'command': 'query-dump-guest-memory-capability',
'returns': 'DumpGuestMemoryCapability' }
diff --git a/qapi/error.json b/qapi/error.json
index 94a6502de9..135c1e8231 100644
--- a/qapi/error.json
+++ b/qapi/error.json
@@ -10,8 +10,8 @@
#
# QEMU error classes
#
-# @GenericError: this is used for errors that don't require a specific error
-# class. This should be the default case for most errors
+# @GenericError: this is used for errors that don't require a specific
+# error class. This should be the default case for most errors
#
# @CommandNotFound: the requested command has not been found
#
@@ -20,7 +20,7 @@
# @DeviceNotFound: the requested device has not been found
#
# @KVMMissingCap: the requested operation can't be fulfilled because a
-# required KVM capability is missing
+# required KVM capability is missing
#
# Since: 1.2
##
diff --git a/qapi/introspect.json b/qapi/introspect.json
index 183148b2e9..9173e60fdd 100644
--- a/qapi/introspect.json
+++ b/qapi/introspect.json
@@ -35,15 +35,15 @@
# alternate that includes the original type alongside something else.
#
# Returns: array of @SchemaInfo, where each element describes an
-# entity in the ABI: command, event, type, ...
+# entity in the ABI: command, event, type, ...
#
-# The order of the various SchemaInfo is unspecified; however, all
-# names are guaranteed to be unique (no name will be duplicated with
-# different meta-types).
+# The order of the various SchemaInfo is unspecified; however, all +# names are guaranteed to be unique (no name will be duplicated +# with different meta-types). # # Note: the QAPI schema is also used to help define *internal* -# interfaces, by defining QAPI types. These are not part of the QMP -# wire ABI, and therefore not returned by this command. +# interfaces, by defining QAPI types. These are not part of the +# QMP wire ABI, and therefore not returned by this command. # # Since: 2.5 ## @@ -80,20 +80,18 @@ ## # @SchemaInfo: # -# @name: the entity's name, inherited from @base. -# The SchemaInfo is always referenced by this name. -# Commands and events have the name defined in the QAPI schema. -# Unlike command and event names, type names are not part of -# the wire ABI. Consequently, type names are meaningless -# strings here, although they are still guaranteed unique -# regardless of @meta-type. +# @name: the entity's name, inherited from @base. The SchemaInfo is +# always referenced by this name. Commands and events have the +# name defined in the QAPI schema. Unlike command and event +# names, type names are not part of the wire ABI. Consequently, +# type names are meaningless strings here, although they are still +# guaranteed unique regardless of @meta-type. # # @meta-type: the entity's meta type, inherited from @base. # # @features: names of features associated with the entity, in no -# particular order. -# (since 4.1 for object types, 4.2 for commands, 5.0 for -# the rest) +# particular order. (since 4.1 for object types, 4.2 for +# commands, 5.0 for the rest) # # Additional members depend on the value of @meta-type. # @@ -142,13 +140,15 @@ # # Additional SchemaInfo members for meta-type 'enum'. # -# @members: the enum type's members, in no particular order -# (since 6.2). +# @members: the enum type's members, in no particular order (since +# 6.2). # -# @values: the enumeration type's member names, in no particular order. -# Redundant with @members. Just for backward compatibility. +# @values: the enumeration type's member names, in no particular +# order. Redundant with @members. Just for backward +# compatibility. # # Features: +# # @deprecated: Member @values is deprecated. Use @members instead. # # Values of this type are JSON string on the wire. @@ -168,7 +168,7 @@ # @name: the member's name, as defined in the QAPI schema. # # @features: names of features associated with the member, in no -# particular order. +# particular order. # # Since: 6.2 ## @@ -194,16 +194,16 @@ # # Additional SchemaInfo members for meta-type 'object'. # -# @members: the object type's (non-variant) members, in no particular order. +# @members: the object type's (non-variant) members, in no particular +# order. # -# @tag: the name of the member serving as type tag. -# An element of @members with this name must exist. +# @tag: the name of the member serving as type tag. An element of +# @members with this name must exist. # -# @variants: variant members, i.e. additional members that -# depend on the type tag's value. Present exactly when -# @tag is present. The variants are in no particular order, -# and may even differ from the order of the values of the -# enum type of the @tag. +# @variants: variant members, i.e. additional members that depend on +# the type tag's value. Present exactly when @tag is present. +# The variants are in no particular order, and may even differ +# from the order of the values of the enum type of the @tag. # # Values of this type are JSON object on the wire. 
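#
# For instance, an object type might be reported by query-qmp-schema
# roughly as follows (an illustrative sketch only, not actual output;
# the meaningless type name "123" and the member shown are made up):
#
# { "name": "123", "meta-type": "object",
#   "members": [ { "name": "id", "type": "str" } ] }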
# @@ -223,16 +223,14 @@ # # @type: the name of the member's type. # -# @default: default when used as command parameter. -# If absent, the parameter is mandatory. -# If present, the value must be null. The parameter is -# optional, and behavior when it's missing is not specified -# here. -# Future extension: if present and non-null, the parameter -# is optional, and defaults to this value. +# @default: default when used as command parameter. If absent, the +# parameter is mandatory. If present, the value must be null. +# The parameter is optional, and behavior when it's missing is not +# specified here. Future extension: if present and non-null, the +# parameter is optional, and defaults to this value. # # @features: names of features associated with the member, in no -# particular order. (since 5.0) +# particular order. (since 5.0) # # Since: 2.5 ## @@ -249,7 +247,7 @@ # @case: a value of the type tag. # # @type: the name of the object type that provides the variant members -# when the type tag has value @case. +# when the type tag has value @case. # # Since: 2.5 ## @@ -261,9 +259,9 @@ # # Additional SchemaInfo members for meta-type 'alternate'. # -# @members: the alternate type's members, in no particular order. -# The members' wire encoding is distinct, see -# docs/devel/qapi-code-gen.txt section Alternate types. +# @members: the alternate type's members, in no particular order. The +# members' wire encoding is distinct, see +# docs/devel/qapi-code-gen.txt section Alternate types. # # On the wire, this can be any of the members. # @@ -290,14 +288,15 @@ # Additional SchemaInfo members for meta-type 'command'. # # @arg-type: the name of the object type that provides the command's -# parameters. +# parameters. # # @ret-type: the name of the command's result type. # # @allow-oob: whether the command allows out-of-band execution, -# defaults to false (Since: 2.12) +# defaults to false (Since: 2.12) # -# TODO: @success-response (currently irrelevant, because it's QGA, not QMP) +# TODO: @success-response (currently irrelevant, because it's QGA, not +# QMP) # # Since: 2.5 ## @@ -311,7 +310,7 @@ # Additional SchemaInfo members for meta-type 'event'. # # @arg-type: the name of the object type that provides the event's -# parameters. +# parameters. # # Since: 2.5 ## diff --git a/qapi/job.json b/qapi/job.json index d5f84e9615..7f0ba090de 100644 --- a/qapi/job.json +++ b/qapi/job.json @@ -2,7 +2,7 @@ # vim: filetype=python ## -# == Background jobs +# = Background jobs ## ## @@ -20,13 +20,17 @@ # # @create: image creation job type, see "blockdev-create" (since 3.0) # -# @amend: image options amend job type, see "x-blockdev-amend" (since 5.1) +# @amend: image options amend job type, see "x-blockdev-amend" (since +# 5.1) # -# @snapshot-load: snapshot load job type, see "snapshot-load" (since 6.0) +# @snapshot-load: snapshot load job type, see "snapshot-load" (since +# 6.0) # -# @snapshot-save: snapshot save job type, see "snapshot-save" (since 6.0) +# @snapshot-save: snapshot save job type, see "snapshot-save" (since +# 6.0) # -# @snapshot-delete: snapshot delete job type, see "snapshot-delete" (since 6.0) +# @snapshot-delete: snapshot delete job type, see "snapshot-delete" +# (since 6.0) # # Since: 1.7 ## @@ -39,41 +43,42 @@ # # Indicates the present state of a given job in its lifetime. # -# @undefined: Erroneous, default state. Should not ever be visible. +# @undefined: Erroneous, default state. Should not ever be visible. # # @created: The job has been created, but not yet started. 
# # @running: The job is currently running. # -# @paused: The job is running, but paused. The pause may be requested by -# either the QMP user or by internal processes. +# @paused: The job is running, but paused. The pause may be requested +# by either the QMP user or by internal processes. # -# @ready: The job is running, but is ready for the user to signal completion. -# This is used for long-running jobs like mirror that are designed to -# run indefinitely. +# @ready: The job is running, but is ready for the user to signal +# completion. This is used for long-running jobs like mirror that +# are designed to run indefinitely. # -# @standby: The job is ready, but paused. This is nearly identical to @paused. -# The job may return to @ready or otherwise be canceled. +# @standby: The job is ready, but paused. This is nearly identical to +# @paused. The job may return to @ready or otherwise be canceled. # -# @waiting: The job is waiting for other jobs in the transaction to converge -# to the waiting state. This status will likely not be visible for -# the last job in a transaction. +# @waiting: The job is waiting for other jobs in the transaction to +# converge to the waiting state. This status will likely not be +# visible for the last job in a transaction. # -# @pending: The job has finished its work, but has finalization steps that it -# needs to make prior to completing. These changes will require -# manual intervention via @job-finalize if auto-finalize was set to -# false. These pending changes may still fail. +# @pending: The job has finished its work, but has finalization steps +# that it needs to make prior to completing. These changes will +# require manual intervention via @job-finalize if auto-finalize +# was set to false. These pending changes may still fail. # -# @aborting: The job is in the process of being aborted, and will finish with -# an error. The job will afterwards report that it is @concluded. -# This status may not be visible to the management process. +# @aborting: The job is in the process of being aborted, and will +# finish with an error. The job will afterwards report that it is +# @concluded. This status may not be visible to the management +# process. # -# @concluded: The job has finished all work. If auto-dismiss was set to false, -# the job will remain in the query list until it is dismissed via -# @job-dismiss. +# @concluded: The job has finished all work. If auto-dismiss was set +# to false, the job will remain in the query list until it is +# dismissed via @job-dismiss. # -# @null: The job is in the process of being dismantled. This state should not -# ever be visible externally. +# @null: The job is in the process of being dismantled. This state +# should not ever be visible externally. # # Since: 2.12 ## @@ -112,6 +117,7 @@ # Emitted when a job transitions to a different status. # # @id: The job identifier +# # @status: The new job status # # Since: 3.0 @@ -125,12 +131,12 @@ # # Pause an active job. # -# This command returns immediately after marking the active job for pausing. -# Pausing an already paused job is an error. +# This command returns immediately after marking the active job for +# pausing. Pausing an already paused job is an error. # -# The job will pause as soon as possible, which means transitioning into the -# PAUSED state if it was RUNNING, or into STANDBY if it was READY. The -# corresponding JOB_STATUS_CHANGE event will be emitted. 
+# The job will pause as soon as possible, which means transitioning +# into the PAUSED state if it was RUNNING, or into STANDBY if it was +# READY. The corresponding JOB_STATUS_CHANGE event will be emitted. # # Cancelling a paused job automatically resumes it. # @@ -145,10 +151,10 @@ # # Resume a paused job. # -# This command returns immediately after resuming a paused job. Resuming an -# already running job is an error. +# This command returns immediately after resuming a paused job. +# Resuming an already running job is an error. # -# @id : The job identifier. +# @id: The job identifier. # # Since: 3.0 ## @@ -161,11 +167,11 @@ # This command returns immediately after marking the active job for # cancellation. # -# The job will cancel as soon as possible and then emit a JOB_STATUS_CHANGE -# event. Usually, the status will change to ABORTING, but it is possible that -# a job successfully completes (e.g. because it was almost done and there was -# no opportunity to cancel earlier than completing the job) and transitions to -# PENDING instead. +# The job will cancel as soon as possible and then emit a +# JOB_STATUS_CHANGE event. Usually, the status will change to +# ABORTING, but it is possible that a job successfully completes (e.g. +# because it was almost done and there was no opportunity to cancel +# earlier than completing the job) and transitions to PENDING instead. # # @id: The job identifier. # @@ -187,12 +193,14 @@ ## # @job-dismiss: # -# Deletes a job that is in the CONCLUDED state. This command only needs to be -# run explicitly for jobs that don't have automatic dismiss enabled. +# Deletes a job that is in the CONCLUDED state. This command only +# needs to be run explicitly for jobs that don't have automatic +# dismiss enabled. # -# This command will refuse to operate on any job that has not yet reached its -# terminal state, JOB_STATUS_CONCLUDED. For jobs that make use of JOB_READY -# event, job-cancel or job-complete will still need to be used as appropriate. +# This command will refuse to operate on any job that has not yet +# reached its terminal state, JOB_STATUS_CONCLUDED. For jobs that make +# use of JOB_READY event, job-cancel or job-complete will still need +# to be used as appropriate. # # @id: The job identifier. # @@ -203,16 +211,17 @@ ## # @job-finalize: # -# Instructs all jobs in a transaction (or a single job if it is not part of any -# transaction) to finalize any graph changes and do any necessary cleanup. This -# command requires that all involved jobs are in the PENDING state. +# Instructs all jobs in a transaction (or a single job if it is not +# part of any transaction) to finalize any graph changes and do any +# necessary cleanup. This command requires that all involved jobs are +# in the PENDING state. # -# For jobs in a transaction, instructing one job to finalize will force -# ALL jobs in the transaction to finalize, so it is only necessary to instruct -# a single member job to finalize. +# For jobs in a transaction, instructing one job to finalize will +# force ALL jobs in the transaction to finalize, so it is only +# necessary to instruct a single member job to finalize. # -# @id: The identifier of any job in the transaction, or of a job that is not -# part of any transaction. +# @id: The identifier of any job in the transaction, or of a job that +# is not part of any transaction. # # Since: 3.0 ## @@ -229,22 +238,22 @@ # # @status: Current job state/status # -# @current-progress: Progress made until now. 
The unit is arbitrary and the -# value can only meaningfully be used for the ratio of -# @current-progress to @total-progress. The value is -# monotonically increasing. +# @current-progress: Progress made until now. The unit is arbitrary +# and the value can only meaningfully be used for the ratio of +# @current-progress to @total-progress. The value is +# monotonically increasing. # -# @total-progress: Estimated @current-progress value at the completion of -# the job. This value can arbitrarily change while the -# job is running, in both directions. +# @total-progress: Estimated @current-progress value at the completion +# of the job. This value can arbitrarily change while the job is +# running, in both directions. # -# @error: If this field is present, the job failed; if it is -# still missing in the CONCLUDED state, this indicates -# successful completion. +# @error: If this field is present, the job failed; if it is still +# missing in the CONCLUDED state, this indicates successful +# completion. # -# The value is a human-readable error message to describe -# the reason for the job failure. It should not be parsed -# by applications. +# The value is a human-readable error message to describe the +# reason for the job failure. It should not be parsed by +# applications. # # Since: 3.0 ## diff --git a/qapi/machine-target.json b/qapi/machine-target.json index 2e267fa458..3362f8dc3f 100644 --- a/qapi/machine-target.json +++ b/qapi/machine-target.json @@ -9,12 +9,13 @@ # # Virtual CPU model. # -# A CPU model consists of the name of a CPU definition, to which -# delta changes are applied (e.g. features added/removed). Most magic values +# A CPU model consists of the name of a CPU definition, to which delta +# changes are applied (e.g. features added/removed). Most magic values # that an architecture might require should be hidden behind the name. # However, if required, architectures can expose relevant properties. # # @name: the name of the CPU definition the model is based on +# # @props: a dictionary of QOM properties to be applied # # Since: 2.8 @@ -28,26 +29,28 @@ # # An enumeration of CPU model expansion types. # -# @static: Expand to a static CPU model, a combination of a static base -# model name and property delta changes. As the static base model will -# never change, the expanded CPU model will be the same, independent of -# QEMU version, machine type, machine options, and accelerator options. -# Therefore, the resulting model can be used by tooling without having -# to specify a compatibility machine - e.g. when displaying the "host" -# model. The @static CPU models are migration-safe. - -# @full: Expand all properties. The produced model is not guaranteed to be -# migration-safe, but allows tooling to get an insight and work with -# model details. +# @static: Expand to a static CPU model, a combination of a static +# base model name and property delta changes. As the static base +# model will never change, the expanded CPU model will be the +# same, independent of QEMU version, machine type, machine +# options, and accelerator options. Therefore, the resulting +# model can be used by tooling without having to specify a +# compatibility machine - e.g. when displaying the "host" model. +# The @static CPU models are migration-safe. # -# Note: When a non-migration-safe CPU model is expanded in static mode, some -# features enabled by the CPU model may be omitted, because they can't be -# implemented by a static CPU model definition (e.g. 
cache info passthrough and
-# PMU passthrough in x86). If you need an accurate representation of the
-# features enabled by a non-migration-safe CPU model, use @full. If you need a
-# static representation that will keep ABI compatibility even when changing QEMU
-# version or machine-type, use @static (but keep in mind that some features may
-# be omitted).
+# @full: Expand all properties. The produced model is not guaranteed
+# to be migration-safe, but allows tooling to get an insight and
+# work with model details.
+#
+# Note: When a non-migration-safe CPU model is expanded in static
+# mode, some features enabled by the CPU model may be omitted,
+# because they can't be implemented by a static CPU model
+# definition (e.g. cache info passthrough and PMU passthrough in
+# x86). If you need an accurate representation of the features
+# enabled by a non-migration-safe CPU model, use @full. If you
+# need a static representation that will keep ABI compatibility
+# even when changing QEMU version or machine-type, use @static
+# (but keep in mind that some features may be omitted).
#
# Since: 2.8
##
@@ -57,20 +60,22 @@
##
# @CpuModelCompareResult:
#
-# An enumeration of CPU model comparison results. The result is usually
-# calculated using e.g. CPU features or CPU generations.
+# An enumeration of CPU model comparison results. The result is
+# usually calculated using e.g. CPU features or CPU generations.
#
# @incompatible: If model A is incompatible with model B, model A is not
-# guaranteed to run where model B runs and the other way around.
+# guaranteed to run where model B runs and the other way around.
#
-# @identical: If model A is identical to model B, model A is guaranteed to run
-# where model B runs and the other way around.
+# @identical: If model A is identical to model B, model A is
+# guaranteed to run where model B runs and the other way around.
#
-# @superset: If model A is a superset of model B, model B is guaranteed to run
-# where model A runs. There are no guarantees about the other way.
+# @superset: If model A is a superset of model B, model B is
+# guaranteed to run where model A runs. There are no guarantees
+# about the other way.
#
-# @subset: If model A is a subset of model B, model A is guaranteed to run
-# where model B runs. There are no guarantees about the other way.
+# @subset: If model A is a subset of model B, model A is guaranteed to
+# run where model B runs. There are no guarantees about the other
+# way.
#
# Since: 2.8
##
@@ -96,15 +101,16 @@
# The result of a CPU model comparison.
#
# @result: The result of the compare operation.
-# @responsible-properties: List of properties that led to the comparison result
-# not being identical.
+#
+# @responsible-properties: List of properties that led to the
+# comparison result not being identical.
#
# @responsible-properties is a list of QOM property names that led to
-# both CPUs not being detected as identical. For identical models, this
-# list is empty.
-# If a QOM property is read-only, that means there's no known way to make the
-# CPU models identical. If the special property name "type" is included, the
-# models are by definition not identical and cannot be made identical.
+# both CPUs not being detected as identical. For identical models,
+# this list is empty. If a QOM property is read-only, that means
+# there's no known way to make the CPU models identical. If the
+# special property name "type" is included, the models are by
+# definition not identical and cannot be made identical.
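+#
+# For instance (an illustrative sketch, not taken from this schema;
+# the argument names "modela"/"modelb" and the model name are
+# assumptions used for illustration):
+#
+# -> { "execute": "query-cpu-model-comparison",
+#      "arguments": { "modela": { "name": "z13" },
+#                     "modelb": { "name": "z13" } } }
+# <- { "return": { "result": "identical",
+#                  "responsible-properties": [] } }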
#
# Since: 2.8
##
@@ -117,38 +123,42 @@
# @query-cpu-model-comparison:
#
# Compares two CPU models, returning how they compare in a specific
-# configuration. The results indicates how both models compare regarding
-# runnability. This result can be used by tooling to make decisions if a
-# certain CPU model will run in a certain configuration or if a compatible
-# CPU model has to be created by baselining.
+# configuration. The result indicates how both models compare
+# regarding runnability. This result can be used by tooling to decide
+# whether a certain CPU model will run in a certain configuration or
+# whether a compatible CPU model has to be created by baselining.
#
-# Usually, a CPU model is compared against the maximum possible CPU model
-# of a certain configuration (e.g. the "host" model for KVM). If that CPU
-# model is identical or a subset, it will run in that configuration.
+# Usually, a CPU model is compared against the maximum possible CPU
+# model of a certain configuration (e.g. the "host" model for KVM).
+# If that CPU model is identical or a subset, it will run in that
+# configuration.
#
# The result returned by this command may be affected by:
#
-# * QEMU version: CPU models may look different depending on the QEMU version.
-# (Except for CPU models reported as "static" in query-cpu-definitions.)
-# * machine-type: CPU model may look different depending on the machine-type.
-# (Except for CPU models reported as "static" in query-cpu-definitions.)
-# * machine options (including accelerator): in some architectures, CPU models
-# may look different depending on machine and accelerator options. (Except for
-# CPU models reported as "static" in query-cpu-definitions.)
-# * "-cpu" arguments and global properties: arguments to the -cpu option and
-# global properties may affect expansion of CPU models. Using
-# query-cpu-model-expansion while using these is not advised.
+# * QEMU version: CPU models may look different depending on the QEMU
+# version. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * machine-type: CPU model may look different depending on the
+# machine-type. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * machine options (including accelerator): in some architectures,
+# CPU models may look different depending on machine and accelerator
+# options. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * "-cpu" arguments and global properties: arguments to the -cpu
+# option and global properties may affect expansion of CPU models.
+# Using query-cpu-model-expansion while using these is not advised.
#
-# Some architectures may not support comparing CPU models. s390x supports
-# comparing CPU models.
+# Some architectures may not support comparing CPU models. s390x
+# supports comparing CPU models.
#
-# Returns: a CpuModelBaselineInfo. Returns an error if comparing CPU models is
-# not supported, if a model cannot be used, if a model contains
-# an unknown cpu definition name, unknown properties or properties
-# with wrong types.
+# Returns: a CpuModelCompareInfo. Returns an error if comparing CPU
+# models is not supported, if a model cannot be used, if a model
+# contains an unknown cpu definition name, unknown properties or
+# properties with wrong types.
#
# Note: this command isn't specific to s390x, but is only implemented
-# on this architecture currently.
#
# Since: 2.8
##
@@ -160,38 +170,42 @@
##
# @query-cpu-model-baseline:
#
-# Baseline two CPU models, creating a compatible third model. The created
-# model will always be a static, migration-safe CPU model (see "static"
-# CPU model expansion for details).
+# Baseline two CPU models, creating a compatible third model. The
+# created model will always be a static, migration-safe CPU model (see
+# "static" CPU model expansion for details).
#
-# This interface can be used by tooling to create a compatible CPU model out
-# two CPU models. The created CPU model will be identical to or a subset of
-# both CPU models when comparing them. Therefore, the created CPU model is
-# guaranteed to run where the given CPU models run.
+# This interface can be used by tooling to create a compatible CPU
+# model out of two CPU models. The created CPU model will be identical
+# to or a subset of both CPU models when comparing them. Therefore,
+# the created CPU model is guaranteed to run where the given CPU
+# models run.
#
# The result returned by this command may be affected by:
#
-# * QEMU version: CPU models may look different depending on the QEMU version.
-# (Except for CPU models reported as "static" in query-cpu-definitions.)
-# * machine-type: CPU model may look different depending on the machine-type.
-# (Except for CPU models reported as "static" in query-cpu-definitions.)
-# * machine options (including accelerator): in some architectures, CPU models
-# may look different depending on machine and accelerator options. (Except for
-# CPU models reported as "static" in query-cpu-definitions.)
-# * "-cpu" arguments and global properties: arguments to the -cpu option and
-# global properties may affect expansion of CPU models. Using
-# query-cpu-model-expansion while using these is not advised.
+# * QEMU version: CPU models may look different depending on the QEMU
+# version. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * machine-type: CPU model may look different depending on the
+# machine-type. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * machine options (including accelerator): in some architectures,
+# CPU models may look different depending on machine and accelerator
+# options. (Except for CPU models reported as "static" in
+# query-cpu-definitions.)
+# * "-cpu" arguments and global properties: arguments to the -cpu
+# option and global properties may affect expansion of CPU models.
+# Using query-cpu-model-expansion while using these is not advised.
#
-# Some architectures may not support baselining CPU models. s390x supports
-# baselining CPU models.
+# Some architectures may not support baselining CPU models. s390x
+# supports baselining CPU models.
#
-# Returns: a CpuModelBaselineInfo. Returns an error if baselining CPU models is
-# not supported, if a model cannot be used, if a model contains
-# an unknown cpu definition name, unknown properties or properties
-# with wrong types.
+# Returns: a CpuModelBaselineInfo. Returns an error if baselining CPU
+# models is not supported, if a model cannot be used, if a model
+# contains an unknown cpu definition name, unknown properties or
+# properties with wrong types.
#
# Note: this command isn't specific to s390x, but is only implemented
-# on this architecture currently.
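+#
+# An illustrative sketch only (the argument names "modela"/"modelb",
+# the shape of the returned CpuModelBaselineInfo, and the model names
+# are assumptions, not taken from this excerpt of the schema):
+#
+# -> { "execute": "query-cpu-model-baseline",
+#      "arguments": { "modela": { "name": "z14" },
+#                     "modelb": { "name": "z13" } } }
+# <- { "return": { "model": { "name": "z13-base",
+#                             "props": {} } } }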
# # Since: 2.8 ## @@ -219,33 +233,37 @@ ## # @query-cpu-model-expansion: # -# Expands a given CPU model (or a combination of CPU model + additional options) -# to different granularities, allowing tooling to get an understanding what a -# specific CPU model looks like in QEMU under a certain configuration. +# Expands a given CPU model (or a combination of CPU model + +# additional options) to different granularities, allowing tooling to +# get an understanding of what a specific CPU model looks like in QEMU +# under a certain configuration. # # This interface can be used to query the "host" CPU model. # # The data returned by this command may be affected by: # -# * QEMU version: CPU models may look different depending on the QEMU version. -# (Except for CPU models reported as "static" in query-cpu-definitions.) -# * machine-type: CPU model may look different depending on the machine-type. -# (Except for CPU models reported as "static" in query-cpu-definitions.) -# * machine options (including accelerator): in some architectures, CPU models -# may look different depending on machine and accelerator options. (Except for -# CPU models reported as "static" in query-cpu-definitions.) -# * "-cpu" arguments and global properties: arguments to the -cpu option and -# global properties may affect expansion of CPU models. Using -# query-cpu-model-expansion while using these is not advised. +# * QEMU version: CPU models may look different depending on the QEMU +# version. (Except for CPU models reported as "static" in +# query-cpu-definitions.) +# * machine-type: CPU model may look different depending on the +# machine-type. (Except for CPU models reported as "static" in +# query-cpu-definitions.) +# * machine options (including accelerator): in some architectures, +# CPU models may look different depending on machine and accelerator +# options. (Except for CPU models reported as "static" in +# query-cpu-definitions.) +# * "-cpu" arguments and global properties: arguments to the -cpu +# option and global properties may affect expansion of CPU models. +# Using query-cpu-model-expansion while using these is not advised. # -# Some architectures may not support all expansion types. s390x supports -# "full" and "static". Arm only supports "full". +# Some architectures may not support all expansion types. s390x +# supports "full" and "static". Arm only supports "full". # -# Returns: a CpuModelExpansionInfo. Returns an error if expanding CPU models is -# not supported, if the model cannot be expanded, if the model contains -# an unknown CPU definition name, unknown properties or properties -# with a wrong type. Also returns an error if an expansion type is -# not supported. +# Returns: a CpuModelExpansionInfo. Returns an error if expanding CPU +# models is not supported, if the model cannot be expanded, if the +# model contains an unknown CPU definition name, unknown +# properties or properties with a wrong type. Also returns an +# error if an expansion type is not supported. # # Since: 2.8 ## @@ -265,49 +283,48 @@ # @name: the name of the CPU definition # # @migration-safe: whether a CPU definition can be safely used for -# migration in combination with a QEMU compatibility machine -# when migrating between different QEMU versions and between -# hosts with different sets of (hardware or software) -# capabilities. If not provided, information is not available -# and callers should not assume the CPU definition to be -# migration-safe. 
(since 2.8) +# migration in combination with a QEMU compatibility machine when +# migrating between different QEMU versions and between hosts with +# different sets of (hardware or software) capabilities. If not +# provided, information is not available and callers should not +# assume the CPU definition to be migration-safe. (since 2.8) # -# @static: whether a CPU definition is static and will not change depending on -# QEMU version, machine type, machine options and accelerator options. -# A static model is always migration-safe. (since 2.8) +# @static: whether a CPU definition is static and will not change +# depending on QEMU version, machine type, machine options and +# accelerator options. A static model is always migration-safe. +# (since 2.8) # -# @unavailable-features: List of properties that prevent -# the CPU model from running in the current -# host. (since 2.8) -# @typename: Type name that can be used as argument to @device-list-properties, -# to introspect properties configurable using -cpu or -global. -# (since 2.9) +# @unavailable-features: List of properties that prevent the CPU model +# from running in the current host. (since 2.8) # -# @alias-of: Name of CPU model this model is an alias for. The target of the -# CPU model alias may change depending on the machine type. -# Management software is supposed to translate CPU model aliases -# in the VM configuration, because aliases may stop being -# migration-safe in the future (since 4.1) +# @typename: Type name that can be used as argument to +# @device-list-properties, to introspect properties configurable +# using -cpu or -global. (since 2.9) # -# @deprecated: If true, this CPU model is deprecated and may be removed in -# in some future version of QEMU according to the QEMU deprecation -# policy. (since 5.2) +# @alias-of: Name of CPU model this model is an alias for. The target +# of the CPU model alias may change depending on the machine type. +# Management software is supposed to translate CPU model aliases +# in the VM configuration, because aliases may stop being +# migration-safe in the future (since 4.1) # -# @unavailable-features is a list of QOM property names that -# represent CPU model attributes that prevent the CPU from running. -# If the QOM property is read-only, that means there's no known -# way to make the CPU model run in the current host. Implementations -# that choose not to provide specific information return the -# property name "type". -# If the property is read-write, it means that it MAY be possible -# to run the CPU model in the current host if that property is -# changed. Management software can use it as hints to suggest or -# choose an alternative for the user, or just to generate meaningful -# error messages explaining why the CPU model can't be used. -# If @unavailable-features is an empty list, the CPU model is -# runnable using the current host and machine-type. -# If @unavailable-features is not present, runnability -# information for the CPU is not available. +# @deprecated: If true, this CPU model is deprecated and may be +# removed in some future version of QEMU according to the QEMU +# deprecation policy. (since 5.2) +# +# @unavailable-features is a list of QOM property names that represent +# CPU model attributes that prevent the CPU from running. If the QOM +# property is read-only, that means there's no known way to make the +# CPU model run in the current host. Implementations that choose not +# to provide specific information return the property name "type". 
If +# the property is read-write, it means that it MAY be possible to run +# the CPU model in the current host if that property is changed. +# Management software can use it as hints to suggest or choose an +# alternative for the user, or just to generate meaningful error +# messages explaining why the CPU model can't be used. If +# @unavailable-features is an empty list, the CPU model is runnable +# using the current host and machine-type. If @unavailable-features +# is not present, runnability information for the CPU is not +# available. # # Since: 1.2 ## @@ -324,14 +341,15 @@ 'TARGET_I386', 'TARGET_S390X', 'TARGET_MIPS', - 'TARGET_LOONGARCH64' ] } } + 'TARGET_LOONGARCH64', + 'TARGET_RISCV' ] } } ## # @query-cpu-definitions: # # Return a list of supported virtual CPU definitions # -# Returns: a list of CpuDefInfo +# Returns: a list of CpuDefinitionInfo # # Since: 1.2 ## @@ -341,4 +359,5 @@ 'TARGET_I386', 'TARGET_S390X', 'TARGET_MIPS', - 'TARGET_LOONGARCH64' ] } } + 'TARGET_LOONGARCH64', + 'TARGET_RISCV' ] } } diff --git a/qapi/machine.json b/qapi/machine.json index b9228a5e46..37660d8f2a 100644 --- a/qapi/machine.json +++ b/qapi/machine.json @@ -14,17 +14,18 @@ # @SysEmuTarget: # # The comprehensive enumeration of QEMU system emulation ("softmmu") -# targets. Run "./configure --help" in the project root directory, and -# look for the \*-softmmu targets near the "--target-list" option. The -# individual target constants are not documented here, for the time -# being. +# targets. Run "./configure --help" in the project root directory, +# and look for the \*-softmmu targets near the "--target-list" option. +# The individual target constants are not documented here, for the +# time being. # # @rx: since 5.0 +# # @avr: since 5.1 # -# Notes: The resulting QMP strings can be appended to the "qemu-system-" -# prefix to produce the corresponding QEMU executable name. This -# is true even for "qemu-system-x86_64". +# Notes: The resulting QMP strings can be appended to the +# "qemu-system-" prefix to produce the corresponding QEMU +# executable name. This is true even for "qemu-system-x86_64". 
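As a concrete shape for the query-cpu-definitions reply documented above, a trimmed, hedged sketch (one illustrative entry; real lists are much longer and the values vary by target, machine type and QEMU version):

-> { "execute": "query-cpu-definitions" }
<- { "return": [ { "name": "qemu64",
                   "typename": "qemu64-x86_64-cpu",
                   "static": false,
                   "migration-safe": true,
                   "deprecated": false } ] }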
# # Since: 3.0 ## @@ -39,8 +40,8 @@ ## # @CpuS390State: # -# An enumeration of cpu states that can be assumed by a virtual -# S390 CPU +# An enumeration of cpu states that can be assumed by a virtual S390 +# CPU # # Since: 2.12 ## @@ -71,10 +72,10 @@ # @thread-id: ID of the underlying host thread # # @props: properties describing to which node/socket/core/thread -# virtual CPU belongs to, provided if supported by board +# virtual CPU belongs to, provided if supported by board # # @target: the QEMU system emulation target, which determines which -# additional fields will be listed (since 3.0) +# additional fields will be listed (since 3.0) # # Since: 2.12 ## @@ -139,21 +140,24 @@ # @is-default: whether the machine is default # # @cpu-max: maximum number of CPUs supported by the machine type -# (since 1.5) +# (since 1.5) # # @hotpluggable-cpus: cpu hotplug via -device is supported (since 2.7) # # @numa-mem-supported: true if '-numa node,mem' option is supported by -# the machine type and false otherwise (since 4.1) +# the machine type and false otherwise (since 4.1) # -# @deprecated: if true, the machine type is deprecated and may be removed -# in future versions of QEMU according to the QEMU deprecation -# policy (since 4.1) +# @deprecated: if true, the machine type is deprecated and may be +# removed in future versions of QEMU according to the QEMU +# deprecation policy (since 4.1) # -# @default-cpu-type: default CPU model typename if none is requested via -# the -cpu argument. (since 4.2) +# @default-cpu-type: default CPU model typename if none is requested +# via the -cpu argument. (since 4.2) # -# @default-ram-id: the default ID of initial RAM memory backend (since 5.2) +# @default-ram-id: the default ID of initial RAM memory backend (since +# 5.2) +# +# @acpi: machine type supports ACPI (since 8.0) # # Since: 1.2 ## @@ -162,7 +166,7 @@ '*is-default': 'bool', 'cpu-max': 'int', 'hotpluggable-cpus': 'bool', 'numa-mem-supported': 'bool', 'deprecated': 'bool', '*default-cpu-type': 'str', - '*default-ram-id': 'str' } } + '*default-ram-id': 'str', 'acpi': 'bool' } } ## # @query-machines: @@ -181,7 +185,7 @@ # Information describing the running machine parameters. # # @wakeup-suspend-support: true if the machine supports wake up from -# suspend +# suspend # # Since: 4.0 ## @@ -231,7 +235,8 @@ # # Since: 0.14 # -# Notes: If no UUID was specified for the guest, a null UUID is returned. +# Notes: If no UUID was specified for the guest, a null UUID is +# returned. ## { 'struct': 'UuidInfo', 'data': {'UUID': 'str'} } @@ -248,7 +253,6 @@ # # -> { "execute": "query-uuid" } # <- { "return": { "UUID": "550e8400-e29b-41d4-a716-446655440000" } } -# ## { 'command': 'query-uuid', 'returns': 'UuidInfo', 'allow-preconfig': true } @@ -283,7 +287,6 @@ # # -> { "execute": "system_reset" } # <- { "return": {} } -# ## { 'command': 'system_reset' } @@ -295,67 +298,65 @@ # Since: 0.14 # # Notes: A guest may or may not respond to this command. This command -# returning does not indicate that a guest has accepted the request or -# that it has shut down. Many guests will respond to this command by -# prompting the user in some way. +# returning does not indicate that a guest has accepted the +# request or that it has shut down. Many guests will respond to +# this command by prompting the user in some way. # # Example: # # -> { "execute": "system_powerdown" } # <- { "return": {} } -# ## { 'command': 'system_powerdown' } ## # @system_wakeup: # -# Wake up guest from suspend. 
If the guest has wake-up from suspend +# Wake up guest from suspend. If the guest has wake-up from suspend # support enabled (wakeup-suspend-support flag from # query-current-machine), wake-up guest from suspend if the guest is -# in SUSPENDED state. Return an error otherwise. +# in SUSPENDED state. Return an error otherwise. # # Since: 1.1 # # Returns: nothing. # # Note: prior to 4.0, this command does nothing in case the guest -# isn't suspended. +# isn't suspended. # # Example: # # -> { "execute": "system_wakeup" } # <- { "return": {} } -# ## { 'command': 'system_wakeup' } ## # @LostTickPolicy: # -# Policy for handling lost ticks in timer devices. Ticks end up getting -# lost when, for example, the guest is paused. +# Policy for handling lost ticks in timer devices. Ticks end up +# getting lost when, for example, the guest is paused. # -# @discard: throw away the missed ticks and continue with future injection -# normally. The guest OS will see the timer jump ahead by a -# potentially quite significant amount all at once, as if the -# intervening chunk of time had simply not existed; needless to -# say, such a sudden jump can easily confuse a guest OS which is -# not specifically prepared to deal with it. Assuming the guest -# OS can deal correctly with the time jump, the time in the guest -# and in the host should now match. +# @discard: throw away the missed ticks and continue with future +# injection normally. The guest OS will see the timer jump ahead +# by a potentially quite significant amount all at once, as if the +# intervening chunk of time had simply not existed; needless to +# say, such a sudden jump can easily confuse a guest OS which is +# not specifically prepared to deal with it. Assuming the guest +# OS can deal correctly with the time jump, the time in the guest +# and in the host should now match. # -# @delay: continue to deliver ticks at the normal rate. The guest OS will -# not notice anything is amiss, as from its point of view time will -# have continued to flow normally. The time in the guest should now -# be behind the time in the host by exactly the amount of time during -# which ticks have been missed. +# @delay: continue to deliver ticks at the normal rate. The guest OS +# will not notice anything is amiss, as from its point of view +# time will have continued to flow normally. The time in the +# guest should now be behind the time in the host by exactly the +# amount of time during which ticks have been missed. # -# @slew: deliver ticks at a higher rate to catch up with the missed ticks. -# The guest OS will not notice anything is amiss, as from its point -# of view time will have continued to flow normally. Once the timer -# has managed to catch up with all the missing ticks, the time in -# the guest and in the host should match. +# @slew: deliver ticks at a higher rate to catch up with the missed +# ticks. The guest OS will not notice anything is amiss, as from +# its point of view time will have continued to flow normally. +# Once the timer has managed to catch up with all the missing +# ticks, the time in the guest and in the host should match. # # Since: 2.0 ## @@ -365,20 +366,21 @@ ## # @inject-nmi: # -# Injects a Non-Maskable Interrupt into the default CPU (x86/s390) or all CPUs (ppc64). -# The command fails when the guest doesn't support injecting. +# Injects a Non-Maskable Interrupt into the default CPU (x86/s390) or +# all CPUs (ppc64). The command fails when the guest doesn't support +# injecting. 
# # Returns: If successful, nothing # # Since: 0.14 # -# Note: prior to 2.1, this command was only supported for x86 and s390 VMs +# Note: prior to 2.1, this command was only supported for x86 and s390 +# VMs # # Example: # # -> { "execute": "inject-nmi" } # <- { "return": {} } -# ## { 'command': 'inject-nmi' } @@ -408,7 +410,6 @@ # # -> { "execute": "query-kvm" } # <- { "return": { "enabled": true, "present": true } } -# ## { 'command': 'query-kvm', 'returns': 'KvmInfo' } @@ -433,7 +434,7 @@ ## # @NumaOptions: # -# A discriminated record of NUMA options. (for OptsVisitor) +# A discriminated record of NUMA options. (for OptsVisitor) # # Since: 2.1 ## @@ -450,26 +451,25 @@ ## # @NumaNodeOptions: # -# Create a guest NUMA node. (for OptsVisitor) +# Create a guest NUMA node. (for OptsVisitor) # # @nodeid: NUMA node ID (increase by 1 from 0 if omitted) # -# @cpus: VCPUs belonging to this node (assign VCPUS round-robin -# if omitted) +# @cpus: VCPUs belonging to this node (assign VCPUS round-robin if +# omitted) # # @mem: memory size of this node; mutually exclusive with @memdev. -# Equally divide total memory among nodes if both @mem and @memdev are -# omitted. +# Equally divide total memory among nodes if both @mem and @memdev +# are omitted. # -# @memdev: memory backend object. If specified for one node, -# it must be specified for all nodes. +# @memdev: memory backend object. If specified for one node, it must +# be specified for all nodes. # -# @initiator: defined in ACPI 6.3 Chapter 5.2.27.3 Table 5-145, -# points to the nodeid which has the memory controller -# responsible for this NUMA node. This field provides -# additional information as to the initiator node that -# is closest (as in directly attached) to this node, and -# therefore has the best performance (since 5.0) +# @initiator: defined in ACPI 6.3 Chapter 5.2.27.3 Table 5-145, points +# to the nodeid which has the memory controller responsible for +# this NUMA node. This field provides additional information as +# to the initiator node that is closest (as in directly attached) +# to this node, and therefore has the best performance (since 5.0) # # Since: 2.1 ## @@ -490,9 +490,9 @@ # # @dst: destination NUMA node. # -# @val: NUMA distance from source node to destination node. -# When a node is unreachable from another node, set the distance -# between them to 255. +# @val: NUMA distance from source node to destination node. When a +# node is unreachable from another node, set the distance between +# them to 255. # # Since: 2.10 ## @@ -507,15 +507,17 @@ # # Create a CXL Fixed Memory Window # -# @size: Size of the Fixed Memory Window in bytes. Must be a multiple -# of 256MiB. -# @interleave-granularity: Number of contiguous bytes for which -# accesses will go to a given interleave target. -# Accepted values [256, 512, 1k, 2k, 4k, 8k, 16k] -# @targets: Target root bridge IDs from -device ...,id= for each root -# bridge. +# @size: Size of the Fixed Memory Window in bytes. Must be a multiple +# of 256MiB. # -# Since 7.1 +# @interleave-granularity: Number of contiguous bytes for which +# accesses will go to a given interleave target. Accepted values +# [256, 512, 1k, 2k, 4k, 8k, 16k] +# +# @targets: Target root bridge IDs from -device ...,id= for each +# root bridge. 
+# +# Since: 7.1 ## { 'struct': 'CXLFixedMemoryWindowOptions', 'data': { @@ -530,7 +532,7 @@ # # @cxl-fmw: List of CXLFixedMemoryWindowOptions # -# Since 7.1 +# Since: 7.1 ## { 'struct' : 'CXLFMWProperties', 'data': { 'cxl-fmw': ['CXLFixedMemoryWindowOptions'] } @@ -551,10 +553,11 @@ # # Information about a X86 CPU feature word # -# @cpuid-input-eax: Input EAX value for CPUID instruction for that feature word +# @cpuid-input-eax: Input EAX value for CPUID instruction for that +# feature word # # @cpuid-input-ecx: Input ECX value for CPUID instruction for that -# feature word +# feature word # # @cpuid-register: Output register containing the feature bits # @@ -571,7 +574,8 @@ ## # @DummyForceArrays: # -# Not used by QMP; hack to let us use X86CPUFeatureWordInfoList internally +# Not used by QMP; hack to let us use X86CPUFeatureWordInfoList +# internally # # Since: 2.5 ## @@ -581,8 +585,8 @@ ## # @NumaCpuOptions: # -# Option "-numa cpu" overrides default cpu to node mapping. -# It accepts the same set of cpu properties as returned by +# Option "-numa cpu" overrides default cpu to node mapping. It +# accepts the same set of cpu properties as returned by # query-hotpluggable-cpus[].props, where node-id could be used to # override default node mapping. # @@ -617,11 +621,11 @@ ## # @HmatLBDataType: # -# Data type in the System Locality Latency and Bandwidth -# Information Structure of HMAT (Heterogeneous Memory Attribute Table) +# Data type in the System Locality Latency and Bandwidth Information +# Structure of HMAT (Heterogeneous Memory Attribute Table) # -# For more information about @HmatLBDataType, see chapter -# 5.2.27.4: Table 5-146: Field "Data Type" of ACPI 6.3 spec. +# For more information about @HmatLBDataType, see chapter 5.2.27.4: +# Table 5-146: Field "Data Type" of ACPI 6.3 spec. # # @access-latency: access latency (nanoseconds) # @@ -644,28 +648,27 @@ ## # @NumaHmatLBOptions: # -# Set the system locality latency and bandwidth information -# between Initiator and Target proximity Domains. +# Set the system locality latency and bandwidth information between +# Initiator and Target proximity Domains. # -# For more information about @NumaHmatLBOptions, see chapter -# 5.2.27.4: Table 5-146 of ACPI 6.3 spec. +# For more information about @NumaHmatLBOptions, see chapter 5.2.27.4: +# Table 5-146 of ACPI 6.3 spec. # # @initiator: the Initiator Proximity Domain. # # @target: the Target Proximity Domain. # -# @hierarchy: the Memory Hierarchy. Indicates the performance -# of memory or side cache. +# @hierarchy: the Memory Hierarchy. Indicates the performance of +# memory or side cache. # -# @data-type: presents the type of data, access/read/write -# latency or hit latency. +# @data-type: presents the type of data, access/read/write latency or +# hit latency. # -# @latency: the value of latency from @initiator to @target -# proximity domain, the latency unit is "ns(nanosecond)". +# @latency: the value of latency from @initiator to @target proximity +# domain, the latency unit is "ns(nanosecond)". # # @bandwidth: the value of bandwidth between @initiator and @target -# proximity domain, the bandwidth unit is -# "Bytes per second". +# proximity domain, the bandwidth unit is "Bytes per second". # # Since: 5.0 ## @@ -687,8 +690,8 @@ # For more information of @HmatCacheAssociativity, see chapter # 5.2.27.5: Table 5-147 of ACPI 6.3 spec. 
# -# @none: None (no memory side cache in this proximity domain, -# or cache associativity unknown) +# @none: None (no memory side cache in this proximity domain, or cache +# associativity unknown) # # @direct: Direct Mapped # @@ -702,14 +705,14 @@ ## # @HmatCacheWritePolicy: # -# Cache write policy in the Memory Side Cache Information Structure -# of HMAT +# Cache write policy in the Memory Side Cache Information Structure of +# HMAT # -# For more information of @HmatCacheWritePolicy, see chapter -# 5.2.27.5: Table 5-147: Field "Cache Attributes" of ACPI 6.3 spec. +# For more information of @HmatCacheWritePolicy, see chapter 5.2.27.5: +# Table 5-147: Field "Cache Attributes" of ACPI 6.3 spec. # -# @none: None (no memory side cache in this proximity domain, -# or cache write policy unknown) +# @none: None (no memory side cache in this proximity domain, or cache +# write policy unknown) # # @write-back: Write Back (WB) # @@ -725,8 +728,8 @@ # # Set the memory side cache information for a given memory domain. # -# For more information of @NumaHmatCacheOptions, see chapter -# 5.2.27.5: Table 5-147: Field "Cache Attributes" of ACPI 6.3 spec. +# For more information of @NumaHmatCacheOptions, see chapter 5.2.27.5: +# Table 5-147: Field "Cache Attributes" of ACPI 6.3 spec. # # @node-id: the memory proximity domain to which the memory belongs. # @@ -735,7 +738,7 @@ # @level: the cache level described in this structure. # # @associativity: the cache associativity, -# none/direct-mapped/complex(complex cache indexing). +# none/direct-mapped/complex(complex cache indexing). # # @policy: the write policy, none/write-back/write-through. # @@ -764,7 +767,7 @@ # @filename: the file to save the memory to as binary data # # @cpu-index: the index of the virtual CPU to use for translating the -# virtual address (defaults to CPU 0) +# virtual address (defaults to CPU 0) # # Returns: Nothing on success # @@ -779,7 +782,6 @@ # "size": 100, # "filename": "/tmp/virtual-mem-dump" } } # <- { "return": {} } -# ## { 'command': 'memsave', 'data': {'val': 'int', 'size': 'int', 'filename': 'str', '*cpu-index': 'int'} } @@ -808,7 +810,6 @@ # "size": 100, # "filename": "/tmp/physical-mem-dump" } } # <- { "return": {} } -# ## { 'command': 'pmemsave', 'data': {'val': 'int', 'size': 'int', 'filename': 'str'} } @@ -830,11 +831,11 @@ # # @share: whether memory is private to QEMU or shared (since 6.1) # -# @reserve: whether swap space (or huge pages) was reserved if applicable. -# This corresponds to the user configuration and not the actual -# behavior implemented in the OS to perform the reservation. -# For example, Linux will never reserve swap space for shared -# file mappings. (since 6.1) +# @reserve: whether swap space (or huge pages) was reserved if +# applicable. This corresponds to the user configuration and not +# the actual behavior implemented in the OS to perform the +# reservation. For example, Linux will never reserve swap space +# for shared file mappings. (since 6.1) # # @host-nodes: host nodes for its memory policy # @@ -888,29 +889,34 @@ # } # ] # } -# ## { 'command': 'query-memdev', 'returns': ['Memdev'], 'allow-preconfig': true } ## # @CpuInstanceProperties: # -# List of properties to be used for hotplugging a CPU instance, -# it should be passed by management with device_add command when -# a CPU is being hotplugged. +# List of properties to be used for hotplugging a CPU instance, it +# should be passed by management with device_add command when a CPU is +# being hotplugged. 
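For example, a management application hotplugs a CPU by passing these properties straight to device_add (a sketch; the driver name and topology values are hypothetical and would normally be copied from a prior query-hotpluggable-cpus reply, as in the examples further below):

-> { "execute": "device_add",
     "arguments": { "driver": "IvyBridge-IBRS-x86_64-cpu", "id": "cpu-2",
                    "socket-id": 1, "core-id": 0, "thread-id": 0 } }
<- { "return": {} }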
# # @node-id: NUMA node ID the CPU belongs to +# # @socket-id: socket number within node/board the CPU belongs to +# # @die-id: die number within socket the CPU belongs to (since 4.1) -# @cluster-id: cluster number within die the CPU belongs to (since 7.1) +# +# @cluster-id: cluster number within die the CPU belongs to (since +# 7.1) +# # @core-id: core number within cluster the CPU belongs to +# # @thread-id: thread number within core the CPU belongs to # -# Note: currently there are 6 properties that could be present -# but management should be prepared to pass through other -# properties with device_add command to allow for future -# interface extension. This also requires the filed names to be kept in -# sync with the properties passed to -device/device_add. +# Note: currently there are 6 properties that could be present but +# management should be prepared to pass through other properties +# with device_add command to allow for future interface extension. +# This also requires the field names to be kept in sync with the +# properties passed to -device/device_add. # # Since: 2.7 ## @@ -928,10 +934,14 @@ # @HotpluggableCPU: # # @type: CPU object type for usage with device_add command +# # @props: list of properties to be used for hotplugging CPU -# @vcpus-count: number of logical VCPU threads @HotpluggableCPU provides -# @qom-path: link to existing CPU object if CPU is present or -# omitted if CPU is not present. +# +# @vcpus-count: number of logical VCPU threads @HotpluggableCPU +# provides +# +# @qom-path: link to existing CPU object if CPU is present or omitted +# if CPU is not present. # # Since: 2.7 ## @@ -952,9 +962,10 @@ # # Since: 2.7 # -# Example: +# Examples: # -# For pseries machine type started with -smp 2,cores=2,maxcpus=4 -cpu POWER8: +# For pseries machine type started with -smp 2,cores=2,maxcpus=4 -cpu +# POWER8: # # -> { "execute": "query-hotpluggable-cpus" } # <- {"return": [ @@ -979,8 +990,8 @@ # } # ]} # -# For s390x-virtio-ccw machine type started with -smp 1,maxcpus=2 -cpu qemu -# (Since: 2.11): +# For s390x-virtio-ccw machine type started with -smp 1,maxcpus=2 -cpu +# qemu (Since: 2.11): # # -> { "execute": "query-hotpluggable-cpus" } # <- {"return": [ @@ -994,7 +1005,6 @@ # "props": { "core-id": 0 } # } # ]} -# ## { 'command': 'query-hotpluggable-cpus', 'returns': ['HotpluggableCPU'], 'allow-preconfig': true } @@ -1002,9 +1012,8 @@ ## # @set-numa-node: # -# Runtime equivalent of '-numa' CLI option, available at -# preconfigure stage to configure numa mapping before initializing -# machine. +# Runtime equivalent of '-numa' CLI option, available at preconfigure +# stage to configure numa mapping before initializing machine. # # Since: 3.0 ## @@ -1018,21 +1027,22 @@ # # Request the balloon driver to change its balloon size. # -# @value: the target logical size of the VM in bytes. -# We can deduce the size of the balloon using this formula: +# @value: the target logical size of the VM in bytes. 
We can deduce +# the size of the balloon using this formula: # -# logical_vm_size = vm_ram_size - balloon_size +# logical_vm_size = vm_ram_size - balloon_size # -# From it we have: balloon_size = vm_ram_size - @value +# From it we have: balloon_size = vm_ram_size - @value # -# Returns: - Nothing on success -# - If the balloon driver is enabled but not functional because the KVM -# kernel module cannot support it, KvmMissingCap -# - If no balloon device is present, DeviceNotActive +# Returns: +# - Nothing on success +# - If the balloon driver is enabled but not functional because the +# KVM kernel module cannot support it, KVMMissingCap +# - If no balloon device is present, DeviceNotActive # -# Notes: This command just issues a request to the guest. When it returns, -# the balloon size may not have changed. A guest can change the balloon -# size independent of this command. +# Notes: This command just issues a request to the guest. When it +# returns, the balloon size may not have changed. A guest can +# change the balloon size independent of this command. # # Since: 0.14 # @@ -1042,7 +1052,6 @@ # <- { "return": {} } # # With a 2.5GiB guest this command inflated the balloon to 3GiB. -# ## { 'command': 'balloon', 'data': {'value': 'int'} } @@ -1051,8 +1060,8 @@ # # Information about the guest balloon device. # -# @actual: the logical size of the VM in bytes -# Formula used: logical_vm_size = vm_ram_size - balloon_size +# @actual: the logical size of the VM in bytes Formula used: +# logical_vm_size = vm_ram_size - balloon_size # # Since: 0.14 ## @@ -1063,10 +1072,11 @@ # # Return information about the balloon device. # -# Returns: - @BalloonInfo on success -# - If the balloon driver is enabled but not functional because the KVM -# kernel module cannot support it, KvmMissingCap -# - If no balloon device is present, DeviceNotActive +# Returns: +# - @BalloonInfo on success +# - If the balloon driver is enabled but not functional because the +# KVM kernel module cannot support it, KVMMissingCap +# - If no balloon device is present, DeviceNotActive # # Since: 0.14 # @@ -1077,18 +1087,18 @@ # "actual": 1073741824 # } # } -# ## { 'command': 'query-balloon', 'returns': 'BalloonInfo' } ## # @BALLOON_CHANGE: # -# Emitted when the guest changes the actual BALLOON level. This value is -# equivalent to the @actual field return by the 'query-balloon' command +# Emitted when the guest changes the actual BALLOON level. This value +# is equivalent to the @actual field returned by the 'query-balloon' +# command # -# @actual: the logical size of the VM in bytes -# Formula used: logical_vm_size = vm_ram_size - balloon_size +# @actual: the logical size of the VM in bytes Formula used: +# logical_vm_size = vm_ram_size - balloon_size # # Note: this event is rate-limited. # @@ -1099,7 +1109,6 @@ # <- { "event": "BALLOON_CHANGE", # "data": { "actual": 944766976 }, # "timestamp": { "seconds": 1267020223, "microseconds": 435656 } } -# ## { 'event': 'BALLOON_CHANGE', 'data': { 'actual': 'int' } } @@ -1110,11 +1119,11 @@ # # Actual memory information in bytes. # # @base-memory: size of "base" memory specified with command line -# option -m. +# option -m. # -# @plugged-memory: size of memory that can be hot-unplugged. This field -# is omitted if target doesn't support memory hotplug -# (i.e. CONFIG_MEM_DEVICE not defined at build time). +# @plugged-memory: size of memory that can be hot-unplugged. This +# field is omitted if target doesn't support memory hotplug (i.e. +# CONFIG_MEM_DEVICE not defined at build time). 
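As a worked example of the balloon size formula above (sizes illustrative): with vm_ram_size = 4294967296 bytes (4 GiB) and a target @value of 3221225472 bytes (3 GiB), balloon_size = 4294967296 - 3221225472 = 1073741824 bytes, i.e. the guest gives up 1 GiB:

-> { "execute": "balloon", "arguments": { "value": 3221225472 } }
<- { "return": {} }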
# # Since: 2.11 ## @@ -1124,8 +1133,8 @@ ## # @query-memory-size-summary: # -# Return the amount of initially allocated and present hotpluggable (if -# enabled) memory in bytes. +# Return the amount of initially allocated and present hotpluggable +# (if enabled) memory in bytes. # # Example: # @@ -1155,7 +1164,8 @@ # # @hotplugged: true if device was hotplugged # -# @hotpluggable: true if device if could be added/removed while machine is running +# @hotpluggable: true if device could be added/removed while +# machine is running # # Since: 2.1 ## @@ -1258,6 +1268,14 @@ ## # @MemoryDeviceInfoKind: # +# @nvdimm: since 2.12 +# +# @virtio-pmem: since 4.1 +# +# @virtio-mem: since 5.1 +# +# @sgx-epc: since 6.2. +# # Since: 2.1 ## { 'enum': 'MemoryDeviceInfoKind', @@ -1300,9 +1318,6 @@ # # Union containing information about a memory device # -# nvdimm is included since 2.12. virtio-pmem is included since 4.1. -# virtio-mem is included since 5.1. sgx-epc is included since 6.2. -# # Since: 2.1 ## { 'union': 'MemoryDeviceInfo', @@ -1367,16 +1382,15 @@ # "slot": 0}, # "type": "dimm" # } ] } -# ## { 'command': 'query-memory-devices', 'returns': ['MemoryDeviceInfo'] } ## # @MEMORY_DEVICE_SIZE_CHANGE: # -# Emitted when the size of a memory device changes. Only emitted for memory -# devices that can actually change the size (e.g., virtio-mem due to guest -# action). +# Emitted when the size of a memory device changes. Only emitted for +# memory devices that can actually change the size (e.g., virtio-mem +# due to guest action). # # @id: device's ID # @@ -1394,7 +1408,6 @@ # "data": { "id": "vm0", "size": 1073741824, # "qom-path": "/machine/unattached/device[2]" }, # "timestamp": { "seconds": 1588168529, "microseconds": 201316 } } -# ## { 'event': 'MEMORY_DEVICE_SIZE_CHANGE', 'data': { '*id': 'str', 'size': 'size', 'qom-path' : 'str'} } @@ -1409,8 +1422,9 @@ # @msg: Informative message # # Features: -# @deprecated: This event is deprecated. Use @DEVICE_UNPLUG_GUEST_ERROR -# instead. +# +# @deprecated: This event is deprecated. Use +# @DEVICE_UNPLUG_GUEST_ERROR instead. # # Since: 2.4 # @@ -1421,7 +1435,6 @@ # "msg": "acpi: device unplug for unsupported device" # }, # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } -# ## { 'event': 'MEM_UNPLUG_ERROR', 'data': { 'device': 'str', 'msg': 'str' }, @@ -1438,13 +1451,15 @@ # # @menu: Whether to show a boot menu # -# @splash: The name of the file to be passed to the firmware as logo picture, if @menu is true. +# @splash: The name of the file to be passed to the firmware as logo +# picture, if @menu is true. # # @splash-time: How long to show the logo picture, in milliseconds # # @reboot-timeout: Timeout before guest reboots after boot fails # -# @strict: Whether to attempt booting from devices not included in the boot order +# @strict: Whether to attempt booting from devices not included in the +# boot order # # Since: 7.1 ## @@ -1460,8 +1475,8 @@ ## # @SMPConfiguration: # -# Schema for CPU topology configuration. A missing value lets -# QEMU figure out a suitable value based on the ones that are provided. +# Schema for CPU topology configuration. A missing value lets QEMU +# figure out a suitable value based on the ones that are provided. 
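Before the field-by-field list, a concrete reading of that rule (a sketch; exactly how unspecified values are filled in can also depend on the machine type): the command line

    -smp 8,sockets=2,cores=2,threads=2

leaves @dies and @clusters unset, so QEMU takes them as 1, and the topology is consistent because 2 sockets x 1 die x 1 cluster x 2 cores x 2 threads = 8 CPUs; @maxcpus, also unset, defaults to @cpus.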
# # @cpus: number of virtual CPUs in the virtual machine # @@ -1469,13 +1484,15 @@ # # @dies: number of dies per socket in the CPU topology # -# @clusters: number of clusters per die in the CPU topology (since 7.0) +# @clusters: number of clusters per die in the CPU topology (since +# 7.0) # # @cores: number of cores per cluster in the CPU topology # # @threads: number of threads per core in the CPU topology # -# @maxcpus: maximum number of hotpluggable virtual CPUs in the virtual machine +# @maxcpus: maximum number of hotpluggable virtual CPUs in the virtual +# machine # # Since: 6.1 ## @@ -1494,6 +1511,7 @@ # Query interrupt statistics # # Features: +# # @unstable: This command is meant for debugging. # # Returns: interrupt statistics @@ -1510,6 +1528,7 @@ # Query TCG compiler statistics # # Features: +# # @unstable: This command is meant for debugging. # # Returns: TCG compiler statistics @@ -1527,6 +1546,7 @@ # Query NUMA topology information # # Features: +# # @unstable: This command is meant for debugging. # # Returns: topology information @@ -1543,6 +1563,7 @@ # Query TCG opcode counters # # Features: +# # @unstable: This command is meant for debugging. # # Returns: TCG opcode counters @@ -1560,6 +1581,7 @@ # Query TCG profiling information # # Features: +# # @unstable: This command is meant for debugging. # # Returns: profile information @@ -1577,6 +1599,7 @@ # Query system ramblock information # # Features: +# # @unstable: This command is meant for debugging. # # Returns: system ramblock information @@ -1593,6 +1616,7 @@ # Query RDMA state # # Features: +# # @unstable: This command is meant for debugging. # # Returns: RDMA state @@ -1609,6 +1633,7 @@ # Query information on the registered ROMS # # Features: +# # @unstable: This command is meant for debugging. # # Returns: registered ROMs @@ -1625,6 +1650,7 @@ # Query information on the USB devices # # Features: +# # @unstable: This command is meant for debugging. # # Returns: USB device information @@ -1675,9 +1701,10 @@ # Since: 7.2 # # Example: -# {"execute": "dumpdtb"} -# "arguments": { "filename": "fdt.dtb" } } # +# -> { "execute": "dumpdtb", +# "arguments": { "filename": "fdt.dtb" } } +# <- { "return": {} } ## { 'command': 'dumpdtb', 'data': { 'filename': 'str' }, diff --git a/qapi/meson.build b/qapi/meson.build index fbdb442fdf..9fd480c4d8 100644 --- a/qapi/meson.build +++ b/qapi/meson.build @@ -31,6 +31,7 @@ qapi_all_modules = [ 'compat', 'control', 'crypto', + 'cxl', 'dump', 'error', 'introspect', @@ -56,6 +57,7 @@ if have_system qapi_all_modules += [ 'acpi', 'audio', + 'cryptodev', 'qdev', 'pci', 'rdma', diff --git a/qapi/migration.json b/qapi/migration.json index c84fa10e86..179af0c4d8 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -16,7 +16,8 @@ # # @transferred: amount of bytes already transferred to the target VM # -# @remaining: amount of bytes remaining to be transferred to the target VM +# @remaining: amount of bytes remaining to be transferred to the +# target VM # # @total: total amount of bytes involved in the migration process # @@ -28,37 +29,39 @@ # # @normal-bytes: number of normal bytes sent (since 1.2) # -# @dirty-pages-rate: number of pages dirtied by second by the -# guest (since 1.3) +# @dirty-pages-rate: number of pages dirtied per second by the guest +# (since 1.3) # -# @mbps: throughput in megabits/sec. 
(since 1.6) # -# @dirty-sync-count: number of times that dirty ram was synchronized (since 2.1) +# @dirty-sync-count: number of times that dirty ram was synchronized +# (since 2.1) # -# @postcopy-requests: The number of page requests received from the destination -# (since 2.7) +# @postcopy-requests: The number of page requests received from the +# destination (since 2.7) # # @page-size: The number of bytes per page for the various page-based -# statistics (since 2.10) +# statistics (since 2.10) # # @multifd-bytes: The number of bytes sent through multifd (since 3.0) # # @pages-per-second: the number of memory pages transferred per second -# (Since 4.0) +# (Since 4.0) # # @precopy-bytes: The number of bytes sent in the pre-copy phase -# (since 7.0). +# (since 7.0). # # @downtime-bytes: The number of bytes sent while the guest is paused -# (since 7.0). +# (since 7.0). # # @postcopy-bytes: The number of bytes sent during the post-copy phase -# (since 7.0). +# (since 7.0). +# +# @dirty-sync-missed-zero-copy: Number of times dirty RAM +# synchronization could not avoid copying dirty pages. This is +# between 0 and @dirty-sync-count * @multifd-channels. (since +# 7.1) # -# @dirty-sync-missed-zero-copy: Number of times dirty RAM synchronization could -# not avoid copying dirty pages. This is between -# 0 and @dirty-sync-count * @multifd-channels. -# (since 7.1) # Since: 0.14 ## { 'struct': 'MigrationStats', @@ -105,7 +108,8 @@ # # @pages: amount of pages compressed and transferred to the target VM # -# @busy: count of times that no free thread was available to compress data +# @busy: count of times that no free thread was available to compress +# data # # @busy-rate: rate of thread busy # @@ -134,26 +138,29 @@ # # @active: in the process of doing migration. # -# @postcopy-active: like active, but now in postcopy mode. (since 2.5) +# @postcopy-active: like active, but now in postcopy mode. (since +# 2.5) # -# @postcopy-paused: during postcopy but paused. (since 3.0) +# @postcopy-paused: during postcopy but paused. (since 3.0) # -# @postcopy-recover: trying to recover from a paused postcopy. (since 3.0) +# @postcopy-recover: trying to recover from a paused postcopy. (since +# 3.0) # # @completed: migration is finished. # # @failed: some error occurred during migration process. # -# @colo: VM is in the process of fault tolerance, VM can not get into this -# state unless colo capability is enabled for migration. (since 2.8) +# @colo: VM is in the process of fault tolerance, VM can not get into +# this state unless colo capability is enabled for migration. +# (since 2.8) # -# @pre-switchover: Paused before device serialisation. (since 2.11) +# @pre-switchover: Paused before device serialisation. (since 2.11) # -# @device: During device serialisation when pause-before-switchover is enabled -# (since 2.11) +# @device: During device serialisation when pause-before-switchover is +# enabled (since 2.11) # -# @wait-unplug: wait for device unplug request by guest OS to be completed. -# (since 4.2) +# @wait-unplug: wait for device unplug request by guest OS to be +# completed. (since 4.2) # # Since: 2.3 ## @@ -167,7 +174,8 @@ # # Detailed VFIO devices migration statistics # -# @transferred: amount of bytes transferred to the target VM by VFIO devices +# @transferred: amount of bytes transferred to the target VM by VFIO +# devices # # Since: 5.2 ## @@ -180,67 +188,67 @@ # Information about current migration process. # # @status: @MigrationStatus describing the current migration status. 
-# If this field is not returned, no migration process -# has been initiated +# If this field is not returned, no migration process has been +# initiated # -# @ram: @MigrationStats containing detailed migration -# status, only returned if status is 'active' or -# 'completed'(since 1.2) +# @ram: @MigrationStats containing detailed migration status, only +# returned if status is 'active' or 'completed' (since 1.2) # -# @disk: @MigrationStats containing detailed disk migration -# status, only returned if status is 'active' and it is a block -# migration +# @disk: @MigrationStats containing detailed disk migration status, +# only returned if status is 'active' and it is a block migration # # @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE -# migration statistics, only returned if XBZRLE feature is on and -# status is 'active' or 'completed' (since 1.2) +# migration statistics, only returned if XBZRLE feature is on and +# status is 'active' or 'completed' (since 1.2) # # @total-time: total amount of milliseconds since migration started. -# If migration has ended, it returns the total migration -# time. (since 1.2) +# If migration has ended, it returns the total migration time. +# (since 1.2) # -# @downtime: only present when migration finishes correctly -# total downtime in milliseconds for the guest. -# (since 1.3) +# @downtime: only present when migration finishes correctly; total +# downtime in milliseconds for the guest. (since 1.3) # -# @expected-downtime: only present while migration is active -# expected downtime in milliseconds for the guest in last walk -# of the dirty bitmap. (since 1.3) +# @expected-downtime: only present while migration is active; expected +# downtime in milliseconds for the guest in last walk of the dirty +# bitmap. (since 1.3) # # @setup-time: amount of setup time in milliseconds *before* the -# iterations begin but *after* the QMP command is issued. This is designed -# to provide an accounting of any activities (such as RDMA pinning) which -# may be expensive, but do not actually occur during the iterative -# migration rounds themselves. (since 1.6) +# iterations begin but *after* the QMP command is issued. This is +# designed to provide an accounting of any activities (such as +# RDMA pinning) which may be expensive, but do not actually occur +# during the iterative migration rounds themselves. (since 1.6) # # @cpu-throttle-percentage: percentage of time guest cpus are being -# throttled during auto-converge. This is only present when auto-converge -# has started throttling guest cpus. (Since 2.7) +# throttled during auto-converge. This is only present when +# auto-converge has started throttling guest cpus. (Since 2.7) # # @error-desc: the human readable error description string, when -# @status is 'failed'. Clients should not attempt to parse the -# error strings. (Since 2.7) +# @status is 'failed'. Clients should not attempt to parse the +# error strings. (Since 2.7) # -# @postcopy-blocktime: total time when all vCPU were blocked during postcopy -# live migration. This is only present when the postcopy-blocktime -# migration capability is enabled. (Since 3.0) +# @postcopy-blocktime: total time when all vCPU were blocked during +# postcopy live migration. This is only present when the +# postcopy-blocktime migration capability is enabled. (Since 3.0) # -# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU. This is -# only present when the postcopy-blocktime migration capability -# is enabled. 
(Since 3.0) +# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU. +# This is only present when the postcopy-blocktime migration +# capability is enabled. (Since 3.0) # -# @compression: migration compression statistics, only returned if compression -# feature is on and status is 'active' or 'completed' (Since 3.1) +# @compression: migration compression statistics, only returned if +# compression feature is on and status is 'active' or 'completed' +# (Since 3.1) # -# @socket-address: Only used for tcp, to know what the real port is (Since 4.0) +# @socket-address: Only used for tcp, to know what the real port is +# (Since 4.0) # -# @vfio: @VfioStats containing detailed VFIO devices migration statistics, -# only returned if VFIO device is present, migration is supported by all -# VFIO devices and status is 'active' or 'completed' (since 5.2) +# @vfio: @VfioStats containing detailed VFIO devices migration +# statistics, only returned if VFIO device is present, migration +# is supported by all VFIO devices and status is 'active' or +# 'completed' (since 5.2) # -# @blocked-reasons: A list of reasons an outgoing migration is blocked. -# Present and non-empty when migration is blocked. -# (since 6.0) +# @blocked-reasons: A list of reasons an outgoing migration is +# blocked. Present and non-empty when migration is blocked. +# (since 6.0) # # Since: 0.14 ## @@ -264,7 +272,7 @@ ## # @query-migrate: # -# Returns information about current migration process. If migration +# Returns information about current migration process. If migration # is active there will be another json-object with RAM migration # status and if block migration is active another one with block # migration status. @@ -273,7 +281,7 @@ # # Since: 0.14 # -# Example: +# Examples: # # 1. Before the first migration # @@ -381,7 +389,6 @@ # } # } # } -# ## { 'command': 'query-migrate', 'returns': 'MigrationInfo' } @@ -390,95 +397,98 @@ # # Migration capabilities enumeration # -# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length Encoding). -# This feature allows us to minimize migration traffic for certain work -# loads, by sending compressed difference of the pages +# @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length +# Encoding). This feature allows us to minimize migration traffic +# for certain work loads, by sending compressed difference of the +# pages # -# @rdma-pin-all: Controls whether or not the entire VM memory footprint is -# mlock()'d on demand or all at once. Refer to docs/rdma.txt for usage. -# Disabled by default. (since 2.0) +# @rdma-pin-all: Controls whether or not the entire VM memory +# footprint is mlock()'d on demand or all at once. Refer to +# docs/rdma.txt for usage. Disabled by default. (since 2.0) # -# @zero-blocks: During storage migration encode blocks of zeroes efficiently. This -# essentially saves 1MB of zeroes per block on the wire. Enabling requires -# source and target VM to support this feature. To enable it is sufficient -# to enable the capability on the source VM. The feature is disabled by -# default. (since 1.6) +# @zero-blocks: During storage migration encode blocks of zeroes +# efficiently. This essentially saves 1MB of zeroes per block on +# the wire. Enabling requires source and target VM to support +# this feature. To enable it is sufficient to enable the +# capability on the source VM. The feature is disabled by default. +# (since 1.6) # -# @compress: Use multiple compression threads to accelerate live migration. 
-# This feature can help to reduce the migration traffic, by sending -# compressed pages. Please note that if compress and xbzrle are both -# on, compress only takes effect in the ram bulk stage, after that, -# it will be disabled and only xbzrle takes effect, this can help to -# minimize migration traffic. The feature is disabled by default. -# (since 2.4 ) +# @compress: Use multiple compression threads to accelerate live +# migration. This feature can help to reduce the migration +# traffic, by sending compressed pages. Please note that if +# compress and xbzrle are both on, compress only takes effect in +# the ram bulk stage, after that, it will be disabled and only +# xbzrle takes effect, this can help to minimize migration +# traffic. The feature is disabled by default. (since 2.4) # -# @events: generate events for each migration state change -# (since 2.4 ) +# @events: generate events for each migration state change (since +# 2.4) # -# @auto-converge: If enabled, QEMU will automatically throttle down the guest -# to speed up convergence of RAM migration. (since 1.6) +# @auto-converge: If enabled, QEMU will automatically throttle down +# the guest to speed up convergence of RAM migration. (since 1.6) # -# @postcopy-ram: Start executing on the migration target before all of RAM has -# been migrated, pulling the remaining pages along as needed. The -# capacity must have the same setting on both source and target -# or migration will not even start. NOTE: If the migration fails during -# postcopy the VM will fail. (since 2.6) +# @postcopy-ram: Start executing on the migration target before all of +# RAM has been migrated, pulling the remaining pages along as +# needed. The capability must have the same setting on both source +# and target or migration will not even start. NOTE: If the +# migration fails during postcopy the VM will fail. (since 2.6) # -# @x-colo: If enabled, migration will never end, and the state of the VM on the -# primary side will be migrated continuously to the VM on secondary -# side, this process is called COarse-Grain LOck Stepping (COLO) for -# Non-stop Service. (since 2.8) +# @x-colo: If enabled, migration will never end, and the state of the +# VM on the primary side will be migrated continuously to the VM +# on secondary side, this process is called COarse-Grain LOck +# Stepping (COLO) for Non-stop Service. (since 2.8) # -# @release-ram: if enabled, qemu will free the migrated ram pages on the source -# during postcopy-ram migration. (since 2.9) +# @release-ram: if enabled, qemu will free the migrated ram pages on +# the source during postcopy-ram migration. (since 2.9) # # @block: If enabled, QEMU will also migrate the contents of all block -# devices. Default is disabled. A possible alternative uses -# mirror jobs to a builtin NBD server on the destination, which -# offers more flexibility. -# (Since 2.10) +# devices. Default is disabled. A possible alternative uses +# mirror jobs to a builtin NBD server on the destination, which +# offers more flexibility. (Since 2.10) # # @return-path: If enabled, migration will use the return path even -# for precopy. 
(since 2.10) # -# @pause-before-switchover: Pause outgoing migration before serialising device -# state and before disabling block IO (since 2.11) +# @pause-before-switchover: Pause outgoing migration before +# serialising device state and before disabling block IO (since +# 2.11) # # @multifd: Use more than one fd for migration (since 4.0) # # @dirty-bitmaps: If enabled, QEMU will migrate named dirty bitmaps. -# (since 2.12) +# (since 2.12) # # @postcopy-blocktime: Calculate downtime for postcopy live migration -# (since 3.0) +# (since 3.0) # -# @late-block-activate: If enabled, the destination will not activate block -# devices (and thus take locks) immediately at the end of migration. -# (since 3.0) +# @late-block-activate: If enabled, the destination will not activate +# block devices (and thus take locks) immediately at the end of +# migration. (since 3.0) # -# @x-ignore-shared: If enabled, QEMU will not migrate shared memory (since 4.0) +# @x-ignore-shared: If enabled, QEMU will not migrate shared memory +# (since 4.0) # # @validate-uuid: Send the UUID of the source to allow the destination -# to ensure it is the same. (since 4.2) +# to ensure it is the same. (since 4.2) # -# @background-snapshot: If enabled, the migration stream will be a snapshot -# of the VM exactly at the point when the migration -# procedure starts. The VM RAM is saved with running VM. -# (since 6.0) +# @background-snapshot: If enabled, the migration stream will be a +# snapshot of the VM exactly at the point when the migration +# procedure starts. The VM RAM is saved with running VM. (since +# 6.0) # -# @zero-copy-send: Controls behavior on sending memory pages on migration. -# When true, enables a zero-copy mechanism for sending -# memory pages, if host supports it. -# Requires that QEMU be permitted to use locked memory -# for guest RAM pages. -# (since 7.1) -# @postcopy-preempt: If enabled, the migration process will allow postcopy -# requests to preempt precopy stream, so postcopy requests -# will be handled faster. This is a performance feature and -# should not affect the correctness of postcopy migration. -# (since 7.1) +# @zero-copy-send: Controls behavior on sending memory pages on +# migration. When true, enables a zero-copy mechanism for sending +# memory pages, if host supports it. Requires that QEMU be +# permitted to use locked memory for guest RAM pages. (since 7.1) +# +# @postcopy-preempt: If enabled, the migration process will allow +# postcopy requests to preempt precopy stream, so postcopy +# requests will be handled faster. This is a performance feature +# and should not affect the correctness of postcopy migration. +# (since 7.1) # # Features: +# # @unstable: Members @x-colo and @x-ignore-shared are experimental. # # Since: 1.2 @@ -521,7 +531,7 @@ # # -> { "execute": "migrate-set-capabilities" , "arguments": # { "capabilities": [ { "capability": "xbzrle", "state": true } ] } } -# +# <- { "return": {} } ## { 'command': 'migrate-set-capabilities', 'data': { 'capabilities': ['MigrationCapabilityStatus'] } } @@ -531,7 +541,7 @@ # # Returns information about the current migration capabilities status # -# Returns: @MigrationCapabilitiesStatus +# Returns: @MigrationCapabilityStatus # # Since: 1.2 # @@ -548,7 +558,6 @@ # {"state": false, "capability": "postcopy-ram"}, # {"state": false, "capability": "x-colo"} # ]} -# ## { 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']} @@ -558,7 +567,9 @@ # An enumeration of multifd compression methods. # # @none: no compression. 
+# # @zlib: use zlib compression method. +# # @zstd: use zstd compression method. # # Since: 5.0 @@ -570,8 +581,8 @@ ## # @BitmapMigrationBitmapAliasTransform: # -# @persistent: If present, the bitmap will be made persistent -# or transient depending on this parameter. +# @persistent: If present, the bitmap will be made persistent or +# transient depending on this parameter. # # Since: 6.0 ## @@ -586,10 +597,10 @@ # @name: The name of the bitmap. # # @alias: An alias name for migration (for example the bitmap name on -# the opposite site). +# the opposite site). # -# @transform: Allows the modification of the migrated bitmap. -# (since 6.0) +# @transform: Allows the modification of the migrated bitmap. (since +# 6.0) # # Since: 5.2 ## @@ -608,8 +619,8 @@ # # @node-name: A block node name. # -# @alias: An alias block node name for migration (for example the -# node name on the opposite site). +# @alias: An alias block node name for migration (for example the node +# name on the opposite site). # # @bitmaps: Mappings for the bitmaps on this node. # @@ -627,156 +638,149 @@ # # Migration parameters enumeration # -# @announce-initial: Initial delay (in milliseconds) before sending the first -# announce (Since 4.0) +# @announce-initial: Initial delay (in milliseconds) before sending +# the first announce (Since 4.0) # -# @announce-max: Maximum delay (in milliseconds) between packets in the -# announcement (Since 4.0) +# @announce-max: Maximum delay (in milliseconds) between packets in +# the announcement (Since 4.0) # -# @announce-rounds: Number of self-announce packets sent after migration -# (Since 4.0) +# @announce-rounds: Number of self-announce packets sent after +# migration (Since 4.0) # -# @announce-step: Increase in delay (in milliseconds) between subsequent -# packets in the announcement (Since 4.0) +# @announce-step: Increase in delay (in milliseconds) between +# subsequent packets in the announcement (Since 4.0) # -# @compress-level: Set the compression level to be used in live migration, -# the compression level is an integer between 0 and 9, where 0 means -# no compression, 1 means the best compression speed, and 9 means best -# compression ratio which will consume more CPU. +# @compress-level: Set the compression level to be used in live +# migration, the compression level is an integer between 0 and 9, +# where 0 means no compression, 1 means the best compression +# speed, and 9 means best compression ratio which will consume +# more CPU. # -# @compress-threads: Set compression thread count to be used in live migration, -# the compression thread count is an integer between 1 and 255. +# @compress-threads: Set compression thread count to be used in live +# migration, the compression thread count is an integer between 1 +# and 255. # -# @compress-wait-thread: Controls behavior when all compression threads are -# currently busy. If true (default), wait for a free -# compression thread to become available; otherwise, -# send the page uncompressed. (Since 3.1) +# @compress-wait-thread: Controls behavior when all compression +# threads are currently busy. If true (default), wait for a free +# compression thread to become available; otherwise, send the page +# uncompressed. (Since 3.1) # -# @decompress-threads: Set decompression thread count to be used in live -# migration, the decompression thread count is an integer between 1 -# and 255. 
Usually, decompression is at least 4 times as fast as -# compression, so set the decompress-threads to the number about 1/4 -# of compress-threads is adequate. +# @decompress-threads: Set decompression thread count to be used in +# live migration, the decompression thread count is an integer +# between 1 and 255. Usually, decompression is at least 4 times as +# fast as compression, so setting decompress-threads to about 1/4 +# of compress-threads is adequate. # -# @throttle-trigger-threshold: The ratio of bytes_dirty_period and bytes_xfer_period -# to trigger throttling. It is expressed as percentage. -# The default value is 50. (Since 5.0) +# @throttle-trigger-threshold: The ratio of bytes_dirty_period and +# bytes_xfer_period to trigger throttling. It is expressed as +# percentage. The default value is 50. (Since 5.0) # -# @cpu-throttle-initial: Initial percentage of time guest cpus are throttled -# when migration auto-converge is activated. The -# default value is 20. (Since 2.7) +# @cpu-throttle-initial: Initial percentage of time guest cpus are +# throttled when migration auto-converge is activated. The +# default value is 20. (Since 2.7) # # @cpu-throttle-increment: throttle percentage increase each time -# auto-converge detects that migration is not making -# progress. The default value is 10. (Since 2.7) +# auto-converge detects that migration is not making progress. +# The default value is 10. (Since 2.7) # -# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage -# At the tail stage of throttling, the Guest is very -# sensitive to CPU percentage while the @cpu-throttle -# -increment is excessive usually at tail stage. -# If this parameter is true, we will compute the ideal -# CPU percentage used by the Guest, which may exactly make -# the dirty rate match the dirty rate threshold. Then we -# will choose a smaller throttle increment between the -# one specified by @cpu-throttle-increment and the one -# generated by ideal CPU percentage. -# Therefore, it is compatible to traditional throttling, -# meanwhile the throttle increment won't be excessive -# at tail stage. -# The default value is false. (Since 5.1) +# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage. At +# the tail stage of throttling, the Guest is very sensitive to CPU +# percentage while the @cpu-throttle-increment is excessive +# usually at tail stage. If this parameter is true, we will +# compute the ideal CPU percentage used by the Guest, which may +# exactly make the dirty rate match the dirty rate threshold. +# Then we will choose a smaller throttle increment between the one +# specified by @cpu-throttle-increment and the one generated by +# ideal CPU percentage. Therefore, it is compatible with +# traditional throttling, meanwhile the throttle increment won't +# be excessive at tail stage. The default value is false. (Since +# 5.1) # -# @tls-creds: ID of the 'tls-creds' object that provides credentials for -# establishing a TLS connection over the migration data channel. -# On the outgoing side of the migration, the credentials must -# be for a 'client' endpoint, while for the incoming side the -# credentials must be for a 'server' endpoint. Setting this -# will enable TLS for all migrations. The default is unset, -# resulting in unsecured migration at the QEMU level. (Since 2.7) +# @tls-creds: ID of the 'tls-creds' object that provides credentials +# for establishing a TLS connection over the migration data +# channel.
On the outgoing side of the migration, the credentials +# must be for a 'client' endpoint, while for the incoming side the +# credentials must be for a 'server' endpoint. Setting this will +# enable TLS for all migrations. The default is unset, resulting +# in unsecured migration at the QEMU level. (Since 2.7) # -# @tls-hostname: hostname of the target host for the migration. This is -# required when using x509 based TLS credentials and the -# migration URI does not already include a hostname. For -# example if using fd: or exec: based migration, the -# hostname must be provided so that the server's x509 -# certificate identity can be validated. (Since 2.7) +# @tls-hostname: hostname of the target host for the migration. This +# is required when using x509 based TLS credentials and the +# migration URI does not already include a hostname. For example +# if using fd: or exec: based migration, the hostname must be +# provided so that the server's x509 certificate identity can be +# validated. (Since 2.7) # -# @tls-authz: ID of the 'authz' object subclass that provides access control -# checking of the TLS x509 certificate distinguished name. -# This object is only resolved at time of use, so can be deleted -# and recreated on the fly while the migration server is active. -# If missing, it will default to denying access (Since 4.0) +# @tls-authz: ID of the 'authz' object subclass that provides access +# control checking of the TLS x509 certificate distinguished name. +# This object is only resolved at time of use, so can be deleted +# and recreated on the fly while the migration server is active. +# If missing, it will default to denying access (Since 4.0) # -# @max-bandwidth: to set maximum speed for migration. maximum speed in -# bytes per second. (Since 2.8) +# @max-bandwidth: to set maximum speed for migration. maximum speed +# in bytes per second. (Since 2.8) # -# @downtime-limit: set maximum tolerated downtime for migration. maximum -# downtime in milliseconds (Since 2.8) +# @downtime-limit: set maximum tolerated downtime for migration. +# maximum downtime in milliseconds (Since 2.8) # -# @x-checkpoint-delay: The delay time (in ms) between two COLO checkpoints in -# periodic mode. (Since 2.8) +# @x-checkpoint-delay: The delay time (in ms) between two COLO +# checkpoints in periodic mode. (Since 2.8) # # @block-incremental: Affects how much storage is migrated when the -# block migration capability is enabled. When false, the entire -# storage backing chain is migrated into a flattened image at -# the destination; when true, only the active qcow2 layer is -# migrated and the destination must already have access to the -# same backing chain as was used on the source. (since 2.10) +# block migration capability is enabled. When false, the entire +# storage backing chain is migrated into a flattened image at the +# destination; when true, only the active qcow2 layer is migrated +# and the destination must already have access to the same backing +# chain as was used on the source. (since 2.10) # # @multifd-channels: Number of channels used to migrate data in -# parallel. This is the same number that the -# number of sockets used for migration. The -# default value is 2 (since 4.0) +# parallel. This is the same as the number of sockets +# used for migration. The default value is 2 (since 4.0) # # @xbzrle-cache-size: cache size to be used by XBZRLE migration.
It -# needs to be a multiple of the target page size -# and a power of 2 -# (Since 2.11) +# needs to be a multiple of the target page size and a power of 2 +# (Since 2.11) # -# @max-postcopy-bandwidth: Background transfer bandwidth during postcopy. -# Defaults to 0 (unlimited). In bytes per second. -# (Since 3.0) +# @max-postcopy-bandwidth: Background transfer bandwidth during +# postcopy. Defaults to 0 (unlimited). In bytes per second. +# (Since 3.0) # -# @max-cpu-throttle: maximum cpu throttle percentage. -# Defaults to 99. (Since 3.1) +# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99. +# (Since 3.1) # -# @multifd-compression: Which compression method to use. -# Defaults to none. (Since 5.0) +# @multifd-compression: Which compression method to use. Defaults to +# none. (Since 5.0) # # @multifd-zlib-level: Set the compression level to be used in live -# migration, the compression level is an integer between 0 -# and 9, where 0 means no compression, 1 means the best -# compression speed, and 9 means best compression ratio which -# will consume more CPU. -# Defaults to 1. (Since 5.0) +# migration, the compression level is an integer between 0 and 9, +# where 0 means no compression, 1 means the best compression +# speed, and 9 means best compression ratio which will consume +# more CPU. Defaults to 1. (Since 5.0) # # @multifd-zstd-level: Set the compression level to be used in live -# migration, the compression level is an integer between 0 -# and 20, where 0 means no compression, 1 means the best -# compression speed, and 20 means best compression ratio which -# will consume more CPU. -# Defaults to 1. (Since 5.0) -# +# migration, the compression level is an integer between 0 and 20, +# where 0 means no compression, 1 means the best compression +# speed, and 20 means best compression ratio which will consume +# more CPU. Defaults to 1. (Since 5.0) # # @block-bitmap-mapping: Maps block nodes and bitmaps on them to -# aliases for the purpose of dirty bitmap migration. Such -# aliases may for example be the corresponding names on the -# opposite site. -# The mapping must be one-to-one, but not necessarily -# complete: On the source, unmapped bitmaps and all bitmaps -# on unmapped nodes will be ignored. On the destination, -# encountering an unmapped alias in the incoming migration -# stream will result in a report, and all further bitmap -# migration data will then be discarded. -# Note that the destination does not know about bitmaps it -# does not receive, so there is no limitation or requirement -# regarding the number of bitmaps received, or how they are -# named, or on which nodes they are placed. -# By default (when this parameter has never been set), bitmap -# names are mapped to themselves. Nodes are mapped to their -# block device name if there is one, and to their node name -# otherwise. (Since 5.2) +# aliases for the purpose of dirty bitmap migration. Such aliases +# may for example be the corresponding names on the opposite site. +# The mapping must be one-to-one, but not necessarily complete: On +# the source, unmapped bitmaps and all bitmaps on unmapped nodes +# will be ignored. On the destination, encountering an unmapped +# alias in the incoming migration stream will result in a report, +# and all further bitmap migration data will then be discarded. 
+# Note that the destination does not know about bitmaps it does +# not receive, so there is no limitation or requirement regarding +# the number of bitmaps received, or how they are named, or on +# which nodes they are placed. By default (when this parameter +# has never been set), bitmap names are mapped to themselves. +# Nodes are mapped to their block device name if there is one, and +# to their node name otherwise. (Since 5.2) # # Features: +# # @unstable: Member @x-checkpoint-delay is experimental. # # Since: 2.4 @@ -801,153 +805,145 @@ ## # @MigrateSetParameters: # -# @announce-initial: Initial delay (in milliseconds) before sending the first -# announce (Since 4.0) +# @announce-initial: Initial delay (in milliseconds) before sending +# the first announce (Since 4.0) # -# @announce-max: Maximum delay (in milliseconds) between packets in the -# announcement (Since 4.0) +# @announce-max: Maximum delay (in milliseconds) between packets in +# the announcement (Since 4.0) # -# @announce-rounds: Number of self-announce packets sent after migration -# (Since 4.0) +# @announce-rounds: Number of self-announce packets sent after +# migration (Since 4.0) # -# @announce-step: Increase in delay (in milliseconds) between subsequent -# packets in the announcement (Since 4.0) +# @announce-step: Increase in delay (in milliseconds) between +# subsequent packets in the announcement (Since 4.0) # # @compress-level: compression level # # @compress-threads: compression thread count # -# @compress-wait-thread: Controls behavior when all compression threads are -# currently busy. If true (default), wait for a free -# compression thread to become available; otherwise, -# send the page uncompressed. (Since 3.1) +# @compress-wait-thread: Controls behavior when all compression +# threads are currently busy. If true (default), wait for a free +# compression thread to become available; otherwise, send the page +# uncompressed. (Since 3.1) # # @decompress-threads: decompression thread count # -# @throttle-trigger-threshold: The ratio of bytes_dirty_period and bytes_xfer_period -# to trigger throttling. It is expressed as percentage. -# The default value is 50. (Since 5.0) +# @throttle-trigger-threshold: The ratio of bytes_dirty_period and +# bytes_xfer_period to trigger throttling. It is expressed as +# percentage. The default value is 50. (Since 5.0) # # @cpu-throttle-initial: Initial percentage of time guest cpus are -# throttled when migration auto-converge is activated. -# The default value is 20. (Since 2.7) +# throttled when migration auto-converge is activated. The +# default value is 20. (Since 2.7) # # @cpu-throttle-increment: throttle percentage increase each time -# auto-converge detects that migration is not making -# progress. The default value is 10. (Since 2.7) +# auto-converge detects that migration is not making progress. +# The default value is 10. (Since 2.7) # -# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage -# At the tail stage of throttling, the Guest is very -# sensitive to CPU percentage while the @cpu-throttle -# -increment is excessive usually at tail stage. -# If this parameter is true, we will compute the ideal -# CPU percentage used by the Guest, which may exactly make -# the dirty rate match the dirty rate threshold. Then we -# will choose a smaller throttle increment between the -# one specified by @cpu-throttle-increment and the one -# generated by ideal CPU percentage. 
-# Therefore, it is compatible to traditional throttling, -# meanwhile the throttle increment won't be excessive -# at tail stage. -# The default value is false. (Since 5.1) +# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage. At +# the tail stage of throttling, the Guest is very sensitive to CPU +# percentage while the @cpu-throttle-increment is excessive +# usually at tail stage. If this parameter is true, we will +# compute the ideal CPU percentage used by the Guest, which may +# exactly make the dirty rate match the dirty rate threshold. +# Then we will choose a smaller throttle increment between the one +# specified by @cpu-throttle-increment and the one generated by +# ideal CPU percentage. Therefore, it is compatible with +# traditional throttling, meanwhile the throttle increment won't +# be excessive at tail stage. The default value is false. (Since +# 5.1) # # @tls-creds: ID of the 'tls-creds' object that provides credentials -# for establishing a TLS connection over the migration data -# channel. On the outgoing side of the migration, the credentials -# must be for a 'client' endpoint, while for the incoming side the -# credentials must be for a 'server' endpoint. Setting this -# to a non-empty string enables TLS for all migrations. -# An empty string means that QEMU will use plain text mode for -# migration, rather than TLS (Since 2.9) -# Previously (since 2.7), this was reported by omitting -# tls-creds instead. +# for establishing a TLS connection over the migration data +# channel. On the outgoing side of the migration, the credentials +# must be for a 'client' endpoint, while for the incoming side the +# credentials must be for a 'server' endpoint. Setting this to a +# non-empty string enables TLS for all migrations. An empty +# string means that QEMU will use plain text mode for migration, +# rather than TLS. (Since 2.9) Previously (since 2.7), this was +# reported by omitting tls-creds instead. # -# @tls-hostname: hostname of the target host for the migration. This -# is required when using x509 based TLS credentials and the -# migration URI does not already include a hostname. For -# example if using fd: or exec: based migration, the -# hostname must be provided so that the server's x509 -# certificate identity can be validated. (Since 2.7) -# An empty string means that QEMU will use the hostname -# associated with the migration URI, if any. (Since 2.9) -# Previously (since 2.7), this was reported by omitting -# tls-hostname instead. +# @tls-hostname: hostname of the target host for the migration. This +# is required when using x509 based TLS credentials and the +# migration URI does not already include a hostname. For example +# if using fd: or exec: based migration, the hostname must be +# provided so that the server's x509 certificate identity can be +# validated. (Since 2.7) An empty string means that QEMU will use +# the hostname associated with the migration URI, if any. (Since +# 2.9) Previously (since 2.7), this was reported by omitting +# tls-hostname instead. # -# @max-bandwidth: to set maximum speed for migration. maximum speed in -# bytes per second. (Since 2.8) +# @max-bandwidth: to set maximum speed for migration. maximum speed +# in bytes per second. (Since 2.8) # -# @downtime-limit: set maximum tolerated downtime for migration. maximum -# downtime in milliseconds (Since 2.8) +# @downtime-limit: set maximum tolerated downtime for migration.
+# maximum downtime in milliseconds (Since 2.8) # -# @x-checkpoint-delay: the delay time between two COLO checkpoints. (Since 2.8) +# @x-checkpoint-delay: the delay time between two COLO checkpoints. +# (Since 2.8) # # @block-incremental: Affects how much storage is migrated when the -# block migration capability is enabled. When false, the entire -# storage backing chain is migrated into a flattened image at -# the destination; when true, only the active qcow2 layer is -# migrated and the destination must already have access to the -# same backing chain as was used on the source. (since 2.10) +# block migration capability is enabled. When false, the entire +# storage backing chain is migrated into a flattened image at the +# destination; when true, only the active qcow2 layer is migrated +# and the destination must already have access to the same backing +# chain as was used on the source. (since 2.10) # # @multifd-channels: Number of channels used to migrate data in -# parallel. This is the same number that the -# number of sockets used for migration. The -# default value is 2 (since 4.0) +# parallel. This is the same as the number of sockets +# used for migration. The default value is 2 (since 4.0) # # @xbzrle-cache-size: cache size to be used by XBZRLE migration. It -# needs to be a multiple of the target page size -# and a power of 2 -# (Since 2.11) +# needs to be a multiple of the target page size and a power of 2 +# (Since 2.11) # -# @max-postcopy-bandwidth: Background transfer bandwidth during postcopy. -# Defaults to 0 (unlimited). In bytes per second. -# (Since 3.0) +# @max-postcopy-bandwidth: Background transfer bandwidth during +# postcopy. Defaults to 0 (unlimited). In bytes per second. +# (Since 3.0) # -# @max-cpu-throttle: maximum cpu throttle percentage. -# The default value is 99. (Since 3.1) +# @max-cpu-throttle: maximum cpu throttle percentage. The default +# value is 99. (Since 3.1) # -# @multifd-compression: Which compression method to use. -# Defaults to none. (Since 5.0) +# @multifd-compression: Which compression method to use. Defaults to +# none. (Since 5.0) # # @multifd-zlib-level: Set the compression level to be used in live -# migration, the compression level is an integer between 0 -# and 9, where 0 means no compression, 1 means the best -# compression speed, and 9 means best compression ratio which -# will consume more CPU. -# Defaults to 1. (Since 5.0) +# migration, the compression level is an integer between 0 and 9, +# where 0 means no compression, 1 means the best compression +# speed, and 9 means best compression ratio which will consume +# more CPU. Defaults to 1. (Since 5.0) # # @multifd-zstd-level: Set the compression level to be used in live -# migration, the compression level is an integer between 0 -# and 20, where 0 means no compression, 1 means the best -# compression speed, and 20 means best compression ratio which -# will consume more CPU. -# Defaults to 1. (Since 5.0) +# migration, the compression level is an integer between 0 and 20, +# where 0 means no compression, 1 means the best compression +# speed, and 20 means best compression ratio which will consume +# more CPU. Defaults to 1. (Since 5.0) # # @block-bitmap-mapping: Maps block nodes and bitmaps on them to -# aliases for the purpose of dirty bitmap migration. Such -# aliases may for example be the corresponding names on the -# opposite site.
-# The mapping must be one-to-one, but not necessarily -# complete: On the source, unmapped bitmaps and all bitmaps -# on unmapped nodes will be ignored. On the destination, -# encountering an unmapped alias in the incoming migration -# stream will result in a report, and all further bitmap -# migration data will then be discarded. -# Note that the destination does not know about bitmaps it -# does not receive, so there is no limitation or requirement -# regarding the number of bitmaps received, or how they are -# named, or on which nodes they are placed. -# By default (when this parameter has never been set), bitmap -# names are mapped to themselves. Nodes are mapped to their -# block device name if there is one, and to their node name -# otherwise. (Since 5.2) +# aliases for the purpose of dirty bitmap migration. Such aliases +# may for example be the corresponding names on the opposite site. +# The mapping must be one-to-one, but not necessarily complete: On +# the source, unmapped bitmaps and all bitmaps on unmapped nodes +# will be ignored. On the destination, encountering an unmapped +# alias in the incoming migration stream will result in a report, +# and all further bitmap migration data will then be discarded. +# Note that the destination does not know about bitmaps it does +# not receive, so there is no limitation or requirement regarding +# the number of bitmaps received, or how they are named, or on +# which nodes they are placed. By default (when this parameter +# has never been set), bitmap names are mapped to themselves. +# Nodes are mapped to their block device name if there is one, and +# to their node name otherwise. (Since 5.2) # # Features: +# # @unstable: Member @x-checkpoint-delay is experimental. # +# TODO: either fuse back into MigrationParameters, or make +# MigrationParameters members mandatory +# # Since: 2.4 ## -# TODO either fuse back into MigrationParameters, or make -# MigrationParameters members mandatory { 'struct': 'MigrateSetParameters', 'data': { '*announce-initial': 'size', '*announce-max': 'size', @@ -989,7 +985,7 @@ # # -> { "execute": "migrate-set-parameters" , # "arguments": { "compress-level": 1 } } -# +# <- { "return": {} } ## { 'command': 'migrate-set-parameters', 'boxed': true, 'data': 'MigrateSetParameters' } @@ -999,149 +995,140 @@ # # The optional members aren't actually optional. # -# @announce-initial: Initial delay (in milliseconds) before sending the -# first announce (Since 4.0) +# @announce-initial: Initial delay (in milliseconds) before sending +# the first announce (Since 4.0) # -# @announce-max: Maximum delay (in milliseconds) between packets in the -# announcement (Since 4.0) +# @announce-max: Maximum delay (in milliseconds) between packets in +# the announcement (Since 4.0) # -# @announce-rounds: Number of self-announce packets sent after migration -# (Since 4.0) +# @announce-rounds: Number of self-announce packets sent after +# migration (Since 4.0) # -# @announce-step: Increase in delay (in milliseconds) between subsequent -# packets in the announcement (Since 4.0) +# @announce-step: Increase in delay (in milliseconds) between +# subsequent packets in the announcement (Since 4.0) # # @compress-level: compression level # # @compress-threads: compression thread count # -# @compress-wait-thread: Controls behavior when all compression threads are -# currently busy. If true (default), wait for a free -# compression thread to become available; otherwise, -# send the page uncompressed. 
(Since 3.1) +# @compress-wait-thread: Controls behavior when all compression +# threads are currently busy. If true (default), wait for a free +# compression thread to become available; otherwise, send the page +# uncompressed. (Since 3.1) # # @decompress-threads: decompression thread count # -# @throttle-trigger-threshold: The ratio of bytes_dirty_period and bytes_xfer_period -# to trigger throttling. It is expressed as percentage. -# The default value is 50. (Since 5.0) +# @throttle-trigger-threshold: The ratio of bytes_dirty_period and +# bytes_xfer_period to trigger throttling. It is expressed as +# percentage. The default value is 50. (Since 5.0) # # @cpu-throttle-initial: Initial percentage of time guest cpus are -# throttled when migration auto-converge is activated. -# (Since 2.7) +# throttled when migration auto-converge is activated. (Since +# 2.7) # # @cpu-throttle-increment: throttle percentage increase each time -# auto-converge detects that migration is not making -# progress. (Since 2.7) +# auto-converge detects that migration is not making progress. +# (Since 2.7) # -# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage -# At the tail stage of throttling, the Guest is very -# sensitive to CPU percentage while the @cpu-throttle -# -increment is excessive usually at tail stage. -# If this parameter is true, we will compute the ideal -# CPU percentage used by the Guest, which may exactly make -# the dirty rate match the dirty rate threshold. Then we -# will choose a smaller throttle increment between the -# one specified by @cpu-throttle-increment and the one -# generated by ideal CPU percentage. -# Therefore, it is compatible to traditional throttling, -# meanwhile the throttle increment won't be excessive -# at tail stage. -# The default value is false. (Since 5.1) +# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage. At +# the tail stage of throttling, the Guest is very sensitive to CPU +# percentage while the @cpu-throttle-increment is excessive +# usually at tail stage. If this parameter is true, we will +# compute the ideal CPU percentage used by the Guest, which may +# exactly make the dirty rate match the dirty rate threshold. +# Then we will choose a smaller throttle increment between the one +# specified by @cpu-throttle-increment and the one generated by +# ideal CPU percentage. Therefore, it is compatible with +# traditional throttling, meanwhile the throttle increment won't +# be excessive at tail stage. The default value is false. (Since +# 5.1) # # @tls-creds: ID of the 'tls-creds' object that provides credentials -# for establishing a TLS connection over the migration data -# channel. On the outgoing side of the migration, the credentials -# must be for a 'client' endpoint, while for the incoming side the -# credentials must be for a 'server' endpoint. -# An empty string means that QEMU will use plain text mode for -# migration, rather than TLS (Since 2.7) -# Note: 2.8 reports this by omitting tls-creds instead. +# for establishing a TLS connection over the migration data +# channel. On the outgoing side of the migration, the credentials +# must be for a 'client' endpoint, while for the incoming side the +# credentials must be for a 'server' endpoint. An empty string +# means that QEMU will use plain text mode for migration, rather +# than TLS. (Since 2.7) Note: 2.8 reports this by omitting +# tls-creds instead. # -# @tls-hostname: hostname of the target host for the migration.
This -# is required when using x509 based TLS credentials and the -# migration URI does not already include a hostname. For -# example if using fd: or exec: based migration, the -# hostname must be provided so that the server's x509 -# certificate identity can be validated. (Since 2.7) -# An empty string means that QEMU will use the hostname -# associated with the migration URI, if any. (Since 2.9) -# Note: 2.8 reports this by omitting tls-hostname instead. +# @tls-hostname: hostname of the target host for the migration. This +# is required when using x509 based TLS credentials and the +# migration URI does not already include a hostname. For example +# if using fd: or exec: based migration, the hostname must be +# provided so that the server's x509 certificate identity can be +# validated. (Since 2.7) An empty string means that QEMU will use +# the hostname associated with the migration URI, if any. (Since +# 2.9) Note: 2.8 reports this by omitting tls-hostname instead. # -# @tls-authz: ID of the 'authz' object subclass that provides access control -# checking of the TLS x509 certificate distinguished name. (Since -# 4.0) +# @tls-authz: ID of the 'authz' object subclass that provides access +# control checking of the TLS x509 certificate distinguished name. +# (Since 4.0) # -# @max-bandwidth: to set maximum speed for migration. maximum speed in -# bytes per second. (Since 2.8) +# @max-bandwidth: to set maximum speed for migration. maximum speed +# in bytes per second. (Since 2.8) # -# @downtime-limit: set maximum tolerated downtime for migration. maximum -# downtime in milliseconds (Since 2.8) +# @downtime-limit: set maximum tolerated downtime for migration. +# maximum downtime in milliseconds (Since 2.8) # -# @x-checkpoint-delay: the delay time between two COLO checkpoints. (Since 2.8) +# @x-checkpoint-delay: the delay time between two COLO checkpoints. +# (Since 2.8) # # @block-incremental: Affects how much storage is migrated when the -# block migration capability is enabled. When false, the entire -# storage backing chain is migrated into a flattened image at -# the destination; when true, only the active qcow2 layer is -# migrated and the destination must already have access to the -# same backing chain as was used on the source. (since 2.10) +# block migration capability is enabled. When false, the entire +# storage backing chain is migrated into a flattened image at the +# destination; when true, only the active qcow2 layer is migrated +# and the destination must already have access to the same backing +# chain as was used on the source. (since 2.10) # # @multifd-channels: Number of channels used to migrate data in -# parallel. This is the same number that the -# number of sockets used for migration. -# The default value is 2 (since 4.0) +# parallel. This is the same as the number of sockets +# used for migration. The default value is 2 (since 4.0) # # @xbzrle-cache-size: cache size to be used by XBZRLE migration. It -# needs to be a multiple of the target page size -# and a power of 2 -# (Since 2.11) +# needs to be a multiple of the target page size and a power of 2 +# (Since 2.11) # -# @max-postcopy-bandwidth: Background transfer bandwidth during postcopy. -# Defaults to 0 (unlimited). In bytes per second. -# (Since 3.0) +# @max-postcopy-bandwidth: Background transfer bandwidth during +# postcopy. Defaults to 0 (unlimited). In bytes per second. +# (Since 3.0) # -# @max-cpu-throttle: maximum cpu throttle percentage. -# Defaults to 99.
-# (Since 3.1) +# @max-cpu-throttle: maximum cpu throttle percentage. Defaults to 99. +# (Since 3.1) # -# @multifd-compression: Which compression method to use. -# Defaults to none. (Since 5.0) +# @multifd-compression: Which compression method to use. Defaults to +# none. (Since 5.0) # # @multifd-zlib-level: Set the compression level to be used in live -# migration, the compression level is an integer between 0 -# and 9, where 0 means no compression, 1 means the best -# compression speed, and 9 means best compression ratio which -# will consume more CPU. -# Defaults to 1. (Since 5.0) +# migration, the compression level is an integer between 0 and 9, +# where 0 means no compression, 1 means the best compression +# speed, and 9 means best compression ratio which will consume +# more CPU. Defaults to 1. (Since 5.0) # # @multifd-zstd-level: Set the compression level to be used in live -# migration, the compression level is an integer between 0 -# and 20, where 0 means no compression, 1 means the best -# compression speed, and 20 means best compression ratio which -# will consume more CPU. -# Defaults to 1. (Since 5.0) +# migration, the compression level is an integer between 0 and 20, +# where 0 means no compression, 1 means the best compression +# speed, and 20 means best compression ratio which will consume +# more CPU. Defaults to 1. (Since 5.0) # # @block-bitmap-mapping: Maps block nodes and bitmaps on them to -# aliases for the purpose of dirty bitmap migration. Such -# aliases may for example be the corresponding names on the -# opposite site. -# The mapping must be one-to-one, but not necessarily -# complete: On the source, unmapped bitmaps and all bitmaps -# on unmapped nodes will be ignored. On the destination, -# encountering an unmapped alias in the incoming migration -# stream will result in a report, and all further bitmap -# migration data will then be discarded. -# Note that the destination does not know about bitmaps it -# does not receive, so there is no limitation or requirement -# regarding the number of bitmaps received, or how they are -# named, or on which nodes they are placed. -# By default (when this parameter has never been set), bitmap -# names are mapped to themselves. Nodes are mapped to their -# block device name if there is one, and to their node name -# otherwise. (Since 5.2) +# aliases for the purpose of dirty bitmap migration. Such aliases +# may for example be the corresponding names on the opposite site. +# The mapping must be one-to-one, but not necessarily complete: On +# the source, unmapped bitmaps and all bitmaps on unmapped nodes +# will be ignored. On the destination, encountering an unmapped +# alias in the incoming migration stream will result in a report, +# and all further bitmap migration data will then be discarded. +# Note that the destination does not know about bitmaps it does +# not receive, so there is no limitation or requirement regarding +# the number of bitmaps received, or how they are named, or on +# which nodes they are placed. By default (when this parameter +# has never been set), bitmap names are mapped to themselves. +# Nodes are mapped to their block device name if there is one, and +# to their node name otherwise. (Since 5.2) # # Features: +# # @unstable: Member @x-checkpoint-delay is experimental. # # Since: 2.4 @@ -1198,45 +1185,16 @@ # "downtime-limit": 300 # } # } -# ## { 'command': 'query-migrate-parameters', 'returns': 'MigrationParameters' } -## -# @client_migrate_info: -# -# Set migration information for remote display. 
This makes the server -# ask the client to automatically reconnect using the new parameters -# once migration finished successfully. Only implemented for SPICE. -# -# @protocol: must be "spice" -# @hostname: migration target hostname -# @port: spice tcp port for plaintext channels -# @tls-port: spice tcp port for tls-secured channels -# @cert-subject: server certificate subject -# -# Since: 0.14 -# -# Example: -# -# -> { "execute": "client_migrate_info", -# "arguments": { "protocol": "spice", -# "hostname": "virt42.lab.kraxel.org", -# "port": 1234 } } -# <- { "return": {} } -# -## -{ 'command': 'client_migrate_info', - 'data': { 'protocol': 'str', 'hostname': 'str', '*port': 'int', - '*tls-port': 'int', '*cert-subject': 'str' } } - ## # @migrate-start-postcopy: # -# Followup to a migration command to switch the migration to postcopy mode. -# The postcopy-ram capability must be set on both source and destination -# before the original migration command. +# Followup to a migration command to switch the migration to postcopy +# mode. The postcopy-ram capability must be set on both source and +# destination before the original migration command. # # Since: 2.5 # @@ -1244,7 +1202,6 @@ # # -> { "execute": "migrate-start-postcopy" } # <- { "return": {} } -# ## { 'command': 'migrate-start-postcopy' } @@ -1262,7 +1219,6 @@ # <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001}, # "event": "MIGRATION", # "data": {"status": "completed"} } -# ## { 'event': 'MIGRATION', 'data': {'status': 'MigrationStatus'}} @@ -1270,8 +1226,8 @@ ## # @MIGRATION_PASS: # -# Emitted from the source side of a migration at the start of each pass -# (when it syncs the dirty bitmap) +# Emitted from the source side of a migration at the start of each +# pass (when it syncs the dirty bitmap) # # @pass: An incrementing count (starting at 1 on the first pass) # @@ -1279,9 +1235,8 @@ # # Example: # -# { "timestamp": {"seconds": 1449669631, "microseconds": 239225}, -# "event": "MIGRATION_PASS", "data": {"pass": 2} } -# +# <- { "timestamp": {"seconds": 1449669631, "microseconds": 239225}, +# "event": "MIGRATION_PASS", "data": {"pass": 2} } ## { 'event': 'MIGRATION_PASS', 'data': { 'pass': 'int' } } @@ -1293,7 +1248,8 @@ # # @checkpoint-ready: Secondary VM (SVM) is ready for checkpointing # -# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for checkpointing +# @checkpoint-request: Primary VM (PVM) tells SVM to prepare for +# checkpointing # # @checkpoint-reply: SVM gets PVM's checkpoint request # @@ -1341,7 +1297,8 @@ # # @completed: finish the process of failover # -# @relaunch: restart the failover process, from 'none' -> 'completed' (Since 2.9) +# @relaunch: restart the failover process, from 'none' -> 'completed' +# (Since 2.9) # # Since: 2.8 ## @@ -1364,7 +1321,6 @@ # # <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172}, # "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } } -# ## { 'event': 'COLO_EXIT', 'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } } @@ -1374,9 +1330,9 @@ # # The reason for a COLO exit. # -# @none: failover has never happened. This state does not occur -# in the COLO_EXIT event, and is only visible in the result of -# query-colo-status. +# @none: failover has never happened. This state does not occur in +# the COLO_EXIT event, and is only visible in the result of +# query-colo-status. # # @request: COLO exit is due to an external request. 
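
The MIGRATION and MIGRATION_PASS events above arrive asynchronously on the QMP channel, so a management application normally watches for them instead of relying on polling alone. A minimal observer sketch (not part of this patch), assuming a QMP UNIX socket at /tmp/qmp.sock and the qemu.qmp package from PyPI; the socket path and client name are illustrative:

import asyncio
from qemu.qmp import QMPClient

async def watch_migration():
    qmp = QMPClient('migration-watcher')  # illustrative client name
    await qmp.connect('/tmp/qmp.sock')    # assumed QMP socket path
    # MIGRATION carries {"status": ...}; MIGRATION_PASS carries {"pass": N}.
    async for event in qmp.events:
        if event['event'] == 'MIGRATION_PASS':
            print('dirty bitmap sync, pass', event['data']['pass'])
        elif event['event'] == 'MIGRATION':
            status = event['data']['status']
            print('migration status:', status)
            if status in ('completed', 'failed', 'cancelled'):
                break
    await qmp.disconnect()

asyncio.run(watch_migration())
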
# @@ -1392,12 +1348,14 @@ ## # @x-colo-lost-heartbeat: # -# Tell qemu that heartbeat is lost, request it to do takeover procedures. -# If this command is sent to the PVM, the Primary side will exit COLO mode. -# If sent to the Secondary, the Secondary side will run failover work, -# then takes over server operation to become the service VM. +# Tell qemu that heartbeat is lost, request it to do takeover +# procedures. If this command is sent to the PVM, the Primary side +# will exit COLO mode. If sent to the Secondary, the Secondary side +# will run failover work, then take over server operation to become +# the service VM. # # Features: +# # @unstable: This command is experimental. # # Since: 2.8 @@ -1406,10 +1364,10 @@ # # -> { "execute": "x-colo-lost-heartbeat" } # <- { "return": {} } -# ## { 'command': 'x-colo-lost-heartbeat', - 'features': [ 'unstable' ] } + 'features': [ 'unstable' ], + 'if': 'CONFIG_REPLICATION' } ## # @migrate_cancel: # @@ -1418,7 +1376,8 @@ # # Returns: nothing on success # -# Notes: This command succeeds even if there is no migration process running. +# Notes: This command succeeds even if there is no migration process +# running. # # Since: 0.14 # @@ -1426,7 +1385,6 @@ # # -> { "execute": "migrate_cancel" } # <- { "return": {} } -# ## { 'command': 'migrate_cancel' } @@ -1460,8 +1418,8 @@ # # @inc: incremental disk copy migration # -# @detach: this argument exists only for compatibility reasons and -# is ignored by QEMU +# @detach: this argument exists only for compatibility reasons and is +# ignored by QEMU # # @resume: resume one paused migration, default "off". (since 3.0) # @@ -1471,19 +1429,19 @@ # # Notes: # -# 1. The 'query-migrate' command should be used to check migration's progress -# and final result (this information is provided by the 'status' member) +# 1. The 'query-migrate' command should be used to check migration's +# progress and final result (this information is provided by the +# 'status' member) # # 2. All boolean arguments default to false # -# 3. The user Monitor's "detach" argument is invalid in QMP and should not -# be used +# 3. The user Monitor's "detach" argument is invalid in QMP and should +# not be used # # Example: # # -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } } # <- { "return": {} } -# ## { 'command': 'migrate', 'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool', @@ -1492,11 +1450,11 @@ ## # @migrate-incoming: # -# Start an incoming migration, the qemu must have been started -# with -incoming defer +# Start an incoming migration, the qemu must have been started with +# -incoming defer # # @uri: The Uniform Resource Identifier identifying the source or -# address to listen on +# address to listen on # # Returns: nothing on success # @@ -1504,12 +1462,12 @@ # # Notes: # -# 1. It's a bad idea to use a string for the uri, but it needs to stay -# compatible with -incoming and the format of the uri is already exposed -# above libvirt. +# 1. It's a bad idea to use a string for the uri, but it needs +# to stay compatible with -incoming and the format of the uri +# is already exposed above libvirt. # -# 2. QEMU must be started with -incoming defer to allow migrate-incoming to -# be used. +# 2. QEMU must be started with -incoming defer to allow +# migrate-incoming to be used. # # 3.
The uri format is the same as for -incoming # @@ -1518,22 +1476,21 @@ # -> { "execute": "migrate-incoming", # "arguments": { "uri": "tcp::4446" } } # <- { "return": {} } -# ## { 'command': 'migrate-incoming', 'data': {'uri': 'str' } } ## # @xen-save-devices-state: # -# Save the state of all devices to file. The RAM and the block devices -# of the VM are not saved by this command. +# Save the state of all devices to file. The RAM and the block +# devices of the VM are not saved by this command. # # @filename: the file to save the state of the devices to as binary -# data. See xen-save-devices-state.txt for a description of the binary -# format. +# data. See xen-save-devices-state.txt for a description of the +# binary format. # -# @live: Optional argument to ask QEMU to treat this command as part of a live -# migration. Default to true. (since 2.11) +# @live: Optional argument to ask QEMU to treat this command as part +# of a live migration. Default to true. (since 2.11) # # Returns: Nothing on success # @@ -1544,7 +1501,6 @@ # -> { "execute": "xen-save-devices-state", # "arguments": { "filename": "/tmp/save" } } # <- { "return": {} } -# ## { 'command': 'xen-save-devices-state', 'data': {'filename': 'str', '*live':'bool' } } @@ -1565,19 +1521,18 @@ # -> { "execute": "xen-set-global-dirty-log", # "arguments": { "enable": true } } # <- { "return": {} } -# ## { 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } } ## # @xen-load-devices-state: # -# Load the state of all devices from file. The RAM and the block devices -# of the VM are not loaded by this command. +# Load the state of all devices from file. The RAM and the block +# devices of the VM are not loaded by this command. # # @filename: the file to load the state of the devices from as binary -# data. See xen-save-devices-state.txt for a description of the binary -# format. +# data. See xen-save-devices-state.txt for a description of the +# binary format. # # Since: 2.7 # @@ -1586,7 +1541,6 @@ # -> { "execute": "xen-load-devices-state", # "arguments": { "filename": "/tmp/resume" } } # <- { "return": {} } -# ## { 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} } @@ -1599,8 +1553,8 @@ # # @primary: true for primary or false for secondary. # -# @failover: true to do failover, false to stop. but cannot be -# specified if 'enable' is true. default value is false. +# @failover: true to do failover, false to stop. It cannot be +# specified if 'enable' is true. The default value is false. # # Returns: nothing. # @@ -1623,8 +1577,8 @@ # # @error: true if an error happened, false if replication is normal. # -# @desc: the human readable error description string, when -# @error is 'true'. +# @desc: the human readable error description string, when @error is +# 'true'. # # Since: 2.9 ## @@ -1672,12 +1626,12 @@ # # The result format for 'query-colo-status'. # -# @mode: COLO running mode. If COLO is running, this field will return -# 'primary' or 'secondary'. +# @mode: COLO running mode. If COLO is running, this field will +# return 'primary' or 'secondary'. # -# @last-mode: COLO last running mode. If COLO is running, this field -# will return same like mode field, after failover we can -# use this field to get last colo mode. (since 4.0) +# @last-mode: COLO last running mode. If COLO is running, this field +# will return the same value as the mode field; after failover this +# field can be used to get the last COLO mode. (since 4.0) # # @reason: describes the reason for the COLO exit.
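
The migrate and migrate-incoming commands above pair up across two QEMU instances: the destination must be started with -incoming defer and told to listen before the source is told to connect. A minimal orchestration sketch (not part of this patch), assuming QMP UNIX sockets at /tmp/qmp-src.sock and /tmp/qmp-dst.sock and the qemu.qmp package; the URIs and port number are arbitrary examples:

import asyncio
from qemu.qmp import QMPClient

async def migrate_vm():
    src, dst = QMPClient('src'), QMPClient('dst')
    await src.connect('/tmp/qmp-src.sock')  # assumed socket paths
    await dst.connect('/tmp/qmp-dst.sock')
    # Destination first: it only listens here because it was started
    # with "-incoming defer".
    await dst.execute('migrate-incoming', {'uri': 'tcp:0:4446'})
    await src.execute('migrate', {'uri': 'tcp:127.0.0.1:4446'})
    # Poll query-migrate for the final result, per note 1 of @migrate.
    while True:
        info = await src.execute('query-migrate')
        if info.get('status') in ('completed', 'failed', 'cancelled'):
            break
        await asyncio.sleep(1)
    await src.disconnect()
    await dst.disconnect()

asyncio.run(migrate_vm())
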
# @@ -1685,7 +1639,8 @@ ## { 'struct': 'COLOStatus', 'data': { 'mode': 'COLOMode', 'last-mode': 'COLOMode', - 'reason': 'COLOExitReason' } } + 'reason': 'COLOExitReason' }, + 'if': 'CONFIG_REPLICATION' } ## # @query-colo-status: # @@ -1702,7 +1657,8 @@ # Since: 3.1 ## { 'command': 'query-colo-status', - 'returns': 'COLOStatus' } + 'returns': 'COLOStatus', + 'if': 'CONFIG_REPLICATION' } ## # @migrate-recover: # @@ -1745,9 +1701,9 @@ # @UNPLUG_PRIMARY: # # Emitted from source side of a migration when migration state is -# WAIT_UNPLUG. Device was unplugged by guest operating system. -# Device resources in QEMU are kept on standby to be able to re-plug it in case -# of migration failure. +# WAIT_UNPLUG. Device was unplugged by guest operating system. Device +# resources in QEMU are kept on standby to be able to re-plug it in +# case of migration failure. # # @device-id: QEMU device id of the unplugged device # @@ -1758,7 +1714,6 @@ # <- { "event": "UNPLUG_PRIMARY", # "data": { "device-id": "hostdev0" }, # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } -# ## { 'event': 'UNPLUG_PRIMARY', 'data': { 'device-id': 'str' } } @@ -1786,7 +1741,8 @@ # # @measuring: the dirtyrate thread is measuring. # -# @measured: the dirtyrate thread has measured and results are available. +# @measured: the dirtyrate thread has measured and results are +# available. # # Since: 5.2 ## @@ -1814,24 +1770,24 @@ # # Information about current dirty page rate of vm. # -# @dirty-rate: an estimate of the dirty page rate of the VM in units of -# MB/s, present only when estimating the rate has completed. +# @dirty-rate: an estimate of the dirty page rate of the VM in units +# of MB/s, present only when estimating the rate has completed. # # @status: status containing dirtyrate query status includes -# 'unstarted' or 'measuring' or 'measured' +# 'unstarted' or 'measuring' or 'measured' # # @start-time: start time in units of second for calculation # # @calc-time: time in units of second for sample dirty pages # -# @sample-pages: page count per GB for sample dirty pages -# the default value is 512 (since 6.1) +# @sample-pages: page count per GB for sample dirty pages; the default +# value is 512 (since 6.1) # # @mode: mode containing method of calculating dirtyrate includes -# 'page-sampling' and 'dirty-ring' (Since 6.2) +# 'page-sampling' and 'dirty-ring' (Since 6.2) # -# @vcpu-dirty-rate: dirtyrate for each vcpu if dirty-ring -# mode specified (Since 6.2) +# @vcpu-dirty-rate: dirtyrate for each vcpu if dirty-ring mode +# specified (Since 6.2) # # Since: 5.2 ## @@ -1851,19 +1807,19 @@ # # @calc-time: time in units of second for sample dirty pages # -# @sample-pages: page count per GB for sample dirty pages -# the default value is 512 (since 6.1) +# @sample-pages: page count per GB for sample dirty pages; the default +# value is 512 (since 6.1) # -# @mode: mechanism of calculating dirtyrate includes -# 'page-sampling' and 'dirty-ring' (Since 6.1) +# @mode: mechanism of calculating dirtyrate includes 'page-sampling' +# and 'dirty-ring' (Since 6.1) # # Since: 5.2 # # Example: # -# {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1, -# 'sample-pages': 512} } -# +# -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1, +# "sample-pages": 512} } +# <- { "return": {} } ## { 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64', '*sample-pages': 'int', @@ -1886,12 +1842,11 @@ # @cpu-index: index of a virtual CPU.
# # @limit-rate: upper limit of dirty page rate (MB/s) for a virtual -# CPU, 0 means unlimited. +# CPU, 0 means unlimited. # # @current-rate: current dirty page rate (MB/s) for a virtual CPU. # # Since: 7.1 -# ## { 'struct': 'DirtyLimitInfo', 'data': { 'cpu-index': 'int', @@ -1903,9 +1858,9 @@ # # Set the upper limit of dirty page rate for virtual CPUs. # -# Requires KVM with accelerator property "dirty-ring-size" set. -# A virtual CPU's dirty page rate is a measure of its memory load. -# To observe dirty page rates, use @calc-dirty-rate. +# Requires KVM with accelerator property "dirty-ring-size" set. A +# virtual CPU's dirty page rate is a measure of its memory load. To +# observe dirty page rates, use @calc-dirty-rate. # # @cpu-index: index of a virtual CPU, default is all. # @@ -1914,10 +1869,11 @@ # Since: 7.1 # # Example: -# {"execute": "set-vcpu-dirty-limit"} -# "arguments": { "dirty-rate": 200, -# "cpu-index": 1 } } # +# -> {"execute": "set-vcpu-dirty-limit", +# "arguments": { "dirty-rate": 200, +# "cpu-index": 1 } } +# <- { "return": {} } ## { 'command': 'set-vcpu-dirty-limit', 'data': { '*cpu-index': 'int', @@ -1929,7 +1885,7 @@ # Cancel the upper limit of dirty page rate for virtual CPUs. # # Cancel the dirty page limit for the vCPU which has been set with -# set-vcpu-dirty-limit command. Note that this command requires +# set-vcpu-dirty-limit command. Note that this command requires # support from dirty ring, same as the "set-vcpu-dirty-limit". # # @cpu-index: index of a virtual CPU, default is all. @@ -1937,9 +1893,10 @@ # Since: 7.1 # # Example: -# {"execute": "cancel-vcpu-dirty-limit"} -# "arguments": { "cpu-index": 1 } } # +# -> {"execute": "cancel-vcpu-dirty-limit", +# "arguments": { "cpu-index": 1 } } +# <- { "return": {} } ## { 'command': 'cancel-vcpu-dirty-limit', 'data': { '*cpu-index': 'int'} } @@ -1947,13 +1904,17 @@ ## # @query-vcpu-dirty-limit: # -# Returns information about virtual CPU dirty page rate limits, if any. +# Returns information about virtual CPU dirty page rate limits, if +# any. # # Since: 7.1 # # Example: -# {"execute": "query-vcpu-dirty-limit"} # +# -> {"execute": "query-vcpu-dirty-limit"} +# <- {"return": [ +# { "limit-rate": 60, "current-rate": 3, "cpu-index": 0}, +# { "limit-rate": 60, "current-rate": 3, "cpu-index": 1}]} ## { 'command': 'query-vcpu-dirty-limit', 'returns': [ 'DirtyLimitInfo' ] } @@ -1980,7 +1941,7 @@ # # data: migration thread name # -# returns: information about migration threads +# Returns: information about migration threads # # Since: 7.2 ## @@ -1993,20 +1954,24 @@ # Save a VM snapshot # # @job-id: identifier for the newly created job +# # @tag: name of the snapshot to create +# # @vmstate: block device node name to save vmstate to +# # @devices: list of block device node names to save a snapshot to # # Applications should not assume that the snapshot save is complete -# when this command returns. The job commands / events must be used -# to determine completion and to fetch details of any errors that arise. +# when this command returns. The job commands / events must be used +# to determine completion and to fetch details of any errors that +# arise. # -# Note that execution of the guest CPUs may be stopped during the -# time it takes to save the snapshot. A future version of QEMU -# may ensure CPUs are executing continuously. +# Note that execution of the guest CPUs may be stopped during the time +# it takes to save the snapshot. A future version of QEMU may ensure +# CPUs are executing continuously.
# -# It is strongly recommended that @devices contain all writable -# block device nodes if a consistent snapshot is required. +# It is strongly recommended that @devices contain all writable block +# device nodes if a consistent snapshot is required. # # If @tag already exists, an error will be reported # @@ -2063,20 +2028,24 @@ # Load a VM snapshot # # @job-id: identifier for the newly created job +# # @tag: name of the snapshot to load. +# # @vmstate: block device node name to load vmstate from +# # @devices: list of block device node names to load a snapshot from # # Applications should not assume that the snapshot load is complete -# when this command returns. The job commands / events must be used -# to determine completion and to fetch details of any errors that arise. +# when this command returns. The job commands / events must be used +# to determine completion and to fetch details of any errors that +# arise. # # Note that execution of the guest CPUs will be stopped during the # time it takes to load the snapshot. # -# It is strongly recommended that @devices contain all writable -# block device nodes that can have changed since the original -# @snapshot-save command execution. +# It is strongly recommended that @devices contain all writable block +# device nodes that can have changed since the original @snapshot-save +# command execution. # # Returns: nothing # @@ -2131,12 +2100,15 @@ # Delete a VM snapshot # # @job-id: identifier for the newly created job +# # @tag: name of the snapshot to delete. +# # @devices: list of block device node names to delete a snapshot from # # Applications should not assume that the snapshot delete is complete -# when this command returns. The job commands / events must be used -# to determine completion and to fetch details of any errors that arise. +# when this command returns. The job commands / events must be used +# to determine completion and to fetch details of any errors that +# arise. # # Returns: nothing # diff --git a/qapi/misc-target.json b/qapi/misc-target.json index 5b6a8e9185..88291453ba 100644 --- a/qapi/misc-target.json +++ b/qapi/misc-target.json @@ -5,10 +5,9 @@ ## # @rtc-reset-reinjection: # -# This command will reset the RTC interrupt reinjection backlog. -# Can be used if another mechanism to synchronize guest time -# is in effect, for example QEMU guest agent's guest-set-time -# command. +# This command will reset the RTC interrupt reinjection backlog. Can +# be used if another mechanism to synchronize guest time is in effect, +# for example QEMU guest agent's guest-set-time command. # # Since: 2.1 # @@ -16,7 +15,6 @@ # # -> { "execute": "rtc-reset-reinjection" } # <- { "return": {} } -# ## { 'command': 'rtc-reset-reinjection', 'if': 'TARGET_I386' } @@ -28,17 +26,19 @@ # # @uninit: The guest is uninitialized. # -# @launch-update: The guest is currently being launched; plaintext data and -# register state is being imported. +# @launch-update: The guest is currently being launched; plaintext +# data and register state is being imported. # -# @launch-secret: The guest is currently being launched; ciphertext data -# is being imported. +# @launch-secret: The guest is currently being launched; ciphertext +# data is being imported. # # @running: The guest is fully launched or migrated in. # -# @send-update: The guest is currently being migrated out to another machine. +# @send-update: The guest is currently being migrated out to another +# machine. # -# @receive-update: The guest is currently being migrated from another machine. 
+# @receive-update: The guest is currently being migrated from another +# machine. # # Since: 2.12 ## @@ -95,7 +95,6 @@ # <- { "return": { "enabled": true, "api-major" : 0, "api-minor" : 0, # "build-id" : 0, "policy" : 0, "state" : "running", # "handle" : 1 } } -# ## { 'command': 'query-sev', 'returns': 'SevInfo', 'if': 'TARGET_I386' } @@ -125,7 +124,6 @@ # # -> { "execute": "query-sev-launch-measure" } # <- { "return": { "data": "4l8LXeNlSPUDlXPJG5966/8%YZ" } } -# ## { 'command': 'query-sev-launch-measure', 'returns': 'SevLaunchMeasureInfo', 'if': 'TARGET_I386' } @@ -133,8 +131,8 @@ ## # @SevCapability: # -# The struct describes capability for a Secure Encrypted Virtualization -# feature. +# The struct describes capability for a Secure Encrypted +# Virtualization feature. # # @pdh: Platform Diffie-Hellman key (base64 encoded) # @@ -144,8 +142,8 @@ # # @cbitpos: C-bit location in page table entry # -# @reduced-phys-bits: Number of physical Address bit reduction when SEV is -# enabled +# @reduced-phys-bits: Number of physical Address bit reduction when +# SEV is enabled # # Since: 2.12 ## @@ -160,8 +158,8 @@ ## # @query-sev-capabilities: # -# This command is used to get the SEV capabilities, and is supported on AMD -# X86 platforms only. +# This command is used to get the SEV capabilities, and is supported +# on AMD X86 platforms only. # # Returns: SevCapability objects. # @@ -172,8 +170,7 @@ # -> { "execute": "query-sev-capabilities" } # <- { "return": { "pdh": "8CCDD8DDD", "cert-chain": "888CCCDDDEE", # "cpu0-id": "2lvmGwo+...61iEinw==", -# "cbitpos": 47, "reduced-phys-bits": 5}} -# +# "cbitpos": 47, "reduced-phys-bits": 1}} ## { 'command': 'query-sev-capabilities', 'returns': 'SevCapability', 'if': 'TARGET_I386' } @@ -216,7 +213,7 @@ # supported on AMD X86 platforms only. # # @mnonce: a random 16 bytes value encoded in base64 (it will be -# included in report) +# included in report) # # Returns: SevAttestationReport objects. # @@ -227,7 +224,6 @@ # -> { "execute" : "query-sev-attestation-report", # "arguments": { "mnonce": "aaaaaaa" } } # <- { "return" : { "data": "aaaaaaaabbbddddd"} } -# ## { 'command': 'query-sev-attestation-report', 'data': { 'mnonce': 'str' }, @@ -250,7 +246,6 @@ # -> { "execute": "dump-skeys", # "arguments": { "filename": "/tmp/skeys" } } # <- { "return": {} } -# ## { 'command': 'dump-skeys', 'data': { 'filename': 'str' }, @@ -260,18 +255,18 @@ # @GICCapability: # # The struct describes capability for a specific GIC (Generic -# Interrupt Controller) version. These bits are not only decided by -# QEMU/KVM software version, but also decided by the hardware that -# the program is running upon. +# Interrupt Controller) version. These bits are not only decided by +# QEMU/KVM software version, but also decided by the hardware that the +# program is running upon. # -# @version: version of GIC to be described. Currently, only 2 and 3 -# are supported. +# @version: version of GIC to be described. Currently, only 2 and 3 +# are supported. # # @emulated: whether current QEMU/hardware supports emulated GIC -# device in user space. +# device in user space. # -# @kernel: whether current QEMU/hardware supports hardware -# accelerated GIC device in kernel. +# @kernel: whether current QEMU/hardware supports hardware accelerated +# GIC device in kernel. # # Since: 2.6 ## @@ -284,7 +279,7 @@ ## # @query-gic-capabilities: # -# This command is ARM-only. It will return a list of GICCapability +# This command is ARM-only. 
It will return a list of GICCapability
# objects that describe its capability bits.
#
# Returns: a list of GICCapability objects.
@@ -296,7 +291,6 @@
# -> { "execute": "query-gic-capabilities" }
# <- { "return": [{ "version": 2, "emulated": true, "kernel": false },
# { "version": 3, "emulated": false, "kernel": true } ] }
-#
##
{ 'command': 'query-gic-capabilities', 'returns': ['GICCapability'], 'if': 'TARGET_ARM' }
@@ -357,7 +351,6 @@
# "flc": true,
# "sections": [{"node": 0, "size": 67108864},
# {"node": 1, "size": 29360128}]} }
-#
##
{ 'command': 'query-sgx', 'returns': 'SGXInfo', 'if': 'TARGET_I386' }
@@ -377,6 +370,120 @@
# "flc": true,
# "section" : [{"node": 0, "size": 67108864},
# {"node": 1, "size": 29360128}]} }
-#
##
{ 'command': 'query-sgx-capabilities', 'returns': 'SGXInfo', 'if': 'TARGET_I386' }
+
+
+##
+# @EvtchnPortType:
+#
+# An enumeration of Xen event channel port types.
+#
+# @closed: The port is unused.
+#
+# @unbound: The port is allocated and ready to be bound.
+#
+# @interdomain: The port is connected as an interdomain interrupt.
+#
+# @pirq: The port is bound to a physical IRQ (PIRQ).
+#
+# @virq: The port is bound to a virtual IRQ (VIRQ).
+#
+# @ipi: The port is an inter-processor interrupt (IPI).
+#
+# Since: 8.0
+##
+{ 'enum': 'EvtchnPortType',
+ 'data': ['closed', 'unbound', 'interdomain', 'pirq', 'virq', 'ipi'],
+ 'if': 'TARGET_I386' }
+
+##
+# @EvtchnInfo:
+#
+# Information about a Xen event channel port
+#
+# @port: the port number
+#
+# @vcpu: target vCPU for this port
+#
+# @type: the port type
+#
+# @remote-domain: remote domain for interdomain ports
+#
+# @target: remote port ID, or virq/pirq number
+#
+# @pending: port is currently active pending delivery
+#
+# @masked: port is masked
+#
+# Since: 8.0
+##
+{ 'struct': 'EvtchnInfo',
+ 'data': {'port': 'uint16',
+ 'vcpu': 'uint32',
+ 'type': 'EvtchnPortType',
+ 'remote-domain': 'str',
+ 'target': 'uint16',
+ 'pending': 'bool',
+ 'masked': 'bool'},
+ 'if': 'TARGET_I386' }
+
+
+##
+# @xen-event-list:
+#
+# Query the Xen event channels opened by the guest.
+#
+# Returns: list of open event channel ports.
+#
+# Since: 8.0
+#
+# Example:
+#
+# -> { "execute": "xen-event-list" }
+# <- { "return": [
+# {
+# "pending": false,
+# "port": 1,
+# "vcpu": 1,
+# "remote-domain": "qemu",
+# "masked": false,
+# "type": "interdomain",
+# "target": 1
+# },
+# {
+# "pending": false,
+# "port": 2,
+# "vcpu": 0,
+# "remote-domain": "",
+# "masked": false,
+# "type": "virq",
+# "target": 0
+# }
+# ]
+# }
+##
+{ 'command': 'xen-event-list',
+ 'returns': ['EvtchnInfo'],
+ 'if': 'TARGET_I386' }
+
+##
+# @xen-event-inject:
+#
+# Inject a Xen event channel port (interrupt) to the guest.
+#
+# @port: The port number
+#
+# Returns:
+# - Nothing on success.
+#
+# Since: 8.0
+#
+# Example:
+#
+# -> { "execute": "xen-event-inject", "arguments": { "port": 1 } }
+# <- { "return": { } }
+##
+{ 'command': 'xen-event-inject',
+ 'data': { 'port': 'uint32' },
+ 'if': 'TARGET_I386' }
diff --git a/qapi/misc.json b/qapi/misc.json
index 27ef5a2b20..ff070ec828 100644
--- a/qapi/misc.json
+++ b/qapi/misc.json
@@ -11,19 +11,22 @@
##
# @add_client:
#
-# Allow client connections for VNC, Spice and socket based
-# character devices to be passed in to QEMU via SCM_RIGHTS.
+# Allow client connections for VNC, Spice and socket based character
+# devices to be passed in to QEMU via SCM_RIGHTS.
#
-# @protocol: protocol name. Valid names are "vnc", "spice", "@dbus-display" or
-# the name of a character device (eg. 
from -chardev id=XXXX) +# If the FD associated with @fdname is not a socket, the command will +# fail and the FD will be closed. +# +# @protocol: protocol name. Valid names are "vnc", "spice", +# "@dbus-display" or the name of a character device (eg. from +# -chardev id=XXXX) # # @fdname: file descriptor name previously passed via 'getfd' command # -# @skipauth: whether to skip authentication. Only applies -# to "vnc" and "spice" protocols +# @skipauth: whether to skip authentication. Only applies to "vnc" +# and "spice" protocols # -# @tls: whether to perform TLS. Only applies to the "spice" -# protocol +# @tls: whether to perform TLS. Only applies to the "spice" protocol # # Returns: nothing on success. # @@ -34,7 +37,6 @@ # -> { "execute": "add_client", "arguments": { "protocol": "vnc", # "fdname": "myclient" } } # <- { "return": {} } -# ## { 'command': 'add_client', 'data': { 'protocol': 'str', 'fdname': 'str', '*skipauth': 'bool', @@ -64,7 +66,6 @@ # # -> { "execute": "query-name" } # <- { "return": { "name": "qemu-name" } } -# ## { 'command': 'query-name', 'returns': 'NameInfo', 'allow-preconfig': true } @@ -77,17 +78,17 @@ # # @thread-id: ID of the underlying host thread # -# @poll-max-ns: maximum polling time in ns, 0 means polling is disabled -# (since 2.9) +# @poll-max-ns: maximum polling time in ns, 0 means polling is +# disabled (since 2.9) # -# @poll-grow: how many ns will be added to polling time, 0 means that it's not -# configured (since 2.9) +# @poll-grow: how many ns will be added to polling time, 0 means that +# it's not configured (since 2.9) # -# @poll-shrink: how many ns will be removed from polling time, 0 means that -# it's not configured (since 2.9) +# @poll-shrink: how many ns will be removed from polling time, 0 means +# that it's not configured (since 2.9) # -# @aio-max-batch: maximum number of requests in a batch for the AIO engine, -# 0 means that the engine will use its default (since 6.1) +# @aio-max-batch: maximum number of requests in a batch for the AIO +# engine, 0 means that the engine will use its default (since 6.1) # # Since: 2.0 ## @@ -104,9 +105,9 @@ # # Returns a list of information about each iothread. # -# Note: this list excludes the QEMU main loop thread, which is not declared -# using the -object iothread command-line option. It is always the main thread -# of the process. +# Note: this list excludes the QEMU main loop thread, which is not +# declared using the -object iothread command-line option. It is +# always the main thread of the process. # # Returns: a list of @IOThreadInfo for each iothread # @@ -126,7 +127,6 @@ # } # ] # } -# ## { 'command': 'query-iothreads', 'returns': ['IOThreadInfo'], 'allow-preconfig': true } @@ -138,16 +138,15 @@ # # Since: 0.14 # -# Notes: This function will succeed even if the guest is already in the stopped -# state. In "inmigrate" state, it will ensure that the guest -# remains paused once migration finishes, as if the -S option was -# passed on the command line. +# Notes: This function will succeed even if the guest is already in +# the stopped state. In "inmigrate" state, it will ensure that +# the guest remains paused once migration finishes, as if the -S +# option was passed on the command line. # # Example: # # -> { "execute": "stop" } # <- { "return": {} } -# ## { 'command': 'stop' } @@ -160,17 +159,16 @@ # # Returns: If successful, nothing # -# Notes: This command will succeed if the guest is currently running. 
It -# will also succeed if the guest is in the "inmigrate" state; in -# this case, the effect of the command is to make sure the guest -# starts once migration finishes, removing the effect of the -S -# command line option if it was passed. +# Notes: This command will succeed if the guest is currently running. +# It will also succeed if the guest is in the "inmigrate" state; +# in this case, the effect of the command is to make sure the +# guest starts once migration finishes, removing the effect of the +# -S command line option if it was passed. # # Example: # # -> { "execute": "cont" } # <- { "return": {} } -# ## { 'command': 'cont' } @@ -179,13 +177,14 @@ # # Exit from "preconfig" state # -# This command makes QEMU exit the preconfig state and proceed with -# VM initialization using configuration data provided on the command line -# and via the QMP monitor during the preconfig state. The command is only -# available during the preconfig state (i.e. when the --preconfig command -# line option was in use). +# This command makes QEMU exit the preconfig state and proceed with VM +# initialization using configuration data provided on the command line +# and via the QMP monitor during the preconfig state. The command is +# only available during the preconfig state (i.e. when the --preconfig +# command line option was in use). # # Features: +# # @unstable: This command is experimental. # # Since: 3.0 @@ -196,7 +195,6 @@ # # -> { "execute": "x-exit-preconfig" } # <- { "return": {} } -# ## { 'command': 'x-exit-preconfig', 'allow-preconfig': true, 'features': [ 'unstable' ] } @@ -211,35 +209,33 @@ # @cpu-index: The CPU to use for commands that require an implicit CPU # # Features: +# # @savevm-monitor-nodes: If present, HMP command savevm only snapshots -# monitor-owned nodes if they have no parents. -# This allows the use of 'savevm' with -# -blockdev. (since 4.2) +# monitor-owned nodes if they have no parents. This allows the +# use of 'savevm' with -blockdev. (since 4.2) # # Returns: the output of the command as a string # # Since: 0.14 # # Notes: This command only exists as a stop-gap. Its use is highly -# discouraged. The semantics of this command are not -# guaranteed: this means that command names, arguments and -# responses can change or be removed at ANY time. Applications -# that rely on long term stability guarantees should NOT -# use this command. +# discouraged. The semantics of this command are not guaranteed: +# this means that command names, arguments and responses can +# change or be removed at ANY time. Applications that rely on +# long term stability guarantees should NOT use this command. # -# Known limitations: +# Known limitations: # -# * This command is stateless, this means that commands that depend -# on state information (such as getfd) might not work +# * This command is stateless, this means that commands that +# depend on state information (such as getfd) might not work # -# * Commands that prompt the user for data don't currently work +# * Commands that prompt the user for data don't currently work # # Example: # # -> { "execute": "human-monitor-command", # "arguments": { "command-line": "info kvm" } } # <- { "return": "kvm support: enabled\r\n" } -# ## { 'command': 'human-monitor-command', 'data': {'command-line': 'str', '*cpu-index': 'int'}, @@ -257,20 +253,47 @@ # # Since: 0.14 # -# Notes: If @fdname already exists, the file descriptor assigned to -# it will be closed and replaced by the received file -# descriptor. 
+# Notes: If @fdname already exists, the file descriptor assigned to it
+# will be closed and replaced by the received file descriptor.
#
-# The 'closefd' command can be used to explicitly close the
-# file descriptor when it is no longer needed.
+# The 'closefd' command can be used to explicitly close the file
+# descriptor when it is no longer needed.
#
# Example:
#
# -> { "execute": "getfd", "arguments": { "fdname": "fd1" } }
# <- { "return": {} }
-#
##
-{ 'command': 'getfd', 'data': {'fdname': 'str'} }
+{ 'command': 'getfd', 'data': {'fdname': 'str'}, 'if': 'CONFIG_POSIX' }
+
+##
+# @get-win32-socket:
+#
+# Add a socket that was duplicated to the QEMU process with
+# WSADuplicateSocketW() via WSASocket() & WSAPROTOCOL_INFOW structure
+# and assign it a name (the SOCKET is associated with a CRT file
+# descriptor)
+#
+# @info: the WSAPROTOCOL_INFOW structure (encoded in base64)
+#
+# @fdname: file descriptor name
+#
+# Returns: Nothing on success
+#
+# Since: 8.0
+#
+# Notes: If @fdname already exists, the file descriptor assigned to it
+# will be closed and replaced by the received file descriptor.
+#
+# The 'closefd' command can be used to explicitly close the file
+# descriptor when it is no longer needed.
+#
+# Example:
+#
+# -> { "execute": "get-win32-socket", "arguments": { "info": "abcd123..", "fdname": "skclient" } }
+# <- { "return": {} }
+##
+{ 'command': 'get-win32-socket', 'data': {'info': 'str', 'fdname': 'str'}, 'if': 'CONFIG_WIN32' }

##
# @closefd:
@@ -287,7 +310,6 @@
#
# -> { "execute": "closefd", "arguments": { "fdname": "fd1" } }
# <- { "return": {} }
-#
##
{ 'command': 'closefd', 'data': {'fdname': 'str'} }

@@ -298,8 +320,8 @@
#
# @fdset-id: The ID of the fd set that @fd was added to.
#
-# @fd: The file descriptor that was received via SCM rights and
-# added to the fd set.
+# @fd: The file descriptor that was received via SCM rights and added
+# to the fd set.
#
# Since: 1.2
##
@@ -314,13 +336,14 @@
#
# @opaque: A free-form string that can be used to describe the fd.
#
-# Returns: - @AddfdInfo on success
-# - If file descriptor was not received, FdNotSupplied
-# - If @fdset-id is a negative value, InvalidParameterValue
+# Returns:
+# - @AddfdInfo on success
+# - If file descriptor was not received, GenericError
+# - If @fdset-id is a negative value, GenericError
#
# Notes: The list of fd sets is shared by all monitor connections.
#
-# If @fdset-id is not specified, a new fd set will be created.
+# If @fdset-id is not specified, a new fd set will be created.
#
# Since: 1.2
#
@@ -328,7 +351,6 @@
#
# -> { "execute": "add-fd", "arguments": { "fdset-id": 1 } }
# <- { "return": { "fdset-id": 1, "fd": 3 } }
-#
##
{ 'command': 'add-fd',
'data': { '*fdset-id': 'int',
@@ -344,21 +366,21 @@
#
# @fd: The file descriptor that is to be removed.
#
-# Returns: - Nothing on success
-# - If @fdset-id or @fd is not found, FdNotFound
+# Returns:
+# - Nothing on success
+# - If @fdset-id or @fd is not found, GenericError
#
# Since: 1.2
#
# Notes: The list of fd sets is shared by all monitor connections.
#
-# If @fd is not specified, all file descriptors in @fdset-id
-# will be removed.
+# If @fd is not specified, all file descriptors in @fdset-id will be
+# removed.
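[Editor's note: to make the getfd mechanism above concrete, here is a minimal stdlib-only Python sketch of passing a descriptor to QEMU as SCM_RIGHTS ancillary data on the same Unix socket that carries the QMP traffic. The socket path and the file being handed over are placeholders, not values from the schema.]

```python
import array, json, os, socket

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect("/tmp/qmp.sock")                  # assumption: -qmp unix:/tmp/qmp.sock,server=on
f = s.makefile("rw", encoding="utf-8")
json.loads(f.readline())                    # QMP greeting banner
f.write(json.dumps({"execute": "qmp_capabilities"}) + "\n"); f.flush()
json.loads(f.readline())                    # {"return": {}}

fd = os.open("/dev/null", os.O_RDWR)        # placeholder fd to hand over
cmd = json.dumps({"execute": "getfd", "arguments": {"fdname": "fd1"}}).encode()
# The descriptor rides along as SCM_RIGHTS ancillary data with the command bytes.
s.sendmsg([cmd + b"\n"],
          [(socket.SOL_SOCKET, socket.SCM_RIGHTS, array.array("i", [fd]))])
print(json.loads(f.readline()))             # {"return": {}} on success
```

[The same descriptor-passing pattern backs add-fd below; on Windows hosts, where SCM_RIGHTS does not exist, get-win32-socket plays the equivalent role.]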
#
# Example:
#
# -> { "execute": "remove-fd", "arguments": { "fdset-id": 1, "fd": 3 } }
# <- { "return": {} }
-#
##
{ 'command': 'remove-fd', 'data': {'fdset-id': 'int', '*fd': 'int'} }
@@ -431,7 +453,6 @@
# }
# ]
# }
-#
##
{ 'command': 'query-fdsets', 'returns': ['FdsetInfo'] }
@@ -447,7 +468,7 @@
# @number: accepts a number
#
# @size: accepts a number followed by an optional suffix (K)ilo,
-# (M)ega, (G)iga, (T)era
+# (M)ega, (G)iga, (T)era
#
# Since: 1.5
##
@@ -478,7 +499,8 @@
##
# @CommandLineOptionInfo:
#
-# Details about a command line option, including its list of parameter details
+# Details about a command line option, including its list of parameter
+# details
#
# @option: option name
#
@@ -496,8 +518,9 @@
#
# @option: option name
#
-# Returns: list of @CommandLineOptionInfo for all options (or for the given
-# @option). Returns an error if the given @option doesn't exist.
+# Returns: list of @CommandLineOptionInfo for all options (or for the
+# given @option). Returns an error if the given @option doesn't
+# exist.
#
# Since: 1.5
#
@@ -521,35 +544,33 @@
# }
# ]
# }
-#
##
{'command': 'query-command-line-options',
- 'data': { '*option': 'str' },
+ 'data': {'*option': 'str'},
'returns': ['CommandLineOptionInfo'],
- 'allow-preconfig': true }
+ 'allow-preconfig': true}

##
# @RTC_CHANGE:
#
# Emitted when the guest changes the RTC time.
#
-# @offset: offset in seconds between base RTC clock (as specified
-# by -rtc base), and new RTC clock value
+# @offset: offset in seconds between base RTC clock (as specified by
+# -rtc base), and new RTC clock value
#
# @qom-path: path to the RTC object in the QOM tree
#
-# Note: This event is rate-limited.
-# It is not guaranteed that the RTC in the system implements
-# this event, or even that the system has an RTC at all.
+# Note: This event is rate-limited. It is not guaranteed that the RTC
+# in the system implements this event, or even that the system has
+# an RTC at all.
#
# Since: 0.13
#
# Example:
#
-# <- { "event": "RTC_CHANGE",
-# "data": { "offset": 78 },
-# "timestamp": { "seconds": 1267020223, "microseconds": 435656 } }
-#
+# <- { "event": "RTC_CHANGE",
+# "data": { "offset": 78 },
+# "timestamp": { "seconds": 1267020223, "microseconds": 435656 } }
##
{ 'event': 'RTC_CHANGE',
'data': { 'offset': 'int', 'qom-path': 'str' } }
@@ -560,10 +581,11 @@
# Emitted when the client of a TYPE_VFIO_USER_SERVER closes the
# communication channel
#
-# @vfu-id: ID of the TYPE_VFIO_USER_SERVER object. It is the last component
-# of @vfu-qom-path referenced below
+# @vfu-id: ID of the TYPE_VFIO_USER_SERVER object. It is the last
+# component of @vfu-qom-path referenced below
#
-# @vfu-qom-path: path to the TYPE_VFIO_USER_SERVER object in the QOM tree
+# @vfu-qom-path: path to the TYPE_VFIO_USER_SERVER object in the QOM
+# tree
#
# @dev-id: ID of attached PCI device
#
@@ -579,7 +601,6 @@
# "dev-id": "sas1",
# "dev-qom-path": "/machine/peripheral/sas1" },
# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
-#
##
{ 'event': 'VFU_CLIENT_HANGUP',
'data': { 'vfu-id': 'str', 'vfu-qom-path': 'str',
diff --git a/qapi/net.json b/qapi/net.json
index d6eb30008b..db67501308 100644
--- a/qapi/net.json
+++ b/qapi/net.json
@@ -18,21 +18,20 @@
#
# @up: true to set the link status to be up
#
-# Returns: Nothing on success
-# If @name is not a valid network device, DeviceNotFound
+# Returns: Nothing on success. If @name is not a valid network
+# device, DeviceNotFound
#
# Since: 0.14
#
-# Notes: Not all network adapters support setting link status. This command
-# will succeed even if the network adapter does not support link status
-# notification.
+# Notes: Not all network adapters support setting link status. This
+# command will succeed even if the network adapter does not
+# support link status notification.
#
# Example:
#
# -> { "execute": "set_link",
# "arguments": { "name": "e1000.0", "up": false } }
# <- { "return": {} }
-#
##
{ 'command': 'set_link', 'data': {'name': 'str', 'up': 'bool'} }
@@ -45,8 +44,8 @@
#
# Since: 0.14
#
-# Returns: Nothing on success
-# If @type is not a valid network backend, DeviceNotFound
+# Returns: Nothing on success. If @type is not a valid network
+# backend, DeviceNotFound
#
# Example:
#
@@ -54,7 +53,6 @@
# "arguments": { "type": "user", "id": "netdev1",
# "dnssearch": [ { "str": "example.org" } ] } }
# <- { "return": {} }
-#
##
{ 'command': 'netdev_add', 'data': 'Netdev', 'boxed': true,
'allow-preconfig': true }
@@ -66,8 +64,8 @@
#
# @id: the name of the network backend to remove
#
-# Returns: Nothing on success
-# If @id is not a valid network backend, DeviceNotFound
+# Returns: Nothing on success. If @id is not a valid network backend,
+# DeviceNotFound
#
# Since: 0.14
#
@@ -75,7 +73,6 @@
#
# -> { "execute": "netdev_del", "arguments": { "id": "netdev1" } }
# <- { "return": {} }
-#
##
{ 'command': 'netdev_del', 'data': {'id': 'str'},
'allow-preconfig': true }
@@ -108,25 +105,23 @@
##
# @NetdevUserOptions:
#
-# Use the user mode network stack which requires no administrator privilege to
-# run.
+# Use the user mode network stack which requires no administrator
+# privilege to run.
#
# @hostname: client hostname reported by the builtin DHCP server
#
# @restrict: isolate the guest from the host
#
-# @ipv4: whether to support IPv4, default true for enabled
-# (since 2.6)
+# @ipv4: whether to support IPv4, default true for enabled (since 2.6)
#
-# @ipv6: whether to support IPv6, default true for enabled
-# (since 2.6)
+# @ipv6: whether to support IPv6, default true for enabled (since 2.6)
#
# @ip: legacy parameter, use net= instead
#
-# @net: IP network address that the guest will see, in the
-# form addr[/netmask] The netmask is optional, and can be
-# either in the form a.b.c.d or as a number of valid top-most
-# bits. Default is 10.0.2.0/24.
+# @net: IP network address that the guest will see, in the form
+# addr[/netmask]. The netmask is optional, and can be either in the
+# form a.b.c.d or as a number of valid top-most bits. Default is
+# 10.0.2.0/24.
#
# @host: guest-visible address of the host
#
@@ -135,34 +130,34 @@
# @bootfile: BOOTP filename, for use with tftp=
#
# @dhcpstart: the first of the 16 IPs the built-in DHCP server can
-# assign
+# assign
#
# @dns: guest-visible address of the virtual nameserver
#
-# @dnssearch: list of DNS suffixes to search, passed as DHCP option
-# to the guest
+# @dnssearch: list of DNS suffixes to search, passed as DHCP option to
+# the guest
#
# @domainname: guest-visible domain name of the virtual nameserver
-# (since 3.0)
+# (since 3.0)
#
-# @ipv6-prefix: IPv6 network prefix (default is fec0::) (since
-# 2.6). The network prefix is given in the usual
-# hexadecimal IPv6 address notation.
+# @ipv6-prefix: IPv6 network prefix (default is fec0::) (since 2.6).
+# The network prefix is given in the usual hexadecimal IPv6
+# address notation.
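[Editor's note: a hedged sketch of the netdev hotplug flow documented above — add a user-mode backend, toggle link state, remove the backend. The socket path, NIC name and forwarding rule are assumptions for illustration.]

```python
import json, socket

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect("/tmp/qmp.sock")                  # assumption: QMP Unix socket
f = s.makefile("rw", encoding="utf-8")
json.loads(f.readline())                    # greeting

def qmp(cmd, args=None):
    msg = {"execute": cmd}
    if args is not None:
        msg["arguments"] = args
    f.write(json.dumps(msg) + "\n"); f.flush()
    while True:
        reply = json.loads(f.readline())
        if "event" not in reply:            # skip interleaved async events
            return reply

qmp("qmp_capabilities")
# Add a user-mode backend forwarding host port 2222 to guest port 22.
qmp("netdev_add", {"type": "user", "id": "netdev1",
                   "hostfwd": [{"str": "tcp::2222-:22"}]})
qmp("set_link", {"name": "e1000.0", "up": False})   # assumption: an existing NIC id
qmp("netdev_del", {"id": "netdev1"})
```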
# -# @ipv6-prefixlen: IPv6 network prefix length (default is 64) -# (since 2.6) +# @ipv6-prefixlen: IPv6 network prefix length (default is 64) (since +# 2.6) # # @ipv6-host: guest-visible IPv6 address of the host (since 2.6) # -# @ipv6-dns: guest-visible IPv6 address of the virtual -# nameserver (since 2.6) +# @ipv6-dns: guest-visible IPv6 address of the virtual nameserver +# (since 2.6) # # @smb: root directory of the built-in SMB server # # @smbserver: IP address of the built-in SMB server # # @hostfwd: redirect incoming TCP or UDP host connections to guest -# endpoints +# endpoints # # @guestfwd: forward guest TCP connections # @@ -205,7 +200,7 @@ # @fd: file descriptor of an already opened tap # # @fds: multiple file descriptors of already opened multiqueue capable -# tap +# tap # # @script: script to initialize the interface # @@ -215,7 +210,7 @@ # # @helper: command to execute to configure bridge # -# @sndbuf: send buffer limit. Understands [TGMKkb] suffixes. +# @sndbuf: send buffer limit. Understands [TGMKkb] suffixes. # # @vnet_hdr: enable the IFF_VNET_HDR flag on the tap interface # @@ -224,14 +219,14 @@ # @vhostfd: file descriptor of an already opened vhost net device # # @vhostfds: file descriptors of multiple already opened vhost net -# devices +# devices # # @vhostforce: vhost on for non-MSIX virtio guests # # @queues: number of queues to be created for multiqueue capable tap # -# @poll-us: maximum number of microseconds that could -# be spent on busy polling for tap (since 2.7) +# @poll-us: maximum number of microseconds that could be spent on busy +# polling for tap (since 2.7) # # Since: 1.2 ## @@ -303,9 +298,8 @@ # # @counter: have sequence counter # -# @pincounter: pin sequence counter to zero - -# workaround for buggy implementations or -# networks with packet reorder +# @pincounter: pin sequence counter to zero - workaround for buggy +# implementations or networks with packet reorder # # @txcookie: 32 or 64 bit transmit cookie # @@ -313,11 +307,11 @@ # # @txsession: 32 bit transmit session # -# @rxsession: 32 bit receive session - if not specified -# set to the same value as transmit +# @rxsession: 32 bit receive session - if not specified set to the +# same value as transmit # -# @offset: additional offset - allows the insertion of -# additional application-specific data before the packet payload +# @offset: additional offset - allows the insertion of additional +# application-specific data before the packet payload # # Since: 2.1 ## @@ -382,7 +376,9 @@ # Connect two or more net clients through a software hub. # # @hubid: hub identifier number -# @netdev: used to connect hub to a netdev instead of a device (since 2.12) +# +# @netdev: used to connect hub to a netdev instead of a device (since +# 2.12) # # Since: 1.2 ## @@ -396,12 +392,12 @@ # # Connect a client to a netmap-enabled NIC or to a VALE switch port # -# @ifname: Either the name of an existing network interface supported by -# netmap, or the name of a VALE port (created on the fly). -# A VALE port name is in the form 'valeXXX:YYY', where XXX and -# YYY are non-negative integers. XXX identifies a switch and -# YYY identifies a port of the switch. VALE ports having the -# same XXX are therefore connected to the same switch. +# @ifname: Either the name of an existing network interface supported +# by netmap, or the name of a VALE port (created on the fly). A +# VALE port name is in the form 'valeXXX:YYY', where XXX and YYY +# are non-negative integers. 
XXX identifies a switch and YYY +# identifies a port of the switch. VALE ports having the same XXX +# are therefore connected to the same switch. # # @devname: path of the netmap device (default: '/dev/netmap'). # @@ -422,7 +418,7 @@ # @vhostforce: vhost on for non-MSIX virtio guests (default: false). # # @queues: number of queues to be created for multiqueue vhost-user -# (default: 1) (Since 2.5) +# (default: 1) (Since 2.5) # # Since: 2.1 ## @@ -437,21 +433,21 @@ # # Vhost-vdpa network backend # -# vDPA device is a device that uses a datapath which complies with the virtio -# specifications with a vendor specific control path. +# vDPA device is a device that uses a datapath which complies with the +# virtio specifications with a vendor specific control path. # -# @vhostdev: path of vhost-vdpa device -# (default:'/dev/vhost-vdpa-0') +# @vhostdev: path of vhost-vdpa device (default:'/dev/vhost-vdpa-0') # # @vhostfd: file descriptor of an already opened vhost vdpa device # # @queues: number of queues to be created for multiqueue vhost-vdpa -# (default: 1) +# (default: 1) # -# @x-svq: Start device with (experimental) shadow virtqueue. (Since 7.1) -# (default: false) +# @x-svq: Start device with (experimental) shadow virtqueue. (Since +# 7.1) (default: false) # # Features: +# # @unstable: Member @x-svq is experimental. # # Since: 5.1 @@ -472,31 +468,28 @@ # interfaces that are in host mode and also with the host. # # @start-address: The starting IPv4 address to use for the interface. -# Must be in the private IP range (RFC 1918). Must be -# specified along with @end-address and @subnet-mask. -# This address is used as the gateway address. The -# subsequent address up to and including end-address are -# placed in the DHCP pool. +# Must be in the private IP range (RFC 1918). Must be specified +# along with @end-address and @subnet-mask. This address is used +# as the gateway address. The subsequent address up to and +# including end-address are placed in the DHCP pool. # # @end-address: The DHCP IPv4 range end address to use for the -# interface. Must be in the private IP range (RFC 1918). -# Must be specified along with @start-address and -# @subnet-mask. +# interface. Must be in the private IP range (RFC 1918). Must be +# specified along with @start-address and @subnet-mask. # -# @subnet-mask: The IPv4 subnet mask to use on the interface. Must -# be specified along with @start-address and @subnet-mask. +# @subnet-mask: The IPv4 subnet mask to use on the interface. Must be +# specified along with @start-address and @subnet-mask. # -# @isolated: Enable isolation for this interface. Interface isolation -# ensures that vmnet interface is not able to communicate -# with any other vmnet interfaces. Only communication with -# host is allowed. Requires at least macOS Big Sur 11.0. +# @isolated: Enable isolation for this interface. Interface isolation +# ensures that vmnet interface is not able to communicate with any +# other vmnet interfaces. Only communication with host is +# allowed. Requires at least macOS Big Sur 11.0. # # @net-uuid: The identifier (UUID) to uniquely identify the isolated -# network vmnet interface should be added to. If -# set, no DHCP service is provided for this interface and -# network communication is allowed only with other interfaces -# added to this network identified by the UUID. Requires -# at least macOS Big Sur 11.0. +# network vmnet interface should be added to. 
If set, no DHCP +# service is provided for this interface and network communication +# is allowed only with other interfaces added to this network +# identified by the UUID. Requires at least macOS Big Sur 11.0. # # Since: 7.1 ## @@ -515,34 +508,33 @@ # vmnet (shared mode) network backend. # # Allows traffic originating from the vmnet interface to reach the -# Internet through a network address translator (NAT). -# The vmnet interface can communicate with the host and with -# other shared mode interfaces on the same subnet. If no DHCP -# settings, subnet mask and IPv6 prefix specified, the interface can -# communicate with any of other interfaces in shared mode. +# Internet through a network address translator (NAT). The vmnet +# interface can communicate with the host and with other shared mode +# interfaces on the same subnet. If no DHCP settings, subnet mask and +# IPv6 prefix specified, the interface can communicate with any of +# other interfaces in shared mode. # # @start-address: The starting IPv4 address to use for the interface. -# Must be in the private IP range (RFC 1918). Must be -# specified along with @end-address and @subnet-mask. -# This address is used as the gateway address. The -# subsequent address up to and including end-address are -# placed in the DHCP pool. +# Must be in the private IP range (RFC 1918). Must be specified +# along with @end-address and @subnet-mask. This address is used +# as the gateway address. The subsequent address up to and +# including end-address are placed in the DHCP pool. # # @end-address: The DHCP IPv4 range end address to use for the -# interface. Must be in the private IP range (RFC 1918). -# Must be specified along with @start-address and @subnet-mask. +# interface. Must be in the private IP range (RFC 1918). Must be +# specified along with @start-address and @subnet-mask. # -# @subnet-mask: The IPv4 subnet mask to use on the interface. Must -# be specified along with @start-address and @subnet-mask. +# @subnet-mask: The IPv4 subnet mask to use on the interface. Must be +# specified along with @start-address and @subnet-mask. # -# @isolated: Enable isolation for this interface. Interface isolation -# ensures that vmnet interface is not able to communicate -# with any other vmnet interfaces. Only communication with -# host is allowed. Requires at least macOS Big Sur 11.0. +# @isolated: Enable isolation for this interface. Interface isolation +# ensures that vmnet interface is not able to communicate with any +# other vmnet interfaces. Only communication with host is +# allowed. Requires at least macOS Big Sur 11.0. # -# @nat66-prefix: The IPv6 prefix to use into guest network. Must be a -# unique local address i.e. start with fd00::/8 and have -# length of 64. +# @nat66-prefix: The IPv6 prefix to use into guest network. Must be a +# unique local address i.e. start with fd00::/8 and have length of +# 64. # # Since: 7.1 ## @@ -564,10 +556,10 @@ # # @ifname: The name of the physical interface to be bridged. # -# @isolated: Enable isolation for this interface. Interface isolation -# ensures that vmnet interface is not able to communicate -# with any other vmnet interfaces. Only communication with -# host is allowed. Requires at least macOS Big Sur 11.0. +# @isolated: Enable isolation for this interface. Interface isolation +# ensures that vmnet interface is not able to communicate with any +# other vmnet interfaces. Only communication with host is +# allowed. Requires at least macOS Big Sur 11.0. 
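[Editor's note: the vmnet address parameters above hang together — @start-address doubles as the gateway, and everything from the next address through @end-address feeds the DHCP pool. Below is a sketch of the corresponding netdev_add arguments; all addresses and ids are placeholders, and a macOS Big Sur 11.0 or newer host is assumed.]

```python
import json

# A Python dict mirroring the QAPI structure for a vmnet-shared backend.
vmnet_shared = {
    "type": "vmnet-shared",
    "id": "vmnet0",                    # placeholder id
    "start-address": "10.0.3.1",       # gateway; 10.0.3.2 onward go to the DHCP pool
    "end-address": "10.0.3.254",       # last address handed out by DHCP
    "subnet-mask": "255.255.255.0",
    "isolated": False,                 # allow talking to other shared-mode interfaces
}
print(json.dumps({"execute": "netdev_add", "arguments": vmnet_shared}))
```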
# # Since: 7.1 ## @@ -582,13 +574,14 @@ # # Configuration info for stream socket netdev # -# @addr: socket address to listen on (server=true) -# or connect to (server=false) +# @addr: socket address to listen on (server=true) or connect to +# (server=false) +# # @server: create server socket (default: false) -# @reconnect: For a client socket, if a socket is disconnected, -# then attempt a reconnect after the given number of seconds. -# Setting this to zero disables this function. (default: 0) -# (since 8.0) +# +# @reconnect: For a client socket, if a socket is disconnected, then +# attempt a reconnect after the given number of seconds. Setting +# this to zero disables this function. (default: 0) (since 8.0) # # Only SocketAddress types 'unix', 'inet' and 'fd' are supported. # @@ -606,13 +599,14 @@ # Configuration info for datagram socket netdev. # # @remote: remote address +# # @local: local address # # Only SocketAddress types 'unix', 'inet' and 'fd' are supported. # -# If remote address is present and it's a multicast address, local address -# is optional. Otherwise local address is required and remote address is -# optional. +# If remote address is present and it's a multicast address, local +# address is optional. Otherwise local address is required and remote +# address is optional. # # .. table:: Valid parameters combination table # :widths: auto @@ -641,14 +635,15 @@ # # Available netdev drivers. # -# Since: 2.7 +# @l2tpv3: since 2.1 +# @vhost-vdpa: since 5.1 +# @vmnet-host: since 7.1 +# @vmnet-shared: since 7.1 +# @vmnet-bridged: since 7.1 +# @stream: since 7.2 +# @dgram: since 7.2 # -# @vhost-vdpa since 5.1 -# @vmnet-host since 7.1 -# @vmnet-shared since 7.1 -# @vmnet-bridged since 7.1 -# @stream since 7.2 -# @dgram since 7.2 +# Since: 2.7 ## { 'enum': 'NetClientDriver', 'data': [ 'none', 'nic', 'user', 'tap', 'l2tpv3', 'socket', 'stream', @@ -669,12 +664,6 @@ # # Since: 1.2 # -# 'l2tpv3' - since 2.1 -# 'vmnet-host' - since 7.1 -# 'vmnet-shared' - since 7.1 -# 'vmnet-bridged' - since 7.1 -# 'stream' since 7.2 -# 'dgram' since 7.2 ## { 'union': 'Netdev', 'base': { 'id': 'str', 'type': 'NetClientDriver' }, @@ -769,9 +758,9 @@ # @name: net client name # # Returns: list of @RxFilterInfo for all NICs (or for the given NIC). -# Returns an error if the given @name doesn't exist, or given -# NIC doesn't support rx-filter querying, or given net client -# isn't a NIC. +# Returns an error if the given @name doesn't exist, or given NIC +# doesn't support rx-filter querying, or given net client isn't a +# NIC. 
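[Editor's note: as an illustration of the stream backend's client side described above, this sketch hot-adds a TCP stream netdev that retries every 5 seconds after a disconnect. Host, port and ids are assumptions.]

```python
import json, socket

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect("/tmp/qmp.sock")                  # assumption: QMP Unix socket
f = s.makefile("rw", encoding="utf-8")
json.loads(f.readline())                    # greeting
f.write(json.dumps({"execute": "qmp_capabilities"}) + "\n"); f.flush()
json.loads(f.readline())

netdev = {"type": "stream", "id": "net0",
          "server": False,                  # connect out instead of listening
          "addr": {"type": "inet", "host": "::1", "port": "1234"},
          "reconnect": 5}                   # retry every 5 s after a disconnect (since 8.0)
f.write(json.dumps({"execute": "netdev_add", "arguments": netdev}) + "\n"); f.flush()
print(json.loads(f.readline()))
```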
#
# Since: 1.6
#
@@ -803,7 +792,6 @@
# }
# ]
# }
-#
##
{ 'command': 'query-rx-filter', 'data': { '*name': 'str' },
@@ -812,8 +800,8 @@
##
# @NIC_RX_FILTER_CHANGED:
#
-# Emitted once until the 'query-rx-filter' command is executed, the first event
-# will always be emitted
+# Emitted once until the 'query-rx-filter' command is executed, the
+# first event will always be emitted
#
# @name: net client name
#
@@ -827,7 +815,6 @@
# "data": { "name": "vnet0",
# "path": "/machine/peripheral/vnet0/virtio-backend" },
# "timestamp": { "seconds": 1368697518, "microseconds": 326866 } }
-#
##
{ 'event': 'NIC_RX_FILTER_CHANGED',
'data': { '*name': 'str', 'path': 'str' } }
@@ -838,7 +825,7 @@
# Parameters for self-announce timers
#
# @initial: Initial delay (in ms) before sending the first GARP/RARP
-# announcement
+# announcement
#
# @max: Maximum delay (in ms) between GARP/RARP announcement packets
#
@@ -846,12 +833,12 @@
#
# @step: Delay increase (in ms) after each self-announcement attempt
#
-# @interfaces: An optional list of interface names, which restricts the
-# announcement to the listed interfaces. (Since 4.1)
+# @interfaces: An optional list of interface names, which restricts
+# the announcement to the listed interfaces. (Since 4.1)
#
# @id: A name to be used to identify an instance of announce-timers
-# and to allow it to modified later. Not for use as
-# part of the migration parameters. (Since 4.1)
+# and to allow it to be modified later. Not for use as part of the
+# migration parameters. (Since 4.1)
#
# Since: 4.0
##
@@ -867,8 +854,9 @@
##
# @announce-self:
#
-# Trigger generation of broadcast RARP frames to update network switches.
-# This can be useful when network bonds fail-over the active slave.
+# Trigger generation of broadcast RARP frames to update network
+# switches. This can be useful when network bonds fail-over the
+# active slave.
#
# Example:
#
@@ -886,9 +874,10 @@
##
# @FAILOVER_NEGOTIATED:
#
-# Emitted when VIRTIO_NET_F_STANDBY was enabled during feature negotiation.
-# Failover primary devices which were hidden (not hotplugged when requested)
-# before will now be hotplugged by the virtio-net standby device.
+# Emitted when VIRTIO_NET_F_STANDBY was enabled during feature
+# negotiation. Failover primary devices which were hidden (not
+# hotplugged when requested) before will now be hotplugged by the
+# virtio-net standby device. 
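[Editor's note: the NIC_RX_FILTER_CHANGED contract above is easy to get wrong — the event fires once and then stays suppressed for that NIC until the client reads the filter state back. A hedged sketch of the re-arm loop, with the socket path assumed:]

```python
import json, socket

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect("/tmp/qmp.sock")                  # assumption: QMP Unix socket
f = s.makefile("rw", encoding="utf-8")
json.loads(f.readline())                    # greeting
f.write(json.dumps({"execute": "qmp_capabilities"}) + "\n"); f.flush()
json.loads(f.readline())

while True:
    msg = json.loads(f.readline())
    if msg.get("event") != "NIC_RX_FILTER_CHANGED":
        continue
    name = msg["data"].get("name")
    # Reading the filter state re-arms the event for this NIC.
    args = {"name": name} if name else {}
    f.write(json.dumps({"execute": "query-rx-filter",
                        "arguments": args}) + "\n"); f.flush()
    while True:                             # other events arriving here are dropped
        reply = json.loads(f.readline())
        if "event" not in reply:
            break
    print(name, reply.get("return"))
```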
# # @device-id: QEMU device id of the unplugged device # @@ -899,7 +888,6 @@ # <- { "event": "FAILOVER_NEGOTIATED", # "data": { "device-id": "net1" }, # "timestamp": { "seconds": 1368697518, "microseconds": 326866 } } -# ## { 'event': 'FAILOVER_NEGOTIATED', 'data': {'device-id': 'str'} } @@ -910,11 +898,12 @@ # Emitted when the netdev stream backend is connected # # @netdev-id: QEMU netdev id that is connected +# # @addr: The destination address # # Since: 7.2 # -# Example: +# Examples: # # <- { "event": "NETDEV_STREAM_CONNECTED", # "data": { "netdev-id": "netdev0", @@ -922,13 +911,10 @@ # "host": "::1", "type": "inet" } }, # "timestamp": { "seconds": 1666269863, "microseconds": 311222 } } # -# or -# # <- { "event": "NETDEV_STREAM_CONNECTED", # "data": { "netdev-id": "netdev0", # "addr": { "path": "/tmp/qemu0", "type": "unix" } }, # "timestamp": { "seconds": 1666269706, "microseconds": 413651 } } -# ## { 'event': 'NETDEV_STREAM_CONNECTED', 'data': { 'netdev-id': 'str', @@ -948,7 +934,6 @@ # <- { 'event': 'NETDEV_STREAM_DISCONNECTED', # 'data': {'netdev-id': 'netdev0'}, # 'timestamp': {'seconds': 1663330937, 'microseconds': 526695} } -# ## { 'event': 'NETDEV_STREAM_DISCONNECTED', 'data': { 'netdev-id': 'str' } } diff --git a/qapi/pci.json b/qapi/pci.json index ee7c659fec..086c773052 100644 --- a/qapi/pci.json +++ b/qapi/pci.json @@ -29,8 +29,9 @@ # # @bar: the index of the Base Address Register for this region # -# @type: - 'io' if the region is a PIO region -# - 'memory' if the region is a MMIO region +# @type: +# - 'io' if the region is a PIO region +# - 'memory' if the region is a MMIO region # # @size: memory size # @@ -49,21 +50,21 @@ # # Information about a bus of a PCI Bridge device # -# @number: primary bus interface number. This should be the number of the -# bus the device resides on. +# @number: primary bus interface number. This should be the number of +# the bus the device resides on. # -# @secondary: secondary bus interface number. This is the number of the -# main bus for the bridge +# @secondary: secondary bus interface number. This is the number of +# the main bus for the bridge # # @subordinate: This is the highest number bus that resides below the -# bridge. +# bridge. # # @io_range: The PIO range for all devices on this bridge # # @memory_range: The MMIO range for all devices on this bridge # -# @prefetchable_range: The range of prefetchable MMIO for all devices on -# this bridge +# @prefetchable_range: The range of prefetchable MMIO for all devices +# on this bridge # # Since: 2.4 ## @@ -145,8 +146,8 @@ # # @regions: a list of the PCI I/O regions associated with the device # -# Notes: the contents of @class_info.desc are not stable and should only be -# treated as informational. +# Notes: the contents of @class_info.desc are not stable and should +# only be treated as informational. # # Since: 0.14 ## @@ -174,10 +175,10 @@ # # Return information about the PCI bus topology of the guest. # -# Returns: a list of @PciInfo for each PCI bus. Each bus is -# represented by a json-object, which has a key with a json-array of -# all PCI devices attached to it. Each device is represented by a -# json-object. +# Returns: a list of @PciInfo for each PCI bus. Each bus is +# represented by a json-object, which has a key with a json-array +# of all PCI devices attached to it. Each device is represented +# by a json-object. # # Since: 0.14 # @@ -310,7 +311,7 @@ # ] # } # -# Note: This example has been shortened as the real response is too long. 
-#
+# Note: This example has been shortened as the real response is too
+# long.
##
{ 'command': 'query-pci', 'returns': ['PciInfo'] }
diff --git a/qapi/qapi-schema.json b/qapi/qapi-schema.json
index f000b90744..6594afba31 100644
--- a/qapi/qapi-schema.json
+++ b/qapi/qapi-schema.json
@@ -5,17 +5,20 @@
#
# This document describes all commands currently supported by QMP.
#
-# Most of the time their usage is exactly the same as in the user Monitor, this
-# means that any other document which also describe commands (the manpage,
-# QEMU's manual, etc) can and should be consulted.
+# Most of the time their usage is exactly the same as in the user
+# Monitor; this means that any other document which also describes
+# commands (the manpage, QEMU's manual, etc) can and should be
+# consulted.
#
-# QMP has two types of commands: regular and query commands. Regular commands
-# usually change the Virtual Machine's state someway, while query commands just
-# return information. The sections below are divided accordingly.
+# QMP has two types of commands: regular and query commands. Regular
+# commands usually change the Virtual Machine's state in some way,
+# while query commands just return information. The sections below
+# are divided accordingly.
#
-# It's important to observe that all communication examples are formatted in
-# a reader-friendly way, so that they're easier to understand. However, in real
-# protocol usage, they're emitted as a single line.
+# It's important to observe that all communication examples are
+# formatted in a reader-friendly way, so that they're easier to
+# understand. However, in real protocol usage, they're emitted as a
+# single line.
#
# Also, the following notation is used to denote data flow:
#
@@ -26,30 +29,9 @@
# -> data issued by the Client
# <- Server data response
#
-# Please, refer to the QMP specification (docs/interop/qmp-spec.txt) for
-# detailed information on the Server command and response formats.
-#
-# = Stability Considerations
-#
-# The current QMP command set (described in this file) may be useful for a
-# number of use cases, however it's limited and several commands have bad
-# defined semantics, specially with regard to command completion.
-#
-# These problems are going to be solved incrementally in the next QEMU releases
-# and we're going to establish a deprecation policy for badly defined commands.
-#
-# If you're planning to adopt QMP, please observe the following:
-#
-# 1. The deprecation policy will take effect and be documented soon, please
-# check the documentation of each used command as soon as a new release of
-# QEMU is available
-#
-# 2. DO NOT rely on anything which is not explicit documented
-#
-# 3. Errors, in special, are not documented. Applications should NOT check
-# for specific errors classes or data (it's strongly recommended to only
-# check for the "error" key)
-#
+# Please refer to the
+# :doc:`QEMU Machine Protocol Specification `
+# for detailed information on the Server command and response formats.
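[Editor's note: since every server message arrives as one JSON object on one line, a client can frame the protocol with plain line reads and route messages by shape — events carry an "event" key, failed commands an "error" key, successful ones "return". A minimal sketch, with the socket path assumed:]

```python
import json, socket

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect("/tmp/qmp.sock")                  # assumption: QMP Unix socket
f = s.makefile("rw", encoding="utf-8")
json.loads(f.readline())                    # <- {"QMP": {...}} greeting banner
f.write(json.dumps({"execute": "qmp_capabilities"}) + "\n"); f.flush()
f.write(json.dumps({"execute": "query-name"}) + "\n"); f.flush()

for line in f:                              # one JSON object per line
    msg = json.loads(line)
    if "event" in msg:                      # asynchronous event
        print("event:", msg["event"], msg.get("data"))
    elif "error" in msg:                    # failed command
        print("error:", msg["error"]["class"], msg["error"]["desc"])
    else:                                   # successful command reply
        print("return:", msg["return"])
```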
##
{ 'include': 'pragma.json' }
@@ -65,11 +47,11 @@
{ 'include': 'sockets.json' }
{ 'include': 'run-state.json' }
{ 'include': 'crypto.json' }
+{ 'include': 'job.json' }
{ 'include': 'block.json' }
{ 'include': 'block-export.json' }
{ 'include': 'char.json' }
{ 'include': 'dump.json' }
-{ 'include': 'job.json' }
{ 'include': 'net.json' }
{ 'include': 'rdma.json' }
{ 'include': 'rocker.json' }
@@ -95,3 +77,5 @@
{ 'include': 'pci.json' }
{ 'include': 'stats.json' }
{ 'include': 'virtio.json' }
+{ 'include': 'cryptodev.json' }
+{ 'include': 'cxl.json' }
diff --git a/qapi/qdev.json b/qapi/qdev.json
index 2708fb4e99..2d73b27c2a 100644
--- a/qapi/qdev.json
+++ b/qapi/qdev.json
@@ -17,11 +17,12 @@
#
# @typename: the type name of a device
#
-# Returns: a list of ObjectPropertyInfo describing a devices properties
+# Returns: a list of ObjectPropertyInfo describing a device's
+# properties
#
-# Note: objects can create properties at runtime, for example to describe
-# links between different devices and/or objects. These properties
-# are not included in the output of this command.
+# Note: objects can create properties at runtime, for example to
+# describe links between different devices and/or objects. These
+# properties are not included in the output of this command.
#
# Since: 1.2
##
@@ -41,12 +42,14 @@
#
# @id: the device's ID, must be unique
#
# Features:
-# @json-cli: If present, the "-device" command line option supports JSON
-# syntax with a structure identical to the arguments of this
-# command.
-# @json-cli-hotplug: If present, the "-device" command line option supports JSON
-# syntax without the reference counting leak that broke
-# hot-unplug
+#
+# @json-cli: If present, the "-device" command line option supports
+# JSON syntax with a structure identical to the arguments of this
+# command.
+#
+# @json-cli-hotplug: If present, the "-device" command line option
+# supports JSON syntax without the reference counting leak that
+# broke hot-unplug
#
# Notes:
#
@@ -68,9 +71,9 @@
# <- { "return": {} }
#
# TODO: This command effectively bypasses QAPI completely due to its
-# "additional arguments" business. It shouldn't have been added to
-# the schema in this form. It should be qapified properly, or
-# replaced by a properly qapified command.
+# "additional arguments" business. It shouldn't have been added
+# to the schema in this form. It should be qapified properly, or
+# replaced by a properly qapified command.
#
# Since: 0.13
##
@@ -86,21 +89,22 @@
#
# @id: the device's ID or QOM path
#
-# Returns: Nothing on success
-# If @id is not a valid device, DeviceNotFound
+# Returns: Nothing on success. If @id is not a valid device,
+# DeviceNotFound
#
-# Notes: When this command completes, the device may not be removed from the
-# guest. Hot removal is an operation that requires guest cooperation.
-# This command merely requests that the guest begin the hot removal
-# process. Completion of the device removal process is signaled with a
-# DEVICE_DELETED event. Guest reset will automatically complete removal
-# for all devices. If a guest-side error in the hot removal process is
-# detected, the device will not be removed and a DEVICE_UNPLUG_GUEST_ERROR
-# event is sent. Some errors cannot be detected.
+# Notes: When this command completes, the device may not be removed
+# from the guest. Hot removal is an operation that requires guest
+# cooperation. This command merely requests that the guest begin
+# the hot removal process. 
Completion of the device removal
+# process is signaled with a DEVICE_DELETED event. Guest reset
+# will automatically complete removal for all devices. If a
+# guest-side error in the hot removal process is detected, the
+# device will not be removed and a DEVICE_UNPLUG_GUEST_ERROR event
+# is sent. Some errors cannot be detected.
#
# Since: 0.14
#
-# Example:
+# Examples:
#
# -> { "execute": "device_del",
# "arguments": { "id": "net1" } }
@@ -109,16 +113,16 @@
# -> { "execute": "device_del",
# "arguments": { "id": "/machine/peripheral-anon/device[0]" } }
# <- { "return": {} }
-#
##
{ 'command': 'device_del', 'data': {'id': 'str'} }

##
# @DEVICE_DELETED:
#
-# Emitted whenever the device removal completion is acknowledged by the guest.
-# At this point, it's safe to reuse the specified device ID. Device removal can
-# be initiated by the guest or by HMP/QMP commands.
+# Emitted whenever the device removal completion is acknowledged by
+# the guest. At this point, it's safe to reuse the specified device
+# ID. Device removal can be initiated by the guest or by HMP/QMP
+# commands.
#
# @device: the device's ID if it has one
#
@@ -132,7 +136,6 @@
# "data": { "device": "virtio-net-pci-0",
# "path": "/machine/peripheral/virtio-net-pci-0" },
# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
-#
##
{ 'event': 'DEVICE_DELETED',
'data': { '*device': 'str', 'path': 'str' } }
@@ -140,7 +143,8 @@
##
# @DEVICE_UNPLUG_GUEST_ERROR:
#
-# Emitted when a device hot unplug fails due to a guest reported error.
+# Emitted when a device hot unplug fails due to a guest reported
+# error.
#
# @device: the device's ID if it has one
#
@@ -154,7 +158,6 @@
# "data": { "device": "core1",
# "path": "/machine/peripheral/core1" },
# "timestamp": { "seconds": 1615570772, "microseconds": 202844 } }
-#
##
{ 'event': 'DEVICE_UNPLUG_GUEST_ERROR',
'data': { '*device': 'str', 'path': 'str' } }
diff --git a/qapi/qmp-dispatch.c b/qapi/qmp-dispatch.c
index 0990873ec8..555528b6bb 100644
--- a/qapi/qmp-dispatch.c
+++ b/qapi/qmp-dispatch.c
@@ -134,8 +134,8 @@ static void do_qmp_dispatch_bh(void *opaque)
 * Runs outside of coroutine context for OOB commands, but in coroutine
 * context for everything else.
 */
-QDict *qmp_dispatch(const QmpCommandList *cmds, QObject *request,
- bool allow_oob, Monitor *cur_mon)
+QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *request,
+ bool allow_oob, Monitor *cur_mon)
{
 Error *err = NULL;
 bool oob;
diff --git a/qapi/qom.json b/qapi/qom.json
index 30e76653ad..7f92ea43e8 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -18,17 +18,20 @@
#
# @name: the name of the property
#
-# @type: the type of the property. This will typically come in one of four
-# forms:
+# @type: the type of the property. This will typically come in one of
+# four forms:
#
-# 1) A primitive type such as 'u8', 'u16', 'bool', 'str', or 'double'.
-# These types are mapped to the appropriate JSON type.
+# 1) A primitive type such as 'u8', 'u16', 'bool', 'str', or
+# 'double'. These types are mapped to the appropriate JSON
+# type.
#
-# 2) A child type in the form 'child<subtype>' where subtype is a qdev
-# device type name. Child properties create the composition tree.
+# 2) A child type in the form 'child<subtype>' where subtype is a
+# qdev device type name. Child properties create the
+# composition tree.
#
-# 3) A link type in the form 'link<subtype>' where subtype is a qdev
-# device type name. Link properties form the device model graph. 
+# 3) A link type in the form 'link<subtype>' where subtype is a
+# qdev device type name. Link properties form the device model
+# graph.
#
# @description: if specified, the description of the property.
#
@@ -45,14 +48,14 @@
##
# @qom-list:
#
-# This command will list any properties of a object given a path in the object
-# model.
+# This command will list any properties of an object given a path in
+# the object model.
#
-# @path: the path within the object model. See @qom-get for a description of
-# this parameter.
+# @path: the path within the object model. See @qom-get for a
+# description of this parameter.
#
-# Returns: a list of @ObjectPropertyInfo that describe the properties of the
-# object.
+# Returns: a list of @ObjectPropertyInfo that describe the properties
+# of the object.
#
# Since: 1.2
#
@@ -64,7 +67,6 @@
# { "name": "parallel0", "type": "child<chardev-vc>" },
# { "name": "serial0", "type": "child<chardev-vc>" },
# { "name": "mon0", "type": "child<chardev-stdio>" } ] }
-#
##
{ 'command': 'qom-list',
'data': { 'path': 'str' },
@@ -74,36 +76,35 @@
##
# @qom-get:
#
-# This command will get a property from a object model path and return the
-# value.
+# This command will get a property from an object model path and
+# return the value.
#
-# @path: The path within the object model. There are two forms of supported
-# paths--absolute and partial paths.
+# @path: The path within the object model. There are two forms of
+# supported paths--absolute and partial paths.
#
-# Absolute paths are derived from the root object and can follow child<>
-# or link<> properties. Since they can follow link<> properties, they
-# can be arbitrarily long. Absolute paths look like absolute filenames
-# and are prefixed with a leading slash.
+# Absolute paths are derived from the root object and can follow
+# child<> or link<> properties. Since they can follow link<>
+# properties, they can be arbitrarily long. Absolute paths look
+# like absolute filenames and are prefixed with a leading slash.
#
-# Partial paths look like relative filenames. They do not begin
-# with a prefix. The matching rules for partial paths are subtle but
-# designed to make specifying objects easy. At each level of the
-# composition tree, the partial path is matched as an absolute path.
-# The first match is not returned. At least two matches are searched
-# for. A successful result is only returned if only one match is
-# found. If more than one match is found, a flag is return to
-# indicate that the match was ambiguous.
+# Partial paths look like relative filenames. They do not begin
+# with a prefix. The matching rules for partial paths are subtle
+# but designed to make specifying objects easy. At each level of
+# the composition tree, the partial path is matched as an absolute
+# path. The first match is not returned. At least two matches
+# are searched for. A successful result is only returned if only
+# one match is found. If more than one match is found, a flag is
+# returned to indicate that the match was ambiguous.
#
# @property: The property name to read
#
-# Returns: The property value. The type depends on the property
-# type. child<> and link<> properties are returned as #str
-# pathnames. All integer property types (u8, u16, etc) are
-# returned as #int.
+# Returns: The property value. The type depends on the property type.
+# child<> and link<> properties are returned as #str pathnames.
+# All integer property types (u8, u16, etc) are returned as #int.
#
# Since: 1.2
#
-# Example:
+# Examples:
#
# 1. 
Use absolute path
#
@@ -118,7 +119,6 @@
# "arguments": { "path": "unattached/sysbus",
# "property": "type" } }
# <- { "return": "System" }
-#
##
{ 'command': 'qom-get',
'data': { 'path': 'str', 'property': 'str' },
@@ -134,8 +134,8 @@
#
# @property: the property name to set
#
-# @value: a value who's type is appropriate for the property type. See @qom-get
-# for a description of type mapping.
+# @value: a value whose type is appropriate for the property type.
+# See @qom-get for a description of type mapping.
#
# Since: 1.2
#
@@ -146,7 +146,6 @@
# "property": "graphics",
# "value": false } }
# <- { "return": {} }
-#
##
{ 'command': 'qom-set',
'data': { 'path': 'str', 'property': 'str', 'value': 'any' },
@@ -160,7 +159,7 @@
# @name: the type name found in the search
#
# @abstract: the type is abstract and can't be directly instantiated.
-# Omitted if false. (since 2.10)
+# Omitted if false. (since 2.10)
#
# @parent: Name of parent type, if any (since 2.10)
#
@@ -174,11 +173,13 @@
#
# This command will return a list of types given search parameters
#
-# @implements: if specified, only return types that implement this type name
+# @implements: if specified, only return types that implement this
+# type name
#
# @abstract: if true, include abstract types in the results
#
-# Returns: a list of @ObjectTypeInfo or an empty list if no results are found
+# Returns: a list of @ObjectTypeInfo or an empty list if no results
+# are found
#
# Since: 1.1
##
@@ -194,9 +195,9 @@
#
# @typename: the type name of an object
#
-# Note: objects can create properties at runtime, for example to describe
-# links between different devices and/or objects. These properties
-# are not included in the output of this command.
+# Note: objects can create properties at runtime, for example to
+# describe links between different devices and/or objects. These
+# properties are not included in the output of this command.
#
# Returns: a list of ObjectPropertyInfo describing object properties
#
@@ -214,7 +215,8 @@
#
# @if: interface name of the host system CAN bus to connect to
#
-# @canbus: object ID of the can-bus object to connect to the host interface
+# @canbus: object ID of the can-bus object to connect to the host
+# interface
#
# Since: 2.12
##
@@ -227,34 +229,35 @@
#
# Properties for colo-compare objects. 
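[Editor's note: the child<> composition tree described above can be walked mechanically — qom-list a path, then recurse into every property whose type starts with "child<". A hedged sketch, with the socket path and the "/machine" starting point as assumptions:]

```python
import json, socket

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect("/tmp/qmp.sock")                  # assumption: QMP Unix socket
f = s.makefile("rw", encoding="utf-8")
json.loads(f.readline())                    # greeting

def qmp(cmd, args=None):
    msg = {"execute": cmd}
    if args is not None:
        msg["arguments"] = args
    f.write(json.dumps(msg) + "\n"); f.flush()
    while True:
        reply = json.loads(f.readline())
        if "event" not in reply:
            return reply["return"]

qmp("qmp_capabilities")

def walk(path, depth=0):
    # Children carry a type of the form child<subtype>; recurse into them.
    for prop in qmp("qom-list", {"path": path}):
        print("  " * depth + prop["name"], ":", prop["type"])
        if prop["type"].startswith("child<"):
            walk(path + "/" + prop["name"], depth + 1)

walk("/machine")
```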
# -# @primary_in: name of the character device backend to use for the primary -# input (incoming packets are redirected to @outdev) +# @primary_in: name of the character device backend to use for the +# primary input (incoming packets are redirected to @outdev) # -# @secondary_in: name of the character device backend to use for secondary -# input (incoming packets are only compared to the input on -# @primary_in and then dropped) +# @secondary_in: name of the character device backend to use for +# secondary input (incoming packets are only compared to the input +# on @primary_in and then dropped) # # @outdev: name of the character device backend to use for output # # @iothread: name of the iothread to run in # -# @notify_dev: name of the character device backend to be used to communicate -# with the remote colo-frame (only for Xen COLO) +# @notify_dev: name of the character device backend to be used to +# communicate with the remote colo-frame (only for Xen COLO) # -# @compare_timeout: the maximum time to hold a packet from @primary_in for -# comparison with an incoming packet on @secondary_in in -# milliseconds (default: 3000) +# @compare_timeout: the maximum time to hold a packet from @primary_in +# for comparison with an incoming packet on @secondary_in in +# milliseconds (default: 3000) # -# @expired_scan_cycle: the interval at which colo-compare checks whether -# packets from @primary have timed out, in milliseconds -# (default: 3000) +# @expired_scan_cycle: the interval at which colo-compare checks +# whether packets from @primary have timed out, in milliseconds +# (default: 3000) # -# @max_queue_size: the maximum number of packets to keep in the queue for -# comparing with incoming packets from @secondary_in. If the -# queue is full and additional packets are received, the -# additional packets are dropped. (default: 1024) +# @max_queue_size: the maximum number of packets to keep in the queue +# for comparing with incoming packets from @secondary_in. If the +# queue is full and additional packets are received, the +# additional packets are dropped. (default: 1024) # -# @vnet_hdr_support: if true, vnet header support is enabled (default: false) +# @vnet_hdr_support: if true, vnet header support is enabled +# (default: false) # # Since: 2.8 ## @@ -272,24 +275,31 @@ ## # @CryptodevBackendProperties: # -# Properties for cryptodev-backend and cryptodev-backend-builtin objects. +# Properties for cryptodev-backend and cryptodev-backend-builtin +# objects. # -# @queues: the number of queues for the cryptodev backend. Ignored for -# cryptodev-backend and must be 1 for cryptodev-backend-builtin. -# (default: 1) +# @queues: the number of queues for the cryptodev backend. Ignored +# for cryptodev-backend and must be 1 for +# cryptodev-backend-builtin. (default: 1) +# +# @throttle-bps: limit total bytes per second (Since 8.0) +# +# @throttle-ops: limit total operations per second (Since 8.0) # # Since: 2.8 ## { 'struct': 'CryptodevBackendProperties', - 'data': { '*queues': 'uint32' } } + 'data': { '*queues': 'uint32', + '*throttle-bps': 'uint64', + '*throttle-ops': 'uint64' } } ## # @CryptodevVhostUserProperties: # # Properties for cryptodev-vhost-user objects. 
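[Editor's note: the new throttle knobs above should be settable at creation time through object-add, which mirrors -object. This is an assumption-laden sketch — the ids and limits are placeholders, and it uses the builtin backend, whose @queues must be 1 per the documentation above.]

```python
import json, socket

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect("/tmp/qmp.sock")                  # assumption: QMP Unix socket
f = s.makefile("rw", encoding="utf-8")
json.loads(f.readline())                    # greeting
f.write(json.dumps({"execute": "qmp_capabilities"}) + "\n"); f.flush()
json.loads(f.readline())

args = {"qom-type": "cryptodev-backend-builtin",
        "id": "cryptodev0",                 # placeholder id
        "queues": 1,                        # must be 1 for the builtin backend
        "throttle-bps": 64 * 1024 * 1024,   # cap total bytes/sec (since 8.0)
        "throttle-ops": 10000}              # cap total operations/sec (since 8.0)
f.write(json.dumps({"execute": "object-add", "arguments": args}) + "\n"); f.flush()
print(json.loads(f.readline()))
```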
# -# @chardev: the name of a Unix domain socket character device that connects to -# the vhost-user server +# @chardev: the name of a Unix domain socket character device that +# connects to the vhost-user server # # Since: 2.12 ## @@ -304,8 +314,8 @@ # # @addr: the name of the DBus bus to connect to # -# @id-list: a comma separated list of DBus IDs of helpers whose data should be -# included in the VM state on migration +# @id-list: a comma separated list of DBus IDs of helpers whose data +# should be included in the VM state on migration # # Since: 5.0 ## @@ -316,7 +326,8 @@ ## # @NetfilterInsert: # -# Indicates where to insert a netfilter relative to a given other filter. +# Indicates where to insert a netfilter relative to a given other +# filter. # # @before: insert before the specified filter # @@ -336,20 +347,20 @@ # # @queue: indicates which queue(s) to filter (default: all) # -# @status: indicates whether the filter is enabled ("on") or disabled ("off") -# (default: "on") +# @status: indicates whether the filter is enabled ("on") or disabled +# ("off") (default: "on") # -# @position: specifies where the filter should be inserted in the filter list. -# "head" means the filter is inserted at the head of the filter list, -# before any existing filters. -# "tail" means the filter is inserted at the tail of the filter list, -# behind any existing filters (default). -# "id=<id>" means the filter is inserted before or behind the filter -# specified by <id>, depending on the @insert property. -# (default: "tail") +# @position: specifies where the filter should be inserted in the +# filter list. "head" means the filter is inserted at the head of +# the filter list, before any existing filters. "tail" means the +# filter is inserted at the tail of the filter list, behind any +# existing filters (default). "id=<id>" means the filter is +# inserted before or behind the filter specified by <id>, +# depending on the @insert property. (default: "tail") # -# @insert: where to insert the filter relative to the filter given in @position. -# Ignored if @position is "head" or "tail". (default: behind) +# @insert: where to insert the filter relative to the filter given in +# @position. Ignored if @position is "head" or "tail". +# (default: behind) # # Since: 2.5 ## @@ -365,8 +376,9 @@ # # Properties for filter-buffer objects. # -# @interval: a non-zero interval in microseconds. All packets arriving in the -# given interval are delayed until the end of the interval. +# @interval: a non-zero interval in microseconds. All packets +# arriving in the given interval are delayed until the end of the +# interval. # # Since: 2.5 ## @@ -381,7 +393,8 @@ # # @file: the filename where the dumped packets should be stored # -# @maxlen: maximum number of bytes in a packet that are stored (default: 65536) +# @maxlen: maximum number of bytes in a packet that are stored +# (default: 65536) # # Since: 2.5 ## @@ -395,10 +408,11 @@ # # Properties for filter-mirror objects. # -# @outdev: the name of a character device backend to which all incoming packets -# are mirrored +# @outdev: the name of a character device backend to which all +# incoming packets are mirrored # -# @vnet_hdr_support: if true, vnet header support is enabled (default: false) +# @vnet_hdr_support: if true, vnet header support is enabled +# (default: false) # # Since: 2.6 ## @@ -412,16 +426,17 @@ # # Properties for filter-redirector objects. # -# At least one of @indev or @outdev must be present.
If both are present, they -# must not refer to the same character device backend. +# At least one of @indev or @outdev must be present. If both are +# present, they must not refer to the same character device backend. # -# @indev: the name of a character device backend from which packets are -# received and redirected to the filtered network device +# @indev: the name of a character device backend from which packets +# are received and redirected to the filtered network device # -# @outdev: the name of a character device backend to which all incoming packets -# are redirected +# @outdev: the name of a character device backend to which all +# incoming packets are redirected # -# @vnet_hdr_support: if true, vnet header support is enabled (default: false) +# @vnet_hdr_support: if true, vnet header support is enabled +# (default: false) # # Since: 2.6 ## @@ -436,7 +451,8 @@ # # Properties for filter-rewriter objects. # -# @vnet_hdr_support: if true, vnet header support is enabled (default: false) +# @vnet_hdr_support: if true, vnet header support is enabled +# (default: false) # # Since: 2.8 ## @@ -449,17 +465,18 @@ # # Properties for input-barrier objects. # -# @name: the screen name as declared in the screens section of barrier.conf +# @name: the screen name as declared in the screens section of +# barrier.conf # # @server: hostname of the Barrier server (default: "localhost") # # @port: TCP port of the Barrier server (default: "24800") # # @x-origin: x coordinate of the leftmost pixel on the guest screen -# (default: "0") +# (default: "0") # # @y-origin: y coordinate of the topmost pixel on the guest screen -# (default: "0") +# (default: "0") # # @width: the width of secondary screen in pixels (default: "1920") # @@ -483,13 +500,13 @@ # # @evdev: the path of the host evdev device to use # -# @grab_all: if true, grab is toggled for all devices (e.g. both keyboard and -# mouse) instead of just one device (default: false) +# @grab_all: if true, grab is toggled for all devices (e.g. both +# keyboard and mouse) instead of just one device (default: false) # # @repeat: enables auto-repeat events (default: false) # # @grab-toggle: the key or key combination that toggles device grab -# (default: ctrl-ctrl) +# (default: ctrl-ctrl) # # Since: 2.6 ## @@ -504,15 +521,15 @@ # # Common properties for event loops # -# @aio-max-batch: maximum number of requests in a batch for the AIO engine, -# 0 means that the engine will use its default. -# (default: 0) +# @aio-max-batch: maximum number of requests in a batch for the AIO +# engine, 0 means that the engine will use its default. +# (default: 0) # -# @thread-pool-min: minimum number of threads reserved in the thread pool -# (default:0) +# @thread-pool-min: minimum number of threads reserved in the thread +# pool (default:0) # -# @thread-pool-max: maximum number of threads the thread pool can contain -# (default:64) +# @thread-pool-max: maximum number of threads the thread pool can +# contain (default:64) # # Since: 7.1 ## @@ -526,17 +543,17 @@ # # Properties for iothread objects. # -# @poll-max-ns: the maximum number of nanoseconds to busy wait for events. -# 0 means polling is disabled (default: 32768 on POSIX hosts, -# 0 otherwise) +# @poll-max-ns: the maximum number of nanoseconds to busy wait for +# events. 0 means polling is disabled (default: 32768 on POSIX +# hosts, 0 otherwise) # -# @poll-grow: the multiplier used to increase the polling time when the -# algorithm detects it is missing events due to not polling long -# enough. 
0 selects a default behaviour (default: 0) +# @poll-grow: the multiplier used to increase the polling time when +# the algorithm detects it is missing events due to not polling +# long enough. 0 selects a default behaviour (default: 0) # # @poll-shrink: the divisor used to decrease the polling time when the -# algorithm detects it is spending too long polling without -# encountering events. 0 selects a default behaviour (default: 0) +# algorithm detects it is spending too long polling without +# encountering events. 0 selects a default behaviour (default: 0) # # The @aio-max-batch option is available since 6.1. # @@ -564,11 +581,11 @@ # # Properties for objects of classes derived from memory-backend. # -# @merge: if true, mark the memory as mergeable (default depends on the machine -# type) +# @merge: if true, mark the memory as mergeable (default depends on +# the machine type) # -# @dump: if true, include the memory in core dumps (default depends on the -# machine type) +# @dump: if true, include the memory in core dumps (default depends on +# the machine type) # # @host-nodes: the list of NUMA host nodes to bind the memory to # @@ -576,31 +593,31 @@ # # @prealloc: if true, preallocate memory (default: false) # -# @prealloc-threads: number of CPU threads to use for prealloc (default: 1) +# @prealloc-threads: number of CPU threads to use for prealloc +# (default: 1) # -# @prealloc-context: thread context to use for creation of preallocation threads -# (default: none) (since 7.2) +# @prealloc-context: thread context to use for creation of +# preallocation threads (default: none) (since 7.2) # -# @share: if false, the memory is private to QEMU; if true, it is shared -# (default: false) +# @share: if false, the memory is private to QEMU; if true, it is +# shared (default: false) # # @reserve: if true, reserve swap space (or huge pages) if applicable -# (default: true) (since 6.1) +# (default: true) (since 6.1) # # @size: size of the memory region in bytes # -# @x-use-canonical-path-for-ramblock-id: if true, the canonical path is used -# for ramblock-id. Disable this for 4.0 -# machine types or older to allow -# migration with newer QEMU versions. -# (default: false generally, -# but true for machine types <= 4.0) +# @x-use-canonical-path-for-ramblock-id: if true, the canonical path +# is used for ramblock-id. Disable this for 4.0 machine types or +# older to allow migration with newer QEMU versions. +# (default: false generally, but true for machine types <= 4.0) # -# Note: prealloc=true and reserve=false cannot be set at the same time. With -# reserve=true, the behavior depends on the operating system: for example, -# Linux will not reserve swap space for shared file mappings -- -# "not applicable". In contrast, reserve=false will bail out if it cannot -# be configured accordingly. +# Note: prealloc=true and reserve=false cannot be set at the same +# time. With reserve=true, the behavior depends on the operating +# system: for example, Linux will not reserve swap space for +# shared file mappings -- "not applicable". In contrast, +# reserve=false will bail out if it cannot be configured +# accordingly. # # Since: 2.1 ## @@ -622,33 +639,40 @@ # # Properties for memory-backend-file objects. # -# @align: the base address alignment when QEMU mmap(2)s @mem-path. Some -# backend stores specified by @mem-path require an alignment different -# than the default one used by QEMU, e.g. the device DAX /dev/dax0.0 -# requires 2M alignment rather than 4K. 
In such cases, users can -# specify the required alignment via this option. -# 0 selects a default alignment (currently the page size). (default: 0) +# @align: the base address alignment when QEMU mmap(2)s @mem-path. +# Some backend stores specified by @mem-path require an alignment +# different than the default one used by QEMU, e.g. the device DAX +# /dev/dax0.0 requires 2M alignment rather than 4K. In such cases, +# users can specify the required alignment via this option. 0 +# selects a default alignment (currently the page size). +# (default: 0) # -# @discard-data: if true, the file contents can be destroyed when QEMU exits, -# to avoid unnecessarily flushing data to the backing file. Note -# that ``discard-data`` is only an optimization, and QEMU might -# not discard file contents if it aborts unexpectedly or is -# terminated using SIGKILL. (default: false) +# @offset: the offset into the target file that the region starts at. You +# can use this option to back multiple regions with a single file. Must +# be a multiple of the page size. (default: 0) (since 8.1) # -# @mem-path: the path to either a shared memory or huge page filesystem mount +# @discard-data: if true, the file contents can be destroyed when QEMU +# exits, to avoid unnecessarily flushing data to the backing file. +# Note that @discard-data is only an optimization, and QEMU might +# not discard file contents if it aborts unexpectedly or is +# terminated using SIGKILL. (default: false) # -# @pmem: specifies whether the backing file specified by @mem-path is in -# host persistent memory that can be accessed using the SNIA NVM -# programming model (e.g. Intel NVDIMM). +# @mem-path: the path to either a shared memory or huge page +# filesystem mount # -# @readonly: if true, the backing file is opened read-only; if false, it is -# opened read-write. (default: false) +# @pmem: specifies whether the backing file specified by @mem-path is +# in host persistent memory that can be accessed using the SNIA +# NVM programming model (e.g. Intel NVDIMM). +# +# @readonly: if true, the backing file is opened read-only; if false, +# it is opened read-write. (default: false) # # Since: 2.1 ## { 'struct': 'MemoryBackendFileProperties', 'base': 'MemoryBackendProperties', 'data': { '*align': 'size', + '*offset': 'size', '*discard-data': 'bool', 'mem-path': 'str', '*pmem': { 'type': 'bool', 'if': 'CONFIG_LIBPMEM' }, @@ -661,16 +685,16 @@ # # The @share boolean option is true by default with memfd. # -# @hugetlb: if true, the file to be created resides in the hugetlbfs filesystem -# (default: false) +# @hugetlb: if true, the file to be created resides in the hugetlbfs +# filesystem (default: false) # -# @hugetlbsize: the hugetlb page size on systems that support multiple hugetlb -# page sizes (it must be a power of 2 value supported by the -# system). 0 selects a default page size. This option is ignored -# if @hugetlb is false. (default: 0) +# @hugetlbsize: the hugetlb page size on systems that support multiple +# hugetlb page sizes (it must be a power of 2 value supported by +# the system). 0 selects a default page size. This option is +# ignored if @hugetlb is false. (default: 0) # -# @seal: if true, create a sealed-file, which will block further resizing of -# the memory (default: true) +# @seal: if true, create a sealed-file, which will block further +# resizing of the memory (default: true) # # Since: 2.12 ## @@ -702,7 +726,8 @@ # # Properties for pr-manager-helper objects. 
# -# @path: the path to a Unix domain socket for connecting to the external helper +# @path: the path to a Unix domain socket for connecting to the +# external helper # # Since: 2.11 ## @@ -731,7 +756,8 @@ # # @fd: file descriptor name previously passed via 'getfd' command # -# @devid: the id of the device to be associated with the file descriptor +# @devid: the id of the device to be associated with the file +# descriptor # # Since: 6.0 ## @@ -757,13 +783,15 @@ # # Properties for objects of classes derived from rng. # -# @opened: if true, the device is opened immediately when applying this option -# and will probably fail when processing the next option. Don't use; -# only provided for compatibility. (default: false) +# @opened: if true, the device is opened immediately when applying +# this option and will probably fail when processing the next +# option. Don't use; only provided for compatibility. +# (default: false) # # Features: -# @deprecated: Member @opened is deprecated. Setting true doesn't make sense, -# and false is already the default. +# +# @deprecated: Member @opened is deprecated. Setting true doesn't +# make sense, and false is already the default. # # Since: 1.3 ## @@ -775,8 +803,8 @@ # # Properties for rng-egd objects. # -# @chardev: the name of a character device backend that provides the connection -# to the RNG daemon +# @chardev: the name of a character device backend that provides the +# connection to the RNG daemon # # Since: 1.3 ## @@ -789,8 +817,8 @@ # # Properties for rng-random objects. # -# @filename: the filename of the device on the host to obtain entropy from -# (default: "/dev/urandom") +# @filename: the filename of the device on the host to obtain entropy +# from (default: "/dev/urandom") # # Since: 1.3 ## @@ -816,11 +844,11 @@ # @cbitpos: C-bit location in page table entry (default: 0) # # @reduced-phys-bits: number of bits in physical addresses that become -# unavailable when SEV is enabled +# unavailable when SEV is enabled # # @kernel-hashes: if true, add hashes of kernel/initrd/cmdline to a -# designated guest firmware page for measured boot -# with -kernel (default: false) (since 6.2) +# designated guest firmware page for measured boot with -kernel +# (default: false) (since 6.2) # # Since: 2.12 ## @@ -839,15 +867,15 @@ # # Properties for thread context objects. # -# @cpu-affinity: the list of host CPU numbers used as CPU affinity for all -# threads created in the thread context (default: QEMU main -# thread CPU affinity) +# @cpu-affinity: the list of host CPU numbers used as CPU affinity for +# all threads created in the thread context (default: QEMU main +# thread CPU affinity) # -# @node-affinity: the list of host node numbers that will be resolved to a -# list of host CPU numbers used as CPU affinity. This is a -# shortcut for specifying the list of host CPU numbers -# belonging to the host nodes manually by setting -# @cpu-affinity. (default: QEMU main thread affinity) +# @node-affinity: the list of host node numbers that will be resolved +# to a list of host CPU numbers used as CPU affinity. This is a +# shortcut for specifying the list of host CPU numbers belonging +# to the host nodes manually by setting @cpu-affinity. +# (default: QEMU main thread affinity) # # Since: 7.2 ## @@ -860,6 +888,7 @@ # @ObjectType: # # Features: +# # @unstable: Member @x-remote-object is experimental. # # Since: 6.0 @@ -992,8 +1021,8 @@ # # Create a QOM object. 
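The QMP examples in this schema are raw wire exchanges; for experimenting with commands such as @object-add it can help to drive them from a script. Below is a minimal, hedged sketch in Python (standard library only). The socket path /tmp/qmp.sock and the matching command line option -qmp unix:/tmp/qmp.sock,server=on,wait=off are illustrative assumptions, and asynchronous events are simply skipped::

    import json
    import socket

    def qmp(sock_path, command, arguments=None):
        """Run one QMP command over a Unix socket and return its reply."""
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(sock_path)
            f = s.makefile("rw", encoding="utf-8")
            json.loads(f.readline())              # server greeting
            f.write(json.dumps({"execute": "qmp_capabilities"}) + "\n")
            f.flush()
            f.readline()                          # capabilities ack
            msg = {"execute": command}
            if arguments is not None:
                msg["arguments"] = arguments
            f.write(json.dumps(msg) + "\n")
            f.flush()
            while True:
                reply = json.loads(f.readline())
                if "event" not in reply:          # skip async events
                    return reply

    # e.g. the rng-random object from the example below:
    print(qmp("/tmp/qmp.sock", "object-add",
              {"qom-type": "rng-random", "id": "rng1",
               "filename": "/dev/hwrng"}))

Later asides in this section reuse this hypothetical qmp() helper under the same assumptions.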
# -# Returns: Nothing on success -# Error if @qom-type is not a valid class name +# Returns: Nothing on success. Error if @qom-type is not a valid class +# name # # Since: 2.0 # @@ -1003,7 +1032,6 @@ # "arguments": { "qom-type": "rng-random", "id": "rng1", # "filename": "/dev/hwrng" } } # <- { "return": {} } -# ## { 'command': 'object-add', 'data': 'ObjectOptions', 'boxed': true, 'allow-preconfig': true } @@ -1015,8 +1043,8 @@ # # @id: the name of the QOM object to remove # -# Returns: Nothing on success -# Error if @id is not a valid id for a QOM object +# Returns: Nothing on success. Error if @id is not a valid id for a QOM +# object # # Since: 2.0 # @@ -1024,7 +1052,6 @@ # # -> { "execute": "object-del", "arguments": { "id": "rng1" } } # <- { "return": {} } -# ## { 'command': 'object-del', 'data': {'id': 'str'}, 'allow-preconfig': true } diff --git a/qapi/rdma.json b/qapi/rdma.json index a1d2175a8b..23ebcf7885 100644 --- a/qapi/rdma.json +++ b/qapi/rdma.json @@ -17,7 +17,7 @@ # # @subnet-prefix: Subnet Prefix # -# @interface-id : Interface ID +# @interface-id: Interface ID # # Since: 4.0 # @@ -30,7 +30,6 @@ # "interface-id": 15880512517475447892, # "gid-status": true, # "subnet-prefix": 33022}} -# ## { 'event': 'RDMA_GID_STATUS_CHANGED', 'data': { 'netdev' : 'str', diff --git a/qapi/replay.json b/qapi/replay.json index 729470300d..289b2d3658 100644 --- a/qapi/replay.json +++ b/qapi/replay.json @@ -13,13 +13,13 @@ # # Mode of the replay subsystem. # -# @none: normal execution mode. Replay or record are not enabled. +# @none: normal execution mode. Replay or record are not enabled. # -# @record: record mode. All non-deterministic data is written into the -# replay log. +# @record: record mode. All non-deterministic data is written into +# the replay log. # -# @play: replay mode. Non-deterministic data required for system execution -# is read from the log. +# @play: replay mode. Non-deterministic data required for system +# execution is read from the log. # # Since: 2.5 ## @@ -33,9 +33,8 @@ # # @mode: current mode. # -# @filename: name of the record/replay log file. -# It is present only in record or replay modes, when the log -# is recorded or replayed. +# @filename: name of the record/replay log file. It is present only +# in record or replay modes, when the log is recorded or replayed. # # @icount: current number of executed instructions. # @@ -47,9 +46,9 @@ ## # @query-replay: # -# Retrieve the record/replay information. -# It includes current instruction count which may be used for -# @replay-break and @replay-seek commands. +# Retrieve the record/replay information. It includes current +# instruction count which may be used for @replay-break and +# @replay-seek commands. # # Returns: record/replay information. # @@ -59,7 +58,6 @@ # # -> { "execute": "query-replay" } # <- { "return": { "mode": "play", "filename": "log.rr", "icount": 220414 } } -# ## { 'command': 'query-replay', 'returns': 'ReplayInfo' } @@ -67,12 +65,12 @@ ## # @replay-break: # -# Set replay breakpoint at instruction count @icount. -# Execution stops when the specified instruction is reached. -# There can be at most one breakpoint. When breakpoint is set, any prior -# one is removed. The breakpoint may be set only in replay mode and only -# "in the future", i.e. at instruction counts greater than the current one. -# The current instruction count can be observed with @query-replay. +# Set replay breakpoint at instruction count @icount. Execution stops +# when the specified instruction is reached.
There can be at most one +# breakpoint. When breakpoint is set, any prior one is removed. The +# breakpoint may be set only in replay mode and only "in the future", +# i.e. at instruction counts greater than the current one. The +# current instruction count can be observed with @query-replay. # # @icount: instruction count to stop at # @@ -81,22 +79,22 @@ # Example: # # -> { "execute": "replay-break", "arguments": { "icount": 220414 } } -# +# <- { "return": {} } ## { 'command': 'replay-break', 'data': { 'icount': 'int' } } ## # @replay-delete-break: # -# Remove replay breakpoint which was set with @replay-break. -# The command is ignored when there are no replay breakpoints. +# Remove replay breakpoint which was set with @replay-break. The +# command is ignored when there are no replay breakpoints. # # Since: 5.2 # # Example: # # -> { "execute": "replay-delete-break" } -# +# <- { "return": {} } ## { 'command': 'replay-delete-break' } @@ -104,11 +102,11 @@ # @replay-seek: # # Automatically proceed to the instruction count @icount, when -# replaying the execution. The command automatically loads nearest +# replaying the execution. The command automatically loads nearest # snapshot and replays the execution to find the desired instruction. -# When there is no preceding snapshot or the execution is not replayed, -# then the command fails. -# icount for the reference may be obtained with @query-replay command. +# When there is no preceding snapshot or the execution is not +# replayed, then the command fails. icount for the reference may be +# obtained with @query-replay command. # # @icount: target instruction count # @@ -117,5 +115,6 @@ # Example: # # -> { "execute": "replay-seek", "arguments": { "icount": 220414 } } +# <- { "return": {} } ## { 'command': 'replay-seek', 'data': { 'icount': 'int' } } diff --git a/qapi/rocker.json b/qapi/rocker.json index b48e49a89b..31ce0b36f6 100644 --- a/qapi/rocker.json +++ b/qapi/rocker.json @@ -34,7 +34,6 @@ # # -> { "execute": "query-rocker", "arguments": { "name": "sw1" } } # <- { "return": {"name": "sw1", "ports": 2, "id": 1327446905938}} -# ## { 'command': 'query-rocker', 'data': { 'name': 'str' }, @@ -107,7 +106,6 @@ # {"duplex": "full", "enabled": true, "name": "sw1.2", # "autoneg": "off", "link-up": true, "speed": 10000} # ]} -# ## { 'command': 'query-rocker-ports', 'data': { 'name': 'str' }, @@ -141,7 +139,7 @@ # @ip-dst: IP header destination address # # Note: optional members may or may not appear in the flow key -# depending if they're relevant to the flow key. +# depending if they're relevant to the flow key. # # Since: 2.4 ## @@ -171,7 +169,7 @@ # @ip-tos: IP header TOS field # # Note: optional members may or may not appear in the flow mask -# depending if they're relevant to the flow mask. +# depending if they're relevant to the flow mask. # # Since: 2.4 ## @@ -198,7 +196,7 @@ # @out-pport: physical output port # # Note: optional members may or may not appear in the flow action -# depending if they're relevant to the flow action. +# depending if they're relevant to the flow action. # # Since: 2.4 ## @@ -235,8 +233,8 @@ # # @name: switch name # -# @tbl-id: flow table ID. If tbl-id is not specified, returns -# flow information for all tables. +# @tbl-id: flow table ID. If tbl-id is not specified, returns flow +# information for all tables. 
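As the @tbl-id description above says, omitting it returns flows for every table. A hedged sketch using the hypothetical qmp() helper from the object-add aside, with the switch name "sw1" taken from the surrounding examples::

    # All OF-DPA flows on "sw1", then only those in table 0:
    all_flows = qmp("/tmp/qmp.sock", "query-rocker-of-dpa-flows",
                    {"name": "sw1"})
    table0 = qmp("/tmp/qmp.sock", "query-rocker-of-dpa-flows",
                 {"name": "sw1", "tbl-id": 0})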
# # Returns: rocker OF-DPA flow information # @@ -254,7 +252,6 @@ # }, # {...more...}, # ]} -# ## { 'command': 'query-rocker-of-dpa-flows', 'data': { 'name': 'str', '*tbl-id': 'uint32' }, @@ -292,7 +289,7 @@ # @ttl-check: perform TTL check # # Note: optional members may or may not appear in the group depending -# if they're relevant to the group type. +# if they're relevant to the group type. # # Since: 2.4 ## @@ -311,8 +308,8 @@ # # @name: switch name # -# @type: group type. If type is not specified, returns -# group information for all group types. +# @type: group type. If type is not specified, returns group +# information for all group types. # # Returns: rocker OF-DPA group information # @@ -335,7 +332,6 @@ # "pport": 0, "vlan-id": 3840, # "pop-vlan": 1, "id": 251658240} # ]} -# ## { 'command': 'query-rocker-of-dpa-groups', 'data': { 'name': 'str', '*type': 'uint8' }, diff --git a/qapi/run-state.json b/qapi/run-state.json index 419c188dd1..f216ba54ec 100644 --- a/qapi/run-state.json +++ b/qapi/run-state.json @@ -16,16 +16,16 @@ # @finish-migrate: guest is paused to finish the migration process # # @inmigrate: guest is paused waiting for an incoming migration. Note -# that this state does not tell whether the machine will start at the -# end of the migration. This depends on the command-line -S option and -# any invocation of 'stop' or 'cont' that has happened since QEMU was -# started. +# that this state does not tell whether the machine will start at +# the end of the migration. This depends on the command-line -S +# option and any invocation of 'stop' or 'cont' that has happened +# since QEMU was started. # -# @internal-error: An internal error that prevents further guest execution -# has occurred +# @internal-error: An internal error that prevents further guest +# execution has occurred # -# @io-error: the last IOP has failed and the device is configured to pause -# on I/O errors +# @io-error: the last IOP has failed and the device is configured to +# pause on I/O errors # # @paused: guest has been paused via the 'stop' command # @@ -43,13 +43,15 @@ # # @suspended: guest is suspended (ACPI S3) # -# @watchdog: the watchdog action is configured to pause and has been triggered +# @watchdog: the watchdog action is configured to pause and has been +# triggered # -# @guest-panicked: guest has been panicked as a result of guest OS panic +# @guest-panicked: guest has been panicked as a result of guest OS +# panic # -# @colo: guest is paused to save/restore VM state under colo checkpoint, -# VM can not get into this state unless colo capability is enabled -# for migration. (since 2.8) +# @colo: guest is paused to save/restore VM state under colo +# checkpoint, VM can not get into this state unless colo +# capability is enabled for migration. (since 2.8) ## { 'enum': 'RunState', 'data': [ 'debug', 'inmigrate', 'internal-error', 'io-error', 'paused', @@ -75,21 +77,21 @@ # @host-ui: Reaction to a UI event, like window close # # @guest-shutdown: Guest shutdown/suspend request, via ACPI or other -# hardware-specific means +# hardware-specific means # # @guest-reset: Guest reset request, and command line turns that into -# a shutdown +# a shutdown # -# @guest-panic: Guest panicked, and command line turns that into a shutdown +# @guest-panic: Guest panicked, and command line turns that into a +# shutdown # -# @subsystem-reset: Partial guest reset that does not trigger QMP events and -# ignores --no-reboot. 
This is useful for sanitizing -# hypercalls on s390 that are used during kexec/kdump/boot +# @subsystem-reset: Partial guest reset that does not trigger QMP +# events and ignores --no-reboot. This is useful for sanitizing +# hypercalls on s390 that are used during kexec/kdump/boot # # @snapshot-load: A snapshot is being loaded by the record & replay -# subsystem. This value is used only within QEMU. It -# doesn't occur in QMP. (since 7.2) -# +# subsystem. This value is used only within QEMU. It doesn't +# occur in QMP. (since 7.2) ## { 'enum': 'ShutdownCause', # Beware, shutdown_caused_by_guest() depends on enumeration order @@ -104,16 +106,26 @@ # # @running: true if all VCPUs are runnable, false if not runnable # -# @singlestep: true if VCPUs are in single-step mode +# @singlestep: true if using TCG with one guest instruction per +# translation block # # @status: the virtual machine @RunState # +# Features: +# +# @deprecated: Member 'singlestep' is deprecated (with no +# replacement). +# # Since: 0.14 # -# Notes: @singlestep is enabled through the GDB stub +# Notes: @singlestep is enabled on the command line with '-accel +# tcg,one-insn-per-tb=on', or with the HMP 'one-insn-per-tb' +# command. ## { 'struct': 'StatusInfo', - 'data': {'running': 'bool', 'singlestep': 'bool', 'status': 'RunState'} } + 'data': {'running': 'bool', + 'singlestep': { 'type': 'bool', 'features': [ 'deprecated' ]}, + 'status': 'RunState'} } ## # @query-status: @@ -130,7 +142,6 @@ # <- { "return": { "running": true, # "singlestep": false, # "status": "running" } } -# ## { 'command': 'query-status', 'returns': 'StatusInfo', 'allow-preconfig': true } @@ -138,17 +149,20 @@ ## # @SHUTDOWN: # -# Emitted when the virtual machine has shut down, indicating that qemu is -# about to exit. +# Emitted when the virtual machine has shut down, indicating that qemu +# is about to exit. # -# @guest: If true, the shutdown was triggered by a guest request (such as -# a guest-initiated ACPI shutdown request or other hardware-specific action) -# rather than a host request (such as sending qemu a SIGINT). (since 2.10) +# @guest: If true, the shutdown was triggered by a guest request (such +# as a guest-initiated ACPI shutdown request or other +# hardware-specific action) rather than a host request (such as +# sending qemu a SIGINT). (since 2.10) # -# @reason: The @ShutdownCause which resulted in the SHUTDOWN. (since 4.0) +# @reason: The @ShutdownCause which resulted in the SHUTDOWN. (since +# 4.0) # -# Note: If the command-line option "-no-shutdown" has been specified, qemu will -# not exit, and a STOP event will eventually follow the SHUTDOWN event +# Note: If the command-line option "-no-shutdown" has been specified, +# qemu will not exit, and a STOP event will eventually follow the +# SHUTDOWN event # # Since: 0.12 # @@ -157,15 +171,14 @@ # <- { "event": "SHUTDOWN", # "data": { "guest": true, "reason": "guest-shutdown" }, # "timestamp": { "seconds": 1267040730, "microseconds": 682951 } } -# ## { 'event': 'SHUTDOWN', 'data': { 'guest': 'bool', 'reason': 'ShutdownCause' } } ## # @POWERDOWN: # -# Emitted when the virtual machine is powered down through the power control -# system, such as via ACPI. +# Emitted when the virtual machine is powered down through the power +# control system, such as via ACPI. 
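Events such as SHUTDOWN, POWERDOWN and RESET arrive on the monitor socket unprompted, so the single-command helper sketched earlier simply discards them; a watcher has to keep reading. A sketch under the same socket-path assumption::

    import json
    import socket

    def watch_events(sock_path):
        """Print every QMP event as it arrives (blocks forever)."""
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(sock_path)
            f = s.makefile("rw", encoding="utf-8")
            f.readline()                          # server greeting
            f.write(json.dumps({"execute": "qmp_capabilities"}) + "\n")
            f.flush()
            for line in f:
                msg = json.loads(line)
                if "event" in msg:                # e.g. SHUTDOWN, POWERDOWN
                    print(msg["event"], msg.get("data", {}))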
# # Since: 0.12 # @@ -173,7 +186,6 @@ # # <- { "event": "POWERDOWN", # "timestamp": { "seconds": 1267040730, "microseconds": 682951 } } -# ## { 'event': 'POWERDOWN' } @@ -183,9 +195,9 @@ # Emitted when the virtual machine is reset # # @guest: If true, the reset was triggered by a guest request (such as -# a guest-initiated ACPI reboot request or other hardware-specific action) -# rather than a host request (such as the QMP command system_reset). -# (since 2.10) +# a guest-initiated ACPI reboot request or other hardware-specific +# action) rather than a host request (such as the QMP command +# system_reset). (since 2.10) # # @reason: The @ShutdownCause of the RESET. (since 4.0) # @@ -196,7 +208,6 @@ # <- { "event": "RESET", # "data": { "guest": false, "reason": "guest-reset" }, # "timestamp": { "seconds": 1267041653, "microseconds": 9518 } } -# ## { 'event': 'RESET', 'data': { 'guest': 'bool', 'reason': 'ShutdownCause' } } @@ -211,7 +222,6 @@ # # <- { "event": "STOP", # "timestamp": { "seconds": 1267041730, "microseconds": 281295 } } -# ## { 'event': 'STOP' } @@ -226,15 +236,14 @@ # # <- { "event": "RESUME", # "timestamp": { "seconds": 1271770767, "microseconds": 582542 } } -# ## { 'event': 'RESUME' } ## # @SUSPEND: # -# Emitted when guest enters a hardware suspension state, for example, S3 state, -# which is sometimes called standby state +# Emitted when guest enters a hardware suspension state, for example, +# S3 state, which is sometimes called standby state # # Since: 1.1 # @@ -242,32 +251,33 @@ # # <- { "event": "SUSPEND", # "timestamp": { "seconds": 1344456160, "microseconds": 309119 } } -# ## { 'event': 'SUSPEND' } ## # @SUSPEND_DISK: # -# Emitted when guest enters a hardware suspension state with data saved on -# disk, for example, S4 state, which is sometimes called hibernate state +# Emitted when guest enters a hardware suspension state with data +# saved on disk, for example, S4 state, which is sometimes called +# hibernate state # -# Note: QEMU shuts down (similar to event @SHUTDOWN) when entering this state +# Note: QEMU shuts down (similar to event @SHUTDOWN) when entering +# this state # # Since: 1.2 # # Example: # -# <- { "event": "SUSPEND_DISK", -# "timestamp": { "seconds": 1344456160, "microseconds": 309119 } } -# +# <- { "event": "SUSPEND_DISK", +# "timestamp": { "seconds": 1344456160, "microseconds": 309119 } } ## { 'event': 'SUSPEND_DISK' } ## # @WAKEUP: # -# Emitted when the guest has woken up from suspend state and is running +# Emitted when the guest has woken up from suspend state and is +# running # # Since: 1.1 # @@ -275,7 +285,6 @@ # # <- { "event": "WAKEUP", # "timestamp": { "seconds": 1344522075, "microseconds": 745528 } } -# ## { 'event': 'WAKEUP' } @@ -286,8 +295,9 @@ # # @action: action that has been taken # -# Note: If action is "reset", "shutdown", or "pause" the WATCHDOG event is -# followed respectively by the RESET, SHUTDOWN, or STOP events +# Note: If action is "reset", "shutdown", or "pause" the WATCHDOG +# event is followed respectively by the RESET, SHUTDOWN, or STOP +# events # # Note: This event is rate-limited. 
# @@ -298,7 +308,6 @@ # <- { "event": "WATCHDOG", # "data": { "action": "reset" }, # "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } -# ## { 'event': 'WATCHDOG', 'data': { 'action': 'WatchdogAction' } } @@ -306,13 +315,13 @@ ## # @WatchdogAction: # -# An enumeration of the actions taken when the watchdog device's timer is -# expired +# An enumeration of the actions taken when the watchdog device's timer +# has expired # # @reset: system resets # -# @shutdown: system shutdown, note that it is similar to @powerdown, which -# tries to set to system status and notify guest +# @shutdown: system shutdown, note that it is similar to @powerdown, +# which tries to set the system status and notify the guest # # @poweroff: system poweroff, the emulator program exits # @@ -322,8 +331,8 @@ # # @none: nothing is done # -# @inject-nmi: a non-maskable interrupt is injected into the first VCPU (all -# VCPUS on x86) (since 2.4) +# @inject-nmi: a non-maskable interrupt is injected into the first +# VCPU (all VCPUS on x86) (since 2.4) # # Since: 2.1 ## @@ -338,7 +347,8 @@ # # @reset: Reset the VM # -# @shutdown: Shutdown the VM and exit, according to the shutdown action +# @shutdown: Shutdown the VM and exit, according to the shutdown +# action # # Since: 6.0 ## @@ -366,10 +376,11 @@ # # @pause: Pause the VM # -# @shutdown: Shutdown the VM and exit, according to the shutdown action +# @shutdown: Shutdown the VM and exit, according to the shutdown +# action # -# @exit-failure: Shutdown the VM and exit with nonzero status -# (since 7.1) +# @exit-failure: Shutdown the VM and exit with nonzero status (since +# 7.1) # # Since: 6.0 ## @@ -388,8 +399,8 @@ ## # @set-action: # -# Set the actions that will be taken by the emulator in response to guest -# events. +# Set the actions that will be taken by the emulator in response to +# guest events. # # @reboot: @RebootAction action taken on guest reboot. # @@ -397,7 +408,8 @@ # # @panic: @PanicAction action taken on guest panic. # -# @watchdog: @WatchdogAction action taken when watchdog timer expires . +# @watchdog: @WatchdogAction action taken when watchdog timer +# expires. # # Returns: Nothing on success.
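Taken together, one set-action call can configure all four policies. A hedged sketch with the hypothetical qmp() helper; reboot, panic and watchdog values appear in the enums documented above, while "poweroff" for @shutdown is an assumed ShutdownAction value::

    qmp("/tmp/qmp.sock", "set-action",
        {"reboot": "reset",        # RebootAction
         "shutdown": "poweroff",   # assumed ShutdownAction value
         "panic": "pause",         # PanicAction
         "watchdog": "reset"})     # WatchdogAction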
# @@ -435,7 +447,6 @@ # <- { "event": "GUEST_PANICKED", # "data": { "action": "pause" }, # "timestamp": { "seconds": 1648245231, "microseconds": 900001 } } -# ## { 'event': 'GUEST_PANICKED', 'data': { 'action': 'GuestPanicAction', '*info': 'GuestPanicInformation' } } @@ -456,7 +467,6 @@ # <- { "event": "GUEST_CRASHLOADED", # "data": { "action": "run" }, # "timestamp": { "seconds": 1648245259, "microseconds": 893771 } } -# ## { 'event': 'GUEST_CRASHLOADED', 'data': { 'action': 'GuestPanicAction', '*info': 'GuestPanicInformation' } } @@ -468,7 +478,11 @@ # # @pause: system pauses # -# Since: 2.1 (poweroff since 2.8, run since 5.0) +# @poweroff: system powers off (since 2.8) +# +# @run: system continues to run (since 5.0) +# +# Since: 2.1 ## { 'enum': 'GuestPanicAction', 'data': [ 'pause', 'poweroff', 'run' ] } @@ -499,8 +513,8 @@ {'union': 'GuestPanicInformation', 'base': {'type': 'GuestPanicInformationType'}, 'discriminator': 'type', - 'data': { 'hyper-v': 'GuestPanicInformationHyperV', - 's390': 'GuestPanicInformationS390' } } + 'data': {'hyper-v': 'GuestPanicInformationHyperV', + 's390': 'GuestPanicInformationS390'}} ## # @GuestPanicInformationHyperV: @@ -510,11 +524,11 @@ # Since: 2.9 ## {'struct': 'GuestPanicInformationHyperV', - 'data': { 'arg1': 'uint64', - 'arg2': 'uint64', - 'arg3': 'uint64', - 'arg4': 'uint64', - 'arg5': 'uint64' } } + 'data': {'arg1': 'uint64', + 'arg2': 'uint64', + 'arg3': 'uint64', + 'arg4': 'uint64', + 'arg5': 'uint64'}} ## # @S390CrashReason: @@ -525,13 +539,13 @@ # # @disabled-wait: the CPU has entered a disabled wait state # -# @extint-loop: clock comparator or cpu timer interrupt with new PSW enabled -# for external interrupts +# @extint-loop: clock comparator or cpu timer interrupt with new PSW +# enabled for external interrupts # # @pgmint-loop: program interrupt with BAD new PSW # -# @opint-loop: operation exception interrupt with invalid code at the program -# interrupt new PSW +# @opint-loop: operation exception interrupt with invalid code at the +# program interrupt new PSW # # Since: 2.12 ## @@ -548,17 +562,20 @@ # S390 specific guest panic information (PSW) # # @core: core id of the CPU that crashed +# # @psw-mask: control fields of guest PSW +# # @psw-addr: guest instruction address +# # @reason: guest crash reason # # Since: 2.12 ## {'struct': 'GuestPanicInformationS390', - 'data': { 'core': 'uint32', - 'psw-mask': 'uint64', - 'psw-addr': 'uint64', - 'reason': 'S390CrashReason' } } + 'data': {'core': 'uint32', + 'psw-mask': 'uint64', + 'psw-addr': 'uint64', + 'reason': 'S390CrashReason'}} ## # @MEMORY_FAILURE: @@ -567,9 +584,11 @@ # # @recipient: recipient is defined as @MemoryFailureRecipient. # -# @action: action that has been taken. action is defined as @MemoryFailureAction. +# @action: action that has been taken. action is defined as +# @MemoryFailureAction. # -# @flags: flags for MemoryFailureAction. action is defined as @MemoryFailureFlags. +# @flags: flags for MemoryFailureAction. action is defined as +# @MemoryFailureFlags. # # Since: 5.2 # @@ -581,7 +600,6 @@ # "flags": { "action-required": false, # "recursive": false } }, # "timestamp": { "seconds": 1267061043, "microseconds": 959568 } } -# ## { 'event': 'MEMORY_FAILURE', 'data': { 'recipient': 'MemoryFailureRecipient', @@ -593,8 +611,8 @@ # # Hardware memory failure occurs, handled by recipient. # -# @hypervisor: memory failure at QEMU process address space. -# (none guest memory, but used by QEMU itself). +# @hypervisor: memory failure at QEMU process address space. 
(not +# guest memory, but used by QEMU itself). # # @guest: memory failure at guest memory, # @@ -609,19 +627,20 @@ # # Actions taken by QEMU in response to a hardware memory failure. # -# @ignore: the memory failure could be ignored. This will only be the case -# for action-optional failures. +# @ignore: the memory failure could be ignored. This will only be the +# case for action-optional failures. # -# @inject: memory failure occurred in guest memory, the guest enabled MCE -# handling mechanism, and QEMU could inject the MCE into the guest -# successfully. +# @inject: memory failure occurred in guest memory, the guest enabled +# MCE handling mechanism, and QEMU could inject the MCE into the +# guest successfully. # -# @fatal: the failure is unrecoverable. This occurs for action-required -# failures if the recipient is the hypervisor; QEMU will exit. +# @fatal: the failure is unrecoverable. This occurs for +# action-required failures if the recipient is the hypervisor; +# QEMU will exit. # -# @reset: the failure is unrecoverable but confined to the guest. This -# occurs if the recipient is a guest guest which is not ready -# to handle memory failures. +# @reset: the failure is unrecoverable but confined to the guest. +# This occurs if the recipient is a guest which is not ready +# to handle memory failures. # # Since: 5.2 ## @@ -637,10 +656,10 @@ # Additional information on memory failures. # # @action-required: whether a memory failure event is action-required -# or action-optional (e.g. a failure during memory scrub). +# or action-optional (e.g. a failure during memory scrub). # -# @recursive: whether the failure occurred while the previous -# failure was still in progress. +# @recursive: whether the failure occurred while the previous failure +# was still in progress. # # Since: 5.2 ## @@ -653,14 +672,15 @@ # # An enumeration of the options specified when enabling notify VM exit # -# @run: enable the feature, do nothing and continue if the notify VM exit happens. +# @run: enable the feature, do nothing and continue if the notify VM +# exit happens. # -# @internal-error: enable the feature, raise a internal error if the notify -# VM exit happens. +# @internal-error: enable the feature, raise an internal error if the +# notify VM exit happens. # # @disable: disable the feature. # # Since: 7.2 ## { 'enum': 'NotifyVmexitOption', - 'data': [ 'run', 'internal-error', 'disable' ] } \ No newline at end of file + 'data': [ 'run', 'internal-error', 'disable' ] } diff --git a/qapi/sockets.json b/qapi/sockets.json index bad74e34d3..6213154525 100644 --- a/qapi/sockets.json +++ b/qapi/sockets.json @@ -41,21 +41,24 @@ ## # @InetSocketAddress: # -# Captures a socket address or address range in the Internet namespace. +# Captures a socket address or address range in the Internet +# namespace. # -# @numeric: true if the host/port are guaranteed to be numeric, -# false if name resolution should be attempted. Defaults to false. -# (Since 2.9) +# @numeric: true if the host/port are guaranteed to be numeric, false +# if name resolution should be attempted. Defaults to false. +# (Since 2.9) # # @to: If present, this is range of possible addresses, with port -# between @port and @to.
# -# @ipv4: whether to accept IPv4 addresses, default try both IPv4 and IPv6 +# @ipv4: whether to accept IPv4 addresses, default try both IPv4 and +# IPv6 # -# @ipv6: whether to accept IPv6 addresses, default try both IPv4 and IPv6 +# @ipv6: whether to accept IPv6 addresses, default try both IPv4 and +# IPv6 # -# @keep-alive: enable keep-alive when connecting to this socket. Not supported -# for passive sockets. (Since 4.2) +# @keep-alive: enable keep-alive when connecting to this socket. Not +# supported for passive sockets. (Since 4.2) # # @mptcp: enable multi-path TCP. (Since 6.1) # @@ -77,12 +80,14 @@ # Captures a socket address in the local ("Unix socket") namespace. # # @path: filesystem path to use +# # @abstract: if true, this is a Linux abstract socket address. @path -# will be prefixed by a null byte, and optionally padded -# with null bytes. Defaults to false. (Since 5.1) +# will be prefixed by a null byte, and optionally padded with null +# bytes. Defaults to false. (Since 5.1) +# # @tight: if false, pad an abstract socket address with enough null -# bytes to make it fill struct sockaddr_un member sun_path. -# Defaults to true. (Since 5.1) +# bytes to make it fill struct sockaddr_un member sun_path. +# Defaults to true. (Since 5.1) # # Since: 1.3 ## @@ -98,10 +103,11 @@ # Captures a socket address in the vsock namespace. # # @cid: unique host identifier +# # @port: port # # Note: string types are used to allow for possible future hostname or -# service resolution support. +# service resolution support. # # Since: 2.8 ## @@ -145,11 +151,12 @@ ## # @SocketAddressLegacy: # -# Captures the address of a socket, which could also be a named file descriptor +# Captures the address of a socket, which could also be a named file +# descriptor # # Note: This type is deprecated in favor of SocketAddress. The -# difference between SocketAddressLegacy and SocketAddress is that the -# latter has fewer {} on the wire. +# difference between SocketAddressLegacy and SocketAddress is that +# the latter has fewer {} on the wire. # # Since: 1.3 ## @@ -173,10 +180,11 @@ # # @vsock: VMCI address # -# @fd: decimal is for file descriptor number, otherwise a file descriptor name. -# Named file descriptors are permitted in monitor commands, in combination -# with the 'getfd' command. Decimal file descriptors are permitted at -# startup or other contexts where no monitor context is active. +# @fd: decimal is for file descriptor number, otherwise a file +# descriptor name. Named file descriptors are permitted in +# monitor commands, in combination with the 'getfd' command. +# Decimal file descriptors are permitted at startup or other +# contexts where no monitor context is active. # # Since: 2.9 ## diff --git a/qapi/stats.json b/qapi/stats.json index 57db5b1c74..01791e86d5 100644 --- a/qapi/stats.json +++ b/qapi/stats.json @@ -18,11 +18,15 @@ # Enumeration of statistics types # # @cumulative: stat is cumulative; value can only increase. +# # @instant: stat is instantaneous; value can increase or decrease. +# # @peak: stat is the peak value; value can only increase. +# # @linear-histogram: stat is a linear histogram. +# # @log2-histogram: stat is a logarithmic histogram, with one bucket -# for each power of two. +# for each power of two. # # Since: 7.1 ## @@ -36,8 +40,11 @@ # Enumeration of unit of measurement for statistics # # @bytes: stat reported in bytes. +# # @seconds: stat reported in seconds. +# # @cycles: stat reported in clock cycles. +# # @boolean: stat is a boolean value. 
# # Since: 7.1 @@ -50,33 +57,40 @@ # # Enumeration of statistics providers. # +# @kvm: since 7.1 +# +# @cryptodev: since 8.0 +# # Since: 7.1 ## { 'enum': 'StatsProvider', - 'data': [ 'kvm' ] } + 'data': [ 'kvm', 'cryptodev' ] } ## # @StatsTarget: # # The kinds of objects on which one can request statistics. # -# @vm: statistics that apply to the entire virtual machine or -# the entire QEMU process. +# @vm: statistics that apply to the entire virtual machine or the +# entire QEMU process. # # @vcpu: statistics that apply to a single virtual CPU. # +# @cryptodev: statistics that apply to a crypto device (since 8.0) +# # Since: 7.1 ## { 'enum': 'StatsTarget', - 'data': [ 'vm', 'vcpu' ] } + 'data': [ 'vm', 'vcpu', 'cryptodev' ] } ## # @StatsRequest: # -# Indicates a set of statistics that should be returned by query-stats. +# Indicates a set of statistics that should be returned by +# query-stats. # # @provider: provider for which to return statistics. - +# # @names: statistics to be returned (all if omitted). # # Since: 7.1 @@ -98,9 +112,10 @@ ## # @StatsFilter: # -# The arguments to the query-stats command; specifies a target for which to -# request statistics and optionally the required subset of information for -# that target: +# The arguments to the query-stats command; specifies a target for +# which to request statistics and optionally the required subset of +# information for that target: +# # - which vCPUs to request statistics for # - which providers to request statistics from # - which named values to return within each provider @@ -118,6 +133,7 @@ # @StatsValue: # # @scalar: single unsigned 64-bit integers. +# # @list: list of unsigned 64-bit integers (used for histograms). # # Since: 7.1 @@ -131,6 +147,7 @@ # @Stats: # # @name: name of stat. +# # @value: stat value. # # Since: 7.1 @@ -145,7 +162,7 @@ # @provider: provider for this set of statistics. # # @qom-path: Path to the object for which the statistics are returned, -# if the object is exposed in the QOM tree +# if the object is exposed in the QOM tree # # @stats: list of statistics. # @@ -159,14 +176,14 @@ ## # @query-stats: # -# Return runtime-collected statistics for objects such as the -# VM or its vCPUs. +# Return runtime-collected statistics for objects such as the VM or +# its vCPUs. # # The arguments are a StatsFilter and specify the provider and objects # to return statistics about. # # Returns: a list of StatsResult, one for each provider and object -# (e.g., for each vCPU). +# (e.g., for each vCPU). # # Since: 7.1 ## @@ -181,24 +198,25 @@ # Schema for a single statistic. # # @name: name of the statistic; each element of the schema is uniquely -# identified by a target, a provider (both available in @StatsSchema) -# and the name. +# identified by a target, a provider (both available in +# @StatsSchema) and the name. # # @type: kind of statistic. # -# @unit: basic unit of measure for the statistic; if missing, the statistic -# is a simple number or counter. +# @unit: basic unit of measure for the statistic; if missing, the +# statistic is a simple number or counter. # -# @base: base for the multiple of @unit in which the statistic is measured. -# Only present if @exponent is non-zero; @base and @exponent together -# form a SI prefix (e.g., _nano-_ for ``base=10`` and ``exponent=-9``) -# or IEC binary prefix (e.g. _kibi-_ for ``base=2`` and ``exponent=10``) +# @base: base for the multiple of @unit in which the statistic is +# measured. 
Only present if @exponent is non-zero; @base and +# @exponent together form a SI prefix (e.g., _nano-_ for +# ``base=10`` and ``exponent=-9``) or IEC binary prefix (e.g. +# _kibi-_ for ``base=2`` and ``exponent=10``) # -# @exponent: exponent for the multiple of @unit in which the statistic is -# expressed, or 0 for the basic unit +# @exponent: exponent for the multiple of @unit in which the statistic +# is expressed, or 0 for the basic unit # -# @bucket-size: Present when @type is "linear-histogram", contains the width -# of each bucket of the histogram. +# @bucket-size: Present when @type is "linear-histogram", contains the +# width of each bucket of the histogram. # # Since: 7.1 ## @@ -217,7 +235,8 @@ # # @provider: provider for this set of statistics. # -# @target: the kind of object that can be queried through the provider. +# @target: the kind of object that can be queried through the +# provider. # # @stats: list of statistics. # @@ -233,16 +252,17 @@ # # Return the schema for all available runtime-collected statistics. # -# Note: runtime-collected statistics and their names fall outside QEMU's usual -# deprecation policies. QEMU will try to keep the set of available data -# stable, together with their names, but will not guarantee stability -# at all costs; the same is true of providers that source statistics -# externally, e.g. from Linux. For example, if the same value is being -# tracked with different names on different architectures or by different -# providers, one of them might be renamed. A statistic might go away if -# an algorithm is changed or some code is removed; changing a default -# might cause previously useful statistics to always report 0. Such -# changes, however, are expected to be rare. +# Note: runtime-collected statistics and their names fall outside +# QEMU's usual deprecation policies. QEMU will try to keep the +# set of available data stable, together with their names, but +# will not guarantee stability at all costs; the same is true of +# providers that source statistics externally, e.g. from Linux. +# For example, if the same value is being tracked with different +# names on different architectures or by different providers, one +# of them might be renamed. A statistic might go away if an +# algorithm is changed or some code is removed; changing a default +# might cause previously useful statistics to always report 0. +# Such changes, however, are expected to be rare. 
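A sketch of the two stats commands via the hypothetical qmp() helper from the earlier aside; the kvm provider and the vm target come from the enums above::

    # Discover what the KVM provider can report, then fetch VM-wide stats:
    schemas = qmp("/tmp/qmp.sock", "query-stats-schemas",
                  {"provider": "kvm"})
    vm_stats = qmp("/tmp/qmp.sock", "query-stats",
                   {"target": "vm",
                    "providers": [{"provider": "kvm"}]})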
# # Since: 7.1 ## diff --git a/qapi/tpm.json b/qapi/tpm.json index 4e2ea9756a..a754455ca5 100644 --- a/qapi/tpm.json +++ b/qapi/tpm.json @@ -12,7 +12,9 @@ # An enumeration of TPM models # # @tpm-tis: TPM TIS model +# # @tpm-crb: TPM CRB model (since 2.12) +# # @tpm-spapr: TPM SPAPR model (since 5.0) # # Since: 1.5 @@ -33,7 +35,6 @@ # # -> { "execute": "query-tpm-models" } # <- { "return": [ "tpm-tis", "tpm-crb", "tpm-spapr" ] } -# ## { 'command': 'query-tpm-models', 'returns': ['TpmModel'], 'if': 'CONFIG_TPM' } @@ -44,8 +45,8 @@ # An enumeration of TPM types # # @passthrough: TPM passthrough type -# @emulator: Software Emulator TPM type -# Since: 2.11 +# +# @emulator: Software Emulator TPM type (since 2.11) # # Since: 1.5 ## @@ -65,7 +66,6 @@ # # -> { "execute": "query-tpm-types" } # <- { "return": [ "passthrough", "emulator" ] } -# ## { 'command': 'query-tpm-types', 'returns': ['TpmType'], 'if': 'CONFIG_TPM' } @@ -77,8 +77,8 @@ # # @path: string describing the path used for accessing the TPM device # -# @cancel-path: string showing the TPM's sysfs cancel file -# for cancellation of TPM commands while they are executing +# @cancel-path: string showing the TPM's sysfs cancel file for +# cancellation of TPM commands while they are executing # # Since: 1.5 ## @@ -120,10 +120,14 @@ ## # @TpmTypeOptions: # -# A union referencing different TPM backend types' configuration options +# A union referencing different TPM backend types' configuration +# options # -# @type: - 'passthrough' The configuration options for the TPM passthrough type -# - 'emulator' The configuration options for TPM emulator backend type +# @type: +# - 'passthrough' The configuration options for the TPM +# passthrough type +# - 'emulator' The configuration options for TPM emulator backend +# type # # Since: 1.5 ## @@ -179,7 +183,6 @@ # } # ] # } -# ## { 'command': 'query-tpm', 'returns': ['TPMInfo'], 'if': 'CONFIG_TPM' } diff --git a/qapi/trace.json b/qapi/trace.json index 6c6982a587..6bf0af0946 100644 --- a/qapi/trace.json +++ b/qapi/trace.json @@ -32,11 +32,13 @@ # Information of a tracing event. # # @name: Event name. +# # @state: Tracing state. +# # @vcpu: Whether this is a per-vCPU event (since 2.7). # -# An event is per-vCPU if it has the "vcpu" property in the "trace-events" -# files. +# An event is per-vCPU if it has the "vcpu" property in the +# "trace-events" files. # # Since: 2.2 ## @@ -49,19 +51,20 @@ # Query the state of events. # # @name: Event name pattern (case-sensitive glob). +# # @vcpu: The vCPU to query (any by default; since 2.7). # # Returns: a list of @TraceEventInfo for the matching events # -# An event is returned if: +# An event is returned if: # -# - its name matches the @name pattern, and -# - if @vcpu is given, the event has the "vcpu" property. +# - its name matches the @name pattern, and +# - if @vcpu is given, the event has the "vcpu" property. # -# Therefore, if @vcpu is given, the operation will only match per-vCPU events, -# returning their state on the specified vCPU. Special case: if @name is an -# exact match, @vcpu is given and the event does not have the "vcpu" property, -# an error is returned. +# Therefore, if @vcpu is given, the operation will only match per-vCPU +# events, returning their state on the specified vCPU. Special case: +# if @name is an exact match, @vcpu is given and the event does not +# have the "vcpu" property, an error is returned. 
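For instance, with the qmp() helper sketched earlier, a glob query for the state of a family of events might look like this (the pattern is an illustrative assumption)::

    states = qmp("/tmp/qmp.sock", "trace-event-get-state",
                 {"name": "qemu_*"})
    for ev in states["return"]:
        print(ev["name"], ev["state"])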
# # Since: 2.2 # @@ -70,7 +73,6 @@ # -> { "execute": "trace-event-get-state", # "arguments": { "name": "qemu_memalign" } } # <- { "return": [ { "name": "qemu_memalign", "state": "disabled", "vcpu": false } ] } -# ## { 'command': 'trace-event-get-state', 'data': {'name': 'str', '*vcpu': 'int'}, @@ -82,18 +84,22 @@ # Set the dynamic tracing state of events. # # @name: Event name pattern (case-sensitive glob). +# # @enable: Whether to enable tracing. +# # @ignore-unavailable: Do not match unavailable events with @name. +# # @vcpu: The vCPU to act upon (all by default; since 2.7). # # An event's state is modified if: +# # - its name matches the @name pattern, and # - if @vcpu is given, the event has the "vcpu" property. # -# Therefore, if @vcpu is given, the operation will only match per-vCPU events, -# setting their state on the specified vCPU. Special case: if @name is an exact -# match, @vcpu is given and the event does not have the "vcpu" property, an -# error is returned. +# Therefore, if @vcpu is given, the operation will only match per-vCPU +# events, setting their state on the specified vCPU. Special case: if +# @name is an exact match, @vcpu is given and the event does not have +# the "vcpu" property, an error is returned. # # Since: 2.2 # @@ -102,7 +108,6 @@ # -> { "execute": "trace-event-set-state", # "arguments": { "name": "qemu_memalign", "enable": true } } # <- { "return": {} } -# ## { 'command': 'trace-event-set-state', 'data': {'name': 'str', 'enable': 'bool', '*ignore-unavailable': 'bool', diff --git a/qapi/transaction.json b/qapi/transaction.json index 381a2df782..cffee2de28 100644 --- a/qapi/transaction.json +++ b/qapi/transaction.json @@ -23,15 +23,15 @@ # # An enumeration of Transactional completion modes. # -# @individual: Do not attempt to cancel any other Actions if any Actions fail -# after the Transaction request succeeds. All Actions that -# can complete successfully will do so without waiting on others. -# This is the default. +# @individual: Do not attempt to cancel any other Actions if any +# Actions fail after the Transaction request succeeds. All +# Actions that can complete successfully will do so without +# waiting on others. This is the default. # -# @grouped: If any Action fails after the Transaction succeeds, cancel all -# Actions. Actions do not complete until all Actions are ready to -# complete. May be rejected by Actions that do not support this -# completion mode. +# @grouped: If any Action fails after the Transaction succeeds, cancel +# all Actions. Actions do not complete until all Actions are +# ready to complete. May be rejected by Actions that do not +# support this completion mode. # # Since: 2.5 ## @@ -42,21 +42,33 @@ # @TransactionActionKind: # # @abort: Since 1.6 +# # @block-dirty-bitmap-add: Since 2.5 +# # @block-dirty-bitmap-remove: Since 4.2 +# # @block-dirty-bitmap-clear: Since 2.5 +# # @block-dirty-bitmap-enable: Since 4.0 +# # @block-dirty-bitmap-disable: Since 4.0 +# # @block-dirty-bitmap-merge: Since 4.0 +# # @blockdev-backup: Since 2.3 +# # @blockdev-snapshot: Since 2.5 +# # @blockdev-snapshot-internal-sync: Since 1.7 +# # @blockdev-snapshot-sync: since 1.1 +# # @drive-backup: Since 1.6 # # Features: +# # @deprecated: Member @drive-backup is deprecated. Use member -# @blockdev-backup instead. +# @blockdev-backup instead. # # Since: 1.1 ## @@ -172,8 +184,8 @@ # Optional arguments to modify the behavior of a Transaction. # # @completion-mode: Controls how jobs launched asynchronously by -# Actions will complete or fail as a group. 
-# See @ActionCompletionMode for details. +# Actions will complete or fail as a group. See +# @ActionCompletionMode for details. # # Since: 2.5 ## @@ -186,46 +198,48 @@ ## # @transaction: # -# Executes a number of transactionable QMP commands atomically. If any -# operation fails, then the entire set of actions will be abandoned and the -# appropriate error returned. +# Executes a number of transactionable QMP commands atomically. If +# any operation fails, then the entire set of actions will be +# abandoned and the appropriate error returned. # -# For external snapshots, the dictionary contains the device, the file to use for -# the new snapshot, and the format. The default format, if not specified, is -# qcow2. +# For external snapshots, the dictionary contains the device, the file +# to use for the new snapshot, and the format. The default format, if +# not specified, is qcow2. # # Each new snapshot defaults to being created by QEMU (wiping any -# contents if the file already exists), but it is also possible to reuse -# an externally-created file. In the latter case, you should ensure that -# the new image file has the same contents as the current one; QEMU cannot -# perform any meaningful check. Typically this is achieved by using the -# current image file as the backing file for the new image. +# contents if the file already exists), but it is also possible to +# reuse an externally-created file. In the latter case, you should +# ensure that the new image file has the same contents as the current +# one; QEMU cannot perform any meaningful check. Typically this is +# achieved by using the current image file as the backing file for the +# new image. # # On failure, the original disks' pre-snapshot state will be used. # # For internal snapshots, the dictionary contains the device and the -# snapshot's name. If an internal snapshot matching name already exists, -# the request will be rejected. Only some image formats support it, for -# example, qcow2, and rbd, +# snapshot's name. If an internal snapshot with a matching name +# already exists, the request will be rejected. Only some image +# formats support it, for example qcow2 and rbd. # -# On failure, qemu will try delete the newly created internal snapshot in the -# transaction. When an I/O error occurs during deletion, the user needs to fix -# it later with qemu-img or other command. +# On failure, qemu will try to delete the newly created internal +# snapshot in the transaction. When an I/O error occurs during +# deletion, the user needs to fix it later with qemu-img or another +# command. # -# @actions: List of @TransactionAction; -# information needed for the respective operations. +# @actions: List of @TransactionAction; information needed for the +# respective operations. # # @properties: structure of additional options to control the -# execution of the transaction. See @TransactionProperties -# for additional detail. +# execution of the transaction. See @TransactionProperties for +# additional detail. # # Returns: nothing on success # -# Errors depend on the operations of the transaction +# Errors depend on the operations of the transaction # -# Note: The transaction aborts on the first failure. Therefore, there will be -# information on only one failed operation returned in an error condition, and -# subsequent actions will not have been attempted. +# Note: The transaction aborts on the first failure.
Therefore, there +# will be information on only one failed operation returned in an +# error condition, and subsequent actions will not have been +# attempted. # # Since: 1.1 # @@ -249,7 +263,6 @@ # "device": "ide-hd2", # "name": "snapshot0" } } ] } } # <- { "return": {} } -# ## { 'command': 'transaction', 'data': { 'actions': [ 'TransactionAction' ], diff --git a/qapi/ui.json b/qapi/ui.json index 0abba3e930..2755395483 100644 --- a/qapi/ui.json +++ b/qapi/ui.json @@ -22,7 +22,8 @@ ## # @SetPasswordAction: # -# An action to take on changing a password on a connection with active clients. +# An action to take on changing a password on a connection with active +# clients. # # @keep: maintain existing clients # @@ -40,14 +41,15 @@ # # Options for set_password. # -# @protocol: - 'vnc' to modify the VNC server password -# - 'spice' to modify the Spice server password +# @protocol: +# - 'vnc' to modify the VNC server password +# - 'spice' to modify the Spice server password # # @password: the new password # # @connected: How to handle existing clients when changing the -# password. If nothing is specified, defaults to 'keep'. -# For VNC, only 'keep' is currently implemented. +# password. If nothing is specified, defaults to 'keep'. For VNC, +# only 'keep' is currently implemented. # # Since: 7.0 ## @@ -63,8 +65,8 @@ # # Options for set_password specific to the VNC protocol. # -# @display: The id of the display where the password should be changed. -# Defaults to the first. +# @display: The id of the display where the password should be +# changed. Defaults to the first. # # Since: 7.0 ## @@ -76,8 +78,9 @@ # # Set the password of a remote display server. # -# Returns: - Nothing on success -# - If Spice is not enabled, DeviceNotFound +# Returns: +# - Nothing on success +# - If Spice is not enabled, DeviceNotFound # # Since: 0.14 # @@ -86,7 +89,6 @@ # -> { "execute": "set_password", "arguments": { "protocol": "vnc", # "password": "secret" } } # <- { "return": {} } -# ## { 'command': 'set_password', 'boxed': true, 'data': 'SetPasswordOptions' } @@ -95,20 +97,22 @@ # # General options for expire_password. # -# @protocol: - 'vnc' to modify the VNC server expiration -# - 'spice' to modify the Spice server expiration +# @protocol: +# - 'vnc' to modify the VNC server expiration +# - 'spice' to modify the Spice server expiration # # @time: when to expire the password. # -# - 'now' to expire the password immediately -# - 'never' to cancel password expiration -# - '+INT' where INT is the number of seconds from now (integer) -# - 'INT' where INT is the absolute time in seconds +# - 'now' to expire the password immediately +# - 'never' to cancel password expiration +# - '+INT' where INT is the number of seconds from now (integer) +# - 'INT' where INT is the absolute time in seconds # -# Notes: Time is relative to the server and currently there is no way to -# coordinate server time with client time. It is not recommended to -# use the absolute time version of the @time parameter unless you're -# sure you are on the same machine as the QEMU instance. +# Notes: Time is relative to the server and currently there is no way +# to coordinate server time with client time. It is not +# recommended to use the absolute time version of the @time +# parameter unless you're sure you are on the same machine as the +# QEMU instance. # # Since: 7.0 ## @@ -123,8 +127,8 @@ # # Options for expire_password specific to the VNC protocol. # -# @display: The id of the display where the expiration should be changed.
-# Defaults to the first. +# @display: The id of the display where the expiration should be +# changed. Defaults to the first. # # Since: 7.0 ## @@ -136,8 +140,10 @@ # # Expire the password of a remote display server. # -# Returns: - Nothing on success -# - If @protocol is 'spice' and Spice is not active, DeviceNotFound +# Returns: +# - Nothing on success +# - If @protocol is 'spice' and Spice is not active, +# DeviceNotFound # # Since: 0.14 # @@ -146,7 +152,6 @@ # -> { "execute": "expire_password", "arguments": { "protocol": "vnc", # "time": "+60" } } # <- { "return": {} } -# ## { 'command': 'expire_password', 'boxed': true, 'data': 'ExpirePasswordOptions' } @@ -171,14 +176,16 @@ # # @filename: the path of a new file to store the image # -# @device: ID of the display device that should be dumped. If this parameter -# is missing, the primary display will be used. (Since 2.12) +# @device: ID of the display device that should be dumped. If this +# parameter is missing, the primary display will be used. (Since +# 2.12) # -# @head: head to use in case the device supports multiple heads. If this -# parameter is missing, head #0 will be used. Also note that the head -# can only be specified in conjunction with the device ID. (Since 2.12) +# @head: head to use in case the device supports multiple heads. If +# this parameter is missing, head #0 will be used. Also note that +# the head can only be specified in conjunction with the device +# ID. (Since 2.12) # -# @format: image format for screendump. (default: ppm) (Since 7.1) +# @format: image format for screendump. (default: ppm) (Since 7.1) # # Returns: Nothing on success # @@ -189,7 +196,6 @@ # -> { "execute": "screendump", # "arguments": { "filename": "/tmp/image" } } # <- { "return": {} } -# ## { 'command': 'screendump', 'data': {'filename': 'str', '*device': 'str', '*head': 'int', @@ -238,16 +244,16 @@ # # Information about a SPICE client channel. # -# @connection-id: SPICE connection id number. All channels with the same id -# belong to the same SPICE session. +# @connection-id: SPICE connection id number. All channels with the +# same id belong to the same SPICE session. # # @channel-type: SPICE channel type number. "1" is the main control -# channel, filter for this one if you want to track spice -# sessions only +# channel, filter for this one if you want to track spice sessions +# only # -# @channel-id: SPICE channel ID number. Usually "0", might be different when -# multiple channels of the same type exist, such as multiple -# display channels in a multihead setup +# @channel-id: SPICE channel ID number. Usually "0", might be +# different when multiple channels of the same type exist, such as +# multiple display channels in a multihead setup # # @tls: true if the channel is encrypted, false otherwise. # @@ -268,8 +274,8 @@ # # @server: Mouse cursor position is determined by the server. # -# @unknown: No information is available about mouse mode used by -# the spice server. +# @unknown: No information is available about mouse mode used by the +# spice server. # # Note: spice/enums.h has a SpiceMouseMode already, hence the name. # @@ -287,10 +293,10 @@ # @enabled: true if the SPICE server is enabled, false otherwise # # @migrated: true if the last guest migration completed and spice -# migration had completed as well. false otherwise. (since 1.4) +# migration had completed as well. false otherwise. (since 1.4) # # @host: The hostname the SPICE server is bound to. This depends on -# the name resolution on the host and may be an IP address. 
+# the name resolution on the host and may be an IP address. # # @port: The SPICE server's port number. # @@ -300,13 +306,14 @@ # # @auth: the current authentication type used by the server # -# - 'none' if no authentication is being used -# - 'spice' uses SASL or direct TLS authentication, depending on command -# line options +# - 'none' if no authentication is being used +# - 'spice' uses SASL or direct TLS authentication, depending on +# command line options # -# @mouse-mode: The mode in which the mouse cursor is displayed currently. Can -# be determined by the client or the server, or unknown if spice -# server doesn't provide this information. (since: 1.1) +# @mouse-mode: The mode in which the mouse cursor is displayed +# currently. Can be determined by the client or the server, or +# unknown if spice server doesn't provide this information. +# (since: 1.1) # # @channels: a list of @SpiceChannel for each active spice channel # @@ -361,7 +368,6 @@ # ] # } # } -# ## { 'command': 'query-spice', 'returns': 'SpiceInfo', 'if': 'CONFIG_SPICE' } @@ -385,7 +391,6 @@ # "server": { "port": "5920", "family": "ipv4", "host": "127.0.0.1"}, # "client": {"port": "52873", "family": "ipv4", "host": "127.0.0.1"} # }} -# ## { 'event': 'SPICE_CONNECTED', 'data': { 'server': 'SpiceBasicInfo', @@ -395,8 +400,8 @@ ## # @SPICE_INITIALIZED: # -# Emitted after initial handshake and authentication takes place (if any) -# and the SPICE channel is up and running +# Emitted after initial handshake and authentication takes place (if +# any) and the SPICE channel is up and running # # @server: server information # @@ -414,7 +419,6 @@ # "connection-id": 1804289383, "host": "127.0.0.1", # "channel-id": 0, "tls": true} # }} -# ## { 'event': 'SPICE_INITIALIZED', 'data': { 'server': 'SpiceServerInfo', @@ -440,7 +444,6 @@ # "server": { "port": "5920", "family": "ipv4", "host": "127.0.0.1"}, # "client": {"port": "52873", "family": "ipv4", "host": "127.0.0.1"} # }} -# ## { 'event': 'SPICE_DISCONNECTED', 'data': { 'server': 'SpiceBasicInfo', @@ -458,7 +461,6 @@ # # <- { "timestamp": {"seconds": 1290688046, "microseconds": 417172}, # "event": "SPICE_MIGRATE_COMPLETED" } -# ## { 'event': 'SPICE_MIGRATE_COMPLETED', 'if': 'CONFIG_SPICE' } @@ -474,9 +476,9 @@ # # @host: IP address # -# @service: The service name of the vnc port. This may depend on the host -# system's service database so symbolic names should not be relied -# on. +# @service: The service name of the vnc port. This may depend on the +# host system's service database so symbolic names should not be +# relied on. # # @family: address family # @@ -496,8 +498,8 @@ # # The network connection information for server # -# @auth: authentication method used for -# the plain (non-websocket) VNC server +# @auth: authentication method used for the plain (non-websocket) VNC +# server # # Since: 2.1 ## @@ -512,10 +514,10 @@ # Information about a connected VNC client. # # @x509_dname: If x509 authentication is in use, the Distinguished -# Name of the client. +# Name of the client. # # @sasl_username: If SASL authentication is in use, the SASL username -# used for authentication. +# used for authentication. # # Since: 0.14 ## @@ -531,33 +533,41 @@ # # @enabled: true if the VNC server is enabled, false otherwise # -# @host: The hostname the VNC server is bound to. This depends on -# the name resolution on the host and may be an IP address. +# @host: The hostname the VNC server is bound to. This depends on the +# name resolution on the host and may be an IP address. 
# -# @family: - 'ipv6' if the host is listening for IPv6 connections -# - 'ipv4' if the host is listening for IPv4 connections -# - 'unix' if the host is listening on a unix domain socket -# - 'unknown' otherwise +# @family: +# - 'ipv6' if the host is listening for IPv6 connections +# - 'ipv4' if the host is listening for IPv4 connections +# - 'unix' if the host is listening on a unix domain socket +# - 'unknown' otherwise # # @service: The service name of the server's port. This may depend -# on the host system's service database so symbolic names should not -# be relied on. +# on the host system's service database so symbolic names should +# not be relied on. # # @auth: the current authentication type used by the server # -# - 'none' if no authentication is being used -# - 'vnc' if VNC authentication is being used -# - 'vencrypt+plain' if VEncrypt is used with plain text authentication -# - 'vencrypt+tls+none' if VEncrypt is used with TLS and no authentication -# - 'vencrypt+tls+vnc' if VEncrypt is used with TLS and VNC authentication -# - 'vencrypt+tls+plain' if VEncrypt is used with TLS and plain text auth -# - 'vencrypt+x509+none' if VEncrypt is used with x509 and no auth -# - 'vencrypt+x509+vnc' if VEncrypt is used with x509 and VNC auth -# - 'vencrypt+x509+plain' if VEncrypt is used with x509 and plain text auth -# - 'vencrypt+tls+sasl' if VEncrypt is used with TLS and SASL auth -# - 'vencrypt+x509+sasl' if VEncrypt is used with x509 and SASL auth +# - 'none' if no authentication is being used +# - 'vnc' if VNC authentication is being used +# - 'vencrypt+plain' if VEncrypt is used with plain text +# authentication +# - 'vencrypt+tls+none' if VEncrypt is used with TLS and no +# authentication +# - 'vencrypt+tls+vnc' if VEncrypt is used with TLS and VNC +# authentication +# - 'vencrypt+tls+plain' if VEncrypt is used with TLS and plain +# text auth +# - 'vencrypt+x509+none' if VEncrypt is used with x509 and no auth +# - 'vencrypt+x509+vnc' if VEncrypt is used with x509 and VNC auth +# - 'vencrypt+x509+plain' if VEncrypt is used with x509 and plain +# text auth +# - 'vencrypt+tls+sasl' if VEncrypt is used with TLS and SASL auth +# - 'vencrypt+x509+sasl' if VEncrypt is used with x509 and SASL +# auth # -# @clients: a list of @VncClientInfo of all currently connected clients +# @clients: a list of @VncClientInfo of all currently connected +# clients # # Since: 0.14 ## @@ -601,8 +611,8 @@ # # @auth: The current authentication type used by the servers # -# @vencrypt: The vencrypt sub authentication type used by the -# servers, only specified in case auth == vencrypt. +# @vencrypt: The vencrypt sub authentication type used by the servers, +# only specified in case auth == vencrypt. # # Since: 2.9 ## @@ -620,17 +630,18 @@ # @id: vnc server name. # # @server: A list of @VncBasicInfo describing all listening sockets. -# The list can be empty (in case the vnc server is disabled). -# It also may have multiple entries: normal + websocket, -# possibly also ipv4 + ipv6 in the future. +# The list can be empty (in case the vnc server is disabled). It +# also may have multiple entries: normal + websocket, possibly +# also ipv4 + ipv6 in the future. # -# @clients: A list of @VncClientInfo of all currently connected clients. -# The list can be empty, for obvious reasons. +# @clients: A list of @VncClientInfo of all currently connected +# clients. The list can be empty, for obvious reasons.
# -# @auth: The current authentication type used by the non-websockets servers +# @auth: The current authentication type used by the non-websockets +# servers # # @vencrypt: The vencrypt authentication type used by the servers, -# only specified in case auth == vencrypt. +# only specified in case auth == vencrypt. # # @display: The display device the vnc server is linked to. # @@ -673,7 +684,6 @@ # ] # } # } -# ## { 'command': 'query-vnc', 'returns': 'VncInfo', 'if': 'CONFIG_VNC' } @@ -698,8 +708,9 @@ # # Since: 1.1 # -# Notes: An empty password in this command will set the password to the empty -# string. Existing clients are unaffected by executing this command. +# Notes: An empty password in this command will set the password to +# the empty string. Existing clients are unaffected by executing +# this command. ## { 'command': 'change-vnc-password', 'data': { 'password': 'str' }, @@ -714,8 +725,8 @@ # # @client: client information # -# Note: This event is emitted before any authentication takes place, thus -# the authentication ID is not provided +# Note: This event is emitted before any authentication takes place, +# thus the authentication ID is not provided # # Since: 0.13 # @@ -728,7 +739,6 @@ # "client": { "family": "ipv4", "service": "58425", # "host": "127.0.0.1", "websocket": false } }, # "timestamp": { "seconds": 1262976601, "microseconds": 975795 } } -# ## { 'event': 'VNC_CONNECTED', 'data': { 'server': 'VncServerInfo', @@ -738,8 +748,8 @@ ## # @VNC_INITIALIZED: # -# Emitted after authentication takes place (if any) and the VNC session is -# made active +# Emitted after authentication takes place (if any) and the VNC +# session is made active # # @server: server information # @@ -756,7 +766,6 @@ # "client": { "family": "ipv4", "service": "46089", "websocket": false, # "host": "127.0.0.1", "sasl_username": "luiz" } }, # "timestamp": { "seconds": 1263475302, "microseconds": 150772 } } -# ## { 'event': 'VNC_INITIALIZED', 'data': { 'server': 'VncServerInfo', @@ -783,7 +792,6 @@ # "client": { "family": "ipv4", "service": "58425", "websocket": false, # "host": "127.0.0.1", "sasl_username": "luiz" } }, # "timestamp": { "seconds": 1262976601, "microseconds": 975795 } } -# ## { 'event': 'VNC_DISCONNECTED', 'data': { 'server': 'VncServerInfo', @@ -805,7 +813,8 @@ # # @current: true if this device is currently receiving mouse events # -# @absolute: true if this device supports absolute coordinates as input +# @absolute: true if this device supports absolute coordinates as +# input # # Since: 0.14 ## @@ -840,7 +849,6 @@ # } # ] # } -# ## { 'command': 'query-mice', 'returns': ['MouseInfo'] } @@ -852,46 +860,96 @@ # This is used by the @send-key command. 
# # @unmapped: since 2.0 +# # @pause: since 2.0 +# # @ro: since 2.4 +# # @kp_comma: since 2.4 +# # @kp_equals: since 2.6 +# # @power: since 2.6 +# # @hiragana: since 2.9 +# # @henkan: since 2.9 +# # @yen: since 2.9 # # @sleep: since 2.10 +# # @wake: since 2.10 +# # @audionext: since 2.10 +# # @audioprev: since 2.10 +# # @audiostop: since 2.10 +# # @audioplay: since 2.10 +# # @audiomute: since 2.10 +# # @volumeup: since 2.10 +# # @volumedown: since 2.10 +# # @mediaselect: since 2.10 +# # @mail: since 2.10 +# # @calculator: since 2.10 +# # @computer: since 2.10 +# # @ac_home: since 2.10 +# # @ac_back: since 2.10 +# # @ac_forward: since 2.10 +# # @ac_refresh: since 2.10 +# # @ac_bookmarks: since 2.10 # # @muhenkan: since 2.12 +# # @katakanahiragana: since 2.12 # # @lang1: since 6.1 +# # @lang2: since 6.1 # -# 'sysrq' was mistakenly added to hack around the fact that -# the ps2 driver was not generating correct scancodes sequences -# when 'alt+print' was pressed. This flaw is now fixed and the -# 'sysrq' key serves no further purpose. Any further use of -# 'sysrq' will be transparently changed to 'print', so they -# are effectively synonyms. +# @f13: since 8.0 +# +# @f14: since 8.0 +# +# @f15: since 8.0 +# +# @f16: since 8.0 +# +# @f17: since 8.0 +# +# @f18: since 8.0 +# +# @f19: since 8.0 +# +# @f20: since 8.0 +# +# @f21: since 8.0 +# +# @f22: since 8.0 +# +# @f23: since 8.0 +# +# @f24: since 8.0 +# +# 'sysrq' was mistakenly added to hack around the fact that the ps2 +# driver was not generating correct scancode sequences when +# 'alt+print' was pressed. This flaw is now fixed and the 'sysrq' key +# serves no further purpose. Any further use of 'sysrq' will be +# transparently changed to 'print', so they are effectively synonyms. # # Since: 1.3 ## @@ -918,7 +976,7 @@ 'volumeup', 'volumedown', 'mediaselect', 'mail', 'calculator', 'computer', 'ac_home', 'ac_back', 'ac_forward', 'ac_refresh', 'ac_bookmarks', - 'lang1', 'lang2' ] } + 'lang1', 'lang2', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f20', 'f21', 'f22', 'f23', 'f24' ] } ## # @KeyValueKind: @@ -963,16 +1021,17 @@ # # Send keys to guest. # -# @keys: An array of @KeyValue elements. All @KeyValues in this array are -# simultaneously sent to the guest. A @KeyValue.number value is sent -# directly to the guest, while @KeyValue.qcode must be a valid -# @QKeyCode value +# @keys: An array of @KeyValue elements. All @KeyValues in this array +# are simultaneously sent to the guest. A @KeyValue.number value +# is sent directly to the guest, while @KeyValue.qcode must be a +# valid @QKeyCode value # -# @hold-time: time to delay key up events, milliseconds. Defaults -# to 100 +# @hold-time: time to delay key up events, milliseconds.
Defaults to +# 100 # -# Returns: - Nothing on success -# - If key is unknown or redundant, InvalidParameter +# Returns: +# - Nothing on success +# - If key is unknown or redundant, GenericError # # Since: 1.3 # @@ -983,7 +1042,6 @@ # { "type": "qcode", "data": "alt" }, # { "type": "qcode", "data": "delete" } ] } } # <- { "return": {} } -# ## { 'command': 'send-key', 'data': { 'keys': ['KeyValue'], '*hold-time': 'int' } } @@ -997,11 +1055,13 @@ # # @extra: rear side button of a 5-button mouse (since 2.9) # +# @touch: screen contact on a multi-touch device (since 8.1) +# # Since: 2.0 ## { 'enum' : 'InputButton', 'data' : [ 'left', 'middle', 'right', 'wheel-up', 'wheel-down', 'side', - 'extra', 'wheel-left', 'wheel-right' ] } + 'extra', 'wheel-left', 'wheel-right', 'touch' ] } ## # @InputAxis: @@ -1013,12 +1073,24 @@ { 'enum' : 'InputAxis', 'data' : [ 'x', 'y' ] } +## +# @InputMultiTouchType: +# +# Type of a multi-touch event. +# +# Since: 8.1 +## +{ 'enum' : 'InputMultiTouchType', + 'data' : [ 'begin', 'update', 'end', 'cancel', 'data' ] } + + ## # @InputKeyEvent: # # Keyboard input event. # # @key: Which key this event is for. +# # @down: True for key-down and false for key-up events. # # Since: 2.0 @@ -1033,6 +1105,7 @@ # Pointer button input event. # # @button: Which button this event is for. +# # @down: True for key-down and false for key-up events. # # Since: 2.0 @@ -1047,8 +1120,9 @@ # Pointer motion input event. # # @axis: Which axis is referenced by @value. -# @value: Pointer position. For absolute coordinates the -# valid range is 0 -> 0x7ffff +# +# @value: Pointer position. For absolute coordinates the valid range +# is 0 -> 0x7ffff # # Since: 2.0 ## @@ -1056,13 +1130,46 @@ 'data' : { 'axis' : 'InputAxis', 'value' : 'int' } } +## +# @InputMultiTouchEvent: +# +# MultiTouch input event. +# +# @slot: Which slot has generated the event. +# +# @tracking-id: ID to correlate this event with previously generated +# events. +# +# @axis: Which axis is referenced by @value. +# +# @value: Contact position. +# +# Since: 8.1 +## +{ 'struct' : 'InputMultiTouchEvent', + 'data' : { 'type' : 'InputMultiTouchType', + 'slot' : 'int', + 'tracking-id': 'int', + 'axis' : 'InputAxis', + 'value' : 'int' } } + ## # @InputEventKind: # +# @key: a keyboard input event +# +# @btn: a pointer button input event +# +# @rel: a relative pointer motion input event +# +# @abs: an absolute pointer motion input event +# +# @mtt: a multi-touch input event +# # Since: 2.0 ## { 'enum': 'InputEventKind', - 'data': [ 'key', 'btn', 'rel', 'abs' ] } + 'data': [ 'key', 'btn', 'rel', 'abs', 'mtt' ] } ## # @InputKeyEventWrapper: @@ -1088,17 +1195,20 @@ { 'struct': 'InputMoveEventWrapper', 'data': { 'data': 'InputMoveEvent' } } +## +# @InputMultiTouchEventWrapper: +# +# Since: 8.1 +## +{ 'struct': 'InputMultiTouchEventWrapper', + 'data': { 'data': 'InputMultiTouchEvent' } } + ## # @InputEvent: # # Input event union. # -# @type: the input type, one of: -# -# - 'key': Input event of Keyboard -# - 'btn': Input event of pointer buttons -# - 'rel': Input event of relative pointer motion -# - 'abs': Input event of absolute pointer motion +# @type: the type of input event # # Since: 2.0 ## @@ -1108,7 +1218,8 @@ 'data' : { 'key' : 'InputKeyEventWrapper', 'btn' : 'InputBtnEventWrapper', 'rel' : 'InputMoveEventWrapper', - 'abs' : 'InputMoveEventWrapper' } } + 'abs' : 'InputMoveEventWrapper', + 'mtt' : 'InputMultiTouchEventWrapper' } } ## # @input-send-event: @@ -1127,8 +1238,10 @@ # precedence. 
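#
# A hypothetical wire-level sketch (all field values invented; not one
# of the schema's own examples) of sending a multi-touch contact via
# the new 'mtt' event type defined above:
#
# -> { "execute": "input-send-event",
#      "arguments": { "events": [
#        { "type": "mtt",
#          "data": { "type": "begin", "slot": 0, "tracking-id": 1,
#                    "axis": "x", "value": 20000 } },
#        { "type": "mtt",
#          "data": { "type": "begin", "slot": 0, "tracking-id": 1,
#                    "axis": "y", "value": 30000 } } ] } }
# <- { "return": {} }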
# # @device: display device to send event(s) to. -# @head: head to send event(s) to, in case the -# display device supports multiple scanouts. +# +# @head: head to send event(s) to, in case the display device supports +# multiple scanouts. +# # @events: List of InputEvent union. # # Returns: Nothing on success. @@ -1136,11 +1249,11 @@ # Since: 2.6 # # Note: The consoles are visible in the qom tree, under -# /backend/console[$index]. They have a device link and head property, -# so it is possible to map which console belongs to which device and -# display. +# /backend/console[$index]. They have a device link and head +# property, so it is possible to map which console belongs to +# which device and display. # -# Example: +# Examples: # # 1. Press left mouse button. # @@ -1175,7 +1288,6 @@ # { "type": "abs", "data" : { "axis": "x", "value" : 20000 } }, # { "type": "abs", "data" : { "axis": "y", "value" : 400 } } ] } } # <- { "return": {} } -# ## { 'command': 'input-send-event', 'data': { '*device': 'str', @@ -1188,19 +1300,20 @@ # GTK display options. # # @grab-on-hover: Grab keyboard input on mouse hover. +# # @zoom-to-fit: Zoom guest display to fit into the host window. When -# turned off the host window will be resized instead. -# In case the display device can notify the guest on -# window resizes (virtio-gpu) this will default to "on", -# assuming the guest will resize the display to match -# the window size then. Otherwise it defaults to "off". -# Since 3.1 -# @show-tabs: Display the tab bar for switching between the various graphical -# interfaces (e.g. VGA and virtual console character devices) -# by default. -# Since 7.1 -# @show-menubar: Display the main window menubar. Defaults to "on". -# Since 8.0 +# turned off the host window will be resized instead. In case the +# display device can notify the guest on window resizes +# (virtio-gpu) this will default to "on", assuming the guest will +# resize the display to match the window size then. Otherwise it +# defaults to "off". (Since 3.1) +# +# @show-tabs: Display the tab bar for switching between the various +# graphical interfaces (e.g. VGA and virtual console character +# devices) by default. (Since 7.1) +# +# @show-menubar: Display the main window menubar. Defaults to "on". +# (Since 8.0) # # Since: 2.12 ## @@ -1215,8 +1328,8 @@ # # EGL headless display options. # -# @rendernode: Which DRM render node should be used. Default is the first -# available node on the host. +# @rendernode: Which DRM render node should be used. Default is the +# first available node on the host. # # Since: 3.1 ## @@ -1230,11 +1343,11 @@ # # @addr: The D-Bus bus address (default to the session bus). # -# @rendernode: Which DRM render node should be used. Default is the first -# available node on the host. +# @rendernode: Which DRM render node should be used. Default is the +# first available node on the host. # # @p2p: Whether to use peer-to-peer connections (accepted through -# ``add_client``). +# @add_client). # # @audiodev: Use the specified DBus audiodev to export audio. # @@ -1252,10 +1365,13 @@ # Display OpenGL mode. # # @off: Disable OpenGL (default). -# @on: Use OpenGL, pick context type automatically. -# Would better be named 'auto' but is called 'on' for backward -# compatibility with bool type. +# +# @on: Use OpenGL, pick context type automatically. Would better be +# named 'auto' but is called 'on' for backward compatibility with +# bool type. +# # @core: Use OpenGL with Core (desktop) Context. 
+# # @es: Use OpenGL with ES (embedded systems) Context. # # Since: 3.0 @@ -1281,18 +1397,17 @@ # Cocoa display options. # # @left-command-key: Enable/disable forwarding of left command key to -# guest. Allows command-tab window switching on the -# host without sending this key to the guest when -# "off". Defaults to "on" +# guest. Allows command-tab window switching on the host without +# sending this key to the guest when "off". Defaults to "on" # -# @full-grab: Capture all key presses, including system combos. This -# requires accessibility permissions, since it performs -# a global grab on key events. (default: off) -# See https://support.apple.com/en-in/guide/mac-help/mh32356/mac +# @full-grab: Capture all key presses, including system combos. This +# requires accessibility permissions, since it performs a global +# grab on key events. (default: off) See +# https://support.apple.com/en-in/guide/mac-help/mh32356/mac # -# @swap-opt-cmd: Swap the Option and Command keys so that their key codes match -# their position on non-Mac keyboards and you can use Meta/Super -# and Alt where you expect them. (default: off) +# @swap-opt-cmd: Swap the Option and Command keys so that their key +# codes match their position on non-Mac keyboards and you can use +# Meta/Super and Alt where you expect them. (default: off) # # Since: 7.0 ## @@ -1318,8 +1433,8 @@ # # SDL2 display options. # -# @grab-mod: Modifier keys that should be pressed together with the -# "G" key to release the mouse grab. +# @grab-mod: Modifier keys that should be pressed together with the +# "G" key to release the mouse grab. # # Since: 7.1 ## @@ -1331,36 +1446,35 @@ # # Display (user interface) type. # -# @default: The default user interface, selecting from the first available -# of gtk, sdl, cocoa, and vnc. +# @default: The default user interface, selecting from the first +# available of gtk, sdl, cocoa, and vnc. # -# @none: No user interface or video output display. The guest will -# still see an emulated graphics card, but its output will not -# be displayed to the QEMU user. +# @none: No user interface or video output display. The guest will +# still see an emulated graphics card, but its output will not be +# displayed to the QEMU user. # # @gtk: The GTK user interface. # # @sdl: The SDL user interface. # # @egl-headless: No user interface, offload GL operations to a local -# DRI device. Graphical display need to be paired with -# VNC or Spice. (Since 3.1) +# DRI device. Graphical display needs to be paired with VNC or +# Spice. (Since 3.1) # # @curses: Display video output via curses. For graphics device -# models which support a text mode, QEMU can display this -# output using a curses/ncurses interface. Nothing is -# displayed when the graphics device is in graphical mode or -# if the graphics device does not support a text -# mode. Generally only the VGA device models support text -# mode. +# models which support a text mode, QEMU can display this output +# using a curses/ncurses interface. Nothing is displayed when the +# graphics device is in graphical mode or if the graphics device +# does not support a text mode. Generally only the VGA device +# models support text mode. # # @cocoa: The Cocoa user interface. # # @spice-app: Set up a Spice server and run the default associated -# application to connect to it. The server will redirect -# the serial console and QEMU monitors. (Since 4.0) +# application to connect to it. The server will redirect the +# serial console and QEMU monitors.
(Since 4.0) # -# @dbus: Start a D-Bus service for the display. (Since 7.0) +# @dbus: Start a D-Bus service for the display. (Since 7.0) # # Since: 2.12 ## @@ -1385,9 +1499,16 @@ # Display (user interface) options. # # @type: Which DisplayType qemu should use. -# @full-screen: Start user interface in fullscreen mode (default: off). -# @window-close: Allow to quit qemu with window close button (default: on). -# @show-cursor: Force showing the mouse cursor (default: off). (since: 5.0) +# +# @full-screen: Start user interface in fullscreen mode +# (default: off). +# +# @window-close: Allow quitting qemu with the window close button +# (default: on). +# +# @show-cursor: Force showing the mouse cursor (default: off). +# (since: 5.0) +# # @gl: Enable OpenGL support (default: off). # # Since: 2.12 @@ -1474,7 +1595,6 @@ # -> { "execute": "display-reload", # "arguments": { "type": "vnc", "tls-certs": true } } # <- { "return": {} } -# ## { 'command': 'display-reload', 'data': 'DisplayReloadOptions', 'boxed' : true } @@ -1497,9 +1617,9 @@ # # Specify the VNC reload options. # -# @addresses: If specified, change set of addresses -# to listen for connections. Addresses configured -# for websockets are not touched. +# @addresses: If specified, change the set of addresses to listen for +# connections. Addresses configured for websockets are not +# touched. # # Since: 7.1 ## @@ -1536,8 +1656,38 @@ # [ { "type": "inet", "host": "0.0.0.0", # "port": "5901" } ] } } # <- { "return": {} } -# ## { 'command': 'display-update', 'data': 'DisplayUpdateOptions', 'boxed' : true } + +## +# @client_migrate_info: +# +# Set migration information for remote display. This makes the server +# ask the client to automatically reconnect using the new parameters +# once migration has finished successfully. Only implemented for SPICE. +# +# @protocol: must be "spice" +# +# @hostname: migration target hostname +# +# @port: spice tcp port for plaintext channels +# +# @tls-port: spice tcp port for tls-secured channels +# +# @cert-subject: server certificate subject +# +# Since: 0.14 +# +# Example: +# +# -> { "execute": "client_migrate_info", +# "arguments": { "protocol": "spice", +# "hostname": "virt42.lab.kraxel.org", +# "port": 1234 } } +# <- { "return": {} } +## +{ 'command': 'client_migrate_info', + 'data': { 'protocol': 'str', 'hostname': 'str', '*port': 'int', + '*tls-port': 'int', '*cert-subject': 'str' } } diff --git a/qapi/virtio.json b/qapi/virtio.json index 019d2d1987..e6dcee7b83 100644 --- a/qapi/virtio.json +++ b/qapi/virtio.json @@ -16,7 +16,6 @@ # @name: Name of the VirtIODevice # # Since: 7.2 -# ## { 'struct': 'VirtioInfo', 'data': { 'path': 'str', @@ -28,6 +27,7 @@ # Returns a list of all realized VirtIODevices # # Features: +# # @unstable: This command is meant for debugging. # # Returns: List of gathered VirtIODevices @@ -60,9 +60,7 @@ # } # ] # } -# ## - { 'command': 'x-query-virtio', 'returns': [ 'VirtioInfo' ], 'features': [ 'unstable' ] } @@ -70,7 +68,7 @@ ## # @VhostStatus: # -# Information about a vhost device. This information will only be +# Information about a vhost device. This information will only be # displayed if the vhost device is active. # # @n-mem-sections: vhost_dev n_mem_sections @@ -98,9 +96,7 @@ # @log-size: vhost_dev log_size # # Since: 7.2 -# ## - { 'struct': 'VhostStatus', 'data': { 'n-mem-sections': 'int', 'n-tmp-sections': 'int', @@ -119,8 +115,8 @@ # @VirtioStatus: # # Full status of the virtio device with most VirtIODevice members.
-# Also includes the full status of the corresponding vhost device -# if the vhost device is active. +# Also includes the full status of the corresponding vhost device if +# the vhost device is active. # # @name: VirtIODevice name # @@ -136,8 +132,8 @@ # # @device-endian: VirtIODevice device_endian # -# @num-vqs: VirtIODevice virtqueue count. This is the number of active -# virtqueues being used by the VirtIODevice. +# @num-vqs: VirtIODevice virtqueue count. This is the number of +# active virtqueues being used by the VirtIODevice. # # @status: VirtIODevice configuration status (VirtioDeviceStatus) # @@ -163,14 +159,12 @@ # # @use-guest-notifier-mask: VirtIODevice use_guest_notifier_mask flag # -# @vhost-dev: Corresponding vhost device info for a given VirtIODevice. -# Present if the given VirtIODevice has an active vhost -# device. +# @vhost-dev: Corresponding vhost device info for a given +# VirtIODevice. Present if the given VirtIODevice has an active +# vhost device. # # Since: 7.2 -# ## - { 'struct': 'VirtioStatus', 'data': { 'name': 'str', 'device-id': 'uint16', @@ -202,6 +196,7 @@ # @path: Canonical QOM path of the VirtIODevice # # Features: +# # @unstable: This command is meant for debugging. # # Returns: VirtioStatus of the virtio device @@ -433,9 +428,7 @@ # "use-started": true # } # } -# ## - { 'command': 'x-query-virtio-status', 'data': { 'path': 'str' }, 'returns': 'VirtioStatus', @@ -448,13 +441,13 @@ # device # # @statuses: List of decoded configuration statuses of the virtio -# device +# device # -# @unknown-statuses: Virtio device statuses bitmap that have not been decoded +# @unknown-statuses: Virtio device statuses bitmap that have not been +# decoded # # Since: 7.2 ## - { 'struct': 'VirtioDeviceStatus', 'data': { 'statuses': [ 'str' ], '*unknown-statuses': 'uint8' } } @@ -466,14 +459,13 @@ # Vhost User device # # @protocols: List of decoded vhost user protocol features of a vhost -# user device +# user device # # @unknown-protocols: Vhost user device protocol features bitmap that -# have not been decoded +# have not been decoded # # Since: 7.2 ## - { 'struct': 'VhostDeviceProtocols', 'data': { 'protocols': [ 'str' ], '*unknown-protocols': 'uint64' } } @@ -481,20 +473,19 @@ ## # @VirtioDeviceFeatures: # -# The common fields that apply to most Virtio devices. Some devices +# The common fields that apply to most Virtio devices. Some devices # may not have their own device-specific features (e.g. virtio-rng). # # @transports: List of transport features of the virtio device # # @dev-features: List of device-specific features (if the device has -# unique features) +# unique features) # # @unknown-dev-features: Virtio device features bitmap that have not -# been decoded +# been decoded # # Since: 7.2 ## - { 'struct': 'VirtioDeviceFeatures', 'data': { 'transports': [ 'str' ], '*dev-features': [ 'str' ], @@ -525,7 +516,7 @@ # @vring-used: VirtQueue vring.used (device area) # # @last-avail-idx: VirtQueue last_avail_idx or return of vhost_dev -# vhost_get_vring_base (if vhost active) +# vhost_get_vring_base (if vhost active) # # @shadow-avail-idx: VirtQueue shadow_avail_idx # @@ -536,9 +527,7 @@ # @signalled-used-valid: VirtQueue signalled_used_valid flag # # Since: 7.2 -# ## - { 'struct': 'VirtQueueStatus', 'data': { 'name': 'str', 'queue-index': 'uint16', @@ -565,16 +554,17 @@ # @queue: VirtQueue index to examine # # Features: +# # @unstable: This command is meant for debugging. 
# # Returns: VirtQueueStatus of the VirtQueue # -# Notes: last_avail_idx will not be displayed in the case where -# the selected VirtIODevice has a running vhost device and -# the VirtIODevice VirtQueue index (queue) does not exist for -# the corresponding vhost device vhost_virtqueue. Also, -# shadow_avail_idx will not be displayed in the case where -# the selected VirtIODevice has a running vhost device. +# Notes: last_avail_idx will not be displayed in the case where the +# selected VirtIODevice has a running vhost device and the +# VirtIODevice VirtQueue index (queue) does not exist for the +# corresponding vhost device vhost_virtqueue. Also, +# shadow_avail_idx will not be displayed in the case where the +# selected VirtIODevice has a running vhost device. # # Since: 7.2 # @@ -626,9 +616,7 @@ # "vring-num": 128 # } # } -# ## - { 'command': 'x-query-virtio-queue-status', 'data': { 'path': 'str', 'queue': 'uint16' }, 'returns': 'VirtQueueStatus', @@ -667,9 +655,7 @@ # @used-size: vhost_virtqueue used_size # # Since: 7.2 -# ## - { 'struct': 'VirtVhostQueueStatus', 'data': { 'name': 'str', 'kick': 'int', @@ -695,6 +681,7 @@ # @queue: vhost_virtqueue index to examine # # Features: +# # @unstable: This command is meant for debugging. # # Returns: VirtVhostQueueStatus of the vhost_virtqueue @@ -748,9 +735,7 @@ # "kick": 0 # } # } -# ## - { 'command': 'x-query-virtio-vhost-queue-status', 'data': { 'path': 'str', 'queue': 'uint16' }, 'returns': 'VirtVhostQueueStatus', @@ -768,9 +753,7 @@ # @flags: List of descriptor flags # # Since: 7.2 -# ## - { 'struct': 'VirtioRingDesc', 'data': { 'addr': 'uint64', 'len': 'uint32', @@ -788,9 +771,7 @@ # @ring: VRingAvail ring[] entry at provided index # # Since: 7.2 -# ## - { 'struct': 'VirtioRingAvail', 'data': { 'flags': 'uint16', 'idx': 'uint16', @@ -806,9 +787,7 @@ # @idx: VRingUsed index # # Since: 7.2 -# ## - { 'struct': 'VirtioRingUsed', 'data': { 'flags': 'uint16', 'idx': 'uint16' } } @@ -830,9 +809,7 @@ # @used: VRingUsed info # # Since: 7.2 -# ## - { 'struct': 'VirtioQueueElement', 'data': { 'name': 'str', 'index': 'uint32', @@ -849,10 +826,11 @@ # # @queue: VirtQueue index to examine # -# @index: Index of the element in the queue -# (default: head of the queue) +# @index: Index of the element in the queue (default: head of the +# queue) # # Features: +# # @unstable: This command is meant for debugging. # # Returns: VirtioQueueElement information @@ -945,9 +923,7 @@ # } # } # } -# ## - { 'command': 'x-query-virtio-queue-element', 'data': { 'path': 'str', 'queue': 'uint16', '*index': 'uint16' }, 'returns': 'VirtioQueueElement', diff --git a/qapi/yank.json b/qapi/yank.json index 167a775594..87ec7cab96 100644 --- a/qapi/yank.json +++ b/qapi/yank.json @@ -9,7 +9,7 @@ ## # @YankInstanceType: # -# An enumeration of yank instance types. See @YankInstance for more +# An enumeration of yank instance types. See @YankInstance for more # information. # # Since: 6.0 @@ -20,8 +20,8 @@ ## # @YankInstanceBlockNode: # -# Specifies which block graph node to yank. See @YankInstance for more -# information. +# Specifies which block graph node to yank. See @YankInstance for +# more information. # # @node-name: the name of the block graph node # @@ -33,8 +33,8 @@ ## # @YankInstanceChardev: # -# Specifies which character device to yank. See @YankInstance for more -# information. +# Specifies which character device to yank. See @YankInstance for +# more information. 
# # @id: the chardev's ID # @@ -46,20 +46,18 @@ ## # @YankInstance: # -# A yank instance can be yanked with the @yank qmp command to recover from a -# hanging QEMU. +# A yank instance can be yanked with the @yank qmp command to recover +# from a hanging QEMU. # # Currently implemented yank instances: -# - nbd block device: -# Yanking it will shut down the connection to the nbd server without -# attempting to reconnect. -# - socket chardev: -# Yanking it will shut down the connected socket. -# - migration: -# Yanking it will shut down all migration connections. Unlike -# @migrate_cancel, it will not notify the migration process, so migration -# will go into @failed state, instead of @cancelled state. @yank should be -# used to recover from hangs. +# +# - nbd block device: Yanking it will shut down the connection to the +# nbd server without attempting to reconnect. +# - socket chardev: Yanking it will shut down the connected socket. +# - migration: Yanking it will shut down all migration connections. +# Unlike @migrate_cancel, it will not notify the migration process, +# so migration will go into @failed state, instead of @cancelled +# state. @yank should be used to recover from hangs. # # Since: 6.0 ## @@ -73,13 +71,14 @@ ## # @yank: # -# Try to recover from hanging QEMU by yanking the specified instances. See -# @YankInstance for more information. +# Try to recover from hanging QEMU by yanking the specified instances. +# See @YankInstance for more information. # # Takes a list of @YankInstance as argument. # -# Returns: - Nothing on success -# - @DeviceNotFound error, if any of the YankInstances doesn't exist +# Returns: +# - Nothing on success +# - @DeviceNotFound error, if any of the YankInstances doesn't exist # # Example: # @@ -100,7 +99,7 @@ ## # @query-yank: # -# Query yank instances. See @YankInstance for more information. +# Query yank instances. See @YankInstance for more information. # # Returns: list of @YankInstance # diff --git a/qemu-img.c b/qemu-img.c index 9aeac69fa6..27f48051b0 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -2945,7 +2945,10 @@ static BlockGraphInfoList *collect_image_info_list(bool image_opts, * duplicate the backing chain information that we obtain by walking * the chain manually here. 
*/ + bdrv_graph_rdlock_main_loop(); bdrv_query_block_graph_info(bs, &info, &err); + bdrv_graph_rdunlock_main_loop(); + if (err) { error_report_err(err); blk_unref(blk); diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c index e7a02f5b99..3f75d2f5a6 100644 --- a/qemu-io-cmds.c +++ b/qemu-io-cmds.c @@ -1730,6 +1730,224 @@ static const cmdinfo_t flush_cmd = { .oneline = "flush all in-core file state to disk", }; +static inline int64_t tosector(int64_t bytes) +{ + return bytes >> BDRV_SECTOR_BITS; +} + +static int zone_report_f(BlockBackend *blk, int argc, char **argv) +{ + int ret; + int64_t offset; + unsigned int nr_zones; + + ++optind; + offset = cvtnum(argv[optind]); + ++optind; + nr_zones = cvtnum(argv[optind]); + + g_autofree BlockZoneDescriptor *zones = NULL; + zones = g_new(BlockZoneDescriptor, nr_zones); + ret = blk_zone_report(blk, offset, &nr_zones, zones); + if (ret < 0) { + printf("zone report failed: %s\n", strerror(-ret)); + } else { + for (int i = 0; i < nr_zones; ++i) { + printf("start: 0x%" PRIx64 ", len 0x%" PRIx64 ", " + "cap"" 0x%" PRIx64 ", wptr 0x%" PRIx64 ", " + "zcond:%u, [type: %u]\n", + tosector(zones[i].start), tosector(zones[i].length), + tosector(zones[i].cap), tosector(zones[i].wp), + zones[i].state, zones[i].type); + } + } + return ret; +} + +static const cmdinfo_t zone_report_cmd = { + .name = "zone_report", + .altname = "zrp", + .cfunc = zone_report_f, + .argmin = 2, + .argmax = 2, + .args = "offset number", + .oneline = "report zone information", +}; + +static int zone_open_f(BlockBackend *blk, int argc, char **argv) +{ + int ret; + int64_t offset, len; + ++optind; + offset = cvtnum(argv[optind]); + ++optind; + len = cvtnum(argv[optind]); + ret = blk_zone_mgmt(blk, BLK_ZO_OPEN, offset, len); + if (ret < 0) { + printf("zone open failed: %s\n", strerror(-ret)); + } + return ret; +} + +static const cmdinfo_t zone_open_cmd = { + .name = "zone_open", + .altname = "zo", + .cfunc = zone_open_f, + .argmin = 2, + .argmax = 2, + .args = "offset len", + .oneline = "explicitly open a range of zones in zone block device", +}; + +static int zone_close_f(BlockBackend *blk, int argc, char **argv) +{ + int ret; + int64_t offset, len; + ++optind; + offset = cvtnum(argv[optind]); + ++optind; + len = cvtnum(argv[optind]); + ret = blk_zone_mgmt(blk, BLK_ZO_CLOSE, offset, len); + if (ret < 0) { + printf("zone close failed: %s\n", strerror(-ret)); + } + return ret; +} + +static const cmdinfo_t zone_close_cmd = { + .name = "zone_close", + .altname = "zc", + .cfunc = zone_close_f, + .argmin = 2, + .argmax = 2, + .args = "offset len", + .oneline = "close a range of zones in zone block device", +}; + +static int zone_finish_f(BlockBackend *blk, int argc, char **argv) +{ + int ret; + int64_t offset, len; + ++optind; + offset = cvtnum(argv[optind]); + ++optind; + len = cvtnum(argv[optind]); + ret = blk_zone_mgmt(blk, BLK_ZO_FINISH, offset, len); + if (ret < 0) { + printf("zone finish failed: %s\n", strerror(-ret)); + } + return ret; +} + +static const cmdinfo_t zone_finish_cmd = { + .name = "zone_finish", + .altname = "zf", + .cfunc = zone_finish_f, + .argmin = 2, + .argmax = 2, + .args = "offset len", + .oneline = "finish a range of zones in zone block device", +}; + +static int zone_reset_f(BlockBackend *blk, int argc, char **argv) +{ + int ret; + int64_t offset, len; + ++optind; + offset = cvtnum(argv[optind]); + ++optind; + len = cvtnum(argv[optind]); + ret = blk_zone_mgmt(blk, BLK_ZO_RESET, offset, len); + if (ret < 0) { + printf("zone reset failed: %s\n", strerror(-ret)); + } + return ret;
+} + +static const cmdinfo_t zone_reset_cmd = { + .name = "zone_reset", + .altname = "zrs", + .cfunc = zone_reset_f, + .argmin = 2, + .argmax = 2, + .args = "offset len", + .oneline = "reset a zone write pointer in zone block device", +}; + +static int do_aio_zone_append(BlockBackend *blk, QEMUIOVector *qiov, + int64_t *offset, int flags, int *total) +{ + int async_ret = NOT_DONE; + + blk_aio_zone_append(blk, offset, qiov, flags, aio_rw_done, &async_ret); + while (async_ret == NOT_DONE) { + main_loop_wait(false); + } + + *total = qiov->size; + return async_ret < 0 ? async_ret : 1; +} + +static int zone_append_f(BlockBackend *blk, int argc, char **argv) +{ + int ret; + bool pflag = false; + int flags = 0; + int total = 0; + int64_t offset; + char *buf; + int c, nr_iov; + int pattern = 0xcd; + QEMUIOVector qiov; + + if (optind > argc - 3) { + return -EINVAL; + } + + if ((c = getopt(argc, argv, "p")) != -1) { + pflag = true; + } + + offset = cvtnum(argv[optind]); + if (offset < 0) { + print_cvtnum_err(offset, argv[optind]); + return offset; + } + optind++; + nr_iov = argc - optind; + buf = create_iovec(blk, &qiov, &argv[optind], nr_iov, pattern, + flags & BDRV_REQ_REGISTERED_BUF); + if (buf == NULL) { + return -EINVAL; + } + ret = do_aio_zone_append(blk, &qiov, &offset, flags, &total); + if (ret < 0) { + printf("zone append failed: %s\n", strerror(-ret)); + goto out; + } + + if (pflag) { + printf("After zap done, the append sector is 0x%" PRIx64 "\n", + tosector(offset)); + } + +out: + qemu_io_free(blk, buf, qiov.size, + flags & BDRV_REQ_REGISTERED_BUF); + qemu_iovec_destroy(&qiov); + return ret; +} + +static const cmdinfo_t zone_append_cmd = { + .name = "zone_append", + .altname = "zap", + .cfunc = zone_append_f, + .argmin = 3, + .argmax = 4, + .args = "offset len [len..]", + .oneline = "append write a number of bytes at a specified offset", +}; + static int truncate_f(BlockBackend *blk, int argc, char **argv); static const cmdinfo_t truncate_cmd = { .name = "truncate", @@ -2523,6 +2741,12 @@ static void __attribute((constructor)) init_qemuio_commands(void) qemuio_add_command(&aio_write_cmd); qemuio_add_command(&aio_flush_cmd); qemuio_add_command(&flush_cmd); + qemuio_add_command(&zone_report_cmd); + qemuio_add_command(&zone_open_cmd); + qemuio_add_command(&zone_close_cmd); + qemuio_add_command(&zone_finish_cmd); + qemuio_add_command(&zone_reset_cmd); + qemuio_add_command(&zone_append_cmd); qemuio_add_command(&truncate_cmd); qemuio_add_command(&length_cmd); qemuio_add_command(&info_cmd); diff --git a/qemu-nbd.c b/qemu-nbd.c index 6ff45308a9..4276163564 100644 --- a/qemu-nbd.c +++ b/qemu-nbd.c @@ -1071,7 +1071,11 @@ int main(int argc, char **argv) qdict_put_str(raw_opts, "driver", "raw"); qdict_put_str(raw_opts, "file", bs->node_name); qdict_put_int(raw_opts, "offset", dev_offset); + + aio_context_acquire(qemu_get_aio_context()); bs = bdrv_open(NULL, NULL, raw_opts, flags, &error_fatal); + aio_context_release(qemu_get_aio_context()); + blk_remove_bs(blk); blk_insert_bs(blk, bs, &error_fatal); bdrv_unref(bs); diff --git a/qemu-options.hx b/qemu-options.hx index beeb4475ba..b37eb9662b 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -182,6 +182,7 @@ DEF("accel", HAS_ARG, QEMU_OPTION_accel, " igd-passthru=on|off (enable Xen integrated Intel graphics passthrough, default=off)\n" " kernel-irqchip=on|off|split controls accelerated irqchip support (default=on)\n" " kvm-shadow-mem=size of KVM shadow MMU in bytes\n" + " one-insn-per-tb=on|off (one guest instruction per TCG translation block)\n" " 
split-wx=on|off (enable TCG split w^x mapping)\n" " tb-size=n (TCG translation block cache size)\n" " dirty-ring-size=n (KVM dirty ring GFN count, default 0)\n" @@ -210,6 +211,12 @@ SRST ``kvm-shadow-mem=size`` Defines the size of the KVM shadow MMU. + ``one-insn-per-tb=on|off`` + Makes the TCG accelerator put only one guest instruction into + each translation block. This slows down emulation a lot, but + can be useful in some situations, such as when trying to analyse + the logs produced by the ``-d`` option. + ``split-wx=on|off`` Controls the use of split w^x mapping for the TCG code generation buffer. Some operating systems require this to be enabled, and in @@ -405,15 +412,22 @@ SRST -numa node,nodeid=0 -numa node,nodeid=1 \ -numa cpu,node-id=0,socket-id=0 -numa cpu,node-id=1,socket-id=1 - Legacy '\ ``mem``\ ' assigns a given RAM amount to a node (not supported - for 5.1 and newer machine types). '\ ``memdev``\ ' assigns RAM from - a given memory backend device to a node. If '\ ``mem``\ ' and - '\ ``memdev``\ ' are omitted in all nodes, RAM is split equally between them. + The '\ ``memdev``\ ' option assigns RAM from a given memory backend + device to a node. It is recommended to use '\ ``memdev``\ ' over + the legacy '\ ``mem``\ ' option, because '\ ``memdev``\ ' provides + better performance and more control over the backend's RAM + (e.g. the '\ ``prealloc``\ ' parameter of + '\ ``-memory-backend-ram``\ ' allows memory preallocation). + For compatibility reasons, the legacy '\ ``mem``\ ' option is + supported in 5.0 and older machine types. Note that '\ ``mem``\ ' + and '\ ``memdev``\ ' are mutually exclusive: if one node uses + '\ ``memdev``\ ', all the other nodes have to use it as well. - '\ ``mem``\ ' and '\ ``memdev``\ ' are mutually exclusive. - Furthermore, if one node uses '\ ``memdev``\ ', all of them have to - use it. + Users must specify memory for all NUMA nodes via '\ ``memdev``\ ' + (or the legacy '\ ``mem``\ ' if available). In QEMU 5.2, support + for '\ ``-numa node``\ ' without memory specified was removed. '\ ``initiator``\ ' is an additional option that points to an initiator NUMA node that has best performance (the lowest latency or @@ -779,6 +793,12 @@ DEF("audiodev", HAS_ARG, QEMU_OPTION_audiodev, " in|out.name= source/sink device name\n" " in|out.latency= desired latency in microseconds\n" #endif +#ifdef CONFIG_AUDIO_PIPEWIRE + "-audiodev pipewire,id=id[,prop[=value][,...]]\n" + " in|out.name= source/sink device name\n" + " in|out.stream-name= name of pipewire stream\n" + " in|out.latency= desired latency in microseconds\n" +#endif #ifdef CONFIG_AUDIO_SDL "-audiodev sdl,id=id[,prop[=value][,...]]\n" " in|out.buffer-count= number of buffers\n" @@ -942,6 +962,21 @@ SRST Desired latency in microseconds. The PulseAudio server will try to honor this value but actual latencies may be lower or higher. +``-audiodev pipewire,id=id[,prop[=value][,...]]`` + Creates a backend using PipeWire. This backend is available on + most systems. + + PipeWire-specific options are: + + ``in|out.latency=usecs`` + Desired latency in microseconds. + + ``in|out.name=sink`` + Use the specified source/sink for recording/playback. + + ``in|out.stream-name`` + Specify the name of the PipeWire stream. + ``-audiodev sdl,id=id[,prop[=value][,...]]`` Creates a backend using SDL.
This backend is available on most systems, but you should use your platform's native backend if possible. @@ -1143,10 +1178,22 @@ have gone through several iterations as the feature set and complexity of the block layer have grown. Many online guides to QEMU often reference older and deprecated options, which can lead to confusion. -The recommended modern way to describe disks is to use a combination of +The most explicit way to describe disks is to use a combination of ``-device`` to specify the hardware device and ``-blockdev`` to describe the backend. The device defines what the guest sees and the -backend describes how QEMU handles the data. +backend describes how QEMU handles the data. It is the only guaranteed +stable interface for describing block devices and as such is +recommended for management tools and scripting. + +The ``-drive`` option combines the device and backend into a single +command line option, which is more human friendly. There is, however, +no interface stability guarantee, although some older board models +still need updating to work with the modern blockdev forms. + +Older options like ``-hda`` are essentially macros which expand into +``-drive`` options for various drive interfaces. The original forms +bake in a lot of assumptions from the days when QEMU was emulating a +legacy PC; they are not recommended for modern configurations. ERST @@ -1606,7 +1653,7 @@ SRST .. parsed-literal:: - |qemu_system_x86| -drive file=a -drive file=b" + |qemu_system_x86| -drive file=a -drive file=b is interpreted like: @@ -1639,6 +1686,14 @@ SRST the raw disk image you use is not written back. You can however force the write back by pressing C-a s (see the :ref:`disk images` chapter in the System Emulation Users Guide). + + .. warning:: + snapshot is incompatible with ``-blockdev`` (instead use qemu-img + to manually create snapshot images to attach to your blockdev). + If you have mixed ``-blockdev`` and ``-drive`` declarations you + can use the 'snapshot' property on your drive declarations + instead of this global option. + ERST DEF("fsdev", HAS_ARG, QEMU_OPTION_fsdev, @@ -2585,7 +2640,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios, " specify SMBIOS type 17 fields\n" "-smbios type=41[,designation=str][,kind=str][,instance=%d][,pcidev=str]\n" " specify SMBIOS type 41 fields\n", - QEMU_ARCH_I386 | QEMU_ARCH_ARM) + QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_LOONGARCH) SRST ``-smbios file=binary`` Load SMBIOS entry from binary file. @@ -3360,7 +3415,7 @@ DEF("chardev", HAS_ARG, QEMU_OPTION_chardev, "-chardev vc,id=id[[,width=width][,height=height]][[,cols=cols][,rows=rows]]\n" " [,mux=on|off][,logfile=PATH][,logappend=on|off]\n" "-chardev ringbuf,id=id[,size=size][,logfile=PATH][,logappend=on|off]\n" - "-chardev file,id=id,path=path[,mux=on|off][,logfile=PATH][,logappend=on|off]\n" + "-chardev file,id=id,path=path[,input-path=input-file][,mux=on|off][,logfile=PATH][,logappend=on|off]\n" "-chardev pipe,id=id,path=path[,mux=on|off][,logfile=PATH][,logappend=on|off]\n" #ifdef _WIN32 "-chardev console,id=id[,mux=on|off][,logfile=PATH][,logappend=on|off]\n" @@ -3563,13 +3618,19 @@ The available backends are: Create a ring buffer with fixed size ``size``. size must be a power of two and defaults to ``64K``. -``-chardev file,id=id,path=path`` +``-chardev file,id=id,path=path[,input-path=input-path]`` Log all traffic received from the guest to a file. ``path`` specifies the path of the file to be opened. This file will be created if it does not already exist, and overwritten if it does.
    ``path`` is required.
 
+    If ``input-path`` is specified, this is the path of a second file
+    which will be used for input. If ``input-path`` is not specified,
+    no input will be available from the chardev.
+
+    Note that ``input-path`` is not supported on Windows hosts.
+
 ``-chardev pipe,id=id,path=path``
     Create a two-way connection to the guest. The behaviour differs
     slightly between Windows hosts and other hosts:
@@ -4109,26 +4170,42 @@ DEF("qmp", HAS_ARG, QEMU_OPTION_qmp, \
     QEMU_ARCH_ALL)
 SRST
 ``-qmp dev``
-    Like -monitor but opens in 'control' mode.
+    Like ``-monitor`` but opens in 'control' mode. For example, to make
+    QMP available on localhost port 4444::
+
+        -qmp tcp:localhost:4444,server=on,wait=off
+
+    Not all options are configurable via this syntax; for maximum
+    flexibility use the ``-mon`` option and an accompanying ``-chardev``.
+
 ERST
 
 DEF("qmp-pretty", HAS_ARG, QEMU_OPTION_qmp_pretty, \
     "-qmp-pretty dev    like -qmp but uses pretty JSON formatting\n",
     QEMU_ARCH_ALL)
 SRST
 ``-qmp-pretty dev``
-    Like -qmp but uses pretty JSON formatting.
+    Like ``-qmp`` but uses pretty JSON formatting.
 ERST
 
 DEF("mon", HAS_ARG, QEMU_OPTION_mon, \
     "-mon [chardev=]name[,mode=readline|control][,pretty[=on|off]]\n", QEMU_ARCH_ALL)
 SRST
 ``-mon [chardev=]name[,mode=readline|control][,pretty[=on|off]]``
-    Setup monitor on chardev name. ``mode=control`` configures
-    a QMP monitor (a JSON RPC-style protocol) and it is not the
-    same as HMP, the human monitor that has a "(qemu)" prompt.
-    ``pretty`` is only valid when ``mode=control``,
+    Set up a monitor connected to the chardev ``name``.
+    QEMU supports two monitors: the Human Monitor Protocol
+    (HMP; for human interaction), and the QEMU Monitor Protocol
+    (QMP; a JSON RPC-style protocol).
+    The default is HMP; ``mode=control`` selects QMP instead.
+    ``pretty`` is only valid when ``mode=control``,
     turning on JSON pretty printing to ease
     human reading and debugging.
+
+    For example::
+
+        -chardev socket,id=mon1,host=localhost,port=4444,server=on,wait=off \
+        -mon chardev=mon1,mode=control,pretty=on
+
+    enables the QMP monitor on localhost port 4444 with pretty-printing.
 ERST
 
 DEF("debugcon", HAS_ARG, QEMU_OPTION_debugcon, \
@@ -4152,10 +4229,11 @@ SRST
 ERST
 
 DEF("singlestep", 0, QEMU_OPTION_singlestep, \
-    "-singlestep     always run in singlestep mode\n", QEMU_ARCH_ALL)
+    "-singlestep     deprecated synonym for -accel tcg,one-insn-per-tb=on\n", QEMU_ARCH_ALL)
 SRST
 ``-singlestep``
-    Run the emulation in single step mode.
+    This is a deprecated synonym for the TCG accelerator property
+    ``one-insn-per-tb``.
 ERST
 
 DEF("preconfig", 0, QEMU_OPTION_preconfig, \
@@ -4766,20 +4844,32 @@ DEF("qtest-log", HAS_ARG, QEMU_OPTION_qtest_log, "", QEMU_ARCH_ALL)
 
 DEF("async-teardown", 0, QEMU_OPTION_asyncteardown,
     "-async-teardown enable asynchronous teardown\n",
     QEMU_ARCH_ALL)
-#endif
 SRST
 ``-async-teardown``
-    Enable asynchronous teardown. A new process called "cleanup/<pid>"
-    will be created at startup sharing the address space with the main qemu
-    process, using clone. It will wait for the main qemu process to
-    terminate completely, and then exit.
-    This allows qemu to terminate very quickly even if the guest was
-    huge, leaving the teardown of the address space to the cleanup
-    process. Since the cleanup process shares the same cgroups as the
-    main qemu process, accounting is performed correctly. This only
-    works if the cleanup process is not forcefully killed with SIGKILL
-    before the main qemu process has terminated completely.
+    This option is deprecated and should no longer be used. The new option
+    ``-run-with async-teardown=on`` is a replacement.
 ERST
 
+DEF("run-with", HAS_ARG, QEMU_OPTION_run_with,
+    "-run-with async-teardown[=on|off]\n"
+    "                misc QEMU process lifecycle options\n"
+    "                async-teardown=on enables asynchronous teardown\n",
+    QEMU_ARCH_ALL)
+SRST
+``-run-with``
+    Set QEMU process lifecycle options.
+
+    ``async-teardown=on`` enables asynchronous teardown. A new process called
+    "cleanup/<pid>" will be created at startup sharing the address
+    space with the main QEMU process, using clone. It will wait for the
+    main QEMU process to terminate completely, and then exit. This allows
+    QEMU to terminate very quickly even if the guest was huge, leaving the
+    teardown of the address space to the cleanup process. Since the cleanup
+    process shares the same cgroups as the main QEMU process, accounting is
+    performed correctly. This only works if the cleanup process is not
+    forcefully killed with SIGKILL before the main QEMU process has
+    terminated completely.
+ERST
+#endif
 
 DEF("msg", HAS_ARG, QEMU_OPTION_msg,
     "-msg [timestamp[=on|off]][,guest-name=[on|off]]\n"
@@ -4859,7 +4949,7 @@ SRST
     they are specified. Note that the 'id' property must be set. These
     objects are placed in the '/objects' path.
 
-    ``-object memory-backend-file,id=id,size=size,mem-path=dir,share=on|off,discard-data=on|off,merge=on|off,dump=on|off,prealloc=on|off,host-nodes=host-nodes,policy=default|preferred|bind|interleave,align=align,readonly=on|off``
+    ``-object memory-backend-file,id=id,size=size,mem-path=dir,share=on|off,discard-data=on|off,merge=on|off,dump=on|off,prealloc=on|off,host-nodes=host-nodes,policy=default|preferred|bind|interleave,align=align,offset=offset,readonly=on|off``
         Creates a memory file backend object, which can be used to back
        the guest RAM with huge pages.
 
@@ -4929,6 +5019,10 @@ SRST
         such cases, users can specify the required alignment via this
         option.
 
+        The ``offset`` option specifies the offset into the target file
+        that the region starts at. You can use this parameter to back
+        multiple regions with a single file.
+
         The ``pmem`` option specifies whether the backing file specified
         by ``mem-path`` is in host persistent memory that can be accessed
         using the SNIA NVM programming model (e.g. Intel
@@ -5405,7 +5499,7 @@ SRST
         physical address space. The ``reduced-phys-bits`` is used to
         provide the number of bits we loose in physical address space.
         Similar to C-bit, the value is Host family dependent. On EPYC,
-        the value should be 5.
+        a guest will lose a maximum of 1 bit, so the value should be 1.
 
         The ``sev-device`` provides the device file to use for
         communicating with the SEV firmware running inside AMD Secure
@@ -5440,7 +5534,7 @@ SRST
 
              # |qemu_system_x86| \\
                  ...... \\
-                 -object sev-guest,id=sev0,cbitpos=47,reduced-phys-bits=5 \\
+                 -object sev-guest,id=sev0,cbitpos=47,reduced-phys-bits=1 \\
                  -machine ...,memory-encryption=sev0 \\
                  .....
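(Illustration, not part of the patch itself: the sketch below combines the new
``-run-with async-teardown=on`` switch with the new ``offset`` parameter of
``memory-backend-file`` documented above. The file path, sizes and object IDs
are invented for the example; it assumes an 8G guest whose two NUMA nodes are
backed by a single file at different offsets.)

.. parsed-literal::

    |qemu_system_x86| -m 8G \\
        -run-with async-teardown=on \\
        -object memory-backend-file,id=mem0,size=4G,mem-path=/dev/hugepages/guest.img,offset=0,share=on \\
        -object memory-backend-file,id=mem1,size=4G,mem-path=/dev/hugepages/guest.img,offset=4G,share=on \\
        -numa node,nodeid=0,memdev=mem0 -numa node,nodeid=1,memdev=mem1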
diff --git a/qga/commands-posix.c b/qga/commands-posix.c
index 079689d79a..def857d773 100644
--- a/qga/commands-posix.c
+++ b/qga/commands-posix.c
@@ -879,7 +879,9 @@ static bool build_guest_fsinfo_for_pci_dev(char const *syspath,
               g_str_equal(driver, "sym53c8xx") ||
               g_str_equal(driver, "virtio-pci") ||
               g_str_equal(driver, "ahci") ||
-              g_str_equal(driver, "nvme"))) {
+              g_str_equal(driver, "nvme") ||
+              g_str_equal(driver, "xhci_hcd") ||
+              g_str_equal(driver, "ehci-pci"))) {
             break;
         }
 
@@ -976,6 +978,8 @@ static bool build_guest_fsinfo_for_pci_dev(char const *syspath,
         }
     } else if (strcmp(driver, "nvme") == 0) {
         disk->bus_type = GUEST_DISK_BUS_TYPE_NVME;
+    } else if (strcmp(driver, "ehci-pci") == 0 || strcmp(driver, "xhci_hcd") == 0) {
+        disk->bus_type = GUEST_DISK_BUS_TYPE_USB;
     } else {
         g_debug("unknown driver '%s' (sysfs path '%s')", driver, syspath);
         goto cleanup;
@@ -1918,10 +1922,10 @@ static void guest_suspend(SuspendMode mode, Error **errp)
     if (systemd_supports_mode(mode, &local_err)) {
         mode_supported = true;
         systemd_suspend(mode, &local_err);
-    }
 
-    if (!local_err) {
-        return;
+        if (!local_err) {
+            return;
+        }
     }
 
     error_free(local_err);
@@ -1930,10 +1934,10 @@ static void guest_suspend(SuspendMode mode, Error **errp)
     if (pmutils_supports_mode(mode, &local_err)) {
         mode_supported = true;
         pmutils_suspend(mode, &local_err);
-    }
 
-    if (!local_err) {
-        return;
+        if (!local_err) {
+            return;
+        }
     }
 
     error_free(local_err);
diff --git a/qga/commands-win32.c b/qga/commands-win32.c
index b5fee6a2cd..d23875264f 100644
--- a/qga/commands-win32.c
+++ b/qga/commands-win32.c
@@ -484,7 +484,6 @@ static GuestDiskBusType win2qemu[] = {
     [BusTypeSata] = GUEST_DISK_BUS_TYPE_SATA,
     [BusTypeSd] = GUEST_DISK_BUS_TYPE_SD,
     [BusTypeMmc] = GUEST_DISK_BUS_TYPE_MMC,
-#if (_WIN32_WINNT >= 0x0601)
     [BusTypeVirtual] = GUEST_DISK_BUS_TYPE_VIRTUAL,
     [BusTypeFileBackedVirtual] = GUEST_DISK_BUS_TYPE_FILE_BACKED_VIRTUAL,
     /*
@@ -492,7 +491,6 @@ static GuestDiskBusType win2qemu[] = {
      */
     [BusTypeSpaces] = GUEST_DISK_BUS_TYPE_UNKNOWN,
     [BusTypeNvme] = GUEST_DISK_BUS_TYPE_NVME,
-#endif
 };
 
 static GuestDiskBusType find_bus_type(STORAGE_BUS_TYPE bus)
diff --git a/qga/commands.c b/qga/commands.c
index 172826f8f8..09c683e263 100644
--- a/qga/commands.c
+++ b/qga/commands.c
@@ -270,12 +270,26 @@ static void guest_exec_child_watch(GPid pid, gint status, gpointer data)
     g_spawn_close_pid(pid);
 }
 
-/** Reset ignored signals back to default. */
 static void guest_exec_task_setup(gpointer data)
 {
 #if !defined(G_OS_WIN32)
+    bool has_merge = *(bool *)data;
     struct sigaction sigact;
 
+    if (has_merge) {
+        /*
+         * FIXME: When `GLIB_VERSION_MIN_REQUIRED` is bumped to 2.58+, use
+         * g_spawn_async_with_fds() to be portable on Windows. The current
+         * logic does not work on Windows because `GSpawnChildSetupFunc` is
+         * run inside the parent, not the child.
+         */
+        /* dup2() returns the new descriptor on success, -1 on error. */
+        if (dup2(STDOUT_FILENO, STDERR_FILENO) == -1) {
+            slog("dup2() failed to merge stderr into stdout: %s",
+                 strerror(errno));
+        }
+    }
+
+    /* Reset ignored signals back to default. */
     memset(&sigact, 0, sizeof(struct sigaction));
     sigact.sa_handler = SIG_DFL;
 
@@ -379,11 +393,23 @@ close:
     return false;
 }
 
+static GuestExecCaptureOutputMode ga_parse_capture_output(
+        GuestExecCaptureOutput *capture_output)
+{
+    if (!capture_output)
+        return GUEST_EXEC_CAPTURE_OUTPUT_MODE_NONE;
+    else if (capture_output->type == QTYPE_QBOOL)
+        return capture_output->u.flag ?
GUEST_EXEC_CAPTURE_OUTPUT_MODE_SEPARATED + : GUEST_EXEC_CAPTURE_OUTPUT_MODE_NONE; + else + return capture_output->u.mode; +} + GuestExec *qmp_guest_exec(const char *path, bool has_arg, strList *arg, bool has_env, strList *env, const char *input_data, - bool has_capture_output, bool capture_output, + GuestExecCaptureOutput *capture_output, Error **errp) { GPid pid; @@ -396,7 +422,9 @@ GuestExec *qmp_guest_exec(const char *path, gint in_fd, out_fd, err_fd; GIOChannel *in_ch, *out_ch, *err_ch; GSpawnFlags flags; - bool has_output = (has_capture_output && capture_output); + bool has_output = false; + bool has_merge = false; + GuestExecCaptureOutputMode output_mode; g_autofree uint8_t *input = NULL; size_t ninput = 0; @@ -415,12 +443,36 @@ GuestExec *qmp_guest_exec(const char *path, flags = G_SPAWN_SEARCH_PATH | G_SPAWN_DO_NOT_REAP_CHILD | G_SPAWN_SEARCH_PATH_FROM_ENVP; - if (!has_output) { + + output_mode = ga_parse_capture_output(capture_output); + switch (output_mode) { + case GUEST_EXEC_CAPTURE_OUTPUT_MODE_NONE: flags |= G_SPAWN_STDOUT_TO_DEV_NULL | G_SPAWN_STDERR_TO_DEV_NULL; + break; + case GUEST_EXEC_CAPTURE_OUTPUT_MODE_STDOUT: + has_output = true; + flags |= G_SPAWN_STDERR_TO_DEV_NULL; + break; + case GUEST_EXEC_CAPTURE_OUTPUT_MODE_STDERR: + has_output = true; + flags |= G_SPAWN_STDOUT_TO_DEV_NULL; + break; + case GUEST_EXEC_CAPTURE_OUTPUT_MODE_SEPARATED: + has_output = true; + break; +#if !defined(G_OS_WIN32) + case GUEST_EXEC_CAPTURE_OUTPUT_MODE_MERGED: + has_output = true; + has_merge = true; + break; +#endif + case GUEST_EXEC_CAPTURE_OUTPUT_MODE__MAX: + /* Silence warning; impossible branch */ + break; } ret = g_spawn_async_with_pipes(NULL, argv, envp, flags, - guest_exec_task_setup, NULL, &pid, input_data ? &in_fd : NULL, + guest_exec_task_setup, &has_merge, &pid, input_data ? &in_fd : NULL, has_output ? &out_fd : NULL, has_output ? &err_fd : NULL, &gerr); if (!ret) { error_setg(errp, QERR_QGA_COMMAND_FAILED, gerr->message); diff --git a/qga/installer/qemu-ga.wxs b/qga/installer/qemu-ga.wxs index 51340f7ecc..df572adb4a 100644 --- a/qga/installer/qemu-ga.wxs +++ b/qga/installer/qemu-ga.wxs @@ -31,6 +31,7 @@ /> + @@ -121,27 +122,31 @@ + + + - + diff --git a/qga/meson.build b/qga/meson.build index ad17dc7dca..d3291b4376 100644 --- a/qga/meson.build +++ b/qga/meson.build @@ -22,7 +22,7 @@ have_qga_vss = get_option('qga_vss') \ Then run configure with: --extra-cxxflags="-isystem /path/to/vss/inc/win2003"''') \ .require(midl.found() or widl.found(), error_message: 'VSS support requires midl or widl') \ - .require(not enable_static, + .require(not get_option('prefer_static'), error_message: 'VSS support requires dynamic linking with GLib') \ .allowed() @@ -154,7 +154,7 @@ if targetos == 'windows' qemu_ga_msi_arch[cpu], qemu_ga_msi_vss, '-D', 'BUILD_DIR=' + meson.project_build_root(), - '-D', 'BIN_DIR=' + glib.get_variable('bindir'), + '-D', 'BIN_DIR=' + glib_pc.get_variable('bindir'), '-D', 'QEMU_GA_VERSION=' + config_host['QEMU_GA_VERSION'], '-D', 'QEMU_GA_MANUFACTURER=' + config_host['QEMU_GA_MANUFACTURER'], '-D', 'QEMU_GA_DISTRO=' + config_host['QEMU_GA_DISTRO'], diff --git a/qga/qapi-schema.json b/qga/qapi-schema.json index 796434ed34..42fb046eb7 100644 --- a/qga/qapi-schema.json +++ b/qga/qapi-schema.json @@ -4,10 +4,10 @@ ## # = General note concerning the use of guest agent interfaces # -# "unsupported" is a higher-level error than the errors that individual -# commands might document. 
The caller should always be prepared to receive
-# QERR_UNSUPPORTED, even if the given command doesn't specify it, or doesn't
-# document any failure mode at all.
+# "unsupported" is a higher-level error than the errors that
+# individual commands might document.  The caller should always be
+# prepared to receive QERR_UNSUPPORTED, even if the given command
+# doesn't specify it, or doesn't document any failure mode at all.
 ##
 
 ##
@@ -38,28 +38,27 @@
 ##
 # @guest-sync-delimited:
 #
-# Echo back a unique integer value, and prepend to response a
-# leading sentinel byte (0xFF) the client can check scan for.
+# Echo back a unique integer value, and prepend to response a leading
+# sentinel byte (0xFF) the client can scan for.
 #
-# This is used by clients talking to the guest agent over the
-# wire to ensure the stream is in sync and doesn't contain stale
-# data from previous client. It must be issued upon initial
-# connection, and after any client-side timeouts (including
-# timeouts on receiving a response to this command).
+# This is used by clients talking to the guest agent over the wire to
+# ensure the stream is in sync and doesn't contain stale data from a
+# previous client.  It must be issued upon initial connection, and
+# after any client-side timeouts (including timeouts on receiving a
+# response to this command).
 #
 # After issuing this request, all guest agent responses should be
-# ignored until the response containing the unique integer value
-# the client passed in is returned. Receival of the 0xFF sentinel
-# byte must be handled as an indication that the client's
-# lexer/tokenizer/parser state should be flushed/reset in
-# preparation for reliably receiving the subsequent response. As
-# an optimization, clients may opt to ignore all data until a
-# sentinel value is receiving to avoid unnecessary processing of
-# stale data.
+# ignored until the response containing the unique integer value the
+# client passed in is returned.  Receipt of the 0xFF sentinel byte
+# must be handled as an indication that the client's
+# lexer/tokenizer/parser state should be flushed/reset in preparation
+# for reliably receiving the subsequent response.  As an optimization,
+# clients may opt to ignore all data until a sentinel value is
+# received, to avoid unnecessary processing of stale data.
 #
-# Similarly, clients should also precede this *request*
-# with a 0xFF byte to make sure the guest agent flushes any
-# partially read JSON data from a previous client connection.
+# Similarly, clients should also precede this *request* with a 0xFF
+# byte to make sure the guest agent flushes any partially read JSON
+# data from a previous client connection.
 #
 # @id: randomly generated 64-bit integer
 #
@@ -76,28 +75,27 @@
 #
 # Echo back a unique integer value
 #
-# This is used by clients talking to the guest agent over the
-# wire to ensure the stream is in sync and doesn't contain stale
-# data from previous client. All guest agent responses should be
-# ignored until the provided unique integer value is returned,
-# and it is up to the client to handle stale whole or
-# partially-delivered JSON text in such a way that this response
-# can be obtained.
+# This is used by clients talking to the guest agent over the wire to
+# ensure the stream is in sync and doesn't contain stale data from a
+# previous client.
+# All guest agent responses should be ignored
+# until the provided unique integer value is returned, and it is up to
+# the client to handle stale whole or partially-delivered JSON text in
+# such a way that this response can be obtained.
 #
-# In cases where a partial stale response was previously
-# received by the client, this cannot always be done reliably.
-# One particular scenario being if qemu-ga responses are fed
-# character-by-character into a JSON parser. In these situations,
-# using guest-sync-delimited may be optimal.
+# In cases where a partial stale response was previously received by
+# the client, this cannot always be done reliably.  One particular
+# scenario is when qemu-ga responses are fed character-by-character
+# into a JSON parser.  In these situations, using guest-sync-delimited
+# may be optimal.
 #
-# For clients that fetch responses line by line and convert them
-# to JSON objects, guest-sync should be sufficient, but note that
-# in cases where the channel is dirty some attempts at parsing the
+# For clients that fetch responses line by line and convert them to
+# JSON objects, guest-sync should be sufficient, but note that in
+# cases where the channel is dirty some attempts at parsing the
 # response may result in a parser error.
 #
-# Such clients should also precede this command
-# with a 0xFF byte to make sure the guest agent flushes any
-# partially read JSON data from a previous session.
+# Such clients should also precede this command with a 0xFF byte to
+# make sure the guest agent flushes any partially read JSON data from
+# a previous session.
 #
 # @id: randomly generated 64-bit integer
 #
@@ -121,8 +119,8 @@
 ##
 # @guest-get-time:
 #
-# Get the information about guest's System Time relative to
-# the Epoch of 1970-01-01 in UTC.
+# Get the information about the guest's System Time relative to the
+# Epoch of 1970-01-01 in UTC.
 #
 # Returns: Time in nanoseconds.
 #
@@ -136,23 +134,21 @@
 #
 # Set guest time.
 #
-# When a guest is paused or migrated to a file then loaded
-# from that file, the guest OS has no idea that there
-# was a big gap in the time. Depending on how long the
-# gap was, NTP might not be able to resynchronize the
-# guest.
+# When a guest is paused or migrated to a file then loaded from that
+# file, the guest OS has no idea that there was a big gap in the time.
+# Depending on how long the gap was, NTP might not be able to
+# resynchronize the guest.
 #
-# This command tries to set guest's System Time to the
-# given value, then sets the Hardware Clock (RTC) to the
-# current System Time. This will make it easier for a guest
-# to resynchronize without waiting for NTP. If no @time is
-# specified, then the time to set is read from RTC. However,
-# this may not be supported on all platforms (i.e. Windows).
-# If that's the case users are advised to always pass a
+# This command tries to set the guest's System Time to the given
+# value, then sets the Hardware Clock (RTC) to the current System
+# Time.  This will make it easier for a guest to resynchronize without
+# waiting for NTP.  If no @time is specified, then the time to set is
+# read from RTC.  However, this may not be supported on all platforms
+# (e.g. Windows).  If that's the case, users are advised to always pass a
 # value.
 #
-# @time: time of nanoseconds, relative to the Epoch
-# of 1970-01-01 in UTC.
+# @time: time in nanoseconds, relative to the Epoch of 1970-01-01 in
+# UTC.
 #
 # Returns: Nothing on success.
# @@ -171,7 +167,7 @@ # @enabled: whether command is currently enabled by guest admin # # @success-response: whether command returns a response on success -# (since 1.7) +# (since 1.7) # # Since: 1.1.0 ## @@ -207,15 +203,15 @@ ## # @guest-shutdown: # -# Initiate guest-activated shutdown. Note: this is an asynchronous +# Initiate guest-activated shutdown. Note: this is an asynchronous # shutdown request, with no guarantee of successful shutdown. # # @mode: "halt", "powerdown" (default), or "reboot" # -# This command does NOT return a response on success. Success condition -# is indicated by the VM exiting with a zero exit status or, when -# running with --no-shutdown, by issuing the query-status QMP command -# to confirm the VM status is "shutdown". +# This command does NOT return a response on success. Success +# condition is indicated by the VM exiting with a zero exit status or, +# when running with --no-shutdown, by issuing the query-status QMP +# command to confirm the VM status is "shutdown". # # Since: 0.15.0 ## @@ -259,7 +255,7 @@ # Result of guest agent file-read operation # # @count: number of bytes read (note: count is *before* -# base64-encoding is applied) +# base64-encoding is applied) # # @buf-b64: base64-encoded bytes read # @@ -273,13 +269,14 @@ ## # @guest-file-read: # -# Read from an open file in the guest. Data will be base64-encoded. +# Read from an open file in the guest. Data will be base64-encoded. # As this command is just for limited, ad-hoc debugging, such as log # file access, the number of bytes to read is limited to 48 MB. # # @handle: filehandle returned by guest-file-open # -# @count: maximum number of bytes to read (default is 4KB, maximum is 48MB) +# @count: maximum number of bytes to read (default is 4KB, maximum is +# 48MB) # # Returns: @GuestFileRead on success. # @@ -295,7 +292,7 @@ # Result of guest agent file-write operation # # @count: number of bytes written (note: count is actual bytes -# written, after base64-decoding of provided buffer) +# written, after base64-decoding of provided buffer) # # @eof: whether EOF was encountered during write operation. # @@ -313,8 +310,8 @@ # # @buf-b64: base64-encoded string representing data to be written # -# @count: bytes to write (actual bytes, after base64-decode), -# default is all content in buf-b64 buffer after base64 decoding +# @count: bytes to write (actual bytes, after base64-decode), default +# is all content in buf-b64 buffer after base64 decoding # # Returns: @GuestFileWrite on success. # @@ -345,7 +342,9 @@ # Symbolic names for use in @guest-file-seek # # @set: Set to the specified offset (same effect as 'whence':0) +# # @cur: Add offset to the current location (same effect as 'whence':1) +# # @end: Add offset to the end of the file (same effect as 'whence':2) # # Since: 2.6 @@ -358,8 +357,9 @@ # Controls the meaning of offset to @guest-file-seek. # # @value: Integral value (0 for set, 1 for cur, 2 for end), available -# for historical reasons, and might differ from the host's or -# guest's SEEK_* values (since: 0.15) +# for historical reasons, and might differ from the host's or +# guest's SEEK_* values (since: 0.15) +# # @name: Symbolic name, and preferred interface # # Since: 2.6 @@ -371,7 +371,7 @@ # @guest-file-seek: # # Seek to a position in the file, as with fseek(), and return the -# current file position afterward. Also encapsulates ftell()'s +# current file position afterward. Also encapsulates ftell()'s # functionality, with offset=0 and whence=1. 
# # @handle: filehandle returned by guest-file-open @@ -420,12 +420,13 @@ ## # @guest-fsfreeze-status: # -# Get guest fsfreeze state. error state indicates +# Get guest fsfreeze state. # -# Returns: GuestFsfreezeStatus ("thawed", "frozen", etc., as defined below) +# Returns: GuestFsfreezeStatus ("thawed", "frozen", etc., as defined +# below) # -# Note: This may fail to properly report the current state as a result of -# some other guest processes having issued an fs freeze/thaw. +# Note: This may fail to properly report the current state as a result +# of some other guest processes having issued an fs freeze/thaw. # # Since: 0.15.0 ## @@ -435,18 +436,18 @@ ## # @guest-fsfreeze-freeze: # -# Sync and freeze all freezable, local guest filesystems. If this +# Sync and freeze all freezable, local guest filesystems. If this # command succeeded, you may call @guest-fsfreeze-thaw later to # unfreeze. # # Note: On Windows, the command is implemented with the help of a -# Volume Shadow-copy Service DLL helper. The frozen state is limited -# for up to 10 seconds by VSS. +# Volume Shadow-copy Service DLL helper. The frozen state is +# limited for up to 10 seconds by VSS. # -# Returns: Number of file systems currently frozen. On error, all filesystems -# will be thawed. If no filesystems are frozen as a result of this call, -# then @guest-fsfreeze-status will remain "thawed" and calling -# @guest-fsfreeze-thaw is not necessary. +# Returns: Number of file systems currently frozen. On error, all +# filesystems will be thawed. If no filesystems are frozen as a +# result of this call, then @guest-fsfreeze-status will remain +# "thawed" and calling @guest-fsfreeze-thaw is not necessary. # # Since: 0.15.0 ## @@ -456,15 +457,15 @@ ## # @guest-fsfreeze-freeze-list: # -# Sync and freeze specified guest filesystems. -# See also @guest-fsfreeze-freeze. +# Sync and freeze specified guest filesystems. See also +# @guest-fsfreeze-freeze. # # @mountpoints: an array of mountpoints of filesystems to be frozen. -# If omitted, every mounted filesystem is frozen. -# Invalid mount points are ignored. +# If omitted, every mounted filesystem is frozen. Invalid mount +# points are ignored. # -# Returns: Number of file systems currently frozen. On error, all filesystems -# will be thawed. +# Returns: Number of file systems currently frozen. On error, all +# filesystems will be thawed. # # Since: 2.2 ## @@ -480,10 +481,9 @@ # Returns: Number of file systems thawed by this call # # Note: if return value does not match the previous call to -# guest-fsfreeze-freeze, this likely means some freezable -# filesystems were unfrozen before this call, and that the -# filesystem state may have changed before issuing this -# command. +# guest-fsfreeze-freeze, this likely means some freezable +# filesystems were unfrozen before this call, and that the +# filesystem state may have changed before issuing this command. # # Since: 0.15.0 ## @@ -494,8 +494,11 @@ # @GuestFilesystemTrimResult: # # @path: path that was trimmed +# # @error: an error message when trim failed +# # @trimmed: bytes trimmed for this path +# # @minimum: reported effective minimum for this path # # Since: 2.4 @@ -519,15 +522,16 @@ # # Discard (or "trim") blocks which are not in use by the filesystem. # -# @minimum: Minimum contiguous free range to discard, in bytes. Free ranges -# smaller than this may be ignored (this is a hint and the guest -# may not respect it). 
By increasing this value, the fstrim
-# operation will complete more quickly for filesystems with badly
-# fragmented free space, although not all blocks will be discarded.
-# The default value is zero, meaning "discard every free block".
+# @minimum: Minimum contiguous free range to discard, in bytes.  Free
+# ranges smaller than this may be ignored (this is a hint and the
+# guest may not respect it).  By increasing this value, the fstrim
+# operation will complete more quickly for filesystems with badly
+# fragmented free space, although not all blocks will be
+# discarded.  The default value is zero, meaning "discard every
+# free block".
 #
-# Returns: A @GuestFilesystemTrimResponse which contains the
-# status of all trimmed paths. (since 2.4)
+# Returns: A @GuestFilesystemTrimResponse which contains the status of
+# all trimmed paths. (since 2.4)
 #
 # Since: 1.2
 ##
@@ -540,25 +544,26 @@
 #
 # Suspend guest to disk.
 #
-# This command attempts to suspend the guest using three strategies, in this
-# order:
+# This command attempts to suspend the guest using three strategies,
+# in this order:
 #
 # - systemd hibernate
 # - pm-utils (via pm-hibernate)
 # - manual write into sysfs
 #
-# This command does NOT return a response on success. There is a high chance
-# the command succeeded if the VM exits with a zero exit status or, when
-# running with --no-shutdown, by issuing the query-status QMP command to
-# to confirm the VM status is "shutdown". However, the VM could also exit
-# (or set its status to "shutdown") due to other reasons.
+# This command does NOT return a response on success.  There is a high
+# chance the command succeeded if the VM exits with a zero exit status
+# or, when running with --no-shutdown, by issuing the query-status QMP
+# command to confirm the VM status is "shutdown".  However, the VM
+# could also exit (or set its status to "shutdown") due to other
+# reasons.
 #
 # The following errors may be returned:
 #
 # - If suspend to disk is not supported, Unsupported
 #
-# Notes: It's strongly recommended to issue the guest-sync command before
-# sending commands when the guest resumes
+# Notes: It's strongly recommended to issue the guest-sync command
+# before sending commands when the guest resumes
 #
 # Since: 1.1
 ##
@@ -569,21 +574,22 @@
 #
 # Suspend guest to ram.
 #
-# This command attempts to suspend the guest using three strategies, in this
-# order:
+# This command attempts to suspend the guest using three strategies,
+# in this order:
 #
 # - systemd suspend
 # - pm-utils (via pm-suspend)
 # - manual write into sysfs
 #
 # IMPORTANT: guest-suspend-ram requires working wakeup support in
 # QEMU. You should check QMP command query-current-machine returns
-# wakeup-suspend-support: true before issuing this command. Failure in
-# doing so can result in a suspended guest that QEMU will not be able to
-# awaken, forcing the user to power cycle the guest to bring it back.
+# wakeup-suspend-support: true before issuing this command.  Failure
+# in doing so can result in a suspended guest that QEMU will not be
+# able to awaken, forcing the user to power cycle the guest to bring
+# it back.
 #
-# This command does NOT return a response on success. There are two options
-# to check for success:
+# This command does NOT return a response on success.  There are two
+# options to check for success:
 #
 # 1. Wait for the SUSPEND QMP event from QEMU
 # 2.
Issue the query-status QMP command to confirm the VM status is @@ -593,8 +599,8 @@ # # - If suspend to ram is not supported, Unsupported # -# Notes: It's strongly recommended to issue the guest-sync command before -# sending commands when the guest resumes +# Notes: It's strongly recommended to issue the guest-sync command +# before sending commands when the guest resumes # # Since: 1.1 ## @@ -605,19 +611,21 @@ # # Save guest state to disk and suspend to ram. # -# This command attempts to suspend the guest by executing, in this order: +# This command attempts to suspend the guest by executing, in this +# order: # # - systemd hybrid-sleep # - pm-utils (via pm-suspend-hybrid) # # IMPORTANT: guest-suspend-hybrid requires working wakeup support in # QEMU. You should check QMP command query-current-machine returns -# wakeup-suspend-support: true before issuing this command. Failure in -# doing so can result in a suspended guest that QEMU will not be able to -# awaken, forcing the user to power cycle the guest to bring it back. +# wakeup-suspend-support: true before issuing this command. Failure +# in doing so can result in a suspended guest that QEMU will not be +# able to awaken, forcing the user to power cycle the guest to bring +# it back. # -# This command does NOT return a response on success. There are two options -# to check for success: +# This command does NOT return a response on success. There are two +# options to check for success: # # 1. Wait for the SUSPEND QMP event from QEMU # 2. Issue the query-status QMP command to confirm the VM status is @@ -627,8 +635,8 @@ # # - If hybrid suspend is not supported, Unsupported # -# Notes: It's strongly recommended to issue the guest-sync command before -# sending commands when the guest resumes +# Notes: It's strongly recommended to issue the guest-sync command +# before sending commands when the guest resumes # # Since: 1.1 ## @@ -705,8 +713,8 @@ # # @ip-addresses: List of addresses assigned to @name # -# @statistics: various statistic counters related to @name -# (since 2.11) +# @statistics: various statistic counters related to @name (since +# 2.11) # # Since: 1.1 ## @@ -719,10 +727,9 @@ ## # @guest-network-get-interfaces: # -# Get list of guest IP addresses, MAC addresses -# and netmasks. +# Get list of guest IP addresses, MAC addresses and netmasks. # -# Returns: List of GuestNetworkInfo on success. +# Returns: List of GuestNetworkInterface on success. # # Since: 1.1 ## @@ -736,10 +743,10 @@ # # @online: Whether the VCPU is enabled. # -# @can-offline: Whether offlining the VCPU is possible. This member -# is always filled in by the guest agent when the structure is -# returned, and always ignored on input (hence it can be omitted -# then). +# @can-offline: Whether offlining the VCPU is possible. This member +# is always filled in by the guest agent when the structure is +# returned, and always ignored on input (hence it can be omitted +# then). # # Since: 1.5 ## @@ -755,8 +762,8 @@ # # This is a read-only operation. # -# Returns: The list of all VCPUs the guest knows about. Each VCPU is put on the -# list exactly once, but their order is unspecified. +# Returns: The list of all VCPUs the guest knows about. Each VCPU is +# put on the list exactly once, but their order is unspecified. # # Since: 1.5 ## @@ -766,36 +773,36 @@ ## # @guest-set-vcpus: # -# Attempt to reconfigure (currently: enable/disable) logical processors inside -# the guest. +# Attempt to reconfigure (currently: enable/disable) logical +# processors inside the guest. 
# -# The input list is processed node by node in order. In each node @logical-id -# is used to look up the guest VCPU, for which @online specifies the requested -# state. The set of distinct @logical-id's is only required to be a subset of -# the guest-supported identifiers. There's no restriction on list length or on -# repeating the same @logical-id (with possibly different @online field). -# Preferably the input list should describe a modified subset of -# @guest-get-vcpus' return value. +# The input list is processed node by node in order. In each node +# @logical-id is used to look up the guest VCPU, for which @online +# specifies the requested state. The set of distinct @logical-id's is +# only required to be a subset of the guest-supported identifiers. +# There's no restriction on list length or on repeating the same +# @logical-id (with possibly different @online field). Preferably the +# input list should describe a modified subset of @guest-get-vcpus' +# return value. # -# Returns: The length of the initial sublist that has been successfully -# processed. The guest agent maximizes this value. Possible cases: +# Returns: The length of the initial sublist that has been +# successfully processed. The guest agent maximizes this value. +# Possible cases: # -# - 0: -# if the @vcpus list was empty on input. Guest state -# has not been changed. Otherwise, -# - Error: -# processing the first node of @vcpus failed for the -# reason returned. Guest state has not been changed. -# Otherwise, -# - < length(@vcpus): -# more than zero initial nodes have been processed, -# but not the entire @vcpus list. Guest state has -# changed accordingly. To retrieve the error -# (assuming it persists), repeat the call with the -# successfully processed initial sublist removed. -# Otherwise, -# - length(@vcpus): -# call successful. +# - 0: +# if the @vcpus list was empty on input. Guest state has not +# been changed. Otherwise, +# - Error: +# processing the first node of @vcpus failed for the reason +# returned. Guest state has not been changed. Otherwise, +# - < length(@vcpus): +# more than zero initial nodes have been processed, but not the +# entire @vcpus list. Guest state has changed accordingly. To +# retrieve the error (assuming it persists), repeat the call +# with the successfully processed initial sublist removed. +# Otherwise, +# - length(@vcpus): +# call successful. 
# # Since: 1.5 ## @@ -809,24 +816,43 @@ # An enumeration of bus type of disks # # @ide: IDE disks +# # @fdc: floppy disks +# # @scsi: SCSI disks +# # @virtio: virtio disks +# # @xen: Xen disks +# # @usb: USB disks +# # @uml: UML disks +# # @sata: SATA disks +# # @sd: SD cards +# # @unknown: Unknown bus type +# # @ieee1394: Win IEEE 1394 bus type +# # @ssa: Win SSA bus type +# # @fibre: Win fiber channel bus type +# # @raid: Win RAID bus type +# # @iscsi: Win iScsi bus type +# # @sas: Win serial-attaches SCSI bus type +# # @mmc: Win multimedia card (MMC) bus type +# # @virtual: Win virtual bus type +# # @file-backed-virtual: Win file-backed bus type +# # @nvme: NVMe disks (since 7.1) # # Since: 2.2; 'Unknown' and all entries below since 2.4 @@ -841,8 +867,11 @@ # @GuestPCIAddress: # # @domain: domain id +# # @bus: bus id +# # @slot: slot id +# # @function: function id # # Since: 2.2 @@ -855,8 +884,11 @@ # @GuestCCWAddress: # # @cssid: channel subsystem image id +# # @ssid: subchannel set id +# # @subchno: subchannel number +# # @devno: device number # # Since: 6.0 @@ -870,13 +902,21 @@ ## # @GuestDiskAddress: # -# @pci-controller: controller's PCI address (fields are set to -1 if invalid) +# @pci-controller: controller's PCI address (fields are set to -1 if +# invalid) +# # @bus-type: bus type +# # @bus: bus id +# # @target: target id +# # @unit: unit id +# # @serial: serial number (since: 3.1) +# # @dev: device node (POSIX) or device UNC (Windows) (since: 3.1) +# # @ccw-address: CCW address on s390x (since: 6.0) # # Since: 2.2 @@ -891,8 +931,8 @@ ## # @GuestNVMeSmart: # -# NVMe smart informations, based on NVMe specification, -# section +# NVMe smart informations, based on NVMe specification, section +# # # Since: 7.1 ## @@ -941,13 +981,18 @@ # @GuestDiskInfo: # # @name: device node (Linux) or device UNC (Windows) +# # @partition: whether this is a partition or disk -# @dependencies: list of device dependencies; e.g. for LVs of the LVM this will -# hold the list of PVs, for LUKS encrypted volume this will -# contain the disk where the volume is placed. (Linux) +# +# @dependencies: list of device dependencies; e.g. for LVs of the LVM +# this will hold the list of PVs, for LUKS encrypted volume this +# will contain the disk where the volume is placed. (Linux) +# # @address: disk address information (only for non-virtual devices) -# @alias: optional alias assigned to the disk, on Linux this is a name assigned -# by device mapper +# +# @alias: optional alias assigned to the disk, on Linux this is a name +# assigned by device mapper +# # @smart: disk smart information (Since 7.1) # # Since: 5.2 @@ -960,10 +1005,10 @@ ## # @guest-get-disks: # -# Returns: The list of disks in the guest. For Windows these are only the -# physical disks. On Linux these are all root block devices of -# non-zero size including e.g. removable devices, loop devices, -# NBD, etc. +# Returns: The list of disks in the guest. For Windows these are only +# the physical disks. On Linux these are all root block devices +# of non-zero size including e.g. removable devices, loop devices, +# NBD, etc. 
# # Since: 5.2 ## @@ -974,12 +1019,17 @@ # @GuestFilesystemInfo: # # @name: disk name +# # @mountpoint: mount point path +# # @type: file system type string +# # @used-bytes: file system used bytes (since 3.0) +# # @total-bytes: non-root file system total bytes (since 3.0) -# @disk: an array of disk hardware information that the volume lies on, -# which may be empty if the disk type is not supported +# +# @disk: an array of disk hardware information that the volume lies +# on, which may be empty if the disk type is not supported # # Since: 2.2 ## @@ -992,9 +1042,9 @@ # @guest-get-fsinfo: # # Returns: The list of filesystems information mounted in the guest. -# The returned mountpoints may be specified to -# @guest-fsfreeze-freeze-list. -# Network filesystems (such as CIFS and NFS) are not listed. +# The returned mountpoints may be specified to +# @guest-fsfreeze-freeze-list. Network filesystems (such as CIFS +# and NFS) are not listed. # # Since: 2.2 ## @@ -1005,21 +1055,23 @@ # @guest-set-user-password: # # @username: the user account whose password to change +# # @password: the new password entry string, base64 encoded +# # @crypted: true if password is already crypt()d, false if raw # -# If the @crypted flag is true, it is the caller's responsibility -# to ensure the correct crypt() encryption scheme is used. This -# command does not attempt to interpret or report on the encryption -# scheme. Refer to the documentation of the guest operating system -# in question to determine what is supported. +# If the @crypted flag is true, it is the caller's responsibility to +# ensure the correct crypt() encryption scheme is used. This command +# does not attempt to interpret or report on the encryption scheme. +# Refer to the documentation of the guest operating system in question +# to determine what is supported. # -# Not all guest operating systems will support use of the -# @crypted flag, as they may require the clear-text password +# Not all guest operating systems will support use of the @crypted +# flag, as they may require the clear-text password # # The @password parameter must always be base64 encoded before -# transmission, even if already crypt()d, to ensure it is 8-bit -# safe when passed as JSON. +# transmission, even if already crypt()d, to ensure it is 8-bit safe +# when passed as JSON. # # Returns: Nothing on success. # @@ -1031,14 +1083,15 @@ ## # @GuestMemoryBlock: # -# @phys-index: Arbitrary guest-specific unique identifier of the MEMORY BLOCK. +# @phys-index: Arbitrary guest-specific unique identifier of the +# MEMORY BLOCK. # # @online: Whether the MEMORY BLOCK is enabled in guest. # -# @can-offline: Whether offlining the MEMORY BLOCK is possible. -# This member is always filled in by the guest agent when the -# structure is returned, and always ignored on input (hence it -# can be omitted then). +# @can-offline: Whether offlining the MEMORY BLOCK is possible. This +# member is always filled in by the guest agent when the structure +# is returned, and always ignored on input (hence it can be +# omitted then). # # Since: 2.3 ## @@ -1054,9 +1107,9 @@ # # This is a read-only operation. # -# Returns: The list of all memory blocks the guest knows about. -# Each memory block is put on the list exactly once, but their order -# is unspecified. +# Returns: The list of all memory blocks the guest knows about. Each +# memory block is put on the list exactly once, but their order is +# unspecified. 
 #
 # Since: 2.3
 ##
 
@@ -1068,12 +1121,17 @@
 #
 # An enumeration of memory block operation result.
 #
-# @success: the operation of online/offline memory block is successful.
-# @not-found: can't find the corresponding memoryXXX directory in sysfs.
+# @success: the operation of online/offline memory block is
+# successful.
+#
+# @not-found: can't find the corresponding memoryXXX directory in
+# sysfs.
+#
 # @operation-not-supported: for some old kernels, it does not support
-# online or offline memory block.
-# @operation-failed: the operation of online/offline memory block fails,
-# because of some errors happen.
+# online or offline memory block.
+#
+# @operation-failed: the operation of online/offline memory block
+# fails because some error happened.
 #
 # Since: 2.3
 ##
@@ -1088,10 +1146,9 @@
 #
 # @response: the result of memory block operation.
 #
-# @error-code: the error number.
-# When memory block operation fails, we assign the value of
-# 'errno' to this member, it indicates what goes wrong.
-# When the operation succeeds, it will be omitted.
+# @error-code: the error number.  When memory block operation fails,
+# we assign the value of 'errno' to this member; it indicates what
+# went wrong.  When the operation succeeds, it will be omitted.
 #
 # Since: 2.3
 ##
@@ -1103,24 +1160,25 @@
 ##
 # @guest-set-memory-blocks:
 #
-# Attempt to reconfigure (currently: enable/disable) state of memory blocks
-# inside the guest.
+# Attempt to reconfigure (currently: enable/disable) state of memory
+# blocks inside the guest.
 #
-# The input list is processed node by node in order. In each node @phys-index
-# is used to look up the guest MEMORY BLOCK, for which @online specifies the
-# requested state. The set of distinct @phys-index's is only required to be a
-# subset of the guest-supported identifiers. There's no restriction on list
-# length or on repeating the same @phys-index (with possibly different @online
-# field).
+# The input list is processed node by node in order.  In each node
+# @phys-index is used to look up the guest MEMORY BLOCK, for which
+# @online specifies the requested state.  The set of distinct
+# @phys-index's is only required to be a subset of the guest-supported
+# identifiers.  There's no restriction on list length or on repeating
+# the same @phys-index (with possibly different @online field).
 # Preferably the input list should describe a modified subset of
 # @guest-get-memory-blocks' return value.
 #
-# Returns: The operation results, it is a list of @GuestMemoryBlockResponse,
-# which is corresponding to the input list.
+# Returns: The operation results, as a list of
+# @GuestMemoryBlockResponse, corresponding to the input
+# list.
 #
-# Note: it will return NULL if the @mem-blks list was empty on input,
-# or there is an error, and in this case, guest state will not be
-# changed.
+# Note: it will return NULL if the @mem-blks list was empty on
+# input, or if there is an error; in this case, guest state will
+# not be changed.
 #
 # Since: 2.3
 ##
@@ -1131,9 +1189,9 @@
 ##
 # @GuestMemoryBlockInfo:
 #
-# @size: the size (in bytes) of the guest memory blocks,
-# which are the minimal units of memory block online/offline
-# operations (also called Logical Memory Hotplug).
+# @size: the size (in bytes) of the guest memory blocks, which are the
+# minimal units of memory block online/offline operations (also
+# called Logical Memory Hotplug).
 #
 # Since: 2.3
 ##
@@ -1156,17 +1214,23 @@
 # @GuestExecStatus:
 #
 # @exited: true if process has already terminated.
+#
 # @exitcode: process exit code if it was normally terminated.
-# @signal: signal number (linux) or unhandled exception code
-# (windows) if the process was abnormally terminated.
+#
+# @signal: signal number (linux) or unhandled exception code (windows)
+# if the process was abnormally terminated.
+#
 # @out-data: base64-encoded stdout of the process
-# @err-data: base64-encoded stderr of the process
-# Note: @out-data and @err-data are present only
-# if 'capture-output' was specified for 'guest-exec'
-# @out-truncated: true if stdout was not fully captured
-# due to size limitation.
-# @err-truncated: true if stderr was not fully captured
-# due to size limitation.
+#
+# @err-data: base64-encoded stderr of the process.  Note: @out-data
+# and @err-data are present only if 'capture-output' was specified
+# for 'guest-exec'
+#
+# @out-truncated: true if stdout was not fully captured due to size
+# limitation.
+#
+# @err-truncated: true if stderr was not fully captured due to size
+# limitation.
 #
 # Since: 2.5
 ##
@@ -1177,8 +1241,9 @@
 ##
 # @guest-exec-status:
 #
-# Check status of process associated with PID retrieved via guest-exec.
-# Reap the process and associated metadata if it has exited.
+# Check status of process associated with PID retrieved via
+# guest-exec.  Reap the process and associated metadata if it has
+# exited.
 #
 # @pid: pid returned from guest-exec
 #
@@ -1200,17 +1265,55 @@
 { 'struct': 'GuestExec',
   'data': { 'pid': 'int'} }
 
+##
+# @GuestExecCaptureOutputMode:
+#
+# An enumeration of guest-exec capture modes.
+#
+# @none: do not capture any output
+# @stdout: only capture stdout
+# @stderr: only capture stderr
+# @separated: capture both stdout and stderr, but separated into
+#             GuestExecStatus out-data and err-data, respectively
+# @merged: capture both stdout and stderr, but merged together
+#          into out-data.  Not effective on Windows guests.
+#
+# Since: 8.0
+##
+{ 'enum': 'GuestExecCaptureOutputMode',
+  'data': [ 'none', 'stdout', 'stderr', 'separated',
+            { 'name': 'merged', 'if': { 'not': 'CONFIG_WIN32' } } ] }
+
+##
+# @GuestExecCaptureOutput:
+#
+# Controls what guest-exec output gets captured.
+#
+# @flag: captures both stdout and stderr if true.  Equivalent
+#        to GuestExecCaptureOutputMode::separated.  (since 2.5)
+# @mode: capture mode; preferred interface
+#
+# Since: 8.0
+##
+{ 'alternate': 'GuestExecCaptureOutput',
+  'data': { 'flag': 'bool',
+            'mode': 'GuestExecCaptureOutputMode'} }
+
 ##
 # @guest-exec:
 #
 # Execute a command in the guest
 #
 # @path: path or executable name to execute
+#
 # @arg: argument list to pass to executable
+#
 # @env: environment variables to pass to executable
+#
 # @input-data: data to be passed to process stdin (base64 encoded)
-# @capture-output: bool flag to enable capture of
-# stdout/stderr of running process. defaults to false.
+#
+# @capture-output: bool flag to enable capture of stdout/stderr of
+# running process.  Defaults to false.
 #
 # Returns: PID on success.
 #
@@ -1218,7 +1321,7 @@
 ##
 { 'command': 'guest-exec',
   'data': { 'path': 'str', '*arg': ['str'], '*env': ['str'],
-            '*input-data': 'str', '*capture-output': 'bool' },
+            '*input-data': 'str', '*capture-output': 'GuestExecCaptureOutput' },
   'returns': 'GuestExec' }
 
 
@@ -1237,9 +1340,9 @@
 #
 # Return a name for the machine.
 #
-# The returned name is not necessarily a fully-qualified domain name, or even
-# present in DNS or some other name service at all. It need not even be unique
-# on your local network or site, but usually it is.
+# The returned name is not necessarily a fully-qualified domain name, +# or even present in DNS or some other name service at all. It need +# not even be unique on your local network or site, but usually it is. # # Returns: the host name of the machine on success # @@ -1253,10 +1356,13 @@ # @GuestUser: # # @user: Username +# # @domain: Logon domain (windows only) -# @login-time: Time of login of this user on the computer. If multiple -# instances of the user are logged in, the earliest login time is -# reported. The value is in fractional seconds since epoch time. +# +# @login-time: Time of login of this user on the computer. If +# multiple instances of the user are logged in, the earliest login +# time is reported. The value is in fractional seconds since +# epoch time. # # Since: 2.10 ## @@ -1265,6 +1371,7 @@ ## # @guest-get-users: +# # Retrieves a list of currently active users on the VM. # # Returns: A unique list of users. @@ -1277,10 +1384,11 @@ ## # @GuestTimezone: # -# @zone: Timezone name. These values may differ depending on guest/OS and -# should only be used for informational purposes. -# @offset: Offset to UTC in seconds, negative numbers for time zones west of -# GMT, positive numbers for east +# @zone: Timezone name. These values may differ depending on guest/OS +# and should only be used for informational purposes. +# +# @offset: Offset to UTC in seconds, negative numbers for time zones +# west of GMT, positive numbers for east # # Since: 2.10 ## @@ -1303,45 +1411,56 @@ # @GuestOSInfo: # # @kernel-release: -# * POSIX: release field returned by uname(2) -# * Windows: build number of the OS +# * POSIX: release field returned by uname(2) +# * Windows: build number of the OS +# # @kernel-version: -# * POSIX: version field returned by uname(2) -# * Windows: version number of the OS +# * POSIX: version field returned by uname(2) +# * Windows: version number of the OS +# # @machine: -# * POSIX: machine field returned by uname(2) -# * Windows: one of x86, x86_64, arm, ia64 +# * POSIX: machine field returned by uname(2) +# * Windows: one of x86, x86_64, arm, ia64 +# # @id: -# * POSIX: as defined by os-release(5) -# * Windows: contains string "mswindows" +# * POSIX: as defined by os-release(5) +# * Windows: contains string "mswindows" +# # @name: -# * POSIX: as defined by os-release(5) -# * Windows: contains string "Microsoft Windows" +# * POSIX: as defined by os-release(5) +# * Windows: contains string "Microsoft Windows" +# # @pretty-name: -# * POSIX: as defined by os-release(5) -# * Windows: product name, e.g. "Microsoft Windows 10 Enterprise" +# * POSIX: as defined by os-release(5) +# * Windows: product name, e.g. "Microsoft Windows 10 Enterprise" +# # @version: -# * POSIX: as defined by os-release(5) -# * Windows: long version string, e.g. "Microsoft Windows Server 2008" +# * POSIX: as defined by os-release(5) +# * Windows: long version string, e.g. "Microsoft Windows Server +# 2008" +# # @version-id: -# * POSIX: as defined by os-release(5) -# * Windows: short version identifier, e.g. "7" or "20012r2" +# * POSIX: as defined by os-release(5) +# * Windows: short version identifier, e.g. 
"7" or "20012r2" +# # @variant: -# * POSIX: as defined by os-release(5) -# * Windows: contains string "server" or "client" +# * POSIX: as defined by os-release(5) +# * Windows: contains string "server" or "client" +# # @variant-id: -# * POSIX: as defined by os-release(5) -# * Windows: contains string "server" or "client" +# * POSIX: as defined by os-release(5) +# * Windows: contains string "server" or "client" # -# Notes: +# Notes: On POSIX systems the fields @id, @name, @pretty-name, +# @version, @version-id, @variant and @variant-id follow the +# definition specified in os-release(5). Refer to the manual page +# for exact description of the fields. Their values are taken +# from the os-release file. If the file is not present in the +# system, or the values are not present in the file, the fields +# are not included. # -# On POSIX systems the fields @id, @name, @pretty-name, @version, @version-id, -# @variant and @variant-id follow the definition specified in os-release(5). -# Refer to the manual page for exact description of the fields. Their values -# are taken from the os-release file. If the file is not present in the system, -# or the values are not present in the file, the fields are not included. -# -# On Windows the values are filled from information gathered from the system. +# On Windows the values are filled from information gathered from +# the system. # # Since: 2.10 ## @@ -1374,6 +1493,7 @@ # @GuestDeviceIdPCI: # # @vendor-id: vendor ID +# # @device-id: device ID # # Since: 5.2 @@ -1384,8 +1504,7 @@ ## # @GuestDeviceId: # -# Id of the device -# - @pci: PCI ID, since: 5.2 +# Id of the device - @pci: PCI ID, since: 5.2 # # Since: 5.2 ## @@ -1398,8 +1517,11 @@ # @GuestDeviceInfo: # # @driver-name: name of the associated driver +# # @driver-date: driver release date, in nanoseconds since the epoch +# # @driver-version: driver version +# # @id: device ID # # Since: 5.2 @@ -1443,8 +1565,8 @@ # # @username: the user account to add the authorized keys # -# Return the public keys from user .ssh/authorized_keys on Unix systems (not -# implemented for other systems). +# Return the public keys from user .ssh/authorized_keys on Unix +# systems (not implemented for other systems). # # Returns: @GuestAuthorizedKeys # @@ -1459,7 +1581,10 @@ # @guest-ssh-add-authorized-keys: # # @username: the user account to add the authorized keys -# @keys: the public keys to add (in OpenSSH/sshd(8) authorized_keys format) +# +# @keys: the public keys to add (in OpenSSH/sshd(8) authorized_keys +# format) +# # @reset: ignore the existing content, set it with the given keys only # # Append public keys to user .ssh/authorized_keys on Unix systems (not @@ -1477,11 +1602,13 @@ # @guest-ssh-remove-authorized-keys: # # @username: the user account to remove the authorized keys -# @keys: the public keys to remove (in OpenSSH/sshd(8) authorized_keys format) # -# Remove public keys from the user .ssh/authorized_keys on Unix systems (not -# implemented for other systems). It's not an error if the key is already -# missing. +# @keys: the public keys to remove (in OpenSSH/sshd(8) authorized_keys +# format) +# +# Remove public keys from the user .ssh/authorized_keys on Unix +# systems (not implemented for other systems). It's not an error if +# the key is already missing. # # Returns: Nothing on success. 
 #
@@ -1526,7 +1653,8 @@
 #
 # @total-ticks: time spent doing I/Os (ms)
 #
-# @weight-ticks: weighted time spent doing I/Os since the last update of this field(ms)
+# @weight-ticks: weighted time spent doing I/Os since the last update
+# of this field (ms)
 #
 # Since: 7.1
 ##
@@ -1553,11 +1681,11 @@
 ##
 # @GuestDiskStatsInfo:
 #
-# @name disk name
+# @name: disk name
 #
-# @major major device number of disk
+# @major: major device number of disk
 #
-# @minor minor device number of disk
+# @minor: minor device number of disk
 ##
 { 'struct': 'GuestDiskStatsInfo',
   'data': {'name': 'str',
@@ -1569,6 +1697,7 @@
 # @guest-get-diskstats:
 #
 # Retrieve information about disk stats.
+#
 # Returns: List of disk stats of guest.
 #
 # Since: 7.1
@@ -1611,8 +1740,8 @@
 #
 # @steal: Stolen time by host (since Linux 2.6.11)
 #
-# @guest: ime spent running a virtual CPU for guest operating systems under
-# the control of the Linux kernel (since Linux 2.6.24)
+# @guest: Time spent running a virtual CPU for guest operating systems
+# under the control of the Linux kernel (since Linux 2.6.24)
 #
 # @guestnice: Time spent running a niced guest (since Linux 2.6.33)
 #
@@ -1650,6 +1779,7 @@
 # @guest-get-cpustats:
 #
 # Retrieve information about CPU stats.
+#
 # Returns: List of CPU stats of guest.
 #
 # Since: 7.1
diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp
index b57508fbe0..ff93b08a9e 100644
--- a/qga/vss-win32/install.cpp
+++ b/qga/vss-win32/install.cpp
@@ -357,6 +357,15 @@ out:
     return hr;
 }
 
+STDAPI_(void) CALLBACK DLLCOMRegister(HWND, HINSTANCE, LPSTR, int)
+{
+    COMRegister();
+}
+
+STDAPI_(void) CALLBACK DLLCOMUnregister(HWND, HINSTANCE, LPSTR, int)
+{
+    COMUnregister();
+}
 
 static BOOL CreateRegistryKey(LPCTSTR key, LPCTSTR value, LPCTSTR data)
 {
@@ -518,7 +527,7 @@ namespace _com_util
 /* Stop QGA VSS provider service using Winsvc API */
 STDAPI StopService(void)
 {
-    HRESULT hr;
+    HRESULT hr = S_OK;
 
     SC_HANDLE manager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS);
     SC_HANDLE service = NULL;
diff --git a/qga/vss-win32/qga-vss.def b/qga/vss-win32/qga-vss.def
index 927782c31b..ee97a81427 100644
--- a/qga/vss-win32/qga-vss.def
+++ b/qga/vss-win32/qga-vss.def
@@ -1,6 +1,8 @@
 LIBRARY      "QGA-PROVIDER.DLL"
 
 EXPORTS
+	DLLCOMRegister
+	DLLCOMUnregister
 	COMRegister		PRIVATE
 	COMUnregister		PRIVATE
 	DllCanUnloadNow		PRIVATE
diff --git a/qga/vss-win32/requester.cpp b/qga/vss-win32/requester.cpp
index b371affeab..3e998af4a8 100644
--- a/qga/vss-win32/requester.cpp
+++ b/qga/vss-win32/requester.cpp
@@ -23,6 +23,8 @@
 /* Call QueryStatus every 10 ms while waiting for frozen event */
 #define VSS_TIMEOUT_EVENT_MSEC 10
 
+#define DEFAULT_VSS_BACKUP_TYPE VSS_BT_FULL
+
 #define err_set(e, err, fmt, ...)                                           \
\ ((e)->error_setg_win32_wrapper((e)->errp, __FILE__, __LINE__, __func__, \ err, fmt, ## __VA_ARGS__)) @@ -234,6 +236,42 @@ out: } } +DWORD get_reg_dword_value(HKEY baseKey, LPCSTR subKey, LPCSTR valueName, + DWORD defaultData) +{ + DWORD regGetValueError; + DWORD dwordData; + DWORD dataSize = sizeof(DWORD); + + regGetValueError = RegGetValue(baseKey, subKey, valueName, RRF_RT_DWORD, + NULL, &dwordData, &dataSize); + if (regGetValueError != ERROR_SUCCESS) { + return defaultData; + } + return dwordData; +} + +bool is_valid_vss_backup_type(VSS_BACKUP_TYPE vssBT) +{ + return (vssBT > VSS_BT_UNDEFINED && vssBT < VSS_BT_OTHER); +} + +VSS_BACKUP_TYPE get_vss_backup_type( + VSS_BACKUP_TYPE defaultVssBT = DEFAULT_VSS_BACKUP_TYPE) +{ + VSS_BACKUP_TYPE vssBackupType; + + vssBackupType = static_cast<VSS_BACKUP_TYPE>( + get_reg_dword_value(HKEY_LOCAL_MACHINE, + QGA_PROVIDER_REGISTRY_ADDRESS, + "VssOption", + defaultVssBT)); + if (!is_valid_vss_backup_type(vssBackupType)) { + return defaultVssBT; + } + return vssBackupType; +} + void requester_freeze(int *num_vols, void *mountpoints, ErrorSet *errset) { COMPointer<IVssAsync> pAsync; @@ -247,6 +285,7 @@ void requester_freeze(int *num_vols, void *mountpoints, ErrorSet *errset) DWORD wait_status; int num_fixed_drives = 0, i; int num_mount_points = 0; + VSS_BACKUP_TYPE vss_bt = get_vss_backup_type(); if (vss_ctx.pVssbc) { /* already frozen */ *num_vols = 0; @@ -294,7 +333,7 @@ void requester_freeze(int *num_vols, void *mountpoints, ErrorSet *errset) goto out; } - hr = vss_ctx.pVssbc->SetBackupState(true, true, VSS_BT_FULL, false); + hr = vss_ctx.pVssbc->SetBackupState(true, true, vss_bt, false); if (FAILED(hr)) { err_set(errset, hr, "failed to set backup state"); goto out; diff --git a/qga/vss-win32/vss-handles.h index 0f8a741ad2..1a7d842129 100644 --- a/qga/vss-win32/vss-handles.h +++ b/qga/vss-win32/vss-handles.h @@ -6,6 +6,9 @@ #define QGA_PROVIDER_NAME "QEMU Guest Agent VSS Provider" #define QGA_PROVIDER_LNAME L(QGA_PROVIDER_NAME) #define QGA_PROVIDER_VERSION L(QEMU_VERSION) +#define QGA_PROVIDER_REGISTRY_ADDRESS "SYSTEM\\CurrentControlSet"\ + "\\Services"\ + "\\" QGA_PROVIDER_NAME #define EVENT_NAME_FROZEN "Global\\QGAVSSEvent-frozen" #define EVENT_NAME_THAW "Global\\QGAVSSEvent-thaw" diff --git a/qobject/json-lexer.c index 632320d72d..51341d96e4 100644 --- a/qobject/json-lexer.c +++ b/qobject/json-lexer.c @@ -139,7 +139,7 @@ static const uint8_t json_lexer[][256] = { * bytes '\xFE', '\xFF'. Structural characters and line * endings are promising resynchronization points. Clients * may use the others to force the JSON parser into known-good - * state; see docs/interop/qmp-spec.txt. + * state; see docs/interop/qmp-spec.rst. */ [0 ... 0x1F] = IN_START | LOOKAHEAD, [0x20 ...
0xFD] = IN_RECOVERY, diff --git a/qom/object_interfaces.c b/qom/object_interfaces.c index f94b6c3193..7d31589b04 100644 --- a/qom/object_interfaces.c +++ b/qom/object_interfaces.c @@ -2,8 +2,8 @@ #include "qemu/cutils.h" #include "qapi/error.h" -#include "qapi/qapi-commands-qom.h" #include "qapi/qapi-visit-qom.h" +#include "qapi/qmp/qobject.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qerror.h" #include "qapi/qmp/qjson.h" diff --git a/replay/replay-input.c b/replay/replay-input.c index 1147e3d34e..bee3dbe528 100644 --- a/replay/replay-input.c +++ b/replay/replay-input.c @@ -22,6 +22,7 @@ void replay_save_input_event(InputEvent *evt) InputKeyEvent *key; InputBtnEvent *btn; InputMoveEvent *move; + InputMultiTouchEvent *mtt; replay_put_dword(evt->type); switch (evt->type) { @@ -58,6 +59,14 @@ void replay_save_input_event(InputEvent *evt) replay_put_dword(move->axis); replay_put_qword(move->value); break; + case INPUT_EVENT_KIND_MTT: + mtt = evt->u.mtt.data; + replay_put_dword(mtt->type); + replay_put_qword(mtt->slot); + replay_put_qword(mtt->tracking_id); + replay_put_dword(mtt->axis); + replay_put_qword(mtt->value); + break; case INPUT_EVENT_KIND__MAX: /* keep gcc happy */ break; @@ -73,6 +82,7 @@ InputEvent *replay_read_input_event(void) InputBtnEvent btn; InputMoveEvent rel; InputMoveEvent abs; + InputMultiTouchEvent mtt; evt.type = replay_get_dword(); switch (evt.type) { @@ -109,6 +119,14 @@ InputEvent *replay_read_input_event(void) evt.u.abs.data->axis = (InputAxis)replay_get_dword(); evt.u.abs.data->value = replay_get_qword(); break; + case INPUT_EVENT_KIND_MTT: + evt.u.mtt.data = &mtt; + evt.u.mtt.data->type = (InputMultiTouchType)replay_get_dword(); + evt.u.mtt.data->slot = replay_get_qword(); + evt.u.mtt.data->tracking_id = replay_get_qword(); + evt.u.mtt.data->axis = (InputAxis)replay_get_dword(); + evt.u.mtt.data->value = replay_get_qword(); + break; case INPUT_EVENT_KIND__MAX: /* keep gcc happy */ break; diff --git a/replay/replay.c b/replay/replay.c index c39156c522..0f7d766efe 100644 --- a/replay/replay.c +++ b/replay/replay.c @@ -74,7 +74,7 @@ uint64_t replay_get_current_icount(void) int replay_get_instructions(void) { int res = 0; - replay_mutex_lock(); + g_assert(replay_mutex_locked()); if (replay_next_event_is(EVENT_INSTRUCTION)) { res = replay_state.instruction_count; if (replay_break_icount != -1LL) { @@ -85,7 +85,6 @@ int replay_get_instructions(void) } } } - replay_mutex_unlock(); return res; } diff --git a/roms/Makefile b/roms/Makefile index 955f92286d..6859685290 100644 --- a/roms/Makefile +++ b/roms/Makefile @@ -126,25 +126,6 @@ build-efi-roms: build-pxe-roms CROSS_COMPILE=$(x86_64_cross_prefix) \ $(patsubst %,bin-x86_64-efi/%.efidrv,$(pxerom_targets)) -# Build scripts can pass compiler/linker flags to the EDK2 -# build tools via the EDK2_BASETOOLS_OPTFLAGS (CFLAGS) and -# EDK2_BASETOOLS_LDFLAGS (LDFLAGS) environment variables. -# -# Example: -# -# make -C roms \ -# EDK2_BASETOOLS_OPTFLAGS='...' \ -# EDK2_BASETOOLS_LDFLAGS='...' 
\ -# efirom -# -edk2-basetools: - cd edk2/BaseTools && git submodule update --init --force \ - Source/C/BrotliCompress/brotli - $(MAKE) -C edk2/BaseTools \ - PYTHON_COMMAND=$${EDK2_PYTHON_COMMAND:-python3} \ - EXTRA_OPTFLAGS='$(EDK2_BASETOOLS_OPTFLAGS)' \ - EXTRA_LDFLAGS='$(EDK2_BASETOOLS_LDFLAGS)' - slof: $(MAKE) -C SLOF CROSS=$(powerpc64_cross_prefix) qemu cp SLOF/boot_rom.bin ../pc-bios/slof.bin @@ -165,8 +146,12 @@ skiboot: $(MAKE) -C skiboot CROSS=$(powerpc64_cross_prefix) cp skiboot/skiboot.lid ../pc-bios/skiboot.lid -efi: edk2-basetools - $(MAKE) -f Makefile.edk2 +efi: + python3 edk2-build.py --config edk2-build.config \ + --version-override "edk2-stable202302-for-qemu" \ + --release-date "03/01/2023" + rm -f ../pc-bios/edk2-*.fd.bz2 + bzip2 --verbose ../pc-bios/edk2-*.fd opensbi32-generic: $(MAKE) -C opensbi \ @@ -200,7 +185,7 @@ clean: rm -rf u-boot/build-e500 $(MAKE) -C u-boot-sam460ex distclean $(MAKE) -C skiboot clean - $(MAKE) -f Makefile.edk2 clean + rm -rf Build $(MAKE) -C opensbi clean $(MAKE) -C qboot clean $(MAKE) -C vbootrom clean diff --git a/roms/Makefile.edk2 deleted file mode 100644 index 485f2244b1..0000000000 --- a/roms/Makefile.edk2 +++ /dev/null @@ -1,178 +0,0 @@ -# Makefile for building firmware binaries and variable store templates for a -# number of virtual platforms in edk2. -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program and the accompanying materials are licensed and made available -# under the terms and conditions of the BSD License that accompanies this -# distribution. The full text of the license may be found at -# <http://opensource.org/licenses/bsd-license.php>. -# -# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT -# WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. - -SHELL = /bin/bash - -target = RELEASE -toolchain = $(shell source ./edk2-funcs.sh && qemu_edk2_get_toolchain $(1)) - -licenses := \ - edk2/License.txt \ - edk2/License-History.txt \ - edk2/OvmfPkg/License.txt \ - edk2/ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3/COPYING.txt \ - edk2/CryptoPkg/Library/OpensslLib/openssl/LICENSE - -# The "edk2-arm-vars.fd" varstore template is suitable for aarch64 as well. -# Similarly, the "edk2-i386-vars.fd" varstore template is suitable for x86_64 -# as well, independently of "secure" too. -flashdevs := \ - aarch64-code \ - arm-code \ - i386-code \ - i386-secure-code \ - x86_64-code \ - x86_64-secure-code \ - x86_64-microvm \ - \ - arm-vars \ - i386-vars - -all: $(foreach flashdev,$(flashdevs),../pc-bios/edk2-$(flashdev).fd.bz2) \ - ../pc-bios/edk2-licenses.txt - -../pc-bios/edk2-%.fd.bz2: ../pc-bios/edk2-%.fd - bzip2 -9 -c $< > $@ - -# When the build completes, we need not keep the uncompressed flash device -# files. -.INTERMEDIATE: $(foreach flashdev,$(flashdevs),../pc-bios/edk2-$(flashdev).fd) - -# Fetch edk2 submodule's submodules. If it is not in a git tree, assume -# we're building from a tarball and that they've already been fetched by -# make-release/tarball scripts. -submodules: - if test -e edk2/.git; then \ - cd edk2 && git submodule update --init --force -- \ - ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 \ - BaseTools/Source/C/BrotliCompress/brotli \ - CryptoPkg/Library/OpensslLib/openssl \ - MdeModulePkg/Library/BrotliCustomDecompressLib/brotli \ - ; \ - fi - -# See notes on the ".NOTPARALLEL" target and the "+" indicator in -# "tests/uefi-test-tools/Makefile".
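Both the deleted "%.fd.bz2" pattern rule above ("bzip2 -9 -c") and the new "efi:" target end up leaving only bzip2-compressed flash images in pc-bios. A rough Python equivalent of that final step, purely as an illustration (the ../pc-bios location mirrors the Makefile; everything else is assumed):

```python
#!/usr/bin/env python3
# Rough sketch of the compression step shared by the old Makefile rule
# ("bzip2 -9 -c $< > $@") and the new efi: target ("bzip2 --verbose"):
# compress each built flash image and drop the uncompressed original.
import bz2
import glob
import os
import shutil

for fd in glob.glob("../pc-bios/edk2-*.fd"):
    with open(fd, "rb") as src, \
         bz2.open(fd + ".bz2", "wb", compresslevel=9) as dst:
        shutil.copyfileobj(src, dst)
    os.unlink(fd)   # like bzip2 without -k, keep only the .bz2 result
```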
-.NOTPARALLEL: - -../pc-bios/edk2-aarch64-code.fd: submodules - +./edk2-build.sh \ - aarch64 \ - --arch=AARCH64 \ - --platform=ArmVirtPkg/ArmVirtQemu.dsc \ - -D NETWORK_IP6_ENABLE \ - -D NETWORK_HTTP_BOOT_ENABLE \ - -D NETWORK_TLS_ENABLE \ - -D TPM2_ENABLE \ - -D TPM2_CONFIG_ENABLE - cp edk2/Build/ArmVirtQemu-AARCH64/$(target)_$(call toolchain,aarch64)/FV/QEMU_EFI.fd \ - $@ - truncate --size=64M $@ - -../pc-bios/edk2-arm-code.fd: submodules - +./edk2-build.sh \ - arm \ - --arch=ARM \ - --platform=ArmVirtPkg/ArmVirtQemu.dsc \ - -D NETWORK_IP6_ENABLE \ - -D NETWORK_HTTP_BOOT_ENABLE \ - -D NETWORK_TLS_ENABLE \ - -D TPM2_ENABLE \ - -D TPM2_CONFIG_ENABLE - cp edk2/Build/ArmVirtQemu-ARM/$(target)_$(call toolchain,arm)/FV/QEMU_EFI.fd \ - $@ - truncate --size=64M $@ - -../pc-bios/edk2-i386-code.fd: submodules - +./edk2-build.sh \ - i386 \ - --arch=IA32 \ - --platform=OvmfPkg/OvmfPkgIa32.dsc \ - -D NETWORK_IP6_ENABLE \ - -D NETWORK_HTTP_BOOT_ENABLE \ - -D NETWORK_TLS_ENABLE \ - -D TPM_ENABLE \ - -D TPM_CONFIG_ENABLE - cp edk2/Build/OvmfIa32/$(target)_$(call toolchain,i386)/FV/OVMF_CODE.fd $@ - -../pc-bios/edk2-i386-secure-code.fd: submodules - +./edk2-build.sh \ - i386 \ - --arch=IA32 \ - --platform=OvmfPkg/OvmfPkgIa32.dsc \ - -D NETWORK_IP6_ENABLE \ - -D NETWORK_HTTP_BOOT_ENABLE \ - -D NETWORK_TLS_ENABLE \ - -D TPM_ENABLE \ - -D TPM_CONFIG_ENABLE \ - -D SECURE_BOOT_ENABLE \ - -D SMM_REQUIRE - cp edk2/Build/OvmfIa32/$(target)_$(call toolchain,i386)/FV/OVMF_CODE.fd $@ - -../pc-bios/edk2-x86_64-code.fd: submodules - +./edk2-build.sh \ - x86_64 \ - --arch=X64 \ - --platform=OvmfPkg/OvmfPkgX64.dsc \ - -D NETWORK_IP6_ENABLE \ - -D NETWORK_HTTP_BOOT_ENABLE \ - -D NETWORK_TLS_ENABLE \ - -D TPM_ENABLE \ - -D TPM_CONFIG_ENABLE - cp edk2/Build/OvmfX64/$(target)_$(call toolchain,x86_64)/FV/OVMF_CODE.fd $@ - -../pc-bios/edk2-x86_64-secure-code.fd: submodules - +./edk2-build.sh \ - x86_64 \ - --arch=IA32 \ - --arch=X64 \ - --platform=OvmfPkg/OvmfPkgIa32X64.dsc \ - -D NETWORK_IP6_ENABLE \ - -D NETWORK_HTTP_BOOT_ENABLE \ - -D NETWORK_TLS_ENABLE \ - -D TPM_ENABLE \ - -D TPM_CONFIG_ENABLE \ - -D SECURE_BOOT_ENABLE \ - -D SMM_REQUIRE - cp edk2/Build/Ovmf3264/$(target)_$(call toolchain,x86_64)/FV/OVMF_CODE.fd $@ - -../pc-bios/edk2-x86_64-microvm.fd: submodules - +./edk2-build.sh \ - x86_64 \ - --arch=X64 \ - --platform=OvmfPkg/Microvm/MicrovmX64.dsc \ - -D NETWORK_IP6_ENABLE \ - -D NETWORK_HTTP_BOOT_ENABLE \ - -D NETWORK_TLS_ENABLE - cp edk2/Build/MicrovmX64/$(target)_$(call toolchain,x86_64)/FV/MICROVM.fd $@ - -../pc-bios/edk2-arm-vars.fd: ../pc-bios/edk2-arm-code.fd - cp edk2/Build/ArmVirtQemu-ARM/$(target)_$(call toolchain,arm)/FV/QEMU_VARS.fd \ - $@ - truncate --size=64M $@ - -../pc-bios/edk2-i386-vars.fd: ../pc-bios/edk2-i386-code.fd - cp edk2/Build/OvmfIa32/$(target)_$(call toolchain,i386)/FV/OVMF_VARS.fd $@ - -# The license file accumulates several individual licenses from under edk2, -# prefixing each individual license with a header (generated by "tail") that -# states its pathname. 
-../pc-bios/edk2-licenses.txt: submodules - tail -n $(shell cat $(licenses) | wc -l) $(licenses) > $@ - dos2unix $@ - -clean: - rm -rf edk2/Build - cd edk2/Conf && \ - rm -rf .cache BuildEnv.sh build_rule.txt target.txt \ - tools_def.txt diff --git a/roms/edk2 b/roms/edk2 index b24306f15d..f80f052277 160000 --- a/roms/edk2 +++ b/roms/edk2 @@ -1 +1 @@ -Subproject commit b24306f15daa2ff8510b06702114724b33895d3c +Subproject commit f80f052277c88a67c55e107b550f504eeea947d3 diff --git a/roms/edk2-build.config b/roms/edk2-build.config new file mode 100644 index 0000000000..66ef9ffcb9 --- /dev/null +++ b/roms/edk2-build.config @@ -0,0 +1,124 @@ +[global] +core = edk2 + +#################################################################################### +# options + +[opts.common] +NETWORK_HTTP_BOOT_ENABLE = TRUE +NETWORK_IP6_ENABLE = TRUE +NETWORK_TLS_ENABLE = TRUE +NETWORK_ISCSI_ENABLE = TRUE +NETWORK_ALLOW_HTTP_CONNECTIONS = TRUE +TPM2_ENABLE = TRUE +TPM2_CONFIG_ENABLE = TRUE +TPM1_ENABLE = TRUE +CAVIUM_ERRATUM_27456 = TRUE + +[opts.ovmf.sb.smm] +SECURE_BOOT_ENABLE = TRUE +SMM_REQUIRE = TRUE + +[opts.armvirt.silent] +DEBUG_PRINT_ERROR_LEVEL = 0x80000000 + +[pcds.nx.broken.grub] +# grub.efi uses EfiLoaderData for code +PcdDxeNxMemoryProtectionPolicy = 0xC000000000007FD1 + +#################################################################################### +# i386 + +[build.ovmf.i386] +desc = ovmf build (32-bit) +conf = OvmfPkg/OvmfPkgIa32.dsc +arch = IA32 +opts = common +plat = OvmfIa32 +dest = ../pc-bios +cpy1 = FV/OVMF_CODE.fd edk2-i386-code.fd +cpy2 = FV/OVMF_VARS.fd edk2-i386-vars.fd + +[build.ovmf.i386.secure] +desc = ovmf build (32-bit, secure boot) +conf = OvmfPkg/OvmfPkgIa32.dsc +arch = IA32 +opts = common + ovmf.sb.smm +plat = OvmfIa32 +dest = ../pc-bios +cpy1 = FV/OVMF_CODE.fd edk2-i386-secure-code.fd + +#################################################################################### +# x86_64 + +[build.ovmf.x86_64] +desc = ovmf build (64-bit) +conf = OvmfPkg/OvmfPkgX64.dsc +arch = X64 +opts = common +plat = OvmfX64 +dest = ../pc-bios +cpy1 = FV/OVMF_CODE.fd edk2-x86_64-code.fd + +[build.ovmf.x86_64.secure] +desc = ovmf build (64-bit, secure boot) +conf = OvmfPkg/OvmfPkgIa32X64.dsc +arch = IA32 X64 +opts = common + ovmf.sb.smm +plat = Ovmf3264 +dest = ../pc-bios +cpy1 = FV/OVMF_CODE.fd edk2-x86_64-secure-code.fd + +[build.ovmf.microvm] +desc = ovmf build for microvm +conf = OvmfPkg/Microvm/MicrovmX64.dsc +arch = X64 +opts = common +plat = MicrovmX64 +dest = ../pc-bios +cpy1 = FV/MICROVM.fd edk2-x86_64-microvm.fd + +#################################################################################### +# arm + +[build.armvirt.arm] +desc = ArmVirt build, 32-bit (arm v7) +conf = ArmVirtPkg/ArmVirtQemu.dsc +arch = ARM +opts = common + armvirt.silent +pcds = nx.broken.grub +plat = ArmVirtQemu-ARM +dest = ../pc-bios +cpy1 = FV/QEMU_EFI.fd edk2-arm-code.fd +cpy2 = FV/QEMU_VARS.fd edk2-arm-vars.fd +pad1 = edk2-arm-code.fd 64m +pad2 = edk2-arm-vars.fd 64m + +#################################################################################### +# aarch64 + +[build.armvirt.aa64] +desc = ArmVirt build, 64-bit (arm v8) +conf = ArmVirtPkg/ArmVirtQemu.dsc +arch = AARCH64 +opts = common + armvirt.silent +pcds = nx.broken.grub +plat = ArmVirtQemu-AARCH64 +dest = ../pc-bios +cpy1 = FV/QEMU_EFI.fd edk2-aarch64-code.fd +pad1 = edk2-aarch64-code.fd 64m + +#################################################################################### +# riscv64 + +[build.riscv.qemu] +conf = 
OvmfPkg/RiscVVirt/RiscVVirtQemu.dsc +arch = RISCV64 +plat = RiscVVirtQemu +dest = ../pc-bios +cpy1 = FV/RISCV_VIRT.fd edk2-riscv.fd +pad1 = edk2-riscv.fd 32m diff --git a/roms/edk2-build.py b/roms/edk2-build.py new file mode 100755 index 0000000000..870893f7c8 --- /dev/null +++ b/roms/edk2-build.py @@ -0,0 +1,380 @@ +#!/usr/bin/python3 +""" +build helper script for edk2, see +https://gitlab.com/kraxel/edk2-build-config + +""" +import os +import sys +import shutil +import argparse +import subprocess +import configparser + +rebase_prefix = "" +version_override = None +release_date = None + +# pylint: disable=unused-variable +def check_rebase(): + """ detect 'git rebase -x edk2-build.py master' testbuilds """ + global rebase_prefix + global version_override + gitdir = '.git' + + if os.path.isfile(gitdir): + with open(gitdir, 'r', encoding = 'utf-8') as f: + (unused, gitdir) = f.read().split() + + if not os.path.exists(f'{gitdir}/rebase-merge/msgnum'): + return + with open(f'{gitdir}/rebase-merge/msgnum', 'r', encoding = 'utf-8') as f: + msgnum = int(f.read()) + with open(f'{gitdir}/rebase-merge/end', 'r', encoding = 'utf-8') as f: + end = int(f.read()) + with open(f'{gitdir}/rebase-merge/head-name', 'r', encoding = 'utf-8') as f: + head = f.read().strip().split('/') + + rebase_prefix = f'[ {int(msgnum/2)} / {int(end/2)} - {head[-1]} ] ' + if msgnum != end and not version_override: + # fixed version speeds up builds + version_override = "test-build-patch-series" + +def get_coredir(cfg): + if cfg.has_option('global', 'core'): + return os.path.abspath(cfg['global']['core']) + return os.getcwd() + +def get_version(cfg): + coredir = get_coredir(cfg) + if version_override: + version = version_override + print('') + print(f'### version [override]: {version}') + return version + if os.environ.get('RPM_PACKAGE_NAME'): + version = os.environ.get('RPM_PACKAGE_NAME') + version += '-' + os.environ.get('RPM_PACKAGE_VERSION') + version += '-' + os.environ.get('RPM_PACKAGE_RELEASE') + print('') + print(f'### version [rpmbuild]: {version}') + return version + if os.path.exists(coredir + '/.git'): + cmdline = [ 'git', 'describe', '--tags', '--abbrev=8', + '--match=edk2-stable*' ] + result = subprocess.run(cmdline, cwd = coredir, + stdout = subprocess.PIPE, + check = True) + version = result.stdout.decode().strip() + print('') + print(f'### version [git]: {version}') + return version + return None + +def pcd_string(name, value): + return f'{name}=L{value}\\0' + +def pcd_version(cfg): + version = get_version(cfg) + if version is None: + return [] + return [ '--pcd', pcd_string('PcdFirmwareVersionString', version) ] + +def pcd_release_date(): + if release_date is None: + return [] + return [ '--pcd', pcd_string('PcdFirmwareReleaseDateString', release_date) ] + +def build_message(line, line2 = None): + if os.environ.get('TERM') in [ 'xterm', 'xterm-256color' ]: + # setxterm title + start = '\x1b]2;' + end = '\x07' + print(f'{start}{rebase_prefix}{line}{end}', end = '') + + print('') + print('###') + print(f'### {rebase_prefix}{line}') + if line2: + print(f'### {line2}') + print('###', flush = True) + +def build_run(cmdline, name, section, silent = False): + print(cmdline, flush = True) + if silent: + print('### building in silent mode ...', flush = True) + result = subprocess.run(cmdline, check = False, + stdout = subprocess.PIPE, + stderr = subprocess.STDOUT) + + logfile = f'{section}.log' + print(f'### writing log to {logfile} ...') + with open(logfile, 'wb') as f: + f.write(result.stdout) + + if 
result.returncode: + print('### BUILD FAILURE') + print('### output') + print(result.stdout.decode()) + print(f'### exit code: {result.returncode}') + else: + print('### OK') + else: + result = subprocess.run(cmdline, check = False) + if result.returncode: + print(f'ERROR: {cmdline[0]} exited with {result.returncode}' + f' while building {name}') + sys.exit(result.returncode) + +def build_copy(plat, tgt, dstdir, copy): + srcdir = f'Build/{plat}/{tgt}_GCC5' + names = copy.split() + srcfile = names[0] + if len(names) > 1: + dstfile = names[1] + else: + dstfile = os.path.basename(srcfile) + print(f'# copy: {srcdir} / {srcfile} => {dstdir} / {dstfile}') + + src = srcdir + '/' + srcfile + dst = dstdir + '/' + dstfile + os.makedirs(os.path.dirname(dst), exist_ok = True) + shutil.copy(src, dst) + +def pad_file(dstdir, pad): + args = pad.split() + if len(args) < 2: + raise RuntimeError(f'missing arg for pad ({args})') + name = args[0] + size = args[1] + cmdline = [ + 'truncate', + '--size', size, + dstdir + '/' + name, + ] + print(f'# padding: {dstdir} / {name} => {size}') + subprocess.run(cmdline, check = True) + +# pylint: disable=too-many-branches +def build_one(cfg, build, jobs = None, silent = False): + cmdline = [ 'build' ] + cmdline += [ '-t', 'GCC5' ] + cmdline += [ '-p', cfg[build]['conf'] ] + + if (cfg[build]['conf'].startswith('OvmfPkg/') or + cfg[build]['conf'].startswith('ArmVirtPkg/')): + cmdline += pcd_version(cfg) + cmdline += pcd_release_date() + + if jobs: + cmdline += [ '-n', jobs ] + for arch in cfg[build]['arch'].split(): + cmdline += [ '-a', arch ] + if 'opts' in cfg[build]: + for name in cfg[build]['opts'].split(): + section = 'opts.' + name + for opt in cfg[section]: + cmdline += [ '-D', opt + '=' + cfg[section][opt] ] + if 'pcds' in cfg[build]: + for name in cfg[build]['pcds'].split(): + section = 'pcds.' + name + for pcd in cfg[section]: + cmdline += [ '--pcd', pcd + '=' + cfg[section][pcd] ] + if 'tgts' in cfg[build]: + tgts = cfg[build]['tgts'].split() + else: + tgts = [ 'DEBUG' ] + for tgt in tgts: + desc = None + if 'desc' in cfg[build]: + desc = cfg[build]['desc'] + build_message(f'building: {cfg[build]["conf"]} ({cfg[build]["arch"]}, {tgt})', + f'description: {desc}') + build_run(cmdline + [ '-b', tgt ], + cfg[build]['conf'], + build + '.' 
+ tgt, + silent) + + if 'plat' in cfg[build]: + # copy files + for cpy in cfg[build]: + if not cpy.startswith('cpy'): + continue + build_copy(cfg[build]['plat'], + tgt, + cfg[build]['dest'], + cfg[build][cpy]) + # pad builds + for pad in cfg[build]: + if not pad.startswith('pad'): + continue + pad_file(cfg[build]['dest'], + cfg[build][pad]) + +def build_basetools(silent = False): + build_message('building: BaseTools') + basedir = os.environ['EDK_TOOLS_PATH'] + cmdline = [ 'make', '-C', basedir ] + build_run(cmdline, 'BaseTools', 'build.basetools', silent) + +def binary_exists(name): + for pdir in os.environ['PATH'].split(':'): + if os.path.exists(pdir + '/' + name): + return True + return False + +def prepare_env(cfg): + """ mimic Conf/BuildEnv.sh """ + workspace = os.getcwd() + packages = [ workspace, ] + path = os.environ['PATH'].split(':') + dirs = [ + 'BaseTools/Bin/Linux-x86_64', + 'BaseTools/BinWrappers/PosixLike' + ] + + if cfg.has_option('global', 'pkgs'): + for pkgdir in cfg['global']['pkgs'].split(): + packages.append(os.path.abspath(pkgdir)) + coredir = get_coredir(cfg) + if coredir != workspace: + packages.append(coredir) + + # add basetools to path + for pdir in dirs: + p = coredir + '/' + pdir + if not os.path.exists(p): + continue + if p in path: + continue + path.insert(0, p) + + # run edksetup if needed + toolsdef = coredir + '/Conf/tools_def.txt' + if not os.path.exists(toolsdef): + os.makedirs(os.path.dirname(toolsdef), exist_ok = True) + build_message('running BaseTools/BuildEnv') + cmdline = [ 'bash', 'BaseTools/BuildEnv' ] + subprocess.run(cmdline, cwd = coredir, check = True) + + # set variables + os.environ['PATH'] = ':'.join(path) + os.environ['PACKAGES_PATH'] = ':'.join(packages) + os.environ['WORKSPACE'] = workspace + os.environ['EDK_TOOLS_PATH'] = coredir + '/BaseTools' + os.environ['CONF_PATH'] = coredir + '/Conf' + os.environ['PYTHON_COMMAND'] = '/usr/bin/python3' + os.environ['PYTHONHASHSEED'] = '1' + + # for cross builds + if binary_exists('arm-linux-gnu-gcc'): + os.environ['GCC5_ARM_PREFIX'] = 'arm-linux-gnu-' + if binary_exists('loongarch64-linux-gnu-gcc'): + os.environ['GCC5_LOONGARCH64_PREFIX'] = 'loongarch64-linux-gnu-' + + hostarch = os.uname().machine + if binary_exists('aarch64-linux-gnu-gcc') and hostarch != 'aarch64': + os.environ['GCC5_AARCH64_PREFIX'] = 'aarch64-linux-gnu-' + if binary_exists('riscv64-linux-gnu-gcc') and hostarch != 'riscv64': + os.environ['GCC5_RISCV64_PREFIX'] = 'riscv64-linux-gnu-' + if binary_exists('x86_64-linux-gnu-gcc') and hostarch != 'x86_64': + os.environ['GCC5_IA32_PREFIX'] = 'x86_64-linux-gnu-' + os.environ['GCC5_X64_PREFIX'] = 'x86_64-linux-gnu-' + os.environ['GCC5_BIN'] = 'x86_64-linux-gnu-' + +def build_list(cfg): + for build in cfg.sections(): + if not build.startswith('build.'): + continue + name = build.lstrip('build.') + desc = 'no description' + if 'desc' in cfg[build]: + desc = cfg[build]['desc'] + print(f'# {name:20s} - {desc}') + +def main(): + parser = argparse.ArgumentParser(prog = 'edk2-build', + description = 'edk2 build helper script') + parser.add_argument('-c', '--config', dest = 'configfile', + type = str, default = '.edk2.builds', metavar = 'FILE', + help = 'read configuration from FILE (default: .edk2.builds)') + parser.add_argument('-C', '--directory', dest = 'directory', type = str, + help = 'change to DIR before building', metavar = 'DIR') + parser.add_argument('-j', '--jobs', dest = 'jobs', type = str, + help = 'allow up to JOBS parallel build jobs', + metavar = 'JOBS') + 
parser.add_argument('-m', '--match', dest = 'match', type = str, + help = 'only run builds matching INCLUDE (substring)', + metavar = 'INCLUDE') + parser.add_argument('-x', '--exclude', dest = 'exclude', type = str, + help = 'skip builds matching EXCLUDE (substring)', + metavar = 'EXCLUDE') + parser.add_argument('-l', '--list', dest = 'list', + action = 'store_true', default = False, + help = 'list build configs available') + parser.add_argument('--silent', dest = 'silent', + action = 'store_true', default = False, + help = 'write build output to logfiles, ' + 'write to console only on errors') + parser.add_argument('--core', dest = 'core', type = str, metavar = 'DIR', + help = 'location of the core edk2 repository ' + '(i.e. where BuildTools are located)') + parser.add_argument('--pkg', '--package', dest = 'pkgs', + type = str, action = 'append', metavar = 'DIR', + help = 'location(s) of additional packages ' + '(can be specified multiple times)') + parser.add_argument('--version-override', dest = 'version_override', + type = str, metavar = 'VERSION', + help = 'set firmware build version') + parser.add_argument('--release-date', dest = 'release_date', + type = str, metavar = 'DATE', + help = 'set firmware build release date (in MM/DD/YYYY format)') + options = parser.parse_args() + + if options.directory: + os.chdir(options.directory) + + if not os.path.exists(options.configfile): + print(f'config file "{options.configfile}" not found') + return 1 + + cfg = configparser.ConfigParser() + cfg.optionxform = str + cfg.read(options.configfile) + + if options.list: + build_list(cfg) + return + + if not cfg.has_section('global'): + cfg.add_section('global') + if options.core: + cfg.set('global', 'core', options.core) + if options.pkgs: + cfg.set('global', 'pkgs', ' '.join(options.pkgs)) + + global version_override + global release_date + check_rebase() + if options.version_override: + version_override = options.version_override + if options.release_date: + release_date = options.release_date + + prepare_env(cfg) + build_basetools(options.silent) + for build in cfg.sections(): + if not build.startswith('build.'): + continue + if options.match and options.match not in build: + print(f'# skipping "{build}" (not matching "{options.match}")') + continue + if options.exclude and options.exclude in build: + print(f'# skipping "{build}" (matching "{options.exclude}")') + continue + build_one(cfg, build, options.jobs, options.silent) + + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/roms/edk2-build.sh deleted file mode 100755 index ea79dc27a2..0000000000 --- a/roms/edk2-build.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash - -# Wrapper shell script for building a virtual platform firmware in edk2. -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program and the accompanying materials are licensed and made available -# under the terms and conditions of the BSD License that accompanies this -# distribution. The full text of the license may be found at -# <http://opensource.org/licenses/bsd-license.php>. -# -# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT -# WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. - -set -e -u -C - -# Save the command line arguments. We need to reset $# to 0 before sourcing -# "edksetup.sh", as it will inherit $@.
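Note the cfg.optionxform = str assignment in main() above: configparser lower-cases option names by default, which would corrupt the case-sensitive -D and --pcd flags that build_one() assembles from the config sections. A self-contained illustration (the section contents are made up in the style of edk2-build.config):

```python
#!/usr/bin/env python3
# Why edk2-build.py sets cfg.optionxform = str: without it, option
# names like NETWORK_IP6_ENABLE come back lower-cased and the -D flags
# built from them would be wrong. Sections below are invented examples.
import configparser

ini = """
[opts.common]
NETWORK_IP6_ENABLE = TRUE

[build.demo]
conf = OvmfPkg/OvmfPkgX64.dsc
arch = X64
opts = common
"""

cfg = configparser.ConfigParser()
cfg.optionxform = str          # keep option names case-sensitive
cfg.read_string(ini)

build = "build.demo"
cmdline = ["build", "-t", "GCC5", "-p", cfg[build]["conf"]]
for arch in cfg[build]["arch"].split():
    cmdline += ["-a", arch]
for name in cfg[build]["opts"].split():
    for opt in cfg["opts." + name]:
        cmdline += ["-D", opt + "=" + cfg["opts." + name][opt]]
print(" ".join(cmdline))
# build -t GCC5 -p OvmfPkg/OvmfPkgX64.dsc -a X64 -D NETWORK_IP6_ENABLE=TRUE
```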
-emulation_target=$1 -shift -num_args=0 -args=() -for arg in "$@"; do - args[num_args++]="$arg" -done -shift $num_args - -cd edk2 - -export PYTHON_COMMAND=${EDK2_PYTHON_COMMAND:-python3} - -# Source "edksetup.sh" carefully. -set +e +u +C -source ./edksetup.sh -ret=$? -set -e -u -C -if [ $ret -ne 0 ]; then - exit $ret -fi - -# Fetch some option arguments, and set the cross-compilation environment (if -# any), for the edk2 "build" utility. -source ../edk2-funcs.sh -edk2_toolchain=$(qemu_edk2_get_toolchain "$emulation_target") -MAKEFLAGS=$(qemu_edk2_quirk_tianocore_1607 "$MAKEFLAGS") -edk2_thread_count=$(qemu_edk2_get_thread_count "$MAKEFLAGS") -qemu_edk2_set_cross_env "$emulation_target" - -# Build the platform firmware. -build \ - --cmd-len=65536 \ - -n "$edk2_thread_count" \ - --buildtarget=RELEASE \ - --tagname="$edk2_toolchain" \ - "${args[@]}" diff --git a/roms/edk2-funcs.sh deleted file mode 100644 index cd6e4f2c82..0000000000 --- a/roms/edk2-funcs.sh +++ /dev/null @@ -1,273 +0,0 @@ -# Shell script that defines functions for determining some environmental -# characteristics for the edk2 "build" utility. -# -# This script is meant to be sourced, in a bash environment. -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program and the accompanying materials are licensed and made available -# under the terms and conditions of the BSD License that accompanies this -# distribution. The full text of the license may be found at -# <http://opensource.org/licenses/bsd-license.php>. -# -# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT -# WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. - - -# Verify whether the QEMU system emulation target is supported by the UEFI spec -# and edk2. Print a message to the standard error, and return with nonzero -# status, if verification fails. -# -# Parameters: -# $1: QEMU system emulation target -qemu_edk2_verify_arch() -{ - local emulation_target="$1" - local program_name=$(basename -- "$0") - - case "$emulation_target" in - (arm|aarch64|i386|x86_64) - ;; - (*) - printf '%s: unknown/unsupported QEMU system emulation target "%s"\n' \ - "$program_name" "$emulation_target" >&2 - return 1 - ;; - esac -} - - -# Translate the QEMU system emulation target to the edk2 architecture -# identifier. Print the result to the standard output. -# -# Parameters: -# $1: QEMU system emulation target -qemu_edk2_get_arch() -{ - local emulation_target="$1" - - if ! qemu_edk2_verify_arch "$emulation_target"; then - return 1 - fi - - case "$emulation_target" in - (arm) - printf 'ARM\n' - ;; - (aarch64) - printf 'AARCH64\n' - ;; - (i386) - printf 'IA32\n' - ;; - (x86_64) - printf 'X64\n' - ;; - esac -} - - -# Translate the QEMU system emulation target to the gcc cross-compilation -# architecture identifier. Print the result to the standard output. -# -# Parameters: -# $1: QEMU system emulation target -qemu_edk2_get_gcc_arch() -{ - local emulation_target="$1" - - if ! qemu_edk2_verify_arch "$emulation_target"; then - return 1 - fi - - case "$emulation_target" in - (arm|aarch64|x86_64) - printf '%s\n' "$emulation_target" - ;; - (i386) - printf 'i686\n' - ;; - esac -} - - -# Determine the gcc cross-compiler prefix (if any) for use with the edk2 -# toolchain. Print the result to the standard output. -# -# Parameters: -# $1: QEMU system emulation target -qemu_edk2_get_cross_prefix() -{ - local emulation_target="$1" - local gcc_arch - local host_arch - - if !
gcc_arch=$(qemu_edk2_get_gcc_arch "$emulation_target"); then - return 1 - fi - - host_arch=$(uname -m) - - if [ "$gcc_arch" == "$host_arch" ] || - ( [ "$gcc_arch" == i686 ] && [ "$host_arch" == x86_64 ] ); then - # no cross-compiler needed - : - elif ( [ -e /etc/debian_version ] && [ "$gcc_arch" == arm ] ); then - # force soft-float cross-compiler on Debian - printf 'arm-linux-gnueabi-' - else - printf '%s-linux-gnu-\n' "$gcc_arch" - fi -} - - -# Determine the edk2 toolchain tag for the QEMU system emulation target. Print -# the result to the standard output. Print a message to the standard error, and -# return with nonzero status, if the (conditional) gcc version check fails. -# -# Parameters: -# $1: QEMU system emulation target -qemu_edk2_get_toolchain() -{ - local emulation_target="$1" - local program_name=$(basename -- "$0") - local cross_prefix - local gcc_version - - if ! qemu_edk2_verify_arch "$emulation_target"; then - return 1 - fi - - case "$emulation_target" in - (arm|aarch64) - printf 'GCC5\n' - ;; - - (i386|x86_64) - if ! cross_prefix=$(qemu_edk2_get_cross_prefix "$emulation_target"); then - return 1 - fi - - gcc_version=$("${cross_prefix}gcc" -v 2>&1 | tail -1 | awk '{print $3}') - # Run "git-blame" on "OvmfPkg/build.sh" in edk2 for more information on - # the mapping below. - case "$gcc_version" in - ([1-3].*|4.[0-7].*) - printf '%s: unsupported gcc version "%s"\n' \ - "$program_name" "$gcc_version" >&2 - return 1 - ;; - (4.8.*) - printf 'GCC48\n' - ;; - (4.9.*|6.[0-2].*) - printf 'GCC49\n' - ;; - (*) - printf 'GCC5\n' - ;; - esac - ;; - esac -} - - -# Determine the name of the environment variable that exposes the -# cross-compiler prefix to the edk2 "build" utility. Print the result to the -# standard output. -# -# Parameters: -# $1: QEMU system emulation target -qemu_edk2_get_cross_prefix_var() -{ - local emulation_target="$1" - local edk2_toolchain - local edk2_arch - - if ! edk2_toolchain=$(qemu_edk2_get_toolchain "$emulation_target"); then - return 1 - fi - - case "$emulation_target" in - (arm|aarch64) - if ! edk2_arch=$(qemu_edk2_get_arch "$emulation_target"); then - return 1 - fi - printf '%s_%s_PREFIX\n' "$edk2_toolchain" "$edk2_arch" - ;; - (i386|x86_64) - printf '%s_BIN\n' "$edk2_toolchain" - ;; - esac -} - - -# Set and export the environment variable(s) necessary for cross-compilation, -# whenever needed by the edk2 "build" utility. -# -# Parameters: -# $1: QEMU system emulation target -qemu_edk2_set_cross_env() -{ - local emulation_target="$1" - local cross_prefix - local cross_prefix_var - - if ! cross_prefix=$(qemu_edk2_get_cross_prefix "$emulation_target"); then - return 1 - fi - - if [ -z "$cross_prefix" ]; then - # Nothing to do. - return 0 - fi - - if ! cross_prefix_var=$(qemu_edk2_get_cross_prefix_var \ - "$emulation_target"); then - return 1 - fi - - eval "export $cross_prefix_var=\$cross_prefix" -} - - -# Determine the "-n" option argument (that is, the number of modules to build -# in parallel) for the edk2 "build" utility. Print the result to the standard -# output. -# -# Parameters: -# $1: the value of the MAKEFLAGS variable -qemu_edk2_get_thread_count() -{ - local makeflags="$1" - - if [[ "$makeflags" == *--jobserver-auth=* ]] || - [[ "$makeflags" == *--jobserver-fds=* ]]; then - # If there is a job server, allow the edk2 "build" utility to parallelize - # as many module builds as there are logical CPUs in the system. The "make" - # instances forked by "build" are supposed to limit themselves through the - # job server. 
The zero value below causes the edk2 "build" utility to fetch - # the logical CPU count with Python's multiprocessing.cpu_count() method. - printf '0\n' - else - # Build a single module at a time. - printf '1\n' - fi -} - - -# Work around <https://bugzilla.tianocore.org/show_bug.cgi?id=1607> by -# filtering jobserver-related flags out of MAKEFLAGS. Print the result to the -# standard output. -# -# Parameters: -# $1: the value of the MAKEFLAGS variable -qemu_edk2_quirk_tianocore_1607() -{ - local makeflags="$1" - - printf %s "$makeflags" \ - | LC_ALL=C sed --regexp-extended \ - --expression='s/--jobserver-(auth|fds)=[0-9]+,[0-9]+//' \ - --expression='s/-j([0-9]+)?//' -} diff --git a/roms/openbios index 0e0afae657..af97fd7af5 160000 --- a/roms/openbios +++ b/roms/openbios @@ -1 +1 @@ -Subproject commit 0e0afae6579c1efe9f0d85505b75ffe989554133 +Subproject commit af97fd7af5e7c18f591a7b987291d3db4ffb28b5 diff --git a/roms/opensbi index 4489876e93..6b5188ca14 160000 --- a/roms/opensbi +++ b/roms/opensbi @@ -1 +1 @@ -Subproject commit 4489876e933d8ba0d8bc6c64bae71e295d45faac +Subproject commit 6b5188ca14e59ce7bf71afe4e7d3d557c3d31bf8 diff --git a/roms/seabios index 3208b098f5..ea1b7a0733 160000 --- a/roms/seabios +++ b/roms/seabios @@ -1 +1 @@ -Subproject commit 3208b098f51a9ef96d0dfa71d5ec3a3eaec88f0a +Subproject commit ea1b7a0733906b8425d948ae94fba63c32b1d425 diff --git a/scripts/archive-source.sh index 23e042dacd..dba5ae05b6 100755 --- a/scripts/archive-source.sh +++ b/scripts/archive-source.sh @@ -26,7 +26,7 @@ sub_file="${sub_tdir}/submodule.tar" # independent of what the developer currently has initialized # in their checkout, because the build environment is completely # different to the host OS. -submodules="dtc meson ui/keycodemapdb" +submodules="subprojects/dtc subprojects/keycodemapdb" submodules="$submodules tests/fp/berkeley-softfloat-3 tests/fp/berkeley-testfloat-3" sub_deinit="" diff --git a/scripts/block-coroutine-wrapper.py index 60e9b3107c..d4a183db61 100644 --- a/scripts/block-coroutine-wrapper.py +++ b/scripts/block-coroutine-wrapper.py @@ -88,16 +88,7 @@ class FuncDecl: raise ValueError(f"no_co function can't be rdlock: {self.name}") self.target_name = f'{subsystem}_{subname}' - t = self.args[0].type - if t == 'BlockDriverState *': - ctx = 'bdrv_get_aio_context(bs)' - elif t == 'BdrvChild *': - ctx = 'bdrv_get_aio_context(child->bs)' - elif t == 'BlockBackend *': - ctx = 'blk_get_aio_context(blk)' - else: - ctx = 'qemu_get_aio_context()' - self.ctx = ctx + self.ctx = self.gen_ctx() self.get_result = 's->ret = ' self.ret = 'return s.ret;' @@ -109,6 +100,17 @@ class FuncDecl: self.co_ret = '' self.return_field = '' + def gen_ctx(self, prefix: str = '') -> str: + t = self.args[0].type + if t == 'BlockDriverState *': + return f'bdrv_get_aio_context({prefix}bs)' + elif t == 'BdrvChild *': + return f'bdrv_get_aio_context({prefix}child->bs)' + elif t == 'BlockBackend *': + return f'blk_get_aio_context({prefix}blk)' + else: + return 'qemu_get_aio_context()' + def gen_list(self, format: str) -> str: return ', '.join(format.format_map(arg.__dict__) for arg in self.args) @@ -262,8 +264,11 @@ typedef struct {struct_name} {{ static void {name}_bh(void *opaque) {{ {struct_name} *s = opaque; + AioContext *ctx = {func.gen_ctx('s->')}; + aio_context_acquire(ctx); {func.get_result}{name}({ func.gen_list('s->{name}') }); + aio_context_release(ctx); aio_co_wake(s->co); }} diff --git a/scripts/checkpatch.pl index
6ecabfb2b5..eeaec436eb 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2865,6 +2865,14 @@ sub process { if ($line =~ /\bsignal\s*\(/ && !($line =~ /SIG_(?:IGN|DFL)/)) { ERROR("use sigaction to establish signal handlers; signal is not portable\n" . $herecurr); } +# recommend qemu_bh_new_guarded instead of qemu_bh_new + if ($realfile =~ /.*\/hw\/.*/ && $line =~ /\bqemu_bh_new\s*\(/) { + ERROR("use qemu_bh_new_guarded() instead of qemu_bh_new() to avoid reentrancy problems\n" . $herecurr); + } +# recommend aio_bh_new_guarded instead of aio_bh_new + if ($realfile =~ /.*\/hw\/.*/ && $line =~ /\baio_bh_new\s*\(/) { + ERROR("use aio_bh_new_guarded() instead of aio_bh_new() to avoid reentrancy problems\n" . $herecurr); + } # check for module_init(), use category-specific init macros explicitly please if ($line =~ /^module_init\s*\(/) { ERROR("please use block_init(), type_init() etc. instead of module_init()\n" . $herecurr); @@ -2982,6 +2990,9 @@ sub process { if ($line =~ /\bsysconf\(_SC_PAGESIZE\)/) { ERROR("use qemu_real_host_page_size() instead of sysconf(_SC_PAGESIZE)\n" . $herecurr); } + if ($line =~ /\b(g_)?assert\(0\)/) { + ERROR("use g_assert_not_reached() instead of assert(0)\n" . $herecurr); + } my $non_exit_glib_asserts = qr{g_assert_cmpstr| g_assert_cmpint| g_assert_cmpuint| diff --git a/scripts/ci/gitlab-kubernetes-runners/values.yaml b/scripts/ci/gitlab-kubernetes-runners/values.yaml new file mode 100644 index 0000000000..204a96a842 --- /dev/null +++ b/scripts/ci/gitlab-kubernetes-runners/values.yaml @@ -0,0 +1,30 @@ +gitlabUrl: "https://gitlab.com/" +runnerRegistrationToken: "" +rbac: + create: true +concurrent: 200 +runners: + privileged: true + config: | + [[runners]] + limit = 100 + environment = [ + "DOCKER_HOST=tcp://docker:2376", + "DOCKER_TLS_CERTDIR=/certs", + "DOCKER_TLS_VERIFY=1", + "DOCKER_CERT_PATH=/certs/client" + ] + [runners.kubernetes] + poll_timeout = 1200 + image = "ubuntu:20.04" + cpu_request = "0.5" + service_cpu_request = "0.5" + helper_cpu_request = "0.25" + cpu_request_overwrite_max_allowed = "7" + memory_request_overwrite_max_allowed = "30Gi" + [[runners.kubernetes.volumes.empty_dir]] + name = "docker-certs" + mount_path = "/certs/client" + medium = "Memory" + [runners.kubernetes.node_selector] + agentpool = "jobs" diff --git a/scripts/ci/org.centos/stream/8/build-environment.yml b/scripts/ci/org.centos/stream/8/build-environment.yml index 0d094d70c3..1ead77e2cb 100644 --- a/scripts/ci/org.centos/stream/8/build-environment.yml +++ b/scripts/ci/org.centos/stream/8/build-environment.yml @@ -55,6 +55,7 @@ - librados-devel - librbd-devel - libseccomp-devel + - libslirp-devel - libssh-devel - libxkbcommon-devel - lzo-devel diff --git a/scripts/ci/org.centos/stream/8/x86_64/test-avocado b/scripts/ci/org.centos/stream/8/x86_64/test-avocado index f403e4e7ec..73e7a1a312 100755 --- a/scripts/ci/org.centos/stream/8/x86_64/test-avocado +++ b/scripts/ci/org.centos/stream/8/x86_64/test-avocado @@ -4,7 +4,7 @@ # KVM and x86_64, or tests that are generic enough to be valid for all # targets. 
Such a test list can be generated with: # -# ./tests/venv/bin/avocado list --filter-by-tags-include-empty \ +# ./pyvenv/bin/avocado list --filter-by-tags-include-empty \ # --filter-by-tags-include-empty-key -t accel:kvm,arch:x86_64 \ # tests/avocado/ # @@ -22,7 +22,7 @@ # - tests/avocado/virtio_check_params.py:VirtioMaxSegSettingsCheck.test_machine_types # make get-vm-images -./tests/venv/bin/avocado run \ +./pyvenv/bin/avocado run \ --job-results-dir=tests/results/ \ tests/avocado/boot_linux.py:BootLinuxX8664.test_pc_i440fx_kvm \ tests/avocado/boot_linux.py:BootLinuxX8664.test_pc_q35_kvm \ @@ -30,6 +30,8 @@ make get-vm-images tests/avocado/cpu_queries.py:QueryCPUModelExpansion.test \ tests/avocado/empty_cpu_model.py:EmptyCPUModel.test \ tests/avocado/hotplug_cpu.py:HotPlugCPU.test \ + tests/avocado/netdev-ethtool.py:NetDevEthtool.test_igb \ + tests/avocado/netdev-ethtool.py:NetDevEthtool.test_igb_nomsi \ tests/avocado/info_usernet.py:InfoUsernet.test_hostfwd \ tests/avocado/intel_iommu.py:IntelIOMMU.test_intel_iommu \ tests/avocado/intel_iommu.py:IntelIOMMU.test_intel_iommu_pt \ diff --git a/scripts/ci/setup/build-environment.yml b/scripts/ci/setup/build-environment.yml index 78b1021cd4..f344d1a850 100644 --- a/scripts/ci/setup/build-environment.yml +++ b/scripts/ci/setup/build-environment.yml @@ -24,7 +24,6 @@ when: - ansible_facts['distribution'] == 'Ubuntu' - ansible_facts['architecture'] == 'aarch64' - - ansible_facts['distribution_version'] == '20.04' - name: Update apt cache / upgrade packages via apt apt: @@ -33,87 +32,131 @@ when: - ansible_facts['distribution'] == 'Ubuntu' - - name: Install basic packages to build QEMU on Ubuntu 20.04 + # lcitool variables -f json ubuntu-2204 qemu | jq -r '.pkgs[]' | xargs -n 1 echo "-" + - name: Install basic packages to build QEMU on Ubuntu 22.04 package: name: + - bash + - bc + - bison + - bsdextrautils + - bzip2 + - ca-certificates - ccache + - clang + - dbus + - debianutils + - diffutils + - exuberant-ctags + - findutils + - flex + - g++ - gcc + - gcovr + - genisoimage - gettext - git - - glusterfs-common + - hostname - libaio-dev + - libasan5 + - libasound2-dev - libattr1-dev + - libbpf-dev - libbrlapi-dev - libbz2-dev + - libc6-dev - libcacard-dev - - libcapstone-dev - libcap-ng-dev + - libcapstone-dev + - libcmocka-dev - libcurl4-gnutls-dev + - libdaxctl-dev - libdrm-dev - libepoxy-dev - libfdt-dev + - libffi-dev - libgbm-dev + - libgcrypt20-dev + - libglib2.0-dev + - libglusterfs-dev + - libgnutls28-dev - libgtk-3-dev + - libibumad-dev - libibverbs-dev - libiscsi-dev - libjemalloc-dev - libjpeg-turbo8-dev + - libjson-c-dev + - liblttng-ust-dev - liblzo2-dev - - libncurses5-dev - libncursesw5-dev - libnfs-dev - - libnss3-dev - libnuma-dev + - libpam0g-dev + - libpcre2-dev - libpixman-1-dev - - librados-dev + - libpmem-dev + - libpng-dev + - libpulse-dev - librbd-dev - librdmacm-dev - libsasl2-dev - libsdl2-dev + - libsdl2-image-dev - libseccomp-dev + - libslirp-dev - libsnappy-dev - libspice-protocol-dev + - libspice-server-dev - libssh-dev + - libsystemd-dev + - libtasn1-6-dev + - libubsan1 + - libudev-dev + - liburing-dev - libusb-1.0-0-dev - libusbredirhost-dev - libvdeplug-dev + - libvirglrenderer-dev - libvte-2.91-dev + - libxen-dev + - libxml2-dev - libzstd-dev + - llvm + - locales - make - - python3-yaml + - meson + - multipath-tools + - ncat + - nettle-dev + - ninja-build + - openssh-client + - pkgconf + - python3 + - python3-numpy + - python3-opencv + - python3-pillow + - python3-pip - python3-sphinx - python3-sphinx-rtd-theme - 
- ninja-build + - python3-venv + - python3-yaml + - rpm2cpio + - sed - sparse + - systemtap-sdt-dev + - tar + - tesseract-ocr + - tesseract-ocr-eng + - texinfo - xfslibs-dev + - zlib1g-dev state: present when: - ansible_facts['distribution'] == 'Ubuntu' + - ansible_facts['distribution_version'] == '22.04' - - name: Install packages to build QEMU on Ubuntu 20.04 on non-s390x - package: - name: - - libspice-server-dev - - libxen-dev - state: present - when: - - ansible_facts['distribution'] == 'Ubuntu' - - ansible_facts['architecture'] == 'aarch64' or ansible_facts['architecture'] == 'x86_64' - - - name: Install basic packages to build QEMU on Ubuntu 20.04 - package: - name: - # Originally from tests/docker/dockerfiles/ubuntu2004.docker - - clang-10 - - genisoimage - - liblttng-ust-dev - - libslirp-dev - - netcat-openbsd - when: - - ansible_facts['distribution'] == 'Ubuntu' - - ansible_facts['distribution_version'] == '20.04' - - - name: Install armhf cross-compile packages to build QEMU on AArch64 Ubuntu 20.04 + - name: Install armhf cross-compile packages to build QEMU on AArch64 Ubuntu 22.04 package: name: - binutils-arm-linux-gnueabihf @@ -128,7 +171,7 @@ - zlib1g-dev:armhf when: - ansible_facts['distribution'] == 'Ubuntu' - - ansible_facts['distribution_version'] == '20.04' + - ansible_facts['distribution_version'] == '22.04' - ansible_facts['architecture'] == 'aarch64' - name: Enable EPEL repo on EL8 diff --git a/scripts/ci/setup/gitlab-runner.yml b/scripts/ci/setup/gitlab-runner.yml index 95d4199c03..7bdafab511 100644 --- a/scripts/ci/setup/gitlab-runner.yml +++ b/scripts/ci/setup/gitlab-runner.yml @@ -26,6 +26,7 @@ user: user: gitlab-runner group: gitlab-runner + groups: kvm comment: GitLab Runner home: /home/gitlab-runner shell: /bin/bash @@ -48,13 +49,29 @@ - debug: msg: gitlab-runner arch is {{ gitlab_runner_arch }} - - name: Download the matching gitlab-runner + - name: Download the matching gitlab-runner (DEB) get_url: dest: "/root/" url: "https://gitlab-runner-downloads.s3.amazonaws.com/latest/deb/gitlab-runner_{{ gitlab_runner_arch }}.deb" + when: + - ansible_facts['distribution'] == 'Ubuntu' - - name: Install gitlab-runner via package manager + - name: Download the matching gitlab-runner (RPM) + get_url: + dest: "/root/" + url: "https://gitlab-runner-downloads.s3.amazonaws.com/latest/rpm/gitlab-runner_{{ gitlab_runner_arch }}.rpm" + when: + - ansible_facts['distribution'] == 'CentOS' + + - name: Install gitlab-runner via package manager (DEB) apt: deb="/root/gitlab-runner_{{ gitlab_runner_arch }}.deb" + when: + - ansible_facts['distribution'] == 'Ubuntu' + + - name: Install gitlab-runner via package manager (RPM) + yum: name="/root/gitlab-runner_{{ gitlab_runner_arch }}.rpm" + when: + - ansible_facts['distribution'] == 'CentOS' - name: Register the gitlab-runner command: "/usr/bin/gitlab-runner register --non-interactive --url {{ gitlab_runner_server_url }} --registration-token {{ gitlab_runner_registration_token }} --executor shell --tag-list {{ ansible_facts[\"architecture\"] }},{{ ansible_facts[\"distribution\"]|lower }}_{{ ansible_facts[\"distribution_version\"] }} --description '{{ ansible_facts[\"distribution\"] }} {{ ansible_facts[\"distribution_version\"] }} {{ ansible_facts[\"architecture\"] }} ({{ ansible_facts[\"os_family\"] }})'" diff --git a/scripts/coverage/compare_gcov_json.py b/scripts/coverage/compare_gcov_json.py new file mode 100755 index 0000000000..1b92dc2c8c --- /dev/null +++ b/scripts/coverage/compare_gcov_json.py @@ -0,0 +1,119 @@ +#!/usr/bin/env 
python3 +# +# Compare output of two gcovr JSON reports and report differences. To +# generate the required output first: +# - create two build dirs with --enable-gcov +# - run set of tests in each +# - run make coverage-html in each +# - run gcovr --json --exclude-unreachable-branches \ +# --print-summary -o coverage.json --root ../../ . *.p +# +# Author: Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +import argparse +import json +import sys +from pathlib import Path + +def create_parser(): + parser = argparse.ArgumentParser( + prog='compare_gcov_json', + description='analyse the differences in coverage between two runs') + + parser.add_argument('-a', type=Path, default=None, + help=('First file to check')) + + parser.add_argument('-b', type=Path, default=None, + help=('Second file to check')) + + parser.add_argument('--verbose', action='store_true', default=False, + help=('A minimal verbosity level that prints the ' + 'overall result of the check/wait')) + return parser + + +# See https://gcovr.com/en/stable/output/json.html#json-format-reference +def load_json(json_file_path: Path, verbose = False) -> dict[str, set[int]]: + + with open(json_file_path) as f: + data = json.load(f) + + root_dir = json_file_path.absolute().parent + covered_lines = dict() + + for filecov in data["files"]: + file_path = Path(filecov["file"]) + + # account for generated files - map into src tree + resolved_path = Path(file_path).absolute() + if resolved_path.is_relative_to(root_dir): + file_path = resolved_path.relative_to(root_dir) + # print(f"remapped {resolved_path} to {file_path}") + + lines = filecov["lines"] + + executed_lines = set( + linecov["line_number"] + for linecov in filecov["lines"] + if linecov["count"] != 0 and not linecov["gcovr/noncode"] + ) + + # if this file has any coverage add it to the system + if len(executed_lines) > 0: + if verbose: + print(f"file {file_path} {len(executed_lines)}/{len(lines)}") + covered_lines[str(file_path)] = executed_lines + + return covered_lines + +def find_missing_files(first, second): + """ + Return a list of files not covered in the second set + """ + missing_files = [] + for f in sorted(first): + file_a = first[f] + try: + file_b = second[f] + except KeyError: + missing_files.append(f) + + return missing_files + +def main(): + """ + Script entry point + """ + parser = create_parser() + args = parser.parse_args() + + if not args.a or not args.b: + print("We need two files to compare") + sys.exit(1) + + first_coverage = load_json(args.a, args.verbose) + second_coverage = load_json(args.b, args.verbose) + + first_missing = find_missing_files(first_coverage, + second_coverage) + + second_missing = find_missing_files(second_coverage, + first_coverage) + + a_name = args.a.parent.name + b_name = args.b.parent.name + + print(f"{b_name} missing coverage in {len(first_missing)} files") + for f in first_missing: + print(f" {f}") + + print(f"{a_name} missing coverage in {len(second_missing)} files") + for f in second_missing: + print(f" {f}") + + +if __name__ == '__main__': + main() diff --git a/scripts/coverity-scan/COMPONENTS.md b/scripts/coverity-scan/COMPONENTS.md index 639dcee45a..883da95aff 100644 --- a/scripts/coverity-scan/COMPONENTS.md +++ b/scripts/coverity-scan/COMPONENTS.md @@ -12,6 +12,9 @@ avr cris ~ (/qemu)?((/include)?/hw/cris/.*|/target/cris/.*) +hexagon-gen (component should be ignored in analysis) + ~ (/qemu)?(/target/hexagon/.*generated.*) + hexagon ~ (/qemu)?(/target/hexagon/.*) @@ -21,6 +24,9 @@ hppa i386 ~ 
(/qemu)?((/include)?/hw/i386/.*|/target/i386/.*|/hw/intc/[^/]*apic[^/]*\.c) +loongarch + ~ (/qemu)?((/include)?/hw/(loongarch/.*|.*/loongarch.*)|/target/loongarch/.*) + m68k ~ (/qemu)?((/include)?/hw/m68k/.*|/target/m68k/.*|(/include)?/hw(/.*)?/mcf.*|(/include)?/hw/nubus/.*) @@ -33,11 +39,14 @@ mips nios2 ~ (/qemu)?((/include)?/hw/nios2/.*|/target/nios2/.*) +openrisc + ~ (/qemu)?((/include)?/hw/openrisc/.*|/target/openrisc/.*) + ppc ~ (/qemu)?((/include)?/hw/ppc/.*|/target/ppc/.*|/hw/pci-host/(uninorth.*|dec.*|prep.*|ppc.*)|/hw/misc/macio/.*|(/include)?/hw/.*/(xics|openpic|spapr).*) riscv - ~ (/qemu)?((/include)?/hw/riscv/.*|/target/riscv/.*) + ~ (/qemu)?((/include)?/hw/riscv/.*|/target/riscv/.*|/hw/.*/(riscv_|ibex_|sifive_).*) rx ~ (/qemu)?((/include)?/hw/rx/.*|/target/rx/.*) @@ -51,12 +60,12 @@ sh4 sparc ~ (/qemu)?((/include)?/hw/sparc(64)?.*|/target/sparc/.*|/hw/.*/grlib.*|/hw/display/cg3.c) -tilegx - ~ (/qemu)?(/target/tilegx/.*) - tricore ~ (/qemu)?((/include)?/hw/tricore/.*|/target/tricore/.*) +xtensa + ~ (/qemu)?((/include)?/hw/xtensa/.*|/target/xtensa/.*) + 9pfs ~ (/qemu)?(/hw/9pfs/.*|/fsdev/.*) @@ -64,16 +73,13 @@ audio ~ (/qemu)?((/include)?/(audio|hw/audio)/.*) block - ~ (/qemu)?(/block.*|(/include?)(/hw)?/(block|storage-daemon)/.*|(/include)?/hw/ide/.*|/qemu-(img|io).*|/util/(aio|async|thread-pool).*) + ~ (/qemu)?(/block.*|(/include?)/(block|storage-daemon)/.*|(/include)?/hw/(block|ide|nvme)/.*|/qemu-(img|io).*|/util/(aio|async|thread-pool).*) char ~ (/qemu)?(/qemu-char\.c|/include/sysemu/char\.h|(/include)?/hw/char/.*) -capstone - ~ (/qemu)?(/capstone/.*) - crypto - ~ (/qemu)?((/include)?/crypto/.*|/hw/.*/crypto.*) + ~ (/qemu)?((/include)?/crypto/.*|/hw/.*/.*crypto.*|(/include/sysemu|/backends)/cryptodev.*) disas ~ (/qemu)?((/include)?/disas.*) @@ -100,7 +106,7 @@ net ~ (/qemu)?((/include)?(/hw)?/(net|rdma)/.*) pci - ~ (/qemu)?(/hw/pci.*|/include/hw/pci.*) + ~ (/qemu)?(/include)?/hw/(cxl/|pci).* qemu-ga ~ (/qemu)?(/qga/.*) @@ -108,12 +114,6 @@ qemu-ga scsi ~ (/qemu)?(/scsi/.*|/hw/scsi/.*|/include/hw/scsi/.*) -slirp (component should be ignored in analysis) - ~ (/qemu)?(/slirp/.*) - -tcg - ~ (/qemu)?(/accel/tcg/.*|/replay/.*|/(.*/)?softmmu.*) - trace ~ (/qemu)?(/.*trace.*\.[ch]) @@ -129,9 +129,27 @@ user util ~ (/qemu)?(/util/.*|/include/qemu/.*) +vfio + ~ (/qemu)?(/include)?/hw/vfio/.* + +virtio + ~ (/qemu)?(/include)?/hw/virtio/.* + xen ~ (/qemu)?(.*/xen.*) +hvf + ~ (/qemu)?(.*/hvf.*) + +kvm + ~ (/qemu)?(.*/kvm.*) + +tcg + ~ (/qemu)?(/accel/tcg|/replay|/tcg)/.* + +sysemu + ~ (/qemu)?(/softmmu/.*|/accel/.*) + (headers) ~ (/qemu)?(/include/.*) @@ -140,9 +158,3 @@ testlibs tests ~ (/qemu)?(/tests/.*) - -loongarch - ~ (/qemu)?((/include)?/hw/(loongarch/.*|.*/loongarch.*)|/target/loongarch/.*) - -riscv - ~ (/qemu)?((/include)?/hw/riscv/.*|/target/riscv/.*|/hw/.*/(riscv_|ibex_|sifive_).*) diff --git a/scripts/coverity-scan/coverity-scan.docker b/scripts/coverity-scan/coverity-scan.docker index 6f60a52d23..a349578526 100644 --- a/scripts/coverity-scan/coverity-scan.docker +++ b/scripts/coverity-scan/coverity-scan.docker @@ -15,112 +15,152 @@ # The work of actually doing the build is handled by the # run-coverity-scan script. 
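The component patterns in COMPONENTS.md above are plain regular expressions over file paths; the widened riscv entry, for instance, now also claims SiFive and Ibex device models living outside hw/riscv/. A quick sanity check of that pattern with Python's re module (Coverity's own matcher may differ in details, so this is only an approximation):

```python
#!/usr/bin/env python3
# Illustrative check of the updated riscv component regex from
# COMPONENTS.md against a few representative source paths.
import re

riscv = re.compile(
    r"(/qemu)?((/include)?/hw/riscv/.*|/target/riscv/.*"
    r"|/hw/.*/(riscv_|ibex_|sifive_).*)")

for path in ["/qemu/hw/riscv/virt.c",
             "/qemu/hw/char/sifive_uart.c",   # newly matched
             "/qemu/hw/intc/riscv_aclint.c",  # newly matched
             "/qemu/hw/char/pl011.c"]:        # not a riscv file
    print(path, bool(riscv.fullmatch(path)))
```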
-FROM fedora:30 -ENV PACKAGES \ - alsa-lib-devel \ - bc \ - brlapi-devel \ - bzip2 \ - bzip2-devel \ - ccache \ - clang \ - curl \ - cyrus-sasl-devel \ - dbus-daemon \ - device-mapper-multipath-devel \ - findutils \ - gcc \ - gcc-c++ \ - gettext \ - git \ - glib2-devel \ - glusterfs-api-devel \ - gnutls-devel \ - gtk3-devel \ - hostname \ - libaio-devel \ - libasan \ - libattr-devel \ - libblockdev-mpath-devel \ - libcap-devel \ - libcap-ng-devel \ - libcurl-devel \ - libepoxy-devel \ - libfdt-devel \ - libgbm-devel \ - libiscsi-devel \ - libjpeg-devel \ - libpmem-devel \ - libnfs-devel \ - libpng-devel \ - librbd-devel \ - libseccomp-devel \ - libssh-devel \ - libubsan \ - libudev-devel \ - libusbx-devel \ - libzstd-devel \ - llvm \ - lzo-devel \ - make \ - mingw32-bzip2 \ - mingw32-curl \ - mingw32-glib2 \ - mingw32-gmp \ - mingw32-gnutls \ - mingw32-gtk3 \ - mingw32-libjpeg-turbo \ - mingw32-libpng \ - mingw32-libtasn1 \ - mingw32-nettle \ - mingw32-nsis \ - mingw32-pixman \ - mingw32-pkg-config \ - mingw32-SDL2 \ - mingw64-bzip2 \ - mingw64-curl \ - mingw64-glib2 \ - mingw64-gmp \ - mingw64-gnutls \ - mingw64-gtk3 \ - mingw64-libjpeg-turbo \ - mingw64-libpng \ - mingw64-libtasn1 \ - mingw64-nettle \ - mingw64-pixman \ - mingw64-pkg-config \ - mingw64-SDL2 \ - ncurses-devel \ - nettle-devel \ - numactl-devel \ - perl \ - perl-Test-Harness \ - pixman-devel \ - pulseaudio-libs-devel \ - python3 \ - python3-sphinx \ - PyYAML \ - rdma-core-devel \ - SDL2-devel \ - snappy-devel \ - sparse \ - spice-server-devel \ - systemd-devel \ - systemtap-sdt-devel \ - tar \ - usbredir-devel \ - virglrenderer-devel \ - vte291-devel \ - wget \ - which \ - xen-devel \ - xfsprogs-devel \ - zlib-devel -ENV QEMU_CONFIGURE_OPTS --python=/usr/bin/python3 +FROM registry.fedoraproject.org/fedora:37 -RUN dnf install -y $PACKAGES -RUN rpm -q $PACKAGES | sort > /packages.txt -ENV PATH $PATH:/usr/libexec/python3-sphinx/ +RUN dnf install -y nosync && \ + echo -e '#!/bin/sh\n\ +if test -d /usr/lib64\n\ +then\n\ + export LD_PRELOAD=/usr/lib64/nosync/nosync.so\n\ +else\n\ + export LD_PRELOAD=/usr/lib/nosync/nosync.so\n\ +fi\n\ +exec "$@"' > /usr/bin/nosync && \ + chmod +x /usr/bin/nosync && \ + nosync dnf update -y && \ + nosync dnf install -y \ + SDL2-devel \ + SDL2_image-devel \ + alsa-lib-devel \ + bash \ + bc \ + bison \ + brlapi-devel \ + bzip2 \ + bzip2-devel \ + ca-certificates \ + capstone-devel \ + ccache \ + clang \ + ctags \ + cyrus-sasl-devel \ + daxctl-devel \ + dbus-daemon \ + device-mapper-multipath-devel \ + diffutils \ + findutils \ + flex \ + fuse3-devel \ + gcc \ + gcc-c++ \ + gcovr \ + genisoimage \ + gettext \ + git \ + glib2-devel \ + glib2-static \ + glibc-langpack-en \ + glibc-static \ + glusterfs-api-devel \ + gnutls-devel \ + gtk3-devel \ + hostname \ + jemalloc-devel \ + json-c-devel \ + libaio-devel \ + libasan \ + libattr-devel \ + libbpf-devel \ + libcacard-devel \ + libcap-ng-devel \ + libcmocka-devel \ + libcurl-devel \ + libdrm-devel \ + libepoxy-devel \ + libfdt-devel \ + libffi-devel \ + libgcrypt-devel \ + libiscsi-devel \ + libjpeg-devel \ + libnfs-devel \ + libpmem-devel \ + libpng-devel \ + librbd-devel \ + libseccomp-devel \ + libselinux-devel \ + libslirp-devel \ + libssh-devel \ + libtasn1-devel \ + libubsan \ + liburing-devel \ + libusbx-devel \ + libzstd-devel \ + llvm \ + lttng-ust-devel \ + lzo-devel \ + make \ + mesa-libgbm-devel \ + meson \ + ncurses-devel \ + nettle-devel \ + ninja-build \ + nmap-ncat \ + numactl-devel \ + openssh-clients \ + pam-devel \ + pcre-static \ + 
pixman-devel \ + pkgconfig \ + pulseaudio-libs-devel \ + python3 \ + python3-PyYAML \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx_rtd_theme \ + rdma-core-devel \ + rpm \ + sed \ + snappy-devel \ + socat \ + sparse \ + spice-protocol \ + spice-server-devel \ + systemd-devel \ + systemtap-sdt-devel \ + tar \ + tesseract \ + tesseract-langpack-eng \ + usbredir-devel \ + util-linux \ + virglrenderer-devel \ + vte291-devel \ + which \ + xen-devel \ + xfsprogs-devel \ + zlib-devel \ + zlib-static \ + zstd && \ + nosync dnf autoremove -y && \ + nosync dnf clean all -y && \ + rpm -qa | sort > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc + +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" +ENV QEMU_CONFIGURE_OPTS --meson=internal + +RUN dnf install -y curl wget ENV COVERITY_TOOL_BASE=/coverity-tools COPY coverity_tool.tgz coverity_tool.tgz RUN mkdir -p /coverity-tools/coverity_tool && cd /coverity-tools/coverity_tool && tar xf /coverity_tool.tgz diff --git a/scripts/decodetree.py b/scripts/decodetree.py index a03dc6b5e3..a8a6cb69cd 100644 --- a/scripts/decodetree.py +++ b/scripts/decodetree.py @@ -35,12 +35,14 @@ arguments = {} formats = {} allpatterns = [] anyextern = False +testforerror = False translate_prefix = 'trans' translate_scope = 'static ' input_file = '' output_file = None output_fd = None +output_null = False insntype = 'uint32_t' decode_function = 'decode' @@ -53,6 +55,80 @@ re_fld_ident = '%[a-zA-Z0-9_]*' re_fmt_ident = '@[a-zA-Z0-9_]*' re_pat_ident = '[a-zA-Z0-9_]*' +# Local implementation of a topological sort. We use the same API that +# the Python graphlib does, so that when QEMU moves forward to a +# baseline of Python 3.9 or newer this code can all be dropped and +# replaced with: +# from graphlib import TopologicalSorter, CycleError +# +# https://docs.python.org/3.9/library/graphlib.html#graphlib.TopologicalSorter +# +# We only implement the parts of TopologicalSorter we care about: +# ts = TopologicalSorter(graph=None) +# create the sorter. graph is a dictionary whose keys are +# nodes and whose values are lists of the predecessors of that node. +# (That is, if graph contains "A" -> ["B", "C"] then we must output +# B and C before A.) +# ts.static_order() +# returns a list of all the nodes in sorted order, or raises CycleError +# CycleError +# exception raised if there are cycles in the graph. The second +# element in the args attribute is a list of nodes which form a +# cycle; the first and last element are the same, eg [a, b, c, a] +# (Our implementation doesn't give the order correctly.) +# +# For our purposes we can assume that the data set is always small +# (typically 10 nodes or less, actual links in the graph very rare), +# so we don't need to worry about efficiency of implementation. +# +# The core of this implementation is from +# https://code.activestate.com/recipes/578272-topological-sort/ +# (but updated to Python 3), and is under the MIT license. 
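For reference, a minimal sketch of how this API behaves when run against the stdlib graphlib on Python 3.9+ (the local fallback class below is intended to be call-compatible; outputs shown are illustrative):

    from graphlib import TopologicalSorter, CycleError

    # Keys are nodes, values are their predecessors: B and C must be
    # output before A, and C before B.
    graph = {"A": ["B", "C"], "B": ["C"]}
    print(list(TopologicalSorter(graph).static_order()))  # ['C', 'B', 'A']

    # Cycles raise CycleError; args[1] lists the offending nodes,
    # e.g. ['a', 'b', 'a'] in the stdlib (first and last repeated).
    try:
        list(TopologicalSorter({"a": ["b"], "b": ["a"]}).static_order())
    except CycleError as exc:
        print(exc.args[1])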
+ +class CycleError(ValueError): + """Subclass of ValueError raised if cycles exist in the graph""" + pass + +class TopologicalSorter: + """Topologically sort a graph""" + def __init__(self, graph=None): + self.graph = graph + + def static_order(self): + # We do the sort right here, unlike the stdlib version + from functools import reduce + data = {} + r = [] + + if not self.graph: + return [] + + # This code wants the values in the dict to be specifically sets + for k, v in self.graph.items(): + data[k] = set(v) + + # Find all items that don't depend on anything. + extra_items_in_deps = (reduce(set.union, data.values()) + - set(data.keys())) + # Add empty dependencies where needed + data.update({item:{} for item in extra_items_in_deps}) + while True: + ordered = set(item for item, dep in data.items() if not dep) + if not ordered: + break + r.extend(ordered) + data = {item: (dep - ordered) + for item, dep in data.items() + if item not in ordered} + if data: + # This doesn't give as nice results as the stdlib, which + # gives you the cycle by listing the nodes in order. Here + # we only know the nodes in the cycle but not their order. + raise CycleError(f'nodes are in a cycle', list(data.keys())) + + return r +# end TopologicalSorter + def error_with_file(file, lineno, *args): """Print an error message from file:line and args and exit.""" global output_file @@ -71,7 +147,7 @@ def error_with_file(file, lineno, *args): if output_file and output_fd: output_fd.close() os.remove(output_file) - exit(1) + exit(0 if testforerror else 1) # end error_with_file @@ -205,11 +281,14 @@ class Field: s = '' return str(self.pos) + ':' + s + str(self.len) - def str_extract(self): + def str_extract(self, lvalue_formatter): global bitop_width s = 's' if self.sign else '' return f'{s}extract{bitop_width}(insn, {self.pos}, {self.len})' + def referenced_fields(self): + return [] + def __eq__(self, other): return self.sign == other.sign and self.mask == other.mask @@ -228,12 +307,12 @@ class MultiField: def __str__(self): return str(self.subs) - def str_extract(self): + def str_extract(self, lvalue_formatter): global bitop_width ret = '0' pos = 0 for f in reversed(self.subs): - ext = f.str_extract() + ext = f.str_extract(lvalue_formatter) if pos == 0: ret = ext else: @@ -241,6 +320,12 @@ class MultiField: pos += f.len return ret + def referenced_fields(self): + l = [] + for f in self.subs: + l.extend(f.referenced_fields()) + return l + def __ne__(self, other): if len(self.subs) != len(other.subs): return True @@ -264,9 +349,12 @@ class ConstField: def __str__(self): return str(self.value) - def str_extract(self): + def str_extract(self, lvalue_formatter): return str(self.value) + def referenced_fields(self): + return [] + def __cmp__(self, other): return self.value - other.value # end ConstField @@ -283,8 +371,12 @@ class FunctionField: def __str__(self): return self.func + '(' + str(self.base) + ')' - def str_extract(self): - return self.func + '(ctx, ' + self.base.str_extract() + ')' + def str_extract(self, lvalue_formatter): + return (self.func + '(ctx, ' + + self.base.str_extract(lvalue_formatter) + ')') + + def referenced_fields(self): + return self.base.referenced_fields() def __eq__(self, other): return self.func == other.func and self.base == other.base @@ -304,9 +396,12 @@ class ParameterField: def __str__(self): return self.func - def str_extract(self): + def str_extract(self, lvalue_formatter): return self.func + '(ctx)' + def referenced_fields(self): + return [] + def __eq__(self, other): return 
self.func == other.func @@ -314,6 +409,32 @@ class ParameterField: return not self.__eq__(other) # end ParameterField +class NamedField: + """Class representing a field already named in the pattern""" + def __init__(self, name, sign, len): + self.mask = 0 + self.sign = sign + self.len = len + self.name = name + + def __str__(self): + return self.name + + def str_extract(self, lvalue_formatter): + global bitop_width + s = 's' if self.sign else '' + lvalue = lvalue_formatter(self.name) + return f'{s}extract{bitop_width}({lvalue}, 0, {self.len})' + + def referenced_fields(self): + return [self.name] + + def __eq__(self, other): + return self.name == other.name + + def __ne__(self, other): + return not self.__eq__(other) +# end NamedField class Arguments: """Class representing the extracted fields of a format""" @@ -337,7 +458,6 @@ class Arguments: output('} ', self.struct_name(), ';\n\n') # end Arguments - class General: """Common code between instruction formats and instruction patterns""" def __init__(self, name, lineno, base, fixb, fixm, udfm, fldm, flds, w): @@ -351,12 +471,59 @@ class General: self.fieldmask = fldm self.fields = flds self.width = w + self.dangling = None def __str__(self): return self.name + ' ' + str_match_bits(self.fixedbits, self.fixedmask) def str1(self, i): return str_indent(i) + self.__str__() + + def dangling_references(self): + # Return a list of all named references which aren't satisfied + # directly by this format/pattern. This will be either: + # * a format referring to a field which is specified by the + # pattern(s) using it + # * a pattern referring to a field which is specified by the + # format it uses + # * a user error (referring to a field that doesn't exist at all) + if self.dangling is None: + # Compute this once and cache the answer + dangling = [] + for n, f in self.fields.items(): + for r in f.referenced_fields(): + if r not in self.fields: + dangling.append(r) + self.dangling = dangling + return self.dangling + + def output_fields(self, indent, lvalue_formatter): + # We use a topological sort to ensure that any use of NamedField + # comes after the initialization of the field it is referencing. + graph = {} + for n, f in self.fields.items(): + refs = f.referenced_fields() + graph[n] = refs + + try: + ts = TopologicalSorter(graph) + for n in ts.static_order(): + # We only want to emit assignments for the keys + # in our fields list, not for anything that ends up + # in the tsort graph only because it was referenced as + # a NamedField. + try: + f = self.fields[n] + output(indent, lvalue_formatter(n), ' = ', + f.str_extract(lvalue_formatter), ';\n') + except KeyError: + pass + except CycleError as e: + # The second element of args is a list of nodes which form + # a cycle (there might be others too, but only one is reported). + # Pretty-print it to tell the user. 
+ cycle = ' => '.join(e.args[1]) + error(self.lineno, 'field definitions form a cycle: ' + cycle) # end General @@ -370,8 +537,7 @@ class Format(General): def output_extract(self): output('static void ', self.extract_name(), '(DisasContext *ctx, ', self.base.struct_name(), ' *a, ', insntype, ' insn)\n{\n') - for n, f in self.fields.items(): - output(' a->', n, ' = ', f.str_extract(), ';\n') + self.output_fields(str_indent(4), lambda n: 'a->' + n) output('}\n\n') # end Format @@ -392,11 +558,36 @@ class Pattern(General): ind = str_indent(i) arg = self.base.base.name output(ind, '/* ', self.file, ':', str(self.lineno), ' */\n') + # We might have named references in the format that refer to fields + # in the pattern, or named references in the pattern that refer + # to fields in the format. This affects whether we extract the fields + # for the format before or after the ones for the pattern. + # For simplicity we don't allow cross references in both directions. + # This is also where we catch the syntax error of referring to + # a nonexistent field. + fmt_refs = self.base.dangling_references() + for r in fmt_refs: + if r not in self.fields: + error(self.lineno, f'format refers to undefined field {r}') + pat_refs = self.dangling_references() + for r in pat_refs: + if r not in self.base.fields: + error(self.lineno, f'pattern refers to undefined field {r}') + if pat_refs and fmt_refs: + error(self.lineno, ('pattern that uses fields defined in format ' + 'cannot use format that uses fields defined ' + 'in pattern')) + if fmt_refs: + # pattern fields first + self.output_fields(ind, lambda n: 'u.f_' + arg + '.' + n) + assert not extracted, "dangling fmt refs but it was already extracted" if not extracted: output(ind, self.base.extract_name(), '(ctx, &u.f_', arg, ', insn);\n') - for n, f in self.fields.items(): - output(ind, 'u.f_', arg, '.', n, ' = ', f.str_extract(), ';\n') + if not fmt_refs: + # pattern fields last + self.output_fields(ind, lambda n: 'u.f_' + arg + '.' + n) + output(ind, 'if (', translate_prefix, '_', self.name, '(ctx, &u.f_', arg, ')) return true;\n') @@ -473,7 +664,7 @@ class MultiPattern(General): def prop_format(self): for p in self.pats: - p.build_tree() + p.prop_format() def prop_width(self): width = None @@ -505,6 +696,12 @@ class IncMultiPattern(MultiPattern): output(ind, '}\n') else: p.output_code(i, extracted, p.fixedbits, p.fixedmask) + + def build_tree(self): + if not self.pats: + error_with_file(self.file, self.lineno, 'empty pattern group') + super().build_tree() + #end IncMultiPattern @@ -536,8 +733,10 @@ class Tree: ind = str_indent(i) # If we identified all nodes below have the same format, - # extract the fields now. - if not extracted and self.base: + # extract the fields now. But don't do it if the format relies + # on named fields from the insn pattern, as those won't have + # been initialised at this point. + if not extracted and self.base and not self.base.dangling_references(): output(ind, self.base.extract_name(), '(ctx, &u.f_', self.base.base.name, ', insn);\n') extracted = True @@ -623,7 +822,7 @@ class ExcMultiPattern(MultiPattern): return t def build_tree(self): - super().prop_format() + super().build_tree() self.tree = self.__build_tree(self.pats, self.fixedbits, self.fixedmask) @@ -659,6 +858,7 @@ def parse_field(lineno, name, toks): """Parse one instruction field from TOKS at LINENO""" global fields global insnwidth + global re_C_ident # A "simple" field will have only one entry; # a "multifield" will have several. 
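To make the NamedField addition concrete, here is a small sketch of what it renders, assuming it is evaluated inside decodetree.py's namespace with the default bitop_width of 32; the field name 'sz' and the 'a->' destination are hypothetical:

    # A NamedField extracts from another, already-assigned field rather
    # than from the instruction word; lvalue_formatter decides how the
    # referenced name is spelled at the point of use.
    f = NamedField('sz', False, 3)             # unsigned, 3 bits wide
    print(f.str_extract(lambda n: 'a->' + n))  # -> extract32(a->sz, 0, 3)
    print(f.referenced_fields())               # -> ['sz'], feeds the tsort graph

The parse_field hunk below is what recognizes the name:NN and name:sNN spellings in the input file.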
@@ -673,6 +873,25 @@ def parse_field(lineno, name, toks): func = func[1] continue + if re.fullmatch(re_C_ident + ':s[0-9]+', t): + # Signed named field + subtoks = t.split(':') + n = subtoks[0] + le = int(subtoks[1]) + f = NamedField(n, True, le) + subs.append(f) + width += le + continue + if re.fullmatch(re_C_ident + ':[0-9]+', t): + # Unsigned named field + subtoks = t.split(':') + n = subtoks[0] + le = int(subtoks[1]) + f = NamedField(n, False, le) + subs.append(f) + width += le + continue + if re.fullmatch('[0-9]+:s[0-9]+', t): # Signed field extract subtoks = t.split(':s') @@ -1278,6 +1497,7 @@ def main(): global translate_prefix global output_fd global output_file + global output_null global input_file global insnwidth global insntype @@ -1286,11 +1506,13 @@ def main(): global bitop_width global variablewidth global anyextern + global testforerror decode_scope = 'static ' long_opts = ['decode=', 'translate=', 'output=', 'insnwidth=', - 'static-decode=', 'varinsnwidth='] + 'static-decode=', 'varinsnwidth=', 'test-for-error', + 'output-null'] try: (opts, args) = getopt.gnu_getopt(sys.argv[1:], 'o:vw:', long_opts) except getopt.GetoptError as err: @@ -1319,6 +1541,10 @@ def main(): bitop_width = 64 elif insnwidth != 32: error(0, 'cannot handle insns of width', insnwidth) + elif o == '--test-for-error': + testforerror = True + elif o == '--output-null': + output_null = True else: assert False, 'unhandled option' @@ -1348,7 +1574,9 @@ def main(): stree = build_size_tree(toppat.pats, 8, 0, 0) prop_size(stree) - if output_file: + if output_null: + output_fd = open(os.devnull, 'wt', encoding='utf-8', errors="ignore") + elif output_file: output_fd = open(output_file, 'wt', encoding='utf-8') else: output_fd = io.TextIOWrapper(sys.stdout.buffer, @@ -1417,6 +1645,7 @@ def main(): if output_file: output_fd.close() + exit(1 if testforerror else 0) # end main diff --git a/scripts/device-crash-test b/scripts/device-crash-test index 73bcb98693..353aa575d7 100755 --- a/scripts/device-crash-test +++ b/scripts/device-crash-test @@ -43,7 +43,7 @@ except ModuleNotFoundError as exc: print(f"Module '{exc.name}' not found.") print(" Try 'make check-venv' from your build directory,") print(" and then one way to run this script is like so:") - print(f' > $builddir/tests/venv/bin/python3 "{path}"') + print(f' > $builddir/pyvenv/bin/python3 "{path}"') sys.exit(1) logger = logging.getLogger('device-crash-test') @@ -397,7 +397,7 @@ def binariesToTest(args, testcase): def accelsToTest(args, testcase): - if getBinaryInfo(args, testcase['binary']).kvm_available: + if getBinaryInfo(args, testcase['binary']).kvm_available and not args.tcg_only: yield 'kvm' yield 'tcg' @@ -510,6 +510,8 @@ def main(): help="Full mode: test cases that are expected to fail") parser.add_argument('--strict', action='store_true', dest='strict', help="Treat all warnings as fatal") + parser.add_argument('--tcg-only', action='store_true', dest='tcg_only', + help="Only test with TCG accelerator") parser.add_argument('qemu', nargs='*', metavar='QEMU', help='QEMU binary to run') args = parser.parse_args() diff --git a/scripts/make-config-poison.sh b/scripts/make-config-poison.sh index d222a04304..1892854261 100755 --- a/scripts/make-config-poison.sh +++ b/scripts/make-config-poison.sh @@ -13,4 +13,4 @@ exec sed -n \ -e 's///' \ -e 's/ .*//' \ -e 's/^/#pragma GCC poison /p' \ - -e '}' "$@" + -e '}' "$@" | sort -u diff --git a/scripts/meson-buildoptions.py b/scripts/meson-buildoptions.py old mode 100755 new mode 100644 index a04dcc70a5..8d2e526132 
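Taken together, the two new decodetree options exist so the build system can run negative tests: --test-for-error inverts the exit status (a diagnosed parse error exits 0, clean success exits 1), and --output-null discards the generated decoder so no scratch output file is needed. A sketch of the intended use, with a hypothetical input path:

    import subprocess, sys

    # Expect this .decode file to be rejected; the inverted exit code
    # turns "decodetree reported an error" into test success.
    result = subprocess.run([sys.executable, 'scripts/decodetree.py',
                             '--output-null', '--test-for-error',
                             'tests/decode/err_example.decode'])
    assert result.returncode == 0, 'decodetree accepted a bad input'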
--- a/scripts/meson-buildoptions.py +++ b/scripts/meson-buildoptions.py @@ -35,6 +35,8 @@ SKIP_OPTIONS = { OPTION_NAMES = { "b_coverage": "gcov", "b_lto": "lto", + "coroutine_backend": "with-coroutine", + "debug": "debug-info", "malloc": "enable-malloc", "pkgversion": "with-pkgversion", "qemu_firmwarepath": "firmwarepath", @@ -46,6 +48,7 @@ BUILTIN_OPTIONS = { "b_coverage", "b_lto", "datadir", + "debug", "includedir", "libdir", "libexecdir", diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh index 5d969a94c0..5714fd93d9 100644 --- a/scripts/meson-buildoptions.sh +++ b/scripts/meson-buildoptions.sh @@ -1,7 +1,8 @@ # This file is generated by meson-buildoptions.py, do not edit! meson_options_help() { printf "%s\n" ' --audio-drv-list=CHOICES Set audio driver list [default] (choices: alsa/co' - printf "%s\n" ' reaudio/default/dsound/jack/oss/pa/sdl/sndio)' + printf "%s\n" ' reaudio/default/dsound/jack/oss/pa/pipewire/sdl/s' + printf "%s\n" ' ndio)' printf "%s\n" ' --block-drv-ro-whitelist=VALUE' printf "%s\n" ' set block driver read-only whitelist (by default' printf "%s\n" ' affects only QEMU, not tools like qemu-img)' @@ -10,10 +11,12 @@ meson_options_help() { printf "%s\n" ' affects only QEMU, not tools like qemu-img)' printf "%s\n" ' --datadir=VALUE Data file directory [share]' printf "%s\n" ' --disable-coroutine-pool coroutine freelist (better performance)' + printf "%s\n" ' --disable-debug-info Enable debug symbols and other information' printf "%s\n" ' --disable-hexagon-idef-parser' printf "%s\n" ' use idef-parser to automatically generate TCG' printf "%s\n" ' code for the Hexagon frontend' printf "%s\n" ' --disable-install-blobs install provided firmware blobs' + printf "%s\n" ' --disable-qom-cast-debug cast debugging support' printf "%s\n" ' --docdir=VALUE Base directory for documentation installation' printf "%s\n" ' (can be empty) [share/doc]' printf "%s\n" ' --enable-block-drv-whitelist-in-tools' @@ -21,6 +24,8 @@ meson_options_help() { printf "%s\n" ' QEMU' printf "%s\n" ' --enable-cfi Control-Flow Integrity (CFI)' printf "%s\n" ' --enable-cfi-debug Verbose errors in case of CFI violation' + printf "%s\n" ' --enable-debug-graph-lock' + printf "%s\n" ' graph lock debugging support' printf "%s\n" ' --enable-debug-mutex mutex debugging support' printf "%s\n" ' --enable-debug-stack-usage' printf "%s\n" ' measure coroutine stack usage' @@ -35,14 +40,17 @@ meson_options_help() { printf "%s\n" ' --enable-module-upgrades try to load modules from alternate paths for' printf "%s\n" ' upgrades' printf "%s\n" ' --enable-profiler profiler support' - printf "%s\n" ' --enable-qom-cast-debug cast debugging support' printf "%s\n" ' --enable-rng-none dummy RNG, avoid using /dev/(u)random and' printf "%s\n" ' getrandom()' + printf "%s\n" ' --enable-safe-stack SafeStack Stack Smash Protection (requires' + printf "%s\n" ' clang/llvm and coroutine backend ucontext)' + printf "%s\n" ' --enable-sanitizers enable default sanitizers' printf "%s\n" ' --enable-strip Strip targets on install' printf "%s\n" ' --enable-tcg-interpreter TCG with bytecode interpreter (slow)' printf "%s\n" ' --enable-trace-backends=CHOICES' printf "%s\n" ' Set available tracing backends [log] (choices:' printf "%s\n" ' dtrace/ftrace/log/nop/simple/syslog/ust)' + printf "%s\n" ' --enable-tsan enable thread sanitizer' printf "%s\n" ' --firmwarepath=VALUES search PATH for firmware files [share/qemu-' printf "%s\n" ' firmware]' printf "%s\n" ' --iasl=VALUE Path to ACPI disassembler' @@ -54,10 +62,11 @@ 
meson_options_help() { printf "%s\n" ' --localedir=VALUE Locale data directory [share/locale]' printf "%s\n" ' --localstatedir=VALUE Localstate data directory [/var/local]' printf "%s\n" ' --mandir=VALUE Manual page directory [share/man]' - printf "%s\n" ' --sphinx-build=VALUE Use specified sphinx-build for building document' printf "%s\n" ' --sysconfdir=VALUE Sysconf data directory [etc]' printf "%s\n" ' --tls-priority=VALUE Default TLS protocol/cipher priority string' printf "%s\n" ' [NORMAL]' + printf "%s\n" ' --with-coroutine=CHOICE coroutine backend to use (choices:' + printf "%s\n" ' auto/sigaltstack/ucontext/windows)' printf "%s\n" ' --with-pkgversion=VALUE use specified string as sub-version of the' printf "%s\n" ' package' printf "%s\n" ' --with-trace-file=VALUE Trace file prefix for simple backend [trace]' @@ -82,6 +91,7 @@ meson_options_help() { printf "%s\n" ' capstone Whether and how to find the capstone library' printf "%s\n" ' cloop cloop image format support' printf "%s\n" ' cocoa Cocoa user interface (macOS only)' + printf "%s\n" ' colo-proxy colo-proxy support' printf "%s\n" ' coreaudio CoreAudio sound support' printf "%s\n" ' crypto-afalg Linux AF_ALG crypto backend driver' printf "%s\n" ' curl CURL block device driver' @@ -125,6 +135,7 @@ meson_options_help() { printf "%s\n" ' lzo lzo compression support' printf "%s\n" ' malloc-trim enable libc malloc_trim() for memory optimization' printf "%s\n" ' membarrier membarrier system call (for Linux 4.14+ or Windows' + printf "%s\n" ' modules modules support (non Windows)' printf "%s\n" ' mpath Multipath persistent reservation passthrough' printf "%s\n" ' multiprocess Out of process device emulation support' printf "%s\n" ' netmap netmap network backend support' @@ -135,6 +146,7 @@ meson_options_help() { printf "%s\n" ' oss OSS sound support' printf "%s\n" ' pa PulseAudio sound support' printf "%s\n" ' parallels parallels image format support' + printf "%s\n" ' pipewire Pipewire sound support' printf "%s\n" ' png PNG support with libpng' printf "%s\n" ' pvrdma Enable PVRDMA support' printf "%s\n" ' qcow1 qcow1 image format support' @@ -155,6 +167,7 @@ meson_options_help() { printf "%s\n" ' sparse sparse checker' printf "%s\n" ' spice Spice server support' printf "%s\n" ' spice-protocol Spice protocol support' + printf "%s\n" ' stack-protector compiler-provided stack protection' printf "%s\n" ' tcg TCG support' printf "%s\n" ' tools build support utilities that come with QEMU' printf "%s\n" ' tpm TPM support' @@ -166,6 +179,7 @@ meson_options_help() { printf "%s\n" ' VDUSE block export support' printf "%s\n" ' vfio-user-server' printf "%s\n" ' vfio-user server support' + printf "%s\n" ' vhdx vhdx image format support' printf "%s\n" ' vhost-crypto vhost-user crypto backend support' printf "%s\n" ' vhost-kernel vhost kernel backend support' printf "%s\n" ' vhost-net vhost-net kernel acceleration support' @@ -175,10 +189,14 @@ meson_options_help() { printf "%s\n" ' vhost-vdpa vhost-vdpa kernel backend support' printf "%s\n" ' virglrenderer virgl rendering support' printf "%s\n" ' virtfs virtio-9p support' + printf "%s\n" ' virtfs-proxy-helper' + printf "%s\n" ' virtio-9p proxy helper support' + printf "%s\n" ' vmdk vmdk image format support' printf "%s\n" ' vmnet vmnet.framework network backend support' printf "%s\n" ' vnc VNC server' printf "%s\n" ' vnc-jpeg JPEG lossy compression for VNC server' printf "%s\n" ' vnc-sasl SASL authentication for VNC server' + printf "%s\n" ' vpc vpc image format support' printf "%s\n" ' vte vte 
support for the gtk UI' printf "%s\n" ' vvfat vvfat image format support' printf "%s\n" ' whpx WHPX acceleration support' @@ -235,8 +253,11 @@ _meson_option_parse() { --disable-cloop) printf "%s" -Dcloop=disabled ;; --enable-cocoa) printf "%s" -Dcocoa=enabled ;; --disable-cocoa) printf "%s" -Dcocoa=disabled ;; + --enable-colo-proxy) printf "%s" -Dcolo_proxy=enabled ;; + --disable-colo-proxy) printf "%s" -Dcolo_proxy=disabled ;; --enable-coreaudio) printf "%s" -Dcoreaudio=enabled ;; --disable-coreaudio) printf "%s" -Dcoreaudio=disabled ;; + --with-coroutine=*) quote_sh "-Dcoroutine_backend=$2" ;; --enable-coroutine-pool) printf "%s" -Dcoroutine_pool=true ;; --disable-coroutine-pool) printf "%s" -Dcoroutine_pool=false ;; --enable-crypto-afalg) printf "%s" -Dcrypto_afalg=enabled ;; @@ -248,6 +269,10 @@ _meson_option_parse() { --datadir=*) quote_sh "-Ddatadir=$2" ;; --enable-dbus-display) printf "%s" -Ddbus_display=enabled ;; --disable-dbus-display) printf "%s" -Ddbus_display=disabled ;; + --enable-debug-info) printf "%s" -Ddebug=true ;; + --disable-debug-info) printf "%s" -Ddebug=false ;; + --enable-debug-graph-lock) printf "%s" -Ddebug_graph_lock=true ;; + --disable-debug-graph-lock) printf "%s" -Ddebug_graph_lock=false ;; --enable-debug-mutex) printf "%s" -Ddebug_mutex=true ;; --disable-debug-mutex) printf "%s" -Ddebug_mutex=false ;; --enable-debug-stack-usage) printf "%s" -Ddebug_stack_usage=true ;; @@ -349,6 +374,8 @@ _meson_option_parse() { --disable-membarrier) printf "%s" -Dmembarrier=disabled ;; --enable-module-upgrades) printf "%s" -Dmodule_upgrades=true ;; --disable-module-upgrades) printf "%s" -Dmodule_upgrades=false ;; + --enable-modules) printf "%s" -Dmodules=enabled ;; + --disable-modules) printf "%s" -Dmodules=disabled ;; --enable-mpath) printf "%s" -Dmpath=enabled ;; --disable-mpath) printf "%s" -Dmpath=disabled ;; --enable-multiprocess) printf "%s" -Dmultiprocess=enabled ;; @@ -369,6 +396,8 @@ _meson_option_parse() { --disable-pa) printf "%s" -Dpa=disabled ;; --enable-parallels) printf "%s" -Dparallels=enabled ;; --disable-parallels) printf "%s" -Dparallels=disabled ;; + --enable-pipewire) printf "%s" -Dpipewire=enabled ;; + --disable-pipewire) printf "%s" -Dpipewire=disabled ;; --with-pkgversion=*) quote_sh "-Dpkgversion=$2" ;; --enable-png) printf "%s" -Dpng=enabled ;; --disable-png) printf "%s" -Dpng=disabled ;; @@ -393,6 +422,10 @@ _meson_option_parse() { --disable-replication) printf "%s" -Dreplication=disabled ;; --enable-rng-none) printf "%s" -Drng_none=true ;; --disable-rng-none) printf "%s" -Drng_none=false ;; + --enable-safe-stack) printf "%s" -Dsafe_stack=true ;; + --disable-safe-stack) printf "%s" -Dsafe_stack=false ;; + --enable-sanitizers) printf "%s" -Dsanitizers=true ;; + --disable-sanitizers) printf "%s" -Dsanitizers=false ;; --enable-sdl) printf "%s" -Dsdl=enabled ;; --disable-sdl) printf "%s" -Dsdl=disabled ;; --enable-sdl-image) printf "%s" -Dsdl_image=enabled ;; @@ -413,11 +446,12 @@ _meson_option_parse() { --disable-sndio) printf "%s" -Dsndio=disabled ;; --enable-sparse) printf "%s" -Dsparse=enabled ;; --disable-sparse) printf "%s" -Dsparse=disabled ;; - --sphinx-build=*) quote_sh "-Dsphinx_build=$2" ;; --enable-spice) printf "%s" -Dspice=enabled ;; --disable-spice) printf "%s" -Dspice=disabled ;; --enable-spice-protocol) printf "%s" -Dspice_protocol=enabled ;; --disable-spice-protocol) printf "%s" -Dspice_protocol=disabled ;; + --enable-stack-protector) printf "%s" -Dstack_protector=enabled ;; + --disable-stack-protector) printf "%s" 
-Dstack_protector=disabled ;;
     --enable-strip) printf "%s" -Dstrip=true ;;
     --disable-strip) printf "%s" -Dstrip=false ;;
     --sysconfdir=*) quote_sh "-Dsysconfdir=$2" ;;
@@ -432,6 +466,8 @@ _meson_option_parse() {
     --disable-tpm) printf "%s" -Dtpm=disabled ;;
     --enable-trace-backends=*) quote_sh "-Dtrace_backends=$2" ;;
     --with-trace-file=*) quote_sh "-Dtrace_file=$2" ;;
+    --enable-tsan) printf "%s" -Dtsan=true ;;
+    --disable-tsan) printf "%s" -Dtsan=false ;;
     --enable-u2f) printf "%s" -Du2f=enabled ;;
     --disable-u2f) printf "%s" -Du2f=disabled ;;
     --enable-usb-redir) printf "%s" -Dusb_redir=enabled ;;
@@ -444,6 +480,8 @@ _meson_option_parse() {
     --disable-vduse-blk-export) printf "%s" -Dvduse_blk_export=disabled ;;
     --enable-vfio-user-server) printf "%s" -Dvfio_user_server=enabled ;;
     --disable-vfio-user-server) printf "%s" -Dvfio_user_server=disabled ;;
+    --enable-vhdx) printf "%s" -Dvhdx=enabled ;;
+    --disable-vhdx) printf "%s" -Dvhdx=disabled ;;
     --enable-vhost-crypto) printf "%s" -Dvhost_crypto=enabled ;;
     --disable-vhost-crypto) printf "%s" -Dvhost_crypto=disabled ;;
     --enable-vhost-kernel) printf "%s" -Dvhost_kernel=enabled ;;
@@ -460,6 +498,10 @@ _meson_option_parse() {
     --disable-virglrenderer) printf "%s" -Dvirglrenderer=disabled ;;
     --enable-virtfs) printf "%s" -Dvirtfs=enabled ;;
     --disable-virtfs) printf "%s" -Dvirtfs=disabled ;;
+    --enable-virtfs-proxy-helper) printf "%s" -Dvirtfs_proxy_helper=enabled ;;
+    --disable-virtfs-proxy-helper) printf "%s" -Dvirtfs_proxy_helper=disabled ;;
+    --enable-vmdk) printf "%s" -Dvmdk=enabled ;;
+    --disable-vmdk) printf "%s" -Dvmdk=disabled ;;
     --enable-vmnet) printf "%s" -Dvmnet=enabled ;;
     --disable-vmnet) printf "%s" -Dvmnet=disabled ;;
     --enable-vnc) printf "%s" -Dvnc=enabled ;;
@@ -468,6 +510,8 @@ _meson_option_parse() {
     --disable-vnc-jpeg) printf "%s" -Dvnc_jpeg=disabled ;;
     --enable-vnc-sasl) printf "%s" -Dvnc_sasl=enabled ;;
     --disable-vnc-sasl) printf "%s" -Dvnc_sasl=disabled ;;
+    --enable-vpc) printf "%s" -Dvpc=enabled ;;
+    --disable-vpc) printf "%s" -Dvpc=disabled ;;
     --enable-vte) printf "%s" -Dvte=enabled ;;
     --disable-vte) printf "%s" -Dvte=disabled ;;
     --enable-vvfat) printf "%s" -Dvvfat=enabled ;;
diff --git a/scripts/modinfo-collect.py b/scripts/modinfo-collect.py
old mode 100755
new mode 100644
diff --git a/scripts/modinfo-generate.py b/scripts/modinfo-generate.py
old mode 100755
new mode 100644
diff --git a/scripts/mtest2make.py b/scripts/mtest2make.py
index 0fe81efbbc..179dd54871 100644
--- a/scripts/mtest2make.py
+++ b/scripts/mtest2make.py
@@ -51,10 +51,11 @@ def process_tests(test, targets, suites):
     test_suites = test['suite'] or ['default']
 
     for s in test_suites:
-        # The suite name in the introspection info is "PROJECT:SUITE"
-        s = s.split(':')[1]
-        if s == 'slow' or s == 'thorough':
-            continue
+        # The suite name in the introspection info is "PROJECT" or "PROJECT:SUITE"
+        if ':' in s:
+            s = s.split(':')[1]
+            if s == 'slow' or s == 'thorough':
+                continue
         if s.endswith('-slow'):
             s = s[:-5]
             suites[s].speeds.append('slow')
diff --git a/scripts/probe-gdb-support.py b/scripts/probe-gdb-support.py
new file mode 100644
index 0000000000..5755255966
--- /dev/null
+++ b/scripts/probe-gdb-support.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+# coding: utf-8
+#
+# Probe gdb for supported architectures.
+#
+# This is required to support testing of the gdbstub as it's hard to
+# handle errors gracefully during the test. Instead, this script, when
+# passed a GDB binary, will probe its architecture support and return a
+# string of supported arches, stripped of guff.
+# +# Copyright 2023 Linaro Ltd +# +# Author: Alex Bennée +# +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import argparse +import re +from subprocess import check_output, STDOUT + +# mappings from gdb arch to QEMU target +mappings = { + "alpha" : "alpha", + "aarch64" : ["aarch64", "aarch64_be"], + "armv7": "arm", + "armv8-a" : ["aarch64", "aarch64_be"], + "avr" : "avr", + "cris" : "cris", + # no hexagon in upstream gdb + "hppa1.0" : "hppa", + "i386" : "i386", + "i386:x86-64" : "x86_64", + "Loongarch64" : "loongarch64", + "m68k" : "m68k", + "MicroBlaze" : "microblaze", + "mips:isa64" : ["mips64", "mips64el"], + "nios2" : "nios2", + "or1k" : "or1k", + "powerpc:common" : "ppc", + "powerpc:common64" : ["ppc64", "ppc64le"], + "riscv:rv32" : "riscv32", + "riscv:rv64" : "riscv64", + "s390:64-bit" : "s390x", + "sh4" : ["sh4", "sh4eb"], + "sparc": "sparc", + "sparc:v8plus": "sparc32plus", + "sparc:v9a" : "sparc64", + # no tricore in upstream gdb + "xtensa" : ["xtensa", "xtensaeb"] +} + +def do_probe(gdb): + gdb_out = check_output([gdb, + "-ex", "set architecture", + "-ex", "quit"], stderr=STDOUT) + + m = re.search(r"Valid arguments are (.*)", + gdb_out.decode("utf-8")) + + valid_arches = set() + + if m.group(1): + for arch in m.group(1).split(", "): + if arch in mappings: + mapping = mappings[arch] + if isinstance(mapping, str): + valid_arches.add(mapping) + else: + for entry in mapping: + valid_arches.add(entry) + + return valid_arches + +def main() -> None: + parser = argparse.ArgumentParser(description='Probe GDB Architectures') + parser.add_argument('gdb', help='Path to GDB binary.') + + args = parser.parse_args() + + supported = do_probe(args.gdb) + + print(" ".join(supported)) + +if __name__ == '__main__': + main() diff --git a/scripts/qapi/commands.py b/scripts/qapi/commands.py index 79c5e5c3a9..d1fdf4182c 100644 --- a/scripts/qapi/commands.py +++ b/scripts/qapi/commands.py @@ -41,11 +41,13 @@ from .source import QAPISourceInfo def gen_command_decl(name: str, arg_type: Optional[QAPISchemaObjectType], boxed: bool, - ret_type: Optional[QAPISchemaType]) -> str: + ret_type: Optional[QAPISchemaType], + coroutine: bool) -> str: return mcgen(''' -%(c_type)s qmp_%(c_name)s(%(params)s); +%(c_type)s %(coroutine_fn)sqmp_%(c_name)s(%(params)s); ''', c_type=(ret_type and ret_type.c_type()) or 'void', + coroutine_fn='coroutine_fn ' if coroutine else '', c_name=c_name(name), params=build_params(arg_type, boxed, 'Error **errp')) @@ -64,6 +66,7 @@ def gen_call(name: str, elif arg_type: assert not arg_type.variants for memb in arg_type.members: + assert not memb.ifcond.is_present() if memb.need_has(): argstr += 'arg.has_%s, ' % c_name(memb.name) argstr += 'arg.%s, ' % c_name(memb.name) @@ -157,16 +160,21 @@ static void qmp_marshal_output_%(c_name)s(%(c_type)s ret_in, c_type=ret_type.c_type(), c_name=ret_type.c_name()) -def build_marshal_proto(name: str) -> str: - return ('void qmp_marshal_%s(QDict *args, QObject **ret, Error **errp)' - % c_name(name)) +def build_marshal_proto(name: str, + coroutine: bool) -> str: + return ('void %(coroutine_fn)sqmp_marshal_%(c_name)s(%(params)s)' % { + 'coroutine_fn': 'coroutine_fn ' if coroutine else '', + 'c_name': c_name(name), + 'params': 'QDict *args, QObject **ret, Error **errp', + }) -def gen_marshal_decl(name: str) -> str: +def gen_marshal_decl(name: str, + coroutine: bool) -> str: return mcgen(''' %(proto)s; ''', - 
proto=build_marshal_proto(name)) + proto=build_marshal_proto(name, coroutine)) def gen_trace(name: str) -> str: @@ -181,7 +189,8 @@ def gen_marshal(name: str, arg_type: Optional[QAPISchemaObjectType], boxed: bool, ret_type: Optional[QAPISchemaType], - gen_tracing: bool) -> str: + gen_tracing: bool, + coroutine: bool) -> str: have_args = boxed or (arg_type and not arg_type.is_empty()) if have_args: assert arg_type is not None @@ -195,7 +204,7 @@ def gen_marshal(name: str, bool ok = false; Visitor *v; ''', - proto=build_marshal_proto(name)) + proto=build_marshal_proto(name, coroutine)) if ret_type: ret += mcgen(''' @@ -387,10 +396,11 @@ void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds) self._genh, self._genc): self._genc.add(gen_marshal_output(ret_type)) with ifcontext(ifcond, self._genh, self._genc): - self._genh.add(gen_command_decl(name, arg_type, boxed, ret_type)) - self._genh.add(gen_marshal_decl(name)) + self._genh.add(gen_command_decl(name, arg_type, boxed, + ret_type, coroutine)) + self._genh.add(gen_marshal_decl(name, coroutine)) self._genc.add(gen_marshal(name, arg_type, boxed, ret_type, - self._gen_tracing)) + self._gen_tracing, coroutine)) if self._gen_tracing: self._gen_trace_events.add(gen_trace(name)) with self._temp_module('./init'): diff --git a/scripts/qapi/expr.py b/scripts/qapi/expr.py index ca01ea6f4a..cae0a08359 100644 --- a/scripts/qapi/expr.py +++ b/scripts/qapi/expr.py @@ -333,62 +333,56 @@ def normalize_members(members: object) -> None: members[key] = {'type': arg} -def check_type(value: Optional[object], - info: QAPISourceInfo, - source: str, - allow_array: bool = False, - allow_dict: Union[bool, str] = False) -> None: - """ - Normalize and validate the QAPI type of ``value``. +def check_type_name(value: Optional[object], + info: QAPISourceInfo, source: str) -> None: + if value is not None and not isinstance(value, str): + raise QAPISemError(info, "%s should be a type name" % source) - Python types of ``str`` or ``None`` are always allowed. + +def check_type_name_or_array(value: Optional[object], + info: QAPISourceInfo, source: str) -> None: + if value is None or isinstance(value, str): + return + + if not isinstance(value, list): + raise QAPISemError(info, + "%s should be a type name or array" % source) + + if len(value) != 1 or not isinstance(value[0], str): + raise QAPISemError(info, + "%s: array type must contain single type name" % + source) + + +def check_type_implicit(value: Optional[object], + info: QAPISourceInfo, source: str, + parent_name: Optional[str]) -> None: + """ + Normalize and validate an optional implicit struct type. + + Accept ``None`` or a ``dict`` defining an implicit struct type. + The latter is normalized in place. :param value: The value to check. :param info: QAPI schema source file information. :param source: Error string describing this ``value``. - :param allow_array: - Allow a ``List[str]`` of length 1, which indicates an array of - the type named by the list element. - :param allow_dict: - Allow a dict. Its members can be struct type members or union - branches. When the value of ``allow_dict`` is in pragma - ``member-name-exceptions``, the dict's keys may violate the - member naming rules. The dict members are normalized in place. + :param parent_name: + When the value of ``parent_name`` is in pragma + ``member-name-exceptions``, an implicit struct type may + violate the member naming rules. :raise QAPISemError: When ``value`` fails validation. - :return: None, ``value`` is normalized in-place as needed. 
+ :return: None """ if value is None: return - # Type name - if isinstance(value, str): - return - - # Array type - if isinstance(value, list): - if not allow_array: - raise QAPISemError(info, "%s cannot be an array" % source) - if len(value) != 1 or not isinstance(value[0], str): - raise QAPISemError(info, - "%s: array type must contain single type name" % - source) - return - - # Anonymous type - - if not allow_dict: - raise QAPISemError(info, "%s should be a type name" % source) - if not isinstance(value, dict): raise QAPISemError(info, "%s should be an object or type name" % source) - permissive = False - if isinstance(allow_dict, str): - permissive = allow_dict in info.pragma.member_name_exceptions + permissive = parent_name in info.pragma.member_name_exceptions - # value is a dictionary, check that each member is okay for (key, arg) in value.items(): key_source = "%s member '%s'" % (source, key) if key.startswith('*'): @@ -401,7 +395,16 @@ def check_type(value: Optional[object], check_keys(arg, info, key_source, ['type'], ['if', 'features']) check_if(arg, info, key_source) check_features(arg.get('features'), info) - check_type(arg['type'], info, key_source, allow_array=True) + check_type_name_or_array(arg['type'], info, key_source) + + +def check_type_name_or_implicit(value: Optional[object], + info: QAPISourceInfo, source: str, + parent_name: Optional[str]) -> None: + if value is None or isinstance(value, str): + return + + check_type_implicit(value, info, source, parent_name) def check_features(features: Optional[object], @@ -489,8 +492,8 @@ def check_struct(expr: QAPIExpression) -> None: name = cast(str, expr['struct']) # Checked in check_exprs members = expr['data'] - check_type(members, expr.info, "'data'", allow_dict=name) - check_type(expr.get('base'), expr.info, "'base'") + check_type_implicit(members, expr.info, "'data'", name) + check_type_name(expr.get('base'), expr.info, "'base'") def check_union(expr: QAPIExpression) -> None: @@ -508,7 +511,7 @@ def check_union(expr: QAPIExpression) -> None: members = expr['data'] info = expr.info - check_type(base, info, "'base'", allow_dict=name) + check_type_name_or_implicit(base, info, "'base'", name) check_name_is_str(discriminator, info, "'discriminator'") if not isinstance(members, dict): @@ -518,7 +521,7 @@ def check_union(expr: QAPIExpression) -> None: source = "'data' member '%s'" % key check_keys(value, info, source, ['type'], ['if']) check_if(value, info, source) - check_type(value['type'], info, source, allow_array=not base) + check_type_name(value['type'], info, source) def check_alternate(expr: QAPIExpression) -> None: @@ -544,7 +547,7 @@ def check_alternate(expr: QAPIExpression) -> None: check_name_lower(key, info, source) check_keys(value, info, source, ['type'], ['if']) check_if(value, info, source) - check_type(value['type'], info, source, allow_array=True) + check_type_name_or_array(value['type'], info, source) def check_command(expr: QAPIExpression) -> None: @@ -560,10 +563,13 @@ def check_command(expr: QAPIExpression) -> None: rets = expr.get('returns') boxed = expr.get('boxed', False) - if boxed and args is None: - raise QAPISemError(expr.info, "'boxed': true requires 'data'") - check_type(args, expr.info, "'data'", allow_dict=not boxed) - check_type(rets, expr.info, "'returns'", allow_array=True) + if boxed: + if args is None: + raise QAPISemError(expr.info, "'boxed': true requires 'data'") + check_type_name(args, expr.info, "'data'") + else: + check_type_name_or_implicit(args, expr.info, "'data'", None) + 
check_type_name_or_array(rets, expr.info, "'returns'") def check_event(expr: QAPIExpression) -> None: @@ -578,9 +584,12 @@ def check_event(expr: QAPIExpression) -> None: args = expr.get('data') boxed = expr.get('boxed', False) - if boxed and args is None: - raise QAPISemError(expr.info, "'boxed': true requires 'data'") - check_type(args, expr.info, "'data'", allow_dict=not boxed) + if boxed: + if args is None: + raise QAPISemError(expr.info, "'boxed': true requires 'data'") + check_type_name(args, expr.info, "'data'") + else: + check_type_name_or_implicit(args, expr.info, "'data'", None) def check_exprs(exprs: List[QAPIExpression]) -> List[QAPIExpression]: diff --git a/scripts/qapi/gen.py b/scripts/qapi/gen.py index b5a8d03e8e..8f8f784f4a 100644 --- a/scripts/qapi/gen.py +++ b/scripts/qapi/gen.py @@ -119,6 +119,7 @@ def build_params(arg_type: Optional[QAPISchemaObjectType], elif arg_type: assert not arg_type.variants for memb in arg_type.members: + assert not memb.ifcond.is_present() ret += sep sep = ', ' if memb.need_has(): diff --git a/scripts/qapi/main.py b/scripts/qapi/main.py index fc216a53d3..316736b6a2 100644 --- a/scripts/qapi/main.py +++ b/scripts/qapi/main.py @@ -98,6 +98,6 @@ def main() -> int: builtins=args.builtins, gen_tracing=not args.suppress_tracing) except QAPIError as err: - print(f"{sys.argv[0]}: {str(err)}", file=sys.stderr) + print(err, file=sys.stderr) return 1 return 0 diff --git a/scripts/qapi/mypy.ini b/scripts/qapi/mypy.ini index 6625356429..3463307ddc 100644 --- a/scripts/qapi/mypy.ini +++ b/scripts/qapi/mypy.ini @@ -1,7 +1,7 @@ [mypy] strict = True disallow_untyped_calls = False -python_version = 3.6 +python_version = 3.7 [mypy-qapi.schema] disallow_untyped_defs = False diff --git a/scripts/qapi/parser.py b/scripts/qapi/parser.py index 878f90b458..22e7bcc4b1 100644 --- a/scripts/qapi/parser.py +++ b/scripts/qapi/parser.py @@ -346,7 +346,7 @@ class QAPISchemaParser: elif not self.tok.isspace(): # Show up to next structural, whitespace or quote # character - match = must_match('[^[\\]{}:,\\s\'"]+', + match = must_match('[^[\\]{}:,\\s\']+', self.src[self.cursor-1:]) raise QAPIParseError(self, "stray '%s'" % match.group(0)) @@ -468,34 +468,39 @@ class QAPIDoc: class Section: # pylint: disable=too-few-public-methods def __init__(self, parser: QAPISchemaParser, - name: Optional[str] = None, indent: int = 0): - + name: Optional[str] = None): # parser, for error messages about indentation self._parser = parser # optional section name (argument/member or section name) self.name = name + # section text without section name self.text = '' - # the expected indent level of the text of this section - self._indent = indent + # indentation to strip (None means indeterminate) + self._indent = None if self.name else 0 def append(self, line: str) -> None: - # Strip leading spaces corresponding to the expected indent level - # Blank lines are always OK. 
+ line = line.rstrip() + if line: indent = must_match(r'\s*', line).end() - if indent < self._indent: + if self._indent is None: + # indeterminate indentation + if self.text != '': + # non-blank, non-first line determines indentation + self._indent = indent + elif indent < self._indent: raise QAPIParseError( self._parser, "unexpected de-indent (expected at least %d spaces)" % self._indent) line = line[self._indent:] - self.text += line.rstrip() + '\n' + self.text += line + '\n' class ArgSection(Section): def __init__(self, parser: QAPISchemaParser, - name: str, indent: int = 0): - super().__init__(parser, name, indent) + name: str): + super().__init__(parser, name) self.member: Optional['QAPISchemaMember'] = None def connect(self, member: 'QAPISchemaMember') -> None: @@ -558,12 +563,12 @@ class QAPIDoc: self._switch_section(QAPIDoc.NullSection(self._parser)) @staticmethod - def _is_section_tag(name: str) -> bool: - return name in ('Returns:', 'Since:', - # those are often singular or plural - 'Note:', 'Notes:', - 'Example:', 'Examples:', - 'TODO:') + def _match_at_name_colon(string: str): + return re.match(r'@([^:]*): *', string) + + @staticmethod + def _match_section_tag(string: str): + return re.match(r'(Returns|Since|Notes?|Examples?|TODO): *', string) def _append_body_line(self, line: str) -> None: """ @@ -579,7 +584,6 @@ class QAPIDoc: Else, append the line to the current section. """ - name = line.split(' ', 1)[0] # FIXME not nice: things like '# @foo:' and '# @foo: ' aren't # recognized, and get silently treated as ordinary text if not self.symbol and not self.body.text and line.startswith('@'): @@ -593,12 +597,12 @@ class QAPIDoc: self._parser, "name required after '@'") elif self.symbol: # This is a definition documentation block - if name.startswith('@') and name.endswith(':'): + if self._match_at_name_colon(line): self._append_line = self._append_args_line self._append_args_line(line) elif line == 'Features:': self._append_line = self._append_features_line - elif self._is_section_tag(name): + elif self._match_section_tag(line): self._append_line = self._append_various_line self._append_various_line(line) else: @@ -619,25 +623,11 @@ class QAPIDoc: Else, append the line to the current section. """ - name = line.split(' ', 1)[0] - - if name.startswith('@') and name.endswith(':'): - # If line is "@arg: first line of description", find - # the index of 'f', which is the indent we expect for any - # following lines. We then remove the leading "@arg:" - # from line and replace it with spaces so that 'f' has the - # same index as it did in the original line and can be - # handled the same way we will handle following lines. - indent = must_match(r'@\S*:\s*', line).end() - line = line[indent:] - if not line: - # Line was just the "@arg:" header; following lines - # are not indented - indent = 0 - else: - line = ' ' * indent + line - self._start_args_section(name[1:-1], indent) - elif self._is_section_tag(name): + match = self._match_at_name_colon(line) + if match: + line = line[match.end():] + self._start_args_section(match.group(1)) + elif self._match_section_tag(line): self._append_line = self._append_various_line self._append_various_line(line) return @@ -654,25 +644,11 @@ class QAPIDoc: self._append_freeform(line) def _append_features_line(self, line: str) -> None: - name = line.split(' ', 1)[0] - - if name.startswith('@') and name.endswith(':'): - # If line is "@arg: first line of description", find - # the index of 'f', which is the indent we expect for any - # following lines. 
We then remove the leading "@arg:" - # from line and replace it with spaces so that 'f' has the - # same index as it did in the original line and can be - # handled the same way we will handle following lines. - indent = must_match(r'@\S*:\s*', line).end() - line = line[indent:] - if not line: - # Line was just the "@arg:" header; following lines - # are not indented - indent = 0 - else: - line = ' ' * indent + line - self._start_features_section(name[1:-1], indent) - elif self._is_section_tag(name): + match = self._match_at_name_colon(line) + if match: + line = line[match.end():] + self._start_features_section(match.group(1)) + elif self._match_section_tag(line): self._append_line = self._append_various_line self._append_various_line(line) return @@ -696,36 +672,22 @@ class QAPIDoc: Else, append the line to the current section. """ - name = line.split(' ', 1)[0] - - if name.startswith('@') and name.endswith(':'): + match = self._match_at_name_colon(line) + if match: raise QAPIParseError(self._parser, - "'%s' can't follow '%s' section" - % (name, self.sections[0].name)) - if self._is_section_tag(name): - # If line is "Section: first line of description", find - # the index of 'f', which is the indent we expect for any - # following lines. We then remove the leading "Section:" - # from line and replace it with spaces so that 'f' has the - # same index as it did in the original line and can be - # handled the same way we will handle following lines. - indent = must_match(r'\S*:\s*', line).end() - line = line[indent:] - if not line: - # Line was just the "Section:" header; following lines - # are not indented - indent = 0 - else: - line = ' ' * indent + line - self._start_section(name[:-1], indent) + "description of '@%s:' follows a section" + % match.group(1)) + match = self._match_section_tag(line) + if match: + line = line[match.end():] + self._start_section(match.group(1)) self._append_freeform(line) def _start_symbol_section( self, symbols_dict: Dict[str, 'QAPIDoc.ArgSection'], - name: str, - indent: int) -> None: + name: str) -> None: # FIXME invalid names other than the empty string aren't flagged if not name: raise QAPIParseError(self._parser, "invalid parameter name") @@ -733,27 +695,26 @@ class QAPIDoc: raise QAPIParseError(self._parser, "'%s' parameter name duplicated" % name) assert not self.sections - new_section = QAPIDoc.ArgSection(self._parser, name, indent) + new_section = QAPIDoc.ArgSection(self._parser, name) self._switch_section(new_section) symbols_dict[name] = new_section - def _start_args_section(self, name: str, indent: int) -> None: - self._start_symbol_section(self.args, name, indent) + def _start_args_section(self, name: str) -> None: + self._start_symbol_section(self.args, name) - def _start_features_section(self, name: str, indent: int) -> None: - self._start_symbol_section(self.features, name, indent) + def _start_features_section(self, name: str) -> None: + self._start_symbol_section(self.features, name) - def _start_section(self, name: Optional[str] = None, - indent: int = 0) -> None: + def _start_section(self, name: Optional[str] = None) -> None: if name in ('Returns', 'Since') and self.has_section(name): raise QAPIParseError(self._parser, "duplicated '%s' section" % name) - new_section = QAPIDoc.Section(self._parser, name, indent) + new_section = QAPIDoc.Section(self._parser, name) self._switch_section(new_section) self.sections.append(new_section) def _switch_section(self, new_section: 'QAPIDoc.Section') -> None: - text = self._section.text = 
self._section.text.strip() + text = self._section.text = self._section.text.strip('\n') # Only the 'body' section is allowed to have an empty body. # All other sections, including anonymous ones, must have text. diff --git a/scripts/qapi/schema.py b/scripts/qapi/schema.py index 207e4d71f3..231ebf61ba 100644 --- a/scripts/qapi/schema.py +++ b/scripts/qapi/schema.py @@ -259,7 +259,7 @@ class QAPISchemaType(QAPISchemaEntity): return not self.c_type().endswith(POINTER_SUFFIX) def check(self, schema): - QAPISchemaEntity.check(self, schema) + super().check(schema) for feat in self.features: if feat.is_special(): raise QAPISemError( @@ -465,9 +465,10 @@ class QAPISchemaObjectType(QAPISchemaType): # on behalf of info, which is not necessarily self.info def check_clash(self, info, seen): assert self._checked - assert not self.variants # not implemented for m in self.members: m.check_clash(info, seen) + if self.variants: + self.variants.check_clash(info, seen) def connect_doc(self, doc=None): super().connect_doc(doc) @@ -486,6 +487,10 @@ class QAPISchemaObjectType(QAPISchemaType): assert self.members is not None return not self.members and not self.variants + def has_conditional_members(self): + assert self.members is not None + return any(m.ifcond.is_present() for m in self.members) + def c_name(self): assert self.name != 'q_empty' return super().c_name() @@ -652,8 +657,7 @@ class QAPISchemaVariants: self.info, "branch '%s' is not a value of %s" % (v.name, self.tag_member.type.describe())) - if (not isinstance(v.type, QAPISchemaObjectType) - or v.type.variants): + if not isinstance(v.type, QAPISchemaObjectType): raise QAPISemError( self.info, "%s cannot use %s" @@ -697,6 +701,7 @@ class QAPISchemaMember: def describe(self, info): role = self.role + meta = 'type' defined_in = self.defined_in assert defined_in @@ -708,13 +713,17 @@ class QAPISchemaMember: # Implicit type created for a command's dict 'data' assert role == 'member' role = 'parameter' + meta = 'command' + defined_in = defined_in[:-4] elif defined_in.endswith('-base'): # Implicit type created for a union's dict 'base' role = 'base ' + role + defined_in = defined_in[:-5] else: assert False - elif defined_in != info.defn_name: - return "%s '%s' of type '%s'" % (role, self.name, defined_in) + + if defined_in != info.defn_name: + return "%s '%s' of %s '%s'" % (role, self.name, meta, defined_in) return "%s '%s'" % (role, self.name) @@ -817,6 +826,11 @@ class QAPISchemaCommand(QAPISchemaEntity): self.info, "command's 'data' can take %s only with 'boxed': true" % self.arg_type.describe()) + self.arg_type.check(schema) + if self.arg_type.has_conditional_members() and not self.boxed: + raise QAPISemError( + self.info, + "conditional command arguments require 'boxed': true") if self._ret_type_name: self.ret_type = schema.resolve_type( self._ret_type_name, self.info, "command's 'returns'") @@ -872,6 +886,11 @@ class QAPISchemaEvent(QAPISchemaEntity): self.info, "event's 'data' can take %s only with 'boxed': true" % self.arg_type.describe()) + self.arg_type.check(schema) + if self.arg_type.has_conditional_members() and not self.boxed: + raise QAPISemError( + self.info, + "conditional event arguments require 'boxed': true") def connect_doc(self, doc=None): super().connect_doc(doc) diff --git a/scripts/qapi/visit.py b/scripts/qapi/visit.py index 26a584ee4c..c56ea4d724 100644 --- a/scripts/qapi/visit.py +++ b/scripts/qapi/visit.py @@ -74,11 +74,13 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp) sep = '' for memb in 
members: if memb.optional and not memb.need_has(): + ret += memb.ifcond.gen_if() ret += mcgen(''' bool has_%(c_name)s = !!obj->%(c_name)s; ''', c_name=c_name(memb.name)) sep = '\n' + ret += memb.ifcond.gen_endif() ret += sep if base: diff --git a/scripts/symlink-install-tree.py b/scripts/symlink-install-tree.py index 67cb86dd52..8ed97e3c94 100644 --- a/scripts/symlink-install-tree.py +++ b/scripts/symlink-install-tree.py @@ -28,5 +28,8 @@ for source, dest in json.loads(out).items(): os.symlink(source, bundle_dest) except BaseException as e: if not isinstance(e, OSError) or e.errno != errno.EEXIST: + if os.name == 'nt': + print('Please enable Developer Mode to support soft link ' + 'without Administrator permission') print(f'error making symbolic link {dest}', file=sys.stderr) raise e diff --git a/scripts/tracetool/__init__.py b/scripts/tracetool/__init__.py index 5393c7fc5c..33cf85e2b0 100644 --- a/scripts/tracetool/__init__.py +++ b/scripts/tracetool/__init__.py @@ -18,7 +18,6 @@ import weakref import tracetool.format import tracetool.backend -import tracetool.transform def error_write(*lines): @@ -190,18 +189,6 @@ class Arguments: """List of argument names casted to their type.""" return ["(%s)%s" % (type_, name) for type_, name in self._args] - def transform(self, *trans): - """Return a new Arguments instance with transformed types. - - The types in the resulting Arguments instance are transformed according - to tracetool.transform.transform_type. - """ - res = [] - for type_, name in self._args: - res.append((tracetool.transform.transform_type(type_, *trans), - name)) - return Arguments(res) - class Event(object): """Event description. @@ -358,16 +345,6 @@ class Event(object): fmt = Event.QEMU_TRACE return fmt % {"name": self.name, "NAME": self.name.upper()} - def transform(self, *trans): - """Return a new Event with transformed Arguments.""" - return Event(self.name, - list(self.properties), - self.fmt, - self.args.transform(*trans), - self.lineno, - self.filename, - self) - def read_events(fobj, fname): """Generate the output for the given (format, backends) pair. 
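The three trace backend diffs that follow (ftrace, log, syslog) apply the same one-line fix: the filename recorded in each generated trace point becomes relative to the build's working directory rather than absolute. A minimal illustration of the effect, with hypothetical paths and an explicit start= for determinism (the patch itself relies on the cwd):

    import os.path

    print(os.path.relpath('/src/qemu/trace-events',
                          start='/src/qemu/build'))
    # -> '../trace-events'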
diff --git a/scripts/tracetool/backend/ftrace.py b/scripts/tracetool/backend/ftrace.py index 5fa30ccc08..baed2ae61c 100644 --- a/scripts/tracetool/backend/ftrace.py +++ b/scripts/tracetool/backend/ftrace.py @@ -12,6 +12,8 @@ __maintainer__ = "Stefan Hajnoczi" __email__ = "stefanha@redhat.com" +import os.path + from tracetool import out @@ -45,7 +47,7 @@ def generate_h(event, group): args=event.args, event_id="TRACE_" + event.name.upper(), event_lineno=event.lineno, - event_filename=event.filename, + event_filename=os.path.relpath(event.filename), fmt=event.fmt.rstrip("\n"), argnames=argnames) diff --git a/scripts/tracetool/backend/log.py b/scripts/tracetool/backend/log.py index 17ba1cd90e..de27b7e62e 100644 --- a/scripts/tracetool/backend/log.py +++ b/scripts/tracetool/backend/log.py @@ -12,6 +12,8 @@ __maintainer__ = "Stefan Hajnoczi" __email__ = "stefanha@redhat.com" +import os.path + from tracetool import out @@ -53,7 +55,7 @@ def generate_h(event, group): ' }', cond=cond, event_lineno=event.lineno, - event_filename=event.filename, + event_filename=os.path.relpath(event.filename), name=event.name, fmt=event.fmt.rstrip("\n"), argnames=argnames) diff --git a/scripts/tracetool/backend/syslog.py b/scripts/tracetool/backend/syslog.py index 5a3a00fe31..012970f6cc 100644 --- a/scripts/tracetool/backend/syslog.py +++ b/scripts/tracetool/backend/syslog.py @@ -12,6 +12,8 @@ __maintainer__ = "Stefan Hajnoczi" __email__ = "stefanha@redhat.com" +import os.path + from tracetool import out @@ -41,7 +43,7 @@ def generate_h(event, group): ' }', cond=cond, event_lineno=event.lineno, - event_filename=event.filename, + event_filename=os.path.relpath(event.filename), name=event.name, fmt=event.fmt.rstrip("\n"), argnames=argnames) diff --git a/scripts/tracetool/transform.py b/scripts/tracetool/transform.py deleted file mode 100644 index ea8b27799d..0000000000 --- a/scripts/tracetool/transform.py +++ /dev/null @@ -1,168 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -Type-transformation rules. -""" - -__author__ = "Lluís Vilanova " -__copyright__ = "Copyright 2012-2016, Lluís Vilanova " -__license__ = "GPL version 2 or (at your option) any later version" - -__maintainer__ = "Stefan Hajnoczi" -__email__ = "stefanha@redhat.com" - - -def _transform_type(type_, trans): - if isinstance(trans, str): - return trans - elif isinstance(trans, dict): - if type_ in trans: - return _transform_type(type_, trans[type_]) - elif None in trans: - return _transform_type(type_, trans[None]) - else: - return type_ - elif callable(trans): - return trans(type_) - else: - raise ValueError("Invalid type transformation rule: %s" % trans) - - -def transform_type(type_, *trans): - """Return a new type transformed according to the given rules. - - Applies each of the transformation rules in trans in order. - - If an element of trans is a string, return it. - - If an element of trans is a function, call it with type_ as its only - argument. - - If an element of trans is a dict, search type_ in its keys. If type_ is - a key, use the value as a transformation rule for type_. Otherwise, if - None is a key use the value as a transformation rule for type_. - - Otherwise, return type_. - - Parameters - ---------- - type_ : str - Type to transform. - trans : list of function or dict - Type transformation rules. 
- """ - if len(trans) == 0: - raise ValueError - res = type_ - for t in trans: - res = _transform_type(res, t) - return res - - -################################################## -# tcg -> host - -def _tcg_2_host(type_): - if type_ == "TCGv": - # force a fixed-size type (target-independent) - return "uint64_t" - else: - return type_ - -TCG_2_HOST = { - "TCGv_i32": "uint32_t", - "TCGv_i64": "uint64_t", - "TCGv_ptr": "void *", - None: _tcg_2_host, - } - - -################################################## -# host -> host compatible with tcg sizes - -HOST_2_TCG_COMPAT = { - "uint8_t": "uint32_t", - "uint16_t": "uint32_t", - } - - -################################################## -# host/tcg -> tcg - -def _host_2_tcg(type_): - if type_.startswith("TCGv"): - return type_ - raise ValueError("Don't know how to translate '%s' into a TCG type\n" % type_) - -HOST_2_TCG = { - "uint32_t": "TCGv_i32", - "uint64_t": "TCGv_i64", - "void *" : "TCGv_ptr", - "CPUArchState *": "TCGv_env", - None: _host_2_tcg, - } - - -################################################## -# tcg -> tcg helper definition - -def _tcg_2_helper_def(type_): - if type_ == "TCGv": - return "target_ulong" - else: - return type_ - -TCG_2_TCG_HELPER_DEF = { - "TCGv_i32": "uint32_t", - "TCGv_i64": "uint64_t", - "TCGv_ptr": "void *", - None: _tcg_2_helper_def, - } - - -################################################## -# tcg -> tcg helper declaration - -def _tcg_2_tcg_helper_decl_error(type_): - raise ValueError("Don't know how to translate type '%s' into a TCG helper declaration type\n" % type_) - -TCG_2_TCG_HELPER_DECL = { - "TCGv" : "tl", - "TCGv_ptr": "ptr", - "TCGv_i32": "i32", - "TCGv_i64": "i64", - "TCGv_env": "env", - None: _tcg_2_tcg_helper_decl_error, - } - - -################################################## -# host/tcg -> tcg temporal constant allocation - -def _host_2_tcg_tmp_new(type_): - if type_.startswith("TCGv"): - return "tcg_temp_new_nop" - raise ValueError("Don't know how to translate type '%s' into a TCG temporal allocation" % type_) - -HOST_2_TCG_TMP_NEW = { - "uint32_t": "tcg_const_i32", - "uint64_t": "tcg_const_i64", - "void *" : "tcg_const_ptr", - None: _host_2_tcg_tmp_new, - } - - -################################################## -# host/tcg -> tcg temporal constant deallocation - -def _host_2_tcg_tmp_free(type_): - if type_.startswith("TCGv"): - return "tcg_temp_free_nop" - raise ValueError("Don't know how to translate type '%s' into a TCG temporal deallocation" % type_) - -HOST_2_TCG_TMP_FREE = { - "uint32_t": "tcg_temp_free_i32", - "uint64_t": "tcg_temp_free_i64", - "void *" : "tcg_temp_free_ptr", - None: _host_2_tcg_tmp_free, - } diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh index b1ad99cba8..35a64bb501 100755 --- a/scripts/update-linux-headers.sh +++ b/scripts/update-linux-headers.sh @@ -160,8 +160,8 @@ done rm -rf "$output/linux-headers/linux" mkdir -p "$output/linux-headers/linux" -for header in kvm.h vfio.h vfio_ccw.h vfio_zdev.h vhost.h \ - psci.h psp-sev.h userfaultfd.h mman.h vduse.h; do +for header in const.h stddef.h kvm.h vfio.h vfio_ccw.h vfio_zdev.h vhost.h \ + psci.h psp-sev.h userfaultfd.h memfd.h mman.h nvme_ioctl.h vduse.h; do cp "$tmpdir/include/linux/$header" "$output/linux-headers/linux" done diff --git a/scripts/vmstate-static-checker.py b/scripts/vmstate-static-checker.py index dfeee8231a..9c0e6b81f2 100755 --- a/scripts/vmstate-static-checker.py +++ b/scripts/vmstate-static-checker.py @@ -134,6 +134,11 @@ def exists_in_substruct(fields, item): 
return check_fields_match(fields["Description"]["name"], substruct_fields[0]["field"], item) +def size_total(entry): + size = entry["size"] + if "num" not in entry: + return size + return size * entry["num"] def check_fields(src_fields, dest_fields, desc, sec): # This function checks for all the fields in a section. If some @@ -249,17 +254,19 @@ def check_fields(src_fields, dest_fields, desc, sec): continue if s_item["field"] == "unused" or d_item["field"] == "unused": - if s_item["size"] == d_item["size"]: + s_size = size_total(s_item) + d_size = size_total(d_item) + if s_size == d_size: continue if d_item["field"] == "unused": advance_dest = False - unused_count = d_item["size"] - s_item["size"] + unused_count = d_size - s_size continue if s_item["field"] == "unused": advance_src = False - unused_count = s_item["size"] - d_item["size"] + unused_count = s_size - d_size continue print("Section \"" + sec + "\",", end=' ') diff --git a/scsi/pr-manager.c b/scsi/pr-manager.c index 2098d7e759..fb5fc29730 100644 --- a/scsi/pr-manager.c +++ b/scsi/pr-manager.c @@ -51,7 +51,6 @@ static int pr_manager_worker(void *opaque) int coroutine_fn pr_manager_execute(PRManager *pr_mgr, AioContext *ctx, int fd, struct sg_io_hdr *hdr) { - ThreadPool *pool = aio_get_thread_pool(ctx); PRManagerData data = { .pr_mgr = pr_mgr, .fd = fd, @@ -62,7 +61,7 @@ int coroutine_fn pr_manager_execute(PRManager *pr_mgr, AioContext *ctx, int fd, /* The matching object_unref is in pr_manager_worker. */ object_ref(OBJECT(pr_mgr)); - return thread_pool_submit_co(pool, pr_manager_worker, &data); + return thread_pool_submit_co(pr_manager_worker, &data); } bool pr_manager_is_connected(PRManager *pr_mgr) diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c index 199227a556..a857e80c03 100644 --- a/scsi/qemu-pr-helper.c +++ b/scsi/qemu-pr-helper.c @@ -177,10 +177,9 @@ static int do_sgio_worker(void *opaque) return status; } -static int do_sgio(int fd, const uint8_t *cdb, uint8_t *sense, - uint8_t *buf, int *sz, int dir) +static int coroutine_fn do_sgio(int fd, const uint8_t *cdb, uint8_t *sense, + uint8_t *buf, int *sz, int dir) { - ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context()); int r; PRHelperSGIOData data = { @@ -192,7 +191,7 @@ static int do_sgio(int fd, const uint8_t *cdb, uint8_t *sense, .dir = dir, }; - r = thread_pool_submit_co(pool, do_sgio_worker, &data); + r = thread_pool_submit_co(do_sgio_worker, &data); *sz = data.sz; return r; } @@ -320,7 +319,7 @@ static SCSISense mpath_generic_sense(int r) } } -static int mpath_reconstruct_sense(int fd, int r, uint8_t *sense) +static int coroutine_fn mpath_reconstruct_sense(int fd, int r, uint8_t *sense) { switch (r) { case MPATH_PR_SUCCESS: @@ -372,8 +371,8 @@ static int mpath_reconstruct_sense(int fd, int r, uint8_t *sense) } } -static int multipath_pr_in(int fd, const uint8_t *cdb, uint8_t *sense, - uint8_t *data, int sz) +static int coroutine_fn multipath_pr_in(int fd, const uint8_t *cdb, uint8_t *sense, + uint8_t *data, int sz) { int rq_servact = cdb[1]; struct prin_resp resp; @@ -427,8 +426,8 @@ static int multipath_pr_in(int fd, const uint8_t *cdb, uint8_t *sense, return mpath_reconstruct_sense(fd, r, sense); } -static int multipath_pr_out(int fd, const uint8_t *cdb, uint8_t *sense, - const uint8_t *param, int sz) +static int coroutine_fn multipath_pr_out(int fd, const uint8_t *cdb, uint8_t *sense, + const uint8_t *param, int sz) { int rq_servact = cdb[1]; int rq_scope = cdb[2] >> 4; @@ -545,8 +544,8 @@ static int multipath_pr_out(int fd, const uint8_t
*cdb, uint8_t *sense, } #endif -static int do_pr_in(int fd, const uint8_t *cdb, uint8_t *sense, - uint8_t *data, int *resp_sz) +static int coroutine_fn do_pr_in(int fd, const uint8_t *cdb, uint8_t *sense, + uint8_t *data, int *resp_sz) { #ifdef CONFIG_MPATH if (is_mpath(fd)) { @@ -563,8 +562,8 @@ static int do_pr_in(int fd, const uint8_t *cdb, uint8_t *sense, SG_DXFER_FROM_DEV); } -static int do_pr_out(int fd, const uint8_t *cdb, uint8_t *sense, - const uint8_t *param, int sz) +static int coroutine_fn do_pr_out(int fd, const uint8_t *cdb, uint8_t *sense, + const uint8_t *param, int sz) { int resp_sz; diff --git a/semihosting/arm-compat-semi.c b/semihosting/arm-compat-semi.c index 62d8bae97f..564fe17f75 100644 --- a/semihosting/arm-compat-semi.c +++ b/semihosting/arm-compat-semi.c @@ -34,6 +34,7 @@ #include "qemu/osdep.h" #include "qemu/timer.h" #include "exec/gdbstub.h" +#include "gdbstub/syscalls.h" #include "semihosting/semihost.h" #include "semihosting/console.h" #include "semihosting/common-semi.h" diff --git a/semihosting/guestfd.c b/semihosting/guestfd.c index b05c52f26f..acb86b50dd 100644 --- a/semihosting/guestfd.c +++ b/semihosting/guestfd.c @@ -9,7 +9,7 @@ */ #include "qemu/osdep.h" -#include "exec/gdbstub.h" +#include "gdbstub/syscalls.h" #include "semihosting/semihost.h" #include "semihosting/guestfd.h" #ifdef CONFIG_USER_ONLY diff --git a/semihosting/syscalls.c b/semihosting/syscalls.c index e89992cf90..68899ebb1c 100644 --- a/semihosting/syscalls.c +++ b/semihosting/syscalls.c @@ -7,7 +7,8 @@ */ #include "qemu/osdep.h" -#include "exec/gdbstub.h" +#include "cpu.h" +#include "gdbstub/syscalls.h" #include "semihosting/guestfd.h" #include "semihosting/syscalls.h" #include "semihosting/console.h" @@ -138,46 +139,48 @@ static void gdb_open(CPUState *cs, gdb_syscall_complete_cb complete, gdb_open_complete = complete; gdb_do_syscall(gdb_open_cb, "open,%s,%x,%x", - fname, len, (target_ulong)gdb_flags, (target_ulong)mode); + (uint64_t)fname, (uint32_t)len, + (uint32_t)gdb_flags, (uint32_t)mode); } static void gdb_close(CPUState *cs, gdb_syscall_complete_cb complete, GuestFD *gf) { - gdb_do_syscall(complete, "close,%x", (target_ulong)gf->hostfd); + gdb_do_syscall(complete, "close,%x", (uint32_t)gf->hostfd); } static void gdb_read(CPUState *cs, gdb_syscall_complete_cb complete, GuestFD *gf, target_ulong buf, target_ulong len) { - gdb_do_syscall(complete, "read,%x,%x,%x", - (target_ulong)gf->hostfd, buf, len); + gdb_do_syscall(complete, "read,%x,%lx,%lx", + (uint32_t)gf->hostfd, (uint64_t)buf, (uint64_t)len); } static void gdb_write(CPUState *cs, gdb_syscall_complete_cb complete, GuestFD *gf, target_ulong buf, target_ulong len) { - gdb_do_syscall(complete, "write,%x,%x,%x", - (target_ulong)gf->hostfd, buf, len); + gdb_do_syscall(complete, "write,%x,%lx,%lx", + (uint32_t)gf->hostfd, (uint64_t)buf, (uint64_t)len); } static void gdb_lseek(CPUState *cs, gdb_syscall_complete_cb complete, GuestFD *gf, int64_t off, int gdb_whence) { gdb_do_syscall(complete, "lseek,%x,%lx,%x", - (target_ulong)gf->hostfd, off, (target_ulong)gdb_whence); + (uint32_t)gf->hostfd, off, (uint32_t)gdb_whence); } static void gdb_isatty(CPUState *cs, gdb_syscall_complete_cb complete, GuestFD *gf) { - gdb_do_syscall(complete, "isatty,%x", (target_ulong)gf->hostfd); + gdb_do_syscall(complete, "isatty,%x", (uint32_t)gf->hostfd); } static void gdb_fstat(CPUState *cs, gdb_syscall_complete_cb complete, GuestFD *gf, target_ulong addr) { - gdb_do_syscall(complete, "fstat,%x,%x", (target_ulong)gf->hostfd, addr); + 
gdb_do_syscall(complete, "fstat,%x,%lx", + (uint32_t)gf->hostfd, (uint64_t)addr); } static void gdb_stat(CPUState *cs, gdb_syscall_complete_cb complete, @@ -190,7 +193,8 @@ static void gdb_stat(CPUState *cs, gdb_syscall_complete_cb complete, return; } - gdb_do_syscall(complete, "stat,%s,%x", fname, len, addr); + gdb_do_syscall(complete, "stat,%s,%lx", + (uint64_t)fname, (uint32_t)len, (uint64_t)addr); } static void gdb_remove(CPUState *cs, gdb_syscall_complete_cb complete, @@ -202,7 +206,7 @@ static void gdb_remove(CPUState *cs, gdb_syscall_complete_cb complete, return; } - gdb_do_syscall(complete, "unlink,%s", fname, len); + gdb_do_syscall(complete, "unlink,%s", (uint64_t)fname, (uint32_t)len); } static void gdb_rename(CPUState *cs, gdb_syscall_complete_cb complete, @@ -222,7 +226,9 @@ static void gdb_rename(CPUState *cs, gdb_syscall_complete_cb complete, return; } - gdb_do_syscall(complete, "rename,%s,%s", oname, olen, nname, nlen); + gdb_do_syscall(complete, "rename,%s,%s", + (uint64_t)oname, (uint32_t)olen, + (uint64_t)nname, (uint32_t)nlen); } static void gdb_system(CPUState *cs, gdb_syscall_complete_cb complete, @@ -234,13 +240,14 @@ static void gdb_system(CPUState *cs, gdb_syscall_complete_cb complete, return; } - gdb_do_syscall(complete, "system,%s", cmd, len); + gdb_do_syscall(complete, "system,%s", (uint64_t)cmd, (uint32_t)len); } static void gdb_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete, target_ulong tv_addr, target_ulong tz_addr) { - gdb_do_syscall(complete, "gettimeofday,%x,%x", tv_addr, tz_addr); + gdb_do_syscall(complete, "gettimeofday,%lx,%lx", + (uint64_t)tv_addr, (uint64_t)tz_addr); } /* diff --git a/semihosting/uaccess.c b/semihosting/uaccess.c index 8018828069..7505eb6d1b 100644 --- a/semihosting/uaccess.c +++ b/semihosting/uaccess.c @@ -37,7 +37,7 @@ ssize_t softmmu_strlen_user(CPUArchState *env, target_ulong addr) /* Find the number of bytes remaining in the page. 
*/ left_in_page = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK); - flags = probe_access_flags(env, addr, MMU_DATA_LOAD, + flags = probe_access_flags(env, addr, 0, MMU_DATA_LOAD, mmu_idx, true, &h, 0); if (flags & TLB_INVALID_MASK) { return -1; diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c index c56f0f58c8..015a9038d1 100644 --- a/softmmu/dirtylimit.c +++ b/softmmu/dirtylimit.c @@ -20,6 +20,7 @@ #include "monitor/hmp.h" #include "monitor/monitor.h" #include "exec/memory.h" +#include "exec/target_page.h" #include "hw/boards.h" #include "sysemu/kvm.h" #include "trace.h" @@ -231,18 +232,18 @@ bool dirtylimit_vcpu_index_valid(int cpu_index) cpu_index >= ms->smp.max_cpus); } -static inline int64_t dirtylimit_dirty_ring_full_time(uint64_t dirtyrate) +static uint64_t dirtylimit_dirty_ring_full_time(uint64_t dirtyrate) { static uint64_t max_dirtyrate; - uint32_t dirty_ring_size = kvm_dirty_ring_size(); - uint64_t dirty_ring_size_meory_MB = - dirty_ring_size * TARGET_PAGE_SIZE >> 20; + uint64_t dirty_ring_size_MiB; + + dirty_ring_size_MiB = qemu_target_pages_to_MiB(kvm_dirty_ring_size()); if (max_dirtyrate < dirtyrate) { max_dirtyrate = dirtyrate; } - return dirty_ring_size_meory_MB * 1000000 / max_dirtyrate; + return dirty_ring_size_MiB * 1000000 / max_dirtyrate; } static inline bool dirtylimit_done(uint64_t quota, diff --git a/softmmu/globals.c b/softmmu/globals.c index 527edbefdd..e83b5428d1 100644 --- a/softmmu/globals.c +++ b/softmmu/globals.c @@ -43,7 +43,6 @@ int vga_interface_type = VGA_NONE; bool vga_interface_created; Chardev *parallel_hds[MAX_PARALLEL_PORTS]; int win2k_install_hack; -int singlestep; int fd_bootchk = 1; int graphic_rotate; QEMUOptionRom option_rom[MAX_OPTION_ROMS]; @@ -63,5 +62,9 @@ QemuUUID qemu_uuid; bool qemu_uuid_set; uint32_t xen_domid; -enum xen_mode xen_mode = XEN_EMULATE; +enum xen_mode xen_mode = XEN_DISABLED; bool xen_domid_restrict; +struct evtchn_backend_ops *xen_evtchn_ops; +struct gnttab_backend_ops *xen_gnttab_ops; +struct foreignmem_backend_ops *xen_foreignmem_ops; +struct xenstore_backend_ops *xen_xenstore_ops; diff --git a/softmmu/ioport.c b/softmmu/ioport.c index cb8adb0b93..b66e0a5a8e 100644 --- a/softmmu/ioport.c +++ b/softmmu/ioport.c @@ -32,11 +32,16 @@ #include "exec/address-spaces.h" #include "trace.h" -typedef struct MemoryRegionPortioList { +struct MemoryRegionPortioList { + Object obj; + MemoryRegion mr; void *portio_opaque; - MemoryRegionPortio ports[]; -} MemoryRegionPortioList; + MemoryRegionPortio *ports; +}; + +#define TYPE_MEMORY_REGION_PORTIO_LIST "memory-region-portio-list" +OBJECT_DECLARE_SIMPLE_TYPE(MemoryRegionPortioList, MEMORY_REGION_PORTIO_LIST) static uint64_t unassigned_io_read(void *opaque, hwaddr addr, unsigned size) { @@ -147,7 +152,7 @@ void portio_list_destroy(PortioList *piolist) for (i = 0; i < piolist->nr; ++i) { mrpio = container_of(piolist->regions[i], MemoryRegionPortioList, mr); object_unparent(OBJECT(&mrpio->mr)); - g_free(mrpio); + object_unref(mrpio); } g_free(piolist->regions); } @@ -224,12 +229,15 @@ static void portio_list_add_1(PortioList *piolist, unsigned off_low, unsigned off_high) { MemoryRegionPortioList *mrpio; + Object *owner; + char *name; unsigned i; /* Copy the sub-list and null-terminate it. 
*/ - mrpio = g_malloc0(sizeof(MemoryRegionPortioList) + - sizeof(MemoryRegionPortio) * (count + 1)); + mrpio = MEMORY_REGION_PORTIO_LIST( + object_new(TYPE_MEMORY_REGION_PORTIO_LIST)); mrpio->portio_opaque = piolist->opaque; + mrpio->ports = g_malloc0(sizeof(MemoryRegionPortio) * (count + 1)); memcpy(mrpio->ports, pio_init, sizeof(MemoryRegionPortio) * count); memset(mrpio->ports + count, 0, sizeof(MemoryRegionPortio)); @@ -239,8 +247,25 @@ static void portio_list_add_1(PortioList *piolist, mrpio->ports[i].base = start + off_low; } - memory_region_init_io(&mrpio->mr, piolist->owner, &portio_ops, mrpio, + /* + * The MemoryRegion owner is the MemoryRegionPortioList since that manages + * the lifecycle via the refcount + */ + memory_region_init_io(&mrpio->mr, OBJECT(mrpio), &portio_ops, mrpio, piolist->name, off_high - off_low); + + /* Reparent the MemoryRegion to the piolist owner */ + object_ref(&mrpio->mr); + object_unparent(OBJECT(&mrpio->mr)); + if (!piolist->owner) { + owner = container_get(qdev_get_machine(), "/unattached"); + } else { + owner = piolist->owner; + } + name = g_strdup_printf("%s[*]", piolist->name); + object_property_add_child(owner, name, OBJECT(&mrpio->mr)); + g_free(name); + if (piolist->flush_coalesced_mmio) { memory_region_set_flush_coalesced(&mrpio->mr); } @@ -297,3 +322,25 @@ void portio_list_del(PortioList *piolist) memory_region_del_subregion(piolist->address_space, &mrpio->mr); } } + +static void memory_region_portio_list_finalize(Object *obj) +{ + MemoryRegionPortioList *mrpio = MEMORY_REGION_PORTIO_LIST(obj); + + object_unref(&mrpio->mr); + g_free(mrpio->ports); +} + +static const TypeInfo memory_region_portio_list_info = { + .parent = TYPE_OBJECT, + .name = TYPE_MEMORY_REGION_PORTIO_LIST, + .instance_size = sizeof(MemoryRegionPortioList), + .instance_finalize = memory_region_portio_list_finalize, +}; + +static void ioport_register_types(void) +{ + type_register_static(&memory_region_portio_list_info); +} + +type_init(ioport_register_types) diff --git a/softmmu/memory.c b/softmmu/memory.c index 9d64efca26..7d9494ce70 100644 --- a/softmmu/memory.c +++ b/softmmu/memory.c @@ -534,6 +534,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr, unsigned access_size; unsigned i; MemTxResult r = MEMTX_OK; + bool reentrancy_guard_applied = false; if (!access_size_min) { access_size_min = 1; @@ -542,6 +543,19 @@ static MemTxResult access_with_adjusted_size(hwaddr addr, access_size_max = 4; } + /* Do not allow more than one simultaneous access to a device's IO Regions */ + if (mr->dev && !mr->disable_reentrancy_guard && + !mr->ram_device && !mr->ram && !mr->rom_device && !mr->readonly) { + if (mr->dev->mem_reentrancy_guard.engaged_in_io) { + warn_report_once("Blocked re-entrant IO on MemoryRegion: " + "%s at addr: 0x%" HWADDR_PRIX, + memory_region_name(mr), addr); + return MEMTX_ACCESS_ERROR; + } + mr->dev->mem_reentrancy_guard.engaged_in_io = true; + reentrancy_guard_applied = true; + } + /* FIXME: support unaligned access? 
*/ access_size = MAX(MIN(size, access_size_max), access_size_min); access_mask = MAKE_64BIT_MASK(0, access_size * 8); @@ -556,6 +570,9 @@ static MemTxResult access_with_adjusted_size(hwaddr addr, access_mask, attrs); } } + if (mr->dev && reentrancy_guard_applied) { + mr->dev->mem_reentrancy_guard.engaged_in_io = false; + } return r; } @@ -1170,6 +1187,7 @@ static void memory_region_do_init(MemoryRegion *mr, } mr->name = g_strdup(name); mr->owner = owner; + mr->dev = (DeviceState *) object_dynamic_cast(mr->owner, TYPE_DEVICE); mr->ram_block = NULL; if (name) { @@ -1601,6 +1619,7 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, uint64_t align, uint32_t ram_flags, const char *path, + ram_addr_t offset, bool readonly, Error **errp) { @@ -1612,7 +1631,7 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, mr->destructor = memory_region_destructor_ram; mr->align = align; mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, - readonly, &err); + offset, readonly, &err); if (err) { mr->size = int128_zero(); object_unparent(OBJECT(mr)); @@ -1996,6 +2015,19 @@ void memory_region_notify_iommu_one(IOMMUNotifier *notifier, } } +void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier) +{ + IOMMUTLBEvent event; + + event.type = IOMMU_NOTIFIER_UNMAP; + event.entry.target_as = &address_space_memory; + event.entry.iova = notifier->start; + event.entry.perm = IOMMU_NONE; + event.entry.addr_mask = notifier->end - notifier->start; + + memory_region_notify_iommu_one(notifier, &event); +} + void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr, int iommu_idx, IOMMUTLBEvent event) @@ -2224,7 +2256,7 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, * If memory region `mr' is NULL, do global sync. Otherwise, sync * dirty bitmap for the specified memory region. */ -static void memory_region_sync_dirty_bitmap(MemoryRegion *mr) +static void memory_region_sync_dirty_bitmap(MemoryRegion *mr, bool last_stage) { MemoryListener *listener; AddressSpace *as; @@ -2254,7 +2286,7 @@ static void memory_region_sync_dirty_bitmap(MemoryRegion *mr) * is to do a global sync, because we are not capable to * sync in a finer granularity. */ - listener->log_sync_global(listener); + listener->log_sync_global(listener, last_stage); trace_memory_region_sync_dirty(mr ? 
mr->name : "(all)", listener->name, 1); } } @@ -2318,7 +2350,7 @@ DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr, { DirtyBitmapSnapshot *snapshot; assert(mr->ram_block); - memory_region_sync_dirty_bitmap(mr); + memory_region_sync_dirty_bitmap(mr, false); snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client); memory_global_after_dirty_log_sync(); return snapshot; @@ -2844,9 +2876,9 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr) return mr && mr != container; } -void memory_global_dirty_log_sync(void) +void memory_global_dirty_log_sync(bool last_stage) { - memory_region_sync_dirty_bitmap(NULL); + memory_region_sync_dirty_bitmap(NULL, last_stage); } void memory_global_after_dirty_log_sync(void) diff --git a/softmmu/meson.build b/softmmu/meson.build index 1828db149c..974732b0f3 100644 --- a/softmmu/meson.build +++ b/softmmu/meson.build @@ -3,12 +3,11 @@ specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: [files( 'ioport.c', 'memory.c', 'physmem.c', - 'qtest.c', - 'dirtylimit.c', + 'watchpoint.c', )]) specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: [files( - 'icount.c' + 'icount.c', )]) softmmu_ss.add(files( @@ -18,10 +17,12 @@ softmmu_ss.add(files( 'cpu-throttle.c', 'cpu-timers.c', 'datadir.c', + 'dirtylimit.c', 'dma-helpers.c', 'globals.c', 'memory_mapping.c', 'qdev-monitor.c', + 'qtest.c', 'rtc.c', 'runstate-action.c', 'runstate-hmp-cmds.c', diff --git a/softmmu/physmem.c b/softmmu/physmem.c index cb998cdf23..9d7e172260 100644 --- a/softmmu/physmem.c +++ b/softmmu/physmem.c @@ -781,197 +781,6 @@ AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx) return cpu->cpu_ases[asidx].as; } -/* Add a watchpoint. */ -int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, - int flags, CPUWatchpoint **watchpoint) -{ - CPUWatchpoint *wp; - vaddr in_page; - - /* forbid ranges which are empty or run off the end of the address space */ - if (len == 0 || (addr + len - 1) < addr) { - error_report("tried to set invalid watchpoint at %" - VADDR_PRIx ", len=%" VADDR_PRIu, addr, len); - return -EINVAL; - } - wp = g_malloc(sizeof(*wp)); - - wp->vaddr = addr; - wp->len = len; - wp->flags = flags; - - /* keep all GDB-injected watchpoints in front */ - if (flags & BP_GDB) { - QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry); - } else { - QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry); - } - - in_page = -(addr | TARGET_PAGE_MASK); - if (len <= in_page) { - tlb_flush_page(cpu, addr); - } else { - tlb_flush(cpu); - } - - if (watchpoint) - *watchpoint = wp; - return 0; -} - -/* Remove a specific watchpoint. */ -int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, - int flags) -{ - CPUWatchpoint *wp; - - QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { - if (addr == wp->vaddr && len == wp->len - && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) { - cpu_watchpoint_remove_by_ref(cpu, wp); - return 0; - } - } - return -ENOENT; -} - -/* Remove a specific watchpoint by reference. */ -void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) -{ - QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry); - - tlb_flush_page(cpu, watchpoint->vaddr); - - g_free(watchpoint); -} - -/* Remove all matching watchpoints. 
*/ -void cpu_watchpoint_remove_all(CPUState *cpu, int mask) -{ - CPUWatchpoint *wp, *next; - - QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) { - if (wp->flags & mask) { - cpu_watchpoint_remove_by_ref(cpu, wp); - } - } -} - -#ifdef CONFIG_TCG -/* Return true if this watchpoint address matches the specified - * access (ie the address range covered by the watchpoint overlaps - * partially or completely with the address range covered by the - * access). - */ -static inline bool watchpoint_address_matches(CPUWatchpoint *wp, - vaddr addr, vaddr len) -{ - /* We know the lengths are non-zero, but a little caution is - * required to avoid errors in the case where the range ends - * exactly at the top of the address space and so addr + len - * wraps round to zero. - */ - vaddr wpend = wp->vaddr + wp->len - 1; - vaddr addrend = addr + len - 1; - - return !(addr > wpend || wp->vaddr > addrend); -} - -/* Return flags for watchpoints that match addr + prot. */ -int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len) -{ - CPUWatchpoint *wp; - int ret = 0; - - QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { - if (watchpoint_address_matches(wp, addr, len)) { - ret |= wp->flags; - } - } - return ret; -} - -/* Generate a debug exception if a watchpoint has been hit. */ -void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, - MemTxAttrs attrs, int flags, uintptr_t ra) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - CPUWatchpoint *wp; - - assert(tcg_enabled()); - if (cpu->watchpoint_hit) { - /* - * We re-entered the check after replacing the TB. - * Now raise the debug interrupt so that it will - * trigger after the current instruction. - */ - qemu_mutex_lock_iothread(); - cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); - qemu_mutex_unlock_iothread(); - return; - } - - if (cc->tcg_ops->adjust_watchpoint_address) { - /* this is currently used only by ARM BE32 */ - addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len); - } - QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { - if (watchpoint_address_matches(wp, addr, len) - && (wp->flags & flags)) { - if (replay_running_debug()) { - /* - * replay_breakpoint reads icount. - * Force recompile to succeed, because icount may - * be read only at the end of the block. - */ - if (!cpu->can_do_io) { - /* Force execution of one insn next time. */ - cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(cpu); - cpu_loop_exit_restore(cpu, ra); - } - /* - * Don't process the watchpoints when we are - * in a reverse debugging operation. - */ - replay_breakpoint(); - return; - } - if (flags == BP_MEM_READ) { - wp->flags |= BP_WATCHPOINT_HIT_READ; - } else { - wp->flags |= BP_WATCHPOINT_HIT_WRITE; - } - wp->hitaddr = MAX(addr, wp->vaddr); - wp->hitattrs = attrs; - - if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint && - !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { - wp->flags &= ~BP_WATCHPOINT_HIT; - continue; - } - cpu->watchpoint_hit = wp; - - mmap_lock(); - /* This call also restores vCPU state */ - tb_check_watchpoint(cpu, ra); - if (wp->flags & BP_STOP_BEFORE_ACCESS) { - cpu->exception_index = EXCP_DEBUG; - mmap_unlock(); - cpu_loop_exit(cpu); - } else { - /* Force execution of one insn next time. 
*/ - cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(cpu); - mmap_unlock(); - cpu_loop_exit_noexc(cpu); - } - } else { - wp->flags &= ~BP_WATCHPOINT_HIT; - } - } -} - -#endif /* CONFIG_TCG */ - /* Called from RCU critical section */ static RAMBlock *qemu_get_ram_block(ram_addr_t addr) { @@ -1317,15 +1126,21 @@ GString *ram_block_format(void) GString *buf = g_string_new(""); RCU_READ_LOCK_GUARD(); - g_string_append_printf(buf, "%24s %8s %18s %18s %18s\n", - "Block Name", "PSize", "Offset", "Used", "Total"); + g_string_append_printf(buf, "%24s %8s %18s %18s %18s %18s %3s\n", + "Block Name", "PSize", "Offset", "Used", "Total", + "HVA", "RO"); + RAMBLOCK_FOREACH(block) { psize = size_to_str(block->page_size); g_string_append_printf(buf, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64 - " 0x%016" PRIx64 "\n", block->idstr, psize, + " 0x%016" PRIx64 " 0x%016" PRIx64 " %3s\n", + block->idstr, psize, (uint64_t)block->offset, (uint64_t)block->used_length, - (uint64_t)block->max_length); + (uint64_t)block->max_length, + (uint64_t)(uintptr_t)block->host, + block->mr->readonly ? "ro" : "rw"); + g_free(psize); } @@ -1554,6 +1369,11 @@ static void *file_ram_alloc(RAMBlock *block, error_setg(errp, "alignment 0x%" PRIx64 " must be a power of two", block->mr->align); return NULL; + } else if (offset % block->page_size) { + error_setg(errp, "offset 0x%" PRIx64 + " must be a multiple of page size 0x%zx", + offset, block->page_size); + return NULL; } block->mr->align = MAX(block->page_size, block->mr->align); #if defined(__s390x__) @@ -1585,7 +1405,7 @@ static void *file_ram_alloc(RAMBlock *block, * those labels. Therefore, extending the non-empty backend file * is disabled as well. */ - if (truncate && ftruncate(fd, memory)) { + if (truncate && ftruncate(fd, offset + memory)) { perror("ftruncate"); } @@ -1601,6 +1421,7 @@ static void *file_ram_alloc(RAMBlock *block, } block->fd = fd; + block->fd_offset = offset; return area; } #endif @@ -2074,7 +1895,7 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, size = HOST_PAGE_ALIGN(size); file_size = get_file_size(fd); - if (file_size > 0 && file_size < size) { + if (file_size > offset && file_size < (offset + size)) { error_setg(errp, "backing store size 0x%" PRIx64 " does not match 'size' option 0x" RAM_ADDR_FMT, file_size, size); @@ -2114,7 +1935,7 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, uint32_t ram_flags, const char *mem_path, - bool readonly, Error **errp) + off_t offset, bool readonly, Error **errp) { int fd; bool created; @@ -2126,7 +1947,8 @@ RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, return NULL; } - block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, 0, readonly, errp); + block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset, readonly, + errp); if (!block) { if (created) { unlink(mem_path); @@ -2260,7 +2082,7 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) flags |= block->flags & RAM_NORESERVE ?
MAP_NORESERVE : 0; if (block->fd >= 0) { area = mmap(vaddr, length, PROT_READ | PROT_WRITE, - flags, block->fd, offset); + flags, block->fd, offset + block->fd_offset); } else { flags |= MAP_ANONYMOUS; area = mmap(vaddr, length, PROT_READ | PROT_WRITE, @@ -2712,7 +2534,7 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, } if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { assert(tcg_enabled()); - tb_invalidate_phys_range(addr, addr + length); + tb_invalidate_phys_range(addr, addr + length - 1); dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); } cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); @@ -2828,7 +2650,7 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, } else { /* RAM case */ ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); - memcpy(ram_ptr, buf, l); + memmove(ram_ptr, buf, l); invalidate_and_set_dirty(mr, addr1, l); } @@ -3118,6 +2940,8 @@ void cpu_register_map_client(QEMUBH *bh) qemu_mutex_lock(&map_client_list_lock); client->bh = bh; QLIST_INSERT_HEAD(&map_client_list, client, link); + /* Write map_client_list before reading in_use. */ + smp_mb(); if (!qatomic_read(&bounce.in_use)) { cpu_notify_map_clients_locked(); } @@ -3307,6 +3131,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, qemu_vfree(bounce.buffer); bounce.buffer = NULL; memory_region_unref(bounce.mr); + /* Clear in_use before reading map_client_list. */ qatomic_mb_set(&bounce.in_use, false); cpu_notify_map_clients(); } @@ -3357,7 +3182,7 @@ int64_t address_space_cache_init(MemoryRegionCache *cache, * cache->xlat and the end of the section. */ diff = int128_sub(cache->mrs.size, - int128_make64(cache->xlat - cache->mrs.offset_within_region)); + int128_make64(cache->xlat - cache->mrs.offset_within_region)); l = int128_get64(int128_min(diff, int128_make64(l))); mr = cache->mrs.mr; @@ -3539,6 +3364,17 @@ int qemu_target_page_bits_min(void) return TARGET_PAGE_BITS_MIN; } +/* Convert target pages to MiB (2**20). */ +size_t qemu_target_pages_to_MiB(size_t pages) +{ + int page_bits = TARGET_PAGE_BITS; + + /* So far, the largest (non-huge) page size is 64k, i.e. 16 bits. */ + g_assert(page_bits < 20); + + return pages >> (20 - page_bits); +} + bool cpu_physical_memory_is_io(hwaddr phys_addr) { MemoryRegion*mr; diff --git a/softmmu/qtest.c b/softmmu/qtest.c index 34bd2a33a7..f8d764b719 100644 --- a/softmmu/qtest.c +++ b/softmmu/qtest.c @@ -13,12 +13,12 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "cpu.h" #include "sysemu/qtest.h" #include "sysemu/runstate.h" #include "chardev/char-fe.h" #include "exec/ioport.h" #include "exec/memory.h" +#include "exec/tswap.h" #include "hw/qdev-core.h" #include "hw/irq.h" #include "qemu/accel.h" @@ -29,10 +29,6 @@ #include "qemu/module.h" #include "qemu/cutils.h" #include "qom/object_interfaces.h" -#include CONFIG_DEVICES -#ifdef CONFIG_PSERIES -#include "hw/ppc/spapr_rtas.h" -#endif #define MAX_IRQ 256 @@ -263,7 +259,7 @@ static int hex2nib(char ch) } } -static void qtest_send_prefix(CharBackend *chr) +void qtest_send_prefix(CharBackend *chr) { if (!qtest_log_fp || !qtest_opened) { return; @@ -302,8 +298,7 @@ static void qtest_send(CharBackend *chr, const char *str) qtest_server_send(qtest_server_send_opaque, str); } -static void G_GNUC_PRINTF(2, 3) qtest_sendf(CharBackend *chr, - const char *fmt, ...) +void qtest_sendf(CharBackend *chr, const char *fmt, ...) 
{ va_list ap; gchar *buffer; @@ -361,6 +356,15 @@ static void qtest_clock_warp(int64_t dest) qemu_clock_notify(QEMU_CLOCK_VIRTUAL); } +static bool (*process_command_cb)(CharBackend *chr, gchar **words); + +void qtest_set_command_cb(bool (*pc_cb)(CharBackend *chr, gchar **words)) +{ + assert(!process_command_cb); /* Switch to a list if we need more than one */ + + process_command_cb = pc_cb; +} + static void qtest_process_command(CharBackend *chr, gchar **words) { const gchar *command; @@ -713,30 +717,11 @@ static void qtest_process_command(CharBackend *chr, gchar **words) qtest_send(chr, "OK\n"); } else if (strcmp(words[0], "endianness") == 0) { qtest_send_prefix(chr); -#if TARGET_BIG_ENDIAN - qtest_sendf(chr, "OK big\n"); -#else - qtest_sendf(chr, "OK little\n"); -#endif -#ifdef CONFIG_PSERIES - } else if (strcmp(words[0], "rtas") == 0) { - uint64_t res, args, ret; - unsigned long nargs, nret; - int rc; - - rc = qemu_strtoul(words[2], NULL, 0, &nargs); - g_assert(rc == 0); - rc = qemu_strtou64(words[3], NULL, 0, &args); - g_assert(rc == 0); - rc = qemu_strtoul(words[4], NULL, 0, &nret); - g_assert(rc == 0); - rc = qemu_strtou64(words[5], NULL, 0, &ret); - g_assert(rc == 0); - res = qtest_rtas_call(words[1], nargs, args, nret, ret); - - qtest_send_prefix(chr); - qtest_sendf(chr, "OK %"PRIu64"\n", res); -#endif + if (target_words_bigendian()) { + qtest_sendf(chr, "OK big\n"); + } else { + qtest_sendf(chr, "OK little\n"); + } } else if (qtest_enabled() && strcmp(words[0], "clock_step") == 0) { int64_t ns; @@ -777,6 +762,8 @@ static void qtest_process_command(CharBackend *chr, gchar **words) qtest_send_prefix(chr); qtest_sendf(chr, "OK %"PRIi64"\n", (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + } else if (process_command_cb && process_command_cb(chr, words)) { + /* Command got consumed by the callback handler */ } else { qtest_send_prefix(chr); qtest_sendf(chr, "FAIL Unknown command '%s'\n", words[0]); @@ -867,7 +854,7 @@ void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error ** } qtest = object_new(TYPE_QTEST); - object_property_set_str(qtest, "chardev", "qtest", &error_abort); + object_property_set_str(qtest, "chardev", chr->label, &error_abort); if (qtest_log) { object_property_set_str(qtest, "log", qtest_log, &error_abort); } diff --git a/softmmu/runstate-hmp-cmds.c b/softmmu/runstate-hmp-cmds.c index d55a7d4db8..2df670f0c0 100644 --- a/softmmu/runstate-hmp-cmds.c +++ b/softmmu/runstate-hmp-cmds.c @@ -20,6 +20,7 @@ #include "qapi/error.h" #include "qapi/qapi-commands-run-state.h" #include "qapi/qmp/qdict.h" +#include "qemu/accel.h" void hmp_info_status(Monitor *mon, const QDict *qdict) { @@ -27,9 +28,8 @@ void hmp_info_status(Monitor *mon, const QDict *qdict) info = qmp_query_status(NULL); - monitor_printf(mon, "VM status: %s%s", - info->running ? "running" : "paused", - info->singlestep ? " (single step mode)" : ""); + monitor_printf(mon, "VM status: %s", + info->running ? 
"running" : "paused"); if (!info->running && info->status != RUN_STATE_PAUSED) { monitor_printf(mon, " (%s)", RunState_str(info->status)); @@ -40,16 +40,29 @@ void hmp_info_status(Monitor *mon, const QDict *qdict) qapi_free_StatusInfo(info); } -void hmp_singlestep(Monitor *mon, const QDict *qdict) +void hmp_one_insn_per_tb(Monitor *mon, const QDict *qdict) { const char *option = qdict_get_try_str(qdict, "option"); + AccelState *accel = current_accel(); + bool newval; + + if (!object_property_find(OBJECT(accel), "one-insn-per-tb")) { + monitor_printf(mon, + "This accelerator does not support setting one-insn-per-tb\n"); + return; + } + if (!option || !strcmp(option, "on")) { - singlestep = 1; + newval = true; } else if (!strcmp(option, "off")) { - singlestep = 0; + newval = false; } else { monitor_printf(mon, "unexpected option %s\n", option); + return; } + /* If the property exists then setting it can never fail */ + object_property_set_bool(OBJECT(accel), "one-insn-per-tb", + newval, &error_abort); } void hmp_watchdog_action(Monitor *mon, const QDict *qdict) diff --git a/softmmu/runstate.c b/softmmu/runstate.c index 77b1adb84f..11b72fde06 100644 --- a/softmmu/runstate.c +++ b/softmmu/runstate.c @@ -30,7 +30,7 @@ #include "crypto/cipher.h" #include "crypto/init.h" #include "exec/cpu-common.h" -#include "exec/gdbstub.h" +#include "gdbstub/syscalls.h" #include "hw/boards.h" #include "migration/misc.h" #include "migration/postcopy-ram.h" @@ -40,12 +40,14 @@ #include "qapi/error.h" #include "qapi/qapi-commands-run-state.h" #include "qapi/qapi-events-run-state.h" +#include "qemu/accel.h" #include "qemu/error-report.h" #include "qemu/job.h" #include "qemu/log.h" #include "qemu/module.h" #include "qemu/plugin.h" #include "qemu/sockets.h" +#include "qemu/timer.h" #include "qemu/thread.h" #include "qom/object.h" #include "qom/object_interfaces.h" @@ -233,9 +235,16 @@ bool runstate_needs_reset(void) StatusInfo *qmp_query_status(Error **errp) { StatusInfo *info = g_malloc0(sizeof(*info)); + AccelState *accel = current_accel(); + /* + * We ignore errors, which will happen if the accelerator + * is not TCG. "singlestep" is meaningless for other accelerators, + * so we will set the StatusInfo field to false for those. 
+ */ + info->singlestep = object_property_get_bool(OBJECT(accel), + "one-insn-per-tb", NULL); info->running = runstate_is_running(); - info->singlestep = singlestep; info->status = current_run_state; return info; diff --git a/softmmu/vl.c b/softmmu/vl.c index f29e4c4dc3..b0b96f67fa 100644 --- a/softmmu/vl.c +++ b/softmmu/vl.c @@ -182,6 +182,7 @@ static const char *log_file; static bool list_data_dirs; static const char *qtest_chrdev; static const char *qtest_log; +static bool opt_one_insn_per_tb; static int has_defaults = 1; static int default_serial = 1; @@ -883,7 +884,7 @@ static const QEMUOption qemu_options[] = { #define ARCHHEADING(text, arch_mask) #include "qemu-options.def" - { NULL }, + { /* end of list */ } }; typedef struct VGAInterfaceInfo { @@ -956,7 +957,18 @@ static const char * get_default_vga_model(const MachineClass *machine_class) { if (machine_class->default_display) { - return machine_class->default_display; + for (int t = 0; t < VGA_TYPE_MAX; t++) { + const VGAInterfaceInfo *ti = &vga_interfaces[t]; + + if (ti->opt_name && vga_interface_available(t) && + g_str_equal(ti->opt_name, machine_class->default_display)) { + return machine_class->default_display; + } + } + + warn_report_once("Default display '%s' is not available in this binary", + machine_class->default_display); + return NULL; } else if (vga_interface_available(VGA_CIRRUS)) { return "cirrus"; } else if (vga_interface_available(VGA_STD)) { @@ -1282,6 +1294,13 @@ static void qemu_disable_default_devices(void) default_monitor = 0; default_net = 0; default_vga = 0; + } else { + if (default_net && machine_class->default_nic && + !module_object_class_by_name(machine_class->default_nic)) { + warn_report("Default NIC '%s' is not available in this binary", + machine_class->default_nic); + default_net = 0; + } } } @@ -2220,7 +2239,19 @@ static int do_configure_accelerator(void *opaque, QemuOpts *opts, Error **errp) qemu_opt_foreach(opts, accelerator_set_property, accel, &error_fatal); - + /* + * If legacy -singlestep option is set, honour it for TCG and + * silently ignore for any other accelerator (which is how this + * option has always behaved). + */ + if (opt_one_insn_per_tb) { + /* + * This will always succeed for TCG, and we want to ignore + * the error from trying to set a nonexistent property + * on any other accelerator. + */ + object_property_set_bool(OBJECT(accel), "one-insn-per-tb", true, NULL); + } ret = accel_init_machine(accel, current_machine); if (ret < 0) { if (!qtest_with_kvm || ret != -ENOENT) { @@ -2432,7 +2463,7 @@ static void qemu_process_help_options(void) * to say '-cpu help -machine something'. 
*/ if (cpu_option && is_help_option(cpu_option)) { - list_cpus(cpu_option); + list_cpus(); exit(0); } @@ -2465,10 +2496,11 @@ static void qemu_maybe_daemonize(const char *pid_file) pid_file_realpath = g_malloc0(PATH_MAX); if (!realpath(pid_file, pid_file_realpath)) { - error_report("cannot resolve PID file path: %s: %s", - pid_file, strerror(errno)); - unlink(pid_file); - exit(1); + if (errno != ENOENT) { + warn_report("not removing PID file on exit: cannot resolve PID " + "file path: %s: %s", pid_file, strerror(errno)); + } + return; } qemu_unlink_pidfile_notifier = (struct UnlinkPidfileNotifier) { @@ -2954,7 +2986,7 @@ void qemu_init(int argc, char **argv) qdict_put_str(machine_opts_dict, "firmware", optarg); break; case QEMU_OPTION_singlestep: - singlestep = 1; + opt_one_insn_per_tb = true; break; case QEMU_OPTION_S: autostart = 0; @@ -3360,7 +3392,7 @@ void qemu_init(int argc, char **argv) has_defaults = 0; break; case QEMU_OPTION_xen_domid: - if (!(accel_find("xen"))) { + if (!(accel_find("xen")) && !(accel_find("kvm"))) { error_report("Option not supported for this target"); exit(1); } @@ -3582,14 +3614,19 @@ void qemu_init(int argc, char **argv) machine_class->name, machine_class->deprecation_reason); } + /* + * Create backends before creating migration objects, so that migration + * can check the backend memories for compatibility (e.g. postcopy + * over memory-backend-file objects). + */ + qemu_create_late_backends(); + /* * Note: creates a QOM object, must run only after global and * compat properties have been set up. */ migration_object_init(); - qemu_create_late_backends(); - /* parse features once if machine provides default cpu_type */ current_machine->cpu_type = machine_class->default_cpu_type; if (cpu_option) { diff --git a/softmmu/watchpoint.c b/softmmu/watchpoint.c new file mode 100644 index 0000000000..5350163385 --- /dev/null +++ b/softmmu/watchpoint.c @@ -0,0 +1,226 @@ +/* + * CPU watchpoints + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/error-report.h" +#include "exec/exec-all.h" +#include "exec/translate-all.h" +#include "sysemu/tcg.h" +#include "sysemu/replay.h" +#include "hw/core/tcg-cpu-ops.h" +#include "hw/core/cpu.h" + +/* Add a watchpoint.
*/ +int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, + int flags, CPUWatchpoint **watchpoint) +{ + CPUWatchpoint *wp; + vaddr in_page; + + /* forbid ranges which are empty or run off the end of the address space */ + if (len == 0 || (addr + len - 1) < addr) { + error_report("tried to set invalid watchpoint at %" + VADDR_PRIx ", len=%" VADDR_PRIu, addr, len); + return -EINVAL; + } + wp = g_malloc(sizeof(*wp)); + + wp->vaddr = addr; + wp->len = len; + wp->flags = flags; + + /* keep all GDB-injected watchpoints in front */ + if (flags & BP_GDB) { + QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry); + } else { + QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry); + } + + in_page = -(addr | TARGET_PAGE_MASK); + if (len <= in_page) { + tlb_flush_page(cpu, addr); + } else { + tlb_flush(cpu); + } + + if (watchpoint) { + *watchpoint = wp; + } + return 0; +} + +/* Remove a specific watchpoint. */ +int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, + int flags) +{ + CPUWatchpoint *wp; + + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + if (addr == wp->vaddr && len == wp->len + && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) { + cpu_watchpoint_remove_by_ref(cpu, wp); + return 0; + } + } + return -ENOENT; +} + +/* Remove a specific watchpoint by reference. */ +void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) +{ + QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry); + + tlb_flush_page(cpu, watchpoint->vaddr); + + g_free(watchpoint); +} + +/* Remove all matching watchpoints. */ +void cpu_watchpoint_remove_all(CPUState *cpu, int mask) +{ + CPUWatchpoint *wp, *next; + + QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) { + if (wp->flags & mask) { + cpu_watchpoint_remove_by_ref(cpu, wp); + } + } +} + +#ifdef CONFIG_TCG + +/* + * Return true if this watchpoint address matches the specified + * access (ie the address range covered by the watchpoint overlaps + * partially or completely with the address range covered by the + * access). + */ +static inline bool watchpoint_address_matches(CPUWatchpoint *wp, + vaddr addr, vaddr len) +{ + /* + * We know the lengths are non-zero, but a little caution is + * required to avoid errors in the case where the range ends + * exactly at the top of the address space and so addr + len + * wraps round to zero. + */ + vaddr wpend = wp->vaddr + wp->len - 1; + vaddr addrend = addr + len - 1; + + return !(addr > wpend || wp->vaddr > addrend); +} + +/* Return flags for watchpoints that match addr + prot. */ +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len) +{ + CPUWatchpoint *wp; + int ret = 0; + + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + if (watchpoint_address_matches(wp, addr, len)) { + ret |= wp->flags; + } + } + return ret; +} + +/* Generate a debug exception if a watchpoint has been hit. */ +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs attrs, int flags, uintptr_t ra) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + CPUWatchpoint *wp; + + assert(tcg_enabled()); + if (cpu->watchpoint_hit) { + /* + * We re-entered the check after replacing the TB. + * Now raise the debug interrupt so that it will + * trigger after the current instruction. 
+ */ + qemu_mutex_lock_iothread(); + cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); + qemu_mutex_unlock_iothread(); + return; + } + + if (cc->tcg_ops->adjust_watchpoint_address) { + /* this is currently used only by ARM BE32 */ + addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len); + } + + assert((flags & ~BP_MEM_ACCESS) == 0); + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + int hit_flags = wp->flags & flags; + + if (hit_flags && watchpoint_address_matches(wp, addr, len)) { + if (replay_running_debug()) { + /* + * replay_breakpoint reads icount. + * Force recompile to succeed, because icount may + * be read only at the end of the block. + */ + if (!cpu->can_do_io) { + /* Force execution of one insn next time. */ + cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ + | curr_cflags(cpu); + cpu_loop_exit_restore(cpu, ra); + } + /* + * Don't process the watchpoints when we are + * in a reverse debugging operation. + */ + replay_breakpoint(); + return; + } + + wp->flags |= hit_flags << BP_HIT_SHIFT; + wp->hitaddr = MAX(addr, wp->vaddr); + wp->hitattrs = attrs; + + if (wp->flags & BP_CPU + && cc->tcg_ops->debug_check_watchpoint + && !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { + wp->flags &= ~BP_WATCHPOINT_HIT; + continue; + } + cpu->watchpoint_hit = wp; + + mmap_lock(); + /* This call also restores vCPU state */ + tb_check_watchpoint(cpu, ra); + if (wp->flags & BP_STOP_BEFORE_ACCESS) { + cpu->exception_index = EXCP_DEBUG; + mmap_unlock(); + cpu_loop_exit(cpu); + } else { + /* Force execution of one insn next time. */ + cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ + | curr_cflags(cpu); + mmap_unlock(); + cpu_loop_exit_noexc(cpu); + } + } else { + wp->flags &= ~BP_WATCHPOINT_HIT; + } + } +} + +#endif /* CONFIG_TCG */ diff --git a/stats/stats-hmp-cmds.c b/stats/stats-hmp-cmds.c index 531e35d128..1f91bf8bd5 100644 --- a/stats/stats-hmp-cmds.c +++ b/stats/stats-hmp-cmds.c @@ -155,6 +155,8 @@ static StatsFilter *stats_filter(StatsTarget target, const char *names, filter->u.vcpu.vcpus = vcpu_list; break; } + case STATS_TARGET_CRYPTODEV: + break; default: break; } @@ -226,6 +228,9 @@ void hmp_info_stats(Monitor *mon, const QDict *qdict) int cpu_index = monitor_get_cpu_index(mon); filter = stats_filter(target, names, cpu_index, provider); break; + case STATS_TARGET_CRYPTODEV: + filter = stats_filter(target, names, -1, provider); + break; default: abort(); } diff --git a/stats/stats-qmp-cmds.c b/stats/stats-qmp-cmds.c index bc973747fb..e214b964fd 100644 --- a/stats/stats-qmp-cmds.c +++ b/stats/stats-qmp-cmds.c @@ -64,6 +64,8 @@ static bool invoke_stats_cb(StatsCallbacks *entry, targets = filter->u.vcpu.vcpus; } break; + case STATS_TARGET_CRYPTODEV: + break; default: abort(); } diff --git a/storage-daemon/qapi/qapi-schema.json b/storage-daemon/qapi/qapi-schema.json index 67749d1101..f10c949490 100644 --- a/storage-daemon/qapi/qapi-schema.json +++ b/storage-daemon/qapi/qapi-schema.json @@ -15,18 +15,26 @@ { 'include': '../../qapi/pragma.json' } +# Documentation generated with qapi-gen.py is in source order, with +# included sub-schemas inserted at the first include directive +# (subsequent include directives have no effect). To get a sane and +# stable order, it's best to include each sub-schema just once, or +# include it first right here. 
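The schema comment above describes how qapi-gen.py flattens includes: a sub-schema is expanded where it is first included, and later includes of the same file are no-ops. A toy model of that rule (hypothetical data structures, not the real parser) shows why pulling common.json and friends to the front pins the documentation order:

# Toy model of "first include wins": expanding an include graph with a
# 'seen' set reproduces qapi-gen.py's documentation-ordering behaviour.
SCHEMAS = {
    'common.json': ['common definitions'],
    'block-core.json': [('include', 'common.json'), 'block-core definitions'],
}

def expand(items, seen=None):
    seen = set() if seen is None else seen
    out = []
    for item in items:
        if isinstance(item, tuple) and item[0] == 'include':
            if item[1] in seen:
                continue              # subsequent includes have no effect
            seen.add(item[1])
            out.extend(expand(SCHEMAS[item[1]], seen))
        else:
            out.append(item)
    return out

# Including common.json first fixes its position, no matter how many
# later sub-schemas include it again.
print(expand([('include', 'common.json'), ('include', 'block-core.json')]))
# -> ['common definitions', 'block-core definitions']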
+ + { 'include': '../../qapi/common.json' } +{ 'include': '../../qapi/sockets.json' } +{ 'include': '../../qapi/crypto.json' } +{ 'include': '../../qapi/job.json' } + ## # = Block devices ## { 'include': '../../qapi/block-core.json' } { 'include': '../../qapi/block-export.json' } + { 'include': '../../qapi/char.json' } -{ 'include': '../../qapi/common.json' } -{ 'include': '../../qapi/control.json' } -{ 'include': '../../qapi/crypto.json' } -{ 'include': '../../qapi/introspect.json' } -{ 'include': '../../qapi/job.json' } { 'include': '../../qapi/authz.json' } -{ 'include': '../../qapi/qom.json' } -{ 'include': '../../qapi/sockets.json' } { 'include': '../../qapi/transaction.json' } +{ 'include': '../../qapi/control.json' } +{ 'include': '../../qapi/introspect.json' } +{ 'include': '../../qapi/qom.json' } diff --git a/stubs/colo-compare.c b/stubs/colo-compare.c new file mode 100644 index 0000000000..ec726665be --- /dev/null +++ b/stubs/colo-compare.c @@ -0,0 +1,7 @@ +#include "qemu/osdep.h" +#include "qemu/notify.h" +#include "net/colo-compare.h" + +void colo_compare_cleanup(void) +{ +} diff --git a/stubs/colo.c b/stubs/colo.c new file mode 100644 index 0000000000..f33379d0fd --- /dev/null +++ b/stubs/colo.c @@ -0,0 +1,37 @@ +#include "qemu/osdep.h" +#include "qemu/notify.h" +#include "net/colo-compare.h" +#include "migration/colo.h" +#include "migration/migration.h" +#include "qemu/error-report.h" +#include "qapi/qapi-commands-migration.h" + +void colo_shutdown(void) +{ +} + +int coroutine_fn colo_incoming_co(void) +{ + return 0; +} + +void colo_checkpoint_delay_set(void) +{ +} + +void migrate_start_colo_process(MigrationState *s) +{ + error_report("Impossible happened: trying to start COLO when COLO " + "module is not built in"); + abort(); +} + +bool migration_in_colo_state(void) +{ + return false; +} + +bool migration_incoming_in_colo_state(void) +{ + return false; +} diff --git a/stubs/meson.build b/stubs/meson.build index 7657467a5d..a56645e2f7 100644 --- a/stubs/meson.build +++ b/stubs/meson.build @@ -45,6 +45,8 @@ stub_ss.add(files('target-get-monitor-def.c')) stub_ss.add(files('target-monitor-defs.c')) stub_ss.add(files('trace-control.c')) stub_ss.add(files('uuid.c')) +stub_ss.add(files('colo.c')) +stub_ss.add(files('colo-compare.c')) stub_ss.add(files('vmstate.c')) stub_ss.add(files('vm-stop.c')) stub_ss.add(files('win32-kbd-hook.c')) @@ -61,4 +63,5 @@ if have_system else stub_ss.add(files('qdev.c')) endif +stub_ss.add(files('semihost-all.c')) stub_ss.add(when: 'CONFIG_VFIO_USER_SERVER', if_false: files('vfio-user-obj.c')) diff --git a/stubs/replay.c b/stubs/replay.c index 9d5b4be339..42c92e4acb 100644 --- a/stubs/replay.c +++ b/stubs/replay.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "sysemu/replay.h" +#include "exec/replay-core.h" ReplayMode replay_mode; diff --git a/stubs/semihost-all.c b/stubs/semihost-all.c new file mode 100644 index 0000000000..a2a1fc9c6f --- /dev/null +++ b/stubs/semihost-all.c @@ -0,0 +1,17 @@ +/* + * Semihosting Stubs for all targets + * + * Copyright (c) 2023 Linaro Ltd + * + * Stubs for all targets that don't actually do semihosting.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "semihosting/semihost.h"
+
+SemihostingTarget semihosting_get_target(void)
+{
+    return SEMIHOSTING_TARGET_AUTO;
+}
diff --git a/stubs/semihost.c b/stubs/semihost.c
index d65c9fd5dc..aad7a70353 100644
--- a/stubs/semihost.c
+++ b/stubs/semihost.c
@@ -28,11 +28,6 @@ bool semihosting_enabled(bool is_user)
     return false;
 }
 
-SemihostingTarget semihosting_get_target(void)
-{
-    return SEMIHOSTING_TARGET_AUTO;
-}
-
 /*
  * All the rest are empty subs. We could g_assert_not_reached() but
  * that adds extra weight to the final binary. Waste not want not.
diff --git a/subprojects/dtc b/subprojects/dtc
new file mode 160000
index 0000000000..b6910bec11
--- /dev/null
+++ b/subprojects/dtc
@@ -0,0 +1 @@
+Subproject commit b6910bec11614980a21e46fbccc35934b671bd81
diff --git a/subprojects/keycodemapdb b/subprojects/keycodemapdb
new file mode 160000
index 0000000000..f5772a62ec
--- /dev/null
+++ b/subprojects/keycodemapdb
@@ -0,0 +1 @@
+Subproject commit f5772a62ec52591ff6870b7e8ef32482371f22c6
diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index fc69783d2b..8fb61e2df2 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -32,6 +32,12 @@
 #include
 #include
+/* Necessary to provide VIRTIO_F_VERSION_1 on systems
+ * with older linux headers. Must appear before
+ * <linux/vhost.h> below.
+ */
+#include "standard-headers/linux/virtio_config.h"
+
 #if defined(__linux__)
 #include
 #include
@@ -140,7 +146,7 @@ vu_request_to_string(unsigned int req)
         REQ(VHOST_USER_SET_VRING_ENABLE),
         REQ(VHOST_USER_SEND_RARP),
         REQ(VHOST_USER_NET_SET_MTU),
-        REQ(VHOST_USER_SET_SLAVE_REQ_FD),
+        REQ(VHOST_USER_SET_BACKEND_REQ_FD),
         REQ(VHOST_USER_IOTLB_MSG),
         REQ(VHOST_USER_SET_VRING_ENDIAN),
         REQ(VHOST_USER_GET_CONFIG),
@@ -1365,7 +1371,7 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
     int qidx = vq - dev->vq;
     int fd_num = 0;
     VhostUserMsg vmsg = {
-        .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
+        .request = VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG,
         .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
         .size = sizeof(vmsg.payload.area),
         .payload.area = {
@@ -1383,7 +1389,7 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
 
     vmsg.fd_num = fd_num;
 
-    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
+    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD)) {
         return false;
     }
@@ -1461,9 +1467,9 @@ vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
      */
     uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
                         1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
-                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
+                        1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ |
                         1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
-                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD |
+                        1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD |
                         1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |
                         1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;
@@ -1494,7 +1500,7 @@ vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 
     if (vu_has_protocol_feature(dev,
                                 VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
-        (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ) ||
+        (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ) ||
         !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
        /*
        * The use case for using messages for kick/call is simulation, to make
@@ -1507,7 +1513,7 @@ vu_set_protocol_features_exec(VuDev *dev,
VhostUserMsg *vmsg) * that actually enables the simulation case. */ vu_panic(dev, - "F_IN_BAND_NOTIFICATIONS requires F_SLAVE_REQ && F_REPLY_ACK"); + "F_IN_BAND_NOTIFICATIONS requires F_BACKEND_REQ && F_REPLY_ACK"); return false; } @@ -1910,7 +1916,7 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg) return vu_get_queue_num_exec(dev, vmsg); case VHOST_USER_SET_VRING_ENABLE: return vu_set_vring_enable_exec(dev, vmsg); - case VHOST_USER_SET_SLAVE_REQ_FD: + case VHOST_USER_SET_BACKEND_REQ_FD: return vu_set_slave_req_fd(dev, vmsg); case VHOST_USER_GET_CONFIG: return vu_get_config(dev, vmsg); @@ -2416,9 +2422,9 @@ static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync) if (vq->call_fd < 0 && vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) && - vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ)) { + vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ)) { VhostUserMsg vmsg = { - .request = VHOST_USER_SLAVE_VRING_CALL, + .request = VHOST_USER_BACKEND_VRING_CALL, .flags = VHOST_USER_VERSION, .size = sizeof(vmsg.payload.state), .payload.state = { @@ -2455,6 +2461,16 @@ void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq) _vu_queue_notify(dev, vq, true); } +void vu_config_change_msg(VuDev *dev) +{ + VhostUserMsg vmsg = { + .request = VHOST_USER_BACKEND_CONFIG_CHANGE_MSG, + .flags = VHOST_USER_VERSION, + }; + + vu_message_write(dev, dev->slave_fd, &vmsg); +} + static inline void vring_used_flags_set_bit(VuVirtq *vq, int mask) { @@ -2553,6 +2569,10 @@ virtqueue_alloc_element(size_t sz, assert(sz >= sizeof(VuVirtqElement)); elem = malloc(out_sg_end); + if (!elem) { + DPRINT("%s: failed to malloc virtqueue element\n", __func__); + return NULL; + } elem->out_num = out_num; elem->in_num = in_num; elem->in_sg = (void *)elem + in_sg_ofs; @@ -2639,6 +2659,9 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) /* Now copy what we have collected and mapped */ elem = virtqueue_alloc_element(sz, out_num, in_num); + if (!elem) { + return NULL; + } elem->index = idx; for (i = 0; i < out_num; i++) { elem->out_sg[i] = iov[i]; diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h index 8cda9b8f57..49208cceaa 100644 --- a/subprojects/libvhost-user/libvhost-user.h +++ b/subprojects/libvhost-user/libvhost-user.h @@ -54,12 +54,12 @@ enum VhostUserProtocolFeature { VHOST_USER_PROTOCOL_F_RARP = 2, VHOST_USER_PROTOCOL_F_REPLY_ACK = 3, VHOST_USER_PROTOCOL_F_NET_MTU = 4, - VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5, + VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5, VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6, VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7, VHOST_USER_PROTOCOL_F_PAGEFAULT = 8, VHOST_USER_PROTOCOL_F_CONFIG = 9, - VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10, + VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10, VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12, VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14, @@ -92,7 +92,7 @@ typedef enum VhostUserRequest { VHOST_USER_SET_VRING_ENABLE = 18, VHOST_USER_SEND_RARP = 19, VHOST_USER_NET_SET_MTU = 20, - VHOST_USER_SET_SLAVE_REQ_FD = 21, + VHOST_USER_SET_BACKEND_REQ_FD = 21, VHOST_USER_IOTLB_MSG = 22, VHOST_USER_SET_VRING_ENDIAN = 23, VHOST_USER_GET_CONFIG = 24, @@ -113,13 +113,13 @@ typedef enum VhostUserRequest { } VhostUserRequest; typedef enum VhostUserSlaveRequest { - VHOST_USER_SLAVE_NONE = 0, - VHOST_USER_SLAVE_IOTLB_MSG = 1, - VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2, - VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3, - VHOST_USER_SLAVE_VRING_CALL 
= 4, - VHOST_USER_SLAVE_VRING_ERR = 5, - VHOST_USER_SLAVE_MAX + VHOST_USER_BACKEND_NONE = 0, + VHOST_USER_BACKEND_IOTLB_MSG = 1, + VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2, + VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3, + VHOST_USER_BACKEND_VRING_CALL = 4, + VHOST_USER_BACKEND_VRING_ERR = 5, + VHOST_USER_BACKEND_MAX } VhostUserSlaveRequest; typedef struct VhostUserMemoryRegion { @@ -585,6 +585,8 @@ bool vu_queue_empty(VuDev *dev, VuVirtq *vq); */ void vu_queue_notify(VuDev *dev, VuVirtq *vq); +void vu_config_change_msg(VuDev *dev); + /** * vu_queue_notify_sync: * @dev: a VuDev context diff --git a/subprojects/slirp.wrap b/subprojects/slirp.wrap new file mode 100644 index 0000000000..08291a4cf9 --- /dev/null +++ b/subprojects/slirp.wrap @@ -0,0 +1,6 @@ +[wrap-git] +url = https://gitlab.freedesktop.org/slirp/libslirp +revision = 26be815b86e8d49add8c9a8b320239b9594ff03d + +[provide] +slirp = libslirp_dep diff --git a/target/alpha/STATUS b/target/alpha/STATUS deleted file mode 100644 index 6c9744569e..0000000000 --- a/target/alpha/STATUS +++ /dev/null @@ -1,28 +0,0 @@ -(to be completed) - -Alpha emulation structure: -cpu.h : CPU definitions globally exported -exec.h : CPU definitions used only for translated code execution -helper.c : helpers that can be called either by the translated code - or the QEMU core, including the exception handler. -op_helper.c : helpers that can be called only from TCG -helper.h : TCG helpers prototypes -translate.c : Alpha instructions to micro-operations translator - -Code translator status: -The Alpha CPU instruction emulation should be quite complete with the -limitation that the VAX floating-point load and stores are not tested. -The 4 MMU modes are implemented. - -Linux user mode emulation status: -a few programs start to run. Most crash at a certain point, dereferencing a -NULL pointer. It seems that the UNIQUE register is not initialized properly. -It may appear that old executables, not relying on TLS support, run but -this is to be proved... - -Full system emulation status: -* Alpha PALCode emulation is in a very early stage and is not sufficient - to run any real OS. The alpha-softmmu target is not enabled for now. -* no hardware platform description is implemented -* there might be problems in the Alpha PALCode dedicated instructions - that would prevent to use a native PALCode image. 
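As an aside on the vhost-user hunks above: the renamed VHOST_USER_PROTOCOL_F_BACKEND_* constants keep their old bit positions (F_BACKEND_REQ == 5, F_BACKEND_SEND_FD == 10), so every vu_has_protocol_feature() check remains the same one-line bit test. A minimal sketch of that test, with the internals assumed:

    /* Sketch: true iff protocol feature bit 'fbit' was negotiated. */
    static inline bool has_protocol_feature(uint64_t negotiated, unsigned int fbit)
    {
        return !!(negotiated & (1ULL << fbit));
    }

Because only the names change and not the bit numbers, existing front-ends that negotiated the old SLAVE-named bits keep working unmodified.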
diff --git a/target/alpha/cpu-param.h b/target/alpha/cpu-param.h index 17cd14e590..68c46f7998 100644 --- a/target/alpha/cpu-param.h +++ b/target/alpha/cpu-param.h @@ -15,6 +15,4 @@ #define TARGET_PHYS_ADDR_SPACE_BITS 44 #define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS) -#define NB_MMU_MODES 3 - #endif diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h index d0abc949a8..5e67304d81 100644 --- a/target/alpha/cpu.h +++ b/target/alpha/cpu.h @@ -276,9 +276,9 @@ extern const VMStateDescription vmstate_alpha_cpu; void alpha_cpu_do_interrupt(CPUState *cpu); bool alpha_cpu_exec_interrupt(CPUState *cpu, int int_req); +hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); #endif /* !CONFIG_USER_ONLY */ void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags); -hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); diff --git a/target/alpha/gdbstub.c b/target/alpha/gdbstub.c index 7db14f4431..0f8fa150f8 100644 --- a/target/alpha/gdbstub.c +++ b/target/alpha/gdbstub.c @@ -19,7 +19,7 @@ */ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" int alpha_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { diff --git a/target/alpha/sys_helper.c b/target/alpha/sys_helper.c index 25f6cb8894..c83c92dd4c 100644 --- a/target/alpha/sys_helper.c +++ b/target/alpha/sys_helper.c @@ -20,6 +20,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" +#include "exec/tb-flush.h" #include "exec/helper-proto.h" #include "sysemu/runstate.h" #include "sysemu/sysemu.h" diff --git a/target/alpha/translate.c b/target/alpha/translate.c index f9bcdeb717..be8adb2526 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -72,7 +72,7 @@ struct DisasContext { #ifdef CONFIG_USER_ONLY #define UNALIGN(C) (C)->unalign #else -#define UNALIGN(C) 0 +#define UNALIGN(C) MO_ALIGN #endif /* Target-specific return values from translate_one, indicating the @@ -179,7 +179,6 @@ static void free_context_temps(DisasContext *ctx) { if (ctx->sink) { tcg_gen_discard_i64(ctx->sink); - tcg_temp_free(ctx->sink); ctx->sink = NULL; } } @@ -279,7 +278,6 @@ static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr) TCGv_i32 tmp32 = tcg_temp_new_i32(); tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); gen_helper_memory_to_f(dest, tmp32); - tcg_temp_free_i32(tmp32); } static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr) @@ -287,7 +285,6 @@ static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr) TCGv tmp = tcg_temp_new(); tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); gen_helper_memory_to_g(dest, tmp); - tcg_temp_free(tmp); } static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr) @@ -295,7 +292,6 @@ static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr) TCGv_i32 tmp32 = tcg_temp_new_i32(); tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); gen_helper_memory_to_s(dest, tmp32); - tcg_temp_free_i32(tmp32); } static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr) @@ -311,7 +307,6 @@ static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, TCGv addr = tcg_temp_new(); tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); func(ctx, cpu_fir[ra], addr); - tcg_temp_free(addr); } } @@ -342,7 +337,6 @@ static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16, 
tcg_gen_mov_i64(cpu_lock_addr, addr); tcg_gen_mov_i64(cpu_lock_value, dest); } - tcg_temp_free(addr); } static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr) @@ -350,7 +344,6 @@ static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr) TCGv_i32 tmp32 = tcg_temp_new_i32(); gen_helper_f_to_memory(tmp32, addr); tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); - tcg_temp_free_i32(tmp32); } static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr) @@ -358,7 +351,6 @@ static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr) TCGv tmp = tcg_temp_new(); gen_helper_g_to_memory(tmp, src); tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); - tcg_temp_free(tmp); } static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr) @@ -366,7 +358,6 @@ static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr) TCGv_i32 tmp32 = tcg_temp_new_i32(); gen_helper_s_to_memory(tmp32, src); tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); - tcg_temp_free_i32(tmp32); } static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr) @@ -380,7 +371,6 @@ static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, TCGv addr = tcg_temp_new(); tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); func(ctx, load_fpr(ctx, ra), addr); - tcg_temp_free(addr); } static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16, @@ -398,8 +388,6 @@ static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16, src = load_gpr(ctx, ra); tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op); - - tcg_temp_free(addr); } static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, @@ -416,7 +404,6 @@ static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, lab_fail = gen_new_label(); lab_done = gen_new_label(); tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); - tcg_temp_free_i64(addr); val = tcg_temp_new_i64(); tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value, @@ -426,7 +413,6 @@ static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, if (ra != 31) { tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value); } - tcg_temp_free_i64(val); tcg_gen_br(lab_done); gen_set_label(lab_fail); @@ -504,7 +490,6 @@ static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra, tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1); ret = gen_bcond_internal(ctx, cond, tmp, disp); - tcg_temp_free(tmp); return ret; } return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp); @@ -550,7 +535,6 @@ static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra)); ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp); - tcg_temp_free(cmp_tmp); return ret; } @@ -564,8 +548,6 @@ static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc) gen_fold_mzero(cond, va, load_fpr(ctx, ra)); tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc)); - - tcg_temp_free(va); } #define QUAL_RM_N 0x080 /* Round mode nearest even */ @@ -615,8 +597,6 @@ static void gen_qual_roundmode(DisasContext *ctx, int fn11) #else gen_helper_setroundmode(tmp); #endif - - tcg_temp_free_i32(tmp); } static void gen_qual_flushzero(DisasContext *ctx, int fn11) @@ -645,8 +625,6 @@ static void gen_qual_flushzero(DisasContext *ctx, int fn11) #else gen_helper_setflushzero(tmp); #endif - - tcg_temp_free_i32(tmp); } static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp) @@ -716,8 +694,6 @@ 
static void gen_cvtlq(TCGv vc, TCGv vb) tcg_gen_shri_i64(tmp, vb, 29); tcg_gen_sari_i64(vc, vb, 32); tcg_gen_deposit_i64(vc, vc, tmp, 0, 30); - - tcg_temp_free(tmp); } static void gen_ieee_arith2(DisasContext *ctx, @@ -808,8 +784,6 @@ static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask) tcg_gen_andc_i64(vc, vb, vmask); tcg_gen_or_i64(vc, vc, tmp); - - tcg_temp_free(tmp); } static void gen_ieee_arith3(DisasContext *ctx, @@ -927,7 +901,6 @@ static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, tcg_gen_neg_i64(tmp, tmp); tcg_gen_andi_i64(tmp, tmp, 0x3f); tcg_gen_shl_i64(vc, va, tmp); - tcg_temp_free(tmp); } gen_zapnoti(vc, vc, byte_mask); } @@ -948,7 +921,6 @@ static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7); tcg_gen_shli_i64(tmp, tmp, 3); tcg_gen_shr_i64(vc, va, tmp); - tcg_temp_free(tmp); gen_zapnoti(vc, vc, byte_mask); } } @@ -986,8 +958,6 @@ static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, tcg_gen_shr_i64(vc, tmp, shift); tcg_gen_shri_i64(vc, vc, 1); - tcg_temp_free(shift); - tcg_temp_free(tmp); } } @@ -1015,8 +985,6 @@ static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); tcg_gen_shli_i64(shift, shift, 3); tcg_gen_shl_i64(vc, tmp, shift); - tcg_temp_free(shift); - tcg_temp_free(tmp); } } @@ -1047,9 +1015,6 @@ static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, tcg_gen_shri_i64(mask, mask, 1); tcg_gen_andc_i64(vc, va, mask); - - tcg_temp_free(mask); - tcg_temp_free(shift); } } @@ -1069,9 +1034,6 @@ static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, tcg_gen_shl_i64(mask, mask, shift); tcg_gen_andc_i64(vc, va, mask); - - tcg_temp_free(mask); - tcg_temp_free(shift); } } @@ -1152,7 +1114,6 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode) TCGv tmp = tcg_temp_new(); tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK); st_flag_byte(tmp, ENV_FLAG_PS_SHIFT); - tcg_temp_free(tmp); } /* Allow interrupts to be recognized right away. */ @@ -1215,7 +1176,6 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode) tcg_gen_movi_i64(tmp, exc_addr); tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr)); - tcg_temp_free(tmp); entry += (palcode & 0x80 ? 
0x2000 + (palcode - 0x80) * 64 @@ -1550,7 +1510,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_shli_i64(tmp, va, 2); tcg_gen_add_i64(tmp, tmp, vb); tcg_gen_ext32s_i64(vc, tmp); - tcg_temp_free(tmp); break; case 0x09: /* SUBL */ @@ -1563,7 +1522,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_shli_i64(tmp, va, 2); tcg_gen_sub_i64(tmp, tmp, vb); tcg_gen_ext32s_i64(vc, tmp); - tcg_temp_free(tmp); break; case 0x0F: /* CMPBGE */ @@ -1580,7 +1538,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_shli_i64(tmp, va, 3); tcg_gen_add_i64(tmp, tmp, vb); tcg_gen_ext32s_i64(vc, tmp); - tcg_temp_free(tmp); break; case 0x1B: /* S8SUBL */ @@ -1588,7 +1545,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_shli_i64(tmp, va, 3); tcg_gen_sub_i64(tmp, tmp, vb); tcg_gen_ext32s_i64(vc, tmp); - tcg_temp_free(tmp); break; case 0x1D: /* CMPULT */ @@ -1603,7 +1559,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, va, 2); tcg_gen_add_i64(vc, tmp, vb); - tcg_temp_free(tmp); break; case 0x29: /* SUBQ */ @@ -1614,7 +1569,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, va, 2); tcg_gen_sub_i64(vc, tmp, vb); - tcg_temp_free(tmp); break; case 0x2D: /* CMPEQ */ @@ -1625,14 +1579,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, va, 3); tcg_gen_add_i64(vc, tmp, vb); - tcg_temp_free(tmp); break; case 0x3B: /* S8SUBQ */ tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, va, 3); tcg_gen_sub_i64(vc, tmp, vb); - tcg_temp_free(tmp); break; case 0x3D: /* CMPULE */ @@ -1646,7 +1598,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_add_i64(tmp, tmp, vc); tcg_gen_ext32s_i64(vc, tmp); gen_helper_check_overflow(cpu_env, vc, tmp); - tcg_temp_free(tmp); break; case 0x49: /* SUBL/V */ @@ -1656,7 +1607,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_sub_i64(tmp, tmp, vc); tcg_gen_ext32s_i64(vc, tmp); gen_helper_check_overflow(cpu_env, vc, tmp); - tcg_temp_free(tmp); break; case 0x4D: /* CMPLT */ @@ -1674,8 +1624,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_shri_i64(tmp, tmp, 63); tcg_gen_movi_i64(tmp2, 0); gen_helper_check_overflow(cpu_env, tmp, tmp2); - tcg_temp_free(tmp); - tcg_temp_free(tmp2); break; case 0x69: /* SUBQ/V */ @@ -1689,8 +1637,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_shri_i64(tmp, tmp, 63); tcg_gen_movi_i64(tmp2, 0); gen_helper_check_overflow(cpu_env, tmp, tmp2); - tcg_temp_free(tmp); - tcg_temp_free(tmp2); break; case 0x6D: /* CMPLE */ @@ -1744,7 +1690,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_andi_i64(tmp, va, 1); tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx), vb, load_gpr(ctx, rc)); - tcg_temp_free(tmp); break; case 0x16: /* CMOVLBC */ @@ -1752,7 +1697,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_andi_i64(tmp, va, 1); tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx), vb, load_gpr(ctx, rc)); - tcg_temp_free(tmp); break; case 0x20: /* BIS */ @@ -1884,7 +1828,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) vb = load_gpr(ctx, rb); tcg_gen_andi_i64(tmp, vb, 0x3f); tcg_gen_shr_i64(vc, va, tmp); - tcg_temp_free(tmp); } break; case 0x36: @@ 
-1900,7 +1843,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) vb = load_gpr(ctx, rb); tcg_gen_andi_i64(tmp, vb, 0x3f); tcg_gen_shl_i64(vc, va, tmp); - tcg_temp_free(tmp); } break; case 0x3B: @@ -1916,7 +1858,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) vb = load_gpr(ctx, rb); tcg_gen_andi_i64(tmp, vb, 0x3f); tcg_gen_sar_i64(vc, va, tmp); - tcg_temp_free(tmp); } break; case 0x52: @@ -1978,7 +1919,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) /* UMULH */ tmp = tcg_temp_new(); tcg_gen_mulu2_i64(tmp, vc, va, vb); - tcg_temp_free(tmp); break; case 0x40: /* MULL/V */ @@ -1988,7 +1928,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_mul_i64(tmp, tmp, vc); tcg_gen_ext32s_i64(vc, tmp); gen_helper_check_overflow(cpu_env, vc, tmp); - tcg_temp_free(tmp); break; case 0x60: /* MULQ/V */ @@ -1997,8 +1936,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_muls2_i64(vc, tmp, va, vb); tcg_gen_sari_i64(tmp2, vc, 63); gen_helper_check_overflow(cpu_env, tmp, tmp2); - tcg_temp_free(tmp); - tcg_temp_free(tmp2); break; default: goto invalid_opc; @@ -2017,7 +1954,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) va = load_gpr(ctx, ra); tcg_gen_extrl_i64_i32(t32, va); gen_helper_memory_to_s(vc, t32); - tcg_temp_free_i32(t32); break; case 0x0A: /* SQRTF */ @@ -2040,7 +1976,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) va = load_gpr(ctx, ra); tcg_gen_extrl_i64_i32(t32, va); gen_helper_memory_to_f(vc, t32); - tcg_temp_free_i32(t32); break; case 0x24: /* ITOFT */ @@ -2464,21 +2399,21 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) switch ((insn >> 12) & 0xF) { case 0x0: /* Longword physical access (hw_ldl/p) */ - tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL); + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); break; case 0x1: /* Quadword physical access (hw_ldq/p) */ - tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ); + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); break; case 0x2: /* Longword physical access with lock (hw_ldl_l/p) */ - tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL); + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); tcg_gen_mov_i64(cpu_lock_addr, addr); tcg_gen_mov_i64(cpu_lock_value, va); break; case 0x3: /* Quadword physical access with lock (hw_ldq_l/p) */ - tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ); + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); tcg_gen_mov_i64(cpu_lock_addr, addr); tcg_gen_mov_i64(cpu_lock_value, va); break; @@ -2503,11 +2438,13 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) goto invalid_opc; case 0xA: /* Longword virtual access with protection check (hw_ldl/w) */ - tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL); + tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, + MO_LESL | MO_ALIGN); break; case 0xB: /* Quadword virtual access with protection check (hw_ldq/w) */ - tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ); + tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, + MO_LEUQ | MO_ALIGN); break; case 0xC: /* Longword virtual access with alt access mode (hw_ldl/a)*/ @@ -2518,15 +2455,16 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) case 0xE: /* Longword virtual access with alternate access mode and protection checks (hw_ldl/wa) */ - tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL); + 
tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, + MO_LESL | MO_ALIGN); break; case 0xF: /* Quadword virtual access with alternate access mode and protection checks (hw_ldq/wa) */ - tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ); + tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, + MO_LEUQ | MO_ALIGN); break; } - tcg_temp_free(addr); break; } #else @@ -2550,7 +2488,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) va = load_fpr(ctx, ra); gen_helper_s_to_memory(t32, va); tcg_gen_ext_i32_i64(vc, t32); - tcg_temp_free_i32(t32); break; } @@ -2706,7 +2643,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tmp = tcg_temp_new(); tcg_gen_andi_i64(tmp, vb, 1); st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); - tcg_temp_free(tmp); tcg_gen_andi_i64(cpu_pc, vb, ~3); /* Allow interrupts to be recognized right away. */ ret = DISAS_PC_UPDATED_NOCHAIN; @@ -2727,8 +2663,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) vb = load_gpr(ctx, rb); tmp = tcg_temp_new(); tcg_gen_addi_i64(tmp, vb, disp12); - tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL); - tcg_temp_free(tmp); + tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN); break; case 0x1: /* Quadword physical access */ @@ -2736,18 +2671,17 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) vb = load_gpr(ctx, rb); tmp = tcg_temp_new(); tcg_gen_addi_i64(tmp, vb, disp12); - tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ); - tcg_temp_free(tmp); + tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); break; case 0x2: /* Longword physical access with lock */ ret = gen_store_conditional(ctx, ra, rb, disp12, - MMU_PHYS_IDX, MO_LESL); + MMU_PHYS_IDX, MO_LESL | MO_ALIGN); break; case 0x3: /* Quadword physical access with lock */ ret = gen_store_conditional(ctx, ra, rb, disp12, - MMU_PHYS_IDX, MO_LEUQ); + MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN); break; case 0x4: /* Longword virtual access */ @@ -2841,11 +2775,11 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) break; case 0x2A: /* LDL_L */ - gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1); + gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1); break; case 0x2B: /* LDQ_L */ - gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1); + gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1); break; case 0x2C: /* STL */ @@ -2858,12 +2792,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) case 0x2E: /* STL_C */ ret = gen_store_conditional(ctx, ra, rb, disp16, - ctx->mem_idx, MO_LESL); + ctx->mem_idx, MO_LESL | MO_ALIGN); break; case 0x2F: /* STQ_C */ ret = gen_store_conditional(ctx, ra, rb, disp16, - ctx->mem_idx, MO_LEUQ); + ctx->mem_idx, MO_LEUQ | MO_ALIGN); break; case 0x30: /* BR */ @@ -2996,7 +2930,6 @@ static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) ctx->base.is_jmp = translate_one(ctx, insn); free_context_temps(ctx); - translator_loop_temp_check(&ctx->base); } static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) @@ -3043,7 +2976,7 @@ static const TranslatorOps alpha_tr_ops = { .disas_log = alpha_tr_disas_log, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/arm/Kconfig b/target/arm/Kconfig index 3f3394a22b..bf57d739cd 100644 --- a/target/arm/Kconfig +++ b/target/arm/Kconfig @@ -1,5 +1,10 @@ config ARM bool + select 
ARM_COMPATIBLE_SEMIHOSTING if TCG + + # We need to select this until we move m_helper.c and the + # translate.c v7m helpers under ARM_V7M. + select ARM_V7M if TCG config AARCH64 bool diff --git a/target/arm/monitor.c b/target/arm/arm-qmp-cmds.c similarity index 90% rename from target/arm/monitor.c rename to target/arm/arm-qmp-cmds.c index ecdd5ee817..c8fa524002 100644 --- a/target/arm/monitor.c +++ b/target/arm/arm-qmp-cmds.c @@ -227,3 +227,31 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, return expansion_info; } + +static void arm_cpu_add_definition(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + CpuDefinitionInfoList **cpu_list = user_data; + CpuDefinitionInfo *info; + const char *typename; + + typename = object_class_get_name(oc); + info = g_malloc0(sizeof(*info)); + info->name = g_strndup(typename, + strlen(typename) - strlen("-" TYPE_ARM_CPU)); + info->q_typename = g_strdup(typename); + + QAPI_LIST_PREPEND(*cpu_list, info); +} + +CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) +{ + CpuDefinitionInfoList *cpu_list = NULL; + GSList *list; + + list = object_class_get_list(TYPE_ARM_CPU, false); + g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); + g_slist_free(list); + + return cpu_list; +} diff --git a/target/arm/cortex-regs.c b/target/arm/cortex-regs.c new file mode 100644 index 0000000000..ae817b08dd --- /dev/null +++ b/target/arm/cortex-regs.c @@ -0,0 +1,76 @@ +/* + * ARM Cortex-A registers + * + * This code is licensed under the GNU GPL v2 or later. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "cpregs.h" + + +static uint64_t l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + ARMCPU *cpu = env_archcpu(env); + + /* + * Number of cores is in [25:24]; otherwise we RAZ. + * If the board didn't configure the CPUs into clusters, + * we default to "all CPUs in one cluster", which might be + * more than the 4 that the hardware permits and which is + * all you can report in this two-bit field. Saturate to + * 0b11 (== 4 CPUs) rather than overflowing the field. 
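+ * Worked example (illustrative): core_count == 6 gives
+ * MIN(6 - 1, 3) << 24 == 0x03000000, i.e. the field saturates
+ * and still reports four cores.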
+ */ + return MIN(cpu->core_count - 1, 3) << 24; +} + +static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = { + { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2, + .access = PL1_RW, .readfn = l2ctlr_read, + .writefn = arm_cp_write_ignore }, + { .name = "L2CTLR", + .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2, + .access = PL1_RW, .readfn = l2ctlr_read, + .writefn = arm_cp_write_ignore }, + { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "L2ECTLR", + .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUACTLR", + .cp = 15, .opc1 = 0, .crm = 15, + .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, + { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUECTLR", + .cp = 15, .opc1 = 1, .crm = 15, + .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, + { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUMERRSR", + .cp = 15, .opc1 = 2, .crm = 15, + .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, + { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "L2MERRSR", + .cp = 15, .opc1 = 3, .crm = 15, + .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, +}; + +void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) +{ + define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo); +} diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h index 1ee64e99de..b04d344a9f 100644 --- a/target/arm/cpregs.h +++ b/target/arm/cpregs.h @@ -1071,4 +1071,10 @@ static inline bool arm_cpreg_in_idspace(const ARMCPRegInfo *ri) ri->crn, ri->crm); } +#ifdef CONFIG_USER_ONLY +static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { } +#else +void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu); +#endif + #endif /* TARGET_ARM_CPREGS_H */ diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h index 349d0aba38..b90c54727a 100644 --- a/target/arm/cpu-param.h +++ b/target/arm/cpu-param.h @@ -31,8 +31,6 @@ # define TARGET_PAGE_BITS_VARY # define TARGET_PAGE_BITS_MIN 10 -# define TARGET_TB_PCREL 1 - /* * Cache the attrs and shareability fields from the page table entry. * @@ -47,8 +45,6 @@ bool guarded; #endif -#define NB_MMU_MODES 12 - //// --- Begin LibAFL code --- #undef TARGET_TB_PCREL diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 0b333a749f..5182ed0c91 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -78,17 +78,17 @@ static vaddr arm_cpu_get_pc(CPUState *cs) void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { - /* The program counter is always up to date with TARGET_TB_PCREL. 
*/ - if (!TARGET_TB_PCREL) { + /* The program counter is always up to date with CF_PCREL. */ + if (!(tb_cflags(tb) & CF_PCREL)) { CPUARMState *env = cs->env_ptr; /* * It's OK to look at env for the current mode here, because it's * never possible for an AArch64 TB to chain to an AArch32 TB. */ if (is_a64(env)) { - env->pc = tb_pc(tb); + env->pc = tb->pc; } else { - env->regs[15] = tb_pc(tb); + env->regs[15] = tb->pc; } } } @@ -100,7 +100,7 @@ void arm_restore_state_to_opc(CPUState *cs, CPUARMState *env = cs->env_ptr; if (is_a64(env)) { - if (TARGET_TB_PCREL) { + if (tb_cflags(tb) & CF_PCREL) { env->pc = (env->pc & TARGET_PAGE_MASK) | data[0]; } else { env->pc = data[0]; @@ -108,7 +108,7 @@ void arm_restore_state_to_opc(CPUState *cs, env->condexec_bits = 0; env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; } else { - if (TARGET_TB_PCREL) { + if (tb_cflags(tb) & CF_PCREL) { env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0]; } else { env->regs[15] = data[0]; @@ -1557,6 +1557,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) Error *local_err = NULL; bool no_aa32 = false; + /* Use pc-relative instructions in system-mode */ +#ifndef CONFIG_USER_ONLY + cs->tcg_cflags |= CF_PCREL; +#endif + /* If we needed to query the host kernel for the CPU features * then it's possible that might have failed in the initfn, but * this is the first point where we can report it. diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 12b1082537..d469a2637b 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -869,6 +869,8 @@ struct ArchCPU { DynamicGDBXMLInfo dyn_sysreg_xml; DynamicGDBXMLInfo dyn_svereg_xml; + DynamicGDBXMLInfo dyn_m_systemreg_xml; + DynamicGDBXMLInfo dyn_m_secextreg_xml; /* Timers used by the generic (architected) timer */ QEMUTimer *gt_timer[NUM_GTIMERS]; @@ -1104,21 +1106,14 @@ extern const VMStateDescription vmstate_arm_cpu; void arm_cpu_do_interrupt(CPUState *cpu); void arm_v7m_cpu_do_interrupt(CPUState *cpu); -#endif /* !CONFIG_USER_ONLY */ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, MemTxAttrs *attrs); +#endif /* !CONFIG_USER_ONLY */ int arm_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -/* - * Helpers to dynamically generates XML descriptions of the sysregs - * and SVE registers. Returns the number of registers in each set. - */ -int arm_gen_dynamic_sysreg_xml(CPUState *cpu, int base_reg); -int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg); - /* Returns the dynamically generated XML for the gdb stub. * Returns a pointer to the XML contents for the specified XML file or NULL * if the XML name doesn't match the predefined one. @@ -2389,7 +2384,8 @@ static inline int arm_feature(CPUARMState *env, int feature) void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp); #if !defined(CONFIG_USER_ONLY) -/* Return true if exception levels below EL3 are in secure state, +/* + * Return true if exception levels below EL3 are in secure state, * or would be following an exception return to that level. 
* Unlike arm_is_secure() (which is always a question about the * _current_ state of the CPU) this doesn't care about the current @@ -2397,6 +2393,7 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp); */ static inline bool arm_is_secure_below_el3(CPUARMState *env) { + assert(!arm_feature(env, ARM_FEATURE_M)); if (arm_feature(env, ARM_FEATURE_EL3)) { return !(env->cp15.scr_el3 & SCR_NS); } else { @@ -2410,6 +2407,7 @@ static inline bool arm_is_secure_below_el3(CPUARMState *env) /* Return true if the CPU is AArch64 EL3 or AArch32 Mon */ static inline bool arm_is_el3_or_mon(CPUARMState *env) { + assert(!arm_feature(env, ARM_FEATURE_M)); if (arm_feature(env, ARM_FEATURE_EL3)) { if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) { /* CPU currently in AArch64 state and EL3 */ @@ -2426,6 +2424,9 @@ static inline bool arm_is_el3_or_mon(CPUARMState *env) /* Return true if the processor is in secure state */ static inline bool arm_is_secure(CPUARMState *env) { + if (arm_feature(env, ARM_FEATURE_M)) { + return env->v7m.secure; + } if (arm_is_el3_or_mon(env)) { return true; } @@ -3822,6 +3823,11 @@ static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2; } +static inline bool isar_feature_aa64_pan3(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 3; +} + static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0; diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 4066950da1..6eaf8e32cf 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -21,94 +21,18 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "cpu.h" +#include "cpregs.h" #include "qemu/module.h" #include "sysemu/kvm.h" #include "sysemu/hvf.h" +#include "sysemu/qtest.h" +#include "sysemu/tcg.h" #include "kvm_arm.h" #include "hvf_arm.h" #include "qapi/visitor.h" #include "hw/qdev-properties.h" #include "internals.h" - -static void aarch64_a35_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "arm,cortex-a35"; - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); - set_feature(&cpu->env, ARM_FEATURE_AARCH64); - set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); - set_feature(&cpu->env, ARM_FEATURE_EL2); - set_feature(&cpu->env, ARM_FEATURE_EL3); - set_feature(&cpu->env, ARM_FEATURE_PMU); - - /* From B2.2 AArch64 identification registers. 
*/ - cpu->midr = 0x411fd040; - cpu->revidr = 0; - cpu->ctr = 0x84448004; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x03010066; - cpu->id_afr0 = 0; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->isar.id_aa64pfr1 = 0; - cpu->isar.id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64dfr1 = 0; - cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64isar1 = 0; - cpu->isar.id_aa64mmfr0 = 0x00101122; - cpu->isar.id_aa64mmfr1 = 0; - cpu->clidr = 0x0a200023; - cpu->dcz_blocksize = 4; - - /* From B2.4 AArch64 Virtual Memory control registers */ - cpu->reset_sctlr = 0x00c50838; - - /* From B2.10 AArch64 performance monitor registers */ - cpu->isar.reset_pmcr_el0 = 0x410a3000; - - /* From B2.29 Cache ID registers */ - cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ - cpu->ccsidr[2] = 0x703fe03a; /* 512KB L2 cache */ - - /* From B3.5 VGIC Type register */ - cpu->gic_num_lrs = 4; - cpu->gic_vpribits = 5; - cpu->gic_vprebits = 5; - cpu->gic_pribits = 5; - - /* From C6.4 Debug ID Register */ - cpu->isar.dbgdidr = 0x3516d000; - /* From C6.5 Debug Device ID Register */ - cpu->isar.dbgdevid = 0x00110f13; - /* From C6.6 Debug Device ID Register 1 */ - cpu->isar.dbgdevid1 = 0x2; - - /* From Cortex-A35 SIMD and Floating-point Support r1p0 */ - /* From 3.2 AArch32 register summary */ - cpu->reset_fpsid = 0x41034043; - - /* From 2.2 AArch64 register summary */ - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x12111111; - cpu->isar.mvfr2 = 0x00000043; - - /* These values are the same with A53/A57/A72. */ - define_cortex_a72_a57_a53_cp_reginfo(cpu); -} +#include "cpregs.h" void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) { @@ -309,47 +233,6 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) cpu->sve_vq.map = vq_map; } -static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - ARMCPU *cpu = ARM_CPU(obj); - uint32_t value; - - /* All vector lengths are disabled when SVE is off. 
*/ - if (!cpu_isar_feature(aa64_sve, cpu)) { - value = 0; - } else { - value = cpu->sve_max_vq; - } - visit_type_uint32(v, name, &value, errp); -} - -static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - ARMCPU *cpu = ARM_CPU(obj); - uint32_t max_vq; - - if (!visit_type_uint32(v, name, &max_vq, errp)) { - return; - } - - if (kvm_enabled() && !kvm_arm_sve_supported()) { - error_setg(errp, "cannot set sve-max-vq"); - error_append_hint(errp, "SVE not supported by KVM on this host\n"); - return; - } - - if (max_vq == 0 || max_vq > ARM_MAX_VQ) { - error_setg(errp, "unsupported SVE vector length"); - error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n", - ARM_MAX_VQ); - return; - } - - cpu->sve_max_vq = max_vq; -} - /* * Note that cpu_arm_{get,set}_vq cannot use the simpler * object_property_add_bool interface because they make use of the @@ -540,7 +423,7 @@ static void cpu_arm_get_default_vec_len(Object *obj, Visitor *v, } #endif -static void aarch64_add_sve_properties(Object *obj) +void aarch64_add_sve_properties(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); uint32_t vq; @@ -563,7 +446,7 @@ static void aarch64_add_sve_properties(Object *obj) #endif } -static void aarch64_add_sme_properties(Object *obj) +void aarch64_add_sme_properties(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); uint32_t vq; @@ -628,7 +511,7 @@ static Property arm_cpu_pauth_property = static Property arm_cpu_pauth_impdef_property = DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false); -static void aarch64_add_pauth_properties(Object *obj) +void aarch64_add_pauth_properties(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); @@ -649,9 +532,6 @@ static void aarch64_add_pauth_properties(Object *obj) } } -static Property arm_cpu_lpa2_property = - DEFINE_PROP_BOOL("lpa2", ARMCPU, prop_lpa2, true); - void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp) { uint64_t t; @@ -786,316 +666,6 @@ static void aarch64_a53_initfn(Object *obj) define_cortex_a72_a57_a53_cp_reginfo(cpu); } -static void aarch64_a55_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "arm,cortex-a55"; - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); - set_feature(&cpu->env, ARM_FEATURE_AARCH64); - set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); - set_feature(&cpu->env, ARM_FEATURE_EL2); - set_feature(&cpu->env, ARM_FEATURE_EL3); - set_feature(&cpu->env, ARM_FEATURE_PMU); - - /* Ordered by B2.4 AArch64 registers by functional group */ - cpu->clidr = 0x82000023; - cpu->ctr = 0x84448004; /* L1Ip = VIPT */ - cpu->dcz_blocksize = 4; /* 64 bytes */ - cpu->isar.id_aa64dfr0 = 0x0000000010305408ull; - cpu->isar.id_aa64isar0 = 0x0000100010211120ull; - cpu->isar.id_aa64isar1 = 0x0000000000100001ull; - cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull; - cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; - cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull; - cpu->isar.id_aa64pfr0 = 0x0000000010112222ull; - cpu->isar.id_aa64pfr1 = 0x0000000000000010ull; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_dfr0 = 0x04010088; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x01011121; - cpu->isar.id_isar6 = 0x00000010; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02122211; - 
cpu->isar.id_mmfr4 = 0x00021110; - cpu->isar.id_pfr0 = 0x10010131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_pfr2 = 0x00000011; - cpu->midr = 0x412FD050; /* r2p0 */ - cpu->revidr = 0; - - /* From B2.23 CCSIDR_EL1 */ - cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ - cpu->ccsidr[1] = 0x200fe01a; /* 32KB L1 icache */ - cpu->ccsidr[2] = 0x703fe07a; /* 512KB L2 cache */ - - /* From B2.96 SCTLR_EL3 */ - cpu->reset_sctlr = 0x30c50838; - - /* From B4.45 ICH_VTR_EL2 */ - cpu->gic_num_lrs = 4; - cpu->gic_vpribits = 5; - cpu->gic_vprebits = 5; - cpu->gic_pribits = 5; - - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x13211111; - cpu->isar.mvfr2 = 0x00000043; - - /* From D5.4 AArch64 PMU register summary */ - cpu->isar.reset_pmcr_el0 = 0x410b3000; -} - -static void aarch64_a72_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "arm,cortex-a72"; - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); - set_feature(&cpu->env, ARM_FEATURE_AARCH64); - set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); - set_feature(&cpu->env, ARM_FEATURE_EL2); - set_feature(&cpu->env, ARM_FEATURE_EL3); - set_feature(&cpu->env, ARM_FEATURE_PMU); - cpu->midr = 0x410fd083; - cpu->revidr = 0x00000000; - cpu->reset_fpsid = 0x41034080; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x12111111; - cpu->isar.mvfr2 = 0x00000043; - cpu->ctr = 0x8444c004; - cpu->reset_sctlr = 0x00c50838; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x03010066; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->isar.id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64mmfr0 = 0x00001124; - cpu->isar.dbgdidr = 0x3516d000; - cpu->isar.dbgdevid = 0x01110f13; - cpu->isar.dbgdevid1 = 0x2; - cpu->isar.reset_pmcr_el0 = 0x41023000; - cpu->clidr = 0x0a200023; - cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ - cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */ - cpu->dcz_blocksize = 4; /* 64 bytes */ - cpu->gic_num_lrs = 4; - cpu->gic_vpribits = 5; - cpu->gic_vprebits = 5; - cpu->gic_pribits = 5; - define_cortex_a72_a57_a53_cp_reginfo(cpu); -} - -static void aarch64_a76_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "arm,cortex-a76"; - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); - set_feature(&cpu->env, ARM_FEATURE_AARCH64); - set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); - set_feature(&cpu->env, ARM_FEATURE_EL2); - set_feature(&cpu->env, ARM_FEATURE_EL3); - set_feature(&cpu->env, ARM_FEATURE_PMU); - - /* Ordered by B2.4 AArch64 registers by functional group */ - cpu->clidr = 0x82000023; - cpu->ctr = 0x8444C004; - cpu->dcz_blocksize = 4; - cpu->isar.id_aa64dfr0 = 0x0000000010305408ull; - cpu->isar.id_aa64isar0 = 0x0000100010211120ull; - cpu->isar.id_aa64isar1 = 0x0000000000100001ull; - cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull; - cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; - cpu->isar.id_aa64mmfr2 = 
0x0000000000001011ull; - cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */ - cpu->isar.id_aa64pfr1 = 0x0000000000000010ull; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_dfr0 = 0x04010088; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00010142; - cpu->isar.id_isar5 = 0x01011121; - cpu->isar.id_isar6 = 0x00000010; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02122211; - cpu->isar.id_mmfr4 = 0x00021110; - cpu->isar.id_pfr0 = 0x10010131; - cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */ - cpu->isar.id_pfr2 = 0x00000011; - cpu->midr = 0x414fd0b1; /* r4p1 */ - cpu->revidr = 0; - - /* From B2.18 CCSIDR_EL1 */ - cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */ - cpu->ccsidr[2] = 0x707fe03a; /* 512KB L2 cache */ - - /* From B2.93 SCTLR_EL3 */ - cpu->reset_sctlr = 0x30c50838; - - /* From B4.23 ICH_VTR_EL2 */ - cpu->gic_num_lrs = 4; - cpu->gic_vpribits = 5; - cpu->gic_vprebits = 5; - cpu->gic_pribits = 5; - - /* From B5.1 AdvSIMD AArch64 register summary */ - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x13211111; - cpu->isar.mvfr2 = 0x00000043; - - /* From D5.1 AArch64 PMU register summary */ - cpu->isar.reset_pmcr_el0 = 0x410b3000; -} - -static void aarch64_a64fx_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "arm,a64fx"; - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); - set_feature(&cpu->env, ARM_FEATURE_AARCH64); - set_feature(&cpu->env, ARM_FEATURE_EL2); - set_feature(&cpu->env, ARM_FEATURE_EL3); - set_feature(&cpu->env, ARM_FEATURE_PMU); - cpu->midr = 0x461f0010; - cpu->revidr = 0x00000000; - cpu->ctr = 0x86668006; - cpu->reset_sctlr = 0x30000180; - cpu->isar.id_aa64pfr0 = 0x0000000101111111; /* No RAS Extensions */ - cpu->isar.id_aa64pfr1 = 0x0000000000000000; - cpu->isar.id_aa64dfr0 = 0x0000000010305408; - cpu->isar.id_aa64dfr1 = 0x0000000000000000; - cpu->id_aa64afr0 = 0x0000000000000000; - cpu->id_aa64afr1 = 0x0000000000000000; - cpu->isar.id_aa64mmfr0 = 0x0000000000001122; - cpu->isar.id_aa64mmfr1 = 0x0000000011212100; - cpu->isar.id_aa64mmfr2 = 0x0000000000001011; - cpu->isar.id_aa64isar0 = 0x0000000010211120; - cpu->isar.id_aa64isar1 = 0x0000000000010001; - cpu->isar.id_aa64zfr0 = 0x0000000000000000; - cpu->clidr = 0x0000000080000023; - cpu->ccsidr[0] = 0x7007e01c; /* 64KB L1 dcache */ - cpu->ccsidr[1] = 0x2007e01c; /* 64KB L1 icache */ - cpu->ccsidr[2] = 0x70ffe07c; /* 8MB L2 cache */ - cpu->dcz_blocksize = 6; /* 256 bytes */ - cpu->gic_num_lrs = 4; - cpu->gic_vpribits = 5; - cpu->gic_vprebits = 5; - cpu->gic_pribits = 5; - - /* The A64FX supports only 128, 256 and 512 bit vector lengths */ - aarch64_add_sve_properties(obj); - cpu->sve_vq.supported = (1 << 0) /* 128bit */ - | (1 << 1) /* 256bit */ - | (1 << 3); /* 512bit */ - - cpu->isar.reset_pmcr_el0 = 0x46014040; - - /* TODO: Add A64FX specific HPC extension registers */ -} - -static void aarch64_neoverse_n1_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "arm,neoverse-n1"; - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); - set_feature(&cpu->env, ARM_FEATURE_AARCH64); - set_feature(&cpu->env, 
ARM_FEATURE_CBAR_RO); - set_feature(&cpu->env, ARM_FEATURE_EL2); - set_feature(&cpu->env, ARM_FEATURE_EL3); - set_feature(&cpu->env, ARM_FEATURE_PMU); - - /* Ordered by B2.4 AArch64 registers by functional group */ - cpu->clidr = 0x82000023; - cpu->ctr = 0x8444c004; - cpu->dcz_blocksize = 4; - cpu->isar.id_aa64dfr0 = 0x0000000110305408ull; - cpu->isar.id_aa64isar0 = 0x0000100010211120ull; - cpu->isar.id_aa64isar1 = 0x0000000000100001ull; - cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull; - cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; - cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull; - cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */ - cpu->isar.id_aa64pfr1 = 0x0000000000000020ull; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_dfr0 = 0x04010088; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00010142; - cpu->isar.id_isar5 = 0x01011121; - cpu->isar.id_isar6 = 0x00000010; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02122211; - cpu->isar.id_mmfr4 = 0x00021110; - cpu->isar.id_pfr0 = 0x10010131; - cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */ - cpu->isar.id_pfr2 = 0x00000011; - cpu->midr = 0x414fd0c1; /* r4p1 */ - cpu->revidr = 0; - - /* From B2.23 CCSIDR_EL1 */ - cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */ - cpu->ccsidr[2] = 0x70ffe03a; /* 1MB L2 cache */ - - /* From B2.98 SCTLR_EL3 */ - cpu->reset_sctlr = 0x30c50838; - - /* From B4.23 ICH_VTR_EL2 */ - cpu->gic_num_lrs = 4; - cpu->gic_vpribits = 5; - cpu->gic_vprebits = 5; - cpu->gic_pribits = 5; - - /* From B5.1 AdvSIMD AArch64 register summary */ - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x13211111; - cpu->isar.mvfr2 = 0x00000043; - - /* From D5.1 AArch64 PMU register summary */ - cpu->isar.reset_pmcr_el0 = 0x410c3000; -} - static void aarch64_host_initfn(Object *obj) { #if defined(CONFIG_KVM) @@ -1114,204 +684,27 @@ static void aarch64_host_initfn(Object *obj) #endif } -/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host); - * otherwise, a CPU with as many features enabled as our emulation supports. - * The version of '-cpu max' for qemu-system-arm is defined in cpu.c; - * this only needs to handle 64 bits. - */ static void aarch64_max_initfn(Object *obj) { - ARMCPU *cpu = ARM_CPU(obj); - uint64_t t; - uint32_t u; - if (kvm_enabled() || hvf_enabled()) { /* With KVM or HVF, '-cpu max' is identical to '-cpu host' */ aarch64_host_initfn(obj); return; } + if (tcg_enabled() || qtest_enabled()) { + aarch64_a57_initfn(obj); + } + /* '-cpu max' for TCG: we currently do this as "A57 with extra things" */ - - aarch64_a57_initfn(obj); - - /* - * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real - * one and try to apply errata workarounds or use impdef features we - * don't provide. - * An IMPLEMENTER field of 0 means "reserved for software use"; - * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers - * to see which features are present"; - * the VARIANT, PARTNUM and REVISION fields are all implementation - * defined and we choose to define PARTNUM just in case guest - * code needs to distinguish this QEMU CPU from other software - * implementations, though this shouldn't be needed. 
- */ - t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0); - t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf); - t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q'); - t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0); - t = FIELD_DP64(t, MIDR_EL1, REVISION, 0); - cpu->midr = t; - - /* - * We're going to set FEAT_S2FWB, which mandates that CLIDR_EL1.{LoUU,LoUIS} - * are zero. - */ - u = cpu->clidr; - u = FIELD_DP32(u, CLIDR_EL1, LOUIS, 0); - u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0); - cpu->clidr = u; - - t = cpu->isar.id_aa64isar0; - t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */ - t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */ - t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* FEAT_SHA512 */ - t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1); - t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2); /* FEAT_LSE */ - t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1); /* FEAT_RDM */ - t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1); /* FEAT_SHA3 */ - t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1); /* FEAT_SM3 */ - t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1); /* FEAT_SM4 */ - t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1); /* FEAT_DotProd */ - t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1); /* FEAT_FHM */ - t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* FEAT_FlagM2 */ - t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */ - t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1); /* FEAT_RNG */ - cpu->isar.id_aa64isar0 = t; - - t = cpu->isar.id_aa64isar1; - t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */ - t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); /* FEAT_JSCVT */ - t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); /* FEAT_FCMA */ - t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* FEAT_LRCPC2 */ - t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1); /* FEAT_FRINTTS */ - t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1); /* FEAT_SB */ - t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1); /* FEAT_SPECRES */ - t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1); /* FEAT_BF16 */ - t = FIELD_DP64(t, ID_AA64ISAR1, DGH, 1); /* FEAT_DGH */ - t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */ - cpu->isar.id_aa64isar1 = t; - - t = cpu->isar.id_aa64pfr0; - t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */ - t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */ - t = FIELD_DP64(t, ID_AA64PFR0, RAS, 2); /* FEAT_RASv1p1 + FEAT_DoubleFault */ - t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1); - t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */ - t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */ - t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 2); /* FEAT_CSV2_2 */ - t = FIELD_DP64(t, ID_AA64PFR0, CSV3, 1); /* FEAT_CSV3 */ - cpu->isar.id_aa64pfr0 = t; - - t = cpu->isar.id_aa64pfr1; - t = FIELD_DP64(t, ID_AA64PFR1, BT, 1); /* FEAT_BTI */ - t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2); /* FEAT_SSBS2 */ - /* - * Begin with full support for MTE. This will be downgraded to MTE=0 - * during realize if the board provides no tag memory, much like - * we do for EL2 with the virtualization=on property. 
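- *
- * For example (sketch): "-machine virt -cpu max" without mte=on
- * provides no tag memory, so the guest reads ID_AA64PFR1_EL1.MTE
- * as 0 despite the 3 written below.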
- */ - t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */ - t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0); /* FEAT_RASv1p1 + FEAT_DoubleFault */ - t = FIELD_DP64(t, ID_AA64PFR1, SME, 1); /* FEAT_SME */ - t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */ - cpu->isar.id_aa64pfr1 = t; - - t = cpu->isar.id_aa64mmfr0; - t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */ - t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 1); /* 16k pages supported */ - t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 2); /* 16k stage2 supported */ - t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN64_2, 2); /* 64k stage2 supported */ - t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2); /* 4k stage2 supported */ - t = FIELD_DP64(t, ID_AA64MMFR0, FGT, 1); /* FEAT_FGT */ - cpu->isar.id_aa64mmfr0 = t; - - t = cpu->isar.id_aa64mmfr1; - t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2); /* FEAT_HAFDBS */ - t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */ - t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */ - t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* FEAT_HPDS */ - t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */ - t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* FEAT_PAN2 */ - t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */ - t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 1); /* FEAT_ETS */ - t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */ - cpu->isar.id_aa64mmfr1 = t; - - t = cpu->isar.id_aa64mmfr2; - t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* FEAT_TTCNP */ - t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); /* FEAT_UAO */ - t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */ - t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */ - t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* FEAT_TTST */ - t = FIELD_DP64(t, ID_AA64MMFR2, IDS, 1); /* FEAT_IDST */ - t = FIELD_DP64(t, ID_AA64MMFR2, FWB, 1); /* FEAT_S2FWB */ - t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */ - t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */ - t = FIELD_DP64(t, ID_AA64MMFR2, EVT, 2); /* FEAT_EVT */ - t = FIELD_DP64(t, ID_AA64MMFR2, E0PD, 1); /* FEAT_E0PD */ - cpu->isar.id_aa64mmfr2 = t; - - t = cpu->isar.id_aa64zfr0; - t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1); - t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* FEAT_SVE_PMULL128 */ - t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1); /* FEAT_SVE_BitPerm */ - t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1); /* FEAT_BF16 */ - t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1); /* FEAT_SVE_SHA3 */ - t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1); /* FEAT_SVE_SM4 */ - t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1); /* FEAT_I8MM */ - t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1); /* FEAT_F32MM */ - t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1); /* FEAT_F64MM */ - cpu->isar.id_aa64zfr0 = t; - - t = cpu->isar.id_aa64dfr0; - t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 9); /* FEAT_Debugv8p4 */ - t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 6); /* FEAT_PMUv3p5 */ - cpu->isar.id_aa64dfr0 = t; - - t = cpu->isar.id_aa64smfr0; - t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1); /* FEAT_SME */ - t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1); /* FEAT_SME */ - t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1); /* FEAT_SME */ - t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf); /* FEAT_SME */ - t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1); /* FEAT_SME_F64F64 */ - t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */ - t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1); /* FEAT_SME_FA64 */ - cpu->isar.id_aa64smfr0 = t; - - /* Replicate the same data to the 32-bit id registers. 
*/ - aa32_max_features(cpu); - -#ifdef CONFIG_USER_ONLY - /* - * For usermode -cpu max we can use a larger and more efficient DCZ - * blocksize since we don't have to follow what the hardware does. - */ - cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */ - cpu->dcz_blocksize = 7; /* 512 bytes */ -#endif - - cpu->sve_vq.supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ); - cpu->sme_vq.supported = SVE_VQ_POW2_MAP; - - aarch64_add_pauth_properties(obj); - aarch64_add_sve_properties(obj); - aarch64_add_sme_properties(obj); - object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq, - cpu_max_set_sve_max_vq, NULL, NULL); - qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property); + if (tcg_enabled()) { + aarch64_max_tcg_initfn(obj); + } } static const ARMCPUInfo aarch64_cpus[] = { - { .name = "cortex-a35", .initfn = aarch64_a35_initfn }, { .name = "cortex-a57", .initfn = aarch64_a57_initfn }, { .name = "cortex-a53", .initfn = aarch64_a53_initfn }, - { .name = "cortex-a55", .initfn = aarch64_a55_initfn }, - { .name = "cortex-a72", .initfn = aarch64_a72_initfn }, - { .name = "cortex-a76", .initfn = aarch64_a76_initfn }, - { .name = "a64fx", .initfn = aarch64_a64fx_initfn }, - { .name = "neoverse-n1", .initfn = aarch64_neoverse_n1_initfn }, { .name = "max", .initfn = aarch64_max_initfn }, #if defined(CONFIG_KVM) || defined(CONFIG_HVF) { .name = "host", .initfn = aarch64_host_initfn }, diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index dfc8b2a1a5..d41cc643b1 100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -949,8 +949,10 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { .access = PL0_R, .accessfn = access_tdcc, .type = ARM_CP_CONST, .resetvalue = 0 }, /* - * OSDTRRX_EL1/OSDTRTX_EL1 are used for save and restore of DBGDTRRX_EL0. - * It is a component of the Debug Communications Channel, which is not implemented. + * These registers belong to the Debug Communications Channel, + * which is not implemented. However we implement RAZ/WI behaviour + * with trapping to prevent spurious SIGILLs if the guest OS does + * access them as the support cannot be probed for. */ { .name = "OSDTRRX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 2, @@ -960,6 +962,11 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, .access = PL1_RW, .accessfn = access_tdcc, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* DBGDTRTX_EL0/DBGDTRRX_EL0 depend on direction */ + { .name = "DBGDTR_EL0", .state = ARM_CP_STATE_BOTH, .cp = 14, + .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0, + .access = PL0_RW, .accessfn = access_tdcc, + .type = ARM_CP_CONST, .resetvalue = 0 }, /* * OSECCR_EL1 provides a mechanism for an operating system * to access the contents of EDECCR. 
EDECCR is not implemented though, diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c index 2f806512d0..03b17c814f 100644 --- a/target/arm/gdbstub.c +++ b/target/arm/gdbstub.c @@ -20,6 +20,8 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/gdbstub.h" +#include "gdbstub/helpers.h" +#include "sysemu/tcg.h" #include "internals.h" #include "cpregs.h" @@ -305,7 +307,7 @@ static void arm_register_sysreg_for_xml(gpointer key, gpointer value, } } -int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg) +static int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg) { ARMCPU *cpu = ARM_CPU(cs); GString *s = g_string_new(NULL); @@ -322,125 +324,163 @@ int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg) return cpu->dyn_sysreg_xml.num; } -struct TypeSize { - const char *gdb_type; - int size; - const char sz, suffix; +typedef enum { + M_SYSREG_MSP, + M_SYSREG_PSP, + M_SYSREG_PRIMASK, + M_SYSREG_CONTROL, + M_SYSREG_BASEPRI, + M_SYSREG_FAULTMASK, + M_SYSREG_MSPLIM, + M_SYSREG_PSPLIM, +} MProfileSysreg; + +static const struct { + const char *name; + int feature; +} m_sysreg_def[] = { + [M_SYSREG_MSP] = { "msp", ARM_FEATURE_M }, + [M_SYSREG_PSP] = { "psp", ARM_FEATURE_M }, + [M_SYSREG_PRIMASK] = { "primask", ARM_FEATURE_M }, + [M_SYSREG_CONTROL] = { "control", ARM_FEATURE_M }, + [M_SYSREG_BASEPRI] = { "basepri", ARM_FEATURE_M_MAIN }, + [M_SYSREG_FAULTMASK] = { "faultmask", ARM_FEATURE_M_MAIN }, + [M_SYSREG_MSPLIM] = { "msplim", ARM_FEATURE_V8 }, + [M_SYSREG_PSPLIM] = { "psplim", ARM_FEATURE_V8 }, }; -static const struct TypeSize vec_lanes[] = { - /* quads */ - { "uint128", 128, 'q', 'u' }, - { "int128", 128, 'q', 's' }, - /* 64 bit */ - { "ieee_double", 64, 'd', 'f' }, - { "uint64", 64, 'd', 'u' }, - { "int64", 64, 'd', 's' }, - /* 32 bit */ - { "ieee_single", 32, 's', 'f' }, - { "uint32", 32, 's', 'u' }, - { "int32", 32, 's', 's' }, - /* 16 bit */ - { "ieee_half", 16, 'h', 'f' }, - { "uint16", 16, 'h', 'u' }, - { "int16", 16, 'h', 's' }, - /* bytes */ - { "uint8", 8, 'b', 'u' }, - { "int8", 8, 'b', 's' }, -}; +static uint32_t *m_sysreg_ptr(CPUARMState *env, MProfileSysreg reg, bool sec) +{ + uint32_t *ptr; + switch (reg) { + case M_SYSREG_MSP: + ptr = arm_v7m_get_sp_ptr(env, sec, false, true); + break; + case M_SYSREG_PSP: + ptr = arm_v7m_get_sp_ptr(env, sec, true, true); + break; + case M_SYSREG_MSPLIM: + ptr = &env->v7m.msplim[sec]; + break; + case M_SYSREG_PSPLIM: + ptr = &env->v7m.psplim[sec]; + break; + case M_SYSREG_PRIMASK: + ptr = &env->v7m.primask[sec]; + break; + case M_SYSREG_BASEPRI: + ptr = &env->v7m.basepri[sec]; + break; + case M_SYSREG_FAULTMASK: + ptr = &env->v7m.faultmask[sec]; + break; + case M_SYSREG_CONTROL: + ptr = &env->v7m.control[sec]; + break; + default: + return NULL; + } + return arm_feature(env, m_sysreg_def[reg].feature) ? ptr : NULL; +} -int arm_gen_dynamic_svereg_xml(CPUState *cs, int base_reg) +static int m_sysreg_get(CPUARMState *env, GByteArray *buf, + MProfileSysreg reg, bool secure) +{ + uint32_t *ptr = m_sysreg_ptr(env, reg, secure); + + if (ptr == NULL) { + return 0; + } + return gdb_get_reg32(buf, *ptr); +} + +static int arm_gdb_get_m_systemreg(CPUARMState *env, GByteArray *buf, int reg) +{ + /* + * Here, we emulate MRS instruction, where CONTROL has a mix of + * banked and non-banked bits. 
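+     * For example, CONTROL.SPSEL and CONTROL.nPRIV are banked between
+     * Secure and Non-secure state, while CONTROL.FPCA lives only in the
+     * Secure bank; arm_v7m_mrs_control() folds these together into the
+     * value an MRS executed in the current security state would see.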
+     */
+    if (reg == M_SYSREG_CONTROL) {
+        return gdb_get_reg32(buf, arm_v7m_mrs_control(env, env->v7m.secure));
+    }
+    return m_sysreg_get(env, buf, reg, env->v7m.secure);
+}
+
+static int arm_gdb_set_m_systemreg(CPUARMState *env, uint8_t *buf, int reg)
+{
+    return 0; /* TODO */
+}
+
+static int arm_gen_dynamic_m_systemreg_xml(CPUState *cs, int orig_base_reg)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    GString *s = g_string_new(NULL);
+    int base_reg = orig_base_reg;
+    int i;
+
+    g_string_printf(s, "<?xml version=\"1.0\"?>");
+    g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
+    g_string_append_printf(s, "<feature name=\"org.gnu.gdb.arm.m-system\">\n");
+
+    for (i = 0; i < ARRAY_SIZE(m_sysreg_def); i++) {
+        if (arm_feature(env, m_sysreg_def[i].feature)) {
+            g_string_append_printf(s,
+                                   "<reg name=\"%s\" bitsize=\"32\" regnum=\"%d\"/>\n",
+                                   m_sysreg_def[i].name, base_reg++);
+        }
+    }
+
+    g_string_append_printf(s, "</feature>");
+    cpu->dyn_m_systemreg_xml.desc = g_string_free(s, false);
+    cpu->dyn_m_systemreg_xml.num = base_reg - orig_base_reg;
+
+    return cpu->dyn_m_systemreg_xml.num;
+}
+
+#ifndef CONFIG_USER_ONLY
+/*
+ * For user-only, we see the non-secure registers via m_systemreg above.
+ * For secext, encode the non-secure view as even and secure view as odd.
+ */
+static int arm_gdb_get_m_secextreg(CPUARMState *env, GByteArray *buf, int reg)
+{
+    return m_sysreg_get(env, buf, reg >> 1, reg & 1);
+}
+
+static int arm_gdb_set_m_secextreg(CPUARMState *env, uint8_t *buf, int reg)
+{
+    return 0; /* TODO */
+}
+
+static int arm_gen_dynamic_m_secextreg_xml(CPUState *cs, int orig_base_reg)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     GString *s = g_string_new(NULL);
-    DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml;
-    g_autoptr(GString) ts = g_string_new("");
-    int i, j, bits, reg_width = (cpu->sve_max_vq * 128);
-    info->num = 0;
+    int base_reg = orig_base_reg;
+    int i;
+
     g_string_printf(s, "<?xml version=\"1.0\"?>");
     g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
-    g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">");
+    g_string_append_printf(s, "<feature name=\"org.gnu.gdb.arm.secext\">\n");
 
-    /* First define types and totals in a whole VL */
-    for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
-        int count = reg_width / vec_lanes[i].size;
-        g_string_printf(ts, "svev%c%c", vec_lanes[i].sz, vec_lanes[i].suffix);
+    for (i = 0; i < ARRAY_SIZE(m_sysreg_def); i++) {
         g_string_append_printf(s,
-                               "<vector id=\"%s\" type=\"%s\" count=\"%d\"/>",
-                               ts->str, vec_lanes[i].gdb_type, count);
-    }
-    /*
-     * Now define a union for each size group containing unsigned and
-     * signed and potentially float versions of each size from 128 to
-     * 8 bits.
-     */
-    for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
-        const char suf[] = { 'q', 'd', 's', 'h', 'b' };
-        g_string_append_printf(s, "<union id=\"svevn%c\">", suf[i]);
-        for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
-            if (vec_lanes[j].size == bits) {
-                g_string_append_printf(s, "<field name=\"%c\" type=\"svev%c%c\"/>",
-                                       vec_lanes[j].suffix,
-                                       vec_lanes[j].sz, vec_lanes[j].suffix);
-            }
-        }
-        g_string_append(s, "</union>");
-    }
-    /* And now the final union of unions */
-    g_string_append(s, "<union id=\"svev\">");
-    for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
-        const char suf[] = { 'q', 'd', 's', 'h', 'b' };
-        g_string_append_printf(s, "<field name=\"%c\" type=\"svevn%c\"/>",
-                               suf[i], suf[i]);
-    }
-    g_string_append(s, "</union>");
-
-    /* Finally the sve prefix type */
-    g_string_append_printf(s,
-                           "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
-                           reg_width / 8);
-
-    /* Then define each register in parts for each vq */
-    for (i = 0; i < 32; i++) {
+                               "<reg name=\"%s_ns\" bitsize=\"32\" regnum=\"%d\"/>\n",
+                               m_sysreg_def[i].name, base_reg++);
         g_string_append_printf(s,
-                               "<reg name=\"z%d\" bitsize=\"%d\" regnum=\"%d\" type=\"svev\"/>",
-                               i, reg_width, base_reg++);
-        info->num++;
+                               "<reg name=\"%s_s\" bitsize=\"32\" regnum=\"%d\"/>\n",
+                               m_sysreg_def[i].name, base_reg++);
     }
-    /* fpscr & status registers */
-    g_string_append_printf(s, "<reg name=\"fpsr\" bitsize=\"32\" regnum=\"%d\" group=\"float\" type=\"int\"/>", base_reg++);
-    g_string_append_printf(s, "<reg name=\"fpcr\" bitsize=\"32\" regnum=\"%d\" group=\"float\" type=\"int\"/>", base_reg++);
-    info->num += 2;
-    for (i = 0; i < 16; i++) {
-        g_string_append_printf(s,
-                               "<reg name=\"p%d\" bitsize=\"%d\" regnum=\"%d\" type=\"svep\"/>",
-                               i, cpu->sve_max_vq * 16, base_reg++);
-        info->num++;
-    }
-    g_string_append_printf(s,
-                           "<reg name=\"ffr\" bitsize=\"%d\" regnum=\"%d\" group=\"vector\" type=\"svep\"/>",
-                           cpu->sve_max_vq * 16, base_reg++);
-    g_string_append_printf(s,
-                           "<reg name=\"vg\" bitsize=\"64\" regnum=\"%d\" type=\"int\"/>",
-                           base_reg++);
-    info->num += 2;
 
     g_string_append_printf(s, "</feature>");
-    cpu->dyn_svereg_xml.desc = g_string_free(s, false);
+    cpu->dyn_m_secextreg_xml.desc = g_string_free(s, false);
+    cpu->dyn_m_secextreg_xml.num = base_reg - orig_base_reg;
 
-    return cpu->dyn_svereg_xml.num;
+    return cpu->dyn_m_secextreg_xml.num;
 }
-
+#endif
 
 const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
 {
@@ -450,6 +490,12 @@ const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
         return cpu->dyn_sysreg_xml.desc;
     } else if (strcmp(xmlname, "sve-registers.xml") == 0) {
         return cpu->dyn_svereg_xml.desc;
+    } else if (strcmp(xmlname, "arm-m-system.xml") == 0) {
+        return cpu->dyn_m_systemreg_xml.desc;
+#ifndef CONFIG_USER_ONLY
+    } else if (strcmp(xmlname, "arm-m-secext.xml") == 0) {
+        return cpu->dyn_m_secextreg_xml.desc;
+#endif
     }
     return NULL;
 }
@@ -466,14 +512,26 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
      */
 #ifdef TARGET_AARCH64
     if (isar_feature_aa64_sve(&cpu->isar)) {
-        gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg,
-                                 arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs),
+        int nreg = arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs);
+        gdb_register_coprocessor(cs, aarch64_gdb_get_sve_reg,
+                                 aarch64_gdb_set_sve_reg, nreg,
                                  "sve-registers.xml", 0);
     } else {
-        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
-                                 aarch64_fpu_gdb_set_reg,
+        gdb_register_coprocessor(cs, aarch64_gdb_get_fpu_reg,
+                                 aarch64_gdb_set_fpu_reg,
                                  34, "aarch64-fpu.xml", 0);
     }
+    /*
+     * Note that we report pauth information via the feature name
+     * org.gnu.gdb.aarch64.pauth_v2, not org.gnu.gdb.aarch64.pauth.
+     * GDB versions 9 through 12 have a bug where they will crash
+     * if they see the latter XML from QEMU.
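+     * The v2 feature is otherwise identical in content: it exposes the
+     * four 64-bit mask pseudo-registers (pauth_dmask, pauth_cmask,
+     * pauth_dmask_high, pauth_cmask_high) handled in gdbstub64.c.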
+ */ + if (isar_feature_aa64_pauth(&cpu->isar)) { + gdb_register_coprocessor(cs, aarch64_gdb_get_pauth_reg, + aarch64_gdb_set_pauth_reg, + 4, "aarch64-pauth.xml", 0); + } #endif } else { if (arm_feature(env, ARM_FEATURE_NEON)) { @@ -495,7 +553,7 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 2, "arm-vfp-sysregs.xml", 0); } } - if (cpu_isar_feature(aa32_mve, cpu)) { + if (cpu_isar_feature(aa32_mve, cpu) && tcg_enabled()) { gdb_register_coprocessor(cs, mve_gdb_get_reg, mve_gdb_set_reg, 1, "arm-m-profile-mve.xml", 0); } @@ -503,4 +561,18 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs), "system-registers.xml", 0); + if (arm_feature(env, ARM_FEATURE_M) && tcg_enabled()) { + gdb_register_coprocessor(cs, + arm_gdb_get_m_systemreg, arm_gdb_set_m_systemreg, + arm_gen_dynamic_m_systemreg_xml(cs, cs->gdb_num_regs), + "arm-m-system.xml", 0); +#ifndef CONFIG_USER_ONLY + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + gdb_register_coprocessor(cs, + arm_gdb_get_m_secextreg, arm_gdb_set_m_secextreg, + arm_gen_dynamic_m_secextreg_xml(cs, cs->gdb_num_regs), + "arm-m-secext.xml", 0); + } +#endif + } } diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c index 07a6746944..d7b79a6589 100644 --- a/target/arm/gdbstub64.c +++ b/target/arm/gdbstub64.c @@ -20,7 +20,7 @@ #include "qemu/log.h" #include "cpu.h" #include "internals.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { @@ -72,7 +72,7 @@ int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return 0; } -int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) +int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg) { switch (reg) { case 0 ... 31: @@ -92,7 +92,7 @@ int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) } } -int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) +int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg) { switch (reg) { case 0 ... 31: @@ -116,7 +116,7 @@ int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) } } -int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg) +int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg) { ARMCPU *cpu = env_archcpu(env); @@ -164,7 +164,7 @@ int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg) return 0; } -int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg) +int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg) { ARMCPU *cpu = env_archcpu(env); @@ -209,3 +209,173 @@ int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg) return 0; } + +int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg) +{ + switch (reg) { + case 0: /* pauth_dmask */ + case 1: /* pauth_cmask */ + case 2: /* pauth_dmask_high */ + case 3: /* pauth_cmask_high */ + /* + * Note that older versions of this feature only contained + * pauth_{d,c}mask, for use with Linux user processes, and + * thus exclusively in the low half of the address space. + * + * To support system mode, and to debug kernels, two new regs + * were added to cover the high half of the address space. + * For the purpose of pauth_ptr_mask, we can use any well-formed + * address within the address space half -- here, 0 and -1. 
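+         *
+         * Worked example (values assumed): with TBI enabled and TxSZ=16,
+         * i.e. a 48-bit VA space, pauth_ptr_mask() for the low half is
+         * MAKE_64BIT_MASK(48, 8) == 0x00ff000000000000, i.e. the PAC
+         * occupies bits [55:48].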
+         */
+        {
+            bool is_data = !(reg & 1);
+            bool is_high = reg & 2;
+            ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
+            ARMVAParameters param;
+
+            param = aa64_va_parameters(env, -is_high, mmu_idx, is_data, false);
+            return gdb_get_reg64(buf, pauth_ptr_mask(param));
+        }
+    default:
+        return 0;
+    }
+}
+
+int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+    /* All pseudo registers are read-only. */
+    return 0;
+}
+
+static void output_vector_union_type(GString *s, int reg_width,
+                                     const char *name)
+{
+    struct TypeSize {
+        const char *gdb_type;
+        short size;
+        char sz, suffix;
+    };
+
+    static const struct TypeSize vec_lanes[] = {
+        /* quads */
+        { "uint128", 128, 'q', 'u' },
+        { "int128", 128, 'q', 's' },
+        /* 64 bit */
+        { "ieee_double", 64, 'd', 'f' },
+        { "uint64", 64, 'd', 'u' },
+        { "int64", 64, 'd', 's' },
+        /* 32 bit */
+        { "ieee_single", 32, 's', 'f' },
+        { "uint32", 32, 's', 'u' },
+        { "int32", 32, 's', 's' },
+        /* 16 bit */
+        { "ieee_half", 16, 'h', 'f' },
+        { "uint16", 16, 'h', 'u' },
+        { "int16", 16, 'h', 's' },
+        /* bytes */
+        { "uint8", 8, 'b', 'u' },
+        { "int8", 8, 'b', 's' },
+    };
+
+    static const char suf[] = { 'b', 'h', 's', 'd', 'q' };
+    int i, j;
+
+    /* First define types and totals in a whole VL */
+    for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
+        g_string_append_printf(s,
+                               "<vector id=\"%s%c%c\" type=\"%s\" count=\"%d\"/>",
+                               name, vec_lanes[i].sz, vec_lanes[i].suffix,
+                               vec_lanes[i].gdb_type, reg_width / vec_lanes[i].size);
+    }
+
+    /*
+     * Now define a union for each size group containing unsigned and
+     * signed and potentially float versions of each size from 128 to
+     * 8 bits.
+     */
+    for (i = 0; i < ARRAY_SIZE(suf); i++) {
+        int bits = 8 << i;
+
+        g_string_append_printf(s, "<union id=\"%sn%c\">", name, suf[i]);
+        for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
+            if (vec_lanes[j].size == bits) {
+                g_string_append_printf(s, "<field name=\"%c\" type=\"%s%c%c\"/>",
+                                       vec_lanes[j].suffix, name,
+                                       vec_lanes[j].sz, vec_lanes[j].suffix);
+            }
+        }
+        g_string_append(s, "</union>");
+    }
+
+    /* And now the final union of unions */
+    g_string_append_printf(s, "<union id=\"%s\">", name);
+    for (i = ARRAY_SIZE(suf) - 1; i >= 0; i--) {
+        g_string_append_printf(s, "<field name=\"%c\" type=\"%sn%c\"/>",
+                               suf[i], name, suf[i]);
+    }
+    g_string_append(s, "</union>");
+}
+
+int arm_gen_dynamic_svereg_xml(CPUState *cs, int orig_base_reg)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    GString *s = g_string_new(NULL);
+    DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml;
+    int reg_width = cpu->sve_max_vq * 128;
+    int pred_width = cpu->sve_max_vq * 16;
+    int base_reg = orig_base_reg;
+    int i;
+
+    g_string_printf(s, "<?xml version=\"1.0\"?>");
+    g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
+    g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">");
+
+    /* Create the vector union type. */
+    output_vector_union_type(s, reg_width, "svev");
+
+    /* Create the predicate vector type. */
+    g_string_append_printf(s,
+                           "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
+                           pred_width / 8);
+
+    /* Define the vector registers. */
+    for (i = 0; i < 32; i++) {
+        g_string_append_printf(s,
+                               "<reg name=\"z%d\" bitsize=\"%d\" regnum=\"%d\" type=\"svev\"/>",
+                               i, reg_width, base_reg++);
+    }
+
+    /* fpscr & status registers */
+    g_string_append_printf(s, "<reg name=\"fpsr\" bitsize=\"32\" regnum=\"%d\" group=\"float\" type=\"int\"/>", base_reg++);
+    g_string_append_printf(s, "<reg name=\"fpcr\" bitsize=\"32\" regnum=\"%d\" group=\"float\" type=\"int\"/>", base_reg++);
+
+    /* Define the predicate registers. */
+    for (i = 0; i < 16; i++) {
+        g_string_append_printf(s,
+                               "<reg name=\"p%d\" bitsize=\"%d\" regnum=\"%d\" type=\"svep\"/>",
+                               i, pred_width, base_reg++);
+    }
+    g_string_append_printf(s,
+                           "<reg name=\"ffr\" bitsize=\"%d\" regnum=\"%d\" group=\"vector\" type=\"svep\"/>",
+                           pred_width, base_reg++);
+
+    /* Define the vector length pseudo-register. */
+    g_string_append_printf(s,
+                           "<reg name=\"vg\" bitsize=\"64\" regnum=\"%d\" type=\"int\"/>",
+                           base_reg++);
+
+    g_string_append_printf(s, "</feature>");
+
+    info->desc = g_string_free(s, false);
+    info->num = base_reg - orig_base_reg;
+    return info->num;
+}
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 14af7ba095..0b7fd2e7e6 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -23,7 +23,6 @@
 #include "sysemu/cpu-timers.h"
 #include "sysemu/kvm.h"
 #include "sysemu/tcg.h"
-#include "qapi/qapi-commands-machine-target.h"
 #include "qapi/error.h"
 #include "qemu/guest-random.h"
 #ifdef CONFIG_TCG
@@ -4905,7 +4904,7 @@ static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
     unsigned int page_size_granule, page_shift, num, scale, exponent;
     /* Extract one bit to represent the va selector in use. */
     uint64_t select = sextract64(value, 36, 1);
-    ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true);
+    ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true, false);
     TLBIRange ret = { };
     ARMGranuleSize gran;
@@ -5788,6 +5787,9 @@ uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure)
 
 uint64_t arm_hcr_el2_eff(CPUARMState *env)
 {
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        return 0;
+    }
     return arm_hcr_el2_eff_secstate(env, arm_is_secure_below_el3(env));
 }
 
@@ -9188,34 +9190,6 @@ void arm_cpu_list(void)
     g_slist_free(list);
 }
 
-static void arm_cpu_add_definition(gpointer data, gpointer user_data)
-{
-    ObjectClass *oc = data;
-    CpuDefinitionInfoList **cpu_list = user_data;
-    CpuDefinitionInfo *info;
-    const char *typename;
-
-    typename = object_class_get_name(oc);
-    info = g_malloc0(sizeof(*info));
-    info->name = g_strndup(typename,
-                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
-    info->q_typename = g_strdup(typename);
-
-    QAPI_LIST_PREPEND(*cpu_list, info);
-}
-
-CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
-{
-    CpuDefinitionInfoList *cpu_list = NULL;
-    GSList *list;
-
-    list = object_class_get_list(TYPE_ARM_CPU, false);
-    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
-    g_slist_free(list);
-
-    return cpu_list;
-}
-
 /*
  * Private utility function for define_one_arm_cp_reg_with_opaque():
  * add a single reginfo struct to the hash table.
@@ -11219,7 +11193,8 @@ static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
 }
 
 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
-                                   ARMMMUIdx mmu_idx, bool data)
+                                   ARMMMUIdx mmu_idx, bool data,
+                                   bool el1_is_aa32)
 {
     uint64_t tcr = regime_tcr(env, mmu_idx);
     bool epd, hpd, tsz_oob, ds, ha, hd;
@@ -11315,6 +11290,16 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
         }
     }
 
+    if (stage2 && el1_is_aa32) {
+        /*
+         * For AArch32 EL1 the min txsz (and thus max IPA size) requirements
+         * are loosened: a configured IPA of 40 bits is permitted even if
+         * the implemented PA is less than that (and so a 40 bit IPA would
+         * fault for an AArch64 EL1). See R_DTLMN.
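+         * For example, a hypervisor can set VTCR_EL2.T0SZ to 24 (a
+         * 40-bit IPA space) for an AArch32 EL1 guest even on an
+         * implementation whose PARange is only 36 bits; clamping
+         * min_tsz to 24 here accepts that configuration.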
+ */ + min_tsz = MIN(min_tsz, 24); + } + if (tsz > max_tsz) { tsz = max_tsz; tsz_oob = true; diff --git a/target/arm/helper.h b/target/arm/helper.h index 018b00ea75..3335c2b10b 100644 --- a/target/arm/helper.h +++ b/target/arm/helper.h @@ -1039,9 +1039,9 @@ DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) #ifdef TARGET_AARCH64 -#include "helper-a64.h" -#include "helper-sve.h" -#include "helper-sme.h" +#include "tcg/helper-a64.h" +#include "tcg/helper-sve.h" +#include "tcg/helper-sme.h" #endif -#include "helper-mve.h" +#include "tcg/helper-mve.h" diff --git a/target/arm/internals.h b/target/arm/internals.h index 680c574717..c869d18c38 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -193,16 +193,22 @@ void arm_restore_state_to_opc(CPUState *cs, void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb); #endif /* CONFIG_TCG */ -enum arm_fprounding { +typedef enum ARMFPRounding { FPROUNDING_TIEEVEN, FPROUNDING_POSINF, FPROUNDING_NEGINF, FPROUNDING_ZERO, FPROUNDING_TIEAWAY, FPROUNDING_ODD -}; +} ARMFPRounding; -int arm_rmode_to_sf(int rmode); +extern const FloatRoundMode arm_rmode_to_sf_map[6]; + +static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode) +{ + assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map)); + return arm_rmode_to_sf_map[rmode]; +} static inline void aarch64_save_sp(CPUARMState *env, int el) { @@ -611,6 +617,7 @@ G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); +#ifndef CONFIG_USER_ONLY /* arm_cpu_do_transaction_failed: handle a memory system error response * (eg "no device/memory present at address") by raising an external abort * exception @@ -620,6 +627,7 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr); +#endif /* Call any registered EL change hooks */ static inline void arm_call_pre_el_change_hook(ARMCPU *cpu) @@ -1083,8 +1091,18 @@ typedef struct ARMVAParameters { ARMGranuleSize gran : 2; } ARMVAParameters; +/** + * aa64_va_parameters: Return parameters for an AArch64 virtual address + * @env: CPU + * @va: virtual address to look up + * @mmu_idx: determines translation regime to use + * @data: true if this is a data access + * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32 + * (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob) + */ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, - ARMMMUIdx mmu_idx, bool data); + ARMMMUIdx mmu_idx, bool data, + bool el1_is_aa32); int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx); int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx); @@ -1342,21 +1360,35 @@ static inline uint64_t pmu_counter_mask(CPUARMState *env) } #ifdef TARGET_AARCH64 -int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg); -int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg); -int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg); -int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg); +int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg); +int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg); +int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg); +int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg); +int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg); +int 
aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg); +int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg); void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp); void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp); void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp); void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp); +void aarch64_max_tcg_initfn(Object *obj); +void aarch64_add_pauth_properties(Object *obj); +void aarch64_add_sve_properties(Object *obj); +void aarch64_add_sme_properties(Object *obj); #endif -#ifdef CONFIG_USER_ONLY -static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { } -#else -void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu); -#endif +/* Read the CONTROL register as the MRS instruction would. */ +uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure); + +/* + * Return a pointer to the location where we currently store the + * stack pointer for the requested security state and thread mode. + * This pointer will become invalid if the CPU state is updated + * such that the stack pointers are switched around (eg changing + * the SPSEL control bit). + */ +uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure, + bool threadmode, bool spsel); bool el_is_in_host(CPUARMState *env, int el); @@ -1365,6 +1397,21 @@ int exception_target_el(CPUARMState *env); bool arm_singlestep_active(CPUARMState *env); bool arm_generate_debug_exceptions(CPUARMState *env); +/** + * pauth_ptr_mask: + * @param: parameters defining the MMU setup + * + * Return a mask of the address bits that contain the authentication code, + * given the MMU config defined by @param. + */ +static inline uint64_t pauth_ptr_mask(ARMVAParameters param) +{ + int bot_pac_bit = 64 - param.tsz; + int top_pac_bit = 64 - 8 * param.tbi; + + return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit); +} + /* Add the cpreg definitions for debug related system registers */ void define_debug_regs(ARMCPU *cpu); diff --git a/target/arm/kvm-consts.h b/target/arm/kvm-consts.h index 09967ec5e6..7c6adc14f6 100644 --- a/target/arm/kvm-consts.h +++ b/target/arm/kvm-consts.h @@ -124,13 +124,10 @@ MISMATCH_CHECK(QEMU_PSCI_RET_INTERNAL_FAILURE, PSCI_RET_INTERNAL_FAILURE); MISMATCH_CHECK(QEMU_PSCI_RET_NOT_PRESENT, PSCI_RET_NOT_PRESENT); MISMATCH_CHECK(QEMU_PSCI_RET_DISABLED, PSCI_RET_DISABLED); -/* Note that KVM uses overlapping values for AArch32 and AArch64 - * target CPU numbers. AArch32 targets: +/* + * Note that KVM uses overlapping values for AArch32 and AArch64 + * target CPU numbers. 
AArch64 targets: */ -#define QEMU_KVM_ARM_TARGET_CORTEX_A15 0 -#define QEMU_KVM_ARM_TARGET_CORTEX_A7 1 - -/* AArch64 targets: */ #define QEMU_KVM_ARM_TARGET_AEM_V8 0 #define QEMU_KVM_ARM_TARGET_FOUNDATION_V8 1 #define QEMU_KVM_ARM_TARGET_CORTEX_A57 2 diff --git a/target/arm/kvm.c b/target/arm/kvm.c index f022c644d2..84da49332c 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -280,6 +280,8 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } + kvm_arm_init_debug(s); + return ret; } diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index 1197253d12..810db33ccb 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -74,24 +74,16 @@ GArray *hw_breakpoints, *hw_watchpoints; #define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i)) #define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i)) -/** - * kvm_arm_init_debug() - check for guest debug capabilities - * @cs: CPUState - * - * kvm_check_extension returns the number of debug registers we have - * or 0 if we have none. - * - */ -static void kvm_arm_init_debug(CPUState *cs) +void kvm_arm_init_debug(KVMState *s) { - have_guest_debug = kvm_check_extension(cs->kvm_state, + have_guest_debug = kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG); - max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS); + max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS); hw_watchpoints = g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps); - max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS); + max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS); hw_breakpoints = g_array_sized_new(true, true, sizeof(HWBreakpoint), max_hw_bps); return; @@ -920,8 +912,6 @@ int kvm_arch_init_vcpu(CPUState *cs) } cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK; - kvm_arm_init_debug(cs); - /* Check whether user space can specify guest syndrome value */ kvm_arm_init_serror_injection(cs); diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index 99017b635c..330fbe5c72 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -18,6 +18,14 @@ #define KVM_ARM_VGIC_V2 (1 << 0) #define KVM_ARM_VGIC_V3 (1 << 1) +/** + * kvm_arm_init_debug() - initialize guest debug capabilities + * @s: KVMState + * + * Should be called only once before using guest debug capabilities. + */ +void kvm_arm_init_debug(KVMState *s); + /** * kvm_arm_vcpu_init: * @cs: CPUState diff --git a/target/arm/meson.build b/target/arm/meson.build index a5191b57e1..359a649eaf 100644 --- a/target/arm/meson.build +++ b/target/arm/meson.build @@ -5,7 +5,6 @@ arm_ss.add(files( 'gdbstub.c', 'helper.c', 'vfp_helper.c', - 'cpu_tcg.c', )) arm_ss.add(zlib) @@ -20,8 +19,9 @@ arm_softmmu_ss = ss.source_set() arm_softmmu_ss.add(files( 'arch_dump.c', 'arm-powerctl.c', + 'arm-qmp-cmds.c', + 'cortex-regs.c', 'machine.c', - 'monitor.c', 'ptw.c', )) diff --git a/target/arm/ptw.c b/target/arm/ptw.c index be0cc6bc15..69c05cd9da 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -103,6 +103,37 @@ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) return stage_1_mmu_idx(arm_mmu_idx(env)); } +/* + * Return where we should do ptw loads from for a stage 2 walk. + * This depends on whether the address we are looking up is a + * Secure IPA or a NonSecure IPA, which we know from whether this is + * Stage2 or Stage2_S. + * If this is the Secure EL1&0 regime we need to check the NSW and SW bits. 
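+ *
+ * Concretely (sketch): in the Secure EL1&0 regime, a Stage2_S walk
+ * loads its descriptors from Secure PA space (ARMMMUIdx_Phys_S) while
+ * VSTCR_EL2.SW == 0, and from Non-secure PA space (ARMMMUIdx_Phys_NS)
+ * once SW is set; plain Stage2 walks key off VTCR_EL2.NSW the same way.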
+ */ +static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx) +{ + bool s2walk_secure; + + /* + * We're OK to check the current state of the CPU here because + * (1) we always invalidate all TLBs when the SCR_EL3.NS bit changes + * (2) there's no way to do a lookup that cares about Stage 2 for a + * different security state to the current one for AArch64, and AArch32 + * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do + * an NS stage 1+2 lookup while the NS bit is 0.) + */ + if (!arm_is_secure_below_el3(env) || !arm_el_is_aa64(env, 3)) { + return ARMMMUIdx_Phys_NS; + } + if (stage2idx == ARMMMUIdx_Stage2_S) { + s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW); + } else { + s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW); + } + return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS; + +} + static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx) { return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; @@ -220,7 +251,6 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, ARMMMUIdx mmu_idx = ptw->in_mmu_idx; ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx; uint8_t pte_attrs; - bool pte_secure; ptw->out_virt = addr; @@ -232,8 +262,8 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, if (regime_is_stage2(s2_mmu_idx)) { S1Translate s2ptw = { .in_mmu_idx = s2_mmu_idx, - .in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS, - .in_secure = is_secure, + .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx), + .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S, .in_debug = true, }; GetPhysAddrResult s2 = { }; @@ -244,12 +274,12 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, } ptw->out_phys = s2.f.phys_addr; pte_attrs = s2.cacheattrs.attrs; - pte_secure = s2.f.attrs.secure; + ptw->out_secure = s2.f.attrs.secure; } else { /* Regime is physical. */ ptw->out_phys = addr; pte_attrs = 0; - pte_secure = is_secure; + ptw->out_secure = s2_mmu_idx == ARMMMUIdx_Phys_S; } ptw->out_host = NULL; ptw->out_rw = false; @@ -259,7 +289,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, int flags; env->tlb_fi = fi; - flags = probe_access_full(env, addr, MMU_DATA_LOAD, + flags = probe_access_full(env, addr, 0, MMU_DATA_LOAD, arm_to_core_mmu_idx(s2_mmu_idx), true, &ptw->out_host, &full, 0); env->tlb_fi = NULL; @@ -270,7 +300,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK); ptw->out_rw = full->prot & PAGE_WRITE; pte_attrs = full->pte_attrs; - pte_secure = full->attrs.secure; + ptw->out_secure = full->attrs.secure; #else g_assert_not_reached(); #endif @@ -293,11 +323,6 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, } } - /* Check if page table walk is to secure or non-secure PA space. */ - ptw->out_secure = (is_secure - && !(pte_secure - ? 
env->cp15.vstcr_el2 & VSTCR_SW - : env->cp15.vtcr_el2 & VTCR_NSW)); ptw->out_be = regime_translation_big_endian(env, mmu_idx); return true; @@ -411,7 +436,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, void *discard; env->tlb_fi = fi; - flags = probe_access_flags(env, ptw->out_virt, MMU_DATA_STORE, + flags = probe_access_flags(env, ptw->out_virt, 0, MMU_DATA_STORE, arm_to_core_mmu_idx(ptw->in_ptw_idx), true, &discard, 0); env->tlb_fi = NULL; @@ -947,6 +972,7 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, int ap, int ns, int xn, int pxn) { + ARMCPU *cpu = env_archcpu(env); bool is_user = regime_is_user(env, mmu_idx); int prot_rw, user_rw; bool have_wxn; @@ -958,8 +984,19 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, if (is_user) { prot_rw = user_rw; } else { + /* + * PAN controls can forbid data accesses but don't affect insn fetch. + * Plain PAN forbids data accesses if EL0 has data permissions; + * PAN3 forbids data accesses if EL0 has either data or exec perms. + * Note that for AArch64 the 'user can exec' case is exactly !xn. + * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0 + * do not affect EPAN. + */ if (user_rw && regime_is_pan(env, mmu_idx)) { - /* PAN forbids data accesses but doesn't affect insn fetch */ + prot_rw = 0; + } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 && + regime_is_pan(env, mmu_idx) && + (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) { prot_rw = 0; } else { prot_rw = simple_ap_to_rw_prot_is_user(ap, false); @@ -1081,70 +1118,108 @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, * check_s2_mmu_setup * @cpu: ARMCPU * @is_aa64: True if the translation regime is in AArch64 state - * @startlevel: Suggested starting level - * @inputsize: Bitsize of IPAs + * @tcr: VTCR_EL2 or VSTCR_EL2 + * @ds: Effective value of TCR.DS. + * @iasize: Bitsize of IPAs * @stride: Page-table stride (See the ARM ARM) * - * Returns true if the suggested S2 translation parameters are OK and - * false otherwise. + * Decode the starting level of the S2 lookup, returning INT_MIN if + * the configuration is invalid. */ -static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, - int inputsize, int stride, int outputsize) +static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr, + bool ds, int iasize, int stride) { - const int grainsize = stride + 3; - int startsizecheck; - - /* - * Negative levels are usually not allowed... - * Except for FEAT_LPA2, 4k page table, 52-bit address space, which - * begins with level -1. Note that previous feature tests will have - * eliminated this combination if it is not enabled. - */ - if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) { - return false; - } - - startsizecheck = inputsize - ((3 - level) * stride + grainsize); - if (startsizecheck < 1 || startsizecheck > stride + 4) { - return false; - } + int sl0, sl2, startlevel, granulebits, levels; + int s1_min_iasize, s1_max_iasize; + sl0 = extract32(tcr, 6, 2); if (is_aa64) { + /* + * AArch64.S2InvalidSL: Interpretation of SL depends on the page size, + * so interleave AArch64.S2StartLevel. + */ switch (stride) { - case 13: /* 64KB Pages. */ - if (level == 0 || (level == 1 && outputsize <= 42)) { - return false; + case 9: /* 4KB */ + /* SL2 is RES0 unless DS=1 & 4KB granule. 
*/ + sl2 = extract64(tcr, 33, 1); + if (ds && sl2) { + if (sl0 != 0) { + goto fail; + } + startlevel = -1; + } else { + startlevel = 2 - sl0; + switch (sl0) { + case 2: + if (arm_pamax(cpu) < 44) { + goto fail; + } + break; + case 3: + if (!cpu_isar_feature(aa64_st, cpu)) { + goto fail; + } + startlevel = 3; + break; + } } break; - case 11: /* 16KB Pages. */ - if (level == 0 || (level == 1 && outputsize <= 40)) { - return false; + case 11: /* 16KB */ + switch (sl0) { + case 2: + if (arm_pamax(cpu) < 42) { + goto fail; + } + break; + case 3: + if (!ds) { + goto fail; + } + break; } + startlevel = 3 - sl0; break; - case 9: /* 4KB Pages. */ - if (level == 0 && outputsize <= 42) { - return false; + case 13: /* 64KB */ + switch (sl0) { + case 2: + if (arm_pamax(cpu) < 44) { + goto fail; + } + break; + case 3: + goto fail; } + startlevel = 3 - sl0; break; default: g_assert_not_reached(); } - - /* Inputsize checks. */ - if (inputsize > outputsize && - (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) { - /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ - return false; - } } else { - /* AArch32 only supports 4KB pages. Assert on that. */ + /* + * Things are simpler for AArch32 EL2, with only 4k pages. + * There is no separate S2InvalidSL function, but AArch32.S2Walk + * begins with walkparms.sl0 in {'1x'}. + */ assert(stride == 9); - - if (level == 0) { - return false; + if (sl0 >= 2) { + goto fail; } + startlevel = 2 - sl0; } - return true; + + /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */ + levels = 3 - startlevel; + granulebits = stride + 3; + + s1_min_iasize = levels * stride + granulebits + 1; + s1_max_iasize = s1_min_iasize + (stride - 1) + 4; + + if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) { + return startlevel; + } + + fail: + return INT_MIN; } /** @@ -1198,7 +1273,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, int ps; param = aa64_va_parameters(env, address, mmu_idx, - access_type != MMU_INST_FETCH); + access_type != MMU_INST_FETCH, + !arm_el_is_aa64(env, 1)); level = 0; /* @@ -1300,38 +1376,10 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, */ level = 4 - (inputsize - 4) / stride; } else { - /* - * For stage 2 translations the starting level is specified by the - * VTCR_EL2.SL0 field (whose interpretation depends on the page size) - */ - uint32_t sl0 = extract32(tcr, 6, 2); - uint32_t sl2 = extract64(tcr, 33, 1); - int32_t startlevel; - bool ok; - - /* SL2 is RES0 unless DS=1 & 4kb granule. */ - if (param.ds && stride == 9 && sl2) { - if (sl0 != 0) { - level = 0; - goto do_translation_fault; - } - startlevel = -1; - } else if (!aarch64 || stride == 9) { - /* AArch32 or 4KB pages */ - startlevel = 2 - sl0; - - if (cpu_isar_feature(aa64_st, cpu)) { - startlevel &= 3; - } - } else { - /* 16KB or 64KB pages */ - startlevel = 3 - sl0; - } - - /* Check that the starting level is valid. */ - ok = check_s2_mmu_setup(cpu, aarch64, startlevel, - inputsize, stride, outputsize); - if (!ok) { + int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds, + inputsize, stride); + if (startlevel == INT_MIN) { + level = 0; goto do_translation_fault; } level = startlevel; @@ -1382,17 +1430,18 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, descaddrmask &= ~indexmask_grainsize; /* - * Secure accesses start with the page table in secure memory and + * Secure stage 1 accesses start with the page table in secure memory and * can be downgraded to non-secure at any step. 
Non-secure accesses * remain non-secure. We implement this by just ORing in the NSTable/NS * bits at each step. + * Stage 2 never gets this kind of downgrade. */ tableattrs = is_secure ? 0 : (1 << 4); next_level: descaddr |= (address >> (stride * (4 - level))) & indexmask; descaddr &= ~7ULL; - nstable = extract32(tableattrs, 4, 1); + nstable = !regime_is_stage2(mmu_idx) && extract32(tableattrs, 4, 1); if (nstable) { /* * Stage2_S -> Stage2 or Phys_S -> Phys_NS @@ -1564,11 +1613,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, result->f.attrs.secure = false; } - /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */ - if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) { - result->f.guarded = extract64(attrs, 50, 1); /* GP */ - } - if (regime_is_stage2(mmu_idx)) { result->cacheattrs.is_s2_format = true; result->cacheattrs.attrs = extract32(attrs, 2, 4); @@ -1579,6 +1623,11 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, assert(attrindx <= 7); result->cacheattrs.is_s2_format = false; result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); + + /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */ + if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) { + result->f.guarded = extract64(attrs, 50, 1); /* GP */ + } } /* @@ -2555,6 +2604,7 @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr, assert(!s1.is_s2_format); ret.is_s2_format = false; + ret.guarded = s1.guarded; if (s1.attrs == 0xf0) { tagged = true; @@ -2691,7 +2741,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, hwaddr ipa; int s1_prot, s1_lgpgsz; bool is_secure = ptw->in_secure; - bool ret, ipa_secure, s2walk_secure; + bool ret, ipa_secure; ARMCacheAttrs cacheattrs1; bool is_el0; uint64_t hcr; @@ -2705,20 +2755,11 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, ipa = result->f.phys_addr; ipa_secure = result->f.attrs.secure; - if (is_secure) { - /* Select TCR based on the NS bit from the S1 walk. */ - s2walk_secure = !(ipa_secure - ? env->cp15.vstcr_el2 & VSTCR_SW - : env->cp15.vtcr_el2 & VTCR_NSW); - } else { - assert(!ipa_secure); - s2walk_secure = false; - } is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0; - ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; - ptw->in_ptw_idx = s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS; - ptw->in_secure = s2walk_secure; + ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; + ptw->in_secure = ipa_secure; + ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx); /* * S1 is done, now do S2 translation. @@ -2826,6 +2867,16 @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw, ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; break; + case ARMMMUIdx_Stage2: + case ARMMMUIdx_Stage2_S: + /* + * Second stage lookup uses physical for ptw; whether this is S or + * NS may depend on the SW/NSW bits if this is a stage 2 lookup for + * the Secure EL2&0 regime. + */ + ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx); + break; + case ARMMMUIdx_E10_0: s1_mmu_idx = ARMMMUIdx_Stage1_E0; goto do_twostage; @@ -2849,7 +2900,7 @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw, /* fall through */ default: - /* Single stage and second stage uses physical for ptw. */ + /* Single stage uses physical for ptw. */ ptw->in_ptw_idx = is_secure ? 
ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS; break; } diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode new file mode 100644 index 0000000000..12a310d0a3 --- /dev/null +++ b/target/arm/tcg/a64.decode @@ -0,0 +1,152 @@ +# AArch64 A64 allowed instruction decoding +# +# Copyright (c) 2023 Linaro, Ltd +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, see . + +# +# This file is processed by scripts/decodetree.py +# + +&r rn +&ri rd imm +&rri_sf rd rn imm sf +&i imm + + +### Data Processing - Immediate + +# PC-rel addressing + +%imm_pcrel 5:s19 29:2 +@pcrel . .. ..... ................... rd:5 &ri imm=%imm_pcrel + +ADR 0 .. 10000 ................... ..... @pcrel +ADRP 1 .. 10000 ................... ..... @pcrel + +# Add/subtract (immediate) + +%imm12_sh12 10:12 !function=shl_12 +@addsub_imm sf:1 .. ...... . imm:12 rn:5 rd:5 +@addsub_imm12 sf:1 .. ...... . ............ rn:5 rd:5 imm=%imm12_sh12 + +ADD_i . 00 100010 0 ............ ..... ..... @addsub_imm +ADD_i . 00 100010 1 ............ ..... ..... @addsub_imm12 +ADDS_i . 01 100010 0 ............ ..... ..... @addsub_imm +ADDS_i . 01 100010 1 ............ ..... ..... @addsub_imm12 + +SUB_i . 10 100010 0 ............ ..... ..... @addsub_imm +SUB_i . 10 100010 1 ............ ..... ..... @addsub_imm12 +SUBS_i . 11 100010 0 ............ ..... ..... @addsub_imm +SUBS_i . 11 100010 1 ............ ..... ..... @addsub_imm12 + +# Add/subtract (immediate with tags) + +&rri_tag rd rn uimm6 uimm4 +@addsub_imm_tag . .. ...... . uimm6:6 .. uimm4:4 rn:5 rd:5 &rri_tag + +ADDG_i 1 00 100011 0 ...... 00 .... ..... ..... @addsub_imm_tag +SUBG_i 1 10 100011 0 ...... 00 .... ..... ..... @addsub_imm_tag + +# Logical (immediate) + +&rri_log rd rn sf dbm +@logic_imm_64 1 .. ...... dbm:13 rn:5 rd:5 &rri_log sf=1 +@logic_imm_32 0 .. ...... 0 dbm:12 rn:5 rd:5 &rri_log sf=0 + +AND_i . 00 100100 . ...... ...... ..... ..... @logic_imm_64 +AND_i . 00 100100 . ...... ...... ..... ..... @logic_imm_32 +ORR_i . 01 100100 . ...... ...... ..... ..... @logic_imm_64 +ORR_i . 01 100100 . ...... ...... ..... ..... @logic_imm_32 +EOR_i . 10 100100 . ...... ...... ..... ..... @logic_imm_64 +EOR_i . 10 100100 . ...... ...... ..... ..... @logic_imm_32 +ANDS_i . 11 100100 . ...... ...... ..... ..... @logic_imm_64 +ANDS_i . 11 100100 . ...... ...... ..... ..... @logic_imm_32 + +# Move wide (immediate) + +&movw rd sf imm hw +@movw_64 1 .. ...... hw:2 imm:16 rd:5 &movw sf=1 +@movw_32 0 .. ...... 0 hw:1 imm:16 rd:5 &movw sf=0 + +MOVN . 00 100101 .. ................ ..... @movw_64 +MOVN . 00 100101 .. ................ ..... @movw_32 +MOVZ . 10 100101 .. ................ ..... @movw_64 +MOVZ . 10 100101 .. ................ ..... @movw_32 +MOVK . 11 100101 .. ................ ..... @movw_64 +MOVK . 11 100101 .. ................ ..... @movw_32 + +# Bitfield + +&bitfield rd rn sf immr imms +@bitfield_64 1 .. ...... 1 immr:6 imms:6 rn:5 rd:5 &bitfield sf=1 +@bitfield_32 0 .. ...... 
0 0 immr:5 0 imms:5 rn:5 rd:5 &bitfield sf=0 + +SBFM . 00 100110 . ...... ...... ..... ..... @bitfield_64 +SBFM . 00 100110 . ...... ...... ..... ..... @bitfield_32 +BFM . 01 100110 . ...... ...... ..... ..... @bitfield_64 +BFM . 01 100110 . ...... ...... ..... ..... @bitfield_32 +UBFM . 10 100110 . ...... ...... ..... ..... @bitfield_64 +UBFM . 10 100110 . ...... ...... ..... ..... @bitfield_32 + +# Extract + +&extract rd rn rm imm sf + +EXTR 1 00 100111 1 0 rm:5 imm:6 rn:5 rd:5 &extract sf=1 +EXTR 0 00 100111 0 0 rm:5 0 imm:5 rn:5 rd:5 &extract sf=0 + +# Branches + +%imm26 0:s26 !function=times_4 +@branch . ..... .......................... &i imm=%imm26 + +B 0 00101 .......................... @branch +BL 1 00101 .......................... @branch + +%imm19 5:s19 !function=times_4 +&cbz rt imm sf nz + +CBZ sf:1 011010 nz:1 ................... rt:5 &cbz imm=%imm19 + +%imm14 5:s14 !function=times_4 +%imm31_19 31:1 19:5 +&tbz rt imm nz bitpos + +TBZ . 011011 nz:1 ..... .............. rt:5 &tbz imm=%imm14 bitpos=%imm31_19 + +B_cond 0101010 0 ................... 0 cond:4 imm=%imm19 + +BR 1101011 0000 11111 000000 rn:5 00000 &r +BLR 1101011 0001 11111 000000 rn:5 00000 &r +RET 1101011 0010 11111 000000 rn:5 00000 &r + +&braz rn m +BRAZ 1101011 0000 11111 00001 m:1 rn:5 11111 &braz # BRAAZ, BRABZ +BLRAZ 1101011 0001 11111 00001 m:1 rn:5 11111 &braz # BLRAAZ, BLRABZ + +&reta m +RETA 1101011 0010 11111 00001 m:1 11111 11111 &reta # RETAA, RETAB + +&bra rn rm m +BRA 1101011 1000 11111 00001 m:1 rn:5 rm:5 &bra # BRAA, BRAB +BLRA 1101011 1001 11111 00001 m:1 rn:5 rm:5 &bra # BLRAA, BLRAB + +ERET 1101011 0100 11111 000000 11111 00000 +ERETA 1101011 0100 11111 00001 m:1 11111 11111 &reta # ERETAA, ERETAB + +# We don't need to decode DRPS because it always UNDEFs except when +# the processor is in halting debug state (which we don't implement). +# The pattern is listed here as documentation. +# DRPS 1101011 0101 11111 000000 11111 00000 diff --git a/target/arm/arm_ldst.h b/target/arm/tcg/arm_ldst.h similarity index 100% rename from target/arm/arm_ldst.h rename to target/arm/tcg/arm_ldst.h diff --git a/target/arm/cpu_tcg.c b/target/arm/tcg/cpu32.c similarity index 93% rename from target/arm/cpu_tcg.c rename to target/arm/tcg/cpu32.c index df0c45e523..47d2e8e781 100644 --- a/target/arm/cpu_tcg.c +++ b/target/arm/tcg/cpu32.c @@ -1,5 +1,5 @@ /* - * QEMU ARM TCG CPUs. + * QEMU ARM TCG-only CPUs. 
* * Copyright (c) 2012 SUSE LINUX Products GmbH * @@ -10,9 +10,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#ifdef CONFIG_TCG #include "hw/core/tcg-cpu-ops.h" -#endif /* CONFIG_TCG */ #include "internals.h" #include "target/arm/idau.h" #if !defined(CONFIG_USER_ONLY) @@ -93,69 +91,10 @@ void aa32_max_features(ARMCPU *cpu) cpu->isar.id_dfr0 = t; } -#ifndef CONFIG_USER_ONLY -static uint64_t l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - ARMCPU *cpu = env_archcpu(env); - - /* Number of cores is in [25:24]; otherwise we RAZ */ - return (cpu->core_count - 1) << 24; -} - -static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = { - { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2, - .access = PL1_RW, .readfn = l2ctlr_read, - .writefn = arm_cp_write_ignore }, - { .name = "L2CTLR", - .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2, - .access = PL1_RW, .readfn = l2ctlr_read, - .writefn = arm_cp_write_ignore }, - { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "L2ECTLR", - .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPUACTLR", - .cp = 15, .opc1 = 0, .crm = 15, - .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, - { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPUECTLR", - .cp = 15, .opc1 = 1, .crm = 15, - .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, - { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPUMERRSR", - .cp = 15, .opc1 = 2, .crm = 15, - .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, - { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "L2MERRSR", - .cp = 15, .opc1 = 3, .crm = 15, - .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, -}; - -void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) -{ - define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo); -} -#endif /* !CONFIG_USER_ONLY */ - /* CPU models. These are not needed for the AArch64 linux-user build. 
*/ #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) -#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG) +#if !defined(CONFIG_USER_ONLY) static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { CPUClass *cc = CPU_GET_CLASS(cs); @@ -179,7 +118,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) } return ret; } -#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */ +#endif /* !CONFIG_USER_ONLY */ static void arm926_initfn(Object *obj) { @@ -546,7 +485,6 @@ static void cortex_a7_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_EL2); set_feature(&cpu->env, ARM_FEATURE_EL3); set_feature(&cpu->env, ARM_FEATURE_PMU); - cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7; cpu->midr = 0x410fc075; cpu->reset_fpsid = 0x41023075; cpu->isar.mvfr0 = 0x10110222; @@ -595,7 +533,6 @@ static void cortex_a15_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_EL2); set_feature(&cpu->env, ARM_FEATURE_EL3); set_feature(&cpu->env, ARM_FEATURE_PMU); - cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15; /* r4p0 cpu, not requiring expensive tlb flush errata */ cpu->midr = 0x414fc0f0; cpu->revidr = 0x0; @@ -1075,7 +1012,6 @@ static void pxa270c5_initfn(Object *obj) cpu->reset_sctlr = 0x00000078; } -#ifdef CONFIG_TCG static const struct TCGCPUOps arm_v7m_tcg_ops = { .initialize = arm_translate_init, .synchronize_from_tb = arm_cpu_synchronize_from_tb, @@ -1096,7 +1032,6 @@ static const struct TCGCPUOps arm_v7m_tcg_ops = { .debug_check_breakpoint = arm_debug_check_breakpoint, #endif /* !CONFIG_USER_ONLY */ }; -#endif /* CONFIG_TCG */ static void arm_v7m_class_init(ObjectClass *oc, void *data) { @@ -1104,10 +1039,7 @@ static void arm_v7m_class_init(ObjectClass *oc, void *data) CPUClass *cc = CPU_CLASS(oc); acc->info = data; -#ifdef CONFIG_TCG cc->tcg_ops = &arm_v7m_tcg_ops; -#endif /* CONFIG_TCG */ - cc->gdb_core_xml_file = "arm-m-profile.xml"; } diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c new file mode 100644 index 0000000000..886674a443 --- /dev/null +++ b/target/arm/tcg/cpu64.c @@ -0,0 +1,723 @@ +/* + * QEMU AArch64 TCG CPUs + * + * Copyright (c) 2013 Linaro Ltd + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * <http://www.gnu.org/licenses/> + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "qemu/module.h" +#include "qapi/visitor.h" +#include "hw/qdev-properties.h" +#include "internals.h" +#include "cpregs.h" + +static void aarch64_a35_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,cortex-a35"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + + /* From B2.2 AArch64 identification registers.
*/ + cpu->midr = 0x411fd040; + cpu->revidr = 0; + cpu->ctr = 0x84448004; + cpu->isar.id_pfr0 = 0x00000131; + cpu->isar.id_pfr1 = 0x00011011; + cpu->isar.id_dfr0 = 0x03010066; + cpu->id_afr0 = 0; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02102211; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x00011121; + cpu->isar.id_aa64pfr0 = 0x00002222; + cpu->isar.id_aa64pfr1 = 0; + cpu->isar.id_aa64dfr0 = 0x10305106; + cpu->isar.id_aa64dfr1 = 0; + cpu->isar.id_aa64isar0 = 0x00011120; + cpu->isar.id_aa64isar1 = 0; + cpu->isar.id_aa64mmfr0 = 0x00101122; + cpu->isar.id_aa64mmfr1 = 0; + cpu->clidr = 0x0a200023; + cpu->dcz_blocksize = 4; + + /* From B2.4 AArch64 Virtual Memory control registers */ + cpu->reset_sctlr = 0x00c50838; + + /* From B2.10 AArch64 performance monitor registers */ + cpu->isar.reset_pmcr_el0 = 0x410a3000; + + /* From B2.29 Cache ID registers */ + cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ + cpu->ccsidr[2] = 0x703fe03a; /* 512KB L2 cache */ + + /* From B3.5 VGIC Type register */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + + /* From C6.4 Debug ID Register */ + cpu->isar.dbgdidr = 0x3516d000; + /* From C6.5 Debug Device ID Register */ + cpu->isar.dbgdevid = 0x00110f13; + /* From C6.6 Debug Device ID Register 1 */ + cpu->isar.dbgdevid1 = 0x2; + + /* From Cortex-A35 SIMD and Floating-point Support r1p0 */ + /* From 3.2 AArch32 register summary */ + cpu->reset_fpsid = 0x41034043; + + /* From 2.2 AArch64 register summary */ + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; + + /* These values are the same with A53/A57/A72. */ + define_cortex_a72_a57_a53_cp_reginfo(cpu); +} + +static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + ARMCPU *cpu = ARM_CPU(obj); + uint32_t value; + + /* All vector lengths are disabled when SVE is off. 
*/ + if (!cpu_isar_feature(aa64_sve, cpu)) { + value = 0; + } else { + value = cpu->sve_max_vq; + } + visit_type_uint32(v, name, &value, errp); +} + +static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + ARMCPU *cpu = ARM_CPU(obj); + uint32_t max_vq; + + if (!visit_type_uint32(v, name, &max_vq, errp)) { + return; + } + + if (max_vq == 0 || max_vq > ARM_MAX_VQ) { + error_setg(errp, "unsupported SVE vector length"); + error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n", + ARM_MAX_VQ); + return; + } + + cpu->sve_max_vq = max_vq; +} + +static Property arm_cpu_lpa2_property = + DEFINE_PROP_BOOL("lpa2", ARMCPU, prop_lpa2, true); + +static void aarch64_a55_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,cortex-a55"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + + /* Ordered by B2.4 AArch64 registers by functional group */ + cpu->clidr = 0x82000023; + cpu->ctr = 0x84448004; /* L1Ip = VIPT */ + cpu->dcz_blocksize = 4; /* 64 bytes */ + cpu->isar.id_aa64dfr0 = 0x0000000010305408ull; + cpu->isar.id_aa64isar0 = 0x0000100010211120ull; + cpu->isar.id_aa64isar1 = 0x0000000000100001ull; + cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull; + cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; + cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull; + cpu->isar.id_aa64pfr0 = 0x0000000010112222ull; + cpu->isar.id_aa64pfr1 = 0x0000000000000010ull; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_dfr0 = 0x04010088; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x01011121; + cpu->isar.id_isar6 = 0x00000010; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02122211; + cpu->isar.id_mmfr4 = 0x00021110; + cpu->isar.id_pfr0 = 0x10010131; + cpu->isar.id_pfr1 = 0x00011011; + cpu->isar.id_pfr2 = 0x00000011; + cpu->midr = 0x412FD050; /* r2p0 */ + cpu->revidr = 0; + + /* From B2.23 CCSIDR_EL1 */ + cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x200fe01a; /* 32KB L1 icache */ + cpu->ccsidr[2] = 0x703fe07a; /* 512KB L2 cache */ + + /* From B2.96 SCTLR_EL3 */ + cpu->reset_sctlr = 0x30c50838; + + /* From B4.45 ICH_VTR_EL2 */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x13211111; + cpu->isar.mvfr2 = 0x00000043; + + /* From D5.4 AArch64 PMU register summary */ + cpu->isar.reset_pmcr_el0 = 0x410b3000; +} + +static void aarch64_a72_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,cortex-a72"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + cpu->midr = 0x410fd083; + cpu->revidr = 0x00000000; + cpu->reset_fpsid = 0x41034080; + 
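The sve-max-vq accessors above back a QOM uint32 property registered on the 'max' CPU further down in this file, so the widest vector length can be capped at configuration time (for example "-cpu max,sve-max-vq=4" for 512-bit vectors). A hedged sketch of driving the same property programmatically through the generic QOM setter:

/* Illustrative only: cap SVE at vq = 4, i.e. 4 * 128 = 512-bit vectors.
 * A value outside [1, ARM_MAX_VQ] trips the error path in the setter. */
Error *err = NULL;
object_property_set_uint(OBJECT(cpu), "sve-max-vq", 4, &err);
if (err) {
    error_report_err(err);   /* "unsupported SVE vector length" */
}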
cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; + cpu->ctr = 0x8444c004; + cpu->reset_sctlr = 0x00c50838; + cpu->isar.id_pfr0 = 0x00000131; + cpu->isar.id_pfr1 = 0x00011011; + cpu->isar.id_dfr0 = 0x03010066; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02102211; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x00011121; + cpu->isar.id_aa64pfr0 = 0x00002222; + cpu->isar.id_aa64dfr0 = 0x10305106; + cpu->isar.id_aa64isar0 = 0x00011120; + cpu->isar.id_aa64mmfr0 = 0x00001124; + cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.dbgdevid = 0x01110f13; + cpu->isar.dbgdevid1 = 0x2; + cpu->isar.reset_pmcr_el0 = 0x41023000; + cpu->clidr = 0x0a200023; + cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ + cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */ + cpu->dcz_blocksize = 4; /* 64 bytes */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + define_cortex_a72_a57_a53_cp_reginfo(cpu); +} + +static void aarch64_a76_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,cortex-a76"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + + /* Ordered by B2.4 AArch64 registers by functional group */ + cpu->clidr = 0x82000023; + cpu->ctr = 0x8444C004; + cpu->dcz_blocksize = 4; + cpu->isar.id_aa64dfr0 = 0x0000000010305408ull; + cpu->isar.id_aa64isar0 = 0x0000100010211120ull; + cpu->isar.id_aa64isar1 = 0x0000000000100001ull; + cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull; + cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; + cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull; + cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */ + cpu->isar.id_aa64pfr1 = 0x0000000000000010ull; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_dfr0 = 0x04010088; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00010142; + cpu->isar.id_isar5 = 0x01011121; + cpu->isar.id_isar6 = 0x00000010; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02122211; + cpu->isar.id_mmfr4 = 0x00021110; + cpu->isar.id_pfr0 = 0x10010131; + cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */ + cpu->isar.id_pfr2 = 0x00000011; + cpu->midr = 0x414fd0b1; /* r4p1 */ + cpu->revidr = 0; + + /* From B2.18 CCSIDR_EL1 */ + cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */ + cpu->ccsidr[2] = 0x707fe03a; /* 512KB L2 cache */ + + /* From B2.93 SCTLR_EL3 */ + cpu->reset_sctlr = 0x30c50838; + + /* From B4.23 ICH_VTR_EL2 */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + + /* From B5.1 AdvSIMD AArch64 register summary */ + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x13211111; + cpu->isar.mvfr2 = 0x00000043; + + /* 
From D5.1 AArch64 PMU register summary */ + cpu->isar.reset_pmcr_el0 = 0x410b3000; +} + +static void aarch64_a64fx_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,a64fx"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + cpu->midr = 0x461f0010; + cpu->revidr = 0x00000000; + cpu->ctr = 0x86668006; + cpu->reset_sctlr = 0x30000180; + cpu->isar.id_aa64pfr0 = 0x0000000101111111; /* No RAS Extensions */ + cpu->isar.id_aa64pfr1 = 0x0000000000000000; + cpu->isar.id_aa64dfr0 = 0x0000000010305408; + cpu->isar.id_aa64dfr1 = 0x0000000000000000; + cpu->id_aa64afr0 = 0x0000000000000000; + cpu->id_aa64afr1 = 0x0000000000000000; + cpu->isar.id_aa64mmfr0 = 0x0000000000001122; + cpu->isar.id_aa64mmfr1 = 0x0000000011212100; + cpu->isar.id_aa64mmfr2 = 0x0000000000001011; + cpu->isar.id_aa64isar0 = 0x0000000010211120; + cpu->isar.id_aa64isar1 = 0x0000000000010001; + cpu->isar.id_aa64zfr0 = 0x0000000000000000; + cpu->clidr = 0x0000000080000023; + cpu->ccsidr[0] = 0x7007e01c; /* 64KB L1 dcache */ + cpu->ccsidr[1] = 0x2007e01c; /* 64KB L1 icache */ + cpu->ccsidr[2] = 0x70ffe07c; /* 8MB L2 cache */ + cpu->dcz_blocksize = 6; /* 256 bytes */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + + /* The A64FX supports only 128, 256 and 512 bit vector lengths */ + aarch64_add_sve_properties(obj); + cpu->sve_vq.supported = (1 << 0) /* 128bit */ + | (1 << 1) /* 256bit */ + | (1 << 3); /* 512bit */ + + cpu->isar.reset_pmcr_el0 = 0x46014040; + + /* TODO: Add A64FX specific HPC extension registers */ +} + +static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = { + { .name = "ATCR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 7, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "ATCR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "ATCR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 7, .opc2 = 0, + .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "ATCR_EL12", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 5, .crn = 15, .crm = 7, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "AVTCR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 1, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUACTLR2_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUACTLR3_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 2, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* + * Report CPUCFR_EL1.SCU as 1, as we do not implement the DSU + * (and in particular its system registers). 
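The sve_vq.supported mask set for the A64FX above encodes one vector length per bit: bit (vq - 1) set means vq quadwords (vq * 128 bits) is selectable, so bits 0, 1 and 3 give 128-, 256- and 512-bit vectors while 384-bit (bit 2) stays disabled. A small sketch of that decoding:

/* Sketch: test whether a vector length of vq * 128 bits is permitted
 * by a supported-lengths bitmap laid out as described above. */
static bool vq_is_supported(uint32_t map, unsigned vq)
{
    return vq >= 1 && vq <= 32 && ((map >> (vq - 1)) & 1);
}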
+ */ + { .name = "CPUCFR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 0, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 4 }, + { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 4, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0x961563010 }, + { .name = "CPUPCR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 1, + .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUPMR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 3, + .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUPOR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 2, + .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUPSELR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 0, + .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPUPWRCTLR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 7, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "ERXPFGCDN_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 2, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "ERXPFGCTL_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "ERXPFGF_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, +}; + +static void define_neoverse_n1_cp_reginfo(ARMCPU *cpu) +{ + define_arm_cp_regs(cpu, neoverse_n1_cp_reginfo); +} + +static void aarch64_neoverse_n1_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,neoverse-n1"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + + /* Ordered by B2.4 AArch64 registers by functional group */ + cpu->clidr = 0x82000023; + cpu->ctr = 0x8444c004; + cpu->dcz_blocksize = 4; + cpu->isar.id_aa64dfr0 = 0x0000000110305408ull; + cpu->isar.id_aa64isar0 = 0x0000100010211120ull; + cpu->isar.id_aa64isar1 = 0x0000000000100001ull; + cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull; + cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; + cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull; + cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */ + cpu->isar.id_aa64pfr1 = 0x0000000000000020ull; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_dfr0 = 0x04010088; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00010142; + cpu->isar.id_isar5 = 0x01011121; + cpu->isar.id_isar6 = 0x00000010; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02122211; + cpu->isar.id_mmfr4 = 0x00021110; + cpu->isar.id_pfr0 = 0x10010131; + cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */ + cpu->isar.id_pfr2 = 0x00000011; + cpu->midr = 0x414fd0c1; 
/* r4p1 */ + cpu->revidr = 0; + + /* From B2.23 CCSIDR_EL1 */ + cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */ + cpu->ccsidr[2] = 0x70ffe03a; /* 1MB L2 cache */ + + /* From B2.98 SCTLR_EL3 */ + cpu->reset_sctlr = 0x30c50838; + + /* From B4.23 ICH_VTR_EL2 */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + + /* From B5.1 AdvSIMD AArch64 register summary */ + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x13211111; + cpu->isar.mvfr2 = 0x00000043; + + /* From D5.1 AArch64 PMU register summary */ + cpu->isar.reset_pmcr_el0 = 0x410c3000; + + define_neoverse_n1_cp_reginfo(cpu); +} + +/* + * -cpu max: a CPU with as many features enabled as our emulation supports. + * The version of '-cpu max' for qemu-system-arm is defined in cpu32.c; + * this only needs to handle 64 bits. + */ +void aarch64_max_tcg_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + uint64_t t; + uint32_t u; + + /* + * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real + * one and try to apply errata workarounds or use impdef features we + * don't provide. + * An IMPLEMENTER field of 0 means "reserved for software use"; + * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers + * to see which features are present"; + * the VARIANT, PARTNUM and REVISION fields are all implementation + * defined and we choose to define PARTNUM just in case guest + * code needs to distinguish this QEMU CPU from other software + * implementations, though this shouldn't be needed. + */ + t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0); + t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf); + t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q'); + t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0); + t = FIELD_DP64(t, MIDR_EL1, REVISION, 0); + cpu->midr = t; + + /* + * We're going to set FEAT_S2FWB, which mandates that CLIDR_EL1.{LoUU,LoUIS} + * are zero. 
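As a cross-check of the MIDR_EL1 construction above, the architected layout (IMPLEMENTER [31:24], VARIANT [23:20], ARCHITECTURE [19:16], PARTNUM [15:4], REVISION [3:0]) makes the deposited fields compose to a single concrete value:

/* (0 << 24) | (0 << 20) | (0xf << 16) | ('Q' << 4) | 0
 *   = 0x000f0000 | (0x51 << 4)
 *   = 0x000f0510, the MIDR_EL1 the 'max' CPU reports to guests.
 */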
+ */ + u = cpu->clidr; + u = FIELD_DP32(u, CLIDR_EL1, LOUIS, 0); + u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0); + cpu->clidr = u; + + t = cpu->isar.id_aa64isar0; + t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */ + t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */ + t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* FEAT_SHA512 */ + t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1); + t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2); /* FEAT_LSE */ + t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1); /* FEAT_RDM */ + t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1); /* FEAT_SHA3 */ + t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1); /* FEAT_SM3 */ + t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1); /* FEAT_SM4 */ + t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1); /* FEAT_DotProd */ + t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1); /* FEAT_FHM */ + t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* FEAT_FlagM2 */ + t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */ + t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1); /* FEAT_RNG */ + cpu->isar.id_aa64isar0 = t; + + t = cpu->isar.id_aa64isar1; + t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */ + t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); /* FEAT_JSCVT */ + t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); /* FEAT_FCMA */ + t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* FEAT_LRCPC2 */ + t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1); /* FEAT_FRINTTS */ + t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1); /* FEAT_SB */ + t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1); /* FEAT_SPECRES */ + t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1); /* FEAT_BF16 */ + t = FIELD_DP64(t, ID_AA64ISAR1, DGH, 1); /* FEAT_DGH */ + t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */ + cpu->isar.id_aa64isar1 = t; + + t = cpu->isar.id_aa64pfr0; + t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */ + t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */ + t = FIELD_DP64(t, ID_AA64PFR0, RAS, 2); /* FEAT_RASv1p1 + FEAT_DoubleFault */ + t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1); + t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */ + t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */ + t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 2); /* FEAT_CSV2_2 */ + t = FIELD_DP64(t, ID_AA64PFR0, CSV3, 1); /* FEAT_CSV3 */ + cpu->isar.id_aa64pfr0 = t; + + t = cpu->isar.id_aa64pfr1; + t = FIELD_DP64(t, ID_AA64PFR1, BT, 1); /* FEAT_BTI */ + t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2); /* FEAT_SSBS2 */ + /* + * Begin with full support for MTE. This will be downgraded to MTE=0 + * during realize if the board provides no tag memory, much like + * we do for EL2 with the virtualization=on property. 
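Every line in the long FIELD_DP64() runs above follows the same read-modify-write pattern: deposit a new value into one named field of an ID register while leaving the other fields untouched. A minimal sketch of the underlying operation; the real macro in hw/registerfields.h derives the shift and length from the constants that FIELD() generates:

/* Sketch of what FIELD_DP64(reg, REG, FIELD, val) performs:
 * clear the field's bit range, then OR in the new value. */
static uint64_t deposit_field64(uint64_t reg, unsigned shift, unsigned len,
                                uint64_t val)
{
    uint64_t mask = (len >= 64 ? ~0ull : (1ull << len) - 1) << shift;
    return (reg & ~mask) | ((val << shift) & mask);
}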
+ */ + t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */ + t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0); /* FEAT_RASv1p1 + FEAT_DoubleFault */ + t = FIELD_DP64(t, ID_AA64PFR1, SME, 1); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */ + cpu->isar.id_aa64pfr1 = t; + + t = cpu->isar.id_aa64mmfr0; + t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */ + t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 1); /* 16k pages supported */ + t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 2); /* 16k stage2 supported */ + t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN64_2, 2); /* 64k stage2 supported */ + t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2); /* 4k stage2 supported */ + t = FIELD_DP64(t, ID_AA64MMFR0, FGT, 1); /* FEAT_FGT */ + cpu->isar.id_aa64mmfr0 = t; + + t = cpu->isar.id_aa64mmfr1; + t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2); /* FEAT_HAFDBS */ + t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */ + t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */ + t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* FEAT_HPDS */ + t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */ + t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 3); /* FEAT_PAN3 */ + t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */ + t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 1); /* FEAT_ETS */ + t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */ + cpu->isar.id_aa64mmfr1 = t; + + t = cpu->isar.id_aa64mmfr2; + t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* FEAT_TTCNP */ + t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); /* FEAT_UAO */ + t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */ + t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */ + t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* FEAT_TTST */ + t = FIELD_DP64(t, ID_AA64MMFR2, IDS, 1); /* FEAT_IDST */ + t = FIELD_DP64(t, ID_AA64MMFR2, FWB, 1); /* FEAT_S2FWB */ + t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */ + t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */ + t = FIELD_DP64(t, ID_AA64MMFR2, EVT, 2); /* FEAT_EVT */ + t = FIELD_DP64(t, ID_AA64MMFR2, E0PD, 1); /* FEAT_E0PD */ + cpu->isar.id_aa64mmfr2 = t; + + t = cpu->isar.id_aa64zfr0; + t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1); + t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* FEAT_SVE_PMULL128 */ + t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1); /* FEAT_SVE_BitPerm */ + t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1); /* FEAT_BF16 */ + t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1); /* FEAT_SVE_SHA3 */ + t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1); /* FEAT_SVE_SM4 */ + t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1); /* FEAT_I8MM */ + t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1); /* FEAT_F32MM */ + t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1); /* FEAT_F64MM */ + cpu->isar.id_aa64zfr0 = t; + + t = cpu->isar.id_aa64dfr0; + t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 9); /* FEAT_Debugv8p4 */ + t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 6); /* FEAT_PMUv3p5 */ + cpu->isar.id_aa64dfr0 = t; + + t = cpu->isar.id_aa64smfr0; + t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1); /* FEAT_SME_F64F64 */ + t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */ + t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1); /* FEAT_SME_FA64 */ + cpu->isar.id_aa64smfr0 = t; + + /* Replicate the same data to the 32-bit id registers. 
*/ + aa32_max_features(cpu); + +#ifdef CONFIG_USER_ONLY + /* + * For usermode -cpu max we can use a larger and more efficient DCZ + * blocksize since we don't have to follow what the hardware does. + */ + cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */ + cpu->dcz_blocksize = 7; /* 512 bytes */ +#endif + + cpu->sve_vq.supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ); + cpu->sme_vq.supported = SVE_VQ_POW2_MAP; + + aarch64_add_pauth_properties(obj); + aarch64_add_sve_properties(obj); + aarch64_add_sme_properties(obj); + object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq, + cpu_max_set_sve_max_vq, NULL, NULL); + qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property); +} + +static const ARMCPUInfo aarch64_cpus[] = { + { .name = "cortex-a35", .initfn = aarch64_a35_initfn }, + { .name = "cortex-a55", .initfn = aarch64_a55_initfn }, + { .name = "cortex-a72", .initfn = aarch64_a72_initfn }, + { .name = "cortex-a76", .initfn = aarch64_a76_initfn }, + { .name = "a64fx", .initfn = aarch64_a64fx_initfn }, + { .name = "neoverse-n1", .initfn = aarch64_neoverse_n1_initfn }, +}; + +static void aarch64_cpu_register_types(void) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) { + aarch64_cpu_register(&aarch64_cpus[i]); + } +} + +type_init(aarch64_cpu_register_types) diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index 0972a4bdd0..c3edf163be 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" #include "qemu/log.h" diff --git a/target/arm/helper-a64.h b/target/arm/tcg/helper-a64.h similarity index 100% rename from target/arm/helper-a64.h rename to target/arm/tcg/helper-a64.h diff --git a/target/arm/helper-mve.h b/target/arm/tcg/helper-mve.h similarity index 100% rename from target/arm/helper-mve.h rename to target/arm/tcg/helper-mve.h diff --git a/target/arm/helper-sme.h b/target/arm/tcg/helper-sme.h similarity index 100% rename from target/arm/helper-sme.h rename to target/arm/tcg/helper-sme.h diff --git a/target/arm/helper-sve.h b/target/arm/tcg/helper-sve.h similarity index 100% rename from target/arm/helper-sve.h rename to target/arm/tcg/helper-sve.h diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c index f94e87e728..9cef70e5c9 100644 --- a/target/arm/tcg/m_helper.c +++ b/target/arm/tcg/m_helper.c @@ -9,6 +9,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internals.h" +#include "gdbstub/helpers.h" #include "exec/helper-proto.h" #include "qemu/main-loop.h" #include "qemu/bitops.h" @@ -56,7 +57,7 @@ static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el) return xpsr_read(env) & mask; } -static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure) +uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure) { uint32_t value = env->v7m.control[secure]; @@ -93,7 +94,7 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) case 0 ... 7: /* xPSR sub-fields */ return v7m_mrs_xpsr(env, reg, 0); case 20: /* CONTROL */ - return v7m_mrs_control(env, 0); + return arm_v7m_mrs_control(env, 0); default: /* Unprivileged reads others as zero. 
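The dcz_blocksize values used throughout this file (4 on the real cores, 6 on the A64FX, 7 for the usermode 'max' case above) are DCZID_EL0.BS encodings, i.e. log2 of the block size in words. A sketch of the conversion the inline comments assume:

/* DCZID_EL0.BS is log2(words), so the DC ZVA block size in bytes is
 * 4 << BS: 4 -> 64 bytes, 6 -> 256 bytes, 7 -> 512 bytes. */
static unsigned dcz_block_bytes(unsigned bs)
{
    return 4u << bs;
}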
*/ return 0; @@ -650,42 +651,6 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) arm_rebuild_hflags(env); } -static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, - bool spsel) -{ - /* - * Return a pointer to the location where we currently store the - * stack pointer for the requested security state and thread mode. - * This pointer will become invalid if the CPU state is updated - * such that the stack pointers are switched around (eg changing - * the SPSEL control bit). - * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). - * Unlike that pseudocode, we require the caller to pass us in the - * SPSEL control bit value; this is because we also use this - * function in handling of pushing of the callee-saves registers - * part of the v8M stack frame (pseudocode PushCalleeStack()), - * and in the tailchain codepath the SPSEL bit comes from the exception - * return magic LR value from the previous exception. The pseudocode - * opencodes the stack-selection in PushCalleeStack(), but we prefer - * to make this utility function generic enough to do the job. - */ - bool want_psp = threadmode && spsel; - - if (secure == env->v7m.secure) { - if (want_psp == v7m_using_psp(env)) { - return &env->regs[13]; - } else { - return &env->v7m.other_sp; - } - } else { - if (want_psp) { - return &env->v7m.other_ss_psp; - } else { - return &env->v7m.other_ss_msp; - } - } -} - static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, uint32_t *pvec) { @@ -810,8 +775,8 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain, !mode; mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv); - frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode, - lr & R_V7M_EXCRET_SPSEL_MASK); + frame_sp_p = arm_v7m_get_sp_ptr(env, M_REG_S, mode, + lr & R_V7M_EXCRET_SPSEL_MASK); want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK); if (want_psp) { limit = env->v7m.psplim[M_REG_S]; @@ -1656,10 +1621,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu) * use 'frame_sp_p' after we do something that makes it invalid. */ bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK; - uint32_t *frame_sp_p = get_v7m_sp_ptr(env, - return_to_secure, - !return_to_handler, - spsel); + uint32_t *frame_sp_p = arm_v7m_get_sp_ptr(env, return_to_secure, + !return_to_handler, spsel); uint32_t frameptr = *frame_sp_p; bool pop_ok = true; ARMMMUIdx mmu_idx; @@ -1965,7 +1928,7 @@ static bool do_v7m_function_return(ARMCPU *cpu) threadmode = !arm_v7m_is_handler_mode(env); spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK; - frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel); + frame_sp_p = arm_v7m_get_sp_ptr(env, true, threadmode, spsel); frameptr = *frame_sp_p; /* @@ -1974,8 +1937,8 @@ static bool do_v7m_function_return(ARMCPU *cpu) */ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx)); - newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0); - newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0); + newpc = cpu_ldl_mmu(env, frameptr, oi, 0); + newpsr = cpu_ldl_mmu(env, frameptr + 4, oi, 0); /* Consistency checks on new IPSR */ newpsr_exc = newpsr & XPSR_EXCP; @@ -2465,7 +2428,7 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) case 0 ... 
7: /* xPSR sub-fields */ return v7m_mrs_xpsr(env, reg, el); case 20: /* CONTROL */ - return v7m_mrs_control(env, env->v7m.secure); + return arm_v7m_mrs_control(env, env->v7m.secure); case 0x94: /* CONTROL_NS */ /* * We have to handle this here because unprivileged Secure code @@ -2900,3 +2863,39 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) } #endif /* !CONFIG_USER_ONLY */ + +uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure, bool threadmode, + bool spsel) +{ + /* + * Return a pointer to the location where we currently store the + * stack pointer for the requested security state and thread mode. + * This pointer will become invalid if the CPU state is updated + * such that the stack pointers are switched around (eg changing + * the SPSEL control bit). + * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). + * Unlike that pseudocode, we require the caller to pass us in the + * SPSEL control bit value; this is because we also use this + * function in handling of pushing of the callee-saves registers + * part of the v8M stack frame (pseudocode PushCalleeStack()), + * and in the tailchain codepath the SPSEL bit comes from the exception + * return magic LR value from the previous exception. The pseudocode + * opencodes the stack-selection in PushCalleeStack(), but we prefer + * to make this utility function generic enough to do the job. + */ + bool want_psp = threadmode && spsel; + + if (secure == env->v7m.secure) { + if (want_psp == v7m_using_psp(env)) { + return &env->regs[13]; + } else { + return &env->v7m.other_sp; + } + } else { + if (want_psp) { + return &env->v7m.other_ss_psp; + } else { + return &env->v7m.other_ss_msp; + } + } +} diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build index d27e76af6c..130ed62fcd 100644 --- a/target/arm/tcg/meson.build +++ b/target/arm/tcg/meson.build @@ -13,11 +13,13 @@ gen = [ decodetree.process('a32-uncond.decode', extra_args: '--static-decode=disas_a32_uncond'), decodetree.process('t32.decode', extra_args: '--static-decode=disas_t32'), decodetree.process('t16.decode', extra_args: ['-w', '16', '--static-decode=disas_t16']), + decodetree.process('a64.decode', extra_args: ['--static-decode=disas_a64']), ] arm_ss.add(gen) arm_ss.add(files( + 'cpu32.c', 'translate.c', 'translate-m-nocp.c', 'translate-mve.c', @@ -35,6 +37,7 @@ arm_ss.add(files( )) arm_ss.add(when: 'TARGET_AARCH64', if_true: files( + 'cpu64.c', 'translate-a64.c', 'translate-sve.c', 'translate-sme.c', diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c index 98bcf59c22..a4f3f92bc0 100644 --- a/target/arm/tcg/mte_helper.c +++ b/target/arm/tcg/mte_helper.c @@ -25,6 +25,7 @@ #include "exec/ram_addr.h" #include "exec/cpu_ldst.h" #include "exec/helper-proto.h" +#include "hw/core/tcg-cpu-ops.h" #include "qapi/error.h" #include "qemu/guest-random.h" @@ -118,7 +119,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, * valid. Indicate to probe_access_flags no-fault, then assert that * we received a valid page. 
*/ - flags = probe_access_full(env, ptr, ptr_access, ptr_mmu_idx, + flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx, ra == 0, &host, &full, ra); assert(!(flags & TLB_INVALID_MASK)); @@ -154,7 +155,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, */ in_page = -(ptr | TARGET_PAGE_MASK); if (unlikely(ptr_size > in_page)) { - flags |= probe_access_full(env, ptr + in_page, ptr_access, + flags |= probe_access_full(env, ptr + in_page, 0, ptr_access, ptr_mmu_idx, ra == 0, &host, &full, ra); assert(!(flags & TLB_INVALID_MASK)); } diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c index d0483bf051..62af569341 100644 --- a/target/arm/tcg/pauth_helper.c +++ b/target/arm/tcg/pauth_helper.c @@ -293,7 +293,7 @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier, ARMPACKey *key, bool data) { ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); - ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data); + ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data, false); uint64_t pac, ext_ptr, ext, test; int bot_bit, top_bit; @@ -341,19 +341,21 @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier, static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param) { - /* Note that bit 55 is used whether or not the regime has 2 ranges. */ - uint64_t extfield = sextract64(ptr, 55, 1); - int bot_pac_bit = 64 - param.tsz; - int top_pac_bit = 64 - 8 * param.tbi; + uint64_t mask = pauth_ptr_mask(param); - return deposit64(ptr, bot_pac_bit, top_pac_bit - bot_pac_bit, extfield); + /* Note that bit 55 is used whether or not the regime has 2 ranges. */ + if (extract64(ptr, 55, 1)) { + return ptr | mask; + } else { + return ptr & ~mask; + } } static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier, ARMPACKey *key, bool data, int keynumber) { ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); - ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data); + ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data, false); int bot_bit, top_bit; uint64_t pac, orig_ptr, test; @@ -377,7 +379,7 @@ static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier, static uint64_t pauth_strip(CPUARMState *env, uint64_t ptr, bool data) { ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); - ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data); + ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data, false); return pauth_original_ptr(ptr, param); } diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 521fc9b969..0097522470 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -27,6 +27,7 @@ #include "tcg/tcg.h" #include "vec_internal.h" #include "sve_ldst_internal.h" +#include "hw/core/tcg-cpu-ops.h" /* Return a value for NZCV as per the ARM PredTest pseudofunction. 
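The pauth_original_ptr() rewrite above replaces a deposit of the sign-extended bit 55 with a single OR or AND-NOT of a precomputed mask, which is equivalent as long as the mask covers exactly the PAC bits. A sketch of the identity, with pac_mask standing in for the value assumed to come from pauth_ptr_mask(param):

/* Fill every PAC bit with a copy of pointer bit 55: same result as the
 * old deposit64() of sextract64(ptr, 55, 1) across the PAC field. */
static uint64_t strip_pac(uint64_t ptr, uint64_t pac_mask)
{
    return (ptr & (1ull << 55)) ? (ptr | pac_mask) : (ptr & ~pac_mask);
}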
@@ -5352,11 +5353,11 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env, addr = useronly_clean_ptr(addr); #ifdef CONFIG_USER_ONLY - flags = probe_access_flags(env, addr, access_type, mmu_idx, nofault, + flags = probe_access_flags(env, addr, 0, access_type, mmu_idx, nofault, &info->host, retaddr); #else CPUTLBEntryFull *full; - flags = probe_access_full(env, addr, access_type, mmu_idx, nofault, + flags = probe_access_full(env, addr, 0, access_type, mmu_idx, nofault, &info->host, &full, retaddr); #endif info->flags = flags; @@ -6726,6 +6727,7 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, intptr_t reg_off; SVEHostPage info; target_ulong addr, in_page; + ARMVectorReg scratch; /* Skip to the first true predicate. */ reg_off = find_next_active(vg, 0, reg_max, esz); @@ -6735,6 +6737,11 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, return; } + /* Protect against overlap between vd and vm. */ + if (unlikely(vd == vm)) { + vm = memcpy(&scratch, vm, reg_max); + } + /* * Probe the first element, allowing faults. */ diff --git a/target/arm/sve_ldst_internal.h b/target/arm/tcg/sve_ldst_internal.h similarity index 100% rename from target/arm/sve_ldst_internal.h rename to target/arm/tcg/sve_ldst_internal.h diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c index 31eb77f7df..8df36c2cbf 100644 --- a/target/arm/tcg/tlb_helper.c +++ b/target/arm/tcg/tlb_helper.c @@ -24,16 +24,17 @@ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) } static inline uint32_t merge_syn_data_abort(uint32_t template_syn, + ARMMMUFaultInfo *fi, unsigned int target_el, - bool same_el, bool ea, - bool s1ptw, bool is_write, + bool same_el, bool is_write, int fsc) { uint32_t syn; /* - * ISV is only set for data aborts routed to EL2 and - * never for stage-1 page table walks faulting on stage 2. + * ISV is only set for stage-2 data aborts routed to EL2 and + * never for stage-1 page table walks faulting on stage 2 + * or for stage-1 faults. * * Furthermore, ISV is only set for certain kinds of load/stores. * If the template syndrome does not have ISV set, we should leave @@ -42,10 +43,16 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn, * See ARMv8 specs, D7-1974: * ISS encoding for an exception from a Data Abort, the * ISV field. + * + * TODO: FEAT_LS64/FEAT_LS64_V/FEAT_LS64_ACCDATA: Translation, + * Access Flag, and Permission faults caused by LD64B, ST64B, + * ST64BV, or ST64BV0 insns report syndrome info even for stage-1 + * faults and regardless of the target EL. */ - if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) { + if (!(template_syn & ARM_EL_ISV) || target_el != 2 + || fi->s1ptw || !fi->stage2) { syn = syn_data_abort_no_iss(same_el, 0, - ea, 0, s1ptw, is_write, fsc); + fi->ea, 0, fi->s1ptw, is_write, fsc); } else { /* * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template @@ -54,7 +61,7 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn, */ syn = syn_data_abort_with_iss(same_el, 0, 0, 0, 0, 0, - ea, 0, s1ptw, is_write, fsc, + fi->ea, 0, fi->s1ptw, is_write, fsc, true); /* Merge the runtime syndrome with the template syndrome.
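Restating the widened ISV guard above as a positive predicate may help when reading it; the field names are those of ARMMMUFaultInfo as used by the new code:

/* Sketch: a template syndrome with ISV set may only be merged for a
 * stage-2 data abort routed to EL2 that is not a stage-1 page table
 * walk faulting on stage 2; every other abort takes the no-ISS form. */
static bool can_use_template_iss(uint32_t syn, unsigned target_el,
                                 const ARMMMUFaultInfo *fi)
{
    return (syn & ARM_EL_ISV) && target_el == 2 && !fi->s1ptw && fi->stage2;
}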
*/ syn |= template_syn; @@ -68,8 +75,17 @@ static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi, ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx); uint32_t fsr, fsc; - if (target_el == 2 || arm_el_is_aa64(env, target_el) || - arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) { + /* + * For M-profile there is no guest-facing FSR. We compute a + * short-form value for env->exception.fsr which we will then + * examine in arm_v7m_cpu_do_interrupt(). In theory we could + * use the LPAE format instead as long as both bits of code agree + * (and arm_fi_to_lfsc() handled the M-profile specific + * ARMFault_QEMU_NSCExec and ARMFault_QEMU_SFault cases). + */ + if (!arm_feature(env, ARM_FEATURE_M) && + (target_el == 2 || arm_el_is_aa64(env, target_el) || + arm_s1_regime_using_lpae_format(env, arm_mmu_idx))) { /* * LPAE format fault status register : bottom 6 bits are * status code in the same form as needed for syndrome @@ -117,9 +133,8 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr, syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc); exc = EXCP_PREFETCH_ABORT; } else { - syn = merge_syn_data_abort(env->exception.syndrome, target_el, - same_el, fi->ea, fi->s1ptw, - access_type == MMU_DATA_STORE, + syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el, + same_el, access_type == MMU_DATA_STORE, fsc); if (access_type == MMU_DATA_STORE && arm_feature(env, ARM_FEATURE_V6)) { diff --git a/target/arm/translate-a32.h b/target/arm/tcg/translate-a32.h similarity index 82% rename from target/arm/translate-a32.h rename to target/arm/tcg/translate-a32.h index 5339c22f1e..48a15379d2 100644 --- a/target/arm/translate-a32.h +++ b/target/arm/tcg/translate-a32.h @@ -59,13 +59,29 @@ static inline TCGv_i32 load_cpu_offset(int offset) return tmp; } -#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name)) +/* Load from a 32-bit field to a TCGv_i32 */ +#define load_cpu_field(name) \ + ({ \ + QEMU_BUILD_BUG_ON(sizeof_field(CPUARMState, name) != 4); \ + load_cpu_offset(offsetof(CPUARMState, name)); \ + }) + +/* Load from the low half of a 64-bit field to a TCGv_i32 */ +#define load_cpu_field_low32(name) \ + ({ \ + QEMU_BUILD_BUG_ON(sizeof_field(CPUARMState, name) != 8); \ + load_cpu_offset(offsetoflow32(CPUARMState, name)); \ + }) void store_cpu_offset(TCGv_i32 var, int offset, int size); -#define store_cpu_field(var, name) \ - store_cpu_offset(var, offsetof(CPUARMState, name), \ - sizeof_field(CPUARMState, name)) +#define store_cpu_field(val, name) \ + ({ \ + QEMU_BUILD_BUG_ON(sizeof_field(CPUARMState, name) != 4 \ + && sizeof_field(CPUARMState, name) != 1); \ + store_cpu_offset(val, offsetof(CPUARMState, name), \ + sizeof_field(CPUARMState, name)); \ + }) #define store_cpu_field_constant(val, name) \ store_cpu_field(tcg_constant_i32(val), name) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index c2ffd82e17..9ba5720010 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -56,6 +56,13 @@ enum a64_shift_type { A64_SHIFT_TYPE_ROR = 3 }; +/* + * Include the generated decoders. + */ + +#include "decode-sme-fa64.c.inc" +#include "decode-a64.c.inc" + /* Table based decoder typedefs - used when the relevant bits for decode * are too awkwardly scattered across the instruction (eg SIMD). 
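The stricter load_cpu_field()/store_cpu_field() macros above turn a size mismatch into a compile-time failure rather than a silent partial load. Illustrative uses, assuming the CPUARMState fields named here (the v7m.control entries are 32-bit, cp15.c15_ccnt is 64-bit):

/* 32-bit field: accepted as before. */
TCGv_i32 ctl = load_cpu_field(v7m.control[M_REG_NS]);
/* 64-bit field: must now be spelled explicitly as a low-half load. */
TCGv_i32 cnt = load_cpu_field_low32(cp15.c15_ccnt);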
*/ @@ -143,7 +150,7 @@ static void reset_btype(DisasContext *s) static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff) { assert(s->pc_save != -1); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff); } else { tcg_gen_movi_i64(dest, s->pc_curr + diff); @@ -224,7 +231,7 @@ static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src) TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr) { - TCGv_i64 clean = new_tmp_a64(s); + TCGv_i64 clean = tcg_temp_new_i64(); #ifdef CONFIG_USER_ONLY gen_top_byte_ignore(s, clean, addr, s->tbid); #else @@ -269,7 +276,7 @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr, desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1); - ret = new_tmp_a64(s); + ret = tcg_temp_new_i64(); gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr); return ret; @@ -300,7 +307,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1); - ret = new_tmp_a64(s); + ret = tcg_temp_new_i64(); gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr); return ret; @@ -319,18 +326,13 @@ static void a64_test_cc(DisasCompare64 *c64, int cc) arm_test_cc(&c32, cc); - /* Sign-extend the 32-bit value so that the GE/LT comparisons work - * properly. The NE/EQ comparisons are also fine with this choice. */ + /* + * Sign-extend the 32-bit value so that the GE/LT comparisons work + * properly. The NE/EQ comparisons are also fine with this choice. + */ c64->cond = c32.cond; c64->value = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(c64->value, c32.value); - - arm_free_cc(&c32); -} - -static void a64_free_cc(DisasCompare64 *c64) -{ - tcg_temp_free_i64(c64->value); } static void gen_rebuild_hflags(DisasContext *s) @@ -393,7 +395,7 @@ static void gen_goto_tb(DisasContext *s, int n, int64_t diff) * update to pc to the unlinked path. A long chain of links * can thus avoid many updates to the PC. 
*/ - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { gen_a64_update_pc(s, diff); tcg_gen_goto_tb(n); } else { @@ -413,42 +415,6 @@ static void gen_goto_tb(DisasContext *s, int n, int64_t diff) } } -static void init_tmp_a64_array(DisasContext *s) -{ -#ifdef CONFIG_DEBUG_TCG - memset(s->tmp_a64, 0, sizeof(s->tmp_a64)); -#endif - s->tmp_a64_count = 0; -} - -static void free_tmp_a64(DisasContext *s) -{ - int i; - for (i = 0; i < s->tmp_a64_count; i++) { - tcg_temp_free_i64(s->tmp_a64[i]); - } - init_tmp_a64_array(s); -} - -TCGv_i64 new_tmp_a64(DisasContext *s) -{ - assert(s->tmp_a64_count < TMP_A64_MAX); - return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64(); -} - -TCGv_i64 new_tmp_a64_local(DisasContext *s) -{ - assert(s->tmp_a64_count < TMP_A64_MAX); - return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_local_new_i64(); -} - -TCGv_i64 new_tmp_a64_zero(DisasContext *s) -{ - TCGv_i64 t = new_tmp_a64(s); - tcg_gen_movi_i64(t, 0); - return t; -} - /* * Register access functions * @@ -467,7 +433,9 @@ TCGv_i64 new_tmp_a64_zero(DisasContext *s) TCGv_i64 cpu_reg(DisasContext *s, int reg) { if (reg == 31) { - return new_tmp_a64_zero(s); + TCGv_i64 t = tcg_temp_new_i64(); + tcg_gen_movi_i64(t, 0); + return t; } else { return cpu_X[reg]; } @@ -485,7 +453,7 @@ TCGv_i64 cpu_reg_sp(DisasContext *s, int reg) */ TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf) { - TCGv_i64 v = new_tmp_a64(s); + TCGv_i64 v = tcg_temp_new_i64(); if (reg != 31) { if (sf) { tcg_gen_mov_i64(v, cpu_X[reg]); @@ -500,7 +468,7 @@ TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf) TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf) { - TCGv_i64 v = new_tmp_a64(s); + TCGv_i64 v = tcg_temp_new_i64(); if (sf) { tcg_gen_mov_i64(v, cpu_X[reg]); } else { @@ -581,7 +549,6 @@ static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v) tcg_gen_extu_i32_i64(tmp, v); write_fp_dreg(s, reg, tmp); - tcg_temp_free_i64(tmp); } /* Expand a 2-operand AdvSIMD vector operation using an expander function. */ @@ -650,7 +617,6 @@ static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn, vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), fpst, is_q ? 16 : 8, vec_full_reg_size(s), data, fn); - tcg_temp_free_ptr(fpst); } /* Expand a 3-operand + qc + operation using an out-of-line helper. */ @@ -664,7 +630,6 @@ static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn, vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), qc_ptr, is_q ? 16 : 8, vec_full_reg_size(s), 0, fn); - tcg_temp_free_ptr(qc_ptr); } /* Expand a 4-operand operation using an out-of-line helper. */ @@ -692,7 +657,6 @@ static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn, vec_full_reg_offset(s, rm), vec_full_reg_offset(s, ra), fpst, is_q ? 16 : 8, vec_full_reg_size(s), data, fn); - tcg_temp_free_ptr(fpst); } /* Set ZF and NF based on a 64 bit result. 
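For reference when reading the consolidated gen_add*/gen_sub* CC helpers just below: their xor/andc sequences implement the standard signed-overflow rule, with the top bit (63 or 31) of the intermediate becoming V. A sketch of the two expressions:

/* Addition overflows iff the operands share a sign and the result's
 * sign differs: V = msb((res ^ t0) & ~(t0 ^ t1)). */
static bool add_overflows_i64(uint64_t t0, uint64_t t1, uint64_t res)
{
    return (((res ^ t0) & ~(t0 ^ t1)) >> 63) & 1;
}

/* Subtraction flips the second operand's sign in that rule:
 * V = msb((res ^ t0) & (t0 ^ t1)). */
static bool sub_overflows_i64(uint64_t t0, uint64_t t1, uint64_t res)
{
    return (((res ^ t0) & (t0 ^ t1)) >> 63) & 1;
}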
This is alas fiddlier @@ -718,96 +682,102 @@ static inline void gen_logic_CC(int sf, TCGv_i64 result) } /* dest = T0 + T1; compute C, N, V and Z flags */ +static void gen_add64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) +{ + TCGv_i64 result, flag, tmp; + result = tcg_temp_new_i64(); + flag = tcg_temp_new_i64(); + tmp = tcg_temp_new_i64(); + + tcg_gen_movi_i64(tmp, 0); + tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp); + + tcg_gen_extrl_i64_i32(cpu_CF, flag); + + gen_set_NZ64(result); + + tcg_gen_xor_i64(flag, result, t0); + tcg_gen_xor_i64(tmp, t0, t1); + tcg_gen_andc_i64(flag, flag, tmp); + tcg_gen_extrh_i64_i32(cpu_VF, flag); + + tcg_gen_mov_i64(dest, result); +} + +static void gen_add32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) +{ + TCGv_i32 t0_32 = tcg_temp_new_i32(); + TCGv_i32 t1_32 = tcg_temp_new_i32(); + TCGv_i32 tmp = tcg_temp_new_i32(); + + tcg_gen_movi_i32(tmp, 0); + tcg_gen_extrl_i64_i32(t0_32, t0); + tcg_gen_extrl_i64_i32(t1_32, t1); + tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp); + tcg_gen_mov_i32(cpu_ZF, cpu_NF); + tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32); + tcg_gen_xor_i32(tmp, t0_32, t1_32); + tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp); + tcg_gen_extu_i32_i64(dest, cpu_NF); +} + static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) { if (sf) { - TCGv_i64 result, flag, tmp; - result = tcg_temp_new_i64(); - flag = tcg_temp_new_i64(); - tmp = tcg_temp_new_i64(); - - tcg_gen_movi_i64(tmp, 0); - tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp); - - tcg_gen_extrl_i64_i32(cpu_CF, flag); - - gen_set_NZ64(result); - - tcg_gen_xor_i64(flag, result, t0); - tcg_gen_xor_i64(tmp, t0, t1); - tcg_gen_andc_i64(flag, flag, tmp); - tcg_temp_free_i64(tmp); - tcg_gen_extrh_i64_i32(cpu_VF, flag); - - tcg_gen_mov_i64(dest, result); - tcg_temp_free_i64(result); - tcg_temp_free_i64(flag); + gen_add64_CC(dest, t0, t1); } else { - /* 32 bit arithmetic */ - TCGv_i32 t0_32 = tcg_temp_new_i32(); - TCGv_i32 t1_32 = tcg_temp_new_i32(); - TCGv_i32 tmp = tcg_temp_new_i32(); - - tcg_gen_movi_i32(tmp, 0); - tcg_gen_extrl_i64_i32(t0_32, t0); - tcg_gen_extrl_i64_i32(t1_32, t1); - tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp); - tcg_gen_mov_i32(cpu_ZF, cpu_NF); - tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32); - tcg_gen_xor_i32(tmp, t0_32, t1_32); - tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp); - tcg_gen_extu_i32_i64(dest, cpu_NF); - - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(t0_32); - tcg_temp_free_i32(t1_32); + gen_add32_CC(dest, t0, t1); } } /* dest = T0 - T1; compute C, N, V and Z flags */ +static void gen_sub64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) +{ + /* 64 bit arithmetic */ + TCGv_i64 result, flag, tmp; + + result = tcg_temp_new_i64(); + flag = tcg_temp_new_i64(); + tcg_gen_sub_i64(result, t0, t1); + + gen_set_NZ64(result); + + tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1); + tcg_gen_extrl_i64_i32(cpu_CF, flag); + + tcg_gen_xor_i64(flag, result, t0); + tmp = tcg_temp_new_i64(); + tcg_gen_xor_i64(tmp, t0, t1); + tcg_gen_and_i64(flag, flag, tmp); + tcg_gen_extrh_i64_i32(cpu_VF, flag); + tcg_gen_mov_i64(dest, result); +} + +static void gen_sub32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) +{ + /* 32 bit arithmetic */ + TCGv_i32 t0_32 = tcg_temp_new_i32(); + TCGv_i32 t1_32 = tcg_temp_new_i32(); + TCGv_i32 tmp; + + tcg_gen_extrl_i64_i32(t0_32, t0); + tcg_gen_extrl_i64_i32(t1_32, t1); + tcg_gen_sub_i32(cpu_NF, t0_32, t1_32); + tcg_gen_mov_i32(cpu_ZF, cpu_NF); + tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32); + tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32); + tmp = 
tcg_temp_new_i32(); + tcg_gen_xor_i32(tmp, t0_32, t1_32); + tcg_gen_and_i32(cpu_VF, cpu_VF, tmp); + tcg_gen_extu_i32_i64(dest, cpu_NF); +} + static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) { if (sf) { - /* 64 bit arithmetic */ - TCGv_i64 result, flag, tmp; - - result = tcg_temp_new_i64(); - flag = tcg_temp_new_i64(); - tcg_gen_sub_i64(result, t0, t1); - - gen_set_NZ64(result); - - tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1); - tcg_gen_extrl_i64_i32(cpu_CF, flag); - - tcg_gen_xor_i64(flag, result, t0); - tmp = tcg_temp_new_i64(); - tcg_gen_xor_i64(tmp, t0, t1); - tcg_gen_and_i64(flag, flag, tmp); - tcg_temp_free_i64(tmp); - tcg_gen_extrh_i64_i32(cpu_VF, flag); - tcg_gen_mov_i64(dest, result); - tcg_temp_free_i64(flag); - tcg_temp_free_i64(result); + gen_sub64_CC(dest, t0, t1); } else { - /* 32 bit arithmetic */ - TCGv_i32 t0_32 = tcg_temp_new_i32(); - TCGv_i32 t1_32 = tcg_temp_new_i32(); - TCGv_i32 tmp; - - tcg_gen_extrl_i64_i32(t0_32, t0); - tcg_gen_extrl_i64_i32(t1_32, t1); - tcg_gen_sub_i32(cpu_NF, t0_32, t1_32); - tcg_gen_mov_i32(cpu_ZF, cpu_NF); - tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32); - tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32); - tmp = tcg_temp_new_i32(); - tcg_gen_xor_i32(tmp, t0_32, t1_32); - tcg_temp_free_i32(t0_32); - tcg_temp_free_i32(t1_32); - tcg_gen_and_i32(cpu_VF, cpu_VF, tmp); - tcg_temp_free_i32(tmp); - tcg_gen_extu_i32_i64(dest, cpu_NF); + gen_sub32_CC(dest, t0, t1); } } @@ -818,7 +788,6 @@ static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_gen_extu_i32_i64(flag, cpu_CF); tcg_gen_add_i64(dest, t0, t1); tcg_gen_add_i64(dest, dest, flag); - tcg_temp_free_i64(flag); if (!sf) { tcg_gen_ext32u_i64(dest, dest); @@ -847,11 +816,6 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_gen_extrh_i64_i32(cpu_VF, vf_64); tcg_gen_mov_i64(dest, result); - - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(vf_64); - tcg_temp_free_i64(cf_64); - tcg_temp_free_i64(result); } else { TCGv_i32 t0_32 = tcg_temp_new_i32(); TCGv_i32 t1_32 = tcg_temp_new_i32(); @@ -868,10 +832,6 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_gen_xor_i32(tmp, t0_32, t1_32); tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp); tcg_gen_extu_i32_i64(dest, cpu_NF); - - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(t1_32); - tcg_temp_free_i32(t0_32); } } @@ -981,12 +941,7 @@ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size) tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8); tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr, get_mem_index(s), mop); - - tcg_temp_free_i64(tcg_hiaddr); - tcg_temp_free_i64(tmphi); } - - tcg_temp_free_i64(tmplo); } /* @@ -1015,15 +970,12 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8); tcg_gen_qemu_ld_i64(be ? 
tmplo : tmphi, tcg_hiaddr, get_mem_index(s), mop); - tcg_temp_free_i64(tcg_hiaddr); } tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64)); - tcg_temp_free_i64(tmplo); if (tmphi) { tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx)); - tcg_temp_free_i64(tmphi); } clear_vec_high(s, tmphi != NULL, destidx); } @@ -1149,8 +1101,6 @@ static void do_vec_st(DisasContext *s, int srcidx, int element, read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE); tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop); - - tcg_temp_free_i64(tcg_tmp); } /* Load from memory to vector register */ @@ -1161,8 +1111,6 @@ static void do_vec_ld(DisasContext *s, int destidx, int element, tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop); write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE); - - tcg_temp_free_i64(tcg_tmp); } /* Check that FP/Neon access is enabled. If it is, return @@ -1371,117 +1319,279 @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table, * match up with those in the manual. */ -/* Unconditional branch (immediate) - * 31 30 26 25 0 - * +----+-----------+-------------------------------------+ - * | op | 0 0 1 0 1 | imm26 | - * +----+-----------+-------------------------------------+ - */ -static void disas_uncond_b_imm(DisasContext *s, uint32_t insn) +static bool trans_B(DisasContext *s, arg_i *a) { - int64_t diff = sextract32(insn, 0, 26) * 4; - - if (insn & (1U << 31)) { - /* BL Branch with link */ - gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s)); - } - - /* B Branch / BL Branch with link */ reset_btype(s); - gen_goto_tb(s, 0, diff); + gen_goto_tb(s, 0, a->imm); + return true; } -/* Compare and branch (immediate) - * 31 30 25 24 23 5 4 0 - * +----+-------------+----+---------------------+--------+ - * | sf | 0 1 1 0 1 0 | op | imm19 | Rt | - * +----+-------------+----+---------------------+--------+ - */ -static void disas_comp_b_imm(DisasContext *s, uint32_t insn) +static bool trans_BL(DisasContext *s, arg_i *a) +{ + gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s)); + reset_btype(s); + gen_goto_tb(s, 0, a->imm); + return true; +} + + +static bool trans_CBZ(DisasContext *s, arg_cbz *a) { - unsigned int sf, op, rt; - int64_t diff; DisasLabel match; TCGv_i64 tcg_cmp; - sf = extract32(insn, 31, 1); - op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */ - rt = extract32(insn, 0, 5); - diff = sextract32(insn, 5, 19) * 4; - - tcg_cmp = read_cpu_reg(s, rt, sf); + tcg_cmp = read_cpu_reg(s, a->rt, a->sf); reset_btype(s); match = gen_disas_label(s); - tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ, + tcg_gen_brcondi_i64(a->nz ? 
TCG_COND_NE : TCG_COND_EQ, tcg_cmp, 0, match.label); gen_goto_tb(s, 0, 4); set_disas_label(s, match); - gen_goto_tb(s, 1, diff); + gen_goto_tb(s, 1, a->imm); + return true; } -/* Test and branch (immediate) - * 31 30 25 24 23 19 18 5 4 0 - * +----+-------------+----+-------+-------------+------+ - * | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt | - * +----+-------------+----+-------+-------------+------+ - */ -static void disas_test_b_imm(DisasContext *s, uint32_t insn) +static bool trans_TBZ(DisasContext *s, arg_tbz *a) { - unsigned int bit_pos, op, rt; - int64_t diff; DisasLabel match; TCGv_i64 tcg_cmp; - bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5); - op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */ - diff = sextract32(insn, 5, 14) * 4; - rt = extract32(insn, 0, 5); - tcg_cmp = tcg_temp_new_i64(); - tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos)); + tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, a->rt), 1ULL << a->bitpos); reset_btype(s); match = gen_disas_label(s); - tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ, + tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ, tcg_cmp, 0, match.label); - tcg_temp_free_i64(tcg_cmp); gen_goto_tb(s, 0, 4); set_disas_label(s, match); - gen_goto_tb(s, 1, diff); + gen_goto_tb(s, 1, a->imm); + return true; } -/* Conditional branch (immediate) - * 31 25 24 23 5 4 3 0 - * +---------------+----+---------------------+----+------+ - * | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond | - * +---------------+----+---------------------+----+------+ - */ -static void disas_cond_b_imm(DisasContext *s, uint32_t insn) +static bool trans_B_cond(DisasContext *s, arg_B_cond *a) { - unsigned int cond; - int64_t diff; - - if ((insn & (1 << 4)) || (insn & (1 << 24))) { - unallocated_encoding(s); - return; - } - diff = sextract32(insn, 5, 19) * 4; - cond = extract32(insn, 0, 4); - reset_btype(s); - if (cond < 0x0e) { + if (a->cond < 0x0e) { /* genuinely conditional branches */ DisasLabel match = gen_disas_label(s); - arm_gen_test_cc(cond, match.label); + arm_gen_test_cc(a->cond, match.label); gen_goto_tb(s, 0, 4); set_disas_label(s, match); - gen_goto_tb(s, 1, diff); + gen_goto_tb(s, 1, a->imm); } else { /* 0xe and 0xf are both "always" conditions */ - gen_goto_tb(s, 0, diff); + gen_goto_tb(s, 0, a->imm); } + return true; +} + +static void set_btype_for_br(DisasContext *s, int rn) +{ + if (dc_isar_feature(aa64_bti, s)) { + /* BR to {x16,x17} or !guard -> 1, else 3. */ + set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3); + } +} + +static void set_btype_for_blr(DisasContext *s) +{ + if (dc_isar_feature(aa64_bti, s)) { + /* BLR sets BTYPE to 2, regardless of source guarded page. 
*/ + set_btype(s, 2); + } +} + +static bool trans_BR(DisasContext *s, arg_r *a) +{ + gen_a64_set_pc(s, cpu_reg(s, a->rn)); + set_btype_for_br(s, a->rn); + s->base.is_jmp = DISAS_JUMP; + return true; +} + +static bool trans_BLR(DisasContext *s, arg_r *a) +{ + TCGv_i64 dst = cpu_reg(s, a->rn); + TCGv_i64 lr = cpu_reg(s, 30); + if (dst == lr) { + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_mov_i64(tmp, dst); + dst = tmp; + } + gen_pc_plus_diff(s, lr, curr_insn_len(s)); + gen_a64_set_pc(s, dst); + set_btype_for_blr(s); + s->base.is_jmp = DISAS_JUMP; + return true; +} + +static bool trans_RET(DisasContext *s, arg_r *a) +{ + gen_a64_set_pc(s, cpu_reg(s, a->rn)); + s->base.is_jmp = DISAS_JUMP; + return true; +} + +static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst, + TCGv_i64 modifier, bool use_key_a) +{ + TCGv_i64 truedst; + /* + * Return the branch target for a BRAA/RETA/etc, which is either + * just the destination dst, or that value with the pauth check + * done and the code removed from the high bits. + */ + if (!s->pauth_active) { + return dst; + } + + truedst = tcg_temp_new_i64(); + if (use_key_a) { + gen_helper_autia(truedst, cpu_env, dst, modifier); + } else { + gen_helper_autib(truedst, cpu_env, dst, modifier); + } + return truedst; +} + +static bool trans_BRAZ(DisasContext *s, arg_braz *a) +{ + TCGv_i64 dst; + + if (!dc_isar_feature(aa64_pauth, s)) { + return false; + } + + dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m); + gen_a64_set_pc(s, dst); + set_btype_for_br(s, a->rn); + s->base.is_jmp = DISAS_JUMP; + return true; +} + +static bool trans_BLRAZ(DisasContext *s, arg_braz *a) +{ + TCGv_i64 dst, lr; + + if (!dc_isar_feature(aa64_pauth, s)) { + return false; + } + + dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m); + lr = cpu_reg(s, 30); + if (dst == lr) { + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_mov_i64(tmp, dst); + dst = tmp; + } + gen_pc_plus_diff(s, lr, curr_insn_len(s)); + gen_a64_set_pc(s, dst); + set_btype_for_blr(s); + s->base.is_jmp = DISAS_JUMP; + return true; +} + +static bool trans_RETA(DisasContext *s, arg_reta *a) +{ + TCGv_i64 dst; + + dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m); + gen_a64_set_pc(s, dst); + s->base.is_jmp = DISAS_JUMP; + return true; +} + +static bool trans_BRA(DisasContext *s, arg_bra *a) +{ + TCGv_i64 dst; + + if (!dc_isar_feature(aa64_pauth, s)) { + return false; + } + dst = auth_branch_target(s, cpu_reg(s,a->rn), cpu_reg_sp(s, a->rm), !a->m); + gen_a64_set_pc(s, dst); + set_btype_for_br(s, a->rn); + s->base.is_jmp = DISAS_JUMP; + return true; +} + +static bool trans_BLRA(DisasContext *s, arg_bra *a) +{ + TCGv_i64 dst, lr; + + if (!dc_isar_feature(aa64_pauth, s)) { + return false; + } + dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m); + lr = cpu_reg(s, 30); + if (dst == lr) { + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_mov_i64(tmp, dst); + dst = tmp; + } + gen_pc_plus_diff(s, lr, curr_insn_len(s)); + gen_a64_set_pc(s, dst); + set_btype_for_blr(s); + s->base.is_jmp = DISAS_JUMP; + return true; +} + +static bool trans_ERET(DisasContext *s, arg_ERET *a) +{ + TCGv_i64 dst; + + if (s->current_el == 0) { + return false; + } + if (s->fgt_eret) { + gen_exception_insn_el(s, 0, EXCP_UDEF, 0, 2); + return true; + } + dst = tcg_temp_new_i64(); + tcg_gen_ld_i64(dst, cpu_env, + offsetof(CPUARMState, elr_el[s->current_el])); + + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + + gen_helper_exception_return(cpu_env, dst); + 
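/*
 * Background note (an inference, not text from this patch): gen_io_start()
 * above is the usual icount bracket. When CF_USE_ICOUNT is set, a helper
 * with timing-visible side effects has to be flagged before it runs, and
 * the exception-return helper can change EL and hence the guest's view of
 * the timers; the DISAS_EXIT that follows keeps the count consistent.
 */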
/* Must exit loop to check un-masked IRQs */ + s->base.is_jmp = DISAS_EXIT; + return true; +} + +static bool trans_ERETA(DisasContext *s, arg_reta *a) +{ + TCGv_i64 dst; + + if (!dc_isar_feature(aa64_pauth, s)) { + return false; + } + if (s->current_el == 0) { + return false; + } + /* The FGT trap takes precedence over an auth trap. */ + if (s->fgt_eret) { + gen_exception_insn_el(s, 0, EXCP_UDEF, a->m ? 3 : 2, 2); + return true; + } + dst = tcg_temp_new_i64(); + tcg_gen_ld_i64(dst, cpu_env, + offsetof(CPUARMState, elr_el[s->current_el])); + + dst = auth_branch_target(s, dst, cpu_X[31], !a->m); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + + gen_helper_exception_return(cpu_env, dst); + /* Must exit loop to check un-masked IRQs */ + s->base.is_jmp = DISAS_EXIT; + return true; } /* HINT instruction group, including various allocated HINTs */ @@ -1566,7 +1676,7 @@ static void handle_hint(DisasContext *s, uint32_t insn, case 0b11000: /* PACIAZ */ if (s->pauth_active) { gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], - new_tmp_a64_zero(s)); + tcg_constant_i64(0)); } break; case 0b11001: /* PACIASP */ @@ -1577,7 +1687,7 @@ static void handle_hint(DisasContext *s, uint32_t insn, case 0b11010: /* PACIBZ */ if (s->pauth_active) { gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], - new_tmp_a64_zero(s)); + tcg_constant_i64(0)); } break; case 0b11011: /* PACIBSP */ @@ -1588,7 +1698,7 @@ static void handle_hint(DisasContext *s, uint32_t insn, case 0b11100: /* AUTIAZ */ if (s->pauth_active) { gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], - new_tmp_a64_zero(s)); + tcg_constant_i64(0)); } break; case 0b11101: /* AUTIASP */ @@ -1599,7 +1709,7 @@ static void handle_hint(DisasContext *s, uint32_t insn, case 0b11110: /* AUTIBZ */ if (s->pauth_active) { gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], - new_tmp_a64_zero(s)); + tcg_constant_i64(0)); } break; case 0b11111: /* AUTIBSP */ @@ -1702,8 +1812,6 @@ static void gen_xaflag(void) /* C | Z */ tcg_gen_or_i32(cpu_CF, cpu_CF, z); - - tcg_temp_free_i32(z); } static void gen_axflag(void) @@ -1879,9 +1987,6 @@ static void gen_get_nzcv(TCGv_i64 tcg_rt) tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1); /* generate result */ tcg_gen_extu_i32_i64(tcg_rt, nzcv); - - tcg_temp_free_i32(nzcv); - tcg_temp_free_i32(tmp); } static void gen_set_nzcv(TCGv_i64 tcg_rt) @@ -1902,7 +2007,6 @@ static void gen_set_nzcv(TCGv_i64 tcg_rt) /* bit 28, V */ tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28)); tcg_gen_shli_i32(cpu_VF, cpu_VF, 3); - tcg_temp_free_i32(nzcv); } static void gen_sysreg_undef(DisasContext *s, bool isread, @@ -1988,7 +2092,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, case 0: break; case ARM_CP_NOP: - goto exit; + return; case ARM_CP_NZCV: tcg_rt = cpu_reg(s, rt); if (isread) { @@ -1996,14 +2100,14 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, } else { gen_set_nzcv(tcg_rt); } - goto exit; + return; case ARM_CP_CURRENTEL: /* Reads as current EL value from pstate, which is * guaranteed to be constant by the tb flags. */ tcg_rt = cpu_reg(s, rt); tcg_gen_movi_i64(tcg_rt, s->current_el << 2); - goto exit; + return; case ARM_CP_DC_ZVA: /* Writes clear the aligned block of memory which rt points into. 
*/ if (s->mte_active[0]) { @@ -2013,14 +2117,14 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); - tcg_rt = new_tmp_a64(s); + tcg_rt = tcg_temp_new_i64(); gen_helper_mte_check_zva(tcg_rt, cpu_env, tcg_constant_i32(desc), cpu_reg(s, rt)); } else { tcg_rt = clean_data_tbi(s, cpu_reg(s, rt)); } gen_helper_dc_zva(cpu_env, tcg_rt); - goto exit; + return; case ARM_CP_DC_GVA: { TCGv_i64 clean_addr, tag; @@ -2038,10 +2142,9 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, tag = tcg_temp_new_i64(); tcg_gen_shri_i64(tag, tcg_rt, 56); gen_helper_stzgm_tags(cpu_env, clean_addr, tag); - tcg_temp_free_i64(tag); } } - goto exit; + return; case ARM_CP_DC_GZVA: { TCGv_i64 clean_addr, tag; @@ -2056,19 +2159,18 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, tag = tcg_temp_new_i64(); tcg_gen_shri_i64(tag, tcg_rt, 56); gen_helper_stzgm_tags(cpu_env, clean_addr, tag); - tcg_temp_free_i64(tag); } } - goto exit; + return; default: g_assert_not_reached(); } if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) { - goto exit; + return; } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { - goto exit; + return; } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) { - goto exit; + return; } if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) { @@ -2091,7 +2193,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, } else { if (ri->type & ARM_CP_CONST) { /* If not forbidden by access permissions, treat as WI */ - goto exit; + return; } else if (ri->writefn) { if (!tcg_ri) { tcg_ri = gen_lookup_cp_reg(key); @@ -2119,11 +2221,6 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, */ s->base.is_jmp = DISAS_UPDATE_EXIT; } - - exit: - if (tcg_ri) { - tcg_temp_free_ptr(tcg_ri); - } } /* System @@ -2265,234 +2362,10 @@ static void disas_exc(DisasContext *s, uint32_t insn) } } -/* Unconditional branch (register) - * 31 25 24 21 20 16 15 10 9 5 4 0 - * +---------------+-------+-------+-------+------+-------+ - * | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 | - * +---------------+-------+-------+-------+------+-------+ - */ -static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) -{ - unsigned int opc, op2, op3, rn, op4; - unsigned btype_mod = 2; /* 0: BR, 1: BLR, 2: other */ - TCGv_i64 dst; - TCGv_i64 modifier; - - opc = extract32(insn, 21, 4); - op2 = extract32(insn, 16, 5); - op3 = extract32(insn, 10, 6); - rn = extract32(insn, 5, 5); - op4 = extract32(insn, 0, 5); - - if (op2 != 0x1f) { - goto do_unallocated; - } - - switch (opc) { - case 0: /* BR */ - case 1: /* BLR */ - case 2: /* RET */ - btype_mod = opc; - switch (op3) { - case 0: - /* BR, BLR, RET */ - if (op4 != 0) { - goto do_unallocated; - } - dst = cpu_reg(s, rn); - break; - - case 2: - case 3: - if (!dc_isar_feature(aa64_pauth, s)) { - goto do_unallocated; - } - if (opc == 2) { - /* RETAA, RETAB */ - if (rn != 0x1f || op4 != 0x1f) { - goto do_unallocated; - } - rn = 30; - modifier = cpu_X[31]; - } else { - /* BRAAZ, BRABZ, BLRAAZ, BLRABZ */ - if (op4 != 0x1f) { - goto do_unallocated; - } - modifier = new_tmp_a64_zero(s); - } - if (s->pauth_active) { - dst = new_tmp_a64(s); - if (op3 == 2) { - gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier); - } else { - gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier); - } - } else { - dst = cpu_reg(s, rn); - } - break; - - default: - goto do_unallocated; - } - /* BLR also 
needs to load return address */ - if (opc == 1) { - TCGv_i64 lr = cpu_reg(s, 30); - if (dst == lr) { - TCGv_i64 tmp = new_tmp_a64(s); - tcg_gen_mov_i64(tmp, dst); - dst = tmp; - } - gen_pc_plus_diff(s, lr, curr_insn_len(s)); - } - gen_a64_set_pc(s, dst); - break; - - case 8: /* BRAA */ - case 9: /* BLRAA */ - if (!dc_isar_feature(aa64_pauth, s)) { - goto do_unallocated; - } - if ((op3 & ~1) != 2) { - goto do_unallocated; - } - btype_mod = opc & 1; - if (s->pauth_active) { - dst = new_tmp_a64(s); - modifier = cpu_reg_sp(s, op4); - if (op3 == 2) { - gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier); - } else { - gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier); - } - } else { - dst = cpu_reg(s, rn); - } - /* BLRAA also needs to load return address */ - if (opc == 9) { - TCGv_i64 lr = cpu_reg(s, 30); - if (dst == lr) { - TCGv_i64 tmp = new_tmp_a64(s); - tcg_gen_mov_i64(tmp, dst); - dst = tmp; - } - gen_pc_plus_diff(s, lr, curr_insn_len(s)); - } - gen_a64_set_pc(s, dst); - break; - - case 4: /* ERET */ - if (s->current_el == 0) { - goto do_unallocated; - } - switch (op3) { - case 0: /* ERET */ - if (op4 != 0) { - goto do_unallocated; - } - if (s->fgt_eret) { - gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(op3), 2); - return; - } - dst = tcg_temp_new_i64(); - tcg_gen_ld_i64(dst, cpu_env, - offsetof(CPUARMState, elr_el[s->current_el])); - break; - - case 2: /* ERETAA */ - case 3: /* ERETAB */ - if (!dc_isar_feature(aa64_pauth, s)) { - goto do_unallocated; - } - if (rn != 0x1f || op4 != 0x1f) { - goto do_unallocated; - } - /* The FGT trap takes precedence over an auth trap. */ - if (s->fgt_eret) { - gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(op3), 2); - return; - } - dst = tcg_temp_new_i64(); - tcg_gen_ld_i64(dst, cpu_env, - offsetof(CPUARMState, elr_el[s->current_el])); - if (s->pauth_active) { - modifier = cpu_X[31]; - if (op3 == 2) { - gen_helper_autia(dst, cpu_env, dst, modifier); - } else { - gen_helper_autib(dst, cpu_env, dst, modifier); - } - } - break; - - default: - goto do_unallocated; - } - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - - gen_helper_exception_return(cpu_env, dst); - tcg_temp_free_i64(dst); - /* Must exit loop to check un-masked IRQs */ - s->base.is_jmp = DISAS_EXIT; - return; - - case 5: /* DRPS */ - if (op3 != 0 || op4 != 0 || rn != 0x1f) { - goto do_unallocated; - } else { - unallocated_encoding(s); - } - return; - - default: - do_unallocated: - unallocated_encoding(s); - return; - } - - switch (btype_mod) { - case 0: /* BR */ - if (dc_isar_feature(aa64_bti, s)) { - /* BR to {x16,x17} or !guard -> 1, else 3. */ - set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3); - } - break; - - case 1: /* BLR */ - if (dc_isar_feature(aa64_bti, s)) { - /* BLR sets BTYPE to 2, regardless of source guarded page. */ - set_btype(s, 2); - } - break; - - default: /* RET or none of the above. */ - /* BTYPE will be set to 0 by normal end-of-insn processing. 
*/ - break; - } - - s->base.is_jmp = DISAS_JUMP; -} - /* Branches, exception generating and system instructions */ static void disas_b_exc_sys(DisasContext *s, uint32_t insn) { switch (extract32(insn, 25, 7)) { - case 0x0a: case 0x0b: - case 0x4a: case 0x4b: /* Unconditional branch (immediate) */ - disas_uncond_b_imm(s, insn); - break; - case 0x1a: case 0x5a: /* Compare & branch (immediate) */ - disas_comp_b_imm(s, insn); - break; - case 0x1b: case 0x5b: /* Test & branch (immediate) */ - disas_test_b_imm(s, insn); - break; - case 0x2a: /* Conditional branch (immediate) */ - disas_cond_b_imm(s, insn); - break; case 0x6a: /* Exception generation / System */ if (insn & (1 << 24)) { if (extract32(insn, 22, 2) == 0) { @@ -2504,9 +2377,6 @@ static void disas_b_exc_sys(DisasContext *s, uint32_t insn) disas_exc(s, insn); } break; - case 0x6b: /* Unconditional branch (register) */ - disas_uncond_b_reg(s, insn); - break; default: unallocated_encoding(s); break; @@ -2554,7 +2424,6 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, TCGv_i64 addr2 = tcg_temp_new_i64(); tcg_gen_addi_i64(addr2, addr, 8); tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop); - tcg_temp_free_i64(addr2); tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val); tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high); @@ -2619,7 +2488,6 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16, get_mem_index(s), MO_128 | MO_ALIGN | s->be_data); - tcg_temp_free_i128(c16); a = tcg_temp_new_i64(); b = tcg_temp_new_i64(); @@ -2632,9 +2500,6 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, tcg_gen_xor_i64(a, a, cpu_exclusive_val); tcg_gen_xor_i64(b, b, cpu_exclusive_high); tcg_gen_or_i64(tmp, a, b); - tcg_temp_free_i64(a); - tcg_temp_free_i64(b); - tcg_temp_free_i128(t16); tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0); } @@ -2645,7 +2510,6 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val); } tcg_gen_mov_i64(cpu_reg(s, rd), tmp); - tcg_temp_free_i64(tmp); tcg_gen_br(done_label); gen_set_label(fail_label); @@ -2701,14 +2565,12 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx, MO_64 | MO_ALIGN | s->be_data); - tcg_temp_free_i64(val); if (s->be_data == MO_LE) { tcg_gen_extr32_i64(s1, s2, cmp); } else { tcg_gen_extr32_i64(s2, s1, cmp); } - tcg_temp_free_i64(cmp); } else { TCGv_i128 cmp = tcg_temp_new_i128(); TCGv_i128 val = tcg_temp_new_i128(); @@ -2723,14 +2585,12 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx, MO_128 | MO_ALIGN | s->be_data); - tcg_temp_free_i128(val); if (s->be_data == MO_LE) { tcg_gen_extr_i128_i64(s1, s2, cmp); } else { tcg_gen_extr_i128_i64(s2, s1, cmp); } - tcg_temp_free_i128(cmp); } } @@ -2941,7 +2801,7 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn) tcg_rt = cpu_reg(s, rt); - clean_addr = new_tmp_a64(s); + clean_addr = tcg_temp_new_i64(); gen_pc_plus_diff(s, clean_addr, imm); if (is_vector) { do_fp_ld(s, rt, clean_addr, size); @@ -3114,7 +2974,6 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) false, false, 0, false, false); tcg_gen_mov_i64(tcg_rt, tmp); - tcg_temp_free_i64(tmp); } else { do_gpr_st(s, tcg_rt, clean_addr, size, false, 0, false, false); @@ -3584,10 +3443,10 @@ static void 
disas_ldst_pac(DisasContext *s, uint32_t insn, if (s->pauth_active) { if (use_key_a) { gen_helper_autda(dirty_addr, cpu_env, dirty_addr, - new_tmp_a64_zero(s)); + tcg_constant_i64(0)); } else { gen_helper_autdb(dirty_addr, cpu_env, dirty_addr, - new_tmp_a64_zero(s)); + tcg_constant_i64(0)); } } @@ -4014,7 +3873,6 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt), (is_q + 1) * 8, vec_full_reg_size(s), tcg_tmp); - tcg_temp_free_i64(tcg_tmp); } else { /* Load/store one element per register */ if (is_load) { @@ -4277,32 +4135,7 @@ static void disas_ldst(DisasContext *s, uint32_t insn) } } -/* PC-rel. addressing - * 31 30 29 28 24 23 5 4 0 - * +----+-------+-----------+-------------------+------+ - * | op | immlo | 1 0 0 0 0 | immhi | Rd | - * +----+-------+-----------+-------------------+------+ - */ -static void disas_pc_rel_adr(DisasContext *s, uint32_t insn) -{ - unsigned int page, rd; - int64_t offset; - - page = extract32(insn, 31, 1); - /* SignExtend(immhi:immlo) -> offset */ - offset = sextract64(insn, 5, 19); - offset = offset << 2 | extract32(insn, 29, 2); - rd = extract32(insn, 0, 5); - - if (page) { - /* ADRP (page based) */ - offset <<= 12; - /* The page offset is ok for TARGET_TB_PCREL. */ - offset -= s->pc_curr & 0xfff; - } - - gen_pc_plus_diff(s, cpu_reg(s, rd), offset); -} +typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64); //// --- Begin LibAFL code --- @@ -4310,117 +4143,87 @@ void libafl_gen_cmp(target_ulong pc, TCGv op0, TCGv op1, MemOp ot); //// --- End LibAFL code --- -/* - * Add/subtract (immediate) - * - * 31 30 29 28 23 22 21 10 9 5 4 0 - * +--+--+--+-------------+--+-------------+-----+-----+ - * |sf|op| S| 1 0 0 0 1 0 |sh| imm12 | Rn | Rd | - * +--+--+--+-------------+--+-------------+-----+-----+ - * - * sf: 0 -> 32bit, 1 -> 64bit - * op: 0 -> add , 1 -> sub - * S: 1 -> set flags - * sh: 1 -> LSL imm by 12 - */ -static void disas_add_sub_imm(DisasContext *s, uint32_t insn) +static bool gen_rri(DisasContext *s, arg_rri_sf *a, + bool rd_sp, bool rn_sp, ArithTwoOp *fn) { - int rd = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 5); - uint64_t imm = extract32(insn, 10, 12); - bool shift = extract32(insn, 22, 1); - bool setflags = extract32(insn, 29, 1); - bool sub_op = extract32(insn, 30, 1); - bool is_64bit = extract32(insn, 31, 1); - - TCGv_i64 tcg_rn = cpu_reg_sp(s, rn); - TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd); - TCGv_i64 tcg_result; - - if (shift) { - imm <<= 12; - } + TCGv_i64 tcg_rn = rn_sp ? cpu_reg_sp(s, a->rn) : cpu_reg(s, a->rn); + TCGv_i64 tcg_rd = rd_sp ? cpu_reg_sp(s, a->rd) : cpu_reg(s, a->rd); + TCGv_i64 tcg_imm = tcg_constant_i64(a->imm); //// --- Begin LibAFL code --- - if (rd == 31 && sub_op) { // cmp xX, imm - TCGv_i64 tcg_imm = tcg_const_i64(imm); - libafl_gen_cmp(s->pc_curr, tcg_rn, tcg_imm, is_64bit ? MO_64 : MO_32); - tcg_temp_free_i64(tcg_imm); + if (a->rd == 31 && ( + fn == tcg_gen_sub_i64 || + fn == gen_sub64_CC || + fn == gen_sub32_CC)) { // cmp xX, imm + libafl_gen_cmp(s->pc_curr, tcg_rn, tcg_imm, a->sf ? 
MO_64 : MO_32); } //// --- End LibAFL code --- - tcg_result = tcg_temp_new_i64(); - if (!setflags) { - if (sub_op) { - tcg_gen_subi_i64(tcg_result, tcg_rn, imm); - } else { - tcg_gen_addi_i64(tcg_result, tcg_rn, imm); - } - } else { - TCGv_i64 tcg_imm = tcg_constant_i64(imm); - if (sub_op) { - gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm); - } else { - gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm); - } + fn(tcg_rd, tcg_rn, tcg_imm); + if (!a->sf) { + tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } + return true; +} - if (is_64bit) { - tcg_gen_mov_i64(tcg_rd, tcg_result); - } else { - tcg_gen_ext32u_i64(tcg_rd, tcg_result); - } +static bool trans_ADR(DisasContext *s, arg_ri *a) +{ + gen_pc_plus_diff(s, cpu_reg(s, a->rd), a->imm); + return true; +} - tcg_temp_free_i64(tcg_result); +static bool trans_ADRP(DisasContext *s, arg_ri *a) +{ + int64_t offset = (int64_t)a->imm << 12; + + /* The page offset is ok for CF_PCREL. */ + offset -= s->pc_curr & 0xfff; + gen_pc_plus_diff(s, cpu_reg(s, a->rd), offset); + return true; } /* - * Add/subtract (immediate, with tags) - * - * 31 30 29 28 23 22 21 16 14 10 9 5 4 0 - * +--+--+--+-------------+--+---------+--+-------+-----+-----+ - * |sf|op| S| 1 0 0 0 1 1 |o2| uimm6 |o3| uimm4 | Rn | Rd | - * +--+--+--+-------------+--+---------+--+-------+-----+-----+ - * - * op: 0 -> add, 1 -> sub + * Add/subtract (immediate) */ -static void disas_add_sub_imm_with_tags(DisasContext *s, uint32_t insn) +TRANS(ADD_i, gen_rri, a, 1, 1, tcg_gen_add_i64) +TRANS(SUB_i, gen_rri, a, 1, 1, tcg_gen_sub_i64) +TRANS(ADDS_i, gen_rri, a, 0, 1, a->sf ? gen_add64_CC : gen_add32_CC) +TRANS(SUBS_i, gen_rri, a, 0, 1, a->sf ? gen_sub64_CC : gen_sub32_CC) + +/* + * Add/subtract (immediate, with tags) + */ + +static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a, + bool sub_op) { - int rd = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 5); - int uimm4 = extract32(insn, 10, 4); - int uimm6 = extract32(insn, 16, 6); - bool sub_op = extract32(insn, 30, 1); TCGv_i64 tcg_rn, tcg_rd; int imm; - /* Test all of sf=1, S=0, o2=0, o3=0. */ - if ((insn & 0xa040c000u) != 0x80000000u || - !dc_isar_feature(aa64_mte_insn_reg, s)) { - unallocated_encoding(s); - return; - } - - imm = uimm6 << LOG2_TAG_GRANULE; + imm = a->uimm6 << LOG2_TAG_GRANULE; if (sub_op) { imm = -imm; } - tcg_rn = cpu_reg_sp(s, rn); - tcg_rd = cpu_reg_sp(s, rd); + tcg_rn = cpu_reg_sp(s, a->rn); + tcg_rd = cpu_reg_sp(s, a->rd); if (s->ata) { gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn, tcg_constant_i32(imm), - tcg_constant_i32(uimm4)); + tcg_constant_i32(a->uimm4)); } else { tcg_gen_addi_i64(tcg_rd, tcg_rn, imm); gen_address_with_allocation_tag0(tcg_rd, tcg_rd); } + return true; } +TRANS_FEAT(ADDG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, false) +TRANS_FEAT(SUBG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, true) + /* The input should be a value in the bottom e bits (with higher * bits zero); returns that value replicated into every element * of size e in a 64 bit integer. @@ -4435,14 +4238,12 @@ static uint64_t bitfield_replicate(uint64_t mask, unsigned int e) return mask; } -/* Return a value with the bottom len bits set (where 0 < len <= 64) */ -static inline uint64_t bitmask64(unsigned int length) -{ - assert(length > 0 && length <= 64); - return ~0ULL >> (64 - length); -} +/* + * Logical (immediate) + */ -/* Simplified variant of pseudocode DecodeBitMasks() for the case where we +/* + * Simplified variant of pseudocode DecodeBitMasks() for the case where we * only require the wmask. 
Returns false if the imms/immr/immn are a reserved * value (ie should cause a guest UNDEF exception), and true if they are * valid, in which case the decoded bit pattern is written to result. @@ -4497,10 +4298,10 @@ bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, /* Create the value of one element: s+1 set bits rotated * by r within the element (which is e bits wide)... */ - mask = bitmask64(s + 1); + mask = MAKE_64BIT_MASK(0, s + 1); if (r) { mask = (mask >> r) | (mask << (e - r)); - mask &= bitmask64(e); + mask &= MAKE_64BIT_MASK(0, e); } /* ...then replicate the element over the whole 64 bit value */ mask = bitfield_replicate(mask, e); @@ -4508,297 +4309,215 @@ bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, return true; } -/* Logical (immediate) - * 31 30 29 28 23 22 21 16 15 10 9 5 4 0 - * +----+-----+-------------+---+------+------+------+------+ - * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd | - * +----+-----+-------------+---+------+------+------+------+ - */ -static void disas_logic_imm(DisasContext *s, uint32_t insn) +static bool gen_rri_log(DisasContext *s, arg_rri_log *a, bool set_cc, + void (*fn)(TCGv_i64, TCGv_i64, int64_t)) { - unsigned int sf, opc, is_n, immr, imms, rn, rd; TCGv_i64 tcg_rd, tcg_rn; - uint64_t wmask; - bool is_and = false; + uint64_t imm; - sf = extract32(insn, 31, 1); - opc = extract32(insn, 29, 2); - is_n = extract32(insn, 22, 1); - immr = extract32(insn, 16, 6); - imms = extract32(insn, 10, 6); - rn = extract32(insn, 5, 5); - rd = extract32(insn, 0, 5); - - if (!sf && is_n) { - unallocated_encoding(s); - return; + /* Some immediate field values are reserved. */ + if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), + extract32(a->dbm, 0, 6), + extract32(a->dbm, 6, 6))) { + return false; + } + if (!a->sf) { + imm &= 0xffffffffull; } - if (opc == 0x3) { /* ANDS */ - tcg_rd = cpu_reg(s, rd); - } else { - tcg_rd = cpu_reg_sp(s, rd); - } - tcg_rn = cpu_reg(s, rn); + tcg_rd = set_cc ? cpu_reg(s, a->rd) : cpu_reg_sp(s, a->rd); + tcg_rn = cpu_reg(s, a->rn); - if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) { - /* some immediate field values are reserved */ - unallocated_encoding(s); - return; + fn(tcg_rd, tcg_rn, imm); + if (set_cc) { + gen_logic_CC(a->sf, tcg_rd); } - - if (!sf) { - wmask &= 0xffffffff; - } - - switch (opc) { - case 0x3: /* ANDS */ - case 0x0: /* AND */ - tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask); - is_and = true; - break; - case 0x1: /* ORR */ - tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask); - break; - case 0x2: /* EOR */ - tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask); - break; - default: - assert(FALSE); /* must handle all above */ - break; - } - - if (!sf && !is_and) { - /* zero extend final result; we know we can skip this for AND - * since the immediate had the high 32 bits clear. 
- */ + if (!a->sf) { tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } - - if (opc == 3) { /* ANDS */ - gen_logic_CC(sf, tcg_rd); - } + return true; } +TRANS(AND_i, gen_rri_log, a, false, tcg_gen_andi_i64) +TRANS(ORR_i, gen_rri_log, a, false, tcg_gen_ori_i64) +TRANS(EOR_i, gen_rri_log, a, false, tcg_gen_xori_i64) +TRANS(ANDS_i, gen_rri_log, a, true, tcg_gen_andi_i64) + /* * Move wide (immediate) - * - * 31 30 29 28 23 22 21 20 5 4 0 - * +--+-----+-------------+-----+----------------+------+ - * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd | - * +--+-----+-------------+-----+----------------+------+ - * - * sf: 0 -> 32 bit, 1 -> 64 bit - * opc: 00 -> N, 10 -> Z, 11 -> K - * hw: shift/16 (0,16, and sf only 32, 48) */ -static void disas_movw_imm(DisasContext *s, uint32_t insn) + +static bool trans_MOVZ(DisasContext *s, arg_movw *a) { - int rd = extract32(insn, 0, 5); - uint64_t imm = extract32(insn, 5, 16); - int sf = extract32(insn, 31, 1); - int opc = extract32(insn, 29, 2); - int pos = extract32(insn, 21, 2) << 4; - TCGv_i64 tcg_rd = cpu_reg(s, rd); - - if (!sf && (pos >= 32)) { - unallocated_encoding(s); - return; - } - - switch (opc) { - case 0: /* MOVN */ - case 2: /* MOVZ */ - imm <<= pos; - if (opc == 0) { - imm = ~imm; - } - if (!sf) { - imm &= 0xffffffffu; - } - tcg_gen_movi_i64(tcg_rd, imm); - break; - case 3: /* MOVK */ - tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_constant_i64(imm), pos, 16); - if (!sf) { - tcg_gen_ext32u_i64(tcg_rd, tcg_rd); - } - break; - default: - unallocated_encoding(s); - break; - } + int pos = a->hw << 4; + tcg_gen_movi_i64(cpu_reg(s, a->rd), (uint64_t)a->imm << pos); + return true; } -/* Bitfield - * 31 30 29 28 23 22 21 16 15 10 9 5 4 0 - * +----+-----+-------------+---+------+------+------+------+ - * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd | - * +----+-----+-------------+---+------+------+------+------+ - */ -static void disas_bitfield(DisasContext *s, uint32_t insn) +static bool trans_MOVN(DisasContext *s, arg_movw *a) { - unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len; - TCGv_i64 tcg_rd, tcg_tmp; + int pos = a->hw << 4; + uint64_t imm = a->imm; - sf = extract32(insn, 31, 1); - opc = extract32(insn, 29, 2); - n = extract32(insn, 22, 1); - ri = extract32(insn, 16, 6); - si = extract32(insn, 10, 6); - rn = extract32(insn, 5, 5); - rd = extract32(insn, 0, 5); - bitsize = sf ? 64 : 32; - - if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) { - unallocated_encoding(s); - return; + imm = ~(imm << pos); + if (!a->sf) { + imm = (uint32_t)imm; } + tcg_gen_movi_i64(cpu_reg(s, a->rd), imm); + return true; +} - tcg_rd = cpu_reg(s, rd); +static bool trans_MOVK(DisasContext *s, arg_movw *a) +{ + int pos = a->hw << 4; + TCGv_i64 tcg_rd, tcg_im; - /* Suppress the zero-extend for !sf. Since RI and SI are constrained - to be smaller than bitsize, we'll never reference data outside the - low 32-bits anyway. */ - tcg_tmp = read_cpu_reg(s, rn, 1); + tcg_rd = cpu_reg(s, a->rd); + tcg_im = tcg_constant_i64(a->imm); + tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_im, pos, 16); + if (!a->sf) { + tcg_gen_ext32u_i64(tcg_rd, tcg_rd); + } + return true; +} + +/* + * Bitfield + */ + +static bool trans_SBFM(DisasContext *s, arg_SBFM *a) +{ + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1); + unsigned int bitsize = a->sf ? 64 : 32; + unsigned int ri = a->immr; + unsigned int si = a->imms; + unsigned int pos, len; - /* Recognize simple(r) extractions. 
*/ if (si >= ri) { /* Wd<s-r:0> = Wn<s:r> */ len = (si - ri) + 1; - if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */ - tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len); - goto done; - } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */ - tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len); - return; + tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len); + if (!a->sf) { + tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } - /* opc == 1, BFXIL fall through to deposit */ + } else { + /* Wd<32+s-r,32-r> = Wn<s:0> */ + len = si + 1; + pos = (bitsize - ri) & (bitsize - 1); + + if (len < ri) { + /* + * Sign extend the destination field from len to fill the + * balance of the word. Let the deposit below insert all + * of those sign bits. + */ + tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len); + len = ri; + } + + /* + * We start with zero, and we haven't modified any bits outside + * bitsize, therefore no final zero-extension is needed for !sf. + */ + tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len); + } + return true; +} + +static bool trans_UBFM(DisasContext *s, arg_UBFM *a) +{ + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1); + unsigned int bitsize = a->sf ? 64 : 32; + unsigned int ri = a->immr; + unsigned int si = a->imms; + unsigned int pos, len; + + tcg_rd = cpu_reg(s, a->rd); + tcg_tmp = read_cpu_reg(s, a->rn, 1); + + if (si >= ri) { + /* Wd<s-r:0> = Wn<s:r> */ + len = (si - ri) + 1; + tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len); + } else { + /* Wd<32+s-r,32-r> = Wn<s:0> */ + len = si + 1; + pos = (bitsize - ri) & (bitsize - 1); + tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len); + } + return true; +} + +static bool trans_BFM(DisasContext *s, arg_BFM *a) +{ + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1); + unsigned int bitsize = a->sf ? 64 : 32; + unsigned int ri = a->immr; + unsigned int si = a->imms; + unsigned int pos, len; + + tcg_rd = cpu_reg(s, a->rd); + tcg_tmp = read_cpu_reg(s, a->rn, 1); + + if (si >= ri) { + /* Wd<s-r:0> = Wn<s:r> */ tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri); + len = (si - ri) + 1; pos = 0; } else { - /* Handle the ri > si case with a deposit - * Wd<32+s-r,32-r> = Wn<s:0> - */ + /* Wd<32+s-r,32-r> = Wn<s:0> */ len = si + 1; pos = (bitsize - ri) & (bitsize - 1); } - if (opc == 0 && len < ri) { - /* SBFM: sign extend the destination field from len to fill - the balance of the word. Let the deposit below insert all - of those sign bits. */ - tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len); - len = ri; - } - - if (opc == 1) { /* BFM, BFXIL */ - tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len); - } else { - /* SBFM or UBFM: We start with zero, and we haven't modified - any bits outside bitsize, therefore the zero-extension - below is unneeded. 
*/ - tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len); - return; - } - - done: - if (!sf) { /* zero extend final result */ + tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len); + if (!a->sf) { tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } + return true; } -/* Extract - * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0 - * +----+------+-------------+---+----+------+--------+------+------+ - * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd | - * +----+------+-------------+---+----+------+--------+------+------+ - */ -static void disas_extract(DisasContext *s, uint32_t insn) +static bool trans_EXTR(DisasContext *s, arg_extract *a) { - unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0; + TCGv_i64 tcg_rd, tcg_rm, tcg_rn; - sf = extract32(insn, 31, 1); - n = extract32(insn, 22, 1); - rm = extract32(insn, 16, 5); - imm = extract32(insn, 10, 6); - rn = extract32(insn, 5, 5); - rd = extract32(insn, 0, 5); - op21 = extract32(insn, 29, 2); - op0 = extract32(insn, 21, 1); - bitsize = sf ? 64 : 32; + tcg_rd = cpu_reg(s, a->rd); - if (sf != n || op21 || op0 || imm >= bitsize) { - unallocated_encoding(s); - } else { - TCGv_i64 tcg_rd, tcg_rm, tcg_rn; - - tcg_rd = cpu_reg(s, rd); - - if (unlikely(imm == 0)) { - /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts, - * so an extract from bit 0 is a special case. - */ - if (sf) { - tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm)); - } else { - tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm)); - } + if (unlikely(a->imm == 0)) { + /* + * tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts, + * so an extract from bit 0 is a special case. + */ + if (a->sf) { + tcg_gen_mov_i64(tcg_rd, cpu_reg(s, a->rm)); } else { - tcg_rm = cpu_reg(s, rm); - tcg_rn = cpu_reg(s, rn); + tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, a->rm)); + } + } else { + tcg_rm = cpu_reg(s, a->rm); + tcg_rn = cpu_reg(s, a->rn); - if (sf) { - /* Specialization to ROR happens in EXTRACT2. */ - tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm); + if (a->sf) { + /* Specialization to ROR happens in EXTRACT2. */ + tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, a->imm); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(); + + tcg_gen_extrl_i64_i32(t0, tcg_rm); + if (a->rm == a->rn) { + tcg_gen_rotri_i32(t0, t0, a->imm); } else { - TCGv_i32 t0 = tcg_temp_new_i32(); - - tcg_gen_extrl_i64_i32(t0, tcg_rm); - if (rm == rn) { - tcg_gen_rotri_i32(t0, t0, imm); - } else { - TCGv_i32 t1 = tcg_temp_new_i32(); - tcg_gen_extrl_i64_i32(t1, tcg_rn); - tcg_gen_extract2_i32(t0, t0, t1, imm); - tcg_temp_free_i32(t1); - } - tcg_gen_extu_i32_i64(tcg_rd, t0); - tcg_temp_free_i32(t0); + TCGv_i32 t1 = tcg_temp_new_i32(); + tcg_gen_extrl_i64_i32(t1, tcg_rn); + tcg_gen_extract2_i32(t0, t0, t1, a->imm); } + tcg_gen_extu_i32_i64(tcg_rd, t0); } } -} - -/* Data processing - immediate */ -static void disas_data_proc_imm(DisasContext *s, uint32_t insn) -{ - switch (extract32(insn, 23, 6)) { - case 0x20: case 0x21: /* PC-rel. 
addressing */ - disas_pc_rel_adr(s, insn); - break; - case 0x22: /* Add/subtract (immediate) */ - disas_add_sub_imm(s, insn); - break; - case 0x23: /* Add/subtract (immediate, with tags) */ - disas_add_sub_imm_with_tags(s, insn); - break; - case 0x24: /* Logical (immediate) */ - disas_logic_imm(s, insn); - break; - case 0x25: /* Move wide (immediate) */ - disas_movw_imm(s, insn); - break; - case 0x26: /* Bitfield */ - disas_bitfield(s, insn); - break; - case 0x27: /* Extract */ - disas_extract(s, insn); - break; - default: - unallocated_encoding(s); - break; - } + return true; } /* Shift a TCGv src by TCGv shift_amount, put result in dst. @@ -4833,8 +4552,6 @@ static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf, tcg_gen_extrl_i64_i32(t1, shift_amount); tcg_gen_rotr_i32(t0, t0, t1); tcg_gen_extu_i32_i64(dst, t0); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); } break; default: @@ -5030,8 +4747,6 @@ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn) } else { tcg_gen_ext32u_i64(tcg_rd, tcg_result); } - - tcg_temp_free_i64(tcg_result); } /* @@ -5101,8 +4816,6 @@ static void disas_add_sub_reg(DisasContext *s, uint32_t insn) } else { tcg_gen_ext32u_i64(tcg_rd, tcg_result); } - - tcg_temp_free_i64(tcg_result); } /* Data-processing (3 source) @@ -5160,8 +4873,6 @@ static void disas_data_proc_3src(DisasContext *s, uint32_t insn) } else { tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm); } - - tcg_temp_free_i64(low_bits); return; } @@ -5197,10 +4908,6 @@ static void disas_data_proc_3src(DisasContext *s, uint32_t insn) if (!sf) { tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd)); } - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_temp_free_i64(tcg_tmp); } /* Add/subtract (with carry) @@ -5226,7 +4933,7 @@ static void disas_adc_sbc(DisasContext *s, uint32_t insn) tcg_rn = cpu_reg(s, rn); if (op) { - tcg_y = new_tmp_a64(s); + tcg_y = tcg_temp_new_i64(); tcg_gen_not_i64(tcg_y, cpu_reg(s, rm)); } else { tcg_y = cpu_reg(s, rm); @@ -5280,8 +4987,6 @@ static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn) if (mask & 1) { /* V */ tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0); } - - tcg_temp_free_i32(nzcv); } /* @@ -5314,7 +5019,6 @@ static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn) tcg_gen_shli_i32(cpu_VF, tmp, shift - 1); tcg_gen_mov_i32(cpu_ZF, cpu_NF); tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF); - tcg_temp_free_i32(tmp); } /* Conditional compare (immediate / register) @@ -5351,11 +5055,10 @@ static void disas_cc(DisasContext *s, uint32_t insn) tcg_t0 = tcg_temp_new_i32(); arm_test_cc(&c, cond); tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0); - arm_free_cc(&c); /* Load the arguments for the new comparison. */ if (is_imm) { - tcg_y = new_tmp_a64(s); + tcg_y = tcg_temp_new_i64(); tcg_gen_movi_i64(tcg_y, y); } else { tcg_y = cpu_reg(s, y); @@ -5375,7 +5078,6 @@ static void disas_cc(DisasContext *s, uint32_t insn) } else { gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y); } - tcg_temp_free_i64(tcg_tmp); /* If COND was false, force the flags to #nzcv. Compute two masks * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0). 
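A note on the pattern that dominates these hunks: the deleted tcg_temp_free_i32/_i64/_ptr/_i128 calls are not replaced by anything, because TCG now releases translation-time temporaries automatically at the end of each translated instruction, so the explicit frees had become dead code. A minimal before/after sketch (the names t, a, b are illustrative, not taken from this patch):

    TCGv_i64 t = tcg_temp_new_i64();   /* allocation stays */
    tcg_gen_add_i64(t, a, b);          /* uses stay */
    tcg_temp_free_i64(t);              /* only this line is deleted */

Only the free goes away; allocation and use are untouched, which is why nearly all of these hunks are pure deletions.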
@@ -5423,9 +5125,6 @@ static void disas_cc(DisasContext *s, uint32_t insn) tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2); } } - tcg_temp_free_i32(tcg_t0); - tcg_temp_free_i32(tcg_t1); - tcg_temp_free_i32(tcg_t2); } /* Conditional select @@ -5477,8 +5176,6 @@ static void disas_cond_select(DisasContext *s, uint32_t insn) tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false); } - a64_free_cc(&c); - if (!sf) { tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } @@ -5498,7 +5195,6 @@ static void handle_clz(DisasContext *s, unsigned int sf, tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn); tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32); tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); - tcg_temp_free_i32(tcg_tmp32); } } @@ -5516,7 +5212,6 @@ static void handle_cls(DisasContext *s, unsigned int sf, tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn); tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32); tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); - tcg_temp_free_i32(tcg_tmp32); } } @@ -5534,7 +5229,6 @@ static void handle_rbit(DisasContext *s, unsigned int sf, tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn); gen_helper_rbit(tcg_tmp32, tcg_tmp32); tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); - tcg_temp_free_i32(tcg_tmp32); } } @@ -5580,8 +5274,6 @@ static void handle_rev16(DisasContext *s, unsigned int sf, tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask); tcg_gen_shli_i64(tcg_rd, tcg_rd, 8); tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp); - - tcg_temp_free_i64(tcg_tmp); } /* Data-processing (1 source) @@ -5701,7 +5393,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x09): /* PACIZB */ @@ -5709,7 +5401,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x0a): /* PACDZA */ @@ -5717,7 +5409,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x0b): /* PACDZB */ @@ -5725,7 +5417,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x0c): /* AUTIZA */ @@ -5733,7 +5425,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_autia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x0d): /* AUTIZB */ @@ -5741,7 +5433,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_autib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x0e): /* AUTDZA */ @@ -5749,7 +5441,7 @@ static void 
disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_autda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x0f): /* AUTDZB */ @@ -5757,7 +5449,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x10): /* XPACI */ @@ -5792,8 +5484,8 @@ static void handle_div(DisasContext *s, bool is_signed, unsigned int sf, tcg_rd = cpu_reg(s, rd); if (!sf && is_signed) { - tcg_n = new_tmp_a64(s); - tcg_m = new_tmp_a64(s); + tcg_n = tcg_temp_new_i64(); + tcg_m = tcg_temp_new_i64(); tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn)); tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm)); } else { @@ -5823,7 +5515,6 @@ static void handle_shift_reg(DisasContext *s, tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31); shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift); - tcg_temp_free_i64(tcg_shift); } /* CRC32[BHWX], CRC32C[BHWX] */ @@ -5858,7 +5549,7 @@ static void handle_crc32(DisasContext *s, default: g_assert_not_reached(); } - tcg_val = new_tmp_a64(s); + tcg_val = tcg_temp_new_i64(); tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask); } @@ -5940,8 +5631,6 @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn) tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4); tcg_gen_shl_i64(t, tcg_constant_i64(1), t); tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t); - - tcg_temp_free_i64(t); } break; case 8: /* LSLV */ @@ -6085,8 +5774,6 @@ static void handle_fp_compare(DisasContext *s, int size, } else { gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst); } - tcg_temp_free_i64(tcg_vn); - tcg_temp_free_i64(tcg_vm); } else { TCGv_i32 tcg_vn = tcg_temp_new_i32(); TCGv_i32 tcg_vm = tcg_temp_new_i32(); @@ -6116,16 +5803,9 @@ static void handle_fp_compare(DisasContext *s, int size, default: g_assert_not_reached(); } - - tcg_temp_free_i32(tcg_vn); - tcg_temp_free_i32(tcg_vm); } - tcg_temp_free_ptr(fpst); - gen_set_nzcv(tcg_flags); - - tcg_temp_free_i64(tcg_flags); } /* Floating point compare @@ -6297,13 +5977,10 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn) a64_test_cc(&c, cond); tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0), t_true, t_false); - tcg_temp_free_i64(t_false); - a64_free_cc(&c); /* Note that sregs & hregs write back zeros to the high bits, and we've already done the zero-extension. 
*/ write_fp_dreg(s, rd, t_true); - tcg_temp_free_i64(t_true); } /* Floating-point data-processing (1 source) - half precision */ @@ -6333,14 +6010,12 @@ static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn) case 0xb: /* FRINTZ */ case 0xc: /* FRINTA */ { - TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7)); + TCGv_i32 tcg_rmode; + fpst = fpstatus_ptr(FPST_FPCR_F16); - - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + tcg_rmode = gen_set_rmode(opcode & 7, fpst); gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst); - - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); + gen_restore_rmode(tcg_rmode, fpst); break; } case 0xe: /* FRINTX */ @@ -6356,12 +6031,6 @@ static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn) } write_fp_sreg(s, rd, tcg_res); - - if (fpst) { - tcg_temp_free_ptr(fpst); - } - tcg_temp_free_i32(tcg_op); - tcg_temp_free_i32(tcg_res); } /* Floating-point data-processing (1 source) - single precision */ @@ -6396,7 +6065,7 @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) case 0xa: /* FRINTM */ case 0xb: /* FRINTZ */ case 0xc: /* FRINTA */ - rmode = arm_rmode_to_sf(opcode & 7); + rmode = opcode & 7; gen_fpst = gen_helper_rints; break; case 0xe: /* FRINTX */ @@ -6406,14 +6075,14 @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) gen_fpst = gen_helper_rints; break; case 0x10: /* FRINT32Z */ - rmode = float_round_to_zero; + rmode = FPROUNDING_ZERO; gen_fpst = gen_helper_frint32_s; break; case 0x11: /* FRINT32X */ gen_fpst = gen_helper_frint32_s; break; case 0x12: /* FRINT64Z */ - rmode = float_round_to_zero; + rmode = FPROUNDING_ZERO; gen_fpst = gen_helper_frint64_s; break; case 0x13: /* FRINT64X */ @@ -6425,20 +6094,15 @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) fpst = fpstatus_ptr(FPST_FPCR); if (rmode >= 0) { - TCGv_i32 tcg_rmode = tcg_const_i32(rmode); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst); gen_fpst(tcg_res, tcg_op, fpst); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); + gen_restore_rmode(tcg_rmode, fpst); } else { gen_fpst(tcg_res, tcg_op, fpst); } - tcg_temp_free_ptr(fpst); done: write_fp_sreg(s, rd, tcg_res); - tcg_temp_free_i32(tcg_op); - tcg_temp_free_i32(tcg_res); } /* Floating-point data-processing (1 source) - double precision */ @@ -6473,7 +6137,7 @@ static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) case 0xa: /* FRINTM */ case 0xb: /* FRINTZ */ case 0xc: /* FRINTA */ - rmode = arm_rmode_to_sf(opcode & 7); + rmode = opcode & 7; gen_fpst = gen_helper_rintd; break; case 0xe: /* FRINTX */ @@ -6483,14 +6147,14 @@ static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) gen_fpst = gen_helper_rintd; break; case 0x10: /* FRINT32Z */ - rmode = float_round_to_zero; + rmode = FPROUNDING_ZERO; gen_fpst = gen_helper_frint32_d; break; case 0x11: /* FRINT32X */ gen_fpst = gen_helper_frint32_d; break; case 0x12: /* FRINT64Z */ - rmode = float_round_to_zero; + rmode = FPROUNDING_ZERO; gen_fpst = gen_helper_frint64_d; break; case 0x13: /* FRINT64X */ @@ -6502,20 +6166,15 @@ static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) fpst = fpstatus_ptr(FPST_FPCR); if (rmode >= 0) { - TCGv_i32 tcg_rmode = tcg_const_i32(rmode); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst); 
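/*
 * The gen_set_rmode()/gen_restore_rmode() pair introduced here folds up
 * the old sequence visible in the deleted lines: allocate a temp holding
 * arm_rmode_to_sf(rmode), call gen_helper_set_rmode() to swap it with the
 * current rounding mode, and swap back after the rounding-sensitive op.
 * gen_set_rmode() hands back the previous mode so the restore call can
 * reinstate it.
 */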
gen_fpst(tcg_res, tcg_op, fpst); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); + gen_restore_rmode(tcg_rmode, fpst); } else { gen_fpst(tcg_res, tcg_op, fpst); } - tcg_temp_free_ptr(fpst); done: write_fp_dreg(s, rd, tcg_res); - tcg_temp_free_i64(tcg_op); - tcg_temp_free_i64(tcg_res); } static void handle_fp_fcvt(DisasContext *s, int opcode, @@ -6530,7 +6189,6 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, TCGv_i64 tcg_rd = tcg_temp_new_i64(); gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env); write_fp_dreg(s, rd, tcg_rd); - tcg_temp_free_i64(tcg_rd); } else { /* Single to half */ TCGv_i32 tcg_rd = tcg_temp_new_i32(); @@ -6540,11 +6198,7 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp); /* write_fp_sreg is OK here because top half of tcg_rd is zero */ write_fp_sreg(s, rd, tcg_rd); - tcg_temp_free_i32(tcg_rd); - tcg_temp_free_i32(ahp); - tcg_temp_free_ptr(fpst); } - tcg_temp_free_i32(tcg_rn); break; } case 0x1: @@ -6560,12 +6214,8 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, /* Double to half */ gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp); /* write_fp_sreg is OK here because top half of tcg_rd is zero */ - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(ahp); } write_fp_sreg(s, rd, tcg_rd); - tcg_temp_free_i32(tcg_rd); - tcg_temp_free_i64(tcg_rn); break; } case 0x3: @@ -6579,17 +6229,12 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, TCGv_i32 tcg_rd = tcg_temp_new_i32(); gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); write_fp_sreg(s, rd, tcg_rd); - tcg_temp_free_i32(tcg_rd); } else { /* Half to double */ TCGv_i64 tcg_rd = tcg_temp_new_i64(); gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); write_fp_dreg(s, rd, tcg_rd); - tcg_temp_free_i64(tcg_rd); } - tcg_temp_free_i32(tcg_rn); - tcg_temp_free_ptr(tcg_fpst); - tcg_temp_free_i32(tcg_ahp); break; } default: @@ -6737,11 +6382,6 @@ static void handle_fp_2src_single(DisasContext *s, int opcode, } write_fp_sreg(s, rd, tcg_res); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i32(tcg_res); } /* Floating-point data-processing (2 source) - double precision */ @@ -6790,11 +6430,6 @@ static void handle_fp_2src_double(DisasContext *s, int opcode, } write_fp_dreg(s, rd, tcg_res); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_temp_free_i64(tcg_res); } /* Floating-point data-processing (2 source) - half precision */ @@ -6845,11 +6480,6 @@ static void handle_fp_2src_half(DisasContext *s, int opcode, } write_fp_sreg(s, rd, tcg_res); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i32(tcg_res); } /* Floating point data-processing (2 source) @@ -6930,12 +6560,6 @@ static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1, gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst); write_fp_sreg(s, rd, tcg_res); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i32(tcg_op3); - tcg_temp_free_i32(tcg_res); } /* Floating-point data-processing (3 source) - double precision */ @@ -6968,12 +6592,6 @@ static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1, gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst); write_fp_dreg(s, rd, tcg_res); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - 
tcg_temp_free_i64(tcg_op3); - tcg_temp_free_i64(tcg_res); } /* Floating-point data-processing (3 source) - half precision */ @@ -7006,12 +6624,6 @@ static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1, gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst); write_fp_sreg(s, rd, tcg_res); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i32(tcg_op3); - tcg_temp_free_i32(tcg_res); } /* Floating point data-processing (3 source) @@ -7131,7 +6743,7 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, if (itof) { TCGv_i64 tcg_int = cpu_reg(s, rn); if (!sf) { - TCGv_i64 tcg_extend = new_tmp_a64(s); + TCGv_i64 tcg_extend = tcg_temp_new_i64(); if (is_signed) { tcg_gen_ext32s_i64(tcg_extend, tcg_int); @@ -7153,7 +6765,6 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_shift, tcg_fpstatus); } write_fp_dreg(s, rd, tcg_double); - tcg_temp_free_i64(tcg_double); break; case 0: /* float32 */ @@ -7166,7 +6777,6 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_shift, tcg_fpstatus); } write_fp_sreg(s, rd, tcg_single); - tcg_temp_free_i32(tcg_single); break; case 3: /* float16 */ @@ -7179,7 +6789,6 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_shift, tcg_fpstatus); } write_fp_sreg(s, rd, tcg_single); - tcg_temp_free_i32(tcg_single); break; default: @@ -7196,9 +6805,7 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, rmode = FPROUNDING_TIEAWAY; } - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); - - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); + tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); switch (type) { case 1: /* float64 */ @@ -7223,7 +6830,6 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, if (!sf) { tcg_gen_ext32u_i64(tcg_int, tcg_int); } - tcg_temp_free_i64(tcg_double); break; case 0: /* float32 */ @@ -7246,9 +6852,7 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_shift, tcg_fpstatus); } tcg_gen_extu_i32_i64(tcg_int, tcg_dest); - tcg_temp_free_i32(tcg_dest); } - tcg_temp_free_i32(tcg_single); break; case 3: /* float16 */ @@ -7271,20 +6875,15 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_shift, tcg_fpstatus); } tcg_gen_extu_i32_i64(tcg_int, tcg_dest); - tcg_temp_free_i32(tcg_dest); } - tcg_temp_free_i32(tcg_single); break; default: g_assert_not_reached(); } - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); - tcg_temp_free_i32(tcg_rmode); + gen_restore_rmode(tcg_rmode, tcg_fpstatus); } - - tcg_temp_free_ptr(tcg_fpstatus); } /* Floating point <-> fixed point conversions @@ -7361,7 +6960,6 @@ static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof) tmp = tcg_temp_new_i64(); tcg_gen_ext32u_i64(tmp, tcg_rn); write_fp_dreg(s, rd, tmp); - tcg_temp_free_i64(tmp); break; case 1: /* 64 bit */ @@ -7377,7 +6975,6 @@ static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof) tmp = tcg_temp_new_i64(); tcg_gen_ext16u_i64(tmp, tcg_rn); write_fp_dreg(s, rd, tmp); - tcg_temp_free_i64(tmp); break; default: g_assert_not_reached(); @@ -7415,15 +7012,11 @@ static void handle_fjcvtzs(DisasContext *s, int rd, int rn) gen_helper_fjcvtzs(t, t, fpstatus); - tcg_temp_free_ptr(fpstatus); - tcg_gen_ext32u_i64(cpu_reg(s, rd), t); tcg_gen_extrh_i64_i32(cpu_ZF, t); tcg_gen_movi_i32(cpu_CF, 0); tcg_gen_movi_i32(cpu_NF, 0); tcg_gen_movi_i32(cpu_VF, 0); - - 
tcg_temp_free_i64(t); } /* Floating point <-> integer conversions @@ -7588,8 +7181,6 @@ static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right, tcg_gen_shri_i64(tcg_right, tcg_right, pos); tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos); tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp); - - tcg_temp_free_i64(tcg_tmp); } /* EXT @@ -7654,16 +7245,13 @@ static void disas_simd_ext(DisasContext *s, uint32_t insn) tcg_hh = tcg_temp_new_i64(); read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64); do_ext64(s, tcg_hh, tcg_resh, pos); - tcg_temp_free_i64(tcg_hh); } } write_vec_element(s, tcg_resl, rd, 0, MO_64); - tcg_temp_free_i64(tcg_resl); if (is_q) { write_vec_element(s, tcg_resh, rd, 1, MO_64); } - tcg_temp_free_i64(tcg_resh); clear_vec_high(s, is_q, rd); } @@ -7718,10 +7306,10 @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn) bool part = extract32(insn, 14, 1); bool is_q = extract32(insn, 30, 1); int esize = 8 << size; - int i, ofs; + int i; int datasize = is_q ? 128 : 64; int elements = datasize / esize; - TCGv_i64 tcg_res, tcg_resl, tcg_resh; + TCGv_i64 tcg_res[2], tcg_ele; if (opcode == 0 || (size == 3 && !is_q)) { unallocated_encoding(s); @@ -7732,37 +7320,39 @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn) return; } - tcg_resl = tcg_const_i64(0); - tcg_resh = is_q ? tcg_const_i64(0) : NULL; - tcg_res = tcg_temp_new_i64(); + tcg_res[0] = tcg_temp_new_i64(); + tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL; + tcg_ele = tcg_temp_new_i64(); for (i = 0; i < elements; i++) { + int o, w; + switch (opcode) { case 1: /* UZP1/2 */ { int midpoint = elements / 2; if (i < midpoint) { - read_vec_element(s, tcg_res, rn, 2 * i + part, size); + read_vec_element(s, tcg_ele, rn, 2 * i + part, size); } else { - read_vec_element(s, tcg_res, rm, + read_vec_element(s, tcg_ele, rm, 2 * (i - midpoint) + part, size); } break; } case 2: /* TRN1/2 */ if (i & 1) { - read_vec_element(s, tcg_res, rm, (i & ~1) + part, size); + read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size); } else { - read_vec_element(s, tcg_res, rn, (i & ~1) + part, size); + read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size); } break; case 3: /* ZIP1/2 */ { int base = part * elements / 2; if (i & 1) { - read_vec_element(s, tcg_res, rm, base + (i >> 1), size); + read_vec_element(s, tcg_ele, rm, base + (i >> 1), size); } else { - read_vec_element(s, tcg_res, rn, base + (i >> 1), size); + read_vec_element(s, tcg_ele, rn, base + (i >> 1), size); } break; } @@ -7770,24 +7360,18 @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn) g_assert_not_reached(); } - ofs = i * esize; - if (ofs < 64) { - tcg_gen_shli_i64(tcg_res, tcg_res, ofs); - tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res); + w = (i * esize) / 64; + o = (i * esize) % 64; + if (o == 0) { + tcg_gen_mov_i64(tcg_res[w], tcg_ele); } else { - tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64); - tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res); + tcg_gen_shli_i64(tcg_ele, tcg_ele, o); + tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele); } } - tcg_temp_free_i64(tcg_res); - - write_vec_element(s, tcg_resl, rd, 0, MO_64); - tcg_temp_free_i64(tcg_resl); - - if (is_q) { - write_vec_element(s, tcg_resh, rd, 1, MO_64); - tcg_temp_free_i64(tcg_resh); + for (i = 0; i <= is_q; ++i) { + write_vec_element(s, tcg_res[i], rd, i, MO_64); } clear_vec_high(s, is_q, rd); } @@ -7857,9 +7441,6 @@ static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn, default: g_assert_not_reached(); } - - tcg_temp_free_i32(tcg_hi); - tcg_temp_free_i32(tcg_lo); 
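/*
 * [editor's sketch] The ZIP/UZP/TRN rewrite above assembles the result
 * with a word/offset split instead of one accumulator plus 64-bit
 * shifts.  A hypothetical helper consolidating the pattern (the same
 * w/o decomposition reappears in handle_rev further down):
 */
static void deposit_vec_elt(TCGv_i64 tcg_res[2], TCGv_i64 tcg_ele,
                            int i, int esize)
{
    int w = (i * esize) / 64;   /* which 64-bit result word */
    int o = (i * esize) % 64;   /* bit offset inside that word */

    if (o == 0) {
        /* First element of a word is a plain move, so the result temp
         * never needs the old tcg_const_i64(0) initialiser. */
        tcg_gen_mov_i64(tcg_res[w], tcg_ele);
    } else {
        tcg_gen_shli_i64(tcg_ele, tcg_ele, o);
        tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele);
    }
}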
return tcg_res; } } @@ -7987,12 +7568,8 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize, (is_q ? 128 : 64), vmap, fpst); tcg_gen_extu_i32_i64(tcg_res, tcg_res32); - tcg_temp_free_i32(tcg_res32); - tcg_temp_free_ptr(fpst); } - tcg_temp_free_i64(tcg_elt); - /* Now truncate the result to the width required for the final output */ if (opcode == 0x03) { /* SADDLV, UADDLV: result is 2*esize */ @@ -8016,7 +7593,6 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) } write_fp_dreg(s, rd, tcg_res); - tcg_temp_free_i64(tcg_res); } /* DUP (Element, Vector) @@ -8079,7 +7655,6 @@ static void handle_simd_dupes(DisasContext *s, int rd, int rn, tmp = tcg_temp_new_i64(); read_vec_element(s, tmp, rn, index, size); write_fp_dreg(s, rd, tmp); - tcg_temp_free_i64(tmp); } /* DUP (General) @@ -8147,8 +7722,6 @@ static void handle_simd_inse(DisasContext *s, int rd, int rn, read_vec_element(s, tmp, rn, src_index, size); write_vec_element(s, tmp, rd, dst_index, size); - tcg_temp_free_i64(tmp); - /* INS is considered a 128-bit write for SVE. */ clear_vec_high(s, true, rd); } @@ -8459,10 +8032,6 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn) } write_fp_dreg(s, rd, tcg_res); - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_temp_free_i64(tcg_res); } else { TCGv_i32 tcg_op1 = tcg_temp_new_i32(); TCGv_i32 tcg_op2 = tcg_temp_new_i32(); @@ -8514,14 +8083,6 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn) } write_fp_sreg(s, rd, tcg_res); - - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i32(tcg_res); - } - - if (fpst) { - tcg_temp_free_ptr(fpst); } } @@ -8606,10 +8167,6 @@ static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src, } else { tcg_gen_mov_i64(tcg_res, tcg_src); } - - if (extended_result) { - tcg_temp_free_i64(tcg_src_hi); - } } /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */ @@ -8675,9 +8232,6 @@ static void handle_scalar_simd_shri(DisasContext *s, } write_fp_dreg(s, rd, tcg_rd); - - tcg_temp_free_i64(tcg_rn); - tcg_temp_free_i64(tcg_rd); } /* SHL/SLI - Scalar shift left */ @@ -8710,9 +8264,6 @@ static void handle_scalar_simd_shli(DisasContext *s, bool insert, } write_fp_dreg(s, rd, tcg_rd); - - tcg_temp_free_i64(tcg_rn); - tcg_temp_free_i64(tcg_rd); } /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with @@ -8772,7 +8323,7 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, tcg_rn = tcg_temp_new_i64(); tcg_rd = tcg_temp_new_i64(); tcg_rd_narrowed = tcg_temp_new_i32(); - tcg_final = tcg_const_i64(0); + tcg_final = tcg_temp_new_i64(); if (round) { tcg_round = tcg_constant_i64(1ULL << (shift - 1)); @@ -8786,7 +8337,11 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, false, is_u_shift, size+1, shift); narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd); tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed); - tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize); + if (i == 0) { + tcg_gen_mov_i64(tcg_final, tcg_rd); + } else { + tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize); + } } if (!is_q) { @@ -8794,12 +8349,6 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, } else { write_vec_element(s, tcg_final, rd, 1, MO_64); } - - tcg_temp_free_i64(tcg_rn); - tcg_temp_free_i64(tcg_rd); - tcg_temp_free_i32(tcg_rd_narrowed); - tcg_temp_free_i64(tcg_final); - 
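/*
 * [editor's sketch] Pattern behind the tcg_const_i64(0) removals in the
 * SQSHRN hunk above: a TCG constant is immutable and shared, so a "temp
 * initialised to zero and then overwritten" becomes either a real
 * tcg_constant_*() (read-only input) or a fresh temp whose first use is
 * a write.  Fragment for illustration only; shift and tcg_rd are as in
 * the hunk:
 */
TCGv_i64 tcg_round = tcg_constant_i64(1ULL << (shift - 1)); /* read-only */
TCGv_i64 tcg_final = tcg_temp_new_i64();  /* no zero init needed ...    */
tcg_gen_mov_i64(tcg_final, tcg_rd);       /* ... first use is a write   */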
clear_vec_high(s, is_q, rd); } @@ -8860,8 +8409,6 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, read_vec_element(s, tcg_op, rn, pass, MO_64); genfn(tcg_op, cpu_env, tcg_op, tcg_shift); write_vec_element(s, tcg_op, rd, pass, MO_64); - - tcg_temp_free_i64(tcg_op); } clear_vec_high(s, is_q, rd); } else { @@ -8907,8 +8454,6 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, } else { write_vec_element_i32(s, tcg_op, rd, pass, MO_32); } - - tcg_temp_free_i32(tcg_op); } if (!scalar) { @@ -8952,10 +8497,6 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, write_vec_element(s, tcg_double, rd, pass, MO_64); } } - - tcg_temp_free_i64(tcg_int64); - tcg_temp_free_i64(tcg_double); - } else { TCGv_i32 tcg_int32 = tcg_temp_new_i32(); TCGv_i32 tcg_float = tcg_temp_new_i32(); @@ -9008,13 +8549,8 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, write_vec_element_i32(s, tcg_float, rd, pass, size); } } - - tcg_temp_free_i32(tcg_int32); - tcg_temp_free_i32(tcg_float); } - tcg_temp_free_ptr(tcg_fpst); - clear_vec_high(s, elements << size == 16, rd); } @@ -9099,9 +8635,8 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, assert(!(is_scalar && is_q)); - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO)); tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); + tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus); fracbits = (16 << size) - immhb; tcg_shift = tcg_constant_i32(fracbits); @@ -9118,7 +8653,6 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus); } write_vec_element(s, tcg_op, rd, pass, MO_64); - tcg_temp_free_i64(tcg_op); } clear_vec_high(s, is_q, rd); } else { @@ -9154,16 +8688,13 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, } else { write_vec_element_i32(s, tcg_op, rd, pass, size); } - tcg_temp_free_i32(tcg_op); } if (!is_scalar) { clear_vec_high(s, is_q, rd); } } - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); - tcg_temp_free_ptr(tcg_fpstatus); - tcg_temp_free_i32(tcg_rmode); + gen_restore_rmode(tcg_rmode, tcg_fpstatus); } /* AdvSIMD scalar shift by immediate @@ -9306,10 +8837,6 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn) } write_fp_dreg(s, rd, tcg_res); - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_temp_free_i64(tcg_res); } else { TCGv_i32 tcg_op1 = read_fp_hreg(s, rn); TCGv_i32 tcg_op2 = read_fp_hreg(s, rm); @@ -9330,7 +8857,6 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn) read_vec_element(s, tcg_op3, rd, 0, MO_32); gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_op3); - tcg_temp_free_i64(tcg_op3); break; } default: @@ -9339,10 +8865,6 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn) tcg_gen_ext32u_i64(tcg_res, tcg_res); write_fp_dreg(s, rd, tcg_res); - - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i64(tcg_res); } } @@ -9517,10 +9039,6 @@ static void handle_3same_float(DisasContext *s, int size, int elements, } write_vec_element(s, tcg_res, rd, pass, MO_64); - - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); } else { /* Single */ TCGv_i32 tcg_op1 = tcg_temp_new_i32(); @@ -9602,19 +9120,12 @@ static void handle_3same_float(DisasContext *s, int size, int elements, 
tcg_gen_extu_i32_i64(tcg_tmp, tcg_res); write_vec_element(s, tcg_tmp, rd, pass, MO_64); - tcg_temp_free_i64(tcg_tmp); } else { write_vec_element_i32(s, tcg_res, rd, pass, MO_32); } - - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); } } - tcg_temp_free_ptr(fpst); - clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd); } @@ -9700,8 +9211,6 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn) TCGv_i64 tcg_rm = read_fp_dreg(s, rm); handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm); - tcg_temp_free_i64(tcg_rn); - tcg_temp_free_i64(tcg_rm); } else { /* Do a single operation on the lowest element in the vector. * We use the standard Neon helpers and rely on 0 OP 0 == 0 with @@ -9774,14 +9283,9 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn) genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm); tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32); - tcg_temp_free_i32(tcg_rd32); - tcg_temp_free_i32(tcg_rn); - tcg_temp_free_i32(tcg_rm); } write_fp_dreg(s, rd, tcg_rd); - - tcg_temp_free_i64(tcg_rd); } /* AdvSIMD scalar three same FP16 @@ -9871,12 +9375,6 @@ static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s, } write_fp_sreg(s, rd, tcg_res); - - - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_ptr(fpst); } /* AdvSIMD scalar three same extra @@ -9951,15 +9449,10 @@ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s, default: g_assert_not_reached(); } - tcg_temp_free_i32(ele1); - tcg_temp_free_i32(ele2); res = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(res, ele3); - tcg_temp_free_i32(ele3); - write_fp_dreg(s, rd, res); - tcg_temp_free_i64(res); } static void handle_2misc_64(DisasContext *s, int opcode, bool u, @@ -10115,8 +9608,6 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, } write_vec_element(s, tcg_res, rd, pass, MO_64); } - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_op); clear_vec_high(s, !is_scalar, rd); } else { @@ -10189,14 +9680,11 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, write_vec_element_i32(s, tcg_res, rd, pass, size); } } - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op); + if (!is_scalar) { clear_vec_high(s, is_q, rd); } } - - tcg_temp_free_ptr(fpst); } static void handle_2misc_reciprocal(DisasContext *s, int opcode, @@ -10228,8 +9716,6 @@ static void handle_2misc_reciprocal(DisasContext *s, int opcode, } write_vec_element(s, tcg_res, rd, pass, MO_64); } - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_op); clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_op = tcg_temp_new_i32(); @@ -10268,13 +9754,10 @@ static void handle_2misc_reciprocal(DisasContext *s, int opcode, write_vec_element_i32(s, tcg_res, rd, pass, MO_32); } } - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op); if (!is_scalar) { clear_vec_high(s, is_q, rd); } } - tcg_temp_free_ptr(fpst); } static void handle_2misc_narrow(DisasContext *s, bool scalar, @@ -10352,17 +9835,12 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar, gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp); gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp); tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16); - tcg_temp_free_i32(tcg_lo); - tcg_temp_free_i32(tcg_hi); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(ahp); } break; case 0x36: /* BFCVTN, BFCVTN2 */ { TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst); - tcg_temp_free_ptr(fpst); } 
break; case 0x56: /* FCVTXN, FCVTXN2 */ @@ -10381,13 +9859,10 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar, } else if (genenvfn) { genenvfn(tcg_res[pass], cpu_env, tcg_op); } - - tcg_temp_free_i64(tcg_op); } for (pass = 0; pass < 2; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32); - tcg_temp_free_i32(tcg_res[pass]); } clear_vec_high(s, is_q, rd); } @@ -10414,8 +9889,6 @@ static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u, } write_vec_element(s, tcg_rd, rd, pass, MO_64); } - tcg_temp_free_i64(tcg_rd); - tcg_temp_free_i64(tcg_rn); clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_rn = tcg_temp_new_i32(); @@ -10472,8 +9945,6 @@ static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u, } write_vec_element_i32(s, tcg_rd, rd, pass, MO_32); } - tcg_temp_free_i32(tcg_rd); - tcg_temp_free_i32(tcg_rn); clear_vec_high(s, is_q, rd); } } @@ -10611,12 +10082,11 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) } if (is_fcvt) { - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); tcg_fpstatus = fpstatus_ptr(FPST_FPCR); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); + tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); } else { - tcg_rmode = NULL; tcg_fpstatus = NULL; + tcg_rmode = NULL; } if (size == 3) { @@ -10625,8 +10095,6 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus); write_fp_dreg(s, rd, tcg_rd); - tcg_temp_free_i64(tcg_rd); - tcg_temp_free_i64(tcg_rn); } else { TCGv_i32 tcg_rn = tcg_temp_new_i32(); TCGv_i32 tcg_rd = tcg_temp_new_i32(); @@ -10667,14 +10135,10 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) } write_fp_sreg(s, rd, tcg_rd); - tcg_temp_free_i32(tcg_rd); - tcg_temp_free_i32(tcg_rn); } if (is_fcvt) { - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); - tcg_temp_free_i32(tcg_rmode); - tcg_temp_free_ptr(tcg_fpstatus); + gen_restore_rmode(tcg_rmode, tcg_fpstatus); } } @@ -10776,8 +10240,8 @@ static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u, int dsize = 64; int esize = 8 << size; int elements = dsize/esize; - TCGv_i64 tcg_rn = new_tmp_a64(s); - TCGv_i64 tcg_rd = new_tmp_a64(s); + TCGv_i64 tcg_rn = tcg_temp_new_i64(); + TCGv_i64 tcg_rd = tcg_temp_new_i64(); int i; if (size >= 3) { @@ -10851,9 +10315,6 @@ static void handle_vec_simd_shrn(DisasContext *s, bool is_q, } else { write_vec_element(s, tcg_final, rd, 1, MO_64); } - tcg_temp_free_i64(tcg_rn); - tcg_temp_free_i64(tcg_rd); - tcg_temp_free_i64(tcg_final); clear_vec_high(s, is_q, rd); } @@ -11024,8 +10485,6 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, tcg_gen_movcond_i64(is_u ? 
TCG_COND_GEU : TCG_COND_GE, tcg_passres, tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2); - tcg_temp_free_i64(tcg_tmp1); - tcg_temp_free_i64(tcg_tmp2); break; } case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ @@ -11056,13 +10515,6 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, } else if (accop < 0) { tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres); } - - if (accop != 0) { - tcg_temp_free_i64(tcg_passres); - } - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); } } else { /* size 0 or 1, generally helper functions */ @@ -11096,7 +10548,6 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, widenfn(tcg_passres, tcg_op1); gen_neon_addl(size, (opcode == 2), tcg_passres, tcg_passres, tcg_op2_64); - tcg_temp_free_i64(tcg_op2_64); break; } case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ @@ -11143,8 +10594,6 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, default: g_assert_not_reached(); } - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); if (accop != 0) { if (opcode == 9 || opcode == 11) { @@ -11159,15 +10608,12 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, gen_neon_addl(size, (accop < 0), tcg_res[pass], tcg_res[pass], tcg_passres); } - tcg_temp_free_i64(tcg_passres); } } } write_vec_element(s, tcg_res[0], rd, 0, MO_64); write_vec_element(s, tcg_res[1], rd, 1, MO_64); - tcg_temp_free_i64(tcg_res[0]); - tcg_temp_free_i64(tcg_res[1]); } static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size, @@ -11191,17 +10637,13 @@ static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size, read_vec_element(s, tcg_op1, rn, pass, MO_64); read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32); widenfn(tcg_op2_wide, tcg_op2); - tcg_temp_free_i32(tcg_op2); tcg_res[pass] = tcg_temp_new_i64(); gen_neon_addl(size, (opcode == 3), tcg_res[pass], tcg_op1, tcg_op2_wide); - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2_wide); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - tcg_temp_free_i64(tcg_res[pass]); } } @@ -11236,17 +10678,12 @@ static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size, gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2); - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_res[pass] = tcg_temp_new_i32(); gennarrow(tcg_res[pass], tcg_wideres); - tcg_temp_free_i64(tcg_wideres); } for (pass = 0; pass < 2; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32); - tcg_temp_free_i32(tcg_res[pass]); } clear_vec_high(s, is_q, rd); } @@ -11473,14 +10910,10 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode, default: g_assert_not_reached(); } - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - tcg_temp_free_i64(tcg_res[pass]); } } else { int maxpass = is_q ? 4 : 2; @@ -11552,21 +10985,13 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode, if (genfn) { genfn(tcg_res[pass], tcg_op1, tcg_op2); } - - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); } for (pass = 0; pass < maxpass; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); - tcg_temp_free_i32(tcg_res[pass]); } clear_vec_high(s, is_q, rd); } - - if (fpst) { - tcg_temp_free_ptr(fpst); - } } /* Floating point op subgroup of C3.6.16. 
*/ @@ -11823,10 +11248,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2); write_vec_element(s, tcg_res, rd, pass, MO_64); - - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); } } else { for (pass = 0; pass < (is_q ? 4 : 2); pass++) { @@ -11911,10 +11332,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) } write_vec_element_i32(s, tcg_res, rd, pass, MO_32); - - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); } } clear_vec_high(s, is_q, rd); @@ -12085,12 +11502,7 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn) for (pass = 0; pass < maxpass; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16); - tcg_temp_free_i32(tcg_res[pass]); } - - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - } else { for (pass = 0; pass < elements; pass++) { TCGv_i32 tcg_op1 = tcg_temp_new_i32(); @@ -12170,14 +11582,9 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn) } write_vec_element_i32(s, tcg_res, rd, pass, MO_16); - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); } } - tcg_temp_free_ptr(fpst); - clear_vec_high(s, is_q, rd); } @@ -12389,11 +11796,9 @@ static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q, read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32); gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env); - tcg_temp_free_i32(tcg_op); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - tcg_temp_free_i64(tcg_res[pass]); } } else { /* 16 -> 32 bit fp conversion */ @@ -12411,11 +11816,7 @@ static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q, } for (pass = 0; pass < 4; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); - tcg_temp_free_i32(tcg_res[pass]); } - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(ahp); } } @@ -12459,7 +11860,6 @@ static void handle_rev(DisasContext *s, int opcode, bool u, g_assert_not_reached(); } write_vec_element(s, tcg_tmp, rd, i, grp_size); - tcg_temp_free_i64(tcg_tmp); } clear_vec_high(s, is_q, rd); } else { @@ -12467,26 +11867,26 @@ static void handle_rev(DisasContext *s, int opcode, bool u, int esize = 8 << size; int elements = dsize / esize; TCGv_i64 tcg_rn = tcg_temp_new_i64(); - TCGv_i64 tcg_rd = tcg_const_i64(0); - TCGv_i64 tcg_rd_hi = tcg_const_i64(0); + TCGv_i64 tcg_rd[2]; + + for (i = 0; i < 2; i++) { + tcg_rd[i] = tcg_temp_new_i64(); + tcg_gen_movi_i64(tcg_rd[i], 0); + } for (i = 0; i < elements; i++) { int e_rev = (i & 0xf) ^ revmask; - int off = e_rev * esize; - read_vec_element(s, tcg_rn, rn, i, size); - if (off >= 64) { - tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi, - tcg_rn, off - 64, esize); - } else { - tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize); - } - } - write_vec_element(s, tcg_rd, rd, 0, MO_64); - write_vec_element(s, tcg_rd_hi, rd, 1, MO_64); + int w = (e_rev * esize) / 64; + int o = (e_rev * esize) % 64; - tcg_temp_free_i64(tcg_rd_hi); - tcg_temp_free_i64(tcg_rd); - tcg_temp_free_i64(tcg_rn); + read_vec_element(s, tcg_rn, rn, i, size); + tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize); + } + + for (i = 0; i < 2; i++) { + write_vec_element(s, tcg_rd[i], rd, i, MO_64); + } + clear_vec_high(s, true, rd); } } @@ -12520,9 +11920,6 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, read_vec_element(s, tcg_op1, rd, pass, 
MO_64); tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1); } - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); } } else { for (pass = 0; pass < maxpass; pass++) { @@ -12550,7 +11947,6 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, tcg_res[pass], tcg_op); } } - tcg_temp_free_i64(tcg_op); } } if (!is_q) { @@ -12558,7 +11954,6 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - tcg_temp_free_i64(tcg_res[pass]); } } @@ -12582,13 +11977,10 @@ static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd) tcg_res[pass] = tcg_temp_new_i64(); widenfn(tcg_res[pass], tcg_op); tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size); - - tcg_temp_free_i32(tcg_op); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - tcg_temp_free_i64(tcg_res[pass]); } } @@ -12607,7 +11999,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); bool need_fpstatus = false; - bool need_rmode = false; int rmode = -1; TCGv_i32 tcg_rmode; TCGv_ptr tcg_fpstatus; @@ -12757,7 +12148,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) case 0x7a: /* FCVTPU */ case 0x7b: /* FCVTZU */ need_fpstatus = true; - need_rmode = true; rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); if (size == 3 && !is_q) { unallocated_encoding(s); @@ -12767,7 +12157,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) case 0x5c: /* FCVTAU */ case 0x1c: /* FCVTAS */ need_fpstatus = true; - need_rmode = true; rmode = FPROUNDING_TIEAWAY; if (size == 3 && !is_q) { unallocated_encoding(s); @@ -12826,7 +12215,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) case 0x19: /* FRINTM */ case 0x38: /* FRINTP */ case 0x39: /* FRINTZ */ - need_rmode = true; rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); /* fall through */ case 0x59: /* FRINTX */ @@ -12838,7 +12226,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) } break; case 0x58: /* FRINTA */ - need_rmode = true; rmode = FPROUNDING_TIEAWAY; need_fpstatus = true; if (size == 3 && !is_q) { @@ -12854,7 +12241,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) break; case 0x1e: /* FRINT32Z */ case 0x1f: /* FRINT64Z */ - need_rmode = true; rmode = FPROUNDING_ZERO; /* fall through */ case 0x5e: /* FRINT32X */ @@ -12880,14 +12266,13 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) return; } - if (need_fpstatus || need_rmode) { + if (need_fpstatus || rmode >= 0) { tcg_fpstatus = fpstatus_ptr(FPST_FPCR); } else { tcg_fpstatus = NULL; } - if (need_rmode) { - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); + if (rmode >= 0) { + tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); } else { tcg_rmode = NULL; } @@ -12943,9 +12328,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) tcg_rmode, tcg_fpstatus); write_vec_element(s, tcg_res, rd, pass, MO_64); - - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_op); } } else { int pass; @@ -13068,19 +12450,12 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) } write_vec_element_i32(s, tcg_res, rd, pass, MO_32); - - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op); } } clear_vec_high(s, is_q, rd); - if (need_rmode) { - 
gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); - tcg_temp_free_i32(tcg_rmode); - } - if (need_fpstatus) { - tcg_temp_free_ptr(tcg_fpstatus); + if (tcg_rmode) { + gen_restore_rmode(tcg_rmode, tcg_fpstatus); } } @@ -13109,9 +12484,8 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) int pass; TCGv_i32 tcg_rmode = NULL; TCGv_ptr tcg_fpstatus = NULL; - bool need_rmode = false; bool need_fpst = true; - int rmode; + int rmode = -1; if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); @@ -13160,27 +12534,22 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) case 0x3f: /* FRECPX */ break; case 0x18: /* FRINTN */ - need_rmode = true; only_in_vector = true; rmode = FPROUNDING_TIEEVEN; break; case 0x19: /* FRINTM */ - need_rmode = true; only_in_vector = true; rmode = FPROUNDING_NEGINF; break; case 0x38: /* FRINTP */ - need_rmode = true; only_in_vector = true; rmode = FPROUNDING_POSINF; break; case 0x39: /* FRINTZ */ - need_rmode = true; only_in_vector = true; rmode = FPROUNDING_ZERO; break; case 0x58: /* FRINTA */ - need_rmode = true; only_in_vector = true; rmode = FPROUNDING_TIEAWAY; break; @@ -13190,43 +12559,33 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) /* current rounding mode */ break; case 0x1a: /* FCVTNS */ - need_rmode = true; rmode = FPROUNDING_TIEEVEN; break; case 0x1b: /* FCVTMS */ - need_rmode = true; rmode = FPROUNDING_NEGINF; break; case 0x1c: /* FCVTAS */ - need_rmode = true; rmode = FPROUNDING_TIEAWAY; break; case 0x3a: /* FCVTPS */ - need_rmode = true; rmode = FPROUNDING_POSINF; break; case 0x3b: /* FCVTZS */ - need_rmode = true; rmode = FPROUNDING_ZERO; break; case 0x5a: /* FCVTNU */ - need_rmode = true; rmode = FPROUNDING_TIEEVEN; break; case 0x5b: /* FCVTMU */ - need_rmode = true; rmode = FPROUNDING_NEGINF; break; case 0x5c: /* FCVTAU */ - need_rmode = true; rmode = FPROUNDING_TIEAWAY; break; case 0x7a: /* FCVTPU */ - need_rmode = true; rmode = FPROUNDING_POSINF; break; case 0x7b: /* FCVTZU */ - need_rmode = true; rmode = FPROUNDING_ZERO; break; case 0x2f: /* FABS */ @@ -13259,13 +12618,12 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) return; } - if (need_rmode || need_fpst) { + if (rmode >= 0 || need_fpst) { tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16); } - if (need_rmode) { - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); + if (rmode >= 0) { + tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); } if (is_scalar) { @@ -13306,9 +12664,6 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) /* limit any sign extension going on */ tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff); write_fp_sreg(s, rd, tcg_res); - - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op); } else { for (pass = 0; pass < (is_q ? 8 : 4); pass++) { TCGv_i32 tcg_op = tcg_temp_new_i32(); @@ -13362,21 +12717,13 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) } write_vec_element_i32(s, tcg_res, rd, pass, MO_16); - - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op); } clear_vec_high(s, is_q, rd); } if (tcg_rmode) { - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); - tcg_temp_free_i32(tcg_rmode); - } - - if (tcg_fpstatus) { - tcg_temp_free_ptr(tcg_fpstatus); + gen_restore_rmode(tcg_rmode, tcg_fpstatus); } } @@ -13646,7 +12993,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) size == MO_64 ? 
gen_helper_gvec_fcmlas_idx : gen_helper_gvec_fcmlah_idx); - tcg_temp_free_ptr(fpst); } return; @@ -13751,11 +13097,8 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) } write_vec_element(s, tcg_res, rd, pass, MO_64); - tcg_temp_free_i64(tcg_op); - tcg_temp_free_i64(tcg_res); } - tcg_temp_free_i64(tcg_idx); clear_vec_high(s, !is_scalar, rd); } else if (!is_long) { /* 32 bit floating point, or 16 or 32 bit integer. @@ -13929,12 +13272,8 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) } else { write_vec_element_i32(s, tcg_res, rd, pass, MO_32); } - - tcg_temp_free_i32(tcg_op); - tcg_temp_free_i32(tcg_res); } - tcg_temp_free_i32(tcg_idx); clear_vec_high(s, is_q, rd); } else { /* long ops: 16x16->32 or 32x32->64 */ @@ -13975,7 +13314,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) } tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx); - tcg_temp_free_i64(tcg_op); if (satop) { /* saturating, doubling */ @@ -14008,9 +13346,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) default: g_assert_not_reached(); } - tcg_temp_free_i64(tcg_passres); } - tcg_temp_free_i64(tcg_idx); clear_vec_high(s, !is_scalar, rd); } else { @@ -14056,7 +13392,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env, tcg_passres, tcg_passres); } - tcg_temp_free_i32(tcg_op); if (opcode == 0xa || opcode == 0xb) { continue; @@ -14085,9 +13420,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) default: g_assert_not_reached(); } - tcg_temp_free_i64(tcg_passres); } - tcg_temp_free_i32(tcg_idx); if (is_scalar) { tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]); @@ -14100,13 +13433,8 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - tcg_temp_free_i64(tcg_res[pass]); } } - - if (fpst) { - tcg_temp_free_ptr(fpst); - } } /* Crypto AES @@ -14486,12 +13814,6 @@ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn) } write_vec_element(s, tcg_res[0], rd, 0, MO_64); write_vec_element(s, tcg_res[1], rd, 1, MO_64); - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_temp_free_i64(tcg_op3); - tcg_temp_free_i64(tcg_res[0]); - tcg_temp_free_i64(tcg_res[1]); } else { TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero; @@ -14514,11 +13836,6 @@ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn) write_vec_element_i32(s, tcg_zero, rd, 1, MO_32); write_vec_element_i32(s, tcg_zero, rd, 2, MO_32); write_vec_element_i32(s, tcg_res, rd, 3, MO_32); - - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i32(tcg_op3); - tcg_temp_free_i32(tcg_res); } } @@ -14647,12 +13964,6 @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn) } } -/* - * Include the generated SME FA64 decoder. - */ - -#include "decode-sme-fa64.c.inc" - static bool trans_OK(DisasContext *s, arg_OK *a) { return true; @@ -14687,7 +13998,7 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s) * that the TLB entry must be present and valid, and thus this * access will never raise an exception. 
*/ - flags = probe_access_full(env, addr, MMU_INST_FETCH, mmu_idx, + flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx, false, &host, &full, 0); assert(!(flags & TLB_INVALID_MASK)); @@ -14747,6 +14058,33 @@ static bool btype_destination_ok(uint32_t insn, bool bt, int btype) return false; } +/* C3.1 A64 instruction index by encoding */ +static void disas_a64_legacy(DisasContext *s, uint32_t insn) +{ + switch (extract32(insn, 25, 4)) { + case 0xa: case 0xb: /* Branch, exception generation and system insns */ + disas_b_exc_sys(s, insn); + break; + case 0x4: + case 0x6: + case 0xc: + case 0xe: /* Loads and stores */ + disas_ldst(s, insn); + break; + case 0x5: + case 0xd: /* Data processing - register */ + disas_data_proc_reg(s, insn); + break; + case 0x7: + case 0xf: /* Data processing - SIMD and floating point */ + disas_data_proc_simd_fp(s, insn); + break; + default: + unallocated_encoding(s); + break; + } +} + static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) { @@ -14832,8 +14170,6 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, bound = 1; } dc->base.max_insns = MIN(dc->base.max_insns, bound); - - init_tmp_a64_array(dc); } static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu) @@ -14845,7 +14181,7 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) DisasContext *dc = container_of(dcbase, DisasContext, base); target_ulong pc_arg = dc->base.pc_next; - if (TARGET_TB_PCREL) { + if (tb_cflags(dcbase->tb) & CF_PCREL) { pc_arg &= ~TARGET_PAGE_MASK; } tcg_gen_insn_start(pc_arg, 0, 0); @@ -14950,48 +14286,12 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) disas_sme_fa64(s, insn); } - switch (extract32(insn, 25, 4)) { - case 0x0: - if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) { - unallocated_encoding(s); - } - break; - case 0x1: case 0x3: /* UNALLOCATED */ - unallocated_encoding(s); - break; - case 0x2: - if (!disas_sve(s, insn)) { - unallocated_encoding(s); - } - break; - case 0x8: case 0x9: /* Data processing - immediate */ - disas_data_proc_imm(s, insn); - break; - case 0xa: case 0xb: /* Branch, exception generation and system insns */ - disas_b_exc_sys(s, insn); - break; - case 0x4: - case 0x6: - case 0xc: - case 0xe: /* Loads and stores */ - disas_ldst(s, insn); - break; - case 0x5: - case 0xd: /* Data processing - register */ - disas_data_proc_reg(s, insn); - break; - case 0x7: - case 0xf: /* Data processing - SIMD and floating point */ - disas_data_proc_simd_fp(s, insn); - break; - default: - assert(FALSE); /* all 15 cases should be handled above */ - break; + if (!disas_a64(s, insn) && + !disas_sme(s, insn) && + !disas_sve(s, insn)) { + disas_a64_legacy(s, insn); } - /* if we allocated any temporaries, free them here */ - free_tmp_a64(s); - /* * After execution of most insns, btype is reset to 0. * Note that we set btype == -1 when the insn sets btype. 
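/*
 * [editor's note] Two non-mechanical changes sit in the hunks above:
 * (1) the need_rmode flag is gone -- rmode starts at -1 and "rmode >= 0"
 * is the sentinel for "a rounding mode was requested"; (2) the
 * top-level A64 decode becomes a chain of generated decodetree
 * decoders, with the old hand-written switch surviving only as
 * disas_a64_legacy().  Control-flow sketch of (2); decode_one is a
 * made-up wrapper name:
 */
static void decode_one(DisasContext *s, uint32_t insn)
{
    /* Generated decoders return false for "not my insn". */
    if (!disas_a64(s, insn) &&
        !disas_sme(s, insn) &&
        !disas_sve(s, insn)) {
        /* Fallback: the legacy switch on insn[28:25]. */
        disas_a64_legacy(s, insn);
    }
}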
@@ -14999,8 +14299,6 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) { reset_btype(s); } - - translator_loop_temp_check(&s->base); } static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h index ad3762d1ac..0576c4ea12 100644 --- a/target/arm/tcg/translate-a64.h +++ b/target/arm/tcg/translate-a64.h @@ -18,9 +18,6 @@ #ifndef TARGET_ARM_TRANSLATE_A64_H #define TARGET_ARM_TRANSLATE_A64_H -TCGv_i64 new_tmp_a64(DisasContext *s); -TCGv_i64 new_tmp_a64_local(DisasContext *s); -TCGv_i64 new_tmp_a64_zero(DisasContext *s); TCGv_i64 cpu_reg(DisasContext *s, int reg); TCGv_i64 cpu_reg_sp(DisasContext *s, int reg); TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf); diff --git a/target/arm/tcg/translate-m-nocp.c b/target/arm/tcg/translate-m-nocp.c index 5df7d46120..9a89aab785 100644 --- a/target/arm/tcg/translate-m-nocp.c +++ b/target/arm/tcg/translate-m-nocp.c @@ -91,7 +91,6 @@ static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a) } else { gen_helper_v7m_vlstm(cpu_env, fptr); } - tcg_temp_free_i32(fptr); clear_eci_state(s); @@ -303,8 +302,6 @@ static void gen_branch_fpInactive(DisasContext *s, TCGCond cond, tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK); tcg_gen_or_i32(fpca, fpca, aspen); tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label); - tcg_temp_free_i32(aspen); - tcg_temp_free_i32(fpca); } static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, @@ -328,7 +325,6 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, case ARM_VFP_FPSCR: tmp = loadfn(s, opaque, true); gen_helper_vfp_set_fpscr(cpu_env, tmp); - tcg_temp_free_i32(tmp); gen_lookup_tb(s); break; case ARM_VFP_FPSCR_NZCVQC: @@ -351,7 +347,6 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK); tcg_gen_or_i32(fpscr, fpscr, tmp); store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]); - tcg_temp_free_i32(tmp); break; } case ARM_VFP_FPCXT_NS: @@ -400,8 +395,6 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK); gen_helper_vfp_set_fpscr(cpu_env, tmp); s->base.is_jmp = DISAS_UPDATE_NOCHAIN; - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(sfpa); break; } case ARM_VFP_VPR: @@ -423,7 +416,6 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH); store_cpu_field(vpr, v7m.vpr); s->base.is_jmp = DISAS_UPDATE_NOCHAIN; - tcg_temp_free_i32(tmp); break; } default: @@ -491,7 +483,6 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno, tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK); tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT); tcg_gen_or_i32(tmp, tmp, sfpa); - tcg_temp_free_i32(sfpa); /* * Store result before updating FPSCR etc, in case * it is a memory write which causes an exception. 
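/*
 * [editor's note] The remaining files (translate-m-nocp.c,
 * translate-mve.c, translate-neon.c) are almost entirely mechanical:
 * each hunk deletes tcg_temp_free_*() calls, and "} else {
 * tcg_temp_free_...(addr); }" tails collapse away, because temporaries
 * now die automatically at the end of the translated insn.  Hedged
 * sketch of the resulting shape, using the load_reg()/FPSCR helpers
 * seen in these hunks:
 */
TCGv_i32 tmp = load_reg(s, a->rt);      /* still allocates a temp */
gen_helper_vfp_set_fpscr(cpu_env, tmp); /* use it exactly as before */
/* (no tcg_temp_free_i32(tmp) -- the translator now frees it for us) */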
@@ -505,7 +496,6 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno, store_cpu_field(control, v7m.control[M_REG_S]); fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]); gen_helper_vfp_set_fpscr(cpu_env, fpscr); - tcg_temp_free_i32(fpscr); lookup_tb = true; break; } @@ -546,7 +536,6 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno, tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK); tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT); tcg_gen_or_i32(tmp, tmp, sfpa); - tcg_temp_free_i32(control); /* Store result before updating FPSCR, in case it faults */ storefn(s, opaque, tmp, true); /* If SFPA is zero then set FPSCR from FPDSCR_NS */ @@ -554,9 +543,6 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno, tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, tcg_constant_i32(0), fpdscr, fpscr); gen_helper_vfp_set_fpscr(cpu_env, fpscr); - tcg_temp_free_i32(sfpa); - tcg_temp_free_i32(fpdscr); - tcg_temp_free_i32(fpscr); break; } case ARM_VFP_VPR: @@ -598,7 +584,6 @@ static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value, if (a->rt == 15) { /* Set the 4 flag bits in the CPSR */ gen_set_nzcv(value); - tcg_temp_free_i32(value); } else { store_reg(s, a->rt, value); } @@ -666,7 +651,6 @@ static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value, if (do_access) { gen_aa32_st_i32(s, value, addr, get_mem_index(s), MO_UL | MO_ALIGN | s->be_data); - tcg_temp_free_i32(value); } if (a->w) { @@ -675,8 +659,6 @@ static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value, tcg_gen_addi_i32(addr, addr, offset); } store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } } @@ -717,8 +699,6 @@ static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque, tcg_gen_addi_i32(addr, addr, offset); } store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } return value; } diff --git a/target/arm/tcg/translate-mve.c b/target/arm/tcg/translate-mve.c index db7ea3f603..31fb2110f1 100644 --- a/target/arm/tcg/translate-mve.c +++ b/target/arm/tcg/translate-mve.c @@ -178,7 +178,6 @@ static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn, qreg = mve_qreg_ptr(a->qd); fn(cpu_env, qreg, addr); - tcg_temp_free_ptr(qreg); /* * Writeback always happens after the last beat of the insn, @@ -189,8 +188,6 @@ static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn, tcg_gen_addi_i32(addr, addr, offset); } store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } mve_update_eci(s); return true; @@ -242,9 +239,6 @@ static bool do_ldst_sg(DisasContext *s, arg_vldst_sg *a, MVEGenLdStSGFn fn) qd = mve_qreg_ptr(a->qd); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qd, qm, addr); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qm); - tcg_temp_free_i32(addr); mve_update_eci(s); return true; } @@ -341,8 +335,6 @@ static bool do_ldst_sg_imm(DisasContext *s, arg_vldst_sg_imm *a, qd = mve_qreg_ptr(a->qd); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qd, qm, tcg_constant_i32(offset)); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qm); mve_update_eci(s); return true; } @@ -414,8 +406,6 @@ static bool do_vldst_il(DisasContext *s, arg_vldst_il *a, MVEGenLdStIlFn *fn, if (a->w) { tcg_gen_addi_i32(rn, rn, addrinc); store_reg(s, a->rn, rn); - } else { - tcg_temp_free_i32(rn); } mve_update_and_store_eci(s); return true; @@ -506,9 +496,7 @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a) qd = mve_qreg_ptr(a->qd); tcg_gen_dup_i32(a->size, rt, rt); gen_helper_mve_vdup(cpu_env, qd, rt); - tcg_temp_free_ptr(qd); } - 
tcg_temp_free_i32(rt); mve_update_eci(s); return true; } @@ -534,8 +522,6 @@ static bool do_1op_vec(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn, qd = mve_qreg_ptr(a->qd); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qd, qm); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qm); } mve_update_eci(s); return true; @@ -602,7 +588,7 @@ DO_VCVT(VCVT_FS, vcvt_hs, vcvt_fs) DO_VCVT(VCVT_FU, vcvt_hu, vcvt_fu) static bool do_vcvt_rmode(DisasContext *s, arg_1op *a, - enum arm_fprounding rmode, bool u) + ARMFPRounding rmode, bool u) { /* * Handle VCVT fp to int with specified rounding mode. @@ -631,8 +617,6 @@ static bool do_vcvt_rmode(DisasContext *s, arg_1op *a, qd = mve_qreg_ptr(a->qd); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qd, qm, tcg_constant_i32(arm_rmode_to_sf(rmode))); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qm); mve_update_eci(s); return true; } @@ -821,9 +805,6 @@ static bool do_2op_vec(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn, qn = mve_qreg_ptr(a->qn); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qd, qn, qm); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qn); - tcg_temp_free_ptr(qm); } mve_update_eci(s); return true; @@ -1076,9 +1057,6 @@ static bool do_2op_scalar(DisasContext *s, arg_2scalar *a, qn = mve_qreg_ptr(a->qn); rm = load_reg(s, a->rm); fn(cpu_env, qd, qn, rm); - tcg_temp_free_i32(rm); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qn); mve_update_eci(s); return true; } @@ -1172,7 +1150,7 @@ static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a, MVEGenLongDualAccOpFn *fn) { TCGv_ptr qn, qm; - TCGv_i64 rda; + TCGv_i64 rda_i, rda_o; TCGv_i32 rdalo, rdahi; if (!dc_isar_feature(aa32_mve, s) || @@ -1199,28 +1177,24 @@ static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a, * of an A=0 (no-accumulate) insn which does not execute the first * beat must start with the current rda value, not 0. */ + rda_o = tcg_temp_new_i64(); if (a->a || mve_skip_first_beat(s)) { - rda = tcg_temp_new_i64(); + rda_i = rda_o; rdalo = load_reg(s, a->rdalo); rdahi = load_reg(s, a->rdahi); - tcg_gen_concat_i32_i64(rda, rdalo, rdahi); - tcg_temp_free_i32(rdalo); - tcg_temp_free_i32(rdahi); + tcg_gen_concat_i32_i64(rda_i, rdalo, rdahi); } else { - rda = tcg_const_i64(0); + rda_i = tcg_constant_i64(0); } - fn(rda, cpu_env, qn, qm, rda); - tcg_temp_free_ptr(qn); - tcg_temp_free_ptr(qm); + fn(rda_o, cpu_env, qn, qm, rda_i); rdalo = tcg_temp_new_i32(); rdahi = tcg_temp_new_i32(); - tcg_gen_extrl_i64_i32(rdalo, rda); - tcg_gen_extrh_i64_i32(rdahi, rda); + tcg_gen_extrl_i64_i32(rdalo, rda_o); + tcg_gen_extrh_i64_i32(rdahi, rda_o); store_reg(s, a->rdalo, rdalo); store_reg(s, a->rdahi, rdahi); - tcg_temp_free_i64(rda); mve_update_eci(s); return true; } @@ -1285,7 +1259,7 @@ static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a) static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn) { TCGv_ptr qn, qm; - TCGv_i32 rda; + TCGv_i32 rda_i, rda_o; if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qn) || @@ -1305,15 +1279,14 @@ static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn) * beat must start with the current rda value, not 0. 
*/ if (a->a || mve_skip_first_beat(s)) { - rda = load_reg(s, a->rda); + rda_o = rda_i = load_reg(s, a->rda); } else { - rda = tcg_const_i32(0); + rda_i = tcg_constant_i32(0); + rda_o = tcg_temp_new_i32(); } - fn(rda, cpu_env, qn, qm, rda); - store_reg(s, a->rda, rda); - tcg_temp_free_ptr(qn); - tcg_temp_free_ptr(qm); + fn(rda_o, cpu_env, qn, qm, rda_i); + store_reg(s, a->rda, rda_o); mve_update_eci(s); return true; @@ -1425,7 +1398,7 @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a) { NULL, NULL } }; TCGv_ptr qm; - TCGv_i32 rda; + TCGv_i32 rda_i, rda_o; if (!dc_isar_feature(aa32_mve, s) || a->size == 3) { @@ -1442,16 +1415,16 @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a) */ if (a->a || mve_skip_first_beat(s)) { /* Accumulate input from Rda */ - rda = load_reg(s, a->rda); + rda_o = rda_i = load_reg(s, a->rda); } else { /* Accumulate starting at zero */ - rda = tcg_const_i32(0); + rda_i = tcg_constant_i32(0); + rda_o = tcg_temp_new_i32(); } qm = mve_qreg_ptr(a->qm); - fns[a->size][a->u](rda, cpu_env, qm, rda); - store_reg(s, a->rda, rda); - tcg_temp_free_ptr(qm); + fns[a->size][a->u](rda_o, cpu_env, qm, rda_i); + store_reg(s, a->rda, rda_o); mve_update_eci(s); return true; @@ -1466,7 +1439,7 @@ static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a) * No need to check Qm's bank: it is only 3 bits in decode. */ TCGv_ptr qm; - TCGv_i64 rda; + TCGv_i64 rda_i, rda_o; TCGv_i32 rdalo, rdahi; if (!dc_isar_feature(aa32_mve, s)) { @@ -1488,34 +1461,31 @@ static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a) * of an A=0 (no-accumulate) insn which does not execute the first * beat must start with the current value of RdaHi:RdaLo, not zero. */ + rda_o = tcg_temp_new_i64(); if (a->a || mve_skip_first_beat(s)) { /* Accumulate input from RdaHi:RdaLo */ - rda = tcg_temp_new_i64(); + rda_i = rda_o; rdalo = load_reg(s, a->rdalo); rdahi = load_reg(s, a->rdahi); - tcg_gen_concat_i32_i64(rda, rdalo, rdahi); - tcg_temp_free_i32(rdalo); - tcg_temp_free_i32(rdahi); + tcg_gen_concat_i32_i64(rda_i, rdalo, rdahi); } else { /* Accumulate starting at zero */ - rda = tcg_const_i64(0); + rda_i = tcg_constant_i64(0); } qm = mve_qreg_ptr(a->qm); if (a->u) { - gen_helper_mve_vaddlv_u(rda, cpu_env, qm, rda); + gen_helper_mve_vaddlv_u(rda_o, cpu_env, qm, rda_i); } else { - gen_helper_mve_vaddlv_s(rda, cpu_env, qm, rda); + gen_helper_mve_vaddlv_s(rda_o, cpu_env, qm, rda_i); } - tcg_temp_free_ptr(qm); rdalo = tcg_temp_new_i32(); rdahi = tcg_temp_new_i32(); - tcg_gen_extrl_i64_i32(rdalo, rda); - tcg_gen_extrh_i64_i32(rdahi, rda); + tcg_gen_extrl_i64_i32(rdalo, rda_o); + tcg_gen_extrh_i64_i32(rdahi, rda_o); store_reg(s, a->rdalo, rdalo); store_reg(s, a->rdahi, rdahi); - tcg_temp_free_i64(rda); mve_update_eci(s); return true; } @@ -1543,7 +1513,6 @@ static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn, } else { qd = mve_qreg_ptr(a->qd); fn(cpu_env, qd, tcg_constant_i64(imm)); - tcg_temp_free_ptr(qd); } mve_update_eci(s); return true; @@ -1616,8 +1585,6 @@ static bool do_2shift_vec(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn, qd = mve_qreg_ptr(a->qd); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qd, qm, tcg_constant_i32(shift)); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qm); } mve_update_eci(s); return true; @@ -1723,8 +1690,6 @@ static bool do_2shift_scalar(DisasContext *s, arg_shl_scalar *a, qda = mve_qreg_ptr(a->qda); rm = load_reg(s, a->rm); fn(cpu_env, qda, qda, rm); - tcg_temp_free_ptr(qda); - tcg_temp_free_i32(rm); mve_update_eci(s); return true; } @@ -1868,7 +1833,6 @@ static 
bool trans_VSHLC(DisasContext *s, arg_VSHLC *a) rdm = load_reg(s, a->rdm); gen_helper_mve_vshlc(rdm, cpu_env, qd, rdm, tcg_constant_i32(a->imm)); store_reg(s, a->rdm, rdm); - tcg_temp_free_ptr(qd); mve_update_eci(s); return true; } @@ -1898,7 +1862,6 @@ static bool do_vidup(DisasContext *s, arg_vidup *a, MVEGenVIDUPFn *fn) rn = load_reg(s, a->rn); fn(rn, cpu_env, qd, rn, tcg_constant_i32(a->imm)); store_reg(s, a->rn, rn); - tcg_temp_free_ptr(qd); mve_update_eci(s); return true; } @@ -1934,8 +1897,6 @@ static bool do_viwdup(DisasContext *s, arg_viwdup *a, MVEGenVIWDUPFn *fn) rm = load_reg(s, a->rm); fn(rn, cpu_env, qd, rn, rm, tcg_constant_i32(a->imm)); store_reg(s, a->rn, rn); - tcg_temp_free_ptr(qd); - tcg_temp_free_i32(rm); mve_update_eci(s); return true; } @@ -2001,8 +1962,6 @@ static bool do_vcmp(DisasContext *s, arg_vcmp *a, MVEGenCmpFn *fn) qn = mve_qreg_ptr(a->qn); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qn, qm); - tcg_temp_free_ptr(qn); - tcg_temp_free_ptr(qm); if (a->mask) { /* VPT */ gen_vpst(s, a->mask); @@ -2034,8 +1993,6 @@ static bool do_vcmp_scalar(DisasContext *s, arg_vcmp_scalar *a, rm = load_reg(s, a->rm); } fn(cpu_env, qn, rm); - tcg_temp_free_ptr(qn); - tcg_temp_free_i32(rm); if (a->mask) { /* VPT */ gen_vpst(s, a->mask); @@ -2138,7 +2095,6 @@ static bool do_vmaxv(DisasContext *s, arg_vmaxv *a, MVEGenVADDVFn fn) rda = load_reg(s, a->rda); fn(rda, cpu_env, qm, rda); store_reg(s, a->rda, rda); - tcg_temp_free_ptr(qm); mve_update_eci(s); return true; } @@ -2203,8 +2159,6 @@ static bool do_vabav(DisasContext *s, arg_vabav *a, MVEGenVABAVFn *fn) rda = load_reg(s, a->rda); fn(rda, cpu_env, qn, qm, rda); store_reg(s, a->rda, rda); - tcg_temp_free_ptr(qm); - tcg_temp_free_ptr(qn); mve_update_eci(s); return true; } @@ -2297,12 +2251,10 @@ static bool trans_VMOV_from_2gp(DisasContext *s, arg_VMOV_to_2gp *a) if (!mve_skip_vmov(s, vd, a->idx, MO_32)) { tmp = load_reg(s, a->rt); write_neon_element32(tmp, vd, a->idx, MO_32); - tcg_temp_free_i32(tmp); } if (!mve_skip_vmov(s, vd + 1, a->idx, MO_32)) { tmp = load_reg(s, a->rt2); write_neon_element32(tmp, vd + 1, a->idx, MO_32); - tcg_temp_free_i32(tmp); } mve_update_and_store_eci(s); diff --git a/target/arm/tcg/translate-neon.c b/target/arm/tcg/translate-neon.c index 4016339d46..af8685a4ac 100644 --- a/target/arm/tcg/translate-neon.c +++ b/target/arm/tcg/translate-neon.c @@ -182,7 +182,6 @@ static bool do_neon_ddda_fpst(DisasContext *s, int q, int vd, int vn, int vm, vfp_reg_offset(1, vm), vfp_reg_offset(1, vd), fpst, opr_sz, opr_sz, data, fn_gvec_ptr); - tcg_temp_free_ptr(fpst); return true; } @@ -236,7 +235,6 @@ static bool trans_VCADD(DisasContext *s, arg_VCADD *a) vfp_reg_offset(1, a->vm), fpst, opr_sz, opr_sz, a->rot, fn_gvec_ptr); - tcg_temp_free_ptr(fpst); return true; } @@ -433,7 +431,6 @@ static void gen_neon_ldst_base_update(DisasContext *s, int rm, int rn, TCGv_i32 index; index = load_reg(s, rm); tcg_gen_add_i32(base, base, index); - tcg_temp_free_i32(index); } store_reg(s, rn, base); } @@ -536,8 +533,6 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a) } } } - tcg_temp_free_i32(addr); - tcg_temp_free_i64(tmp64); gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8); return true; @@ -630,8 +625,6 @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a) /* Subsequent memory operations inherit alignment */ mop &= ~MO_AMASK; } - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(addr); gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << size) * nregs); @@ -751,8 +744,6 @@ static bool 
trans_VLDST_single(DisasContext *s, arg_VLDST_single *a) /* Subsequent memory operations inherit alignment */ mop &= ~MO_AMASK; } - tcg_temp_free_i32(addr); - tcg_temp_free_i32(tmp); gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << a->size) * nregs); @@ -1061,9 +1052,6 @@ static bool do_3same_pair(DisasContext *s, arg_3same *a, NeonGenTwoOpFn *fn) write_neon_element32(tmp, a->vd, 0, MO_32); write_neon_element32(tmp3, a->vd, 1, MO_32); - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp3); return true; } @@ -1126,7 +1114,6 @@ DO_3SAME_VQDMULH(VQRDMULH, qrdmulh) TCGv_ptr fpst = fpstatus_ptr(FPST); \ tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpst, \ oprsz, maxsz, 0, FUNC); \ - tcg_temp_free_ptr(fpst); \ } #define DO_3S_FP_GVEC(INSN,SFUNC,HFUNC) \ @@ -1225,7 +1212,6 @@ static bool do_3same_fp_pair(DisasContext *s, arg_3same *a, vfp_reg_offset(1, a->vn), vfp_reg_offset(1, a->vm), fpstatus, 8, 8, 0, fn); - tcg_temp_free_ptr(fpstatus); return true; } @@ -1358,7 +1344,6 @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a, read_neon_element64(tmp, a->vm, pass, MO_64); fn(tmp, cpu_env, tmp, constimm); write_neon_element64(tmp, a->vd, pass, MO_64); - tcg_temp_free_i64(tmp); } return true; } @@ -1403,7 +1388,6 @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a, fn(tmp, cpu_env, tmp, constimm); write_neon_element32(tmp, a->vd, pass, MO_32); } - tcg_temp_free_i32(tmp); return true; } @@ -1474,10 +1458,6 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a, narrowfn(rd, cpu_env, rm2); write_neon_element32(rd, a->vd, 1, MO_32); - tcg_temp_free_i32(rd); - tcg_temp_free_i64(rm1); - tcg_temp_free_i64(rm2); - return true; } @@ -1537,22 +1517,17 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a, shiftfn(rm2, rm2, constimm); tcg_gen_concat_i32_i64(rtmp, rm1, rm2); - tcg_temp_free_i32(rm2); narrowfn(rm1, cpu_env, rtmp); write_neon_element32(rm1, a->vd, 0, MO_32); - tcg_temp_free_i32(rm1); shiftfn(rm3, rm3, constimm); shiftfn(rm4, rm4, constimm); tcg_gen_concat_i32_i64(rtmp, rm3, rm4); - tcg_temp_free_i32(rm4); narrowfn(rm3, cpu_env, rtmp); - tcg_temp_free_i64(rtmp); write_neon_element32(rm3, a->vd, 1, MO_32); - tcg_temp_free_i32(rm3); return true; } @@ -1660,7 +1635,6 @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a, tmp = tcg_temp_new_i64(); widenfn(tmp, rm0); - tcg_temp_free_i32(rm0); if (a->shift != 0) { tcg_gen_shli_i64(tmp, tmp, a->shift); tcg_gen_andi_i64(tmp, tmp, ~widen_mask); @@ -1668,13 +1642,11 @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a, write_neon_element64(tmp, a->vd, 0, MO_64); widenfn(tmp, rm1); - tcg_temp_free_i32(rm1); if (a->shift != 0) { tcg_gen_shli_i64(tmp, tmp, a->shift); tcg_gen_andi_i64(tmp, tmp, ~widen_mask); } write_neon_element64(tmp, a->vd, 1, MO_64); - tcg_temp_free_i64(tmp); return true; } @@ -1733,7 +1705,6 @@ static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a, fpst = fpstatus_ptr(a->size == MO_16 ? 
FPST_STD_F16 : FPST_STD); tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, vec_size, vec_size, a->shift, fn); - tcg_temp_free_ptr(fpst); return true; } @@ -1849,7 +1820,6 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, TCGv_i32 tmp = tcg_temp_new_i32(); read_neon_element32(tmp, a->vn, 0, MO_32); widenfn(rn0_64, tmp); - tcg_temp_free_i32(tmp); } if (src2_mop >= 0) { read_neon_element64(rm_64, a->vm, 0, src2_mop); @@ -1857,7 +1827,6 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, TCGv_i32 tmp = tcg_temp_new_i32(); read_neon_element32(tmp, a->vm, 0, MO_32); widenfn(rm_64, tmp); - tcg_temp_free_i32(tmp); } opfn(rn0_64, rn0_64, rm_64); @@ -1872,7 +1841,6 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, TCGv_i32 tmp = tcg_temp_new_i32(); read_neon_element32(tmp, a->vn, 1, MO_32); widenfn(rn1_64, tmp); - tcg_temp_free_i32(tmp); } if (src2_mop >= 0) { read_neon_element64(rm_64, a->vm, 1, src2_mop); @@ -1880,7 +1848,6 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, TCGv_i32 tmp = tcg_temp_new_i32(); read_neon_element32(tmp, a->vm, 1, MO_32); widenfn(rm_64, tmp); - tcg_temp_free_i32(tmp); } write_neon_element64(rn0_64, a->vd, 0, MO_64); @@ -1888,10 +1855,6 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, opfn(rn1_64, rn1_64, rm_64); write_neon_element64(rn1_64, a->vd, 1, MO_64); - tcg_temp_free_i64(rn0_64); - tcg_temp_free_i64(rn1_64); - tcg_temp_free_i64(rm_64); - return true; } @@ -1976,11 +1939,6 @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a, write_neon_element32(rd0, a->vd, 0, MO_32); write_neon_element32(rd1, a->vd, 1, MO_32); - tcg_temp_free_i32(rd0); - tcg_temp_free_i32(rd1); - tcg_temp_free_i64(rn_64); - tcg_temp_free_i64(rm_64); - return true; } @@ -2061,8 +2019,6 @@ static bool do_long_3d(DisasContext *s, arg_3diff *a, read_neon_element32(rn, a->vn, 1, MO_32); read_neon_element32(rm, a->vm, 1, MO_32); opfn(rd1, rn, rm); - tcg_temp_free_i32(rn); - tcg_temp_free_i32(rm); /* Don't store results until after all loads: they might overlap */ if (accfn) { @@ -2071,13 +2027,10 @@ static bool do_long_3d(DisasContext *s, arg_3diff *a, accfn(rd0, tmp, rd0); read_neon_element64(tmp, a->vd, 1, MO_64); accfn(rd1, tmp, rd1); - tcg_temp_free_i64(tmp); } write_neon_element64(rd0, a->vd, 0, MO_64); write_neon_element64(rd1, a->vd, 1, MO_64); - tcg_temp_free_i64(rd0); - tcg_temp_free_i64(rd1); return true; } @@ -2149,9 +2102,6 @@ static void gen_mull_s32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm) tcg_gen_muls2_i32(lo, hi, rn, rm); tcg_gen_concat_i32_i64(rd, lo, hi); - - tcg_temp_free_i32(lo); - tcg_temp_free_i32(hi); } static void gen_mull_u32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm) @@ -2161,9 +2111,6 @@ static void gen_mull_u32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm) tcg_gen_mulu2_i32(lo, hi, rn, rm); tcg_gen_concat_i32_i64(rd, lo, hi); - - tcg_temp_free_i32(lo); - tcg_temp_free_i32(hi); } static bool trans_VMULL_S_3d(DisasContext *s, arg_3diff *a) @@ -2344,7 +2291,6 @@ static void gen_neon_dup_low16(TCGv_i32 var) tcg_gen_ext16u_i32(var, var); tcg_gen_shli_i32(tmp, var, 16); tcg_gen_or_i32(var, var, tmp); - tcg_temp_free_i32(tmp); } static void gen_neon_dup_high16(TCGv_i32 var) @@ -2353,7 +2299,6 @@ static void gen_neon_dup_high16(TCGv_i32 var) tcg_gen_andi_i32(var, var, 0xffff0000); tcg_gen_shri_i32(tmp, var, 16); tcg_gen_or_i32(var, var, tmp); - tcg_temp_free_i32(tmp); } static inline TCGv_i32 neon_get_scalar(int size, int reg) @@ -2417,12 +2362,9 @@ static bool do_2scalar(DisasContext *s, arg_2scalar *a, TCGv_i32 rd = tcg_temp_new_i32(); 
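/*
 * A minimal sketch of the two-part pattern this series applies everywhere
 * (document-wide, not specific to this function): TCG temporaries are now
 * reclaimed automatically when the translation block ends, so every
 * explicit tcg_temp_free_*() call is dead code; and tcg_const_*(), which
 * allocated a writable temp that then had to be freed, gives way to
 * tcg_constant_*(), which returns an interned, read-only value that must
 * never be freed or written:
 *
 *     TCGv_i32 t = tcg_temp_new_i32();    // reclaimed at end of TB
 *     TCGv_i32 z = tcg_constant_i32(0);   // interned constant, never freed
 *     tcg_gen_add_i32(t, t, z);           // no tcg_temp_free_i32(t) needed
 */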
read_neon_element32(rd, a->vd, pass, MO_32); accfn(tmp, rd, tmp); - tcg_temp_free_i32(rd); } write_neon_element32(tmp, a->vd, pass, MO_32); } - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(scalar); return true; } @@ -2516,7 +2458,6 @@ static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a, fpstatus = fpstatus_ptr(a->size == 1 ? FPST_STD_F16 : FPST_STD); tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpstatus, vec_size, vec_size, idx, fn); - tcg_temp_free_ptr(fpstatus); return true; } @@ -2616,10 +2557,6 @@ static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a, opfn(rd, cpu_env, rn, scalar, rd); write_neon_element32(rd, a->vd, pass, MO_32); } - tcg_temp_free_i32(rn); - tcg_temp_free_i32(rd); - tcg_temp_free_i32(scalar); - return true; } @@ -2692,8 +2629,6 @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a, read_neon_element32(rn, a->vn, 1, MO_32); rn1_64 = tcg_temp_new_i64(); opfn(rn1_64, rn, scalar); - tcg_temp_free_i32(rn); - tcg_temp_free_i32(scalar); if (accfn) { TCGv_i64 t64 = tcg_temp_new_i64(); @@ -2701,13 +2636,10 @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a, accfn(rn0_64, t64, rn0_64); read_neon_element64(t64, a->vd, 1, MO_64); accfn(rn1_64, t64, rn1_64); - tcg_temp_free_i64(t64); } write_neon_element64(rn0_64, a->vd, 0, MO_64); write_neon_element64(rn1_64, a->vd, 1, MO_64); - tcg_temp_free_i64(rn0_64); - tcg_temp_free_i64(rn1_64); return true; } @@ -2842,10 +2774,6 @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a) read_neon_element64(left, a->vm, 0, MO_64); tcg_gen_extract2_i64(dest, right, left, a->imm * 8); write_neon_element64(dest, a->vd, 0, MO_64); - - tcg_temp_free_i64(left); - tcg_temp_free_i64(right); - tcg_temp_free_i64(dest); } else { /* Extract 128 bits from <Vm+1:Vm:Vn+1:Vn> */ TCGv_i64 left, middle, right, destleft, destright; @@ -2872,12 +2800,6 @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a) write_neon_element64(destright, a->vd, 0, MO_64); write_neon_element64(destleft, a->vd, 1, MO_64); - - tcg_temp_free_i64(destright); - tcg_temp_free_i64(destleft); - tcg_temp_free_i64(right); - tcg_temp_free_i64(middle); - tcg_temp_free_i64(left); } return true; } @@ -2921,9 +2843,6 @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a) gen_helper_neon_tbl(val, cpu_env, desc, val, def); write_neon_element64(val, a->vd, 0, MO_64); - - tcg_temp_free_i64(def); - tcg_temp_free_i64(val); return true; } @@ -3002,9 +2921,6 @@ static bool trans_VREV64(DisasContext *s, arg_VREV64 *a) write_neon_element32(tmp[1], a->vd, pass * 2, MO_32); write_neon_element32(tmp[0], a->vd, pass * 2 + 1, MO_32); } - - tcg_temp_free_i32(tmp[0]); - tcg_temp_free_i32(tmp[1]); return true; } @@ -3055,20 +2971,15 @@ static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a, widenfn(rm0_64, tmp); read_neon_element32(tmp, a->vm, pass * 2 + 1, MO_32); widenfn(rm1_64, tmp); - tcg_temp_free_i32(tmp); opfn(rd_64, rm0_64, rm1_64); - tcg_temp_free_i64(rm0_64); - tcg_temp_free_i64(rm1_64); if (accfn) { TCGv_i64 tmp64 = tcg_temp_new_i64(); read_neon_element64(tmp64, a->vd, pass, MO_64); accfn(rd_64, tmp64, rd_64); - tcg_temp_free_i64(tmp64); } write_neon_element64(rd_64, a->vd, pass, MO_64); - tcg_temp_free_i64(rd_64); } return true; } @@ -3192,8 +3103,6 @@ static bool do_zip_uzp(DisasContext *s, arg_2misc *a, pd = vfp_reg_ptr(true, a->vd); pm = vfp_reg_ptr(true, a->vm); fn(pd, pm); - tcg_temp_free_ptr(pd); - tcg_temp_free_ptr(pm); return true; } @@ -3271,9 +3180,6 @@ static bool do_vmovn(DisasContext *s, arg_2misc *a, narrowfn(rd1, cpu_env, rm); write_neon_element32(rd0, a->vd, 0,
MO_32); write_neon_element32(rd1, a->vd, 1, MO_32); - tcg_temp_free_i32(rd0); - tcg_temp_free_i32(rd1); - tcg_temp_free_i64(rm); return true; } @@ -3341,10 +3247,6 @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a) widenfn(rd, rm1); tcg_gen_shli_i64(rd, rd, 8 << a->size); write_neon_element64(rd, a->vd, 1, MO_64); - - tcg_temp_free_i64(rd); - tcg_temp_free_i32(rm0); - tcg_temp_free_i32(rm1); return true; } @@ -3385,11 +3287,6 @@ static bool trans_VCVT_B16_F32(DisasContext *s, arg_2misc *a) write_neon_element32(dst0, a->vd, 0, MO_32); write_neon_element32(dst1, a->vd, 1, MO_32); - - tcg_temp_free_i64(tmp); - tcg_temp_free_i32(dst0); - tcg_temp_free_i32(dst1); - tcg_temp_free_ptr(fpst); return true; } @@ -3432,16 +3329,10 @@ static bool trans_VCVT_F16_F32(DisasContext *s, arg_2misc *a) tmp3 = tcg_temp_new_i32(); read_neon_element32(tmp3, a->vm, 3, MO_32); write_neon_element32(tmp2, a->vd, 0, MO_32); - tcg_temp_free_i32(tmp2); gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp); tcg_gen_shli_i32(tmp3, tmp3, 16); tcg_gen_or_i32(tmp3, tmp3, tmp); write_neon_element32(tmp3, a->vd, 1, MO_32); - tcg_temp_free_i32(tmp3); - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(ahp); - tcg_temp_free_ptr(fpst); - return true; } @@ -3482,18 +3373,12 @@ static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a) tcg_gen_shri_i32(tmp, tmp, 16); gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp); write_neon_element32(tmp, a->vd, 1, MO_32); - tcg_temp_free_i32(tmp); tcg_gen_ext16u_i32(tmp3, tmp2); gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp); write_neon_element32(tmp3, a->vd, 2, MO_32); - tcg_temp_free_i32(tmp3); tcg_gen_shri_i32(tmp2, tmp2, 16); gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp); write_neon_element32(tmp2, a->vd, 3, MO_32); - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(ahp); - tcg_temp_free_ptr(fpst); - return true; } @@ -3628,8 +3513,6 @@ static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn) fn(tmp, tmp); write_neon_element32(tmp, a->vd, pass, MO_32); } - tcg_temp_free_i32(tmp); - return true; } @@ -3790,7 +3673,6 @@ static bool trans_VQNEG(DisasContext *s, arg_2misc *a) fpst = fpstatus_ptr(vece == MO_16 ? FPST_STD_F16 : FPST_STD); \ tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, oprsz, maxsz, 0, \ fns[vece]); \ - tcg_temp_free_ptr(fpst); \ } \ static bool trans_##INSN(DisasContext *s, arg_2misc *a) \ { \ @@ -3841,7 +3723,6 @@ static bool trans_VRINTX(DisasContext *s, arg_2misc *a) fpst = fpstatus_ptr(vece == 1 ? 
FPST_STD_F16 : FPST_STD); \ tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, oprsz, maxsz, \ arm_rmode_to_sf(RMODE), fns[vece]); \ - tcg_temp_free_ptr(fpst); \ } \ static bool trans_##INSN(DisasContext *s, arg_2misc *a) \ { \ @@ -3908,11 +3789,9 @@ static bool trans_VSWP(DisasContext *s, arg_2misc *a) write_neon_element64(rm, a->vd, pass, MO_64); write_neon_element64(rd, a->vm, pass, MO_64); } - tcg_temp_free_i64(rm); - tcg_temp_free_i64(rd); - return true; } + static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1) { TCGv_i32 rd, tmp; @@ -3930,9 +3809,6 @@ static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1) tcg_gen_andi_i32(tmp, t0, 0xff00ff00); tcg_gen_or_i32(t1, t1, tmp); tcg_gen_mov_i32(t0, rd); - - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(rd); } static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1) @@ -3949,9 +3825,6 @@ static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1) tcg_gen_andi_i32(tmp, t0, 0xffff0000); tcg_gen_or_i32(t1, t1, tmp); tcg_gen_mov_i32(t0, rd); - - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(rd); } static bool trans_VTRN(DisasContext *s, arg_2misc *a) @@ -4003,8 +3876,6 @@ static bool trans_VTRN(DisasContext *s, arg_2misc *a) write_neon_element32(tmp, a->vd, pass, MO_32); } } - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(tmp2); return true; } diff --git a/target/arm/tcg/translate-sme.c b/target/arm/tcg/translate-sme.c index 7b87a9df63..e3adba314e 100644 --- a/target/arm/tcg/translate-sme.c +++ b/target/arm/tcg/translate-sme.c @@ -97,7 +97,6 @@ static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs, /* Add the byte offset to env to produce the final pointer. */ addr = tcg_temp_new_ptr(); tcg_gen_ext_i32_ptr(addr, tmp); - tcg_temp_free_i32(tmp); tcg_gen_add_ptr(addr, addr, cpu_env); return addr; @@ -166,11 +165,6 @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a) h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc); } } - - tcg_temp_free_ptr(t_za); - tcg_temp_free_ptr(t_zr); - tcg_temp_free_ptr(t_pg); - return true; } @@ -237,10 +231,6 @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(t_za); - tcg_temp_free_ptr(t_pg); - tcg_temp_free_i64(addr); return true; } @@ -260,8 +250,6 @@ static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn) base = get_tile_rowcol(s, MO_8, a->rv, imm, false); fn(s, base, 0, svl, a->rn, imm * svl); - - tcg_temp_free_ptr(base); return true; } @@ -286,11 +274,6 @@ static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz, pm = pred_full_reg_ptr(s, a->pm); fn(za, zn, pn, pm, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(za); - tcg_temp_free_ptr(zn); - tcg_temp_free_ptr(pn); - tcg_temp_free_ptr(pm); return true; } @@ -318,11 +301,6 @@ static bool do_outprod(DisasContext *s, arg_op *a, MemOp esz, pm = pred_full_reg_ptr(s, a->pm); fn(za, zn, zm, pn, pm, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(za); - tcg_temp_free_ptr(zn); - tcg_temp_free_ptr(pn); - tcg_temp_free_ptr(pm); return true; } @@ -346,12 +324,6 @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, fpst = fpstatus_ptr(FPST_FPCR); fn(za, zn, zm, pn, pm, fpst, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(za); - tcg_temp_free_ptr(zn); - tcg_temp_free_ptr(pn); - tcg_temp_free_ptr(pm); - tcg_temp_free_ptr(fpst); return true; } diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 621a2abb22..92ab290106 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -130,7 +130,6 @@ static 
bool gen_gvec_fpst_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), status, vsz, vsz, data, fn); - tcg_temp_free_ptr(status); } return true; } @@ -181,8 +180,6 @@ static bool gen_gvec_fpst_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), status, vsz, vsz, data, fn); - - tcg_temp_free_ptr(status); } return true; } @@ -249,7 +246,6 @@ static bool gen_gvec_fpst_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, { TCGv_ptr status = fpstatus_ptr(flavour); bool ret = gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, status); - tcg_temp_free_ptr(status); return ret; } @@ -271,8 +267,6 @@ static bool gen_gvec_fpst_zzzzp(DisasContext *s, gen_helper_gvec_5_ptr *fn, vec_full_reg_offset(s, ra), pred_full_reg_offset(s, pg), status, vsz, vsz, data, fn); - - tcg_temp_free_ptr(status); } return true; } @@ -321,7 +315,6 @@ static bool gen_gvec_fpst_zzp(DisasContext *s, gen_helper_gvec_3_ptr *fn, vec_full_reg_offset(s, rn), pred_full_reg_offset(s, pg), status, vsz, vsz, data, fn); - tcg_temp_free_ptr(status); } return true; } @@ -374,7 +367,6 @@ static bool gen_gvec_fpst_zzzp(DisasContext *s, gen_helper_gvec_4_ptr *fn, vec_full_reg_offset(s, rm), pred_full_reg_offset(s, pg), status, vsz, vsz, data, fn); - tcg_temp_free_ptr(status); } return true; } @@ -508,7 +500,6 @@ static void do_predtest1(TCGv_i64 d, TCGv_i64 g) gen_helper_sve_predtest1(t, d, g); do_pred_flags(t); - tcg_temp_free_i32(t); } static void do_predtest(DisasContext *s, int dofs, int gofs, int words) @@ -521,11 +512,8 @@ static void do_predtest(DisasContext *s, int dofs, int gofs, int words) tcg_gen_addi_ptr(gptr, cpu_env, gofs); gen_helper_sve_predtest(t, dptr, gptr, tcg_constant_i32(words)); - tcg_temp_free_ptr(dptr); - tcg_temp_free_ptr(gptr); do_pred_flags(t); - tcg_temp_free_i32(t); } /* For each element size, the bits within a predicate word that are active. */ @@ -561,7 +549,6 @@ static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh) tcg_gen_andi_i64(d, d, mask); tcg_gen_andi_i64(t, t, ~mask); tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh) @@ -575,7 +562,6 @@ static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh) tcg_gen_andi_i64(d, d, mask); tcg_gen_andi_i64(t, t, ~mask); tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh) @@ -984,11 +970,8 @@ static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a, tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg)); fn(temp, t_zn, t_pg, desc); - tcg_temp_free_ptr(t_zn); - tcg_temp_free_ptr(t_pg); write_fp_dreg(s, a->rd, temp); - tcg_temp_free_i64(temp); return true; } @@ -1253,11 +1236,7 @@ static bool do_index(DisasContext *s, int esz, int rd, tcg_gen_extrl_i64_i32(s32, start); tcg_gen_extrl_i64_i32(i32, incr); fns[esz](t_zd, s32, i32, desc); - - tcg_temp_free_i32(s32); - tcg_temp_free_i32(i32); } - tcg_temp_free_ptr(t_zd); return true; } @@ -1419,11 +1398,6 @@ static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a, tcg_gen_st_i64(pd, cpu_env, dofs); do_predtest1(pd, pg); - - tcg_temp_free_i64(pd); - tcg_temp_free_i64(pn); - tcg_temp_free_i64(pm); - tcg_temp_free_i64(pg); } else { /* The operation and flags generation is large. 
The computation * of the flags depends on the original contents of the guarding @@ -1694,9 +1668,6 @@ static bool trans_PTEST(DisasContext *s, arg_PTEST *a) tcg_gen_ld_i64(pn, cpu_env, nofs); tcg_gen_ld_i64(pg, cpu_env, gofs); do_predtest1(pn, pg); - - tcg_temp_free_i64(pn); - tcg_temp_free_i64(pg); } else { do_predtest(s, nofs, gofs, words); } @@ -1810,8 +1781,6 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag) } done: - tcg_temp_free_i64(t); - /* PTRUES */ if (setflag) { tcg_gen_movi_i32(cpu_NF, -(word != 0)); @@ -1869,11 +1838,8 @@ static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a, t = tcg_temp_new_i32(); gen_fn(t, t_pd, t_pg, tcg_constant_i32(desc)); - tcg_temp_free_ptr(t_pd); - tcg_temp_free_ptr(t_pg); do_pred_flags(t); - tcg_temp_free_i32(t); return true; } @@ -1950,9 +1916,7 @@ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) t2 = tcg_constant_i64(0); tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg); } - tcg_temp_free_i64(t1); } - tcg_temp_free_i64(t0); } /* Similarly with a vector and a scalar operand. */ @@ -1982,7 +1946,6 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, } else { gen_helper_sve_sqaddi_b(dptr, nptr, t32, desc); } - tcg_temp_free_i32(t32); break; case MO_16: @@ -1996,7 +1959,6 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, } else { gen_helper_sve_sqaddi_h(dptr, nptr, t32, desc); } - tcg_temp_free_i32(t32); break; case MO_32: @@ -2011,7 +1973,6 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, } else { gen_helper_sve_sqaddi_s(dptr, nptr, t64, desc); } - tcg_temp_free_i64(t64); break; case MO_64: @@ -2025,7 +1986,6 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, t64 = tcg_temp_new_i64(); tcg_gen_neg_i64(t64, val); gen_helper_sve_sqaddi_d(dptr, nptr, t64, desc); - tcg_temp_free_i64(t64); } else { gen_helper_sve_sqaddi_d(dptr, nptr, val, desc); } @@ -2034,9 +1994,6 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, default: g_assert_not_reached(); } - - tcg_temp_free_ptr(dptr); - tcg_temp_free_ptr(nptr); } static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a) @@ -2222,10 +2179,6 @@ static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg, tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); fns[esz](t_zd, t_zn, t_pg, val, desc); - - tcg_temp_free_ptr(t_zd); - tcg_temp_free_ptr(t_zn); - tcg_temp_free_ptr(t_pg); } static bool trans_FCPY(DisasContext *s, arg_FCPY *a) @@ -2372,9 +2325,6 @@ static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val) tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn)); fns[a->esz](t_zd, t_zn, val, desc); - - tcg_temp_free_ptr(t_zd); - tcg_temp_free_ptr(t_zn); } static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a) @@ -2386,7 +2336,6 @@ static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a) TCGv_i64 t = tcg_temp_new_i64(); tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64)); do_insr_i64(s, a, t); - tcg_temp_free_i64(t); } return true; } @@ -2476,10 +2425,6 @@ static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd, tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm)); fn(t_d, t_n, t_m, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(t_d); - tcg_temp_free_ptr(t_n); - tcg_temp_free_ptr(t_m); return true; } @@ -2503,9 +2448,6 @@ static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd, desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd); fn(t_d, 
t_n, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(t_d); - tcg_temp_free_ptr(t_n); return true; } @@ -2597,8 +2539,6 @@ static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg) tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg)); gen_helper_sve_last_active_element(ret, t_p, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(t_p); } /* Increment LAST to the offset of the next element in the vector, @@ -2661,7 +2601,6 @@ static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last, int rm, int esz) { TCGv_ptr p = tcg_temp_new_ptr(); - TCGv_i64 r; /* Convert offset into vector into offset into ENV. * The final adjustment for the vector register base @@ -2676,10 +2615,7 @@ static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last, tcg_gen_ext_i32_ptr(p, last); tcg_gen_add_ptr(p, p, cpu_env); - r = load_esz(p, vec_full_reg_offset(s, rm), esz); - tcg_temp_free_ptr(p); - - return r; + return load_esz(p, vec_full_reg_offset(s, rm), esz); } /* Compute CLAST for a Zreg. */ @@ -2694,7 +2630,7 @@ static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before) return true; } - last = tcg_temp_local_new_i32(); + last = tcg_temp_new_i32(); over = gen_new_label(); find_last_active(s, last, esz, a->pg); @@ -2709,11 +2645,9 @@ static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before) } ele = load_last_active(s, last, a->rm, esz); - tcg_temp_free_i32(last); vsz = vec_full_reg_size(s); tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele); - tcg_temp_free_i64(ele); /* If this insn used MOVPRFX, we may need a second move. */ if (a->rd != a->rn) { @@ -2756,13 +2690,9 @@ static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm, * a conditional move. */ ele = load_last_active(s, last, rm, esz); - tcg_temp_free_i32(last); tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, tcg_constant_i64(0), ele, reg_val); - - tcg_temp_free_i64(cmp); - tcg_temp_free_i64(ele); } /* Compute CLAST for a Vreg. */ @@ -2775,7 +2705,6 @@ static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before) do_clast_scalar(s, esz, a->pg, a->rn, before, reg); write_fp_dreg(s, a->rd, reg); - tcg_temp_free_i64(reg); } return true; } @@ -2821,7 +2750,6 @@ static TCGv_i64 do_last_scalar(DisasContext *s, int esz, int pg, int rm, bool before) { TCGv_i32 last = tcg_temp_new_i32(); - TCGv_i64 ret; find_last_active(s, last, esz, pg); if (before) { @@ -2830,9 +2758,7 @@ static TCGv_i64 do_last_scalar(DisasContext *s, int esz, incr_last_active(s, last, esz); } - ret = load_last_active(s, last, rm, esz); - tcg_temp_free_i32(last); - return ret; + return load_last_active(s, last, rm, esz); } /* Compute LAST for a Vreg. 
*/ @@ -2841,7 +2767,6 @@ static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before) if (sve_access_check(s)) { TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); write_fp_dreg(s, a->rd, val); - tcg_temp_free_i64(val); } return true; } @@ -2855,7 +2780,6 @@ static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before) if (sve_access_check(s)) { TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); tcg_gen_mov_i64(cpu_reg(s, a->rd), val); - tcg_temp_free_i64(val); } return true; } @@ -2883,7 +2807,6 @@ static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a) int ofs = vec_reg_offset(s, a->rn, 0, a->esz); TCGv_i64 t = load_esz(cpu_env, ofs, a->esz); do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t); - tcg_temp_free_i64(t); } return true; } @@ -2942,14 +2865,7 @@ static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a, gen_fn(t, pd, zn, zm, pg, tcg_constant_i32(simd_desc(vsz, vsz, 0))); - tcg_temp_free_ptr(pd); - tcg_temp_free_ptr(zn); - tcg_temp_free_ptr(zm); - tcg_temp_free_ptr(pg); - do_pred_flags(t); - - tcg_temp_free_i32(t); return true; } @@ -3021,13 +2937,7 @@ static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a, gen_fn(t, pd, zn, pg, tcg_constant_i32(simd_desc(vsz, vsz, a->imm))); - tcg_temp_free_ptr(pd); - tcg_temp_free_ptr(zn); - tcg_temp_free_ptr(pg); - do_pred_flags(t); - - tcg_temp_free_i32(t); return true; } @@ -3081,14 +2991,9 @@ static bool do_brk3(DisasContext *s, arg_rprr_s *a, TCGv_i32 t = tcg_temp_new_i32(); fn_s(t, d, n, m, g, desc); do_pred_flags(t); - tcg_temp_free_i32(t); } else { fn(d, n, m, g, desc); } - tcg_temp_free_ptr(d); - tcg_temp_free_ptr(n); - tcg_temp_free_ptr(m); - tcg_temp_free_ptr(g); return true; } @@ -3115,13 +3020,9 @@ static bool do_brk2(DisasContext *s, arg_rpr_s *a, TCGv_i32 t = tcg_temp_new_i32(); fn_s(t, d, n, g, desc); do_pred_flags(t); - tcg_temp_free_i32(t); } else { fn(d, n, g, desc); } - tcg_temp_free_ptr(d); - tcg_temp_free_ptr(n); - tcg_temp_free_ptr(g); return true; } @@ -3159,7 +3060,6 @@ static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg) TCGv_i64 g = tcg_temp_new_i64(); tcg_gen_ld_i64(g, cpu_env, pred_full_reg_offset(s, pg)); tcg_gen_and_i64(val, val, g); - tcg_temp_free_i64(g); } /* Reduce the pred_esz_masks value simply to reduce the @@ -3181,8 +3081,6 @@ static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg) tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); gen_helper_sve_cntp(val, t_pn, t_pg, tcg_constant_i32(desc)); - tcg_temp_free_ptr(t_pn); - tcg_temp_free_ptr(t_pg); } } @@ -3212,7 +3110,6 @@ static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a) } else { tcg_gen_add_i64(reg, reg, val); } - tcg_temp_free_i64(val); } return true; } @@ -3297,7 +3194,6 @@ static bool trans_CTERM(DisasContext *s, arg_CTERM *a) tcg_gen_setcond_i64(cond, cmp, rn, rm); tcg_gen_extrl_i64_i32(cpu_NF, cmp); - tcg_temp_free_i64(cmp); /* VF = !NF & !CF. */ tcg_gen_xori_i32(cpu_VF, cpu_NF, 1); @@ -3394,12 +3290,10 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) /* Set the count to zero if the condition is false. */ tcg_gen_movi_i64(t1, 0); tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1); - tcg_temp_free_i64(t1); /* Since we're bounded, pass as a 32-bit type. */ t2 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(t2, t0); - tcg_temp_free_i64(t0); /* Scale elements to bits. 
*/ tcg_gen_shli_i32(t2, t2, a->esz); @@ -3416,9 +3310,6 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) gen_helper_sve_whileg(t2, ptr, t2, tcg_constant_i32(desc)); } do_pred_flags(t2); - - tcg_temp_free_ptr(ptr); - tcg_temp_free_i32(t2); return true; } @@ -3450,7 +3341,6 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) tcg_gen_sub_i64(diff, op0, op1); tcg_gen_sub_i64(t1, op1, op0); tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1); - tcg_temp_free_i64(t1); /* Round down to a multiple of ESIZE. */ tcg_gen_andi_i64(diff, diff, -1 << a->esz); /* If op1 == op0, diff == 0, and the condition is always true. */ @@ -3470,7 +3360,6 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) /* Since we're bounded, pass as a 32-bit type. */ t2 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(t2, diff); - tcg_temp_free_i64(diff); desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8); desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); @@ -3480,9 +3369,6 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc)); do_pred_flags(t2); - - tcg_temp_free_ptr(ptr); - tcg_temp_free_i32(t2); return true; } @@ -3814,12 +3700,8 @@ static bool do_reduce(DisasContext *s, arg_rpr_esz *a, status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); fn(temp, t_zn, t_pg, status, t_desc); - tcg_temp_free_ptr(t_zn); - tcg_temp_free_ptr(t_pg); - tcg_temp_free_ptr(status); write_fp_dreg(s, a->rd, temp); - tcg_temp_free_i64(temp); return true; } @@ -3873,7 +3755,6 @@ static bool do_ppz_fp(DisasContext *s, arg_rpr_esz *a, vec_full_reg_offset(s, a->rn), pred_full_reg_offset(s, a->pg), status, vsz, vsz, 0, fn); - tcg_temp_free_ptr(status); } return true; } @@ -3942,12 +3823,7 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc); - tcg_temp_free_ptr(t_fpst); - tcg_temp_free_ptr(t_pg); - tcg_temp_free_ptr(t_rm); - write_fp_dreg(s, a->rd, t_val); - tcg_temp_free_i64(t_val); return true; } @@ -4020,11 +3896,6 @@ static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16, status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR); desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); fn(t_zd, t_zn, t_pg, scalar, status, desc); - - tcg_temp_free_ptr(status); - tcg_temp_free_ptr(t_pg); - tcg_temp_free_ptr(t_zn); - tcg_temp_free_ptr(t_zd); } static bool do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm, @@ -4080,7 +3951,6 @@ static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a, vec_full_reg_offset(s, a->rm), pred_full_reg_offset(s, a->pg), status, vsz, vsz, 0, fn); - tcg_temp_free_ptr(status); } return true; } @@ -4212,7 +4082,7 @@ TRANS_FEAT(FRINTX, aa64_sve, gen_gvec_fpst_arg_zpz, frintx_fns[a->esz], a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, - int mode, gen_helper_gvec_3_ptr *fn) + ARMFPRounding mode, gen_helper_gvec_3_ptr *fn) { unsigned vsz; TCGv_i32 tmode; @@ -4226,32 +4096,28 @@ static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, } vsz = vec_full_reg_size(s); - tmode = tcg_const_i32(mode); status = fpstatus_ptr(a->esz == MO_16 ? 
FPST_FPCR_F16 : FPST_FPCR); - - gen_helper_set_rmode(tmode, tmode, status); + tmode = gen_set_rmode(mode, status); tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), pred_full_reg_offset(s, a->pg), status, vsz, vsz, 0, fn); - gen_helper_set_rmode(tmode, tmode, status); - tcg_temp_free_i32(tmode); - tcg_temp_free_ptr(status); + gen_restore_rmode(tmode, status); return true; } TRANS_FEAT(FRINTN, aa64_sve, do_frint_mode, a, - float_round_nearest_even, frint_fns[a->esz]) + FPROUNDING_TIEEVEN, frint_fns[a->esz]) TRANS_FEAT(FRINTP, aa64_sve, do_frint_mode, a, - float_round_up, frint_fns[a->esz]) + FPROUNDING_POSINF, frint_fns[a->esz]) TRANS_FEAT(FRINTM, aa64_sve, do_frint_mode, a, - float_round_down, frint_fns[a->esz]) + FPROUNDING_NEGINF, frint_fns[a->esz]) TRANS_FEAT(FRINTZ, aa64_sve, do_frint_mode, a, - float_round_to_zero, frint_fns[a->esz]) + FPROUNDING_ZERO, frint_fns[a->esz]) TRANS_FEAT(FRINTA, aa64_sve, do_frint_mode, a, - float_round_ties_away, frint_fns[a->esz]) + FPROUNDING_TIEAWAY, frint_fns[a->esz]) static gen_helper_gvec_3_ptr * const frecpx_fns[] = { NULL, gen_helper_sve_frecpx_h, @@ -4321,7 +4187,6 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, dirty_addr = tcg_temp_new_i64(); tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len); - tcg_temp_free_i64(dirty_addr); /* * Note that unpredicated load/store of vector/predicate registers @@ -4339,22 +4204,11 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_st_i64(t0, base, vofs + i); tcg_gen_addi_i64(clean_addr, clean_addr, 8); } - tcg_temp_free_i64(t0); } else { TCGLabel *loop = gen_new_label(); - TCGv_ptr tp, i = tcg_const_local_ptr(0); - - /* Copy the clean address into a local temp, live across the loop. 
*/ - t0 = clean_addr; - clean_addr = new_tmp_a64_local(s); - tcg_gen_mov_i64(clean_addr, t0); - - if (base != cpu_env) { - TCGv_ptr b = tcg_temp_local_new_ptr(); - tcg_gen_mov_ptr(b, base); - base = b; - } + TCGv_ptr tp, i = tcg_temp_new_ptr(); + tcg_gen_movi_ptr(i, 0); gen_set_label(loop); t0 = tcg_temp_new_i64(); @@ -4365,16 +4219,8 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_add_ptr(tp, base, i); tcg_gen_addi_ptr(i, i, 8); tcg_gen_st_i64(t0, tp, vofs); - tcg_temp_free_ptr(tp); - tcg_temp_free_i64(t0); tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); - tcg_temp_free_ptr(i); - - if (base != cpu_env) { - tcg_temp_free_ptr(base); - assert(len_remain == 0); - } } /* @@ -4397,14 +4243,12 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_addi_i64(clean_addr, clean_addr, 4); tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW); tcg_gen_deposit_i64(t0, t0, t1, 32, 32); - tcg_temp_free_i64(t1); break; default: g_assert_not_reached(); } tcg_gen_st_i64(t0, base, vofs + len_align); - tcg_temp_free_i64(t0); } } @@ -4421,7 +4265,6 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, dirty_addr = tcg_temp_new_i64(); tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len); - tcg_temp_free_i64(dirty_addr); /* Note that unpredicated load/store of vector/predicate registers * are defined as a stream of bytes, which equates to little-endian @@ -4440,22 +4283,11 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); tcg_gen_addi_i64(clean_addr, clean_addr, 8); } - tcg_temp_free_i64(t0); } else { TCGLabel *loop = gen_new_label(); - TCGv_ptr tp, i = tcg_const_local_ptr(0); - - /* Copy the clean address into a local temp, live across the loop. */ - t0 = clean_addr; - clean_addr = new_tmp_a64_local(s); - tcg_gen_mov_i64(clean_addr, t0); - - if (base != cpu_env) { - TCGv_ptr b = tcg_temp_local_new_ptr(); - tcg_gen_mov_ptr(b, base); - base = b; - } + TCGv_ptr tp, i = tcg_temp_new_ptr(); + tcg_gen_movi_ptr(i, 0); gen_set_label(loop); t0 = tcg_temp_new_i64(); @@ -4463,19 +4295,11 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_add_ptr(tp, base, i); tcg_gen_ld_i64(t0, tp, vofs); tcg_gen_addi_ptr(i, i, 8); - tcg_temp_free_ptr(tp); tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); tcg_gen_addi_i64(clean_addr, clean_addr, 8); - tcg_temp_free_i64(t0); tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); - tcg_temp_free_ptr(i); - - if (base != cpu_env) { - tcg_temp_free_ptr(base); - assert(len_remain == 0); - } } /* Predicate register stores can be any multiple of 2. 
*/ @@ -4501,7 +4325,6 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, default: g_assert_not_reached(); } - tcg_temp_free_i64(t0); } } @@ -4610,8 +4433,6 @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); fn(cpu_env, t_pg, addr, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(t_pg); } /* Indexed by [mte][be][dtype][nreg] */ @@ -4753,7 +4574,7 @@ static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a) return false; } if (sve_access_check(s)) { - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg); @@ -4769,7 +4590,7 @@ static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a) if (sve_access_check(s)) { int vsz = vec_full_reg_size(s); int elements = vsz >> dtype_esz[a->dtype]; - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), (a->imm * elements * (a->nreg + 1)) @@ -4872,7 +4693,7 @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a) } s->is_nonstreaming = true; if (sve_access_check(s)) { - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false, @@ -4977,7 +4798,7 @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a) int vsz = vec_full_reg_size(s); int elements = vsz >> dtype_esz[a->dtype]; int off = (a->imm * elements) << dtype_msz(a->dtype); - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off); do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false, @@ -5009,7 +4830,6 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) poff = offsetof(CPUARMState, vfp.preg_tmp); tcg_gen_st_i64(tmp, cpu_env, poff); - tcg_temp_free_i64(tmp); } t_pg = tcg_temp_new_ptr(); @@ -5019,8 +4839,6 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt))); - tcg_temp_free_ptr(t_pg); - /* Replicate that first quadword. 
*/ if (vsz > 16) { int doff = vec_full_reg_offset(s, zt); @@ -5035,7 +4853,7 @@ static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a) } if (sve_access_check(s)) { int msz = dtype_msz(a->dtype); - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz); tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); do_ldrq(s, a->rd, a->pg, addr, a->dtype); @@ -5049,7 +4867,7 @@ static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a) return false; } if (sve_access_check(s)) { - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16); do_ldrq(s, a->rd, a->pg, addr, a->dtype); } @@ -5091,7 +4909,6 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) poff = offsetof(CPUARMState, vfp.preg_tmp); tcg_gen_st_i64(tmp, cpu_env, poff); - tcg_temp_free_i64(tmp); } t_pg = tcg_temp_new_ptr(); @@ -5101,8 +4918,6 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt))); - tcg_temp_free_ptr(t_pg); - /* * Replicate that first octaword. * The replication happens in units of 32; if the full vector size @@ -5129,7 +4944,7 @@ static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a) } s->is_nonstreaming = true; if (sve_access_check(s)) { - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); do_ldro(s, a->rd, a->pg, addr, a->dtype); @@ -5144,7 +4959,7 @@ static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a) } s->is_nonstreaming = true; if (sve_access_check(s)) { - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32); do_ldro(s, a->rd, a->pg, addr, a->dtype); } @@ -5180,12 +4995,10 @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) tcg_gen_ld_i64(temp, cpu_env, pred_full_reg_offset(s, a->pg)); tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask); tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over); - tcg_temp_free_i64(temp); } else { TCGv_i32 t32 = tcg_temp_new_i32(); find_last_active(s, t32, esz, a->pg); tcg_gen_brcondi_i32(TCG_COND_LT, t32, 0, over); - tcg_temp_free_i32(t32); } /* Load the data. */ @@ -5199,7 +5012,6 @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) /* Broadcast to *all* elements. */ tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, temp); - tcg_temp_free_i64(temp); /* Zero the inactive elements. 
*/ gen_set_label(over); @@ -5339,7 +5151,7 @@ static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a) return false; } if (sve_access_check(s)) { - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz); tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg); @@ -5358,7 +5170,7 @@ static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a) if (sve_access_check(s)) { int vsz = vec_full_reg_size(s); int elements = vsz >> a->esz; - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), (a->imm * elements * (a->nreg + 1)) << a->msz); @@ -5395,10 +5207,6 @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm)); tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt)); fn(cpu_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(t_zt); - tcg_temp_free_ptr(t_zm); - tcg_temp_free_ptr(t_pg); } /* Indexed by [mte][be][ff][xs][u][msz]. */ @@ -6333,7 +6141,6 @@ static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) TCGv_vec t = tcg_temp_new_vec_matching(d); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits)); tcg_gen_and_vec(vece, d, n, t); - tcg_temp_free_vec(t); } else { tcg_gen_sari_vec(vece, d, n, halfbits); tcg_gen_shli_vec(vece, d, d, shl); @@ -6391,7 +6198,6 @@ static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) TCGv_vec t = tcg_temp_new_vec_matching(d); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits)); tcg_gen_and_vec(vece, d, n, t); - tcg_temp_free_vec(t); } else { tcg_gen_shri_vec(vece, d, n, halfbits); tcg_gen_shli_vec(vece, d, d, shl); @@ -6401,7 +6207,6 @@ static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) TCGv_vec t = tcg_temp_new_vec_matching(d); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); tcg_gen_and_vec(vece, d, n, t); - tcg_temp_free_vec(t); } else { tcg_gen_shli_vec(vece, d, n, halfbits); tcg_gen_shri_vec(vece, d, d, halfbits - shl); @@ -6581,7 +6386,6 @@ static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_gen_smin_vec(vece, d, d, t); tcg_gen_dupi_vec(vece, t, mask); tcg_gen_and_vec(vece, d, d, t); - tcg_temp_free_vec(t); } static const GVecGen2 sqxtnb_ops[3] = { @@ -6615,7 +6419,6 @@ static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_gen_shli_vec(vece, n, n, halfbits); tcg_gen_dupi_vec(vece, t, mask); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const GVecGen2 sqxtnt_ops[3] = { @@ -6649,7 +6452,6 @@ static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_gen_dupi_vec(vece, t, max); tcg_gen_umin_vec(vece, d, n, t); - tcg_temp_free_vec(t); } static const GVecGen2 uqxtnb_ops[3] = { @@ -6678,7 +6480,6 @@ static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_gen_umin_vec(vece, n, n, t); tcg_gen_shli_vec(vece, n, n, halfbits); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const GVecGen2 uqxtnt_ops[3] = { @@ -6714,7 +6515,6 @@ static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_gen_smax_vec(vece, d, n, t); tcg_gen_dupi_vec(vece, t, max); tcg_gen_umin_vec(vece, d, d, t); - tcg_temp_free_vec(t); } static const GVecGen2 sqxtunb_ops[3] = { @@ -6745,7 +6545,6 @@ static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_gen_umin_vec(vece, n, n, t); 
tcg_gen_shli_vec(vece, n, n, halfbits); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const GVecGen2 sqxtunt_ops[3] = { @@ -6816,7 +6615,6 @@ static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) tcg_gen_shri_vec(vece, n, n, shr); tcg_gen_dupi_vec(vece, t, mask); tcg_gen_and_vec(vece, d, n, t); - tcg_temp_free_vec(t); } static const TCGOpcode shrnb_vec_list[] = { INDEX_op_shri_vec, 0 }; @@ -6875,7 +6673,6 @@ static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) tcg_gen_shli_vec(vece, n, n, halfbits - shr); tcg_gen_dupi_vec(vece, t, mask); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const TCGOpcode shrnt_vec_list[] = { INDEX_op_shli_vec, 0 }; @@ -6926,7 +6723,6 @@ static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d, tcg_gen_smax_vec(vece, n, n, t); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); tcg_gen_umin_vec(vece, d, n, t); - tcg_temp_free_vec(t); } static const TCGOpcode sqshrunb_vec_list[] = { @@ -6961,7 +6757,6 @@ static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d, tcg_gen_umin_vec(vece, n, n, t); tcg_gen_shli_vec(vece, n, n, halfbits); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const TCGOpcode sqshrunt_vec_list[] = { @@ -7016,7 +6811,6 @@ static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d, tcg_gen_smin_vec(vece, n, n, t); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); tcg_gen_and_vec(vece, d, n, t); - tcg_temp_free_vec(t); } static const TCGOpcode sqshrnb_vec_list[] = { @@ -7054,7 +6848,6 @@ static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d, tcg_gen_shli_vec(vece, n, n, halfbits); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const TCGOpcode sqshrnt_vec_list[] = { @@ -7103,7 +6896,6 @@ static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d, tcg_gen_shri_vec(vece, n, n, shr); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); tcg_gen_umin_vec(vece, d, n, t); - tcg_temp_free_vec(t); } static const TCGOpcode uqshrnb_vec_list[] = { @@ -7136,7 +6928,6 @@ static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d, tcg_gen_umin_vec(vece, n, n, t); tcg_gen_shli_vec(vece, n, n, halfbits); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const TCGOpcode uqshrnt_vec_list[] = { @@ -7354,9 +7145,9 @@ TRANS_FEAT(FCVTLT_sd, aa64_sve2, gen_gvec_fpst_arg_zpz, gen_helper_sve2_fcvtlt_sd, a, 0, FPST_FPCR) TRANS_FEAT(FCVTX_ds, aa64_sve2, do_frint_mode, a, - float_round_to_odd, gen_helper_sve_fcvt_ds) + FPROUNDING_ODD, gen_helper_sve_fcvt_ds) TRANS_FEAT(FCVTXNT_ds, aa64_sve2, do_frint_mode, a, - float_round_to_odd, gen_helper_sve2_fcvtnt_ds) + FPROUNDING_ODD, gen_helper_sve2_fcvtnt_ds) static gen_helper_gvec_3_ptr * const flogb_fns[] = { NULL, gen_helper_flogb_h, @@ -7472,11 +7263,6 @@ static bool trans_PSEL(DisasContext *s, arg_psel *a) /* Apply to either copy the source, or write zeros. 
*/ tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd), pred_full_reg_offset(s, a->pn), tmp, pl, pl); - - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(dbit); - tcg_temp_free_i64(didx); - tcg_temp_free_ptr(ptr); return true; } diff --git a/target/arm/tcg/translate-vfp.c b/target/arm/tcg/translate-vfp.c index 5c5d58d2c6..dd782aacf4 100644 --- a/target/arm/tcg/translate-vfp.c +++ b/target/arm/tcg/translate-vfp.c @@ -178,7 +178,6 @@ static void gen_update_fp_context(DisasContext *s) fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]); gen_helper_vfp_set_fpscr(cpu_env, fpscr); - tcg_temp_free_i32(fpscr); if (dc_isar_feature(aa32_mve, s)) { store_cpu_field(tcg_constant_i32(0), v7m.vpr); } @@ -365,24 +364,15 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tmp = tcg_temp_new_i64(); tcg_gen_xor_i64(tmp, vf, nf); tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, frn, frm); - tcg_temp_free_i64(tmp); break; case 3: /* gt: !Z && N == V */ tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero, frn, frm); tmp = tcg_temp_new_i64(); tcg_gen_xor_i64(tmp, vf, nf); tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, dest, frm); - tcg_temp_free_i64(tmp); break; } vfp_store_reg64(dest, rd); - tcg_temp_free_i64(frn); - tcg_temp_free_i64(frm); - tcg_temp_free_i64(dest); - - tcg_temp_free_i64(zf); - tcg_temp_free_i64(nf); - tcg_temp_free_i64(vf); } else { TCGv_i32 frn, frm, dest; TCGv_i32 tmp, zero; @@ -405,14 +395,12 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tmp = tcg_temp_new_i32(); tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF); tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, frn, frm); - tcg_temp_free_i32(tmp); break; case 3: /* gt: !Z && N == V */ tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero, frn, frm); tmp = tcg_temp_new_i32(); tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF); tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, dest, frm); - tcg_temp_free_i32(tmp); break; } /* For fp16 the top half is always zeroes */ @@ -420,9 +408,6 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tcg_gen_andi_i32(dest, dest, 0xffff); } vfp_store_reg32(dest, rd); - tcg_temp_free_i32(frn); - tcg_temp_free_i32(frm); - tcg_temp_free_i32(dest); } return true; @@ -479,8 +464,7 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) fpst = fpstatus_ptr(FPST_FPCR); } - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding)); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + tcg_rmode = gen_set_rmode(rounding, fpst); if (sz == 3) { TCGv_i64 tcg_op; @@ -490,8 +474,6 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) vfp_load_reg64(tcg_op, rm); gen_helper_rintd(tcg_res, tcg_op, fpst); vfp_store_reg64(tcg_res, rd); - tcg_temp_free_i64(tcg_op); - tcg_temp_free_i64(tcg_res); } else { TCGv_i32 tcg_op; TCGv_i32 tcg_res; @@ -504,14 +486,9 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) gen_helper_rints(tcg_res, tcg_op, fpst); } vfp_store_reg32(tcg_res, rd); - tcg_temp_free_i32(tcg_op); - tcg_temp_free_i32(tcg_res); } - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); - - tcg_temp_free_ptr(fpst); + gen_restore_rmode(tcg_rmode, fpst); return true; } @@ -555,9 +532,7 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) } tcg_shift = tcg_constant_i32(0); - - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding)); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + tcg_rmode = gen_set_rmode(rounding, fpst); if (sz == 3) { TCGv_i64 tcg_double, tcg_res; @@ -573,9 +548,6 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) } tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res); 
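/*
 * These rounding-mode sites all follow one template now: rather than
 * materialising arm_rmode_to_sf(mode) in a tcg_const_i32() and invoking
 * gen_helper_set_rmode() once to set and once to restore, gen_set_rmode()
 * installs the new mode and returns a temp holding the previous one,
 * which gen_restore_rmode() writes back. A minimal sketch, assuming the
 * gen_set_rmode()/gen_restore_rmode() helpers this series introduces:
 *
 *     TCGv_i32 old = gen_set_rmode(FPROUNDING_ZERO, fpst);
 *     gen_helper_rints(tcg_res, tcg_op, fpst);  // runs under round-to-zero
 *     gen_restore_rmode(old, fpst);
 */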
vfp_store_reg32(tcg_tmp, rd); - tcg_temp_free_i32(tcg_tmp); - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_double); } else { TCGv_i32 tcg_single, tcg_res; tcg_single = tcg_temp_new_i32(); @@ -595,15 +567,9 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) } } vfp_store_reg32(tcg_res, rd); - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_single); } - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); - - tcg_temp_free_ptr(fpst); - + gen_restore_rmode(tcg_rmode, fpst); return true; } @@ -729,7 +695,6 @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a) if (!mve_skip_vmov(s, a->vn, a->index, a->size)) { tmp = load_reg(s, a->rt); write_neon_element32(tmp, a->vn, a->index, a->size); - tcg_temp_free_i32(tmp); } if (dc_isar_feature(aa32_mve, s)) { @@ -777,8 +742,6 @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a) tmp = load_reg(s, a->rt); tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn), vec_size, vec_size, tmp); - tcg_temp_free_i32(tmp); - return true; } @@ -883,7 +846,6 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a) if (a->rt == 15) { /* Set the 4 flag bits in the CPSR. */ gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp); } else { store_reg(s, a->rt, tmp); } @@ -899,7 +861,6 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a) case ARM_VFP_FPSCR: tmp = load_reg(s, a->rt); gen_helper_vfp_set_fpscr(cpu_env, tmp); - tcg_temp_free_i32(tmp); gen_lookup_tb(s); break; case ARM_VFP_FPEXC: @@ -954,7 +915,6 @@ static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a) tmp = load_reg(s, a->rt); tcg_gen_andi_i32(tmp, tmp, 0xffff); vfp_store_reg32(tmp, a->vn); - tcg_temp_free_i32(tmp); } return true; @@ -979,7 +939,6 @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a) if (a->rt == 15) { /* Set the 4 flag bits in the CPSR. 
*/ gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp); } else { store_reg(s, a->rt, tmp); } @@ -987,7 +946,6 @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a) /* general purpose register to VFP */ tmp = load_reg(s, a->rt); vfp_store_reg32(tmp, a->vn); - tcg_temp_free_i32(tmp); } return true; @@ -1021,10 +979,8 @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a) /* gpreg to fpreg */ tmp = load_reg(s, a->rt); vfp_store_reg32(tmp, a->vm); - tcg_temp_free_i32(tmp); tmp = load_reg(s, a->rt2); vfp_store_reg32(tmp, a->vm + 1); - tcg_temp_free_i32(tmp); } return true; @@ -1064,10 +1020,8 @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a) /* gpreg to fpreg */ tmp = load_reg(s, a->rt); vfp_store_reg32(tmp, a->vm * 2); - tcg_temp_free_i32(tmp); tmp = load_reg(s, a->rt2); vfp_store_reg32(tmp, a->vm * 2 + 1); - tcg_temp_free_i32(tmp); } return true; @@ -1102,9 +1056,6 @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a) vfp_load_reg32(tmp, a->vd); gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN); } - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(addr); - return true; } @@ -1136,9 +1087,6 @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a) vfp_load_reg32(tmp, a->vd); gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN); } - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(addr); - return true; } @@ -1177,9 +1125,6 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a) vfp_load_reg64(tmp, a->vd); gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4); } - tcg_temp_free_i64(tmp); - tcg_temp_free_i32(addr); - return true; } @@ -1246,7 +1191,6 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a) } tcg_gen_addi_i32(addr, addr, offset); } - tcg_temp_free_i32(tmp); if (a->w) { /* writeback */ if (a->p) { @@ -1254,8 +1198,6 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a) tcg_gen_addi_i32(addr, addr, offset); } store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } clear_eci_state(s); @@ -1332,7 +1274,6 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) } tcg_gen_addi_i32(addr, addr, offset); } - tcg_temp_free_i64(tmp); if (a->w) { /* writeback */ if (a->p) { @@ -1347,8 +1288,6 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) tcg_gen_addi_i32(addr, addr, offset); } store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } clear_eci_state(s); @@ -1485,12 +1424,6 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn, vfp_load_reg32(f1, vm); } } - - tcg_temp_free_i32(f0); - tcg_temp_free_i32(f1); - tcg_temp_free_i32(fd); - tcg_temp_free_ptr(fpst); - return true; } @@ -1533,12 +1466,6 @@ static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn, } fn(fd, f0, f1, fpst); vfp_store_reg32(fd, vd); - - tcg_temp_free_i32(f0); - tcg_temp_free_i32(f1); - tcg_temp_free_i32(fd); - tcg_temp_free_ptr(fpst); - return true; } @@ -1615,12 +1542,6 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, vfp_load_reg64(f1, vm); } } - - tcg_temp_free_i64(f0); - tcg_temp_free_i64(f1); - tcg_temp_free_i64(fd); - tcg_temp_free_ptr(fpst); - return true; } @@ -1688,10 +1609,6 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm) vm = vfp_advance_sreg(vm, delta_m); vfp_load_reg32(f0, vm); } - - tcg_temp_free_i32(f0); - tcg_temp_free_i32(fd); - return true; } @@ -1724,7 +1641,6 @@ static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, 
int vm) vfp_load_reg32(f0, vm); fn(f0, f0); vfp_store_reg32(f0, vd); - tcg_temp_free_i32(f0); return true; } @@ -1798,10 +1714,6 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm) vd = vfp_advance_dreg(vm, delta_m); vfp_load_reg64(f0, vm); } - - tcg_temp_free_i64(f0); - tcg_temp_free_i64(fd); - return true; } @@ -1812,7 +1724,6 @@ static void gen_VMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_mulh(tmp, vn, vm, fpst); gen_helper_vfp_addh(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VMLA_hp(DisasContext *s, arg_VMLA_sp *a) @@ -1827,7 +1738,6 @@ static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_muls(tmp, vn, vm, fpst); gen_helper_vfp_adds(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a) @@ -1842,7 +1752,6 @@ static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) gen_helper_vfp_muld(tmp, vn, vm, fpst); gen_helper_vfp_addd(vd, vd, tmp, fpst); - tcg_temp_free_i64(tmp); } static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a) @@ -1861,7 +1770,6 @@ static void gen_VMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_mulh(tmp, vn, vm, fpst); gen_helper_vfp_negh(tmp, tmp); gen_helper_vfp_addh(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VMLS_hp(DisasContext *s, arg_VMLS_sp *a) @@ -1880,7 +1788,6 @@ static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_muls(tmp, vn, vm, fpst); gen_helper_vfp_negs(tmp, tmp); gen_helper_vfp_adds(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a) @@ -1899,7 +1806,6 @@ static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) gen_helper_vfp_muld(tmp, vn, vm, fpst); gen_helper_vfp_negd(tmp, tmp); gen_helper_vfp_addd(vd, vd, tmp, fpst); - tcg_temp_free_i64(tmp); } static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a) @@ -1920,7 +1826,6 @@ static void gen_VNMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_mulh(tmp, vn, vm, fpst); gen_helper_vfp_negh(vd, vd); gen_helper_vfp_addh(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VNMLS_hp(DisasContext *s, arg_VNMLS_sp *a) @@ -1941,7 +1846,6 @@ static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_muls(tmp, vn, vm, fpst); gen_helper_vfp_negs(vd, vd); gen_helper_vfp_adds(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a) @@ -1962,7 +1866,6 @@ static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) gen_helper_vfp_muld(tmp, vn, vm, fpst); gen_helper_vfp_negd(vd, vd); gen_helper_vfp_addd(vd, vd, tmp, fpst); - tcg_temp_free_i64(tmp); } static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a) @@ -1979,7 +1882,6 @@ static void gen_VNMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_negh(tmp, tmp); gen_helper_vfp_negh(vd, vd); gen_helper_vfp_addh(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VNMLA_hp(DisasContext *s, arg_VNMLA_sp *a) @@ -1996,7 +1898,6 @@ static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_negs(tmp, tmp); gen_helper_vfp_negs(vd, vd); gen_helper_vfp_adds(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a) @@ -2013,7 +1914,6 @@ static void 
gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) gen_helper_vfp_negd(tmp, tmp); gen_helper_vfp_negd(vd, vd); gen_helper_vfp_addd(vd, vd, tmp, fpst); - tcg_temp_free_i64(tmp); } static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a) @@ -2225,12 +2125,6 @@ static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d) fpst = fpstatus_ptr(FPST_FPCR_F16); gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst); vfp_store_reg32(vd, a->vd); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(vn); - tcg_temp_free_i32(vm); - tcg_temp_free_i32(vd); - return true; } @@ -2290,12 +2184,6 @@ static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d) fpst = fpstatus_ptr(FPST_FPCR); gen_helper_vfp_muladds(vd, vn, vm, vd, fpst); vfp_store_reg32(vd, a->vd); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(vn); - tcg_temp_free_i32(vm); - tcg_temp_free_i32(vd); - return true; } @@ -2361,12 +2249,6 @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d) fpst = fpstatus_ptr(FPST_FPCR); gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst); vfp_store_reg64(vd, a->vd); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(vn); - tcg_temp_free_i64(vm); - tcg_temp_free_i64(vd); - return true; } @@ -2591,10 +2473,6 @@ static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a) } else { gen_helper_vfp_cmph(vd, vm, cpu_env); } - - tcg_temp_free_i32(vd); - tcg_temp_free_i32(vm); - return true; } @@ -2630,10 +2508,6 @@ static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a) } else { gen_helper_vfp_cmps(vd, vm, cpu_env); } - - tcg_temp_free_i32(vd); - tcg_temp_free_i32(vm); - return true; } @@ -2674,10 +2548,6 @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a) } else { gen_helper_vfp_cmpd(vd, vm, cpu_env); } - - tcg_temp_free_i64(vd); - tcg_temp_free_i64(vm); - return true; } @@ -2702,9 +2572,6 @@ static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a) tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t)); gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_i32(ahp_mode); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -2740,10 +2607,6 @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a) vd = tcg_temp_new_i64(); gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode); vfp_store_reg64(vd, a->vd); - tcg_temp_free_i32(ahp_mode); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); - tcg_temp_free_i64(vd); return true; } @@ -2766,8 +2629,6 @@ static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a) vfp_load_reg32(tmp, a->vm); gen_helper_bfcvt(tmp, tmp, fpst); tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t)); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -2792,9 +2653,6 @@ static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a) vfp_load_reg32(tmp, a->vm); gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode); tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t)); - tcg_temp_free_i32(ahp_mode); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -2829,11 +2687,7 @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a) vfp_load_reg64(vm, a->vm); gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode); - tcg_temp_free_i64(vm); tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t)); - tcg_temp_free_i32(ahp_mode); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -2855,8 +2709,6 @@ static bool trans_VRINTR_hp(DisasContext 
*s, arg_VRINTR_sp *a) fpst = fpstatus_ptr(FPST_FPCR_F16); gen_helper_rinth(tmp, tmp, fpst); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -2878,8 +2730,6 @@ static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a) fpst = fpstatus_ptr(FPST_FPCR); gen_helper_rints(tmp, tmp, fpst); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -2910,8 +2760,6 @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a) fpst = fpstatus_ptr(FPST_FPCR); gen_helper_rintd(tmp, tmp, fpst); vfp_store_reg64(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(tmp); return true; } @@ -2932,14 +2780,10 @@ static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a) tmp = tcg_temp_new_i32(); vfp_load_reg32(tmp, a->vm); fpst = fpstatus_ptr(FPST_FPCR_F16); - tcg_rmode = tcg_const_i32(float_round_to_zero); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst); gen_helper_rinth(tmp, tmp, fpst); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + gen_restore_rmode(tcg_rmode, fpst); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tcg_rmode); - tcg_temp_free_i32(tmp); return true; } @@ -2960,14 +2804,10 @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a) tmp = tcg_temp_new_i32(); vfp_load_reg32(tmp, a->vm); fpst = fpstatus_ptr(FPST_FPCR); - tcg_rmode = tcg_const_i32(float_round_to_zero); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst); gen_helper_rints(tmp, tmp, fpst); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + gen_restore_rmode(tcg_rmode, fpst); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tcg_rmode); - tcg_temp_free_i32(tmp); return true; } @@ -2997,14 +2837,10 @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a) tmp = tcg_temp_new_i64(); vfp_load_reg64(tmp, a->vm); fpst = fpstatus_ptr(FPST_FPCR); - tcg_rmode = tcg_const_i32(float_round_to_zero); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst); gen_helper_rintd(tmp, tmp, fpst); - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); + gen_restore_rmode(tcg_rmode, fpst); vfp_store_reg64(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(tmp); - tcg_temp_free_i32(tcg_rmode); return true; } @@ -3026,8 +2862,6 @@ static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a) fpst = fpstatus_ptr(FPST_FPCR_F16); gen_helper_rinth_exact(tmp, tmp, fpst); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -3049,8 +2883,6 @@ static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a) fpst = fpstatus_ptr(FPST_FPCR); gen_helper_rints_exact(tmp, tmp, fpst); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -3081,8 +2913,6 @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a) fpst = fpstatus_ptr(FPST_FPCR); gen_helper_rintd_exact(tmp, tmp, fpst); vfp_store_reg64(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(tmp); return true; } @@ -3109,8 +2939,6 @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a) vfp_load_reg32(vm, a->vm); gen_helper_vfp_fcvtds(vd, vm, cpu_env); vfp_store_reg64(vd, a->vd); - tcg_temp_free_i32(vm); - tcg_temp_free_i64(vd); return true; } @@ -3137,8 +2965,6 @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a) vfp_load_reg64(vm, a->vm); 
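
The VRINTZ hunks above replace the open-coded rounding-mode dance (tcg_const_i32(float_round_to_zero) plus two paired gen_helper_set_rmode calls) with a gen_set_rmode()/gen_restore_rmode() pair that saves and restores the mode for the caller. A minimal sketch of that pair, assuming the translate.h definitions that accompany this change upstream (treat the exact signatures as illustrative):

    /* Set the FP rounding mode, returning the previous mode so the
     * caller can restore it afterwards. */
    static inline TCGv_i32 gen_set_rmode(ARMFPRounding rmode, TCGv_ptr fpst)
    {
        TCGv_i32 tmp = tcg_constant_i32(arm_rmode_to_sf(rmode));
        TCGv_i32 old = tcg_temp_new_i32();

        gen_helper_set_rmode(old, tmp, fpst);
        return old;
    }

    static inline void gen_restore_rmode(TCGv_i32 old, TCGv_ptr fpst)
    {
        gen_helper_set_rmode(old, old, fpst);
    }

Besides saving a line per call site, this keeps the FPROUNDING_*-to-softfloat translation out of the translators, and the returned temporary no longer needs an explicit free.
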
gen_helper_vfp_fcvtsd(vd, vm, cpu_env); vfp_store_reg32(vd, a->vd); - tcg_temp_free_i32(vd); - tcg_temp_free_i64(vm); return true; } @@ -3166,8 +2992,6 @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a) gen_helper_vfp_uitoh(vm, vm, fpst); } vfp_store_reg32(vm, a->vd); - tcg_temp_free_i32(vm); - tcg_temp_free_ptr(fpst); return true; } @@ -3195,8 +3019,6 @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a) gen_helper_vfp_uitos(vm, vm, fpst); } vfp_store_reg32(vm, a->vd); - tcg_temp_free_i32(vm); - tcg_temp_free_ptr(fpst); return true; } @@ -3231,9 +3053,6 @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a) gen_helper_vfp_uitod(vd, vm, fpst); } vfp_store_reg64(vd, a->vd); - tcg_temp_free_i32(vm); - tcg_temp_free_i64(vd); - tcg_temp_free_ptr(fpst); return true; } @@ -3264,8 +3083,6 @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a) vfp_load_reg64(vm, a->vm); gen_helper_vjcvt(vd, vm, cpu_env); vfp_store_reg32(vd, a->vd); - tcg_temp_free_i64(vm); - tcg_temp_free_i32(vd); return true; } @@ -3322,8 +3139,6 @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a) } vfp_store_reg32(vd, a->vd); - tcg_temp_free_i32(vd); - tcg_temp_free_ptr(fpst); return true; } @@ -3380,8 +3195,6 @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a) } vfp_store_reg32(vd, a->vd); - tcg_temp_free_i32(vd); - tcg_temp_free_ptr(fpst); return true; } @@ -3444,8 +3257,6 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a) } vfp_store_reg64(vd, a->vd); - tcg_temp_free_i64(vd); - tcg_temp_free_ptr(fpst); return true; } @@ -3480,8 +3291,6 @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a) } } vfp_store_reg32(vm, a->vd); - tcg_temp_free_i32(vm); - tcg_temp_free_ptr(fpst); return true; } @@ -3516,8 +3325,6 @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a) } } vfp_store_reg32(vm, a->vd); - tcg_temp_free_i32(vm); - tcg_temp_free_ptr(fpst); return true; } @@ -3559,9 +3366,6 @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a) } } vfp_store_reg32(vd, a->vd); - tcg_temp_free_i32(vd); - tcg_temp_free_i64(vm); - tcg_temp_free_ptr(fpst); return true; } @@ -3588,8 +3392,6 @@ static bool trans_VINS(DisasContext *s, arg_VINS *a) vfp_load_reg32(rd, a->vd); tcg_gen_deposit_i32(rd, rd, rm, 16, 16); vfp_store_reg32(rd, a->vd); - tcg_temp_free_i32(rm); - tcg_temp_free_i32(rd); return true; } @@ -3614,6 +3416,5 @@ static bool trans_VMOVX(DisasContext *s, arg_VINS *a) vfp_load_reg32(rm, a->vm); tcg_gen_shri_i32(rm, rm, 16); vfp_store_reg32(rm, a->vd); - tcg_temp_free_i32(rm); return true; } diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c index 0fab359cae..9df84eae65 100644 --- a/target/arm/tcg/translate.c +++ b/target/arm/tcg/translate.c @@ -35,6 +35,11 @@ #include "exec/log.h" #include "cpregs.h" +//// --- Begin LibAFL code --- + +#include "tcg/tcg-temp-internal.h" // tcg_temp_free + +//// --- End LibAFL code --- #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T) #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5) @@ -195,7 +200,6 @@ void store_cpu_offset(TCGv_i32 var, int offset, int size) default: g_assert_not_reached(); } - tcg_temp_free_i32(var); } /* Save the syndrome information for a Data Abort */ @@ -269,7 +273,7 @@ static target_long jmp_diff(DisasContext *s, target_long diff) static void gen_pc_plus_diff(DisasContext *s, TCGv_i32 var, target_long diff) { assert(s->pc_save != -1); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { 
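
TARGET_TB_PCREL was a compile-time constant, so position-independent PC handling was fixed for the whole binary; CF_PCREL is a bit in the per-TB cflags, so the decision is made per translation block at run time. The gen_pc_plus_diff() and gen_goto_tb() hunks in this file keep the same two code paths and change only how they are selected:

    if (tb_cflags(s->base.tb) & CF_PCREL) {
        /* PC not known at translation time: compute it relative to
         * the runtime value already held in cpu_R[15]. */
    } else {
        /* PC is a translation-time constant: emit an immediate. */
    }
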
tcg_gen_addi_i32(var, cpu_R[15], (s->pc_curr - s->pc_save) + diff); } else { tcg_gen_movi_i32(var, s->pc_curr + diff); @@ -325,7 +329,6 @@ void store_reg(DisasContext *s, int reg, TCGv_i32 var) tcg_gen_andi_i32(var, var, ~3); } tcg_gen_mov_i32(cpu_R[reg], var); - tcg_temp_free_i32(var); } /* @@ -420,12 +423,10 @@ static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b) tcg_gen_ext16s_i32(tmp1, a); tcg_gen_ext16s_i32(tmp2, b); tcg_gen_mul_i32(tmp1, tmp1, tmp2); - tcg_temp_free_i32(tmp2); tcg_gen_sari_i32(a, a, 16); tcg_gen_sari_i32(b, b, 16); tcg_gen_mul_i32(b, b, a); tcg_gen_mov_i32(a, tmp1); - tcg_temp_free_i32(tmp1); } /* Byteswap each halfword. */ @@ -438,7 +439,6 @@ void gen_rev16(TCGv_i32 dest, TCGv_i32 var) tcg_gen_and_i32(var, var, mask); tcg_gen_shli_i32(var, var, 8); tcg_gen_or_i32(dest, var, tmp); - tcg_temp_free_i32(tmp); } /* Byteswap low halfword and sign extend. */ @@ -463,7 +463,6 @@ static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) tcg_gen_andi_i32(t1, t1, ~0x8000); tcg_gen_add_i32(t0, t0, t1); tcg_gen_xor_i32(dest, t0, tmp); - tcg_temp_free_i32(tmp); } /* Set N and Z flags from var. */ @@ -498,7 +497,6 @@ static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) tcg_gen_xor_i32(cpu_VF, cpu_NF, t0); tcg_gen_xor_i32(tmp, t0, t1); tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp); - tcg_temp_free_i32(tmp); tcg_gen_mov_i32(dest, cpu_NF); } @@ -519,14 +517,11 @@ static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) tcg_gen_extu_i32_i64(q1, cpu_CF); tcg_gen_add_i64(q0, q0, q1); tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0); - tcg_temp_free_i64(q0); - tcg_temp_free_i64(q1); } tcg_gen_mov_i32(cpu_ZF, cpu_NF); tcg_gen_xor_i32(cpu_VF, cpu_NF, t0); tcg_gen_xor_i32(tmp, t0, t1); tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp); - tcg_temp_free_i32(tmp); tcg_gen_mov_i32(dest, cpu_NF); } @@ -541,7 +536,6 @@ static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) tmp = tcg_temp_new_i32(); tcg_gen_xor_i32(tmp, t0, t1); tcg_gen_and_i32(cpu_VF, cpu_VF, tmp); - tcg_temp_free_i32(tmp); tcg_gen_mov_i32(dest, cpu_NF); } @@ -551,7 +545,6 @@ static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_not_i32(tmp, t1); gen_adc_CC(dest, t0, tmp); - tcg_temp_free_i32(tmp); } #define GEN_SHIFT(name) \ @@ -564,8 +557,6 @@ static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \ tcg_gen_##name##_i32(tmpd, t0, tmp1); \ tcg_gen_andi_i32(tmp1, t1, 0xe0); \ tcg_gen_movcond_i32(TCG_COND_NE, dest, tmp1, zero, zero, tmpd); \ - tcg_temp_free_i32(tmpd); \ - tcg_temp_free_i32(tmp1); \ } GEN_SHIFT(shl) GEN_SHIFT(shr) @@ -578,7 +569,6 @@ static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) tcg_gen_andi_i32(tmp1, t1, 0xff); tcg_gen_umin_i32(tmp1, tmp1, tcg_constant_i32(31)); tcg_gen_sar_i32(dest, t0, tmp1); - tcg_temp_free_i32(tmp1); } static void shifter_out_im(TCGv_i32 var, int shift) @@ -631,7 +621,6 @@ static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop, shifter_out_im(var, 0); tcg_gen_shri_i32(var, var, 1); tcg_gen_or_i32(var, var, tmp); - tcg_temp_free_i32(tmp); } } }; @@ -661,7 +650,6 @@ static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop, tcg_gen_rotr_i32(var, var, shift); break; } } - tcg_temp_free_i32(shift); } /* @@ -672,7 +660,6 @@ void arm_test_cc(DisasCompare *cmp, int cc) { TCGv_i32 value; TCGCond cond; - bool global = true; switch (cc) { case 0: /* eq: Z */ @@ -703,7 +690,6 @@ void arm_test_cc(DisasCompare *cmp, int cc) case 9: /* ls: !C || Z -> !(C && !Z) */ cond = TCG_COND_NE; value = tcg_temp_new_i32(); - 
global = false; /* CF is 1 for C, so -CF is an all-bits-set mask for C; ZF is non-zero for !Z; so AND the two subexpressions. */ tcg_gen_neg_i32(value, cpu_CF); @@ -715,7 +701,6 @@ void arm_test_cc(DisasCompare *cmp, int cc) /* Since we're only interested in the sign bit, == 0 is >= 0. */ cond = TCG_COND_GE; value = tcg_temp_new_i32(); - global = false; tcg_gen_xor_i32(value, cpu_VF, cpu_NF); break; @@ -723,7 +708,6 @@ void arm_test_cc(DisasCompare *cmp, int cc) case 13: /* le: Z || N != V */ cond = TCG_COND_NE; value = tcg_temp_new_i32(); - global = false; /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate * the sign bit then AND with ZF to yield the result. */ tcg_gen_xor_i32(value, cpu_VF, cpu_NF); @@ -751,14 +735,6 @@ void arm_test_cc(DisasCompare *cmp, int cc) no_invert: cmp->cond = cond; cmp->value = value; - cmp->value_global = global; -} - -void arm_free_cc(DisasCompare *cmp) -{ - if (!cmp->value_global) { - tcg_temp_free_i32(cmp->value); - } } void arm_jump_cc(DisasCompare *cmp, TCGLabel *label) @@ -771,7 +747,6 @@ void arm_gen_test_cc(int cc, TCGLabel *label) DisasCompare cmp; arm_test_cc(&cmp, cc); arm_jump_cc(&cmp, label); - arm_free_cc(&cmp); } void gen_set_condexec(DisasContext *s) @@ -888,7 +863,6 @@ static inline void gen_bxns(DisasContext *s, int rm) * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise. */ gen_helper_v7m_bxns(cpu_env, var); - tcg_temp_free_i32(var); s->base.is_jmp = DISAS_EXIT; } @@ -902,7 +876,6 @@ static inline void gen_blxns(DisasContext *s, int rm) */ gen_update_pc(s, curr_insn_len(s)); gen_helper_v7m_blxns(cpu_env, var); - tcg_temp_free_i32(var); s->base.is_jmp = DISAS_EXIT; } @@ -982,7 +955,6 @@ void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val, { TCGv addr = gen_aa32_addr(s, a32, opc); tcg_gen_qemu_ld_i32(val, addr, index, opc); - tcg_temp_free(addr); } void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val, @@ -990,7 +962,6 @@ void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val, { TCGv addr = gen_aa32_addr(s, a32, opc); tcg_gen_qemu_st_i32(val, addr, index, opc); - tcg_temp_free(addr); } void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val, @@ -1004,7 +975,6 @@ void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val, if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) { tcg_gen_rotri_i64(val, val, 32); } - tcg_temp_free(addr); } void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val, @@ -1017,11 +987,9 @@ void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val, TCGv_i64 tmp = tcg_temp_new_i64(); tcg_gen_rotri_i64(tmp, val, 32); tcg_gen_qemu_st_i64(tmp, addr, index, opc); - tcg_temp_free_i64(tmp); } else { tcg_gen_qemu_st_i64(val, addr, index, opc); } - tcg_temp_free(addr); } void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, @@ -1333,7 +1301,6 @@ static inline TCGv_i32 iwmmxt_load_creg(int reg) static inline void iwmmxt_store_creg(int reg, TCGv_i32 var) { tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg])); - tcg_temp_free_i32(var); } static inline void gen_op_iwmmxt_movq_wRn_M0(int rn) @@ -1492,10 +1459,9 @@ static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, else tcg_gen_addi_i32(tmp, tmp, -offset); tcg_gen_mov_i32(dest, tmp); - if (insn & (1 << 21)) + if (insn & (1 << 21)) { store_reg(s, rd, tmp); - else - tcg_temp_free_i32(tmp); + } } else if (insn & (1 << 21)) { /* Post indexed */ tcg_gen_mov_i32(dest, tmp); @@ -1527,7 +1493,6 @@ static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest) } 
tcg_gen_andi_i32(tmp, tmp, mask); tcg_gen_mov_i32(dest, tmp); - tcg_temp_free_i32(tmp); return 0; } @@ -1560,7 +1525,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) wrd = (insn >> 12) & 0xf; addr = tcg_temp_new_i32(); if (gen_iwmmxt_address(s, insn, addr)) { - tcg_temp_free_i32(addr); return 1; } if (insn & ARM_CP_RW_BIT) { @@ -1588,7 +1552,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) } if (i) { tcg_gen_extu_i32_i64(cpu_M0, tmp); - tcg_temp_free_i32(tmp); } gen_op_iwmmxt_movq_wRn_M0(wrd); } @@ -1616,9 +1579,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) } } } - tcg_temp_free_i32(tmp); } - tcg_temp_free_i32(addr); return 0; } @@ -1653,7 +1614,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) tmp = iwmmxt_load_creg(wrd); tmp2 = load_reg(s, rd); tcg_gen_andc_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); iwmmxt_store_creg(wrd, tmp); break; case ARM_IWMMXT_wCGR0: @@ -1866,7 +1826,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) tcg_gen_andi_i32(tmp, tmp, 7); iwmmxt_load_reg(cpu_V1, rd1); gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp); - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; @@ -1894,7 +1853,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) g_assert_not_reached(); } gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3); - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; @@ -1948,7 +1906,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) } tcg_gen_shli_i32(tmp, tmp, 28); gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp); break; case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */ if (((insn >> 6) & 3) == 3) @@ -1967,7 +1924,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_helper_iwmmxt_bcstl(cpu_M0, tmp); break; } - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; @@ -1996,8 +1952,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) break; } gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); break; case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */ wrd = (insn >> 12) & 0xf; @@ -2044,8 +1998,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) break; } gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); break; case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */ rd = (insn >> 12) & 0xf; @@ -2170,7 +2122,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = tcg_temp_new_i32(); if (gen_iwmmxt_shift(insn, 0xff, tmp)) { - tcg_temp_free_i32(tmp); return 1; } switch ((insn >> 22) & 3) { @@ -2184,7 +2135,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp); break; } - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); @@ -2198,7 +2148,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = tcg_temp_new_i32(); if (gen_iwmmxt_shift(insn, 0xff, tmp)) { - tcg_temp_free_i32(tmp); return 1; } switch ((insn >> 22) & 3) { @@ -2212,7 +2161,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp); break; } - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); @@ -2226,7 +2174,6 @@ static int disas_iwmmxt_insn(DisasContext *s, 
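
A pattern worth noting in the disas_iwmmxt_insn() hunks here: once temporaries are reclaimed automatically at the end of the translation block, early-exit error paths lose their hand-written cleanup, so the remaining diffs in this function are pure deletions rather than control-flow changes:

    /* before: each early return had to clean up */
    if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
        tcg_temp_free_i32(tmp);
        return 1;
    }

    /* after: the temp is reclaimed with the TB */
    if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
        return 1;
    }
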
uint32_t insn) gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = tcg_temp_new_i32(); if (gen_iwmmxt_shift(insn, 0xff, tmp)) { - tcg_temp_free_i32(tmp); return 1; } switch ((insn >> 22) & 3) { @@ -2240,7 +2187,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp); break; } - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); @@ -2256,27 +2202,23 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) switch ((insn >> 22) & 3) { case 1: if (gen_iwmmxt_shift(insn, 0xf, tmp)) { - tcg_temp_free_i32(tmp); return 1; } gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp); break; case 2: if (gen_iwmmxt_shift(insn, 0x1f, tmp)) { - tcg_temp_free_i32(tmp); return 1; } gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp); break; case 3: if (gen_iwmmxt_shift(insn, 0x3f, tmp)) { - tcg_temp_free_i32(tmp); return 1; } gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp); break; } - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); @@ -2515,12 +2457,8 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2); break; default: - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); return 1; } - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; @@ -2569,8 +2507,6 @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn) default: return 1; } - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(acc); return 0; @@ -2620,7 +2556,7 @@ static void gen_goto_tb(DisasContext *s, int n, target_long diff) * update to pc to the unlinked path. A long chain of links * can thus avoid many updates to the PC. */ - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { gen_update_pc(s, diff); tcg_gen_goto_tb(n); } else { @@ -2747,7 +2683,6 @@ static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0) } else { gen_set_cpsr(t0, mask); } - tcg_temp_free_i32(t0); gen_lookup_tb(s); return 0; } @@ -2886,7 +2821,7 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn, if (arm_dc_feature(s, ARM_FEATURE_AARCH64) && dc_isar_feature(aa64_sel2, s)) { /* Target EL is EL<3 minus SCR_EL3.EEL2> */ - tcg_el = load_cpu_field(cp15.scr_el3); + tcg_el = load_cpu_field_low32(cp15.scr_el3); tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1); tcg_gen_addi_i32(tcg_el, tcg_el, 3); } else { @@ -2895,7 +2830,6 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn, gen_exception_insn_el_v(s, 0, EXCP_UDEF, syn_uncategorized(), tcg_el); - tcg_temp_free_i32(tcg_el); return false; } break; @@ -2939,7 +2873,6 @@ static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn) gen_helper_msr_banked(cpu_env, tcg_reg, tcg_constant_i32(tgtmode), tcg_constant_i32(regno)); - tcg_temp_free_i32(tcg_reg); s->base.is_jmp = DISAS_UPDATE_EXIT; } @@ -2970,7 +2903,6 @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn) static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc) { tcg_gen_mov_i32(cpu_R[15], pc); - tcg_temp_free_i32(pc); } /* Generate a v6 exception return. Marks both values as dead. 
*/ @@ -2985,7 +2917,6 @@ static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr) gen_io_start(); } gen_helper_cpsr_write_eret(cpu_env, cpsr); - tcg_temp_free_i32(cpsr); /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; } @@ -3005,7 +2936,6 @@ static void gen_gvec_fn3_qc(uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs, tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc)); tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, qc_ptr, opr_sz, max_sz, 0, fn); - tcg_temp_free_ptr(qc_ptr); } void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -3251,7 +3181,6 @@ static void gen_srshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) tcg_gen_andi_i64(t, t, dup_const(MO_8, 1)); tcg_gen_vec_sar8i_i64(d, a, sh); tcg_gen_vec_add8_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3262,7 +3191,6 @@ static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) tcg_gen_andi_i64(t, t, dup_const(MO_16, 1)); tcg_gen_vec_sar16i_i64(d, a, sh); tcg_gen_vec_add16_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) @@ -3278,7 +3206,6 @@ static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) tcg_gen_extract_i32(t, a, sh - 1, 1); tcg_gen_sari_i32(d, a, sh); tcg_gen_add_i32(d, d, t); - tcg_temp_free_i32(t); } static void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3288,7 +3215,6 @@ static void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) tcg_gen_extract_i64(t, a, sh - 1, 1); tcg_gen_sari_i64(d, a, sh); tcg_gen_add_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) @@ -3301,9 +3227,6 @@ static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) tcg_gen_and_vec(vece, t, t, ones); tcg_gen_sari_vec(vece, d, a, sh); tcg_gen_add_vec(vece, d, d, t); - - tcg_temp_free_vec(t); - tcg_temp_free_vec(ones); } void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -3359,7 +3282,6 @@ static void gen_srsra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) gen_srshr8_i64(t, a, sh); tcg_gen_vec_add8_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_srsra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3368,7 +3290,6 @@ static void gen_srsra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) gen_srshr16_i64(t, a, sh); tcg_gen_vec_add16_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_srsra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) @@ -3377,7 +3298,6 @@ static void gen_srsra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) gen_srshr32_i32(t, a, sh); tcg_gen_add_i32(d, d, t); - tcg_temp_free_i32(t); } static void gen_srsra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3386,7 +3306,6 @@ static void gen_srsra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) gen_srshr64_i64(t, a, sh); tcg_gen_add_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_srsra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) @@ -3395,7 +3314,6 @@ static void gen_srsra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) gen_srshr_vec(vece, t, a, sh); tcg_gen_add_vec(vece, d, d, t); - tcg_temp_free_vec(t); } void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -3458,7 +3376,6 @@ static void gen_urshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) tcg_gen_andi_i64(t, t, dup_const(MO_8, 1)); tcg_gen_vec_shr8i_i64(d, a, sh); tcg_gen_vec_add8_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3469,7 +3386,6 @@ static 
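
The gen_srshr*/gen_urshr* helpers that this run of hunks trims implement a rounding shift right by adding back the last bit shifted out, i.e. d = (a >> sh) + ((a >> (sh - 1)) & 1), which matches round-to-nearest (a + (1 << (sh - 1))) >> sh while avoiding the potentially overflowing wide add. A quick worked check of the scalar form:

    /* signed, a = 7,  sh = 1:  t = (7 >> 0) & 1 = 1;  d = 3 + 1 = 4   */
    /* signed, a = -7, sh = 1:  t = 1;                 d = -4 + 1 = -3 */
    tcg_gen_extract_i32(t, a, sh - 1, 1);   /* t = last bit shifted out */
    tcg_gen_sari_i32(d, a, sh);
    tcg_gen_add_i32(d, d, t);

The deleted lines in these helpers are again only the trailing tcg_temp_free_* calls; the arithmetic is untouched.
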
void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) tcg_gen_andi_i64(t, t, dup_const(MO_16, 1)); tcg_gen_vec_shr16i_i64(d, a, sh); tcg_gen_vec_add16_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) @@ -3485,7 +3401,6 @@ static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) tcg_gen_extract_i32(t, a, sh - 1, 1); tcg_gen_shri_i32(d, a, sh); tcg_gen_add_i32(d, d, t); - tcg_temp_free_i32(t); } static void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3495,7 +3410,6 @@ static void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) tcg_gen_extract_i64(t, a, sh - 1, 1); tcg_gen_shri_i64(d, a, sh); tcg_gen_add_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_urshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift) @@ -3508,9 +3422,6 @@ static void gen_urshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift) tcg_gen_and_vec(vece, t, t, ones); tcg_gen_shri_vec(vece, d, a, shift); tcg_gen_add_vec(vece, d, d, t); - - tcg_temp_free_vec(t); - tcg_temp_free_vec(ones); } void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -3569,7 +3480,6 @@ static void gen_ursra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) gen_urshr8_i64(t, a, sh); } tcg_gen_vec_add8_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_ursra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3582,7 +3492,6 @@ static void gen_ursra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) gen_urshr16_i64(t, a, sh); } tcg_gen_vec_add16_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_ursra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) @@ -3595,7 +3504,6 @@ static void gen_ursra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) gen_urshr32_i32(t, a, sh); } tcg_gen_add_i32(d, d, t); - tcg_temp_free_i32(t); } static void gen_ursra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3608,7 +3516,6 @@ static void gen_ursra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) gen_urshr64_i64(t, a, sh); } tcg_gen_add_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_ursra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) @@ -3621,7 +3528,6 @@ static void gen_ursra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) gen_urshr_vec(vece, t, a, sh); } tcg_gen_add_vec(vece, d, d, t); - tcg_temp_free_vec(t); } void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -3674,7 +3580,6 @@ static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) tcg_gen_andi_i64(t, t, mask); tcg_gen_andi_i64(d, d, ~mask); tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) @@ -3686,7 +3591,6 @@ static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) tcg_gen_andi_i64(t, t, mask); tcg_gen_andi_i64(d, d, ~mask); tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) @@ -3710,9 +3614,6 @@ static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) tcg_gen_shri_vec(vece, t, a, sh); tcg_gen_and_vec(vece, d, d, m); tcg_gen_or_vec(vece, d, d, t); - - tcg_temp_free_vec(t); - tcg_temp_free_vec(m); } void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -3769,7 +3670,6 @@ static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) tcg_gen_andi_i64(t, t, mask); tcg_gen_andi_i64(d, d, ~mask); tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) @@ -3781,7 +3681,6 @@ static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, 
int64_t shift) tcg_gen_andi_i64(t, t, mask); tcg_gen_andi_i64(d, d, ~mask); tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) @@ -3803,9 +3702,6 @@ static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh)); tcg_gen_and_vec(vece, d, d, m); tcg_gen_or_vec(vece, d, d, t); - - tcg_temp_free_vec(t); - tcg_temp_free_vec(m); } void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -4046,11 +3942,6 @@ void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) tcg_gen_shr_i32(rval, src, rsh); tcg_gen_movcond_i32(TCG_COND_LTU, dst, lsh, max, lval, zero); tcg_gen_movcond_i32(TCG_COND_LTU, dst, rsh, max, rval, dst); - - tcg_temp_free_i32(lval); - tcg_temp_free_i32(rval); - tcg_temp_free_i32(lsh); - tcg_temp_free_i32(rsh); } void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) @@ -4073,11 +3964,6 @@ void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) tcg_gen_shr_i64(rval, src, rsh); tcg_gen_movcond_i64(TCG_COND_LTU, dst, lsh, max, lval, zero); tcg_gen_movcond_i64(TCG_COND_LTU, dst, rsh, max, rval, dst); - - tcg_temp_free_i64(lval); - tcg_temp_free_i64(rval); - tcg_temp_free_i64(lsh); - tcg_temp_free_i64(rsh); } static void gen_ushl_vec(unsigned vece, TCGv_vec dst, @@ -4097,7 +3983,6 @@ static void gen_ushl_vec(unsigned vece, TCGv_vec dst, tcg_gen_dupi_vec(vece, msk, 0xff); tcg_gen_and_vec(vece, lsh, shift, msk); tcg_gen_and_vec(vece, rsh, rsh, msk); - tcg_temp_free_vec(msk); } /* @@ -4130,12 +4015,6 @@ static void gen_ushl_vec(unsigned vece, TCGv_vec dst, tcg_gen_and_vec(vece, rval, rval, rsh); } tcg_gen_or_vec(vece, dst, lval, rval); - - tcg_temp_free_vec(max); - tcg_temp_free_vec(lval); - tcg_temp_free_vec(rval); - tcg_temp_free_vec(lsh); - tcg_temp_free_vec(rsh); } void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4187,11 +4066,6 @@ void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) tcg_gen_sar_i32(rval, src, rsh); tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero); tcg_gen_movcond_i32(TCG_COND_LT, dst, lsh, zero, rval, lval); - - tcg_temp_free_i32(lval); - tcg_temp_free_i32(rval); - tcg_temp_free_i32(lsh); - tcg_temp_free_i32(rsh); } void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) @@ -4215,11 +4089,6 @@ void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) tcg_gen_sar_i64(rval, src, rsh); tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero); tcg_gen_movcond_i64(TCG_COND_LT, dst, lsh, zero, rval, lval); - - tcg_temp_free_i64(lval); - tcg_temp_free_i64(rval); - tcg_temp_free_i64(lsh); - tcg_temp_free_i64(rsh); } static void gen_sshl_vec(unsigned vece, TCGv_vec dst, @@ -4264,12 +4133,6 @@ static void gen_sshl_vec(unsigned vece, TCGv_vec dst, tcg_gen_dupi_vec(vece, tmp, 0x80); tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval); } - - tcg_temp_free_vec(lval); - tcg_temp_free_vec(rval); - tcg_temp_free_vec(lsh); - tcg_temp_free_vec(rsh); - tcg_temp_free_vec(tmp); } void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4308,7 +4171,6 @@ static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat, tcg_gen_usadd_vec(vece, t, a, b); tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t); tcg_gen_or_vec(vece, sat, sat, x); - tcg_temp_free_vec(x); } void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4351,7 +4213,6 @@ static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat, tcg_gen_ssadd_vec(vece, t, 
a, b); tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t); tcg_gen_or_vec(vece, sat, sat, x); - tcg_temp_free_vec(x); } void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4394,7 +4255,6 @@ static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat, tcg_gen_ussub_vec(vece, t, a, b); tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t); tcg_gen_or_vec(vece, sat, sat, x); - tcg_temp_free_vec(x); } void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4437,7 +4297,6 @@ static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat, tcg_gen_sssub_vec(vece, t, a, b); tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t); tcg_gen_or_vec(vece, sat, sat, x); - tcg_temp_free_vec(x); } void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4479,7 +4338,6 @@ static void gen_sabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) tcg_gen_sub_i32(t, a, b); tcg_gen_sub_i32(d, b, a); tcg_gen_movcond_i32(TCG_COND_LT, d, a, b, d, t); - tcg_temp_free_i32(t); } static void gen_sabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) @@ -4489,7 +4347,6 @@ static void gen_sabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) tcg_gen_sub_i64(t, a, b); tcg_gen_sub_i64(d, b, a); tcg_gen_movcond_i64(TCG_COND_LT, d, a, b, d, t); - tcg_temp_free_i64(t); } static void gen_sabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) @@ -4499,7 +4356,6 @@ static void gen_sabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) tcg_gen_smin_vec(vece, t, a, b); tcg_gen_smax_vec(vece, d, a, b); tcg_gen_sub_vec(vece, d, d, t); - tcg_temp_free_vec(t); } void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4539,7 +4395,6 @@ static void gen_uabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) tcg_gen_sub_i32(t, a, b); tcg_gen_sub_i32(d, b, a); tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, d, t); - tcg_temp_free_i32(t); } static void gen_uabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) @@ -4549,7 +4404,6 @@ static void gen_uabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) tcg_gen_sub_i64(t, a, b); tcg_gen_sub_i64(d, b, a); tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, d, t); - tcg_temp_free_i64(t); } static void gen_uabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) @@ -4559,7 +4413,6 @@ static void gen_uabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) tcg_gen_umin_vec(vece, t, a, b); tcg_gen_umax_vec(vece, d, a, b); tcg_gen_sub_vec(vece, d, d, t); - tcg_temp_free_vec(t); } void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4597,7 +4450,6 @@ static void gen_saba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) TCGv_i32 t = tcg_temp_new_i32(); gen_sabd_i32(t, a, b); tcg_gen_add_i32(d, d, t); - tcg_temp_free_i32(t); } static void gen_saba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) @@ -4605,7 +4457,6 @@ static void gen_saba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) TCGv_i64 t = tcg_temp_new_i64(); gen_sabd_i64(t, a, b); tcg_gen_add_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_saba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) @@ -4613,7 +4464,6 @@ static void gen_saba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) TCGv_vec t = tcg_temp_new_vec_matching(d); gen_sabd_vec(vece, t, a, b); tcg_gen_add_vec(vece, d, d, t); - tcg_temp_free_vec(t); } void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4656,7 +4506,6 @@ static void gen_uaba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) TCGv_i32 t = tcg_temp_new_i32(); gen_uabd_i32(t, a, b); tcg_gen_add_i32(d, d, t); - tcg_temp_free_i32(t); } static void gen_uaba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) @@ 
-4664,7 +4513,6 @@ static void gen_uaba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) TCGv_i64 t = tcg_temp_new_i64(); gen_uabd_i64(t, a, b); tcg_gen_add_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_uaba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) @@ -4672,7 +4520,6 @@ static void gen_uaba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) TCGv_vec t = tcg_temp_new_vec_matching(d); gen_uabd_vec(vece, t, a, b); tcg_gen_add_vec(vece, d, d, t); - tcg_temp_free_vec(t); } void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4779,9 +4626,14 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, t = load_cpu_offset(offsetoflow32(CPUARMState, cp15.hstr_el2)); tcg_gen_andi_i32(t, t, 1u << maskbit); tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, over.label); - tcg_temp_free_i32(t); gen_exception_insn(s, 0, EXCP_UDEF, syndrome); + /* + * gen_exception_insn() will set is_jmp to DISAS_NORETURN, + * but since we're conditionally branching over it, we want + * to assume continue-to-next-instruction. + */ + s->base.is_jmp = DISAS_NEXT; set_disas_label(s, over); } } @@ -4844,7 +4696,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, case 0: break; case ARM_CP_NOP: - goto exit; + return; case ARM_CP_WFI: if (isread) { unallocated_encoding(s); @@ -4852,7 +4704,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, gen_update_pc(s, curr_insn_len(s)); s->base.is_jmp = DISAS_WFI; } - goto exit; + return; default: g_assert_not_reached(); } @@ -4883,7 +4735,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, store_reg(s, rt, tmp); tmp = tcg_temp_new_i32(); tcg_gen_extrh_i64_i32(tmp, tmp64); - tcg_temp_free_i64(tmp64); store_reg(s, rt2, tmp); } else { TCGv_i32 tmp; @@ -4903,7 +4754,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, * the condition codes from the high 4 bits of the value */ gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp); } else { store_reg(s, rt, tmp); } @@ -4912,7 +4762,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, /* Write */ if (ri->type & ARM_CP_CONST) { /* If not forbidden by access permissions, treat as WI */ - goto exit; + return; } if (is64) { @@ -4921,8 +4771,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, tmplo = load_reg(s, rt); tmphi = load_reg(s, rt2); tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi); - tcg_temp_free_i32(tmplo); - tcg_temp_free_i32(tmphi); if (ri->writefn) { if (!tcg_ri) { tcg_ri = gen_lookup_cp_reg(key); @@ -4931,7 +4779,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, } else { tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset); } - tcg_temp_free_i64(tmp64); } else { TCGv_i32 tmp = load_reg(s, rt); if (ri->writefn) { @@ -4939,7 +4786,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, tcg_ri = gen_lookup_cp_reg(key); } gen_helper_set_cp_reg(cpu_env, tcg_ri, tmp); - tcg_temp_free_i32(tmp); } else { store_cpu_offset(tmp, ri->fieldoffset, 4); } @@ -4966,11 +4812,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, if (need_exit_tb) { gen_lookup_tb(s); } - - exit: - if (tcg_ri) { - tcg_temp_free_ptr(tcg_ri); - } } /* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */ @@ -5015,10 +4856,7 @@ static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh) tmph = load_reg(s, rhigh); tmp = tcg_temp_new_i64(); tcg_gen_concat_i32_i64(tmp, tmpl, tmph); - tcg_temp_free_i32(tmpl); - tcg_temp_free_i32(tmph); tcg_gen_add_i64(val, val, tmp); - 
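
Two distinct things happen in the do_coproc_insn() hunks above. First, the exit: label existed only to free tcg_ri on every path; with pointer temporaries reclaimed automatically, each "goto exit" collapses to a plain return. Second, and independent of temp lifetime, is the is_jmp fix spelled out by the new comment: gen_exception_insn() marks the block DISAS_NORETURN, but here the exception sits on a conditionally skipped path, so translation must continue:

    tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, over.label);
    gen_exception_insn(s, 0, EXCP_UDEF, syndrome); /* sets DISAS_NORETURN */
    s->base.is_jmp = DISAS_NEXT;  /* branch skips the exception: keep going */
    set_disas_label(s, over);
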
tcg_temp_free_i64(tmp); } /* Set N and Z flags from hi|lo. */ @@ -5057,15 +4895,12 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, TCGv taddr = gen_aa32_addr(s, addr, opc); tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc); - tcg_temp_free(taddr); tcg_gen_mov_i64(cpu_exclusive_val, t64); if (s->be_data == MO_BE) { tcg_gen_extr_i64_i32(tmp2, tmp, t64); } else { tcg_gen_extr_i64_i32(tmp, tmp2, t64); } - tcg_temp_free_i64(t64); - store_reg(s, rt2, tmp2); } else { gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc); @@ -5102,7 +4937,6 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, extaddr = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(extaddr, addr); tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label); - tcg_temp_free_i64(extaddr); taddr = gen_aa32_addr(s, addr, opc); t0 = tcg_temp_new_i32(); @@ -5127,27 +4961,19 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, } else { tcg_gen_concat_i32_i64(n64, t1, t2); } - tcg_temp_free_i32(t2); tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64, get_mem_index(s), opc); - tcg_temp_free_i64(n64); tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val); tcg_gen_extrl_i64_i32(t0, o64); - - tcg_temp_free_i64(o64); } else { t2 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val); tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc); tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2); - tcg_temp_free_i32(t2); } - tcg_temp_free_i32(t1); - tcg_temp_free(taddr); tcg_gen_mov_i32(cpu_R[rd], t0); - tcg_temp_free_i32(t0); tcg_gen_br(done_label); gen_set_label(fail_label); @@ -5249,11 +5075,9 @@ static void gen_srs(DisasContext *s, tcg_gen_addi_i32(addr, addr, offset); tmp = load_reg(s, 14); gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); tmp = load_cpu_field(spsr); tcg_gen_addi_i32(addr, addr, 4); gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); if (writeback) { switch (amode) { case 0: @@ -5274,7 +5098,6 @@ static void gen_srs(DisasContext *s, tcg_gen_addi_i32(addr, addr, offset); gen_helper_set_r13_banked(cpu_env, tcg_constant_i32(mode), addr); } - tcg_temp_free_i32(addr); s->base.is_jmp = DISAS_UPDATE_EXIT; } @@ -5466,7 +5289,6 @@ static bool store_reg_kind(DisasContext *s, int rd, { switch (kind) { case STREG_NONE: - tcg_temp_free_i32(val); return true; case STREG_NORMAL: /* See ALUWritePC: Interworking only from a32 mode. 
*/ @@ -5527,7 +5349,6 @@ static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a, //// --- End LibAFL code --- gen(tmp1, tmp1, tmp2); - tcg_temp_free_i32(tmp2); if (logic_cc) { gen_logic_CC(tmp1); @@ -5569,7 +5390,6 @@ static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a, tmp1 = load_reg(s, a->rn); gen(tmp1, tmp1, tmp2); - tcg_temp_free_i32(tmp2); if (logic_cc) { gen_logic_CC(tmp1); @@ -5846,7 +5666,6 @@ static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a, tcg_gen_extrh_i64_i32(rdahi, rda); store_reg(s, a->rdalo, rdalo); store_reg(s, a->rdahi, rdahi); - tcg_temp_free_i64(rda); return true; } @@ -5930,7 +5749,6 @@ static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn) tcg_gen_extrh_i64_i32(rdahi, rda); store_reg(s, a->rdalo, rdalo); store_reg(s, a->rdahi, rdahi); - tcg_temp_free_i64(rda); return true; } @@ -6058,11 +5876,9 @@ static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add) t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); tcg_gen_mul_i32(t1, t1, t2); - tcg_temp_free_i32(t2); if (add) { t2 = load_reg(s, a->ra); tcg_gen_add_i32(t1, t1, t2); - tcg_temp_free_i32(t2); } if (a->s) { gen_logic_CC(t1); @@ -6091,10 +5907,8 @@ static bool trans_MLS(DisasContext *s, arg_MLS *a) t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); tcg_gen_mul_i32(t1, t1, t2); - tcg_temp_free_i32(t2); t2 = load_reg(s, a->ra); tcg_gen_sub_i32(t1, t2, t1); - tcg_temp_free_i32(t2); store_reg(s, a->rd, t1); return true; } @@ -6114,8 +5928,6 @@ static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add) t2 = load_reg(s, a->ra); t3 = load_reg(s, a->rd); tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } if (a->s) { gen_logicq_cc(t0, t1); @@ -6161,10 +5973,8 @@ static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a) zero = tcg_constant_i32(0); t2 = load_reg(s, a->ra); tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero); - tcg_temp_free_i32(t2); t2 = load_reg(s, a->rd); tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero); - tcg_temp_free_i32(t2); store_reg(s, a->ra, t0); store_reg(s, a->rd, t1); return true; @@ -6194,7 +6004,6 @@ static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub) } else { gen_helper_sub_saturate(t0, cpu_env, t0, t1); } - tcg_temp_free_i32(t1); store_reg(s, a->rd, t0); return true; } @@ -6230,7 +6039,6 @@ static bool op_smlaxxx(DisasContext *s, arg_rrrr *a, t0 = load_reg(s, a->rn); t1 = load_reg(s, a->rm); gen_mulxy(t0, t1, nt, mt); - tcg_temp_free_i32(t1); switch (add_long) { case 0: @@ -6239,7 +6047,6 @@ static bool op_smlaxxx(DisasContext *s, arg_rrrr *a, case 1: t1 = load_reg(s, a->ra); gen_helper_add_setq(t0, cpu_env, t0, t1); - tcg_temp_free_i32(t1); store_reg(s, a->rd, t0); break; case 2: @@ -6249,8 +6056,6 @@ static bool op_smlaxxx(DisasContext *s, arg_rrrr *a, t1 = tcg_temp_new_i32(); tcg_gen_sari_i32(t1, t0, 31); tcg_gen_add2_i32(tl, th, tl, th, t0, t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); store_reg(s, a->ra, tl); store_reg(s, a->rd, th); break; @@ -6303,11 +6108,9 @@ static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt) tcg_gen_shli_i32(t1, t1, 16); } tcg_gen_muls2_i32(t0, t1, t0, t1); - tcg_temp_free_i32(t0); if (add) { t0 = load_reg(s, a->ra); gen_helper_add_setq(t1, cpu_env, t1, t0); - tcg_temp_free_i32(t0); } store_reg(s, a->rd, t1); return true; @@ -6440,7 +6243,6 @@ static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz) } else { gen_helper_crc32(t1, t1, t2, t3); } - tcg_temp_free_i32(t2); store_reg(s, a->rd, t1); return true; } @@ -6539,7 
+6341,6 @@ static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a) addr = tcg_constant_i32((a->mask << 10) | a->sysm); reg = load_reg(s, a->rn); gen_helper_v7m_msr(cpu_env, addr, reg); - tcg_temp_free_i32(reg); /* If we wrote to CONTROL, the EL might have changed */ gen_rebuild_hflags(s, true); gen_lookup_tb(s); @@ -6642,7 +6443,7 @@ static bool trans_ERET(DisasContext *s, arg_ERET *a) } if (s->current_el == 2) { /* ERET from Hyp uses ELR_Hyp, not LR */ - tmp = load_cpu_field(elr_el[2]); + tmp = load_cpu_field_low32(elr_el[2]); } else { tmp = load_reg(s, 14); } @@ -6749,7 +6550,6 @@ static bool trans_TT(DisasContext *s, arg_TT *a) addr = load_reg(s, a->rn); tmp = tcg_temp_new_i32(); gen_helper_v7m_tt(tmp, cpu_env, addr, tcg_constant_i32((a->A << 1) | a->T)); - tcg_temp_free_i32(addr); store_reg(s, a->rd, tmp); return true; } @@ -6790,7 +6590,6 @@ static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a) } else { tcg_gen_sub_i32(addr, addr, ofs); } - tcg_temp_free_i32(ofs); } return addr; } @@ -6806,9 +6605,7 @@ static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a, } else { tcg_gen_sub_i32(addr, addr, ofs); } - tcg_temp_free_i32(ofs); } else if (!a->w) { - tcg_temp_free_i32(addr); return; } tcg_gen_addi_i32(addr, addr, address_offset); @@ -6855,7 +6652,6 @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a, tmp = load_reg(s, a->rt); gen_aa32_st_i32(s, tmp, addr, mem_idx, mop); disas_set_da_iss(s, mop, issinfo); - tcg_temp_free_i32(tmp); op_addr_rr_post(s, a, addr, 0); return true; @@ -6906,13 +6702,11 @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a) tmp = load_reg(s, a->rt); gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); tcg_gen_addi_i32(addr, addr, 4); tmp = load_reg(s, a->rt + 1); gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); op_addr_rr_post(s, a, addr, -4); return true; @@ -6941,7 +6735,6 @@ static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a) TCGv_i32 newsp = tcg_temp_new_i32(); tcg_gen_addi_i32(newsp, cpu_R[13], ofs); gen_helper_v8m_stackcheck(cpu_env, newsp); - tcg_temp_free_i32(newsp); } else { gen_helper_v8m_stackcheck(cpu_env, cpu_R[13]); } @@ -6960,7 +6753,6 @@ static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a, address_offset -= a->imm; } } else if (!a->w) { - tcg_temp_free_i32(addr); return; } tcg_gen_addi_i32(addr, addr, address_offset); @@ -7007,7 +6799,6 @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a, tmp = load_reg(s, a->rt); gen_aa32_st_i32(s, tmp, addr, mem_idx, mop); disas_set_da_iss(s, mop, issinfo); - tcg_temp_free_i32(tmp); op_addr_ri_post(s, a, addr, 0); return true; @@ -7061,13 +6852,11 @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2) tmp = load_reg(s, a->rt); gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); tcg_gen_addi_i32(addr, addr, 4); tmp = load_reg(s, rt2); gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); op_addr_ri_post(s, a, addr, -4); return true; @@ -7132,11 +6921,9 @@ static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc) opc |= s->be_data; addr = load_reg(s, a->rn); taddr = gen_aa32_addr(s, addr, opc); - tcg_temp_free_i32(addr); tmp = load_reg(s, a->rt2); tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc); - tcg_temp_free(taddr); store_reg(s, a->rt, tmp); return true; @@ -7178,12 +6965,11 @@ static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel) tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); } - addr 
= tcg_temp_local_new_i32(); + addr = tcg_temp_new_i32(); load_reg_var(s, addr, a->rn); tcg_gen_addi_i32(addr, addr, a->imm); gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop); - tcg_temp_free_i32(addr); return true; } @@ -7295,8 +7081,6 @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop) gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN); disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite); - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(addr); return true; } @@ -7331,12 +7115,11 @@ static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq) return true; } - addr = tcg_temp_local_new_i32(); + addr = tcg_temp_new_i32(); load_reg_var(s, addr, a->rn); tcg_gen_addi_i32(addr, addr, a->imm); gen_load_exclusive(s, a->rt, a->rt2, addr, mop); - tcg_temp_free_i32(addr); if (acq) { tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); @@ -7450,7 +7233,6 @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop) tmp = tcg_temp_new_i32(); gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN); disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel); - tcg_temp_free_i32(addr); store_reg(s, a->rt, tmp); tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); @@ -7487,11 +7269,9 @@ static bool trans_USADA8(DisasContext *s, arg_USADA8 *a) t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); gen_helper_usad8(t1, t1, t2); - tcg_temp_free_i32(t2); if (a->ra != 15) { t2 = load_reg(s, a->ra); tcg_gen_add_i32(t1, t1, t2); - tcg_temp_free_i32(t2); } store_reg(s, a->rd, t1); return true; @@ -7534,8 +7314,8 @@ static bool trans_UBFX(DisasContext *s, arg_UBFX *a) static bool trans_BFCI(DisasContext *s, arg_BFCI *a) { - TCGv_i32 tmp; int msb = a->msb, lsb = a->lsb; + TCGv_i32 t_in, t_rd; int width; if (!ENABLE_ARCH_6T2) { @@ -7550,17 +7330,14 @@ static bool trans_BFCI(DisasContext *s, arg_BFCI *a) width = msb + 1 - lsb; if (a->rn == 15) { /* BFC */ - tmp = tcg_const_i32(0); + t_in = tcg_constant_i32(0); } else { /* BFI */ - tmp = load_reg(s, a->rn); + t_in = load_reg(s, a->rn); } - if (width != 32) { - TCGv_i32 tmp2 = load_reg(s, a->rd); - tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width); - tcg_temp_free_i32(tmp2); - } - store_reg(s, a->rd, tmp); + t_rd = load_reg(s, a->rd); + tcg_gen_deposit_i32(t_rd, t_rd, t_in, lsb, width); + store_reg(s, a->rd, t_rd); return true; } @@ -7590,7 +7367,6 @@ static bool op_par_addsub(DisasContext *s, arg_rrr *a, gen(t0, t0, t1); - tcg_temp_free_i32(t1); store_reg(s, a->rd, t0); return true; } @@ -7615,8 +7391,6 @@ static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a, tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE)); gen(t0, t0, t1, ge); - tcg_temp_free_ptr(ge); - tcg_temp_free_i32(t1); store_reg(s, a->rd, t0); return true; } @@ -7707,7 +7481,6 @@ static bool trans_PKH(DisasContext *s, arg_PKH *a) tcg_gen_shli_i32(tm, tm, shift); tcg_gen_deposit_i32(tn, tm, tn, 0, 16); } - tcg_temp_free_i32(tm); store_reg(s, a->rd, tn); return true; } @@ -7782,7 +7555,6 @@ static bool op_xta(DisasContext *s, arg_rrr_rot *a, if (a->rn != 15) { TCGv_i32 tmp2 = load_reg(s, a->rn); gen_add(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); } store_reg(s, a->rd, tmp); return true; @@ -7839,8 +7611,6 @@ static bool trans_SEL(DisasContext *s, arg_rrr *a) t3 = tcg_temp_new_i32(); tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE)); gen_helper_sel_flags(t1, t3, t1, t2); - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); store_reg(s, a->rd, t1); return true; } @@ -7914,17 +7684,14 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) * addition of Ra. 
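
The op_strex()/op_ldrex() changes in this stretch (and the trans_LE one just below) retire tcg_temp_local_new_i32(): "local" temporaries existed because plain temps were dead after a branch, and addr here must survive the brcond inside gen_store_exclusive(). Ordinary temps are now branch-safe, so the two allocators merge, and the remaining "we need a TCG local" comments are stale in spirit even where they survive as context:

    addr = tcg_temp_new_i32();          /* now valid across brcond */
    load_reg_var(s, addr, a->rn);
    tcg_gen_addi_i32(addr, addr, a->imm);

The trans_BFCI() rewrite above is similar housekeeping: BFC's zero source becomes a shared tcg_constant_i32(0), and the width == 32 special case goes away because tcg_gen_deposit_i32() with ofs 0 and len 32 already degenerates to a plain move.
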
*/ tcg_gen_sub_i32(t1, t1, t2); - tcg_temp_free_i32(t2); if (a->ra != 15) { t2 = load_reg(s, a->ra); gen_helper_add_setq(t1, cpu_env, t1, t2); - tcg_temp_free_i32(t2); } } else if (a->ra == 15) { /* Single saturation-checking addition */ gen_helper_add_setq(t1, cpu_env, t1, t2); - tcg_temp_free_i32(t2); } else { /* * We need to add the products and Ra together and then @@ -7944,10 +7711,8 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) load_reg_var(s, t2, a->ra); tcg_gen_ext_i32_i64(q64, t2); tcg_gen_add_i64(p64, p64, q64); - tcg_temp_free_i64(q64); tcg_gen_extr_i64_i32(t1, t2, p64); - tcg_temp_free_i64(p64); /* * t1 is the low half of the result which goes into Rd. * We have overflow and must set Q if the high half (t2) @@ -7959,8 +7724,6 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) one = tcg_constant_i32(1); tcg_gen_movcond_i32(TCG_COND_NE, qf, t2, t3, one, qf); store_cpu_field(qf, QF); - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); } store_reg(s, a->rd, t1); return true; @@ -8006,19 +7769,15 @@ static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) l2 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(l1, t1); tcg_gen_ext_i32_i64(l2, t2); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); if (sub) { tcg_gen_sub_i64(l1, l1, l2); } else { tcg_gen_add_i64(l1, l1, l2); } - tcg_temp_free_i64(l2); gen_addq(s, l1, a->ra, a->rd); gen_storeq_reg(s, a->ra, a->rd, l1); - tcg_temp_free_i64(l1); return true; } @@ -8068,7 +7827,6 @@ static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub) } else { tcg_gen_add_i32(t1, t1, t3); } - tcg_temp_free_i32(t3); } if (round) { /* @@ -8078,7 +7836,6 @@ static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub) tcg_gen_shri_i32(t2, t2, 31); tcg_gen_add_i32(t1, t1, t2); } - tcg_temp_free_i32(t2); store_reg(s, a->rd, t1); return true; } @@ -8120,7 +7877,6 @@ static bool op_div(DisasContext *s, arg_rrr *a, bool u) } else { gen_helper_sdiv(t1, cpu_env, t1, t2); } - tcg_temp_free_i32(t2); store_reg(s, a->rd, t1); return true; } @@ -8192,8 +7948,6 @@ static void op_addr_block_post(DisasContext *s, arg_ldst_block *a, tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); } store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } } @@ -8236,7 +7990,6 @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n) tmp = load_reg(s, i); } gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); /* No need to add after the last transfer. */ if (++j != n) { @@ -8316,7 +8069,6 @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n) gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); if (user) { gen_helper_set_user_reg(cpu_env, tcg_constant_i32(i), tmp); - tcg_temp_free_i32(tmp); } else if (i == a->rn) { loaded_var = tmp; loaded_base = true; @@ -8346,7 +8098,6 @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n) gen_io_start(); } gen_helper_cpsr_write_eret(cpu_env, tmp); - tcg_temp_free_i32(tmp); /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; } @@ -8712,7 +8463,6 @@ static bool trans_LE(DisasContext *s, arg_LE *a) DisasLabel skipexc = gen_disas_label(s); tmp = load_cpu_field(v7m.ltpsize); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc.label); - tcg_temp_free_i32(tmp); gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized()); set_disas_label(s, skipexc); } @@ -8738,16 +8488,14 @@ static bool trans_LE(DisasContext *s, arg_LE *a) * Decrement by 1 << (4 - LTPSIZE). 
We need to use a TCG local * so that decr stays live after the brcondi. */ - TCGv_i32 decr = tcg_temp_local_new_i32(); + TCGv_i32 decr = tcg_temp_new_i32(); TCGv_i32 ltpsize = load_cpu_field(v7m.ltpsize); tcg_gen_sub_i32(decr, tcg_constant_i32(4), ltpsize); tcg_gen_shl_i32(decr, tcg_constant_i32(1), decr); - tcg_temp_free_i32(ltpsize); tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend.label); tcg_gen_sub_i32(cpu_R[14], cpu_R[14], decr); - tcg_temp_free_i32(decr); } /* Jump back to the loop start */ gen_jmp(s, jmp_diff(s, -a->imm)); @@ -8811,8 +8559,6 @@ static bool trans_VCTP(DisasContext *s, arg_VCTP *a) masklen, tcg_constant_i32(1 << (4 - a->size)), rn_shifted, tcg_constant_i32(16)); gen_helper_mve_vctp(cpu_env, masklen); - tcg_temp_free_i32(masklen); - tcg_temp_free_i32(rn_shifted); /* This insn updates predication bits */ s->base.is_jmp = DISAS_UPDATE_NOCHAIN; mve_update_eci(s); @@ -8835,7 +8581,6 @@ static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half) tcg_gen_add_i32(tmp, tmp, tmp); gen_pc_plus_diff(s, addr, jmp_diff(s, 0)); tcg_gen_add_i32(tmp, tmp, addr); - tcg_temp_free_i32(addr); store_reg(s, 15, tmp); return true; } @@ -8857,7 +8602,6 @@ static bool trans_CBZ(DisasContext *s, arg_CBZ *a) arm_gen_condlabel(s); tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE, tmp, 0, s->condlabel.label); - tcg_temp_free_i32(tmp); gen_jmp(s, jmp_diff(s, a->imm)); return true; } @@ -8924,8 +8668,6 @@ static bool trans_RFE(DisasContext *s, arg_RFE *a) /* Base writeback. */ tcg_gen_addi_i32(addr, addr, post_offset[a->pu]); store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } gen_rfe(s, t1, t2); return true; @@ -9167,11 +8909,8 @@ static bool trans_CSEL(DisasContext *s, arg_CSEL *a) arm_test_cc(&c, a->fcond); tcg_gen_movcond_i32(c.cond, rn, c.value, zero, rn, rm); - arm_free_cc(&c); store_reg(s, a->rd, rn); - tcg_temp_free_i32(rm); - return true; } @@ -9584,7 +9323,7 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) uint32_t condexec_bits; target_ulong pc_arg = dc->base.pc_next; - if (TARGET_TB_PCREL) { + if (tb_cflags(dcbase->tb) & CF_PCREL) { pc_arg &= ~TARGET_PAGE_MASK; } if (dc->eci) { @@ -9642,7 +9381,6 @@ static void arm_post_translate_insn(DisasContext *dc) gen_set_label(dc->condlabel.label); dc->condjmp = 0; } - translator_loop_temp_check(&dc->base); } static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) @@ -10012,7 +9750,7 @@ static const TranslatorOps thumb_translator_ops = { }; /* generate intermediate code for basic block 'tb'. */ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc = { }; diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index 3717824b75..a9d1f4adc2 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -23,7 +23,7 @@ typedef struct DisasContext { /* The address of the current instruction being translated. */ target_ulong pc_curr; /* - * For TARGET_TB_PCREL, the full value of cpu_pc is not known + * For CF_PCREL, the full value of cpu_pc is not known * (although the page offset is known). For convenience, the * translation loop uses the full virtual address that triggered * the translation, from base.pc_start through pc_curr. @@ -149,15 +149,11 @@ typedef struct DisasContext { int c15_cpar; /* TCG op of the current insn_start. 
*/ TCGOp *insn_start; -#define TMP_A64_MAX 16 - int tmp_a64_count; - TCGv_i64 tmp_a64[TMP_A64_MAX]; } DisasContext; typedef struct DisasCompare { TCGCond cond; TCGv_i32 value; - bool value_global; } DisasCompare; /* Share the TCG temporaries common between 32 and 64 bit modes. */ @@ -224,6 +220,11 @@ static inline int rsub_8(DisasContext *s, int x) return 8 - x; } +static inline int shl_12(DisasContext *s, int x) +{ + return x << 12; +} + static inline int neon_3same_fp_size(DisasContext *s, int x) { /* Convert 0==fp32, 1==fp16 into a MO_* value */ @@ -304,7 +305,6 @@ static inline void gen_a64_update_pc(DisasContext *s, target_long diff) #endif void arm_test_cc(DisasCompare *cmp, int cc); -void arm_free_cc(DisasCompare *cmp); void arm_jump_cc(DisasCompare *cmp, TCGLabel *label); void arm_gen_test_cc(int cc, TCGLabel *label); MemOp pow2_align(unsigned i); @@ -336,7 +336,6 @@ static inline void set_pstate_bits(uint32_t bits) tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate)); tcg_gen_ori_i32(p, p, bits); tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate)); - tcg_temp_free_i32(p); } /* Clear bits within PSTATE. */ @@ -349,7 +348,6 @@ static inline void clear_pstate_bits(uint32_t bits) tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate)); tcg_gen_andi_i32(p, p, ~bits); tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate)); - tcg_temp_free_i32(p); } /* If the singlestep state is Active-not-pending, advance to Active-pending. */ @@ -623,6 +621,23 @@ static inline TCGv_ptr gen_lookup_cp_reg(uint32_t key) return ret; } +/* + * Set and reset rounding mode around another operation. + */ +static inline TCGv_i32 gen_set_rmode(ARMFPRounding rmode, TCGv_ptr fpst) +{ + TCGv_i32 new = tcg_constant_i32(arm_rmode_to_sf(rmode)); + TCGv_i32 old = tcg_temp_new_i32(); + + gen_helper_set_rmode(old, new, fpst); + return old; +} + +static inline void gen_restore_rmode(TCGv_i32 old, TCGv_ptr fpst) +{ + gen_helper_set_rmode(old, old, fpst); +} + /* * Helpers for implementing sets of trans_* functions. * Defer the implementation of NAME to FUNC, with optional extra arguments. 
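
The gen_set_rmode()/gen_restore_rmode() pair added above is meant to bracket a single floating-point operation: install a temporary rounding mode, run the op, then put the previous mode back. A minimal sketch of the intended call pattern — borrowing fpstatus_ptr(FPST_FPCR), gen_helper_rints() and FPROUNDING_ZERO, which already exist elsewhere in target/arm and are not introduced by this hunk:

    /*
     * Sketch only: round-to-zero around one FP op inside a trans_* function.
     * 'saved' holds the previous softfloat mode returned by gen_set_rmode().
     */
    static void sketch_round_to_zero(TCGv_i32 tcg_res, TCGv_i32 tcg_op)
    {
        TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
        /* Install FPROUNDING_ZERO; remember whatever mode was active. */
        TCGv_i32 saved = gen_set_rmode(FPROUNDING_ZERO, fpst);
        gen_helper_rints(tcg_res, tcg_op, fpst);
        /* Put the saved mode back. */
        gen_restore_rmode(saved, fpst);
    }

Note that gen_set_rmode() translates the architectural FPROUNDING_* value into a softfloat mode via arm_rmode_to_sf(), which the next file converts from a switch statement into the arm_rmode_to_sf_map[] lookup table.
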
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 24e3d820a5..36906db8e0 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -1104,33 +1104,14 @@ float64 HELPER(rintd)(float64 x, void *fp_status) } /* Convert ARM rounding mode to softfloat */ -int arm_rmode_to_sf(int rmode) -{ - switch (rmode) { - case FPROUNDING_TIEAWAY: - rmode = float_round_ties_away; - break; - case FPROUNDING_ODD: - /* FIXME: add support for TIEAWAY and ODD */ - qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n", - rmode); - /* fall through for now */ - case FPROUNDING_TIEEVEN: - default: - rmode = float_round_nearest_even; - break; - case FPROUNDING_POSINF: - rmode = float_round_up; - break; - case FPROUNDING_NEGINF: - rmode = float_round_down; - break; - case FPROUNDING_ZERO: - rmode = float_round_to_zero; - break; - } - return rmode; -} +const FloatRoundMode arm_rmode_to_sf_map[] = { + [FPROUNDING_TIEEVEN] = float_round_nearest_even, + [FPROUNDING_POSINF] = float_round_up, + [FPROUNDING_NEGINF] = float_round_down, + [FPROUNDING_ZERO] = float_round_to_zero, + [FPROUNDING_TIEAWAY] = float_round_ties_away, + [FPROUNDING_ODD] = float_round_to_odd, +}; /* * Implement float64 to int32_t conversion without saturation; diff --git a/target/avr/cpu-param.h b/target/avr/cpu-param.h index 7ef4e7c679..9a92bc74fc 100644 --- a/target/avr/cpu-param.h +++ b/target/avr/cpu-param.h @@ -31,6 +31,5 @@ #define TARGET_PAGE_BITS 8 #define TARGET_PHYS_ADDR_SPACE_BITS 24 #define TARGET_VIRT_ADDR_SPACE_BITS 24 -#define NB_MMU_MODES 2 #endif diff --git a/target/avr/cpu.c b/target/avr/cpu.c index d0139804b9..a24c23c247 100644 --- a/target/avr/cpu.c +++ b/target/avr/cpu.c @@ -54,7 +54,8 @@ static void avr_cpu_synchronize_from_tb(CPUState *cs, AVRCPU *cpu = AVR_CPU(cs); CPUAVRState *env = &cpu->env; - env->pc_w = tb_pc(tb) / 2; /* internally PC points to words */ + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + env->pc_w = tb->pc / 2; /* internally PC points to words */ } static void avr_restore_state_to_opc(CPUState *cs, diff --git a/target/avr/gdbstub.c b/target/avr/gdbstub.c index 1c1b908c92..150344d8b9 100644 --- a/target/avr/gdbstub.c +++ b/target/avr/gdbstub.c @@ -19,7 +19,7 @@ */ #include "qemu/osdep.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" int avr_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { diff --git a/target/avr/translate.c b/target/avr/translate.c index 2bed56f135..cd82f5d591 100644 --- a/target/avr/translate.c +++ b/target/avr/translate.c @@ -107,11 +107,6 @@ struct DisasContext { * tcg_gen_brcond_tl(skip_cond, skip_var0, skip_var1, skip_label); * } * - * if (free_skip_var0) { - * tcg_temp_free(skip_var0); - * free_skip_var0 = false; - * } - * * translate(ctx); * * if (skip_label) { @@ -121,7 +116,6 @@ struct DisasContext { TCGv skip_var0; TCGv skip_var1; TCGCond skip_cond; - bool free_skip_var0; }; void avr_cpu_tcg_init(void) @@ -227,10 +221,6 @@ static void gen_add_CHf(TCGv R, TCGv Rd, TCGv Rr) tcg_gen_shri_tl(cpu_Cf, t1, 7); /* Cf = t1(7) */ tcg_gen_shri_tl(cpu_Hf, t1, 3); /* Hf = t1(3) */ tcg_gen_andi_tl(cpu_Hf, cpu_Hf, 1); - - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); } static void gen_add_Vf(TCGv R, TCGv Rd, TCGv Rr) @@ -245,9 +235,6 @@ static void gen_add_Vf(TCGv R, TCGv Rd, TCGv Rr) tcg_gen_andc_tl(t1, t1, t2); tcg_gen_shri_tl(cpu_Vf, t1, 7); /* Vf = t1(7) */ - - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); } static void gen_sub_CHf(TCGv R, TCGv Rd, TCGv Rr) @@ -265,10 +252,6 @@ static void 
gen_sub_CHf(TCGv R, TCGv Rd, TCGv Rr) tcg_gen_shri_tl(cpu_Cf, t2, 7); /* Cf = t2(7) */ tcg_gen_shri_tl(cpu_Hf, t2, 3); /* Hf = t2(3) */ tcg_gen_andi_tl(cpu_Hf, cpu_Hf, 1); - - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); } static void gen_sub_Vf(TCGv R, TCGv Rd, TCGv Rr) @@ -283,9 +266,6 @@ static void gen_sub_Vf(TCGv R, TCGv Rd, TCGv Rr) tcg_gen_and_tl(t1, t1, t2); tcg_gen_shri_tl(cpu_Vf, t1, 7); /* Vf = t1(7) */ - - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); } static void gen_NSf(TCGv R) @@ -323,9 +303,6 @@ static bool trans_ADD(DisasContext *ctx, arg_ADD *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(R); - return true; } @@ -350,9 +327,6 @@ static bool trans_ADC(DisasContext *ctx, arg_ADC *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(R); - return true; } @@ -391,10 +365,6 @@ static bool trans_ADIW(DisasContext *ctx, arg_ADIW *a) /* update output registers */ tcg_gen_andi_tl(RdL, R, 0xff); tcg_gen_shri_tl(RdH, R, 8); - - tcg_temp_free_i32(Rd); - tcg_temp_free_i32(R); - return true; } @@ -419,9 +389,6 @@ static bool trans_SUB(DisasContext *ctx, arg_SUB *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(R); - return true; } @@ -433,7 +400,7 @@ static bool trans_SUB(DisasContext *ctx, arg_SUB *a) static bool trans_SUBI(DisasContext *ctx, arg_SUBI *a) { TCGv Rd = cpu_r[a->rd]; - TCGv Rr = tcg_const_i32(a->imm); + TCGv Rr = tcg_constant_i32(a->imm); TCGv R = tcg_temp_new_i32(); tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Imm */ @@ -446,10 +413,6 @@ static bool trans_SUBI(DisasContext *ctx, arg_SUBI *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(R); - tcg_temp_free_i32(Rr); - return true; } @@ -462,7 +425,7 @@ static bool trans_SBC(DisasContext *ctx, arg_SBC *a) TCGv Rd = cpu_r[a->rd]; TCGv Rr = cpu_r[a->rr]; TCGv R = tcg_temp_new_i32(); - TCGv zero = tcg_const_i32(0); + TCGv zero = tcg_constant_i32(0); tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */ tcg_gen_sub_tl(R, R, cpu_Cf); @@ -481,10 +444,6 @@ static bool trans_SBC(DisasContext *ctx, arg_SBC *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(zero); - tcg_temp_free_i32(R); - return true; } @@ -494,9 +453,9 @@ static bool trans_SBC(DisasContext *ctx, arg_SBC *a) static bool trans_SBCI(DisasContext *ctx, arg_SBCI *a) { TCGv Rd = cpu_r[a->rd]; - TCGv Rr = tcg_const_i32(a->imm); + TCGv Rr = tcg_constant_i32(a->imm); TCGv R = tcg_temp_new_i32(); - TCGv zero = tcg_const_i32(0); + TCGv zero = tcg_constant_i32(0); tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */ tcg_gen_sub_tl(R, R, cpu_Cf); @@ -515,11 +474,6 @@ static bool trans_SBCI(DisasContext *ctx, arg_SBCI *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(zero); - tcg_temp_free_i32(R); - tcg_temp_free_i32(Rr); - return true; } @@ -558,10 +512,6 @@ static bool trans_SBIW(DisasContext *ctx, arg_SBIW *a) /* update output registers */ tcg_gen_andi_tl(RdL, R, 0xff); tcg_gen_shri_tl(RdH, R, 8); - - tcg_temp_free_i32(Rd); - tcg_temp_free_i32(R); - return true; } @@ -584,9 +534,6 @@ static bool trans_AND(DisasContext *ctx, arg_AND *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(R); - return true; } @@ -626,9 +573,6 @@ static bool trans_OR(DisasContext *ctx, arg_OR *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(R); - return true; } @@ -676,7 +620,6 @@ static bool trans_EOR(DisasContext *ctx, arg_EOR *a) static bool 
trans_COM(DisasContext *ctx, arg_COM *a) { TCGv Rd = cpu_r[a->rd]; - TCGv R = tcg_temp_new_i32(); tcg_gen_xori_tl(Rd, Rd, 0xff); @@ -684,9 +627,6 @@ static bool trans_COM(DisasContext *ctx, arg_COM *a) tcg_gen_movi_tl(cpu_Cf, 1); /* Cf = 1 */ tcg_gen_movi_tl(cpu_Vf, 0); /* Vf = 0 */ gen_ZNSf(Rd); - - tcg_temp_free_i32(R); - return true; } @@ -697,7 +637,7 @@ static bool trans_COM(DisasContext *ctx, arg_COM *a) static bool trans_NEG(DisasContext *ctx, arg_NEG *a) { TCGv Rd = cpu_r[a->rd]; - TCGv t0 = tcg_const_i32(0); + TCGv t0 = tcg_constant_i32(0); TCGv R = tcg_temp_new_i32(); tcg_gen_sub_tl(R, t0, Rd); /* R = 0 - Rd */ @@ -710,10 +650,6 @@ static bool trans_NEG(DisasContext *ctx, arg_NEG *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(t0); - tcg_temp_free_i32(R); - return true; } @@ -783,9 +719,6 @@ static bool trans_MUL(DisasContext *ctx, arg_MUL *a) /* update status register */ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */ - - tcg_temp_free_i32(R); - return true; } @@ -816,11 +749,6 @@ static bool trans_MULS(DisasContext *ctx, arg_MULS *a) /* update status register */ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */ - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(R); - return true; } @@ -850,10 +778,6 @@ static bool trans_MULSU(DisasContext *ctx, arg_MULSU *a) /* update status register */ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */ - - tcg_temp_free_i32(t0); - tcg_temp_free_i32(R); - return true; } @@ -884,10 +808,6 @@ static bool trans_FMUL(DisasContext *ctx, arg_FMUL *a) tcg_gen_andi_tl(R0, R, 0xff); tcg_gen_shri_tl(R1, R, 8); tcg_gen_andi_tl(R1, R1, 0xff); - - - tcg_temp_free_i32(R); - return true; } @@ -923,11 +843,6 @@ static bool trans_FMULS(DisasContext *ctx, arg_FMULS *a) tcg_gen_andi_tl(R0, R, 0xff); tcg_gen_shri_tl(R1, R, 8); tcg_gen_andi_tl(R1, R1, 0xff); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(R); - return true; } @@ -961,10 +876,6 @@ static bool trans_FMULSU(DisasContext *ctx, arg_FMULSU *a) tcg_gen_andi_tl(R0, R, 0xff); tcg_gen_shri_tl(R1, R, 8); tcg_gen_andi_tl(R1, R1, 0xff); - - tcg_temp_free_i32(t0); - tcg_temp_free_i32(R); - return true; } @@ -1019,35 +930,24 @@ static void gen_jmp_z(DisasContext *ctx) static void gen_push_ret(DisasContext *ctx, int ret) { if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) { - - TCGv t0 = tcg_const_i32((ret & 0x0000ff)); + TCGv t0 = tcg_constant_i32(ret & 0x0000ff); tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_UB); tcg_gen_subi_tl(cpu_sp, cpu_sp, 1); - - tcg_temp_free_i32(t0); } else if (avr_feature(ctx->env, AVR_FEATURE_2_BYTE_PC)) { - - TCGv t0 = tcg_const_i32((ret & 0x00ffff)); + TCGv t0 = tcg_constant_i32(ret & 0x00ffff); tcg_gen_subi_tl(cpu_sp, cpu_sp, 1); tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_BEUW); tcg_gen_subi_tl(cpu_sp, cpu_sp, 1); - - tcg_temp_free_i32(t0); - } else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) { - - TCGv lo = tcg_const_i32((ret & 0x0000ff)); - TCGv hi = tcg_const_i32((ret & 0xffff00) >> 8); + TCGv lo = tcg_constant_i32(ret & 0x0000ff); + TCGv hi = tcg_constant_i32((ret & 0xffff00) >> 8); tcg_gen_qemu_st_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB); tcg_gen_subi_tl(cpu_sp, cpu_sp, 2); tcg_gen_qemu_st_tl(hi, cpu_sp, MMU_DATA_IDX, MO_BEUW); tcg_gen_subi_tl(cpu_sp, cpu_sp, 1); - - tcg_temp_free_i32(lo); - 
tcg_temp_free_i32(hi); } } @@ -1071,9 +971,6 @@ static void gen_pop_ret(DisasContext *ctx, TCGv ret) tcg_gen_qemu_ld_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB); tcg_gen_deposit_tl(ret, lo, hi, 8, 16); - - tcg_temp_free_i32(lo); - tcg_temp_free_i32(hi); } } @@ -1301,9 +1198,6 @@ static bool trans_CP(DisasContext *ctx, arg_CP *a) gen_sub_CHf(R, Rd, Rr); gen_sub_Vf(R, Rd, Rr); gen_ZNSf(R); - - tcg_temp_free_i32(R); - return true; } @@ -1317,7 +1211,7 @@ static bool trans_CPC(DisasContext *ctx, arg_CPC *a) TCGv Rd = cpu_r[a->rd]; TCGv Rr = cpu_r[a->rr]; TCGv R = tcg_temp_new_i32(); - TCGv zero = tcg_const_i32(0); + TCGv zero = tcg_constant_i32(0); tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */ tcg_gen_sub_tl(R, R, cpu_Cf); @@ -1332,10 +1226,6 @@ static bool trans_CPC(DisasContext *ctx, arg_CPC *a) * cleared otherwise. */ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero); - - tcg_temp_free_i32(zero); - tcg_temp_free_i32(R); - return true; } @@ -1348,7 +1238,7 @@ static bool trans_CPI(DisasContext *ctx, arg_CPI *a) { TCGv Rd = cpu_r[a->rd]; int Imm = a->imm; - TCGv Rr = tcg_const_i32(Imm); + TCGv Rr = tcg_constant_i32(Imm); TCGv R = tcg_temp_new_i32(); tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */ @@ -1358,10 +1248,6 @@ static bool trans_CPI(DisasContext *ctx, arg_CPI *a) gen_sub_CHf(R, Rd, Rr); gen_sub_Vf(R, Rd, Rr); gen_ZNSf(R); - - tcg_temp_free_i32(R); - tcg_temp_free_i32(Rr); - return true; } @@ -1375,7 +1261,6 @@ static bool trans_SBRC(DisasContext *ctx, arg_SBRC *a) ctx->skip_cond = TCG_COND_EQ; ctx->skip_var0 = tcg_temp_new(); - ctx->free_skip_var0 = true; tcg_gen_andi_tl(ctx->skip_var0, Rr, 1 << a->bit); return true; @@ -1391,7 +1276,6 @@ static bool trans_SBRS(DisasContext *ctx, arg_SBRS *a) ctx->skip_cond = TCG_COND_NE; ctx->skip_var0 = tcg_temp_new(); - ctx->free_skip_var0 = true; tcg_gen_andi_tl(ctx->skip_var0, Rr, 1 << a->bit); return true; @@ -1404,13 +1288,13 @@ static bool trans_SBRS(DisasContext *ctx, arg_SBRS *a) */ static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a) { - TCGv temp = tcg_const_i32(a->reg); + TCGv data = tcg_temp_new_i32(); + TCGv port = tcg_constant_i32(a->reg); - gen_helper_inb(temp, cpu_env, temp); - tcg_gen_andi_tl(temp, temp, 1 << a->bit); + gen_helper_inb(data, cpu_env, port); + tcg_gen_andi_tl(data, data, 1 << a->bit); ctx->skip_cond = TCG_COND_EQ; - ctx->skip_var0 = temp; - ctx->free_skip_var0 = true; + ctx->skip_var0 = data; return true; } @@ -1422,13 +1306,13 @@ static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a) */ static bool trans_SBIS(DisasContext *ctx, arg_SBIS *a) { - TCGv temp = tcg_const_i32(a->reg); + TCGv data = tcg_temp_new_i32(); + TCGv port = tcg_constant_i32(a->reg); - gen_helper_inb(temp, cpu_env, temp); - tcg_gen_andi_tl(temp, temp, 1 << a->bit); + gen_helper_inb(data, cpu_env, port); + tcg_gen_andi_tl(data, data, 1 << a->bit); ctx->skip_cond = TCG_COND_NE; - ctx->skip_var0 = temp; - ctx->free_skip_var0 = true; + ctx->skip_var0 = data; return true; } @@ -1608,7 +1492,7 @@ static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr) if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) { gen_helper_fullwr(cpu_env, data, addr); } else { - tcg_gen_qemu_st8(data, addr, MMU_DATA_IDX); /* mem[addr] = data */ + tcg_gen_qemu_st_tl(data, addr, MMU_DATA_IDX, MO_UB); } } @@ -1617,7 +1501,7 @@ static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr) if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) { gen_helper_fullrd(data, cpu_env, addr); } else { - tcg_gen_qemu_ld8u(data, addr, MMU_DATA_IDX); /* data = mem[addr] */ + 
tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB); } } @@ -1697,9 +1581,6 @@ static bool trans_LDS(DisasContext *ctx, arg_LDS *a) tcg_gen_ori_tl(addr, addr, a->imm); gen_data_load(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1734,9 +1615,6 @@ static bool trans_LDX1(DisasContext *ctx, arg_LDX1 *a) TCGv addr = gen_get_xaddr(); gen_data_load(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1749,9 +1627,6 @@ static bool trans_LDX2(DisasContext *ctx, arg_LDX2 *a) tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_xaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1763,9 +1638,6 @@ static bool trans_LDX3(DisasContext *ctx, arg_LDX3 *a) tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */ gen_data_load(ctx, Rd, addr); gen_set_xaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1803,9 +1675,6 @@ static bool trans_LDY2(DisasContext *ctx, arg_LDY2 *a) tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_yaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1817,9 +1686,6 @@ static bool trans_LDY3(DisasContext *ctx, arg_LDY3 *a) tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */ gen_data_load(ctx, Rd, addr); gen_set_yaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1830,9 +1696,6 @@ static bool trans_LDDY(DisasContext *ctx, arg_LDDY *a) tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */ gen_data_load(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1874,9 +1737,6 @@ static bool trans_LDZ2(DisasContext *ctx, arg_LDZ2 *a) tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_zaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1889,9 +1749,6 @@ static bool trans_LDZ3(DisasContext *ctx, arg_LDZ3 *a) gen_data_load(ctx, Rd, addr); gen_set_zaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1902,9 +1759,6 @@ static bool trans_LDDZ(DisasContext *ctx, arg_LDDZ *a) tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */ gen_data_load(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1931,9 +1785,6 @@ static bool trans_STS(DisasContext *ctx, arg_STS *a) tcg_gen_shli_tl(addr, addr, 16); tcg_gen_ori_tl(addr, addr, a->imm); gen_data_store(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1964,9 +1815,6 @@ static bool trans_STX1(DisasContext *ctx, arg_STX1 *a) TCGv addr = gen_get_xaddr(); gen_data_store(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1978,9 +1826,6 @@ static bool trans_STX2(DisasContext *ctx, arg_STX2 *a) gen_data_store(ctx, Rd, addr); tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_xaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1992,9 +1837,6 @@ static bool trans_STX3(DisasContext *ctx, arg_STX3 *a) tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */ gen_data_store(ctx, Rd, addr); gen_set_xaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2029,9 +1871,6 @@ static bool trans_STY2(DisasContext *ctx, arg_STY2 *a) gen_data_store(ctx, Rd, addr); tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_yaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2043,9 +1882,6 @@ static bool trans_STY3(DisasContext *ctx, arg_STY3 *a) tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */ gen_data_store(ctx, Rd, addr); gen_set_yaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2056,9 +1892,6 @@ static bool trans_STDY(DisasContext *ctx, arg_STDY *a) tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */ gen_data_store(ctx, Rd, 
addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2094,9 +1927,6 @@ static bool trans_STZ2(DisasContext *ctx, arg_STZ2 *a) tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_zaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2109,9 +1939,6 @@ static bool trans_STZ3(DisasContext *ctx, arg_STZ3 *a) gen_data_store(ctx, Rd, addr); gen_set_zaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2122,9 +1949,6 @@ static bool trans_STDZ(DisasContext *ctx, arg_STDZ *a) tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */ gen_data_store(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2155,10 +1979,7 @@ static bool trans_LPM1(DisasContext *ctx, arg_LPM1 *a) tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */ tcg_gen_or_tl(addr, addr, L); - tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ - - tcg_temp_free_i32(addr); - + tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB); return true; } @@ -2175,10 +1996,7 @@ static bool trans_LPM2(DisasContext *ctx, arg_LPM2 *a) tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */ tcg_gen_or_tl(addr, addr, L); - tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ - - tcg_temp_free_i32(addr); - + tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB); return true; } @@ -2195,14 +2013,11 @@ static bool trans_LPMX(DisasContext *ctx, arg_LPMX *a) tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */ tcg_gen_or_tl(addr, addr, L); - tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ + tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB); tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ tcg_gen_andi_tl(L, addr, 0xff); tcg_gen_shri_tl(addr, addr, 8); tcg_gen_andi_tl(H, addr, 0xff); - - tcg_temp_free_i32(addr); - return true; } @@ -2230,10 +2045,7 @@ static bool trans_ELPM1(DisasContext *ctx, arg_ELPM1 *a) TCGv Rd = cpu_r[0]; TCGv addr = gen_get_zaddr(); - tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ - - tcg_temp_free_i32(addr); - + tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB); return true; } @@ -2246,10 +2058,7 @@ static bool trans_ELPM2(DisasContext *ctx, arg_ELPM2 *a) TCGv Rd = cpu_r[a->rd]; TCGv addr = gen_get_zaddr(); - tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ - - tcg_temp_free_i32(addr); - + tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB); return true; } @@ -2262,12 +2071,9 @@ static bool trans_ELPMX(DisasContext *ctx, arg_ELPMX *a) TCGv Rd = cpu_r[a->rd]; TCGv addr = gen_get_zaddr(); - tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ + tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB); tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_zaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2318,12 +2124,9 @@ static bool trans_SPMX(DisasContext *ctx, arg_SPMX *a) static bool trans_IN(DisasContext *ctx, arg_IN *a) { TCGv Rd = cpu_r[a->rd]; - TCGv port = tcg_const_i32(a->imm); + TCGv port = tcg_constant_i32(a->imm); gen_helper_inb(Rd, cpu_env, port); - - tcg_temp_free_i32(port); - return true; } @@ -2334,12 +2137,9 @@ static bool trans_IN(DisasContext *ctx, arg_IN *a) static bool trans_OUT(DisasContext *ctx, arg_OUT *a) { TCGv Rd = cpu_r[a->rd]; - TCGv port = tcg_const_i32(a->imm); + TCGv port = tcg_constant_i32(a->imm); gen_helper_outb(cpu_env, port, Rd); - - tcg_temp_free_i32(port); - return true; } @@ -2407,10 +2207,6 @@ static bool trans_XCH(DisasContext *ctx, arg_XCH *a) gen_data_load(ctx, t0, addr); gen_data_store(ctx, Rd, addr); tcg_gen_mov_tl(Rd, t0); - - tcg_temp_free_i32(t0); - tcg_temp_free_i32(addr); - 
return true; } @@ -2440,11 +2236,6 @@ static bool trans_LAS(DisasContext *ctx, arg_LAS *a) tcg_gen_or_tl(t1, t0, Rr); tcg_gen_mov_tl(Rr, t0); /* Rr = t0 */ gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */ - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(addr); - return true; } @@ -2475,11 +2266,6 @@ static bool trans_LAC(DisasContext *ctx, arg_LAC *a) tcg_gen_andc_tl(t1, t0, Rr); /* t1 = t0 & (0xff - Rr) = t0 & ~Rr */ tcg_gen_mov_tl(Rr, t0); /* Rr = t0 */ gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */ - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(addr); - return true; } @@ -2510,11 +2296,6 @@ static bool trans_LAT(DisasContext *ctx, arg_LAT *a) tcg_gen_xor_tl(t1, t0, Rd); tcg_gen_mov_tl(Rd, t0); /* Rd = t0 */ gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */ - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(addr); - return true; } @@ -2573,9 +2354,6 @@ static bool trans_ROR(DisasContext *ctx, arg_ROR *a) /* update status register */ gen_rshift_ZNVSf(Rd); - - tcg_temp_free_i32(t0); - return true; } @@ -2600,9 +2378,6 @@ static bool trans_ASR(DisasContext *ctx, arg_ASR *a) /* update status register */ gen_rshift_ZNVSf(Rd); - - tcg_temp_free_i32(t0); - return true; } @@ -2620,10 +2395,6 @@ static bool trans_SWAP(DisasContext *ctx, arg_SWAP *a) tcg_gen_andi_tl(t1, Rd, 0xf0); tcg_gen_shri_tl(t1, t1, 4); tcg_gen_or_tl(Rd, t0, t1); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t0); - return true; } @@ -2634,15 +2405,11 @@ static bool trans_SWAP(DisasContext *ctx, arg_SWAP *a) static bool trans_SBI(DisasContext *ctx, arg_SBI *a) { TCGv data = tcg_temp_new_i32(); - TCGv port = tcg_const_i32(a->reg); + TCGv port = tcg_constant_i32(a->reg); gen_helper_inb(data, cpu_env, port); tcg_gen_ori_tl(data, data, 1 << a->bit); gen_helper_outb(cpu_env, port, data); - - tcg_temp_free_i32(port); - tcg_temp_free_i32(data); - return true; } @@ -2653,15 +2420,11 @@ static bool trans_SBI(DisasContext *ctx, arg_SBI *a) static bool trans_CBI(DisasContext *ctx, arg_CBI *a) { TCGv data = tcg_temp_new_i32(); - TCGv port = tcg_const_i32(a->reg); + TCGv port = tcg_constant_i32(a->reg); gen_helper_inb(data, cpu_env, port); tcg_gen_andi_tl(data, data, ~(1 << a->bit)); gen_helper_outb(cpu_env, port, data); - - tcg_temp_free_i32(data); - tcg_temp_free_i32(port); - return true; } @@ -2689,9 +2452,6 @@ static bool trans_BLD(DisasContext *ctx, arg_BLD *a) tcg_gen_andi_tl(Rd, Rd, ~(1u << a->bit)); /* clear bit */ tcg_gen_shli_tl(t1, cpu_Tf, a->bit); /* create mask */ tcg_gen_or_tl(Rd, Rd, t1); - - tcg_temp_free_i32(t1); - return true; } @@ -2886,10 +2646,6 @@ static bool canonicalize_skip(DisasContext *ctx) ctx->skip_cond = TCG_COND_NE; break; } - if (ctx->free_skip_var0) { - tcg_temp_free(ctx->skip_var0); - ctx->free_skip_var0 = false; - } ctx->skip_var0 = cpu_skip; return true; } @@ -2944,7 +2700,6 @@ static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) * This ensures that cpu_skip is non-zero after the label * if and only if the skipped insn itself sets a skip. 
*/ - ctx->free_skip_var0 = true; ctx->skip_var0 = tcg_temp_new(); tcg_gen_mov_tl(ctx->skip_var0, cpu_skip); tcg_gen_movi_tl(cpu_skip, 0); @@ -2956,10 +2711,6 @@ static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) ctx->skip_var1, skip_label); ctx->skip_var1 = NULL; } - if (ctx->free_skip_var0) { - tcg_temp_free(ctx->skip_var0); - ctx->free_skip_var0 = false; - } ctx->skip_cond = TCG_COND_NEVER; ctx->skip_var0 = NULL; } @@ -3049,7 +2800,7 @@ static const TranslatorOps avr_tr_ops = { .disas_log = avr_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc = { }; diff --git a/target/cris/cpu-param.h b/target/cris/cpu-param.h index 12ec22d8df..b31b742c0d 100644 --- a/target/cris/cpu-param.h +++ b/target/cris/cpu-param.h @@ -12,6 +12,5 @@ #define TARGET_PAGE_BITS 13 #define TARGET_PHYS_ADDR_SPACE_BITS 32 #define TARGET_VIRT_ADDR_SPACE_BITS 32 -#define NB_MMU_MODES 2 #endif diff --git a/target/cris/cpu.h b/target/cris/cpu.h index e6776f25b1..71fa1f96e0 100644 --- a/target/cris/cpu.h +++ b/target/cris/cpu.h @@ -193,12 +193,11 @@ bool cris_cpu_exec_interrupt(CPUState *cpu, int int_req); bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); +hwaddr cris_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); #endif void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags); -hwaddr cris_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); - int crisv10_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int cris_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int cris_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); diff --git a/target/cris/gdbstub.c b/target/cris/gdbstub.c index 2418d575b1..25c0ca33a5 100644 --- a/target/cris/gdbstub.c +++ b/target/cris/gdbstub.c @@ -19,7 +19,7 @@ */ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" int crisv10_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { diff --git a/target/cris/translate.c b/target/cris/translate.c index fbc3fd5865..b2beb9964d 100644 --- a/target/cris/translate.c +++ b/target/cris/translate.c @@ -175,11 +175,7 @@ static const int preg_sizes[] = { #define t_gen_mov_env_TN(member, tn) \ tcg_gen_st_tl(tn, cpu_env, offsetof(CPUCRISState, member)) #define t_gen_movi_env_TN(member, c) \ - do { \ - TCGv tc = tcg_const_tl(c); \ - t_gen_mov_env_TN(member, tc); \ - tcg_temp_free(tc); \ - } while (0) + t_gen_mov_env_TN(member, tcg_constant_tl(c)) static inline void t_gen_mov_TN_preg(TCGv tn, int r) { @@ -269,9 +265,7 @@ static void cris_lock_irq(DisasContext *dc) static inline void t_gen_raise_exception(uint32_t index) { - TCGv_i32 tmp = tcg_const_i32(index); - gen_helper_raise_exception(cpu_env, tmp); - tcg_temp_free_i32(tmp); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(index)); } static void t_gen_lsl(TCGv d, TCGv a, TCGv b) @@ -279,15 +273,13 @@ static void t_gen_lsl(TCGv d, TCGv a, TCGv b) TCGv t0, t_31; t0 = tcg_temp_new(); - t_31 = tcg_const_tl(31); + t_31 = tcg_constant_tl(31); tcg_gen_shl_tl(d, a, b); tcg_gen_sub_tl(t0, t_31, b); tcg_gen_sar_tl(t0, t0, t_31); tcg_gen_and_tl(t0, t0, d); tcg_gen_xor_tl(d, d, t0); - tcg_temp_free(t0); - tcg_temp_free(t_31); } static void t_gen_lsr(TCGv d, TCGv a, TCGv b) @@ -303,8 +295,6 @@ static void t_gen_lsr(TCGv d, TCGv a, TCGv b) 
tcg_gen_sar_tl(t0, t0, t_31); tcg_gen_and_tl(t0, t0, d); tcg_gen_xor_tl(d, d, t0); - tcg_temp_free(t0); - tcg_temp_free(t_31); } static void t_gen_asr(TCGv d, TCGv a, TCGv b) @@ -319,8 +309,6 @@ static void t_gen_asr(TCGv d, TCGv a, TCGv b) tcg_gen_sub_tl(t0, t_31, b); tcg_gen_sar_tl(t0, t0, t_31); tcg_gen_or_tl(d, d, t0); - tcg_temp_free(t0); - tcg_temp_free(t_31); } static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b) @@ -335,7 +323,6 @@ static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b) tcg_gen_shli_tl(d, a, 1); tcg_gen_sub_tl(t, d, b); tcg_gen_movcond_tl(TCG_COND_GEU, d, d, b, t, d); - tcg_temp_free(t); } static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs) @@ -353,7 +340,6 @@ static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs) tcg_gen_sari_tl(t, t, 31); tcg_gen_and_tl(t, t, b); tcg_gen_add_tl(d, d, t); - tcg_temp_free(t); } /* Extended arithmetics on CRIS. */ @@ -369,7 +355,6 @@ static inline void t_gen_add_flag(TCGv d, int flag) tcg_gen_shri_tl(c, c, flag); } tcg_gen_add_tl(d, d, c); - tcg_temp_free(c); } static inline void t_gen_addx_carry(DisasContext *dc, TCGv d) @@ -381,7 +366,6 @@ static inline void t_gen_addx_carry(DisasContext *dc, TCGv d) /* C flag is already at bit 0. */ tcg_gen_andi_tl(c, c, C_FLAG); tcg_gen_add_tl(d, d, c); - tcg_temp_free(c); } } @@ -394,7 +378,6 @@ static inline void t_gen_subx_carry(DisasContext *dc, TCGv d) /* C flag is already at bit 0. */ tcg_gen_andi_tl(c, c, C_FLAG); tcg_gen_sub_tl(d, d, c); - tcg_temp_free(c); } } @@ -414,8 +397,6 @@ static inline void t_gen_swapb(TCGv d, TCGv s) tcg_gen_shri_tl(t, org_s, 8); tcg_gen_andi_tl(t, t, 0x00ff00ff); tcg_gen_or_tl(d, d, t); - tcg_temp_free(t); - tcg_temp_free(org_s); } /* Swap the halfwords of the s operand. */ @@ -428,7 +409,6 @@ static inline void t_gen_swapw(TCGv d, TCGv s) tcg_gen_shli_tl(d, t, 16); tcg_gen_shri_tl(t, t, 16); tcg_gen_or_tl(d, d, t); - tcg_temp_free(t); } /* Reverse the within each byte. 
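
The conversions in this file all follow one pattern, visible in the t_gen_lsl() hunk above and the dec_addq()/dec_subq() hunks below: tcg_const_tl() allocated a fresh, writable temporary holding the constant, which the caller then had to release with tcg_temp_free(), whereas tcg_constant_tl() returns an interned, read-only value that stays valid for the whole translation block and must be neither written to nor freed. Side by side, as an illustrative sketch mirroring dec_addq():

    /* Old idiom: a mutable temp holding the constant, freed by hand. */
    TCGv c = tcg_const_tl(dc->op1);
    cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
    tcg_temp_free(c);

    /* New idiom: an interned constant; no free, but it must stay read-only. */
    TCGv c = tcg_constant_tl(dc->op1);
    cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
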
@@ -475,8 +455,6 @@ static void t_gen_swapr(TCGv d, TCGv s) tcg_gen_andi_tl(t, t, bitrev[i].mask); tcg_gen_or_tl(d, d, t); } - tcg_temp_free(t); - tcg_temp_free(org_s); } static bool use_goto_tb(DisasContext *dc, target_ulong dest) @@ -778,9 +756,6 @@ static void cris_alu(DisasContext *dc, int op, } tcg_gen_or_tl(d, d, tmp); } - if (tmp != d) { - tcg_temp_free(tmp); - } } static int arith_cc(DisasContext *dc) @@ -919,8 +894,6 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond) tcg_gen_shli_tl(cc, tmp, 2); tcg_gen_and_tl(cc, tmp, cc); tcg_gen_andi_tl(cc, cc, Z_FLAG); - - tcg_temp_free(tmp); } break; case CC_GE: @@ -959,9 +932,6 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond) tcg_gen_xori_tl(n, n, 2); tcg_gen_and_tl(cc, z, n); tcg_gen_andi_tl(cc, cc, 2); - - tcg_temp_free(n); - tcg_temp_free(z); } break; case CC_LE: @@ -980,9 +950,6 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond) tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]); tcg_gen_or_tl(cc, z, n); tcg_gen_andi_tl(cc, cc, 2); - - tcg_temp_free(n); - tcg_temp_free(z); } break; case CC_P: @@ -1279,10 +1246,9 @@ static int dec_addq(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_const_tl(dc->op1); + c = tcg_constant_tl(dc->op1); cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - tcg_temp_free(c); return 2; } static int dec_moveq(CPUCRISState *env, DisasContext *dc) @@ -1304,10 +1270,9 @@ static int dec_subq(CPUCRISState *env, DisasContext *dc) LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2); cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_const_tl(dc->op1); + c = tcg_constant_tl(dc->op1); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - tcg_temp_free(c); return 2; } static int dec_cmpq(CPUCRISState *env, DisasContext *dc) @@ -1320,10 +1285,9 @@ static int dec_cmpq(CPUCRISState *env, DisasContext *dc) LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2); cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_const_tl(imm); + c = tcg_constant_tl(imm); cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - tcg_temp_free(c); return 2; } static int dec_andq(CPUCRISState *env, DisasContext *dc) @@ -1336,10 +1300,9 @@ static int dec_andq(CPUCRISState *env, DisasContext *dc) LOG_DIS("andq %d, $r%d\n", imm, dc->op2); cris_cc_mask(dc, CC_MASK_NZ); - c = tcg_const_tl(imm); + c = tcg_constant_tl(imm); cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - tcg_temp_free(c); return 2; } static int dec_orq(CPUCRISState *env, DisasContext *dc) @@ -1351,10 +1314,9 @@ static int dec_orq(CPUCRISState *env, DisasContext *dc) LOG_DIS("orq %d, $r%d\n", imm, dc->op2); cris_cc_mask(dc, CC_MASK_NZ); - c = tcg_const_tl(imm); + c = tcg_constant_tl(imm); cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - tcg_temp_free(c); return 2; } static int dec_btstq(CPUCRISState *env, DisasContext *dc) @@ -1364,11 +1326,10 @@ static int dec_btstq(CPUCRISState *env, DisasContext *dc) LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2); cris_cc_mask(dc, CC_MASK_NZ); - c = tcg_const_tl(dc->op1); + c = tcg_constant_tl(dc->op1); cris_evaluate_flags(dc); gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2], c, cpu_PR[PR_CCS]); - tcg_temp_free(c); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4); cris_update_cc_op(dc, CC_OP_FLAGS, 4); @@ -1437,7 +1398,6 @@ static int dec_move_r(CPUCRISState *env, DisasContext *dc) cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, size); - tcg_temp_free(t0); } return 2; } @@ -1467,14 +1427,6 @@ static inline void 
cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t) } } -static inline void cris_alu_free_temps(DisasContext *dc, int size, TCGv *t) -{ - if (size != 4) { - tcg_temp_free(t[0]); - tcg_temp_free(t[1]); - } -} - static int dec_and_r(CPUCRISState *env, DisasContext *dc) { TCGv t[2]; @@ -1488,7 +1440,6 @@ static int dec_and_r(CPUCRISState *env, DisasContext *dc) cris_alu_alloc_temps(dc, size, t); dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1501,7 +1452,6 @@ static int dec_lz_r(CPUCRISState *env, DisasContext *dc) t0 = tcg_temp_new(); dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0); cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1518,7 +1468,6 @@ static int dec_lsl_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); tcg_gen_andi_tl(t[1], t[1], 63); cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1535,7 +1484,6 @@ static int dec_lsr_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); tcg_gen_andi_tl(t[1], t[1], 63); cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1552,7 +1500,6 @@ static int dec_asr_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]); tcg_gen_andi_tl(t[1], t[1], 63); cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1568,7 +1515,6 @@ static int dec_muls_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]); cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1584,7 +1530,6 @@ static int dec_mulu_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1610,7 +1555,6 @@ static int dec_xor_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1621,10 +1565,9 @@ static int dec_bound_r(CPUCRISState *env, DisasContext *dc) LOG_DIS("bound.%c $r%u, $r%u\n", memsize_char(size), dc->op1, dc->op2); cris_cc_mask(dc, CC_MASK_NZ); - l0 = tcg_temp_local_new(); + l0 = tcg_temp_new(); dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0); cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4); - tcg_temp_free(l0); return 2; } @@ -1639,7 +1582,6 @@ static int dec_cmp_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1666,7 +1608,6 @@ static int dec_add_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1741,7 +1682,6 @@ static int dec_swap_r(CPUCRISState *env, DisasContext *dc) t_gen_swapr(t0, t0); } cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op1], cpu_R[dc->op1], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1755,7 +1695,6 @@ static int dec_or_r(CPUCRISState *env, DisasContext 
*dc) cris_alu_alloc_temps(dc, size, t); dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1768,7 +1707,6 @@ static int dec_addi_r(CPUCRISState *env, DisasContext *dc) t0 = tcg_temp_new(); tcg_gen_shli_tl(t0, cpu_R[dc->op2], dc->zzsize); tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0); - tcg_temp_free(t0); return 2; } @@ -1781,7 +1719,6 @@ static int dec_addi_acr(CPUCRISState *env, DisasContext *dc) t0 = tcg_temp_new(); tcg_gen_shli_tl(t0, cpu_R[dc->op2], dc->zzsize); tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0); - tcg_temp_free(t0); return 2; } @@ -1796,7 +1733,6 @@ static int dec_neg_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1825,7 +1761,6 @@ static int dec_sub_r(CPUCRISState *env, DisasContext *dc) cris_alu_alloc_temps(dc, size, t); dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1842,7 +1777,6 @@ static int dec_movu_r(CPUCRISState *env, DisasContext *dc) t0 = tcg_temp_new(); dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1861,7 +1795,6 @@ static int dec_movs_r(CPUCRISState *env, DisasContext *dc) t_gen_sext(t0, cpu_R[dc->op1], size); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op1], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1879,7 +1812,6 @@ static int dec_addu_r(CPUCRISState *env, DisasContext *dc) /* Size can only be qi or hi. 
*/ t_gen_zext(t0, cpu_R[dc->op1], size); cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1898,7 +1830,6 @@ static int dec_adds_r(CPUCRISState *env, DisasContext *dc) t_gen_sext(t0, cpu_R[dc->op1], size); cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1917,7 +1848,6 @@ static int dec_subu_r(CPUCRISState *env, DisasContext *dc) t_gen_zext(t0, cpu_R[dc->op1], size); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1936,7 +1866,6 @@ static int dec_subs_r(CPUCRISState *env, DisasContext *dc) t_gen_sext(t0, cpu_R[dc->op1], size); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - tcg_temp_free(t0); return 2; } @@ -2012,24 +1941,20 @@ static int dec_move_rs(CPUCRISState *env, DisasContext *dc) { TCGv c2, c1; LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2); - c1 = tcg_const_tl(dc->op1); - c2 = tcg_const_tl(dc->op2); + c1 = tcg_constant_tl(dc->op1); + c2 = tcg_constant_tl(dc->op2); cris_cc_mask(dc, 0); gen_helper_movl_sreg_reg(cpu_env, c2, c1); - tcg_temp_free(c1); - tcg_temp_free(c2); return 2; } static int dec_move_sr(CPUCRISState *env, DisasContext *dc) { TCGv c2, c1; LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1); - c1 = tcg_const_tl(dc->op1); - c2 = tcg_const_tl(dc->op2); + c1 = tcg_constant_tl(dc->op1); + c2 = tcg_constant_tl(dc->op2); cris_cc_mask(dc, 0); gen_helper_movl_reg_sreg(cpu_env, c1, c2); - tcg_temp_free(c1); - tcg_temp_free(c2); return 2; } @@ -2049,7 +1974,6 @@ static int dec_move_rp(CPUCRISState *env, DisasContext *dc) tcg_gen_andi_tl(t[0], t[0], 0x39f); tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f); tcg_gen_or_tl(t[0], t[1], t[0]); - tcg_temp_free(t[1]); } } else { tcg_gen_mov_tl(t[0], cpu_R[dc->op1]); @@ -2060,7 +1984,6 @@ static int dec_move_rp(CPUCRISState *env, DisasContext *dc) cris_update_cc_op(dc, CC_OP_FLAGS, 4); dc->flags_uptodate = 1; } - tcg_temp_free(t[0]); return 2; } static int dec_move_pr(CPUCRISState *env, DisasContext *dc) @@ -2081,7 +2004,6 @@ static int dec_move_pr(CPUCRISState *env, DisasContext *dc) cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op1], cpu_R[dc->op1], t0, preg_sizes[dc->op2]); - tcg_temp_free(t0); } return 2; } @@ -2109,7 +2031,6 @@ static int dec_move_mr(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZ); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize); - tcg_temp_free(t0); } do_postinc(dc, memsize); return insn_len; @@ -2121,12 +2042,6 @@ static inline void cris_alu_m_alloc_temps(TCGv *t) t[1] = tcg_temp_new(); } -static inline void cris_alu_m_free_temps(TCGv *t) -{ - tcg_temp_free(t[0]); - tcg_temp_free(t[1]); -} - static int dec_movs_m(CPUCRISState *env, DisasContext *dc) { TCGv t[2]; @@ -2144,7 +2059,6 @@ static int dec_movs_m(CPUCRISState *env, DisasContext *dc) cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2165,7 +2079,6 @@ static int dec_addu_m(CPUCRISState *env, DisasContext *dc) cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2185,7 +2098,6 @@ static int dec_adds_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2205,7 +2117,6 @@ static int dec_subu_m(CPUCRISState *env, DisasContext *dc) 
cris_cc_mask(dc, CC_MASK_NZVC); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2225,7 +2136,6 @@ static int dec_subs_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2245,7 +2155,6 @@ static int dec_movu_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZ); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2264,7 +2173,6 @@ static int dec_cmpu_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2285,7 +2193,6 @@ static int dec_cmps_m(CPUCRISState *env, DisasContext *dc) cpu_R[dc->op2], cpu_R[dc->op2], t[1], memsize_zz(dc)); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2306,7 +2213,6 @@ static int dec_cmp_m(CPUCRISState *env, DisasContext *dc) cpu_R[dc->op2], cpu_R[dc->op2], t[1], memsize_zz(dc)); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2327,12 +2233,10 @@ static int dec_test_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZ); tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3); - c = tcg_const_tl(0); + c = tcg_constant_tl(0); cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[1], c, memsize_zz(dc)); - tcg_temp_free(c); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2351,7 +2255,6 @@ static int dec_and_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZ); cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc)); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2371,7 +2274,6 @@ static int dec_add_m(CPUCRISState *env, DisasContext *dc) cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc)); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2390,7 +2292,6 @@ static int dec_addo_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, 0); cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2404,14 +2305,12 @@ static int dec_bound_m(CPUCRISState *env, DisasContext *dc) dc->op1, dc->postinc ? 
"+]" : "]", dc->op2); - l[0] = tcg_temp_local_new(); - l[1] = tcg_temp_local_new(); + l[0] = tcg_temp_new(); + l[1] = tcg_temp_new(); insn_len = dec_prep_alu_m(env, dc, 0, memsize, l[0], l[1]); cris_cc_mask(dc, CC_MASK_NZ); cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4); do_postinc(dc, memsize); - tcg_temp_free(l[0]); - tcg_temp_free(l[1]); return insn_len; } @@ -2433,7 +2332,6 @@ static int dec_addc_mr(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4); do_postinc(dc, 4); - cris_alu_m_free_temps(t); return insn_len; } @@ -2452,7 +2350,6 @@ static int dec_sub_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2472,7 +2369,6 @@ static int dec_or_m(CPUCRISState *env, DisasContext *dc) cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc)); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2504,7 +2400,6 @@ static int dec_move_mp(CPUCRISState *env, DisasContext *dc) t_gen_mov_preg_TN(dc, dc->op2, t[1]); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2527,7 +2422,6 @@ static int dec_move_pm(CPUCRISState *env, DisasContext *dc) t_gen_mov_TN_preg(t0, dc->op2); cris_flush_cc_state(dc); gen_store(dc, cpu_R[dc->op1], t0, memsize); - tcg_temp_free(t0); cris_cc_mask(dc, 0); if (dc->postinc) { @@ -2562,17 +2456,14 @@ static int dec_movem_mr(CPUCRISState *env, DisasContext *dc) } else { tmp32 = NULL; } - tcg_temp_free(addr); for (i = 0; i < (nr >> 1); i++) { tcg_gen_extrl_i64_i32(cpu_R[i * 2], tmp[i]); tcg_gen_shri_i64(tmp[i], tmp[i], 32); tcg_gen_extrl_i64_i32(cpu_R[i * 2 + 1], tmp[i]); - tcg_temp_free_i64(tmp[i]); } if (nr & 1) { tcg_gen_mov_tl(cpu_R[dc->op2], tmp32); - tcg_temp_free(tmp32); } /* writeback the updated pointer value. */ @@ -2610,8 +2501,6 @@ static int dec_movem_rm(CPUCRISState *env, DisasContext *dc) tcg_gen_mov_tl(cpu_R[dc->op1], addr); } cris_cc_mask(dc, 0); - tcg_temp_free(tmp); - tcg_temp_free(addr); return 2; } @@ -2689,9 +2578,8 @@ static int dec_jas_r(CPUCRISState *env, DisasContext *dc) if (dc->op2 > 15) { abort(); } - c = tcg_const_tl(dc->pc + 4); + c = tcg_constant_tl(dc->pc + 4); t_gen_mov_preg_TN(dc, dc->op2, c); - tcg_temp_free(c); cris_prepare_jmp(dc, JMP_INDIRECT); return 2; @@ -2706,10 +2594,9 @@ static int dec_jas_im(CPUCRISState *env, DisasContext *dc) LOG_DIS("jas 0x%x\n", imm); cris_cc_mask(dc, 0); - c = tcg_const_tl(dc->pc + 8); + c = tcg_constant_tl(dc->pc + 8); /* Store the return address in Pd. */ t_gen_mov_preg_TN(dc, dc->op2, c); - tcg_temp_free(c); dc->jmp_pc = imm; cris_prepare_jmp(dc, JMP_DIRECT); @@ -2725,10 +2612,9 @@ static int dec_jasc_im(CPUCRISState *env, DisasContext *dc) LOG_DIS("jasc 0x%x\n", imm); cris_cc_mask(dc, 0); - c = tcg_const_tl(dc->pc + 8 + 4); + c = tcg_constant_tl(dc->pc + 8 + 4); /* Store the return address in Pd. */ t_gen_mov_preg_TN(dc, dc->op2, c); - tcg_temp_free(c); dc->jmp_pc = imm; cris_prepare_jmp(dc, JMP_DIRECT); @@ -2742,9 +2628,8 @@ static int dec_jasc_r(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, 0); /* Store the return address in Pd. 
*/ tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]); - c = tcg_const_tl(dc->pc + 4 + 4); + c = tcg_constant_tl(dc->pc + 4 + 4); t_gen_mov_preg_TN(dc, dc->op2, c); - tcg_temp_free(c); cris_prepare_jmp(dc, JMP_INDIRECT); return 2; } @@ -2775,10 +2660,9 @@ static int dec_bas_im(CPUCRISState *env, DisasContext *dc) LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2); cris_cc_mask(dc, 0); - c = tcg_const_tl(dc->pc + 8); + c = tcg_constant_tl(dc->pc + 8); /* Store the return address in Pd. */ t_gen_mov_preg_TN(dc, dc->op2, c); - tcg_temp_free(c); dc->jmp_pc = dc->pc + simm; cris_prepare_jmp(dc, JMP_DIRECT); @@ -2793,10 +2677,9 @@ static int dec_basc_im(CPUCRISState *env, DisasContext *dc) LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2); cris_cc_mask(dc, 0); - c = tcg_const_tl(dc->pc + 12); + c = tcg_constant_tl(dc->pc + 12); /* Store the return address in Pd. */ t_gen_mov_preg_TN(dc, dc->op2, c); - tcg_temp_free(c); dc->jmp_pc = dc->pc + simm; cris_prepare_jmp(dc, JMP_DIRECT); @@ -2808,7 +2691,7 @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, 0); if (dc->op2 == 15) { - tcg_gen_st_i32(tcg_const_i32(1), cpu_env, + tcg_gen_st_i32(tcg_constant_i32(1), cpu_env, -offsetof(CRISCPU, env) + offsetof(CPUState, halted)); tcg_gen_movi_tl(env_pc, dc->pc + 2); t_gen_raise_exception(EXCP_HLT); @@ -3286,7 +3169,7 @@ static const TranslatorOps cris_tr_ops = { .disas_log = cris_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc index f500e93447..b7b0517982 100644 --- a/target/cris/translate_v10.c.inc +++ b/target/cris/translate_v10.c.inc @@ -68,9 +68,9 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val, unsigned int size, int mem_index) { TCGLabel *l1 = gen_new_label(); - TCGv taddr = tcg_temp_local_new(); - TCGv tval = tcg_temp_local_new(); - TCGv t1 = tcg_temp_local_new(); + TCGv taddr = tcg_temp_new(); + TCGv tval = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); dc->postinc = 0; cris_evaluate_flags(dc); @@ -80,19 +80,12 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val, /* Store only if F flag isn't set */ tcg_gen_andi_tl(t1, cpu_PR[PR_CCS], F_FLAG_V10); tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - if (size == 1) { - tcg_gen_qemu_st8(tval, taddr, mem_index); - } else if (size == 2) { - tcg_gen_qemu_st16(tval, taddr, mem_index); - } else { - tcg_gen_qemu_st32(tval, taddr, mem_index); - } + + tcg_gen_qemu_st_tl(tval, taddr, mem_index, ctz32(size) | MO_TE); + gen_set_label(l1); tcg_gen_shri_tl(t1, t1, 1); /* shift F to P position */ tcg_gen_or_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], t1); /*P=F*/ - tcg_temp_free(t1); - tcg_temp_free(tval); - tcg_temp_free(taddr); } static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val, @@ -112,13 +105,7 @@ static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val, return; } - if (size == 1) { - tcg_gen_qemu_st8(val, addr, mem_index); - } else if (size == 2) { - tcg_gen_qemu_st16(val, addr, mem_index); - } else { - tcg_gen_qemu_st32(val, addr, mem_index); - } + tcg_gen_qemu_st_tl(val, addr, mem_index, ctz32(size) | MO_TE); } @@ -215,7 +202,6 @@ static int dec10_prep_move_m(CPUCRISState *env, DisasContext *dc, else t_gen_zext(dst, dst, memsize); insn_len += crisv10_post_memaddr(dc, memsize); - tcg_temp_free(addr); } if (dc->mode == 
CRISV10_MODE_INDIRECT && (dc->tb_flags & PFIX_FLAG)) { @@ -255,37 +241,33 @@ static unsigned int dec10_quick_imm(DisasContext *dc) LOG_DIS("moveq %d, $r%d\n", simm, dc->dst); cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_const_tl(simm); + c = tcg_constant_tl(simm); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_CMPQ: LOG_DIS("cmpq %d, $r%d\n", simm, dc->dst); cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_const_tl(simm); + c = tcg_constant_tl(simm); cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_ADDQ: LOG_DIS("addq %d, $r%d\n", imm, dc->dst); cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_const_tl(imm); + c = tcg_constant_tl(imm); cris_alu(dc, CC_OP_ADD, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_ANDQ: LOG_DIS("andq %d, $r%d\n", simm, dc->dst); cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_const_tl(simm); + c = tcg_constant_tl(simm); cris_alu(dc, CC_OP_AND, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_ASHQ: LOG_DIS("ashq %d, $r%d\n", simm, dc->dst); @@ -293,7 +275,7 @@ static unsigned int dec10_quick_imm(DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); op = imm & (1 << 5); imm &= 0x1f; - c = tcg_const_tl(imm); + c = tcg_constant_tl(imm); if (op) { cris_alu(dc, CC_OP_ASR, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); @@ -303,7 +285,6 @@ static unsigned int dec10_quick_imm(DisasContext *dc) gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->dst], c, cpu_PR[PR_CCS]); } - tcg_temp_free(c); break; case CRISV10_QIMM_LSHQ: LOG_DIS("lshq %d, $r%d\n", simm, dc->dst); @@ -314,28 +295,25 @@ static unsigned int dec10_quick_imm(DisasContext *dc) } imm &= 0x1f; cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_const_tl(imm); + c = tcg_constant_tl(imm); cris_alu(dc, op, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_SUBQ: LOG_DIS("subq %d, $r%d\n", imm, dc->dst); cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_const_tl(imm); + c = tcg_constant_tl(imm); cris_alu(dc, CC_OP_SUB, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_ORQ: LOG_DIS("andq %d, $r%d\n", simm, dc->dst); cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_const_tl(simm); + c = tcg_constant_tl(simm); cris_alu(dc, CC_OP_OR, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_BCC_R0: @@ -426,18 +404,15 @@ static void dec10_reg_alu(DisasContext *dc, int op, int size, int sext) assert(dc->dst != 15); cris_alu(dc, op, cpu_R[dc->dst], t[0], t[1], size); - tcg_temp_free(t[0]); - tcg_temp_free(t[1]); } static void dec10_reg_bound(DisasContext *dc, int size) { TCGv t; - t = tcg_temp_local_new(); + t = tcg_temp_new(); t_gen_zext(t, cpu_R[dc->src], size); cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[dc->dst], t, 4); - tcg_temp_free(t); } static void dec10_reg_mul(DisasContext *dc, int size, int sext) @@ -451,9 +426,6 @@ static void dec10_reg_mul(DisasContext *dc, int size, int sext) t[0], t[1], cpu_R[dc->dst], cpu_R[dc->src]); cris_alu(dc, op, cpu_R[dc->dst], t[0], t[1], 4); - - tcg_temp_free(t[0]); - tcg_temp_free(t[1]); } @@ -472,7 +444,6 @@ static void dec10_reg_movs(DisasContext *dc) t_gen_zext(t, cpu_R[dc->src], size); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t, 4); - tcg_temp_free(t); } static void dec10_reg_alux(DisasContext *dc, int op) @@ -490,7 +461,6 @@ static void dec10_reg_alux(DisasContext *dc, int op) t_gen_zext(t, cpu_R[dc->src], size); cris_alu(dc, op, 
cpu_R[dc->dst], cpu_R[dc->dst], t, 4); - tcg_temp_free(t); } static void dec10_reg_mov_pr(DisasContext *dc) @@ -522,7 +492,6 @@ static void dec10_reg_abs(DisasContext *dc) tcg_gen_sub_tl(t0, cpu_R[dc->dst], t0); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t0, 4); - tcg_temp_free(t0); } static void dec10_reg_swap(DisasContext *dc) @@ -543,7 +512,6 @@ static void dec10_reg_swap(DisasContext *dc) if (dc->dst & 1) t_gen_swapr(t0, t0); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->src], cpu_R[dc->src], t0, 4); - tcg_temp_free(t0); } static void dec10_reg_scc(DisasContext *dc) @@ -623,7 +591,6 @@ static unsigned int dec10_reg(DisasContext *dc) LOG_DIS("addi r%d r%d size=%d\n", dc->src, dc->dst, dc->size); tcg_gen_shli_tl(t, cpu_R[dc->dst], dc->size & 3); tcg_gen_add_tl(cpu_R[dc->src], cpu_R[dc->src], t); - tcg_temp_free(t); break; case CRISV10_REG_LSL: LOG_DIS("lsl $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); @@ -669,7 +636,6 @@ static unsigned int dec10_reg(DisasContext *dc) } else { tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_R[dc->src], t); } - tcg_temp_free(t); cris_set_prefix(dc); break; @@ -778,7 +744,6 @@ static unsigned int dec10_ind_move_m_r(CPUCRISState *env, DisasContext *dc, dc->delayed_branch = 1; } - tcg_temp_free(t); return insn_len; } @@ -792,7 +757,6 @@ static unsigned int dec10_ind_move_r_m(DisasContext *dc, unsigned int size) crisv10_prepare_memaddr(dc, addr, size); gen_store_v10(dc, addr, cpu_R[dc->dst], size); insn_len += crisv10_post_memaddr(dc, size); - tcg_temp_free(addr); return insn_len; } @@ -800,12 +764,11 @@ static unsigned int dec10_ind_move_r_m(DisasContext *dc, unsigned int size) static unsigned int dec10_ind_move_m_pr(CPUCRISState *env, DisasContext *dc) { unsigned int insn_len = 2, rd = dc->dst; - TCGv t, addr; + TCGv t; LOG_DIS("move.%d $p%d, [$r%d]\n", dc->size, dc->dst, dc->src); cris_lock_irq(dc); - addr = tcg_temp_new(); t = tcg_temp_new(); insn_len += dec10_prep_move_m(env, dc, 0, 4, t); if (rd == 15) { @@ -816,8 +779,6 @@ static unsigned int dec10_ind_move_m_pr(CPUCRISState *env, DisasContext *dc) tcg_gen_mov_tl(cpu_PR[rd], t); dc->cpustate_changed = 1; } - tcg_temp_free(addr); - tcg_temp_free(t); return insn_len; } @@ -835,12 +796,10 @@ static unsigned int dec10_ind_move_pr_m(DisasContext *dc) cris_evaluate_flags(dc); tcg_gen_andi_tl(t0, cpu_PR[PR_CCS], ~PFIX_FLAG); gen_store_v10(dc, addr, t0, size); - tcg_temp_free(t0); } else { gen_store_v10(dc, addr, cpu_PR[dc->dst], size); } insn_len += crisv10_post_memaddr(dc, size); - tcg_temp_free(addr); cris_lock_irq(dc); return insn_len; @@ -874,8 +833,6 @@ static void dec10_movem_r_m(DisasContext *dc) if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) { tcg_gen_mov_tl(cpu_R[dc->src], addr); } - tcg_temp_free(addr); - tcg_temp_free(t0); } static void dec10_movem_m_r(DisasContext *dc) @@ -902,8 +859,6 @@ static void dec10_movem_m_r(DisasContext *dc) if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) { tcg_gen_mov_tl(cpu_R[dc->src], addr); } - tcg_temp_free(addr); - tcg_temp_free(t0); } static int dec10_ind_alu(CPUCRISState *env, DisasContext *dc, @@ -922,9 +877,6 @@ static int dec10_ind_alu(CPUCRISState *env, DisasContext *dc, dc->delayed_branch = 1; return insn_len; } - - cris_alu_m_free_temps(t); - return insn_len; } @@ -935,7 +887,7 @@ static int dec10_ind_bound(CPUCRISState *env, DisasContext *dc, int rd = dc->dst; TCGv t; - t = tcg_temp_local_new(); + t = tcg_temp_new(); insn_len += dec10_prep_move_m(env, dc, 0, size, t); cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[rd], t, 4); if (dc->dst == 15) { @@ -944,7 +896,6 
@@ static int dec10_ind_bound(CPUCRISState *env, DisasContext *dc, dc->delayed_branch = 1; } - tcg_temp_free(t); return insn_len; } @@ -969,7 +920,6 @@ static int dec10_alux_m(CPUCRISState *env, DisasContext *dc, int op) dc->delayed_branch = 1; } - tcg_temp_free(t); return insn_len; } @@ -1054,11 +1004,9 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) cris_alu_m_alloc_temps(t); insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]); tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3); - c = tcg_const_tl(0); + c = tcg_constant_tl(0); cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst], t[0], c, size); - tcg_temp_free(c); - cris_alu_m_free_temps(t); break; case CRISV10_IND_ADD: LOG_DIS("add size=%d op=%d %d\n", size, dc->src, dc->dst); @@ -1153,9 +1101,8 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) if (dc->mode == CRISV10_MODE_AUTOINC) insn_len += size; - c = tcg_const_tl(dc->pc + insn_len); + c = tcg_constant_tl(dc->pc + insn_len); t_gen_mov_preg_TN(dc, dc->dst, c); - tcg_temp_free(c); dc->jmp_pc = imm; cris_prepare_jmp(dc, JMP_DIRECT); dc->delayed_branch--; /* v10 has no dslot here. */ @@ -1164,9 +1111,8 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) LOG_DIS("break %d\n", dc->src); cris_evaluate_flags(dc); tcg_gen_movi_tl(env_pc, dc->pc + 2); - c = tcg_const_tl(dc->src + 2); + c = tcg_constant_tl(dc->src + 2); t_gen_mov_env_TN(trap_vector, c); - tcg_temp_free(c); t_gen_raise_exception(EXCP_BREAK); dc->base.is_jmp = DISAS_NORETURN; return insn_len; @@ -1174,15 +1120,13 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) LOG_DIS("%d: jump.%d %d r%d r%d\n", __LINE__, size, dc->opcode, dc->src, dc->dst); t[0] = tcg_temp_new(); - c = tcg_const_tl(dc->pc + insn_len); + c = tcg_constant_tl(dc->pc + insn_len); t_gen_mov_preg_TN(dc, dc->dst, c); - tcg_temp_free(c); crisv10_prepare_memaddr(dc, t[0], size); gen_load(dc, env_btarget, t[0], 4, 0); insn_len += crisv10_post_memaddr(dc, size); cris_prepare_jmp(dc, JMP_INDIRECT); dc->delayed_branch--; /* v10 has no dslot here. */ - tcg_temp_free(t[0]); } break; @@ -1199,9 +1143,8 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) LOG_DIS("jmp pc=%x opcode=%d r%d r%d\n", dc->pc, dc->opcode, dc->dst, dc->src); tcg_gen_mov_tl(env_btarget, cpu_R[dc->src]); - c = tcg_const_tl(dc->pc + insn_len); + c = tcg_constant_tl(dc->pc + insn_len); t_gen_mov_preg_TN(dc, dc->dst, c); - tcg_temp_free(c); cris_prepare_jmp(dc, JMP_INDIRECT); dc->delayed_branch--; /* v10 has no dslot here. */ break; diff --git a/target/hexagon/README b/target/hexagon/README index 6cb5affddb..43811178e9 100644 --- a/target/hexagon/README +++ b/target/hexagon/README @@ -4,10 +4,10 @@ is a wide vector coprocessor designed for high performance computer vision, image processing, machine learning, and other workloads. The following versions of the Hexagon core are supported - Scalar core: v67 - https://developer.qualcomm.com/downloads/qualcomm-hexagon-v67-programmer-s-reference-manual - HVX extension: v66 - https://developer.qualcomm.com/downloads/qualcomm-hexagon-v66-hvx-programmer-s-reference-manual + Scalar core: v73 + https://developer.qualcomm.com/downloads/qualcomm-hexagon-v73-programmers-reference-manual-rev-aa + HVX extension: v73 + https://developer.qualcomm.com/downloads/qualcomm-hexagon-v73-hvx-programmers-reference-manual-rev-aa We presented an overview of the project at the 2019 KVM Forum. 
https://kvmforum2019.sched.com/event/Tmwc/qemu-hexagon-automatic-translation-of-the-isa-manual-pseudcode-to-tiny-code-instructions-of-a-vliw-architecture-niccolo-izzo-revng-taylor-simpson-qualcomm-innovation-center @@ -52,6 +52,7 @@ header files in /target/hexagon gen_tcg_func_table.py -> tcg_func_table_generated.c.inc gen_helper_funcs.py -> helper_funcs_generated.c.inc gen_idef_parser_funcs.py -> idef_parser_input.h + gen_analyze_funcs.py -> analyze_funcs_generated.c.inc QEMU helper functions have 3 parts DEF_HELPER declaration indicates the signature of the helper @@ -81,14 +82,12 @@ tcg_funcs_generated.c.inc Insn *insn, Packet *pkt) { - TCGv RdV = tcg_temp_local_new(); + TCGv RdV = tcg_temp_new(); const int RdN = insn->regno[0]; TCGv RsV = hex_gpr[insn->regno[1]]; TCGv RtV = hex_gpr[insn->regno[2]]; gen_helper_A2_add(RdV, cpu_env, RsV, RtV); - gen_log_reg_write(RdN, RdV); - ctx_log_reg_write(ctx, RdN); - tcg_temp_free(RdV); + gen_log_reg_write(ctx, RdN, RdV); } helper_funcs_generated.c.inc @@ -137,35 +136,25 @@ For HVX vectors, the generator behaves slightly differently. The wide vectors won't fit in a TCGv or TCGv_i64, so we use TCGv_ptr variables to pass the address to helper functions. Here's an example for an HVX vector-add-word instruction. - static void generate_V6_vaddw( - CPUHexagonState *env, - DisasContext *ctx, - Insn *insn, - Packet *pkt) + static void generate_V6_vaddw(DisasContext *ctx) { + Insn *insn __attribute__((unused)) = ctx->insn; const int VdN = insn->regno[0]; const intptr_t VdV_off = ctx_future_vreg_off(ctx, VdN, 1, true); - TCGv_ptr VdV = tcg_temp_local_new_ptr(); + TCGv_ptr VdV = tcg_temp_new_ptr(); tcg_gen_addi_ptr(VdV, cpu_env, VdV_off); const int VuN = insn->regno[1]; const intptr_t VuV_off = vreg_src_off(ctx, VuN); - TCGv_ptr VuV = tcg_temp_local_new_ptr(); + TCGv_ptr VuV = tcg_temp_new_ptr(); const int VvN = insn->regno[2]; const intptr_t VvV_off = vreg_src_off(ctx, VvN); - TCGv_ptr VvV = tcg_temp_local_new_ptr(); + TCGv_ptr VvV = tcg_temp_new_ptr(); tcg_gen_addi_ptr(VuV, cpu_env, VuV_off); tcg_gen_addi_ptr(VvV, cpu_env, VvV_off); - TCGv slot = tcg_constant_tl(insn->slot); - gen_helper_V6_vaddw(cpu_env, VdV, VuV, VvV, slot); - tcg_temp_free(slot); - gen_log_vreg_write(ctx, VdV_off, VdN, EXT_DFL, insn->slot, false); - ctx_log_vreg_write(ctx, VdN, EXT_DFL, false); - tcg_temp_free_ptr(VdV); - tcg_temp_free_ptr(VuV); - tcg_temp_free_ptr(VvV); + gen_helper_V6_vaddw(cpu_env, VdV, VuV, VvV); } Notice that we also generate a variable named _off for each operand of @@ -178,12 +167,9 @@ functions from tcg-op-gvec.h. Here's the override for this instruction. Finally, we notice that the override doesn't use the TCGv_ptr variables, so we don't generate them when an override is present. Here is what we generate when the override is present. - static void generate_V6_vaddw( - CPUHexagonState *env, - DisasContext *ctx, - Insn *insn, - Packet *pkt) + static void generate_V6_vaddw(DisasContext *ctx) { + Insn *insn __attribute__((unused)) = ctx->insn; const int VdN = insn->regno[0]; const intptr_t VdV_off = ctx_future_vreg_off(ctx, VdN, 1, true); @@ -194,10 +180,14 @@ when the override is present. const intptr_t VvV_off = vreg_src_off(ctx, VvN); fGEN_TCG_V6_vaddw({ fHIDE(int i;) fVFOREACH(32, i) { VdV.w[i] = VuV.w[i] + VvV.w[i] ; } }); - gen_log_vreg_write(ctx, VdV_off, VdN, EXT_DFL, insn->slot, false); - ctx_log_vreg_write(ctx, VdN, EXT_DFL, false); } +We also generate an analyze_ function for each instruction.
Currently, +these functions record the writes to registers by calling ctx_log_*. During +gen_start_packet, we invoke the analyze_ function for each instruction in +the packet, and we mark the implicit writes. After the analysis is performed, +we initialize the result register for each of the predicated assignments. + In addition to instruction semantics, we use a generator to create the decode tree. This generation is also a two step process. The first step is to run target/hexagon/gen_dectree_import.c to produce @@ -282,10 +272,8 @@ For Hexagon Vector eXtensions (HVX), the following fields are used VRegs Vector registers future_VRegs Registers to be stored during packet commit tmp_VRegs Temporary registers *not* stored during commit - VRegs_updated Mask of predicated vector writes QRegs Q (vector predicate) registers future_QRegs Registers to be stored during packet commit - QRegs_updated Mask of predicated vector writes *** Debugging *** @@ -316,4 +304,4 @@ Here are some handy places to set breakpoints At the start of execution of a packet for a given PC br helper_debug_start_packet if env->gpr[41] == 0xdeadbeef At the end of execution of a packet for a given PC - br helper_debug_commit_end if env->this_PC == 0xdeadbeef + br helper_debug_commit_end if this_PC == 0xdeadbeef diff --git a/target/hexagon/arch.c b/target/hexagon/arch.c index da79b41c4d..d053d68487 100644 --- a/target/hexagon/arch.c +++ b/target/hexagon/arch.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -224,6 +224,7 @@ void arch_fpop_start(CPUHexagonState *env) void arch_fpop_end(CPUHexagonState *env) { + const bool pkt_need_commit = true; int flags = get_float_exception_flags(&env->fp_status); if (flags != 0) { SOFTFLOAT_TEST_FLAG(float_flag_inexact, FPINPF, FPINPE); diff --git a/target/hexagon/attribs_def.h.inc b/target/hexagon/attribs_def.h.inc index 5d2a102c18..21d457fa4a 100644 --- a/target/hexagon/attribs_def.h.inc +++ b/target/hexagon/attribs_def.h.inc @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -44,6 +44,7 @@ DEF_ATTRIB(MEMSIZE_1B, "Memory width is 1 byte", "", "") DEF_ATTRIB(MEMSIZE_2B, "Memory width is 2 bytes", "", "") DEF_ATTRIB(MEMSIZE_4B, "Memory width is 4 bytes", "", "") DEF_ATTRIB(MEMSIZE_8B, "Memory width is 8 bytes", "", "") +DEF_ATTRIB(SCALAR_LOAD, "Load is scalar", "", "") DEF_ATTRIB(SCALAR_STORE, "Store is scalar", "", "") DEF_ATTRIB(REGWRSIZE_1B, "Memory width is 1 byte", "", "") DEF_ATTRIB(REGWRSIZE_2B, "Memory width is 2 bytes", "", "") @@ -51,6 +52,12 @@ DEF_ATTRIB(REGWRSIZE_4B, "Memory width is 4 bytes", "", "") DEF_ATTRIB(REGWRSIZE_8B, "Memory width is 8 bytes", "", "") DEF_ATTRIB(MEMLIKE, "Memory-like instruction", "", "") DEF_ATTRIB(MEMLIKE_PACKET_RULES, "follows Memory-like packet rules", "", "") +DEF_ATTRIB(RELEASE, "Releases a lock", "", "") +DEF_ATTRIB(ACQUIRE, "Acquires a lock", "", "") + +DEF_ATTRIB(RLS_INNER, "Store release inner visibility", "", "") +DEF_ATTRIB(RLS_ALL_THREAD, "Store release among all threads", "", "") +DEF_ATTRIB(RLS_SAME_THREAD, "Store release with the same thread", "", "") /* V6 Vector attributes */ DEF_ATTRIB(CVI, "Executes on the HVX extension", "", "") @@ -62,23 +69,27 @@ DEF_ATTRIB(CVI_VP_VS, "Double vector permute/shift insn executes on HVX", "", "") DEF_ATTRIB(CVI_VX, "Multiply instruction executes on HVX", "", "") DEF_ATTRIB(CVI_VX_DV, "Double vector multiply insn executes on HVX", "", "") DEF_ATTRIB(CVI_VS, "Shift instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VS_3SRC, "This shift needs to borrow a source register", "", "") DEF_ATTRIB(CVI_VS_VX, "Permute/shift and multiply insn executes on HVX", "", "") DEF_ATTRIB(CVI_VA, "ALU instruction executes on HVX", "", "") DEF_ATTRIB(CVI_VA_DV, "Double vector ALU instruction executes on HVX", "", "") DEF_ATTRIB(CVI_4SLOT, "Consumes all the vector execution resources", "", "") DEF_ATTRIB(CVI_TMP, "Transient Memory Load not written to register", "", "") +DEF_ATTRIB(CVI_REMAP, "Register Renaming not written to register file", "", "") DEF_ATTRIB(CVI_GATHER, "CVI Gather operation", "", "") DEF_ATTRIB(CVI_SCATTER, "CVI Scatter operation", "", "") DEF_ATTRIB(CVI_SCATTER_RELEASE, "CVI Store Release for scatter", "", "") DEF_ATTRIB(CVI_TMP_DST, "CVI instruction that doesn't write a register", "", "") DEF_ATTRIB(CVI_SLOT23, "Can execute in slot 2 or slot 3 (HVX)", "", "") +DEF_ATTRIB(VTCM_ALLBANK_ACCESS, "Allocates in all VTCM schedulers.", "", "") /* Change-of-flow attributes */ DEF_ATTRIB(JUMP, "Jump-type instruction", "", "") DEF_ATTRIB(INDIRECT, "Absolute register jump", "", "") DEF_ATTRIB(CALL, "Function call instruction", "", "") DEF_ATTRIB(COF, "Change-of-flow instruction", "", "") +DEF_ATTRIB(HINTED_COF, "This instruction is a hinted change-of-flow", "", "") DEF_ATTRIB(CONDEXEC, "May be cancelled by a predicate", "", "") DEF_ATTRIB(DOTNEWVALUE, "Uses a register value generated in this pkt", "", "") DEF_ATTRIB(NEWCMPJUMP, "Compound compare and jump", "", "") @@ -101,6 +112,10 @@ DEF_ATTRIB(IMPLICIT_WRITES_P1, "Writes Predicate 1", "", "UREG.P1") DEF_ATTRIB(IMPLICIT_WRITES_P2, "Writes Predicate 2", "", "UREG.P2") DEF_ATTRIB(IMPLICIT_WRITES_P3, "May write Predicate 3", "", "UREG.P3") DEF_ATTRIB(IMPLICIT_READS_PC, "Reads the PC register", "", "") +DEF_ATTRIB(IMPLICIT_READS_P0, "Reads the P0 register", "", "") +DEF_ATTRIB(IMPLICIT_READS_P1, "Reads the P1 register", "", "") +DEF_ATTRIB(IMPLICIT_READS_P2, "Reads the P2 register", "", "")
+DEF_ATTRIB(IMPLICIT_READS_P3, "Reads the P3 register", "", "") DEF_ATTRIB(IMPLICIT_WRITES_USR, "May write USR", "", "") DEF_ATTRIB(WRITES_PRED_REG, "Writes a predicate register", "", "") DEF_ATTRIB(COMMUTES, "The operation is commutative", "", "") @@ -139,6 +154,8 @@ DEF_ATTRIB(L2FETCH, "Instruction is l2fetch type", "", "") DEF_ATTRIB(ICINVA, "icinva", "", "") DEF_ATTRIB(DCCLEANINVA, "dccleaninva", "", "") +DEF_ATTRIB(NO_INTRINSIC, "Don't generate an intrinsic", "", "") + /* Documentation Notes */ DEF_ATTRIB(NOTE_CONDITIONAL, "can be conditionally executed", "", "") DEF_ATTRIB(NOTE_NEWVAL_SLOT0, "New-value oprnd must execute on slot 0", "", "") @@ -147,7 +164,11 @@ DEF_ATTRIB(NOTE_NOPACKET, "solo instruction", "", "") DEF_ATTRIB(NOTE_AXOK, "May only be grouped with ALU32 or non-FP XTYPE.", "", "") DEF_ATTRIB(NOTE_LATEPRED, "The predicate cannot be used as a .new", "", "") DEF_ATTRIB(NOTE_NVSLOT0, "Can execute only in slot 0 (ST)", "", "") +DEF_ATTRIB(NOTE_NOVP, "Cannot be paired with a HVX permute instruction", "", "") +DEF_ATTRIB(NOTE_VA_UNARY, "Combined with HVX ALU op (must be unary)", "", "") +/* V6 MMVector Notes for Documentation */ +DEF_ATTRIB(NOTE_SHIFT_RESOURCE, "Uses the HVX shift resource.", "", "") /* Restrictions to make note of */ DEF_ATTRIB(RESTRICT_NOSLOT1_STORE, "Packet must not have slot 1 store", "", "") DEF_ATTRIB(RESTRICT_LATEPRED, "Predicate cannot be used as a .new.", "", "") diff --git a/target/hexagon/cpu-param.h b/target/hexagon/cpu-param.h index e8ed5468d9..71b4a9b83e 100644 --- a/target/hexagon/cpu-param.h +++ b/target/hexagon/cpu-param.h @@ -24,6 +24,4 @@ #define TARGET_PHYS_ADDR_SPACE_BITS 36 #define TARGET_VIRT_ADDR_SPACE_BITS 32 -#define NB_MMU_MODES 1 - #endif diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index 807037c586..f155936289 100644 --- a/target/hexagon/cpu.c +++ b/target/hexagon/cpu.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -23,9 +23,33 @@ #include "qapi/error.h" #include "hw/qdev-properties.h" #include "fpu/softfloat-helpers.h" +#include "tcg/tcg.h" +#include "exec/gdbstub.h" -static void hexagon_v67_cpu_init(Object *obj) +static void hexagon_v67_cpu_init(Object *obj) { } +static void hexagon_v68_cpu_init(Object *obj) { } +static void hexagon_v69_cpu_init(Object *obj) { } +static void hexagon_v71_cpu_init(Object *obj) { } +static void hexagon_v73_cpu_init(Object *obj) { } + +static void hexagon_cpu_list_entry(gpointer data, gpointer user_data) { + ObjectClass *oc = data; + char *name = g_strdup(object_class_get_name(oc)); + if (g_str_has_suffix(name, HEXAGON_CPU_TYPE_SUFFIX)) { + name[strlen(name) - strlen(HEXAGON_CPU_TYPE_SUFFIX)] = '\0'; + } + qemu_printf(" %s\n", name); + g_free(name); +} + +void hexagon_cpu_list(void) +{ + GSList *list; + list = object_class_get_list_sorted(TYPE_HEXAGON_CPU, false); + qemu_printf("Available CPUs:\n"); + g_slist_foreach(list, hexagon_cpu_list_entry, NULL); + g_slist_free(list); } static ObjectClass *hexagon_cpu_class_by_name(const char *cpu_model) @@ -51,6 +75,8 @@ static Property hexagon_lldb_compat_property = static Property hexagon_lldb_stack_adjust_property = DEFINE_PROP_UNSIGNED("lldb-stack-adjust", HexagonCPU, lldb_stack_adjust, 0, qdev_prop_uint32, target_ulong); +static Property hexagon_short_circuit_property = + DEFINE_PROP_BOOL("short-circuit", HexagonCPU, short_circuit, true); const char * const hexagon_regnames[TOTAL_PER_THREAD_REGS] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", @@ -263,7 +289,8 @@ static void hexagon_cpu_synchronize_from_tb(CPUState *cs, { HexagonCPU *cpu = HEXAGON_CPU(cs); CPUHexagonState *env = &cpu->env; - env->gpr[HEX_REG_PC] = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + env->gpr[HEX_REG_PC] = tb->pc; } static bool hexagon_cpu_has_work(CPUState *cs) @@ -313,6 +340,11 @@ static void hexagon_cpu_realize(DeviceState *dev, Error **errp) return; } + gdb_register_coprocessor(cs, hexagon_hvx_gdb_read_register, + hexagon_hvx_gdb_write_register, + NUM_VREGS + NUM_QREGS, + "hexagon-hvx.xml", 0); + qemu_init_vcpu(cs); cpu_reset(cs); @@ -326,6 +358,7 @@ static void hexagon_cpu_init(Object *obj) cpu_set_cpustate_pointers(cpu); qdev_property_add_static(DEVICE(obj), &hexagon_lldb_compat_property); qdev_property_add_static(DEVICE(obj), &hexagon_lldb_stack_adjust_property); + qdev_property_add_static(DEVICE(obj), &hexagon_short_circuit_property); } #include "hw/core/tcg-cpu-ops.h" @@ -356,8 +389,9 @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data) cc->get_pc = hexagon_cpu_get_pc; cc->gdb_read_register = hexagon_gdb_read_register; cc->gdb_write_register = hexagon_gdb_write_register; - cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS + NUM_VREGS + NUM_QREGS; + cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS; cc->gdb_stop_before_watchpoint = true; + cc->gdb_core_xml_file = "hexagon-core.xml"; cc->disas_set_info = hexagon_cpu_disas_set_info; cc->tcg_ops = &hexagon_tcg_ops; } @@ -380,6 +414,10 @@ static const TypeInfo hexagon_cpu_type_infos[] = { .class_init = hexagon_cpu_class_init, }, DEFINE_CPU(TYPE_HEXAGON_CPU_V67, hexagon_v67_cpu_init), + DEFINE_CPU(TYPE_HEXAGON_CPU_V68, hexagon_v68_cpu_init), + DEFINE_CPU(TYPE_HEXAGON_CPU_V69, hexagon_v69_cpu_init), + DEFINE_CPU(TYPE_HEXAGON_CPU_V71, hexagon_v71_cpu_init), + DEFINE_CPU(TYPE_HEXAGON_CPU_V73, hexagon_v73_cpu_init), }; 
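/*
 * A minimal sketch, not part of the patch: assuming the DEFINE_CPU()
 * macro defined earlier in this file follows the usual QEMU QOM
 * convention, each entry in the array above expands to roughly the
 * TypeInfo below, so the new v68-v73 models differ only in their
 * (currently empty) instance_init hooks.  Names and field values here
 * are illustrative.
 */
static const TypeInfo hexagon_v73_cpu_sketch = {
    .name          = TYPE_HEXAGON_CPU_V73,  /* typically "v73" + the CPU type suffix */
    .parent        = TYPE_HEXAGON_CPU,      /* shares hexagon_cpu_class_init() */
    .instance_init = hexagon_v73_cpu_init,  /* per-model setup would go here */
};
/*
 * DEFINE_TYPES() below registers the array with the QOM type system,
 * which is what lets hexagon_cpu_class_by_name() resolve "-cpu v73" and
 * lets hexagon_cpu_list() enumerate the models for "-cpu help".
 */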
DEFINE_TYPES(hexagon_cpu_type_infos) diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h index 34c0ae0a67..bfcb1057dd 100644 --- a/target/hexagon/cpu.h +++ b/target/hexagon/cpu.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -43,6 +43,13 @@ #define CPU_RESOLVING_TYPE TYPE_HEXAGON_CPU #define TYPE_HEXAGON_CPU_V67 HEXAGON_CPU_TYPE_NAME("v67") +#define TYPE_HEXAGON_CPU_V68 HEXAGON_CPU_TYPE_NAME("v68") +#define TYPE_HEXAGON_CPU_V69 HEXAGON_CPU_TYPE_NAME("v69") +#define TYPE_HEXAGON_CPU_V71 HEXAGON_CPU_TYPE_NAME("v71") +#define TYPE_HEXAGON_CPU_V73 HEXAGON_CPU_TYPE_NAME("v73") + +void hexagon_cpu_list(void); +#define cpu_list hexagon_cpu_list #define MMU_USER_IDX 0 @@ -78,28 +85,21 @@ typedef struct { typedef struct CPUArchState { target_ulong gpr[TOTAL_PER_THREAD_REGS]; target_ulong pred[NUM_PREGS]; - target_ulong branch_taken; /* For comparing with LLDB on target - see adjust_stack_ptrs function */ target_ulong last_pc_dumped; target_ulong stack_start; uint8_t slot_cancelled; - target_ulong new_value[TOTAL_PER_THREAD_REGS]; + target_ulong new_value_usr; /* * Only used when HEX_DEBUG is on, but unconditionally included * to reduce recompile time when turning HEX_DEBUG on/off. */ - target_ulong this_PC; target_ulong reg_written[TOTAL_PER_THREAD_REGS]; - target_ulong new_pred_value[NUM_PREGS]; - target_ulong pred_written; - MemLog mem_log_stores[STORES_MAX]; - target_ulong pkt_has_store_s1; - target_ulong dczero_addr; float_status fp_status; @@ -111,11 +111,8 @@ typedef struct CPUArchState { MMVector future_VRegs[VECTOR_TEMPS_MAX] QEMU_ALIGNED(16); MMVector tmp_VRegs[VECTOR_TEMPS_MAX] QEMU_ALIGNED(16); - VRegMask VRegs_updated; - MMQReg QRegs[NUM_QREGS] QEMU_ALIGNED(16); MMQReg future_QRegs[NUM_QREGS] QEMU_ALIGNED(16); - QRegMask QRegs_updated; /* Temporaries used within instructions */ MMVectorPair VuuV QEMU_ALIGNED(16); @@ -149,6 +146,7 @@ struct ArchCPU { bool lldb_compat; target_ulong lldb_stack_adjust; + bool short_circuit; }; #include "cpu_bits.h" diff --git a/target/hexagon/decode.c b/target/hexagon/decode.c index 041c8de751..946c55cc71 100644 --- a/target/hexagon/decode.c +++ b/target/hexagon/decode.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -797,7 +797,26 @@ static bool decode_parsebits_is_loopend(uint32_t encoding32) return bits == 0x2; } -static void +static bool has_valid_slot_assignment(Packet *pkt) +{ + int used_slots = 0; + for (int i = 0; i < pkt->num_insns; i++) { + int slot_mask; + Insn *insn = &pkt->insn[i]; + if (decode_opcode_ends_loop(insn->opcode)) { + /* We overload slot 0 for endloop. 
*/ + continue; + } + slot_mask = 1 << insn->slot; + if (used_slots & slot_mask) { + return false; + } + used_slots |= slot_mask; + } + return true; +} + +static bool decode_set_slot_number(Packet *pkt) { int slot; @@ -886,6 +905,8 @@ decode_set_slot_number(Packet *pkt) /* Then push it to slot0 */ pkt->insn[slot1_iidx].slot = 0; } + + return has_valid_slot_assignment(pkt); } /* @@ -961,8 +982,11 @@ int decode_packet(int max_words, const uint32_t *words, Packet *pkt, decode_apply_extenders(pkt); if (!disas_only) { decode_remove_extenders(pkt); + if (!decode_set_slot_number(pkt)) { + /* Invalid packet */ + return 0; + } } - decode_set_slot_number(pkt); decode_fill_newvalue_regno(pkt); if (pkt->pkt_has_hvx) { diff --git a/target/hexagon/dectree.py b/target/hexagon/dectree.py index 29467ec7d7..3b32948a04 100755 --- a/target/hexagon/dectree.py +++ b/target/hexagon/dectree.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -23,94 +23,109 @@ import re import sys import iset -encs = {tag : ''.join(reversed(iset.iset[tag]['enc'].replace(' ', ''))) - for tag in iset.tags if iset.iset[tag]['enc'] != 'MISSING ENCODING'} +encs = { + tag: "".join(reversed(iset.iset[tag]["enc"].replace(" ", ""))) + for tag in iset.tags + if iset.iset[tag]["enc"] != "MISSING ENCODING" +} -enc_classes = set([iset.iset[tag]['enc_class'] for tag in encs.keys()]) -subinsn_enc_classes = \ - set([enc_class for enc_class in enc_classes \ - if enc_class.startswith('SUBINSN_')]) -ext_enc_classes = \ - set([enc_class for enc_class in enc_classes \ - if enc_class not in ('NORMAL', '16BIT') and \ - not enc_class.startswith('SUBINSN_')]) +enc_classes = set([iset.iset[tag]["enc_class"] for tag in encs.keys()]) +subinsn_enc_classes = set( + [enc_class for enc_class in enc_classes if enc_class.startswith("SUBINSN_")] +) +ext_enc_classes = set( + [ + enc_class + for enc_class in enc_classes + if enc_class not in ("NORMAL", "16BIT") and not enc_class.startswith("SUBINSN_") + ] +) try: subinsn_groupings = iset.subinsn_groupings except AttributeError: subinsn_groupings = {} -for (tag, subinsn_grouping) in subinsn_groupings.items(): - encs[tag] = ''.join(reversed(subinsn_grouping['enc'].replace(' ', ''))) +for tag, subinsn_grouping in subinsn_groupings.items(): + encs[tag] = "".join(reversed(subinsn_grouping["enc"].replace(" ", ""))) -dectree_normal = {'leaves' : set()} -dectree_16bit = {'leaves' : set()} -dectree_subinsn_groupings = {'leaves' : set()} -dectree_subinsns = {name : {'leaves' : set()} for name in subinsn_enc_classes} -dectree_extensions = {name : {'leaves' : set()} for name in ext_enc_classes} +dectree_normal = {"leaves": set()} +dectree_16bit = {"leaves": set()} +dectree_subinsn_groupings = {"leaves": set()} +dectree_subinsns = {name: {"leaves": set()} for name in subinsn_enc_classes} +dectree_extensions = {name: {"leaves": set()} for name in ext_enc_classes} for tag in encs.keys(): if tag in subinsn_groupings: - dectree_subinsn_groupings['leaves'].add(tag) + dectree_subinsn_groupings["leaves"].add(tag) continue - enc_class = iset.iset[tag]['enc_class'] - if enc_class.startswith('SUBINSN_'): + enc_class = iset.iset[tag]["enc_class"] + if enc_class.startswith("SUBINSN_"): if len(encs[tag]) != 32: - encs[tag] = encs[tag] + '0' * 
(32 - len(encs[tag])) - dectree_subinsns[enc_class]['leaves'].add(tag) - elif enc_class == '16BIT': + encs[tag] = encs[tag] + "0" * (32 - len(encs[tag])) + dectree_subinsns[enc_class]["leaves"].add(tag) + elif enc_class == "16BIT": if len(encs[tag]) != 16: - raise Exception('Tag "{}" has enc_class "{}" and not an encoding ' + - 'width of 16 bits!'.format(tag, enc_class)) - dectree_16bit['leaves'].add(tag) + raise Exception( + 'Tag "{}" has enc_class "{}" and not an encoding ' + + "width of 16 bits!".format(tag, enc_class) + ) + dectree_16bit["leaves"].add(tag) else: if len(encs[tag]) != 32: - raise Exception('Tag "{}" has enc_class "{}" and not an encoding ' + - 'width of 32 bits!'.format(tag, enc_class)) - if enc_class == 'NORMAL': - dectree_normal['leaves'].add(tag) + raise Exception( + 'Tag "{}" has enc_class "{}" and not an encoding ' + + "width of 32 bits!".format(tag, enc_class) + ) + if enc_class == "NORMAL": + dectree_normal["leaves"].add(tag) else: - dectree_extensions[enc_class]['leaves'].add(tag) + dectree_extensions[enc_class]["leaves"].add(tag) faketags = set() -for (tag, enc) in iset.enc_ext_spaces.items(): +for tag, enc in iset.enc_ext_spaces.items(): faketags.add(tag) - encs[tag] = ''.join(reversed(enc.replace(' ', ''))) - dectree_normal['leaves'].add(tag) + encs[tag] = "".join(reversed(enc.replace(" ", ""))) + dectree_normal["leaves"].add(tag) faketags |= set(subinsn_groupings.keys()) + def every_bit_counts(bitset): for i in range(1, len(next(iter(bitset)))): - if len(set([bits[:i] + bits[i+1:] for bits in bitset])) == len(bitset): + if len(set([bits[:i] + bits[i + 1 :] for bits in bitset])) == len(bitset): return False return True + def auto_separate(node): - tags = node['leaves'] + tags = node["leaves"] if len(tags) <= 1: return enc_width = len(encs[next(iter(tags))]) - opcode_bit_for_all = \ - [all([encs[tag][i] in '01' \ - for tag in tags]) for i in range(enc_width)] - opcode_bit_is_0_for_all = \ - [opcode_bit_for_all[i] and all([encs[tag][i] == '0' \ - for tag in tags]) for i in range(enc_width)] - opcode_bit_is_1_for_all = \ - [opcode_bit_for_all[i] and all([encs[tag][i] == '1' \ - for tag in tags]) for i in range(enc_width)] - differentiator_opcode_bit = \ - [opcode_bit_for_all[i] and \ - not (opcode_bit_is_0_for_all[i] or \ - opcode_bit_is_1_for_all[i]) \ - for i in range(enc_width)] + opcode_bit_for_all = [ + all([encs[tag][i] in "01" for tag in tags]) for i in range(enc_width) + ] + opcode_bit_is_0_for_all = [ + opcode_bit_for_all[i] and all([encs[tag][i] == "0" for tag in tags]) + for i in range(enc_width) + ] + opcode_bit_is_1_for_all = [ + opcode_bit_for_all[i] and all([encs[tag][i] == "1" for tag in tags]) + for i in range(enc_width) + ] + differentiator_opcode_bit = [ + opcode_bit_for_all[i] + and not (opcode_bit_is_0_for_all[i] or opcode_bit_is_1_for_all[i]) + for i in range(enc_width) + ] best_width = 0 for width in range(4, 0, -1): for lsb in range(enc_width - width, -1, -1): - bitset = set([encs[tag][lsb:lsb+width] for tag in tags]) - if all(differentiator_opcode_bit[lsb:lsb+width]) and \ - (len(bitset) == len(tags) or every_bit_counts(bitset)): + bitset = set([encs[tag][lsb : lsb + width] for tag in tags]) + if all(differentiator_opcode_bit[lsb : lsb + width]) and ( + len(bitset) == len(tags) or every_bit_counts(bitset) + ): best_width = width best_lsb = lsb caught_all_tags = len(bitset) == len(tags) @@ -118,33 +133,37 @@ def auto_separate(node): if best_width != 0: break if best_width == 0: - raise Exception('Could not find a way to differentiate the 
encodings ' + - 'of the following tags:\n{}'.format('\n'.join(tags))) + raise Exception( + "Could not find a way to differentiate the encodings " + + "of the following tags:\n{}".format("\n".join(tags)) + ) if caught_all_tags: for width in range(1, best_width): for lsb in range(enc_width - width, -1, -1): - bitset = set([encs[tag][lsb:lsb+width] for tag in tags]) - if all(differentiator_opcode_bit[lsb:lsb+width]) and \ - len(bitset) == len(tags): + bitset = set([encs[tag][lsb : lsb + width] for tag in tags]) + if all(differentiator_opcode_bit[lsb : lsb + width]) and len( + bitset + ) == len(tags): best_width = width best_lsb = lsb break else: continue break - node['separator_lsb'] = best_lsb - node['separator_width'] = best_width - node['children'] = [] - for value in range(2 ** best_width): + node["separator_lsb"] = best_lsb + node["separator_width"] = best_width + node["children"] = [] + for value in range(2**best_width): child = {} - bits = ''.join(reversed('{:0{}b}'.format(value, best_width))) - child['leaves'] = \ - set([tag for tag in tags \ - if encs[tag][best_lsb:best_lsb+best_width] == bits]) - node['children'].append(child) - for child in node['children']: + bits = "".join(reversed("{:0{}b}".format(value, best_width))) + child["leaves"] = set( + [tag for tag in tags if encs[tag][best_lsb : best_lsb + best_width] == bits] + ) + node["children"].append(child) + for child in node["children"]: auto_separate(child) + auto_separate(dectree_normal) auto_separate(dectree_16bit) if subinsn_groupings: @@ -157,144 +176,173 @@ for dectree_ext in dectree_extensions.values(): for tag in faketags: del encs[tag] + def table_name(parents, node): path = parents + [node] root = path[0] - tag = next(iter(node['leaves'])) + tag = next(iter(node["leaves"])) if tag in subinsn_groupings: - enc_width = len(subinsn_groupings[tag]['enc'].replace(' ', '')) + enc_width = len(subinsn_groupings[tag]["enc"].replace(" ", "")) else: - tag = next(iter(node['leaves'] - faketags)) + tag = next(iter(node["leaves"] - faketags)) enc_width = len(encs[tag]) - determining_bits = ['_'] * enc_width - for (parent, child) in zip(path[:-1], path[1:]): - lsb = parent['separator_lsb'] - width = parent['separator_width'] - value = parent['children'].index(child) - determining_bits[lsb:lsb+width] = \ - list(reversed('{:0{}b}'.format(value, width))) + determining_bits = ["_"] * enc_width + for parent, child in zip(path[:-1], path[1:]): + lsb = parent["separator_lsb"] + width = parent["separator_width"] + value = parent["children"].index(child) + determining_bits[lsb : lsb + width] = list( + reversed("{:0{}b}".format(value, width)) + ) if tag in subinsn_groupings: - name = 'DECODE_ROOT_EE' + name = "DECODE_ROOT_EE" else: - enc_class = iset.iset[tag]['enc_class'] + enc_class = iset.iset[tag]["enc_class"] if enc_class in ext_enc_classes: - name = 'DECODE_EXT_{}'.format(enc_class) + name = "DECODE_EXT_{}".format(enc_class) elif enc_class in subinsn_enc_classes: - name = 'DECODE_SUBINSN_{}'.format(enc_class) + name = "DECODE_SUBINSN_{}".format(enc_class) else: - name = 'DECODE_ROOT_{}'.format(enc_width) + name = "DECODE_ROOT_{}".format(enc_width) if node != root: - name += '_' + ''.join(reversed(determining_bits)) + name += "_" + "".join(reversed(determining_bits)) return name + def print_node(f, node, parents): - if len(node['leaves']) <= 1: + if len(node["leaves"]) <= 1: return name = table_name(parents, node) - lsb = node['separator_lsb'] - width = node['separator_width'] - 
print('DECODE_NEW_TABLE({},{},DECODE_SEPARATOR_BITS({},{}))'.\ - format(name, 2 ** width, lsb, width), file=f) - for child in node['children']: - if len(child['leaves']) == 0: - print('INVALID()', file=f) - elif len(child['leaves']) == 1: - (tag,) = child['leaves'] + lsb = node["separator_lsb"] + width = node["separator_width"] + print( + "DECODE_NEW_TABLE({},{},DECODE_SEPARATOR_BITS({},{}))".format( + name, 2**width, lsb, width + ), + file=f, + ) + for child in node["children"]: + if len(child["leaves"]) == 0: + print("INVALID()", file=f) + elif len(child["leaves"]) == 1: + (tag,) = child["leaves"] if tag in subinsn_groupings: - class_a = subinsn_groupings[tag]['class_a'] - class_b = subinsn_groupings[tag]['class_b'] - enc = subinsn_groupings[tag]['enc'].replace(' ', '') - if 'RESERVED' in tag: - print('INVALID()', file=f) + class_a = subinsn_groupings[tag]["class_a"] + class_b = subinsn_groupings[tag]["class_b"] + enc = subinsn_groupings[tag]["enc"].replace(" ", "") + if "RESERVED" in tag: + print("INVALID()", file=f) else: - print('SUBINSNS({},{},{},"{}")'.\ - format(tag, class_a, class_b, enc), file=f) + print( + 'SUBINSNS({},{},{},"{}")'.format(tag, class_a, class_b, enc), + file=f, + ) elif tag in iset.enc_ext_spaces: - enc = iset.enc_ext_spaces[tag].replace(' ', '') + enc = iset.enc_ext_spaces[tag].replace(" ", "") print('EXTSPACE({},"{}")'.format(tag, enc), file=f) else: - enc = ''.join(reversed(encs[tag])) + enc = "".join(reversed(encs[tag])) print('TERMINAL({},"{}")'.format(tag, enc), file=f) else: - print('TABLE_LINK({})'.format(table_name(parents + [node], child)), - file=f) - print('DECODE_END_TABLE({},{},DECODE_SEPARATOR_BITS({},{}))'.\ - format(name, 2 ** width, lsb, width), file=f) + print("TABLE_LINK({})".format(table_name(parents + [node], child)), file=f) + print( + "DECODE_END_TABLE({},{},DECODE_SEPARATOR_BITS({},{}))".format( + name, 2**width, lsb, width + ), + file=f, + ) print(file=f) parents.append(node) - for child in node['children']: + for child in node["children"]: print_node(f, child, parents) parents.pop() + def print_tree(f, tree): print_node(f, tree, []) + def print_match_info(f): for tag in sorted(encs.keys(), key=iset.tags.index): - enc = ''.join(reversed(encs[tag])) - mask = int(re.sub(r'[^1]', r'0', enc.replace('0', '1')), 2) - match = int(re.sub(r'[^01]', r'0', enc), 2) - suffix = '' - print('DECODE{}_MATCH_INFO({},0x{:x}U,0x{:x}U)'.\ - format(suffix, tag, mask, match), file=f) + enc = "".join(reversed(encs[tag])) + mask = int(re.sub(r"[^1]", r"0", enc.replace("0", "1")), 2) + match = int(re.sub(r"[^01]", r"0", enc), 2) + suffix = "" + print( + "DECODE{}_MATCH_INFO({},0x{:x}U,0x{:x}U)".format(suffix, tag, mask, match), + file=f, + ) + + +regre = re.compile(r"((? 
1: - raise Exception('Tag "{}" has split register field!'.\ - format(tag)) + raise Exception('Tag "{}" has split register field!'.format(tag)) reg_enc_field = reg_enc_fields[0] if 2 ** len(reg_enc_field) != reg_num_choices: - raise Exception('Tag "{}" has incorrect register field width!'.\ - format(tag)) - print(' DECODE_REG({},{},{})'.\ - format(regno, len(reg_enc_field), enc.index(reg_enc_field)), - file=f) - if reg_type in num_registers and \ - reg_num_choices != num_registers[reg_type]: - print(' DECODE_MAPPED_REG({},{})'.\ - format(regno, reg_mapping), file=f) + raise Exception( + 'Tag "{}" has incorrect register field width!'.format(tag) + ) + print( + " DECODE_REG({},{},{})".format( + regno, len(reg_enc_field), enc.index(reg_enc_field) + ), + file=f, + ) + if reg_type in num_registers and reg_num_choices != num_registers[reg_type]: + print( + " DECODE_MAPPED_REG({},{})".format(regno, reg_mapping), + file=f, + ) regno += 1 + def implicit_register_key(reg): return implicit_registers[reg] + for reg in sorted( - set([r for r in (iset.iset[tag]['rregs'].split(',') + \ - iset.iset[tag]['wregs'].split(',')) \ - if r in implicit_registers]), key=implicit_register_key): - print(' DECODE_IMPL_REG({},{})'.\ - format(regno, implicit_registers[reg]), file=f) + set( + [ + r + for r in ( + iset.iset[tag]["rregs"].split(",") + + iset.iset[tag]["wregs"].split(",") + ) + if r in implicit_registers + ] + ), + key=implicit_register_key, + ): + print( + " DECODE_IMPL_REG({},{})".format(regno, implicit_registers[reg]), + file=f, + ) regno += 1 if imms and imms[0][0].isupper(): imms = reversed(imms) @@ -311,41 +359,45 @@ def print_op_info(f): else: imm_shift = 0 if imm_type.islower(): - imm_letter = 'i' + imm_letter = "i" else: - imm_letter = 'I' + imm_letter = "I" remainder = imm_width - for m in reversed(list(re.finditer(imm_letter + '+', enc))): + for m in reversed(list(re.finditer(imm_letter + "+", enc))): remainder -= m.end() - m.start() - print(' DECODE_IMM({},{},{},{})'.\ - format(immno, m.end() - m.start(), m.start(), remainder), - file=f) + print( + " DECODE_IMM({},{},{},{})".format( + immno, m.end() - m.start(), m.start(), remainder + ), + file=f, + ) if remainder != 0: if imm[2]: - imm[2] = ':' + imm[2] - raise Exception('Tag "{}" has an incorrect number of ' + \ - 'encoding bits for immediate "{}"'.\ - format(tag, ''.join(imm))) - if imm_type.lower() in 'sr': - print(' DECODE_IMM_SXT({},{})'.\ - format(immno, imm_width), file=f) - if imm_type.lower() == 'n': - print(' DECODE_IMM_NEG({},{})'.\ - format(immno, imm_width), file=f) + imm[2] = ":" + imm[2] + raise Exception( + 'Tag "{}" has an incorrect number of ' + + 'encoding bits for immediate "{}"'.format(tag, "".join(imm)) + ) + if imm_type.lower() in "sr": + print(" DECODE_IMM_SXT({},{})".format(immno, imm_width), file=f) + if imm_type.lower() == "n": + print(" DECODE_IMM_NEG({},{})".format(immno, imm_width), file=f) if imm_shift: - print(' DECODE_IMM_SHIFT({},{})'.\ - format(immno, imm_shift), file=f) - print(')', file=f) + print( + " DECODE_IMM_SHIFT({},{})".format(immno, imm_shift), file=f + ) + print(")", file=f) -if __name__ == '__main__': - with open(sys.argv[1], 'w') as f: + +if __name__ == "__main__": + with open(sys.argv[1], "w") as f: print_tree(f, dectree_normal) print_tree(f, dectree_16bit) if subinsn_groupings: print_tree(f, dectree_subinsn_groupings) - for (name, dectree_subinsn) in sorted(dectree_subinsns.items()): + for name, dectree_subinsn in sorted(dectree_subinsns.items()): print_tree(f, dectree_subinsn) - for (name, 
dectree_ext) in sorted(dectree_extensions.items()): + for name, dectree_ext in sorted(dectree_extensions.items()): print_tree(f, dectree_ext) print_match_info(f) print_op_info(f) diff --git a/target/hexagon/gdbstub.c b/target/hexagon/gdbstub.c index d152d01bfe..54d37e006e 100644 --- a/target/hexagon/gdbstub.c +++ b/target/hexagon/gdbstub.c @@ -16,7 +16,7 @@ */ #include "qemu/osdep.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "cpu.h" #include "internal.h" @@ -25,6 +25,14 @@ int hexagon_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) HexagonCPU *cpu = HEXAGON_CPU(cs); CPUHexagonState *env = &cpu->env; + if (n == HEX_REG_P3_0_ALIASED) { + uint32_t p3_0 = 0; + for (int i = 0; i < NUM_PREGS; i++) { + p3_0 = deposit32(p3_0, i * 8, 8, env->pred[i]); + } + return gdb_get_regl(mem_buf, p3_0); + } + if (n < TOTAL_PER_THREAD_REGS) { return gdb_get_regl(mem_buf, env->gpr[n]); } @@ -37,6 +45,14 @@ int hexagon_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) HexagonCPU *cpu = HEXAGON_CPU(cs); CPUHexagonState *env = &cpu->env; + if (n == HEX_REG_P3_0_ALIASED) { + uint32_t p3_0 = ldtul_p(mem_buf); + for (int i = 0; i < NUM_PREGS; i++) { + env->pred[i] = extract32(p3_0, i * 8, 8); + } + return sizeof(target_ulong); + } + if (n < TOTAL_PER_THREAD_REGS) { env->gpr[n] = ldtul_p(mem_buf); return sizeof(target_ulong); @@ -44,3 +60,71 @@ int hexagon_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) g_assert_not_reached(); } + +static int gdb_get_vreg(CPUHexagonState *env, GByteArray *mem_buf, int n) +{ + int total = 0; + int i; + for (i = 0; i < ARRAY_SIZE(env->VRegs[n].uw); i++) { + total += gdb_get_regl(mem_buf, env->VRegs[n].uw[i]); + } + return total; +} + +static int gdb_get_qreg(CPUHexagonState *env, GByteArray *mem_buf, int n) +{ + int total = 0; + int i; + for (i = 0; i < ARRAY_SIZE(env->QRegs[n].uw); i++) { + total += gdb_get_regl(mem_buf, env->QRegs[n].uw[i]); + } + return total; +} + +int hexagon_hvx_gdb_read_register(CPUHexagonState *env, GByteArray *mem_buf, int n) +{ + if (n < NUM_VREGS) { + return gdb_get_vreg(env, mem_buf, n); + } + n -= NUM_VREGS; + + if (n < NUM_QREGS) { + return gdb_get_qreg(env, mem_buf, n); + } + + g_assert_not_reached(); +} + +static int gdb_put_vreg(CPUHexagonState *env, uint8_t *mem_buf, int n) +{ + int i; + for (i = 0; i < ARRAY_SIZE(env->VRegs[n].uw); i++) { + env->VRegs[n].uw[i] = ldtul_p(mem_buf); + mem_buf += 4; + } + return MAX_VEC_SIZE_BYTES; +} + +static int gdb_put_qreg(CPUHexagonState *env, uint8_t *mem_buf, int n) +{ + int i; + for (i = 0; i < ARRAY_SIZE(env->QRegs[n].uw); i++) { + env->QRegs[n].uw[i] = ldtul_p(mem_buf); + mem_buf += 4; + } + return MAX_VEC_SIZE_BYTES / 8; +} + +int hexagon_hvx_gdb_write_register(CPUHexagonState *env, uint8_t *mem_buf, int n) +{ + if (n < NUM_VREGS) { + return gdb_put_vreg(env, mem_buf, n); + } + n -= NUM_VREGS; + + if (n < NUM_QREGS) { + return gdb_put_qreg(env, mem_buf, n); + } + + g_assert_not_reached(); +} diff --git a/target/hexagon/gen_analyze_funcs.py b/target/hexagon/gen_analyze_funcs.py new file mode 100755 index 0000000000..c3b521abef --- /dev/null +++ b/target/hexagon/gen_analyze_funcs.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python3 + +## +## Copyright(c) 2022-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
+## +## This program is free software; you can redistribute it and/or modify +## it under the terms of the GNU General Public License as published by +## the Free Software Foundation; either version 2 of the License, or +## (at your option) any later version. +## +## This program is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. +## +## You should have received a copy of the GNU General Public License +## along with this program; if not, see . +## + +import sys +import re +import string +import hex_common + + +## +## Helpers for gen_analyze_func +## +def is_predicated(tag): + return "A_CONDEXEC" in hex_common.attribdict[tag] + + +def analyze_opn_old(f, tag, regtype, regid, regno): + regN = f"{regtype}{regid}N" + predicated = "true" if is_predicated(tag) else "false" + if regtype == "R": + if regid in {"ss", "tt"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_reg_read_pair(ctx, {regN});\n") + elif regid in {"dd", "ee", "xx", "yy"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_reg_write_pair(ctx, {regN}, {predicated});\n") + elif regid in {"s", "t", "u", "v"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_reg_read(ctx, {regN});\n") + elif regid in {"d", "e", "x", "y"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_reg_write(ctx, {regN}, {predicated});\n") + else: + hex_common.bad_register(regtype, regid) + elif regtype == "P": + if regid in {"s", "t", "u", "v"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_pred_read(ctx, {regN});\n") + elif regid in {"d", "e", "x"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_pred_write(ctx, {regN});\n") + else: + hex_common.bad_register(regtype, regid) + elif regtype == "C": + if regid == "ss": + f.write( + f" const int {regN} = insn->regno[{regno}] " + "+ HEX_REG_SA0;\n" + ) + f.write(f" ctx_log_reg_read_pair(ctx, {regN});\n") + elif regid == "dd": + f.write(f" const int {regN} = insn->regno[{regno}] " "+ HEX_REG_SA0;\n") + f.write(f" ctx_log_reg_write_pair(ctx, {regN}, {predicated});\n") + elif regid == "s": + f.write( + f" const int {regN} = insn->regno[{regno}] " + "+ HEX_REG_SA0;\n" + ) + f.write(f" ctx_log_reg_read(ctx, {regN});\n") + elif regid == "d": + f.write(f" const int {regN} = insn->regno[{regno}] " "+ HEX_REG_SA0;\n") + f.write(f" ctx_log_reg_write(ctx, {regN}, {predicated});\n") + else: + hex_common.bad_register(regtype, regid) + elif regtype == "M": + if regid == "u": + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_reg_read(ctx, {regN});\n") + else: + hex_common.bad_register(regtype, regid) + elif regtype == "V": + newv = "EXT_DFL" + if hex_common.is_new_result(tag): + newv = "EXT_NEW" + elif hex_common.is_tmp_result(tag): + newv = "EXT_TMP" + if regid in {"dd", "xx"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write( + f" ctx_log_vreg_write_pair(ctx, {regN}, {newv}, " f"{predicated});\n" + ) + elif regid in {"uu", "vv"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_vreg_read_pair(ctx, {regN});\n") + elif regid in {"s", "u", "v", "w"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_vreg_read(ctx, {regN});\n") + elif regid in {"d", "x", "y"}: + 
f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_vreg_write(ctx, {regN}, {newv}, " f"{predicated});\n") + else: + hex_common.bad_register(regtype, regid) + elif regtype == "Q": + if regid in {"d", "e", "x"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_qreg_write(ctx, {regN});\n") + elif regid in {"s", "t", "u", "v"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_qreg_read(ctx, {regN});\n") + else: + hex_common.bad_register(regtype, regid) + elif regtype == "G": + if regid in {"dd"}: + f.write(f"// const int {regN} = insn->regno[{regno}];\n") + elif regid in {"d"}: + f.write(f"// const int {regN} = insn->regno[{regno}];\n") + elif regid in {"ss"}: + f.write(f"// const int {regN} = insn->regno[{regno}];\n") + elif regid in {"s"}: + f.write(f"// const int {regN} = insn->regno[{regno}];\n") + else: + hex_common.bad_register(regtype, regid) + elif regtype == "S": + if regid in {"dd"}: + f.write(f"// const int {regN} = insn->regno[{regno}];\n") + elif regid in {"d"}: + f.write(f"// const int {regN} = insn->regno[{regno}];\n") + elif regid in {"ss"}: + f.write(f"// const int {regN} = insn->regno[{regno}];\n") + elif regid in {"s"}: + f.write(f"// const int {regN} = insn->regno[{regno}];\n") + else: + hex_common.bad_register(regtype, regid) + else: + hex_common.bad_register(regtype, regid) + + +def analyze_opn_new(f, tag, regtype, regid, regno): + regN = f"{regtype}{regid}N" + if regtype == "N": + if regid in {"s", "t"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_reg_read(ctx, {regN});\n") + else: + hex_common.bad_register(regtype, regid) + elif regtype == "P": + if regid in {"t", "u", "v"}: + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_pred_read(ctx, {regN});\n") + else: + hex_common.bad_register(regtype, regid) + elif regtype == "O": + if regid == "s": + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" ctx_log_vreg_read(ctx, {regN});\n") + else: + hex_common.bad_register(regtype, regid) + else: + hex_common.bad_register(regtype, regid) + + +def analyze_opn(f, tag, regtype, regid, i): + if hex_common.is_pair(regid): + analyze_opn_old(f, tag, regtype, regid, i) + elif hex_common.is_single(regid): + if hex_common.is_old_val(regtype, regid, tag): + analyze_opn_old(f, tag, regtype, regid, i) + elif hex_common.is_new_val(regtype, regid, tag): + analyze_opn_new(f, tag, regtype, regid, i) + else: + hex_common.bad_register(regtype, regid) + else: + hex_common.bad_register(regtype, regid) + + +## +## Generate the code to analyze the instruction +## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;} +## We produce: +## static void analyze_A2_add(DisasContext *ctx) +## { +## Insn *insn G_GNUC_UNUSED = ctx->insn; +## const int RdN = insn->regno[0]; +## ctx_log_reg_write(ctx, RdN, false); +## const int RsN = insn->regno[1]; +## ctx_log_reg_read(ctx, RsN); +## const int RtN = insn->regno[2]; +## ctx_log_reg_read(ctx, RtN); +## } +## +def gen_analyze_func(f, tag, regs, imms): + f.write(f"static void analyze_{tag}(DisasContext *ctx)\n") + f.write("{\n") + + f.write(" Insn *insn G_GNUC_UNUSED = ctx->insn;\n") + + i = 0 + ## Analyze all the registers + for regtype, regid in regs: + analyze_opn(f, tag, regtype, regid, i) + i += 1 + + has_generated_helper = not hex_common.skip_qemu_helper( + tag + ) and not hex_common.is_idef_parser_enabled(tag) + + ## Mark HVX instructions with generated helpers + if (has_generated_helper and + "A_CVI" 
in hex_common.attribdict[tag]): + f.write(" ctx->has_hvx_helper = true;\n") + + f.write("}\n\n") + + +def main(): + hex_common.read_semantics_file(sys.argv[1]) + hex_common.read_attribs_file(sys.argv[2]) + hex_common.read_overrides_file(sys.argv[3]) + hex_common.read_overrides_file(sys.argv[4]) + ## Whether or not idef-parser is enabled is + ## determined by the number of arguments to + ## this script: + ## + ## 5 args. -> not enabled, + ## 6 args. -> idef-parser enabled. + ## + ## The 6th argument then holds a list of the successfully + ## parsed instructions. + is_idef_parser_enabled = len(sys.argv) > 6 + if is_idef_parser_enabled: + hex_common.read_idef_parser_enabled_file(sys.argv[5]) + hex_common.calculate_attribs() + tagregs = hex_common.get_tagregs() + tagimms = hex_common.get_tagimms() + + with open(sys.argv[-1], "w") as f: + f.write("#ifndef HEXAGON_TCG_FUNCS_H\n") + f.write("#define HEXAGON_TCG_FUNCS_H\n\n") + + for tag in hex_common.tags: + gen_analyze_func(f, tag, tagregs[tag], tagimms[tag]) + + f.write("#endif /* HEXAGON_TCG_FUNCS_H */\n") + + +if __name__ == "__main__": + main() diff --git a/target/hexagon/gen_helper_funcs.py b/target/hexagon/gen_helper_funcs.py index 19e9883f4c..ce21d3b688 100755 --- a/target/hexagon/gen_helper_funcs.py +++ b/target/hexagon/gen_helper_funcs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -22,133 +22,171 @@ import re import string import hex_common + ## ## Helpers for gen_helper_function ## def gen_decl_ea(f): f.write(" uint32_t EA;\n") -def gen_helper_return_type(f,regtype,regid,regno): - if regno > 1 : f.write(", ") + +def gen_helper_return_type(f, regtype, regid, regno): + if regno > 1: + f.write(", ") f.write("int32_t") -def gen_helper_return_type_pair(f,regtype,regid,regno): - if regno > 1 : f.write(", ") + +def gen_helper_return_type_pair(f, regtype, regid, regno): + if regno > 1: + f.write(", ") f.write("int64_t") -def gen_helper_arg(f,regtype,regid,regno): - if regno > 0 : f.write(", " ) - f.write("int32_t %s%sV" % (regtype,regid)) -def gen_helper_arg_new(f,regtype,regid,regno): - if regno >= 0 : f.write(", " ) - f.write("int32_t %s%sN" % (regtype,regid)) +def gen_helper_arg(f, regtype, regid, regno): + if regno > 0: + f.write(", ") + f.write(f"int32_t {regtype}{regid}V") -def gen_helper_arg_pair(f,regtype,regid,regno): - if regno >= 0 : f.write(", ") - f.write("int64_t %s%sV" % (regtype,regid)) -def gen_helper_arg_ext(f,regtype,regid,regno): - if regno > 0 : f.write(", ") - f.write("void *%s%sV_void" % (regtype,regid)) +def gen_helper_arg_new(f, regtype, regid, regno): + if regno >= 0: + f.write(", ") + f.write(f"int32_t {regtype}{regid}N") -def gen_helper_arg_ext_pair(f,regtype,regid,regno): - if regno > 0 : f.write(", ") - f.write("void *%s%sV_void" % (regtype,regid)) -def gen_helper_arg_opn(f,regtype,regid,i,tag): - if (hex_common.is_pair(regid)): - if (hex_common.is_hvx_reg(regtype)): - gen_helper_arg_ext_pair(f,regtype,regid,i) +def gen_helper_arg_pair(f, regtype, regid, regno): + if regno >= 0: + f.write(", ") + f.write(f"int64_t {regtype}{regid}V") + + +def gen_helper_arg_ext(f, regtype, regid, regno): + if regno > 0: + f.write(", ") + f.write(f"void *{regtype}{regid}V_void") + + +def gen_helper_arg_ext_pair(f, regtype, regid,
regno): + if regno > 0: + f.write(", ") + f.write(f"void *{regtype}{regid}V_void") + + +def gen_helper_arg_opn(f, regtype, regid, i, tag): + if hex_common.is_pair(regid): + if hex_common.is_hvx_reg(regtype): + gen_helper_arg_ext_pair(f, regtype, regid, i) else: - gen_helper_arg_pair(f,regtype,regid,i) - elif (hex_common.is_single(regid)): + gen_helper_arg_pair(f, regtype, regid, i) + elif hex_common.is_single(regid): if hex_common.is_old_val(regtype, regid, tag): - if (hex_common.is_hvx_reg(regtype)): - gen_helper_arg_ext(f,regtype,regid,i) + if hex_common.is_hvx_reg(regtype): + gen_helper_arg_ext(f, regtype, regid, i) else: - gen_helper_arg(f,regtype,regid,i) + gen_helper_arg(f, regtype, regid, i) elif hex_common.is_new_val(regtype, regid, tag): - gen_helper_arg_new(f,regtype,regid,i) + gen_helper_arg_new(f, regtype, regid, i) else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) -def gen_helper_arg_imm(f,immlett): - f.write(", int32_t %s" % (hex_common.imm_name(immlett))) -def gen_helper_dest_decl(f,regtype,regid,regno,subfield=""): - f.write(" int32_t %s%sV%s = 0;\n" % \ - (regtype,regid,subfield)) +def gen_helper_arg_imm(f, immlett): + f.write(f", int32_t {hex_common.imm_name(immlett)}") -def gen_helper_dest_decl_pair(f,regtype,regid,regno,subfield=""): - f.write(" int64_t %s%sV%s = 0;\n" % \ - (regtype,regid,subfield)) -def gen_helper_dest_decl_ext(f,regtype,regid): - if (regtype == "Q"): - f.write(" /* %s%sV is *(MMQReg *)(%s%sV_void) */\n" % \ - (regtype,regid,regtype,regid)) +def gen_helper_dest_decl(f, regtype, regid, regno, subfield=""): + f.write(f" int32_t {regtype}{regid}V{subfield} = 0;\n") + + +def gen_helper_dest_decl_pair(f, regtype, regid, regno, subfield=""): + f.write(f" int64_t {regtype}{regid}V{subfield} = 0;\n") + + +def gen_helper_dest_decl_ext(f, regtype, regid): + if regtype == "Q": + f.write( + f" /* {regtype}{regid}V is *(MMQReg *)" f"({regtype}{regid}V_void) */\n" + ) else: - f.write(" /* %s%sV is *(MMVector *)(%s%sV_void) */\n" % \ - (regtype,regid,regtype,regid)) + f.write( + f" /* {regtype}{regid}V is *(MMVector *)" + f"({regtype}{regid}V_void) */\n" + ) -def gen_helper_dest_decl_ext_pair(f,regtype,regid,regno): - f.write(" /* %s%sV is *(MMVectorPair *))%s%sV_void) */\n" % \ - (regtype,regid,regtype, regid)) +def gen_helper_dest_decl_ext_pair(f, regtype, regid, regno): + f.write( + f" /* {regtype}{regid}V is *(MMVectorPair *)(" + f"{regtype}{regid}V_void) */\n" + ) + + +def gen_helper_dest_decl_opn(f, regtype, regid, i): + if hex_common.is_pair(regid): + if hex_common.is_hvx_reg(regtype): + gen_helper_dest_decl_ext_pair(f, regtype, regid, i) else: - gen_helper_dest_decl_pair(f,regtype,regid,i) - elif (hex_common.is_single(regid)): - if (hex_common.is_hvx_reg(regtype)): - gen_helper_dest_decl_ext(f,regtype,regid) + gen_helper_dest_decl_pair(f, regtype, regid, i) + elif hex_common.is_single(regid): + if hex_common.is_hvx_reg(regtype): + gen_helper_dest_decl_ext(f, regtype, regid) else: - gen_helper_dest_decl(f,regtype,regid,i) + gen_helper_dest_decl(f, regtype, regid, i) else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) -def gen_helper_src_var_ext(f,regtype,regid): - if
(regtype == "Q"): - f.write(" /* %s%sV is *(MMQReg *)(%s%sV_void) */\n" % \ - (regtype,regid,regtype,regid)) + +def gen_helper_src_var_ext(f, regtype, regid): + if regtype == "Q": + f.write( + f" /* {regtype}{regid}V is *(MMQReg *)" f"({regtype}{regid}V_void) */\n" + ) else: - f.write(" /* %s%sV is *(MMVector *)(%s%sV_void) */\n" % \ - (regtype,regid,regtype,regid)) + f.write( + f" /* {regtype}{regid}V is *(MMVector *)" + f"({regtype}{regid}V_void) */\n" + ) -def gen_helper_src_var_ext_pair(f,regtype,regid,regno): - f.write(" /* %s%sV%s is *(MMVectorPair *)(%s%sV%s_void) */\n" % \ - (regtype,regid,regno,regtype,regid,regno)) -def gen_helper_return(f,regtype,regid,regno): - f.write(" return %s%sV;\n" % (regtype,regid)) +def gen_helper_src_var_ext_pair(f, regtype, regid, regno): + f.write( + f" /* {regtype}{regid}V{regno} is *(MMVectorPair *)" + f"({regtype}{regid}V{regno}_void) */\n" + ) -def gen_helper_return_pair(f,regtype,regid,regno): - f.write(" return %s%sV;\n" % (regtype,regid)) -def gen_helper_dst_write_ext(f,regtype,regid): +def gen_helper_return(f, regtype, regid, regno): + f.write(f" return {regtype}{regid}V;\n") + + +def gen_helper_return_pair(f, regtype, regid, regno): + f.write(f" return {regtype}{regid}V;\n") + + +def gen_helper_dst_write_ext(f, regtype, regid): return -def gen_helper_dst_write_ext_pair(f,regtype,regid): + +def gen_helper_dst_write_ext_pair(f, regtype, regid): return + def gen_helper_return_opn(f, regtype, regid, i): - if (hex_common.is_pair(regid)): - if (hex_common.is_hvx_reg(regtype)): - gen_helper_dst_write_ext_pair(f,regtype,regid) + if hex_common.is_pair(regid): + if hex_common.is_hvx_reg(regtype): + gen_helper_dst_write_ext_pair(f, regtype, regid) else: - gen_helper_return_pair(f,regtype,regid,i) - elif (hex_common.is_single(regid)): - if (hex_common.is_hvx_reg(regtype)): - gen_helper_dst_write_ext(f,regtype,regid) + gen_helper_return_pair(f, regtype, regid, i) + elif hex_common.is_single(regid): + if hex_common.is_hvx_reg(regtype): + gen_helper_dst_write_ext(f, regtype, regid) else: - gen_helper_return(f,regtype,regid,i) + gen_helper_return(f, regtype, regid, i) else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) + ## ## Generate the TCG code to call the helper @@ -170,129 +208,150 @@ def gen_helper_function(f, tag, tagregs, tagimms): numresults = 0 numscalarresults = 0 numscalarreadwrite = 0 - for regtype,regid,toss,numregs in regs: - if (hex_common.is_written(regid)): + for regtype, regid in regs: + if hex_common.is_written(regid): numresults += 1 - if (hex_common.is_scalar_reg(regtype)): + if hex_common.is_scalar_reg(regtype): numscalarresults += 1 - if (hex_common.is_readwrite(regid)): - if (hex_common.is_scalar_reg(regtype)): + if hex_common.is_readwrite(regid): + if hex_common.is_scalar_reg(regtype): numscalarreadwrite += 1 - if (numscalarresults > 1): + if numscalarresults > 1: ## The helper is bogus when there is more than one result - f.write("void HELPER(%s)(CPUHexagonState *env) { BOGUS_HELPER(%s); }\n" - % (tag, tag)) + f.write( + f"void HELPER({tag})(CPUHexagonState *env) " f"{{ BOGUS_HELPER({tag}); }}\n" + ) else: ## The return type of the function is the type of the destination ## register (if scalar) - i=0 - for regtype,regid,toss,numregs in regs: - if (hex_common.is_written(regid)): - if (hex_common.is_pair(regid)): - if (hex_common.is_hvx_reg(regtype)): + i = 0 + for regtype, regid in regs: + if hex_common.is_written(regid): + if hex_common.is_pair(regid): + if 
hex_common.is_hvx_reg(regtype): continue else: - gen_helper_return_type_pair(f,regtype,regid,i) - elif (hex_common.is_single(regid)): - if (hex_common.is_hvx_reg(regtype)): - continue + gen_helper_return_type_pair(f, regtype, regid, i) + elif hex_common.is_single(regid): + if hex_common.is_hvx_reg(regtype): + continue else: - gen_helper_return_type(f,regtype,regid,i) + gen_helper_return_type(f, regtype, regid, i) else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) i += 1 - if (numscalarresults == 0): + if numscalarresults == 0: f.write("void") - f.write(" HELPER(%s)(CPUHexagonState *env" % tag) + f.write(f" HELPER({tag})(CPUHexagonState *env") ## Arguments include the vector destination operands i = 1 - for regtype,regid,toss,numregs in regs: - if (hex_common.is_written(regid)): - if (hex_common.is_pair(regid)): - if (hex_common.is_hvx_reg(regtype)): - gen_helper_arg_ext_pair(f,regtype,regid,i) + for regtype, regid in regs: + if hex_common.is_written(regid): + if hex_common.is_pair(regid): + if hex_common.is_hvx_reg(regtype): + gen_helper_arg_ext_pair(f, regtype, regid, i) else: continue - elif (hex_common.is_single(regid)): - if (hex_common.is_hvx_reg(regtype)): - gen_helper_arg_ext(f,regtype,regid,i) + elif hex_common.is_single(regid): + if hex_common.is_hvx_reg(regtype): + gen_helper_arg_ext(f, regtype, regid, i) else: # This is the return value of the function continue else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) i += 1 + ## For conditional instructions, we pass in the destination register + if "A_CONDEXEC" in hex_common.attribdict[tag]: + for regtype, regid in regs: + if hex_common.is_writeonly(regid) and not hex_common.is_hvx_reg( + regtype + ): + gen_helper_arg_opn(f, regtype, regid, i, tag) + i += 1 + ## Arguments to the helper function are the source regs and immediates - for regtype,regid,toss,numregs in regs: - if (hex_common.is_read(regid)): - if (hex_common.is_hvx_reg(regtype) and - hex_common.is_readwrite(regid)): + for regtype, regid in regs: + if hex_common.is_read(regid): + if hex_common.is_hvx_reg(regtype) and hex_common.is_readwrite(regid): continue - gen_helper_arg_opn(f,regtype,regid,i,tag) + gen_helper_arg_opn(f, regtype, regid, i, tag) i += 1 - for immlett,bits,immshift in imms: - gen_helper_arg_imm(f,immlett) + for immlett, bits, immshift in imms: + gen_helper_arg_imm(f, immlett) i += 1 - if (hex_common.need_pkt_has_multi_cof(tag)): + if hex_common.need_pkt_has_multi_cof(tag): f.write(", uint32_t pkt_has_multi_cof") + if (hex_common.need_pkt_need_commit(tag)): + f.write(", uint32_t pkt_need_commit") if hex_common.need_PC(tag): - if i > 0: f.write(", ") + if i > 0: + f.write(", ") f.write("target_ulong PC") i += 1 if hex_common.helper_needs_next_PC(tag): - if i > 0: f.write(", ") + if i > 0: + f.write(", ") f.write("target_ulong next_PC") i += 1 if hex_common.need_slot(tag): - if i > 0: f.write(", ") - f.write("uint32_t slot") + if i > 0: + f.write(", ") + f.write("uint32_t slotval") i += 1 if hex_common.need_part1(tag): - if i > 0: f.write(", ") + if i > 0: + f.write(", ") f.write("uint32_t part1") f.write(")\n{\n") - if (not hex_common.need_slot(tag)): - f.write(" uint32_t slot __attribute__((unused)) = 4;\n" ) - if hex_common.need_ea(tag): gen_decl_ea(f) + if hex_common.need_ea(tag): + gen_decl_ea(f) ## Declare the return variable - i=0 - for regtype,regid,toss,numregs in regs: - if (hex_common.is_writeonly(regid)): - 
gen_helper_dest_decl_opn(f,regtype,regid,i) - i += 1 + i = 0 + if "A_CONDEXEC" not in hex_common.attribdict[tag]: + for regtype, regid in regs: + if hex_common.is_writeonly(regid): + gen_helper_dest_decl_opn(f, regtype, regid, i) + i += 1 - for regtype,regid,toss,numregs in regs: - if (hex_common.is_read(regid)): - if (hex_common.is_pair(regid)): - if (hex_common.is_hvx_reg(regtype)): - gen_helper_src_var_ext_pair(f,regtype,regid,i) - elif (hex_common.is_single(regid)): - if (hex_common.is_hvx_reg(regtype)): - gen_helper_src_var_ext(f,regtype,regid) + for regtype, regid in regs: + if hex_common.is_read(regid): + if hex_common.is_pair(regid): + if hex_common.is_hvx_reg(regtype): + gen_helper_src_var_ext_pair(f, regtype, regid, i) + elif hex_common.is_single(regid): + if hex_common.is_hvx_reg(regtype): + gen_helper_src_var_ext(f, regtype, regid) else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) - if 'A_FPOP' in hex_common.attribdict[tag]: - f.write(' arch_fpop_start(env);\n'); + if hex_common.need_slot(tag): + if "A_LOAD" in hex_common.attribdict[tag]: + f.write(" bool pkt_has_store_s1 = slotval & 0x1;\n") + f.write(" uint32_t slot = slotval >> 1;\n") - f.write(" %s\n" % hex_common.semdict[tag]) + if "A_FPOP" in hex_common.attribdict[tag]: + f.write(" arch_fpop_start(env);\n") - if 'A_FPOP' in hex_common.attribdict[tag]: - f.write(' arch_fpop_end(env);\n'); + f.write(f" {hex_common.semdict[tag]}\n") + + if "A_FPOP" in hex_common.attribdict[tag]: + f.write(" arch_fpop_end(env);\n") ## Save/return the return variable - for regtype,regid,toss,numregs in regs: - if (hex_common.is_written(regid)): + for regtype, regid in regs: + if hex_common.is_written(regid): gen_helper_return_opn(f, regtype, regid, i) f.write("}\n\n") ## End of the helper definition + def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) @@ -315,27 +374,28 @@ def main(): tagimms = hex_common.get_tagimms() output_file = sys.argv[-1] - with open(output_file, 'w') as f: + with open(output_file, "w") as f: for tag in hex_common.tags: ## Skip the priv instructions - if ( "A_PRIV" in hex_common.attribdict[tag] ) : + if "A_PRIV" in hex_common.attribdict[tag]: continue ## Skip the guest instructions - if ( "A_GUEST" in hex_common.attribdict[tag] ) : + if "A_GUEST" in hex_common.attribdict[tag]: continue ## Skip the diag instructions - if ( tag == "Y6_diag" ) : + if tag == "Y6_diag": continue - if ( tag == "Y6_diag0" ) : + if tag == "Y6_diag0": continue - if ( tag == "Y6_diag1" ) : + if tag == "Y6_diag1": continue - if ( hex_common.skip_qemu_helper(tag) ): + if hex_common.skip_qemu_helper(tag): continue - if ( hex_common.is_idef_parser_enabled(tag) ): + if hex_common.is_idef_parser_enabled(tag): continue gen_helper_function(f, tag, tagregs, tagimms) + if __name__ == "__main__": main() diff --git a/target/hexagon/gen_helper_protos.py b/target/hexagon/gen_helper_protos.py index 674bf370fa..131043795a 100755 --- a/target/hexagon/gen_helper_protos.py +++ b/target/hexagon/gen_helper_protos.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
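Reviewer note: with this series, helpers that need slot information no longer take a bare `slot` argument; they receive a packed `slotval` and unpack it as emitted above (`slotval & 0x1`, `slotval >> 1`). A minimal Python model of that encoding; the pack/unpack helper names are hypothetical, only the bit layout comes from the generated C:

```python
# Bit 0 carries the pkt_has_store_s1 flag; the remaining bits carry the
# slot number (illustrative model, not code from this series).
def pack_slotval(slot, pkt_has_store_s1):
    return (slot << 1) | int(pkt_has_store_s1)

def unpack_slotval(slotval):
    # Mirrors the emitted C: bool pkt_has_store_s1 = slotval & 0x1;
    #                        uint32_t slot = slotval >> 1;
    return slotval >> 1, bool(slotval & 0x1)

assert pack_slotval(3, True) == 0b111
assert unpack_slotval(0b111) == (3, True)
```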
## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -26,32 +26,34 @@ import hex_common ## Helpers for gen_helper_prototype ## def_helper_types = { - 'N' : 's32', - 'O' : 's32', - 'P' : 's32', - 'M' : 's32', - 'C' : 's32', - 'R' : 's32', - 'V' : 'ptr', - 'Q' : 'ptr' + "N": "s32", + "O": "s32", + "P": "s32", + "M": "s32", + "C": "s32", + "R": "s32", + "V": "ptr", + "Q": "ptr", } def_helper_types_pair = { - 'R' : 's64', - 'C' : 's64', - 'S' : 's64', - 'G' : 's64', - 'V' : 'ptr', - 'Q' : 'ptr' + "R": "s64", + "C": "s64", + "S": "s64", + "G": "s64", + "V": "ptr", + "Q": "ptr", } -def gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i): - if (hex_common.is_pair(regid)): - f.write(", %s" % (def_helper_types_pair[regtype])) - elif (hex_common.is_single(regid)): - f.write(", %s" % (def_helper_types[regtype])) + +def gen_def_helper_opn(f, tag, regtype, regid, i): + if hex_common.is_pair(regid): + f.write(f", {def_helper_types_pair[regtype]}") + elif hex_common.is_single(regid): + f.write(f", {def_helper_types[regtype]}") else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) + ## ## Generate the DEF_HELPER prototype for an instruction @@ -66,80 +68,114 @@ def gen_helper_prototype(f, tag, tagregs, tagimms): numresults = 0 numscalarresults = 0 numscalarreadwrite = 0 - for regtype,regid,toss,numregs in regs: - if (hex_common.is_written(regid)): + for regtype, regid in regs: + if hex_common.is_written(regid): numresults += 1 - if (hex_common.is_scalar_reg(regtype)): + if hex_common.is_scalar_reg(regtype): numscalarresults += 1 - if (hex_common.is_readwrite(regid)): - if (hex_common.is_scalar_reg(regtype)): + if hex_common.is_readwrite(regid): + if hex_common.is_scalar_reg(regtype): numscalarreadwrite += 1 - if (numscalarresults > 1): + if numscalarresults > 1: ## The helper is bogus when there is more than one result - f.write('DEF_HELPER_1(%s, void, env)\n' % tag) + f.write(f"DEF_HELPER_1({tag}, void, env)\n") else: ## Figure out how many arguments the helper will take - if (numscalarresults == 0): - def_helper_size = len(regs)+len(imms)+numscalarreadwrite+1 - if hex_common.need_pkt_has_multi_cof(tag): def_helper_size += 1 - if hex_common.need_part1(tag): def_helper_size += 1 - if hex_common.need_slot(tag): def_helper_size += 1 - if hex_common.need_PC(tag): def_helper_size += 1 - if hex_common.helper_needs_next_PC(tag): def_helper_size += 1 - f.write('DEF_HELPER_%s(%s' % (def_helper_size, tag)) + if numscalarresults == 0: + def_helper_size = len(regs) + len(imms) + numscalarreadwrite + 1 + if hex_common.need_pkt_has_multi_cof(tag): + def_helper_size += 1 + if hex_common.need_pkt_need_commit(tag): + def_helper_size += 1 + if hex_common.need_part1(tag): + def_helper_size += 1 + if hex_common.need_slot(tag): + def_helper_size += 1 + if hex_common.need_PC(tag): + def_helper_size += 1 + if hex_common.helper_needs_next_PC(tag): + def_helper_size += 1 + if hex_common.need_condexec_reg(tag, regs): + def_helper_size += 1 + f.write(f"DEF_HELPER_{def_helper_size}({tag}") ## The return type is void - f.write(', void' ) + f.write(", void") else: - def_helper_size = len(regs)+len(imms)+numscalarreadwrite - if hex_common.need_pkt_has_multi_cof(tag): def_helper_size += 1 - if hex_common.need_part1(tag): def_helper_size += 1 - if hex_common.need_slot(tag): def_helper_size += 1 - if hex_common.need_PC(tag): def_helper_size += 1 - if 
hex_common.helper_needs_next_PC(tag): def_helper_size += 1 - f.write('DEF_HELPER_%s(%s' % (def_helper_size, tag)) + def_helper_size = len(regs) + len(imms) + numscalarreadwrite + if hex_common.need_pkt_has_multi_cof(tag): + def_helper_size += 1 + if hex_common.need_pkt_need_commit(tag): + def_helper_size += 1 + if hex_common.need_part1(tag): + def_helper_size += 1 + if hex_common.need_slot(tag): + def_helper_size += 1 + if hex_common.need_PC(tag): + def_helper_size += 1 + if hex_common.need_condexec_reg(tag, regs): + def_helper_size += 1 + if hex_common.helper_needs_next_PC(tag): + def_helper_size += 1 + f.write(f"DEF_HELPER_{def_helper_size}({tag}") ## Generate the qemu DEF_HELPER type for each result ## Iterate over this list twice ## - Emit the scalar result ## - Emit the vector result - i=0 - for regtype,regid,toss,numregs in regs: - if (hex_common.is_written(regid)): - if (not hex_common.is_hvx_reg(regtype)): - gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i) + i = 0 + for regtype, regid in regs: + if hex_common.is_written(regid): + if not hex_common.is_hvx_reg(regtype): + gen_def_helper_opn(f, tag, regtype, regid, i) i += 1 ## Put the env between the outputs and inputs - f.write(', env' ) + f.write(", env") i += 1 # Second pass - for regtype,regid,toss,numregs in regs: - if (hex_common.is_written(regid)): - if (hex_common.is_hvx_reg(regtype)): - gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i) + for regtype, regid in regs: + if hex_common.is_written(regid): + if hex_common.is_hvx_reg(regtype): + gen_def_helper_opn(f, tag, regtype, regid, i) + i += 1 + + ## For conditional instructions, we pass in the destination register + if "A_CONDEXEC" in hex_common.attribdict[tag]: + for regtype, regid in regs: + if hex_common.is_writeonly(regid) and not hex_common.is_hvx_reg( + regtype + ): + gen_def_helper_opn(f, tag, regtype, regid, i) i += 1 ## Generate the qemu type for each input operand (regs and immediates) - for regtype,regid,toss,numregs in regs: - if (hex_common.is_read(regid)): - if (hex_common.is_hvx_reg(regtype) and - hex_common.is_readwrite(regid)): + for regtype, regid in regs: + if hex_common.is_read(regid): + if hex_common.is_hvx_reg(regtype) and hex_common.is_readwrite(regid): continue - gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i) + gen_def_helper_opn(f, tag, regtype, regid, i) i += 1 - for immlett,bits,immshift in imms: + for immlett, bits, immshift in imms: f.write(", s32") - ## Add the arguments for the instruction pkt_has_multi_cof, slot and - ## part1 (if needed) - if hex_common.need_pkt_has_multi_cof(tag): f.write(', i32') - if hex_common.need_PC(tag): f.write(', i32') - if hex_common.helper_needs_next_PC(tag): f.write(', i32') - if hex_common.need_slot(tag): f.write(', i32' ) - if hex_common.need_part1(tag): f.write(' , i32' ) - f.write(')\n') + ## Add the arguments for the instruction pkt_has_multi_cof, + ## pkt_needs_commit, PC, next_PC, slot, and part1 (if needed) + if hex_common.need_pkt_has_multi_cof(tag): + f.write(", i32") + if hex_common.need_pkt_need_commit(tag): + f.write(', i32') + if hex_common.need_PC(tag): + f.write(", i32") + if hex_common.helper_needs_next_PC(tag): + f.write(", i32") + if hex_common.need_slot(tag): + f.write(", i32") + if hex_common.need_part1(tag): + f.write(" , i32") + f.write(")\n") + def main(): hex_common.read_semantics_file(sys.argv[1]) @@ -163,28 +199,29 @@ def main(): tagimms = hex_common.get_tagimms() output_file = sys.argv[-1] - with open(output_file, 'w') as f: + with open(output_file, 
"w") as f: for tag in hex_common.tags: ## Skip the priv instructions - if ( "A_PRIV" in hex_common.attribdict[tag] ) : + if "A_PRIV" in hex_common.attribdict[tag]: continue ## Skip the guest instructions - if ( "A_GUEST" in hex_common.attribdict[tag] ) : + if "A_GUEST" in hex_common.attribdict[tag]: continue ## Skip the diag instructions - if ( tag == "Y6_diag" ) : + if tag == "Y6_diag": continue - if ( tag == "Y6_diag0" ) : + if tag == "Y6_diag0": continue - if ( tag == "Y6_diag1" ) : + if tag == "Y6_diag1": continue - if ( hex_common.skip_qemu_helper(tag) ): + if hex_common.skip_qemu_helper(tag): continue - if ( hex_common.is_idef_parser_enabled(tag) ): + if hex_common.is_idef_parser_enabled(tag): continue gen_helper_prototype(f, tag, tagregs, tagimms) + if __name__ == "__main__": main() diff --git a/target/hexagon/gen_idef_parser_funcs.py b/target/hexagon/gen_idef_parser_funcs.py index 917753d6d8..f4518e653f 100644 --- a/target/hexagon/gen_idef_parser_funcs.py +++ b/target/hexagon/gen_idef_parser_funcs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2022 rev.ng Labs Srl. All Rights Reserved. +## Copyright(c) 2019-2023 rev.ng Labs Srl. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -24,6 +24,7 @@ from io import StringIO import hex_common + ## ## Generate code to be fed to the idef_parser ## @@ -48,83 +49,116 @@ def main(): tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - with open(sys.argv[3], 'w') as f: + with open(sys.argv[3], "w") as f: f.write('#include "macros.inc"\n\n') for tag in hex_common.tags: ## Skip the priv instructions - if ( "A_PRIV" in hex_common.attribdict[tag] ) : + if "A_PRIV" in hex_common.attribdict[tag]: continue ## Skip the guest instructions - if ( "A_GUEST" in hex_common.attribdict[tag] ) : + if "A_GUEST" in hex_common.attribdict[tag]: continue ## Skip instructions that saturate in a ternary expression - if ( tag in {'S2_asr_r_r_sat', 'S2_asl_r_r_sat'} ) : + if tag in {"S2_asr_r_r_sat", "S2_asl_r_r_sat"}: continue ## Skip instructions using switch - if ( tag in {'S4_vrcrotate_acc', 'S4_vrcrotate'} ) : + if tag in {"S4_vrcrotate_acc", "S4_vrcrotate"}: continue ## Skip trap instructions - if ( tag in {'J2_trap0', 'J2_trap1'} ) : + if tag in {"J2_trap0", "J2_trap1"}: continue ## Skip 128-bit instructions - if ( tag in {'A7_croundd_ri', 'A7_croundd_rr'} ) : + if tag in {"A7_croundd_ri", "A7_croundd_rr"}: continue - if ( tag in {'M7_wcmpyrw', 'M7_wcmpyrwc', - 'M7_wcmpyiw', 'M7_wcmpyiwc', - 'M7_wcmpyrw_rnd', 'M7_wcmpyrwc_rnd', - 'M7_wcmpyiw_rnd', 'M7_wcmpyiwc_rnd'} ) : + if tag in { + "M7_wcmpyrw", + "M7_wcmpyrwc", + "M7_wcmpyiw", + "M7_wcmpyiwc", + "M7_wcmpyrw_rnd", + "M7_wcmpyrwc_rnd", + "M7_wcmpyiw_rnd", + "M7_wcmpyiwc_rnd", + }: continue ## Skip interleave/deinterleave instructions - if ( tag in {'S2_interleave', 'S2_deinterleave'} ) : + if tag in {"S2_interleave", "S2_deinterleave"}: continue ## Skip instructions using bit reverse - if ( tag in {'S2_brev', 'S2_brevp', 'S2_ct0', 'S2_ct1', - 'S2_ct0p', 'S2_ct1p', 'A4_tlbmatch'} ) : + if tag in { + "S2_brev", + "S2_brevp", + "S2_ct0", + "S2_ct1", + "S2_ct0p", + "S2_ct1p", + "A4_tlbmatch", + }: continue ## Skip other unsupported instructions - if ( tag == 'S2_cabacdecbin' or tag == 'A5_ACS' ) : + if tag == "S2_cabacdecbin" or tag == "A5_ACS": continue - if ( tag.startswith('Y') ) : + if tag.startswith("Y"): continue - if ( tag.startswith('V6_') ) : + if 
tag.startswith("V6_"): continue - if ( tag.startswith('F') ) : + if ( tag.startswith("F") and + tag not in { + "F2_sfimm_p", + "F2_sfimm_n", + "F2_dfimm_p", + "F2_dfimm_n", + "F2_dfmpyll", + "F2_dfmpylh" + }): continue - if ( tag.endswith('_locked') ) : + if tag.endswith("_locked"): continue - if ( "A_COF" in hex_common.attribdict[tag] ) : + if "A_COF" in hex_common.attribdict[tag]: + continue + if ( tag.startswith('R6_release_') ): + continue + ## Skip instructions that are incompatible with short-circuit + ## packet register writes + if ( tag == 'S2_insert' or + tag == 'S2_insert_rp' or + tag == 'S2_asr_r_svw_trun' or + tag == 'A2_swiz' ): continue regs = tagregs[tag] imms = tagimms[tag] arguments = [] - for regtype,regid,toss,numregs in regs: + for regtype, regid in regs: prefix = "in " if hex_common.is_read(regid) else "" is_pair = hex_common.is_pair(regid) - is_single_old = (hex_common.is_single(regid) - and hex_common.is_old_val(regtype, regid, tag)) - is_single_new = (hex_common.is_single(regid) - and hex_common.is_new_val(regtype, regid, tag)) + is_single_old = hex_common.is_single(regid) and hex_common.is_old_val( + regtype, regid, tag + ) + is_single_new = hex_common.is_single(regid) and hex_common.is_new_val( + regtype, regid, tag + ) if is_pair or is_single_old: - arguments.append("%s%s%sV" % (prefix, regtype, regid)) + arguments.append(f"{prefix}{regtype}{regid}V") elif is_single_new: - arguments.append("%s%s%sN" % (prefix, regtype, regid)) + arguments.append(f"{prefix}{regtype}{regid}N") else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) - for immlett,bits,immshift in imms: + for immlett, bits, immshift in imms: arguments.append(hex_common.imm_name(immlett)) - f.write("%s(%s) {\n" % (tag, ", ".join(arguments))) - f.write(" "); + f.write(f"{tag}({', '.join(arguments)}) {{\n") + f.write(" ") if hex_common.need_ea(tag): - f.write("size4u_t EA; "); - f.write("%s\n" % hex_common.semdict[tag]) + f.write("size4u_t EA; ") + f.write(f"{hex_common.semdict[tag]}\n") f.write("}\n\n") + if __name__ == "__main__": main() diff --git a/target/hexagon/gen_op_attribs.py b/target/hexagon/gen_op_attribs.py index 6a1a1ca21d..41074b8573 100755 --- a/target/hexagon/gen_op_attribs.py +++ b/target/hexagon/gen_op_attribs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -22,6 +22,7 @@ import re import string import hex_common + def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) @@ -30,10 +31,13 @@ def main(): ## ## Generate all the attributes associated with each instruction ## - with open(sys.argv[3], 'w') as f: + with open(sys.argv[3], "w") as f: for tag in hex_common.tags: - f.write('OP_ATTRIB(%s,ATTRIBS(%s))\n' % \ - (tag, ','.join(sorted(hex_common.attribdict[tag])))) + f.write( + f"OP_ATTRIB({tag},ATTRIBS(" + f'{",".join(sorted(hex_common.attribdict[tag]))}))\n' + ) + if __name__ == "__main__": main() diff --git a/target/hexagon/gen_op_regs.py b/target/hexagon/gen_op_regs.py index e8137d4a12..a8a7712129 100755 --- a/target/hexagon/gen_op_regs.py +++ b/target/hexagon/gen_op_regs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. 
All Rights Reserved. +## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -22,21 +22,25 @@ import re import string import hex_common + ## ## Generate the register and immediate operands for each instruction ## def calculate_regid_reg(tag): - def letter_inc(x): return chr(ord(x)+1) - ordered_implregs = [ 'SP','FP','LR' ] - srcdst_lett = 'X' - src_lett = 'S' - dst_lett = 'D' + def letter_inc(x): + return chr(ord(x) + 1) + + ordered_implregs = ["SP", "FP", "LR"] + srcdst_lett = "X" + src_lett = "S" + dst_lett = "D" retstr = "" mapdict = {} for reg in ordered_implregs: reg_rd = 0 reg_wr = 0 - if ('A_IMPLICIT_WRITES_'+reg) in hex_common.attribdict[tag]: reg_wr = 1 + if ("A_IMPLICIT_WRITES_" + reg) in hex_common.attribdict[tag]: + reg_wr = 1 if reg_rd and reg_wr: retstr += srcdst_lett mapdict[srcdst_lett] = reg @@ -49,62 +53,71 @@ def calculate_regid_reg(tag): retstr += dst_lett mapdict[dst_lett] = reg dst_lett = letter_inc(dst_lett) - return retstr,mapdict + return retstr, mapdict + def calculate_regid_letters(tag): - retstr,mapdict = calculate_regid_reg(tag) + retstr, mapdict = calculate_regid_reg(tag) return retstr + def strip_reg_prefix(x): - y=x.replace('UREG.','') - y=y.replace('MREG.','') - return y.replace('GREG.','') + y = x.replace("UREG.", "") + y = y.replace("MREG.", "") + return y.replace("GREG.", "") + def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) - tagregs = hex_common.get_tagregs() + tagregs = hex_common.get_tagregs(full=True) tagimms = hex_common.get_tagimms() - with open(sys.argv[3], 'w') as f: + with open(sys.argv[3], "w") as f: for tag in hex_common.tags: regs = tagregs[tag] rregs = [] wregs = [] regids = "" - for regtype,regid,toss,numregs in regs: + for regtype, regid, _, numregs in regs: if hex_common.is_read(regid): - if regid[0] not in regids: regids += regid[0] - rregs.append(regtype+regid+numregs) + if regid[0] not in regids: + regids += regid[0] + rregs.append(regtype + regid + numregs) if hex_common.is_written(regid): - wregs.append(regtype+regid+numregs) - if regid[0] not in regids: regids += regid[0] + wregs.append(regtype + regid + numregs) + if regid[0] not in regids: + regids += regid[0] for attrib in hex_common.attribdict[tag]: - if hex_common.attribinfo[attrib]['rreg']: - rregs.append(strip_reg_prefix(attribinfo[attrib]['rreg'])) - if hex_common.attribinfo[attrib]['wreg']: - wregs.append(strip_reg_prefix(attribinfo[attrib]['wreg'])) + if hex_common.attribinfo[attrib]["rreg"]: + rregs.append(strip_reg_prefix(hex_common.attribinfo[attrib]["rreg"])) + if hex_common.attribinfo[attrib]["wreg"]: + wregs.append(strip_reg_prefix(hex_common.attribinfo[attrib]["wreg"])) regids += calculate_regid_letters(tag) - f.write('REGINFO(%s,"%s",\t/*RD:*/\t"%s",\t/*WR:*/\t"%s")\n' % \ - (tag,regids,",".join(rregs),",".join(wregs))) + f.write( + f'REGINFO({tag},"{regids}",\t/*RD:*/\t"{",".join(rregs)}",' + f'\t/*WR:*/\t"{",".join(wregs)}")\n' + ) for tag in hex_common.tags: imms = tagimms[tag] - f.write( 'IMMINFO(%s' % tag) + f.write(f"IMMINFO({tag}") if not imms: - f.write(''','u',0,0,'U',0,0''') - for sign,size,shamt in imms: - if sign == 'r': sign = 's' + f.write(""",'u',0,0,'U',0,0""") + for sign, size, shamt in imms: + if sign == "r": + sign = "s" if not shamt: shamt = "0" - f.write(''','%s',%s,%s''' % (sign,size,shamt)) + f.write(f""",'{sign}',{size},{shamt}""") if
len(imms) == 1: if sign.isupper(): - myu = 'u' + myu = "u" else: - myu = 'U' - f.write(''','%s',0,0''' % myu) - f.write(')\n') + myu = "U" + f.write(f""",'{myu}',0,0""") + f.write(")\n") + if __name__ == "__main__": main() diff --git a/target/hexagon/gen_opcodes_def.py b/target/hexagon/gen_opcodes_def.py index fa604a8db9..cddd868fe3 100755 --- a/target/hexagon/gen_opcodes_def.py +++ b/target/hexagon/gen_opcodes_def.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -22,15 +22,17 @@ import re import string import hex_common + def main(): hex_common.read_semantics_file(sys.argv[1]) ## ## Generate a list of all the opcodes ## - with open(sys.argv[3], 'w') as f: + with open(sys.argv[3], "w") as f: for tag in hex_common.tags: - f.write ( "OPCODE(%s),\n" % (tag) ) + f.write(f"OPCODE({tag}),\n") + if __name__ == "__main__": main() diff --git a/target/hexagon/gen_printinsn.py b/target/hexagon/gen_printinsn.py index 12737bf8a0..e570bd7c6a 100755 --- a/target/hexagon/gen_printinsn.py +++ b/target/hexagon/gen_printinsn.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -22,24 +22,26 @@ import re import string import hex_common + ## ## Generate data for printing each instruction (format string + operands) ## def regprinter(m): str = m.group(1) - str += ":".join(["%d"]*len(m.group(2))) + str += ":".join(["%d"] * len(m.group(2))) str += m.group(3) - if ('S' in m.group(1)) and (len(m.group(2)) == 1): + if ("S" in m.group(1)) and (len(m.group(2)) == 1): str += "/%s" - elif ('C' in m.group(1)) and (len(m.group(2)) == 1): + elif ("C" in m.group(1)) and (len(m.group(2)) == 1): str += "/%s" return str + def spacify(s): # Regular expression that matches any operator that contains '=' character: - opswithequal_re = '[-+^&|!<>=]?=' + opswithequal_re = "[-+^&|!<>=]?=" # Regular expression that matches any assignment operator. - assignment_re = '[-+^&|]?=' + assignment_re = "[-+^&|]?=" # Out of the operators that contain the = sign, if the operator is also an # assignment, spaces will be added around it, unless it's enclosed within @@ -54,9 +56,9 @@ def spacify(s): pc = 0 while i < slen: c = s[i] - if c == '(': + if c == "(": pc += 1 - elif c == ')': + elif c == ")": pc -= 1 paren_count[i] = pc i += 1 @@ -76,31 +78,33 @@ def spacify(s): if paren_count[ms] == 0: # Check if the entire string t is an assignment. am = assign.match(t) - if am and len(am.group(0)) == me-ms: + if am and len(am.group(0)) == me - ms: # Don't add spaces if they are already there. - if ms > 0 and s[ms-1] != ' ': - out.append(' ') + if ms > 0 and s[ms - 1] != " ": + out.append(" ") out += t - if me < slen and s[me] != ' ': - out.append(' ') + if me < slen and s[me] != " ": + out.append(" ") continue # If this is not an assignment, just append it to the output # string. out += t # Append the remaining part of the string. 
- out += s[pos:len(s)] - return ''.join(out) + out += s[pos : len(s)] + return "".join(out) + def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) - immext_casere = re.compile(r'IMMEXT\(([A-Za-z])') + immext_casere = re.compile(r"IMMEXT\(([A-Za-z])") - with open(sys.argv[3], 'w') as f: + with open(sys.argv[3], "w") as f: for tag in hex_common.tags: - if not hex_common.behdict[tag]: continue + if not hex_common.behdict[tag]: + continue extendable_upper_imm = False extendable_lower_imm = False m = immext_casere.search(hex_common.semdict[tag]) @@ -110,46 +114,45 @@ def main(): else: extendable_lower_imm = True beh = hex_common.behdict[tag] - beh = hex_common.regre.sub(regprinter,beh) - beh = hex_common.absimmre.sub(r"#%s0x%x",beh) - beh = hex_common.relimmre.sub(r"PC+%s%d",beh) + beh = hex_common.regre.sub(regprinter, beh) + beh = hex_common.absimmre.sub(r"#%s0x%x", beh) + beh = hex_common.relimmre.sub(r"PC+%s%d", beh) beh = spacify(beh) # Print out a literal "%s" at the end, used to match empty string # so C won't complain at us - if ("A_VECX" in hex_common.attribdict[tag]): + if "A_VECX" in hex_common.attribdict[tag]: macname = "DEF_VECX_PRINTINFO" - else: macname = "DEF_PRINTINFO" - f.write('%s(%s,"%s%%s"' % (macname,tag,beh)) - regs_or_imms = \ - hex_common.reg_or_immre.findall(hex_common.behdict[tag]) + else: + macname = "DEF_PRINTINFO" + f.write(f'{macname}({tag},"{beh}%s"') + regs_or_imms = hex_common.reg_or_immre.findall(hex_common.behdict[tag]) ri = 0 seenregs = {} - for allregs,a,b,c,d,allimm,immlett,bits,immshift in regs_or_imms: + for allregs, a, b, c, d, allimm, immlett, bits, immshift in regs_or_imms: if a: - #register + # register if b in seenregs: regno = seenregs[b] else: regno = ri if len(b) == 1: - f.write(', insn->regno[%d]' % regno) - if 'S' in a: - f.write(', sreg2str(insn->regno[%d])' % regno) - elif 'C' in a: - f.write(', creg2str(insn->regno[%d])' % regno) + f.write(f", insn->regno[{regno}]") + if "S" in a: + f.write(f", sreg2str(insn->regno[{regno}])") + elif "C" in a: + f.write(f", creg2str(insn->regno[{regno}])") elif len(b) == 2: - f.write(', insn->regno[%d] + 1, insn->regno[%d]' % \ - (regno,regno)) + f.write(f", insn->regno[{regno}] + 1" f", insn->regno[{regno}]") else: print("Put some stuff to handle quads here") if b not in seenregs: seenregs[b] = ri ri += 1 else: - #immediate - if (immlett.isupper()): + # immediate + if immlett.isupper(): if extendable_upper_imm: - if immlett in 'rR': + if immlett in "rR": f.write(',insn->extension_valid?"##":""') else: f.write(',insn->extension_valid?"#":""') @@ -158,16 +161,17 @@ def main(): ii = 1 else: if extendable_lower_imm: - if immlett in 'rR': + if immlett in "rR": f.write(',insn->extension_valid?"##":""') else: f.write(',insn->extension_valid?"#":""') else: f.write(',""') ii = 0 - f.write(', insn->immed[%d]' % ii) + f.write(f", insn->immed[{ii}]") # append empty string so there is at least one more arg f.write(',"")\n') + if __name__ == "__main__": main() diff --git a/target/hexagon/gen_shortcode.py b/target/hexagon/gen_shortcode.py index 9b589d0189..deb94446c4 100755 --- a/target/hexagon/gen_shortcode.py +++ b/target/hexagon/gen_shortcode.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
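Reviewer note: the reflow of spacify() above is behavior-preserving; the function pads assignment operators that sit at parenthesis depth zero and leaves comparison operators, and anything inside parentheses, untouched. A quick sanity check of that behavior, assuming gen_printinsn.py is importable from the current directory (safe, since its main() only runs under the `__main__` guard):

```python
from gen_printinsn import spacify

# Assignment operators at paren depth 0 are padded with spaces...
assert spacify("Rd=add(Rs,Rt)") == "Rd = add(Rs,Rt)"
assert spacify("Rx+=mpyi(Rs,Rt)") == "Rx += mpyi(Rs,Rt)"
# ...while '==' inside parentheses is left alone.
assert spacify("Pd=(Rs==Rt)") == "Pd = (Rs==Rt)"
```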
## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -22,8 +22,10 @@ import re import string import hex_common + def gen_shortcode(f, tag): - f.write('DEF_SHORTCODE(%s, %s)\n' % (tag, hex_common.semdict[tag])) + f.write(f"DEF_SHORTCODE({tag}, {hex_common.semdict[tag]})\n") + def main(): hex_common.read_semantics_file(sys.argv[1]) @@ -32,29 +34,30 @@ def main(): tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - with open(sys.argv[3], 'w') as f: + with open(sys.argv[3], "w") as f: f.write("#ifndef DEF_SHORTCODE\n") f.write("#define DEF_SHORTCODE(TAG,SHORTCODE) /* Nothing */\n") f.write("#endif\n") for tag in hex_common.tags: ## Skip the priv instructions - if ( "A_PRIV" in hex_common.attribdict[tag] ) : + if "A_PRIV" in hex_common.attribdict[tag]: continue ## Skip the guest instructions - if ( "A_GUEST" in hex_common.attribdict[tag] ) : + if "A_GUEST" in hex_common.attribdict[tag]: continue ## Skip the diag instructions - if ( tag == "Y6_diag" ) : + if tag == "Y6_diag": continue - if ( tag == "Y6_diag0" ) : + if tag == "Y6_diag0": continue - if ( tag == "Y6_diag1" ) : + if tag == "Y6_diag1": continue gen_shortcode(f, tag) f.write("#undef DEF_SHORTCODE\n") + if __name__ == "__main__": main() diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h index 19697b42a5..d78d99d155 100644 --- a/target/hexagon/gen_tcg.h +++ b/target/hexagon/gen_tcg.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -77,7 +77,6 @@ tcg_gen_mov_tl(EA, RxV); \ gen_read_ireg(ireg, MuV, (SHIFT)); \ gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \ - tcg_temp_free(ireg); \ } while (0) /* Instructions with multiple definitions */ @@ -116,7 +115,6 @@ gen_read_ireg(ireg, MuV, SHIFT); \ gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \ LOAD; \ - tcg_temp_free(ireg); \ } while (0) #define fGEN_TCG_L2_loadrub_pcr(SHORTCODE) \ @@ -168,8 +166,6 @@ for (int i = 0; i < 2; i++) { \ gen_set_half(i, RdV, gen_get_byte(byte, i, tmp, (SIGN))); \ } \ - tcg_temp_free(tmp); \ - tcg_temp_free(byte); \ } while (0) #define fGEN_TCG_L2_loadbzw2_io(SHORTCODE) \ @@ -222,8 +218,6 @@ for (int i = 0; i < 4; i++) { \ gen_set_half_i64(i, RddV, gen_get_byte(byte, i, tmp, (SIGN))); \ } \ - tcg_temp_free(tmp); \ - tcg_temp_free(byte); \ } while (0) #define fGEN_TCG_L2_loadbzw4_io(SHORTCODE) \ @@ -273,8 +267,6 @@ tcg_gen_extu_i32_i64(tmp_i64, tmp); \ tcg_gen_shri_i64(RyyV, RyyV, 16); \ tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 48, 16); \ - tcg_temp_free(tmp); \ - tcg_temp_free_i64(tmp_i64); \ } while (0) #define fGEN_TCG_L4_loadalignh_ur(SHORTCODE) \ @@ -304,8 +296,6 @@ tcg_gen_extu_i32_i64(tmp_i64, tmp); \ tcg_gen_shri_i64(RyyV, RyyV, 8); \ tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 56, 8); \ - tcg_temp_free(tmp); \ - tcg_temp_free_i64(tmp_i64); \ } while (0) #define fGEN_TCG_L2_loadalignb_io(SHORTCODE) \ @@ -337,17 +327,14 @@ */ #define fGEN_TCG_PRED_LOAD(GET_EA, PRED, SIZE, SIGN) \ do { \ - TCGv LSB = tcg_temp_local_new(); \ + TCGv LSB = tcg_temp_new(); \ TCGLabel *label = gen_new_label(); \ tcg_gen_movi_tl(EA, 0); \ PRED; \ CHECK_NOSHUF_PRED(GET_EA, SIZE, LSB); \ - PRED_LOAD_CANCEL(LSB, EA); \ - 
tcg_gen_movi_tl(RdV, 0); \ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \ fLOAD(1, SIZE, SIGN, EA, RdV); \ gen_set_label(label); \ - tcg_temp_free(LSB); \ } while (0) #define fGEN_TCG_L2_ploadrubt_pi(SHORTCODE) \ @@ -397,17 +384,14 @@ /* Predicated loads into a register pair */ #define fGEN_TCG_PRED_LOAD_PAIR(GET_EA, PRED) \ do { \ - TCGv LSB = tcg_temp_local_new(); \ + TCGv LSB = tcg_temp_new(); \ TCGLabel *label = gen_new_label(); \ tcg_gen_movi_tl(EA, 0); \ PRED; \ CHECK_NOSHUF_PRED(GET_EA, 8, LSB); \ - PRED_LOAD_CANCEL(LSB, EA); \ - tcg_gen_movi_i64(RddV, 0); \ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \ fLOAD(1, 8, u, EA, RddV); \ gen_set_label(label); \ - tcg_temp_free(LSB); \ } while (0) #define fGEN_TCG_L2_ploadrdt_pi(SHORTCODE) \ @@ -431,25 +415,20 @@ #define fGEN_TCG_STORE(SHORTCODE) \ do { \ - TCGv HALF = tcg_temp_new(); \ - TCGv BYTE = tcg_temp_new(); \ + TCGv HALF G_GNUC_UNUSED = tcg_temp_new(); \ + TCGv BYTE G_GNUC_UNUSED = tcg_temp_new(); \ SHORTCODE; \ - tcg_temp_free(HALF); \ - tcg_temp_free(BYTE); \ } while (0) #define fGEN_TCG_STORE_pcr(SHIFT, STORE) \ do { \ TCGv ireg = tcg_temp_new(); \ - TCGv HALF = tcg_temp_new(); \ - TCGv BYTE = tcg_temp_new(); \ + TCGv HALF G_GNUC_UNUSED = tcg_temp_new(); \ + TCGv BYTE G_GNUC_UNUSED = tcg_temp_new(); \ tcg_gen_mov_tl(EA, RxV); \ gen_read_ireg(ireg, MuV, SHIFT); \ gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \ STORE; \ - tcg_temp_free(ireg); \ - tcg_temp_free(HALF); \ - tcg_temp_free(BYTE); \ } while (0) #define fGEN_TCG_S2_storerb_pbr(SHORTCODE) \ @@ -508,6 +487,104 @@ #define fGEN_TCG_S2_storerinew_pcr(SHORTCODE) \ fGEN_TCG_STORE_pcr(2, fSTORE(1, 4, EA, NtN)) +/* dczeroa clears the 32 byte cache line at the address given */ +#define fGEN_TCG_Y2_dczeroa(SHORTCODE) SHORTCODE + +/* In linux-user mode, these are not modelled, suppress compiler warning */ +#define fGEN_TCG_Y2_dcinva(SHORTCODE) \ + do { RsV = RsV; } while (0) +#define fGEN_TCG_Y2_dccleaninva(SHORTCODE) \ + do { RsV = RsV; } while (0) +#define fGEN_TCG_Y2_dccleana(SHORTCODE) \ + do { RsV = RsV; } while (0) +#define fGEN_TCG_Y2_icinva(SHORTCODE) \ + do { RsV = RsV; } while (0) + +/* + * allocframe(#uiV) + * RxV == r29 + */ +#define fGEN_TCG_S2_allocframe(SHORTCODE) \ + gen_allocframe(ctx, RxV, uiV) + +/* sub-instruction version (no RxV, so handle it manually) */ +#define fGEN_TCG_SS2_allocframe(SHORTCODE) \ + do { \ + TCGv r29 = tcg_temp_new(); \ + tcg_gen_mov_tl(r29, hex_gpr[HEX_REG_SP]); \ + gen_allocframe(ctx, r29, uiV); \ + gen_log_reg_write(ctx, HEX_REG_SP, r29); \ + } while (0) + +/* + * Rdd32 = deallocframe(Rs32):raw + * RddV == r31:30 + * RsV == r30 + */ +#define fGEN_TCG_L2_deallocframe(SHORTCODE) \ + gen_deallocframe(ctx, RddV, RsV) + +/* sub-instruction version (no RddV/RsV, so handle it manually) */ +#define fGEN_TCG_SL2_deallocframe(SHORTCODE) \ + do { \ + TCGv_i64 r31_30 = tcg_temp_new_i64(); \ + gen_deallocframe(ctx, r31_30, hex_gpr[HEX_REG_FP]); \ + gen_log_reg_write_pair(ctx, HEX_REG_FP, r31_30); \ + } while (0) + +/* + * dealloc_return + * Assembler mapped to + * r31:30 = dealloc_return(r30):raw + */ +#define fGEN_TCG_L4_return(SHORTCODE) \ + gen_return(ctx, RddV, RsV) + +/* + * sub-instruction version (no RddV, so handle it manually) + */ +#define fGEN_TCG_SL2_return(SHORTCODE) \ + do { \ + TCGv_i64 RddV = get_result_gpr_pair(ctx, HEX_REG_FP); \ + gen_return(ctx, RddV, hex_gpr[HEX_REG_FP]); \ + gen_log_reg_write_pair(ctx, HEX_REG_FP, RddV); \ + } while (0) + +/* + * Conditional returns follow this naming convention + * 
_t predicate true + * _f predicate false + * _tnew_pt predicate.new true predict taken + * _fnew_pt predicate.new false predict taken + * _tnew_pnt predicate.new true predict not taken + * _fnew_pnt predicate.new false predict not taken + * Predictions are not modelled in QEMU + * + * Example: + * if (p1) r31:30 = dealloc_return(r30):raw + */ +#define fGEN_TCG_L4_return_t(SHORTCODE) \ + gen_cond_return(ctx, RddV, RsV, PvV, TCG_COND_EQ); +#define fGEN_TCG_L4_return_f(SHORTCODE) \ + gen_cond_return(ctx, RddV, RsV, PvV, TCG_COND_NE) +#define fGEN_TCG_L4_return_tnew_pt(SHORTCODE) \ + gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_EQ) +#define fGEN_TCG_L4_return_fnew_pt(SHORTCODE) \ + gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_NE) +#define fGEN_TCG_L4_return_tnew_pnt(SHORTCODE) \ + gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_EQ) +#define fGEN_TCG_L4_return_fnew_pnt(SHORTCODE) \ + gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_NE) + +#define fGEN_TCG_SL2_return_t(SHORTCODE) \ + gen_cond_return_subinsn(ctx, TCG_COND_EQ, hex_pred[0]) +#define fGEN_TCG_SL2_return_f(SHORTCODE) \ + gen_cond_return_subinsn(ctx, TCG_COND_NE, hex_pred[0]) +#define fGEN_TCG_SL2_return_tnew(SHORTCODE) \ + gen_cond_return_subinsn(ctx, TCG_COND_EQ, ctx->new_pred_value[0]) +#define fGEN_TCG_SL2_return_fnew(SHORTCODE) \ + gen_cond_return_subinsn(ctx, TCG_COND_NE, ctx->new_pred_value[0]) + /* * Mathematical operations with more than one definition require * special handling @@ -515,7 +592,16 @@ #define fGEN_TCG_A5_ACS(SHORTCODE) \ do { \ gen_helper_vacsh_pred(PeV, cpu_env, RxxV, RssV, RttV); \ - gen_helper_vacsh_val(RxxV, cpu_env, RxxV, RssV, RttV); \ + gen_helper_vacsh_val(RxxV, cpu_env, RxxV, RssV, RttV, \ + tcg_constant_tl(ctx->need_commit)); \ + } while (0) + +#define fGEN_TCG_S2_cabacdecbin(SHORTCODE) \ + do { \ + TCGv p0 = tcg_temp_new(); \ + gen_helper_cabacdecbin_pred(p0, RssV, RttV); \ + gen_helper_cabacdecbin_val(RddV, RssV, RttV); \ + gen_log_pred_write(ctx, 0, p0); \ } while (0) /* @@ -531,7 +617,6 @@ gen_helper_sfrecipa(tmp, cpu_env, RsV, RtV); \ tcg_gen_extrh_i64_i32(RdV, tmp); \ tcg_gen_extrl_i64_i32(PeV, tmp); \ - tcg_temp_free_i64(tmp); \ } while (0) /* @@ -547,7 +632,6 @@ gen_helper_sfinvsqrta(tmp, cpu_env, RsV); \ tcg_gen_extrh_i64_i32(RdV, tmp); \ tcg_gen_extrl_i64_i32(PeV, tmp); \ - tcg_temp_free_i64(tmp); \ } while (0) /* @@ -565,7 +649,6 @@ tcg_gen_add2_i64(RddV, carry, RddV, carry, RttV, zero); \ tcg_gen_extrl_i64_i32(PxV, carry); \ gen_8bitsof(PxV, PxV); \ - tcg_temp_free_i64(carry); \ } while (0) /* r5:4 = sub(r1:0, r3:2, p1):carry */ @@ -581,8 +664,6 @@ tcg_gen_add2_i64(RddV, carry, RddV, carry, not_RttV, zero); \ tcg_gen_extrl_i64_i32(PxV, carry); \ gen_8bitsof(PxV, PxV); \ - tcg_temp_free_i64(carry); \ - tcg_temp_free_i64(not_RttV); \ } while (0) /* @@ -607,21 +688,51 @@ tcg_gen_umin_tl(tmp, left, right); \ gen_set_byte_i64(i, RddV, tmp); \ } \ - tcg_temp_free(left); \ - tcg_temp_free(right); \ - tcg_temp_free(tmp); \ } while (0) #define fGEN_TCG_J2_call(SHORTCODE) \ gen_call(ctx, riV) +#define fGEN_TCG_J2_callr(SHORTCODE) \ + gen_callr(ctx, RsV) +#define fGEN_TCG_J2_callrh(SHORTCODE) \ + gen_callr(ctx, RsV) #define fGEN_TCG_J2_callt(SHORTCODE) \ gen_cond_call(ctx, PuV, TCG_COND_EQ, riV) #define fGEN_TCG_J2_callf(SHORTCODE) \ gen_cond_call(ctx, PuV, TCG_COND_NE, riV) +#define fGEN_TCG_J2_callrt(SHORTCODE) \ + gen_cond_callr(ctx, TCG_COND_EQ, PuV, RsV) +#define fGEN_TCG_J2_callrf(SHORTCODE) \ + gen_cond_callr(ctx, TCG_COND_NE, PuV, RsV) + +#define fGEN_TCG_J2_loop0r(SHORTCODE) \ + gen_loop0r(ctx, 
RsV, riV) +#define fGEN_TCG_J2_loop1r(SHORTCODE) \ + gen_loop1r(ctx, RsV, riV) +#define fGEN_TCG_J2_loop0i(SHORTCODE) \ + gen_loop0i(ctx, UiV, riV) +#define fGEN_TCG_J2_loop1i(SHORTCODE) \ + gen_loop1i(ctx, UiV, riV) +#define fGEN_TCG_J2_ploop1sr(SHORTCODE) \ + gen_ploopNsr(ctx, 1, RsV, riV) +#define fGEN_TCG_J2_ploop1si(SHORTCODE) \ + gen_ploopNsi(ctx, 1, UiV, riV) +#define fGEN_TCG_J2_ploop2sr(SHORTCODE) \ + gen_ploopNsr(ctx, 2, RsV, riV) +#define fGEN_TCG_J2_ploop2si(SHORTCODE) \ + gen_ploopNsi(ctx, 2, UiV, riV) +#define fGEN_TCG_J2_ploop3sr(SHORTCODE) \ + gen_ploopNsr(ctx, 3, RsV, riV) +#define fGEN_TCG_J2_ploop3si(SHORTCODE) \ + gen_ploopNsi(ctx, 3, UiV, riV) #define fGEN_TCG_J2_endloop0(SHORTCODE) \ gen_endloop0(ctx) +#define fGEN_TCG_J2_endloop1(SHORTCODE) \ + gen_endloop1(ctx) +#define fGEN_TCG_J2_endloop01(SHORTCODE) \ + gen_endloop01(ctx) /* * Compound compare and jump instructions @@ -800,10 +911,20 @@ #define fGEN_TCG_J4_tstbit0_fp1_jump_t(SHORTCODE) \ gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_NE, riV) +/* p0 = cmp.eq(r0, #7) */ +#define fGEN_TCG_SA1_cmpeqi(SHORTCODE) \ + do { \ + TCGv p0 = tcg_temp_new(); \ + gen_comparei(TCG_COND_EQ, p0, RsV, uiV); \ + gen_log_pred_write(ctx, 0, p0); \ + } while (0) + #define fGEN_TCG_J2_jump(SHORTCODE) \ gen_jump(ctx, riV) #define fGEN_TCG_J2_jumpr(SHORTCODE) \ gen_jumpr(ctx, RsV) +#define fGEN_TCG_J2_jumprh(SHORTCODE) \ + gen_jumpr(ctx, RsV) #define fGEN_TCG_J4_jumpseti(SHORTCODE) \ do { \ tcg_gen_movi_tl(RdV, UiV); \ @@ -815,14 +936,12 @@ TCGv LSB = tcg_temp_new(); \ COND; \ gen_cond_jump(ctx, TCG_COND_EQ, LSB, riV); \ - tcg_temp_free(LSB); \ } while (0) #define fGEN_TCG_cond_jumpf(COND) \ do { \ TCGv LSB = tcg_temp_new(); \ COND; \ gen_cond_jump(ctx, TCG_COND_NE, LSB, riV); \ - tcg_temp_free(LSB); \ } while (0) #define fGEN_TCG_J2_jumpt(SHORTCODE) \ @@ -863,14 +982,12 @@ TCGv LSB = tcg_temp_new(); \ COND; \ gen_cond_jumpr(ctx, RsV, TCG_COND_EQ, LSB); \ - tcg_temp_free(LSB); \ } while (0) #define fGEN_TCG_cond_jumprf(COND) \ do { \ TCGv LSB = tcg_temp_new(); \ COND; \ gen_cond_jumpr(ctx, RsV, TCG_COND_NE, LSB); \ - tcg_temp_free(LSB); \ } while (0) #define fGEN_TCG_J2_jumprt(SHORTCODE) \ @@ -1001,6 +1118,22 @@ gen_jump(ctx, riV); \ } while (0) +/* if (p0.new) r0 = #0 */ +#define fGEN_TCG_SA1_clrtnew(SHORTCODE) \ + do { \ + tcg_gen_movcond_tl(TCG_COND_EQ, RdV, \ + ctx->new_pred_value[0], tcg_constant_tl(0), \ + RdV, tcg_constant_tl(0)); \ + } while (0) + +/* if (!p0.new) r0 = #0 */ +#define fGEN_TCG_SA1_clrfnew(SHORTCODE) \ + do { \ + tcg_gen_movcond_tl(TCG_COND_NE, RdV, \ + ctx->new_pred_value[0], tcg_constant_tl(0), \ + RdV, tcg_constant_tl(0)); \ + } while (0) + #define fGEN_TCG_J2_pause(SHORTCODE) \ do { \ uiV = uiV; \ @@ -1009,11 +1142,66 @@ /* r0 = asr(r1, r2):sat */ #define fGEN_TCG_S2_asr_r_r_sat(SHORTCODE) \ - gen_asr_r_r_sat(RdV, RsV, RtV) + gen_asr_r_r_sat(ctx, RdV, RsV, RtV) /* r0 = asl(r1, r2):sat */ #define fGEN_TCG_S2_asl_r_r_sat(SHORTCODE) \ - gen_asl_r_r_sat(RdV, RsV, RtV) + gen_asl_r_r_sat(ctx, RdV, RsV, RtV) + +#define fGEN_TCG_SL2_jumpr31(SHORTCODE) \ + gen_jumpr(ctx, hex_gpr[HEX_REG_LR]) + +#define fGEN_TCG_SL2_jumpr31_t(SHORTCODE) \ + gen_cond_jumpr31(ctx, TCG_COND_EQ, hex_pred[0]) +#define fGEN_TCG_SL2_jumpr31_f(SHORTCODE) \ + gen_cond_jumpr31(ctx, TCG_COND_NE, hex_pred[0]) + +#define fGEN_TCG_SL2_jumpr31_tnew(SHORTCODE) \ + gen_cond_jumpr31(ctx, TCG_COND_EQ, ctx->new_pred_value[0]) +#define fGEN_TCG_SL2_jumpr31_fnew(SHORTCODE) \ + gen_cond_jumpr31(ctx, TCG_COND_NE, ctx->new_pred_value[0]) + +/* Count trailing 
zeros/ones */ +#define fGEN_TCG_S2_ct0(SHORTCODE) \ + do { \ + tcg_gen_ctzi_tl(RdV, RsV, 32); \ + } while (0) +#define fGEN_TCG_S2_ct1(SHORTCODE) \ + do { \ + tcg_gen_not_tl(RdV, RsV); \ + tcg_gen_ctzi_tl(RdV, RdV, 32); \ + } while (0) +#define fGEN_TCG_S2_ct0p(SHORTCODE) \ + do { \ + TCGv_i64 tmp = tcg_temp_new_i64(); \ + tcg_gen_ctzi_i64(tmp, RssV, 64); \ + tcg_gen_extrl_i64_i32(RdV, tmp); \ + } while (0) +#define fGEN_TCG_S2_ct1p(SHORTCODE) \ + do { \ + TCGv_i64 tmp = tcg_temp_new_i64(); \ + tcg_gen_not_i64(tmp, RssV); \ + tcg_gen_ctzi_i64(tmp, tmp, 64); \ + tcg_gen_extrl_i64_i32(RdV, tmp); \ + } while (0) + +#define fGEN_TCG_S2_insert(SHORTCODE) \ + do { \ + int width = uiV; \ + int offset = UiV; \ + if (width != 0) { \ + if (offset + width > 32) { \ + width = 32 - offset; \ + } \ + tcg_gen_deposit_tl(RxV, RxV, RsV, offset, width); \ + } \ + } while (0) +#define fGEN_TCG_S2_insert_rp(SHORTCODE) \ + gen_insert_rp(ctx, RxV, RsV, RttV) +#define fGEN_TCG_S2_asr_r_svw_trun(SHORTCODE) \ + gen_asr_r_svw_trun(ctx, RdV, RssV, RtV) +#define fGEN_TCG_A2_swiz(SHORTCODE) \ + tcg_gen_bswap_tl(RdV, RsV) /* Floating point */ #define fGEN_TCG_F2_conv_sf2df(SHORTCODE) \ @@ -1144,6 +1332,35 @@ do { \ RsV = RsV; \ } while (0) +#define fGEN_TCG_Y2_isync(SHORTCODE) \ + do { } while (0) +#define fGEN_TCG_Y2_barrier(SHORTCODE) \ + do { } while (0) +#define fGEN_TCG_Y2_syncht(SHORTCODE) \ + do { } while (0) +#define fGEN_TCG_Y2_dcfetchbo(SHORTCODE) \ + do { \ + RsV = RsV; \ + uiV = uiV; \ + } while (0) + +#define fGEN_TCG_L2_loadw_aq(SHORTCODE) SHORTCODE +#define fGEN_TCG_L4_loadd_aq(SHORTCODE) SHORTCODE + +/* Nothing to do for these in qemu, need to suppress compiler warnings */ +#define fGEN_TCG_R6_release_at_vi(SHORTCODE) \ + do { \ + RsV = RsV; \ + } while (0) +#define fGEN_TCG_R6_release_st_vi(SHORTCODE) \ + do { \ + RsV = RsV; \ + } while (0) + +#define fGEN_TCG_S2_storew_rl_at_vi(SHORTCODE) SHORTCODE +#define fGEN_TCG_S4_stored_rl_at_vi(SHORTCODE) SHORTCODE +#define fGEN_TCG_S2_storew_rl_st_vi(SHORTCODE) SHORTCODE +#define fGEN_TCG_S4_stored_rl_st_vi(SHORTCODE) SHORTCODE #define fGEN_TCG_J2_trap0(SHORTCODE) \ do { \ diff --git a/target/hexagon/gen_tcg_func_table.py b/target/hexagon/gen_tcg_func_table.py index 4809d3273e..f998ef0992 100755 --- a/target/hexagon/gen_tcg_func_table.py +++ b/target/hexagon/gen_tcg_func_table.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
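Reviewer note: the new fGEN_TCG_S2_insert override clamps the deposit field before calling tcg_gen_deposit_tl, which requires the field to fit inside the register. A pure-Python model of those semantics under that reading (the function name and test values are illustrative only):

```python
# Deposit the low `width` bits of rs into rx at `offset`, clamped to 32 bits.
def s2_insert(rx, rs, width, offset):
    if width == 0:
        return rx                    # zero-width insert leaves Rx unchanged
    width = min(width, 32 - offset)  # mirrors: if (offset + width > 32) ...
    mask = (1 << width) - 1
    return ((rx & ~(mask << offset)) | ((rs & mask) << offset)) & 0xFFFFFFFF

assert s2_insert(0x00000000, 0xF, 4, 8) == 0x00000F00
assert s2_insert(0xFFFFFFFF, 0x0, 4, 8) == 0xFFFFF0FF
```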
## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -22,6 +22,7 @@ import re import string import hex_common + def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) @@ -29,30 +30,31 @@ def main(): tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - with open(sys.argv[3], 'w') as f: + with open(sys.argv[3], "w") as f: f.write("#ifndef HEXAGON_FUNC_TABLE_H\n") f.write("#define HEXAGON_FUNC_TABLE_H\n\n") f.write("const SemanticInsn opcode_genptr[XX_LAST_OPCODE] = {\n") for tag in hex_common.tags: ## Skip the priv instructions - if ( "A_PRIV" in hex_common.attribdict[tag] ) : + if "A_PRIV" in hex_common.attribdict[tag]: continue ## Skip the guest instructions - if ( "A_GUEST" in hex_common.attribdict[tag] ) : + if "A_GUEST" in hex_common.attribdict[tag]: continue ## Skip the diag instructions - if ( tag == "Y6_diag" ) : + if tag == "Y6_diag": continue - if ( tag == "Y6_diag0" ) : + if tag == "Y6_diag0": continue - if ( tag == "Y6_diag1" ) : + if tag == "Y6_diag1": continue - f.write(" [%s] = generate_%s,\n" % (tag, tag)) + f.write(f" [{tag}] = generate_{tag},\n") f.write("};\n\n") f.write("#endif /* HEXAGON_FUNC_TABLE_H */\n") + if __name__ == "__main__": main() diff --git a/target/hexagon/gen_tcg_funcs.py b/target/hexagon/gen_tcg_funcs.py index 7e8ba17ca2..fe29d83d4d 100755 --- a/target/hexagon/gen_tcg_funcs.py +++ b/target/hexagon/gen_tcg_funcs.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -22,568 +22,469 @@ import re import string import hex_common + ## ## Helpers for gen_tcg_func ## def gen_decl_ea_tcg(f, tag): - if ('A_CONDEXEC' in hex_common.attribdict[tag] or - 'A_LOAD' in hex_common.attribdict[tag]): - f.write(" TCGv EA = tcg_temp_local_new();\n") - else: - f.write(" TCGv EA = tcg_temp_new();\n") + f.write(" TCGv EA G_GNUC_UNUSED = tcg_temp_new();\n") -def gen_free_ea_tcg(f): - f.write(" tcg_temp_free(EA);\n") def genptr_decl_pair_writable(f, tag, regtype, regid, regno): - regN="%s%sN" % (regtype,regid) - f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \ - (regtype, regid)) - if (regtype == "C"): - f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \ - (regN, regno)) + regN = f"{regtype}{regid}N" + if regtype == "R": + f.write(f" const int {regN} = insn->regno[{regno}];\n") + elif regtype == "C": + f.write(f" const int {regN} = insn->regno[{regno}] + HEX_REG_SA0;\n") else: - f.write(" const int %s = insn->regno[%d];\n" % (regN, regno)) - if ('A_CONDEXEC' in hex_common.attribdict[tag]): - f.write(" if (!is_preloaded(ctx, %s)) {\n" % regN) - f.write(" tcg_gen_mov_tl(hex_new_value[%s], hex_gpr[%s]);\n" % \ - (regN, regN)) - f.write(" }\n") - f.write(" if (!is_preloaded(ctx, %s + 1)) {\n" % regN) - f.write(" tcg_gen_mov_tl(hex_new_value[%s + 1], hex_gpr[%s + 1]);\n" % \ - (regN, regN)) - f.write(" }\n") + hex_common.bad_register(regtype, regid) + f.write(f" TCGv_i64 {regtype}{regid}V = " f"get_result_gpr_pair(ctx, {regN});\n") + def genptr_decl_writable(f, tag, regtype, regid, regno): - regN="%s%sN" % (regtype,regid) - f.write(" TCGv %s%sV = tcg_temp_local_new();\n" % \ - (regtype, regid)) - if (regtype == 
"C"): - f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \ - (regN, regno)) + regN = f"{regtype}{regid}N" + if regtype == "R": + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" TCGv {regtype}{regid}V = get_result_gpr(ctx, {regN});\n") + elif regtype == "C": + f.write(f" const int {regN} = insn->regno[{regno}] + HEX_REG_SA0;\n") + f.write(f" TCGv {regtype}{regid}V = get_result_gpr(ctx, {regN});\n") + elif regtype == "P": + f.write(f" const int {regN} = insn->regno[{regno}];\n") + f.write(f" TCGv {regtype}{regid}V = tcg_temp_new();\n") else: - f.write(" const int %s = insn->regno[%d];\n" % (regN, regno)) - if ('A_CONDEXEC' in hex_common.attribdict[tag]): - f.write(" if (!is_preloaded(ctx, %s)) {\n" % regN) - f.write(" tcg_gen_mov_tl(hex_new_value[%s], hex_gpr[%s]);\n" % \ - (regN, regN)) - f.write(" }\n") + hex_common.bad_register(regtype, regid) + def genptr_decl(f, tag, regtype, regid, regno): - regN="%s%sN" % (regtype,regid) - if (regtype == "R"): - if (regid in {"ss", "tt"}): - f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \ - (regtype, regid)) - f.write(" const int %s = insn->regno[%d];\n" % \ - (regN, regno)) - elif (regid in {"dd", "ee", "xx", "yy"}): + regN = f"{regtype}{regid}N" + if regtype == "R": + if regid in {"ss", "tt"}: + f.write(f" TCGv_i64 {regtype}{regid}V = tcg_temp_new_i64();\n") + f.write(f" const int {regN} = insn->regno[{regno}];\n") + elif regid in {"dd", "ee", "xx", "yy"}: genptr_decl_pair_writable(f, tag, regtype, regid, regno) - elif (regid in {"s", "t", "u", "v"}): - f.write(" TCGv %s%sV = hex_gpr[insn->regno[%d]];\n" % \ - (regtype, regid, regno)) - elif (regid in {"d", "e", "x", "y"}): + elif regid in {"s", "t", "u", "v"}: + f.write( + f" TCGv {regtype}{regid}V = " f"hex_gpr[insn->regno[{regno}]];\n" + ) + elif regid in {"d", "e", "x", "y"}: genptr_decl_writable(f, tag, regtype, regid, regno) else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "P"): - if (regid in {"s", "t", "u", "v"}): - f.write(" TCGv %s%sV = hex_pred[insn->regno[%d]];\n" % \ - (regtype, regid, regno)) - elif (regid in {"d", "e", "x"}): + hex_common.bad_register(regtype, regid) + elif regtype == "P": + if regid in {"s", "t", "u", "v"}: + f.write( + f" TCGv {regtype}{regid}V = " f"hex_pred[insn->regno[{regno}]];\n" + ) + elif regid in {"d", "e", "x"}: genptr_decl_writable(f, tag, regtype, regid, regno) else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "C"): - if (regid == "ss"): - f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \ - (regtype, regid)) - f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \ - (regN, regno)) - elif (regid == "dd"): + hex_common.bad_register(regtype, regid) + elif regtype == "C": + if regid == "ss": + f.write(f" TCGv_i64 {regtype}{regid}V = " f"tcg_temp_new_i64();\n") + f.write(f" const int {regN} = insn->regno[{regno}] + " "HEX_REG_SA0;\n") + elif regid == "dd": genptr_decl_pair_writable(f, tag, regtype, regid, regno) - elif (regid == "s"): - f.write(" TCGv %s%sV = tcg_temp_local_new();\n" % \ - (regtype, regid)) - f.write(" const int %s%sN = insn->regno[%d] + HEX_REG_SA0;\n" % \ - (regtype, regid, regno)) - elif (regid == "d"): + elif regid == "s": + f.write(f" TCGv {regtype}{regid}V = tcg_temp_new();\n") + f.write( + f" const int {regtype}{regid}N = insn->regno[{regno}] + " + "HEX_REG_SA0;\n" + ) + elif regid == "d": genptr_decl_writable(f, tag, regtype, regid, regno) else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "M"): - if 
(regid == "u"): - f.write(" const int %s%sN = insn->regno[%d];\n"% \ - (regtype, regid, regno)) - f.write(" TCGv %s%sV = hex_gpr[%s%sN + HEX_REG_M0];\n" % \ - (regtype, regid, regtype, regid)) + hex_common.bad_register(regtype, regid) + elif regtype == "M": + if regid == "u": + f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") + f.write( + f" TCGv {regtype}{regid}V = hex_gpr[{regtype}{regid}N + " + "HEX_REG_M0];\n" + ) else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "V"): - if (regid in {"dd"}): - f.write(" const int %s%sN = insn->regno[%d];\n" %\ - (regtype, regid, regno)) - f.write(" const intptr_t %s%sV_off =\n" %\ - (regtype, regid)) - if (hex_common.is_tmp_result(tag)): - f.write(" ctx_tmp_vreg_off(ctx, %s%sN, 2, true);\n" % \ - (regtype, regid)) + hex_common.bad_register(regtype, regid) + elif regtype == "V": + if regid in {"dd"}: + f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") + f.write(f" const intptr_t {regtype}{regid}V_off =\n") + if hex_common.is_tmp_result(tag): + f.write( + f" ctx_tmp_vreg_off(ctx, {regtype}{regid}N, 2, " "true);\n" + ) else: - f.write(" ctx_future_vreg_off(ctx, %s%sN," % \ - (regtype, regid)) + f.write(f" ctx_future_vreg_off(ctx, {regtype}{regid}N,") f.write(" 2, true);\n") - if (not hex_common.skip_qemu_helper(tag)): - f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ - (regtype, regid)) - f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ - (regtype, regid, regtype, regid)) - elif (regid in {"uu", "vv", "xx"}): - f.write(" const int %s%sN = insn->regno[%d];\n" % \ - (regtype, regid, regno)) - f.write(" const intptr_t %s%sV_off =\n" % \ - (regtype, regid)) - f.write(" offsetof(CPUHexagonState, %s%sV);\n" % \ - (regtype, regid)) - if (not hex_common.skip_qemu_helper(tag)): - f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ - (regtype, regid)) - f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ - (regtype, regid, regtype, regid)) - elif (regid in {"s", "u", "v", "w"}): - f.write(" const int %s%sN = insn->regno[%d];\n" % \ - (regtype, regid, regno)) - f.write(" const intptr_t %s%sV_off =\n" % \ - (regtype, regid)) - f.write(" vreg_src_off(ctx, %s%sN);\n" % \ - (regtype, regid)) - if (not hex_common.skip_qemu_helper(tag)): - f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ - (regtype, regid)) - elif (regid in {"d", "x", "y"}): - f.write(" const int %s%sN = insn->regno[%d];\n" % \ - (regtype, regid, regno)) - f.write(" const intptr_t %s%sV_off =\n" % \ - (regtype, regid)) - if (regid == "y"): + if not hex_common.skip_qemu_helper(tag): + f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n") + f.write( + f" tcg_gen_addi_ptr({regtype}{regid}V, cpu_env, " + f"{regtype}{regid}V_off);\n" + ) + elif regid in {"uu", "vv", "xx"}: + f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") + f.write(f" const intptr_t {regtype}{regid}V_off =\n") + f.write(f" offsetof(CPUHexagonState, {regtype}{regid}V);\n") + if not hex_common.skip_qemu_helper(tag): + f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n") + f.write( + f" tcg_gen_addi_ptr({regtype}{regid}V, cpu_env, " + f"{regtype}{regid}V_off);\n" + ) + elif regid in {"s", "u", "v", "w"}: + f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") + f.write(f" const intptr_t {regtype}{regid}V_off =\n") + f.write(f" vreg_src_off(ctx, {regtype}{regid}N);\n") + if not hex_common.skip_qemu_helper(tag): + f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n") + 
elif regid in {"d", "x", "y"}: + f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") + f.write(f" const intptr_t {regtype}{regid}V_off =\n") + if regid == "y": f.write(" offsetof(CPUHexagonState, vtmp);\n") - elif (hex_common.is_tmp_result(tag)): - f.write(" ctx_tmp_vreg_off(ctx, %s%sN, 1, true);\n" % \ - (regtype, regid)) + elif hex_common.is_tmp_result(tag): + f.write( + f" ctx_tmp_vreg_off(ctx, {regtype}{regid}N, 1, " "true);\n" + ) else: - f.write(" ctx_future_vreg_off(ctx, %s%sN," % \ - (regtype, regid)) - f.write(" 1, true);\n"); - if 'A_CONDEXEC' in hex_common.attribdict[tag]: - f.write(" if (!is_vreg_preloaded(ctx, %s)) {\n" % (regN)) - f.write(" intptr_t src_off =") - f.write(" offsetof(CPUHexagonState, VRegs[%s%sN]);\n"% \ - (regtype, regid)) - f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \ - (regtype, regid)) - f.write(" src_off,\n") - f.write(" sizeof(MMVector),\n") - f.write(" sizeof(MMVector));\n") - f.write(" }\n") + f.write(f" ctx_future_vreg_off(ctx, {regtype}{regid}N,") + f.write(" 1, true);\n") - if (not hex_common.skip_qemu_helper(tag)): - f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ - (regtype, regid)) - f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ - (regtype, regid, regtype, regid)) + if not hex_common.skip_qemu_helper(tag): + f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n") + f.write( + f" tcg_gen_addi_ptr({regtype}{regid}V, cpu_env, " + f"{regtype}{regid}V_off);\n" + ) else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "Q"): - if (regid in {"d", "e", "x"}): - f.write(" const int %s%sN = insn->regno[%d];\n" % \ - (regtype, regid, regno)) - f.write(" const intptr_t %s%sV_off =\n" % \ - (regtype, regid)) - f.write(" offsetof(CPUHexagonState,\n") - f.write(" future_QRegs[%s%sN]);\n" % \ - (regtype, regid)) - if (not hex_common.skip_qemu_helper(tag)): - f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ - (regtype, regid)) - f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ - (regtype, regid, regtype, regid)) - elif (regid in {"s", "t", "u", "v"}): - f.write(" const int %s%sN = insn->regno[%d];\n" % \ - (regtype, regid, regno)) - f.write(" const intptr_t %s%sV_off =\n" %\ - (regtype, regid)) - f.write(" offsetof(CPUHexagonState, QRegs[%s%sN]);\n" % \ - (regtype, regid)) - if (not hex_common.skip_qemu_helper(tag)): - f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ - (regtype, regid)) + hex_common.bad_register(regtype, regid) + elif regtype == "Q": + if regid in {"d", "e", "x"}: + f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") + f.write(f" const intptr_t {regtype}{regid}V_off =\n") + f.write(f" get_result_qreg(ctx, {regtype}{regid}N);\n") + if not hex_common.skip_qemu_helper(tag): + f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n") + f.write( + f" tcg_gen_addi_ptr({regtype}{regid}V, cpu_env, " + f"{regtype}{regid}V_off);\n" + ) + elif regid in {"s", "t", "u", "v"}: + f.write(f" const int {regtype}{regid}N = " f"insn->regno[{regno}];\n") + f.write(f" const intptr_t {regtype}{regid}V_off =\n") + f.write( + f" offsetof(CPUHexagonState, " f"QRegs[{regtype}{regid}N]);\n" + ) + if not hex_common.skip_qemu_helper(tag): + f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n") else: - print("Bad register parse: ", regtype, regid) + hex_common.bad_register(regtype, regid) else: - print("Bad register parse: ", regtype, regid) + hex_common.bad_register(regtype, regid) + def genptr_decl_new(f, tag, regtype, regid, regno): - 
if (regtype == "N"): - if (regid in {"s", "t"}): - f.write(" TCGv %s%sN = hex_new_value[insn->regno[%d]];\n" % \ - (regtype, regid, regno)) + if regtype == "N": + if regid in {"s", "t"}: + f.write( + f" TCGv {regtype}{regid}N = " + f"get_result_gpr(ctx, insn->regno[{regno}]);\n" + ) else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "P"): - if (regid in {"t", "u", "v"}): - f.write(" TCGv %s%sN = hex_new_pred_value[insn->regno[%d]];\n" % \ - (regtype, regid, regno)) + hex_common.bad_register(regtype, regid) + elif regtype == "P": + if regid in {"t", "u", "v"}: + f.write( + f" TCGv {regtype}{regid}N = " + f"ctx->new_pred_value[insn->regno[{regno}]];\n" + ) else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "O"): - if (regid == "s"): - f.write(" const intptr_t %s%sN_num = insn->regno[%d];\n" % \ - (regtype, regid, regno)) - if (hex_common.skip_qemu_helper(tag)): - f.write(" const intptr_t %s%sN_off =\n" % \ - (regtype, regid)) - f.write(" ctx_future_vreg_off(ctx, %s%sN_num," % \ - (regtype, regid)) + hex_common.bad_register(regtype, regid) + elif regtype == "O": + if regid == "s": + f.write( + f" const intptr_t {regtype}{regid}N_num = " + f"insn->regno[{regno}];\n" + ) + if hex_common.skip_qemu_helper(tag): + f.write(f" const intptr_t {regtype}{regid}N_off =\n") + f.write(" ctx_future_vreg_off(ctx, " f"{regtype}{regid}N_num,") f.write(" 1, true);\n") else: - f.write(" TCGv %s%sN = tcg_constant_tl(%s%sN_num);\n" % \ - (regtype, regid, regtype, regid)) + f.write( + f" TCGv {regtype}{regid}N = " + f"tcg_constant_tl({regtype}{regid}N_num);\n" + ) else: - print("Bad register parse: ", regtype, regid) + hex_common.bad_register(regtype, regid) else: - print("Bad register parse: ", regtype, regid) + hex_common.bad_register(regtype, regid) -def genptr_decl_opn(f, tag, regtype, regid, toss, numregs, i): - if (hex_common.is_pair(regid)): + +def genptr_decl_opn(f, tag, regtype, regid, i): + if hex_common.is_pair(regid): genptr_decl(f, tag, regtype, regid, i) - elif (hex_common.is_single(regid)): + elif hex_common.is_single(regid): if hex_common.is_old_val(regtype, regid, tag): - genptr_decl(f,tag, regtype, regid, i) + genptr_decl(f, tag, regtype, regid, i) elif hex_common.is_new_val(regtype, regid, tag): genptr_decl_new(f, tag, regtype, regid, i) else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) -def genptr_decl_imm(f,immlett): - if (immlett.isupper()): + +def genptr_decl_imm(f, immlett): + if immlett.isupper(): i = 1 else: i = 0 - f.write(" int %s = insn->immed[%d];\n" % \ - (hex_common.imm_name(immlett), i)) + f.write(f" int {hex_common.imm_name(immlett)} = insn->immed[{i}];\n") -def genptr_free(f, tag, regtype, regid, regno): - if (regtype == "R"): - if (regid in {"dd", "ss", "tt", "xx", "yy"}): - f.write(" tcg_temp_free_i64(%s%sV);\n" % (regtype, regid)) - elif (regid in {"d", "e", "x", "y"}): - f.write(" tcg_temp_free(%s%sV);\n" % (regtype, regid)) - elif (regid not in {"s", "t", "u", "v"}): - print("Bad register parse: ",regtype,regid) - elif (regtype == "P"): - if (regid in {"d", "e", "x"}): - f.write(" tcg_temp_free(%s%sV);\n" % (regtype, regid)) - elif (regid not in {"s", "t", "u", "v"}): - print("Bad register parse: ",regtype,regid) - elif (regtype == "C"): - if (regid in {"dd", "ss"}): - f.write(" tcg_temp_free_i64(%s%sV);\n" % (regtype, regid)) - elif (regid in {"d", "s"}): - f.write(" 
tcg_temp_free(%s%sV);\n" % (regtype, regid)) - else: - print("Bad register parse: ",regtype,regid) - elif (regtype == "M"): - if (regid != "u"): - print("Bad register parse: ", regtype, regid) - elif (regtype == "V"): - if (regid in {"dd", "uu", "vv", "xx", \ - "d", "s", "u", "v", "w", "x", "y"}): - if (not hex_common.skip_qemu_helper(tag)): - f.write(" tcg_temp_free_ptr(%s%sV);\n" % \ - (regtype, regid)) - else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "Q"): - if (regid in {"d", "e", "s", "t", "u", "v", "x"}): - if (not hex_common.skip_qemu_helper(tag)): - f.write(" tcg_temp_free_ptr(%s%sV);\n" % \ - (regtype, regid)) - else: - print("Bad register parse: ", regtype, regid) - else: - print("Bad register parse: ", regtype, regid) - -def genptr_free_new(f, tag, regtype, regid, regno): - if (regtype == "N"): - if (regid not in {"s", "t"}): - print("Bad register parse: ", regtype, regid) - elif (regtype == "P"): - if (regid not in {"t", "u", "v"}): - print("Bad register parse: ", regtype, regid) - elif (regtype == "O"): - if (regid != "s"): - print("Bad register parse: ", regtype, regid) - else: - print("Bad register parse: ", regtype, regid) - -def genptr_free_opn(f,regtype,regid,i,tag): - if (hex_common.is_pair(regid)): - genptr_free(f, tag, regtype, regid, i) - elif (hex_common.is_single(regid)): - if hex_common.is_old_val(regtype, regid, tag): - genptr_free(f, tag, regtype, regid, i) - elif hex_common.is_new_val(regtype, regid, tag): - genptr_free_new(f, tag, regtype, regid, i) - else: - print("Bad register parse: ",regtype,regid,toss,numregs) - else: - print("Bad register parse: ",regtype,regid,toss,numregs) def genptr_src_read(f, tag, regtype, regid): - if (regtype == "R"): - if (regid in {"ss", "tt", "xx", "yy"}): - f.write(" tcg_gen_concat_i32_i64(%s%sV, hex_gpr[%s%sN],\n" % \ - (regtype, regid, regtype, regid)) - f.write(" hex_gpr[%s%sN + 1]);\n" % \ - (regtype, regid)) - elif (regid in {"x", "y"}): - f.write(" tcg_gen_mov_tl(%s%sV, hex_gpr[%s%sN]);\n" % \ - (regtype,regid,regtype,regid)) - elif (regid not in {"s", "t", "u", "v"}): - print("Bad register parse: ", regtype, regid) - elif (regtype == "P"): - if (regid == "x"): - f.write(" tcg_gen_mov_tl(%s%sV, hex_pred[%s%sN]);\n" % \ - (regtype, regid, regtype, regid)) - elif (regid not in {"s", "t", "u", "v"}): - print("Bad register parse: ", regtype, regid) - elif (regtype == "C"): - if (regid == "ss"): - f.write(" gen_read_ctrl_reg_pair(ctx, %s%sN, %s%sV);\n" % \ - (regtype, regid, regtype, regid)) - elif (regid == "s"): - f.write(" gen_read_ctrl_reg(ctx, %s%sN, %s%sV);\n" % \ - (regtype, regid, regtype, regid)) + if regtype == "R": + if regid in {"ss", "tt", "xx", "yy"}: + f.write( + f" tcg_gen_concat_i32_i64({regtype}{regid}V, " + f"hex_gpr[{regtype}{regid}N],\n" + ) + f.write( + f" hex_gpr[{regtype}" + f"{regid}N + 1]);\n" + ) + elif regid in {"x", "y"}: + ## For read/write registers, we need to get the original value into + ## the result TCGv. For conditional instructions, this is done in + ## gen_start_packet. For unconditional instructions, we do it here. 
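(Editor's aside: a minimal sketch of the C this branch emits for an unconditional instruction with a read/write Rx operand — hypothetical operand, following the generated-code conventions shown later in this file, not a literal excerpt.)

/* Declared earlier via genptr_decl_writable()/get_result_gpr() */
const int RxN = insn->regno[0];
TCGv RxV = get_result_gpr(ctx, RxN);
/* Emitted here: seed the result TCGv with the current register value */
tcg_gen_mov_tl(RxV, hex_gpr[RxN]);
/* ... instruction semantics update RxV ... */
gen_log_reg_write(ctx, RxN, RxV);   /* masked move into the commit copy */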
+ if "A_CONDEXEC" not in hex_common.attribdict[tag]: + f.write( + f" tcg_gen_mov_tl({regtype}{regid}V, " + f"hex_gpr[{regtype}{regid}N]);\n" + ) + elif regid not in {"s", "t", "u", "v"}: + hex_common.bad_register(regtype, regid) + elif regtype == "P": + if regid == "x": + f.write( + f" tcg_gen_mov_tl({regtype}{regid}V, " + f"hex_pred[{regtype}{regid}N]);\n" + ) + elif regid not in {"s", "t", "u", "v"}: + hex_common.bad_register(regtype, regid) + elif regtype == "C": + if regid == "ss": + f.write( + f" gen_read_ctrl_reg_pair(ctx, {regtype}{regid}N, " + f"{regtype}{regid}V);\n" + ) + elif regid == "s": + f.write( + f" gen_read_ctrl_reg(ctx, {regtype}{regid}N, " + f"{regtype}{regid}V);\n" + ) else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "M"): - if (regid != "u"): - print("Bad register parse: ", regtype, regid) - elif (regtype == "V"): - if (regid in {"uu", "vv", "xx"}): - f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \ - (regtype, regid)) - f.write(" vreg_src_off(ctx, %s%sN),\n" % \ - (regtype, regid)) + hex_common.bad_register(regtype, regid) + elif regtype == "M": + if regid != "u": + hex_common.bad_register(regtype, regid) + elif regtype == "V": + if regid in {"uu", "vv", "xx"}: + f.write(f" tcg_gen_gvec_mov(MO_64, {regtype}{regid}V_off,\n") + f.write(f" vreg_src_off(ctx, {regtype}{regid}N),\n") f.write(" sizeof(MMVector), sizeof(MMVector));\n") f.write(" tcg_gen_gvec_mov(MO_64,\n") - f.write(" %s%sV_off + sizeof(MMVector),\n" % \ - (regtype, regid)) - f.write(" vreg_src_off(ctx, %s%sN ^ 1),\n" % \ - (regtype, regid)) + f.write(f" {regtype}{regid}V_off + sizeof(MMVector),\n") + f.write(f" vreg_src_off(ctx, {regtype}{regid}N ^ 1),\n") f.write(" sizeof(MMVector), sizeof(MMVector));\n") - elif (regid in {"s", "u", "v", "w"}): - if (not hex_common.skip_qemu_helper(tag)): - f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ - (regtype, regid, regtype, regid)) - elif (regid in {"x", "y"}): - f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \ - (regtype, regid)) - f.write(" vreg_src_off(ctx, %s%sN),\n" % \ - (regtype, regid)) + elif regid in {"s", "u", "v", "w"}: + if not hex_common.skip_qemu_helper(tag): + f.write( + f" tcg_gen_addi_ptr({regtype}{regid}V, cpu_env, " + f"{regtype}{regid}V_off);\n" + ) + elif regid in {"x", "y"}: + f.write(f" tcg_gen_gvec_mov(MO_64, {regtype}{regid}V_off,\n") + f.write(f" vreg_src_off(ctx, {regtype}{regid}N),\n") f.write(" sizeof(MMVector), sizeof(MMVector));\n") else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "Q"): - if (regid in {"s", "t", "u", "v"}): - if (not hex_common.skip_qemu_helper(tag)): - f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ - (regtype, regid, regtype, regid)) - elif (regid in {"x"}): - f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \ - (regtype, regid)) - f.write(" offsetof(CPUHexagonState, QRegs[%s%sN]),\n" % \ - (regtype, regid)) + hex_common.bad_register(regtype, regid) + elif regtype == "Q": + if regid in {"s", "t", "u", "v"}: + if not hex_common.skip_qemu_helper(tag): + f.write( + f" tcg_gen_addi_ptr({regtype}{regid}V, cpu_env, " + f"{regtype}{regid}V_off);\n" + ) + elif regid in {"x"}: + f.write(f" tcg_gen_gvec_mov(MO_64, {regtype}{regid}V_off,\n") + f.write( + f" offsetof(CPUHexagonState, " f"QRegs[{regtype}{regid}N]),\n" + ) f.write(" sizeof(MMQReg), sizeof(MMQReg));\n") else: - print("Bad register parse: ", regtype, regid) + hex_common.bad_register(regtype, regid) else: - print("Bad register parse: ", regtype, regid) + 
hex_common.bad_register(regtype, regid) -def genptr_src_read_new(f,regtype,regid): - if (regtype == "N"): - if (regid not in {"s", "t"}): - print("Bad register parse: ", regtype, regid) - elif (regtype == "P"): - if (regid not in {"t", "u", "v"}): - print("Bad register parse: ", regtype, regid) - elif (regtype == "O"): - if (regid != "s"): - print("Bad register parse: ", regtype, regid) + +def genptr_src_read_new(f, regtype, regid): + if regtype == "N": + if regid not in {"s", "t"}: + hex_common.bad_register(regtype, regid) + elif regtype == "P": + if regid not in {"t", "u", "v"}: + hex_common.bad_register(regtype, regid) + elif regtype == "O": + if regid != "s": + hex_common.bad_register(regtype, regid) else: - print("Bad register parse: ", regtype, regid) + hex_common.bad_register(regtype, regid) -def genptr_src_read_opn(f,regtype,regid,tag): - if (hex_common.is_pair(regid)): + +def genptr_src_read_opn(f, regtype, regid, tag): + if hex_common.is_pair(regid): genptr_src_read(f, tag, regtype, regid) - elif (hex_common.is_single(regid)): + elif hex_common.is_single(regid): if hex_common.is_old_val(regtype, regid, tag): genptr_src_read(f, tag, regtype, regid) elif hex_common.is_new_val(regtype, regid, tag): - genptr_src_read_new(f,regtype,regid) + genptr_src_read_new(f, regtype, regid) else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) -def gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i): - if (i > 0): f.write(", ") - if (hex_common.is_pair(regid)): - f.write("%s%sV" % (regtype,regid)) - elif (hex_common.is_single(regid)): + +def gen_helper_call_opn(f, tag, regtype, regid, i): + if i > 0: + f.write(", ") + if hex_common.is_pair(regid): + f.write(f"{regtype}{regid}V") + elif hex_common.is_single(regid): if hex_common.is_old_val(regtype, regid, tag): - f.write("%s%sV" % (regtype,regid)) + f.write(f"{regtype}{regid}V") elif hex_common.is_new_val(regtype, regid, tag): - f.write("%s%sN" % (regtype,regid)) + f.write(f"{regtype}{regid}N") else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) -def gen_helper_decl_imm(f,immlett): - f.write(" TCGv tcgv_%s = tcg_constant_tl(%s);\n" % \ - (hex_common.imm_name(immlett), hex_common.imm_name(immlett))) -def gen_helper_call_imm(f,immlett): - f.write(", tcgv_%s" % hex_common.imm_name(immlett)) +def gen_helper_decl_imm(f, immlett): + f.write( + f" TCGv tcgv_{hex_common.imm_name(immlett)} = " + f"tcg_constant_tl({hex_common.imm_name(immlett)});\n" + ) + + +def gen_helper_call_imm(f, immlett): + f.write(f", tcgv_{hex_common.imm_name(immlett)}") + def genptr_dst_write_pair(f, tag, regtype, regid): - if ('A_CONDEXEC' in hex_common.attribdict[tag]): - f.write(" gen_log_predicated_reg_write_pair(%s%sN, %s%sV, insn->slot);\n" % \ - (regtype, regid, regtype, regid)) - else: - f.write(" gen_log_reg_write_pair(%s%sN, %s%sV);\n" % \ - (regtype, regid, regtype, regid)) - f.write(" ctx_log_reg_write_pair(ctx, %s%sN);\n" % \ - (regtype, regid)) + f.write(f" gen_log_reg_write_pair(ctx, {regtype}{regid}N, " + f"{regtype}{regid}V);\n") + def genptr_dst_write(f, tag, regtype, regid): - if (regtype == "R"): - if (regid in {"dd", "xx", "yy"}): + if regtype == "R": + if regid in {"dd", "xx", "yy"}: genptr_dst_write_pair(f, tag, regtype, regid) - 
elif (regid in {"d", "e", "x", "y"}): - if ('A_CONDEXEC' in hex_common.attribdict[tag]): - f.write(" gen_log_predicated_reg_write(%s%sN, %s%sV,\n" % \ - (regtype, regid, regtype, regid)) - f.write(" insn->slot);\n") - else: - f.write(" gen_log_reg_write(%s%sN, %s%sV);\n" % \ - (regtype, regid, regtype, regid)) - f.write(" ctx_log_reg_write(ctx, %s%sN);\n" % \ - (regtype, regid)) + elif regid in {"d", "e", "x", "y"}: + f.write( + f" gen_log_reg_write(ctx, {regtype}{regid}N, " + f"{regtype}{regid}V);\n" + ) else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "P"): - if (regid in {"d", "e", "x"}): - f.write(" gen_log_pred_write(ctx, %s%sN, %s%sV);\n" % \ - (regtype, regid, regtype, regid)) - f.write(" ctx_log_pred_write(ctx, %s%sN);\n" % \ - (regtype, regid)) + hex_common.bad_register(regtype, regid) + elif regtype == "P": + if regid in {"d", "e", "x"}: + f.write( + f" gen_log_pred_write(ctx, {regtype}{regid}N, " + f"{regtype}{regid}V);\n" + ) else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "C"): - if (regid == "dd"): - f.write(" gen_write_ctrl_reg_pair(ctx, %s%sN, %s%sV);\n" % \ - (regtype, regid, regtype, regid)) - elif (regid == "d"): - f.write(" gen_write_ctrl_reg(ctx, %s%sN, %s%sV);\n" % \ - (regtype, regid, regtype, regid)) + hex_common.bad_register(regtype, regid) + elif regtype == "C": + if regid == "dd": + f.write( + f" gen_write_ctrl_reg_pair(ctx, {regtype}{regid}N, " + f"{regtype}{regid}V);\n" + ) + elif regid == "d": + f.write( + f" gen_write_ctrl_reg(ctx, {regtype}{regid}N, " + f"{regtype}{regid}V);\n" + ) else: - print("Bad register parse: ", regtype, regid) + hex_common.bad_register(regtype, regid) else: - print("Bad register parse: ", regtype, regid) + hex_common.bad_register(regtype, regid) + def genptr_dst_write_ext(f, tag, regtype, regid, newv="EXT_DFL"): - if (regtype == "V"): - if (regid in {"dd", "xx", "yy"}): - if ('A_CONDEXEC' in hex_common.attribdict[tag]): - is_predicated = "true" - else: - is_predicated = "false" - f.write(" gen_log_vreg_write_pair(ctx, %s%sV_off, %s%sN, " % \ - (regtype, regid, regtype, regid)) - f.write("%s, insn->slot, %s);\n" % \ - (newv, is_predicated)) - f.write(" ctx_log_vreg_write_pair(ctx, %s%sN, %s,\n" % \ - (regtype, regid, newv)) - f.write(" %s);\n" % (is_predicated)) - elif (regid in {"d", "x", "y"}): - if ('A_CONDEXEC' in hex_common.attribdict[tag]): - is_predicated = "true" - else: - is_predicated = "false" - f.write(" gen_log_vreg_write(ctx, %s%sV_off, %s%sN, %s, " % \ - (regtype, regid, regtype, regid, newv)) - f.write("insn->slot, %s);\n" % \ - (is_predicated)) - f.write(" ctx_log_vreg_write(ctx, %s%sN, %s, %s);\n" % \ - (regtype, regid, newv, is_predicated)) - else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "Q"): - if (regid in {"d", "e", "x"}): - if ('A_CONDEXEC' in hex_common.attribdict[tag]): - is_predicated = "true" - else: - is_predicated = "false" - f.write(" gen_log_qreg_write(%s%sV_off, %s%sN, %s, " % \ - (regtype, regid, regtype, regid, newv)) - f.write("insn->slot, %s);\n" % (is_predicated)) - f.write(" ctx_log_qreg_write(ctx, %s%sN, %s);\n" % \ - (regtype, regid, is_predicated)) - else: - print("Bad register parse: ", regtype, regid) + if regtype == "V": + if regid in {"xx"}: + f.write( + f" gen_log_vreg_write_pair(ctx, {regtype}{regid}V_off, " + f"{regtype}{regid}N, {newv});\n" + ) + elif regid in {"y"}: + f.write( + f" gen_log_vreg_write(ctx, {regtype}{regid}V_off, " + f"{regtype}{regid}N, {newv});\n" + ) + elif regid not in {"dd", "d", "x"}: + 
hex_common.bad_register(regtype, regid) + elif regtype == "Q": + if regid not in {"d", "e", "x"}: + hex_common.bad_register(regtype, regid) else: - print("Bad register parse: ", regtype, regid) + hex_common.bad_register(regtype, regid) -def genptr_dst_write_opn(f,regtype, regid, tag): - if (hex_common.is_pair(regid)): - if (hex_common.is_hvx_reg(regtype)): - if (hex_common.is_tmp_result(tag)): + +def genptr_dst_write_opn(f, regtype, regid, tag): + if hex_common.is_pair(regid): + if hex_common.is_hvx_reg(regtype): + if hex_common.is_tmp_result(tag): genptr_dst_write_ext(f, tag, regtype, regid, "EXT_TMP") else: genptr_dst_write_ext(f, tag, regtype, regid) else: genptr_dst_write(f, tag, regtype, regid) - elif (hex_common.is_single(regid)): - if (hex_common.is_hvx_reg(regtype)): - if (hex_common.is_new_result(tag)): + elif hex_common.is_single(regid): + if hex_common.is_hvx_reg(regtype): + if hex_common.is_new_result(tag): genptr_dst_write_ext(f, tag, regtype, regid, "EXT_NEW") - elif (hex_common.is_tmp_result(tag)): + elif hex_common.is_tmp_result(tag): genptr_dst_write_ext(f, tag, regtype, regid, "EXT_TMP") else: genptr_dst_write_ext(f, tag, regtype, regid, "EXT_DFL") else: genptr_dst_write(f, tag, regtype, regid) else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) + ## ## Generate the TCG code to call the helper ## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;} ## We produce: ## static void generate_A2_add(DisasContext *ctx) -## { -## TCGv RdV = tcg_temp_local_new(); -## const int RdN = insn->regno[0]; -## TCGv RsV = hex_gpr[insn->regno[1]]; -## TCGv RtV = hex_gpr[insn->regno[2]]; -## <GEN> -## gen_log_reg_write(RdN, RdV); -## ctx_log_reg_write(ctx, RdN); -## tcg_temp_free(RdV); -## } +## { +## Insn *insn __attribute__((unused)) = ctx->insn; +## const int RdN = insn->regno[0]; +## TCGv RdV = get_result_gpr(ctx, RdN); +## TCGv RsV = hex_gpr[insn->regno[1]]; +## TCGv RtV = hex_gpr[insn->regno[2]]; +## <GEN> +## gen_log_reg_write(ctx, RdN, RdV); +## } ## ## where <GEN> depends on hex_common.skip_qemu_helper(tag) ## if hex_common.skip_qemu_helper(tag) is True @@ -592,124 +493,139 @@ def genptr_dst_write_opn(f,regtype, regid, tag): ## <GEN> is gen_helper_A2_add(RdV, cpu_env, RsV, RtV); ## def gen_tcg_func(f, tag, regs, imms): - f.write("static void generate_%s(DisasContext *ctx)\n" %tag) - f.write('{\n') + f.write(f"static void generate_{tag}(DisasContext *ctx)\n") + f.write("{\n") f.write(" Insn *insn __attribute__((unused)) = ctx->insn;\n") - if hex_common.need_ea(tag): gen_decl_ea_tcg(f, tag) - i=0 + if hex_common.need_ea(tag): + gen_decl_ea_tcg(f, tag) + i = 0 ## Declare all the operands (regs and immediates) - for regtype,regid,toss,numregs in regs: - genptr_decl_opn(f, tag, regtype, regid, toss, numregs, i) + for regtype, regid in regs: + genptr_decl_opn(f, tag, regtype, regid, i) i += 1 - for immlett,bits,immshift in imms: - genptr_decl_imm(f,immlett) + for immlett, bits, immshift in imms: + genptr_decl_imm(f, immlett) - if 'A_PRIV' in hex_common.attribdict[tag]: - f.write(' fCHECKFORPRIV();\n') - if 'A_GUEST' in hex_common.attribdict[tag]: - f.write(' fCHECKFORGUEST();\n') + if "A_PRIV" in hex_common.attribdict[tag]: + f.write(" fCHECKFORPRIV();\n") + if "A_GUEST" in hex_common.attribdict[tag]: + f.write(" fCHECKFORGUEST();\n") ## Read all the inputs - for regtype,regid,toss,numregs in regs: - if (hex_common.is_read(regid)): - genptr_src_read_opn(f,regtype,regid,tag) + for regtype, regid in regs: + if hex_common.is_read(regid): + genptr_src_read_opn(f, 
regtype, regid, tag) if hex_common.is_idef_parser_enabled(tag): declared = [] ## Handle registers - for regtype,regid,toss,numregs in regs: - if (hex_common.is_pair(regid) - or (hex_common.is_single(regid) - and hex_common.is_old_val(regtype, regid, tag))): - declared.append("%s%sV" % (regtype, regid)) + for regtype, regid in regs: + if hex_common.is_pair(regid) or ( + hex_common.is_single(regid) + and hex_common.is_old_val(regtype, regid, tag) + ): + declared.append(f"{regtype}{regid}V") if regtype == "M": - declared.append("%s%sN" % (regtype, regid)) + declared.append(f"{regtype}{regid}N") elif hex_common.is_new_val(regtype, regid, tag): - declared.append("%s%sN" % (regtype,regid)) + declared.append(f"{regtype}{regid}N") else: - print("Bad register parse: ",regtype,regid,toss,numregs) + hex_common.bad_register(regtype, regid) ## Handle immediates - for immlett,bits,immshift in imms: + for immlett, bits, immshift in imms: declared.append(hex_common.imm_name(immlett)) arguments = ", ".join(["ctx", "ctx->insn", "ctx->pkt"] + declared) - f.write(" emit_%s(%s);\n" % (tag, arguments)) + f.write(f" emit_{tag}({arguments});\n") - elif ( hex_common.skip_qemu_helper(tag) ): - f.write(" fGEN_TCG_%s(%s);\n" % (tag, hex_common.semdict[tag])) + elif hex_common.skip_qemu_helper(tag): + f.write(f" fGEN_TCG_{tag}({hex_common.semdict[tag]});\n") else: ## Generate the call to the helper - for immlett,bits,immshift in imms: - gen_helper_decl_imm(f,immlett) + for immlett, bits, immshift in imms: + gen_helper_decl_imm(f, immlett) if hex_common.need_pkt_has_multi_cof(tag): f.write(" TCGv pkt_has_multi_cof = ") f.write("tcg_constant_tl(ctx->pkt->pkt_has_multi_cof);\n") + if hex_common.need_pkt_need_commit(tag): + f.write(" TCGv pkt_need_commit = ") + f.write("tcg_constant_tl(ctx->need_commit);\n") if hex_common.need_part1(tag): f.write(" TCGv part1 = tcg_constant_tl(insn->part1);\n") if hex_common.need_slot(tag): - f.write(" TCGv slot = tcg_constant_tl(insn->slot);\n") + f.write(" TCGv slotval = gen_slotval(ctx);\n") if hex_common.need_PC(tag): f.write(" TCGv PC = tcg_constant_tl(ctx->pkt->pc);\n") if hex_common.helper_needs_next_PC(tag): f.write(" TCGv next_PC = tcg_constant_tl(ctx->next_PC);\n") - f.write(" gen_helper_%s(" % (tag)) - i=0 + f.write(f" gen_helper_{tag}(") + i = 0 ## If there is a scalar result, it is the return type - for regtype,regid,toss,numregs in regs: - if (hex_common.is_written(regid)): - if (hex_common.is_hvx_reg(regtype)): + for regtype, regid in regs: + if hex_common.is_written(regid): + if hex_common.is_hvx_reg(regtype): continue - gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i) + gen_helper_call_opn(f, tag, regtype, regid, i) i += 1 - if (i > 0): f.write(", ") + if i > 0: + f.write(", ") f.write("cpu_env") - i=1 - for regtype,regid,toss,numregs in regs: - if (hex_common.is_written(regid)): - if (not hex_common.is_hvx_reg(regtype)): + i = 1 + ## For conditional instructions, we pass in the destination register + if "A_CONDEXEC" in hex_common.attribdict[tag]: + for regtype, regid in regs: + if hex_common.is_writeonly(regid) and not hex_common.is_hvx_reg( + regtype + ): + gen_helper_call_opn(f, tag, regtype, regid, i) + i += 1 + for regtype, regid in regs: + if hex_common.is_written(regid): + if not hex_common.is_hvx_reg(regtype): continue - gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i) + gen_helper_call_opn(f, tag, regtype, regid, i) i += 1 - for regtype,regid,toss,numregs in regs: - if (hex_common.is_read(regid)): - if (hex_common.is_hvx_reg(regtype) and 
- hex_common.is_readwrite(regid)): + for regtype, regid in regs: + if hex_common.is_read(regid): + if hex_common.is_hvx_reg(regtype) and hex_common.is_readwrite(regid): continue - gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i) + gen_helper_call_opn(f, tag, regtype, regid, i) i += 1 - for immlett,bits,immshift in imms: - gen_helper_call_imm(f,immlett) + for immlett, bits, immshift in imms: + gen_helper_call_imm(f, immlett) if hex_common.need_pkt_has_multi_cof(tag): f.write(", pkt_has_multi_cof") - if hex_common.need_PC(tag): f.write(", PC") - if hex_common.helper_needs_next_PC(tag): f.write(", next_PC") - if hex_common.need_slot(tag): f.write(", slot") - if hex_common.need_part1(tag): f.write(", part1" ) + if hex_common.need_pkt_need_commit(tag): + f.write(", pkt_need_commit") + if hex_common.need_PC(tag): + f.write(", PC") + if hex_common.helper_needs_next_PC(tag): + f.write(", next_PC") + if hex_common.need_slot(tag): + f.write(", slotval") + if hex_common.need_part1(tag): + f.write(", part1") f.write(");\n") ## Write all the outputs - for regtype,regid,toss,numregs in regs: - if (hex_common.is_written(regid)): - genptr_dst_write_opn(f,regtype, regid, tag) - - ## Free all the operands (regs and immediates) - if hex_common.need_ea(tag): gen_free_ea_tcg(f) - for regtype,regid,toss,numregs in regs: - genptr_free_opn(f,regtype,regid,i,tag) - i += 1 + for regtype, regid in regs: + if hex_common.is_written(regid): + genptr_dst_write_opn(f, regtype, regid, tag) f.write("}\n\n") + def gen_def_tcg_func(f, tag, tagregs, tagimms): regs = tagregs[tag] imms = tagimms[tag] gen_tcg_func(f, tag, regs, imms) + def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) @@ -732,30 +648,31 @@ def main(): tagimms = hex_common.get_tagimms() output_file = sys.argv[-1] - with open(output_file, 'w') as f: + with open(output_file, "w") as f: f.write("#ifndef HEXAGON_TCG_FUNCS_H\n") f.write("#define HEXAGON_TCG_FUNCS_H\n\n") if is_idef_parser_enabled: - f.write("#include \"idef-generated-emitter.h.inc\"\n\n") + f.write('#include "idef-generated-emitter.h.inc"\n\n') for tag in hex_common.tags: ## Skip the priv instructions - if ( "A_PRIV" in hex_common.attribdict[tag] ) : + if "A_PRIV" in hex_common.attribdict[tag]: continue ## Skip the guest instructions - if ( "A_GUEST" in hex_common.attribdict[tag] ) : + if "A_GUEST" in hex_common.attribdict[tag]: continue ## Skip the diag instructions - if ( tag == "Y6_diag" ) : + if tag == "Y6_diag": continue - if ( tag == "Y6_diag0" ) : + if tag == "Y6_diag0": continue - if ( tag == "Y6_diag1" ) : + if tag == "Y6_diag1": continue gen_def_tcg_func(f, tag, tagregs, tagimms) f.write("#endif /* HEXAGON_TCG_FUNCS_H */\n") + if __name__ == "__main__": main() diff --git a/target/hexagon/gen_tcg_hvx.h b/target/hexagon/gen_tcg_hvx.h index 083f4d92c6..44bae53f8d 100644 --- a/target/hexagon/gen_tcg_hvx.h +++ b/target/hexagon/gen_tcg_hvx.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
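(Editor's aside, closing out the gen_tcg_funcs.py hunks above: the net effect of the helper-call rework is easiest to see in the shape of a call it generates. A hedged sketch for a hypothetical predicated scalar opcode — the name is invented; the argument order follows gen_tcg_func above.)

/* Sketch only; no real opcode is spelled X_op */
gen_helper_X_op(RdV,               /* scalar result is the return value */
                cpu_env,
                RdV,               /* A_CONDEXEC: dest passed in again so the
                                    * helper can keep the old value when the
                                    * slot is cancelled */
                RsV, RtV,          /* scalar sources */
                tcgv_uiV,          /* immediates */
                pkt_need_commit);  /* new flag, from ctx->need_commit */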
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -128,22 +128,51 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \ sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vassign_tmp(SHORTCODE) \ + tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vcombine_tmp(SHORTCODE) \ + do { \ + tcg_gen_gvec_mov(MO_64, VddV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_gvec_mov(MO_64, VddV_off + sizeof(MMVector), VuV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + } while (0) + +/* + * Vector combine + * + * Be careful that the source and dest don't overlap + */ +#define fGEN_TCG_V6_vcombine(SHORTCODE) \ + do { \ + if (VddV_off != VuV_off) { \ + tcg_gen_gvec_mov(MO_64, VddV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_gvec_mov(MO_64, VddV_off + sizeof(MMVector), VuV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + } else { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + tcg_gen_gvec_mov(MO_64, tmpoff, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_gvec_mov(MO_64, VddV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_gvec_mov(MO_64, VddV_off + sizeof(MMVector), tmpoff, \ + sizeof(MMVector), sizeof(MMVector)); \ + } \ + } while (0) + /* Vector conditional move */ #define fGEN_TCG_VEC_CMOV(PRED) \ do { \ TCGv lsb = tcg_temp_new(); \ TCGLabel *false_label = gen_new_label(); \ - TCGLabel *end_label = gen_new_label(); \ tcg_gen_andi_tl(lsb, PsV, 1); \ tcg_gen_brcondi_tl(TCG_COND_NE, lsb, PRED, false_label); \ - tcg_temp_free(lsb); \ tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_gen_br(end_label); \ gen_set_label(false_label); \ - tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \ - 1 << insn->slot); \ - gen_set_label(end_label); \ } while (0) @@ -212,7 +241,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 15); \ tcg_gen_gvec_sars(MO_16, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vasrh_acc(SHORTCODE) \ @@ -224,7 +252,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) sizeof(MMVector), sizeof(MMVector)); \ tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vasrw(SHORTCODE) \ @@ -233,7 +260,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 31); \ tcg_gen_gvec_sars(MO_32, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vasrw_acc(SHORTCODE) \ @@ -245,7 +271,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) sizeof(MMVector), sizeof(MMVector)); \ tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vlsrb(SHORTCODE) \ @@ -254,7 +279,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 7); \ tcg_gen_gvec_shrs(MO_8, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vlsrh(SHORTCODE) \ @@ -263,7 +287,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 15); \ tcg_gen_gvec_shrs(MO_16, VdV_off, VuV_off, 
shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vlsrw(SHORTCODE) \ @@ -272,7 +295,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 31); \ tcg_gen_gvec_shrs(MO_32, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) /* Vector shift left - various forms */ @@ -282,7 +304,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 7); \ tcg_gen_gvec_shls(MO_8, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vaslh(SHORTCODE) \ @@ -291,7 +312,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 15); \ tcg_gen_gvec_shls(MO_16, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vaslh_acc(SHORTCODE) \ @@ -303,7 +323,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) sizeof(MMVector), sizeof(MMVector)); \ tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vaslw(SHORTCODE) \ @@ -312,7 +331,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 31); \ tcg_gen_gvec_shls(MO_32, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vaslw_acc(SHORTCODE) \ @@ -324,7 +342,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) sizeof(MMVector), sizeof(MMVector)); \ tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) /* Vector max - various forms */ @@ -560,18 +577,12 @@ static inline void assert_vhist_tmp(DisasContext *ctx) do { \ TCGv LSB = tcg_temp_new(); \ TCGLabel *false_label = gen_new_label(); \ - TCGLabel *end_label = gen_new_label(); \ GET_EA; \ PRED; \ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \ - tcg_temp_free(LSB); \ gen_vreg_load(ctx, DSTOFF, EA, true); \ INC; \ - tcg_gen_br(end_label); \ gen_set_label(false_label); \ - tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \ - 1 << insn->slot); \ - gen_set_label(end_label); \ } while (0) #define fGEN_TCG_PRED_VEC_LOAD_pred_pi \ @@ -731,18 +742,12 @@ static inline void assert_vhist_tmp(DisasContext *ctx) do { \ TCGv LSB = tcg_temp_new(); \ TCGLabel *false_label = gen_new_label(); \ - TCGLabel *end_label = gen_new_label(); \ GET_EA; \ PRED; \ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \ - tcg_temp_free(LSB); \ gen_vreg_store(ctx, EA, SRCOFF, insn->slot, ALIGN); \ INC; \ - tcg_gen_br(end_label); \ gen_set_label(false_label); \ - tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \ - 1 << insn->slot); \ - gen_set_label(end_label); \ } while (0) #define fGEN_TCG_PRED_VEC_STORE_pred_pi(ALIGN) \ diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c index 90db99024f..bcb287dd8b 100644 --- a/target/hexagon/genptr.c +++ b/target/hexagon/genptr.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
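(Editor's note on the gen_tcg_hvx.h hunks above, before the genptr.c changes: every predicated HVX load/store macro loses the same tail. Reconstructed here from the diff for side-by-side comparison; no new behavior is introduced.)

/* Old shape: a cancelled slot had to be recorded at runtime */
tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label);
gen_vreg_load(ctx, DSTOFF, EA, true);
tcg_gen_br(end_label);
gen_set_label(false_label);
tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, 1 << insn->slot);
gen_set_label(end_label);

/* New shape: the else arm and the hex_slot_cancelled update are gone;
 * a false predicate simply skips the operation */
tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label);
gen_vreg_load(ctx, DSTOFF, EA, true);
gen_set_label(false_label);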
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -45,7 +45,7 @@ TCGv gen_read_preg(TCGv pred, uint8_t num) #define IMMUTABLE (~0) -static const target_ulong reg_immut_masks[TOTAL_PER_THREAD_REGS] = { +const target_ulong reg_immut_masks[TOTAL_PER_THREAD_REGS] = { [HEX_REG_USR] = 0xc13000c0, [HEX_REG_PC] = IMMUTABLE, [HEX_REG_GP] = 0x3f, @@ -65,112 +65,75 @@ static inline void gen_masked_reg_write(TCGv new_val, TCGv cur_val, tcg_gen_andi_tl(new_val, new_val, ~reg_mask); tcg_gen_andi_tl(tmp, cur_val, reg_mask); tcg_gen_or_tl(new_val, new_val, tmp); - - tcg_temp_free(tmp); } } -static inline void gen_log_predicated_reg_write(int rnum, TCGv val, - uint32_t slot) +TCGv get_result_gpr(DisasContext *ctx, int rnum) { - TCGv zero = tcg_constant_tl(0); - TCGv slot_mask = tcg_temp_new(); - - tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot); - tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum], slot_mask, zero, - val, hex_new_value[rnum]); - if (HEX_DEBUG) { - /* - * Do this so HELPER(debug_commit_end) will know - * - * Note that slot_mask indicates the value is not written - * (i.e., slot was cancelled), so we create a true/false value before - * or'ing with hex_reg_written[rnum]. - */ - tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero); - tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask); + if (ctx->need_commit) { + if (rnum == HEX_REG_USR) { + return hex_new_value_usr; + } else { + if (ctx->new_value[rnum] == NULL) { + ctx->new_value[rnum] = tcg_temp_new(); + tcg_gen_movi_tl(ctx->new_value[rnum], 0); + } + return ctx->new_value[rnum]; + } + } else { + return hex_gpr[rnum]; } - - tcg_temp_free(slot_mask); } -void gen_log_reg_write(int rnum, TCGv val) +static TCGv_i64 get_result_gpr_pair(DisasContext *ctx, int rnum) +{ + TCGv_i64 result = tcg_temp_new_i64(); + tcg_gen_concat_i32_i64(result, get_result_gpr(ctx, rnum), + get_result_gpr(ctx, rnum + 1)); + return result; +} + +void gen_log_reg_write(DisasContext *ctx, int rnum, TCGv val) { const target_ulong reg_mask = reg_immut_masks[rnum]; gen_masked_reg_write(val, hex_gpr[rnum], reg_mask); - tcg_gen_mov_tl(hex_new_value[rnum], val); + tcg_gen_mov_tl(get_result_gpr(ctx, rnum), val); if (HEX_DEBUG) { /* Do this so HELPER(debug_commit_end) will know */ tcg_gen_movi_tl(hex_reg_written[rnum], 1); } } -static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val, - uint32_t slot) +static void gen_log_reg_write_pair(DisasContext *ctx, int rnum, TCGv_i64 val) { TCGv val32 = tcg_temp_new(); - TCGv zero = tcg_constant_tl(0); - TCGv slot_mask = tcg_temp_new(); - tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot); /* Low word */ tcg_gen_extrl_i64_i32(val32, val); - tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum], - slot_mask, zero, - val32, hex_new_value[rnum]); + gen_log_reg_write(ctx, rnum, val32); + /* High word */ tcg_gen_extrh_i64_i32(val32, val); - tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum + 1], - slot_mask, zero, - val32, hex_new_value[rnum + 1]); - if (HEX_DEBUG) { - /* - * Do this so HELPER(debug_commit_end) will know - * - * Note that slot_mask indicates the value is not written - * (i.e., slot was cancelled), so we create a true/false value before - * or'ing with hex_reg_written[rnum]. 
- */ - tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero); - tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask); - tcg_gen_or_tl(hex_reg_written[rnum + 1], hex_reg_written[rnum + 1], - slot_mask); - } - - tcg_temp_free(val32); - tcg_temp_free(slot_mask); + gen_log_reg_write(ctx, rnum + 1, val32); } -static void gen_log_reg_write_pair(int rnum, TCGv_i64 val) +TCGv get_result_pred(DisasContext *ctx, int pnum) { - const target_ulong reg_mask_low = reg_immut_masks[rnum]; - const target_ulong reg_mask_high = reg_immut_masks[rnum + 1]; - TCGv val32 = tcg_temp_new(); - - /* Low word */ - tcg_gen_extrl_i64_i32(val32, val); - gen_masked_reg_write(val32, hex_gpr[rnum], reg_mask_low); - tcg_gen_mov_tl(hex_new_value[rnum], val32); - if (HEX_DEBUG) { - /* Do this so HELPER(debug_commit_end) will know */ - tcg_gen_movi_tl(hex_reg_written[rnum], 1); + if (ctx->need_commit) { + if (ctx->new_pred_value[pnum] == NULL) { + ctx->new_pred_value[pnum] = tcg_temp_new(); + tcg_gen_movi_tl(ctx->new_pred_value[pnum], 0); + } + return ctx->new_pred_value[pnum]; + } else { + return hex_pred[pnum]; } - - /* High word */ - tcg_gen_extrh_i64_i32(val32, val); - gen_masked_reg_write(val32, hex_gpr[rnum + 1], reg_mask_high); - tcg_gen_mov_tl(hex_new_value[rnum + 1], val32); - if (HEX_DEBUG) { - /* Do this so HELPER(debug_commit_end) will know */ - tcg_gen_movi_tl(hex_reg_written[rnum + 1], 1); - } - - tcg_temp_free(val32); } void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val) { + TCGv pred = get_result_pred(ctx, pnum); TCGv base_val = tcg_temp_new(); tcg_gen_andi_tl(base_val, val, 0xff); @@ -183,14 +146,14 @@ void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val) * straight assignment. Otherwise, do an and. */ if (!test_bit(pnum, ctx->pregs_written)) { - tcg_gen_mov_tl(hex_new_pred_value[pnum], base_val); + tcg_gen_mov_tl(pred, base_val); } else { - tcg_gen_and_tl(hex_new_pred_value[pnum], - hex_new_pred_value[pnum], base_val); + tcg_gen_and_tl(pred, pred, base_val); } - tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pnum); - - tcg_temp_free(base_val); + if (HEX_DEBUG) { + tcg_gen_ori_tl(ctx->pred_written, ctx->pred_written, 1 << pnum); + } + set_bit(pnum, ctx->pregs_written); } static inline void gen_read_p3_0(TCGv control_reg) @@ -238,7 +201,6 @@ static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num, TCGv p3_0 = tcg_temp_new(); gen_read_p3_0(p3_0); tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]); - tcg_temp_free(p3_0); } else if (reg_num == HEX_REG_PC - 1) { TCGv pc = tcg_constant_tl(ctx->base.pc_next); tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc); @@ -250,14 +212,11 @@ static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num, tcg_gen_addi_tl(insn_cnt, hex_gpr[HEX_REG_QEMU_INSN_CNT], ctx->num_insns); tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt); - tcg_temp_free(pkt_cnt); - tcg_temp_free(insn_cnt); } else if (reg_num == HEX_REG_QEMU_HVX_CNT) { TCGv hvx_cnt = tcg_temp_new(); tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT], ctx->num_hvx_insns); tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]); - tcg_temp_free(hvx_cnt); } else { tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], @@ -271,9 +230,7 @@ static void gen_write_p3_0(DisasContext *ctx, TCGv control_reg) for (int i = 0; i < NUM_PREGS; i++) { tcg_gen_extract_tl(hex_p8, control_reg, i * 8, 8); gen_log_pred_write(ctx, i, hex_p8); - ctx_log_pred_write(ctx, i); } - tcg_temp_free(hex_p8); } /* @@ -289,8 +246,7 @@ static inline void 
gen_write_ctrl_reg(DisasContext *ctx, int reg_num, if (reg_num == HEX_REG_P3_0_ALIASED) { gen_write_p3_0(ctx, val); } else { - gen_log_reg_write(reg_num, val); - ctx_log_reg_write(ctx, reg_num); + gen_log_reg_write(ctx, reg_num, val); if (reg_num == HEX_REG_QEMU_PKT_CNT) { ctx->num_packets = 0; } @@ -307,16 +263,14 @@ static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num, TCGv_i64 val) { if (reg_num == HEX_REG_P3_0_ALIASED) { + TCGv result = get_result_gpr(ctx, reg_num + 1); TCGv val32 = tcg_temp_new(); tcg_gen_extrl_i64_i32(val32, val); gen_write_p3_0(ctx, val32); tcg_gen_extrh_i64_i32(val32, val); - gen_log_reg_write(reg_num + 1, val32); - tcg_temp_free(val32); - ctx_log_reg_write(ctx, reg_num + 1); + tcg_gen_mov_tl(result, val32); } else { - gen_log_reg_write_pair(reg_num, val); - ctx_log_reg_write_pair(ctx, reg_num); + gen_log_reg_write_pair(ctx, reg_num, val); if (reg_num == HEX_REG_QEMU_PKT_CNT) { ctx->num_packets = 0; ctx->num_insns = 0; @@ -346,7 +300,6 @@ TCGv gen_get_byte_i64(TCGv result, int N, TCGv_i64 src, bool sign) tcg_gen_extract_i64(res64, src, N * 8, 8); } tcg_gen_extrl_i64_i32(result, res64); - tcg_temp_free_i64(res64); return result; } @@ -371,7 +324,6 @@ void gen_set_half_i64(int N, TCGv_i64 result, TCGv src) TCGv_i64 src64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(src64, src); tcg_gen_deposit_i64(result, result, src64, N * 16, 16); - tcg_temp_free_i64(src64); } void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src) @@ -379,19 +331,18 @@ void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src) TCGv_i64 src64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(src64, src); tcg_gen_deposit_i64(result, result, src64, N * 8, 8); - tcg_temp_free_i64(src64); } static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index) { - tcg_gen_qemu_ld32u(dest, vaddr, mem_index); + tcg_gen_qemu_ld_tl(dest, vaddr, mem_index, MO_TEUL); tcg_gen_mov_tl(hex_llsc_addr, vaddr); tcg_gen_mov_tl(hex_llsc_val, dest); } static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index) { - tcg_gen_qemu_ld64(dest, vaddr, mem_index); + tcg_gen_qemu_ld_i64(dest, vaddr, mem_index, MO_TEUQ); tcg_gen_mov_tl(hex_llsc_addr, vaddr); tcg_gen_mov_i64(hex_llsc_val_i64, dest); } @@ -412,7 +363,6 @@ static inline void gen_store_conditional4(DisasContext *ctx, ctx->mem_idx, MO_32); tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val, one, zero); - tcg_temp_free(tmp); tcg_gen_br(done); gen_set_label(fail); @@ -439,7 +389,6 @@ static inline void gen_store_conditional8(DisasContext *ctx, tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64, one, zero); tcg_gen_extrl_i64_i32(pred, tmp); - tcg_temp_free_i64(tmp); tcg_gen_br(done); gen_set_label(fail); @@ -449,6 +398,14 @@ static inline void gen_store_conditional8(DisasContext *ctx, tcg_gen_movi_tl(hex_llsc_addr, ~0); } +#ifndef CONFIG_HEXAGON_IDEF_PARSER +static TCGv gen_slotval(DisasContext *ctx) +{ + int slotval = (ctx->pkt->pkt_has_store_s1 & 1) | (ctx->insn->slot << 1); + return tcg_constant_tl(slotval); +} +#endif + void gen_store32(TCGv vaddr, TCGv src, int width, uint32_t slot) { tcg_gen_mov_tl(hex_store_addr[slot], vaddr); @@ -523,9 +480,9 @@ static void gen_write_new_pc_addr(DisasContext *ctx, TCGv addr, if (ctx->pkt->pkt_has_multi_cof) { /* If there are multiple branches in a packet, ignore the second one */ tcg_gen_movcond_tl(TCG_COND_NE, hex_gpr[HEX_REG_PC], - hex_branch_taken, tcg_constant_tl(0), + ctx->branch_taken, tcg_constant_tl(0), hex_gpr[HEX_REG_PC], addr); - tcg_gen_movi_tl(hex_branch_taken, 1); + 
tcg_gen_movi_tl(ctx->branch_taken, 1); } else { tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], addr); } @@ -546,36 +503,33 @@ static void gen_write_new_pc_pcrel(DisasContext *ctx, int pc_off, ctx->branch_cond = TCG_COND_ALWAYS; if (pred != NULL) { ctx->branch_cond = cond; - tcg_gen_mov_tl(hex_branch_taken, pred); + tcg_gen_mov_tl(ctx->branch_taken, pred); } ctx->branch_dest = dest; } } -void gen_set_usr_field(int field, TCGv val) +void gen_set_usr_field(DisasContext *ctx, int field, TCGv val) { - tcg_gen_deposit_tl(hex_new_value[HEX_REG_USR], hex_new_value[HEX_REG_USR], - val, + TCGv usr = get_result_gpr(ctx, HEX_REG_USR); + tcg_gen_deposit_tl(usr, usr, val, reg_field_info[field].offset, reg_field_info[field].width); } -void gen_set_usr_fieldi(int field, int x) +void gen_set_usr_fieldi(DisasContext *ctx, int field, int x) { if (reg_field_info[field].width == 1) { + TCGv usr = get_result_gpr(ctx, HEX_REG_USR); target_ulong bit = 1 << reg_field_info[field].offset; if ((x & 1) == 1) { - tcg_gen_ori_tl(hex_new_value[HEX_REG_USR], - hex_new_value[HEX_REG_USR], - bit); + tcg_gen_ori_tl(usr, usr, bit); } else { - tcg_gen_andi_tl(hex_new_value[HEX_REG_USR], - hex_new_value[HEX_REG_USR], - ~bit); + tcg_gen_andi_tl(usr, usr, ~bit); } } else { TCGv val = tcg_constant_tl(x); - gen_set_usr_field(field, val); + gen_set_usr_field(ctx, field, val); } } @@ -587,12 +541,68 @@ static void gen_compare(TCGCond cond, TCGv res, TCGv arg1, TCGv arg2) tcg_gen_movcond_tl(cond, res, arg1, arg2, one, zero); } +#ifndef CONFIG_HEXAGON_IDEF_PARSER +static inline void gen_loop0r(DisasContext *ctx, TCGv RsV, int riV) +{ + fIMMEXT(riV); + fPCALIGN(riV); + gen_log_reg_write(ctx, HEX_REG_LC0, RsV); + gen_log_reg_write(ctx, HEX_REG_SA0, tcg_constant_tl(ctx->pkt->pc + riV)); + gen_set_usr_fieldi(ctx, USR_LPCFG, 0); +} + +static void gen_loop0i(DisasContext *ctx, int count, int riV) +{ + gen_loop0r(ctx, tcg_constant_tl(count), riV); +} + +static inline void gen_loop1r(DisasContext *ctx, TCGv RsV, int riV) +{ + fIMMEXT(riV); + fPCALIGN(riV); + gen_log_reg_write(ctx, HEX_REG_LC1, RsV); + gen_log_reg_write(ctx, HEX_REG_SA1, tcg_constant_tl(ctx->pkt->pc + riV)); +} + +static void gen_loop1i(DisasContext *ctx, int count, int riV) +{ + gen_loop1r(ctx, tcg_constant_tl(count), riV); +} + +static void gen_ploopNsr(DisasContext *ctx, int N, TCGv RsV, int riV) +{ + fIMMEXT(riV); + fPCALIGN(riV); + gen_log_reg_write(ctx, HEX_REG_LC0, RsV); + gen_log_reg_write(ctx, HEX_REG_SA0, tcg_constant_tl(ctx->pkt->pc + riV)); + gen_set_usr_fieldi(ctx, USR_LPCFG, N); + gen_log_pred_write(ctx, 3, tcg_constant_tl(0)); +} + +static void gen_ploopNsi(DisasContext *ctx, int N, int count, int riV) +{ + gen_ploopNsr(ctx, N, tcg_constant_tl(count), riV); +} + +static inline void gen_comparei(TCGCond cond, TCGv res, TCGv arg1, int arg2) +{ + gen_compare(cond, res, arg1, tcg_constant_tl(arg2)); +} +#endif + static void gen_cond_jumpr(DisasContext *ctx, TCGv dst_pc, TCGCond cond, TCGv pred) { gen_write_new_pc_addr(ctx, dst_pc, cond, pred); } +static void gen_cond_jumpr31(DisasContext *ctx, TCGCond cond, TCGv pred) +{ + TCGv LSB = tcg_temp_new(); + tcg_gen_andi_tl(LSB, pred, 1); + gen_cond_jumpr(ctx, hex_gpr[HEX_REG_LR], cond, LSB); +} + static void gen_cond_jump(DisasContext *ctx, TCGCond cond, TCGv pred, int pc_off) { @@ -607,12 +617,10 @@ static void gen_cmpnd_cmp_jmp(DisasContext *ctx, TCGv pred = tcg_temp_new(); gen_compare(cond1, pred, arg1, arg2); gen_log_pred_write(ctx, pnum, pred); - tcg_temp_free(pred); } else { TCGv pred = tcg_temp_new(); - tcg_gen_mov_tl(pred, 
hex_new_pred_value[pnum]); + tcg_gen_mov_tl(pred, ctx->new_pred_value[pnum]); gen_cond_jump(ctx, cond2, pred, pc_off); - tcg_temp_free(pred); } } @@ -666,12 +674,10 @@ static void gen_cmpnd_tstbit0_jmp(DisasContext *ctx, tcg_gen_andi_tl(pred, arg, 1); gen_8bitsof(pred, pred); gen_log_pred_write(ctx, pnum, pred); - tcg_temp_free(pred); } else { TCGv pred = tcg_temp_new(); - tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]); + tcg_gen_mov_tl(pred, ctx->new_pred_value[pnum]); gen_cond_jump(ctx, cond, pred, pc_off); - tcg_temp_free(pred); } } @@ -681,7 +687,6 @@ static void gen_testbit0_jumpnv(DisasContext *ctx, TCGv pred = tcg_temp_new(); tcg_gen_andi_tl(pred, arg, 1); gen_cond_jump(ctx, cond, pred, pc_off); - tcg_temp_free(pred); } static void gen_jump(DisasContext *ctx, int pc_off) @@ -696,45 +701,161 @@ static void gen_jumpr(DisasContext *ctx, TCGv new_pc) static void gen_call(DisasContext *ctx, int pc_off) { - TCGv next_PC = - tcg_constant_tl(ctx->pkt->pc + ctx->pkt->encod_pkt_size_in_bytes); - gen_log_reg_write(HEX_REG_LR, next_PC); + TCGv lr = get_result_gpr(ctx, HEX_REG_LR); + tcg_gen_movi_tl(lr, ctx->next_PC); gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL); } +static void gen_callr(DisasContext *ctx, TCGv new_pc) +{ + TCGv lr = get_result_gpr(ctx, HEX_REG_LR); + tcg_gen_movi_tl(lr, ctx->next_PC); + gen_write_new_pc_addr(ctx, new_pc, TCG_COND_ALWAYS, NULL); +} + static void gen_cond_call(DisasContext *ctx, TCGv pred, TCGCond cond, int pc_off) { - TCGv next_PC; - TCGv lsb = tcg_temp_local_new(); + TCGv lr = get_result_gpr(ctx, HEX_REG_LR); + TCGv lsb = tcg_temp_new(); TCGLabel *skip = gen_new_label(); tcg_gen_andi_tl(lsb, pred, 1); gen_write_new_pc_pcrel(ctx, pc_off, cond, lsb); tcg_gen_brcondi_tl(cond, lsb, 0, skip); - tcg_temp_free(lsb); - next_PC = - tcg_constant_tl(ctx->pkt->pc + ctx->pkt->encod_pkt_size_in_bytes); - gen_log_reg_write(HEX_REG_LR, next_PC); + tcg_gen_movi_tl(lr, ctx->next_PC); gen_set_label(skip); } +static void gen_cond_callr(DisasContext *ctx, + TCGCond cond, TCGv pred, TCGv new_pc) +{ + TCGv lsb = tcg_temp_new(); + TCGLabel *skip = gen_new_label(); + tcg_gen_andi_tl(lsb, pred, 1); + tcg_gen_brcondi_tl(cond, lsb, 0, skip); + gen_callr(ctx, new_pc); + gen_set_label(skip); +} + +#ifndef CONFIG_HEXAGON_IDEF_PARSER +/* frame = ((LR << 32) | FP) ^ (FRAMEKEY << 32) */ +static TCGv_i64 gen_frame_scramble(void) +{ + TCGv_i64 frame = tcg_temp_new_i64(); + TCGv tmp = tcg_temp_new(); + tcg_gen_xor_tl(tmp, hex_gpr[HEX_REG_LR], hex_gpr[HEX_REG_FRAMEKEY]); + tcg_gen_concat_i32_i64(frame, hex_gpr[HEX_REG_FP], tmp); + return frame; +} +#endif + +/* frame ^= (int64_t)FRAMEKEY << 32 */ +static void gen_frame_unscramble(TCGv_i64 frame) +{ + TCGv_i64 framekey = tcg_temp_new_i64(); + tcg_gen_extu_i32_i64(framekey, hex_gpr[HEX_REG_FRAMEKEY]); + tcg_gen_shli_i64(framekey, framekey, 32); + tcg_gen_xor_i64(frame, frame, framekey); } + +static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA) +{ + Insn *insn = ctx->insn; /* Needed for CHECK_NOSHUF */ + CHECK_NOSHUF(EA, 8); + tcg_gen_qemu_ld_i64(frame, EA, ctx->mem_idx, MO_TEUQ); +} + +#ifndef CONFIG_HEXAGON_IDEF_PARSER +/* Stack overflow check */ +static void gen_framecheck(TCGv EA, int framesize) +{ + /* Not modelled in linux-user mode */ + /* Placeholder for system mode */ +#ifndef CONFIG_USER_ONLY + g_assert_not_reached(); +#endif +} + +static void gen_allocframe(DisasContext *ctx, TCGv r29, int framesize) +{ + TCGv r30 = tcg_temp_new(); + TCGv_i64 frame; + tcg_gen_addi_tl(r30, r29, -8); + frame =
gen_frame_scramble(); + gen_store8(cpu_env, r30, frame, ctx->insn->slot); + gen_log_reg_write(ctx, HEX_REG_FP, r30); + gen_framecheck(r30, framesize); + tcg_gen_subi_tl(r29, r30, framesize); +} + +static void gen_deallocframe(DisasContext *ctx, TCGv_i64 r31_30, TCGv r30) +{ + TCGv r29 = tcg_temp_new(); + TCGv_i64 frame = tcg_temp_new_i64(); + gen_load_frame(ctx, frame, r30); + gen_frame_unscramble(frame); + tcg_gen_mov_i64(r31_30, frame); + tcg_gen_addi_tl(r29, r30, 8); + gen_log_reg_write(ctx, HEX_REG_SP, r29); +} +#endif + +static void gen_return(DisasContext *ctx, TCGv_i64 dst, TCGv src) +{ + /* + * frame = *src + * dst = frame_unscramble(frame) + * SP = src + 8 + * PC = dst.w[1] + */ + TCGv_i64 frame = tcg_temp_new_i64(); + TCGv r31 = tcg_temp_new(); + TCGv r29 = get_result_gpr(ctx, HEX_REG_SP); + + gen_load_frame(ctx, frame, src); + gen_frame_unscramble(frame); + tcg_gen_mov_i64(dst, frame); + tcg_gen_addi_tl(r29, src, 8); + tcg_gen_extrh_i64_i32(r31, dst); + gen_jumpr(ctx, r31); +} + +/* if (pred) dst = dealloc_return(src):raw */ +static void gen_cond_return(DisasContext *ctx, TCGv_i64 dst, TCGv src, + TCGv pred, TCGCond cond) +{ + TCGv LSB = tcg_temp_new(); + TCGLabel *skip = gen_new_label(); + tcg_gen_andi_tl(LSB, pred, 1); + + tcg_gen_brcondi_tl(cond, LSB, 0, skip); + gen_return(ctx, dst, src); + gen_set_label(skip); +} + +/* sub-instruction version (no RddV, so handle it manually) */ +static void gen_cond_return_subinsn(DisasContext *ctx, TCGCond cond, TCGv pred) +{ + TCGv_i64 RddV = get_result_gpr_pair(ctx, HEX_REG_FP); + gen_cond_return(ctx, RddV, hex_gpr[HEX_REG_FP], pred, cond); + gen_log_reg_write_pair(ctx, HEX_REG_FP, RddV); +} + static void gen_endloop0(DisasContext *ctx) { - TCGv lpcfg = tcg_temp_local_new(); + TCGv lpcfg = tcg_temp_new(); GET_USR_FIELD(USR_LPCFG, lpcfg); /* * if (lpcfg == 1) { - * hex_new_pred_value[3] = 0xff; - * hex_pred_written |= 1 << 3; + * p3 = 0xff; * } */ TCGLabel *label1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1); { - tcg_gen_movi_tl(hex_new_pred_value[3], 0xff); - tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << 3); + gen_log_pred_write(ctx, 3, tcg_constant_tl(0xff)); } gen_set_label(label1); @@ -747,7 +868,7 @@ static void gen_endloop0(DisasContext *ctx) tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2); { tcg_gen_subi_tl(lpcfg, lpcfg, 1); - SET_USR_FIELD(USR_LPCFG, lpcfg); + gen_set_usr_field(ctx, USR_LPCFG, lpcfg); } gen_set_label(label2); @@ -757,22 +878,97 @@ static void gen_endloop0(DisasContext *ctx) */ if (!ctx->is_tight_loop) { /* - * if (hex_gpr[HEX_REG_LC0] > 1) { - * PC = hex_gpr[HEX_REG_SA0]; - * hex_new_value[HEX_REG_LC0] = hex_gpr[HEX_REG_LC0] - 1; + * if (LC0 > 1) { + * PC = SA0; + * LC0--; * } */ TCGLabel *label3 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3); { + TCGv lc0 = get_result_gpr(ctx, HEX_REG_LC0); gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]); - tcg_gen_subi_tl(hex_new_value[HEX_REG_LC0], - hex_gpr[HEX_REG_LC0], 1); + tcg_gen_subi_tl(lc0, hex_gpr[HEX_REG_LC0], 1); } gen_set_label(label3); } +} - tcg_temp_free(lpcfg); +static void gen_endloop1(DisasContext *ctx) +{ + /* + * if (LC1 > 1) { + * PC = SA1; + * LC1--; + * } + */ + TCGLabel *label = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC1], 1, label); + { + TCGv lc1 = get_result_gpr(ctx, HEX_REG_LC1); + gen_jumpr(ctx, hex_gpr[HEX_REG_SA1]); + tcg_gen_subi_tl(lc1, hex_gpr[HEX_REG_LC1], 1); + } + gen_set_label(label); +} + +static void gen_endloop01(DisasContext *ctx) +{ + TCGv lpcfg 
= tcg_temp_new(); + TCGLabel *label1 = gen_new_label(); + TCGLabel *label2 = gen_new_label(); + TCGLabel *label3 = gen_new_label(); + TCGLabel *done = gen_new_label(); + + GET_USR_FIELD(USR_LPCFG, lpcfg); + + /* + * if (lpcfg == 1) { + * p3 = 0xff; + * } + */ + tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1); + { + gen_log_pred_write(ctx, 3, tcg_constant_tl(0xff)); + } + gen_set_label(label1); + + /* + * if (lpcfg) { + * SET_USR_FIELD(USR_LPCFG, lpcfg - 1); + * } + */ + tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2); + { + tcg_gen_subi_tl(lpcfg, lpcfg, 1); + gen_set_usr_field(ctx, USR_LPCFG, lpcfg); + } + gen_set_label(label2); + + /* + * if (LC0 > 1) { + * PC = SA0; + * LC0--; + * } else if (LC1 > 1) { + * PC = SA1; + * LC1--; + * } + */ + tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3); + { + TCGv lc0 = get_result_gpr(ctx, HEX_REG_LC0); + gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]); + tcg_gen_subi_tl(lc0, hex_gpr[HEX_REG_LC0], 1); + tcg_gen_br(done); + } + gen_set_label(label3); + tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC1], 1, done); + { + TCGv lc1 = get_result_gpr(ctx, HEX_REG_LC1); + gen_jumpr(ctx, hex_gpr[HEX_REG_SA1]); + tcg_gen_subi_tl(lc1, hex_gpr[HEX_REG_LC1], 1); + } + gen_set_label(done); } static void gen_cmp_jumpnv(DisasContext *ctx, @@ -781,7 +977,6 @@ static void gen_cmp_jumpnv(DisasContext *ctx, TCGv pred = tcg_temp_new(); tcg_gen_setcond_tl(cond, pred, val, src); gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off); - tcg_temp_free(pred); } static void gen_cmpi_jumpnv(DisasContext *ctx, @@ -790,12 +985,13 @@ static void gen_cmpi_jumpnv(DisasContext *ctx, TCGv pred = tcg_temp_new(); tcg_gen_setcondi_tl(cond, pred, val, src); gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off); - tcg_temp_free(pred); } /* Shift left with saturation */ -static void gen_shl_sat(TCGv dst, TCGv src, TCGv shift_amt) +static void gen_shl_sat(DisasContext *ctx, TCGv dst, TCGv src, TCGv shift_amt) { + TCGv tmp = tcg_temp_new(); /* In case dst == src */ + TCGv usr = get_result_gpr(ctx, HEX_REG_USR); TCGv sh32 = tcg_temp_new(); TCGv dst_sar = tcg_temp_new(); TCGv ovf = tcg_temp_new(); @@ -819,22 +1015,17 @@ static void gen_shl_sat(TCGv dst, TCGv src, TCGv shift_amt) */ tcg_gen_andi_tl(sh32, shift_amt, 31); - tcg_gen_movcond_tl(TCG_COND_EQ, dst, sh32, shift_amt, + tcg_gen_movcond_tl(TCG_COND_EQ, tmp, sh32, shift_amt, src, tcg_constant_tl(0)); - tcg_gen_shl_tl(dst, dst, sh32); - tcg_gen_sar_tl(dst_sar, dst, sh32); + tcg_gen_shl_tl(tmp, tmp, sh32); + tcg_gen_sar_tl(dst_sar, tmp, sh32); tcg_gen_movcond_tl(TCG_COND_LT, satval, src, tcg_constant_tl(0), min, max); tcg_gen_setcond_tl(TCG_COND_NE, ovf, dst_sar, src); tcg_gen_shli_tl(ovf, ovf, reg_field_info[USR_OVF].offset); - tcg_gen_or_tl(hex_new_value[HEX_REG_USR], hex_new_value[HEX_REG_USR], ovf); + tcg_gen_or_tl(usr, usr, ovf); - tcg_gen_movcond_tl(TCG_COND_EQ, dst, dst_sar, src, dst, satval); - - tcg_temp_free(sh32); - tcg_temp_free(dst_sar); - tcg_temp_free(ovf); - tcg_temp_free(satval); + tcg_gen_movcond_tl(TCG_COND_EQ, dst, dst_sar, src, tmp, satval); } static void gen_sar(TCGv dst, TCGv src, TCGv shift_amt) @@ -846,13 +1037,12 @@ static void gen_sar(TCGv dst, TCGv src, TCGv shift_amt) TCGv tmp = tcg_temp_new(); tcg_gen_umin_tl(tmp, shift_amt, tcg_constant_tl(31)); tcg_gen_sar_tl(dst, src, tmp); - tcg_temp_free(tmp); } /* Bidirectional shift right with saturation */ -static void gen_asr_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV) +static void gen_asr_r_r_sat(DisasContext *ctx, TCGv RdV, TCGv RsV, TCGv RtV) { - TCGv shift_amt = 
tcg_temp_local_new(); + TCGv shift_amt = tcg_temp_new(); TCGLabel *positive = gen_new_label(); TCGLabel *done = gen_new_label(); @@ -861,7 +1051,7 @@ static void gen_asr_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV) /* Negative shift amount => shift left */ tcg_gen_neg_tl(shift_amt, shift_amt); - gen_shl_sat(RdV, RsV, shift_amt); + gen_shl_sat(ctx, RdV, RsV, shift_amt); tcg_gen_br(done); gen_set_label(positive); @@ -869,14 +1059,12 @@ static void gen_asr_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV) gen_sar(RdV, RsV, shift_amt); gen_set_label(done); - - tcg_temp_free(shift_amt); } /* Bidirectional shift left with saturation */ -static void gen_asl_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV) +static void gen_asl_r_r_sat(DisasContext *ctx, TCGv RdV, TCGv RsV, TCGv RtV) { - TCGv shift_amt = tcg_temp_local_new(); + TCGv shift_amt = tcg_temp_new(); TCGLabel *positive = gen_new_label(); TCGLabel *done = gen_new_label(); @@ -890,11 +1078,108 @@ static void gen_asl_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV) gen_set_label(positive); /* Positive shift amount => shift left */ - gen_shl_sat(RdV, RsV, shift_amt); + gen_shl_sat(ctx, RdV, RsV, shift_amt); gen_set_label(done); +} - tcg_temp_free(shift_amt); +static void gen_insert_rp(DisasContext *ctx, TCGv RxV, TCGv RsV, TCGv_i64 RttV) +{ + /* + * int width = fZXTN(6, 32, (fGETWORD(1, RttV))); + * int offset = fSXTN(7, 32, (fGETWORD(0, RttV))); + * size8u_t mask = ((fCONSTLL(1) << width) - 1); + * if (offset < 0) { + * RxV = 0; + * } else { + * RxV &= ~(mask << offset); + * RxV |= ((RsV & mask) << offset); + * } + */ + + TCGv width = tcg_temp_new(); + TCGv offset = tcg_temp_new(); + TCGv_i64 mask = tcg_temp_new_i64(); + TCGv_i64 result = tcg_temp_new_i64(); + TCGv_i64 tmp = tcg_temp_new_i64(); + TCGv_i64 offset64 = tcg_temp_new_i64(); + TCGLabel *label = gen_new_label(); + TCGLabel *done = gen_new_label(); + + tcg_gen_extrh_i64_i32(width, RttV); + tcg_gen_extract_tl(width, width, 0, 6); + tcg_gen_extrl_i64_i32(offset, RttV); + tcg_gen_sextract_tl(offset, offset, 0, 7); + /* Possible values for offset are -64 .. 63 */ + tcg_gen_brcondi_tl(TCG_COND_GE, offset, 0, label); + /* For negative offsets, zero out the result */ + tcg_gen_movi_tl(RxV, 0); + tcg_gen_br(done); + gen_set_label(label); + /* At this point, possible values of offset are 0 .. 63 */ + tcg_gen_ext_i32_i64(mask, width); + tcg_gen_shl_i64(mask, tcg_constant_i64(1), mask); + tcg_gen_subi_i64(mask, mask, 1); + tcg_gen_extu_i32_i64(result, RxV); + tcg_gen_ext_i32_i64(tmp, offset); + tcg_gen_shl_i64(tmp, mask, tmp); + tcg_gen_andc_i64(result, result, tmp); + tcg_gen_extu_i32_i64(tmp, RsV); + tcg_gen_and_i64(tmp, tmp, mask); + tcg_gen_extu_i32_i64(offset64, offset); + tcg_gen_shl_i64(tmp, tmp, offset64); + tcg_gen_or_i64(result, result, tmp); + tcg_gen_extrl_i64_i32(RxV, result); + gen_set_label(done); +} + +static void gen_asr_r_svw_trun(DisasContext *ctx, TCGv RdV, + TCGv_i64 RssV, TCGv RtV) +{ + /* + * for (int i = 0; i < 2; i++) { + * fSETHALF(i, RdV, fGETHALF(0, ((fSXTN(7, 32, RtV) > 0) ? + * (fCAST4_8s(fGETWORD(i, RssV)) >> fSXTN(7, 32, RtV)) : + * (fCAST4_8s(fGETWORD(i, RssV)) << -fSXTN(7, 32, RtV))))); + * } + */ + TCGv shift_amt32 = tcg_temp_new(); + TCGv_i64 shift_amt64 = tcg_temp_new_i64(); + TCGv_i64 tmp64 = tcg_temp_new_i64(); + TCGv tmp32 = tcg_temp_new(); + TCGLabel *label = gen_new_label(); + TCGLabel *zero = gen_new_label(); + TCGLabel *done = gen_new_label(); + + tcg_gen_sextract_tl(shift_amt32, RtV, 0, 7); + /* Possible values of shift_amt32 are -64 .. 
63 */ + tcg_gen_brcondi_tl(TCG_COND_LE, shift_amt32, 0, label); + /* After branch, possible values of shift_amt32 are 1 .. 63 */ + tcg_gen_ext_i32_i64(shift_amt64, shift_amt32); + for (int i = 0; i < 2; i++) { + tcg_gen_sextract_i64(tmp64, RssV, i * 32, 32); + tcg_gen_sar_i64(tmp64, tmp64, shift_amt64); + tcg_gen_extrl_i64_i32(tmp32, tmp64); + tcg_gen_deposit_tl(RdV, RdV, tmp32, i * 16, 16); + } + tcg_gen_br(done); + gen_set_label(label); + tcg_gen_neg_tl(shift_amt32, shift_amt32); + /* At this point, possible values of shift_amt32 are 0 .. 64 */ + tcg_gen_brcondi_tl(TCG_COND_GT, shift_amt32, 63, zero); + /* At this point, possible values of shift_amt32 are 0 .. 63 */ + tcg_gen_ext_i32_i64(shift_amt64, shift_amt32); + for (int i = 0; i < 2; i++) { + tcg_gen_sextract_i64(tmp64, RssV, i * 32, 32); + tcg_gen_shl_i64(tmp64, tmp64, shift_amt64); + tcg_gen_extrl_i64_i32(tmp32, tmp64); + tcg_gen_deposit_tl(RdV, RdV, tmp32, i * 16, 16); + } + tcg_gen_br(done); + gen_set_label(zero); + /* When the shift_amt is 64, zero out the result */ + tcg_gen_movi_tl(RdV, 0); + gen_set_label(done); } static intptr_t vreg_src_off(DisasContext *ctx, int num) @@ -911,69 +1196,35 @@ static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num, - VRegWriteType type, int slot_num, - bool is_predicated) + VRegWriteType type) { - TCGLabel *label_end = NULL; intptr_t dstoff; - if (is_predicated) { - TCGv cancelled = tcg_temp_local_new(); - label_end = gen_new_label(); - - /* Don't do anything if the slot was cancelled */ - tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1); - tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end); - tcg_temp_free(cancelled); - } - if (type != EXT_TMP) { dstoff = ctx_future_vreg_off(ctx, num, 1, true); tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector)); - tcg_gen_ori_tl(hex_VRegs_updated, hex_VRegs_updated, 1 << num); } else { dstoff = ctx_tmp_vreg_off(ctx, num, 1, false); tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector)); } - - if (is_predicated) { - gen_set_label(label_end); - } } static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num, - VRegWriteType type, int slot_num, - bool is_predicated) + VRegWriteType type) { - gen_log_vreg_write(ctx, srcoff, num ^ 0, type, slot_num, is_predicated); + gen_log_vreg_write(ctx, srcoff, num ^ 0, type); srcoff += sizeof(MMVector); - gen_log_vreg_write(ctx, srcoff, num ^ 1, type, slot_num, is_predicated); + gen_log_vreg_write(ctx, srcoff, num ^ 1, type); } -static void gen_log_qreg_write(intptr_t srcoff, int num, int vnew, - int slot_num, bool is_predicated) +static intptr_t get_result_qreg(DisasContext *ctx, int qnum) { - TCGLabel *label_end = NULL; - intptr_t dstoff; - - if (is_predicated) { - TCGv cancelled = tcg_temp_local_new(); - label_end = gen_new_label(); - - /* Don't do anything if the slot was cancelled */ - tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1); - tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end); - tcg_temp_free(cancelled); - } - - dstoff = offsetof(CPUHexagonState, future_QRegs[num]); - tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMQReg), sizeof(MMQReg)); - - if (is_predicated) { - tcg_gen_ori_tl(hex_QRegs_updated, hex_QRegs_updated, 1 << num); - gen_set_label(label_end); + if (ctx->need_commit) { + return offsetof(CPUHexagonState, future_QRegs[qnum]); + } else { + return offsetof(CPUHexagonState, QRegs[qnum]); } } @@ -985,11 +1236,10 @@ static
void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src, tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1)); } for (int i = 0; i < sizeof(MMVector) / 8; i++) { - tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx); + tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_TEUQ); tcg_gen_addi_tl(src, src, 8); tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8); } - tcg_temp_free_i64(tmp); } static void gen_vreg_store(DisasContext *ctx, TCGv EA, intptr_t srcoff, @@ -1061,10 +1311,6 @@ static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff) tcg_gen_st8_i64(mask, cpu_env, dstoff + i); } - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(word); - tcg_temp_free_i64(bits); - tcg_temp_free_i64(mask); } void probe_noshuf_load(TCGv va, int s, int mi) @@ -1078,21 +1324,19 @@ void probe_noshuf_load(TCGv va, int s, int mi) * Note: Since this function might branch, `val` is * required to be a `tcg_temp_local`. */ -void gen_set_usr_field_if(int field, TCGv val) +void gen_set_usr_field_if(DisasContext *ctx, int field, TCGv val) { /* Sets the USR field if `val` is non-zero */ if (reg_field_info[field].width == 1) { + TCGv usr = get_result_gpr(ctx, HEX_REG_USR); TCGv tmp = tcg_temp_new(); tcg_gen_extract_tl(tmp, val, 0, reg_field_info[field].width); tcg_gen_shli_tl(tmp, tmp, reg_field_info[field].offset); - tcg_gen_or_tl(hex_new_value[HEX_REG_USR], - hex_new_value[HEX_REG_USR], - tmp); - tcg_temp_free(tmp); + tcg_gen_or_tl(usr, usr, tmp); } else { TCGLabel *skip_label = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, val, 0, skip_label); - gen_set_usr_field(field, val); + gen_set_usr_field(ctx, field, val); gen_set_label(skip_label); } } @@ -1107,22 +1351,28 @@ void gen_sat_i32(TCGv dest, TCGv source, int width) void gen_sat_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width) { - gen_sat_i32(dest, source, width); - tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, dest); + TCGv tmp = tcg_temp_new(); /* In case dest == source */ + gen_sat_i32(tmp, source, width); + tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, tmp); + tcg_gen_mov_tl(dest, tmp); } void gen_satu_i32(TCGv dest, TCGv source, int width) { + TCGv tmp = tcg_temp_new(); /* In case dest == source */ TCGv max_val = tcg_constant_tl((1 << width) - 1); TCGv zero = tcg_constant_tl(0); - tcg_gen_movcond_tl(TCG_COND_GTU, dest, source, max_val, max_val, source); - tcg_gen_movcond_tl(TCG_COND_LT, dest, source, zero, zero, dest); + tcg_gen_movcond_tl(TCG_COND_GTU, tmp, source, max_val, max_val, source); + tcg_gen_movcond_tl(TCG_COND_LT, tmp, source, zero, zero, tmp); + tcg_gen_mov_tl(dest, tmp); } void gen_satu_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width) { - gen_satu_i32(dest, source, width); - tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, dest); + TCGv tmp = tcg_temp_new(); /* In case dest == source */ + gen_satu_i32(tmp, source, width); + tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, tmp); + tcg_gen_mov_tl(dest, tmp); } void gen_sat_i64(TCGv_i64 dest, TCGv_i64 source, int width) @@ -1135,39 +1385,43 @@ void gen_sat_i64(TCGv_i64 dest, TCGv_i64 source, int width) void gen_sat_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width) { + TCGv_i64 tmp = tcg_temp_new_i64(); /* In case dest == source */ TCGv_i64 ovfl_64; - gen_sat_i64(dest, source, width); + gen_sat_i64(tmp, source, width); ovfl_64 = tcg_temp_new_i64(); - tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, dest, source); + tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, tmp, source); + tcg_gen_mov_i64(dest, tmp); tcg_gen_trunc_i64_tl(ovfl, ovfl_64); - tcg_temp_free_i64(ovfl_64); } void 
gen_satu_i64(TCGv_i64 dest, TCGv_i64 source, int width) { + TCGv_i64 tmp = tcg_temp_new_i64(); /* In case dest == source */ TCGv_i64 max_val = tcg_constant_i64((1LL << width) - 1LL); TCGv_i64 zero = tcg_constant_i64(0); - tcg_gen_movcond_i64(TCG_COND_GTU, dest, source, max_val, max_val, source); - tcg_gen_movcond_i64(TCG_COND_LT, dest, source, zero, zero, dest); + tcg_gen_movcond_i64(TCG_COND_GTU, tmp, source, max_val, max_val, source); + tcg_gen_movcond_i64(TCG_COND_LT, tmp, source, zero, zero, tmp); + tcg_gen_mov_i64(dest, tmp); } void gen_satu_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width) { + TCGv_i64 tmp = tcg_temp_new_i64(); /* In case dest == source */ TCGv_i64 ovfl_64; - gen_satu_i64(dest, source, width); + gen_satu_i64(tmp, source, width); ovfl_64 = tcg_temp_new_i64(); - tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, dest, source); + tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, tmp, source); + tcg_gen_mov_i64(dest, tmp); tcg_gen_trunc_i64_tl(ovfl, ovfl_64); - tcg_temp_free_i64(ovfl_64); } /* Implements the fADDSAT64 macro in TCG */ -void gen_add_sat_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) +void gen_add_sat_i64(DisasContext *ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) { - TCGv_i64 sum = tcg_temp_local_new_i64(); + TCGv_i64 sum = tcg_temp_new_i64(); TCGv_i64 xor = tcg_temp_new_i64(); TCGv_i64 cond1 = tcg_temp_new_i64(); - TCGv_i64 cond2 = tcg_temp_local_new_i64(); + TCGv_i64 cond2 = tcg_temp_new_i64(); TCGv_i64 cond3 = tcg_temp_new_i64(); TCGv_i64 mask = tcg_constant_i64(0x8000000000000000ULL); TCGv_i64 max_pos = tcg_constant_i64(0x7FFFFFFFFFFFFFFFLL); @@ -1182,15 +1436,12 @@ void gen_add_sat_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) /* if (xor & mask) */ tcg_gen_and_i64(cond1, xor, mask); - tcg_temp_free_i64(xor); tcg_gen_brcondi_i64(TCG_COND_NE, cond1, 0, no_ovfl_label); - tcg_temp_free_i64(cond1); /* else if ((a ^ sum) & mask) */ tcg_gen_xor_i64(cond2, a, sum); tcg_gen_and_i64(cond2, cond2, mask); tcg_gen_brcondi_i64(TCG_COND_NE, cond2, 0, ovfl_label); - tcg_temp_free_i64(cond2); /* fallthrough to no_ovfl_label branch */ /* if branch */ @@ -1201,11 +1452,8 @@ void gen_add_sat_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) /* else if branch */ gen_set_label(ovfl_label); tcg_gen_and_i64(cond3, sum, mask); - tcg_temp_free_i64(mask); - tcg_temp_free_i64(sum); tcg_gen_movcond_i64(TCG_COND_NE, ret, cond3, zero, max_pos, max_neg); - tcg_temp_free_i64(cond3); - SET_USR_FIELD(USR_OVF, 1); + gen_set_usr_fieldi(ctx, USR_OVF, 1); gen_set_label(ret_label); } diff --git a/target/hexagon/genptr.h b/target/hexagon/genptr.h index 591b059698..a4b43c2910 100644 --- a/target/hexagon/genptr.h +++ b/target/hexagon/genptr.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -35,11 +35,13 @@ void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot); void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src, uint32_t slot); TCGv gen_read_reg(TCGv result, int num); TCGv gen_read_preg(TCGv pred, uint8_t num); -void gen_log_reg_write(int rnum, TCGv val); +TCGv get_result_gpr(DisasContext *ctx, int rnum); +TCGv get_result_pred(DisasContext *ctx, int pnum); +void gen_log_reg_write(DisasContext *ctx, int rnum, TCGv val); void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val); -void gen_set_usr_field(int field, TCGv val); -void gen_set_usr_fieldi(int field, int x); -void gen_set_usr_field_if(int field, TCGv val); +void gen_set_usr_field(DisasContext *ctx, int field, TCGv val); +void gen_set_usr_fieldi(DisasContext *ctx, int field, int x); +void gen_set_usr_field_if(DisasContext *ctx, int field, TCGv val); void gen_sat_i32(TCGv dest, TCGv source, int width); void gen_sat_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width); void gen_satu_i32(TCGv dest, TCGv source, int width); @@ -48,7 +50,7 @@ void gen_sat_i64(TCGv_i64 dest, TCGv_i64 source, int width); void gen_sat_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width); void gen_satu_i64(TCGv_i64 dest, TCGv_i64 source, int width); void gen_satu_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width); -void gen_add_sat_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b); +void gen_add_sat_i64(DisasContext *ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b); TCGv gen_8bitsof(TCGv result, TCGv value); void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src); TCGv gen_get_byte(TCGv result, int N, TCGv src, bool sign); @@ -58,4 +60,6 @@ void gen_set_half(int N, TCGv result, TCGv src); void gen_set_half_i64(int N, TCGv_i64 result, TCGv src); void probe_noshuf_load(TCGv va, int s, int mi); +extern const target_ulong reg_immut_masks[TOTAL_PER_THREAD_REGS]; + #endif diff --git a/target/hexagon/helper.h b/target/hexagon/helper.h index 368f0b5708..fa0ebaf7c8 100644 --- a/target/hexagon/helper.h +++ b/target/hexagon/helper.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,7 +21,7 @@ DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_RETURN, noreturn, env, i32) DEF_HELPER_1(debug_start_packet, void, env) DEF_HELPER_FLAGS_3(debug_check_store_width, TCG_CALL_NO_WG, void, env, int, int) -DEF_HELPER_FLAGS_3(debug_commit_end, TCG_CALL_NO_WG, void, env, int, int) +DEF_HELPER_FLAGS_5(debug_commit_end, TCG_CALL_NO_WG, void, env, i32, int, int, int) DEF_HELPER_2(commit_store, void, env, int) DEF_HELPER_3(gather_store, void, env, i32, int) DEF_HELPER_1(commit_hvx_stores, void, env) @@ -29,8 +29,10 @@ DEF_HELPER_FLAGS_4(fcircadd, TCG_CALL_NO_RWG_SE, s32, s32, s32, s32, s32) DEF_HELPER_FLAGS_1(fbrev, TCG_CALL_NO_RWG_SE, i32, i32) DEF_HELPER_3(sfrecipa, i64, env, f32, f32) DEF_HELPER_2(sfinvsqrta, i64, env, f32) -DEF_HELPER_4(vacsh_val, s64, env, s64, s64, s64) +DEF_HELPER_5(vacsh_val, s64, env, s64, s64, s64, i32) DEF_HELPER_FLAGS_4(vacsh_pred, TCG_CALL_NO_RWG_SE, s32, env, s64, s64, s64) +DEF_HELPER_FLAGS_2(cabacdecbin_val, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(cabacdecbin_pred, TCG_CALL_NO_RWG_SE, s32, s64, s64) /* Floating point */ DEF_HELPER_2(conv_sf2df, f64, env, f32) @@ -107,4 +109,4 @@ DEF_HELPER_2(vwhist128qm, void, env, s32) DEF_HELPER_4(probe_noshuf_load, void, env, i32, int, int) DEF_HELPER_2(probe_pkt_scalar_store_s0, void, env, int) DEF_HELPER_2(probe_hvx_stores, void, env, int) -DEF_HELPER_3(probe_pkt_scalar_hvx_stores, void, env, int, int) +DEF_HELPER_2(probe_pkt_scalar_hvx_stores, void, env, int) diff --git a/target/hexagon/hex_common.py b/target/hexagon/hex_common.py index a29f61bb4f..dce1b852a7 100755 --- a/target/hexagon/hex_common.py +++ b/target/hexagon/hex_common.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ## -## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -21,14 +21,17 @@ import sys import re import string -behdict = {} # tag ->behavior -semdict = {} # tag -> semantics -attribdict = {} # tag -> attributes -macros = {} # macro -> macro information... -attribinfo = {} # Register information and misc -tags = [] # list of all tags -overrides = {} # tags with helper overrides -idef_parser_enabled = {} # tags enabled for idef-parser +behdict = {} # tag ->behavior +semdict = {} # tag -> semantics +attribdict = {} # tag -> attributes +macros = {} # macro -> macro information... +attribinfo = {} # Register information and misc +tags = [] # list of all tags +overrides = {} # tags with helper overrides +idef_parser_enabled = {} # tags enabled for idef-parser + +def bad_register(regtype, regid): + raise Exception(f"Bad register parse: regtype '{regtype}' regid '{regid}'") # We should do this as a hash for performance, # but to keep order let's keep it as a list. @@ -37,103 +40,119 @@ def uniquify(seq): seen_add = seen.add return [x for x in seq if x not in seen and not seen_add(x)] -regre = re.compile( - r"((?" % l) - macro.attribs |= expand_macro_attribs( - macros[submacro], allmac_re) + raise Exception(f"Couldn't find macro: <{l}>") + macro.attribs |= expand_macro_attribs(macros[submacro], allmac_re) finished_macros.add(macro.key) return macro.attribs + # When qemu needs an attribute that isn't in the imported files, # we'll add it here. 
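# For example, calculate_attribs() below registers mappings such as add_qemu_macro_attrib("fSTORE", "A_SCALAR_STORE"), which tags every instruction whose semantics expand fSTORE as a scalar store.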
def add_qemu_macro_attrib(name, attrib): macros[name].attribs.add(attrib) -immextre = re.compile(r'f(MUST_)?IMMEXT[(]([UuSsRr])') + +immextre = re.compile(r"f(MUST_)?IMMEXT[(]([UuSsRr])") + def is_cond_jump(tag): - if tag == 'J2_rte': + if tag == "J2_rte": return False - if ('A_HWLOOP0_END' in attribdict[tag] or - 'A_HWLOOP1_END' in attribdict[tag]): + if "A_HWLOOP0_END" in attribdict[tag] or "A_HWLOOP1_END" in attribdict[tag]: return False - return \ - re.compile(r"(if.*fBRANCH)|(if.*fJUMPR)").search(semdict[tag]) != None + return re.compile(r"(if.*fBRANCH)|(if.*fJUMPR)").search(semdict[tag]) != None + def is_cond_call(tag): return re.compile(r"(if.*fCALL)").search(semdict[tag]) != None + def calculate_attribs(): - add_qemu_macro_attrib('fREAD_PC', 'A_IMPLICIT_READS_PC') - add_qemu_macro_attrib('fTRAP', 'A_IMPLICIT_READS_PC') - add_qemu_macro_attrib('fWRITE_P0', 'A_WRITES_PRED_REG') - add_qemu_macro_attrib('fWRITE_P1', 'A_WRITES_PRED_REG') - add_qemu_macro_attrib('fWRITE_P2', 'A_WRITES_PRED_REG') - add_qemu_macro_attrib('fWRITE_P3', 'A_WRITES_PRED_REG') - add_qemu_macro_attrib('fSET_OVERFLOW', 'A_IMPLICIT_WRITES_USR') - add_qemu_macro_attrib('fSET_LPCFG', 'A_IMPLICIT_WRITES_USR') - add_qemu_macro_attrib('fSTORE', 'A_SCALAR_STORE') + add_qemu_macro_attrib("fREAD_PC", "A_IMPLICIT_READS_PC") + add_qemu_macro_attrib("fTRAP", "A_IMPLICIT_READS_PC") + add_qemu_macro_attrib("fWRITE_P0", "A_WRITES_PRED_REG") + add_qemu_macro_attrib("fWRITE_P1", "A_WRITES_PRED_REG") + add_qemu_macro_attrib("fWRITE_P2", "A_WRITES_PRED_REG") + add_qemu_macro_attrib("fWRITE_P3", "A_WRITES_PRED_REG") + add_qemu_macro_attrib("fSET_OVERFLOW", "A_IMPLICIT_WRITES_USR") + add_qemu_macro_attrib("fSET_LPCFG", "A_IMPLICIT_WRITES_USR") + add_qemu_macro_attrib("fLOAD", "A_SCALAR_LOAD") + add_qemu_macro_attrib("fSTORE", "A_SCALAR_STORE") + add_qemu_macro_attrib('fLSBNEW0', 'A_IMPLICIT_READS_P0') + add_qemu_macro_attrib('fLSBNEW0NOT', 'A_IMPLICIT_READS_P0') + add_qemu_macro_attrib('fREAD_P0', 'A_IMPLICIT_READS_P0') + add_qemu_macro_attrib('fLSBNEW1', 'A_IMPLICIT_READS_P1') + add_qemu_macro_attrib('fLSBNEW1NOT', 'A_IMPLICIT_READS_P1') + add_qemu_macro_attrib('fREAD_P3', 'A_IMPLICIT_READS_P3') # Recurse down macros, find attributes from sub-macros macroValues = list(macros.values()) - allmacros_restr = "|".join(set([ m.re.pattern for m in macroValues ])) + allmacros_restr = "|".join(set([m.re.pattern for m in macroValues])) allmacros_re = re.compile(allmacros_restr) for macro in macroValues: - expand_macro_attribs(macro,allmacros_re) + expand_macro_attribs(macro, allmacros_re) # Append attributes to all instructions for tag in tags: for macname in allmacros_re.findall(semdict[tag]): - if not macname: continue + if not macname: + continue macro = macros[macname] attribdict[tag] |= set(macro.attribs) # Figure out which instructions write predicate registers tagregs = get_tagregs() for tag in tags: regs = tagregs[tag] - for regtype, regid, toss, numregs in regs: + for regtype, regid in regs: if regtype == "P" and is_written(regid): - attribdict[tag].add('A_WRITES_PRED_REG') + attribdict[tag].add("A_WRITES_PRED_REG") # Mark conditional jumps and calls # Not all instructions are properly marked with A_CONDEXEC for tag in tags: if is_cond_jump(tag) or is_cond_call(tag): - attribdict[tag].add('A_CONDEXEC') + attribdict[tag].add("A_CONDEXEC") + def SEMANTICS(tag, beh, sem): - #print tag,beh,sem + # print tag,beh,sem behdict[tag] = beh semdict[tag] = sem attribdict[tag] = set() - tags.append(tag) # dicts have no order, this is for order + 
tags.append(tag) # dicts have no order, this is for order + def ATTRIBUTES(tag, attribstring): - attribstring = \ - attribstring.replace("ATTRIBS","").replace("(","").replace(")","") + attribstring = attribstring.replace("ATTRIBS", "").replace("(", "").replace(")", "") if not attribstring: return attribs = attribstring.split(",") for attrib in attribs: attribdict[tag].add(attrib.strip()) + class Macro(object): - __slots__ = ['key','name', 'beh', 'attribs', 're'] + __slots__ = ["key", "name", "beh", "attribs", "re"] + def __init__(self, name, beh, attribs): self.key = name self.name = name @@ -141,20 +160,25 @@ class Macro(object): self.attribs = set(attribs) self.re = re.compile("\\b" + name + "\\b") -def MACROATTRIB(macname,beh,attribstring): - attribstring = attribstring.replace("(","").replace(")","") + +def MACROATTRIB(macname, beh, attribstring): + attribstring = attribstring.replace("(", "").replace(")", "") if attribstring: attribs = attribstring.split(",") else: attribs = [] - macros[macname] = Macro(macname,beh,attribs) + macros[macname] = Macro(macname, beh, attribs) -def compute_tag_regs(tag): - return uniquify(regre.findall(behdict[tag])) +def compute_tag_regs(tag, full): + tagregs = regre.findall(behdict[tag]) + if not full: + tagregs = map(lambda reg: reg[:2], tagregs) + return uniquify(tagregs) def compute_tag_immediates(tag): return uniquify(immre.findall(behdict[tag])) + ## ## tagregs is the main data structure we'll use ## tagregs[tag] will contain the registers used by an instruction @@ -176,85 +200,120 @@ def compute_tag_immediates(tag): ## x, y read-write register ## xx, yy read-write register pair ## -def get_tagregs(): - return dict(zip(tags, list(map(compute_tag_regs, tags)))) +def get_tagregs(full=False): + compute_func = lambda tag: compute_tag_regs(tag, full) + return dict(zip(tags, list(map(compute_func, tags)))) def get_tagimms(): return dict(zip(tags, list(map(compute_tag_immediates, tags)))) + def is_pair(regid): return len(regid) == 2 + def is_single(regid): return len(regid) == 1 + def is_written(regid): return regid[0] in "dexy" + def is_writeonly(regid): return regid[0] in "de" + def is_read(regid): return regid[0] in "stuvwxy" + def is_readwrite(regid): return regid[0] in "xy" + def is_scalar_reg(regtype): return regtype in "RPC" + def is_hvx_reg(regtype): return regtype in "VQ" + def is_old_val(regtype, regid, tag): - return regtype+regid+'V' in semdict[tag] + return regtype + regid + "V" in semdict[tag] + def is_new_val(regtype, regid, tag): - return regtype+regid+'N' in semdict[tag] + return regtype + regid + "N" in semdict[tag] + def need_slot(tag): - if (('A_CONDEXEC' in attribdict[tag] and - 'A_JUMP' not in attribdict[tag]) or - 'A_STORE' in attribdict[tag] or - 'A_LOAD' in attribdict[tag]): + if ( + "A_CVI_SCATTER" not in attribdict[tag] + and "A_CVI_GATHER" not in attribdict[tag] + and ("A_STORE" in attribdict[tag] + or "A_LOAD" in attribdict[tag]) + ): return 1 else: return 0 + def need_part1(tag): return re.compile(r"fPART1").search(semdict[tag]) + def need_ea(tag): return re.compile(r"\bEA\b").search(semdict[tag]) + def need_PC(tag): - return 'A_IMPLICIT_READS_PC' in attribdict[tag] + return "A_IMPLICIT_READS_PC" in attribdict[tag] + def helper_needs_next_PC(tag): - return 'A_CALL' in attribdict[tag] + return "A_CALL" in attribdict[tag] + def need_pkt_has_multi_cof(tag): - return 'A_COF' in attribdict[tag] + return "A_COF" in attribdict[tag] + + +def need_pkt_need_commit(tag): + return 'A_IMPLICIT_WRITES_USR' in attribdict[tag] + +def 
need_condexec_reg(tag, regs): + if "A_CONDEXEC" in attribdict[tag]: + for regtype, regid in regs: + if is_writeonly(regid) and not is_hvx_reg(regtype): + return True + return False + def skip_qemu_helper(tag): return tag in overrides.keys() + def is_tmp_result(tag): - return ('A_CVI_TMP' in attribdict[tag] or - 'A_CVI_TMP_DST' in attribdict[tag]) + return "A_CVI_TMP" in attribdict[tag] or "A_CVI_TMP_DST" in attribdict[tag] + def is_new_result(tag): - return ('A_CVI_NEW' in attribdict[tag]) + return "A_CVI_NEW" in attribdict[tag] + def is_idef_parser_enabled(tag): return tag in idef_parser_enabled + def imm_name(immlett): - return "%siV" % immlett + return f"{immlett}iV" + def read_semantics_file(name): eval_line = "" - for line in open(name, 'rt').readlines(): + for line in open(name, "rt").readlines(): if not line.startswith("#"): eval_line += line if line.endswith("\\\n"): @@ -263,24 +322,29 @@ def read_semantics_file(name): eval(eval_line.strip()) eval_line = "" + def read_attribs_file(name): - attribre = re.compile(r'DEF_ATTRIB\(([A-Za-z0-9_]+), ([^,]*), ' + - r'"([A-Za-z0-9_\.]*)", "([A-Za-z0-9_\.]*)"\)') - for line in open(name, 'rt').readlines(): + attribre = re.compile( + r"DEF_ATTRIB\(([A-Za-z0-9_]+), ([^,]*), " + + r'"([A-Za-z0-9_\.]*)", "([A-Za-z0-9_\.]*)"\)' + ) + for line in open(name, "rt").readlines(): if not attribre.match(line): continue - (attrib_base,descr,rreg,wreg) = attribre.findall(line)[0] - attrib_base = 'A_' + attrib_base - attribinfo[attrib_base] = {'rreg':rreg, 'wreg':wreg, 'descr':descr} + (attrib_base, descr, rreg, wreg) = attribre.findall(line)[0] + attrib_base = "A_" + attrib_base + attribinfo[attrib_base] = {"rreg": rreg, "wreg": wreg, "descr": descr} + def read_overrides_file(name): overridere = re.compile("#define fGEN_TCG_([A-Za-z0-9_]+)\(.*") - for line in open(name, 'rt').readlines(): + for line in open(name, "rt").readlines(): if not overridere.match(line): continue tag = overridere.findall(line)[0] overrides[tag] = True + def read_idef_parser_enabled_file(name): global idef_parser_enabled with open(name, "r") as idef_parser_enabled_file: diff --git a/target/hexagon/iclass.c b/target/hexagon/iclass.c index 6091286993..c3f8523b27 100644 --- a/target/hexagon/iclass.c +++ b/target/hexagon/iclass.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -51,8 +51,10 @@ SlotMask find_iclass_slots(Opcode opcode, int itype) return SLOTS_0; } else if ((opcode == J2_trap0) || (opcode == Y2_isync) || - (opcode == J2_pause) || (opcode == J4_hintjumpr)) { + (opcode == J2_pause)) { return SLOTS_2; + } else if (opcode == J4_hintjumpr) { + return SLOTS_23; } else if (GET_ATTRIB(opcode, A_CRSLOT23)) { return SLOTS_23; } else if (GET_ATTRIB(opcode, A_RESTRICT_PREFERSLOT0)) { diff --git a/target/hexagon/idef-parser/README.rst b/target/hexagon/idef-parser/README.rst index ff6d14150a..debeddfde5 100644 --- a/target/hexagon/idef-parser/README.rst +++ b/target/hexagon/idef-parser/README.rst @@ -31,7 +31,6 @@ idef-parser will compile the above code into the following code: TCGv_i32 tmp_0 = tcg_temp_new_i32(); tcg_gen_add_i32(tmp_0, RsV, RtV); tcg_gen_mov_i32(RdV, tmp_0); - tcg_temp_free_i32(tmp_0); } The output of the compilation process will be a function, containing the @@ -102,12 +101,6 @@ The result of the addition is now stored in the temporary, we move it into the correct destination register. This code may seem inefficient, but QEMU will perform some optimizations on the tinycode, reducing the unnecessary copy. -:: - - tcg_temp_free_i32(tmp_0); - -Finally, we free the temporary we used to hold the addition result. - Parser Input ------------ @@ -294,9 +287,9 @@ generators the previous declarations are mapped to :: - int var1; -> TCGv_i32 var1 = tcg_temp_local_new_i32(); + int var1; -> TCGv_i32 var1 = tcg_temp_new_i32(); - int var2 = 0; -> TCGv_i32 var1 = tcg_temp_local_new_i32(); + int var2 = 0; -> TCGv_i32 var2 = tcg_temp_new_i32(); tcg_gen_movi_i32(var2, ((int64_t) 0ULL)); which are later automatically freed at the end of the function they're declared @@ -524,7 +517,6 @@ instruction, TCGv_i32 tmp_0 = tcg_temp_new_i32(); tcg_gen_add_i32(tmp_0, RsV, RsV); tcg_gen_mov_i32(RdV, tmp_0); - tcg_temp_free_i32(tmp_0); } Here the bug, albeit hard to spot, is in ``tcg_gen_add_i32(tmp_0, RsV, RsV);`` diff --git a/target/hexagon/idef-parser/idef-parser.h b/target/hexagon/idef-parser/idef-parser.h index 5c49d4da3e..d23e71f13b 100644 --- a/target/hexagon/idef-parser/idef-parser.h +++ b/target/hexagon/idef-parser/idef-parser.h @@ -82,7 +82,6 @@ enum ImmUnionTag { VALUE, QEMU_TMP, IMM_PC, - IMM_NPC, IMM_CONSTEXT, }; @@ -185,7 +184,6 @@ typedef struct HexValue { unsigned bit_width; /**< Bit width of the rvalue */ HexSignedness signedness; /**< Signedness of the rvalue */ bool is_dotnew; /**< rvalue of predicate type is dotnew? */ - bool is_manual; /**< Opt out of automatic freeing of params */ } HexValue; /** diff --git a/target/hexagon/idef-parser/idef-parser.lex b/target/hexagon/idef-parser/idef-parser.lex index ff87a02c3a..cd5958ec90 100644 --- a/target/hexagon/idef-parser/idef-parser.lex +++ b/target/hexagon/idef-parser/idef-parser.lex @@ -5,7 +5,7 @@ %{ /* - * Copyright(c) 2019-2022 rev.ng Labs Srl. All Rights Reserved. + * Copyright(c) 2019-2023 rev.ng Labs Srl. All Rights Reserved.
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -140,8 +140,6 @@ STRING_LIT \"(\\.|[^"\\])*\" yylval->rvalue.is_dotnew = true; yylval->rvalue.signedness = SIGNED; return PRED; } -"IV1DEAD()" | -"fPAUSE(uiV);" { return ';'; } "+=" { return INC; } "-=" { return DEC; } "++" { return PLUSPLUS; } @@ -159,9 +157,8 @@ STRING_LIT \"(\\.|[^"\\])*\" "else" { return ELSE; } "for" { return FOR; } "fREAD_IREG" { return ICIRC; } -"fPART1" { return PART1; } "if" { return IF; } -"fFRAME_SCRAMBLE" { return FSCR; } +"fFRAME_SCRAMBLE" | "fFRAME_UNSCRAMBLE" { return FSCR; } "fFRAMECHECK" { return FCHK; } "Constant_extended" { return CONSTEXT; } @@ -312,14 +309,10 @@ STRING_LIT \"(\\.|[^"\\])*\" "(unsigned int)" { yylval->cast.bit_width = 32; yylval->cast.signedness = UNSIGNED; return CAST; } -"fREAD_PC()" | -"PC" { return PC; } -"fREAD_NPC()" | -"NPC" { return NPC; } -"fGET_LPCFG" | +"fREAD_PC()" { return PC; } "USR.LPCFG" { return LPCFG; } "LOAD_CANCEL(EA)" { return LOAD_CANCEL; } -"STORE_CANCEL(EA)" | +"STORE_CANCEL(EA)" { return STORE_CANCEL; } "CANCEL" { return CANCEL; } "N"{LOWER_ID}"N" { yylval->rvalue.type = REGISTER_ARG; yylval->rvalue.reg.type = DOTNEW; @@ -360,14 +353,6 @@ STRING_LIT \"(\\.|[^"\\])*\" yylval->rvalue.bit_width = 32; yylval->rvalue.signedness = UNSIGNED; return REG; } -"fREAD_LC"[01] { yylval->rvalue.type = REGISTER; - yylval->rvalue.reg.type = CONTROL; - yylval->rvalue.reg.id = HEX_REG_LC0 - + (yytext[8] - '0') * 2; - yylval->rvalue.reg.bit_width = 32; - yylval->rvalue.bit_width = 32; - yylval->rvalue.signedness = UNSIGNED; - return REG; } "LC"[01] { yylval->rvalue.type = REGISTER; yylval->rvalue.reg.type = CONTROL; yylval->rvalue.reg.id = HEX_REG_LC0 @@ -376,14 +361,6 @@ STRING_LIT \"(\\.|[^"\\])*\" yylval->rvalue.bit_width = 32; yylval->rvalue.signedness = UNSIGNED; return REG; } -"fREAD_SA"[01] { yylval->rvalue.type = REGISTER; - yylval->rvalue.reg.type = CONTROL; - yylval->rvalue.reg.id = HEX_REG_SA0 - + (yytext[8] - '0') * 2; - yylval->rvalue.reg.bit_width = 32; - yylval->rvalue.bit_width = 32; - yylval->rvalue.signedness = UNSIGNED; - return REG; } "SA"[01] { yylval->rvalue.type = REGISTER; yylval->rvalue.reg.type = CONTROL; yylval->rvalue.reg.id = HEX_REG_SA0 @@ -424,12 +401,39 @@ STRING_LIT \"(\\.|[^"\\])*\" } return SIGN; } -"0x"{HEX_DIGIT}+ | -{DIGIT}+ { yylval->rvalue.type = IMMEDIATE; - yylval->rvalue.bit_width = 32; - yylval->rvalue.signedness = SIGNED; +"0x"{HEX_DIGIT}+ { uint64_t value = strtoull(yytext, NULL, 0); + yylval->rvalue.type = IMMEDIATE; yylval->rvalue.imm.type = VALUE; - yylval->rvalue.imm.value = strtoull(yytext, NULL, 0); + yylval->rvalue.imm.value = value; + if (value <= INT_MAX) { + yylval->rvalue.bit_width = sizeof(int) * 8; + yylval->rvalue.signedness = SIGNED; + } else if (value <= UINT_MAX) { + yylval->rvalue.bit_width = sizeof(unsigned int) * 8; + yylval->rvalue.signedness = UNSIGNED; + } else if (value <= LONG_MAX) { + yylval->rvalue.bit_width = sizeof(long) * 8; + yylval->rvalue.signedness = SIGNED; + } else if (value <= ULONG_MAX) { + yylval->rvalue.bit_width = sizeof(unsigned long) * 8; + yylval->rvalue.signedness = UNSIGNED; + } else { + g_assert_not_reached(); + } + return IMM; } +{DIGIT}+ { int64_t value = strtoll(yytext, NULL, 0); + yylval->rvalue.type = IMMEDIATE; + yylval->rvalue.imm.type = VALUE; + yylval->rvalue.imm.value = value; + if (value >= INT_MIN && value <= INT_MAX) { + yylval->rvalue.bit_width = sizeof(int) * 8; + 
yylval->rvalue.signedness = SIGNED; + } else if (value >= LONG_MIN && value <= LONG_MAX) { + yylval->rvalue.bit_width = sizeof(long) * 8; + yylval->rvalue.signedness = SIGNED; + } else { + g_assert_not_reached(); + } return IMM; } "0x"{HEX_DIGIT}+"ULL" | {DIGIT}+"ULL" { yylval->rvalue.type = IMMEDIATE; diff --git a/target/hexagon/idef-parser/idef-parser.y b/target/hexagon/idef-parser/idef-parser.y index c14cb39500..5c983954ed 100644 --- a/target/hexagon/idef-parser/idef-parser.y +++ b/target/hexagon/idef-parser/idef-parser.y @@ -1,6 +1,6 @@ %{ /* - * Copyright(c) 2019-2022 rev.ng Labs Srl. All Rights Reserved. + * Copyright(c) 2019-2023 rev.ng Labs Srl. All Rights Reserved. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -52,8 +52,8 @@ %token IN INAME VAR %token ABS CROUND ROUND CIRCADD COUNTONES INC DEC ANDA ORA XORA PLUSPLUS ASL %token ASR LSR EQ NEQ LTE GTE MIN MAX ANDL FOR ICIRC IF MUN FSCR FCHK SXT -%token ZXT CONSTEXT LOCNT BREV SIGN LOAD STORE PC NPC LPCFG -%token LOAD_CANCEL CANCEL IDENTITY PART1 ROTL INSBITS SETBITS EXTRANGE +%token ZXT CONSTEXT LOCNT BREV SIGN LOAD STORE PC LPCFG +%token LOAD_CANCEL STORE_CANCEL CANCEL IDENTITY ROTL INSBITS SETBITS EXTRANGE %token CAST4_8U FAIL CARRY_FROM_ADD ADDSAT64 LSBNEW %token TYPE_SIZE_T TYPE_INT TYPE_SIGNED TYPE_UNSIGNED TYPE_LONG @@ -269,9 +269,6 @@ statements : statements statement statement : control_statement | var_decl ';' | rvalue ';' - { - gen_rvalue_free(c, &@1, &$1); - } | code_block | ';' ; @@ -339,16 +336,6 @@ assign_statement : lvalue '=' rvalue OUT(c, &@1, &$1, " = ", &$3, ";\n"); $$ = $1; } - | PC '=' rvalue - { - @1.last_column = @3.last_column; - yyassert(c, &@1, !is_inside_ternary(c), - "Assignment side-effect not modeled!"); - $3 = gen_rvalue_truncate(c, &@1, &$3); - $3 = rvalue_materialize(c, &@1, &$3); - OUT(c, &@1, "gen_write_new_pc(", &$3, ");\n"); - gen_rvalue_free(c, &@1, &$3); /* Free temporary value */ - } | LOAD '(' IMM ',' IMM ',' SIGN ',' var ',' lvalue ')' { @1.last_column = @12.last_column; @@ -375,8 +362,7 @@ assign_statement : lvalue '=' rvalue "Assignment side-effect not modeled!"); $3 = gen_rvalue_truncate(c, &@1, &$3); $3 = rvalue_materialize(c, &@1, &$3); - OUT(c, &@1, "SET_USR_FIELD(USR_LPCFG, ", &$3, ");\n"); - gen_rvalue_free(c, &@1, &$3); + OUT(c, &@1, "gen_set_usr_field(ctx, USR_LPCFG, ", &$3, ");\n"); } | DEPOSIT '(' rvalue ',' rvalue ',' rvalue ')' { @@ -417,24 +403,20 @@ control_statement : frame_check | cancel_statement | if_statement | for_statement - | fpart1_statement ; frame_check : FCHK '(' rvalue ',' rvalue ')' ';' - { - gen_rvalue_free(c, &@1, &$3); - gen_rvalue_free(c, &@1, &$5); - } ; cancel_statement : LOAD_CANCEL { gen_load_cancel(c, &@1); } - | CANCEL + | STORE_CANCEL { gen_cancel(c, &@1); } + | CANCEL ; if_statement : if_stmt @@ -471,17 +453,6 @@ for_statement : FOR '(' IMM '=' IMM ';' IMM '<' IMM ';' IMM PLUSPLUS ')' } ; -fpart1_statement : PART1 - { - OUT(c, &@1, "if (insn->part1) {\n"); - } - '(' statements ')' - { - @1.last_column = @3.last_column; - OUT(c, &@1, "return; }\n"); - } - ; - if_stmt : IF '(' rvalue ')' { @1.last_column = @3.last_column; @@ -521,20 +492,6 @@ rvalue : FAIL rvalue.signedness = UNSIGNED; $$ = rvalue; } - | NPC - { - /* - * NPC is only read from CALLs, so we can hardcode it - * at translation time - */ - HexValue rvalue; - memset(&rvalue, 0, sizeof(HexValue)); - rvalue.type = IMMEDIATE; - rvalue.imm.type = IMM_NPC; - rvalue.bit_width = 32; - rvalue.signedness = 
UNSIGNED; - $$ = rvalue; - } | CONSTEXT { HexValue rvalue; @@ -543,7 +500,6 @@ rvalue : FAIL rvalue.imm.type = IMM_CONSTEXT; rvalue.signedness = UNSIGNED; rvalue.is_dotnew = false; - rvalue.is_manual = false; $$ = rvalue; } | var @@ -638,8 +594,6 @@ rvalue : FAIL | CAST rvalue { @1.last_column = @2.last_column; - /* Assign target signedness */ - $2.signedness = $1.signedness; $$ = gen_cast_op(c, &@1, &$2, $1.bit_width, $1.signedness); } | rvalue EQ rvalue @@ -702,7 +656,6 @@ rvalue : FAIL } | rvalue '?' { - $1.is_manual = true; Ternary t = { 0 }; t.state = IN_LEFT; t.cond = $1; @@ -730,7 +683,7 @@ rvalue : FAIL yyassert(c, &@1, $5.type == IMMEDIATE && $5.imm.type == VALUE, "SXT expects immediate values\n"); - $$ = gen_extend_op(c, &@1, &$3, $5.imm.value, &$7, SIGNED); + $$ = gen_extend_op(c, &@1, &$3, 64, &$7, SIGNED); } | ZXT '(' rvalue ',' IMM ',' rvalue ')' { @@ -738,7 +691,7 @@ rvalue : FAIL yyassert(c, &@1, $5.type == IMMEDIATE && $5.imm.type == VALUE, "ZXT expects immediate values\n"); - $$ = gen_extend_op(c, &@1, &$3, $5.imm.value, &$7, UNSIGNED); + $$ = gen_extend_op(c, &@1, &$3, 64, &$7, UNSIGNED); } | '(' rvalue ')' { @@ -774,7 +727,6 @@ rvalue : FAIL @1.last_column = @6.last_column; $$ = gen_tmp(c, &@1, 32, UNSIGNED); OUT(c, &@1, "gen_read_ireg(", &$$, ", ", &$3, ", ", &$6, ");\n"); - gen_rvalue_free(c, &@1, &$3); } | CIRCADD '(' rvalue ',' rvalue ',' rvalue ')' { @@ -793,11 +745,6 @@ rvalue : FAIL /* Ones count */ $$ = gen_ctpop_op(c, &@1, &$3); } - | LPCFG - { - $$ = gen_tmp_value(c, &@1, "0", 32, UNSIGNED); - OUT(c, &@1, "GET_USR_FIELD(USR_LPCFG, ", &$$, ");\n"); - } | EXTRACT '(' rvalue ',' rvalue ')' { @1.last_column = @6.last_column; diff --git a/target/hexagon/idef-parser/macros.inc b/target/hexagon/idef-parser/macros.inc index 6b697da87a..7478d4db17 100644 --- a/target/hexagon/idef-parser/macros.inc +++ b/target/hexagon/idef-parser/macros.inc @@ -97,16 +97,8 @@ #define fWRITE_LR(A) (LR = A) #define fWRITE_FP(A) (FP = A) #define fWRITE_SP(A) (SP = A) -/* - * Note: There is a rule in the parser that matches `PC = ...` and emits - * a call to `gen_write_new_pc`. We need to call `gen_write_new_pc` to - * get the correct semantics when there are multiple stores in a packet. - */ -#define fBRANCH(LOC, TYPE) (PC = LOC) -#define fJUMPR(REGNO, TARGET, TYPE) (PC = TARGET) #define fWRITE_LOOP_REGS0(START, COUNT) SA0 = START; (LC0 = COUNT) #define fWRITE_LOOP_REGS1(START, COUNT) SA1 = START; (LC1 = COUNT) -#define fWRITE_LC0(VAL) (LC0 = VAL) #define fWRITE_LC1(VAL) (LC1 = VAL) #define fSET_LPCFG(VAL) (USR.LPCFG = VAL) #define fWRITE_P0(VAL) P0 = VAL; @@ -121,7 +113,6 @@ #define fEA_GPI(IMM) (EA = fREAD_GP() + IMM) #define fPM_I(REG, IMM) (REG = REG + IMM) #define fPM_M(REG, MVAL) (REG = REG + MVAL) -#define fWRITE_NPC(VAL) (PC = VAL) /* Unary operators */ #define fROUND(A) (A + 0x8000) diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c index 8110686c51..7b5ebafec2 100644 --- a/target/hexagon/idef-parser/parser-helpers.c +++ b/target/hexagon/idef-parser/parser-helpers.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 rev.ng Labs Srl. All Rights Reserved. + * Copyright(c) 2019-2023 rev.ng Labs Srl. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -167,8 +167,9 @@ void reg_print(Context *c, YYLTYPE *locp, HexReg *reg) EMIT(c, "hex_gpr[%u]", reg->id); } -void imm_print(Context *c, YYLTYPE *locp, HexImm *imm) +void imm_print(Context *c, YYLTYPE *locp, HexValue *rvalue) { + HexImm *imm = &rvalue->imm; switch (imm->type) { case I: EMIT(c, "i"); @@ -177,7 +178,21 @@ void imm_print(Context *c, YYLTYPE *locp, HexImm *imm) EMIT(c, "%ciV", imm->id); break; case VALUE: - EMIT(c, "((int64_t) %" PRIu64 "ULL)", (int64_t) imm->value); + if (rvalue->bit_width == 32) { + if (rvalue->signedness == UNSIGNED) { + EMIT(c, "((uint32_t) 0x%" PRIx32 ")", (uint32_t) imm->value); + } else { + EMIT(c, "((int32_t) 0x%" PRIx32 ")", (int32_t) imm->value); + } + } else if (rvalue->bit_width == 64) { + if (rvalue->signedness == UNSIGNED) { + EMIT(c, "((uint64_t) 0x%" PRIx64 "ULL)", (uint64_t) imm->value); + } else { + EMIT(c, "((int64_t) 0x%" PRIx64 "LL)", (int64_t) imm->value); + } + } else { + g_assert_not_reached(); + } break; case QEMU_TMP: EMIT(c, "qemu_tmp_%" PRIu64, imm->index); @@ -185,9 +200,6 @@ void imm_print(Context *c, YYLTYPE *locp, HexImm *imm) case IMM_PC: EMIT(c, "ctx->base.pc_next"); break; - case IMM_NPC: - EMIT(c, "ctx->npc"); - break; case IMM_CONSTEXT: EMIT(c, "insn->extension_valid"); break; @@ -216,7 +228,7 @@ void rvalue_print(Context *c, YYLTYPE *locp, void *pointer) tmp_print(c, locp, &rvalue->tmp); break; case IMMEDIATE: - imm_print(c, locp, &rvalue->imm); + imm_print(c, locp, rvalue); break; case VARID: var_print(c, locp, &rvalue->var); @@ -278,7 +290,6 @@ static HexValue gen_constant(Context *c, rvalue.bit_width = bit_width; rvalue.signedness = signedness; rvalue.is_dotnew = false; - rvalue.is_manual = true; rvalue.tmp.index = c->inst.tmp_count; OUT(c, locp, "TCGv_i", &bit_width, " tmp_", &c->inst.tmp_count, " = tcg_constant_i", &bit_width, "(", value, ");\n"); @@ -299,7 +310,6 @@ HexValue gen_tmp(Context *c, rvalue.bit_width = bit_width; rvalue.signedness = signedness; rvalue.is_dotnew = false; - rvalue.is_manual = false; rvalue.tmp.index = c->inst.tmp_count; OUT(c, locp, "TCGv_i", &bit_width, " tmp_", &c->inst.tmp_count, " = tcg_temp_new_i", &bit_width, "();\n"); @@ -307,50 +317,9 @@ HexValue gen_tmp(Context *c, return rvalue; } -HexValue gen_tmp_local(Context *c, - YYLTYPE *locp, - unsigned bit_width, - HexSignedness signedness) -{ - HexValue rvalue; - assert(bit_width == 32 || bit_width == 64); - memset(&rvalue, 0, sizeof(HexValue)); - rvalue.type = TEMP; - rvalue.bit_width = bit_width; - rvalue.signedness = signedness; - rvalue.is_dotnew = false; - rvalue.is_manual = false; - rvalue.tmp.index = c->inst.tmp_count; - OUT(c, locp, "TCGv_i", &bit_width, " tmp_", &c->inst.tmp_count, - " = tcg_temp_local_new_i", &bit_width, "();\n"); - c->inst.tmp_count++; - return rvalue; -} - -HexValue gen_tmp_value(Context *c, - YYLTYPE *locp, - const char *value, - unsigned bit_width, - HexSignedness signedness) -{ - HexValue rvalue; - assert(bit_width == 32 || bit_width == 64); - memset(&rvalue, 0, sizeof(HexValue)); - rvalue.type = TEMP; - rvalue.bit_width = bit_width; - rvalue.signedness = signedness; - rvalue.is_dotnew = false; - rvalue.is_manual = false; - rvalue.tmp.index = c->inst.tmp_count; - OUT(c, locp, "TCGv_i", &bit_width, " tmp_", &c->inst.tmp_count, - " = tcg_const_i", &bit_width, "(", value, ");\n"); - c->inst.tmp_count++; - return rvalue; -} - -static HexValue 
gen_tmp_value_from_imm(Context *c, - YYLTYPE *locp, - HexValue *value) +static HexValue gen_constant_from_imm(Context *c, + YYLTYPE *locp, + HexValue *value) { HexValue rvalue; assert(value->type == IMMEDIATE); @@ -359,14 +328,13 @@ static HexValue gen_tmp_value_from_imm(Context *c, rvalue.bit_width = value->bit_width; rvalue.signedness = value->signedness; rvalue.is_dotnew = false; - rvalue.is_manual = false; rvalue.tmp.index = c->inst.tmp_count; /* - * Here we output the call to `tcg_const_i` in + * Here we output the call to `tcg_constant_i` in * order to create the temporary value. Note that we * add a cast * - * `tcg_const_i((int_t) ...)` + * `tcg_constant_i((int_t) ...)` * * This cast is required to avoid implicit integer * conversion warnings since all immediates are * @@ -374,7 +342,7 @@ static HexValue gen_tmp_value_from_imm(Context *c, * integer is 32-bit. */ OUT(c, locp, "TCGv_i", &rvalue.bit_width, " tmp_", &c->inst.tmp_count); - OUT(c, locp, " = tcg_const_i", &rvalue.bit_width, + OUT(c, locp, " = tcg_constant_i", &rvalue.bit_width, "((int", &rvalue.bit_width, "_t) (", value, "));\n"); c->inst.tmp_count++; @@ -395,7 +363,6 @@ HexValue gen_imm_value(Context *c __attribute__((unused)), rvalue.bit_width = bit_width; rvalue.signedness = signedness; rvalue.is_dotnew = false; - rvalue.is_manual = false; rvalue.imm.type = VALUE; rvalue.imm.value = value; return rvalue; } @@ -410,7 +377,6 @@ HexValue gen_imm_qemu_tmp(Context *c, YYLTYPE *locp, unsigned bit_width, memset(&rvalue, 0, sizeof(HexValue)); rvalue.type = IMMEDIATE; rvalue.is_dotnew = false; - rvalue.is_manual = false; rvalue.bit_width = bit_width; rvalue.signedness = signedness; rvalue.imm.type = QEMU_TMP; @@ -418,26 +384,10 @@ HexValue gen_imm_qemu_tmp(Context *c, YYLTYPE *locp, unsigned bit_width, rvalue.imm.index = c->inst.qemu_tmp_count++; return rvalue; } -void gen_rvalue_free(Context *c, YYLTYPE *locp, HexValue *rvalue) -{ - if (rvalue->type == TEMP && !rvalue->is_manual) { - const char *bit_suffix = (rvalue->bit_width == 64) ? "i64" : "i32"; - OUT(c, locp, "tcg_temp_free_", bit_suffix, "(", rvalue, ");\n"); - } -} - -static void gen_rvalue_free_manual(Context *c, YYLTYPE *locp, HexValue *rvalue) -{ - rvalue->is_manual = false; - gen_rvalue_free(c, locp, rvalue); -} - HexValue rvalue_materialize(Context *c, YYLTYPE *locp, HexValue *rvalue) { if (rvalue->type == IMMEDIATE) { - HexValue res = gen_tmp_value_from_imm(c, locp, rvalue); - gen_rvalue_free(c, locp, rvalue); - return res; + return gen_constant_from_imm(c, locp, rvalue); } return *rvalue; } @@ -451,13 +401,10 @@ HexValue gen_rvalue_extend(Context *c, YYLTYPE *locp, HexValue *rvalue) if (rvalue->type == IMMEDIATE) { HexValue res = gen_imm_qemu_tmp(c, locp, 64, rvalue->signedness); - bool is_unsigned = (rvalue->signedness == UNSIGNED); - const char *sign_suffix = is_unsigned ? "u" : ""; gen_c_int_type(c, locp, 64, rvalue->signedness); - OUT(c, locp, " ", &res, " = "); - OUT(c, locp, "(", sign_suffix, "int64_t) "); - OUT(c, locp, "(", sign_suffix, "int32_t) "); - OUT(c, locp, rvalue, ";\n"); + OUT(c, locp, " ", &res, " = ("); + gen_c_int_type(c, locp, 64, rvalue->signedness); + OUT(c, locp, ")", rvalue, ";\n"); return res; } else { HexValue res = gen_tmp(c, locp, 64, rvalue->signedness); bool is_unsigned = (rvalue->signedness == UNSIGNED); const char *sign_suffix = is_unsigned ?
"u" : ""; OUT(c, locp, "tcg_gen_ext", sign_suffix, "_i32_i64(", &res, ", ", rvalue, ");\n"); - gen_rvalue_free(c, locp, rvalue); return res; } } @@ -480,7 +426,6 @@ HexValue gen_rvalue_truncate(Context *c, YYLTYPE *locp, HexValue *rvalue) if (rvalue->bit_width == 64) { HexValue res = gen_tmp(c, locp, 32, rvalue->signedness); OUT(c, locp, "tcg_gen_trunc_i64_tl(", &res, ", ", rvalue, ");\n"); - gen_rvalue_free(c, locp, rvalue); return res; } } @@ -554,7 +499,7 @@ void gen_varid_allocate(Context *c, new_var.signedness = signedness; EMIT_HEAD(c, "TCGv_%s %s", bit_suffix, varid->var.name->str); - EMIT_HEAD(c, " = tcg_temp_local_new_%s();\n", bit_suffix); + EMIT_HEAD(c, " = tcg_temp_new_%s();\n", bit_suffix); g_array_append_val(c->inst.allocated, new_var); } @@ -607,11 +552,6 @@ HexValue gen_bin_cmp(Context *c, fprintf(stderr, "Error in evalutating immediateness!"); abort(); } - - /* Free operands */ - gen_rvalue_free(c, locp, &op1_m); - gen_rvalue_free(c, locp, &op2_m); - return res; } @@ -647,8 +587,6 @@ static void gen_simple_op(Context *c, YYLTYPE *locp, unsigned bit_width, "(", res, ", ", op1, ", ", op2, ");\n"); break; } - gen_rvalue_free(c, locp, op1); - gen_rvalue_free(c, locp, op2); } static void gen_sub_op(Context *c, YYLTYPE *locp, unsigned bit_width, @@ -678,8 +616,6 @@ static void gen_sub_op(Context *c, YYLTYPE *locp, unsigned bit_width, "(", res, ", ", op1, ", ", op2, ");\n"); } break; } - gen_rvalue_free(c, locp, op1); - gen_rvalue_free(c, locp, op2); } static void gen_asl_op(Context *c, YYLTYPE *locp, unsigned bit_width, @@ -731,10 +667,7 @@ static void gen_asl_op(Context *c, YYLTYPE *locp, unsigned bit_width, OUT(c, locp, "tcg_gen_movcond_i", &bit_width); OUT(c, locp, "(TCG_COND_GEU, ", res, ", ", &op2_m, ", ", &edge); OUT(c, locp, ", ", &zero, ", ", res, ");\n"); - gen_rvalue_free(c, locp, &edge); } - gen_rvalue_free(c, locp, &op1_m); - gen_rvalue_free(c, locp, &op2_m); } static void gen_asr_op(Context *c, YYLTYPE *locp, unsigned bit_width, @@ -789,11 +722,7 @@ static void gen_asr_op(Context *c, YYLTYPE *locp, unsigned bit_width, OUT(c, locp, "tcg_gen_movcond_i", &bit_width); OUT(c, locp, "(TCG_COND_GEU, ", res, ", ", &op2_m, ", ", &edge); OUT(c, locp, ", ", &tmp, ", ", res, ");\n"); - gen_rvalue_free(c, locp, &edge); - gen_rvalue_free(c, locp, &tmp); } - gen_rvalue_free(c, locp, &op1_m); - gen_rvalue_free(c, locp, &op2_m); } static void gen_lsr_op(Context *c, YYLTYPE *locp, unsigned bit_width, @@ -835,10 +764,7 @@ static void gen_lsr_op(Context *c, YYLTYPE *locp, unsigned bit_width, OUT(c, locp, "tcg_gen_movcond_i", &bit_width); OUT(c, locp, "(TCG_COND_GEU, ", res, ", ", &op2_m, ", ", &edge); OUT(c, locp, ", ", &zero, ", ", res, ");\n"); - gen_rvalue_free(c, locp, &edge); } - gen_rvalue_free(c, locp, &op1_m); - gen_rvalue_free(c, locp, &op2_m); } /* @@ -867,9 +793,6 @@ static void gen_andl_op(Context *c, YYLTYPE *locp, unsigned bit_width, tmp2 = gen_bin_cmp(c, locp, TCG_COND_NE, op2, &zero); OUT(c, locp, "tcg_gen_and_", bit_suffix, "(", res, ", ", &tmp1, ", ", &tmp2, ");\n"); - gen_rvalue_free_manual(c, locp, &zero); - gen_rvalue_free(c, locp, &tmp1); - gen_rvalue_free(c, locp, &tmp2); break; } } @@ -912,8 +835,6 @@ static void gen_minmax_op(Context *c, YYLTYPE *locp, unsigned bit_width, OUT(c, locp, res, ", ", op1, ", ", &op2_m, ");\n"); break; } - gen_rvalue_free(c, locp, &op1_m); - gen_rvalue_free(c, locp, &op2_m); } /* Code generation functions */ @@ -1050,34 +971,18 @@ HexValue gen_cast_op(Context *c, unsigned target_width, HexSignedness signedness) { + HexValue res; 
assert_signedness(c, locp, src->signedness); if (src->bit_width == target_width) { - return *src; - } else if (src->type == IMMEDIATE) { - HexValue res = *src; - res.bit_width = target_width; - res.signedness = signedness; - return res; + res = *src; + } else if (src->bit_width < target_width) { + res = gen_rvalue_extend(c, locp, src); } else { - HexValue res = gen_tmp(c, locp, target_width, signedness); - /* Truncate */ - if (src->bit_width > target_width) { - OUT(c, locp, "tcg_gen_trunc_i64_tl(", &res, ", ", src, ");\n"); - } else { - assert_signedness(c, locp, src->signedness); - if (src->signedness == UNSIGNED) { - /* Extend unsigned */ - OUT(c, locp, "tcg_gen_extu_i32_i64(", - &res, ", ", src, ");\n"); - } else { - /* Extend signed */ - OUT(c, locp, "tcg_gen_ext_i32_i64(", - &res, ", ", src, ");\n"); - } - } - gen_rvalue_free(c, locp, src); - return res; + /* src->bit_width > target_width */ + res = gen_rvalue_truncate(c, locp, src); } + res.signedness = signedness; + return res; } @@ -1135,8 +1040,6 @@ static HexValue gen_extend_imm_width_op(Context *c, if (need_guarding) { OUT(c, locp, "}\n"); } - - gen_rvalue_free(c, locp, value); return res; } else { /* @@ -1161,8 +1064,6 @@ static HexValue gen_extend_imm_width_op(Context *c, ", 0);\n"); OUT(c, locp, "}\n"); } - - gen_rvalue_free(c, locp, value); return res; } } @@ -1193,16 +1094,11 @@ static HexValue gen_extend_tcg_width_op(Context *c, OUT(c, locp, "tcg_gen_subfi_i", &dst_width); OUT(c, locp, "(", &shift, ", ", &dst_width, ", ", &src_width_m, ");\n"); if (signedness == UNSIGNED) { - const char *mask_str = (dst_width == 32) - ? "0xffffffff" - : "0xffffffffffffffff"; - HexValue mask = gen_tmp_value(c, locp, mask_str, - dst_width, UNSIGNED); + HexValue mask = gen_constant(c, locp, "-1", dst_width, UNSIGNED); OUT(c, locp, "tcg_gen_shr_i", &dst_width, "(", - &mask, ", ", &mask, ", ", &shift, ");\n"); + &res, ", ", &mask, ", ", &shift, ");\n"); OUT(c, locp, "tcg_gen_and_i", &dst_width, "(", - &res, ", ", value, ", ", &mask, ");\n"); - gen_rvalue_free(c, locp, &mask); + &res, ", ", &res, ", ", value, ");\n"); } else { OUT(c, locp, "tcg_gen_shl_i", &dst_width, "(", &res, ", ", value, ", ", &shift, ");\n"); @@ -1214,10 +1110,6 @@ static HexValue gen_extend_tcg_width_op(Context *c, OUT(c, locp, &src_width_m, ", ", &zero, ", ", &zero, ", ", &res, ");\n"); - gen_rvalue_free(c, locp, &src_width_m); - gen_rvalue_free(c, locp, value); - gen_rvalue_free(c, locp, &shift); - return res; } @@ -1228,7 +1120,7 @@ HexValue gen_extend_op(Context *c, HexValue *value, HexSignedness signedness) { - unsigned bit_width = (dst_width = 64) ? 64 : 32; + unsigned bit_width = (dst_width == 64) ? 
64 : 32; HexValue value_m = *value; HexValue src_width_m = *src_width; @@ -1313,15 +1205,12 @@ void gen_rdeposit_op(Context *c, */ k64 = gen_bin_op(c, locp, SUB_OP, &k64, &width_m); mask = gen_bin_op(c, locp, LSR_OP, &mask, &k64); - begin_m.is_manual = true; mask = gen_bin_op(c, locp, ASL_OP, &mask, &begin_m); - mask.is_manual = true; value_m = gen_bin_op(c, locp, ASL_OP, &value_m, &begin_m); value_m = gen_bin_op(c, locp, ANDB_OP, &value_m, &mask); OUT(c, locp, "tcg_gen_not_i", &dst->bit_width, "(", &mask, ", ", &mask, ");\n"); - mask.is_manual = false; res = gen_bin_op(c, locp, ANDB_OP, dst, &mask); res = gen_bin_op(c, locp, ORB_OP, &res, &value_m); @@ -1336,9 +1225,6 @@ void gen_rdeposit_op(Context *c, dst); OUT(c, locp, ", ", &width_m, ", ", &zero, ", ", &res, ", ", dst, ");\n"); - - gen_rvalue_free(c, locp, width); - gen_rvalue_free(c, locp, &res); } void gen_deposit_op(Context *c, @@ -1372,8 +1258,6 @@ void gen_deposit_op(Context *c, value_m = rvalue_materialize(c, locp, &value_m); OUT(c, locp, "tcg_gen_deposit_i", &bit_width, "(", dst, ", ", dst, ", "); OUT(c, locp, &value_m, ", ", index, " * ", &width, ", ", &width, ");\n"); - gen_rvalue_free(c, locp, index); - gen_rvalue_free(c, locp, &value_m); } HexValue gen_rextract_op(Context *c, @@ -1386,7 +1270,6 @@ HexValue gen_rextract_op(Context *c, HexValue res = gen_tmp(c, locp, bit_width, UNSIGNED); OUT(c, locp, "tcg_gen_extract_i", &bit_width, "(", &res); OUT(c, locp, ", ", src, ", ", &begin, ", ", &width, ");\n"); - gen_rvalue_free(c, locp, src); return res; } @@ -1419,12 +1302,8 @@ HexValue gen_extract_op(Context *c, const char *sign_suffix = (extract->signedness == UNSIGNED) ? "u" : ""; OUT(c, locp, "tcg_gen_ext", sign_suffix, "_i32_i64(", &tmp, ", ", &res, ");\n"); - gen_rvalue_free(c, locp, &res); res = tmp; } - - gen_rvalue_free(c, locp, src); - gen_rvalue_free(c, locp, index); return res; } @@ -1436,14 +1315,8 @@ void gen_write_reg(Context *c, YYLTYPE *locp, HexValue *reg, HexValue *value) value_m = rvalue_materialize(c, locp, &value_m); OUT(c, locp, - "gen_log_reg_write(", ®->reg.id, ", ", + "gen_log_reg_write(ctx, ", ®->reg.id, ", ", &value_m, ");\n"); - OUT(c, - locp, - "ctx_log_reg_write(ctx, ", ®->reg.id, - ");\n"); - gen_rvalue_free(c, locp, reg); - gen_rvalue_free(c, locp, &value_m); } void gen_assign(Context *c, @@ -1478,8 +1351,6 @@ void gen_assign(Context *c, const char *imm_suffix = (value_m.type == IMMEDIATE) ? 
"i" : ""; OUT(c, locp, "tcg_gen_mov", imm_suffix, "_i", &bit_width, "(", dst, ", ", &value_m, ");\n"); - - gen_rvalue_free(c, locp, &value_m); } HexValue gen_convround(Context *c, @@ -1495,8 +1366,6 @@ HexValue gen_convround(Context *c, HexValue and; HexValue src_p1; - src_m.is_manual = true; - and = gen_bin_op(c, locp, ANDB_OP, &src_m, &mask); src_p1 = gen_bin_op(c, locp, ADD_OP, &src_m, &one); @@ -1504,12 +1373,6 @@ HexValue gen_convround(Context *c, OUT(c, locp, ", ", &and, ", ", &mask, ", "); OUT(c, locp, &src_p1, ", ", &src_m, ");\n"); - /* Free src but use the original `is_manual` value */ - gen_rvalue_free(c, locp, src); - - /* Free the rest of the values */ - gen_rvalue_free(c, locp, &src_p1); - return res; } @@ -1535,9 +1398,6 @@ static HexValue gen_convround_n_b(Context *c, OUT(c, locp, "tcg_gen_add_i64(", &res); OUT(c, locp, ", ", &res, ", ", &tmp_64, ");\n"); - gen_rvalue_free(c, locp, &tmp); - gen_rvalue_free(c, locp, &tmp_64); - return res; } @@ -1560,10 +1420,6 @@ static HexValue gen_convround_n_c(Context *c, OUT(c, locp, "tcg_gen_add_i64(", &res); OUT(c, locp, ", ", &res, ", ", &tmp_64, ");\n"); - gen_rvalue_free(c, locp, &one); - gen_rvalue_free(c, locp, &tmp); - gen_rvalue_free(c, locp, &tmp_64); - return res; } @@ -1634,18 +1490,6 @@ HexValue gen_convround_n(Context *c, OUT(c, locp, "tcg_gen_shr_i64(", &res); OUT(c, locp, ", ", &res, ", ", &n_64, ");\n"); - gen_rvalue_free(c, locp, &src_casted); - gen_rvalue_free(c, locp, &pos_casted); - - gen_rvalue_free(c, locp, &r1); - gen_rvalue_free(c, locp, &r2); - gen_rvalue_free(c, locp, &r3); - - gen_rvalue_free(c, locp, &cond); - gen_rvalue_free(c, locp, &cond_64); - gen_rvalue_free(c, locp, &mask); - gen_rvalue_free(c, locp, &n_64); - res = gen_rvalue_truncate(c, locp, &res); return res; } @@ -1679,10 +1523,6 @@ HexValue gen_round(Context *c, b = gen_extend_op(c, locp, &src_width, 64, pos, UNSIGNED); b = rvalue_materialize(c, locp, &b); - /* Disable auto-free of values used more than once */ - a.is_manual = true; - b.is_manual = true; - n_m1 = gen_bin_op(c, locp, SUB_OP, &b, &one); shifted = gen_bin_op(c, locp, ASL_OP, &one, &n_m1); sum = gen_bin_op(c, locp, ADD_OP, &shifted, &a); @@ -1691,10 +1531,6 @@ HexValue gen_round(Context *c, OUT(c, locp, "(TCG_COND_EQ, ", &res, ", ", &b, ", ", &zero); OUT(c, locp, ", ", &a, ", ", &sum, ");\n"); - gen_rvalue_free_manual(c, locp, &a); - gen_rvalue_free_manual(c, locp, &b); - gen_rvalue_free(c, locp, &sum); - return res; } @@ -1720,9 +1556,6 @@ void gen_circ_op(Context *c, ", ", modifier); OUT(c, locp, ", ", &cs, ");\n"); - gen_rvalue_free(c, locp, &increment_m); - gen_rvalue_free(c, locp, modifier); - gen_rvalue_free(c, locp, &cs); } HexValue gen_locnt_op(Context *c, YYLTYPE *locp, HexValue *src) @@ -1738,7 +1571,6 @@ HexValue gen_locnt_op(Context *c, YYLTYPE *locp, HexValue *src) &res, ", ", &src_m, ");\n"); OUT(c, locp, "tcg_gen_clzi_i", bit_suffix, "(", &res, ", ", &res, ", "); OUT(c, locp, bit_suffix, ");\n"); - gen_rvalue_free(c, locp, &src_m); return res; } @@ -1752,7 +1584,6 @@ HexValue gen_ctpop_op(Context *c, YYLTYPE *locp, HexValue *src) src_m = rvalue_materialize(c, locp, &src_m); OUT(c, locp, "tcg_gen_ctpop_i", bit_suffix, "(", &res, ", ", &src_m, ");\n"); - gen_rvalue_free(c, locp, &src_m); return res; } @@ -1771,8 +1602,6 @@ HexValue gen_rotl(Context *c, YYLTYPE *locp, HexValue *src, HexValue *width) amount = rvalue_materialize(c, locp, &amount); OUT(c, locp, "tcg_gen_rotl_", suffix, "(", &res, ", ", src, ", ", &amount, ");\n"); - gen_rvalue_free(c, locp, src); - 
gen_rvalue_free(c, locp, &amount); return res; } @@ -1797,10 +1626,6 @@ HexValue gen_carry_from_add(Context *c, OUT(c, locp, "tcg_gen_add2_i64(", &res, ", ", &cf, ", ", &res, ", ", &cf); OUT(c, locp, ", ", &op2_m, ", ", &zero, ");\n"); - gen_rvalue_free(c, locp, &op1_m); - gen_rvalue_free(c, locp, &op2_m); - gen_rvalue_free(c, locp, &op3_m); - gen_rvalue_free(c, locp, &res); return cf; } @@ -1812,7 +1637,8 @@ void gen_addsat64(Context *c, { HexValue op1_m = rvalue_materialize(c, locp, op1); HexValue op2_m = rvalue_materialize(c, locp, op2); - OUT(c, locp, "gen_add_sat_i64(", dst, ", ", &op1_m, ", ", &op2_m, ");\n"); + OUT(c, locp, "gen_add_sat_i64(ctx, ", dst, ", ", &op1_m, ", ", + &op2_m, ");\n"); } void gen_inst(Context *c, GString *iname) @@ -1840,9 +1666,7 @@ void gen_inst_init_args(Context *c, YYLTYPE *locp) for (unsigned i = 0; i < c->inst.init_list->len; i++) { HexValue *val = &g_array_index(c->inst.init_list, HexValue, i); if (val->type == REGISTER_ARG) { - char reg_id[5]; - reg_compose(c, locp, &val->reg, reg_id); - EMIT_HEAD(c, "tcg_gen_movi_i%u(%s, 0);\n", val->bit_width, reg_id); + /* Nothing to do here */ } else if (val->type == PREDICATE) { char suffix = val->is_dotnew ? 'N' : 'V'; EMIT_HEAD(c, "tcg_gen_movi_i%u(P%c%c, 0);\n", val->bit_width, @@ -1865,7 +1689,6 @@ void gen_inst_code(Context *c, YYLTYPE *locp) c->inst.name->str, c->inst.error_count); } else { - free_variables(c, locp); c->implemented_insn++; fprintf(c->enabled_file, "%s\n", c->inst.name->str); emit_footer(c); @@ -1885,20 +1708,14 @@ void gen_pred_assign(Context *c, YYLTYPE *locp, HexValue *left_pred, "Predicate assign not allowed in ternary!"); /* Extract predicate TCGv */ if (is_direct) { - *left_pred = gen_tmp_value(c, locp, "0", 32, UNSIGNED); + *left_pred = gen_tmp(c, locp, 32, UNSIGNED); } /* Extract first 8 bits, and store new predicate value */ - OUT(c, locp, "tcg_gen_mov_i32(", left_pred, ", ", &r, ");\n"); - OUT(c, locp, "tcg_gen_andi_i32(", left_pred, ", ", left_pred, - ", 0xff);\n"); + OUT(c, locp, "tcg_gen_andi_i32(", left_pred, ", ", &r, ", 0xff);\n"); if (is_direct) { OUT(c, locp, "gen_log_pred_write(ctx, ", pred_id, ", ", left_pred, ");\n"); - OUT(c, locp, "ctx_log_pred_write(ctx, ", pred_id, ");\n"); - gen_rvalue_free(c, locp, left_pred); } - /* Free temporary value */ - gen_rvalue_free(c, locp, &r); } void gen_cancel(Context *c, YYLTYPE *locp) @@ -1908,7 +1725,6 @@ void gen_cancel(Context *c, YYLTYPE *locp) void gen_load_cancel(Context *c, YYLTYPE *locp) { - gen_cancel(c, locp); OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n"); OUT(c, locp, "ctx->s1_store_processed = false;\n"); OUT(c, locp, "process_store(ctx, 1);\n"); @@ -1918,38 +1734,34 @@ void gen_load_cancel(Context *c, YYLTYPE *locp) void gen_load(Context *c, YYLTYPE *locp, HexValue *width, HexSignedness signedness, HexValue *ea, HexValue *dst) { - char size_suffix[4] = {0}; - const char *sign_suffix; + unsigned dst_bit_width; + unsigned src_bit_width; + /* Memop width is specified in the load macro */ assert_signedness(c, locp, signedness); - sign_suffix = (width->imm.value > 4) - ? "" - : ((signedness == UNSIGNED) ? 
"u" : "s"); + /* If dst is a variable, assert that is declared and load the type info */ if (dst->type == VARID) { find_variable(c, locp, dst, dst); } - snprintf(size_suffix, 4, "%" PRIu64, width->imm.value * 8); + src_bit_width = width->imm.value * 8; + dst_bit_width = MAX(dst->bit_width, 32); + /* Lookup the effective address EA */ find_variable(c, locp, ea, ea); OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n"); OUT(c, locp, "probe_noshuf_load(", ea, ", ", width, ", ctx->mem_idx);\n"); OUT(c, locp, "process_store(ctx, 1);\n"); OUT(c, locp, "}\n"); - OUT(c, locp, "tcg_gen_qemu_ld", size_suffix, sign_suffix); + + OUT(c, locp, "tcg_gen_qemu_ld_i", &dst_bit_width); OUT(c, locp, "("); - if (dst->bit_width > width->imm.value * 8) { - /* - * Cast to the correct TCG type if necessary, to avoid implict cast - * warnings. This is needed when the width of the destination var is - * larger than the size of the requested load. - */ - OUT(c, locp, "(TCGv) "); + OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx, MO_", &src_bit_width); + if (signedness == SIGNED) { + OUT(c, locp, " | MO_SIGN"); } - OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx);\n"); - /* If the var in EA was truncated it is now a tmp HexValue, so free it. */ - gen_rvalue_free(c, locp, ea); + OUT(c, locp, " | MO_TE);\n"); } void gen_store(Context *c, YYLTYPE *locp, HexValue *width, HexValue *ea, @@ -1963,9 +1775,6 @@ void gen_store(Context *c, YYLTYPE *locp, HexValue *width, HexValue *ea, src_m = rvalue_materialize(c, locp, &src_m); OUT(c, locp, "gen_store", &mem_width, "(cpu_env, ", ea, ", ", &src_m); OUT(c, locp, ", insn->slot);\n"); - gen_rvalue_free(c, locp, &src_m); - /* If the var in ea was truncated it is now a tmp HexValue, so free it. */ - gen_rvalue_free(c, locp, ea); } void gen_sethalf(Context *c, YYLTYPE *locp, HexCast *sh, HexValue *n, @@ -2002,11 +1811,6 @@ void gen_setbits(Context *c, YYLTYPE *locp, HexValue *hi, HexValue *lo, OUT(c, locp, "tcg_gen_deposit_i32(", dst, ", ", dst, ", ", &tmp, ", "); OUT(c, locp, lo, ", ", &len, ");\n"); - - gen_rvalue_free(c, locp, &tmp); - gen_rvalue_free(c, locp, hi); - gen_rvalue_free(c, locp, lo); - gen_rvalue_free(c, locp, value); } unsigned gen_if_cond(Context *c, YYLTYPE *locp, HexValue *cond) @@ -2019,7 +1823,6 @@ unsigned gen_if_cond(Context *c, YYLTYPE *locp, HexValue *cond) bit_suffix = (cond->bit_width == 64) ? 
"i64" : "i32"; OUT(c, locp, "tcg_gen_brcondi_", bit_suffix, "(TCG_COND_EQ, ", cond, ", 0, if_label_", &c->inst.if_count, ");\n"); - gen_rvalue_free(c, locp, cond); return c->inst.if_count++; } @@ -2045,10 +1848,10 @@ HexValue gen_rvalue_pred(Context *c, YYLTYPE *locp, HexValue *pred) bool is_dotnew = pred->is_dotnew; char predicate_id[2] = { pred->pred.id, '\0' }; char *pred_str = (char *) &predicate_id; - *pred = gen_tmp_value(c, locp, "0", 32, UNSIGNED); + *pred = gen_tmp(c, locp, 32, UNSIGNED); if (is_dotnew) { OUT(c, locp, "tcg_gen_mov_i32(", pred, - ", hex_new_pred_value["); + ", ctx->new_pred_value["); OUT(c, locp, pred_str, "]);\n"); } else { OUT(c, locp, "gen_read_preg(", pred, ", ", pred_str, ");\n"); @@ -2110,7 +1913,6 @@ static inline HexValue gen_rvalue_simple_unary(Context *c, YYLTYPE *locp, res = gen_tmp(c, locp, bit_width, value->signedness); OUT(c, locp, tcg_code, "_i", &bit_width, "(", &res, ", ", value, ");\n"); - gen_rvalue_free(c, locp, value); } return res; } @@ -2136,7 +1938,6 @@ HexValue gen_rvalue_notl(Context *c, YYLTYPE *locp, HexValue *value) OUT(c, locp, "tcg_gen_movcond_i", &bit_width); OUT(c, locp, "(TCG_COND_EQ, ", &res, ", ", value, ", ", &zero); OUT(c, locp, ", ", &one, ", ", &zero, ");\n"); - gen_rvalue_free(c, locp, value); } return res; } @@ -2161,13 +1962,12 @@ HexValue gen_rvalue_sat(Context *c, YYLTYPE *locp, HexSat *sat, assert_signedness(c, locp, sat->signedness); unsigned_str = (sat->signedness == UNSIGNED) ? "u" : ""; - res = gen_tmp_local(c, locp, value->bit_width, sat->signedness); - ovfl = gen_tmp_local(c, locp, 32, sat->signedness); + res = gen_tmp(c, locp, value->bit_width, sat->signedness); + ovfl = gen_tmp(c, locp, 32, sat->signedness); OUT(c, locp, "gen_sat", unsigned_str, "_", bit_suffix, "_ovfl("); OUT(c, locp, &ovfl, ", ", &res, ", ", value, ", ", &width->imm.value, ");\n"); - OUT(c, locp, "gen_set_usr_field_if(USR_OVF,", &ovfl, ");\n"); - gen_rvalue_free(c, locp, value); + OUT(c, locp, "gen_set_usr_field_if(ctx, USR_OVF,", &ovfl, ");\n"); return res; } @@ -2182,9 +1982,6 @@ HexValue gen_rvalue_fscr(Context *c, YYLTYPE *locp, HexValue *value) OUT(c, locp, "tcg_gen_concat_i32_i64(", &key, ", ", &frame_key, ", ", &frame_key, ");\n"); OUT(c, locp, "tcg_gen_xor_i64(", &res, ", ", value, ", ", &key, ");\n"); - gen_rvalue_free(c, locp, &key); - gen_rvalue_free(c, locp, &frame_key); - gen_rvalue_free(c, locp, value); return res; } @@ -2206,7 +2003,6 @@ HexValue gen_rvalue_brev(Context *c, YYLTYPE *locp, HexValue *value) res = gen_tmp(c, locp, value->bit_width, value->signedness); *value = rvalue_materialize(c, locp, value); OUT(c, locp, "gen_helper_fbrev(", &res, ", ", value, ");\n"); - gen_rvalue_free(c, locp, value); return res; } @@ -2218,7 +2014,6 @@ HexValue gen_rvalue_ternary(Context *c, YYLTYPE *locp, HexValue *cond, unsigned bit_width = (is_64bit) ? 
64 : 32; HexValue zero = gen_constant(c, locp, "0", bit_width, UNSIGNED); HexValue res = gen_tmp(c, locp, bit_width, UNSIGNED); - Ternary *ternary = NULL; if (is_64bit) { *cond = gen_rvalue_extend(c, locp, cond); @@ -2236,13 +2031,8 @@ HexValue gen_rvalue_ternary(Context *c, YYLTYPE *locp, HexValue *cond, OUT(c, locp, ", ", true_branch, ", ", false_branch, ");\n"); assert(c->ternary->len > 0); - ternary = &g_array_index(c->ternary, Ternary, c->ternary->len - 1); - gen_rvalue_free_manual(c, locp, &ternary->cond); g_array_remove_index(c->ternary, c->ternary->len - 1); - gen_rvalue_free(c, locp, cond); - gen_rvalue_free(c, locp, true_branch); - gen_rvalue_free(c, locp, false_branch); return res; } @@ -2321,15 +2111,6 @@ void track_string(Context *c, GString *s) g_array_append_val(c->inst.strings, s); } -void free_variables(Context *c, YYLTYPE *locp) -{ - for (unsigned i = 0; i < c->inst.allocated->len; ++i) { - Var *var = &g_array_index(c->inst.allocated, Var, i); - const char *suffix = var->bit_width == 64 ? "i64" : "i32"; - OUT(c, locp, "tcg_temp_free_", suffix, "(", var->name->str, ");\n"); - } -} - void free_instruction(Context *c) { assert(!is_inside_ternary(c)); diff --git a/target/hexagon/idef-parser/parser-helpers.h b/target/hexagon/idef-parser/parser-helpers.h index 2766296417..7c58087169 100644 --- a/target/hexagon/idef-parser/parser-helpers.h +++ b/target/hexagon/idef-parser/parser-helpers.h @@ -80,7 +80,7 @@ void reg_compose(Context *c, YYLTYPE *locp, HexReg *reg, char reg_id[5]); void reg_print(Context *c, YYLTYPE *locp, HexReg *reg); -void imm_print(Context *c, YYLTYPE *locp, HexImm *imm); +void imm_print(Context *c, YYLTYPE *locp, HexValue *rvalue); void var_print(Context *c, YYLTYPE *locp, HexVar *var); @@ -154,12 +154,6 @@ HexValue gen_tmp(Context *c, unsigned bit_width, HexSignedness signedness); -HexValue gen_tmp_value(Context *c, - YYLTYPE *locp, - const char *value, - unsigned bit_width, - HexSignedness signedness); - HexValue gen_imm_value(Context *c __attribute__((unused)), YYLTYPE *locp, int value, @@ -169,8 +163,6 @@ HexValue gen_imm_value(Context *c __attribute__((unused)), HexValue gen_imm_qemu_tmp(Context *c, YYLTYPE *locp, unsigned bit_width, HexSignedness signedness); -void gen_rvalue_free(Context *c, YYLTYPE *locp, HexValue *rvalue); - HexValue rvalue_materialize(Context *c, YYLTYPE *locp, HexValue *rvalue); HexValue gen_rvalue_extend(Context *c, YYLTYPE *locp, HexValue *rvalue); @@ -365,8 +357,6 @@ void emit_footer(Context *c); void track_string(Context *c, GString *s); -void free_variables(Context *c, YYLTYPE *locp); - void free_instruction(Context *c); void assert_signedness(Context *c, diff --git a/target/hexagon/imported/branch.idef b/target/hexagon/imported/branch.idef index 88f5f48cce..93e2e375a5 100644 --- a/target/hexagon/imported/branch.idef +++ b/target/hexagon/imported/branch.idef @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -34,6 +34,9 @@ Q6INSN(J2_jump,"jump #r22:2",ATTRIBS(A_JDIR), "direct unconditional jump", Q6INSN(J2_jumpr,"jumpr Rs32",ATTRIBS(A_JINDIR), "indirect unconditional jump", {fJUMPR(RsN,RsV,COF_TYPE_JUMPR);}) +Q6INSN(J2_jumprh,"jumprh Rs32",ATTRIBS(A_JINDIR, A_HINTED_COF), "indirect unconditional jump", +{fJUMPR(RsN,RsV,COF_TYPE_JUMPR);}) + #define OLDCOND_JUMP(TAG,OPER,OPER2,ATTRIB,DESCR,SEMANTICS) \ Q6INSN(TAG##t,"if (Pu4) "OPER":nt "OPER2,ATTRIB,DESCR,{fBRANCH_SPECULATE_STALL(fLSBOLD(PuV),,SPECULATE_NOT_TAKEN,12,0); if (fLSBOLD(PuV)) { SEMANTICS; }}) \ Q6INSN(TAG##f,"if (!Pu4) "OPER":nt "OPER2,ATTRIB,DESCR,{fBRANCH_SPECULATE_STALL(fLSBOLDNOT(PuV),,SPECULATE_NOT_TAKEN,12,0); if (fLSBOLDNOT(PuV)) { SEMANTICS; }}) \ @@ -196,6 +199,8 @@ Q6INSN(J2_callrt,"if (Pu4) callr Rs32",ATTRIBS(CINDIR_STD),"indirect conditional Q6INSN(J2_callrf,"if (!Pu4) callr Rs32",ATTRIBS(CINDIR_STD),"indirect conditional call if false", {fBRANCH_SPECULATE_STALL(fLSBOLDNOT(PuV),,SPECULATE_NOT_TAKEN,12,0);if (fLSBOLDNOT(PuV)) { fCALLR(RsV); }}) +Q6INSN(J2_callrh,"callrh Rs32",ATTRIBS(CINDIR_STD, A_HINTED_COF), "hinted indirect unconditional call", +{ fCALLR(RsV); }) diff --git a/target/hexagon/imported/encode_pp.def b/target/hexagon/imported/encode_pp.def index d71c04cd30..0cd30a5e85 100644 --- a/target/hexagon/imported/encode_pp.def +++ b/target/hexagon/imported/encode_pp.def @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -382,14 +382,23 @@ DEF_ENC32(L4_return_fnew_pt, ICLASS_LD" 011 0 000 sssss PP1110vv ---ddddd") DEF_ENC32(L4_return_tnew_pnt, ICLASS_LD" 011 0 000 sssss PP0010vv ---ddddd") DEF_ENC32(L4_return_fnew_pnt, ICLASS_LD" 011 0 000 sssss PP1010vv ---ddddd") -DEF_ENC32(L2_loadw_locked,ICLASS_LD" 001 0 000 sssss PP00---- -00ddddd") +DEF_ENC32(L2_loadw_locked,ICLASS_LD" 001 0 000 sssss PP000--- 000ddddd") +DEF_ENC32(L2_loadw_aq, ICLASS_LD" 001 0 000 sssss PP001--- 000ddddd") +DEF_ENC32(L4_loadd_aq, ICLASS_LD" 001 0 000 sssss PP011--- 000ddddd") +DEF_ENC32(R6_release_at_vi, ICLASS_ST" 000 01 11sssss PP0ttttt --0011dd") +DEF_ENC32(R6_release_st_vi, ICLASS_ST" 000 01 11sssss PP0ttttt --1011dd") +DEF_ENC32(S2_storew_rl_at_vi, ICLASS_ST" 000 01 01sssss PP-ttttt --0010dd") +DEF_ENC32(S2_storew_rl_st_vi, ICLASS_ST" 000 01 01sssss PP-ttttt --1010dd") -DEF_ENC32(L4_loadd_locked,ICLASS_LD" 001 0 000 sssss PP01---- -00ddddd") +DEF_ENC32(S4_stored_rl_at_vi, ICLASS_ST" 000 01 11sssss PP0ttttt --0010dd") +DEF_ENC32(S4_stored_rl_st_vi, ICLASS_ST" 000 01 11sssss PP0ttttt --1010dd") + +DEF_ENC32(L4_loadd_locked,ICLASS_LD" 001 0 000 sssss PP010--- 000ddddd") DEF_EXT_SPACE(EXTRACTW, ICLASS_LD" 001 0 000 iiiii PP0iiiii -01iiiii") DEF_ENC32(Y2_dcfetchbo, ICLASS_LD" 010 0 000 sssss PP0--iii iiiiiiii") @@ -479,8 +488,8 @@ STD_PST_ENC(rinew, "1 101","10ttt") /* x bus/cache */ /* x store/cache */ DEF_ENC32(S2_allocframe, ICLASS_ST" 000 01 00xxxxx PP000iii iiiiiiii") -DEF_ENC32(S2_storew_locked,ICLASS_ST" 000 01 01sssss PP-ttttt ------dd") -DEF_ENC32(S4_stored_locked,ICLASS_ST" 000 01 11sssss PP0ttttt ------dd") +DEF_ENC32(S2_storew_locked,ICLASS_ST" 000 01 01sssss PP-ttttt ----00dd") 
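+/* Locked stores keep bits 3:2 at 00; the release (bit 2 set) and store-release encodings in these rows set bit 3, and bit 5 selects the same-thread :st form over the all-thread :at form */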
+DEF_ENC32(S4_stored_locked,ICLASS_ST" 000 01 11sssss PP0ttttt ----00dd") DEF_ENC32(Y2_dczeroa, ICLASS_ST" 000 01 10sssss PP0----- --------") @@ -515,6 +524,7 @@ DEF_FIELD32(ICLASS_J" 110- -------- PP-!---- --------",J_PT,"Predict-taken") DEF_FIELDROW_DESC32(ICLASS_J" 0000 -------- PP------ --------","[#0] PC=(Rs), R31=return") DEF_ENC32(J2_callr, ICLASS_J" 0000 101sssss PP------ --------") +DEF_ENC32(J2_callrh, ICLASS_J" 0000 110sssss PP------ --------") DEF_FIELDROW_DESC32(ICLASS_J" 0001 -------- PP------ --------","[#1] if (Pu) PC=(Rs), R31=return") DEF_ENC32(J2_callrt, ICLASS_J" 0001 000sssss PP----uu --------") @@ -522,6 +532,7 @@ DEF_ENC32(J2_callrf, ICLASS_J" 0001 001sssss PP----uu --------") DEF_FIELDROW_DESC32(ICLASS_J" 0010 -------- PP------ --------","[#2] PC=(Rs); ") DEF_ENC32(J2_jumpr, ICLASS_J" 0010 100sssss PP------ --------") +DEF_ENC32(J2_jumprh, ICLASS_J" 0010 110sssss PP------ --------") DEF_ENC32(J4_hintjumpr, ICLASS_J" 0010 101sssss PP------ --------") DEF_FIELDROW_DESC32(ICLASS_J" 0011 -------- PP------ --------","[#3] if (Pu) PC=(Rs) ") diff --git a/target/hexagon/imported/ldst.idef b/target/hexagon/imported/ldst.idef index 237634bdd9..53198176a9 100644 --- a/target/hexagon/imported/ldst.idef +++ b/target/hexagon/imported/ldst.idef @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -128,6 +128,24 @@ Q6INSN(S2_allocframe,"allocframe(Rx32,#u11:3):raw", ATTRIBS(A_REGWRSIZE_8B,A_MEM #define A_RETURN A_RESTRICT_COF_MAX1,A_RESTRICT_SLOT0ONLY,A_RESTRICT_NOSLOT1_STORE,A_RET_TYPE,A_DEALLOCRET +/**** Load Acquire Store Release Instructions****/ + + + +Q6INSN(L2_loadw_aq,"Rd32=memw_aq(Rs32)",ATTRIBS(A_REGWRSIZE_4B,A_ACQUIRE,A_RESTRICT_SLOT0ONLY,A_MEMSIZE_4B,A_LOAD),"Load Acquire Word", +{ fEA_REG(RsV); fLOAD(1,4,u,EA,RdV); }) +Q6INSN(L4_loadd_aq,"Rdd32=memd_aq(Rs32)",ATTRIBS(A_REGWRSIZE_8B,A_ACQUIRE,A_RESTRICT_SLOT0ONLY,A_MEMSIZE_8B,A_LOAD),"Load Acquire Double integer", +{ fEA_REG(RsV); fLOAD(1,8,u,EA,RddV); }) + +Q6INSN(R6_release_at_vi,"release(Rs32):at",ATTRIBS(A_MEMSIZE_0B,A_RELEASE,A_STORE,A_VTCM_ALLBANK_ACCESS,A_RLS_INNER,A_RLS_ALL_THREAD,A_RESTRICT_NOPACKET,A_RESTRICT_SLOT0ONLY), "Release lock", {fEA_REG(RsV); fSTORE(1,0,EA,RsV); }) +Q6INSN(R6_release_st_vi,"release(Rs32):st",ATTRIBS(A_MEMSIZE_0B,A_RELEASE,A_STORE,A_VTCM_ALLBANK_ACCESS,A_RLS_INNER,A_RLS_SAME_THREAD,A_RESTRICT_NOPACKET,A_RESTRICT_SLOT0ONLY), "Release lock", {fEA_REG(RsV); fSTORE(1,0,EA,RsV); }) + +Q6INSN(S2_storew_rl_at_vi,"memw_rl(Rs32):at=Rt32",ATTRIBS(A_REGWRSIZE_4B,A_RELEASE,A_VTCM_ALLBANK_ACCESS,A_RLS_INNER,A_RLS_ALL_THREAD,A_RESTRICT_NOPACKET,A_MEMSIZE_4B,A_STORE,A_RESTRICT_SLOT0ONLY),"Store Release Word", { fEA_REG(RsV); fSTORE(1,4,EA,RtV); }) +Q6INSN(S4_stored_rl_at_vi,"memd_rl(Rs32):at=Rtt32",ATTRIBS(A_REGWRSIZE_8B,A_RELEASE,A_VTCM_ALLBANK_ACCESS,A_RLS_INNER,A_RLS_ALL_THREAD,A_RESTRICT_NOPACKET,A_MEMSIZE_8B,A_STORE,A_RESTRICT_SLOT0ONLY),"Store Release Double integer", { fEA_REG(RsV); fSTORE(1,8,EA,RttV); }) + +Q6INSN(S2_storew_rl_st_vi,"memw_rl(Rs32):st=Rt32",ATTRIBS(A_REGWRSIZE_4B,A_RELEASE,A_VTCM_ALLBANK_ACCESS,A_RLS_INNER,A_RLS_SAME_THREAD,A_RESTRICT_NOPACKET,A_MEMSIZE_4B,A_STORE,A_RESTRICT_SLOT0ONLY),"Store Release Word", { fEA_REG(RsV); fSTORE(1,4,EA,RtV); }) 
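+/* The :at forms above release against all hardware threads (A_RLS_ALL_THREAD); the :st forms release against the issuing thread only (A_RLS_SAME_THREAD) */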
+Q6INSN(S4_stored_rl_st_vi,"memd_rl(Rs32):st=Rtt32",ATTRIBS(A_REGWRSIZE_8B,A_RELEASE,A_VTCM_ALLBANK_ACCESS,A_RLS_INNER,A_RLS_SAME_THREAD,A_RESTRICT_NOPACKET,A_MEMSIZE_8B,A_STORE,A_RESTRICT_SLOT0ONLY),"Store Release Double integer", { fEA_REG(RsV); fSTORE(1,8,EA,RttV); }) + Q6INSN(L2_deallocframe,"Rdd32=deallocframe(Rs32):raw", ATTRIBS(A_REGWRSIZE_8B,A_MEMSIZE_8B,A_LOAD,A_DEALLOCFRAME), "Deallocate stack frame", { fHIDE(size8u_t tmp;) fEA_REG(RsV); fLOAD(1,8,u,EA,tmp); diff --git a/target/hexagon/imported/mmvec/encode_ext.def b/target/hexagon/imported/mmvec/encode_ext.def index 6fbbe2c422..402438f566 100644 --- a/target/hexagon/imported/mmvec/encode_ext.def +++ b/target/hexagon/imported/mmvec/encode_ext.def @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -257,6 +257,11 @@ DEF_ENC(V6_vasruhubrndsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 111 ddd DEF_ENC(V6_vasruwuhsat, ICLASS_CJ" 1 000 vvv vvttt PP 1 uuuuu 100 ddddd") // DEF_ENC(V6_vasruhubsat, ICLASS_CJ" 1 000 vvv vvttt PP 1 uuuuu 101 ddddd") // +DEF_ENC(V6_vasrvuhubrndsat,"00011101000vvvvvPP0uuuuu011ddddd") +DEF_ENC(V6_vasrvuhubsat,"00011101000vvvvvPP0uuuuu010ddddd") +DEF_ENC(V6_vasrvwuhrndsat,"00011101000vvvvvPP0uuuuu001ddddd") +DEF_ENC(V6_vasrvwuhsat,"00011101000vvvvvPP0uuuuu000ddddd") + /*************************************************************** * * Group #1, Uses Q6 Rt32 @@ -716,6 +721,7 @@ DEF_ENC(V6_vaddclbw, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 001 ddddd") // DEF_ENC(V6_vavguw, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 010 ddddd") // DEF_ENC(V6_vavguwrnd, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 011 ddddd") // +DEF_ENC(V6_vassign_tmp,"00011110--0---01PP0uuuuu110ddddd") DEF_ENC(V6_vavgb, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 100 ddddd") // DEF_ENC(V6_vavgbrnd, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 101 ddddd") // DEF_ENC(V6_vnavgb, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 110 ddddd") // @@ -730,6 +736,8 @@ DEF_ENC(V6_vmaxb, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 101 ddddd") // DEF_ENC(V6_vsatuwuh, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 110 ddddd") // DEF_ENC(V6_vdealb4w, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 111 ddddd") // +DEF_ENC(V6_v6mpyvubs10_vxx, ICLASS_CJ" 1 111 001 vvvvv PP 1 uuuuu 0ii xxxxx") +DEF_ENC(V6_v6mpyhubs10_vxx, ICLASS_CJ" 1 111 001 vvvvv PP 1 uuuuu 1ii xxxxx") DEF_ENC(V6_vmpyowh_rnd, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 000 ddddd") // DEF_ENC(V6_vshuffeb, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 001 ddddd") // @@ -739,6 +747,11 @@ DEF_ENC(V6_vshufoh, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 100 ddddd") // DEF_ENC(V6_vshufoeh, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 101 ddddd") // DEF_ENC(V6_vshufoeb, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 110 ddddd") // DEF_ENC(V6_vcombine, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 111 ddddd") // +DEF_ENC(V6_vcombine_tmp,"00011110101vvvvvPP0uuuuu111ddddd") + +DEF_ENC(V6_v6mpyvubs10, ICLASS_CJ" 1 111 010 vvvvv PP 1 uuuuu 0ii ddddd") +DEF_ENC(V6_v6mpyhubs10, ICLASS_CJ" 1 111 010 vvvvv PP 1 uuuuu 1ii ddddd") + DEF_ENC(V6_vmpyieoh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 000 ddddd") // DEF_ENC(V6_vadduwsat, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 001 ddddd") // @@ -789,6 +802,7 @@ DEF_ENC(V6_vrounduhub, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 011 ddddd") // DEF_ENC(V6_vrounduwuh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 100 ddddd") // 
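+// V6_vmpyuhvs: unsigned halfword multiply that keeps the upper 16 bits of each 32-bit product, per the :>>16 syntax (semantics in ext.idef)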
DEF_ENC(V6_vmpyewuh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 101 ddddd") DEF_ENC(V6_vmpyowh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 111 ddddd") +DEF_ENC(V6_vmpyuhvs,"00011111110vvvvvPP1uuuuu111ddddd") #endif /* NO MMVEC */ diff --git a/target/hexagon/imported/mmvec/ext.idef b/target/hexagon/imported/mmvec/ext.idef index 8ca5a606e1..ead32c243b 100644 --- a/target/hexagon/imported/mmvec/ext.idef +++ b/target/hexagon/imported/mmvec/ext.idef @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -62,6 +62,9 @@ EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), \ DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) +#define ITERATOR_INSN_SHIFT3_SLOT(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS,A_CVI_VS_3SRC,A_NOTE_SHIFT_RESOURCE,A_NOTE_NOVP,A_NOTE_VA_UNARY), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) #define ITERATOR_INSN_SHIFT_SLOT_VV_LATE(WIDTH,TAG,SYNTAX,DESCR,CODE) \ EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), \ @@ -116,6 +119,10 @@ ITERATOR_INSN_MPY_SLOT_LATE(WIDTH,TAG, SYNTAX2,DESCR,CODE) EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV), \ DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) +#define ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC_VX_FWD(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + #define ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX2,DESCR,CODE) @@ -976,6 +983,22 @@ NARROWING_SHIFT(16,vasrhubrndsat,fSETBYTE,ub,h,:rnd:sat,fVSATUB,fVROUND,0x7) NARROWING_SHIFT(16,vasrhbsat,fSETBYTE,b,h,:sat,fVSATB,fVNOROUND,0x7) NARROWING_SHIFT(16,vasrhbrndsat,fSETBYTE,b,h,:rnd:sat,fVSATB,fVROUND,0x7) +#define NARROWING_VECTOR_SHIFT(ITERSIZE,TAG,DSTM,DSTTYPE,SRCTYPE,SRCTYPE2,SYNOPTS,SATFUNC,RNDFUNC,SHAMTMASK) \ +ITERATOR_INSN_SHIFT3_SLOT(ITERSIZE,TAG, \ +"Vd32." #DSTTYPE "=vasr(Vuu32." #SRCTYPE ",Vv32." 
#SRCTYPE2 ")" #SYNOPTS, \ +"Vector shift by vector right and shuffle", \ + fHIDE(int )shamt = VvV.SRCTYPE2[2*i+0] & SHAMTMASK; \ + DSTM(0,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VuuV.v[0].SRCTYPE[i],shamt) >> shamt)); \ + shamt = VvV.SRCTYPE2[2*i+1] & SHAMTMASK; \ + DSTM(1,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VuuV.v[1].SRCTYPE[i],shamt) >> shamt))) + +/* WORD TO HALF*/ +NARROWING_VECTOR_SHIFT(32,vasrvwuhsat,fSETHALF,uh,w,uh,:sat,fVSATUH,fVNOROUND,0xF) +NARROWING_VECTOR_SHIFT(32,vasrvwuhrndsat,fSETHALF,uh,w,uh,:rnd:sat,fVSATUH,fVROUND,0xF) +/* HALF TO BYTE*/ +NARROWING_VECTOR_SHIFT(16,vasrvuhubsat,fSETBYTE,ub,uh,ub,:sat,fVSATUB,fVNOROUND,0x7) +NARROWING_VECTOR_SHIFT(16,vasrvuhubrndsat,fSETBYTE,ub,uh,ub,:rnd:sat,fVSATUB,fVROUND,0x7) + NARROWING_SHIFT_NOV1(16,vasruhubsat,fSETBYTE,ub,uh,:sat,fVSATUB,fVNOROUND,0x7) NARROWING_SHIFT_NOV1(16,vasruhubrndsat,fSETBYTE,ub,uh,:rnd:sat,fVSATUB,fVROUND,0x7) @@ -1360,6 +1383,9 @@ ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyhvsrs,"Vd32=vmpyh(Vu32,Vv32):<<1:rnd:s +ITERATOR_INSN_MPY_SLOT(16,vmpyuhvs, "Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16", +"Vector by Vector Unsigned Halfword Multiply with 16 bit rightshift", + VdV.uh[i] = fGETUHALF(1,fMPY16UU(VuV.uh[i],VvV.uh[i]))) ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhus, "Vdd32=vmpyhus(Vu32,Vv32)","Vdd32.w=vmpy(Vu32.h,Vv32.uh)", @@ -2038,6 +2064,24 @@ ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8,vcombine,"Vdd32=vcombine(Vu32,Vv32)", /////////////////////////////////////////////////////////////////////////// +EXTINSN(V6_vcombine_tmp, "Vdd32.tmp=vcombine(Vu32,Vv32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_REMAP,A_CVI_TMP,A_NO_INTRINSIC), +"Vector assign tmp, Any two to Vector Pair ", +{ + fHIDE(int i;) + fVFOREACH(8, i) { + VddV.v[0].ub[i] = VvV.ub[i]; + VddV.v[1].ub[i] = VuV.ub[i]; + } +}) + +EXTINSN(V6_vassign_tmp, "Vd32.tmp=Vu32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_REMAP,A_CVI_TMP,A_NO_INTRINSIC), +"Vector assign tmp, Any two to Vector Pair ", +{ + fHIDE(int i;) + fVFOREACH(32, i) { + VdV.w[i]=VuV.w[i]; + } +}) /********************************************************* * GENERAL PERMUTE NETWORKS @@ -2507,6 +2551,281 @@ EXTINSN(V6_vscattermhw , "vscatter(Rt32,Mu2,Vvv32.w).h=Vw32", ATTRIBS(A_EXTENSIO }) +ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC_VX_FWD(32, v6mpyvubs10_vxx, "Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v", "", + fHIDE(size2s_t c00;) + fGET10BIT(c00, VvvV.v[0].uw[i], 0) + fHIDE(size2s_t c01;) + fGET10BIT(c01, VvvV.v[0].uw[i], 1) + fHIDE(size2s_t c02;) + fGET10BIT(c02, VvvV.v[0].uw[i], 2) + + fHIDE(size2s_t c10;) + fGET10BIT(c10, VvvV.v[1].uw[i], 0) + fHIDE(size2s_t c11;) + fGET10BIT(c11, VvvV.v[1].uw[i], 1) + fHIDE(size2s_t c12;) + fGET10BIT(c12, VvvV.v[1].uw[i], 2) + + if (uiV == 0) { + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c10); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c11); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[1].uw[i]), c12); + + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c00); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c01); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c02); + + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c10); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c11); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c12); + + } else if (uiV == 1) { + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c00); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c01); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[1].uw[i]), c02); + + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c10); + 
VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c11); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[1].uw[i]), c12); + + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c00); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c01); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c02); + + } else if (uiV == 2) { + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c10); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c11); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c12); + + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[0].uw[i]), c00); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c01); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c02); + + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[0].uw[i]), c10); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c11); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c12); + + } else if (uiV == 3) { + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c00); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c01); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c02); + + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c10); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c11); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c12); + + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[0].uw[i]), c00); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c01); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c02); + } +) +ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC_VX_FWD(32, v6mpyhubs10_vxx, "Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h", "", + fHIDE(size2s_t c00;) + fGET10BIT(c00, VvvV.v[0].uw[i], 0) + fHIDE(size2s_t c01;) + fGET10BIT(c01, VvvV.v[0].uw[i], 1) + fHIDE(size2s_t c02;) + fGET10BIT(c02, VvvV.v[0].uw[i], 2) + fHIDE(size2s_t c10;) + fGET10BIT(c10, VvvV.v[1].uw[i], 0) + fHIDE(size2s_t c11;) + fGET10BIT(c11, VvvV.v[1].uw[i], 1) + fHIDE(size2s_t c12;) + fGET10BIT(c12, VvvV.v[1].uw[i], 2) + + if (uiV == 0) { + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[1].uw[i]), c10); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c11); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c12); + + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c00); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c01); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c02); + + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c10); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c11); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c12); + + } else if (uiV == 1) { + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[1].uw[i]), c00); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c01); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c02); + + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[1].uw[i]), c10); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c11); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c12); + + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c00); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c01); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c02); + + } else if (uiV == 2) { + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c10); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c11); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c12); + + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c00); + 
VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c01); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[0].uw[i]), c02); + + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c10); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c11); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[0].uw[i]), c12); + + } else if (uiV == 3) { + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c00); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c01); + VxxV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c02); + + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c10); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c11); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c12); + + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c00); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c01); + VxxV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[0].uw[i]), c02); + } +) + + +ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(32, v6mpyvubs10, "Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v", "", + fHIDE(short c00;) + fGET10BIT(c00, VvvV.v[0].uw[i], 0) + fHIDE(short c01;) + fGET10BIT(c01, VvvV.v[0].uw[i], 1) + fHIDE(short c02;) + fGET10BIT(c02, VvvV.v[0].uw[i], 2) + fHIDE(short c10;) + fGET10BIT(c10, VvvV.v[1].uw[i], 0) + fHIDE(short c11;) + fGET10BIT(c11, VvvV.v[1].uw[i], 1) + fHIDE(short c12;) + fGET10BIT(c12, VvvV.v[1].uw[i], 2) + + + + if (uiV == 0) { + VddV.v[1].w[i] = fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c10); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c11); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[1].uw[i]), c12); + + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c00); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c01); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c02); + + VddV.v[0].w[i] = fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c10); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c11); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c12); + + } else if (uiV == 1) { + VddV.v[1].w[i] = fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c00); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c01); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[1].uw[i]), c02); + + VddV.v[0].w[i] = fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c10); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c11); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[1].uw[i]), c12); + + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c00); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c01); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c02); + + } else if (uiV == 2) { + VddV.v[1].w[i] = fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c10); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c11); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c12); + + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[0].uw[i]), c00); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c01); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c02); + + VddV.v[0].w[i] = fMPY16US(fGETUBYTE(0,VuuV.v[0].uw[i]), c10); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c11); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c12); + + } else if (uiV == 3) { + VddV.v[1].w[i] = fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c00); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c01); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c02); + + VddV.v[0].w[i] = fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c10); + VddV.v[0].w[i] += 
fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c11); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c12); + + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[0].uw[i]), c00); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c01); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c02); + } +) + +ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(32, v6mpyhubs10, "Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h", "", + fHIDE(short c00;) + fGET10BIT(c00, VvvV.v[0].uw[i], 0) + fHIDE(short c01;) + fGET10BIT(c01, VvvV.v[0].uw[i], 1) + fHIDE(short c02;) + fGET10BIT(c02, VvvV.v[0].uw[i], 2) + fHIDE(short c10;) + fGET10BIT(c10, VvvV.v[1].uw[i], 0) + fHIDE(short c11;) + fGET10BIT(c11, VvvV.v[1].uw[i], 1) + fHIDE(short c12;) + fGET10BIT(c12, VvvV.v[1].uw[i], 2) + + if (uiV == 0) { + VddV.v[1].w[i] = fMPY16US(fGETUBYTE(3,VuuV.v[1].uw[i]), c10); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c11); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c12); + + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c00); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c01); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c02); + + VddV.v[0].w[i] = fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c10); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c11); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c12); + + } else if (uiV == 1) { + VddV.v[1].w[i] = fMPY16US(fGETUBYTE(3,VuuV.v[1].uw[i]), c00); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c01); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c02); + + VddV.v[0].w[i] = fMPY16US(fGETUBYTE(3,VuuV.v[1].uw[i]), c10); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c11); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c12); + + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[1].uw[i]), c00); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c01); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c02); + + } else if (uiV == 2) { + VddV.v[1].w[i] = fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c10); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c11); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c12); + + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c00); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c01); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[0].uw[i]), c02); + + VddV.v[0].w[i] = fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c10); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c11); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[0].uw[i]), c12); + + } else if (uiV == 3) { + VddV.v[1].w[i] = fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c00); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c01); + VddV.v[1].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c02); + + VddV.v[0].w[i] = fMPY16US(fGETUBYTE(1,VuuV.v[1].uw[i]), c10); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(3,VuuV.v[0].uw[i]), c11); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(1,VuuV.v[0].uw[i]), c12); + + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[1].uw[i]), c00); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(2,VuuV.v[0].uw[i]), c01); + VddV.v[0].w[i] += fMPY16US(fGETUBYTE(0,VuuV.v[0].uw[i]), c02); + } +) + EXTINSN(V6_vscattermhwq, "if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA_DV,A_CVI_VM,A_MEMLIKE), "Scatter halfwords conditional", { diff --git a/target/hexagon/internal.h b/target/hexagon/internal.h index b1bfadc3f5..d732b6bb3c 100644 --- a/target/hexagon/internal.h +++ 
b/target/hexagon/internal.h @@ -33,6 +33,8 @@ int hexagon_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int hexagon_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); +int hexagon_hvx_gdb_read_register(CPUHexagonState *env, GByteArray *mem_buf, int n); +int hexagon_hvx_gdb_write_register(CPUHexagonState *env, uint8_t *mem_buf, int n); void hexagon_debug_vreg(CPUHexagonState *env, int regnum); void hexagon_debug_qreg(CPUHexagonState *env, int regnum); diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h index cd64bb8eec..5451b061ee 100644 --- a/target/hexagon/macros.h +++ b/target/hexagon/macros.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -22,16 +22,6 @@ #include "hex_regs.h" #include "reg_fields.h" -#ifdef QEMU_GENERATE -#define READ_REG(dest, NUM) gen_read_reg(dest, NUM) -#else -#define READ_REG(NUM) (env->gpr[(NUM)]) -#define READ_PREG(NUM) (env->pred[NUM]) - -#define WRITE_RREG(NUM, VAL) log_reg_write(env, NUM, VAL, slot) -#define WRITE_PREG(NUM, VAL) log_pred_write(env, NUM, VAL) -#endif - #define PCALIGN 4 #define PCALIGN_MASK (PCALIGN - 1) @@ -48,22 +38,23 @@ #define TYPE_INT(X) __builtin_types_compatible_p(typeof(X), int) #define TYPE_TCGV(X) __builtin_types_compatible_p(typeof(X), TCGv) #define TYPE_TCGV_I64(X) __builtin_types_compatible_p(typeof(X), TCGv_i64) - -#define SET_USR_FIELD_FUNC(X) \ - __builtin_choose_expr(TYPE_INT(X), \ - gen_set_usr_fieldi, \ - __builtin_choose_expr(TYPE_TCGV(X), \ - gen_set_usr_field, (void)0)) -#define SET_USR_FIELD(FIELD, VAL) \ - SET_USR_FIELD_FUNC(VAL)(FIELD, VAL) #else #define GET_USR_FIELD(FIELD) \ fEXTRACTU_BITS(env->gpr[HEX_REG_USR], reg_field_info[FIELD].width, \ reg_field_info[FIELD].offset) #define SET_USR_FIELD(FIELD, VAL) \ - fINSERT_BITS(env->new_value[HEX_REG_USR], reg_field_info[FIELD].width, \ - reg_field_info[FIELD].offset, (VAL)) + do { \ + if (pkt_need_commit) { \ + fINSERT_BITS(env->new_value_usr, \ + reg_field_info[FIELD].width, \ + reg_field_info[FIELD].offset, (VAL)); \ + } else { \ + fINSERT_BITS(env->gpr[HEX_REG_USR], \ + reg_field_info[FIELD].width, \ + reg_field_info[FIELD].offset, (VAL)); \ + } \ + } while (0) #endif #ifdef QEMU_GENERATE @@ -117,37 +108,37 @@ #define MEM_LOAD1s(DST, VA) \ do { \ CHECK_NOSHUF(VA, 1); \ - tcg_gen_qemu_ld8s(DST, VA, ctx->mem_idx); \ + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_SB); \ } while (0) #define MEM_LOAD1u(DST, VA) \ do { \ CHECK_NOSHUF(VA, 1); \ - tcg_gen_qemu_ld8u(DST, VA, ctx->mem_idx); \ + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_UB); \ } while (0) #define MEM_LOAD2s(DST, VA) \ do { \ CHECK_NOSHUF(VA, 2); \ - tcg_gen_qemu_ld16s(DST, VA, ctx->mem_idx); \ + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESW); \ } while (0) #define MEM_LOAD2u(DST, VA) \ do { \ CHECK_NOSHUF(VA, 2); \ - tcg_gen_qemu_ld16u(DST, VA, ctx->mem_idx); \ + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUW); \ } while (0) #define MEM_LOAD4s(DST, VA) \ do { \ CHECK_NOSHUF(VA, 4); \ - tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \ + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESL); \ } while (0) #define MEM_LOAD4u(DST, VA) \ do { \ CHECK_NOSHUF(VA, 4); \ - tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \ + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUL); \ } while (0) #define MEM_LOAD8u(DST, VA) 
\ do { \ CHECK_NOSHUF(VA, 8); \ - tcg_gen_qemu_ld64(DST, VA, ctx->mem_idx); \ + tcg_gen_qemu_ld_i64(DST, VA, ctx->mem_idx, MO_TEUQ); \ } while (0) #define MEM_STORE1_FUNC(X) \ @@ -182,14 +173,14 @@ #define MEM_STORE8(VA, DATA, SLOT) \ MEM_STORE8_FUNC(DATA)(cpu_env, VA, DATA, SLOT) #else -#define MEM_LOAD1s(VA) ((int8_t)mem_load1(env, slot, VA)) -#define MEM_LOAD1u(VA) ((uint8_t)mem_load1(env, slot, VA)) -#define MEM_LOAD2s(VA) ((int16_t)mem_load2(env, slot, VA)) -#define MEM_LOAD2u(VA) ((uint16_t)mem_load2(env, slot, VA)) -#define MEM_LOAD4s(VA) ((int32_t)mem_load4(env, slot, VA)) -#define MEM_LOAD4u(VA) ((uint32_t)mem_load4(env, slot, VA)) -#define MEM_LOAD8s(VA) ((int64_t)mem_load8(env, slot, VA)) -#define MEM_LOAD8u(VA) ((uint64_t)mem_load8(env, slot, VA)) +#define MEM_LOAD1s(VA) ((int8_t)mem_load1(env, pkt_has_store_s1, slot, VA)) +#define MEM_LOAD1u(VA) ((uint8_t)mem_load1(env, pkt_has_store_s1, slot, VA)) +#define MEM_LOAD2s(VA) ((int16_t)mem_load2(env, pkt_has_store_s1, slot, VA)) +#define MEM_LOAD2u(VA) ((uint16_t)mem_load2(env, pkt_has_store_s1, slot, VA)) +#define MEM_LOAD4s(VA) ((int32_t)mem_load4(env, pkt_has_store_s1, slot, VA)) +#define MEM_LOAD4u(VA) ((uint32_t)mem_load4(env, pkt_has_store_s1, slot, VA)) +#define MEM_LOAD8s(VA) ((int64_t)mem_load8(env, pkt_has_store_s1, slot, VA)) +#define MEM_LOAD8u(VA) ((uint64_t)mem_load8(env, pkt_has_store_s1, slot, VA)) #define MEM_STORE1(VA, DATA, SLOT) log_store32(env, VA, DATA, 1, SLOT) #define MEM_STORE2(VA, DATA, SLOT) log_store32(env, VA, DATA, 2, SLOT) @@ -205,28 +196,11 @@ static inline void gen_cancel(uint32_t slot) #define CANCEL gen_cancel(slot); #else -#define CANCEL cancel_slot(env, slot) +#define CANCEL do { } while (0) #endif #define LOAD_CANCEL(EA) do { CANCEL; } while (0) -#ifdef QEMU_GENERATE -static inline void gen_pred_cancel(TCGv pred, uint32_t slot_num) - { - TCGv slot_mask = tcg_temp_new(); - TCGv tmp = tcg_temp_new(); - TCGv zero = tcg_constant_tl(0); - tcg_gen_ori_tl(slot_mask, hex_slot_cancelled, 1 << slot_num); - tcg_gen_andi_tl(tmp, pred, 1); - tcg_gen_movcond_tl(TCG_COND_EQ, hex_slot_cancelled, tmp, zero, - slot_mask, hex_slot_cancelled); - tcg_temp_free(slot_mask); - tcg_temp_free(tmp); -} -#define PRED_LOAD_CANCEL(PRED, EA) \ - gen_pred_cancel(PRED, insn->is_endloop ? 4 : insn->slot) -#endif - #define STORE_CANCEL(EA) { env->slot_cancelled |= (1 << slot); } #define fMAX(A, B) (((A) > (B)) ? 
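
/* (A note on the load macros above: they now funnel through the generic
 * tcg_gen_qemu_ld_tl / tcg_gen_qemu_ld_i64 entry points with an explicit
 * MemOp -- MO_SB/MO_UB for bytes, MO_TESW/MO_TEUW for halfwords,
 * MO_TESL/MO_TEUL for words, MO_TEUQ for doublewords -- replacing the
 * retired size-specific tcg_gen_qemu_ld* helpers. MEM_LOAD4u formerly
 * reused the signed ld32s; MO_TEUL now states the unsigned intent
 * directly.) */
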
(A) : (B)) @@ -262,12 +236,8 @@ static inline void gen_pred_cancel(TCGv pred, uint32_t slot_num) #ifdef QEMU_GENERATE #define fLSBNEW(PVAL) tcg_gen_andi_tl(LSB, (PVAL), 1) -#define fLSBNEW0 tcg_gen_andi_tl(LSB, hex_new_pred_value[0], 1) -#define fLSBNEW1 tcg_gen_andi_tl(LSB, hex_new_pred_value[1], 1) #else #define fLSBNEW(PVAL) ((PVAL) & 1) -#define fLSBNEW0 (env->new_pred_value[0] & 1) -#define fLSBNEW1 (env->new_pred_value[1] & 1) #endif #ifdef QEMU_GENERATE @@ -376,43 +346,28 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) tcg_gen_deposit_tl(result, msb, lsb, 0, 7); tcg_gen_shli_tl(result, result, shift); - - tcg_temp_free(msb); - tcg_temp_free(lsb); - return result; } -#define fREAD_IREG(VAL, SHIFT) gen_read_ireg(ireg, (VAL), (SHIFT)) -#else -#define fREAD_IREG(VAL) \ - (fSXTN(11, 64, (((VAL) & 0xf0000000) >> 21) | ((VAL >> 17) & 0x7f))) #endif -#define fREAD_LR() (READ_REG(HEX_REG_LR)) +#define fREAD_LR() (env->gpr[HEX_REG_LR]) -#define fWRITE_LR(A) WRITE_RREG(HEX_REG_LR, A) -#define fWRITE_FP(A) WRITE_RREG(HEX_REG_FP, A) -#define fWRITE_SP(A) WRITE_RREG(HEX_REG_SP, A) - -#define fREAD_SP() (READ_REG(HEX_REG_SP)) -#define fREAD_LC0 (READ_REG(HEX_REG_LC0)) -#define fREAD_LC1 (READ_REG(HEX_REG_LC1)) -#define fREAD_SA0 (READ_REG(HEX_REG_SA0)) -#define fREAD_SA1 (READ_REG(HEX_REG_SA1)) -#define fREAD_FP() (READ_REG(HEX_REG_FP)) +#define fREAD_SP() (env->gpr[HEX_REG_SP]) +#define fREAD_LC0 (env->gpr[HEX_REG_LC0]) +#define fREAD_LC1 (env->gpr[HEX_REG_LC1]) +#define fREAD_SA0 (env->gpr[HEX_REG_SA0]) +#define fREAD_SA1 (env->gpr[HEX_REG_SA1]) +#define fREAD_FP() (env->gpr[HEX_REG_FP]) #ifdef FIXME /* Figure out how to get insn->extension_valid to helper */ #define fREAD_GP() \ - (insn->extension_valid ? 0 : READ_REG(HEX_REG_GP)) + (insn->extension_valid ? 
0 : env->gpr[HEX_REG_GP]) #else -#define fREAD_GP() READ_REG(HEX_REG_GP) +#define fREAD_GP() (env->gpr[HEX_REG_GP]) #endif #define fREAD_PC() (PC) -#define fREAD_NPC() (next_PC & (0xfffffffe)) - -#define fREAD_P0() (READ_PREG(0)) -#define fREAD_P3() (READ_PREG(3)) +#define fREAD_P0() (env->pred[0]) #define fCHECK_PCALIGN(A) @@ -421,36 +376,10 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) #define fBRANCH(LOC, TYPE) fWRITE_NPC(LOC) #define fJUMPR(REGNO, TARGET, TYPE) fBRANCH(TARGET, COF_TYPE_JUMPR) #define fHINTJR(TARGET) { /* Not modelled in qemu */} -#define fCALL(A) \ - do { \ - fWRITE_LR(fREAD_NPC()); \ - fBRANCH(A, COF_TYPE_CALL); \ - } while (0) -#define fCALLR(A) \ - do { \ - fWRITE_LR(fREAD_NPC()); \ - fBRANCH(A, COF_TYPE_CALLR); \ - } while (0) -#define fWRITE_LOOP_REGS0(START, COUNT) \ - do { \ - WRITE_RREG(HEX_REG_LC0, COUNT); \ - WRITE_RREG(HEX_REG_SA0, START); \ - } while (0) -#define fWRITE_LOOP_REGS1(START, COUNT) \ - do { \ - WRITE_RREG(HEX_REG_LC1, COUNT); \ - WRITE_RREG(HEX_REG_SA1, START);\ - } while (0) -#define fWRITE_LC0(VAL) WRITE_RREG(HEX_REG_LC0, VAL) -#define fWRITE_LC1(VAL) WRITE_RREG(HEX_REG_LC1, VAL) #define fSET_OVERFLOW() SET_USR_FIELD(USR_OVF, 1) #define fSET_LPCFG(VAL) SET_USR_FIELD(USR_LPCFG, (VAL)) #define fGET_LPCFG (GET_USR_FIELD(USR_LPCFG)) -#define fWRITE_P0(VAL) WRITE_PREG(0, VAL) -#define fWRITE_P1(VAL) WRITE_PREG(1, VAL) -#define fWRITE_P2(VAL) WRITE_PREG(2, VAL) -#define fWRITE_P3(VAL) WRITE_PREG(3, VAL) #define fPART1(WORK) if (part1) { WORK; return; } #define fCAST4u(A) ((uint32_t)(A)) #define fCAST4s(A) ((int32_t)(A)) @@ -512,7 +441,6 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) TCGv tmp = tcg_temp_new(); \ tcg_gen_shli_tl(tmp, REG2, SCALE); \ tcg_gen_add_tl(EA, REG, tmp); \ - tcg_temp_free(tmp); \ } while (0) #define fEA_IRs(IMM, REG, SCALE) \ do { \ @@ -608,7 +536,7 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) #define fMEMOP(NUM, SIZE, SIGN, EA, FNTYPE, VALUE) -#define fGET_FRAMEKEY() READ_REG(HEX_REG_FRAMEKEY) +#define fGET_FRAMEKEY() (env->gpr[HEX_REG_FRAMEKEY]) #define fFRAME_SCRAMBLE(VAL) ((VAL) ^ (fCAST8u(fGET_FRAMEKEY()) << 32)) #define fFRAME_UNSCRAMBLE(VAL) fFRAME_SCRAMBLE(VAL) @@ -718,22 +646,14 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) fEXTRACTU_BITS(env->gpr[HEX_REG_##REG], \ reg_field_info[FIELD].width, \ reg_field_info[FIELD].offset) -#define fGET_FIELD(VAL, FIELD) -#define fSET_FIELD(VAL, FIELD, NEWVAL) -#define fBARRIER() -#define fSYNCH() -#define fISYNC() -#define fDCFETCH(REG) \ - do { (void)REG; } while (0) /* Nothing to do in qemu */ -#define fICINVA(REG) \ - do { (void)REG; } while (0) /* Nothing to do in qemu */ -#define fL2FETCH(ADDR, HEIGHT, WIDTH, STRIDE, FLAGS) -#define fDCCLEANA(REG) \ - do { (void)REG; } while (0) /* Nothing to do in qemu */ -#define fDCCLEANINVA(REG) \ - do { (void)REG; } while (0) /* Nothing to do in qemu */ -#define fDCZEROA(REG) do { env->dczero_addr = (REG); } while (0) +#ifdef QEMU_GENERATE +#define fDCZEROA(REG) \ + do { \ + ctx->dczero_addr = tcg_temp_new(); \ + tcg_gen_mov_tl(ctx->dczero_addr, (REG)); \ + } while (0) +#endif #define fBRANCH_SPECULATE_STALL(DOTNEWVAL, JUMP_COND, SPEC_DIR, HINTBITNUM, \ STRBITNUM) /* Nothing */ diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build index c9d31d095c..da8e608d00 100644 --- a/target/hexagon/meson.build +++ b/target/hexagon/meson.build @@ -1,5 +1,5 @@ ## -## Copyright(c) 2020-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. 
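
/* fFRAME_SCRAMBLE/fFRAME_UNSCRAMBLE above lean on XOR being its own
 * inverse: scrambling XORs FRAMEKEY into the upper half of the 64-bit
 * frame word, and unscrambling is literally the same operation. A
 * minimal standalone sketch (function and variable names invented for
 * illustration): */
#include <assert.h>
#include <stdint.h>

static uint64_t frame_scramble(uint64_t val, uint32_t framekey)
{
    /* mirrors (VAL) ^ (fCAST8u(fGET_FRAMEKEY()) << 32) */
    return val ^ ((uint64_t)framekey << 32);
}

int main(void)
{
    uint64_t ra = 0x00001000deadbeefULL;
    uint32_t key = 0x5a5a5a5au;
    assert(frame_scramble(frame_scramble(ra, key), key) == ra);
    return 0;
}
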
+## Copyright(c) 2020-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -183,7 +183,7 @@ if idef_parser_enabled and 'hexagon-linux-user' in target_dirs ) bison = generator( - find_program('bison'), + find_program('bison', version: '>=3.0'), output: ['@BASENAME@.tab.c', '@BASENAME@.tab.h'], arguments: ['@INPUT@', '--defines=@OUTPUT1@', '--output=@OUTPUT0@'] ) @@ -276,4 +276,13 @@ tcg_funcs_generated = custom_target( ) hexagon_ss.add(tcg_funcs_generated) +analyze_funcs_generated = custom_target( + 'analyze_funcs_generated.c.inc', + output: 'analyze_funcs_generated.c.inc', + depends: helper_dep, + depend_files: [hex_common_py, attribs_def, gen_tcg_h, gen_tcg_hvx_h], + command: [python, files('gen_analyze_funcs.py'), helper_in, '@OUTPUT@'], +) +hexagon_ss.add(analyze_funcs_generated) + target_arch += {'hexagon': hexagon_ss} diff --git a/target/hexagon/mmvec/decode_ext_mmvec.c b/target/hexagon/mmvec/decode_ext_mmvec.c index 061a65ab88..174eb3b78b 100644 --- a/target/hexagon/mmvec/decode_ext_mmvec.c +++ b/target/hexagon/mmvec/decode_ext_mmvec.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -148,9 +148,9 @@ decode_shuffle_for_execution_vops(Packet *pkt) int i; for (i = 0; i < pkt->num_insns; i++) { uint16_t opcode = pkt->insn[i].opcode; - if (GET_ATTRIB(opcode, A_LOAD) && - (GET_ATTRIB(opcode, A_CVI_NEW) || - GET_ATTRIB(opcode, A_CVI_TMP))) { + if ((GET_ATTRIB(opcode, A_LOAD) && + GET_ATTRIB(opcode, A_CVI_NEW)) || + GET_ATTRIB(opcode, A_CVI_TMP)) { /* * Find prior consuming vector instructions * Move to end of packet diff --git a/target/hexagon/mmvec/macros.h b/target/hexagon/mmvec/macros.h index 1201d778d0..a655634fd1 100644 --- a/target/hexagon/mmvec/macros.h +++ b/target/hexagon/mmvec/macros.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -346,4 +346,11 @@ #define fUARCH_NOTE_PUMP_2X() #define IV1DEAD() + +#define fGET10BIT(COE, VAL, POS) \ + do { \ + COE = (sextract32(VAL, 24 + 2 * POS, 2) << 8) | \ + extract32(VAL, POS * 8, 8); \ + } while (0); + #endif diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c index 35449ef524..12967ac21e 100644 --- a/target/hexagon/op_helper.c +++ b/target/hexagon/op_helper.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
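
/* The new fGET10BIT above assembles a signed 10-bit coefficient from two
 * separated fields: eight magnitude bits at byte POS and two sign bits
 * packed at bit 24 + 2*POS. A hedged standalone rendering, with plain
 * shifts standing in for QEMU's extract32/sextract32: */
#include <assert.h>
#include <stdint.h>

static int32_t get10bit(uint32_t val, int pos)
{
    /* left-align the 2-bit field at [25+2*pos : 24+2*pos], then
     * arithmetic-shift back down to sign-extend it */
    int32_t hi = (int32_t)(val << (6 - 2 * pos)) >> 30;
    uint32_t lo = (val >> (pos * 8)) & 0xff;
    return hi * 256 + (int32_t)lo;  /* (hi << 8) | lo, minus signed-shift UB */
}

int main(void)
{
    assert(get10bit(0x030000ffu, 0) == -1);    /* 0x3ff sign-extends to -1 */
    assert(get10bit(0x000000ffu, 0) == 0xff);  /* positive coefficient */
    return 0;
}
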
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -30,6 +30,7 @@ #include "mmvec/mmvec.h" #include "mmvec/macros.h" #include "op_helper.h" +#include "translate.h" #define SF_BIAS 127 #define SF_MANTBITS 23 @@ -51,38 +52,6 @@ G_NORETURN void HELPER(raise_exception)(CPUHexagonState *env, uint32_t excp) do_raise_exception_err(env, excp, 0); } -void log_reg_write(CPUHexagonState *env, int rnum, - target_ulong val, uint32_t slot) -{ - HEX_DEBUG_LOG("log_reg_write[%d] = " TARGET_FMT_ld " (0x" TARGET_FMT_lx ")", - rnum, val, val); - if (val == env->gpr[rnum]) { - HEX_DEBUG_LOG(" NO CHANGE"); - } - HEX_DEBUG_LOG("\n"); - - env->new_value[rnum] = val; - if (HEX_DEBUG) { - /* Do this so HELPER(debug_commit_end) will know */ - env->reg_written[rnum] = 1; - } -} - -static void log_pred_write(CPUHexagonState *env, int pnum, target_ulong val) -{ - HEX_DEBUG_LOG("log_pred_write[%d] = " TARGET_FMT_ld - " (0x" TARGET_FMT_lx ")\n", - pnum, val, val); - - /* Multiple writes to the same preg are and'ed together */ - if (env->pred_written & (1 << pnum)) { - env->new_pred_value[pnum] &= val & 0xff; - } else { - env->new_pred_value[pnum] = val & 0xff; - env->pred_written |= 1 << pnum; - } -} - void log_store32(CPUHexagonState *env, target_ulong addr, target_ulong val, int width, int slot) { @@ -105,30 +74,6 @@ void log_store64(CPUHexagonState *env, target_ulong addr, env->mem_log_stores[slot].data64 = val; } -void write_new_pc(CPUHexagonState *env, bool pkt_has_multi_cof, - target_ulong addr) -{ - HEX_DEBUG_LOG("write_new_pc(0x" TARGET_FMT_lx ")\n", addr); - - if (pkt_has_multi_cof) { - /* - * If more than one branch is taken in a packet, only the first one - * is actually done. 
- */ - if (env->branch_taken) { - HEX_DEBUG_LOG("INFO: multiple branches taken in same packet, " - "ignoring the second one\n"); - } else { - fCHECK_PCALIGN(addr); - env->gpr[HEX_REG_PC] = addr; - env->branch_taken = 1; - } - } else { - fCHECK_PCALIGN(addr); - env->gpr[HEX_REG_PC] = addr; - } -} - /* Handy place to set a breakpoint */ void HELPER(debug_start_packet)(CPUHexagonState *env) { @@ -258,14 +203,14 @@ static void print_store(CPUHexagonState *env, int slot) } /* This function is a handy place to set a breakpoint */ -void HELPER(debug_commit_end)(CPUHexagonState *env, int has_st0, int has_st1) +void HELPER(debug_commit_end)(CPUHexagonState *env, uint32_t this_PC, + int pred_written, int has_st0, int has_st1) { bool reg_printed = false; bool pred_printed = false; int i; - HEX_DEBUG_LOG("Packet committed: pc = 0x" TARGET_FMT_lx "\n", - env->this_PC); + HEX_DEBUG_LOG("Packet committed: pc = 0x" TARGET_FMT_lx "\n", this_PC); HEX_DEBUG_LOG("slot_cancelled = %d\n", env->slot_cancelled); for (i = 0; i < TOTAL_PER_THREAD_REGS; i++) { @@ -275,18 +220,18 @@ void HELPER(debug_commit_end)(CPUHexagonState *env, int has_st0, int has_st1) reg_printed = true; } HEX_DEBUG_LOG("\tr%d = " TARGET_FMT_ld " (0x" TARGET_FMT_lx ")\n", - i, env->new_value[i], env->new_value[i]); + i, env->gpr[i], env->gpr[i]); } } for (i = 0; i < NUM_PREGS; i++) { - if (env->pred_written & (1 << i)) { + if (pred_written & (1 << i)) { if (!pred_printed) { HEX_DEBUG_LOG("Predicates written\n"); pred_printed = true; } HEX_DEBUG_LOG("\tp%d = 0x" TARGET_FMT_lx "\n", - i, env->new_pred_value[i]); + i, env->pred[i]); } } @@ -407,7 +352,8 @@ uint64_t HELPER(sfinvsqrta)(CPUHexagonState *env, float32 RsV) } int64_t HELPER(vacsh_val)(CPUHexagonState *env, - int64_t RxxV, int64_t RssV, int64_t RttV) + int64_t RxxV, int64_t RssV, int64_t RttV, + uint32_t pkt_need_commit) { for (int i = 0; i < 4; i++) { int xv = sextract64(RxxV, i * 16, 16); @@ -439,9 +385,91 @@ int32_t HELPER(vacsh_pred)(CPUHexagonState *env, return PeV; } -static void probe_store(CPUHexagonState *env, int slot, int mmu_idx) +int64_t HELPER(cabacdecbin_val)(int64_t RssV, int64_t RttV) { - if (!(env->slot_cancelled & (1 << slot))) { + int64_t RddV = 0; + size4u_t state; + size4u_t valMPS; + size4u_t bitpos; + size4u_t range; + size4u_t offset; + size4u_t rLPS; + size4u_t rMPS; + + state = fEXTRACTU_RANGE(fGETWORD(1, RttV), 5, 0); + valMPS = fEXTRACTU_RANGE(fGETWORD(1, RttV), 8, 8); + bitpos = fEXTRACTU_RANGE(fGETWORD(0, RttV), 4, 0); + range = fGETWORD(0, RssV); + offset = fGETWORD(1, RssV); + + /* calculate rLPS */ + range <<= bitpos; + offset <<= bitpos; + rLPS = rLPS_table_64x4[state][(range >> 29) & 3]; + rLPS = rLPS << 23; /* left aligned */ + + /* calculate rMPS */ + rMPS = (range & 0xff800000) - rLPS; + + /* most probable region */ + if (offset < rMPS) { + RddV = AC_next_state_MPS_64[state]; + fINSERT_RANGE(RddV, 8, 8, valMPS); + fINSERT_RANGE(RddV, 31, 23, (rMPS >> 23)); + fSETWORD(1, RddV, offset); + } + /* least probable region */ + else { + RddV = AC_next_state_LPS_64[state]; + fINSERT_RANGE(RddV, 8, 8, ((!state) ? 
(1 - valMPS) : (valMPS))); + fINSERT_RANGE(RddV, 31, 23, (rLPS >> 23)); + fSETWORD(1, RddV, (offset - rMPS)); + } + return RddV; +} + +int32_t HELPER(cabacdecbin_pred)(int64_t RssV, int64_t RttV) +{ + int32_t p0 = 0; + size4u_t state; + size4u_t valMPS; + size4u_t bitpos; + size4u_t range; + size4u_t offset; + size4u_t rLPS; + size4u_t rMPS; + + state = fEXTRACTU_RANGE(fGETWORD(1, RttV), 5, 0); + valMPS = fEXTRACTU_RANGE(fGETWORD(1, RttV), 8, 8); + bitpos = fEXTRACTU_RANGE(fGETWORD(0, RttV), 4, 0); + range = fGETWORD(0, RssV); + offset = fGETWORD(1, RssV); + + /* calculate rLPS */ + range <<= bitpos; + offset <<= bitpos; + rLPS = rLPS_table_64x4[state][(range >> 29) & 3]; + rLPS = rLPS << 23; /* left aligned */ + + /* calculate rMPS */ + rMPS = (range & 0xff800000) - rLPS; + + /* most probable region */ + if (offset < rMPS) { + p0 = valMPS; + + } + /* least probable region */ + else { + p0 = valMPS ^ 1; + } + return p0; +} + +static void probe_store(CPUHexagonState *env, int slot, int mmu_idx, + bool is_predicated) +{ + if (!is_predicated || !(env->slot_cancelled & (1 << slot))) { size1u_t width = env->mem_log_stores[slot].width; target_ulong va = env->mem_log_stores[slot].va; uintptr_t ra = GETPC(); @@ -461,9 +489,12 @@ void HELPER(probe_noshuf_load)(CPUHexagonState *env, target_ulong va, } /* Called during packet commit when there are two scalar stores */ -void HELPER(probe_pkt_scalar_store_s0)(CPUHexagonState *env, int mmu_idx) +void HELPER(probe_pkt_scalar_store_s0)(CPUHexagonState *env, int args) { - probe_store(env, 0, mmu_idx); + int mmu_idx = FIELD_EX32(args, PROBE_PKT_SCALAR_STORE_S0, MMU_IDX); + bool is_predicated = + FIELD_EX32(args, PROBE_PKT_SCALAR_STORE_S0, IS_PREDICATED); + probe_store(env, 0, mmu_idx, is_predicated); } void HELPER(probe_hvx_stores)(CPUHexagonState *env, int mmu_idx) @@ -507,18 +538,21 @@ void HELPER(probe_hvx_stores)(CPUHexagonState *env, int mmu_idx) } } -void HELPER(probe_pkt_scalar_hvx_stores)(CPUHexagonState *env, int mask, - int mmu_idx) +void HELPER(probe_pkt_scalar_hvx_stores)(CPUHexagonState *env, int mask) { - bool has_st0 = (mask >> 0) & 1; - bool has_st1 = (mask >> 1) & 1; - bool has_hvx_stores = (mask >> 2) & 1; + bool has_st0 = FIELD_EX32(mask, PROBE_PKT_SCALAR_HVX_STORES, HAS_ST0); + bool has_st1 = FIELD_EX32(mask, PROBE_PKT_SCALAR_HVX_STORES, HAS_ST1); + bool has_hvx_stores = + FIELD_EX32(mask, PROBE_PKT_SCALAR_HVX_STORES, HAS_HVX_STORES); + bool s0_is_pred = FIELD_EX32(mask, PROBE_PKT_SCALAR_HVX_STORES, S0_IS_PRED); + bool s1_is_pred = FIELD_EX32(mask, PROBE_PKT_SCALAR_HVX_STORES, S1_IS_PRED); + int mmu_idx = FIELD_EX32(mask, PROBE_PKT_SCALAR_HVX_STORES, MMU_IDX); if (has_st0) { - probe_store(env, 0, mmu_idx); + probe_store(env, 0, mmu_idx, s0_is_pred); } if (has_st1) { - probe_store(env, 1, mmu_idx); + probe_store(env, 1, mmu_idx, s1_is_pred); } if (has_hvx_stores) { HELPER(probe_hvx_stores)(env, mmu_idx); @@ -532,41 +566,45 @@ void HELPER(probe_pkt_scalar_hvx_stores)(CPUHexagonState *env, int mask, * If the load is in slot 0 and there is a store in slot1 (that * wasn't cancelled), we have to do the store first. 
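* (pkt_has_store_s1 is now a translation-time constant handed down by the
* generated code instead of being read back from env; see the
* mem_load1..mem_load8 signature changes below.)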
*/ -static void check_noshuf(CPUHexagonState *env, uint32_t slot, - target_ulong vaddr, int size) +static void check_noshuf(CPUHexagonState *env, bool pkt_has_store_s1, + uint32_t slot, target_ulong vaddr, int size) { - if (slot == 0 && env->pkt_has_store_s1 && + if (slot == 0 && pkt_has_store_s1 && ((env->slot_cancelled & (1 << 1)) == 0)) { HELPER(probe_noshuf_load)(env, vaddr, size, MMU_USER_IDX); HELPER(commit_store)(env, 1); } } -uint8_t mem_load1(CPUHexagonState *env, uint32_t slot, target_ulong vaddr) +uint8_t mem_load1(CPUHexagonState *env, bool pkt_has_store_s1, + uint32_t slot, target_ulong vaddr) { uintptr_t ra = GETPC(); - check_noshuf(env, slot, vaddr, 1); + check_noshuf(env, pkt_has_store_s1, slot, vaddr, 1); return cpu_ldub_data_ra(env, vaddr, ra); } -uint16_t mem_load2(CPUHexagonState *env, uint32_t slot, target_ulong vaddr) +uint16_t mem_load2(CPUHexagonState *env, bool pkt_has_store_s1, + uint32_t slot, target_ulong vaddr) { uintptr_t ra = GETPC(); - check_noshuf(env, slot, vaddr, 2); + check_noshuf(env, pkt_has_store_s1, slot, vaddr, 2); return cpu_lduw_data_ra(env, vaddr, ra); } -uint32_t mem_load4(CPUHexagonState *env, uint32_t slot, target_ulong vaddr) +uint32_t mem_load4(CPUHexagonState *env, bool pkt_has_store_s1, + uint32_t slot, target_ulong vaddr) { uintptr_t ra = GETPC(); - check_noshuf(env, slot, vaddr, 4); + check_noshuf(env, pkt_has_store_s1, slot, vaddr, 4); return cpu_ldl_data_ra(env, vaddr, ra); } -uint64_t mem_load8(CPUHexagonState *env, uint32_t slot, target_ulong vaddr) +uint64_t mem_load8(CPUHexagonState *env, bool pkt_has_store_s1, + uint32_t slot, target_ulong vaddr) { uintptr_t ra = GETPC(); - check_noshuf(env, slot, vaddr, 8); + check_noshuf(env, pkt_has_store_s1, slot, vaddr, 8); return cpu_ldq_data_ra(env, vaddr, ra); } @@ -1193,7 +1231,7 @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV, { float32 neg_RsV; arch_fpop_start(env); - neg_RsV = float32_sub(float32_zero, RsV, &env->fp_status); + neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1); RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status); arch_fpop_end(env); return RxV; @@ -1468,12 +1506,6 @@ void HELPER(vwhist128qm)(CPUHexagonState *env, int32_t uiV) } } -void cancel_slot(CPUHexagonState *env, uint32_t slot) -{ - HEX_DEBUG_LOG("Slot %d cancelled\n", slot); - env->slot_cancelled |= (1 << slot); -} - /* These macros can be referenced in the generated helper functions */ #define warn(...) /* Nothing */ #define fatal(...) g_assert_not_reached(); diff --git a/target/hexagon/op_helper.h b/target/hexagon/op_helper.h index 02347edee8..8f3764d15e 100644 --- a/target/hexagon/op_helper.h +++ b/target/hexagon/op_helper.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
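
/* A toy model of that slot-0-load / slot-1-store ordering rule, ignoring
 * slot cancellation for brevity; the RAM buffer and names here are
 * invented for illustration, not QEMU internals: */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static uint8_t ram[64];
static struct { bool pending; unsigned va; uint32_t data; } slot1_store;

static void commit_slot1(void)
{
    if (slot1_store.pending) {
        memcpy(&ram[slot1_store.va], &slot1_store.data, 4);
        slot1_store.pending = false;
    }
}

/* A slot-0 load in a packet that also contains a slot-1 store must
 * observe that store's data, so the pending store is drained first. */
static uint32_t load4_slot0(bool pkt_has_store_s1, unsigned va)
{
    uint32_t v;
    if (pkt_has_store_s1) {
        commit_slot1();
    }
    memcpy(&v, &ram[va], 4);
    return v;
}
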
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,16 +19,15 @@ #define HEXAGON_OP_HELPER_H /* Misc functions */ -void cancel_slot(CPUHexagonState *env, uint32_t slot); -void write_new_pc(CPUHexagonState *env, bool pkt_has_multi_cof, target_ulong addr); +uint8_t mem_load1(CPUHexagonState *env, bool pkt_has_store_s1, + uint32_t slot, target_ulong vaddr); +uint16_t mem_load2(CPUHexagonState *env, bool pkt_has_store_s1, + uint32_t slot, target_ulong vaddr); +uint32_t mem_load4(CPUHexagonState *env, bool pkt_has_store_s1, + uint32_t slot, target_ulong vaddr); +uint64_t mem_load8(CPUHexagonState *env, bool pkt_has_store_s1, + uint32_t slot, target_ulong vaddr); -uint8_t mem_load1(CPUHexagonState *env, uint32_t slot, target_ulong vaddr); -uint16_t mem_load2(CPUHexagonState *env, uint32_t slot, target_ulong vaddr); -uint32_t mem_load4(CPUHexagonState *env, uint32_t slot, target_ulong vaddr); -uint64_t mem_load8(CPUHexagonState *env, uint32_t slot, target_ulong vaddr); - -void log_reg_write(CPUHexagonState *env, int rnum, - target_ulong val, uint32_t slot); void log_store64(CPUHexagonState *env, target_ulong addr, int64_t val, int width, int slot); void log_store32(CPUHexagonState *env, target_ulong addr, diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c index 75f28e08ad..8838ab2364 100644 --- a/target/hexagon/translate.c +++ b/target/hexagon/translate.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,28 +27,30 @@ #include "insn.h" #include "decode.h" #include "translate.h" +#include "genptr.h" #include "printinsn.h" +#include "analyze_funcs_generated.c.inc" + +typedef void (*AnalyzeInsn)(DisasContext *ctx); +static const AnalyzeInsn opcode_analyze[XX_LAST_OPCODE] = { +#define OPCODE(X) [X] = analyze_##X +#include "opcodes_def_generated.h.inc" +#undef OPCODE +}; + TCGv hex_gpr[TOTAL_PER_THREAD_REGS]; TCGv hex_pred[NUM_PREGS]; -TCGv hex_this_PC; TCGv hex_slot_cancelled; -TCGv hex_branch_taken; -TCGv hex_new_value[TOTAL_PER_THREAD_REGS]; +TCGv hex_new_value_usr; TCGv hex_reg_written[TOTAL_PER_THREAD_REGS]; -TCGv hex_new_pred_value[NUM_PREGS]; -TCGv hex_pred_written; TCGv hex_store_addr[STORES_MAX]; TCGv hex_store_width[STORES_MAX]; TCGv hex_store_val32[STORES_MAX]; TCGv_i64 hex_store_val64[STORES_MAX]; -TCGv hex_pkt_has_store_s1; -TCGv hex_dczero_addr; TCGv hex_llsc_addr; TCGv hex_llsc_val; TCGv_i64 hex_llsc_val_i64; -TCGv hex_VRegs_updated; -TCGv hex_QRegs_updated; TCGv hex_vstore_addr[VSTORES_MAX]; TCGv hex_vstore_size[VSTORES_MAX]; TCGv hex_vstore_pending[VSTORES_MAX]; @@ -62,6 +64,10 @@ intptr_t ctx_future_vreg_off(DisasContext *ctx, int regnum, { intptr_t offset; + if (!ctx->need_commit) { + return offsetof(CPUHexagonState, VRegs[regnum]); + } + /* See if it is already allocated */ for (int i = 0; i < ctx->future_vregs_idx; i++) { if (ctx->future_vregs_num[i] == regnum) { @@ -121,14 +127,19 @@ static bool use_goto_tb(DisasContext *ctx, target_ulong dest) return translator_use_goto_tb(&ctx->base, dest); } -static void gen_goto_tb(DisasContext *ctx, int idx, target_ulong dest) +static void gen_goto_tb(DisasContext *ctx, int idx, target_ulong dest, bool + move_to_pc) { if (use_goto_tb(ctx, dest)) { 
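
/*
 * Why move_to_pc can be false: gen_end_tb (below) passes false when the
 * destination is ctx->next_PC, since gen_start_packet may already have
 * stored next_PC into HEX_REG_PC for packets with a change of flow; only
 * the taken-branch path still needs the explicit PC write.
 */
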
tcg_gen_goto_tb(idx); - tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], dest); + if (move_to_pc) { + tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], dest); + } tcg_gen_exit_tb(ctx->base.tb, idx); } else { - tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], dest); + if (move_to_pc) { + tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], dest); + } tcg_gen_lookup_and_goto_ptr(); } } @@ -142,12 +153,12 @@ static void gen_end_tb(DisasContext *ctx) if (ctx->branch_cond != TCG_COND_NEVER) { if (ctx->branch_cond != TCG_COND_ALWAYS) { TCGLabel *skip = gen_new_label(); - tcg_gen_brcondi_tl(ctx->branch_cond, hex_branch_taken, 0, skip); - gen_goto_tb(ctx, 0, ctx->branch_dest); + tcg_gen_brcondi_tl(ctx->branch_cond, ctx->branch_taken, 0, skip); + gen_goto_tb(ctx, 0, ctx->branch_dest, true); gen_set_label(skip); - gen_goto_tb(ctx, 1, ctx->next_PC); + gen_goto_tb(ctx, 1, ctx->next_PC, false); } else { - gen_goto_tb(ctx, 0, ctx->branch_dest); + gen_goto_tb(ctx, 0, ctx->branch_dest, true); } } else if (ctx->is_tight_loop && pkt->insn[pkt->num_insns - 1].opcode == J2_endloop0) { @@ -158,9 +169,9 @@ static void gen_end_tb(DisasContext *ctx) TCGLabel *skip = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, skip); tcg_gen_subi_tl(hex_gpr[HEX_REG_LC0], hex_gpr[HEX_REG_LC0], 1); - gen_goto_tb(ctx, 0, ctx->base.tb->pc); + gen_goto_tb(ctx, 0, ctx->base.tb->pc, true); gen_set_label(skip); - gen_goto_tb(ctx, 1, ctx->next_PC); + gen_goto_tb(ctx, 1, ctx->next_PC, false); } else { tcg_gen_lookup_and_goto_ptr(); } @@ -239,12 +250,15 @@ static bool check_for_attrib(Packet *pkt, int attrib) static bool need_slot_cancelled(Packet *pkt) { - return check_for_attrib(pkt, A_CONDEXEC); -} - -static bool need_pred_written(Packet *pkt) -{ - return check_for_attrib(pkt, A_WRITES_PRED_REG); + /* We only need slot_cancelled for conditional store instructions */ + for (int i = 0; i < pkt->num_insns; i++) { + uint16_t opcode = pkt->insn[i].opcode; + if (GET_ATTRIB(opcode, A_CONDEXEC) && + GET_ATTRIB(opcode, A_SCALAR_STORE)) { + return true; + } + } + return false; } static bool need_next_PC(DisasContext *ctx) @@ -265,79 +279,8 @@ static bool need_next_PC(DisasContext *ctx) return false; } -static void gen_start_packet(DisasContext *ctx) -{ - Packet *pkt = ctx->pkt; - target_ulong next_PC = ctx->base.pc_next + pkt->encod_pkt_size_in_bytes; - int i; - - /* Clear out the disassembly context */ - ctx->next_PC = next_PC; - ctx->reg_log_idx = 0; - bitmap_zero(ctx->regs_written, TOTAL_PER_THREAD_REGS); - ctx->preg_log_idx = 0; - bitmap_zero(ctx->pregs_written, NUM_PREGS); - ctx->future_vregs_idx = 0; - ctx->tmp_vregs_idx = 0; - ctx->vreg_log_idx = 0; - bitmap_zero(ctx->vregs_updated_tmp, NUM_VREGS); - bitmap_zero(ctx->vregs_updated, NUM_VREGS); - bitmap_zero(ctx->vregs_select, NUM_VREGS); - ctx->qreg_log_idx = 0; - for (i = 0; i < STORES_MAX; i++) { - ctx->store_width[i] = 0; - } - tcg_gen_movi_tl(hex_pkt_has_store_s1, pkt->pkt_has_store_s1); - ctx->s1_store_processed = false; - ctx->pre_commit = true; - - if (HEX_DEBUG) { - /* Handy place to set a breakpoint before the packet executes */ - gen_helper_debug_start_packet(cpu_env); - tcg_gen_movi_tl(hex_this_PC, ctx->base.pc_next); - } - - /* Initialize the runtime state for packet semantics */ - if (need_slot_cancelled(pkt)) { - tcg_gen_movi_tl(hex_slot_cancelled, 0); - } - if (pkt->pkt_has_cof) { - if (pkt->pkt_has_multi_cof) { - tcg_gen_movi_tl(hex_branch_taken, 0); - } - if (need_next_PC(ctx)) { - tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], next_PC); - } - } - if (need_pred_written(pkt)) { - 
tcg_gen_movi_tl(hex_pred_written, 0); - } - - if (pkt->pkt_has_hvx) { - tcg_gen_movi_tl(hex_VRegs_updated, 0); - tcg_gen_movi_tl(hex_QRegs_updated, 0); - } -} - -bool is_gather_store_insn(DisasContext *ctx) -{ - Packet *pkt = ctx->pkt; - Insn *insn = ctx->insn; - if (GET_ATTRIB(insn->opcode, A_CVI_NEW) && - insn->new_value_producer_slot == 1) { - /* Look for gather instruction */ - for (int i = 0; i < pkt->num_insns; i++) { - Insn *in = &pkt->insn[i]; - if (GET_ATTRIB(in->opcode, A_CVI_GATHER) && in->slot == 1) { - return true; - } - } - } - return false; -} - /* - * The LOG_*_WRITE macros mark most of the writes in a packet + * The opcode_analyze functions mark most of the writes in a packet * However, there are some implicit writes marked as attributes * of the applicable instructions. */ @@ -360,18 +303,7 @@ static void mark_implicit_reg_write(DisasContext *ctx, int attrib, int rnum) is_predicated = true; } - if (is_predicated && !is_preloaded(ctx, rnum)) { - tcg_gen_mov_tl(hex_new_value[rnum], hex_gpr[rnum]); - } - - ctx_log_reg_write(ctx, rnum); - } -} - -static void mark_implicit_pred_write(DisasContext *ctx, int attrib, int pnum) -{ - if (GET_ATTRIB(ctx->insn->opcode, attrib)) { - ctx_log_pred_write(ctx, pnum); + ctx_log_reg_write(ctx, rnum, is_predicated); } } @@ -388,6 +320,13 @@ static void mark_implicit_reg_writes(DisasContext *ctx) mark_implicit_reg_write(ctx, A_FPOP, HEX_REG_USR); } +static void mark_implicit_pred_write(DisasContext *ctx, int attrib, int pnum) +{ + if (GET_ATTRIB(ctx->insn->opcode, attrib)) { + ctx_log_pred_write(ctx, pnum); + } +} + static void mark_implicit_pred_writes(DisasContext *ctx) { mark_implicit_pred_write(ctx, A_IMPLICIT_WRITES_P0, 0); @@ -396,6 +335,284 @@ static void mark_implicit_pred_writes(DisasContext *ctx) mark_implicit_pred_write(ctx, A_IMPLICIT_WRITES_P3, 3); } +static bool pkt_raises_exception(Packet *pkt) +{ + if (check_for_attrib(pkt, A_LOAD) || + check_for_attrib(pkt, A_STORE)) { + return true; + } + return false; +} + +static bool need_commit(DisasContext *ctx) +{ + Packet *pkt = ctx->pkt; + + /* + * If the short-circuit property is set to false, we'll always do the commit + */ + if (!ctx->short_circuit) { + return true; + } + + if (pkt_raises_exception(pkt)) { + return true; + } + + /* Registers with immutability flags require new_value */ + for (int i = 0; i < ctx->reg_log_idx; i++) { + int rnum = ctx->reg_log[i]; + if (reg_immut_masks[rnum]) { + return true; + } + } + + /* Floating point instructions are hard-coded to use new_value */ + if (check_for_attrib(pkt, A_FPOP)) { + return true; + } + + if (pkt->num_insns == 1) { + if (pkt->pkt_has_hvx) { + /* + * The HVX instructions with generated helpers use + * pass-by-reference, so they need the read/write overlap + * check below. + * The HVX instructions with overrides are OK. 
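+ * (Here "overrides" is taken to mean the hand-written TCG sequences in
+ * gen_tcg_hvx.h, as opposed to the generated helpers that pass vector
+ * registers by reference.)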
+ */ + if (!ctx->has_hvx_helper) { + return false; + } + } else { + return false; + } + } + + /* Check for overlap between register reads and writes */ + for (int i = 0; i < ctx->reg_log_idx; i++) { + int rnum = ctx->reg_log[i]; + if (test_bit(rnum, ctx->regs_read)) { + return true; + } + } + + /* Check for overlap between predicate reads and writes */ + for (int i = 0; i < ctx->preg_log_idx; i++) { + int pnum = ctx->preg_log[i]; + if (test_bit(pnum, ctx->pregs_read)) { + return true; + } + } + + /* Check for overlap between HVX reads and writes */ + for (int i = 0; i < ctx->vreg_log_idx; i++) { + int vnum = ctx->vreg_log[i]; + if (test_bit(vnum, ctx->vregs_read)) { + return true; + } + } + if (!bitmap_empty(ctx->vregs_updated_tmp, NUM_VREGS)) { + int i = find_first_bit(ctx->vregs_updated_tmp, NUM_VREGS); + while (i < NUM_VREGS) { + if (test_bit(i, ctx->vregs_read)) { + return true; + } + i = find_next_bit(ctx->vregs_updated_tmp, NUM_VREGS, i + 1); + } + } + if (!bitmap_empty(ctx->vregs_select, NUM_VREGS)) { + int i = find_first_bit(ctx->vregs_select, NUM_VREGS); + while (i < NUM_VREGS) { + if (test_bit(i, ctx->vregs_read)) { + return true; + } + i = find_next_bit(ctx->vregs_select, NUM_VREGS, i + 1); + } + } + + /* Check for overlap between HVX predicate reads and writes */ + for (int i = 0; i < ctx->qreg_log_idx; i++) { + int qnum = ctx->qreg_log[i]; + if (test_bit(qnum, ctx->qregs_read)) { + return true; + } + } + + return false; +} + +static void mark_implicit_pred_read(DisasContext *ctx, int attrib, int pnum) +{ + if (GET_ATTRIB(ctx->insn->opcode, attrib)) { + ctx_log_pred_read(ctx, pnum); + } +} + +static void mark_implicit_pred_reads(DisasContext *ctx) +{ + mark_implicit_pred_read(ctx, A_IMPLICIT_READS_P0, 0); + mark_implicit_pred_read(ctx, A_IMPLICIT_READS_P1, 1); + mark_implicit_pred_read(ctx, A_IMPLICIT_READS_P3, 2); + mark_implicit_pred_read(ctx, A_IMPLICIT_READS_P3, 3); +} + +static void analyze_packet(DisasContext *ctx) +{ + Packet *pkt = ctx->pkt; + ctx->has_hvx_helper = false; + for (int i = 0; i < pkt->num_insns; i++) { + Insn *insn = &pkt->insn[i]; + ctx->insn = insn; + if (opcode_analyze[insn->opcode]) { + opcode_analyze[insn->opcode](ctx); + } + mark_implicit_reg_writes(ctx); + mark_implicit_pred_writes(ctx); + mark_implicit_pred_reads(ctx); + } + + ctx->need_commit = need_commit(ctx); +} + +static void gen_start_packet(DisasContext *ctx) +{ + Packet *pkt = ctx->pkt; + target_ulong next_PC = ctx->base.pc_next + pkt->encod_pkt_size_in_bytes; + int i; + + /* Clear out the disassembly context */ + ctx->next_PC = next_PC; + ctx->reg_log_idx = 0; + bitmap_zero(ctx->regs_written, TOTAL_PER_THREAD_REGS); + bitmap_zero(ctx->regs_read, TOTAL_PER_THREAD_REGS); + bitmap_zero(ctx->predicated_regs, TOTAL_PER_THREAD_REGS); + ctx->preg_log_idx = 0; + bitmap_zero(ctx->pregs_written, NUM_PREGS); + bitmap_zero(ctx->pregs_read, NUM_PREGS); + ctx->future_vregs_idx = 0; + ctx->tmp_vregs_idx = 0; + ctx->vreg_log_idx = 0; + bitmap_zero(ctx->vregs_updated_tmp, NUM_VREGS); + bitmap_zero(ctx->vregs_updated, NUM_VREGS); + bitmap_zero(ctx->vregs_select, NUM_VREGS); + bitmap_zero(ctx->predicated_future_vregs, NUM_VREGS); + bitmap_zero(ctx->predicated_tmp_vregs, NUM_VREGS); + bitmap_zero(ctx->vregs_read, NUM_VREGS); + bitmap_zero(ctx->qregs_read, NUM_QREGS); + ctx->qreg_log_idx = 0; + for (i = 0; i < STORES_MAX; i++) { + ctx->store_width[i] = 0; + } + ctx->s1_store_processed = false; + ctx->pre_commit = true; + for (i = 0; i < TOTAL_PER_THREAD_REGS; i++) { + ctx->new_value[i] = NULL; + } + for (i = 
0; i < NUM_PREGS; i++) { + ctx->new_pred_value[i] = NULL; + } + + analyze_packet(ctx); + + /* + * pregs_written is used both in the analyze phase as well as the code + * gen phase, so clear it again. + */ + bitmap_zero(ctx->pregs_written, NUM_PREGS); + + if (HEX_DEBUG) { + /* Handy place to set a breakpoint before the packet executes */ + gen_helper_debug_start_packet(cpu_env); + } + + /* Initialize the runtime state for packet semantics */ + if (need_slot_cancelled(pkt)) { + tcg_gen_movi_tl(hex_slot_cancelled, 0); + } + ctx->branch_taken = NULL; + if (pkt->pkt_has_cof) { + ctx->branch_taken = tcg_temp_new(); + if (pkt->pkt_has_multi_cof) { + tcg_gen_movi_tl(ctx->branch_taken, 0); + } + if (need_next_PC(ctx)) { + tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], next_PC); + } + } + if (HEX_DEBUG) { + ctx->pred_written = tcg_temp_new(); + tcg_gen_movi_tl(ctx->pred_written, 0); + } + + /* Preload the predicated registers into get_result_gpr(ctx, i) */ + if (ctx->need_commit && + !bitmap_empty(ctx->predicated_regs, TOTAL_PER_THREAD_REGS)) { + int i = find_first_bit(ctx->predicated_regs, TOTAL_PER_THREAD_REGS); + while (i < TOTAL_PER_THREAD_REGS) { + tcg_gen_mov_tl(get_result_gpr(ctx, i), hex_gpr[i]); + i = find_next_bit(ctx->predicated_regs, TOTAL_PER_THREAD_REGS, + i + 1); + } + } + + /* + * Preload the predicated pred registers into ctx->new_pred_value[pred_num] + * Only endloop instructions conditionally write to pred registers + */ + if (ctx->need_commit && pkt->pkt_has_endloop) { + for (int i = 0; i < ctx->preg_log_idx; i++) { + int pred_num = ctx->preg_log[i]; + ctx->new_pred_value[pred_num] = tcg_temp_new(); + tcg_gen_mov_tl(ctx->new_pred_value[pred_num], hex_pred[pred_num]); + } + } + + /* Preload the predicated HVX registers into future_VRegs and tmp_VRegs */ + if (!bitmap_empty(ctx->predicated_future_vregs, NUM_VREGS)) { + int i = find_first_bit(ctx->predicated_future_vregs, NUM_VREGS); + while (i < NUM_VREGS) { + const intptr_t VdV_off = + ctx_future_vreg_off(ctx, i, 1, true); + intptr_t src_off = offsetof(CPUHexagonState, VRegs[i]); + tcg_gen_gvec_mov(MO_64, VdV_off, + src_off, + sizeof(MMVector), + sizeof(MMVector)); + i = find_next_bit(ctx->predicated_future_vregs, NUM_VREGS, i + 1); + } + } + if (!bitmap_empty(ctx->predicated_tmp_vregs, NUM_VREGS)) { + int i = find_first_bit(ctx->predicated_tmp_vregs, NUM_VREGS); + while (i < NUM_VREGS) { + const intptr_t VdV_off = + ctx_tmp_vreg_off(ctx, i, 1, true); + intptr_t src_off = offsetof(CPUHexagonState, VRegs[i]); + tcg_gen_gvec_mov(MO_64, VdV_off, + src_off, + sizeof(MMVector), + sizeof(MMVector)); + i = find_next_bit(ctx->predicated_tmp_vregs, NUM_VREGS, i + 1); + } + } +} + +bool is_gather_store_insn(DisasContext *ctx) +{ + Packet *pkt = ctx->pkt; + Insn *insn = ctx->insn; + if (GET_ATTRIB(insn->opcode, A_CVI_NEW) && + insn->new_value_producer_slot == 1) { + /* Look for gather instruction */ + for (int i = 0; i < pkt->num_insns; i++) { + Insn *in = &pkt->insn[i]; + if (GET_ATTRIB(in->opcode, A_CVI_GATHER) && in->slot == 1) { + return true; + } + } + } + return false; +} + static void mark_store_width(DisasContext *ctx) { uint16_t opcode = ctx->insn->opcode; @@ -403,6 +620,9 @@ static void mark_store_width(DisasContext *ctx) uint8_t width = 0; if (GET_ATTRIB(opcode, A_SCALAR_STORE)) { + if (GET_ATTRIB(opcode, A_MEMSIZE_0B)) { + return; + } if (GET_ATTRIB(opcode, A_MEMSIZE_1B)) { width |= 1; } @@ -423,9 +643,7 @@ static void mark_store_width(DisasContext *ctx) static void gen_insn(DisasContext *ctx) { if (ctx->insn->generate) { - 
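
/* A hedged sketch of the short-circuit test that need_commit() above
 * implements: commit through packet-local result temporaries is only
 * required when something written by the packet is also read inside it.
 * Bitmaps are modeled as plain uint64_t; the real code uses
 * DECLARE_BITMAP and per-class checks (GPRs, predicates, HVX, qregs). */
#include <stdbool.h>
#include <stdint.h>

static bool needs_commit_sketch(uint64_t regs_written, uint64_t regs_read,
                                uint64_t preds_written, uint64_t preds_read)
{
    return (regs_written & regs_read) != 0 ||
           (preds_written & preds_read) != 0;
}
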
mark_implicit_reg_writes(ctx); ctx->insn->generate(ctx); - mark_implicit_pred_writes(ctx); mark_store_width(ctx); } else { gen_exception_end_tb(ctx, HEX_EXCP_INVALID_OPCODE); @@ -439,10 +657,15 @@ static void gen_reg_writes(DisasContext *ctx) { int i; + /* Early exit if not needed */ + if (!ctx->need_commit) { + return; + } + for (i = 0; i < ctx->reg_log_idx; i++) { int reg_num = ctx->reg_log[i]; - tcg_gen_mov_tl(hex_gpr[reg_num], hex_new_value[reg_num]); + tcg_gen_mov_tl(hex_gpr[reg_num], get_result_gpr(ctx, reg_num)); /* * ctx->is_tight_loop is set when SA0 points to the beginning of the TB. @@ -456,42 +679,14 @@ static void gen_reg_writes(DisasContext *ctx) static void gen_pred_writes(DisasContext *ctx) { - int i; - - /* Early exit if the log is empty */ - if (!ctx->preg_log_idx) { + /* Early exit if not needed or the log is empty */ + if (!ctx->need_commit || !ctx->preg_log_idx) { return; } - /* - * Only endloop instructions will conditionally - * write a predicate. If there are no endloop - * instructions, we can use the non-conditional - * write of the predicates. - */ - if (ctx->pkt->pkt_has_endloop) { - TCGv zero = tcg_constant_tl(0); - TCGv pred_written = tcg_temp_new(); - for (i = 0; i < ctx->preg_log_idx; i++) { - int pred_num = ctx->preg_log[i]; - - tcg_gen_andi_tl(pred_written, hex_pred_written, 1 << pred_num); - tcg_gen_movcond_tl(TCG_COND_NE, hex_pred[pred_num], - pred_written, zero, - hex_new_pred_value[pred_num], - hex_pred[pred_num]); - } - tcg_temp_free(pred_written); - } else { - for (i = 0; i < ctx->preg_log_idx; i++) { - int pred_num = ctx->preg_log[i]; - tcg_gen_mov_tl(hex_pred[pred_num], hex_new_pred_value[pred_num]); - if (HEX_DEBUG) { - /* Do this so HELPER(debug_commit_end) will know */ - tcg_gen_ori_tl(hex_pred_written, hex_pred_written, - 1 << pred_num); - } - } + for (int i = 0; i < ctx->preg_log_idx; i++) { + int pred_num = ctx->preg_log[i]; + tcg_gen_mov_tl(hex_pred[pred_num], ctx->new_pred_value[pred_num]); } } @@ -536,10 +731,9 @@ void process_store(DisasContext *ctx, int slot_num) /* Don't do anything if the slot was cancelled */ tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1); tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end); - tcg_temp_free(cancelled); } { - TCGv address = tcg_temp_local_new(); + TCGv address = tcg_temp_new(); tcg_gen_mov_tl(address, hex_store_addr[slot_num]); /* @@ -553,27 +747,27 @@ void process_store(DisasContext *ctx, int slot_num) switch (ctx->store_width[slot_num]) { case 1: gen_check_store_width(ctx, slot_num); - tcg_gen_qemu_st8(hex_store_val32[slot_num], - hex_store_addr[slot_num], - ctx->mem_idx); + tcg_gen_qemu_st_tl(hex_store_val32[slot_num], + hex_store_addr[slot_num], + ctx->mem_idx, MO_UB); break; case 2: gen_check_store_width(ctx, slot_num); - tcg_gen_qemu_st16(hex_store_val32[slot_num], - hex_store_addr[slot_num], - ctx->mem_idx); + tcg_gen_qemu_st_tl(hex_store_val32[slot_num], + hex_store_addr[slot_num], + ctx->mem_idx, MO_TEUW); break; case 4: gen_check_store_width(ctx, slot_num); - tcg_gen_qemu_st32(hex_store_val32[slot_num], - hex_store_addr[slot_num], - ctx->mem_idx); + tcg_gen_qemu_st_tl(hex_store_val32[slot_num], + hex_store_addr[slot_num], + ctx->mem_idx, MO_TEUL); break; case 8: gen_check_store_width(ctx, slot_num); - tcg_gen_qemu_st64(hex_store_val64[slot_num], - hex_store_addr[slot_num], - ctx->mem_idx); + tcg_gen_qemu_st_i64(hex_store_val64[slot_num], + hex_store_addr[slot_num], + ctx->mem_idx, MO_TEUQ); break; default: { @@ -586,7 +780,6 @@ void process_store(DisasContext *ctx, int 
slot_num) gen_helper_commit_store(cpu_env, slot); } } - tcg_temp_free(address); } if (is_predicated) { gen_set_label(label_end); @@ -619,16 +812,14 @@ static void process_dczeroa(DisasContext *ctx) TCGv addr = tcg_temp_new(); TCGv_i64 zero = tcg_constant_i64(0); - tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f); - tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); + tcg_gen_andi_tl(addr, ctx->dczero_addr, ~0x1f); + tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ); tcg_gen_addi_tl(addr, addr, 8); - tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); + tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ); tcg_gen_addi_tl(addr, addr, 8); - tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); + tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ); tcg_gen_addi_tl(addr, addr, 8); - tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); - - tcg_temp_free(addr); + tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ); } } @@ -648,70 +839,40 @@ static void gen_commit_hvx(DisasContext *ctx) { int i; + /* Early exit if not needed */ + if (!ctx->need_commit) { + g_assert(!pkt_has_hvx_store(ctx->pkt)); + return; + } + /* * for (i = 0; i < ctx->vreg_log_idx; i++) { * int rnum = ctx->vreg_log[i]; - * if (ctx->vreg_is_predicated[i]) { - * if (env->VRegs_updated & (1 << rnum)) { - * env->VRegs[rnum] = env->future_VRegs[rnum]; - * } - * } else { - * env->VRegs[rnum] = env->future_VRegs[rnum]; - * } + * env->VRegs[rnum] = env->future_VRegs[rnum]; * } */ for (i = 0; i < ctx->vreg_log_idx; i++) { int rnum = ctx->vreg_log[i]; - bool is_predicated = ctx->vreg_is_predicated[i]; intptr_t dstoff = offsetof(CPUHexagonState, VRegs[rnum]); intptr_t srcoff = ctx_future_vreg_off(ctx, rnum, 1, false); size_t size = sizeof(MMVector); - if (is_predicated) { - TCGv cmp = tcg_temp_new(); - TCGLabel *label_skip = gen_new_label(); - - tcg_gen_andi_tl(cmp, hex_VRegs_updated, 1 << rnum); - tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip); - tcg_temp_free(cmp); - tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); - gen_set_label(label_skip); - } else { - tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); - } + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); } /* * for (i = 0; i < ctx->qreg_log_idx; i++) { * int rnum = ctx->qreg_log[i]; - * if (ctx->qreg_is_predicated[i]) { - * if (env->QRegs_updated) & (1 << rnum)) { - * env->QRegs[rnum] = env->future_QRegs[rnum]; - * } - * } else { - * env->QRegs[rnum] = env->future_QRegs[rnum]; - * } + * env->QRegs[rnum] = env->future_QRegs[rnum]; * } */ for (i = 0; i < ctx->qreg_log_idx; i++) { int rnum = ctx->qreg_log[i]; - bool is_predicated = ctx->qreg_is_predicated[i]; intptr_t dstoff = offsetof(CPUHexagonState, QRegs[rnum]); intptr_t srcoff = offsetof(CPUHexagonState, future_QRegs[rnum]); size_t size = sizeof(MMQReg); - if (is_predicated) { - TCGv cmp = tcg_temp_new(); - TCGLabel *label_skip = gen_new_label(); - - tcg_gen_andi_tl(cmp, hex_QRegs_updated, 1 << rnum); - tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip); - tcg_temp_free(cmp); - tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); - gen_set_label(label_skip); - } else { - tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); - } + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); } if (pkt_has_hvx_store(ctx->pkt)) { @@ -773,33 +934,54 @@ static void gen_commit_packet(DisasContext *ctx) g_assert(!has_store_s1 && !has_hvx_store); process_dczeroa(ctx); } else if (has_hvx_store) { - TCGv mem_idx = tcg_constant_tl(ctx->mem_idx); - if (!has_store_s0 && !has_store_s1) { + TCGv mem_idx = tcg_constant_tl(ctx->mem_idx); 
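
/* What process_dczeroa() above emits, rendered as plain C (buffer and
 * names invented for illustration): align the address down to a 32-byte
 * block and clear it with four 8-byte stores. */
#include <stdint.h>
#include <string.h>

static void dczeroa_model(uint8_t *ram, uint32_t va)
{
    uint32_t base = va & ~0x1fu;        /* 32-byte alignment, as ~0x1f */
    const uint64_t zero = 0;
    for (int i = 0; i < 4; i++) {
        memcpy(ram + base + 8 * i, &zero, sizeof(zero));
    }
}
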
gen_helper_probe_hvx_stores(cpu_env, mem_idx); } else { int mask = 0; - TCGv mask_tcgv; if (has_store_s0) { - mask |= (1 << 0); + mask = + FIELD_DP32(mask, PROBE_PKT_SCALAR_HVX_STORES, HAS_ST0, 1); } if (has_store_s1) { - mask |= (1 << 1); + mask = + FIELD_DP32(mask, PROBE_PKT_SCALAR_HVX_STORES, HAS_ST1, 1); } if (has_hvx_store) { - mask |= (1 << 2); + mask = + FIELD_DP32(mask, PROBE_PKT_SCALAR_HVX_STORES, + HAS_HVX_STORES, 1); } - mask_tcgv = tcg_constant_tl(mask); - gen_helper_probe_pkt_scalar_hvx_stores(cpu_env, mask_tcgv, mem_idx); + if (has_store_s0 && slot_is_predicated(pkt, 0)) { + mask = + FIELD_DP32(mask, PROBE_PKT_SCALAR_HVX_STORES, + S0_IS_PRED, 1); + } + if (has_store_s1 && slot_is_predicated(pkt, 1)) { + mask = + FIELD_DP32(mask, PROBE_PKT_SCALAR_HVX_STORES, + S1_IS_PRED, 1); + } + mask = FIELD_DP32(mask, PROBE_PKT_SCALAR_HVX_STORES, MMU_IDX, + ctx->mem_idx); + gen_helper_probe_pkt_scalar_hvx_stores(cpu_env, + tcg_constant_tl(mask)); } } else if (has_store_s0 && has_store_s1) { /* * process_store_log will execute the slot 1 store first, * so we only have to probe the store in slot 0 */ - TCGv mem_idx = tcg_constant_tl(ctx->mem_idx); - gen_helper_probe_pkt_scalar_store_s0(cpu_env, mem_idx); + int args = 0; + args = + FIELD_DP32(args, PROBE_PKT_SCALAR_STORE_S0, MMU_IDX, ctx->mem_idx); + if (slot_is_predicated(pkt, 0)) { + args = + FIELD_DP32(args, PROBE_PKT_SCALAR_STORE_S0, IS_PREDICATED, 1); + } + TCGv args_tcgv = tcg_constant_tl(args); + gen_helper_probe_pkt_scalar_store_s0(cpu_env, args_tcgv); } process_store_log(ctx); @@ -817,7 +999,8 @@ static void gen_commit_packet(DisasContext *ctx) tcg_constant_tl(pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa); /* Handy place to set a breakpoint at the end of execution */ - gen_helper_debug_commit_end(cpu_env, has_st0, has_st1); + gen_helper_debug_commit_end(cpu_env, tcg_constant_tl(ctx->pkt->pc), + ctx->pred_written, has_st0, has_st1); } if (pkt->vhist_insn != NULL) { @@ -864,6 +1047,7 @@ static void hexagon_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); + HexagonCPU *hex_cpu = env_archcpu(cs->env_ptr); uint32_t hex_flags = dcbase->tb->flags; ctx->mem_idx = MMU_USER_IDX; @@ -872,6 +1056,7 @@ static void hexagon_tr_init_disas_context(DisasContextBase *dcbase, ctx->num_hvx_insns = 0; ctx->branch_cond = TCG_COND_NEVER; ctx->is_tight_loop = FIELD_EX32(hex_flags, TB_FLAGS, IS_TIGHT_LOOP); + ctx->short_circuit = hex_cpu->short_circuit; } static void hexagon_tr_tb_start(DisasContextBase *db, CPUState *cpu) @@ -962,7 +1147,7 @@ static const TranslatorOps hexagon_tr_ops = { .disas_log = hexagon_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; @@ -972,9 +1157,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, } #define NAME_LEN 64 -static char new_value_names[TOTAL_PER_THREAD_REGS][NAME_LEN]; static char reg_written_names[TOTAL_PER_THREAD_REGS][NAME_LEN]; -static char new_pred_value_names[NUM_PREGS][NAME_LEN]; static char store_addr_names[STORES_MAX][NAME_LEN]; static char store_width_names[STORES_MAX][NAME_LEN]; static char store_val32_names[STORES_MAX][NAME_LEN]; @@ -994,11 +1177,6 @@ void hexagon_translate_init(void) offsetof(CPUHexagonState, gpr[i]), hexagon_regnames[i]); - snprintf(new_value_names[i], NAME_LEN, "new_%s", hexagon_regnames[i]); - 
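
/* The FIELD_DP32/FIELD_EX32 round trip used for the probe arguments,
 * shown with bare shifts against the layout declared in translate.h
 * (MMU_IDX at bits [1:0], IS_PREDICATED at bit 2); a sketch, not the
 * QEMU macros themselves: */
#include <stdbool.h>
#include <stdint.h>

static uint32_t pack_store_s0_args(unsigned mmu_idx, bool is_predicated)
{
    return (mmu_idx & 0x3u) | ((uint32_t)is_predicated << 2);
}

static void unpack_store_s0_args(uint32_t args, unsigned *mmu_idx,
                                 bool *is_predicated)
{
    *mmu_idx = args & 0x3u;
    *is_predicated = (args >> 2) & 1u;
}
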
hex_new_value[i] = tcg_global_mem_new(cpu_env, - offsetof(CPUHexagonState, new_value[i]), - new_value_names[i]); - if (HEX_DEBUG) { snprintf(reg_written_names[i], NAME_LEN, "reg_written_%s", hexagon_regnames[i]); @@ -1007,39 +1185,22 @@ void hexagon_translate_init(void) reg_written_names[i]); } } + hex_new_value_usr = tcg_global_mem_new(cpu_env, + offsetof(CPUHexagonState, new_value_usr), "new_value_usr"); + for (i = 0; i < NUM_PREGS; i++) { hex_pred[i] = tcg_global_mem_new(cpu_env, offsetof(CPUHexagonState, pred[i]), hexagon_prednames[i]); - - snprintf(new_pred_value_names[i], NAME_LEN, "new_pred_%s", - hexagon_prednames[i]); - hex_new_pred_value[i] = tcg_global_mem_new(cpu_env, - offsetof(CPUHexagonState, new_pred_value[i]), - new_pred_value_names[i]); } - hex_pred_written = tcg_global_mem_new(cpu_env, - offsetof(CPUHexagonState, pred_written), "pred_written"); - hex_this_PC = tcg_global_mem_new(cpu_env, - offsetof(CPUHexagonState, this_PC), "this_PC"); hex_slot_cancelled = tcg_global_mem_new(cpu_env, offsetof(CPUHexagonState, slot_cancelled), "slot_cancelled"); - hex_branch_taken = tcg_global_mem_new(cpu_env, - offsetof(CPUHexagonState, branch_taken), "branch_taken"); - hex_pkt_has_store_s1 = tcg_global_mem_new(cpu_env, - offsetof(CPUHexagonState, pkt_has_store_s1), "pkt_has_store_s1"); - hex_dczero_addr = tcg_global_mem_new(cpu_env, - offsetof(CPUHexagonState, dczero_addr), "dczero_addr"); hex_llsc_addr = tcg_global_mem_new(cpu_env, offsetof(CPUHexagonState, llsc_addr), "llsc_addr"); hex_llsc_val = tcg_global_mem_new(cpu_env, offsetof(CPUHexagonState, llsc_val), "llsc_val"); hex_llsc_val_i64 = tcg_global_mem_new_i64(cpu_env, offsetof(CPUHexagonState, llsc_val_i64), "llsc_val_i64"); - hex_VRegs_updated = tcg_global_mem_new(cpu_env, - offsetof(CPUHexagonState, VRegs_updated), "VRegs_updated"); - hex_QRegs_updated = tcg_global_mem_new(cpu_env, - offsetof(CPUHexagonState, QRegs_updated), "QRegs_updated"); for (i = 0; i < STORES_MAX; i++) { snprintf(store_addr_names[i], NAME_LEN, "store_addr_%d", i); hex_store_addr[i] = tcg_global_mem_new(cpu_env, diff --git a/target/hexagon/translate.h b/target/hexagon/translate.h index d971f4f095..4dd59c6726 100644 --- a/target/hexagon/translate.h +++ b/target/hexagon/translate.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
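
/* The hex_new_value[] globals removed here give way to per-packet
 * temporaries in ctx->new_value[], NULLed in gen_start_packet. A sketch
 * of the lazy allocation presumably done by get_result_gpr() in
 * genptr.c (see that file for the authoritative version): */
static TCGv get_result_gpr_sketch(DisasContext *ctx, int rnum)
{
    if (ctx->new_value[rnum] == NULL) {
        ctx->new_value[rnum] = tcg_temp_new();
    }
    return ctx->new_value[rnum];
}
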
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -38,9 +38,12 @@ typedef struct DisasContext { int reg_log[REG_WRITES_MAX]; int reg_log_idx; DECLARE_BITMAP(regs_written, TOTAL_PER_THREAD_REGS); + DECLARE_BITMAP(regs_read, TOTAL_PER_THREAD_REGS); + DECLARE_BITMAP(predicated_regs, TOTAL_PER_THREAD_REGS); int preg_log[PRED_WRITES_MAX]; int preg_log_idx; DECLARE_BITMAP(pregs_written, NUM_PREGS); + DECLARE_BITMAP(pregs_read, NUM_PREGS); uint8_t store_width[STORES_MAX]; bool s1_store_processed; int future_vregs_idx; @@ -48,52 +51,79 @@ typedef struct DisasContext { int tmp_vregs_idx; int tmp_vregs_num[VECTOR_TEMPS_MAX]; int vreg_log[NUM_VREGS]; - bool vreg_is_predicated[NUM_VREGS]; int vreg_log_idx; DECLARE_BITMAP(vregs_updated_tmp, NUM_VREGS); DECLARE_BITMAP(vregs_updated, NUM_VREGS); DECLARE_BITMAP(vregs_select, NUM_VREGS); + DECLARE_BITMAP(predicated_future_vregs, NUM_VREGS); + DECLARE_BITMAP(predicated_tmp_vregs, NUM_VREGS); + DECLARE_BITMAP(vregs_read, NUM_VREGS); int qreg_log[NUM_QREGS]; - bool qreg_is_predicated[NUM_QREGS]; int qreg_log_idx; + DECLARE_BITMAP(qregs_read, NUM_QREGS); bool pre_commit; + bool need_commit; TCGCond branch_cond; target_ulong branch_dest; bool is_tight_loop; + bool short_circuit; + bool has_hvx_helper; + TCGv new_value[TOTAL_PER_THREAD_REGS]; + TCGv new_pred_value[NUM_PREGS]; + TCGv pred_written; + TCGv branch_taken; + TCGv dczero_addr; } DisasContext; -static inline void ctx_log_reg_write(DisasContext *ctx, int rnum) -{ - if (test_bit(rnum, ctx->regs_written)) { - HEX_DEBUG_LOG("WARNING: Multiple writes to r%d\n", rnum); - } - ctx->reg_log[ctx->reg_log_idx] = rnum; - ctx->reg_log_idx++; - set_bit(rnum, ctx->regs_written); -} - -static inline void ctx_log_reg_write_pair(DisasContext *ctx, int rnum) -{ - ctx_log_reg_write(ctx, rnum); - ctx_log_reg_write(ctx, rnum + 1); -} - static inline void ctx_log_pred_write(DisasContext *ctx, int pnum) { - ctx->preg_log[ctx->preg_log_idx] = pnum; - ctx->preg_log_idx++; - set_bit(pnum, ctx->pregs_written); + if (!test_bit(pnum, ctx->pregs_written)) { + ctx->preg_log[ctx->preg_log_idx] = pnum; + ctx->preg_log_idx++; + set_bit(pnum, ctx->pregs_written); + } } -static inline bool is_preloaded(DisasContext *ctx, int num) +static inline void ctx_log_pred_read(DisasContext *ctx, int pnum) { - return test_bit(num, ctx->regs_written); + set_bit(pnum, ctx->pregs_read); } -static inline bool is_vreg_preloaded(DisasContext *ctx, int num) +static inline void ctx_log_reg_write(DisasContext *ctx, int rnum, + bool is_predicated) { - return test_bit(num, ctx->vregs_updated) || - test_bit(num, ctx->vregs_updated_tmp); + if (rnum == HEX_REG_P3_0_ALIASED) { + for (int i = 0; i < NUM_PREGS; i++) { + ctx_log_pred_write(ctx, i); + } + } else { + if (!test_bit(rnum, ctx->regs_written)) { + ctx->reg_log[ctx->reg_log_idx] = rnum; + ctx->reg_log_idx++; + set_bit(rnum, ctx->regs_written); + } + if (is_predicated) { + set_bit(rnum, ctx->predicated_regs); + } + } +} + +static inline void ctx_log_reg_write_pair(DisasContext *ctx, int rnum, + bool is_predicated) +{ + ctx_log_reg_write(ctx, rnum, is_predicated); + ctx_log_reg_write(ctx, rnum + 1, is_predicated); +} + +static inline void ctx_log_reg_read(DisasContext *ctx, int rnum) +{ + set_bit(rnum, ctx->regs_read); +} + +static inline void ctx_log_reg_read_pair(DisasContext *ctx, int rnum) +{ + ctx_log_reg_read(ctx, rnum); + ctx_log_reg_read(ctx, rnum + 1); } intptr_t ctx_future_vreg_off(DisasContext *ctx, int 
regnum, @@ -106,17 +136,25 @@ static inline void ctx_log_vreg_write(DisasContext *ctx, bool is_predicated) { if (type != EXT_TMP) { - ctx->vreg_log[ctx->vreg_log_idx] = rnum; - ctx->vreg_is_predicated[ctx->vreg_log_idx] = is_predicated; - ctx->vreg_log_idx++; + if (!test_bit(rnum, ctx->vregs_updated)) { + ctx->vreg_log[ctx->vreg_log_idx] = rnum; + ctx->vreg_log_idx++; + set_bit(rnum, ctx->vregs_updated); + } set_bit(rnum, ctx->vregs_updated); + if (is_predicated) { + set_bit(rnum, ctx->predicated_future_vregs); + } } if (type == EXT_NEW) { set_bit(rnum, ctx->vregs_select); } if (type == EXT_TMP) { set_bit(rnum, ctx->vregs_updated_tmp); + if (is_predicated) { + set_bit(rnum, ctx->predicated_tmp_vregs); + } } } @@ -128,37 +166,56 @@ static inline void ctx_log_vreg_write_pair(DisasContext *ctx, ctx_log_vreg_write(ctx, rnum ^ 1, type, is_predicated); } +static inline void ctx_log_vreg_read(DisasContext *ctx, int rnum) +{ + set_bit(rnum, ctx->vregs_read); +} + +static inline void ctx_log_vreg_read_pair(DisasContext *ctx, int rnum) +{ + ctx_log_vreg_read(ctx, rnum ^ 0); + ctx_log_vreg_read(ctx, rnum ^ 1); +} + static inline void ctx_log_qreg_write(DisasContext *ctx, - int rnum, bool is_predicated) + int rnum) { ctx->qreg_log[ctx->qreg_log_idx] = rnum; - ctx->qreg_is_predicated[ctx->qreg_log_idx] = is_predicated; ctx->qreg_log_idx++; } +static inline void ctx_log_qreg_read(DisasContext *ctx, int qnum) +{ + set_bit(qnum, ctx->qregs_read); +} + extern TCGv hex_gpr[TOTAL_PER_THREAD_REGS]; extern TCGv hex_pred[NUM_PREGS]; -extern TCGv hex_this_PC; extern TCGv hex_slot_cancelled; -extern TCGv hex_branch_taken; -extern TCGv hex_new_value[TOTAL_PER_THREAD_REGS]; +extern TCGv hex_new_value_usr; extern TCGv hex_reg_written[TOTAL_PER_THREAD_REGS]; -extern TCGv hex_new_pred_value[NUM_PREGS]; -extern TCGv hex_pred_written; extern TCGv hex_store_addr[STORES_MAX]; extern TCGv hex_store_width[STORES_MAX]; extern TCGv hex_store_val32[STORES_MAX]; extern TCGv_i64 hex_store_val64[STORES_MAX]; -extern TCGv hex_dczero_addr; extern TCGv hex_llsc_addr; extern TCGv hex_llsc_val; extern TCGv_i64 hex_llsc_val_i64; -extern TCGv hex_VRegs_updated; -extern TCGv hex_QRegs_updated; extern TCGv hex_vstore_addr[VSTORES_MAX]; extern TCGv hex_vstore_size[VSTORES_MAX]; extern TCGv hex_vstore_pending[VSTORES_MAX]; bool is_gather_store_insn(DisasContext *ctx); void process_store(DisasContext *ctx, int slot_num); + +FIELD(PROBE_PKT_SCALAR_STORE_S0, MMU_IDX, 0, 2) +FIELD(PROBE_PKT_SCALAR_STORE_S0, IS_PREDICATED, 2, 1) + +FIELD(PROBE_PKT_SCALAR_HVX_STORES, HAS_ST0, 0, 1) +FIELD(PROBE_PKT_SCALAR_HVX_STORES, HAS_ST1, 1, 1) +FIELD(PROBE_PKT_SCALAR_HVX_STORES, HAS_HVX_STORES, 2, 1) +FIELD(PROBE_PKT_SCALAR_HVX_STORES, S0_IS_PRED, 3, 1) +FIELD(PROBE_PKT_SCALAR_HVX_STORES, S1_IS_PRED, 4, 1) +FIELD(PROBE_PKT_SCALAR_HVX_STORES, MMU_IDX, 5, 2) + #endif diff --git a/target/hppa/cpu-param.h b/target/hppa/cpu-param.h index a48a2701ae..c2791ae5f2 100644 --- a/target/hppa/cpu-param.h +++ b/target/hppa/cpu-param.h @@ -29,6 +29,5 @@ # define TARGET_PHYS_ADDR_SPACE_BITS 32 #endif #define TARGET_PAGE_BITS 12 -#define NB_MMU_MODES 5 #endif diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c index 55c190280e..11022f9c99 100644 --- a/target/hppa/cpu.c +++ b/target/hppa/cpu.c @@ -26,7 +26,7 @@ #include "qemu/module.h" #include "exec/exec-all.h" #include "fpu/softfloat.h" - +#include "tcg/tcg.h" static void hppa_cpu_set_pc(CPUState *cs, vaddr value) { @@ -48,8 +48,10 @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs, { HPPACPU *cpu = HPPA_CPU(cs); + 
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + #ifdef CONFIG_USER_ONLY - cpu->env.iaoq_f = tb_pc(tb); + cpu->env.iaoq_f = tb->pc; cpu->env.iaoq_b = tb->cs_base; #else /* Recover the IAOQ values from the GVA + PRIV. */ @@ -59,7 +61,7 @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs, int32_t diff = cs_base; cpu->env.iasq_f = iasq_f; - cpu->env.iaoq_f = (tb_pc(tb) & ~iasq_f) + priv; + cpu->env.iaoq_f = (tb->pc & ~iasq_f) + priv; if (diff) { cpu->env.iaoq_b = cpu->env.iaoq_f + diff; } diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h index 6f3b6beecf..b595ef25a9 100644 --- a/target/hppa/cpu.h +++ b/target/hppa/cpu.h @@ -322,11 +322,11 @@ static inline void cpu_hppa_change_prot_id(CPUHPPAState *env) { } void cpu_hppa_change_prot_id(CPUHPPAState *env); #endif -hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr); int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void hppa_cpu_dump_state(CPUState *cs, FILE *f, int); #ifndef CONFIG_USER_ONLY +hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr); bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); diff --git a/target/hppa/fpu_helper.c b/target/hppa/fpu_helper.c new file mode 100644 index 0000000000..576f283b04 --- /dev/null +++ b/target/hppa/fpu_helper.c @@ -0,0 +1,450 @@ +/* + * Helpers for HPPA FPU instructions. + * + * Copyright (c) 2016 Richard Henderson + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "fpu/softfloat.h" + +void HELPER(loaded_fr0)(CPUHPPAState *env) +{ + uint32_t shadow = env->fr[0] >> 32; + int rm, d; + + env->fr0_shadow = shadow; + + switch (extract32(shadow, 9, 2)) { + default: + rm = float_round_nearest_even; + break; + case 1: + rm = float_round_to_zero; + break; + case 2: + rm = float_round_up; + break; + case 3: + rm = float_round_down; + break; + } + set_float_rounding_mode(rm, &env->fp_status); + + d = extract32(shadow, 5, 1); + set_flush_to_zero(d, &env->fp_status); + set_flush_inputs_to_zero(d, &env->fp_status); +} + +void cpu_hppa_loaded_fr0(CPUHPPAState *env) +{ + helper_loaded_fr0(env); +} + +#define CONVERT_BIT(X, SRC, DST) \ + ((SRC) > (DST) \ + ? 
(X) / ((SRC) / (DST)) & (DST) \ + : ((X) & (SRC)) * ((DST) / (SRC))) + +static void update_fr0_op(CPUHPPAState *env, uintptr_t ra) +{ + uint32_t soft_exp = get_float_exception_flags(&env->fp_status); + uint32_t hard_exp = 0; + uint32_t shadow = env->fr0_shadow; + + if (likely(soft_exp == 0)) { + env->fr[0] = (uint64_t)shadow << 32; + return; + } + set_float_exception_flags(0, &env->fp_status); + + hard_exp |= CONVERT_BIT(soft_exp, float_flag_inexact, 1u << 0); + hard_exp |= CONVERT_BIT(soft_exp, float_flag_underflow, 1u << 1); + hard_exp |= CONVERT_BIT(soft_exp, float_flag_overflow, 1u << 2); + hard_exp |= CONVERT_BIT(soft_exp, float_flag_divbyzero, 1u << 3); + hard_exp |= CONVERT_BIT(soft_exp, float_flag_invalid, 1u << 4); + shadow |= hard_exp << (32 - 5); + env->fr0_shadow = shadow; + env->fr[0] = (uint64_t)shadow << 32; + + if (hard_exp & shadow) { + hppa_dynamic_excp(env, EXCP_ASSIST, ra); + } +} + +float32 HELPER(fsqrt_s)(CPUHPPAState *env, float32 arg) +{ + float32 ret = float32_sqrt(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float32 HELPER(frnd_s)(CPUHPPAState *env, float32 arg) +{ + float32 ret = float32_round_to_int(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float32 HELPER(fadd_s)(CPUHPPAState *env, float32 a, float32 b) +{ + float32 ret = float32_add(a, b, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float32 HELPER(fsub_s)(CPUHPPAState *env, float32 a, float32 b) +{ + float32 ret = float32_sub(a, b, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float32 HELPER(fmpy_s)(CPUHPPAState *env, float32 a, float32 b) +{ + float32 ret = float32_mul(a, b, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float32 HELPER(fdiv_s)(CPUHPPAState *env, float32 a, float32 b) +{ + float32 ret = float32_div(a, b, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(fsqrt_d)(CPUHPPAState *env, float64 arg) +{ + float64 ret = float64_sqrt(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(frnd_d)(CPUHPPAState *env, float64 arg) +{ + float64 ret = float64_round_to_int(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(fadd_d)(CPUHPPAState *env, float64 a, float64 b) +{ + float64 ret = float64_add(a, b, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(fsub_d)(CPUHPPAState *env, float64 a, float64 b) +{ + float64 ret = float64_sub(a, b, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(fmpy_d)(CPUHPPAState *env, float64 a, float64 b) +{ + float64 ret = float64_mul(a, b, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(fdiv_d)(CPUHPPAState *env, float64 a, float64 b) +{ + float64 ret = float64_div(a, b, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(fcnv_s_d)(CPUHPPAState *env, float32 arg) +{ + float64 ret = float32_to_float64(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float32 HELPER(fcnv_d_s)(CPUHPPAState *env, float64 arg) +{ + float32 ret = float64_to_float32(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float32 HELPER(fcnv_w_s)(CPUHPPAState *env, int32_t arg) +{ + float32 ret = int32_to_float32(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float32 HELPER(fcnv_dw_s)(CPUHPPAState *env, int64_t arg) +{ + float32 ret = int64_to_float32(arg, 
&env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(fcnv_w_d)(CPUHPPAState *env, int32_t arg) +{ + float64 ret = int32_to_float64(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(fcnv_dw_d)(CPUHPPAState *env, int64_t arg) +{ + float64 ret = int64_to_float64(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +int32_t HELPER(fcnv_s_w)(CPUHPPAState *env, float32 arg) +{ + int32_t ret = float32_to_int32(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +int32_t HELPER(fcnv_d_w)(CPUHPPAState *env, float64 arg) +{ + int32_t ret = float64_to_int32(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +int64_t HELPER(fcnv_s_dw)(CPUHPPAState *env, float32 arg) +{ + int64_t ret = float32_to_int64(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +int64_t HELPER(fcnv_d_dw)(CPUHPPAState *env, float64 arg) +{ + int64_t ret = float64_to_int64(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +int32_t HELPER(fcnv_t_s_w)(CPUHPPAState *env, float32 arg) +{ + int32_t ret = float32_to_int32_round_to_zero(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +int32_t HELPER(fcnv_t_d_w)(CPUHPPAState *env, float64 arg) +{ + int32_t ret = float64_to_int32_round_to_zero(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +int64_t HELPER(fcnv_t_s_dw)(CPUHPPAState *env, float32 arg) +{ + int64_t ret = float32_to_int64_round_to_zero(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +int64_t HELPER(fcnv_t_d_dw)(CPUHPPAState *env, float64 arg) +{ + int64_t ret = float64_to_int64_round_to_zero(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float32 HELPER(fcnv_uw_s)(CPUHPPAState *env, uint32_t arg) +{ + float32 ret = uint32_to_float32(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float32 HELPER(fcnv_udw_s)(CPUHPPAState *env, uint64_t arg) +{ + float32 ret = uint64_to_float32(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(fcnv_uw_d)(CPUHPPAState *env, uint32_t arg) +{ + float64 ret = uint32_to_float64(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(fcnv_udw_d)(CPUHPPAState *env, uint64_t arg) +{ + float64 ret = uint64_to_float64(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +uint32_t HELPER(fcnv_s_uw)(CPUHPPAState *env, float32 arg) +{ + uint32_t ret = float32_to_uint32(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +uint32_t HELPER(fcnv_d_uw)(CPUHPPAState *env, float64 arg) +{ + uint32_t ret = float64_to_uint32(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +uint64_t HELPER(fcnv_s_udw)(CPUHPPAState *env, float32 arg) +{ + uint64_t ret = float32_to_uint64(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +uint64_t HELPER(fcnv_d_udw)(CPUHPPAState *env, float64 arg) +{ + uint64_t ret = float64_to_uint64(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +uint32_t HELPER(fcnv_t_s_uw)(CPUHPPAState *env, float32 arg) +{ + uint32_t ret = float32_to_uint32_round_to_zero(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +uint32_t HELPER(fcnv_t_d_uw)(CPUHPPAState *env, float64 arg) +{ + uint32_t ret = float64_to_uint32_round_to_zero(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + 
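The CONVERT_BIT() macro defined near the top of this file relocates a single flag bit between two mask positions using only division and multiplication, so one expression covers both shift directions. A small standalone check (the bit positions below are made-up illustration values, not QEMU's real float_flag_* constants):

```c
#include <stdint.h>
#include <stdio.h>

/* Same shape as CONVERT_BIT in fpu_helper.c: move the SRC-position bit
 * of X to the DST position, whichever direction that requires. */
#define CONVERT_BIT(X, SRC, DST) \
    ((SRC) > (DST)               \
     ? (X) / ((SRC) / (DST)) & (DST) \
     : ((X) & (SRC)) * ((DST) / (SRC)))

int main(void)
{
    uint32_t x = 1u << 5;
    /* Move bit 5 down to bit 0, and up to bit 9. */
    printf("down: %u\n", CONVERT_BIT(x, 1u << 5, 1u << 0));  /* 1 */
    printf("up:   %u\n", CONVERT_BIT(x, 1u << 5, 1u << 9));  /* 512 */
    return 0;
}
```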
+uint64_t HELPER(fcnv_t_s_udw)(CPUHPPAState *env, float32 arg) +{ + uint64_t ret = float32_to_uint64_round_to_zero(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +uint64_t HELPER(fcnv_t_d_udw)(CPUHPPAState *env, float64 arg) +{ + uint64_t ret = float64_to_uint64_round_to_zero(arg, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +static void update_fr0_cmp(CPUHPPAState *env, uint32_t y, + uint32_t c, FloatRelation r) +{ + uint32_t shadow = env->fr0_shadow; + + switch (r) { + case float_relation_greater: + c = extract32(c, 4, 1); + break; + case float_relation_less: + c = extract32(c, 3, 1); + break; + case float_relation_equal: + c = extract32(c, 2, 1); + break; + case float_relation_unordered: + c = extract32(c, 1, 1); + break; + default: + g_assert_not_reached(); + } + + if (y) { + /* targeted comparison */ + /* set fpsr[ca[y - 1]] to current compare */ + shadow = deposit32(shadow, 21 - (y - 1), 1, c); + } else { + /* queued comparison */ + /* shift cq right by one place */ + shadow = deposit32(shadow, 11, 10, extract32(shadow, 12, 10)); + /* move fpsr[c] to fpsr[cq[0]] */ + shadow = deposit32(shadow, 21, 1, extract32(shadow, 26, 1)); + /* set fpsr[c] to current compare */ + shadow = deposit32(shadow, 26, 1, c); + } + + env->fr0_shadow = shadow; + env->fr[0] = (uint64_t)shadow << 32; +} + +void HELPER(fcmp_s)(CPUHPPAState *env, float32 a, float32 b, + uint32_t y, uint32_t c) +{ + FloatRelation r; + if (c & 1) { + r = float32_compare(a, b, &env->fp_status); + } else { + r = float32_compare_quiet(a, b, &env->fp_status); + } + update_fr0_op(env, GETPC()); + update_fr0_cmp(env, y, c, r); +} + +void HELPER(fcmp_d)(CPUHPPAState *env, float64 a, float64 b, + uint32_t y, uint32_t c) +{ + FloatRelation r; + if (c & 1) { + r = float64_compare(a, b, &env->fp_status); + } else { + r = float64_compare_quiet(a, b, &env->fp_status); + } + update_fr0_op(env, GETPC()); + update_fr0_cmp(env, y, c, r); +} + +float32 HELPER(fmpyfadd_s)(CPUHPPAState *env, float32 a, float32 b, float32 c) +{ + float32 ret = float32_muladd(a, b, c, 0, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float32 HELPER(fmpynfadd_s)(CPUHPPAState *env, float32 a, float32 b, float32 c) +{ + float32 ret = float32_muladd(a, b, c, float_muladd_negate_product, + &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(fmpyfadd_d)(CPUHPPAState *env, float64 a, float64 b, float64 c) +{ + float64 ret = float64_muladd(a, b, c, 0, &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} + +float64 HELPER(fmpynfadd_d)(CPUHPPAState *env, float64 a, float64 b, float64 c) +{ + float64 ret = float64_muladd(a, b, c, float_muladd_negate_product, + &env->fp_status); + update_fr0_op(env, GETPC()); + return ret; +} diff --git a/target/hppa/gdbstub.c b/target/hppa/gdbstub.c index 729c37b2ca..48a514384f 100644 --- a/target/hppa/gdbstub.c +++ b/target/hppa/gdbstub.c @@ -19,7 +19,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" int hppa_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { diff --git a/target/hppa/meson.build b/target/hppa/meson.build index 021e42a2d0..81b4b4e617 100644 --- a/target/hppa/meson.build +++ b/target/hppa/meson.build @@ -4,6 +4,7 @@ hppa_ss = ss.source_set() hppa_ss.add(gen) hppa_ss.add(files( 'cpu.c', + 'fpu_helper.c', 'gdbstub.c', 'helper.c', 'int_helper.c', @@ -15,6 +16,7 @@ hppa_softmmu_ss = ss.source_set() hppa_softmmu_ss.add(files( 'machine.c', 
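update_fr0_cmp() above maintains the FPSR compare queue purely with extract32()/deposit32() bitfield moves. A self-contained sketch of the queued-comparison path, with local re-implementations of the two helpers (QEMU's live in include/qemu/bitops.h; semantics assumed to match):

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t extract32(uint32_t v, int start, int len)
{
    return (v >> start) & (~0u >> (32 - len));
}

static uint32_t deposit32(uint32_t v, int start, int len, uint32_t field)
{
    uint32_t mask = (~0u >> (32 - len)) << start;
    return (v & ~mask) | ((field << start) & mask);
}

int main(void)
{
    uint32_t shadow = 0;
    /* Queued comparison, as in update_fr0_cmp(): shift CQ right one
     * place, move C into CQ[0], then store the new compare into C. */
    shadow = deposit32(shadow, 11, 10, extract32(shadow, 12, 10));
    shadow = deposit32(shadow, 21, 1, extract32(shadow, 26, 1));
    shadow = deposit32(shadow, 26, 1, 1);    /* current compare = true */
    printf("fr0 shadow = 0x%08x\n", shadow); /* bit 26 set: 0x04000000 */
    return 0;
}
```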
'mem_helper.c', + 'sys_helper.c', )) target_arch += {'hppa': hppa_ss} diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c index fbd80e4248..32c27c66b2 100644 --- a/target/hppa/op_helper.c +++ b/target/hppa/op_helper.c @@ -24,8 +24,6 @@ #include "exec/helper-proto.h" #include "exec/cpu_ldst.h" #include "qemu/timer.h" -#include "sysemu/runstate.h" -#include "fpu/softfloat.h" #include "trace.h" G_NORETURN void HELPER(excp)(CPUHPPAState *env, int excp) @@ -197,432 +195,6 @@ target_ureg HELPER(probe)(CPUHPPAState *env, target_ulong addr, #endif } -void HELPER(loaded_fr0)(CPUHPPAState *env) -{ - uint32_t shadow = env->fr[0] >> 32; - int rm, d; - - env->fr0_shadow = shadow; - - switch (extract32(shadow, 9, 2)) { - default: - rm = float_round_nearest_even; - break; - case 1: - rm = float_round_to_zero; - break; - case 2: - rm = float_round_up; - break; - case 3: - rm = float_round_down; - break; - } - set_float_rounding_mode(rm, &env->fp_status); - - d = extract32(shadow, 5, 1); - set_flush_to_zero(d, &env->fp_status); - set_flush_inputs_to_zero(d, &env->fp_status); -} - -void cpu_hppa_loaded_fr0(CPUHPPAState *env) -{ - helper_loaded_fr0(env); -} - -#define CONVERT_BIT(X, SRC, DST) \ - ((SRC) > (DST) \ - ? (X) / ((SRC) / (DST)) & (DST) \ - : ((X) & (SRC)) * ((DST) / (SRC))) - -static void update_fr0_op(CPUHPPAState *env, uintptr_t ra) -{ - uint32_t soft_exp = get_float_exception_flags(&env->fp_status); - uint32_t hard_exp = 0; - uint32_t shadow = env->fr0_shadow; - - if (likely(soft_exp == 0)) { - env->fr[0] = (uint64_t)shadow << 32; - return; - } - set_float_exception_flags(0, &env->fp_status); - - hard_exp |= CONVERT_BIT(soft_exp, float_flag_inexact, 1u << 0); - hard_exp |= CONVERT_BIT(soft_exp, float_flag_underflow, 1u << 1); - hard_exp |= CONVERT_BIT(soft_exp, float_flag_overflow, 1u << 2); - hard_exp |= CONVERT_BIT(soft_exp, float_flag_divbyzero, 1u << 3); - hard_exp |= CONVERT_BIT(soft_exp, float_flag_invalid, 1u << 4); - shadow |= hard_exp << (32 - 5); - env->fr0_shadow = shadow; - env->fr[0] = (uint64_t)shadow << 32; - - if (hard_exp & shadow) { - hppa_dynamic_excp(env, EXCP_ASSIST, ra); - } -} - -float32 HELPER(fsqrt_s)(CPUHPPAState *env, float32 arg) -{ - float32 ret = float32_sqrt(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float32 HELPER(frnd_s)(CPUHPPAState *env, float32 arg) -{ - float32 ret = float32_round_to_int(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float32 HELPER(fadd_s)(CPUHPPAState *env, float32 a, float32 b) -{ - float32 ret = float32_add(a, b, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float32 HELPER(fsub_s)(CPUHPPAState *env, float32 a, float32 b) -{ - float32 ret = float32_sub(a, b, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float32 HELPER(fmpy_s)(CPUHPPAState *env, float32 a, float32 b) -{ - float32 ret = float32_mul(a, b, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float32 HELPER(fdiv_s)(CPUHPPAState *env, float32 a, float32 b) -{ - float32 ret = float32_div(a, b, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 HELPER(fsqrt_d)(CPUHPPAState *env, float64 arg) -{ - float64 ret = float64_sqrt(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 HELPER(frnd_d)(CPUHPPAState *env, float64 arg) -{ - float64 ret = float64_round_to_int(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 HELPER(fadd_d)(CPUHPPAState *env, float64 a, float64 b) -{ 
- float64 ret = float64_add(a, b, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 HELPER(fsub_d)(CPUHPPAState *env, float64 a, float64 b) -{ - float64 ret = float64_sub(a, b, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 HELPER(fmpy_d)(CPUHPPAState *env, float64 a, float64 b) -{ - float64 ret = float64_mul(a, b, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 HELPER(fdiv_d)(CPUHPPAState *env, float64 a, float64 b) -{ - float64 ret = float64_div(a, b, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 HELPER(fcnv_s_d)(CPUHPPAState *env, float32 arg) -{ - float64 ret = float32_to_float64(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float32 HELPER(fcnv_d_s)(CPUHPPAState *env, float64 arg) -{ - float32 ret = float64_to_float32(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float32 HELPER(fcnv_w_s)(CPUHPPAState *env, int32_t arg) -{ - float32 ret = int32_to_float32(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float32 HELPER(fcnv_dw_s)(CPUHPPAState *env, int64_t arg) -{ - float32 ret = int64_to_float32(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 HELPER(fcnv_w_d)(CPUHPPAState *env, int32_t arg) -{ - float64 ret = int32_to_float64(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 HELPER(fcnv_dw_d)(CPUHPPAState *env, int64_t arg) -{ - float64 ret = int64_to_float64(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -int32_t HELPER(fcnv_s_w)(CPUHPPAState *env, float32 arg) -{ - int32_t ret = float32_to_int32(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -int32_t HELPER(fcnv_d_w)(CPUHPPAState *env, float64 arg) -{ - int32_t ret = float64_to_int32(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -int64_t HELPER(fcnv_s_dw)(CPUHPPAState *env, float32 arg) -{ - int64_t ret = float32_to_int64(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -int64_t HELPER(fcnv_d_dw)(CPUHPPAState *env, float64 arg) -{ - int64_t ret = float64_to_int64(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -int32_t HELPER(fcnv_t_s_w)(CPUHPPAState *env, float32 arg) -{ - int32_t ret = float32_to_int32_round_to_zero(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -int32_t HELPER(fcnv_t_d_w)(CPUHPPAState *env, float64 arg) -{ - int32_t ret = float64_to_int32_round_to_zero(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -int64_t HELPER(fcnv_t_s_dw)(CPUHPPAState *env, float32 arg) -{ - int64_t ret = float32_to_int64_round_to_zero(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -int64_t HELPER(fcnv_t_d_dw)(CPUHPPAState *env, float64 arg) -{ - int64_t ret = float64_to_int64_round_to_zero(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float32 HELPER(fcnv_uw_s)(CPUHPPAState *env, uint32_t arg) -{ - float32 ret = uint32_to_float32(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float32 HELPER(fcnv_udw_s)(CPUHPPAState *env, uint64_t arg) -{ - float32 ret = uint64_to_float32(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 HELPER(fcnv_uw_d)(CPUHPPAState *env, uint32_t arg) -{ - float64 ret = uint32_to_float64(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 
HELPER(fcnv_udw_d)(CPUHPPAState *env, uint64_t arg) -{ - float64 ret = uint64_to_float64(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -uint32_t HELPER(fcnv_s_uw)(CPUHPPAState *env, float32 arg) -{ - uint32_t ret = float32_to_uint32(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -uint32_t HELPER(fcnv_d_uw)(CPUHPPAState *env, float64 arg) -{ - uint32_t ret = float64_to_uint32(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -uint64_t HELPER(fcnv_s_udw)(CPUHPPAState *env, float32 arg) -{ - uint64_t ret = float32_to_uint64(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -uint64_t HELPER(fcnv_d_udw)(CPUHPPAState *env, float64 arg) -{ - uint64_t ret = float64_to_uint64(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -uint32_t HELPER(fcnv_t_s_uw)(CPUHPPAState *env, float32 arg) -{ - uint32_t ret = float32_to_uint32_round_to_zero(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -uint32_t HELPER(fcnv_t_d_uw)(CPUHPPAState *env, float64 arg) -{ - uint32_t ret = float64_to_uint32_round_to_zero(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -uint64_t HELPER(fcnv_t_s_udw)(CPUHPPAState *env, float32 arg) -{ - uint64_t ret = float32_to_uint64_round_to_zero(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -uint64_t HELPER(fcnv_t_d_udw)(CPUHPPAState *env, float64 arg) -{ - uint64_t ret = float64_to_uint64_round_to_zero(arg, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -static void update_fr0_cmp(CPUHPPAState *env, uint32_t y, - uint32_t c, FloatRelation r) -{ - uint32_t shadow = env->fr0_shadow; - - switch (r) { - case float_relation_greater: - c = extract32(c, 4, 1); - break; - case float_relation_less: - c = extract32(c, 3, 1); - break; - case float_relation_equal: - c = extract32(c, 2, 1); - break; - case float_relation_unordered: - c = extract32(c, 1, 1); - break; - default: - g_assert_not_reached(); - } - - if (y) { - /* targeted comparison */ - /* set fpsr[ca[y - 1]] to current compare */ - shadow = deposit32(shadow, 21 - (y - 1), 1, c); - } else { - /* queued comparison */ - /* shift cq right by one place */ - shadow = deposit32(shadow, 11, 10, extract32(shadow, 12, 10)); - /* move fpsr[c] to fpsr[cq[0]] */ - shadow = deposit32(shadow, 21, 1, extract32(shadow, 26, 1)); - /* set fpsr[c] to current compare */ - shadow = deposit32(shadow, 26, 1, c); - } - - env->fr0_shadow = shadow; - env->fr[0] = (uint64_t)shadow << 32; -} - -void HELPER(fcmp_s)(CPUHPPAState *env, float32 a, float32 b, - uint32_t y, uint32_t c) -{ - FloatRelation r; - if (c & 1) { - r = float32_compare(a, b, &env->fp_status); - } else { - r = float32_compare_quiet(a, b, &env->fp_status); - } - update_fr0_op(env, GETPC()); - update_fr0_cmp(env, y, c, r); -} - -void HELPER(fcmp_d)(CPUHPPAState *env, float64 a, float64 b, - uint32_t y, uint32_t c) -{ - FloatRelation r; - if (c & 1) { - r = float64_compare(a, b, &env->fp_status); - } else { - r = float64_compare_quiet(a, b, &env->fp_status); - } - update_fr0_op(env, GETPC()); - update_fr0_cmp(env, y, c, r); -} - -float32 HELPER(fmpyfadd_s)(CPUHPPAState *env, float32 a, float32 b, float32 c) -{ - float32 ret = float32_muladd(a, b, c, 0, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float32 HELPER(fmpynfadd_s)(CPUHPPAState *env, float32 a, float32 b, float32 c) -{ - float32 ret = float32_muladd(a, b, c, float_muladd_negate_product, - 
&env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 HELPER(fmpyfadd_d)(CPUHPPAState *env, float64 a, float64 b, float64 c) -{ - float64 ret = float64_muladd(a, b, c, 0, &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - -float64 HELPER(fmpynfadd_d)(CPUHPPAState *env, float64 a, float64 b, float64 c) -{ - float64 ret = float64_muladd(a, b, c, float_muladd_negate_product, - &env->fp_status); - update_fr0_op(env, GETPC()); - return ret; -} - target_ureg HELPER(read_interval_timer)(void) { #ifdef CONFIG_USER_ONLY @@ -636,79 +208,3 @@ target_ureg HELPER(read_interval_timer)(void) return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2; #endif } - -#ifndef CONFIG_USER_ONLY -void HELPER(write_interval_timer)(CPUHPPAState *env, target_ureg val) -{ - HPPACPU *cpu = env_archcpu(env); - uint64_t current = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - uint64_t timeout; - - /* Even in 64-bit mode, the comparator is always 32-bit. But the - value we expose to the guest is 1/4 of the speed of the clock, - so moosh in 34 bits. */ - timeout = deposit64(current, 0, 34, (uint64_t)val << 2); - - /* If the mooshing puts the clock in the past, advance to next round. */ - if (timeout < current + 1000) { - timeout += 1ULL << 34; - } - - cpu->env.cr[CR_IT] = timeout; - timer_mod(cpu->alarm_timer, timeout); -} - -void HELPER(halt)(CPUHPPAState *env) -{ - qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); - helper_excp(env, EXCP_HLT); -} - -void HELPER(reset)(CPUHPPAState *env) -{ - qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); - helper_excp(env, EXCP_HLT); -} - -target_ureg HELPER(swap_system_mask)(CPUHPPAState *env, target_ureg nsm) -{ - target_ulong psw = env->psw; - /* - * Setting the PSW Q bit to 1, if it was not already 1, is an - * undefined operation. - * - * However, HP-UX 10.20 does this with the SSM instruction. - * Tested this on HP9000/712 and HP9000/785/C3750 and both - * machines set the Q bit from 0 to 1 without an exception, - * so let this go without comment. - */ - env->psw = (psw & ~PSW_SM) | (nsm & PSW_SM); - return psw & PSW_SM; -} - -void HELPER(rfi)(CPUHPPAState *env) -{ - env->iasq_f = (uint64_t)env->cr[CR_IIASQ] << 32; - env->iasq_b = (uint64_t)env->cr_back[0] << 32; - env->iaoq_f = env->cr[CR_IIAOQ]; - env->iaoq_b = env->cr_back[1]; - cpu_hppa_put_psw(env, env->cr[CR_IPSW]); -} - -void HELPER(getshadowregs)(CPUHPPAState *env) -{ - env->gr[1] = env->shadow[0]; - env->gr[8] = env->shadow[1]; - env->gr[9] = env->shadow[2]; - env->gr[16] = env->shadow[3]; - env->gr[17] = env->shadow[4]; - env->gr[24] = env->shadow[5]; - env->gr[25] = env->shadow[6]; -} - -void HELPER(rfi_r)(CPUHPPAState *env) -{ - helper_getshadowregs(env); - helper_rfi(env); -} -#endif diff --git a/target/hppa/sys_helper.c b/target/hppa/sys_helper.c new file mode 100644 index 0000000000..4bb4cf611c --- /dev/null +++ b/target/hppa/sys_helper.c @@ -0,0 +1,101 @@ +/* + * Helpers for HPPA system instructions. + * + * Copyright (c) 2016 Richard Henderson + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "qemu/timer.h" +#include "sysemu/runstate.h" + +void HELPER(write_interval_timer)(CPUHPPAState *env, target_ureg val) +{ + HPPACPU *cpu = env_archcpu(env); + uint64_t current = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint64_t timeout; + + /* + * Even in 64-bit mode, the comparator is always 32-bit. But the + * value we expose to the guest is 1/4 of the speed of the clock, + * so moosh in 34 bits. + */ + timeout = deposit64(current, 0, 34, (uint64_t)val << 2); + + /* If the mooshing puts the clock in the past, advance to next round. */ + if (timeout < current + 1000) { + timeout += 1ULL << 34; + } + + cpu->env.cr[CR_IT] = timeout; + timer_mod(cpu->alarm_timer, timeout); +} + +void HELPER(halt)(CPUHPPAState *env) +{ + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + helper_excp(env, EXCP_HLT); +} + +void HELPER(reset)(CPUHPPAState *env) +{ + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + helper_excp(env, EXCP_HLT); +} + +target_ureg HELPER(swap_system_mask)(CPUHPPAState *env, target_ureg nsm) +{ + target_ulong psw = env->psw; + /* + * Setting the PSW Q bit to 1, if it was not already 1, is an + * undefined operation. + * + * However, HP-UX 10.20 does this with the SSM instruction. + * Tested this on HP9000/712 and HP9000/785/C3750 and both + * machines set the Q bit from 0 to 1 without an exception, + * so let this go without comment. + */ + env->psw = (psw & ~PSW_SM) | (nsm & PSW_SM); + return psw & PSW_SM; +} + +void HELPER(rfi)(CPUHPPAState *env) +{ + env->iasq_f = (uint64_t)env->cr[CR_IIASQ] << 32; + env->iasq_b = (uint64_t)env->cr_back[0] << 32; + env->iaoq_f = env->cr[CR_IIAOQ]; + env->iaoq_b = env->cr_back[1]; + cpu_hppa_put_psw(env, env->cr[CR_IPSW]); +} + +void HELPER(getshadowregs)(CPUHPPAState *env) +{ + env->gr[1] = env->shadow[0]; + env->gr[8] = env->shadow[1]; + env->gr[9] = env->shadow[2]; + env->gr[16] = env->shadow[3]; + env->gr[17] = env->shadow[4]; + env->gr[24] = env->shadow[5]; + env->gr[25] = env->shadow[6]; +} + +void HELPER(rfi_r)(CPUHPPAState *env) +{ + helper_getshadowregs(env); + helper_rfi(env); +} diff --git a/target/hppa/translate.c b/target/hppa/translate.c index 981f8ee03d..59e4688bfa 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -35,13 +35,10 @@ #undef TCGv #undef tcg_temp_new #undef tcg_global_mem_new -#undef tcg_temp_local_new -#undef tcg_temp_free #if TARGET_LONG_BITS == 64 #define TCGv_tl TCGv_i64 #define tcg_temp_new_tl tcg_temp_new_i64 -#define tcg_temp_free_tl tcg_temp_free_i64 #if TARGET_REGISTER_BITS == 64 #define tcg_gen_extu_reg_tl tcg_gen_mov_i64 #else @@ -50,7 +47,6 @@ #else #define TCGv_tl TCGv_i32 #define tcg_temp_new_tl tcg_temp_new_i32 -#define tcg_temp_free_tl tcg_temp_free_i32 #define tcg_gen_extu_reg_tl tcg_gen_mov_i32 #endif @@ -59,8 +55,6 @@ #define tcg_temp_new tcg_temp_new_i64 #define tcg_global_mem_new tcg_global_mem_new_i64 -#define tcg_temp_local_new tcg_temp_local_new_i64 -#define tcg_temp_free tcg_temp_free_i64 #define tcg_gen_movi_reg tcg_gen_movi_i64 #define tcg_gen_mov_reg tcg_gen_mov_i64 @@ -141,8 +135,6 @@ #define tcg_gen_extract_reg tcg_gen_extract_i64 #define tcg_gen_sextract_reg tcg_gen_sextract_i64 #define tcg_gen_extract2_reg tcg_gen_extract2_i64 -#define tcg_const_reg tcg_const_i64 -#define tcg_const_local_reg tcg_const_local_i64 
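The interval-timer arithmetic in write_interval_timer() above splices the guest's 32-bit comparator (scaled by 4, since the guest-visible clock runs at 1/4 speed) into the low 34 bits of the 64-bit nanosecond counter, then bumps to the next 2^34 window if that landed in the past. A standalone sketch with a local stand-in for deposit64() and made-up clock values:

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's deposit64() (include/qemu/bitops.h). */
static uint64_t deposit64(uint64_t v, int start, int len, uint64_t field)
{
    uint64_t mask = (~0ULL >> (64 - len)) << start;
    return (v & ~mask) | ((field << start) & mask);
}

int main(void)
{
    uint64_t current = 0x123456789abcdefULL;   /* pretend clock reading */
    uint32_t val = 0x100;                      /* guest comparator value */
    uint64_t timeout = deposit64(current, 0, 34, (uint64_t)val << 2);

    /* If the splice landed in the past, advance one 2^34 window. */
    if (timeout < current + 1000) {
        timeout += 1ULL << 34;
    }
    printf("current=0x%016llx timeout=0x%016llx\n",
           (unsigned long long)current, (unsigned long long)timeout);
    return 0;
}
```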
#define tcg_constant_reg tcg_constant_i64 #define tcg_gen_movcond_reg tcg_gen_movcond_i64 #define tcg_gen_add2_reg tcg_gen_add2_i64 @@ -155,8 +147,6 @@ #define TCGv_reg TCGv_i32 #define tcg_temp_new tcg_temp_new_i32 #define tcg_global_mem_new tcg_global_mem_new_i32 -#define tcg_temp_local_new tcg_temp_local_new_i32 -#define tcg_temp_free tcg_temp_free_i32 #define tcg_gen_movi_reg tcg_gen_movi_i32 #define tcg_gen_mov_reg tcg_gen_mov_i32 @@ -236,8 +226,6 @@ #define tcg_gen_extract_reg tcg_gen_extract_i32 #define tcg_gen_sextract_reg tcg_gen_sextract_i32 #define tcg_gen_extract2_reg tcg_gen_extract2_i32 -#define tcg_const_reg tcg_const_i32 -#define tcg_const_local_reg tcg_const_local_i32 #define tcg_constant_reg tcg_constant_i32 #define tcg_gen_movcond_reg tcg_gen_movcond_i32 #define tcg_gen_add2_reg tcg_gen_add2_i32 @@ -283,7 +271,7 @@ typedef struct DisasContext { #ifdef CONFIG_USER_ONLY #define UNALIGN(C) (C)->unalign #else -#define UNALIGN(C) 0 +#define UNALIGN(C) MO_ALIGN #endif /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */ @@ -491,10 +479,6 @@ static void cond_free(DisasCond *cond) { switch (cond->c) { default: - if (cond->a0 != cpu_psw_n) { - tcg_temp_free(cond->a0); - } - tcg_temp_free(cond->a1); cond->a0 = NULL; cond->a1 = NULL; /* fallthru */ @@ -586,7 +570,9 @@ static TCGv_i32 load_frw_i32(unsigned rt) static TCGv_i32 load_frw0_i32(unsigned rt) { if (rt == 0) { - return tcg_const_i32(0); + TCGv_i32 ret = tcg_temp_new_i32(); + tcg_gen_movi_i32(ret, 0); + return ret; } else { return load_frw_i32(rt); } @@ -594,15 +580,15 @@ static TCGv_i32 load_frw0_i32(unsigned rt) static TCGv_i64 load_frw0_i64(unsigned rt) { + TCGv_i64 ret = tcg_temp_new_i64(); if (rt == 0) { - return tcg_const_i64(0); + tcg_gen_movi_i64(ret, 0); } else { - TCGv_i64 ret = tcg_temp_new_i64(); tcg_gen_ld32u_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt & 31]) + (rt & 32 ? 
LO_OFS : HI_OFS)); - return ret; } + return ret; } static void save_frw_i32(unsigned rt, TCGv_i32 val) @@ -625,7 +611,9 @@ static TCGv_i64 load_frd(unsigned rt) static TCGv_i64 load_frd0(unsigned rt) { if (rt == 0) { - return tcg_const_i64(0); + TCGv_i64 ret = tcg_temp_new_i64(); + tcg_gen_movi_i64(ret, 0); + return ret; } else { return load_frd(rt); } @@ -1024,7 +1012,6 @@ static DisasCond do_unit_cond(unsigned cf, TCGv_reg res, tcg_gen_and_reg(tmp, in1, in2); tcg_gen_andc_reg(cb, cb, res); tcg_gen_or_reg(cb, cb, tmp); - tcg_temp_free(tmp); } switch (cf >> 1) { @@ -1043,7 +1030,6 @@ static DisasCond do_unit_cond(unsigned cf, TCGv_reg res, tcg_gen_andc_reg(tmp, tmp, res); tcg_gen_andi_reg(tmp, tmp, 0x80808080u); cond = cond_make_0(TCG_COND_NE, tmp); - tcg_temp_free(tmp); break; case 3: /* SHZ / NHZ */ @@ -1052,7 +1038,6 @@ static DisasCond do_unit_cond(unsigned cf, TCGv_reg res, tcg_gen_andc_reg(tmp, tmp, res); tcg_gen_andi_reg(tmp, tmp, 0x80008000u); cond = cond_make_0(TCG_COND_NE, tmp); - tcg_temp_free(tmp); break; case 4: /* SDC / NDC */ @@ -1073,9 +1058,6 @@ static DisasCond do_unit_cond(unsigned cf, TCGv_reg res, default: g_assert_not_reached(); } - if (cf & 8) { - tcg_temp_free(cb); - } if (cf & 1) { cond.c = tcg_invert_cond(cond.c); } @@ -1093,7 +1075,6 @@ static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res, tcg_gen_xor_reg(sv, res, in1); tcg_gen_xor_reg(tmp, in1, in2); tcg_gen_andc_reg(sv, sv, tmp); - tcg_temp_free(tmp); return sv; } @@ -1108,7 +1089,6 @@ static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res, tcg_gen_xor_reg(sv, res, in1); tcg_gen_xor_reg(tmp, in1, in2); tcg_gen_and_reg(sv, sv, tmp); - tcg_temp_free(tmp); return sv; } @@ -1166,7 +1146,6 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1, tmp = tcg_temp_new(); tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); gen_helper_tcond(cpu_env, tmp); - tcg_temp_free(tmp); } /* Write back the result. */ @@ -1175,7 +1154,6 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1, save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb); } save_gpr(ctx, rt, dest); - tcg_temp_free(dest); /* Install the new nullification. */ cond_free(&ctx->null_cond); @@ -1260,16 +1238,12 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1, tmp = tcg_temp_new(); tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); gen_helper_tcond(cpu_env, tmp); - tcg_temp_free(tmp); } /* Write back the result. */ save_or_nullify(ctx, cpu_psw_cb, cb); save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb); save_gpr(ctx, rt, dest); - tcg_temp_free(dest); - tcg_temp_free(cb); - tcg_temp_free(cb_msb); /* Install the new nullification. */ cond_free(&ctx->null_cond); @@ -1324,7 +1298,6 @@ static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1, /* Clear. */ tcg_gen_movi_reg(dest, 0); save_gpr(ctx, rt, dest); - tcg_temp_free(dest); /* Install the new nullification. 
*/ cond_free(&ctx->null_cond); @@ -1384,7 +1357,6 @@ static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1, TCGv_reg tmp = tcg_temp_new(); tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); gen_helper_tcond(cpu_env, tmp); - tcg_temp_free(tmp); } save_gpr(ctx, rt, dest); @@ -1423,11 +1395,9 @@ static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base) tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5); tcg_gen_andi_reg(tmp, tmp, 030); tcg_gen_trunc_reg_ptr(ptr, tmp); - tcg_temp_free(tmp); tcg_gen_add_ptr(ptr, ptr, cpu_env); tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4])); - tcg_temp_free_ptr(ptr); return spc; } @@ -1585,7 +1555,6 @@ static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb, tmp = tcg_temp_new_i32(); do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL); save_frw_i32(rt, tmp); - tcg_temp_free_i32(tmp); if (rt == 0) { gen_helper_loaded_fr0(cpu_env); @@ -1611,7 +1580,6 @@ static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb, tmp = tcg_temp_new_i64(); do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ); save_frd(rt, tmp); - tcg_temp_free_i64(tmp); if (rt == 0) { gen_helper_loaded_fr0(cpu_env); @@ -1645,7 +1613,6 @@ static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb, tmp = load_frw_i32(rt); do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL); - tcg_temp_free_i32(tmp); return nullify_end(ctx); } @@ -1666,7 +1633,6 @@ static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb, tmp = load_frd(rt); do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ); - tcg_temp_free_i64(tmp); return nullify_end(ctx); } @@ -1688,7 +1654,6 @@ static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra, func(tmp, cpu_env, tmp); save_frw_i32(rt, tmp); - tcg_temp_free_i32(tmp); return nullify_end(ctx); } @@ -1704,9 +1669,7 @@ static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra, func(dst, cpu_env, src); - tcg_temp_free_i64(src); save_frw_i32(rt, dst); - tcg_temp_free_i32(dst); return nullify_end(ctx); } @@ -1721,7 +1684,6 @@ static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra, func(tmp, cpu_env, tmp); save_frd(rt, tmp); - tcg_temp_free_i64(tmp); return nullify_end(ctx); } @@ -1737,9 +1699,7 @@ static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra, func(dst, cpu_env, src); - tcg_temp_free_i32(src); save_frd(rt, dst); - tcg_temp_free_i64(dst); return nullify_end(ctx); } @@ -1755,9 +1715,7 @@ static bool do_fop_weww(DisasContext *ctx, unsigned rt, func(a, cpu_env, a, b); - tcg_temp_free_i32(b); save_frw_i32(rt, a); - tcg_temp_free_i32(a); return nullify_end(ctx); } @@ -1773,9 +1731,7 @@ static bool do_fop_dedd(DisasContext *ctx, unsigned rt, func(a, cpu_env, a, b); - tcg_temp_free_i64(b); save_frd(rt, a); - tcg_temp_free_i64(a); return nullify_end(ctx); } @@ -2101,8 +2057,6 @@ static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a) tcg_gen_trunc_i64_reg(t1, t0); save_gpr(ctx, rt, t1); - tcg_temp_free(t1); - tcg_temp_free_i64(t0); cond_free(&ctx->null_cond); return true; @@ -2179,7 +2133,6 @@ static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a) } else { tcg_gen_mov_i64(cpu_sr[rs], t64); } - tcg_temp_free_i64(t64); return nullify_end(ctx); } @@ -2195,7 +2148,6 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) tmp = tcg_temp_new(); tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1); save_or_nullify(ctx, cpu_sar, tmp); - tcg_temp_free(tmp); cond_free(&ctx->null_cond); return true; @@ -2257,7 +2209,6 @@ static bool 
trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a) tcg_gen_not_reg(tmp, load_gpr(ctx, a->r)); tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1); save_or_nullify(ctx, cpu_sar, tmp); - tcg_temp_free(tmp); cond_free(&ctx->null_cond); return true; @@ -2276,8 +2227,6 @@ static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a) tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b))); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_trunc_i64_reg(dest, t0); - - tcg_temp_free_i64(t0); #endif save_gpr(ctx, a->t, dest); @@ -2440,8 +2389,6 @@ static bool trans_probe(DisasContext *ctx, arg_probe *a) gen_helper_probe(dest, cpu_env, addr, level, want); - tcg_temp_free_i32(level); - save_gpr(ctx, a->t, dest); return nullify_end(ctx); } @@ -2533,8 +2480,6 @@ static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a) : offsetof(CPUHPPAState, cr[CR_IIAOQ])); tcg_gen_shli_i64(stl, stl, 32); tcg_gen_or_tl(addr, atl, stl); - tcg_temp_free_tl(atl); - tcg_temp_free_tl(stl); reg = load_gpr(ctx, a->r); if (a->addr) { @@ -2542,7 +2487,6 @@ static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a) } else { gen_helper_itlbp(cpu_env, addr, reg); } - tcg_temp_free_tl(addr); /* Exit TB for TLB change if mmu is enabled. */ if (ctx->tb_flags & PSW_C) { @@ -2571,7 +2515,6 @@ static bool trans_lpa(DisasContext *ctx, arg_ldst *a) save_gpr(ctx, a->b, ofs); } save_gpr(ctx, a->t, paddr); - tcg_temp_free(paddr); return nullify_end(ctx); #endif @@ -2822,8 +2765,6 @@ static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a) tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero); tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero); - tcg_temp_free(addc); - /* Write back the result register. */ save_gpr(ctx, a->t, dest); @@ -2845,10 +2786,6 @@ static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a) ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv); } - tcg_temp_free(add1); - tcg_temp_free(add2); - tcg_temp_free(dest); - return nullify_end(ctx); } @@ -3103,7 +3040,6 @@ static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1, cond = do_cond(c * 2 + f, dest, cb_msb, sv); save_gpr(ctx, r, dest); - tcg_temp_free(dest); return do_cbranch(ctx, disp, n, &cond); } @@ -3131,7 +3067,6 @@ static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a) tcg_gen_shl_reg(tmp, tcg_r, cpu_sar); cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); - tcg_temp_free(tmp); return do_cbranch(ctx, a->disp, a->n, &cond); } @@ -3147,7 +3082,6 @@ static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a) tcg_gen_shli_reg(tmp, tcg_r, a->p); cond = cond_make_0(a->c ? 
TCG_COND_GE : TCG_COND_LT, tmp); - tcg_temp_free(tmp); return do_cbranch(ctx, a->disp, a->n, &cond); } @@ -3200,7 +3134,6 @@ static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a) tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2)); tcg_gen_rotr_i32(t32, t32, cpu_sar); tcg_gen_extu_i32_reg(dest, t32); - tcg_temp_free_i32(t32); } else { TCGv_i64 t = tcg_temp_new_i64(); TCGv_i64 s = tcg_temp_new_i64(); @@ -3209,9 +3142,6 @@ static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a) tcg_gen_extu_reg_i64(s, cpu_sar); tcg_gen_shr_i64(t, t, s); tcg_gen_trunc_i64_reg(dest, t); - - tcg_temp_free_i64(t); - tcg_temp_free_i64(s); } save_gpr(ctx, a->t, dest); @@ -3243,13 +3173,11 @@ static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a) tcg_gen_trunc_reg_i32(t32, t2); tcg_gen_rotri_i32(t32, t32, sa); tcg_gen_extu_i32_reg(dest, t32); - tcg_temp_free_i32(t32); } else { TCGv_i64 t64 = tcg_temp_new_i64(); tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]); tcg_gen_shri_i64(t64, t64, sa); tcg_gen_trunc_i64_reg(dest, t64); - tcg_temp_free_i64(t64); } save_gpr(ctx, a->t, dest); @@ -3283,7 +3211,6 @@ static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a) tcg_gen_shr_reg(dest, src, tmp); tcg_gen_extract_reg(dest, dest, 0, len); } - tcg_temp_free(tmp); save_gpr(ctx, a->t, dest); /* Install the new nullification. */ @@ -3403,7 +3330,8 @@ static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c, /* Convert big-endian bit numbering in SAR to left-shift. */ tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1); - mask = tcg_const_reg(msb + (msb - 1)); + mask = tcg_temp_new(); + tcg_gen_movi_reg(mask, msb + (msb - 1)); tcg_gen_and_reg(tmp, val, mask); if (rs) { tcg_gen_shl_reg(mask, mask, shift); @@ -3413,9 +3341,6 @@ static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c, } else { tcg_gen_shl_reg(dest, tmp, shift); } - tcg_temp_free(shift); - tcg_temp_free(mask); - tcg_temp_free(tmp); save_gpr(ctx, rt, dest); /* Install the new nullification. 
*/ @@ -3490,7 +3415,6 @@ static bool trans_be(DisasContext *ctx, arg_be *a) tcg_gen_mov_i64(cpu_iasq_b, new_spc); nullify_set(ctx, a->n); } - tcg_temp_free_i64(new_spc); tcg_gen_lookup_and_goto_ptr(); ctx->base.is_jmp = DISAS_NORETURN; return nullify_end(ctx); @@ -3624,12 +3548,16 @@ static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a) { + uint64_t ret; + + if (TARGET_REGISTER_BITS == 64) { + ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */ + } else { + ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */ + } + nullify_over(ctx); -#if TARGET_REGISTER_BITS == 64 - save_frd(0, tcg_const_i64(0x13080000000000ULL)); /* PA8700 (PCX-W2) */ -#else - save_frd(0, tcg_const_i64(0x0f080000000000ULL)); /* PA7300LC (PCX-L2) */ -#endif + save_frd(0, tcg_constant_i64(ret)); return nullify_end(ctx); } @@ -3879,9 +3807,6 @@ static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a) gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc); - tcg_temp_free_i32(ta); - tcg_temp_free_i32(tb); - return nullify_end(ctx); } @@ -3899,9 +3824,6 @@ static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a) gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc); - tcg_temp_free_i64(ta); - tcg_temp_free_i64(tb); - return nullify_end(ctx); } @@ -3961,7 +3883,6 @@ static bool trans_ftest(DisasContext *ctx, arg_ftest *a) tcg_gen_extract_reg(t, t, 21 - cbit, 1); ctx->null_cond = cond_make_0(TCG_COND_NE, t); - tcg_temp_free(t); } done: @@ -4022,8 +3943,6 @@ static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a) y = load_frw0_i64(a->r2); tcg_gen_mul_i64(x, x, y); save_frd(a->t, x); - tcg_temp_free_i64(x); - tcg_temp_free_i64(y); return nullify_end(ctx); } @@ -4097,10 +4016,7 @@ static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a) gen_helper_fmpyfadd_s(x, cpu_env, x, y, z); } - tcg_temp_free_i32(y); - tcg_temp_free_i32(z); save_frw_i32(a->t, x); - tcg_temp_free_i32(x); return nullify_end(ctx); } @@ -4119,10 +4035,7 @@ static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a) gen_helper_fmpyfadd_d(x, cpu_env, x, y, z); } - tcg_temp_free_i64(y); - tcg_temp_free_i64(z); save_frd(a->t, x); - tcg_temp_free_i64(x); return nullify_end(ctx); } @@ -4237,13 +4150,11 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) } } - /* Free any temporaries allocated. */ + /* Forget any temporaries allocated. 
*/ for (i = 0, n = ctx->ntempr; i < n; ++i) { - tcg_temp_free(ctx->tempr[i]); ctx->tempr[i] = NULL; } for (i = 0, n = ctx->ntempl; i < n; ++i) { - tcg_temp_free_tl(ctx->templ[i]); ctx->templ[i] = NULL; } ctx->ntempr = 0; @@ -4359,7 +4270,7 @@ static const TranslatorOps hppa_tr_ops = { .disas_log = hppa_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; diff --git a/target/i386/cpu-dump.c b/target/i386/cpu-dump.c index 08ac957e99..40697064d9 100644 --- a/target/i386/cpu-dump.c +++ b/target/i386/cpu-dump.c @@ -335,10 +335,7 @@ void x86_cpu_dump_local_apic_state(CPUState *cs, int flags) } qemu_printf(" PPR 0x%02x\n", apic_get_ppr(s)); } -#else -void x86_cpu_dump_local_apic_state(CPUState *cs, int flags) -{ -} + #endif /* !CONFIG_USER_ONLY */ #define DUMP_CODE_BYTES_TOTAL 50 diff --git a/target/i386/cpu-param.h b/target/i386/cpu-param.h index 527f7fa496..0df2cebcdd 100644 --- a/target/i386/cpu-param.h +++ b/target/i386/cpu-param.h @@ -23,11 +23,6 @@ # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif #define TARGET_PAGE_BITS 12 -#define NB_MMU_MODES 5 - -#ifndef CONFIG_USER_ONLY -# define TARGET_TB_PCREL 1 -#endif //// --- Begin LibAFL code --- diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 4d2b8d0444..1242bd541a 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -29,13 +29,14 @@ #include "kvm/kvm_i386.h" #include "sev.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "qapi/qapi-visit-machine.h" #include "qapi/qmp/qerror.h" -#include "qapi/qapi-commands-machine-target.h" #include "standard-headers/asm-x86/kvm_para.h" #include "hw/qdev-properties.h" #include "hw/i386/topology.h" #ifndef CONFIG_USER_ONLY +#include "qapi/qapi-commands-machine-target.h" #include "exec/address-spaces.h" #include "hw/boards.h" #include "hw/i386/sgx-epc.h" @@ -44,6 +45,8 @@ #include "disas/capstone.h" #include "cpu-internal.h" +static void x86_cpu_realizefn(DeviceState *dev, Error **errp); + /* Helpers for building CPUID[2] descriptors: */ struct CPUID2CacheDescriptorInfo { @@ -661,8 +664,10 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | \ /* CPUID_7_0_ECX_OSPKE is dynamic */ \ CPUID_7_0_ECX_LA57 | CPUID_7_0_ECX_PKS | CPUID_7_0_ECX_VAES) -#define TCG_7_0_EDX_FEATURES 0 -#define TCG_7_1_EAX_FEATURES 0 +#define TCG_7_0_EDX_FEATURES CPUID_7_0_EDX_FSRM +#define TCG_7_1_EAX_FEATURES (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | \ + CPUID_7_1_EAX_FSRC) +#define TCG_7_1_EDX_FEATURES 0 #define TCG_APM_FEATURES 0 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) @@ -804,7 +809,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "pfthreshold", "avic", NULL, "v-vmsave-vmload", "vgif", NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, + NULL, "vnmi", NULL, NULL, "svme-addr-chk", NULL, NULL, NULL, }, .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, @@ -858,7 +863,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "tsx-ldtrk", NULL, NULL /* pconfig */, "arch-lbr", NULL, NULL, "amx-bf16", "avx512-fp16", "amx-tile", "amx-int8", "spec-ctrl", "stibp", - NULL, "arch-capabilities", "core-capability", "ssbd", + "flush-l1d", "arch-capabilities", "core-capability", "ssbd", }, .cpuid = { .eax = 7, @@ -871,11 +876,11 @@ FeatureWordInfo 
feature_word_info[FEATURE_WORDS] = { .type = CPUID_FEATURE_WORD, .feat_names = { NULL, NULL, NULL, NULL, - "avx-vnni", "avx512-bf16", NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, + "avx-vnni", "avx512-bf16", NULL, "cmpccxadd", + NULL, NULL, "fzrm", "fsrs", + "fsrc", NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, "amx-fp16", NULL, "avx-ifma", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, @@ -886,6 +891,25 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { }, .tcg_features = TCG_7_1_EAX_FEATURES, }, + [FEAT_7_1_EDX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, NULL, NULL, + "avx-vnni-int8", "avx-ne-convert", NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, "prefetchiti", NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 7, + .needs_ecx = true, .ecx = 1, + .reg = R_EDX, + }, + .tcg_features = TCG_7_1_EDX_FEATURES, + }, [FEAT_8000_0007_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { @@ -909,15 +933,31 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { NULL, NULL, NULL, NULL, NULL, "wbnoinvd", NULL, NULL, "ibpb", NULL, "ibrs", "amd-stibp", - NULL, NULL, NULL, NULL, + NULL, "stibp-always-on", NULL, NULL, NULL, NULL, NULL, NULL, "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, - NULL, NULL, NULL, NULL, + "amd-psfd", NULL, NULL, NULL, }, .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, .tcg_features = 0, .unmigratable_flags = 0, }, + [FEAT_8000_0021_EAX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "no-nested-data-bp", NULL, "lfence-always-serializing", NULL, + NULL, NULL, "null-sel-clr-base", NULL, + "auto-ibrs", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { .eax = 0x80000021, .reg = R_EAX, }, + .tcg_features = 0, + .unmigratable_flags = 0, + }, [FEAT_XSAVE] = { .type = CPUID_FEATURE_WORD, .feat_names = { @@ -1010,7 +1050,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl", "taa-no", NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, + NULL, "fb-clear", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -1596,6 +1636,7 @@ typedef struct X86CPUVersionDefinition { const char *alias; const char *note; PropValue *props; + const CPUCaches *const cache_info; } X86CPUVersionDefinition; /* Base definition for a CPU model */ @@ -1704,6 +1745,56 @@ static const CPUCaches epyc_cache_info = { }, }; +static CPUCaches epyc_v4_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 64 * KiB, + .line_size = 64, + .associativity = 4, + .partitions = 1, + .sets = 256, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 8 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 8192, + .lines_per_tag = 1, + .self_init = true, + 
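The new FEAT_7_1_EDX word above is populated from CPUID leaf 7 with ECX = 1 (hence .needs_ecx), the same subleaf whose EAX half carries fzrm/fsrs/fsrc. On GCC/Clang x86 hosts the subleaf can be probed directly; the bit positions below are read off the .feat_names tables shown (fzrm is EAX bit 10, avx-vnni-int8 is EDX bit 4):

```c
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned eax, ebx, ecx, edx;

    /* Leaf 7, subleaf 1; __get_cpuid_count returns 0 if unsupported. */
    if (!__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx)) {
        puts("CPUID leaf 7 not available");
        return 0;
    }
    printf("fzrm:           %u\n", (eax >> 10) & 1);
    printf("fsrs:           %u\n", (eax >> 11) & 1);
    printf("fsrc:           %u\n", (eax >> 12) & 1);
    printf("avx-vnni-int8:  %u\n", (edx >> 4) & 1);
    printf("avx-ne-convert: %u\n", (edx >> 5) & 1);
    return 0;
}
```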
.inclusive = true, + .complex_indexing = false, + }, +}; + static const CPUCaches epyc_rome_cache_info = { .l1d_cache = &(CPUCacheInfo) { .type = DATA_CACHE, @@ -1754,6 +1845,56 @@ static const CPUCaches epyc_rome_cache_info = { }, }; +static const CPUCaches epyc_rome_v3_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 16 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 16384, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = false, + }, +}; + static const CPUCaches epyc_milan_cache_info = { .l1d_cache = &(CPUCacheInfo) { .type = DATA_CACHE, @@ -1804,6 +1945,106 @@ static const CPUCaches epyc_milan_cache_info = { }, }; +static const CPUCaches epyc_milan_v2_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 32 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 32768, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = false, + }, +}; + +static const CPUCaches epyc_genoa_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 1 * MiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 2048, + .lines_per_tag = 1, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 32 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 32768, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = false, + }, +}; + /* The following VMX features are not supported by KVM and are left out in the * CPU definitions: * @@ -3467,6 +3708,135 @@ static const 
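Each CPUCacheInfo table above keeps size, sets, associativity, partitions, and line_size mutually consistent (size = sets × associativity × partitions × line_size), which is what the guest-visible CPUID cache descriptors are derived from. A quick standalone check against the Genoa L3 figures in the table:

```c
#include <assert.h>
#include <stdio.h>

#define KiB 1024ULL
#define MiB (1024ULL * KiB)

int main(void)
{
    /* epyc_genoa_cache_info.l3_cache figures from the table above. */
    unsigned long long size = 32 * MiB, line = 64, assoc = 16,
                       partitions = 1, sets = 32768;

    assert(size == sets * assoc * partitions * line);
    printf("L3 geometry consistent: %llu bytes\n", size);
    return 0;
}
```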
X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ } } }, + { + .name = "SapphireRapids", + .level = 0x20, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 143, + .stepping = 4, + /* + * please keep the ascending order so that we can have a clear view of + * bit position of each feature. + */ + .features[FEAT_1_EDX] = + CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | + CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | + CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | + CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | + CPUID_SSE | CPUID_SSE2, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 | + CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID | CPUID_EXT_SSE41 | + CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | + CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | + CPUID_EXT_XSAVE | CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_WBNOINVD, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | + CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | + CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_RTM | + CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | + CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | + CPUID_7_0_EBX_AVX512IFMA | CPUID_7_0_EBX_CLFLUSHOPT | + CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_SHA_NI | + CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | + CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | + CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | + CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57 | + CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_BUS_LOCK_DETECT, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_SERIALIZE | + CPUID_7_0_EDX_TSX_LDTRK | CPUID_7_0_EDX_AMX_BF16 | + CPUID_7_0_EDX_AVX512_FP16 | CPUID_7_0_EDX_AMX_TILE | + CPUID_7_0_EDX_AMX_INT8 | CPUID_7_0_EDX_SPEC_CTRL | + CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD, + .features[FEAT_ARCH_CAPABILITIES] = + MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | + MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | + MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES | CPUID_D_1_EAX_XFD, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_7_1_EAX] = + CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_AVX512_BF16 | + CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC, + .features[FEAT_VMX_BASIC] = + MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = + MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_PAGE_WALK_LENGTH_5 | + MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | + MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | 
MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | + MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = + VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | + VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | + VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = + VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | + VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | + VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | + VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | + VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_WBINVD_EXITING | + VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | + VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML | + VMX_SECONDARY_EXEC_XSAVES, + .features[FEAT_VMX_VMFUNC] = + MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Intel Xeon Processor (SapphireRapids)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { /* end of list */ }, + }, + }, { .name = "Denverton", .level = 21, @@ -3959,6 +4329,15 @@ static const X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ } } }, + { + .version = 4, + .props = (PropValue[]) { + { "model-id", + "AMD EPYC-v4 Processor" }, + { /* end of list */ } + }, + .cache_info = &epyc_v4_cache_info + }, { /* end of list */ } } }, @@ -4078,6 +4457,25 @@ static const X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ } } }, + { + .version = 3, + .props = (PropValue[]) { + { "model-id", + "AMD EPYC-Rome-v3 Processor" }, + { /* end of list */ } + }, + .cache_info = &epyc_rome_v3_cache_info + }, + { + .version = 4, + .props = (PropValue[]) { + /* Erratum 1386 */ + { "model-id", + "AMD EPYC-Rome-v4 Processor (no XSAVES)" }, + { "xsaves", "off" }, + { /* end of list */ } + }, + }, { /* end of list */ } } }, @@ -4135,6 +4533,98 @@ static const X86CPUDefinition builtin_x86_defs[] = { .xlevel = 0x8000001E, .model_id = "AMD EPYC-Milan Processor", .cache_info = &epyc_milan_cache_info, + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version 
= 2, + .props = (PropValue[]) { + { "model-id", + "AMD EPYC-Milan-v2 Processor" }, + { "vaes", "on" }, + { "vpclmulqdq", "on" }, + { "stibp-always-on", "on" }, + { "amd-psfd", "on" }, + { "no-nested-data-bp", "on" }, + { "lfence-always-serializing", "on" }, + { "null-sel-clr-base", "on" }, + { /* end of list */ } + }, + .cache_info = &epyc_milan_v2_cache_info + }, + { /* end of list */ } + } + }, + { + .name = "EPYC-Genoa", + .level = 0xd, + .vendor = CPUID_VENDOR_AMD, + .family = 25, + .model = 17, + .stepping = 0, + .features[FEAT_1_EDX] = + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | + CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | + CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | + CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | + CPUID_VME | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | + CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | + CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_PCID | CPUID_EXT_CX16 | CPUID_EXT_FMA | + CPUID_EXT_SSSE3 | CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | + CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | + CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | + CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | + CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | + CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | + CPUID_8000_0008_EBX_IBRS | CPUID_8000_0008_EBX_STIBP | + CPUID_8000_0008_EBX_STIBP_ALWAYS_ON | + CPUID_8000_0008_EBX_AMD_SSBD | CPUID_8000_0008_EBX_AMD_PSFD, + .features[FEAT_8000_0021_EAX] = + CPUID_8000_0021_EAX_No_NESTED_DATA_BP | + CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING | + CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE | + CPUID_8000_0021_EAX_AUTO_IBRS, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | + CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | + CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_AVX512F | + CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA | + CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_SHA_NI | + CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | + CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | + CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | + CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57 | + CPUID_7_0_ECX_RDPID, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_FSRM, + .features[FEAT_7_1_EAX] = + CPUID_7_1_EAX_AVX512_BF16, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_SVM] = + CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE | CPUID_SVM_VNMI | + CPUID_SVM_SVME_ADDR_CHK, + .xlevel = 0x80000022, + .model_id = "AMD EPYC-Genoa Processor", + .cache_info = &epyc_genoa_cache_info, }, }; @@ -4185,6 +4675,25 @@ static Property max_x86_cpu_properties[] = { DEFINE_PROP_END_OF_LIST() }; +static void max_x86_cpu_realize(DeviceState *dev, 
Error **errp) +{ + Object *obj = OBJECT(dev); + + if (!object_property_get_int(obj, "family", &error_abort)) { + if (X86_CPU(obj)->env.features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { + object_property_set_int(obj, "family", 15, &error_abort); + object_property_set_int(obj, "model", 107, &error_abort); + object_property_set_int(obj, "stepping", 1, &error_abort); + } else { + object_property_set_int(obj, "family", 6, &error_abort); + object_property_set_int(obj, "model", 6, &error_abort); + object_property_set_int(obj, "stepping", 3, &error_abort); + } + } + + x86_cpu_realizefn(dev, errp); +} + static void max_x86_cpu_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -4196,6 +4705,7 @@ static void max_x86_cpu_class_init(ObjectClass *oc, void *data) "Enables all features supported by the accelerator in the current host"; device_class_set_props(dc, max_x86_cpu_properties); + dc->realize = max_x86_cpu_realize; } static void max_x86_cpu_initfn(Object *obj) @@ -4214,15 +4724,6 @@ static void max_x86_cpu_initfn(Object *obj) */ object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD, &error_abort); -#ifdef TARGET_X86_64 - object_property_set_int(OBJECT(cpu), "family", 15, &error_abort); - object_property_set_int(OBJECT(cpu), "model", 107, &error_abort); - object_property_set_int(OBJECT(cpu), "stepping", 1, &error_abort); -#else - object_property_set_int(OBJECT(cpu), "family", 6, &error_abort); - object_property_set_int(OBJECT(cpu), "model", 6, &error_abort); - object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort); -#endif object_property_set_str(OBJECT(cpu), "model-id", "QEMU TCG CPU version " QEMU_HW_VERSION, &error_abort); @@ -4713,40 +5214,6 @@ static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, visit_type_strList(v, "unavailable-features", &result, errp); } -/* Check for missing features that may prevent the CPU class from - * running using the current machine and accelerator. - */ -static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, - strList **list) -{ - strList **tail = list; - X86CPU *xc; - Error *err = NULL; - - if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { - QAPI_LIST_APPEND(tail, g_strdup("kvm")); - return; - } - - xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); - - x86_cpu_expand_features(xc, &err); - if (err) { - /* Errors at x86_cpu_expand_features should never happen, - * but in case it does, just report the model as not - * runnable at all using the "type" property. - */ - QAPI_LIST_APPEND(tail, g_strdup("type")); - error_free(err); - } - - x86_cpu_filter_features(xc, false); - - x86_cpu_list_feature_names(xc->filtered_features, tail); - - object_unref(OBJECT(xc)); -} - /* Print all cpuid feature names in featureset */ static void listflags(GList *features) @@ -4875,6 +5342,42 @@ void x86_cpu_list(void) g_list_free(names); } +#ifndef CONFIG_USER_ONLY + +/* Check for missing features that may prevent the CPU class from + * running using the current machine and accelerator. 
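The versioned definitions above layer property overrides on top of the base model as NULL-terminated PropValue lists. A minimal standalone sketch of how such a list is walked (in QEMU each entry is applied with object_property_parse(); the names below are illustrative):

    #include <stdio.h>

    typedef struct PropValue {
        const char *prop, *value;
    } PropValue;

    /* Walk the override list; the empty sentinel entry terminates it. */
    static void apply_props(const PropValue *pv)
    {
        for (; pv->prop; pv++) {
            printf("set %s = %s\n", pv->prop, pv->value);
        }
    }

    int main(void)
    {
        static const PropValue milan_v2[] = {
            { "vaes", "on" },
            { "stibp-always-on", "on" },
            { NULL, NULL },
        };
        apply_props(milan_v2);
        return 0;
    }

Selecting "-cpu EPYC-Milan-v2" on the command line resolves to the same base model with these overrides applied on top.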
+ */ +static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, + strList **list) +{ + strList **tail = list; + X86CPU *xc; + Error *err = NULL; + + if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { + QAPI_LIST_APPEND(tail, g_strdup("kvm")); + return; + } + + xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); + + x86_cpu_expand_features(xc, &err); + if (err) { + /* Errors at x86_cpu_expand_features should never happen, + * but in case it does, just report the model as not + * runnable at all using the "type" property. + */ + QAPI_LIST_APPEND(tail, g_strdup("type")); + error_free(err); + } + + x86_cpu_filter_features(xc, false); + + x86_cpu_list_feature_names(xc->filtered_features, tail); + + object_unref(OBJECT(xc)); +} + static void x86_cpu_definition_entry(gpointer data, gpointer user_data) { ObjectClass *oc = data; @@ -4915,6 +5418,8 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) return cpu_list; } +#endif /* !CONFIG_USER_ONLY */ + uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, bool migratable_only) { @@ -5057,6 +5562,31 @@ static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) assert(vdef->version == version); } +static const CPUCaches *x86_cpu_get_versioned_cache_info(X86CPU *cpu, + X86CPUModel *model) +{ + const X86CPUVersionDefinition *vdef; + X86CPUVersion version = x86_cpu_model_resolve_version(model); + const CPUCaches *cache_info = model->cpudef->cache_info; + + if (version == CPU_VERSION_LEGACY) { + return cache_info; + } + + for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { + if (vdef->cache_info) { + cache_info = vdef->cache_info; + } + + if (vdef->version == version) { + break; + } + } + + assert(vdef->version == version); + return cache_info; +} + /* * Load data from X86CPUDefinition into a X86CPU object. * Only for builtin_x86_defs models initialized with x86_register_cpudef_types. @@ -5089,7 +5619,7 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) } /* legacy-cache defaults to 'off' if CPU model provides cache info */ - cpu->legacy_cache = !def->cache_info; + cpu->legacy_cache = !x86_cpu_get_versioned_cache_info(cpu, model); env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; @@ -5386,9 +5916,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } } else if (count == 1) { *eax = env->features[FEAT_7_1_EAX]; + *edx = env->features[FEAT_7_1_EDX]; *ebx = 0; *ecx = 0; - *edx = 0; } else { *eax = 0; *ebx = 0; @@ -5583,8 +6113,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } else { *eax &= env->features[FEAT_SGX_12_1_EAX]; *ebx &= 0; /* ebx reserve */ - *ecx &= env->features[FEAT_XSAVE_XSS_LO]; - *edx &= env->features[FEAT_XSAVE_XSS_HI]; + *ecx &= env->features[FEAT_XSAVE_XCR0_LO]; + *edx &= env->features[FEAT_XSAVE_XCR0_HI]; /* FP and SSE are always allowed regardless of XSAVE/XCR0. */ *ecx |= XSTATE_FP_MASK | XSTATE_SSE_MASK; @@ -5622,7 +6152,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, break; } case 0x1D: { - /* AMX TILE */ + /* AMX TILE, for now hardcoded for Sapphire Rapids */ *eax = 0; *ebx = 0; *ecx = 0; @@ -5643,7 +6173,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, break; } case 0x1E: { - /* AMX TMUL */ + /* AMX TMUL, for now hardcoded for Sapphire Rapids */ *eax = 0; *ebx = 0; *ecx = 0; @@ -5852,10 +6382,14 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, if (sev_enabled()) { *eax = 0x2; *eax |= sev_es_enabled() ?
0x8 : 0; - *ebx = sev_get_cbit_position(); - *ebx |= sev_get_reduced_phys_bits() << 6; + *ebx = sev_get_cbit_position() & 0x3f; /* EBX[5:0] */ + *ebx |= (sev_get_reduced_phys_bits() & 0x3f) << 6; /* EBX[11:6] */ } break; + case 0x80000021: + *eax = env->features[FEAT_8000_0021_EAX]; + *ebx = *ecx = *edx = 0; + break; default: /* reserved values: zero */ *eax = 0; @@ -6285,6 +6819,10 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); } + if (env->features[FEAT_8000_0021_EAX]) { + x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x80000021); + } + /* SGX requires CPUID[0x12] for EPC enumeration */ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX) { x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x12); @@ -6404,6 +6942,11 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) static bool ht_warned; unsigned requested_lbr_fmt; + /* Use pc-relative instructions in system-mode */ +#ifndef CONFIG_USER_ONLY + cs->tcg_cflags |= CF_PCREL; +#endif + if (cpu->apic_id == UNASSIGNED_APIC_ID) { error_setg(errp, "apic-id property was not initialized properly"); return; @@ -6563,14 +7106,17 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) /* Cache information initialization */ if (!cpu->legacy_cache) { - if (!xcc->model || !xcc->model->cpudef->cache_info) { + const CPUCaches *cache_info = + x86_cpu_get_versioned_cache_info(cpu, xcc->model); + + if (!xcc->model || !cache_info) { g_autofree char *name = x86_cpu_class_get_model_name(xcc); error_setg(errp, "CPU model '%s' doesn't support legacy-cache=off", name); return; } env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd = - *xcc->model->cpudef->cache_info; + *cache_info; } else { /* Build legacy cache information */ env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache; @@ -7070,6 +7616,7 @@ static Property x86_cpu_properties[] = { * own cache information (see x86_cpu_load_def()). 
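x86_cpu_get_versioned_cache_info() above encodes a subtle inheritance rule: a version that does not define its own cache_info keeps the most recent one defined by an earlier version. A standalone sketch of that walk, with strings standing in for CPUCaches:

    #include <stdio.h>

    typedef struct {
        int version;
        const char *cache_info;
    } VersionDef;

    /* Remember the latest cache_info seen, stop at the requested version. */
    static const char *resolve_cache(const VersionDef *v, int version,
                                     const char *base)
    {
        const char *cache = base;

        for (; v->version; v++) {
            if (v->cache_info) {
                cache = v->cache_info;
            }
            if (v->version == version) {
                break;
            }
        }
        return cache;
    }

    int main(void)
    {
        static const VersionDef rome[] = {
            { 1, NULL },             /* v1/v2 use the base cache info */
            { 2, NULL },
            { 3, "rome_v3_cache" },  /* v3 introduces its own */
            { 4, NULL },             /* v4 inherits v3's */
            { 0, NULL },
        };

        printf("%s\n", resolve_cache(rome, 4, "rome_base_cache"));
        return 0;
    }

This is why EPYC-Rome-v4 above, which only turns off xsaves, still gets the epyc_rome_v3_cache_info introduced by v3.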
*/ DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), + DEFINE_PROP_BOOL("xen-vapic", X86CPU, xen_vapic, false), /* * From "Requirements for Implementing the Microsoft diff --git a/target/i386/cpu.h b/target/i386/cpu.h index d4bc19577a..7201a71de8 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -26,6 +26,9 @@ #include "exec/cpu-defs.h" #include "qapi/qapi-types-common.h" #include "qemu/cpu-float.h" +#include "qemu/timer.h" + +#define XEN_NR_VIRQS 24 /* The x86 has a strong memory model with some store-after-load re-ordering */ #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) @@ -597,6 +600,7 @@ typedef enum FeatureWord { FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */ + FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */ FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */ @@ -623,6 +627,7 @@ typedef enum FeatureWord { FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */ FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */ FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */ + FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */ FEATURE_WORDS, } FeatureWord; @@ -769,6 +774,7 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, #define CPUID_SVM_AVIC (1U << 13) #define CPUID_SVM_V_VMSAVE_VMLOAD (1U << 15) #define CPUID_SVM_VGIF (1U << 16) +#define CPUID_SVM_VNMI (1U << 25) #define CPUID_SVM_SVME_ADDR_CHK (1U << 28) /* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ @@ -881,14 +887,20 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, #define CPUID_7_0_EDX_TSX_LDTRK (1U << 16) /* Architectural LBRs */ #define CPUID_7_0_EDX_ARCH_LBR (1U << 19) +/* AMX_BF16 instruction */ +#define CPUID_7_0_EDX_AMX_BF16 (1U << 22) /* AVX512_FP16 instruction */ #define CPUID_7_0_EDX_AVX512_FP16 (1U << 23) /* AMX tile (two-dimensional register) */ #define CPUID_7_0_EDX_AMX_TILE (1U << 24) +/* AMX_INT8 instruction */ +#define CPUID_7_0_EDX_AMX_INT8 (1U << 25) /* Speculation Control */ #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) /* Single Thread Indirect Branch Predictors */ #define CPUID_7_0_EDX_STIBP (1U << 27) +/* Flush L1D cache */ +#define CPUID_7_0_EDX_FLUSH_L1D (1U << 28) /* Arch Capabilities */ #define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) /* Core Capability */ @@ -900,6 +912,26 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, #define CPUID_7_1_EAX_AVX_VNNI (1U << 4) /* AVX512 BFloat16 Instruction */ #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) +/* CMPCCXADD Instructions */ +#define CPUID_7_1_EAX_CMPCCXADD (1U << 7) +/* Fast Zero REP MOVS */ +#define CPUID_7_1_EAX_FZRM (1U << 10) +/* Fast Short REP STOS */ +#define CPUID_7_1_EAX_FSRS (1U << 11) +/* Fast Short REP CMPS/SCAS */ +#define CPUID_7_1_EAX_FSRC (1U << 12) +/* Support Tile Computational Operations on FP16 Numbers */ +#define CPUID_7_1_EAX_AMX_FP16 (1U << 21) +/* Support for VPMADD52[H,L]UQ */ +#define CPUID_7_1_EAX_AVX_IFMA (1U << 23) + +/* Support for VPDPB[SU,UU,SS]D[,S] */ +#define CPUID_7_1_EDX_AVX_VNNI_INT8 (1U << 4) +/* AVX NE CONVERT Instructions */ +#define CPUID_7_1_EDX_AVX_NE_CONVERT (1U << 5) +/* PREFETCHIT0/1 Instructions */ +#define CPUID_7_1_EDX_PREFETCHITI (1U << 14) + /* XFD Extend Feature Disabled */ #define CPUID_D_1_EAX_XFD (1U << 4) @@ -918,8 +950,21 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, #define CPUID_8000_0008_EBX_IBRS (1U << 14) /* Single Thread Indirect 
Branch Predictors */ #define CPUID_8000_0008_EBX_STIBP (1U << 15) +/* STIBP mode has enhanced performance and may be left always on */ +#define CPUID_8000_0008_EBX_STIBP_ALWAYS_ON (1U << 17) /* Speculative Store Bypass Disable */ #define CPUID_8000_0008_EBX_AMD_SSBD (1U << 24) +/* Predictive Store Forwarding Disable */ +#define CPUID_8000_0008_EBX_AMD_PSFD (1U << 28) + +/* Processor ignores nested data breakpoints */ +#define CPUID_8000_0021_EAX_No_NESTED_DATA_BP (1U << 0) +/* LFENCE is always serializing */ +#define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2) +/* Null Selector Clears Base */ +#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6) +/* Automatic IBRS */ +#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8) #define CPUID_XSAVE_XSAVEOPT (1U << 0) #define CPUID_XSAVE_XSAVEC (1U << 1) @@ -973,6 +1018,7 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, #define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6) #define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7) #define MSR_ARCH_CAP_TAA_NO (1U << 8) +#define MSR_ARCH_CAP_FB_CLEAR (1U << 17) #define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5) @@ -1788,6 +1834,20 @@ typedef struct CPUArchState { #endif #if defined(CONFIG_KVM) struct kvm_nested_state *nested_state; + MemoryRegion *xen_vcpu_info_mr; + void *xen_vcpu_info_hva; + uint64_t xen_vcpu_info_gpa; + uint64_t xen_vcpu_info_default_gpa; + uint64_t xen_vcpu_time_info_gpa; + uint64_t xen_vcpu_runstate_gpa; + uint8_t xen_vcpu_callback_vector; + bool xen_callback_asserted; + uint16_t xen_virq[XEN_NR_VIRQS]; + uint64_t xen_singleshot_timer_ns; + QEMUTimer *xen_singleshot_timer; + uint64_t xen_periodic_timer_period; + QEMUTimer *xen_periodic_timer; + QemuMutex xen_timers_lock; #endif #if defined(CONFIG_HVF) HVFX86LazyFlags hvf_lflags; @@ -1964,6 +2024,8 @@ struct ArchCPU { int32_t thread_id; int32_t hv_max_vps; + + bool xen_vapic; }; @@ -1987,9 +2049,6 @@ void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags); -hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, - MemTxAttrs *attrs); - int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); @@ -1997,6 +2056,8 @@ void x86_cpu_list(void); int cpu_x86_support_mca_broadcast(CPUX86State *env); #ifndef CONFIG_USER_ONLY +hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, + MemTxAttrs *attrs); int cpu_get_pic_interrupt(CPUX86State *s); /* MSDOS compatibility mode FPU exception support */ @@ -2314,9 +2375,6 @@ static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc) } } -/* mem_helper.c */ -void helper_lock_init(void); - /* svm_helper.c */ #ifdef CONFIG_USER_ONLY static inline void @@ -2359,12 +2417,16 @@ typedef int X86CPUVersion; */ void x86_cpu_set_default_version(X86CPUVersion version); +#ifndef CONFIG_USER_ONLY + #define APIC_DEFAULT_ADDRESS 0xfee00000 #define APIC_SPACE_SIZE 0x100000 /* cpu-dump.c */ void x86_cpu_dump_local_apic_state(CPUState *cs, int flags); +#endif + /* cpu.c */ bool cpu_is_bsp(X86CPU *cpu); diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c index 786971284a..ebb000df6a 100644 --- a/target/i386/gdbstub.c +++ b/target/i386/gdbstub.c @@ -19,7 +19,7 @@ */ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "include/gdbstub/helpers.h" #ifdef TARGET_X86_64 static const int gpr_map[16] = { diff --git a/target/i386/hax/hax-i386.h b/target/i386/hax/hax-i386.h index efbb346238..409ebdb4af 100644 
--- a/target/i386/hax/hax-i386.h +++ b/target/i386/hax/hax-i386.h @@ -49,7 +49,6 @@ struct hax_vm { struct hax_vcpu_state **vcpus; }; -#ifdef NEED_CPU_H /* Functions exported to host specific mode */ hax_fd hax_vcpu_get_fd(CPUArchState *env); int valid_hax_tunnel_size(uint16_t size); @@ -66,7 +65,6 @@ int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set); int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set); int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set); -#endif int hax_vm_destroy(struct hax_vm *vm); int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap); diff --git a/target/i386/helper.c b/target/i386/helper.c index 0ac2da066d..8857444819 100644 --- a/target/i386/helper.c +++ b/target/i386/helper.c @@ -520,7 +520,7 @@ static inline target_ulong get_memio_eip(CPUX86State *env) } /* Per x86_restore_state_to_opc. */ - if (TARGET_TB_PCREL) { + if (cs->tcg_cflags & CF_PCREL) { return (env->eip & TARGET_PAGE_MASK) | data[0]; } else { return data[0] - env->segs[R_CS].base; diff --git a/target/i386/host-cpu.c b/target/i386/host-cpu.c index 10f8aba86e..92ecb7254b 100644 --- a/target/i386/host-cpu.c +++ b/target/i386/host-cpu.c @@ -11,6 +11,7 @@ #include "cpu.h" #include "host-cpu.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "sysemu/sysemu.h" /* Note: Only safe for use on x86(-64) hosts */ diff --git a/target/i386/hvf/hvf-i386.h b/target/i386/hvf/hvf-i386.h index 76e9235524..95b47c1c2e 100644 --- a/target/i386/hvf/hvf-i386.h +++ b/target/i386/hvf/hvf-i386.h @@ -24,11 +24,7 @@ void hvf_handle_io(CPUArchState *, uint16_t, void *, int, int, int); -#ifdef NEED_CPU_H -/* Functions exported to host specific mode */ - /* Host specific functions */ int hvf_inject_interrupt(CPUArchState *env, int vector); -#endif #endif diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 5870301991..de531842f6 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -22,6 +22,7 @@ #include #include "standard-headers/asm-x86/kvm_para.h" +#include "hw/xen/interface/arch-x86/cpuid.h" #include "cpu.h" #include "host-cpu.h" @@ -31,6 +32,7 @@ #include "sysemu/runstate.h" #include "kvm_i386.h" #include "sev.h" +#include "xen-emu.h" #include "hyperv.h" #include "hyperv-proto.h" @@ -42,6 +44,8 @@ #include "qemu/error-report.h" #include "qemu/memalign.h" #include "hw/i386/x86.h" +#include "hw/i386/kvm/xen_evtchn.h" +#include "hw/i386/pc.h" #include "hw/i386/apic.h" #include "hw/i386/apic_internal.h" #include "hw/i386/apic-msidef.h" @@ -49,6 +53,8 @@ #include "hw/i386/x86-iommu.h" #include "hw/i386/e820_memory_layout.h" +#include "hw/xen/xen.h" + #include "hw/pci/pci.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" @@ -352,7 +358,7 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, { struct kvm_cpuid2 *cpuid; uint32_t ret = 0; - uint32_t cpuid_1_edx; + uint32_t cpuid_1_edx, unused; uint64_t bitmask; cpuid = get_supported_cpuid(s); @@ -399,10 +405,20 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, } else if (function == 6 && reg == R_EAX) { ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */ } else if (function == 7 && index == 0 && reg == R_EBX) { + /* Not new instructions, just an optimization. 
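The host_cpuid() probes above fold host-only bits such as ERMS back into the mask that kvm_arch_get_supported_cpuid() reports. A standalone equivalent using the compiler's CPUID helper (x86 GCC/Clang only; ERMS is CPUID.(EAX=7,ECX=0):EBX bit 9):

    #include <stdint.h>
    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        uint32_t eax, ebx, ecx, edx;

        /* Leaf 7, subleaf 0: structured extended feature flags. */
        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
            return 1;   /* leaf not supported on this host */
        }
        printf("ERMS: %s\n", (ebx & (1u << 9)) ? "yes" : "no");
        return 0;
    }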
*/ + uint32_t ebx; + host_cpuid(7, 0, &unused, &ebx, &unused, &unused); + ret |= ebx & CPUID_7_0_EBX_ERMS; + if (host_tsx_broken()) { ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE); } } else if (function == 7 && index == 0 && reg == R_EDX) { + /* Not new instructions, just an optimization. */ + uint32_t edx; + host_cpuid(7, 0, &unused, &unused, &unused, &edx); + ret |= edx & CPUID_7_0_EDX_FSRM; + /* * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts. * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is @@ -411,6 +427,11 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, if (!has_msr_arch_capabs) { ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES; } + } else if (function == 7 && index == 1 && reg == R_EAX) { + /* Not new instructions, just an optimization. */ + uint32_t eax; + host_cpuid(7, 1, &eax, &unused, &unused, &unused); + ret |= eax & (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC); } else if (function == 0xd && index == 0 && (reg == R_EAX || reg == R_EDX)) { /* @@ -1800,7 +1821,82 @@ int kvm_arch_init_vcpu(CPUState *cs) has_msr_hv_hypercall = true; } - if (cpu->expose_kvm) { + if (cs->kvm_state->xen_version) { +#ifdef CONFIG_XEN_EMU + struct kvm_cpuid_entry2 *xen_max_leaf; + + memcpy(signature, "XenVMMXenVMM", 12); + + xen_max_leaf = c = &cpuid_data.entries[cpuid_i++]; + c->function = kvm_base + XEN_CPUID_SIGNATURE; + c->eax = kvm_base + XEN_CPUID_TIME; + c->ebx = signature[0]; + c->ecx = signature[1]; + c->edx = signature[2]; + + c = &cpuid_data.entries[cpuid_i++]; + c->function = kvm_base + XEN_CPUID_VENDOR; + c->eax = cs->kvm_state->xen_version; + c->ebx = 0; + c->ecx = 0; + c->edx = 0; + + c = &cpuid_data.entries[cpuid_i++]; + c->function = kvm_base + XEN_CPUID_HVM_MSR; + /* Number of hypercall-transfer pages */ + c->eax = 1; + /* Hypercall MSR base address */ + if (hyperv_enabled(cpu)) { + c->ebx = XEN_HYPERCALL_MSR_HYPERV; + kvm_xen_init(cs->kvm_state, c->ebx); + } else { + c->ebx = XEN_HYPERCALL_MSR; + } + c->ecx = 0; + c->edx = 0; + + c = &cpuid_data.entries[cpuid_i++]; + c->function = kvm_base + XEN_CPUID_TIME; + c->eax = ((!!tsc_is_stable_and_known(env) << 1) | + (!!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP) << 2)); + /* default=0 (emulate if necessary) */ + c->ebx = 0; + /* guest tsc frequency */ + c->ecx = env->user_tsc_khz; + /* guest tsc incarnation (migration count) */ + c->edx = 0; + + c = &cpuid_data.entries[cpuid_i++]; + c->function = kvm_base + XEN_CPUID_HVM; + xen_max_leaf->eax = kvm_base + XEN_CPUID_HVM; + if (cs->kvm_state->xen_version >= XEN_VERSION(4, 5)) { + c->function = kvm_base + XEN_CPUID_HVM; + + if (cpu->xen_vapic) { + c->eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT; + c->eax |= XEN_HVM_CPUID_X2APIC_VIRT; + } + + c->eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS; + + if (cs->kvm_state->xen_version >= XEN_VERSION(4, 6)) { + c->eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT; + c->ebx = cs->cpu_index; + } + } + + r = kvm_xen_init_vcpu(cs); + if (r) { + return r; + } + + kvm_base += 0x100; +#else /* CONFIG_XEN_EMU */ + /* This should never happen as kvm_arch_init() would have died first. 
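The XEN_VERSION(4, 5) comparisons above rely on the XENVER_version encoding: major version in the upper 16 bits, minor in the lower 16. A one-liner to make the arithmetic concrete, assuming that standard encoding (it matches the "0x4000a for 4.10" example in the property description further down):

    #include <stdio.h>

    #define XEN_VERSION(maj, min) (((maj) << 16) | (min))

    int main(void)
    {
        printf("4.10 -> 0x%x\n", XEN_VERSION(4, 10));  /* 0x4000a */
        printf("4.5  -> 0x%x\n", XEN_VERSION(4, 5));   /* 0x40005 */
        return 0;
    }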
*/ + fprintf(stderr, "Cannot enable Xen CPUID without Xen support\n"); + abort(); +#endif + } else if (cpu->expose_kvm) { memcpy(signature, "KVMKVMKVM\0\0\0", 12); c = &cpuid_data.entries[cpuid_i++]; c->function = KVM_CPUID_SIGNATURE | kvm_base; @@ -2514,6 +2610,24 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } + if (s->xen_version) { +#ifdef CONFIG_XEN_EMU + if (!object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE)) { + error_report("kvm: Xen support only available in PC machine"); + return -ENOTSUP; + } + /* hyperv_enabled() doesn't work yet. */ + uint32_t msr = XEN_HYPERCALL_MSR; + ret = kvm_xen_init(s, msr); + if (ret < 0) { + return ret; + } +#else + error_report("kvm: Xen support not enabled in qemu"); + return -ENOTSUP; +#endif + } + ret = kvm_get_supported_msrs(s); if (ret < 0) { return ret; @@ -4637,6 +4751,15 @@ int kvm_arch_put_registers(CPUState *cpu, int level) kvm_arch_set_tsc_khz(cpu); } +#ifdef CONFIG_XEN_EMU + if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) { + ret = kvm_put_xen_state(cpu); + if (ret < 0) { + return ret; + } + } +#endif + ret = kvm_getput_regs(x86_cpu, 1); if (ret < 0) { return ret; @@ -4736,6 +4859,14 @@ int kvm_arch_get_registers(CPUState *cs) if (ret < 0) { goto out; } +#ifdef CONFIG_XEN_EMU + if (xen_mode == XEN_EMULATE) { + ret = kvm_get_xen_state(cs); + if (ret < 0) { + goto out; + } + } +#endif ret = 0; out: cpu_sync_bndcs_hflags(&cpu->env); @@ -4860,6 +4991,19 @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) kvm_rate_limit_on_bus_lock(); } +#ifdef CONFIG_XEN_EMU + /* + * If the callback is asserted as a GSI (or PCI INTx) then check if + * vcpu_info->evtchn_upcall_pending has been cleared, and deassert + * the callback IRQ if so. Ideally we could hook into the PIC/IOAPIC + * EOI and only resample then, exactly how the VFIO eventfd pairs + * are designed to work for level triggered interrupts. + */ + if (x86_cpu->env.xen_callback_asserted) { + kvm_xen_maybe_deassert_callback(cpu); + } +#endif + /* We need to protect the apic state against concurrent accesses from * different threads in case the userspace irqchip is used. */ if (!kvm_irqchip_in_kernel()) { @@ -5380,6 +5524,11 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER); ret = kvm_handle_wrmsr(cpu, run); break; +#ifdef CONFIG_XEN_EMU + case KVM_EXIT_XEN: + ret = kvm_xen_handle_exit(cpu, &run->xen); + break; +#endif default: fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); ret = -1; @@ -5508,6 +5657,20 @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, } } +#ifdef CONFIG_XEN_EMU + if (xen_mode == XEN_EMULATE) { + int handled = xen_evtchn_translate_pirq_msi(route, address, data); + + /* + * If it was a PIRQ and successfully routed (handled == 0) or it was + * an error (handled < 0), return. If it wasn't a PIRQ, keep going. 
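xen_evtchn_translate_pirq_msi() is used above as a tri-state call: 0 means the MSI was a PIRQ and is now routed, negative means it was a PIRQ but routing failed, and positive means it was not a PIRQ at all. A minimal standalone model of consuming that convention (function names hypothetical):

    #include <stdio.h>

    /* Returns 1 = not ours (fall through), 0 = handled, <0 = error. */
    static int translate_pirq(int is_pirq, int fail)
    {
        if (!is_pirq) {
            return 1;
        }
        return fail ? -22 : 0;   /* stand-in for -EINVAL */
    }

    static int fixup_route(int is_pirq, int fail)
    {
        int handled = translate_pirq(is_pirq, fail);

        if (handled <= 0) {
            return handled;      /* routed, or an error: stop here */
        }
        /* ... otherwise the normal MSI fixup continues ... */
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", fixup_route(1, 0), fixup_route(1, 1),
               fixup_route(0, 0));
        return 0;
    }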
+ */ + if (handled <= 0) { + return handled; + } + } +#endif + address = kvm_swizzle_msi_ext_dest_id(address); route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT; route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK; @@ -5527,8 +5690,8 @@ struct MSIRouteEntry { static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \ QLIST_HEAD_INITIALIZER(msi_route_list); -static void kvm_update_msi_routes_all(void *private, bool global, - uint32_t index, uint32_t mask) +void kvm_update_msi_routes_all(void *private, bool global, + uint32_t index, uint32_t mask) { int cnt = 0, vector; MSIRouteEntry *entry; @@ -5704,6 +5867,90 @@ static void kvm_arch_set_notify_window(Object *obj, Visitor *v, s->notify_window = value; } +static void kvm_arch_get_xen_version(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + uint32_t value = s->xen_version; + + visit_type_uint32(v, name, &value, errp); +} + +static void kvm_arch_set_xen_version(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + Error *error = NULL; + uint32_t value; + + visit_type_uint32(v, name, &value, &error); + if (error) { + error_propagate(errp, error); + return; + } + + s->xen_version = value; + if (value && xen_mode == XEN_DISABLED) { + xen_mode = XEN_EMULATE; + } +} + +static void kvm_arch_get_xen_gnttab_max_frames(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + uint16_t value = s->xen_gnttab_max_frames; + + visit_type_uint16(v, name, &value, errp); +} + +static void kvm_arch_set_xen_gnttab_max_frames(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + Error *error = NULL; + uint16_t value; + + visit_type_uint16(v, name, &value, &error); + if (error) { + error_propagate(errp, error); + return; + } + + s->xen_gnttab_max_frames = value; +} + +static void kvm_arch_get_xen_evtchn_max_pirq(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + uint16_t value = s->xen_evtchn_max_pirq; + + visit_type_uint16(v, name, &value, errp); +} + +static void kvm_arch_set_xen_evtchn_max_pirq(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + Error *error = NULL; + uint16_t value; + + visit_type_uint16(v, name, &value, &error); + if (error) { + error_propagate(errp, error); + return; + } + + s->xen_evtchn_max_pirq = value; +} + void kvm_arch_accel_class_init(ObjectClass *oc) { object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption", @@ -5720,6 +5967,29 @@ void kvm_arch_accel_class_init(ObjectClass *oc) object_class_property_set_description(oc, "notify-window", "Clock cycles without an event window " "after which a notification VM exit occurs"); + + object_class_property_add(oc, "xen-version", "uint32", + kvm_arch_get_xen_version, + kvm_arch_set_xen_version, + NULL, NULL); + object_class_property_set_description(oc, "xen-version", + "Xen version to be emulated " + "(in XENVER_version form " + "e.g. 
0x4000a for 4.10)"); + + object_class_property_add(oc, "xen-gnttab-max-frames", "uint16", + kvm_arch_get_xen_gnttab_max_frames, + kvm_arch_set_xen_gnttab_max_frames, + NULL, NULL); + object_class_property_set_description(oc, "xen-gnttab-max-frames", + "Maximum number of grant table frames"); + + object_class_property_add(oc, "xen-evtchn-max-pirq", "uint16", + kvm_arch_get_xen_evtchn_max_pirq, + kvm_arch_set_xen_evtchn_max_pirq, + NULL, NULL); + object_class_property_set_description(oc, "xen-evtchn-max-pirq", + "Maximum number of Xen PIRQs"); } void kvm_set_max_apic_id(uint32_t max_apic_id) diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h index 6a5c24e3dc..e24753abfe 100644 --- a/target/i386/kvm/kvm_i386.h +++ b/target/i386/kvm/kvm_i386.h @@ -51,6 +51,8 @@ bool kvm_hv_vpindex_settable(void); bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp); uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address); +void kvm_update_msi_routes_all(void *private, bool global, + uint32_t index, uint32_t mask); bool kvm_enable_sgx_provisioning(KVMState *s); void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask); diff --git a/target/i386/kvm/meson.build b/target/i386/kvm/meson.build index 736df8b72e..322272091b 100644 --- a/target/i386/kvm/meson.build +++ b/target/i386/kvm/meson.build @@ -7,6 +7,8 @@ i386_softmmu_kvm_ss.add(files( 'kvm-cpu.c', )) +i386_softmmu_kvm_ss.add(when: 'CONFIG_XEN_EMU', if_true: files('xen-emu.c')) + i386_softmmu_kvm_ss.add(when: 'CONFIG_SEV', if_false: files('sev-stub.c')) i386_softmmu_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'), if_false: files('hyperv-stub.c')) diff --git a/target/i386/kvm/trace-events b/target/i386/kvm/trace-events index 7c369db1e1..b365a8e8e2 100644 --- a/target/i386/kvm/trace-events +++ b/target/i386/kvm/trace-events @@ -5,3 +5,10 @@ kvm_x86_fixup_msi_error(uint32_t gsi) "VT-d failed to remap interrupt for GSI %" kvm_x86_add_msi_route(int virq) "Adding route entry for virq %d" kvm_x86_remove_msi_route(int virq) "Removing route entry for virq %d" kvm_x86_update_msi_routes(int num) "Updated %d MSI routes" + +# xen-emu.c +kvm_xen_hypercall(int cpu, uint8_t cpl, uint64_t input, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t ret) "xen_hypercall: cpu %d cpl %d input %" PRIu64 " a0 0x%" PRIx64 " a1 0x%" PRIx64 " a2 0x%" PRIx64" ret 0x%" PRIx64 +kvm_xen_soft_reset(void) "" +kvm_xen_set_shared_info(uint64_t gfn) "shared info at gfn 0x%" PRIx64 +kvm_xen_set_vcpu_attr(int cpu, int type, uint64_t gpa) "vcpu attr cpu %d type %d gpa 0x%" PRIx64 +kvm_xen_set_vcpu_callback(int cpu, int vector) "callback vcpu %d vector %d" diff --git a/target/i386/kvm/xen-compat.h b/target/i386/kvm/xen-compat.h new file mode 100644 index 0000000000..7f30180cc2 --- /dev/null +++ b/target/i386/kvm/xen-compat.h @@ -0,0 +1,70 @@ +/* + * Xen HVM emulation support in KVM + * + * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#ifndef QEMU_I386_KVM_XEN_COMPAT_H +#define QEMU_I386_KVM_XEN_COMPAT_H + +#include "hw/xen/interface/memory.h" + +typedef uint32_t compat_pfn_t; +typedef uint32_t compat_ulong_t; +typedef uint32_t compat_ptr_t; + +#define __DEFINE_COMPAT_HANDLE(name, type) \ + typedef struct { \ + compat_ptr_t c; \ + type *_[0] __attribute__((packed)); \ + } __compat_handle_ ## name; \ + +#define DEFINE_COMPAT_HANDLE(name) __DEFINE_COMPAT_HANDLE(name, name) +#define COMPAT_HANDLE(name) __compat_handle_ ## name + +DEFINE_COMPAT_HANDLE(compat_pfn_t); +DEFINE_COMPAT_HANDLE(compat_ulong_t); +DEFINE_COMPAT_HANDLE(int); + +struct compat_xen_add_to_physmap { + domid_t domid; + uint16_t size; + unsigned int space; + compat_ulong_t idx; + compat_pfn_t gpfn; +}; + +struct compat_xen_add_to_physmap_batch { + domid_t domid; + uint16_t space; + uint16_t size; + uint16_t extra; + COMPAT_HANDLE(compat_ulong_t) idxs; + COMPAT_HANDLE(compat_pfn_t) gpfns; + COMPAT_HANDLE(int) errs; +}; + +struct compat_physdev_map_pirq { + domid_t domid; + uint16_t pad; + /* IN */ + int type; + /* IN (ignored for ..._MULTI_MSI) */ + int index; + /* IN or OUT */ + int pirq; + /* IN - high 16 bits hold segment for ..._MSI_SEG and ..._MULTI_MSI */ + int bus; + /* IN */ + int devfn; + /* IN (also OUT for ..._MULTI_MSI) */ + int entry_nr; + /* IN */ + uint64_t table_base; +} __attribute__((packed)); + +#endif /* QEMU_I386_KVM_XEN_COMPAT_H */ diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c new file mode 100644 index 0000000000..d7c7eb8d9c --- /dev/null +++ b/target/i386/kvm/xen-emu.c @@ -0,0 +1,1903 @@ +/* + * Xen HVM emulation support in KVM + * + * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. + * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory.
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/main-loop.h" +#include "qemu/error-report.h" +#include "hw/xen/xen.h" +#include "sysemu/kvm_int.h" +#include "sysemu/kvm_xen.h" +#include "kvm/kvm_i386.h" +#include "exec/address-spaces.h" +#include "xen-emu.h" +#include "trace.h" +#include "sysemu/runstate.h" + +#include "hw/pci/msi.h" +#include "hw/i386/apic-msidef.h" +#include "hw/i386/e820_memory_layout.h" +#include "hw/i386/kvm/xen_overlay.h" +#include "hw/i386/kvm/xen_evtchn.h" +#include "hw/i386/kvm/xen_gnttab.h" +#include "hw/i386/kvm/xen_xenstore.h" + +#include "hw/xen/interface/version.h" +#include "hw/xen/interface/sched.h" +#include "hw/xen/interface/memory.h" +#include "hw/xen/interface/hvm/hvm_op.h" +#include "hw/xen/interface/hvm/params.h" +#include "hw/xen/interface/vcpu.h" +#include "hw/xen/interface/event_channel.h" +#include "hw/xen/interface/grant_table.h" + +#include "xen-compat.h" + +static void xen_vcpu_singleshot_timer_event(void *opaque); +static void xen_vcpu_periodic_timer_event(void *opaque); + +#ifdef TARGET_X86_64 +#define hypercall_compat32(longmode) (!(longmode)) +#else +#define hypercall_compat32(longmode) (false) +#endif + +static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa, + size_t *len, bool is_write) +{ + struct kvm_translation tr = { + .linear_address = gva, + }; + + if (len) { + *len = TARGET_PAGE_SIZE - (gva & ~TARGET_PAGE_MASK); + } + + if (kvm_vcpu_ioctl(cs, KVM_TRANSLATE, &tr) || !tr.valid || + (is_write && !tr.writeable)) { + return false; + } + *gpa = tr.physical_address; + return true; +} + +static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz, + bool is_write) +{ + uint8_t *buf = (uint8_t *)_buf; + uint64_t gpa; + size_t len; + + while (sz) { + if (!kvm_gva_to_gpa(cs, gva, &gpa, &len, is_write)) { + return -EFAULT; + } + if (len > sz) { + len = sz; + } + + cpu_physical_memory_rw(gpa, buf, len, is_write); + + buf += len; + sz -= len; + gva += len; + } + + return 0; +} + +static inline int kvm_copy_from_gva(CPUState *cs, uint64_t gva, void *buf, + size_t sz) +{ + return kvm_gva_rw(cs, gva, buf, sz, false); +} + +static inline int kvm_copy_to_gva(CPUState *cs, uint64_t gva, void *buf, + size_t sz) +{ + return kvm_gva_rw(cs, gva, buf, sz, true); +} + +int kvm_xen_init(KVMState *s, uint32_t hypercall_msr) +{ + const int required_caps = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | + KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | KVM_XEN_HVM_CONFIG_SHARED_INFO; + struct kvm_xen_hvm_config cfg = { + .msr = hypercall_msr, + .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL, + }; + int xen_caps, ret; + + xen_caps = kvm_check_extension(s, KVM_CAP_XEN_HVM); + if (required_caps & ~xen_caps) { + error_report("kvm: Xen HVM guest support not present or insufficient"); + return -ENOSYS; + } + + if (xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND) { + struct kvm_xen_hvm_attr ha = { + .type = KVM_XEN_ATTR_TYPE_XEN_VERSION, + .u.xen_version = s->xen_version, + }; + (void)kvm_vm_ioctl(s, KVM_XEN_HVM_SET_ATTR, &ha); + + cfg.flags |= KVM_XEN_HVM_CONFIG_EVTCHN_SEND; + } + + ret = kvm_vm_ioctl(s, KVM_XEN_HVM_CONFIG, &cfg); + if (ret < 0) { + error_report("kvm: Failed to enable Xen HVM support: %s", + strerror(-ret)); + return ret; + } + + /* If called a second time, don't repeat the rest of the setup. */ + if (s->xen_caps) { + return 0; + } + + /* + * Event channel delivery via GSI/PCI_INTX needs to poll the vcpu_info + * of vCPU0 to deassert the IRQ when ->evtchn_upcall_pending is cleared. 
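kvm_gva_rw() above must split a guest-virtual copy into per-page chunks, because each KVM_TRANSLATE result is only valid within a single guest page. A standalone model of just the chunking arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ull

    /* Bytes left in the page containing gva, capped at sz. */
    static size_t chunk_len(uint64_t gva, size_t sz)
    {
        size_t len = PAGE_SIZE - (gva & (PAGE_SIZE - 1));
        return len > sz ? sz : len;
    }

    int main(void)
    {
        uint64_t gva = 0x1ff0;   /* 16 bytes below a page boundary */
        size_t sz = 100;

        while (sz) {
            size_t len = chunk_len(gva, sz);
            printf("copy %zu bytes at gva 0x%llx\n", len,
                   (unsigned long long)gva);
            gva += len;
            sz -= len;
        }
        return 0;
    }

The first iteration copies the 16 bytes up to the page boundary and the second copies the remaining 84 from the next page.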
+ * + * In the kernel, there's a notifier hook on the PIC/IOAPIC which allows + * such things to be polled at precisely the right time. We *could* do + * it nicely in the kernel: check vcpu_info[0]->evtchn_upcall_pending at + * the moment the IRQ is acked, and see if it should be reasserted. + * + * But the in-kernel irqchip is deprecated, so we're unlikely to add + * that support in the kernel. Insist on using the split irqchip mode + * instead. + * + * This leaves us polling for the level going low in QEMU, which lacks + * the appropriate hooks in its PIC/IOAPIC code. Even VFIO is sending a + * spurious 'ack' to an INTX IRQ every time there's any MMIO access to + * the device (for which it has to unmap the device and trap access, for + * some period after an IRQ!!). In the Xen case, we do it on exit from + * KVM_RUN, if the flag is set to say that the GSI is currently asserted. + * Which is kind of icky, but less so than the VFIO one. I may fix them + * both later... + */ + if (!kvm_kernel_irqchip_split()) { + error_report("kvm: Xen support requires kernel-irqchip=split"); + return -EINVAL; + } + + s->xen_caps = xen_caps; + + /* Tell fw_cfg to notify the BIOS to reserve the range. */ + ret = e820_add_entry(XEN_SPECIAL_AREA_ADDR, XEN_SPECIAL_AREA_SIZE, + E820_RESERVED); + if (ret < 0) { + fprintf(stderr, "e820_add_entry() table is full\n"); + return ret; + } + + /* The page couldn't be overlaid until KVM was initialized */ + xen_xenstore_reset(); + + return 0; +} + +int kvm_xen_init_vcpu(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + int err; + + /* + * The kernel needs to know the Xen/ACPI vCPU ID because that's + * what the guest uses in hypercalls such as timers. It doesn't + * match the APIC ID which is generally used for talking to the + * kernel about vCPUs. And if vCPU threads race with creating + * their KVM vCPUs out of order, it doesn't necessarily match + * with the kernel's internal vCPU indices either. 
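The comment above notes that the Xen/ACPI vCPU ID is neither the APIC ID nor necessarily the kernel's internal index. A toy illustration of the first point: APIC IDs are packed per topology level and can have gaps, while vCPU indices stay dense. (The two-bit core field here is a deliberate simplification of QEMU's real topology encoding.)

    #include <stdio.h>

    int main(void)
    {
        int sockets = 2, cores = 3, core_bits = 2;

        for (int idx = 0; idx < sockets * cores; idx++) {
            int socket = idx / cores, core = idx % cores;
            int apic_id = (socket << core_bits) | core;

            printf("vcpu %d -> apic_id %d\n", idx, apic_id);
        }
        return 0;
    }

With three cores in a two-bit field, APIC ID 3 is never used: vCPU 3 gets APIC ID 4.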
+ */ + if (kvm_xen_has_cap(EVTCHN_SEND)) { + struct kvm_xen_vcpu_attr va = { + .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID, + .u.vcpu_id = cs->cpu_index, + }; + err = kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &va); + if (err) { + error_report("kvm: Failed to set Xen vCPU ID attribute: %s", + strerror(-err)); + return err; + } + } + + env->xen_vcpu_info_gpa = INVALID_GPA; + env->xen_vcpu_info_default_gpa = INVALID_GPA; + env->xen_vcpu_time_info_gpa = INVALID_GPA; + env->xen_vcpu_runstate_gpa = INVALID_GPA; + + qemu_mutex_init(&env->xen_timers_lock); + env->xen_singleshot_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + xen_vcpu_singleshot_timer_event, + cpu); + if (!env->xen_singleshot_timer) { + return -ENOMEM; + } + env->xen_singleshot_timer->opaque = cs; + + env->xen_periodic_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + xen_vcpu_periodic_timer_event, + cpu); + if (!env->xen_periodic_timer) { + return -ENOMEM; + } + env->xen_periodic_timer->opaque = cs; + + return 0; +} + +uint32_t kvm_xen_get_caps(void) +{ + return kvm_state->xen_caps; +} + +static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu, + int cmd, uint64_t arg) +{ + int err = 0; + + switch (cmd) { + case XENVER_get_features: { + struct xen_feature_info fi; + + /* No need for 32/64 compat handling */ + qemu_build_assert(sizeof(fi) == 8); + + err = kvm_copy_from_gva(CPU(cpu), arg, &fi, sizeof(fi)); + if (err) { + break; + } + + fi.submap = 0; + if (fi.submap_idx == 0) { + fi.submap |= 1 << XENFEAT_writable_page_tables | + 1 << XENFEAT_writable_descriptor_tables | + 1 << XENFEAT_auto_translated_physmap | + 1 << XENFEAT_supervisor_mode_kernel | + 1 << XENFEAT_hvm_callback_vector | + 1 << XENFEAT_hvm_safe_pvclock | + 1 << XENFEAT_hvm_pirqs; + } + + err = kvm_copy_to_gva(CPU(cpu), arg, &fi, sizeof(fi)); + break; + } + + default: + return false; + } + + exit->u.hcall.result = err; + return true; +} + +static int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa) +{ + struct kvm_xen_vcpu_attr xhsi; + + xhsi.type = type; + xhsi.u.gpa = gpa; + + trace_kvm_xen_set_vcpu_attr(cs->cpu_index, type, gpa); + + return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi); +} + +static int kvm_xen_set_vcpu_callback_vector(CPUState *cs) +{ + uint8_t vector = X86_CPU(cs)->env.xen_vcpu_callback_vector; + struct kvm_xen_vcpu_attr xva; + + xva.type = KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR; + xva.u.vector = vector; + + trace_kvm_xen_set_vcpu_callback(cs->cpu_index, vector); + + return kvm_vcpu_ioctl(cs, KVM_XEN_HVM_SET_ATTR, &xva); +} + +static void do_set_vcpu_callback_vector(CPUState *cs, run_on_cpu_data data) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + env->xen_vcpu_callback_vector = data.host_int; + + if (kvm_xen_has_cap(EVTCHN_SEND)) { + kvm_xen_set_vcpu_callback_vector(cs); + } +} + +static int set_vcpu_info(CPUState *cs, uint64_t gpa) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + MemoryRegionSection mrs = { .mr = NULL }; + void *vcpu_info_hva = NULL; + int ret; + + ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa); + if (ret || gpa == INVALID_GPA) { + goto out; + } + + mrs = memory_region_find(get_system_memory(), gpa, + sizeof(struct vcpu_info)); + if (mrs.mr && mrs.mr->ram_block && + !int128_lt(mrs.size, int128_make64(sizeof(struct vcpu_info)))) { + vcpu_info_hva = qemu_map_ram_ptr(mrs.mr->ram_block, + mrs.offset_within_region); + } + if (!vcpu_info_hva) { + if (mrs.mr) { + memory_region_unref(mrs.mr); + mrs.mr = NULL; + } + ret = -EINVAL; + } + + out: + if 
(env->xen_vcpu_info_mr) { + memory_region_unref(env->xen_vcpu_info_mr); + } + env->xen_vcpu_info_hva = vcpu_info_hva; + env->xen_vcpu_info_mr = mrs.mr; + return ret; +} + +static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + env->xen_vcpu_info_default_gpa = data.host_ulong; + + /* Changing the default does nothing if a vcpu_info was explicitly set. */ + if (env->xen_vcpu_info_gpa == INVALID_GPA) { + set_vcpu_info(cs, env->xen_vcpu_info_default_gpa); + } +} + +static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + env->xen_vcpu_info_gpa = data.host_ulong; + + set_vcpu_info(cs, env->xen_vcpu_info_gpa); +} + +void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id) +{ + CPUState *cs = qemu_get_cpu(vcpu_id); + if (!cs) { + return NULL; + } + + return X86_CPU(cs)->env.xen_vcpu_info_hva; +} + +void kvm_xen_maybe_deassert_callback(CPUState *cs) +{ + CPUX86State *env = &X86_CPU(cs)->env; + struct vcpu_info *vi = env->xen_vcpu_info_hva; + if (!vi) { + return; + } + + /* If the evtchn_upcall_pending flag is cleared, turn the GSI off. */ + if (!vi->evtchn_upcall_pending) { + qemu_mutex_lock_iothread(); + /* + * Check again now we have the lock, because it may have been + * asserted in the interim. And we don't want to take the lock + * every time because this is a fast path. + */ + if (!vi->evtchn_upcall_pending) { + X86_CPU(cs)->env.xen_callback_asserted = false; + xen_evtchn_set_callback_level(0); + } + qemu_mutex_unlock_iothread(); + } +} + +void kvm_xen_set_callback_asserted(void) +{ + CPUState *cs = qemu_get_cpu(0); + + if (cs) { + X86_CPU(cs)->env.xen_callback_asserted = true; + } +} + +void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type) +{ + CPUState *cs = qemu_get_cpu(vcpu_id); + uint8_t vector; + + if (!cs) { + return; + } + + vector = X86_CPU(cs)->env.xen_vcpu_callback_vector; + if (vector) { + /* + * The per-vCPU callback vector injected via lapic. Just + * deliver it as an MSI. + */ + MSIMessage msg = { + .address = APIC_DEFAULT_ADDRESS | X86_CPU(cs)->apic_id, + .data = vector | (1UL << MSI_DATA_LEVEL_SHIFT), + }; + kvm_irqchip_send_msi(kvm_state, msg); + return; + } + + switch (type) { + case HVM_PARAM_CALLBACK_TYPE_VECTOR: + /* + * If the evtchn_upcall_pending field in the vcpu_info is set, then + * KVM will automatically deliver the vector on entering the vCPU + * so all we have to do is kick it out. + */ + qemu_cpu_kick(cs); + break; + + case HVM_PARAM_CALLBACK_TYPE_GSI: + case HVM_PARAM_CALLBACK_TYPE_PCI_INTX: + if (vcpu_id == 0) { + xen_evtchn_set_callback_level(1); + } + break; + } +} + +static int kvm_xen_set_vcpu_timer(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + struct kvm_xen_vcpu_attr va = { + .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER, + .u.timer.port = env->xen_virq[VIRQ_TIMER], + .u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL, + .u.timer.expires_ns = env->xen_singleshot_timer_ns, + }; + + return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &va); +} + +static void do_set_vcpu_timer_virq(CPUState *cs, run_on_cpu_data data) +{ + kvm_xen_set_vcpu_timer(cs); +} + +int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port) +{ + CPUState *cs = qemu_get_cpu(vcpu_id); + + if (!cs) { + return -ENOENT; + } + + /* cpu.h doesn't include the actual Xen header. 
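kvm_xen_maybe_deassert_callback() above uses a check-lock-check pattern: peek at evtchn_upcall_pending without taking the iothread lock (the fast path, run on every exit from KVM_RUN), and re-check under the lock before actually deasserting. A standalone model with a pthread mutex standing in for the iothread lock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool pending;       /* models vi->evtchn_upcall_pending */
    static int gsi_level = 1;

    static void maybe_deassert(void)
    {
        if (!pending) {              /* cheap unlocked peek */
            pthread_mutex_lock(&lock);
            if (!pending) {          /* confirm now that we hold the lock */
                gsi_level = 0;
            }
            pthread_mutex_unlock(&lock);
        }
    }

    int main(void)
    {
        maybe_deassert();
        printf("gsi level = %d\n", gsi_level);
        return 0;
    }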
*/ + qemu_build_assert(NR_VIRQS == XEN_NR_VIRQS); + + if (virq >= NR_VIRQS) { + return -EINVAL; + } + + if (port && X86_CPU(cs)->env.xen_virq[virq]) { + return -EEXIST; + } + + X86_CPU(cs)->env.xen_virq[virq] = port; + if (virq == VIRQ_TIMER && kvm_xen_has_cap(EVTCHN_SEND)) { + async_run_on_cpu(cs, do_set_vcpu_timer_virq, + RUN_ON_CPU_HOST_INT(port)); + } + return 0; +} + +static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + env->xen_vcpu_time_info_gpa = data.host_ulong; + + kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, + env->xen_vcpu_time_info_gpa); +} + +static void do_set_vcpu_runstate_gpa(CPUState *cs, run_on_cpu_data data) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + env->xen_vcpu_runstate_gpa = data.host_ulong; + + kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR, + env->xen_vcpu_runstate_gpa); +} + +static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + env->xen_vcpu_info_gpa = INVALID_GPA; + env->xen_vcpu_info_default_gpa = INVALID_GPA; + env->xen_vcpu_time_info_gpa = INVALID_GPA; + env->xen_vcpu_runstate_gpa = INVALID_GPA; + env->xen_vcpu_callback_vector = 0; + env->xen_singleshot_timer_ns = 0; + memset(env->xen_virq, 0, sizeof(env->xen_virq)); + + set_vcpu_info(cs, INVALID_GPA); + kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, + INVALID_GPA); + kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR, + INVALID_GPA); + if (kvm_xen_has_cap(EVTCHN_SEND)) { + kvm_xen_set_vcpu_callback_vector(cs); + kvm_xen_set_vcpu_timer(cs); + } + +} + +static int xen_set_shared_info(uint64_t gfn) +{ + uint64_t gpa = gfn << TARGET_PAGE_BITS; + int i, err; + + QEMU_IOTHREAD_LOCK_GUARD(); + + /* + * The xen_overlay device tells KVM about it too, since it had to + * do that on migration load anyway (unless we're going to jump + * through lots of hoops to maintain the fiction that this isn't + * KVM-specific).
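xen_set_shared_info() continues below by deriving each legacy vCPU's default vcpu_info GPA from the shared-info page, which begins with an array of vcpu_info structures. A standalone sketch of that address arithmetic (the 64-byte VCPU_INFO_SIZE is an assumption matching sizeof(vcpu_info_t) on x86):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT     12
    #define VCPU_INFO_SIZE 64   /* assumed sizeof(vcpu_info_t) */

    int main(void)
    {
        uint64_t gfn = 0xfeff0;             /* example shared-info frame */
        uint64_t gpa = gfn << PAGE_SHIFT;   /* as in the code below */

        for (int i = 0; i < 4; i++) {
            printf("vcpu %d: default vcpu_info at 0x%llx\n", i,
                   (unsigned long long)(gpa + (uint64_t)i * VCPU_INFO_SIZE));
        }
        return 0;
    }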
+ */ + err = xen_overlay_map_shinfo_page(gpa); + if (err) { + return err; + } + + trace_kvm_xen_set_shared_info(gfn); + + for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) { + CPUState *cpu = qemu_get_cpu(i); + if (cpu) { + async_run_on_cpu(cpu, do_set_vcpu_info_default_gpa, + RUN_ON_CPU_HOST_ULONG(gpa)); + } + gpa += sizeof(vcpu_info_t); + } + + return err; +} + +static int add_to_physmap_one(uint32_t space, uint64_t idx, uint64_t gfn) +{ + switch (space) { + case XENMAPSPACE_shared_info: + if (idx > 0) { + return -EINVAL; + } + return xen_set_shared_info(gfn); + + case XENMAPSPACE_grant_table: + return xen_gnttab_map_page(idx, gfn); + + case XENMAPSPACE_gmfn: + case XENMAPSPACE_gmfn_range: + return -ENOTSUP; + + case XENMAPSPACE_gmfn_foreign: + case XENMAPSPACE_dev_mmio: + return -EPERM; + + default: + return -EINVAL; + } +} + +static int do_add_to_physmap(struct kvm_xen_exit *exit, X86CPU *cpu, + uint64_t arg) +{ + struct xen_add_to_physmap xatp; + CPUState *cs = CPU(cpu); + + if (hypercall_compat32(exit->u.hcall.longmode)) { + struct compat_xen_add_to_physmap xatp32; + + qemu_build_assert(sizeof(struct compat_xen_add_to_physmap) == 16); + if (kvm_copy_from_gva(cs, arg, &xatp32, sizeof(xatp32))) { + return -EFAULT; + } + xatp.domid = xatp32.domid; + xatp.size = xatp32.size; + xatp.space = xatp32.space; + xatp.idx = xatp32.idx; + xatp.gpfn = xatp32.gpfn; + } else { + if (kvm_copy_from_gva(cs, arg, &xatp, sizeof(xatp))) { + return -EFAULT; + } + } + + if (xatp.domid != DOMID_SELF && xatp.domid != xen_domid) { + return -ESRCH; + } + + return add_to_physmap_one(xatp.space, xatp.idx, xatp.gpfn); +} + +static int do_add_to_physmap_batch(struct kvm_xen_exit *exit, X86CPU *cpu, + uint64_t arg) +{ + struct xen_add_to_physmap_batch xatpb; + unsigned long idxs_gva, gpfns_gva, errs_gva; + CPUState *cs = CPU(cpu); + size_t op_sz; + + if (hypercall_compat32(exit->u.hcall.longmode)) { + struct compat_xen_add_to_physmap_batch xatpb32; + + qemu_build_assert(sizeof(struct compat_xen_add_to_physmap_batch) == 20); + if (kvm_copy_from_gva(cs, arg, &xatpb32, sizeof(xatpb32))) { + return -EFAULT; + } + xatpb.domid = xatpb32.domid; + xatpb.space = xatpb32.space; + xatpb.size = xatpb32.size; + + idxs_gva = xatpb32.idxs.c; + gpfns_gva = xatpb32.gpfns.c; + errs_gva = xatpb32.errs.c; + op_sz = sizeof(uint32_t); + } else { + if (kvm_copy_from_gva(cs, arg, &xatpb, sizeof(xatpb))) { + return -EFAULT; + } + op_sz = sizeof(unsigned long); + idxs_gva = (unsigned long)xatpb.idxs.p; + gpfns_gva = (unsigned long)xatpb.gpfns.p; + errs_gva = (unsigned long)xatpb.errs.p; + } + + if (xatpb.domid != DOMID_SELF && xatpb.domid != xen_domid) { + return -ESRCH; + } + + /* Explicitly invalid for the batch op. Not that we implement it anyway. 
*/ + if (xatpb.space == XENMAPSPACE_gmfn_range) { + return -EINVAL; + } + + while (xatpb.size--) { + unsigned long idx = 0; + unsigned long gpfn = 0; + int err; + + /* For 32-bit compat this only copies the low 32 bits of each */ + if (kvm_copy_from_gva(cs, idxs_gva, &idx, op_sz) || + kvm_copy_from_gva(cs, gpfns_gva, &gpfn, op_sz)) { + return -EFAULT; + } + idxs_gva += op_sz; + gpfns_gva += op_sz; + + err = add_to_physmap_one(xatpb.space, idx, gpfn); + + if (kvm_copy_to_gva(cs, errs_gva, &err, sizeof(err))) { + return -EFAULT; + } + errs_gva += sizeof(err); + } + return 0; +} + +static bool kvm_xen_hcall_memory_op(struct kvm_xen_exit *exit, X86CPU *cpu, + int cmd, uint64_t arg) +{ + int err; + + switch (cmd) { + case XENMEM_add_to_physmap: + err = do_add_to_physmap(exit, cpu, arg); + break; + + case XENMEM_add_to_physmap_batch: + err = do_add_to_physmap_batch(exit, cpu, arg); + break; + + default: + return false; + } + + exit->u.hcall.result = err; + return true; +} + +static bool handle_set_param(struct kvm_xen_exit *exit, X86CPU *cpu, + uint64_t arg) +{ + CPUState *cs = CPU(cpu); + struct xen_hvm_param hp; + int err = 0; + + /* No need for 32/64 compat handling */ + qemu_build_assert(sizeof(hp) == 16); + + if (kvm_copy_from_gva(cs, arg, &hp, sizeof(hp))) { + err = -EFAULT; + goto out; + } + + if (hp.domid != DOMID_SELF && hp.domid != xen_domid) { + err = -ESRCH; + goto out; + } + + switch (hp.index) { + case HVM_PARAM_CALLBACK_IRQ: + qemu_mutex_lock_iothread(); + err = xen_evtchn_set_callback_param(hp.value); + qemu_mutex_unlock_iothread(); + xen_set_long_mode(exit->u.hcall.longmode); + break; + default: + return false; + } + +out: + exit->u.hcall.result = err; + return true; +} + +static bool handle_get_param(struct kvm_xen_exit *exit, X86CPU *cpu, + uint64_t arg) +{ + CPUState *cs = CPU(cpu); + struct xen_hvm_param hp; + int err = 0; + + /* No need for 32/64 compat handling */ + qemu_build_assert(sizeof(hp) == 16); + + if (kvm_copy_from_gva(cs, arg, &hp, sizeof(hp))) { + err = -EFAULT; + goto out; + } + + if (hp.domid != DOMID_SELF && hp.domid != xen_domid) { + err = -ESRCH; + goto out; + } + + switch (hp.index) { + case HVM_PARAM_STORE_PFN: + hp.value = XEN_SPECIAL_PFN(XENSTORE); + break; + case HVM_PARAM_STORE_EVTCHN: + hp.value = xen_xenstore_get_port(); + break; + default: + return false; + } + + if (kvm_copy_to_gva(cs, arg, &hp, sizeof(hp))) { + err = -EFAULT; + } +out: + exit->u.hcall.result = err; + return true; +} + +static int kvm_xen_hcall_evtchn_upcall_vector(struct kvm_xen_exit *exit, + X86CPU *cpu, uint64_t arg) +{ + struct xen_hvm_evtchn_upcall_vector up; + CPUState *target_cs; + + /* No need for 32/64 compat handling */ + qemu_build_assert(sizeof(up) == 8); + + if (kvm_copy_from_gva(CPU(cpu), arg, &up, sizeof(up))) { + return -EFAULT; + } + + if (up.vector < 0x10) { + return -EINVAL; + } + + target_cs = qemu_get_cpu(up.vcpu); + if (!target_cs) { + return -EINVAL; + } + + async_run_on_cpu(target_cs, do_set_vcpu_callback_vector, + RUN_ON_CPU_HOST_INT(up.vector)); + return 0; +} + +static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit, X86CPU *cpu, + int cmd, uint64_t arg) +{ + int ret = -ENOSYS; + switch (cmd) { + case HVMOP_set_evtchn_upcall_vector: + ret = kvm_xen_hcall_evtchn_upcall_vector(exit, cpu, + exit->u.hcall.params[0]); + break; + + case HVMOP_pagetable_dying: + ret = -ENOSYS; + break; + + case HVMOP_set_param: + return handle_set_param(exit, cpu, arg); + + case HVMOP_get_param: + return handle_get_param(exit, cpu, arg); + + default: + return false; + } + + 
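/* HVMOP_set_param and HVMOP_get_param report their result directly above; only commands which break out of the switch reach this point. */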
exit->u.hcall.result = ret; + return true; +} + +static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target, + uint64_t arg) +{ + struct vcpu_register_vcpu_info rvi; + uint64_t gpa; + + /* No need for 32/64 compat handling */ + qemu_build_assert(sizeof(rvi) == 16); + qemu_build_assert(sizeof(struct vcpu_info) == 64); + + if (!target) { + return -ENOENT; + } + + if (kvm_copy_from_gva(cs, arg, &rvi, sizeof(rvi))) { + return -EFAULT; + } + + if (rvi.offset > TARGET_PAGE_SIZE - sizeof(struct vcpu_info)) { + return -EINVAL; + } + + gpa = ((rvi.mfn << TARGET_PAGE_BITS) + rvi.offset); + async_run_on_cpu(target, do_set_vcpu_info_gpa, RUN_ON_CPU_HOST_ULONG(gpa)); + return 0; +} + +static int vcpuop_register_vcpu_time_info(CPUState *cs, CPUState *target, + uint64_t arg) +{ + struct vcpu_register_time_memory_area tma; + uint64_t gpa; + size_t len; + + /* No need for 32/64 compat handling */ + qemu_build_assert(sizeof(tma) == 8); + qemu_build_assert(sizeof(struct vcpu_time_info) == 32); + + if (!target) { + return -ENOENT; + } + + if (kvm_copy_from_gva(cs, arg, &tma, sizeof(tma))) { + return -EFAULT; + } + + /* + * Xen actually uses the GVA and does the translation through the guest + * page tables each time. But Linux/KVM uses the GPA, on the assumption + * that guests only ever use *global* addresses (kernel virtual addresses) + * for it. If Linux is changed to redo the GVA→GPA translation each time, + * it will offer a new vCPU attribute for that, and we'll use it instead. + */ + if (!kvm_gva_to_gpa(cs, tma.addr.p, &gpa, &len, false) || + len < sizeof(struct vcpu_time_info)) { + return -EFAULT; + } + + async_run_on_cpu(target, do_set_vcpu_time_info_gpa, + RUN_ON_CPU_HOST_ULONG(gpa)); + return 0; +} + +static int vcpuop_register_runstate_info(CPUState *cs, CPUState *target, + uint64_t arg) +{ + struct vcpu_register_runstate_memory_area rma; + uint64_t gpa; + size_t len; + + /* No need for 32/64 compat handling */ + qemu_build_assert(sizeof(rma) == 8); + /* The runstate area actually does change size, but Linux copes. */ + + if (!target) { + return -ENOENT; + } + + if (kvm_copy_from_gva(cs, arg, &rma, sizeof(rma))) { + return -EFAULT; + } + + /* As with vcpu_time_info, Xen actually uses the GVA but KVM doesn't. 
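Translate the GVA to a GPA once at registration time and hand KVM the resulting GPA.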
*/ + if (!kvm_gva_to_gpa(cs, rma.addr.p, &gpa, &len, false)) { + return -EFAULT; + } + + async_run_on_cpu(target, do_set_vcpu_runstate_gpa, + RUN_ON_CPU_HOST_ULONG(gpa)); + return 0; +} + +static uint64_t kvm_get_current_ns(void) +{ + struct kvm_clock_data data; + int ret; + + ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data); + if (ret < 0) { + fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(-ret)); + abort(); + } + + return data.clock; +} + +static void xen_vcpu_singleshot_timer_event(void *opaque) +{ + CPUState *cpu = opaque; + CPUX86State *env = &X86_CPU(cpu)->env; + uint16_t port = env->xen_virq[VIRQ_TIMER]; + + if (likely(port)) { + xen_evtchn_set_port(port); + } + + qemu_mutex_lock(&env->xen_timers_lock); + env->xen_singleshot_timer_ns = 0; + qemu_mutex_unlock(&env->xen_timers_lock); +} + +static void xen_vcpu_periodic_timer_event(void *opaque) +{ + CPUState *cpu = opaque; + CPUX86State *env = &X86_CPU(cpu)->env; + uint16_t port = env->xen_virq[VIRQ_TIMER]; + int64_t qemu_now; + + if (likely(port)) { + xen_evtchn_set_port(port); + } + + qemu_mutex_lock(&env->xen_timers_lock); + + qemu_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + timer_mod_ns(env->xen_periodic_timer, + qemu_now + env->xen_periodic_timer_period); + + qemu_mutex_unlock(&env->xen_timers_lock); +} + +static int do_set_periodic_timer(CPUState *target, uint64_t period_ns) +{ + CPUX86State *tenv = &X86_CPU(target)->env; + int64_t qemu_now; + + timer_del(tenv->xen_periodic_timer); + + qemu_mutex_lock(&tenv->xen_timers_lock); + + qemu_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + timer_mod_ns(tenv->xen_periodic_timer, qemu_now + period_ns); + tenv->xen_periodic_timer_period = period_ns; + + qemu_mutex_unlock(&tenv->xen_timers_lock); + return 0; +} + +#define MILLISECS(_ms) ((int64_t)((_ms) * 1000000ULL)) +#define MICROSECS(_us) ((int64_t)((_us) * 1000ULL)) +#define STIME_MAX ((time_t)((int64_t)~0ull >> 1)) +/* Chosen so (NOW() + delta) won't overflow without an uptime of 200 years */ +#define STIME_DELTA_MAX ((int64_t)((uint64_t)~0ull >> 2)) + +static int vcpuop_set_periodic_timer(CPUState *cs, CPUState *target, + uint64_t arg) +{ + struct vcpu_set_periodic_timer spt; + + qemu_build_assert(sizeof(spt) == 8); + if (kvm_copy_from_gva(cs, arg, &spt, sizeof(spt))) { + return -EFAULT; + } + + if (spt.period_ns < MILLISECS(1) || spt.period_ns > STIME_DELTA_MAX) { + return -EINVAL; + } + + return do_set_periodic_timer(target, spt.period_ns); +} + +static int vcpuop_stop_periodic_timer(CPUState *target) +{ + CPUX86State *tenv = &X86_CPU(target)->env; + + qemu_mutex_lock(&tenv->xen_timers_lock); + + timer_del(tenv->xen_periodic_timer); + tenv->xen_periodic_timer_period = 0; + + qemu_mutex_unlock(&tenv->xen_timers_lock); + return 0; +} + +static int do_set_singleshot_timer(CPUState *cs, uint64_t timeout_abs, + bool future, bool linux_wa) +{ + CPUX86State *env = &X86_CPU(cs)->env; + int64_t now = kvm_get_current_ns(); + int64_t qemu_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + int64_t delta = timeout_abs - now; + + if (future && timeout_abs < now) { + return -ETIME; + } + + if (linux_wa && unlikely((int64_t)timeout_abs < 0 || + (delta > 0 && (uint32_t)(delta >> 50) != 0))) { + /* + * Xen has a 'Linux workaround' in do_set_timer_op() which checks + * for negative absolute timeout values (caused by integer + * overflow), and for values about 13 days in the future (2^50ns) + * which would be caused by jiffies overflow.
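(2^50 ns is 2^50 / 10^9 / 86400 seconds, i.e. just over 13 days.)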
For those cases, it + * sets the timeout 100ms in the future (not *too* soon, since if + * a guest really did set a long timeout on purpose we don't want + * to keep churning CPU time by waking it up). + */ + delta = (100 * SCALE_MS); + timeout_abs = now + delta; + } + + qemu_mutex_lock(&env->xen_timers_lock); + + timer_mod_ns(env->xen_singleshot_timer, qemu_now + delta); + env->xen_singleshot_timer_ns = now + delta; + + qemu_mutex_unlock(&env->xen_timers_lock); + return 0; +} + +static int vcpuop_set_singleshot_timer(CPUState *cs, uint64_t arg) +{ + struct vcpu_set_singleshot_timer sst = { 0 }; + + /* + * The struct is a uint64_t followed by a uint32_t. On 32-bit that + * makes it 12 bytes. On 64-bit it gets padded to 16. The parts + * that get used are identical, and there's four bytes of padding + * unused at the end. For true Xen compatibility we should attempt + * to copy the full 16 bytes from 64-bit guests, and return -EFAULT + * if we can't get the padding too. But that's daft. Just copy what + * we need. + */ + qemu_build_assert(offsetof(struct vcpu_set_singleshot_timer, flags) == 8); + qemu_build_assert(sizeof(sst) >= 12); + + if (kvm_copy_from_gva(cs, arg, &sst, 12)) { + return -EFAULT; + } + + return do_set_singleshot_timer(cs, sst.timeout_abs_ns, + !!(sst.flags & VCPU_SSHOTTMR_future), + false); +} + +static int vcpuop_stop_singleshot_timer(CPUState *cs) +{ + CPUX86State *env = &X86_CPU(cs)->env; + + qemu_mutex_lock(&env->xen_timers_lock); + + timer_del(env->xen_singleshot_timer); + env->xen_singleshot_timer_ns = 0; + + qemu_mutex_unlock(&env->xen_timers_lock); + return 0; +} + +static bool kvm_xen_hcall_set_timer_op(struct kvm_xen_exit *exit, X86CPU *cpu, + uint64_t timeout) +{ + int err; + + if (unlikely(timeout == 0)) { + err = vcpuop_stop_singleshot_timer(CPU(cpu)); + } else { + err = do_set_singleshot_timer(CPU(cpu), timeout, false, true); + } + exit->u.hcall.result = err; + return true; +} + +static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu, + int cmd, int vcpu_id, uint64_t arg) +{ + CPUState *cs = CPU(cpu); + CPUState *dest = cs->cpu_index == vcpu_id ? 
cs : qemu_get_cpu(vcpu_id); + int err; + + if (!dest) { + err = -ENOENT; + goto out; + } + + switch (cmd) { + case VCPUOP_register_runstate_memory_area: + err = vcpuop_register_runstate_info(cs, dest, arg); + break; + case VCPUOP_register_vcpu_time_memory_area: + err = vcpuop_register_vcpu_time_info(cs, dest, arg); + break; + case VCPUOP_register_vcpu_info: + err = vcpuop_register_vcpu_info(cs, dest, arg); + break; + case VCPUOP_set_singleshot_timer: { + if (cs->cpu_index == vcpu_id) { + err = vcpuop_set_singleshot_timer(dest, arg); + } else { + err = -EINVAL; + } + break; + } + case VCPUOP_stop_singleshot_timer: + if (cs->cpu_index == vcpu_id) { + err = vcpuop_stop_singleshot_timer(dest); + } else { + err = -EINVAL; + } + break; + case VCPUOP_set_periodic_timer: { + err = vcpuop_set_periodic_timer(cs, dest, arg); + break; + } + case VCPUOP_stop_periodic_timer: + err = vcpuop_stop_periodic_timer(dest); + break; + + default: + return false; + } + + out: + exit->u.hcall.result = err; + return true; +} + +static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu, + int cmd, uint64_t arg) +{ + CPUState *cs = CPU(cpu); + int err = -ENOSYS; + + switch (cmd) { + case EVTCHNOP_init_control: + case EVTCHNOP_expand_array: + case EVTCHNOP_set_priority: + /* We do not support FIFO channels at this point */ + err = -ENOSYS; + break; + + case EVTCHNOP_status: { + struct evtchn_status status; + + qemu_build_assert(sizeof(status) == 24); + if (kvm_copy_from_gva(cs, arg, &status, sizeof(status))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_status_op(&status); + if (!err && kvm_copy_to_gva(cs, arg, &status, sizeof(status))) { + err = -EFAULT; + } + break; + } + case EVTCHNOP_close: { + struct evtchn_close close; + + qemu_build_assert(sizeof(close) == 4); + if (kvm_copy_from_gva(cs, arg, &close, sizeof(close))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_close_op(&close); + break; + } + case EVTCHNOP_unmask: { + struct evtchn_unmask unmask; + + qemu_build_assert(sizeof(unmask) == 4); + if (kvm_copy_from_gva(cs, arg, &unmask, sizeof(unmask))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_unmask_op(&unmask); + break; + } + case EVTCHNOP_bind_virq: { + struct evtchn_bind_virq virq; + + qemu_build_assert(sizeof(virq) == 12); + if (kvm_copy_from_gva(cs, arg, &virq, sizeof(virq))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_bind_virq_op(&virq); + if (!err && kvm_copy_to_gva(cs, arg, &virq, sizeof(virq))) { + err = -EFAULT; + } + break; + } + case EVTCHNOP_bind_pirq: { + struct evtchn_bind_pirq pirq; + + qemu_build_assert(sizeof(pirq) == 12); + if (kvm_copy_from_gva(cs, arg, &pirq, sizeof(pirq))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_bind_pirq_op(&pirq); + if (!err && kvm_copy_to_gva(cs, arg, &pirq, sizeof(pirq))) { + err = -EFAULT; + } + break; + } + case EVTCHNOP_bind_ipi: { + struct evtchn_bind_ipi ipi; + + qemu_build_assert(sizeof(ipi) == 8); + if (kvm_copy_from_gva(cs, arg, &ipi, sizeof(ipi))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_bind_ipi_op(&ipi); + if (!err && kvm_copy_to_gva(cs, arg, &ipi, sizeof(ipi))) { + err = -EFAULT; + } + break; + } + case EVTCHNOP_send: { + struct evtchn_send send; + + qemu_build_assert(sizeof(send) == 4); + if (kvm_copy_from_gva(cs, arg, &send, sizeof(send))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_send_op(&send); + break; + } + case EVTCHNOP_alloc_unbound: { + struct evtchn_alloc_unbound alloc; + + qemu_build_assert(sizeof(alloc) == 8); + if (kvm_copy_from_gva(cs, arg, &alloc, 
sizeof(alloc))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_alloc_unbound_op(&alloc); + if (!err && kvm_copy_to_gva(cs, arg, &alloc, sizeof(alloc))) { + err = -EFAULT; + } + break; + } + case EVTCHNOP_bind_interdomain: { + struct evtchn_bind_interdomain interdomain; + + qemu_build_assert(sizeof(interdomain) == 12); + if (kvm_copy_from_gva(cs, arg, &interdomain, sizeof(interdomain))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_bind_interdomain_op(&interdomain); + if (!err && + kvm_copy_to_gva(cs, arg, &interdomain, sizeof(interdomain))) { + err = -EFAULT; + } + break; + } + case EVTCHNOP_bind_vcpu: { + struct evtchn_bind_vcpu vcpu; + + qemu_build_assert(sizeof(vcpu) == 8); + if (kvm_copy_from_gva(cs, arg, &vcpu, sizeof(vcpu))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_bind_vcpu_op(&vcpu); + break; + } + case EVTCHNOP_reset: { + struct evtchn_reset reset; + + qemu_build_assert(sizeof(reset) == 2); + if (kvm_copy_from_gva(cs, arg, &reset, sizeof(reset))) { + err = -EFAULT; + break; + } + + err = xen_evtchn_reset_op(&reset); + break; + } + default: + return false; + } + + exit->u.hcall.result = err; + return true; +} + +int kvm_xen_soft_reset(void) +{ + CPUState *cpu; + int err; + + assert(qemu_mutex_iothread_locked()); + + trace_kvm_xen_soft_reset(); + + err = xen_evtchn_soft_reset(); + if (err) { + return err; + } + + /* + * Zero is the reset/startup state for HVM_PARAM_CALLBACK_IRQ. Strictly, + * it maps to HVM_PARAM_CALLBACK_TYPE_GSI with GSI#0, but Xen refuses + * to deliver to the timer interrupt and treats that as 'disabled'. + */ + err = xen_evtchn_set_callback_param(0); + if (err) { + return err; + } + + CPU_FOREACH(cpu) { + async_run_on_cpu(cpu, do_vcpu_soft_reset, RUN_ON_CPU_NULL); + } + + err = xen_overlay_map_shinfo_page(INVALID_GFN); + if (err) { + return err; + } + + err = xen_gnttab_reset(); + if (err) { + return err; + } + + err = xen_xenstore_reset(); + if (err) { + return err; + } + + return 0; +} + +static int schedop_shutdown(CPUState *cs, uint64_t arg) +{ + struct sched_shutdown shutdown; + int ret = 0; + + /* No need for 32/64 compat handling */ + qemu_build_assert(sizeof(shutdown) == 4); + + if (kvm_copy_from_gva(cs, arg, &shutdown, sizeof(shutdown))) { + return -EFAULT; + } + + switch (shutdown.reason) { + case SHUTDOWN_crash: + cpu_dump_state(cs, stderr, CPU_DUMP_CODE); + qemu_system_guest_panicked(NULL); + break; + + case SHUTDOWN_reboot: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + break; + + case SHUTDOWN_poweroff: + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + break; + + case SHUTDOWN_soft_reset: + qemu_mutex_lock_iothread(); + ret = kvm_xen_soft_reset(); + qemu_mutex_unlock_iothread(); + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static bool kvm_xen_hcall_sched_op(struct kvm_xen_exit *exit, X86CPU *cpu, + int cmd, uint64_t arg) +{ + CPUState *cs = CPU(cpu); + int err = -ENOSYS; + + switch (cmd) { + case SCHEDOP_shutdown: + err = schedop_shutdown(cs, arg); + break; + + case SCHEDOP_poll: + /* + * Linux will panic if this doesn't work. Just yield; it's not + * worth overthinking it because with event channel handling + * in KVM, the kernel will intercept this and it will never + * reach QEMU anyway. The semantics of the hypercall explicitly + * permit spurious wakeups.
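A poller which wakes with no event pending simply sees one of those.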
+ */ + case SCHEDOP_yield: + sched_yield(); + err = 0; + break; + + default: + return false; + } + + exit->u.hcall.result = err; + return true; +} + +static bool kvm_xen_hcall_gnttab_op(struct kvm_xen_exit *exit, X86CPU *cpu, + int cmd, uint64_t arg, int count) +{ + CPUState *cs = CPU(cpu); + int err; + + switch (cmd) { + case GNTTABOP_set_version: { + struct gnttab_set_version set; + + qemu_build_assert(sizeof(set) == 4); + if (kvm_copy_from_gva(cs, arg, &set, sizeof(set))) { + err = -EFAULT; + break; + } + + err = xen_gnttab_set_version_op(&set); + if (!err && kvm_copy_to_gva(cs, arg, &set, sizeof(set))) { + err = -EFAULT; + } + break; + } + case GNTTABOP_get_version: { + struct gnttab_get_version get; + + qemu_build_assert(sizeof(get) == 8); + if (kvm_copy_from_gva(cs, arg, &get, sizeof(get))) { + err = -EFAULT; + break; + } + + err = xen_gnttab_get_version_op(&get); + if (!err && kvm_copy_to_gva(cs, arg, &get, sizeof(get))) { + err = -EFAULT; + } + break; + } + case GNTTABOP_query_size: { + struct gnttab_query_size size; + + qemu_build_assert(sizeof(size) == 16); + if (kvm_copy_from_gva(cs, arg, &size, sizeof(size))) { + err = -EFAULT; + break; + } + + err = xen_gnttab_query_size_op(&size); + if (!err && kvm_copy_to_gva(cs, arg, &size, sizeof(size))) { + err = -EFAULT; + } + break; + } + case GNTTABOP_setup_table: + case GNTTABOP_copy: + case GNTTABOP_map_grant_ref: + case GNTTABOP_unmap_grant_ref: + case GNTTABOP_swap_grant_ref: + return false; + + default: + /* Xen explicitly returns -ENOSYS to HVM guests for all others */ + err = -ENOSYS; + break; + } + + exit->u.hcall.result = err; + return true; +} + +static bool kvm_xen_hcall_physdev_op(struct kvm_xen_exit *exit, X86CPU *cpu, + int cmd, uint64_t arg) +{ + CPUState *cs = CPU(cpu); + int err; + + switch (cmd) { + case PHYSDEVOP_map_pirq: { + struct physdev_map_pirq map; + + if (hypercall_compat32(exit->u.hcall.longmode)) { + struct compat_physdev_map_pirq *map32 = (void *)&map; + + if (kvm_copy_from_gva(cs, arg, map32, sizeof(*map32))) { + err = -EFAULT; + break; + } + + /* + * The only thing that's different is the alignment of the + * uint64_t table_base at the end, which gets padding to make + * it 64-bit aligned in the 64-bit version. + */ + qemu_build_assert(sizeof(*map32) == 36); + qemu_build_assert(offsetof(struct physdev_map_pirq, entry_nr) == + offsetof(struct compat_physdev_map_pirq, entry_nr)); + memmove(&map.table_base, &map32->table_base, sizeof(map.table_base)); + } else { + if (kvm_copy_from_gva(cs, arg, &map, sizeof(map))) { + err = -EFAULT; + break; + } + } + err = xen_physdev_map_pirq(&map); + /* + * Since table_base is an IN parameter and won't be changed, just + * copy the size of the compat structure back to the guest.
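The pirq field which the guest reads back lies within those first 36 bytes in both the 32-bit and 64-bit layouts.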
+ */ + if (!err && kvm_copy_to_gva(cs, arg, &map, + sizeof(struct compat_physdev_map_pirq))) { + err = -EFAULT; + } + break; + } + case PHYSDEVOP_unmap_pirq: { + struct physdev_unmap_pirq unmap; + + qemu_build_assert(sizeof(unmap) == 8); + if (kvm_copy_from_gva(cs, arg, &unmap, sizeof(unmap))) { + err = -EFAULT; + break; + } + + err = xen_physdev_unmap_pirq(&unmap); + if (!err && kvm_copy_to_gva(cs, arg, &unmap, sizeof(unmap))) { + err = -EFAULT; + } + break; + } + case PHYSDEVOP_eoi: { + struct physdev_eoi eoi; + + qemu_build_assert(sizeof(eoi) == 4); + if (kvm_copy_from_gva(cs, arg, &eoi, sizeof(eoi))) { + err = -EFAULT; + break; + } + + err = xen_physdev_eoi_pirq(&eoi); + if (!err && kvm_copy_to_gva(cs, arg, &eoi, sizeof(eoi))) { + err = -EFAULT; + } + break; + } + case PHYSDEVOP_irq_status_query: { + struct physdev_irq_status_query query; + + qemu_build_assert(sizeof(query) == 8); + if (kvm_copy_from_gva(cs, arg, &query, sizeof(query))) { + err = -EFAULT; + break; + } + + err = xen_physdev_query_pirq(&query); + if (!err && kvm_copy_to_gva(cs, arg, &query, sizeof(query))) { + err = -EFAULT; + } + break; + } + case PHYSDEVOP_get_free_pirq: { + struct physdev_get_free_pirq get; + + qemu_build_assert(sizeof(get) == 8); + if (kvm_copy_from_gva(cs, arg, &get, sizeof(get))) { + err = -EFAULT; + break; + } + + err = xen_physdev_get_free_pirq(&get); + if (!err && kvm_copy_to_gva(cs, arg, &get, sizeof(get))) { + err = -EFAULT; + } + break; + } + case PHYSDEVOP_pirq_eoi_gmfn_v2: /* FreeBSD 13 makes this hypercall */ + err = -ENOSYS; + break; + + default: + return false; + } + + exit->u.hcall.result = err; + return true; +} + +static bool do_kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit) +{ + uint16_t code = exit->u.hcall.input; + + if (exit->u.hcall.cpl > 0) { + exit->u.hcall.result = -EPERM; + return true; + } + + switch (code) { + case __HYPERVISOR_set_timer_op: + if (exit->u.hcall.longmode) { + return kvm_xen_hcall_set_timer_op(exit, cpu, + exit->u.hcall.params[0]); + } else { + /* In 32-bit mode, the 64-bit timer value is in two args. 
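params[0] carries the low 32 bits and params[1] the high 32 bits; reassemble them here.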
*/ + uint64_t val = ((uint64_t)exit->u.hcall.params[1]) << 32 | + (uint32_t)exit->u.hcall.params[0]; + return kvm_xen_hcall_set_timer_op(exit, cpu, val); + } + case __HYPERVISOR_grant_table_op: + return kvm_xen_hcall_gnttab_op(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1], + exit->u.hcall.params[2]); + case __HYPERVISOR_sched_op: + return kvm_xen_hcall_sched_op(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1]); + case __HYPERVISOR_event_channel_op: + return kvm_xen_hcall_evtchn_op(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1]); + case __HYPERVISOR_vcpu_op: + return kvm_xen_hcall_vcpu_op(exit, cpu, + exit->u.hcall.params[0], + exit->u.hcall.params[1], + exit->u.hcall.params[2]); + case __HYPERVISOR_hvm_op: + return kvm_xen_hcall_hvm_op(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1]); + case __HYPERVISOR_memory_op: + return kvm_xen_hcall_memory_op(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1]); + case __HYPERVISOR_physdev_op: + return kvm_xen_hcall_physdev_op(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1]); + case __HYPERVISOR_xen_version: + return kvm_xen_hcall_xen_version(exit, cpu, exit->u.hcall.params[0], + exit->u.hcall.params[1]); + default: + return false; + } +} + +int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit) +{ + if (exit->type != KVM_EXIT_XEN_HCALL) { + return -1; + } + + /* + * The kernel latches the guest 32/64 mode when the MSR is used to fill + * the hypercall page. So if we see a hypercall in a mode that doesn't + * match our own idea of the guest mode, fetch the kernel's idea of the + * "long mode" to remain in sync. + */ + if (exit->u.hcall.longmode != xen_is_long_mode()) { + xen_sync_long_mode(); + } + + if (!do_kvm_xen_handle_exit(cpu, exit)) { + /* + * Some hypercalls will be deliberately "implemented" by returning + * -ENOSYS. This case is for hypercalls which are unexpected. 
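Those still get -ENOSYS, but with a log message so that unexpected guest behaviour is visible.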
+ */ + exit->u.hcall.result = -ENOSYS; + qemu_log_mask(LOG_UNIMP, "Unimplemented Xen hypercall %" + PRId64 " (0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 ")\n", + (uint64_t)exit->u.hcall.input, + (uint64_t)exit->u.hcall.params[0], + (uint64_t)exit->u.hcall.params[1], + (uint64_t)exit->u.hcall.params[2]); + } + + trace_kvm_xen_hypercall(CPU(cpu)->cpu_index, exit->u.hcall.cpl, + exit->u.hcall.input, exit->u.hcall.params[0], + exit->u.hcall.params[1], exit->u.hcall.params[2], + exit->u.hcall.result); + return 0; +} + +uint16_t kvm_xen_get_gnttab_max_frames(void) +{ + KVMState *s = KVM_STATE(current_accel()); + return s->xen_gnttab_max_frames; +} + +uint16_t kvm_xen_get_evtchn_max_pirq(void) +{ + KVMState *s = KVM_STATE(current_accel()); + return s->xen_evtchn_max_pirq; +} + +int kvm_put_xen_state(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + uint64_t gpa; + int ret; + + gpa = env->xen_vcpu_info_gpa; + if (gpa == INVALID_GPA) { + gpa = env->xen_vcpu_info_default_gpa; + } + + if (gpa != INVALID_GPA) { + ret = set_vcpu_info(cs, gpa); + if (ret < 0) { + return ret; + } + } + + gpa = env->xen_vcpu_time_info_gpa; + if (gpa != INVALID_GPA) { + ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, + gpa); + if (ret < 0) { + return ret; + } + } + + gpa = env->xen_vcpu_runstate_gpa; + if (gpa != INVALID_GPA) { + ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR, + gpa); + if (ret < 0) { + return ret; + } + } + + if (env->xen_periodic_timer_period) { + ret = do_set_periodic_timer(cs, env->xen_periodic_timer_period); + if (ret < 0) { + return ret; + } + } + + if (!kvm_xen_has_cap(EVTCHN_SEND)) { + /* + * If the kernel has EVTCHN_SEND support then it handles timers too, + * so the timer will be restored by kvm_xen_set_vcpu_timer() below. + */ + if (env->xen_singleshot_timer_ns) { + ret = do_set_singleshot_timer(cs, env->xen_singleshot_timer_ns, + false, false); + if (ret < 0) { + return ret; + } + } + return 0; + } + + if (env->xen_vcpu_callback_vector) { + ret = kvm_xen_set_vcpu_callback_vector(cs); + if (ret < 0) { + return ret; + } + } + + if (env->xen_virq[VIRQ_TIMER]) { + ret = kvm_xen_set_vcpu_timer(cs); + if (ret < 0) { + return ret; + } + } + return 0; +} + +int kvm_get_xen_state(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + uint64_t gpa; + int ret; + + /* + * The kernel does not mark vcpu_info as dirty when it delivers interrupts + * to it. It's up to userspace to *assume* that any page shared thus is + * always considered dirty. The shared_info page is different since it's + * an overlay and migrated separately anyway. + */ + gpa = env->xen_vcpu_info_gpa; + if (gpa == INVALID_GPA) { + gpa = env->xen_vcpu_info_default_gpa; + } + if (gpa != INVALID_GPA) { + MemoryRegionSection mrs = memory_region_find(get_system_memory(), + gpa, + sizeof(struct vcpu_info)); + if (mrs.mr && + !int128_lt(mrs.size, int128_make64(sizeof(struct vcpu_info)))) { + memory_region_set_dirty(mrs.mr, mrs.offset_within_region, + sizeof(struct vcpu_info)); + } + } + + if (!kvm_xen_has_cap(EVTCHN_SEND)) { + return 0; + } + + /* + * If the kernel is accelerating timers, read out the current value of the + * singleshot timer deadline. 
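It is migrated in env->xen_singleshot_timer_ns and restored on the destination via kvm_xen_set_vcpu_timer().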
+ */ + if (env->xen_virq[VIRQ_TIMER]) { + struct kvm_xen_vcpu_attr va = { + .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER, + }; + ret = kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_GET_ATTR, &va); + if (ret < 0) { + return ret; + } + env->xen_singleshot_timer_ns = va.u.timer.expires_ns; + } + + return 0; +} diff --git a/target/i386/kvm/xen-emu.h b/target/i386/kvm/xen-emu.h new file mode 100644 index 0000000000..fe85e0b195 --- /dev/null +++ b/target/i386/kvm/xen-emu.h @@ -0,0 +1,33 @@ +/* + * Xen HVM emulation support in KVM + * + * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. + * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_I386_KVM_XEN_EMU_H +#define QEMU_I386_KVM_XEN_EMU_H + +#define XEN_HYPERCALL_MSR 0x40000000 +#define XEN_HYPERCALL_MSR_HYPERV 0x40000200 + +#define XEN_CPUID_SIGNATURE 0 +#define XEN_CPUID_VENDOR 1 +#define XEN_CPUID_HVM_MSR 2 +#define XEN_CPUID_TIME 3 +#define XEN_CPUID_HVM 4 + +#define XEN_VERSION(maj, min) ((maj) << 16 | (min)) + +int kvm_xen_init(KVMState *s, uint32_t hypercall_msr); +int kvm_xen_init_vcpu(CPUState *cs); +int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit); +int kvm_put_xen_state(CPUState *cs); +int kvm_get_xen_state(CPUState *cs); +void kvm_xen_maybe_deassert_callback(CPUState *cs); + +#endif /* QEMU_I386_KVM_XEN_EMU_H */ diff --git a/target/i386/machine.c b/target/i386/machine.c index 310b125235..c7ac8084b2 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -6,8 +6,10 @@ #include "kvm/hyperv.h" #include "hw/i386/x86.h" #include "kvm/kvm_i386.h" +#include "hw/xen/xen.h" #include "sysemu/kvm.h" +#include "sysemu/kvm_xen.h" #include "sysemu/tcg.h" #include "qemu/error-report.h" @@ -1257,6 +1259,28 @@ static const VMStateDescription vmstate_nested_state = { } }; +static bool xen_vcpu_needed(void *opaque) +{ + return (xen_mode == XEN_EMULATE); +} + +static const VMStateDescription vmstate_xen_vcpu = { + .name = "cpu/xen_vcpu", + .version_id = 1, + .minimum_version_id = 1, + .needed = xen_vcpu_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU), + VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU), + VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU), + VMSTATE_UINT64(env.xen_vcpu_runstate_gpa, X86CPU), + VMSTATE_UINT8(env.xen_vcpu_callback_vector, X86CPU), + VMSTATE_UINT16_ARRAY(env.xen_virq, X86CPU, XEN_NR_VIRQS), + VMSTATE_UINT64(env.xen_singleshot_timer_ns, X86CPU), + VMSTATE_UINT64(env.xen_periodic_timer_period, X86CPU), + VMSTATE_END_OF_LIST() + } +}; #endif static bool mcg_ext_ctl_needed(void *opaque) @@ -1716,6 +1740,7 @@ const VMStateDescription vmstate_x86_cpu = { #endif #ifdef CONFIG_KVM &vmstate_nested_state, + &vmstate_xen_vcpu, #endif &vmstate_msr_tsx_ctrl, &vmstate_msr_intel_sgx, diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h index 0bd6bfad8a..fb63af7afa 100644 --- a/target/i386/ops_sse.h +++ b/target/i386/ops_sse.h @@ -2497,6 +2497,14 @@ void helper_vpermdq_ymm(Reg *d, Reg *v, Reg *s, uint32_t order) d->Q(1) = r1; d->Q(2) = r2; d->Q(3) = r3; + if (order & 0x8) { + d->Q(0) = 0; + d->Q(1) = 0; + } + if (order & 0x80) { + d->Q(2) = 0; + d->Q(3) = 0; + } } void helper_vpermq_ymm(Reg *d, Reg *s, uint32_t order) diff --git a/target/i386/sev.c b/target/i386/sev.c index 0ec970496e..fe2144c038 100644 --- a/target/i386/sev.c +++ b/target/i386/sev.c @@ -23,6 +23,7 @@ #include 
"qemu/base64.h" #include "qemu/module.h" #include "qemu/uuid.h" +#include "qemu/error-report.h" #include "crypto/hash.h" #include "sysemu/kvm.h" #include "sev.h" @@ -931,15 +932,26 @@ int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL); host_cbitpos = ebx & 0x3f; + /* + * The cbitpos value will be placed in bit positions 5:0 of the EBX + * register of CPUID 0x8000001F. No need to verify the range as the + * comparison against the host value accomplishes that. + */ if (host_cbitpos != sev->cbitpos) { error_setg(errp, "%s: cbitpos check failed, host '%d' requested '%d'", __func__, host_cbitpos, sev->cbitpos); goto err; } - if (sev->reduced_phys_bits < 1) { - error_setg(errp, "%s: reduced_phys_bits check failed, it should be >=1," - " requested '%d'", __func__, sev->reduced_phys_bits); + /* + * The reduced-phys-bits value will be placed in bit positions 11:6 of + * the EBX register of CPUID 0x8000001F, so verify the supplied value + * is in the range of 1 to 63. + */ + if (sev->reduced_phys_bits < 1 || sev->reduced_phys_bits > 63) { + error_setg(errp, "%s: reduced_phys_bits check failed," + " it should be in the range of 1 to 63, requested '%d'", + __func__, sev->reduced_phys_bits); goto err; } diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc index d5fd8d965c..46afd9960b 100644 --- a/target/i386/tcg/decode-new.c.inc +++ b/target/i386/tcg/decode-new.c.inc @@ -237,7 +237,7 @@ static void decode_group14(DisasContext *s, CPUX86State *env, X86OpEntry *entry, static void decode_0F6F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) { static const X86OpEntry opcodes_0F6F[4] = { - X86_OP_ENTRY3(MOVDQ, P,q, None,None, Q,q, vex1 mmx), /* movq */ + X86_OP_ENTRY3(MOVDQ, P,q, None,None, Q,q, vex5 mmx), /* movq */ X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex1), /* movdqa */ X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* movdqu */ {}, @@ -274,9 +274,9 @@ static void decode_0F78(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui { static const X86OpEntry opcodes_0F78[4] = { {}, - X86_OP_ENTRY3(EXTRQ_i, V,x, None,None, I,w, cpuid(SSE4A)), + X86_OP_ENTRY3(EXTRQ_i, V,x, None,None, I,w, cpuid(SSE4A)), /* AMD extension */ {}, - X86_OP_ENTRY3(INSERTQ_i, V,x, U,x, I,w, cpuid(SSE4A)), + X86_OP_ENTRY3(INSERTQ_i, V,x, U,x, I,w, cpuid(SSE4A)), /* AMD extension */ }; *entry = *decode_by_prefix(s, opcodes_0F78); } @@ -284,9 +284,9 @@ static void decode_0F78(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui static void decode_0F79(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) { if (s->prefix & PREFIX_REPNZ) { - entry->gen = gen_INSERTQ_r; + entry->gen = gen_INSERTQ_r; /* AMD extension */ } else if (s->prefix & PREFIX_DATA) { - entry->gen = gen_EXTRQ_r; + entry->gen = gen_EXTRQ_r; /* AMD extension */ } else { entry->gen = NULL; }; @@ -306,7 +306,7 @@ static void decode_0F7E(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui static void decode_0F7F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) { static const X86OpEntry opcodes_0F7F[4] = { - X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex1 mmx), /* movq */ + X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex5 mmx), /* movq */ X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex1), /* movdqa */ X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4_unal), /* movdqu */ {}, @@ -639,15 +639,15 @@ static void decode_0F10(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui static const X86OpEntry opcodes_0F10_reg[4] = { 
X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPS */ X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPD */ - X86_OP_ENTRY3(VMOVSS, V,x, H,x, W,x, vex4), - X86_OP_ENTRY3(VMOVLPx, V,x, H,x, W,x, vex4), /* MOVSD */ + X86_OP_ENTRY3(VMOVSS, V,x, H,x, W,x, vex5), + X86_OP_ENTRY3(VMOVLPx, V,x, H,x, W,x, vex5), /* MOVSD */ }; static const X86OpEntry opcodes_0F10_mem[4] = { X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPS */ X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* MOVUPD */ - X86_OP_ENTRY3(VMOVSS_ld, V,x, H,x, M,ss, vex4), - X86_OP_ENTRY3(VMOVSD_ld, V,x, H,x, M,sd, vex4), + X86_OP_ENTRY3(VMOVSS_ld, V,x, H,x, M,ss, vex5), + X86_OP_ENTRY3(VMOVSD_ld, V,x, H,x, M,sd, vex5), }; if ((get_modrm(s, env) >> 6) == 3) { @@ -660,17 +660,17 @@ static void decode_0F10(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui static void decode_0F11(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) { static const X86OpEntry opcodes_0F11_reg[4] = { - X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVPS */ - X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVPD */ - X86_OP_ENTRY3(VMOVSS, W,x, H,x, V,x, vex4), - X86_OP_ENTRY3(VMOVLPx, W,x, H,x, V,q, vex4), /* MOVSD */ + X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVUPS */ + X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVUPD */ + X86_OP_ENTRY3(VMOVSS, W,x, H,x, V,x, vex5), + X86_OP_ENTRY3(VMOVLPx, W,x, H,x, V,q, vex5), /* MOVSD */ }; static const X86OpEntry opcodes_0F11_mem[4] = { - X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVPS */ - X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVPD */ - X86_OP_ENTRY3(VMOVSS_st, M,ss, None,None, V,x, vex4), - X86_OP_ENTRY3(VMOVLPx_st, M,sd, None,None, V,x, vex4), /* MOVSD */ + X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVUPS */ + X86_OP_ENTRY3(MOVDQ, W,x, None,None, V,x, vex4), /* MOVUPD */ + X86_OP_ENTRY3(VMOVSS_st, M,ss, None,None, V,x, vex5), + X86_OP_ENTRY3(VMOVLPx_st, M,sd, None,None, V,x, vex5), /* MOVSD */ }; if ((get_modrm(s, env) >> 6) == 3) { @@ -687,16 +687,16 @@ static void decode_0F12(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui * Use dq for operand for compatibility with gen_MOVSD and * to allow VEX128 only. */ - X86_OP_ENTRY3(VMOVLPx_ld, V,dq, H,dq, M,q, vex4), /* MOVLPS */ - X86_OP_ENTRY3(VMOVLPx_ld, V,dq, H,dq, M,q, vex4), /* MOVLPD */ + X86_OP_ENTRY3(VMOVLPx_ld, V,dq, H,dq, M,q, vex5), /* MOVLPS */ + X86_OP_ENTRY3(VMOVLPx_ld, V,dq, H,dq, M,q, vex5), /* MOVLPD */ X86_OP_ENTRY3(VMOVSLDUP, V,x, None,None, W,x, vex4 cpuid(SSE3)), - X86_OP_ENTRY3(VMOVDDUP, V,x, None,None, WM,q, vex4 cpuid(SSE3)), /* qq if VEX.256 */ + X86_OP_ENTRY3(VMOVDDUP, V,x, None,None, WM,q, vex5 cpuid(SSE3)), /* qq if VEX.256 */ }; static const X86OpEntry opcodes_0F12_reg[4] = { - X86_OP_ENTRY3(VMOVHLPS, V,dq, H,dq, U,dq, vex4), - X86_OP_ENTRY3(VMOVLPx, W,x, H,x, U,q, vex4), /* MOVLPD */ + X86_OP_ENTRY3(VMOVHLPS, V,dq, H,dq, U,dq, vex7), + X86_OP_ENTRY3(VMOVLPx, W,x, H,x, U,q, vex5), /* MOVLPD */ X86_OP_ENTRY3(VMOVSLDUP, V,x, None,None, U,x, vex4 cpuid(SSE3)), - X86_OP_ENTRY3(VMOVDDUP, V,x, None,None, U,x, vex4 cpuid(SSE3)), + X86_OP_ENTRY3(VMOVDDUP, V,x, None,None, U,x, vex5 cpuid(SSE3)), }; if ((get_modrm(s, env) >> 6) == 3) { @@ -716,15 +716,15 @@ static void decode_0F16(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui * Operand 1 technically only reads the low 64 bits, but uses dq so that * it is easier to check for op0 == op1 in an endianness-neutral manner. 
*/ - X86_OP_ENTRY3(VMOVHPx_ld, V,dq, H,dq, M,q, vex4), /* MOVHPS */ - X86_OP_ENTRY3(VMOVHPx_ld, V,dq, H,dq, M,q, vex4), /* MOVHPD */ + X86_OP_ENTRY3(VMOVHPx_ld, V,dq, H,dq, M,q, vex5), /* MOVHPS */ + X86_OP_ENTRY3(VMOVHPx_ld, V,dq, H,dq, M,q, vex5), /* MOVHPD */ X86_OP_ENTRY3(VMOVSHDUP, V,x, None,None, W,x, vex4 cpuid(SSE3)), {}, }; static const X86OpEntry opcodes_0F16_reg[4] = { /* Same as above, operand 1 could be Hq if it wasn't for big-endian. */ - X86_OP_ENTRY3(VMOVLHPS, V,dq, H,dq, U,q, vex4), - X86_OP_ENTRY3(VMOVHPx, V,x, H,x, U,x, vex4), /* MOVHPD */ + X86_OP_ENTRY3(VMOVLHPS, V,dq, H,dq, U,q, vex7), + X86_OP_ENTRY3(VMOVHPx, V,x, H,x, U,x, vex5), /* MOVHPD */ X86_OP_ENTRY3(VMOVSHDUP, V,x, None,None, U,x, vex4 cpuid(SSE3)), {}, }; @@ -750,8 +750,9 @@ static void decode_0F2A(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui static void decode_0F2B(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) { static const X86OpEntry opcodes_0F2B[4] = { - X86_OP_ENTRY3(MOVDQ, M,x, None,None, V,x, vex4), /* MOVNTPS */ - X86_OP_ENTRY3(MOVDQ, M,x, None,None, V,x, vex4), /* MOVNTPD */ + X86_OP_ENTRY3(MOVDQ, M,x, None,None, V,x, vex1), /* MOVNTPS */ + X86_OP_ENTRY3(MOVDQ, M,x, None,None, V,x, vex1), /* MOVNTPD */ + /* AMD extensions */ X86_OP_ENTRY3(VMOVSS_st, M,ss, None,None, V,x, vex4 cpuid(SSE4A)), /* MOVNTSS */ X86_OP_ENTRY3(VMOVLPx_st, M,sd, None,None, V,x, vex4 cpuid(SSE4A)), /* MOVNTSD */ }; @@ -783,6 +784,17 @@ static void decode_0F2D(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui *entry = *decode_by_prefix(s, opcodes_0F2D); } +static void decode_VxCOMISx(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + /* + * VUCOMISx and VCOMISx are different and use no-prefix and 0x66 for SS and SD + * respectively. Scalar values usually are associated with 0xF2 and 0xF3, for + * which X86_VEX_REPScalar exists, but here it has to be decoded by hand. + */ + entry->s1 = entry->s2 = (s->prefix & PREFIX_DATA ? X86_SIZE_sd : X86_SIZE_ss); + entry->gen = (*b == 0x2E ? 
gen_VUCOMI : gen_VCOMI); +} + static void decode_sse_unary(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) { if (!(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))) { @@ -813,7 +825,7 @@ static void decode_0FE6(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui static const X86OpEntry opcodes_0FE6[4] = { {}, X86_OP_ENTRY2(VCVTTPD2DQ, V,x, W,x, vex2), - X86_OP_ENTRY2(VCVTDQ2PD, V,x, W,x, vex2), + X86_OP_ENTRY2(VCVTDQ2PD, V,x, W,x, vex5), X86_OP_ENTRY2(VCVTPD2DQ, V,x, W,x, vex2), }; *entry = *decode_by_prefix(s, opcodes_0FE6); @@ -831,17 +843,17 @@ static const X86OpEntry opcodes_0F[256] = { [0x10] = X86_OP_GROUP0(0F10), [0x11] = X86_OP_GROUP0(0F11), [0x12] = X86_OP_GROUP0(0F12), - [0x13] = X86_OP_ENTRY3(VMOVLPx_st, M,q, None,None, V,q, vex4 p_00_66), + [0x13] = X86_OP_ENTRY3(VMOVLPx_st, M,q, None,None, V,q, vex5 p_00_66), [0x14] = X86_OP_ENTRY3(VUNPCKLPx, V,x, H,x, W,x, vex4 p_00_66), [0x15] = X86_OP_ENTRY3(VUNPCKHPx, V,x, H,x, W,x, vex4 p_00_66), [0x16] = X86_OP_GROUP0(0F16), /* Incorrectly listed as Mq,Vq in the manual */ - [0x17] = X86_OP_ENTRY3(VMOVHPx_st, M,q, None,None, V,dq, vex4 p_00_66), + [0x17] = X86_OP_ENTRY3(VMOVHPx_st, M,q, None,None, V,dq, vex5 p_00_66), [0x50] = X86_OP_ENTRY3(MOVMSK, G,y, None,None, U,x, vex7 p_00_66), - [0x51] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), - [0x52] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex4_rep5 p_00_f3), - [0x53] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex4_rep5 p_00_f3), + [0x51] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), /* sqrtps */ + [0x52] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex4_rep5 p_00_f3), /* rsqrtps */ + [0x53] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex4_rep5 p_00_f3), /* rcpps */ [0x54] = X86_OP_ENTRY3(PAND, V,x, H,x, W,x, vex4 p_00_66), /* vand */ [0x55] = X86_OP_ENTRY3(PANDN, V,x, H,x, W,x, vex4 p_00_66), /* vandn */ [0x56] = X86_OP_ENTRY3(POR, V,x, H,x, W,x, vex4 p_00_66), /* vor */ @@ -871,15 +883,15 @@ static const X86OpEntry opcodes_0F[256] = { [0x2B] = X86_OP_GROUP0(0F2B), [0x2C] = X86_OP_GROUP0(0F2C), [0x2D] = X86_OP_GROUP0(0F2D), - [0x2E] = X86_OP_ENTRY3(VUCOMI, None,None, V,x, W,x, vex4 p_00_66), - [0x2F] = X86_OP_ENTRY3(VCOMI, None,None, V,x, W,x, vex4 p_00_66), + [0x2E] = X86_OP_GROUP3(VxCOMISx, None,None, V,x, W,x, vex3 p_00_66), /* VUCOMISS/SD */ + [0x2F] = X86_OP_GROUP3(VxCOMISx, None,None, V,x, W,x, vex3 p_00_66), /* VCOMISS/SD */ [0x38] = X86_OP_GROUP0(0F38), [0x3a] = X86_OP_GROUP0(0F3A), [0x58] = X86_OP_ENTRY3(VADD, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), [0x59] = X86_OP_ENTRY3(VMUL, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), - [0x5a] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), + [0x5a] = X86_OP_GROUP3(sse_unary, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), /* CVTPS2PD */ [0x5b] = X86_OP_GROUP0(0F5B), [0x5c] = X86_OP_ENTRY3(VSUB, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), [0x5d] = X86_OP_ENTRY3(VMIN, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2), @@ -1567,20 +1579,6 @@ illegal: return false; } -static void decode_temp_free(X86DecodedOp *op) -{ - if (op->v_ptr) { - tcg_temp_free_ptr(op->v_ptr); - } -} - -static void decode_temps_free(X86DecodedInsn *decode) -{ - decode_temp_free(&decode->op[0]); - decode_temp_free(&decode->op[1]); - decode_temp_free(&decode->op[2]); -} - /* * Convert one instruction. s->base.is_jmp is set if the translation must * be stopped. 
@@ -1835,7 +1833,6 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) decode.e.gen(s, env, &decode); gen_writeback(s, &decode, 0, s->T0); } - decode_temps_free(&decode); return; illegal_op: gen_illegal_opcode(s); diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc index e61ae9a2e9..4fe8dec427 100644 --- a/target/i386/tcg/emit.c.inc +++ b/target/i386/tcg/emit.c.inc @@ -629,7 +629,6 @@ static inline void gen_ternary_sse(DisasContext *s, CPUX86State *env, X86Decoded /* The format of the fourth input is Lx */ tcg_gen_addi_ptr(ptr3, cpu_env, ZMM_OFFSET(op3)); fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, ptr3); - tcg_temp_free_ptr(ptr3); } #define TERNARY_SSE(uname, uvname, lname) \ static void gen_##uvname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ @@ -1001,7 +1000,6 @@ static inline void gen_vsib_avx(DisasContext *s, CPUX86State *env, X86DecodedIns int ymmh_ofs = vector_elem_offset(&decode->op[1], MO_128, 1); tcg_gen_gvec_dup_imm(MO_64, ymmh_ofs, 16, 16, 0); } - tcg_temp_free_ptr(index); } #define VSIB_AVX(uname, lname) \ static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ @@ -1147,20 +1145,20 @@ static void gen_BLSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) static void gen_BZHI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; - TCGv bound; + TCGv bound = tcg_constant_tl(ot == MO_64 ? 63 : 31); + TCGv zero = tcg_constant_tl(0); + TCGv mone = tcg_constant_tl(-1); - tcg_gen_ext8u_tl(s->T1, cpu_regs[s->vex_v]); - bound = tcg_constant_tl(ot == MO_64 ? 63 : 31); + tcg_gen_ext8u_tl(s->T1, s->T1); /* * Note that since we're using BMILG (in order to get O * cleared) we need to store the inverse into C. */ - tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src, s->T1, bound); - tcg_gen_movcond_tl(TCG_COND_GT, s->T1, s->T1, bound, bound, s->T1); + tcg_gen_setcond_tl(TCG_COND_LEU, cpu_cc_src, s->T1, bound); - tcg_gen_movi_tl(s->A0, -1); - tcg_gen_shl_tl(s->A0, s->A0, s->T1); + tcg_gen_shl_tl(s->A0, mone, s->T1); + tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->T1, bound, s->A0, zero); tcg_gen_andc_tl(s->T0, s->T0, s->A0); gen_op_update1_cc(s); @@ -1627,7 +1625,6 @@ static void gen_PMOVMSKB(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco tcg_gen_deposit_tl(s->T0, t, s->T0, 8, TARGET_LONG_BITS - 8); } } - tcg_temp_free(t); } static void gen_PSHUFW(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -1762,7 +1759,6 @@ static void gen_PSRLDQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco } else { gen_helper_psrldq_xmm(cpu_env, OP_PTR0, OP_PTR1, imm_vec); } - tcg_temp_free_ptr(imm_vec); } static void gen_PSLLDQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -1775,7 +1771,6 @@ static void gen_PSLLDQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco } else { gen_helper_pslldq_xmm(cpu_env, OP_PTR0, OP_PTR1, imm_vec); } - tcg_temp_free_ptr(imm_vec); } static void gen_RORX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -2290,10 +2285,9 @@ static void gen_VZEROALL(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco { TCGv_ptr ptr = tcg_temp_new_ptr(); - tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_t0)); + tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_regs)); gen_helper_memset(ptr, ptr, tcg_constant_i32(0), tcg_constant_ptr(CPU_NB_REGS * sizeof(ZMMReg))); - tcg_temp_free_ptr(ptr); } static void gen_VZEROUPPER(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) diff --git 
a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c index 55bd1194d3..b5f0abffa3 100644 --- a/target/i386/tcg/sysemu/excp_helper.c +++ b/target/i386/tcg/sysemu/excp_helper.c @@ -64,7 +64,7 @@ static bool ptw_translate(PTETranslate *inout, hwaddr addr) int flags; inout->gaddr = addr; - flags = probe_access_full(inout->env, addr, MMU_DATA_STORE, + flags = probe_access_full(inout->env, addr, 0, MMU_DATA_STORE, inout->ptw_idx, true, &inout->haddr, &full, 0); if (unlikely(flags & TLB_INVALID_MASK)) { @@ -147,6 +147,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, hwaddr pte_addr, paddr; uint32_t pkr; int page_size; + int error_code; restart_all: rsvd_mask = ~MAKE_64BIT_MASK(0, env_archcpu(env)->phys_bits); @@ -428,7 +429,7 @@ do_check_protect_pse36: CPUTLBEntryFull *full; int flags, nested_page_size; - flags = probe_access_full(env, paddr, access_type, + flags = probe_access_full(env, paddr, 0, access_type, MMU_NESTED_IDX, true, &pte_trans.haddr, &full, 0); if (unlikely(flags & TLB_INVALID_MASK)) { @@ -467,7 +468,6 @@ do_check_protect_pse36: out->page_size = page_size; return true; - int error_code; do_fault_rsvd: error_code = PG_ERROR_RSVD_MASK; goto do_fault_cont; diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c index 79ac5908f7..b942c306d6 100644 --- a/target/i386/tcg/tcg-cpu.c +++ b/target/i386/tcg/tcg-cpu.c @@ -49,10 +49,10 @@ static void x86_cpu_exec_exit(CPUState *cs) static void x86_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { - /* The instruction pointer is always up to date with TARGET_TB_PCREL. */ - if (!TARGET_TB_PCREL) { + /* The instruction pointer is always up to date with CF_PCREL. */ + if (!(tb_cflags(tb) & CF_PCREL)) { CPUX86State *env = cs->env_ptr; - env->eip = tb_pc(tb) - tb->cs_base; + env->eip = tb->pc - tb->cs_base; } } @@ -64,7 +64,7 @@ static void x86_restore_state_to_opc(CPUState *cs, CPUX86State *env = &cpu->env; int cc_op = data[1]; - if (TARGET_TB_PCREL) { + if (tb_cflags(tb) & CF_PCREL) { env->eip = (env->eip & TARGET_PAGE_MASK) | data[0]; } else { env->eip = data[0] - tb->cs_base; diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index 71eeab4d50..a8bcaf8e7e 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -482,7 +482,7 @@ static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, break; #endif default: - tcg_abort(); + g_assert_not_reached(); } return cpu_regs[reg]; } @@ -551,7 +551,7 @@ static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) static void gen_update_eip_cur(DisasContext *s) { assert(s->pc_save != -1); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save); } else { tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base); @@ -562,7 +562,7 @@ static void gen_update_eip_cur(DisasContext *s) static void gen_update_eip_next(DisasContext *s) { assert(s->pc_save != -1); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save); } else { tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base); @@ -594,7 +594,7 @@ static TCGv_i32 eip_next_i32(DisasContext *s) if (CODE64(s)) { return tcg_constant_i32(-1); } - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { TCGv_i32 ret = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(ret, cpu_eip); tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save); @@ -607,7 +607,7 @@ static TCGv_i32 eip_next_i32(DisasContext *s) 
static TCGv eip_next_tl(DisasContext *s) { assert(s->pc_save != -1); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { TCGv ret = tcg_temp_new(); tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save); return ret; @@ -619,7 +619,7 @@ static TCGv eip_next_tl(DisasContext *s) static TCGv eip_cur_tl(DisasContext *s) { assert(s->pc_save != -1); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { TCGv ret = tcg_temp_new(); tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save); return ret; @@ -666,7 +666,7 @@ static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, } break; default: - tcg_abort(); + g_assert_not_reached(); } if (ovr_seg >= 0) { @@ -771,7 +771,7 @@ static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n) gen_helper_inl(v, cpu_env, n); break; default: - tcg_abort(); + g_assert_not_reached(); } } @@ -788,7 +788,7 @@ static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n) gen_helper_outl(cpu_env, v, n); break; default: - tcg_abort(); + g_assert_not_reached(); } } @@ -890,7 +890,7 @@ static void gen_compute_eflags(DisasContext *s) live = cc_op_live[s->cc_op] & ~USES_CC_SRCT; dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2); if (dead) { - zero = tcg_const_tl(0); + zero = tcg_constant_tl(0); if (dead & USES_CC_DST) { dst = zero; } @@ -905,10 +905,6 @@ static void gen_compute_eflags(DisasContext *s) gen_update_cc_op(s); gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op); set_cc_op(s, CC_OP_EFLAGS); - - if (dead) { - tcg_temp_free(zero); - } } typedef struct CCPrepare { @@ -1422,7 +1418,7 @@ static void gen_helper_fp_arith_ST0_FT0(int op) /* NOTE the exception in "r" op ordering */ static void gen_helper_fp_arith_STN_ST0(int op, int opreg) { - TCGv_i32 tmp = tcg_const_i32(opreg); + TCGv_i32 tmp = tcg_constant_i32(opreg); switch (op) { case 0: gen_helper_fadd_STN_ST0(cpu_env, tmp); @@ -1449,7 +1445,7 @@ static void gen_exception(DisasContext *s, int trapno) { gen_update_cc_op(s); gen_update_eip_cur(s); - gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno)); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(trapno)); s->base.is_jmp = DISAS_NORETURN; } @@ -1657,7 +1653,7 @@ static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, /* Store the results into the CC variables. If we know that the variable must be dead, store unconditionally. Otherwise we'll need to not disrupt the current contents. */ - z_tl = tcg_const_tl(0); + z_tl = tcg_constant_tl(0); if (cc_op_live[s->cc_op] & USES_CC_DST) { tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl, result, cpu_cc_dst); @@ -1670,7 +1666,6 @@ static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, } else { tcg_gen_mov_tl(cpu_cc_src, shm1); } - tcg_temp_free(z_tl); /* Get the two potential CC_OP values into temporaries. */ tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); @@ -1682,12 +1677,10 @@ static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, } /* Conditionally store the CC_OP value. */ - z32 = tcg_const_i32(0); + z32 = tcg_constant_i32(0); s32 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(s32, count); tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop); - tcg_temp_free_i32(z32); - tcg_temp_free_i32(s32); /* The CC_OP value is no longer predictable. */ set_cc_op(s, CC_OP_DYNAMIC); @@ -1840,17 +1833,15 @@ static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live. 
Otherwise reuse CC_OP_ADCOX which have the C and O flags split out exactly as we computed above. */ - t0 = tcg_const_i32(0); + t0 = tcg_constant_i32(0); t1 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(t1, s->T1); tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX); tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS); tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0, s->tmp2_i32, s->tmp3_i32); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); - /* The CC_OP value is no longer predictable. */ + /* The CC_OP value is no longer predictable. */ set_cc_op(s, CC_OP_DYNAMIC); } @@ -1943,7 +1934,7 @@ static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1, gen_op_ld_v(s, ot, s->T0, s->A0); else gen_op_mov_v_reg(s, ot, s->T0, op1); - + if (is_right) { switch (ot) { case MO_8: @@ -1961,7 +1952,7 @@ static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1, break; #endif default: - tcg_abort(); + g_assert_not_reached(); } } else { switch (ot) { @@ -1980,7 +1971,7 @@ static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1, break; #endif default: - tcg_abort(); + g_assert_not_reached(); } } /* store */ @@ -2069,7 +2060,6 @@ static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1, gen_op_st_rm_T0_A0(s, ot, op1); gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right); - tcg_temp_free(count); } static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s) @@ -2312,7 +2302,7 @@ static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s, break; default: - tcg_abort(); + g_assert_not_reached(); } done: @@ -2339,7 +2329,7 @@ static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib) ea = cpu_regs[a.base]; } if (!ea) { - if (TARGET_TB_PCREL && a.base == -2) { + if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) { /* With cpu_eip ~= pc_save, the expression is pc-relative. */ tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save); } else { @@ -2464,7 +2454,7 @@ static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot) ret = x86_ldl_code(env, s); break; default: - tcg_abort(); + g_assert_not_reached(); } return ret; } @@ -2527,19 +2517,12 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, cc.reg = t0; } if (!cc.use_reg2) { - cc.reg2 = tcg_const_tl(cc.imm); + cc.reg2 = tcg_constant_tl(cc.imm); } tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2, s->T0, cpu_regs[reg]); gen_op_mov_reg_v(s, ot, reg, s->T0); - - if (cc.mask != -1) { - tcg_temp_free(cc.reg); - } - if (!cc.use_reg2) { - tcg_temp_free(cc.reg2); - } } static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg) @@ -2562,7 +2545,7 @@ static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg) { if (PE(s) && !VM86(s)) { tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), s->tmp2_i32); + gen_helper_load_seg(cpu_env, tcg_constant_i32(seg_reg), s->tmp2_i32); /* abort translation because the addseg value may change or because ss32 may change. 
For R_SS, translation must always stop as a special handling must be done to disable hardware @@ -2768,7 +2751,6 @@ static void gen_set_hflag(DisasContext *s, uint32_t mask) tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags)); tcg_gen_ori_i32(t, t, mask); tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags)); - tcg_temp_free_i32(t); s->flags |= mask; } } @@ -2780,7 +2762,6 @@ static void gen_reset_hflag(DisasContext *s, uint32_t mask) tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags)); tcg_gen_andi_i32(t, t, ~mask); tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags)); - tcg_temp_free_i32(t); s->flags &= ~mask; } } @@ -2792,7 +2773,6 @@ static void gen_set_eflags(DisasContext *s, target_ulong mask) tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags)); tcg_gen_ori_tl(t, t, mask); tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags)); - tcg_temp_free(t); } static void gen_reset_eflags(DisasContext *s, target_ulong mask) @@ -2802,7 +2782,6 @@ static void gen_reset_eflags(DisasContext *s, target_ulong mask) tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags)); tcg_gen_andi_tl(t, t, ~mask); tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags)); - tcg_temp_free(t); } /* Clear BND registers during legacy branches. */ @@ -2887,7 +2866,7 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num) if (!CODE64(s)) { if (ot == MO_16) { mask = 0xffff; - if (TARGET_TB_PCREL && CODE32(s)) { + if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) { use_goto_tb = false; } } else { @@ -2899,7 +2878,7 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num) gen_update_cc_op(s); set_cc_op(s, CC_OP_DYNAMIC); - if (TARGET_TB_PCREL) { + if (tb_cflags(s->base.tb) & CF_PCREL) { tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save); /* * If we can prove the branch does not leave the page and we have @@ -2916,13 +2895,13 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num) translator_use_goto_tb(&s->base, new_eip + s->cs_base)) { /* jump to same page: we can use a direct jump */ tcg_gen_goto_tb(tb_num); - if (!TARGET_TB_PCREL) { + if (!(tb_cflags(s->base.tb) & CF_PCREL)) { tcg_gen_movi_tl(cpu_eip, new_eip); } tcg_gen_exit_tb(s->base.tb, tb_num); s->base.is_jmp = DISAS_NORETURN; } else { - if (!TARGET_TB_PCREL) { + if (!(tb_cflags(s->base.tb) & CF_PCREL)) { tcg_gen_movi_tl(cpu_eip, new_eip); } if (s->jmp_opt) { @@ -3035,13 +3014,11 @@ static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm) tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ); } - tcg_temp_free_i64(val); /* Set tmp0 to match the required value of Z. */ tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp); Z = tcg_temp_new(); tcg_gen_trunc_i64_tl(Z, cmp); - tcg_temp_free_i64(cmp); /* * Extract the result values for the register pair. @@ -3062,12 +3039,10 @@ static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm) tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero, s->T1, cpu_regs[R_EDX]); } - tcg_temp_free_i64(old); /* Update Z. */ gen_compute_eflags(s); tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1); - tcg_temp_free(Z); } #ifdef TARGET_X86_64 @@ -3092,8 +3067,6 @@ static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm) } tcg_gen_extr_i128_i64(s->T0, s->T1, val); - tcg_temp_free_i128(cmp); - tcg_temp_free_i128(val); /* Determine success after the fact. 
*/ t0 = tcg_temp_new_i64(); @@ -3101,13 +3074,11 @@ static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm) tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]); tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]); tcg_gen_or_i64(t0, t0, t1); - tcg_temp_free_i64(t1); /* Update Z. */ gen_compute_eflags(s); tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0); tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1); - tcg_temp_free_i64(t0); /* * Extract the result values for the register pair. We may do this @@ -3446,13 +3417,10 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) if (mod == 3) { goto illegal_op; } - a0 = tcg_temp_local_new(); - t0 = tcg_temp_local_new(); + a0 = s->A0; + t0 = s->T0; label1 = gen_new_label(); - tcg_gen_mov_tl(a0, s->A0); - tcg_gen_mov_tl(t0, s->T0); - gen_set_label(label1); t1 = tcg_temp_new(); t2 = tcg_temp_new(); @@ -3460,13 +3428,9 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_neg_tl(t1, t0); tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1, s->mem_index, ot | MO_LE); - tcg_temp_free(t1); tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1); - tcg_temp_free(t2); - tcg_temp_free(a0); tcg_gen_neg_tl(s->T0, t0); - tcg_temp_free(t0); } else { tcg_gen_neg_tl(s->T0, s->T0); if (mod != 3) { @@ -3779,7 +3743,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); break; default: - tcg_abort(); + g_assert_not_reached(); } break; case 0x99: /* CDQ/CWD */ @@ -3804,7 +3768,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); break; default: - tcg_abort(); + g_assert_not_reached(); } break; case 0x1af: /* imul Gv, Ev */ @@ -3952,9 +3916,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_mov_tl(s->cc_srcT, cmpv); tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv); set_cc_op(s, CC_OP_SUBB + ot); - tcg_temp_free(oldv); - tcg_temp_free(newv); - tcg_temp_free(cmpv); } break; case 0x1c7: /* cmpxchg8b */ @@ -4403,9 +4364,8 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_v_reg(s, ot, s->T1, reg); if (shift) { - TCGv imm = tcg_const_tl(x86_ldub_code(env, s)); + TCGv imm = tcg_constant_tl(x86_ldub_code(env, s)); gen_shiftd_rm_T1(s, ot, opreg, op, imm); - tcg_temp_free(imm); } else { gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]); } @@ -4563,7 +4523,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) break; case 0x0c: /* fldenv mem */ gen_helper_fldenv(cpu_env, s->A0, - tcg_const_i32(dflag - 1)); + tcg_constant_i32(dflag - 1)); update_fip = update_fdp = false; break; case 0x0d: /* fldcw mem */ @@ -4574,7 +4534,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) break; case 0x0e: /* fnstenv mem */ gen_helper_fstenv(cpu_env, s->A0, - tcg_const_i32(dflag - 1)); + tcg_constant_i32(dflag - 1)); update_fip = update_fdp = false; break; case 0x0f: /* fnstcw mem */ @@ -4592,12 +4552,12 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) break; case 0x2c: /* frstor mem */ gen_helper_frstor(cpu_env, s->A0, - tcg_const_i32(dflag - 1)); + tcg_constant_i32(dflag - 1)); update_fip = update_fdp = false; break; case 0x2e: /* fnsave mem */ gen_helper_fsave(cpu_env, s->A0, - tcg_const_i32(dflag - 1)); + tcg_constant_i32(dflag - 1)); update_fip = update_fdp = false; break; case 0x2f: /* fnstsw mem */ @@ -4639,7 +4599,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_st_tl(last_addr, cpu_env, offsetof(CPUX86State, fpdp)); } - tcg_temp_free(last_addr); } else { /* register float ops */ opreg = rm; @@ -4648,12 +4607,12 @@ static 
bool disas_insn(DisasContext *s, CPUState *cpu) case 0x08: /* fld sti */ gen_helper_fpush(cpu_env); gen_helper_fmov_ST0_STN(cpu_env, - tcg_const_i32((opreg + 1) & 7)); + tcg_constant_i32((opreg + 1) & 7)); break; case 0x09: /* fxchg sti */ case 0x29: /* fxchg4 sti, undocumented op */ case 0x39: /* fxchg7 sti, undocumented op */ - gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg)); + gen_helper_fxchg_ST0_STN(cpu_env, tcg_constant_i32(opreg)); break; case 0x0a: /* grp d9/2 */ switch (rm) { @@ -4793,27 +4752,27 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) } } else { gen_helper_fmov_FT0_STN(cpu_env, - tcg_const_i32(opreg)); + tcg_constant_i32(opreg)); gen_helper_fp_arith_ST0_FT0(op1); } } break; case 0x02: /* fcom */ case 0x22: /* fcom2, undocumented op */ - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); gen_helper_fcom_ST0_FT0(cpu_env); break; case 0x03: /* fcomp */ case 0x23: /* fcomp3, undocumented op */ case 0x32: /* fcomp5, undocumented op */ - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); gen_helper_fcom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); break; case 0x15: /* da/5 */ switch (rm) { case 1: /* fucompp */ - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1)); gen_helper_fucom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); gen_helper_fpop(cpu_env); @@ -4847,7 +4806,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); gen_helper_fucomi_ST0_FT0(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; @@ -4856,36 +4815,36 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); gen_helper_fcomi_ST0_FT0(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x28: /* ffree sti */ - gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); + gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg)); break; case 0x2a: /* fst sti */ - gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); + gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg)); break; case 0x2b: /* fstp sti */ case 0x0b: /* fstp1 sti, undocumented op */ case 0x3a: /* fstp8 sti, undocumented op */ case 0x3b: /* fstp9 sti, undocumented op */ - gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); + gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg)); gen_helper_fpop(cpu_env); break; case 0x2c: /* fucom st(i) */ - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); gen_helper_fucom_ST0_FT0(cpu_env); break; case 0x2d: /* fucomp st(i) */ - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); gen_helper_fucom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); break; case 0x33: /* de/3 */ switch (rm) { case 1: /* fcompp */ - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1)); gen_helper_fcom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); gen_helper_fpop(cpu_env); @@ -4895,7 +4854,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) } break; case 0x38: /* ffreep sti, undocumented op */ - gen_helper_ffree_STN(cpu_env, 
tcg_const_i32(opreg)); + gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg)); gen_helper_fpop(cpu_env); break; case 0x3c: /* df/4 */ @@ -4914,7 +4873,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); gen_helper_fucomi_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); set_cc_op(s, CC_OP_EFLAGS); @@ -4924,7 +4883,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); gen_helper_fcomi_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); set_cc_op(s, CC_OP_EFLAGS); @@ -4947,7 +4906,8 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); l1 = gen_new_label(); gen_jcc1_noeob(s, op1, l1); - gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg)); + gen_helper_fmov_ST0_STN(cpu_env, + tcg_constant_i32(opreg)); gen_set_label(l1); } break; @@ -5153,8 +5113,8 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) if (PE(s) && !VM86(s)) { gen_update_cc_op(s); gen_update_eip_cur(s); - gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1), - tcg_const_i32(val)); + gen_helper_lret_protected(cpu_env, tcg_constant_i32(dflag - 1), + tcg_constant_i32(val)); } else { gen_stack_A0(s); /* pop offset */ @@ -5181,7 +5141,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) if (!check_vm86_iopl(s)) { break; } - gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); + gen_helper_iret_real(cpu_env, tcg_constant_i32(dflag - 1)); } else { gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1), eip_next_i32(s)); @@ -5287,52 +5247,19 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) case 0x9d: /* popf */ gen_svm_check_intercept(s, SVM_EXIT_POPF); if (check_vm86_iopl(s)) { - ot = gen_pop_T0(s); + int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK; + if (CPL(s) == 0) { - if (dflag != MO_16) { - gen_helper_write_eflags(cpu_env, s->T0, - tcg_const_i32((TF_MASK | AC_MASK | - ID_MASK | NT_MASK | - IF_MASK | - IOPL_MASK))); - } else { - gen_helper_write_eflags(cpu_env, s->T0, - tcg_const_i32((TF_MASK | AC_MASK | - ID_MASK | NT_MASK | - IF_MASK | IOPL_MASK) - & 0xffff)); - } - } else { - if (CPL(s) <= IOPL(s)) { - if (dflag != MO_16) { - gen_helper_write_eflags(cpu_env, s->T0, - tcg_const_i32((TF_MASK | - AC_MASK | - ID_MASK | - NT_MASK | - IF_MASK))); - } else { - gen_helper_write_eflags(cpu_env, s->T0, - tcg_const_i32((TF_MASK | - AC_MASK | - ID_MASK | - NT_MASK | - IF_MASK) - & 0xffff)); - } - } else { - if (dflag != MO_16) { - gen_helper_write_eflags(cpu_env, s->T0, - tcg_const_i32((TF_MASK | AC_MASK | - ID_MASK | NT_MASK))); - } else { - gen_helper_write_eflags(cpu_env, s->T0, - tcg_const_i32((TF_MASK | AC_MASK | - ID_MASK | NT_MASK) - & 0xffff)); - } - } + mask |= IF_MASK | IOPL_MASK; + } else if (CPL(s) <= IOPL(s)) { + mask |= IF_MASK; } + if (dflag == MO_16) { + mask &= 0xffff; + } + + ot = gen_pop_T0(s); + gen_helper_write_eflags(cpu_env, s->T0, tcg_constant_i32(mask)); gen_pop_update(s, ot); set_cc_op(s, CC_OP_EFLAGS); /* abort translation because TF/AC flag may change */ @@ -5603,7 +5530,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) if (val == 0) { gen_exception(s, EXCP00_DIVZ); } else { - gen_helper_aam(cpu_env, tcg_const_i32(val)); + gen_helper_aam(cpu_env, tcg_constant_i32(val)); set_cc_op(s, 
CC_OP_LOGICB); } break; @@ -5611,7 +5538,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) if (CODE64(s)) goto illegal_op; val = x86_ldub_code(env, s); - gen_helper_aad(cpu_env, tcg_const_i32(val)); + gen_helper_aad(cpu_env, tcg_constant_i32(val)); set_cc_op(s, CC_OP_LOGICB); break; /************************/ @@ -5792,7 +5719,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) if (!PE(s)) { gen_exception_gpf(s); } else { - gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1)); + gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1)); s->base.is_jmp = DISAS_EOB_ONLY; } break; @@ -5811,7 +5738,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) if (!PE(s)) { gen_exception_gpf(s); } else { - gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1)); + gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1)); /* condition codes are modified only in long mode */ if (LMA(s)) { set_cc_op(s, CC_OP_EFLAGS); @@ -6017,7 +5944,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) } gen_update_cc_op(s); gen_update_eip_cur(s); - gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1), + gen_helper_vmrun(cpu_env, tcg_constant_i32(s->aflag - 1), cur_insn_len_i32(s)); tcg_gen_exit_tb(NULL, 0); s->base.is_jmp = DISAS_NORETURN; @@ -6041,7 +5968,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) } gen_update_cc_op(s); gen_update_eip_cur(s); - gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1)); + gen_helper_vmload(cpu_env, tcg_constant_i32(s->aflag - 1)); break; case 0xdb: /* VMSAVE */ @@ -6053,7 +5980,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) } gen_update_cc_op(s); gen_update_eip_cur(s); - gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1)); + gen_helper_vmsave(cpu_env, tcg_constant_i32(s->aflag - 1)); break; case 0xdc: /* STGI */ @@ -6268,13 +6195,13 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) #endif { TCGLabel *label1; - TCGv t0, t1, t2, a0; + TCGv t0, t1, t2; if (!PE(s) || VM86(s)) goto illegal_op; - t0 = tcg_temp_local_new(); - t1 = tcg_temp_local_new(); - t2 = tcg_temp_local_new(); + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + t2 = tcg_temp_new(); ot = MO_16; modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; @@ -6283,11 +6210,8 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, t0, s->A0); - a0 = tcg_temp_local_new(); - tcg_gen_mov_tl(a0, s->A0); } else { gen_op_mov_v_reg(s, ot, t0, rm); - a0 = NULL; } gen_op_mov_v_reg(s, ot, t1, reg); tcg_gen_andi_tl(s->tmp0, t0, 3); @@ -6300,17 +6224,13 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_movi_tl(t2, CC_Z); gen_set_label(label1); if (mod != 3) { - gen_op_st_v(s, ot, t0, a0); - tcg_temp_free(a0); + gen_op_st_v(s, ot, t0, s->A0); } else { gen_op_mov_reg_v(s, ot, rm, t0); } gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } break; case 0x102: /* lar */ @@ -6324,7 +6244,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | REX_R(s); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); - t0 = tcg_temp_local_new(); + t0 = tcg_temp_new(); gen_update_cc_op(s); if (b == 0x102) { gen_helper_lar(t0, cpu_env, s->T0); @@ -6337,7 +6257,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_reg_v(s, ot, reg, t0); gen_set_label(label1); set_cc_op(s, CC_OP_EFLAGS); - tcg_temp_free(t0); } break; case 
0x118: @@ -6382,7 +6301,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) TCGv_i64 notu = tcg_temp_new_i64(); tcg_gen_not_i64(notu, cpu_bndu[reg]); gen_bndck(env, s, modrm, TCG_COND_GTU, notu); - tcg_temp_free_i64(notu); } else if (prefixes & PREFIX_DATA) { /* bndmov -- from reg/mem */ if (reg >= 4 || s->aflag == MO_16) { @@ -7072,7 +6990,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->tmp2_i32 = tcg_temp_new_i32(); dc->tmp3_i32 = tcg_temp_new_i32(); dc->tmp4 = tcg_temp_new(); - dc->cc_srcT = tcg_temp_local_new(); + dc->cc_srcT = tcg_temp_new(); } static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu) @@ -7085,7 +7003,7 @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) target_ulong pc_arg = dc->base.pc_next; dc->prev_insn_end = tcg_last_op(); - if (TARGET_TB_PCREL) { + if (tb_cflags(dcbase->tb) & CF_PCREL) { pc_arg -= dc->cs_base; pc_arg &= ~TARGET_PAGE_MASK; } @@ -7178,7 +7096,7 @@ static const TranslatorOps i386_tr_ops = { }; /* generate intermediate code for basic block 'tb'. */ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index e738d83e81..52af81683c 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -12,14 +12,14 @@ #include "cpu.h" #include "exec/address-spaces.h" #include "exec/ioport.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/accel.h" #include "sysemu/whpx.h" #include "sysemu/cpus.h" #include "sysemu/runstate.h" #include "qemu/main-loop.h" #include "hw/boards.h" -#include "hw/i386/ioapic.h" +#include "hw/intc/ioapic.h" #include "hw/i386/apic_internal.h" #include "qemu/error-report.h" #include "qapi/error.h" diff --git a/target/i386/whpx/whpx-apic.c b/target/i386/whpx/whpx-apic.c index c15df35ad6..8710e37567 100644 --- a/target/i386/whpx/whpx-apic.c +++ b/target/i386/whpx/whpx-apic.c @@ -11,6 +11,7 @@ * See the COPYING file in the top-level directory. 
*/ #include "qemu/osdep.h" +#include "qemu/error-report.h" #include "cpu.h" #include "hw/i386/apic_internal.h" #include "hw/i386/apic-msidef.h" diff --git a/target/loongarch/cpu-param.h b/target/loongarch/cpu-param.h index 414d8fff46..1265dc7cb5 100644 --- a/target/loongarch/cpu-param.h +++ b/target/loongarch/cpu-param.h @@ -13,6 +13,5 @@ #define TARGET_VIRT_ADDR_SPACE_BITS 48 #define TARGET_PAGE_BITS 14 -#define NB_MMU_MODES 5 #endif diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index 290ab4d526..ad93ecac92 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -12,12 +12,12 @@ #include "qemu/module.h" #include "sysemu/qtest.h" #include "exec/exec-all.h" -#include "qapi/qapi-commands-machine-target.h" #include "cpu.h" #include "internals.h" #include "fpu/softfloat-helpers.h" #include "cpu-csr.h" #include "sysemu/reset.h" +#include "tcg/tcg.h" const char * const regnames[32] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", @@ -52,6 +52,7 @@ static const char * const excp_names[] = { [EXCCODE_FPE] = "Floating Point Exception", [EXCCODE_DBP] = "Debug breakpoint", [EXCCODE_BCE] = "Bound Check Exception", + [EXCCODE_SXD] = "128 bit vector instructions Disable exception", }; const char *loongarch_exception_name(int32_t exception) @@ -186,9 +187,10 @@ static void loongarch_cpu_do_interrupt(CPUState *cs) case EXCCODE_IPE: case EXCCODE_FPD: case EXCCODE_FPE: - case EXCCODE_BCE: + case EXCCODE_SXD: env->CSR_BADV = env->pc; QEMU_FALLTHROUGH; + case EXCCODE_BCE: case EXCCODE_ADEM: case EXCCODE_PIL: case EXCCODE_PIS: @@ -321,7 +323,8 @@ static void loongarch_cpu_synchronize_from_tb(CPUState *cs, LoongArchCPU *cpu = LOONGARCH_CPU(cs); CPULoongArchState *env = &cpu->env; - env->pc = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + env->pc = tb->pc; } static void loongarch_restore_state_to_opc(CPUState *cs, @@ -385,6 +388,7 @@ static void loongarch_la464_initfn(Object *obj) data = FIELD_DP32(data, CPUCFG2, FP_SP, 1); data = FIELD_DP32(data, CPUCFG2, FP_DP, 1); data = FIELD_DP32(data, CPUCFG2, FP_VER, 1); + data = FIELD_DP32(data, CPUCFG2, LSX, 1), data = FIELD_DP32(data, CPUCFG2, LLFTP, 1); data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1); data = FIELD_DP32(data, CPUCFG2, LAM, 1); @@ -545,6 +549,8 @@ static void loongarch_qemu_write(void *opaque, hwaddr addr, static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size) { switch (addr) { + case VERSION_REG: + return 0x11ULL; case FEATURE_REG: return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI | 1ULL << IOCSRF_CSRIPI; @@ -599,7 +605,7 @@ static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model) oc = object_class_by_name(cpu_model); if (!oc) { - g_autofree char *typename + g_autofree char *typename = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); if (!oc) { @@ -653,7 +659,7 @@ void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) /* fpr */ if (flags & CPU_DUMP_FPU) { for (i = 0; i < 32; i++) { - qemu_fprintf(f, " %s %016" PRIx64, fregnames[i], env->fpr[i]); + qemu_fprintf(f, " %s %016" PRIx64, fregnames[i], env->fpr[i].vreg.D(0)); if ((i & 3) == 3) { qemu_fprintf(f, "\n"); } @@ -748,29 +754,3 @@ static const TypeInfo loongarch_cpu_type_infos[] = { }; DEFINE_TYPES(loongarch_cpu_type_infos) - -static void loongarch_cpu_add_definition(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - CpuDefinitionInfoList **cpu_list = user_data; - CpuDefinitionInfo *info = g_new0(CpuDefinitionInfo, 1); - const char *typename = 
object_class_get_name(oc); - - info->name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_LOONGARCH_CPU)); - info->q_typename = g_strdup(typename); - - QAPI_LIST_PREPEND(*cpu_list, info); -} - -CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) -{ - CpuDefinitionInfoList *cpu_list = NULL; - GSList *list; - - list = object_class_get_list(TYPE_LOONGARCH_CPU, false); - g_slist_foreach(list, loongarch_cpu_add_definition, &cpu_list); - g_slist_free(list); - - return cpu_list; -} diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h index e35cf65597..1f37e36b7c 100644 --- a/target/loongarch/cpu.h +++ b/target/loongarch/cpu.h @@ -8,12 +8,14 @@ #ifndef LOONGARCH_CPU_H #define LOONGARCH_CPU_H +#include "qemu/int128.h" #include "exec/cpu-defs.h" #include "fpu/softfloat-types.h" #include "hw/registerfields.h" #include "qemu/timer.h" +#ifndef CONFIG_USER_ONLY #include "exec/memory.h" -#include "hw/sysbus.h" +#endif #include "cpu-csr.h" #define IOCSRF_TEMP 0 @@ -27,6 +29,7 @@ #define IOCSRF_GMOD 9 #define IOCSRF_VM 11 +#define VERSION_REG 0x0 #define FEATURE_REG 0x8 #define VENDOR_REG 0x10 #define CPUNAME_REG 0x20 @@ -52,6 +55,10 @@ FIELD(FCSR0, CAUSE, 24, 5) do { \ (REG) = FIELD_DP32(REG, FCSR0, CAUSE, V); \ } while (0) +#define UPDATE_FP_CAUSE(REG, V) \ + do { \ + (REG) |= FIELD_DP32(0, FCSR0, CAUSE, V); \ + } while (0) #define GET_FP_ENABLES(REG) FIELD_EX32(REG, FCSR0, ENABLES) #define SET_FP_ENABLES(REG, V) \ @@ -239,6 +246,24 @@ FIELD(TLB_MISC, ASID, 1, 10) FIELD(TLB_MISC, VPPN, 13, 35) FIELD(TLB_MISC, PS, 48, 6) +#define LSX_LEN (128) +typedef union VReg { + int8_t B[LSX_LEN / 8]; + int16_t H[LSX_LEN / 16]; + int32_t W[LSX_LEN / 32]; + int64_t D[LSX_LEN / 64]; + uint8_t UB[LSX_LEN / 8]; + uint16_t UH[LSX_LEN / 16]; + uint32_t UW[LSX_LEN / 32]; + uint64_t UD[LSX_LEN / 64]; + Int128 Q[LSX_LEN / 128]; +}VReg; + +typedef union fpr_t fpr_t; +union fpr_t { + VReg vreg; +}; + struct LoongArchTLB { uint64_t tlb_misc; /* Fields corresponding to CSR_TLBELO0/1 */ @@ -251,7 +276,7 @@ typedef struct CPUArchState { uint64_t gpr[32]; uint64_t pc; - uint64_t fpr[32]; + fpr_t fpr[32]; float_status fp_status; bool cf[8]; @@ -398,6 +423,7 @@ static inline int cpu_mmu_index(CPULoongArchState *env, bool ifetch) #define HW_FLAGS_PLV_MASK R_CSR_CRMD_PLV_MASK /* 0x03 */ #define HW_FLAGS_CRMD_PG R_CSR_CRMD_PG_MASK /* 0x10 */ #define HW_FLAGS_EUEN_FPE 0x04 +#define HW_FLAGS_EUEN_SXE 0x08 static inline void cpu_get_tb_cpu_state(CPULoongArchState *env, target_ulong *pc, @@ -408,6 +434,7 @@ static inline void cpu_get_tb_cpu_state(CPULoongArchState *env, *cs_base = 0; *flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK); *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE; + *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE; } void loongarch_cpu_list(void); diff --git a/target/loongarch/csr_helper.c b/target/loongarch/csr_helper.c index 7e02787895..6526367946 100644 --- a/target/loongarch/csr_helper.c +++ b/target/loongarch/csr_helper.c @@ -15,7 +15,6 @@ #include "exec/cpu_ldst.h" #include "hw/irq.h" #include "cpu-csr.h" -#include "tcg/tcg-ldst.h" target_ulong helper_csrrd_pgd(CPULoongArchState *env) { diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c index 2e93e77e0d..5c402d944d 100644 --- a/target/loongarch/disas.c +++ b/target/loongarch/disas.c @@ -21,11 +21,21 @@ static inline int plus_1(DisasContext *ctx, int x) return x + 1; } +static inline int shl_1(DisasContext *ctx, int x) +{ + return x << 1; +} + static inline int 
shl_2(DisasContext *ctx, int x) { return x << 2; } +static inline int shl_3(DisasContext *ctx, int x) +{ + return x << 3; +} + #define CSR_NAME(REG) \ [LOONGARCH_CSR_##REG] = (#REG) @@ -784,3 +794,904 @@ PCADD_INSN(pcaddi) PCADD_INSN(pcalau12i) PCADD_INSN(pcaddu12i) PCADD_INSN(pcaddu18i) + +#define INSN_LSX(insn, type) \ +static bool trans_##insn(DisasContext *ctx, arg_##type * a) \ +{ \ + output_##type(ctx, a, #insn); \ + return true; \ +} + +static void output_cv(DisasContext *ctx, arg_cv *a, + const char *mnemonic) +{ + output(ctx, mnemonic, "fcc%d, v%d", a->cd, a->vj); +} + +static void output_vvv(DisasContext *ctx, arg_vvv *a, const char *mnemonic) +{ + output(ctx, mnemonic, "v%d, v%d, v%d", a->vd, a->vj, a->vk); +} + +static void output_vv_i(DisasContext *ctx, arg_vv_i *a, const char *mnemonic) +{ + output(ctx, mnemonic, "v%d, v%d, 0x%x", a->vd, a->vj, a->imm); +} + +static void output_vv(DisasContext *ctx, arg_vv *a, const char *mnemonic) +{ + output(ctx, mnemonic, "v%d, v%d", a->vd, a->vj); +} + +static void output_vvvv(DisasContext *ctx, arg_vvvv *a, const char *mnemonic) +{ + output(ctx, mnemonic, "v%d, v%d, v%d, v%d", a->vd, a->vj, a->vk, a->va); +} + +static void output_vr_i(DisasContext *ctx, arg_vr_i *a, const char *mnemonic) +{ + output(ctx, mnemonic, "v%d, r%d, 0x%x", a->vd, a->rj, a->imm); +} + +static void output_vr_ii(DisasContext *ctx, arg_vr_ii *a, const char *mnemonic) +{ + output(ctx, mnemonic, "v%d, r%d, 0x%x, 0x%x", a->vd, a->rj, a->imm, a->imm2); +} + +static void output_rv_i(DisasContext *ctx, arg_rv_i *a, const char *mnemonic) +{ + output(ctx, mnemonic, "r%d, v%d, 0x%x", a->rd, a->vj, a->imm); +} + +static void output_vr(DisasContext *ctx, arg_vr *a, const char *mnemonic) +{ + output(ctx, mnemonic, "v%d, r%d", a->vd, a->rj); +} + +static void output_vvr(DisasContext *ctx, arg_vvr *a, const char *mnemonic) +{ + output(ctx, mnemonic, "v%d, v%d, r%d", a->vd, a->vj, a->rk); +} + +static void output_vrr(DisasContext *ctx, arg_vrr *a, const char *mnemonic) +{ + output(ctx, mnemonic, "v%d, r%d, r%d", a->vd, a->rj, a->rk); +} + +static void output_v_i(DisasContext *ctx, arg_v_i *a, const char *mnemonic) +{ + output(ctx, mnemonic, "v%d, 0x%x", a->vd, a->imm); +} + +INSN_LSX(vadd_b, vvv) +INSN_LSX(vadd_h, vvv) +INSN_LSX(vadd_w, vvv) +INSN_LSX(vadd_d, vvv) +INSN_LSX(vadd_q, vvv) +INSN_LSX(vsub_b, vvv) +INSN_LSX(vsub_h, vvv) +INSN_LSX(vsub_w, vvv) +INSN_LSX(vsub_d, vvv) +INSN_LSX(vsub_q, vvv) + +INSN_LSX(vaddi_bu, vv_i) +INSN_LSX(vaddi_hu, vv_i) +INSN_LSX(vaddi_wu, vv_i) +INSN_LSX(vaddi_du, vv_i) +INSN_LSX(vsubi_bu, vv_i) +INSN_LSX(vsubi_hu, vv_i) +INSN_LSX(vsubi_wu, vv_i) +INSN_LSX(vsubi_du, vv_i) + +INSN_LSX(vneg_b, vv) +INSN_LSX(vneg_h, vv) +INSN_LSX(vneg_w, vv) +INSN_LSX(vneg_d, vv) + +INSN_LSX(vsadd_b, vvv) +INSN_LSX(vsadd_h, vvv) +INSN_LSX(vsadd_w, vvv) +INSN_LSX(vsadd_d, vvv) +INSN_LSX(vsadd_bu, vvv) +INSN_LSX(vsadd_hu, vvv) +INSN_LSX(vsadd_wu, vvv) +INSN_LSX(vsadd_du, vvv) +INSN_LSX(vssub_b, vvv) +INSN_LSX(vssub_h, vvv) +INSN_LSX(vssub_w, vvv) +INSN_LSX(vssub_d, vvv) +INSN_LSX(vssub_bu, vvv) +INSN_LSX(vssub_hu, vvv) +INSN_LSX(vssub_wu, vvv) +INSN_LSX(vssub_du, vvv) + +INSN_LSX(vhaddw_h_b, vvv) +INSN_LSX(vhaddw_w_h, vvv) +INSN_LSX(vhaddw_d_w, vvv) +INSN_LSX(vhaddw_q_d, vvv) +INSN_LSX(vhaddw_hu_bu, vvv) +INSN_LSX(vhaddw_wu_hu, vvv) +INSN_LSX(vhaddw_du_wu, vvv) +INSN_LSX(vhaddw_qu_du, vvv) +INSN_LSX(vhsubw_h_b, vvv) +INSN_LSX(vhsubw_w_h, vvv) +INSN_LSX(vhsubw_d_w, vvv) +INSN_LSX(vhsubw_q_d, vvv) +INSN_LSX(vhsubw_hu_bu, vvv) +INSN_LSX(vhsubw_wu_hu, vvv) 
+INSN_LSX(vhsubw_du_wu, vvv) +INSN_LSX(vhsubw_qu_du, vvv) + +INSN_LSX(vaddwev_h_b, vvv) +INSN_LSX(vaddwev_w_h, vvv) +INSN_LSX(vaddwev_d_w, vvv) +INSN_LSX(vaddwev_q_d, vvv) +INSN_LSX(vaddwod_h_b, vvv) +INSN_LSX(vaddwod_w_h, vvv) +INSN_LSX(vaddwod_d_w, vvv) +INSN_LSX(vaddwod_q_d, vvv) +INSN_LSX(vsubwev_h_b, vvv) +INSN_LSX(vsubwev_w_h, vvv) +INSN_LSX(vsubwev_d_w, vvv) +INSN_LSX(vsubwev_q_d, vvv) +INSN_LSX(vsubwod_h_b, vvv) +INSN_LSX(vsubwod_w_h, vvv) +INSN_LSX(vsubwod_d_w, vvv) +INSN_LSX(vsubwod_q_d, vvv) + +INSN_LSX(vaddwev_h_bu, vvv) +INSN_LSX(vaddwev_w_hu, vvv) +INSN_LSX(vaddwev_d_wu, vvv) +INSN_LSX(vaddwev_q_du, vvv) +INSN_LSX(vaddwod_h_bu, vvv) +INSN_LSX(vaddwod_w_hu, vvv) +INSN_LSX(vaddwod_d_wu, vvv) +INSN_LSX(vaddwod_q_du, vvv) +INSN_LSX(vsubwev_h_bu, vvv) +INSN_LSX(vsubwev_w_hu, vvv) +INSN_LSX(vsubwev_d_wu, vvv) +INSN_LSX(vsubwev_q_du, vvv) +INSN_LSX(vsubwod_h_bu, vvv) +INSN_LSX(vsubwod_w_hu, vvv) +INSN_LSX(vsubwod_d_wu, vvv) +INSN_LSX(vsubwod_q_du, vvv) + +INSN_LSX(vaddwev_h_bu_b, vvv) +INSN_LSX(vaddwev_w_hu_h, vvv) +INSN_LSX(vaddwev_d_wu_w, vvv) +INSN_LSX(vaddwev_q_du_d, vvv) +INSN_LSX(vaddwod_h_bu_b, vvv) +INSN_LSX(vaddwod_w_hu_h, vvv) +INSN_LSX(vaddwod_d_wu_w, vvv) +INSN_LSX(vaddwod_q_du_d, vvv) + +INSN_LSX(vavg_b, vvv) +INSN_LSX(vavg_h, vvv) +INSN_LSX(vavg_w, vvv) +INSN_LSX(vavg_d, vvv) +INSN_LSX(vavg_bu, vvv) +INSN_LSX(vavg_hu, vvv) +INSN_LSX(vavg_wu, vvv) +INSN_LSX(vavg_du, vvv) +INSN_LSX(vavgr_b, vvv) +INSN_LSX(vavgr_h, vvv) +INSN_LSX(vavgr_w, vvv) +INSN_LSX(vavgr_d, vvv) +INSN_LSX(vavgr_bu, vvv) +INSN_LSX(vavgr_hu, vvv) +INSN_LSX(vavgr_wu, vvv) +INSN_LSX(vavgr_du, vvv) + +INSN_LSX(vabsd_b, vvv) +INSN_LSX(vabsd_h, vvv) +INSN_LSX(vabsd_w, vvv) +INSN_LSX(vabsd_d, vvv) +INSN_LSX(vabsd_bu, vvv) +INSN_LSX(vabsd_hu, vvv) +INSN_LSX(vabsd_wu, vvv) +INSN_LSX(vabsd_du, vvv) + +INSN_LSX(vadda_b, vvv) +INSN_LSX(vadda_h, vvv) +INSN_LSX(vadda_w, vvv) +INSN_LSX(vadda_d, vvv) + +INSN_LSX(vmax_b, vvv) +INSN_LSX(vmax_h, vvv) +INSN_LSX(vmax_w, vvv) +INSN_LSX(vmax_d, vvv) +INSN_LSX(vmin_b, vvv) +INSN_LSX(vmin_h, vvv) +INSN_LSX(vmin_w, vvv) +INSN_LSX(vmin_d, vvv) +INSN_LSX(vmax_bu, vvv) +INSN_LSX(vmax_hu, vvv) +INSN_LSX(vmax_wu, vvv) +INSN_LSX(vmax_du, vvv) +INSN_LSX(vmin_bu, vvv) +INSN_LSX(vmin_hu, vvv) +INSN_LSX(vmin_wu, vvv) +INSN_LSX(vmin_du, vvv) +INSN_LSX(vmaxi_b, vv_i) +INSN_LSX(vmaxi_h, vv_i) +INSN_LSX(vmaxi_w, vv_i) +INSN_LSX(vmaxi_d, vv_i) +INSN_LSX(vmini_b, vv_i) +INSN_LSX(vmini_h, vv_i) +INSN_LSX(vmini_w, vv_i) +INSN_LSX(vmini_d, vv_i) +INSN_LSX(vmaxi_bu, vv_i) +INSN_LSX(vmaxi_hu, vv_i) +INSN_LSX(vmaxi_wu, vv_i) +INSN_LSX(vmaxi_du, vv_i) +INSN_LSX(vmini_bu, vv_i) +INSN_LSX(vmini_hu, vv_i) +INSN_LSX(vmini_wu, vv_i) +INSN_LSX(vmini_du, vv_i) + +INSN_LSX(vmul_b, vvv) +INSN_LSX(vmul_h, vvv) +INSN_LSX(vmul_w, vvv) +INSN_LSX(vmul_d, vvv) +INSN_LSX(vmuh_b, vvv) +INSN_LSX(vmuh_h, vvv) +INSN_LSX(vmuh_w, vvv) +INSN_LSX(vmuh_d, vvv) +INSN_LSX(vmuh_bu, vvv) +INSN_LSX(vmuh_hu, vvv) +INSN_LSX(vmuh_wu, vvv) +INSN_LSX(vmuh_du, vvv) + +INSN_LSX(vmulwev_h_b, vvv) +INSN_LSX(vmulwev_w_h, vvv) +INSN_LSX(vmulwev_d_w, vvv) +INSN_LSX(vmulwev_q_d, vvv) +INSN_LSX(vmulwod_h_b, vvv) +INSN_LSX(vmulwod_w_h, vvv) +INSN_LSX(vmulwod_d_w, vvv) +INSN_LSX(vmulwod_q_d, vvv) +INSN_LSX(vmulwev_h_bu, vvv) +INSN_LSX(vmulwev_w_hu, vvv) +INSN_LSX(vmulwev_d_wu, vvv) +INSN_LSX(vmulwev_q_du, vvv) +INSN_LSX(vmulwod_h_bu, vvv) +INSN_LSX(vmulwod_w_hu, vvv) +INSN_LSX(vmulwod_d_wu, vvv) +INSN_LSX(vmulwod_q_du, vvv) +INSN_LSX(vmulwev_h_bu_b, vvv) +INSN_LSX(vmulwev_w_hu_h, vvv) +INSN_LSX(vmulwev_d_wu_w, vvv) +INSN_LSX(vmulwev_q_du_d, 
vvv) +INSN_LSX(vmulwod_h_bu_b, vvv) +INSN_LSX(vmulwod_w_hu_h, vvv) +INSN_LSX(vmulwod_d_wu_w, vvv) +INSN_LSX(vmulwod_q_du_d, vvv) + +INSN_LSX(vmadd_b, vvv) +INSN_LSX(vmadd_h, vvv) +INSN_LSX(vmadd_w, vvv) +INSN_LSX(vmadd_d, vvv) +INSN_LSX(vmsub_b, vvv) +INSN_LSX(vmsub_h, vvv) +INSN_LSX(vmsub_w, vvv) +INSN_LSX(vmsub_d, vvv) + +INSN_LSX(vmaddwev_h_b, vvv) +INSN_LSX(vmaddwev_w_h, vvv) +INSN_LSX(vmaddwev_d_w, vvv) +INSN_LSX(vmaddwev_q_d, vvv) +INSN_LSX(vmaddwod_h_b, vvv) +INSN_LSX(vmaddwod_w_h, vvv) +INSN_LSX(vmaddwod_d_w, vvv) +INSN_LSX(vmaddwod_q_d, vvv) +INSN_LSX(vmaddwev_h_bu, vvv) +INSN_LSX(vmaddwev_w_hu, vvv) +INSN_LSX(vmaddwev_d_wu, vvv) +INSN_LSX(vmaddwev_q_du, vvv) +INSN_LSX(vmaddwod_h_bu, vvv) +INSN_LSX(vmaddwod_w_hu, vvv) +INSN_LSX(vmaddwod_d_wu, vvv) +INSN_LSX(vmaddwod_q_du, vvv) +INSN_LSX(vmaddwev_h_bu_b, vvv) +INSN_LSX(vmaddwev_w_hu_h, vvv) +INSN_LSX(vmaddwev_d_wu_w, vvv) +INSN_LSX(vmaddwev_q_du_d, vvv) +INSN_LSX(vmaddwod_h_bu_b, vvv) +INSN_LSX(vmaddwod_w_hu_h, vvv) +INSN_LSX(vmaddwod_d_wu_w, vvv) +INSN_LSX(vmaddwod_q_du_d, vvv) + +INSN_LSX(vdiv_b, vvv) +INSN_LSX(vdiv_h, vvv) +INSN_LSX(vdiv_w, vvv) +INSN_LSX(vdiv_d, vvv) +INSN_LSX(vdiv_bu, vvv) +INSN_LSX(vdiv_hu, vvv) +INSN_LSX(vdiv_wu, vvv) +INSN_LSX(vdiv_du, vvv) +INSN_LSX(vmod_b, vvv) +INSN_LSX(vmod_h, vvv) +INSN_LSX(vmod_w, vvv) +INSN_LSX(vmod_d, vvv) +INSN_LSX(vmod_bu, vvv) +INSN_LSX(vmod_hu, vvv) +INSN_LSX(vmod_wu, vvv) +INSN_LSX(vmod_du, vvv) + +INSN_LSX(vsat_b, vv_i) +INSN_LSX(vsat_h, vv_i) +INSN_LSX(vsat_w, vv_i) +INSN_LSX(vsat_d, vv_i) +INSN_LSX(vsat_bu, vv_i) +INSN_LSX(vsat_hu, vv_i) +INSN_LSX(vsat_wu, vv_i) +INSN_LSX(vsat_du, vv_i) + +INSN_LSX(vexth_h_b, vv) +INSN_LSX(vexth_w_h, vv) +INSN_LSX(vexth_d_w, vv) +INSN_LSX(vexth_q_d, vv) +INSN_LSX(vexth_hu_bu, vv) +INSN_LSX(vexth_wu_hu, vv) +INSN_LSX(vexth_du_wu, vv) +INSN_LSX(vexth_qu_du, vv) + +INSN_LSX(vsigncov_b, vvv) +INSN_LSX(vsigncov_h, vvv) +INSN_LSX(vsigncov_w, vvv) +INSN_LSX(vsigncov_d, vvv) + +INSN_LSX(vmskltz_b, vv) +INSN_LSX(vmskltz_h, vv) +INSN_LSX(vmskltz_w, vv) +INSN_LSX(vmskltz_d, vv) +INSN_LSX(vmskgez_b, vv) +INSN_LSX(vmsknz_b, vv) + +INSN_LSX(vldi, v_i) + +INSN_LSX(vand_v, vvv) +INSN_LSX(vor_v, vvv) +INSN_LSX(vxor_v, vvv) +INSN_LSX(vnor_v, vvv) +INSN_LSX(vandn_v, vvv) +INSN_LSX(vorn_v, vvv) + +INSN_LSX(vandi_b, vv_i) +INSN_LSX(vori_b, vv_i) +INSN_LSX(vxori_b, vv_i) +INSN_LSX(vnori_b, vv_i) + +INSN_LSX(vsll_b, vvv) +INSN_LSX(vsll_h, vvv) +INSN_LSX(vsll_w, vvv) +INSN_LSX(vsll_d, vvv) +INSN_LSX(vslli_b, vv_i) +INSN_LSX(vslli_h, vv_i) +INSN_LSX(vslli_w, vv_i) +INSN_LSX(vslli_d, vv_i) + +INSN_LSX(vsrl_b, vvv) +INSN_LSX(vsrl_h, vvv) +INSN_LSX(vsrl_w, vvv) +INSN_LSX(vsrl_d, vvv) +INSN_LSX(vsrli_b, vv_i) +INSN_LSX(vsrli_h, vv_i) +INSN_LSX(vsrli_w, vv_i) +INSN_LSX(vsrli_d, vv_i) + +INSN_LSX(vsra_b, vvv) +INSN_LSX(vsra_h, vvv) +INSN_LSX(vsra_w, vvv) +INSN_LSX(vsra_d, vvv) +INSN_LSX(vsrai_b, vv_i) +INSN_LSX(vsrai_h, vv_i) +INSN_LSX(vsrai_w, vv_i) +INSN_LSX(vsrai_d, vv_i) + +INSN_LSX(vrotr_b, vvv) +INSN_LSX(vrotr_h, vvv) +INSN_LSX(vrotr_w, vvv) +INSN_LSX(vrotr_d, vvv) +INSN_LSX(vrotri_b, vv_i) +INSN_LSX(vrotri_h, vv_i) +INSN_LSX(vrotri_w, vv_i) +INSN_LSX(vrotri_d, vv_i) + +INSN_LSX(vsllwil_h_b, vv_i) +INSN_LSX(vsllwil_w_h, vv_i) +INSN_LSX(vsllwil_d_w, vv_i) +INSN_LSX(vextl_q_d, vv) +INSN_LSX(vsllwil_hu_bu, vv_i) +INSN_LSX(vsllwil_wu_hu, vv_i) +INSN_LSX(vsllwil_du_wu, vv_i) +INSN_LSX(vextl_qu_du, vv) + +INSN_LSX(vsrlr_b, vvv) +INSN_LSX(vsrlr_h, vvv) +INSN_LSX(vsrlr_w, vvv) +INSN_LSX(vsrlr_d, vvv) +INSN_LSX(vsrlri_b, vv_i) +INSN_LSX(vsrlri_h, vv_i) +INSN_LSX(vsrlri_w, 
vv_i) +INSN_LSX(vsrlri_d, vv_i) + +INSN_LSX(vsrar_b, vvv) +INSN_LSX(vsrar_h, vvv) +INSN_LSX(vsrar_w, vvv) +INSN_LSX(vsrar_d, vvv) +INSN_LSX(vsrari_b, vv_i) +INSN_LSX(vsrari_h, vv_i) +INSN_LSX(vsrari_w, vv_i) +INSN_LSX(vsrari_d, vv_i) + +INSN_LSX(vsrln_b_h, vvv) +INSN_LSX(vsrln_h_w, vvv) +INSN_LSX(vsrln_w_d, vvv) +INSN_LSX(vsran_b_h, vvv) +INSN_LSX(vsran_h_w, vvv) +INSN_LSX(vsran_w_d, vvv) + +INSN_LSX(vsrlni_b_h, vv_i) +INSN_LSX(vsrlni_h_w, vv_i) +INSN_LSX(vsrlni_w_d, vv_i) +INSN_LSX(vsrlni_d_q, vv_i) +INSN_LSX(vsrani_b_h, vv_i) +INSN_LSX(vsrani_h_w, vv_i) +INSN_LSX(vsrani_w_d, vv_i) +INSN_LSX(vsrani_d_q, vv_i) + +INSN_LSX(vsrlrn_b_h, vvv) +INSN_LSX(vsrlrn_h_w, vvv) +INSN_LSX(vsrlrn_w_d, vvv) +INSN_LSX(vsrarn_b_h, vvv) +INSN_LSX(vsrarn_h_w, vvv) +INSN_LSX(vsrarn_w_d, vvv) + +INSN_LSX(vsrlrni_b_h, vv_i) +INSN_LSX(vsrlrni_h_w, vv_i) +INSN_LSX(vsrlrni_w_d, vv_i) +INSN_LSX(vsrlrni_d_q, vv_i) +INSN_LSX(vsrarni_b_h, vv_i) +INSN_LSX(vsrarni_h_w, vv_i) +INSN_LSX(vsrarni_w_d, vv_i) +INSN_LSX(vsrarni_d_q, vv_i) + +INSN_LSX(vssrln_b_h, vvv) +INSN_LSX(vssrln_h_w, vvv) +INSN_LSX(vssrln_w_d, vvv) +INSN_LSX(vssran_b_h, vvv) +INSN_LSX(vssran_h_w, vvv) +INSN_LSX(vssran_w_d, vvv) +INSN_LSX(vssrln_bu_h, vvv) +INSN_LSX(vssrln_hu_w, vvv) +INSN_LSX(vssrln_wu_d, vvv) +INSN_LSX(vssran_bu_h, vvv) +INSN_LSX(vssran_hu_w, vvv) +INSN_LSX(vssran_wu_d, vvv) + +INSN_LSX(vssrlni_b_h, vv_i) +INSN_LSX(vssrlni_h_w, vv_i) +INSN_LSX(vssrlni_w_d, vv_i) +INSN_LSX(vssrlni_d_q, vv_i) +INSN_LSX(vssrani_b_h, vv_i) +INSN_LSX(vssrani_h_w, vv_i) +INSN_LSX(vssrani_w_d, vv_i) +INSN_LSX(vssrani_d_q, vv_i) +INSN_LSX(vssrlni_bu_h, vv_i) +INSN_LSX(vssrlni_hu_w, vv_i) +INSN_LSX(vssrlni_wu_d, vv_i) +INSN_LSX(vssrlni_du_q, vv_i) +INSN_LSX(vssrani_bu_h, vv_i) +INSN_LSX(vssrani_hu_w, vv_i) +INSN_LSX(vssrani_wu_d, vv_i) +INSN_LSX(vssrani_du_q, vv_i) + +INSN_LSX(vssrlrn_b_h, vvv) +INSN_LSX(vssrlrn_h_w, vvv) +INSN_LSX(vssrlrn_w_d, vvv) +INSN_LSX(vssrarn_b_h, vvv) +INSN_LSX(vssrarn_h_w, vvv) +INSN_LSX(vssrarn_w_d, vvv) +INSN_LSX(vssrlrn_bu_h, vvv) +INSN_LSX(vssrlrn_hu_w, vvv) +INSN_LSX(vssrlrn_wu_d, vvv) +INSN_LSX(vssrarn_bu_h, vvv) +INSN_LSX(vssrarn_hu_w, vvv) +INSN_LSX(vssrarn_wu_d, vvv) + +INSN_LSX(vssrlrni_b_h, vv_i) +INSN_LSX(vssrlrni_h_w, vv_i) +INSN_LSX(vssrlrni_w_d, vv_i) +INSN_LSX(vssrlrni_d_q, vv_i) +INSN_LSX(vssrlrni_bu_h, vv_i) +INSN_LSX(vssrlrni_hu_w, vv_i) +INSN_LSX(vssrlrni_wu_d, vv_i) +INSN_LSX(vssrlrni_du_q, vv_i) +INSN_LSX(vssrarni_b_h, vv_i) +INSN_LSX(vssrarni_h_w, vv_i) +INSN_LSX(vssrarni_w_d, vv_i) +INSN_LSX(vssrarni_d_q, vv_i) +INSN_LSX(vssrarni_bu_h, vv_i) +INSN_LSX(vssrarni_hu_w, vv_i) +INSN_LSX(vssrarni_wu_d, vv_i) +INSN_LSX(vssrarni_du_q, vv_i) + +INSN_LSX(vclo_b, vv) +INSN_LSX(vclo_h, vv) +INSN_LSX(vclo_w, vv) +INSN_LSX(vclo_d, vv) +INSN_LSX(vclz_b, vv) +INSN_LSX(vclz_h, vv) +INSN_LSX(vclz_w, vv) +INSN_LSX(vclz_d, vv) + +INSN_LSX(vpcnt_b, vv) +INSN_LSX(vpcnt_h, vv) +INSN_LSX(vpcnt_w, vv) +INSN_LSX(vpcnt_d, vv) + +INSN_LSX(vbitclr_b, vvv) +INSN_LSX(vbitclr_h, vvv) +INSN_LSX(vbitclr_w, vvv) +INSN_LSX(vbitclr_d, vvv) +INSN_LSX(vbitclri_b, vv_i) +INSN_LSX(vbitclri_h, vv_i) +INSN_LSX(vbitclri_w, vv_i) +INSN_LSX(vbitclri_d, vv_i) +INSN_LSX(vbitset_b, vvv) +INSN_LSX(vbitset_h, vvv) +INSN_LSX(vbitset_w, vvv) +INSN_LSX(vbitset_d, vvv) +INSN_LSX(vbitseti_b, vv_i) +INSN_LSX(vbitseti_h, vv_i) +INSN_LSX(vbitseti_w, vv_i) +INSN_LSX(vbitseti_d, vv_i) +INSN_LSX(vbitrev_b, vvv) +INSN_LSX(vbitrev_h, vvv) +INSN_LSX(vbitrev_w, vvv) +INSN_LSX(vbitrev_d, vvv) +INSN_LSX(vbitrevi_b, vv_i) +INSN_LSX(vbitrevi_h, vv_i) +INSN_LSX(vbitrevi_w, vv_i) 
+INSN_LSX(vbitrevi_d, vv_i) + +INSN_LSX(vfrstp_b, vvv) +INSN_LSX(vfrstp_h, vvv) +INSN_LSX(vfrstpi_b, vv_i) +INSN_LSX(vfrstpi_h, vv_i) + +INSN_LSX(vfadd_s, vvv) +INSN_LSX(vfadd_d, vvv) +INSN_LSX(vfsub_s, vvv) +INSN_LSX(vfsub_d, vvv) +INSN_LSX(vfmul_s, vvv) +INSN_LSX(vfmul_d, vvv) +INSN_LSX(vfdiv_s, vvv) +INSN_LSX(vfdiv_d, vvv) + +INSN_LSX(vfmadd_s, vvvv) +INSN_LSX(vfmadd_d, vvvv) +INSN_LSX(vfmsub_s, vvvv) +INSN_LSX(vfmsub_d, vvvv) +INSN_LSX(vfnmadd_s, vvvv) +INSN_LSX(vfnmadd_d, vvvv) +INSN_LSX(vfnmsub_s, vvvv) +INSN_LSX(vfnmsub_d, vvvv) + +INSN_LSX(vfmax_s, vvv) +INSN_LSX(vfmax_d, vvv) +INSN_LSX(vfmin_s, vvv) +INSN_LSX(vfmin_d, vvv) + +INSN_LSX(vfmaxa_s, vvv) +INSN_LSX(vfmaxa_d, vvv) +INSN_LSX(vfmina_s, vvv) +INSN_LSX(vfmina_d, vvv) + +INSN_LSX(vflogb_s, vv) +INSN_LSX(vflogb_d, vv) + +INSN_LSX(vfclass_s, vv) +INSN_LSX(vfclass_d, vv) + +INSN_LSX(vfsqrt_s, vv) +INSN_LSX(vfsqrt_d, vv) +INSN_LSX(vfrecip_s, vv) +INSN_LSX(vfrecip_d, vv) +INSN_LSX(vfrsqrt_s, vv) +INSN_LSX(vfrsqrt_d, vv) + +INSN_LSX(vfcvtl_s_h, vv) +INSN_LSX(vfcvth_s_h, vv) +INSN_LSX(vfcvtl_d_s, vv) +INSN_LSX(vfcvth_d_s, vv) +INSN_LSX(vfcvt_h_s, vvv) +INSN_LSX(vfcvt_s_d, vvv) + +INSN_LSX(vfrint_s, vv) +INSN_LSX(vfrint_d, vv) +INSN_LSX(vfrintrm_s, vv) +INSN_LSX(vfrintrm_d, vv) +INSN_LSX(vfrintrp_s, vv) +INSN_LSX(vfrintrp_d, vv) +INSN_LSX(vfrintrz_s, vv) +INSN_LSX(vfrintrz_d, vv) +INSN_LSX(vfrintrne_s, vv) +INSN_LSX(vfrintrne_d, vv) + +INSN_LSX(vftint_w_s, vv) +INSN_LSX(vftint_l_d, vv) +INSN_LSX(vftintrm_w_s, vv) +INSN_LSX(vftintrm_l_d, vv) +INSN_LSX(vftintrp_w_s, vv) +INSN_LSX(vftintrp_l_d, vv) +INSN_LSX(vftintrz_w_s, vv) +INSN_LSX(vftintrz_l_d, vv) +INSN_LSX(vftintrne_w_s, vv) +INSN_LSX(vftintrne_l_d, vv) +INSN_LSX(vftint_wu_s, vv) +INSN_LSX(vftint_lu_d, vv) +INSN_LSX(vftintrz_wu_s, vv) +INSN_LSX(vftintrz_lu_d, vv) +INSN_LSX(vftint_w_d, vvv) +INSN_LSX(vftintrm_w_d, vvv) +INSN_LSX(vftintrp_w_d, vvv) +INSN_LSX(vftintrz_w_d, vvv) +INSN_LSX(vftintrne_w_d, vvv) +INSN_LSX(vftintl_l_s, vv) +INSN_LSX(vftinth_l_s, vv) +INSN_LSX(vftintrml_l_s, vv) +INSN_LSX(vftintrmh_l_s, vv) +INSN_LSX(vftintrpl_l_s, vv) +INSN_LSX(vftintrph_l_s, vv) +INSN_LSX(vftintrzl_l_s, vv) +INSN_LSX(vftintrzh_l_s, vv) +INSN_LSX(vftintrnel_l_s, vv) +INSN_LSX(vftintrneh_l_s, vv) + +INSN_LSX(vffint_s_w, vv) +INSN_LSX(vffint_s_wu, vv) +INSN_LSX(vffint_d_l, vv) +INSN_LSX(vffint_d_lu, vv) +INSN_LSX(vffintl_d_w, vv) +INSN_LSX(vffinth_d_w, vv) +INSN_LSX(vffint_s_l, vvv) + +INSN_LSX(vseq_b, vvv) +INSN_LSX(vseq_h, vvv) +INSN_LSX(vseq_w, vvv) +INSN_LSX(vseq_d, vvv) +INSN_LSX(vseqi_b, vv_i) +INSN_LSX(vseqi_h, vv_i) +INSN_LSX(vseqi_w, vv_i) +INSN_LSX(vseqi_d, vv_i) + +INSN_LSX(vsle_b, vvv) +INSN_LSX(vsle_h, vvv) +INSN_LSX(vsle_w, vvv) +INSN_LSX(vsle_d, vvv) +INSN_LSX(vslei_b, vv_i) +INSN_LSX(vslei_h, vv_i) +INSN_LSX(vslei_w, vv_i) +INSN_LSX(vslei_d, vv_i) +INSN_LSX(vsle_bu, vvv) +INSN_LSX(vsle_hu, vvv) +INSN_LSX(vsle_wu, vvv) +INSN_LSX(vsle_du, vvv) +INSN_LSX(vslei_bu, vv_i) +INSN_LSX(vslei_hu, vv_i) +INSN_LSX(vslei_wu, vv_i) +INSN_LSX(vslei_du, vv_i) + +INSN_LSX(vslt_b, vvv) +INSN_LSX(vslt_h, vvv) +INSN_LSX(vslt_w, vvv) +INSN_LSX(vslt_d, vvv) +INSN_LSX(vslti_b, vv_i) +INSN_LSX(vslti_h, vv_i) +INSN_LSX(vslti_w, vv_i) +INSN_LSX(vslti_d, vv_i) +INSN_LSX(vslt_bu, vvv) +INSN_LSX(vslt_hu, vvv) +INSN_LSX(vslt_wu, vvv) +INSN_LSX(vslt_du, vvv) +INSN_LSX(vslti_bu, vv_i) +INSN_LSX(vslti_hu, vv_i) +INSN_LSX(vslti_wu, vv_i) +INSN_LSX(vslti_du, vv_i) + +#define output_vfcmp(C, PREFIX, SUFFIX) \ +{ \ + (C)->info->fprintf_func((C)->info->stream, "%08x %s%s\t%d, f%d, f%d", \ + (C)->insn, PREFIX, 
SUFFIX, a->vd, \ + a->vj, a->vk); \ +} + +static bool output_vvv_fcond(DisasContext *ctx, arg_vvv_fcond * a, + const char *suffix) +{ + bool ret = true; + switch (a->fcond) { + case 0x0: + output_vfcmp(ctx, "vfcmp_caf_", suffix); + break; + case 0x1: + output_vfcmp(ctx, "vfcmp_saf_", suffix); + break; + case 0x2: + output_vfcmp(ctx, "vfcmp_clt_", suffix); + break; + case 0x3: + output_vfcmp(ctx, "vfcmp_slt_", suffix); + break; + case 0x4: + output_vfcmp(ctx, "vfcmp_ceq_", suffix); + break; + case 0x5: + output_vfcmp(ctx, "vfcmp_seq_", suffix); + break; + case 0x6: + output_vfcmp(ctx, "vfcmp_cle_", suffix); + break; + case 0x7: + output_vfcmp(ctx, "vfcmp_sle_", suffix); + break; + case 0x8: + output_vfcmp(ctx, "vfcmp_cun_", suffix); + break; + case 0x9: + output_vfcmp(ctx, "vfcmp_sun_", suffix); + break; + case 0xA: + output_vfcmp(ctx, "vfcmp_cult_", suffix); + break; + case 0xB: + output_vfcmp(ctx, "vfcmp_sult_", suffix); + break; + case 0xC: + output_vfcmp(ctx, "vfcmp_cueq_", suffix); + break; + case 0xD: + output_vfcmp(ctx, "vfcmp_sueq_", suffix); + break; + case 0xE: + output_vfcmp(ctx, "vfcmp_cule_", suffix); + break; + case 0xF: + output_vfcmp(ctx, "vfcmp_sule_", suffix); + break; + case 0x10: + output_vfcmp(ctx, "vfcmp_cne_", suffix); + break; + case 0x11: + output_vfcmp(ctx, "vfcmp_sne_", suffix); + break; + case 0x14: + output_vfcmp(ctx, "vfcmp_cor_", suffix); + break; + case 0x15: + output_vfcmp(ctx, "vfcmp_sor_", suffix); + break; + case 0x18: + output_vfcmp(ctx, "vfcmp_cune_", suffix); + break; + case 0x19: + output_vfcmp(ctx, "vfcmp_sune_", suffix); + break; + default: + ret = false; + } + return ret; +} + +#define LSX_FCMP_INSN(suffix) \ +static bool trans_vfcmp_cond_##suffix(DisasContext *ctx, \ + arg_vvv_fcond * a) \ +{ \ + return output_vvv_fcond(ctx, a, #suffix); \ +} + +LSX_FCMP_INSN(s) +LSX_FCMP_INSN(d) + +INSN_LSX(vbitsel_v, vvvv) +INSN_LSX(vbitseli_b, vv_i) + +INSN_LSX(vseteqz_v, cv) +INSN_LSX(vsetnez_v, cv) +INSN_LSX(vsetanyeqz_b, cv) +INSN_LSX(vsetanyeqz_h, cv) +INSN_LSX(vsetanyeqz_w, cv) +INSN_LSX(vsetanyeqz_d, cv) +INSN_LSX(vsetallnez_b, cv) +INSN_LSX(vsetallnez_h, cv) +INSN_LSX(vsetallnez_w, cv) +INSN_LSX(vsetallnez_d, cv) + +INSN_LSX(vinsgr2vr_b, vr_i) +INSN_LSX(vinsgr2vr_h, vr_i) +INSN_LSX(vinsgr2vr_w, vr_i) +INSN_LSX(vinsgr2vr_d, vr_i) +INSN_LSX(vpickve2gr_b, rv_i) +INSN_LSX(vpickve2gr_h, rv_i) +INSN_LSX(vpickve2gr_w, rv_i) +INSN_LSX(vpickve2gr_d, rv_i) +INSN_LSX(vpickve2gr_bu, rv_i) +INSN_LSX(vpickve2gr_hu, rv_i) +INSN_LSX(vpickve2gr_wu, rv_i) +INSN_LSX(vpickve2gr_du, rv_i) + +INSN_LSX(vreplgr2vr_b, vr) +INSN_LSX(vreplgr2vr_h, vr) +INSN_LSX(vreplgr2vr_w, vr) +INSN_LSX(vreplgr2vr_d, vr) + +INSN_LSX(vreplve_b, vvr) +INSN_LSX(vreplve_h, vvr) +INSN_LSX(vreplve_w, vvr) +INSN_LSX(vreplve_d, vvr) +INSN_LSX(vreplvei_b, vv_i) +INSN_LSX(vreplvei_h, vv_i) +INSN_LSX(vreplvei_w, vv_i) +INSN_LSX(vreplvei_d, vv_i) + +INSN_LSX(vbsll_v, vv_i) +INSN_LSX(vbsrl_v, vv_i) + +INSN_LSX(vpackev_b, vvv) +INSN_LSX(vpackev_h, vvv) +INSN_LSX(vpackev_w, vvv) +INSN_LSX(vpackev_d, vvv) +INSN_LSX(vpackod_b, vvv) +INSN_LSX(vpackod_h, vvv) +INSN_LSX(vpackod_w, vvv) +INSN_LSX(vpackod_d, vvv) + +INSN_LSX(vpickev_b, vvv) +INSN_LSX(vpickev_h, vvv) +INSN_LSX(vpickev_w, vvv) +INSN_LSX(vpickev_d, vvv) +INSN_LSX(vpickod_b, vvv) +INSN_LSX(vpickod_h, vvv) +INSN_LSX(vpickod_w, vvv) +INSN_LSX(vpickod_d, vvv) + +INSN_LSX(vilvl_b, vvv) +INSN_LSX(vilvl_h, vvv) +INSN_LSX(vilvl_w, vvv) +INSN_LSX(vilvl_d, vvv) +INSN_LSX(vilvh_b, vvv) +INSN_LSX(vilvh_h, vvv) +INSN_LSX(vilvh_w, vvv) +INSN_LSX(vilvh_d, vvv) + 
+INSN_LSX(vshuf_b, vvvv) +INSN_LSX(vshuf_h, vvv) +INSN_LSX(vshuf_w, vvv) +INSN_LSX(vshuf_d, vvv) +INSN_LSX(vshuf4i_b, vv_i) +INSN_LSX(vshuf4i_h, vv_i) +INSN_LSX(vshuf4i_w, vv_i) +INSN_LSX(vshuf4i_d, vv_i) + +INSN_LSX(vpermi_w, vv_i) + +INSN_LSX(vextrins_d, vv_i) +INSN_LSX(vextrins_w, vv_i) +INSN_LSX(vextrins_h, vv_i) +INSN_LSX(vextrins_b, vv_i) + +INSN_LSX(vld, vr_i) +INSN_LSX(vst, vr_i) +INSN_LSX(vldx, vrr) +INSN_LSX(vstx, vrr) + +INSN_LSX(vldrepl_d, vr_i) +INSN_LSX(vldrepl_w, vr_i) +INSN_LSX(vldrepl_h, vr_i) +INSN_LSX(vldrepl_b, vr_i) +INSN_LSX(vstelm_d, vr_ii) +INSN_LSX(vstelm_w, vr_ii) +INSN_LSX(vstelm_h, vr_ii) +INSN_LSX(vstelm_b, vr_ii) diff --git a/target/loongarch/fpu_helper.c b/target/loongarch/fpu_helper.c index 4b9637210a..f6753c5875 100644 --- a/target/loongarch/fpu_helper.c +++ b/target/loongarch/fpu_helper.c @@ -33,7 +33,7 @@ void restore_fp_status(CPULoongArchState *env) set_flush_to_zero(0, &env->fp_status); } -static int ieee_ex_to_loongarch(int xcpt) +int ieee_ex_to_loongarch(int xcpt) { int ret = 0; if (xcpt & float_flag_invalid) { diff --git a/target/loongarch/gdbstub.c b/target/loongarch/gdbstub.c index a4d1e28e36..0752fff924 100644 --- a/target/loongarch/gdbstub.c +++ b/target/loongarch/gdbstub.c @@ -10,6 +10,7 @@ #include "cpu.h" #include "internals.h" #include "exec/gdbstub.h" +#include "gdbstub/helpers.h" uint64_t read_fcc(CPULoongArchState *env) { @@ -68,7 +69,7 @@ static int loongarch_gdb_get_fpu(CPULoongArchState *env, GByteArray *mem_buf, int n) { if (0 <= n && n < 32) { - return gdb_get_reg64(mem_buf, env->fpr[n]); + return gdb_get_reg64(mem_buf, env->fpr[n].vreg.D(0)); } else if (n == 32) { uint64_t val = read_fcc(env); return gdb_get_reg64(mem_buf, val); @@ -84,7 +85,7 @@ static int loongarch_gdb_set_fpu(CPULoongArchState *env, int length = 0; if (0 <= n && n < 32) { - env->fpr[n] = ldq_p(mem_buf); + env->fpr[n].vreg.D(0) = ldq_p(mem_buf); length = 8; } else if (n == 32) { uint64_t val = ldq_p(mem_buf); diff --git a/target/loongarch/helper.h b/target/loongarch/helper.h index 9c01823a26..b9de77d926 100644 --- a/target/loongarch/helper.h +++ b/target/loongarch/helper.h @@ -130,3 +130,569 @@ DEF_HELPER_4(ldpte, void, env, tl, tl, i32) DEF_HELPER_1(ertn, void, env) DEF_HELPER_1(idle, void, env) #endif + +/* LoongArch LSX */ +DEF_HELPER_4(vhaddw_h_b, void, env, i32, i32, i32) +DEF_HELPER_4(vhaddw_w_h, void, env, i32, i32, i32) +DEF_HELPER_4(vhaddw_d_w, void, env, i32, i32, i32) +DEF_HELPER_4(vhaddw_q_d, void, env, i32, i32, i32) +DEF_HELPER_4(vhaddw_hu_bu, void, env, i32, i32, i32) +DEF_HELPER_4(vhaddw_wu_hu, void, env, i32, i32, i32) +DEF_HELPER_4(vhaddw_du_wu, void, env, i32, i32, i32) +DEF_HELPER_4(vhaddw_qu_du, void, env, i32, i32, i32) +DEF_HELPER_4(vhsubw_h_b, void, env, i32, i32, i32) +DEF_HELPER_4(vhsubw_w_h, void, env, i32, i32, i32) +DEF_HELPER_4(vhsubw_d_w, void, env, i32, i32, i32) +DEF_HELPER_4(vhsubw_q_d, void, env, i32, i32, i32) +DEF_HELPER_4(vhsubw_hu_bu, void, env, i32, i32, i32) +DEF_HELPER_4(vhsubw_wu_hu, void, env, i32, i32, i32) +DEF_HELPER_4(vhsubw_du_wu, void, env, i32, i32, i32) +DEF_HELPER_4(vhsubw_qu_du, void, env, i32, i32, i32) + +DEF_HELPER_FLAGS_4(vaddwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_w_h, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vsubwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwev_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vaddwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vsubwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwev_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vaddwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_q_du_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_q_du_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vavg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vavgr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
+DEF_HELPER_FLAGS_4(vavgr_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vabsd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vadda_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vadda_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vadda_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vadda_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vmaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vmuh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmulwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmulwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwev_d_wu, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmulwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmadd_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmsub_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmsub_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmaddwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmaddwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmaddwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_4(vdiv_b, void, env, i32, i32, i32) +DEF_HELPER_4(vdiv_h, void, env, i32, i32, i32) +DEF_HELPER_4(vdiv_w, void, env, i32, i32, i32) +DEF_HELPER_4(vdiv_d, void, env, i32, i32, i32) +DEF_HELPER_4(vdiv_bu, void, env, i32, i32, i32) +DEF_HELPER_4(vdiv_hu, void, env, i32, i32, i32) +DEF_HELPER_4(vdiv_wu, void, env, i32, i32, i32) +DEF_HELPER_4(vdiv_du, void, env, i32, i32, i32) +DEF_HELPER_4(vmod_b, void, env, i32, i32, i32) +DEF_HELPER_4(vmod_h, void, env, i32, i32, i32) +DEF_HELPER_4(vmod_w, void, env, i32, i32, i32) +DEF_HELPER_4(vmod_d, void, env, i32, i32, i32) +DEF_HELPER_4(vmod_bu, void, env, i32, i32, i32) +DEF_HELPER_4(vmod_hu, void, env, i32, i32, i32) +DEF_HELPER_4(vmod_wu, void, env, i32, i32, i32) +DEF_HELPER_4(vmod_du, void, env, i32, i32, i32) + +DEF_HELPER_FLAGS_4(vsat_b, 
TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_3(vexth_h_b, void, env, i32, i32) +DEF_HELPER_3(vexth_w_h, void, env, i32, i32) +DEF_HELPER_3(vexth_d_w, void, env, i32, i32) +DEF_HELPER_3(vexth_q_d, void, env, i32, i32) +DEF_HELPER_3(vexth_hu_bu, void, env, i32, i32) +DEF_HELPER_3(vexth_wu_hu, void, env, i32, i32) +DEF_HELPER_3(vexth_du_wu, void, env, i32, i32) +DEF_HELPER_3(vexth_qu_du, void, env, i32, i32) + +DEF_HELPER_FLAGS_4(vsigncov_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsigncov_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsigncov_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsigncov_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_3(vmskltz_b, void, env, i32, i32) +DEF_HELPER_3(vmskltz_h, void, env, i32, i32) +DEF_HELPER_3(vmskltz_w, void, env, i32, i32) +DEF_HELPER_3(vmskltz_d, void, env, i32, i32) +DEF_HELPER_3(vmskgez_b, void, env, i32, i32) +DEF_HELPER_3(vmsknz_b, void, env, i32, i32) + +DEF_HELPER_FLAGS_4(vnori_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_4(vsllwil_h_b, void, env, i32, i32, i32) +DEF_HELPER_4(vsllwil_w_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsllwil_d_w, void, env, i32, i32, i32) +DEF_HELPER_3(vextl_q_d, void, env, i32, i32) +DEF_HELPER_4(vsllwil_hu_bu, void, env, i32, i32, i32) +DEF_HELPER_4(vsllwil_wu_hu, void, env, i32, i32, i32) +DEF_HELPER_4(vsllwil_du_wu, void, env, i32, i32, i32) +DEF_HELPER_3(vextl_qu_du, void, env, i32, i32) + +DEF_HELPER_4(vsrlr_b, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlr_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlr_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlr_d, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlri_b, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlri_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlri_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlri_d, void, env, i32, i32, i32) + +DEF_HELPER_4(vsrar_b, void, env, i32, i32, i32) +DEF_HELPER_4(vsrar_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsrar_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsrar_d, void, env, i32, i32, i32) +DEF_HELPER_4(vsrari_b, void, env, i32, i32, i32) +DEF_HELPER_4(vsrari_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsrari_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsrari_d, void, env, i32, i32, i32) + +DEF_HELPER_4(vsrln_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsrln_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsrln_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vsran_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsran_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsran_w_d, void, env, i32, i32, i32) + +DEF_HELPER_4(vsrlni_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlni_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlni_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlni_d_q, void, env, i32, i32, i32) +DEF_HELPER_4(vsrani_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsrani_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsrani_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vsrani_d_q, void, env, i32, i32, i32) + +DEF_HELPER_4(vsrlrn_b_h, void, env,
i32, i32, i32) +DEF_HELPER_4(vsrlrn_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlrn_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vsrarn_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsrarn_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsrarn_w_d, void, env, i32, i32, i32) + +DEF_HELPER_4(vsrlrni_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlrni_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlrni_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vsrlrni_d_q, void, env, i32, i32, i32) +DEF_HELPER_4(vsrarni_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsrarni_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsrarni_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vsrarni_d_q, void, env, i32, i32, i32) + +DEF_HELPER_4(vssrln_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrln_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrln_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssran_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssran_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssran_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssrln_bu_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrln_hu_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrln_wu_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssran_bu_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssran_hu_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssran_wu_d, void, env, i32, i32, i32) + +DEF_HELPER_4(vssrlni_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlni_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlni_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlni_d_q, void, env, i32, i32, i32) +DEF_HELPER_4(vssrani_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrani_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrani_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssrani_d_q, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlni_bu_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlni_hu_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlni_wu_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlni_du_q, void, env, i32, i32, i32) +DEF_HELPER_4(vssrani_bu_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrani_hu_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrani_wu_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssrani_du_q, void, env, i32, i32, i32) + +DEF_HELPER_4(vssrlrn_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlrn_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlrn_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarn_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarn_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarn_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlrn_bu_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlrn_hu_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlrn_wu_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarn_bu_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarn_hu_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarn_wu_d, void, env, i32, i32, i32) + +DEF_HELPER_4(vssrlrni_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlrni_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlrni_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlrni_d_q, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarni_b_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarni_h_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarni_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarni_d_q, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlrni_bu_h, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlrni_hu_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlrni_wu_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssrlrni_du_q, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarni_bu_h, void, env, i32, i32, i32) 
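+/*
+ * A hedged sketch of the other calling convention used in this block
+ * (the body below is illustrative, not taken from the patch): helpers
+ * declared as DEF_HELPER_4(name, void, env, i32, i32, i32) receive
+ * vector register indices rather than pointers and reach their
+ * operands through env, roughly:
+ *
+ *     void helper_vssrarni_b_h(CPULoongArchState *env,
+ *                              uint32_t vd, uint32_t vj, uint32_t imm)
+ *     {
+ *         VReg *Vd = &(env->fpr[vd].vreg);
+ *         VReg *Vj = &(env->fpr[vj].vreg);
+ *         ... saturating rounded shift, narrow and insert ...
+ *     }
+ */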
+DEF_HELPER_4(vssrarni_hu_w, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarni_wu_d, void, env, i32, i32, i32) +DEF_HELPER_4(vssrarni_du_q, void, env, i32, i32, i32) + +DEF_HELPER_3(vclo_b, void, env, i32, i32) +DEF_HELPER_3(vclo_h, void, env, i32, i32) +DEF_HELPER_3(vclo_w, void, env, i32, i32) +DEF_HELPER_3(vclo_d, void, env, i32, i32) +DEF_HELPER_3(vclz_b, void, env, i32, i32) +DEF_HELPER_3(vclz_h, void, env, i32, i32) +DEF_HELPER_3(vclz_w, void, env, i32, i32) +DEF_HELPER_3(vclz_d, void, env, i32, i32) + +DEF_HELPER_3(vpcnt_b, void, env, i32, i32) +DEF_HELPER_3(vpcnt_h, void, env, i32, i32) +DEF_HELPER_3(vpcnt_w, void, env, i32, i32) +DEF_HELPER_3(vpcnt_d, void, env, i32, i32) + +DEF_HELPER_FLAGS_4(vbitclr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitclr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitclr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitclr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitclri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitclri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitclri_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitclri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vbitset_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitset_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitset_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitset_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitseti_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitseti_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitseti_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitseti_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vbitrev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitrev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitrev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitrev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitrevi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitrevi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitrevi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitrevi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_4(vfrstp_b, void, env, i32, i32, i32) +DEF_HELPER_4(vfrstp_h, void, env, i32, i32, i32) +DEF_HELPER_4(vfrstpi_b, void, env, i32, i32, i32) +DEF_HELPER_4(vfrstpi_h, void, env, i32, i32, i32) + +DEF_HELPER_4(vfadd_s, void, env, i32, i32, i32) +DEF_HELPER_4(vfadd_d, void, env, i32, i32, i32) +DEF_HELPER_4(vfsub_s, void, env, i32, i32, i32) +DEF_HELPER_4(vfsub_d, void, env, i32, i32, i32) +DEF_HELPER_4(vfmul_s, void, env, i32, i32, i32) +DEF_HELPER_4(vfmul_d, void, env, i32, i32, i32) +DEF_HELPER_4(vfdiv_s, void, env, i32, i32, i32) +DEF_HELPER_4(vfdiv_d, void, env, i32, i32, i32) + +DEF_HELPER_5(vfmadd_s, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vfmadd_d, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vfmsub_s, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vfmsub_d, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vfnmadd_s, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vfnmadd_d, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vfnmsub_s, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vfnmsub_d, void, env, i32, i32, i32, i32) + +DEF_HELPER_4(vfmax_s, 
void, env, i32, i32, i32) +DEF_HELPER_4(vfmax_d, void, env, i32, i32, i32) +DEF_HELPER_4(vfmin_s, void, env, i32, i32, i32) +DEF_HELPER_4(vfmin_d, void, env, i32, i32, i32) + +DEF_HELPER_4(vfmaxa_s, void, env, i32, i32, i32) +DEF_HELPER_4(vfmaxa_d, void, env, i32, i32, i32) +DEF_HELPER_4(vfmina_s, void, env, i32, i32, i32) +DEF_HELPER_4(vfmina_d, void, env, i32, i32, i32) + +DEF_HELPER_3(vflogb_s, void, env, i32, i32) +DEF_HELPER_3(vflogb_d, void, env, i32, i32) + +DEF_HELPER_3(vfclass_s, void, env, i32, i32) +DEF_HELPER_3(vfclass_d, void, env, i32, i32) + +DEF_HELPER_3(vfsqrt_s, void, env, i32, i32) +DEF_HELPER_3(vfsqrt_d, void, env, i32, i32) +DEF_HELPER_3(vfrecip_s, void, env, i32, i32) +DEF_HELPER_3(vfrecip_d, void, env, i32, i32) +DEF_HELPER_3(vfrsqrt_s, void, env, i32, i32) +DEF_HELPER_3(vfrsqrt_d, void, env, i32, i32) + +DEF_HELPER_3(vfcvtl_s_h, void, env, i32, i32) +DEF_HELPER_3(vfcvth_s_h, void, env, i32, i32) +DEF_HELPER_3(vfcvtl_d_s, void, env, i32, i32) +DEF_HELPER_3(vfcvth_d_s, void, env, i32, i32) +DEF_HELPER_4(vfcvt_h_s, void, env, i32, i32, i32) +DEF_HELPER_4(vfcvt_s_d, void, env, i32, i32, i32) + +DEF_HELPER_3(vfrintrne_s, void, env, i32, i32) +DEF_HELPER_3(vfrintrne_d, void, env, i32, i32) +DEF_HELPER_3(vfrintrz_s, void, env, i32, i32) +DEF_HELPER_3(vfrintrz_d, void, env, i32, i32) +DEF_HELPER_3(vfrintrp_s, void, env, i32, i32) +DEF_HELPER_3(vfrintrp_d, void, env, i32, i32) +DEF_HELPER_3(vfrintrm_s, void, env, i32, i32) +DEF_HELPER_3(vfrintrm_d, void, env, i32, i32) +DEF_HELPER_3(vfrint_s, void, env, i32, i32) +DEF_HELPER_3(vfrint_d, void, env, i32, i32) + +DEF_HELPER_3(vftintrne_w_s, void, env, i32, i32) +DEF_HELPER_3(vftintrne_l_d, void, env, i32, i32) +DEF_HELPER_3(vftintrz_w_s, void, env, i32, i32) +DEF_HELPER_3(vftintrz_l_d, void, env, i32, i32) +DEF_HELPER_3(vftintrp_w_s, void, env, i32, i32) +DEF_HELPER_3(vftintrp_l_d, void, env, i32, i32) +DEF_HELPER_3(vftintrm_w_s, void, env, i32, i32) +DEF_HELPER_3(vftintrm_l_d, void, env, i32, i32) +DEF_HELPER_3(vftint_w_s, void, env, i32, i32) +DEF_HELPER_3(vftint_l_d, void, env, i32, i32) +DEF_HELPER_3(vftintrz_wu_s, void, env, i32, i32) +DEF_HELPER_3(vftintrz_lu_d, void, env, i32, i32) +DEF_HELPER_3(vftint_wu_s, void, env, i32, i32) +DEF_HELPER_3(vftint_lu_d, void, env, i32, i32) +DEF_HELPER_4(vftintrne_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vftintrz_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vftintrp_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vftintrm_w_d, void, env, i32, i32, i32) +DEF_HELPER_4(vftint_w_d, void, env, i32, i32, i32) +DEF_HELPER_3(vftintrnel_l_s, void, env, i32, i32) +DEF_HELPER_3(vftintrneh_l_s, void, env, i32, i32) +DEF_HELPER_3(vftintrzl_l_s, void, env, i32, i32) +DEF_HELPER_3(vftintrzh_l_s, void, env, i32, i32) +DEF_HELPER_3(vftintrpl_l_s, void, env, i32, i32) +DEF_HELPER_3(vftintrph_l_s, void, env, i32, i32) +DEF_HELPER_3(vftintrml_l_s, void, env, i32, i32) +DEF_HELPER_3(vftintrmh_l_s, void, env, i32, i32) +DEF_HELPER_3(vftintl_l_s, void, env, i32, i32) +DEF_HELPER_3(vftinth_l_s, void, env, i32, i32) + +DEF_HELPER_3(vffint_s_w, void, env, i32, i32) +DEF_HELPER_3(vffint_d_l, void, env, i32, i32) +DEF_HELPER_3(vffint_s_wu, void, env, i32, i32) +DEF_HELPER_3(vffint_d_lu, void, env, i32, i32) +DEF_HELPER_3(vffintl_d_w, void, env, i32, i32) +DEF_HELPER_3(vffinth_d_w, void, env, i32, i32) +DEF_HELPER_4(vffint_s_l, void, env, i32, i32, i32) + +DEF_HELPER_FLAGS_4(vseqi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vseqi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) 
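+/*
+ * Sketch, assuming the usual gvec out-of-line "2i" convention
+ * (illustrative only): the (ptr, ptr, i64, i32) forms used for these
+ * compare-immediate helpers take a destination pointer, a source
+ * pointer, the sign-extended immediate as a 64-bit value and a
+ * simd_desc() descriptor, i.e. roughly
+ *
+ *     void helper_vseqi_b(void *vd, void *vj, int64_t imm, uint32_t desc);
+ */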
+DEF_HELPER_FLAGS_4(vseqi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vseqi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vslei_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vslti_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_5(vfcmp_c_s, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vfcmp_s_s, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vfcmp_c_d, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vfcmp_s_d, void, env, i32, i32, i32, i32) + +DEF_HELPER_FLAGS_4(vbitseli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_3(vsetanyeqz_b, void, env, i32, i32) +DEF_HELPER_3(vsetanyeqz_h, void, env, i32, i32) +DEF_HELPER_3(vsetanyeqz_w, void, env, i32, i32) +DEF_HELPER_3(vsetanyeqz_d, void, env, i32, i32) +DEF_HELPER_3(vsetallnez_b, void, env, i32, i32) +DEF_HELPER_3(vsetallnez_h, void, env, i32, i32) +DEF_HELPER_3(vsetallnez_w, void, env, i32, i32) +DEF_HELPER_3(vsetallnez_d, void, env, i32, i32) + +DEF_HELPER_4(vpackev_b, void, env, i32, i32, i32) +DEF_HELPER_4(vpackev_h, void, env, i32, i32, i32) +DEF_HELPER_4(vpackev_w, void, env, i32, i32, i32) +DEF_HELPER_4(vpackev_d, void, env, i32, i32, i32) +DEF_HELPER_4(vpackod_b, void, env, i32, i32, i32) +DEF_HELPER_4(vpackod_h, void, env, i32, i32, i32) +DEF_HELPER_4(vpackod_w, void, env, i32, i32, i32) +DEF_HELPER_4(vpackod_d, void, env, i32, i32, i32) + +DEF_HELPER_4(vpickev_b, void, env, i32, i32, i32) +DEF_HELPER_4(vpickev_h, void, env, i32, i32, i32) +DEF_HELPER_4(vpickev_w, void, env, i32, i32, i32) +DEF_HELPER_4(vpickev_d, void, env, i32, i32, i32) +DEF_HELPER_4(vpickod_b, void, env, i32, i32, i32) +DEF_HELPER_4(vpickod_h, void, env, i32, i32, i32) +DEF_HELPER_4(vpickod_w, void, env, i32, i32, i32) +DEF_HELPER_4(vpickod_d, void, env, i32, i32, i32) + +DEF_HELPER_4(vilvl_b, void, env, i32, i32, i32) +DEF_HELPER_4(vilvl_h, void, env, i32, i32, i32) +DEF_HELPER_4(vilvl_w, void, env, i32, i32, i32) +DEF_HELPER_4(vilvl_d, void, env, i32, i32, i32) +DEF_HELPER_4(vilvh_b, void, env, i32, i32, i32) +DEF_HELPER_4(vilvh_h, void, env, i32, i32, i32) +DEF_HELPER_4(vilvh_w, void, env, i32, i32, i32) +DEF_HELPER_4(vilvh_d, void, env, i32, i32, i32) + +DEF_HELPER_5(vshuf_b, void, env, i32, i32, i32, i32) +DEF_HELPER_4(vshuf_h, void, env, i32, i32, i32) +DEF_HELPER_4(vshuf_w, void, env, i32, i32, i32) +DEF_HELPER_4(vshuf_d, void, env, i32, i32, i32) +DEF_HELPER_4(vshuf4i_b, void, env, i32, i32, i32) +DEF_HELPER_4(vshuf4i_h, void, env, i32, i32, i32) +DEF_HELPER_4(vshuf4i_w, void, 
env, i32, i32, i32) +DEF_HELPER_4(vshuf4i_d, void, env, i32, i32, i32) + +DEF_HELPER_4(vpermi_w, void, env, i32, i32, i32) + +DEF_HELPER_4(vextrins_b, void, env, i32, i32, i32) +DEF_HELPER_4(vextrins_h, void, env, i32, i32, i32) +DEF_HELPER_4(vextrins_w, void, env, i32, i32, i32) +DEF_HELPER_4(vextrins_d, void, env, i32, i32, i32) diff --git a/target/loongarch/insn_trans/trans_arith.c.inc b/target/loongarch/insn_trans/trans_arith.c.inc index 8e45eadbc8..43d6cf261d 100644 --- a/target/loongarch/insn_trans/trans_arith.c.inc +++ b/target/loongarch/insn_trans/trans_arith.c.inc @@ -100,14 +100,12 @@ static void gen_mulh_d(TCGv dest, TCGv src1, TCGv src2) { TCGv discard = tcg_temp_new(); tcg_gen_muls2_tl(discard, dest, src1, src2); - tcg_temp_free(discard); } static void gen_mulh_du(TCGv dest, TCGv src1, TCGv src2) { TCGv discard = tcg_temp_new(); tcg_gen_mulu2_tl(discard, dest, src1, src2); - tcg_temp_free(discard); } static void prep_divisor_d(TCGv ret, TCGv src1, TCGv src2) @@ -129,9 +127,6 @@ static void prep_divisor_d(TCGv ret, TCGv src1, TCGv src2) tcg_gen_and_tl(ret, ret, t0); tcg_gen_or_tl(ret, ret, t1); tcg_gen_movcond_tl(TCG_COND_NE, ret, ret, zero, ret, src2); - - tcg_temp_free(t0); - tcg_temp_free(t1); } static void prep_divisor_du(TCGv ret, TCGv src2) @@ -152,7 +147,6 @@ static void gen_div_d(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); prep_divisor_d(t0, src1, src2); tcg_gen_div_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_rem_d(TCGv dest, TCGv src1, TCGv src2) @@ -160,7 +154,6 @@ static void gen_rem_d(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); prep_divisor_d(t0, src1, src2); tcg_gen_rem_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_div_du(TCGv dest, TCGv src1, TCGv src2) @@ -168,7 +161,6 @@ static void gen_div_du(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); prep_divisor_du(t0, src2); tcg_gen_divu_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_rem_du(TCGv dest, TCGv src1, TCGv src2) @@ -176,7 +168,6 @@ static void gen_rem_du(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); prep_divisor_du(t0, src2); tcg_gen_remu_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_div_w(TCGv dest, TCGv src1, TCGv src2) @@ -185,7 +176,6 @@ static void gen_div_w(TCGv dest, TCGv src1, TCGv src2) /* We need not check for integer overflow for div_w. */ prep_divisor_du(t0, src2); tcg_gen_div_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_rem_w(TCGv dest, TCGv src1, TCGv src2) @@ -194,7 +184,6 @@ static void gen_rem_w(TCGv dest, TCGv src1, TCGv src2) /* We need not check for integer overflow for rem_w. 
*/ prep_divisor_du(t0, src2); tcg_gen_rem_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_alsl(TCGv dest, TCGv src1, TCGv src2, target_long sa) @@ -202,7 +191,6 @@ static void gen_alsl(TCGv dest, TCGv src1, TCGv src2, target_long sa) TCGv t0 = tcg_temp_new(); tcg_gen_shli_tl(t0, src1, sa); tcg_gen_add_tl(dest, t0, src2); - tcg_temp_free(t0); } static bool trans_lu32i_d(DisasContext *ctx, arg_lu32i_d *a) diff --git a/target/loongarch/insn_trans/trans_atomic.c.inc b/target/loongarch/insn_trans/trans_atomic.c.inc index 6763c1c301..612709f2a7 100644 --- a/target/loongarch/insn_trans/trans_atomic.c.inc +++ b/target/loongarch/insn_trans/trans_atomic.c.inc @@ -14,7 +14,6 @@ static bool gen_ll(DisasContext *ctx, arg_rr_i *a, MemOp mop) tcg_gen_st_tl(t0, cpu_env, offsetof(CPULoongArchState, lladdr)); tcg_gen_st_tl(dest, cpu_env, offsetof(CPULoongArchState, llval)); gen_set_gpr(a->rd, dest, EXT_NONE); - tcg_temp_free(t0); return true; } @@ -43,8 +42,6 @@ static bool gen_sc(DisasContext *ctx, arg_rr_i *a, MemOp mop) tcg_gen_setcond_tl(TCG_COND_EQ, dest, t0, cpu_llval); gen_set_label(done); gen_set_gpr(a->rd, dest, EXT_NONE); - tcg_temp_free(t0); - tcg_temp_free(val); return true; } diff --git a/target/loongarch/insn_trans/trans_bit.c.inc b/target/loongarch/insn_trans/trans_bit.c.inc index b01e4aeb23..25b4d7858b 100644 --- a/target/loongarch/insn_trans/trans_bit.c.inc +++ b/target/loongarch/insn_trans/trans_bit.c.inc @@ -122,9 +122,6 @@ static void gen_revb_2h(TCGv dest, TCGv src1) tcg_gen_and_tl(t1, src1, mask); tcg_gen_shli_tl(t1, t1, 8); tcg_gen_or_tl(dest, t0, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_revb_4h(TCGv dest, TCGv src1) @@ -138,9 +135,6 @@ static void gen_revb_4h(TCGv dest, TCGv src1) tcg_gen_and_tl(t1, src1, mask); tcg_gen_shli_tl(t1, t1, 8); tcg_gen_or_tl(dest, t0, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_revh_2w(TCGv dest, TCGv src1) @@ -154,9 +148,6 @@ static void gen_revh_2w(TCGv dest, TCGv src1) tcg_gen_and_i64(t0, t0, mask); tcg_gen_shli_i64(t1, t1, 16); tcg_gen_or_i64(dest, t1, t0); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static void gen_revh_d(TCGv dest, TCGv src1) @@ -171,9 +162,6 @@ static void gen_revh_d(TCGv dest, TCGv src1) tcg_gen_shli_tl(t0, t0, 16); tcg_gen_or_tl(t0, t0, t1); tcg_gen_rotri_tl(dest, t0, 32); - - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_maskeqz(TCGv dest, TCGv src1, TCGv src2) diff --git a/target/loongarch/insn_trans/trans_farith.c.inc b/target/loongarch/insn_trans/trans_farith.c.inc index 7081fbb89b..21ea47308b 100644 --- a/target/loongarch/insn_trans/trans_farith.c.inc +++ b/target/loongarch/insn_trans/trans_farith.c.inc @@ -17,18 +17,29 @@ static bool gen_fff(DisasContext *ctx, arg_fff *a, void (*func)(TCGv, TCGv_env, TCGv, TCGv)) { + TCGv dest = get_fpr(ctx, a->fd); + TCGv src1 = get_fpr(ctx, a->fj); + TCGv src2 = get_fpr(ctx, a->fk); + CHECK_FPE; - func(cpu_fpr[a->fd], cpu_env, cpu_fpr[a->fj], cpu_fpr[a->fk]); + func(dest, cpu_env, src1, src2); + set_fpr(a->fd, dest); + return true; } static bool gen_ff(DisasContext *ctx, arg_ff *a, void (*func)(TCGv, TCGv_env, TCGv)) { + TCGv dest = get_fpr(ctx, a->fd); + TCGv src = get_fpr(ctx, a->fj); + CHECK_FPE; - func(cpu_fpr[a->fd], cpu_env, cpu_fpr[a->fj]); + func(dest, cpu_env, src); + set_fpr(a->fd, dest); + return true; } @@ -37,61 +48,98 @@ static bool gen_muladd(DisasContext *ctx, arg_ffff *a, int flag) { TCGv_i32 tflag = tcg_constant_i32(flag); + TCGv dest = get_fpr(ctx, a->fd); + TCGv src1 = get_fpr(ctx, 
a->fj); + TCGv src2 = get_fpr(ctx, a->fk); + TCGv src3 = get_fpr(ctx, a->fa); CHECK_FPE; - func(cpu_fpr[a->fd], cpu_env, cpu_fpr[a->fj], - cpu_fpr[a->fk], cpu_fpr[a->fa], tflag); + func(dest, cpu_env, src1, src2, src3, tflag); + set_fpr(a->fd, dest); + return true; } static bool trans_fcopysign_s(DisasContext *ctx, arg_fcopysign_s *a) { + TCGv dest = get_fpr(ctx, a->fd); + TCGv src1 = get_fpr(ctx, a->fk); + TCGv src2 = get_fpr(ctx, a->fj); + CHECK_FPE; - tcg_gen_deposit_i64(cpu_fpr[a->fd], cpu_fpr[a->fk], cpu_fpr[a->fj], 0, 31); + tcg_gen_deposit_i64(dest, src1, src2, 0, 31); + set_fpr(a->fd, dest); + return true; } static bool trans_fcopysign_d(DisasContext *ctx, arg_fcopysign_d *a) { + TCGv dest = get_fpr(ctx, a->fd); + TCGv src1 = get_fpr(ctx, a->fk); + TCGv src2 = get_fpr(ctx, a->fj); + CHECK_FPE; - tcg_gen_deposit_i64(cpu_fpr[a->fd], cpu_fpr[a->fk], cpu_fpr[a->fj], 0, 63); + tcg_gen_deposit_i64(dest, src1, src2, 0, 63); + set_fpr(a->fd, dest); + return true; } static bool trans_fabs_s(DisasContext *ctx, arg_fabs_s *a) { + TCGv dest = get_fpr(ctx, a->fd); + TCGv src = get_fpr(ctx, a->fj); + CHECK_FPE; - tcg_gen_andi_i64(cpu_fpr[a->fd], cpu_fpr[a->fj], MAKE_64BIT_MASK(0, 31)); - gen_nanbox_s(cpu_fpr[a->fd], cpu_fpr[a->fd]); + tcg_gen_andi_i64(dest, src, MAKE_64BIT_MASK(0, 31)); + gen_nanbox_s(dest, dest); + set_fpr(a->fd, dest); + return true; } static bool trans_fabs_d(DisasContext *ctx, arg_fabs_d *a) { + TCGv dest = get_fpr(ctx, a->fd); + TCGv src = get_fpr(ctx, a->fj); + CHECK_FPE; - tcg_gen_andi_i64(cpu_fpr[a->fd], cpu_fpr[a->fj], MAKE_64BIT_MASK(0, 63)); + tcg_gen_andi_i64(dest, src, MAKE_64BIT_MASK(0, 63)); + set_fpr(a->fd, dest); + return true; } static bool trans_fneg_s(DisasContext *ctx, arg_fneg_s *a) { + TCGv dest = get_fpr(ctx, a->fd); + TCGv src = get_fpr(ctx, a->fj); + CHECK_FPE; - tcg_gen_xori_i64(cpu_fpr[a->fd], cpu_fpr[a->fj], 0x80000000); - gen_nanbox_s(cpu_fpr[a->fd], cpu_fpr[a->fd]); + tcg_gen_xori_i64(dest, src, 0x80000000); + gen_nanbox_s(dest, dest); + set_fpr(a->fd, dest); + return true; } static bool trans_fneg_d(DisasContext *ctx, arg_fneg_d *a) { + TCGv dest = get_fpr(ctx, a->fd); + TCGv src = get_fpr(ctx, a->fj); + CHECK_FPE; - tcg_gen_xori_i64(cpu_fpr[a->fd], cpu_fpr[a->fj], 0x8000000000000000LL); + tcg_gen_xori_i64(dest, src, 0x8000000000000000LL); + set_fpr(a->fd, dest); + return true; } diff --git a/target/loongarch/insn_trans/trans_fcmp.c.inc b/target/loongarch/insn_trans/trans_fcmp.c.inc index 2ccf646ccb..a78868dbc4 100644 --- a/target/loongarch/insn_trans/trans_fcmp.c.inc +++ b/target/loongarch/insn_trans/trans_fcmp.c.inc @@ -25,39 +25,40 @@ static uint32_t get_fcmp_flags(int cond) static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a) { - TCGv var; + TCGv var, src1, src2; uint32_t flags; void (*fn)(TCGv, TCGv_env, TCGv, TCGv, TCGv_i32); CHECK_FPE; var = tcg_temp_new(); + src1 = get_fpr(ctx, a->fj); + src2 = get_fpr(ctx, a->fk); fn = (a->fcond & 1 ? 
gen_helper_fcmp_s_s : gen_helper_fcmp_c_s); flags = get_fcmp_flags(a->fcond >> 1); - fn(var, cpu_env, cpu_fpr[a->fj], cpu_fpr[a->fk], tcg_constant_i32(flags)); + fn(var, cpu_env, src1, src2, tcg_constant_i32(flags)); tcg_gen_st8_tl(var, cpu_env, offsetof(CPULoongArchState, cf[a->cd])); - tcg_temp_free(var); return true; } static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a) { - TCGv var; + TCGv var, src1, src2; uint32_t flags; void (*fn)(TCGv, TCGv_env, TCGv, TCGv, TCGv_i32); CHECK_FPE; var = tcg_temp_new(); + src1 = get_fpr(ctx, a->fj); + src2 = get_fpr(ctx, a->fk); fn = (a->fcond & 1 ? gen_helper_fcmp_s_d : gen_helper_fcmp_c_d); flags = get_fcmp_flags(a->fcond >> 1); - fn(var, cpu_env, cpu_fpr[a->fj], cpu_fpr[a->fk], tcg_constant_i32(flags)); + fn(var, cpu_env, src1, src2, tcg_constant_i32(flags)); tcg_gen_st8_tl(var, cpu_env, offsetof(CPULoongArchState, cf[a->cd])); - - tcg_temp_free(var); return true; } diff --git a/target/loongarch/insn_trans/trans_fmemory.c.inc b/target/loongarch/insn_trans/trans_fmemory.c.inc index 3025a1d3e9..91c09fb6d9 100644 --- a/target/loongarch/insn_trans/trans_fmemory.c.inc +++ b/target/loongarch/insn_trans/trans_fmemory.c.inc @@ -13,22 +13,19 @@ static void maybe_nanbox_load(TCGv freg, MemOp mop) static bool gen_fload_i(DisasContext *ctx, arg_fr_i *a, MemOp mop) { TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); - TCGv temp = NULL; + TCGv dest = get_fpr(ctx, a->fd); CHECK_FPE; if (a->imm) { - temp = tcg_temp_new(); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } - tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); - maybe_nanbox_load(cpu_fpr[a->fd], mop); - - if (temp) { - tcg_temp_free(temp); - } + tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); + maybe_nanbox_load(dest, mop); + set_fpr(a->fd, dest); return true; } @@ -36,21 +33,18 @@ static bool gen_fload_i(DisasContext *ctx, arg_fr_i *a, MemOp mop) static bool gen_fstore_i(DisasContext *ctx, arg_fr_i *a, MemOp mop) { TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); - TCGv temp = NULL; + TCGv src = get_fpr(ctx, a->fd); CHECK_FPE; if (a->imm) { - temp = tcg_temp_new(); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } - tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); + tcg_gen_qemu_st_tl(src, addr, ctx->mem_idx, mop); - if (temp) { - tcg_temp_free(temp); - } return true; } @@ -58,15 +52,16 @@ static bool gen_floadx(DisasContext *ctx, arg_frr *a, MemOp mop) { TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv dest = get_fpr(ctx, a->fd); TCGv addr; CHECK_FPE; addr = tcg_temp_new(); tcg_gen_add_tl(addr, src1, src2); - tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); - maybe_nanbox_load(cpu_fpr[a->fd], mop); - tcg_temp_free(addr); + tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); + maybe_nanbox_load(dest, mop); + set_fpr(a->fd, dest); return true; } @@ -75,14 +70,14 @@ static bool gen_fstorex(DisasContext *ctx, arg_frr *a, MemOp mop) { TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv src3 = get_fpr(ctx, a->fd); TCGv addr; CHECK_FPE; addr = tcg_temp_new(); tcg_gen_add_tl(addr, src1, src2); - tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); - tcg_temp_free(addr); + tcg_gen_qemu_st_tl(src3, addr, ctx->mem_idx, mop); return true; } @@ -91,6 +86,7 @@ static bool gen_fload_gt(DisasContext *ctx, arg_frr *a, MemOp mop) { TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv 
dest = get_fpr(ctx, a->fd); TCGv addr; CHECK_FPE; @@ -98,9 +94,9 @@ static bool gen_fload_gt(DisasContext *ctx, arg_frr *a, MemOp mop) addr = tcg_temp_new(); gen_helper_asrtgt_d(cpu_env, src1, src2); tcg_gen_add_tl(addr, src1, src2); - tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); - maybe_nanbox_load(cpu_fpr[a->fd], mop); - tcg_temp_free(addr); + tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); + maybe_nanbox_load(dest, mop); + set_fpr(a->fd, dest); return true; } @@ -109,6 +105,7 @@ static bool gen_fstore_gt(DisasContext *ctx, arg_frr *a, MemOp mop) { TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv src3 = get_fpr(ctx, a->fd); TCGv addr; CHECK_FPE; @@ -116,8 +113,7 @@ static bool gen_fstore_gt(DisasContext *ctx, arg_frr *a, MemOp mop) addr = tcg_temp_new(); gen_helper_asrtgt_d(cpu_env, src1, src2); tcg_gen_add_tl(addr, src1, src2); - tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); - tcg_temp_free(addr); + tcg_gen_qemu_st_tl(src3, addr, ctx->mem_idx, mop); return true; } @@ -126,6 +122,7 @@ static bool gen_fload_le(DisasContext *ctx, arg_frr *a, MemOp mop) { TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv dest = get_fpr(ctx, a->fd); TCGv addr; CHECK_FPE; @@ -133,9 +130,9 @@ static bool gen_fload_le(DisasContext *ctx, arg_frr *a, MemOp mop) addr = tcg_temp_new(); gen_helper_asrtle_d(cpu_env, src1, src2); tcg_gen_add_tl(addr, src1, src2); - tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); - maybe_nanbox_load(cpu_fpr[a->fd], mop); - tcg_temp_free(addr); + tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); + maybe_nanbox_load(dest, mop); + set_fpr(a->fd, dest); return true; } @@ -144,6 +141,7 @@ static bool gen_fstore_le(DisasContext *ctx, arg_frr *a, MemOp mop) { TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE); TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE); + TCGv src3 = get_fpr(ctx, a->fd); TCGv addr; CHECK_FPE; @@ -151,8 +149,7 @@ static bool gen_fstore_le(DisasContext *ctx, arg_frr *a, MemOp mop) addr = tcg_temp_new(); gen_helper_asrtle_d(cpu_env, src1, src2); tcg_gen_add_tl(addr, src1, src2); - tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); - tcg_temp_free(addr); + tcg_gen_qemu_st_tl(src3, addr, ctx->mem_idx, mop); return true; } diff --git a/target/loongarch/insn_trans/trans_fmov.c.inc b/target/loongarch/insn_trans/trans_fmov.c.inc index 8e5106db4e..5af0dd1b66 100644 --- a/target/loongarch/insn_trans/trans_fmov.c.inc +++ b/target/loongarch/insn_trans/trans_fmov.c.inc @@ -10,15 +10,17 @@ static const uint32_t fcsr_mask[4] = { static bool trans_fsel(DisasContext *ctx, arg_fsel *a) { TCGv zero = tcg_constant_tl(0); + TCGv dest = get_fpr(ctx, a->fd); + TCGv src1 = get_fpr(ctx, a->fj); + TCGv src2 = get_fpr(ctx, a->fk); TCGv cond; CHECK_FPE; cond = tcg_temp_new(); tcg_gen_ld8u_tl(cond, cpu_env, offsetof(CPULoongArchState, cf[a->ca])); - tcg_gen_movcond_tl(TCG_COND_EQ, cpu_fpr[a->fd], cond, zero, - cpu_fpr[a->fj], cpu_fpr[a->fk]); - tcg_temp_free(cond); + tcg_gen_movcond_tl(TCG_COND_EQ, dest, cond, zero, src1, src2); + set_fpr(a->fd, dest); return true; } @@ -26,15 +28,16 @@ static bool trans_fsel(DisasContext *ctx, arg_fsel *a) static bool gen_f2f(DisasContext *ctx, arg_ff *a, void (*func)(TCGv, TCGv), bool nanbox) { - TCGv dest = cpu_fpr[a->fd]; - TCGv src = cpu_fpr[a->fj]; + TCGv dest = get_fpr(ctx, a->fd); + TCGv src = get_fpr(ctx, a->fj); CHECK_FPE; func(dest, src); if (nanbox) { - gen_nanbox_s(cpu_fpr[a->fd], cpu_fpr[a->fd]); + gen_nanbox_s(dest, dest); } 
+ set_fpr(a->fd, dest); return true; } @@ -43,10 +46,13 @@ static bool gen_r2f(DisasContext *ctx, arg_fr *a, void (*func)(TCGv, TCGv)) { TCGv src = gpr_src(ctx, a->rj, EXT_NONE); + TCGv dest = get_fpr(ctx, a->fd); CHECK_FPE; - func(cpu_fpr[a->fd], src); + func(dest, src); + set_fpr(a->fd, dest); + return true; } @@ -54,10 +60,11 @@ static bool gen_f2r(DisasContext *ctx, arg_rf *a, void (*func)(TCGv, TCGv)) { TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); + TCGv src = get_fpr(ctx, a->fj); CHECK_FPE; - func(dest, cpu_fpr[a->fj]); + func(dest, src); gen_set_gpr(a->rd, dest, EXT_NONE); return true; @@ -82,9 +89,6 @@ static bool trans_movgr2fcsr(DisasContext *ctx, arg_movgr2fcsr *a) tcg_gen_andi_i32(fcsr0, fcsr0, ~mask); tcg_gen_or_i32(fcsr0, fcsr0, temp); tcg_gen_st_i32(fcsr0, cpu_env, offsetof(CPULoongArchState, fcsr0)); - - tcg_temp_free_i32(temp); - tcg_temp_free_i32(fcsr0); } /* @@ -128,23 +132,27 @@ static void gen_movfrh2gr_s(TCGv dest, TCGv src) static bool trans_movfr2cf(DisasContext *ctx, arg_movfr2cf *a) { TCGv t0; + TCGv src = get_fpr(ctx, a->fj); CHECK_FPE; t0 = tcg_temp_new(); - tcg_gen_andi_tl(t0, cpu_fpr[a->fj], 0x1); + tcg_gen_andi_tl(t0, src, 0x1); tcg_gen_st8_tl(t0, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); - tcg_temp_free(t0); return true; } static bool trans_movcf2fr(DisasContext *ctx, arg_movcf2fr *a) { + TCGv dest = get_fpr(ctx, a->fd); + CHECK_FPE; - tcg_gen_ld8u_tl(cpu_fpr[a->fd], cpu_env, + tcg_gen_ld8u_tl(dest, cpu_env, offsetof(CPULoongArchState, cf[a->cj & 0x7])); + set_fpr(a->fd, dest); + return true; } @@ -157,7 +165,6 @@ static bool trans_movgr2cf(DisasContext *ctx, arg_movgr2cf *a) t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, gpr_src(ctx, a->rj, EXT_NONE), 0x1); tcg_gen_st8_tl(t0, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); - tcg_temp_free(t0); return true; } diff --git a/target/loongarch/insn_trans/trans_lsx.c.inc b/target/loongarch/insn_trans/trans_lsx.c.inc new file mode 100644 index 0000000000..68779daff6 --- /dev/null +++ b/target/loongarch/insn_trans/trans_lsx.c.inc @@ -0,0 +1,4413 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LSX translate functions + * Copyright (c) 2022-2023 Loongson Technology Corporation Limited + */ + +#ifndef CONFIG_USER_ONLY +#define CHECK_SXE do { \ + if ((ctx->base.tb->flags & HW_FLAGS_EUEN_SXE) == 0) { \ + generate_exception(ctx, EXCCODE_SXD); \ + return true; \ + } \ +} while (0) +#else +#define CHECK_SXE +#endif + +static bool gen_vvvv(DisasContext *ctx, arg_vvvv *a, + void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32, + TCGv_i32, TCGv_i32)) +{ + TCGv_i32 vd = tcg_constant_i32(a->vd); + TCGv_i32 vj = tcg_constant_i32(a->vj); + TCGv_i32 vk = tcg_constant_i32(a->vk); + TCGv_i32 va = tcg_constant_i32(a->va); + + CHECK_SXE; + func(cpu_env, vd, vj, vk, va); + return true; +} + +static bool gen_vvv(DisasContext *ctx, arg_vvv *a, + void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 vd = tcg_constant_i32(a->vd); + TCGv_i32 vj = tcg_constant_i32(a->vj); + TCGv_i32 vk = tcg_constant_i32(a->vk); + + CHECK_SXE; + + func(cpu_env, vd, vj, vk); + return true; +} + +static bool gen_vv(DisasContext *ctx, arg_vv *a, + void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 vd = tcg_constant_i32(a->vd); + TCGv_i32 vj = tcg_constant_i32(a->vj); + + CHECK_SXE; + func(cpu_env, vd, vj); + return true; +} + +static bool gen_vv_i(DisasContext *ctx, arg_vv_i *a, + void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 vd = tcg_constant_i32(a->vd); + TCGv_i32 vj = tcg_constant_i32(a->vj); + 
TCGv_i32 imm = tcg_constant_i32(a->imm); + + CHECK_SXE; + func(cpu_env, vd, vj, imm); + return true; +} + +static bool gen_cv(DisasContext *ctx, arg_cv *a, + void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 vj = tcg_constant_i32(a->vj); + TCGv_i32 cd = tcg_constant_i32(a->cd); + + CHECK_SXE; + func(cpu_env, cd, vj); + return true; +} + +static bool gvec_vvv(DisasContext *ctx, arg_vvv *a, MemOp mop, + void (*func)(unsigned, uint32_t, uint32_t, + uint32_t, uint32_t, uint32_t)) +{ + uint32_t vd_ofs, vj_ofs, vk_ofs; + + CHECK_SXE; + + vd_ofs = vec_full_offset(a->vd); + vj_ofs = vec_full_offset(a->vj); + vk_ofs = vec_full_offset(a->vk); + + func(mop, vd_ofs, vj_ofs, vk_ofs, 16, ctx->vl/8); + return true; +} + +static bool gvec_vv(DisasContext *ctx, arg_vv *a, MemOp mop, + void (*func)(unsigned, uint32_t, uint32_t, + uint32_t, uint32_t)) +{ + uint32_t vd_ofs, vj_ofs; + + CHECK_SXE; + + vd_ofs = vec_full_offset(a->vd); + vj_ofs = vec_full_offset(a->vj); + + func(mop, vd_ofs, vj_ofs, 16, ctx->vl/8); + return true; +} + +static bool gvec_vv_i(DisasContext *ctx, arg_vv_i *a, MemOp mop, + void (*func)(unsigned, uint32_t, uint32_t, + int64_t, uint32_t, uint32_t)) +{ + uint32_t vd_ofs, vj_ofs; + + CHECK_SXE; + + vd_ofs = vec_full_offset(a->vd); + vj_ofs = vec_full_offset(a->vj); + + func(mop, vd_ofs, vj_ofs, a->imm , 16, ctx->vl/8); + return true; +} + +static bool gvec_subi(DisasContext *ctx, arg_vv_i *a, MemOp mop) +{ + uint32_t vd_ofs, vj_ofs; + + CHECK_SXE; + + vd_ofs = vec_full_offset(a->vd); + vj_ofs = vec_full_offset(a->vj); + + tcg_gen_gvec_addi(mop, vd_ofs, vj_ofs, -a->imm, 16, ctx->vl/8); + return true; +} + +TRANS(vadd_b, gvec_vvv, MO_8, tcg_gen_gvec_add) +TRANS(vadd_h, gvec_vvv, MO_16, tcg_gen_gvec_add) +TRANS(vadd_w, gvec_vvv, MO_32, tcg_gen_gvec_add) +TRANS(vadd_d, gvec_vvv, MO_64, tcg_gen_gvec_add) + +#define VADDSUB_Q(NAME) \ +static bool trans_v## NAME ##_q(DisasContext *ctx, arg_vvv *a) \ +{ \ + TCGv_i64 rh, rl, ah, al, bh, bl; \ + \ + CHECK_SXE; \ + \ + rh = tcg_temp_new_i64(); \ + rl = tcg_temp_new_i64(); \ + ah = tcg_temp_new_i64(); \ + al = tcg_temp_new_i64(); \ + bh = tcg_temp_new_i64(); \ + bl = tcg_temp_new_i64(); \ + \ + get_vreg64(ah, a->vj, 1); \ + get_vreg64(al, a->vj, 0); \ + get_vreg64(bh, a->vk, 1); \ + get_vreg64(bl, a->vk, 0); \ + \ + tcg_gen_## NAME ##2_i64(rl, rh, al, ah, bl, bh); \ + \ + set_vreg64(rh, a->vd, 1); \ + set_vreg64(rl, a->vd, 0); \ + \ + return true; \ +} + +VADDSUB_Q(add) +VADDSUB_Q(sub) + +TRANS(vsub_b, gvec_vvv, MO_8, tcg_gen_gvec_sub) +TRANS(vsub_h, gvec_vvv, MO_16, tcg_gen_gvec_sub) +TRANS(vsub_w, gvec_vvv, MO_32, tcg_gen_gvec_sub) +TRANS(vsub_d, gvec_vvv, MO_64, tcg_gen_gvec_sub) + +TRANS(vaddi_bu, gvec_vv_i, MO_8, tcg_gen_gvec_addi) +TRANS(vaddi_hu, gvec_vv_i, MO_16, tcg_gen_gvec_addi) +TRANS(vaddi_wu, gvec_vv_i, MO_32, tcg_gen_gvec_addi) +TRANS(vaddi_du, gvec_vv_i, MO_64, tcg_gen_gvec_addi) +TRANS(vsubi_bu, gvec_subi, MO_8) +TRANS(vsubi_hu, gvec_subi, MO_16) +TRANS(vsubi_wu, gvec_subi, MO_32) +TRANS(vsubi_du, gvec_subi, MO_64) + +TRANS(vneg_b, gvec_vv, MO_8, tcg_gen_gvec_neg) +TRANS(vneg_h, gvec_vv, MO_16, tcg_gen_gvec_neg) +TRANS(vneg_w, gvec_vv, MO_32, tcg_gen_gvec_neg) +TRANS(vneg_d, gvec_vv, MO_64, tcg_gen_gvec_neg) + +TRANS(vsadd_b, gvec_vvv, MO_8, tcg_gen_gvec_ssadd) +TRANS(vsadd_h, gvec_vvv, MO_16, tcg_gen_gvec_ssadd) +TRANS(vsadd_w, gvec_vvv, MO_32, tcg_gen_gvec_ssadd) +TRANS(vsadd_d, gvec_vvv, MO_64, tcg_gen_gvec_ssadd) +TRANS(vsadd_bu, gvec_vvv, MO_8, tcg_gen_gvec_usadd) +TRANS(vsadd_hu, gvec_vvv, MO_16, tcg_gen_gvec_usadd) 
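+/*
+ * A minimal sketch of the TRANS() lines around this note, assuming the
+ * decodetree convention used by this target (illustrative only): each
+ * one expands to the trans_<insn>() hook and forwards to the shared
+ * expander with the trailing arguments, roughly
+ *
+ *     static bool trans_vsadd_b(DisasContext *ctx, arg_vvv *a)
+ *     {
+ *         return gvec_vvv(ctx, a, MO_8, tcg_gen_gvec_ssadd);
+ *     }
+ */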
+TRANS(vsadd_wu, gvec_vvv, MO_32, tcg_gen_gvec_usadd) +TRANS(vsadd_du, gvec_vvv, MO_64, tcg_gen_gvec_usadd) +TRANS(vssub_b, gvec_vvv, MO_8, tcg_gen_gvec_sssub) +TRANS(vssub_h, gvec_vvv, MO_16, tcg_gen_gvec_sssub) +TRANS(vssub_w, gvec_vvv, MO_32, tcg_gen_gvec_sssub) +TRANS(vssub_d, gvec_vvv, MO_64, tcg_gen_gvec_sssub) +TRANS(vssub_bu, gvec_vvv, MO_8, tcg_gen_gvec_ussub) +TRANS(vssub_hu, gvec_vvv, MO_16, tcg_gen_gvec_ussub) +TRANS(vssub_wu, gvec_vvv, MO_32, tcg_gen_gvec_ussub) +TRANS(vssub_du, gvec_vvv, MO_64, tcg_gen_gvec_ussub) + +TRANS(vhaddw_h_b, gen_vvv, gen_helper_vhaddw_h_b) +TRANS(vhaddw_w_h, gen_vvv, gen_helper_vhaddw_w_h) +TRANS(vhaddw_d_w, gen_vvv, gen_helper_vhaddw_d_w) +TRANS(vhaddw_q_d, gen_vvv, gen_helper_vhaddw_q_d) +TRANS(vhaddw_hu_bu, gen_vvv, gen_helper_vhaddw_hu_bu) +TRANS(vhaddw_wu_hu, gen_vvv, gen_helper_vhaddw_wu_hu) +TRANS(vhaddw_du_wu, gen_vvv, gen_helper_vhaddw_du_wu) +TRANS(vhaddw_qu_du, gen_vvv, gen_helper_vhaddw_qu_du) +TRANS(vhsubw_h_b, gen_vvv, gen_helper_vhsubw_h_b) +TRANS(vhsubw_w_h, gen_vvv, gen_helper_vhsubw_w_h) +TRANS(vhsubw_d_w, gen_vvv, gen_helper_vhsubw_d_w) +TRANS(vhsubw_q_d, gen_vvv, gen_helper_vhsubw_q_d) +TRANS(vhsubw_hu_bu, gen_vvv, gen_helper_vhsubw_hu_bu) +TRANS(vhsubw_wu_hu, gen_vvv, gen_helper_vhsubw_wu_hu) +TRANS(vhsubw_du_wu, gen_vvv, gen_helper_vhsubw_du_wu) +TRANS(vhsubw_qu_du, gen_vvv, gen_helper_vhsubw_qu_du) + +static void gen_vaddwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2; + + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + + /* Sign-extend the even elements from a */ + tcg_gen_shli_vec(vece, t1, a, halfbits); + tcg_gen_sari_vec(vece, t1, t1, halfbits); + + /* Sign-extend the even elements from b */ + tcg_gen_shli_vec(vece, t2, b, halfbits); + tcg_gen_sari_vec(vece, t2, t2, halfbits); + + tcg_gen_add_vec(vece, t, t1, t2); +} + +static void gen_vaddwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_ext16s_i32(t1, a); + tcg_gen_ext16s_i32(t2, b); + tcg_gen_add_i32(t, t1, t2); +} + +static void gen_vaddwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_ext32s_i64(t1, a); + tcg_gen_ext32s_i64(t2, b); + tcg_gen_add_i64(t, t1, t2); +} + +static void do_vaddwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vaddwev_s, + .fno = gen_helper_vaddwev_h_b, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vaddwev_w_h, + .fniv = gen_vaddwev_s, + .fno = gen_helper_vaddwev_w_h, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vaddwev_d_w, + .fniv = gen_vaddwev_s, + .fno = gen_helper_vaddwev_d_w, + .opt_opc = vecop_list, + .vece = MO_64 + }, + { + .fno = gen_helper_vaddwev_q_d, + .vece = MO_128 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vaddwev_h_b, gvec_vvv, MO_8, do_vaddwev_s) +TRANS(vaddwev_w_h, gvec_vvv, MO_16, do_vaddwev_s) +TRANS(vaddwev_d_w, gvec_vvv, MO_32, do_vaddwev_s) +TRANS(vaddwev_q_d, gvec_vvv, MO_64, do_vaddwev_s) + +static void gen_vaddwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_sari_i32(t1, a, 16); + 
tcg_gen_sari_i32(t2, b, 16); + tcg_gen_add_i32(t, t1, t2); +} + +static void gen_vaddwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_sari_i64(t1, a, 32); + tcg_gen_sari_i64(t2, b, 32); + tcg_gen_add_i64(t, t1, t2); +} + +static void gen_vaddwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2; + + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + + /* Sign-extend the odd elements for vector */ + tcg_gen_sari_vec(vece, t1, a, halfbits); + tcg_gen_sari_vec(vece, t2, b, halfbits); + + tcg_gen_add_vec(vece, t, t1, t2); +} + +static void do_vaddwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_sari_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vaddwod_s, + .fno = gen_helper_vaddwod_h_b, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vaddwod_w_h, + .fniv = gen_vaddwod_s, + .fno = gen_helper_vaddwod_w_h, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vaddwod_d_w, + .fniv = gen_vaddwod_s, + .fno = gen_helper_vaddwod_d_w, + .opt_opc = vecop_list, + .vece = MO_64 + }, + { + .fno = gen_helper_vaddwod_q_d, + .vece = MO_128 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vaddwod_h_b, gvec_vvv, MO_8, do_vaddwod_s) +TRANS(vaddwod_w_h, gvec_vvv, MO_16, do_vaddwod_s) +TRANS(vaddwod_d_w, gvec_vvv, MO_32, do_vaddwod_s) +TRANS(vaddwod_q_d, gvec_vvv, MO_64, do_vaddwod_s) + +static void gen_vsubwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2; + + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + + /* Sign-extend the even elements from a */ + tcg_gen_shli_vec(vece, t1, a, halfbits); + tcg_gen_sari_vec(vece, t1, t1, halfbits); + + /* Sign-extend the even elements from b */ + tcg_gen_shli_vec(vece, t2, b, halfbits); + tcg_gen_sari_vec(vece, t2, t2, halfbits); + + tcg_gen_sub_vec(vece, t, t1, t2); +} + +static void gen_vsubwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_ext16s_i32(t1, a); + tcg_gen_ext16s_i32(t2, b); + tcg_gen_sub_i32(t, t1, t2); +} + +static void gen_vsubwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_ext32s_i64(t1, a); + tcg_gen_ext32s_i64(t2, b); + tcg_gen_sub_i64(t, t1, t2); +} + +static void do_vsubwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_sub_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vsubwev_s, + .fno = gen_helper_vsubwev_h_b, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vsubwev_w_h, + .fniv = gen_vsubwev_s, + .fno = gen_helper_vsubwev_w_h, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vsubwev_d_w, + .fniv = gen_vsubwev_s, + .fno = gen_helper_vsubwev_d_w, + .opt_opc = vecop_list, + .vece = MO_64 + }, + { + .fno = gen_helper_vsubwev_q_d, + .vece = MO_128 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vsubwev_h_b, gvec_vvv, MO_8, do_vsubwev_s) +TRANS(vsubwev_w_h, gvec_vvv, MO_16, do_vsubwev_s) 
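+/*
+ * A plain C reference model of the even-element semantics, written for
+ * illustration (an assumption, not code from the patch): the "wev"
+ * forms use only the even-indexed elements, sign-extended to double
+ * width before the operation.  For vsubwev.h.b:
+ *
+ *     static void ref_vsubwev_h_b(int16_t d[8], const int8_t j[16],
+ *                                 const int8_t k[16])
+ *     {
+ *         for (int i = 0; i < 8; i++) {
+ *             d[i] = (int16_t)j[2 * i] - (int16_t)k[2 * i];
+ *         }
+ *     }
+ *
+ * The fniv expansion above reaches the same result with shli+sari,
+ * which sign-extends the low (even) half of each double-width lane.
+ */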
+TRANS(vsubwev_d_w, gvec_vvv, MO_32, do_vsubwev_s) +TRANS(vsubwev_q_d, gvec_vvv, MO_64, do_vsubwev_s) + +static void gen_vsubwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2; + + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + + /* Sign-extend the odd elements for vector */ + tcg_gen_sari_vec(vece, t1, a, halfbits); + tcg_gen_sari_vec(vece, t2, b, halfbits); + + tcg_gen_sub_vec(vece, t, t1, t2); +} + +static void gen_vsubwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_sari_i32(t1, a, 16); + tcg_gen_sari_i32(t2, b, 16); + tcg_gen_sub_i32(t, t1, t2); +} + +static void gen_vsubwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_sari_i64(t1, a, 32); + tcg_gen_sari_i64(t2, b, 32); + tcg_gen_sub_i64(t, t1, t2); +} + +static void do_vsubwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_sari_vec, INDEX_op_sub_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vsubwod_s, + .fno = gen_helper_vsubwod_h_b, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vsubwod_w_h, + .fniv = gen_vsubwod_s, + .fno = gen_helper_vsubwod_w_h, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vsubwod_d_w, + .fniv = gen_vsubwod_s, + .fno = gen_helper_vsubwod_d_w, + .opt_opc = vecop_list, + .vece = MO_64 + }, + { + .fno = gen_helper_vsubwod_q_d, + .vece = MO_128 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vsubwod_h_b, gvec_vvv, MO_8, do_vsubwod_s) +TRANS(vsubwod_w_h, gvec_vvv, MO_16, do_vsubwod_s) +TRANS(vsubwod_d_w, gvec_vvv, MO_32, do_vsubwod_s) +TRANS(vsubwod_q_d, gvec_vvv, MO_64, do_vsubwod_s) + +static void gen_vaddwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2, t3; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece)); + tcg_gen_and_vec(vece, t1, a, t3); + tcg_gen_and_vec(vece, t2, b, t3); + tcg_gen_add_vec(vece, t, t1, t2); +} + +static void gen_vaddwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_ext16u_i32(t1, a); + tcg_gen_ext16u_i32(t2, b); + tcg_gen_add_i32(t, t1, t2); +} + +static void gen_vaddwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_ext32u_i64(t1, a); + tcg_gen_ext32u_i64(t2, b); + tcg_gen_add_i64(t, t1, t2); +} + +static void do_vaddwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vaddwev_u, + .fno = gen_helper_vaddwev_h_bu, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vaddwev_w_hu, + .fniv = gen_vaddwev_u, + .fno = gen_helper_vaddwev_w_hu, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vaddwev_d_wu, + .fniv = gen_vaddwev_u, + .fno = gen_helper_vaddwev_d_wu, + .opt_opc = vecop_list, + .vece = MO_64 + }, + { + .fno = gen_helper_vaddwev_q_du, + .vece = MO_128 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + 
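+/*
+ * Aside on the GVecGen3 tables above, assuming TCG's usual gvec
+ * expansion rules (illustrative only): tcg_gen_gvec_3() prefers .fniv
+ * with the listed opt_opc vector ops when the host supports them,
+ * falls back to the scalar .fni8/.fni4 expansions, and otherwise calls
+ * the out-of-line .fno helper, which is also the only choice for the
+ * MO_128 entries.  For the unsigned even-element forms, the AND with
+ * MAKE_64BIT_MASK(0, 4 << vece) zero-extends the even elements in
+ * place, e.g. at MO_16 each 16-bit lane keeps only its low 8 bits.
+ */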
+TRANS(vaddwev_h_bu, gvec_vvv, MO_8, do_vaddwev_u) +TRANS(vaddwev_w_hu, gvec_vvv, MO_16, do_vaddwev_u) +TRANS(vaddwev_d_wu, gvec_vvv, MO_32, do_vaddwev_u) +TRANS(vaddwev_q_du, gvec_vvv, MO_64, do_vaddwev_u) + +static void gen_vaddwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2; + + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + + /* Zero-extend the odd elements for vector */ + tcg_gen_shri_vec(vece, t1, a, halfbits); + tcg_gen_shri_vec(vece, t2, b, halfbits); + + tcg_gen_add_vec(vece, t, t1, t2); +} + +static void gen_vaddwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_shri_i32(t1, a, 16); + tcg_gen_shri_i32(t2, b, 16); + tcg_gen_add_i32(t, t1, t2); +} + +static void gen_vaddwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_shri_i64(t1, a, 32); + tcg_gen_shri_i64(t2, b, 32); + tcg_gen_add_i64(t, t1, t2); +} + +static void do_vaddwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shri_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vaddwod_u, + .fno = gen_helper_vaddwod_h_bu, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vaddwod_w_hu, + .fniv = gen_vaddwod_u, + .fno = gen_helper_vaddwod_w_hu, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vaddwod_d_wu, + .fniv = gen_vaddwod_u, + .fno = gen_helper_vaddwod_d_wu, + .opt_opc = vecop_list, + .vece = MO_64 + }, + { + .fno = gen_helper_vaddwod_q_du, + .vece = MO_128 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vaddwod_h_bu, gvec_vvv, MO_8, do_vaddwod_u) +TRANS(vaddwod_w_hu, gvec_vvv, MO_16, do_vaddwod_u) +TRANS(vaddwod_d_wu, gvec_vvv, MO_32, do_vaddwod_u) +TRANS(vaddwod_q_du, gvec_vvv, MO_64, do_vaddwod_u) + +static void gen_vsubwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2, t3; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece)); + tcg_gen_and_vec(vece, t1, a, t3); + tcg_gen_and_vec(vece, t2, b, t3); + tcg_gen_sub_vec(vece, t, t1, t2); +} + +static void gen_vsubwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_ext16u_i32(t1, a); + tcg_gen_ext16u_i32(t2, b); + tcg_gen_sub_i32(t, t1, t2); +} + +static void gen_vsubwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_ext32u_i64(t1, a); + tcg_gen_ext32u_i64(t2, b); + tcg_gen_sub_i64(t, t1, t2); +} + +static void do_vsubwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_sub_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vsubwev_u, + .fno = gen_helper_vsubwev_h_bu, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vsubwev_w_hu, + .fniv = gen_vsubwev_u, + .fno = gen_helper_vsubwev_w_hu, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vsubwev_d_wu, + .fniv = gen_vsubwev_u, + .fno = gen_helper_vsubwev_d_wu, + .opt_opc = vecop_list, + .vece = MO_64 + }, + { + .fno = gen_helper_vsubwev_q_du, + 
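            /*
             * [Editor's note] no .fni4/.fni8/.fniv for the 128-bit (q)
             * forms: they have no inline TCG expansion, so gvec always
             * calls the out-of-line helper for this entry.
             */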
.vece = MO_128 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vsubwev_h_bu, gvec_vvv, MO_8, do_vsubwev_u) +TRANS(vsubwev_w_hu, gvec_vvv, MO_16, do_vsubwev_u) +TRANS(vsubwev_d_wu, gvec_vvv, MO_32, do_vsubwev_u) +TRANS(vsubwev_q_du, gvec_vvv, MO_64, do_vsubwev_u) + +static void gen_vsubwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2; + + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + + /* Zero-extend the odd elements for vector */ + tcg_gen_shri_vec(vece, t1, a, halfbits); + tcg_gen_shri_vec(vece, t2, b, halfbits); + + tcg_gen_sub_vec(vece, t, t1, t2); +} + +static void gen_vsubwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_shri_i32(t1, a, 16); + tcg_gen_shri_i32(t2, b, 16); + tcg_gen_sub_i32(t, t1, t2); +} + +static void gen_vsubwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_shri_i64(t1, a, 32); + tcg_gen_shri_i64(t2, b, 32); + tcg_gen_sub_i64(t, t1, t2); +} + +static void do_vsubwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shri_vec, INDEX_op_sub_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vsubwod_u, + .fno = gen_helper_vsubwod_h_bu, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vsubwod_w_hu, + .fniv = gen_vsubwod_u, + .fno = gen_helper_vsubwod_w_hu, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vsubwod_d_wu, + .fniv = gen_vsubwod_u, + .fno = gen_helper_vsubwod_d_wu, + .opt_opc = vecop_list, + .vece = MO_64 + }, + { + .fno = gen_helper_vsubwod_q_du, + .vece = MO_128 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vsubwod_h_bu, gvec_vvv, MO_8, do_vsubwod_u) +TRANS(vsubwod_w_hu, gvec_vvv, MO_16, do_vsubwod_u) +TRANS(vsubwod_d_wu, gvec_vvv, MO_32, do_vsubwod_u) +TRANS(vsubwod_q_du, gvec_vvv, MO_64, do_vsubwod_u) + +static void gen_vaddwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2, t3; + + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, halfbits)); + + /* Zero-extend the even elements from a */ + tcg_gen_and_vec(vece, t1, a, t3); + + /* Sign-extend the even elements from b */ + tcg_gen_shli_vec(vece, t2, b, halfbits); + tcg_gen_sari_vec(vece, t2, t2, halfbits); + + tcg_gen_add_vec(vece, t, t1, t2); +} + +static void gen_vaddwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_ext16u_i32(t1, a); + tcg_gen_ext16s_i32(t2, b); + tcg_gen_add_i32(t, t1, t2); +} + +static void gen_vaddwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_ext32u_i64(t1, a); + tcg_gen_ext32s_i64(t2, b); + tcg_gen_add_i64(t, t1, t2); +} + +static void do_vaddwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vaddwev_u_s, + .fno = gen_helper_vaddwev_h_bu_b, + .opt_opc = vecop_list, + .vece = 
MO_16 + }, + { + .fni4 = gen_vaddwev_w_hu_h, + .fniv = gen_vaddwev_u_s, + .fno = gen_helper_vaddwev_w_hu_h, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vaddwev_d_wu_w, + .fniv = gen_vaddwev_u_s, + .fno = gen_helper_vaddwev_d_wu_w, + .opt_opc = vecop_list, + .vece = MO_64 + }, + { + .fno = gen_helper_vaddwev_q_du_d, + .vece = MO_128 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vaddwev_h_bu_b, gvec_vvv, MO_8, do_vaddwev_u_s) +TRANS(vaddwev_w_hu_h, gvec_vvv, MO_16, do_vaddwev_u_s) +TRANS(vaddwev_d_wu_w, gvec_vvv, MO_32, do_vaddwev_u_s) +TRANS(vaddwev_q_du_d, gvec_vvv, MO_64, do_vaddwev_u_s) + +static void gen_vaddwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2; + + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + + /* Zero-extend the odd elements from a */ + tcg_gen_shri_vec(vece, t1, a, halfbits); + /* Sign-extend the odd elements from b */ + tcg_gen_sari_vec(vece, t2, b, halfbits); + + tcg_gen_add_vec(vece, t, t1, t2); +} + +static void gen_vaddwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_shri_i32(t1, a, 16); + tcg_gen_sari_i32(t2, b, 16); + tcg_gen_add_i32(t, t1, t2); +} + +static void gen_vaddwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_shri_i64(t1, a, 32); + tcg_gen_sari_i64(t2, b, 32); + tcg_gen_add_i64(t, t1, t2); +} + +static void do_vaddwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vaddwod_u_s, + .fno = gen_helper_vaddwod_h_bu_b, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vaddwod_w_hu_h, + .fniv = gen_vaddwod_u_s, + .fno = gen_helper_vaddwod_w_hu_h, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vaddwod_d_wu_w, + .fniv = gen_vaddwod_u_s, + .fno = gen_helper_vaddwod_d_wu_w, + .opt_opc = vecop_list, + .vece = MO_64 + }, + { + .fno = gen_helper_vaddwod_q_du_d, + .vece = MO_128 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vaddwod_h_bu_b, gvec_vvv, MO_8, do_vaddwod_u_s) +TRANS(vaddwod_w_hu_h, gvec_vvv, MO_16, do_vaddwod_u_s) +TRANS(vaddwod_d_wu_w, gvec_vvv, MO_32, do_vaddwod_u_s) +TRANS(vaddwod_q_du_d, gvec_vvv, MO_64, do_vaddwod_u_s) + +static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, + void (*gen_shr_vec)(unsigned, TCGv_vec, + TCGv_vec, int64_t), + void (*gen_round_vec)(unsigned, TCGv_vec, + TCGv_vec, TCGv_vec)) +{ + TCGv_vec tmp = tcg_temp_new_vec_matching(t); + gen_round_vec(vece, tmp, a, b); + tcg_gen_and_vec(vece, tmp, tmp, tcg_constant_vec_matching(t, vece, 1)); + gen_shr_vec(vece, a, a, 1); + gen_shr_vec(vece, b, b, 1); + tcg_gen_add_vec(vece, t, a, b); + tcg_gen_add_vec(vece, t, t, tmp); +} + +static void gen_vavg_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + do_vavg(vece, t, a, b, tcg_gen_sari_vec, tcg_gen_and_vec); +} + +static void gen_vavg_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + do_vavg(vece, t, a, b, tcg_gen_shri_vec, tcg_gen_and_vec); +} + +static void gen_vavgr_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + do_vavg(vece, t, a, b, tcg_gen_sari_vec, tcg_gen_or_vec); +} + +static void 
gen_vavgr_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + do_vavg(vece, t, a, b, tcg_gen_shri_vec, tcg_gen_or_vec); +} + +static void do_vavg_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_sari_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vavg_s, + .fno = gen_helper_vavg_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vavg_s, + .fno = gen_helper_vavg_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vavg_s, + .fno = gen_helper_vavg_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vavg_s, + .fno = gen_helper_vavg_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +static void do_vavg_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shri_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vavg_u, + .fno = gen_helper_vavg_bu, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vavg_u, + .fno = gen_helper_vavg_hu, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vavg_u, + .fno = gen_helper_vavg_wu, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vavg_u, + .fno = gen_helper_vavg_du, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vavg_b, gvec_vvv, MO_8, do_vavg_s) +TRANS(vavg_h, gvec_vvv, MO_16, do_vavg_s) +TRANS(vavg_w, gvec_vvv, MO_32, do_vavg_s) +TRANS(vavg_d, gvec_vvv, MO_64, do_vavg_s) +TRANS(vavg_bu, gvec_vvv, MO_8, do_vavg_u) +TRANS(vavg_hu, gvec_vvv, MO_16, do_vavg_u) +TRANS(vavg_wu, gvec_vvv, MO_32, do_vavg_u) +TRANS(vavg_du, gvec_vvv, MO_64, do_vavg_u) + +static void do_vavgr_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_sari_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vavgr_s, + .fno = gen_helper_vavgr_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vavgr_s, + .fno = gen_helper_vavgr_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vavgr_s, + .fno = gen_helper_vavgr_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vavgr_s, + .fno = gen_helper_vavgr_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +static void do_vavgr_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shri_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vavgr_u, + .fno = gen_helper_vavgr_bu, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vavgr_u, + .fno = gen_helper_vavgr_hu, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vavgr_u, + .fno = gen_helper_vavgr_wu, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vavgr_u, + .fno = gen_helper_vavgr_du, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vavgr_b, gvec_vvv, MO_8, do_vavgr_s) +TRANS(vavgr_h, gvec_vvv, MO_16, do_vavgr_s) +TRANS(vavgr_w, gvec_vvv, MO_32, do_vavgr_s) 
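/*
 * [Editor's note, not part of the patch] do_vavg() above avoids a
 * widening add by using the carry-free identities
 *     (a + b) >> 1     == (a >> 1) + (b >> 1) + (a & b & 1)
 *     (a + b + 1) >> 1 == (a >> 1) + (b >> 1) + ((a | b) & 1)
 * which is why the and/or combiner is passed in as gen_round_vec.
 * A scalar illustration of the rounding form:
 */
static inline uint8_t avgr_u8(uint8_t a, uint8_t b)
{
    /* rounding unsigned average without intermediate overflow */
    return (a >> 1) + (b >> 1) + ((a | b) & 1);
}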
+TRANS(vavgr_d, gvec_vvv, MO_64, do_vavgr_s) +TRANS(vavgr_bu, gvec_vvv, MO_8, do_vavgr_u) +TRANS(vavgr_hu, gvec_vvv, MO_16, do_vavgr_u) +TRANS(vavgr_wu, gvec_vvv, MO_32, do_vavgr_u) +TRANS(vavgr_du, gvec_vvv, MO_64, do_vavgr_u) + +static void gen_vabsd_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + tcg_gen_smax_vec(vece, t, a, b); + tcg_gen_smin_vec(vece, a, a, b); + tcg_gen_sub_vec(vece, t, t, a); +} + +static void do_vabsd_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_smax_vec, INDEX_op_smin_vec, INDEX_op_sub_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vabsd_s, + .fno = gen_helper_vabsd_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vabsd_s, + .fno = gen_helper_vabsd_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vabsd_s, + .fno = gen_helper_vabsd_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vabsd_s, + .fno = gen_helper_vabsd_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +static void gen_vabsd_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + tcg_gen_umax_vec(vece, t, a, b); + tcg_gen_umin_vec(vece, a, a, b); + tcg_gen_sub_vec(vece, t, t, a); +} + +static void do_vabsd_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_umax_vec, INDEX_op_umin_vec, INDEX_op_sub_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vabsd_u, + .fno = gen_helper_vabsd_bu, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vabsd_u, + .fno = gen_helper_vabsd_hu, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vabsd_u, + .fno = gen_helper_vabsd_wu, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vabsd_u, + .fno = gen_helper_vabsd_du, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vabsd_b, gvec_vvv, MO_8, do_vabsd_s) +TRANS(vabsd_h, gvec_vvv, MO_16, do_vabsd_s) +TRANS(vabsd_w, gvec_vvv, MO_32, do_vabsd_s) +TRANS(vabsd_d, gvec_vvv, MO_64, do_vabsd_s) +TRANS(vabsd_bu, gvec_vvv, MO_8, do_vabsd_u) +TRANS(vabsd_hu, gvec_vvv, MO_16, do_vabsd_u) +TRANS(vabsd_wu, gvec_vvv, MO_32, do_vabsd_u) +TRANS(vabsd_du, gvec_vvv, MO_64, do_vabsd_u) + +static void gen_vadda(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + + tcg_gen_abs_vec(vece, t1, a); + tcg_gen_abs_vec(vece, t2, b); + tcg_gen_add_vec(vece, t, t1, t2); +} + +static void do_vadda(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_abs_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vadda, + .fno = gen_helper_vadda_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vadda, + .fno = gen_helper_vadda_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vadda, + .fno = gen_helper_vadda_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vadda, + .fno = gen_helper_vadda_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vadda_b, gvec_vvv, MO_8, do_vadda) +TRANS(vadda_h, gvec_vvv, 
MO_16, do_vadda) +TRANS(vadda_w, gvec_vvv, MO_32, do_vadda) +TRANS(vadda_d, gvec_vvv, MO_64, do_vadda) + +TRANS(vmax_b, gvec_vvv, MO_8, tcg_gen_gvec_smax) +TRANS(vmax_h, gvec_vvv, MO_16, tcg_gen_gvec_smax) +TRANS(vmax_w, gvec_vvv, MO_32, tcg_gen_gvec_smax) +TRANS(vmax_d, gvec_vvv, MO_64, tcg_gen_gvec_smax) +TRANS(vmax_bu, gvec_vvv, MO_8, tcg_gen_gvec_umax) +TRANS(vmax_hu, gvec_vvv, MO_16, tcg_gen_gvec_umax) +TRANS(vmax_wu, gvec_vvv, MO_32, tcg_gen_gvec_umax) +TRANS(vmax_du, gvec_vvv, MO_64, tcg_gen_gvec_umax) + +TRANS(vmin_b, gvec_vvv, MO_8, tcg_gen_gvec_smin) +TRANS(vmin_h, gvec_vvv, MO_16, tcg_gen_gvec_smin) +TRANS(vmin_w, gvec_vvv, MO_32, tcg_gen_gvec_smin) +TRANS(vmin_d, gvec_vvv, MO_64, tcg_gen_gvec_smin) +TRANS(vmin_bu, gvec_vvv, MO_8, tcg_gen_gvec_umin) +TRANS(vmin_hu, gvec_vvv, MO_16, tcg_gen_gvec_umin) +TRANS(vmin_wu, gvec_vvv, MO_32, tcg_gen_gvec_umin) +TRANS(vmin_du, gvec_vvv, MO_64, tcg_gen_gvec_umin) + +static void gen_vmini_s(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + tcg_gen_smin_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm)); +} + +static void gen_vmini_u(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + tcg_gen_umin_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm)); +} + +static void gen_vmaxi_s(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + tcg_gen_smax_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm)); +} + +static void gen_vmaxi_u(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + tcg_gen_umax_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm)); +} + +static void do_vmini_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + int64_t imm, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_smin_vec, 0 + }; + static const GVecGen2i op[4] = { + { + .fniv = gen_vmini_s, + .fnoi = gen_helper_vmini_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vmini_s, + .fnoi = gen_helper_vmini_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vmini_s, + .fnoi = gen_helper_vmini_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vmini_s, + .fnoi = gen_helper_vmini_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]); +} + +static void do_vmini_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + int64_t imm, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_umin_vec, 0 + }; + static const GVecGen2i op[4] = { + { + .fniv = gen_vmini_u, + .fnoi = gen_helper_vmini_bu, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vmini_u, + .fnoi = gen_helper_vmini_hu, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vmini_u, + .fnoi = gen_helper_vmini_wu, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vmini_u, + .fnoi = gen_helper_vmini_du, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]); +} + +TRANS(vmini_b, gvec_vv_i, MO_8, do_vmini_s) +TRANS(vmini_h, gvec_vv_i, MO_16, do_vmini_s) +TRANS(vmini_w, gvec_vv_i, MO_32, do_vmini_s) +TRANS(vmini_d, gvec_vv_i, MO_64, do_vmini_s) +TRANS(vmini_bu, gvec_vv_i, MO_8, do_vmini_u) +TRANS(vmini_hu, gvec_vv_i, MO_16, do_vmini_u) +TRANS(vmini_wu, gvec_vv_i, MO_32, do_vmini_u) +TRANS(vmini_du, gvec_vv_i, MO_64, do_vmini_u) + +static void do_vmaxi_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + int64_t imm, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_smax_vec, 
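        /*
         * [Editor's note] opt_opc must list every vector op the .fniv
         * expansion emits; when the host backend cannot provide one of
         * them, gvec falls back to the out-of-line .fnoi helper.  The
         * list is terminated by the 0 that follows.
         */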
0 + }; + static const GVecGen2i op[4] = { + { + .fniv = gen_vmaxi_s, + .fnoi = gen_helper_vmaxi_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vmaxi_s, + .fnoi = gen_helper_vmaxi_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vmaxi_s, + .fnoi = gen_helper_vmaxi_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vmaxi_s, + .fnoi = gen_helper_vmaxi_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]); +} + +static void do_vmaxi_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + int64_t imm, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_umax_vec, 0 + }; + static const GVecGen2i op[4] = { + { + .fniv = gen_vmaxi_u, + .fnoi = gen_helper_vmaxi_bu, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vmaxi_u, + .fnoi = gen_helper_vmaxi_hu, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vmaxi_u, + .fnoi = gen_helper_vmaxi_wu, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vmaxi_u, + .fnoi = gen_helper_vmaxi_du, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]); +} + +TRANS(vmaxi_b, gvec_vv_i, MO_8, do_vmaxi_s) +TRANS(vmaxi_h, gvec_vv_i, MO_16, do_vmaxi_s) +TRANS(vmaxi_w, gvec_vv_i, MO_32, do_vmaxi_s) +TRANS(vmaxi_d, gvec_vv_i, MO_64, do_vmaxi_s) +TRANS(vmaxi_bu, gvec_vv_i, MO_8, do_vmaxi_u) +TRANS(vmaxi_hu, gvec_vv_i, MO_16, do_vmaxi_u) +TRANS(vmaxi_wu, gvec_vv_i, MO_32, do_vmaxi_u) +TRANS(vmaxi_du, gvec_vv_i, MO_64, do_vmaxi_u) + +TRANS(vmul_b, gvec_vvv, MO_8, tcg_gen_gvec_mul) +TRANS(vmul_h, gvec_vvv, MO_16, tcg_gen_gvec_mul) +TRANS(vmul_w, gvec_vvv, MO_32, tcg_gen_gvec_mul) +TRANS(vmul_d, gvec_vvv, MO_64, tcg_gen_gvec_mul) + +static void gen_vmuh_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 discard = tcg_temp_new_i32(); + tcg_gen_muls2_i32(discard, t, a, b); +} + +static void gen_vmuh_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 discard = tcg_temp_new_i64(); + tcg_gen_muls2_i64(discard, t, a, b); +} + +static void do_vmuh_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 op[4] = { + { + .fno = gen_helper_vmuh_b, + .vece = MO_8 + }, + { + .fno = gen_helper_vmuh_h, + .vece = MO_16 + }, + { + .fni4 = gen_vmuh_w, + .fno = gen_helper_vmuh_w, + .vece = MO_32 + }, + { + .fni8 = gen_vmuh_d, + .fno = gen_helper_vmuh_d, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmuh_b, gvec_vvv, MO_8, do_vmuh_s) +TRANS(vmuh_h, gvec_vvv, MO_16, do_vmuh_s) +TRANS(vmuh_w, gvec_vvv, MO_32, do_vmuh_s) +TRANS(vmuh_d, gvec_vvv, MO_64, do_vmuh_s) + +static void gen_vmuh_wu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 discard = tcg_temp_new_i32(); + tcg_gen_mulu2_i32(discard, t, a, b); +} + +static void gen_vmuh_du(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 discard = tcg_temp_new_i64(); + tcg_gen_mulu2_i64(discard, t, a, b); +} + +static void do_vmuh_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 op[4] = { + { + .fno = gen_helper_vmuh_bu, + .vece = MO_8 + }, + { + .fno = gen_helper_vmuh_hu, + .vece = MO_16 + }, + { + .fni4 = gen_vmuh_wu, + .fno = gen_helper_vmuh_wu, + .vece = MO_32 + }, + { + .fni8 = gen_vmuh_du, + .fno = gen_helper_vmuh_du, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, 
oprsz, maxsz, &op[vece]); +} + +TRANS(vmuh_bu, gvec_vvv, MO_8, do_vmuh_u) +TRANS(vmuh_hu, gvec_vvv, MO_16, do_vmuh_u) +TRANS(vmuh_wu, gvec_vvv, MO_32, do_vmuh_u) +TRANS(vmuh_du, gvec_vvv, MO_64, do_vmuh_u) + +static void gen_vmulwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2; + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + tcg_gen_shli_vec(vece, t1, a, halfbits); + tcg_gen_sari_vec(vece, t1, t1, halfbits); + tcg_gen_shli_vec(vece, t2, b, halfbits); + tcg_gen_sari_vec(vece, t2, t2, halfbits); + tcg_gen_mul_vec(vece, t, t1, t2); +} + +static void gen_vmulwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_ext16s_i32(t1, a); + tcg_gen_ext16s_i32(t2, b); + tcg_gen_mul_i32(t, t1, t2); +} + +static void gen_vmulwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_ext32s_i64(t1, a); + tcg_gen_ext32s_i64(t2, b); + tcg_gen_mul_i64(t, t1, t2); +} + +static void do_vmulwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_mul_vec, 0 + }; + static const GVecGen3 op[3] = { + { + .fniv = gen_vmulwev_s, + .fno = gen_helper_vmulwev_h_b, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmulwev_w_h, + .fniv = gen_vmulwev_s, + .fno = gen_helper_vmulwev_w_h, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmulwev_d_w, + .fniv = gen_vmulwev_s, + .fno = gen_helper_vmulwev_d_w, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmulwev_h_b, gvec_vvv, MO_8, do_vmulwev_s) +TRANS(vmulwev_w_h, gvec_vvv, MO_16, do_vmulwev_s) +TRANS(vmulwev_d_w, gvec_vvv, MO_32, do_vmulwev_s) + +static void tcg_gen_mulus2_i64(TCGv_i64 rl, TCGv_i64 rh, + TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_mulsu2_i64(rl, rh, arg2, arg1); +} + +#define VMUL_Q(NAME, FN, idx1, idx2) \ +static bool trans_## NAME (DisasContext *ctx, arg_vvv *a) \ +{ \ + TCGv_i64 rh, rl, arg1, arg2; \ + \ + rh = tcg_temp_new_i64(); \ + rl = tcg_temp_new_i64(); \ + arg1 = tcg_temp_new_i64(); \ + arg2 = tcg_temp_new_i64(); \ + \ + get_vreg64(arg1, a->vj, idx1); \ + get_vreg64(arg2, a->vk, idx2); \ + \ + tcg_gen_## FN ##_i64(rl, rh, arg1, arg2); \ + \ + set_vreg64(rh, a->vd, 1); \ + set_vreg64(rl, a->vd, 0); \ + \ + return true; \ +} + +VMUL_Q(vmulwev_q_d, muls2, 0, 0) +VMUL_Q(vmulwod_q_d, muls2, 1, 1) +VMUL_Q(vmulwev_q_du, mulu2, 0, 0) +VMUL_Q(vmulwod_q_du, mulu2, 1, 1) +VMUL_Q(vmulwev_q_du_d, mulus2, 0, 0) +VMUL_Q(vmulwod_q_du_d, mulus2, 1, 1) + +static void gen_vmulwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2; + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + tcg_gen_sari_vec(vece, t1, a, halfbits); + tcg_gen_sari_vec(vece, t2, b, halfbits); + tcg_gen_mul_vec(vece, t, t1, t2); +} + +static void gen_vmulwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_sari_i32(t1, a, 16); + tcg_gen_sari_i32(t2, b, 16); + tcg_gen_mul_i32(t, t1, t2); +} + +static void gen_vmulwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_sari_i64(t1, a, 
32); + tcg_gen_sari_i64(t2, b, 32); + tcg_gen_mul_i64(t, t1, t2); +} + +static void do_vmulwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_sari_vec, INDEX_op_mul_vec, 0 + }; + static const GVecGen3 op[3] = { + { + .fniv = gen_vmulwod_s, + .fno = gen_helper_vmulwod_h_b, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmulwod_w_h, + .fniv = gen_vmulwod_s, + .fno = gen_helper_vmulwod_w_h, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmulwod_d_w, + .fniv = gen_vmulwod_s, + .fno = gen_helper_vmulwod_d_w, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmulwod_h_b, gvec_vvv, MO_8, do_vmulwod_s) +TRANS(vmulwod_w_h, gvec_vvv, MO_16, do_vmulwod_s) +TRANS(vmulwod_d_w, gvec_vvv, MO_32, do_vmulwod_s) + +static void gen_vmulwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2, mask; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece)); + tcg_gen_and_vec(vece, t1, a, mask); + tcg_gen_and_vec(vece, t2, b, mask); + tcg_gen_mul_vec(vece, t, t1, t2); +} + +static void gen_vmulwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_ext16u_i32(t1, a); + tcg_gen_ext16u_i32(t2, b); + tcg_gen_mul_i32(t, t1, t2); +} + +static void gen_vmulwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_ext32u_i64(t1, a); + tcg_gen_ext32u_i64(t2, b); + tcg_gen_mul_i64(t, t1, t2); +} + +static void do_vmulwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_mul_vec, 0 + }; + static const GVecGen3 op[3] = { + { + .fniv = gen_vmulwev_u, + .fno = gen_helper_vmulwev_h_bu, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmulwev_w_hu, + .fniv = gen_vmulwev_u, + .fno = gen_helper_vmulwev_w_hu, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmulwev_d_wu, + .fniv = gen_vmulwev_u, + .fno = gen_helper_vmulwev_d_wu, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmulwev_h_bu, gvec_vvv, MO_8, do_vmulwev_u) +TRANS(vmulwev_w_hu, gvec_vvv, MO_16, do_vmulwev_u) +TRANS(vmulwev_d_wu, gvec_vvv, MO_32, do_vmulwev_u) + +static void gen_vmulwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2; + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + tcg_gen_shri_vec(vece, t1, a, halfbits); + tcg_gen_shri_vec(vece, t2, b, halfbits); + tcg_gen_mul_vec(vece, t, t1, t2); +} + +static void gen_vmulwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_shri_i32(t1, a, 16); + tcg_gen_shri_i32(t2, b, 16); + tcg_gen_mul_i32(t, t1, t2); +} + +static void gen_vmulwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_shri_i64(t1, a, 32); + tcg_gen_shri_i64(t2, b, 32); + tcg_gen_mul_i64(t, t1, t2); +} + +static void do_vmulwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t 
oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shri_vec, INDEX_op_mul_vec, 0 + }; + static const GVecGen3 op[3] = { + { + .fniv = gen_vmulwod_u, + .fno = gen_helper_vmulwod_h_bu, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmulwod_w_hu, + .fniv = gen_vmulwod_u, + .fno = gen_helper_vmulwod_w_hu, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmulwod_d_wu, + .fniv = gen_vmulwod_u, + .fno = gen_helper_vmulwod_d_wu, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmulwod_h_bu, gvec_vvv, MO_8, do_vmulwod_u) +TRANS(vmulwod_w_hu, gvec_vvv, MO_16, do_vmulwod_u) +TRANS(vmulwod_d_wu, gvec_vvv, MO_32, do_vmulwod_u) + +static void gen_vmulwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2, mask; + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece)); + tcg_gen_and_vec(vece, t1, a, mask); + tcg_gen_shli_vec(vece, t2, b, halfbits); + tcg_gen_sari_vec(vece, t2, t2, halfbits); + tcg_gen_mul_vec(vece, t, t1, t2); +} + +static void gen_vmulwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_ext16u_i32(t1, a); + tcg_gen_ext16s_i32(t2, b); + tcg_gen_mul_i32(t, t1, t2); +} + +static void gen_vmulwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_ext32u_i64(t1, a); + tcg_gen_ext32s_i64(t2, b); + tcg_gen_mul_i64(t, t1, t2); +} + +static void do_vmulwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_mul_vec, 0 + }; + static const GVecGen3 op[3] = { + { + .fniv = gen_vmulwev_u_s, + .fno = gen_helper_vmulwev_h_bu_b, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmulwev_w_hu_h, + .fniv = gen_vmulwev_u_s, + .fno = gen_helper_vmulwev_w_hu_h, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmulwev_d_wu_w, + .fniv = gen_vmulwev_u_s, + .fno = gen_helper_vmulwev_d_wu_w, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmulwev_h_bu_b, gvec_vvv, MO_8, do_vmulwev_u_s) +TRANS(vmulwev_w_hu_h, gvec_vvv, MO_16, do_vmulwev_u_s) +TRANS(vmulwev_d_wu_w, gvec_vvv, MO_32, do_vmulwev_u_s) + +static void gen_vmulwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2; + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + tcg_gen_shri_vec(vece, t1, a, halfbits); + tcg_gen_sari_vec(vece, t2, b, halfbits); + tcg_gen_mul_vec(vece, t, t1, t2); +} + +static void gen_vmulwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1, t2; + + t1 = tcg_temp_new_i32(); + t2 = tcg_temp_new_i32(); + tcg_gen_shri_i32(t1, a, 16); + tcg_gen_sari_i32(t2, b, 16); + tcg_gen_mul_i32(t, t1, t2); +} +static void gen_vmulwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1, t2; + + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + tcg_gen_shri_i64(t1, a, 32); + tcg_gen_sari_i64(t2, b, 32); + tcg_gen_mul_i64(t, t1, t2); +} + +static void do_vmulwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, 
uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_mul_vec, 0 + }; + static const GVecGen3 op[3] = { + { + .fniv = gen_vmulwod_u_s, + .fno = gen_helper_vmulwod_h_bu_b, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmulwod_w_hu_h, + .fniv = gen_vmulwod_u_s, + .fno = gen_helper_vmulwod_w_hu_h, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmulwod_d_wu_w, + .fniv = gen_vmulwod_u_s, + .fno = gen_helper_vmulwod_d_wu_w, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmulwod_h_bu_b, gvec_vvv, MO_8, do_vmulwod_u_s) +TRANS(vmulwod_w_hu_h, gvec_vvv, MO_16, do_vmulwod_u_s) +TRANS(vmulwod_d_wu_w, gvec_vvv, MO_32, do_vmulwod_u_s) + +static void gen_vmadd(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1; + + t1 = tcg_temp_new_vec_matching(t); + tcg_gen_mul_vec(vece, t1, a, b); + tcg_gen_add_vec(vece, t, t, t1); +} + +static void gen_vmadd_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1; + + t1 = tcg_temp_new_i32(); + tcg_gen_mul_i32(t1, a, b); + tcg_gen_add_i32(t, t, t1); +} + +static void gen_vmadd_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1; + + t1 = tcg_temp_new_i64(); + tcg_gen_mul_i64(t1, a, b); + tcg_gen_add_i64(t, t, t1); +} + +static void do_vmadd(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_mul_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vmadd, + .fno = gen_helper_vmadd_b, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vmadd, + .fno = gen_helper_vmadd_h, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmadd_w, + .fniv = gen_vmadd, + .fno = gen_helper_vmadd_w, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmadd_d, + .fniv = gen_vmadd, + .fno = gen_helper_vmadd_d, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmadd_b, gvec_vvv, MO_8, do_vmadd) +TRANS(vmadd_h, gvec_vvv, MO_16, do_vmadd) +TRANS(vmadd_w, gvec_vvv, MO_32, do_vmadd) +TRANS(vmadd_d, gvec_vvv, MO_64, do_vmadd) + +static void gen_vmsub(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1; + + t1 = tcg_temp_new_vec_matching(t); + tcg_gen_mul_vec(vece, t1, a, b); + tcg_gen_sub_vec(vece, t, t, t1); +} + +static void gen_vmsub_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1; + + t1 = tcg_temp_new_i32(); + tcg_gen_mul_i32(t1, a, b); + tcg_gen_sub_i32(t, t, t1); +} + +static void gen_vmsub_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1; + + t1 = tcg_temp_new_i64(); + tcg_gen_mul_i64(t1, a, b); + tcg_gen_sub_i64(t, t, t1); +} + +static void do_vmsub(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_mul_vec, INDEX_op_sub_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vmsub, + .fno = gen_helper_vmsub_b, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vmsub, + .fno = gen_helper_vmsub_h, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmsub_w, + .fniv = gen_vmsub, + .fno = gen_helper_vmsub_w, + .load_dest = true, + .opt_opc = vecop_list, + .vece = 
MO_32 + }, + { + .fni8 = gen_vmsub_d, + .fniv = gen_vmsub, + .fno = gen_helper_vmsub_d, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmsub_b, gvec_vvv, MO_8, do_vmsub) +TRANS(vmsub_h, gvec_vvv, MO_16, do_vmsub) +TRANS(vmsub_w, gvec_vvv, MO_32, do_vmsub) +TRANS(vmsub_d, gvec_vvv, MO_64, do_vmsub) + +static void gen_vmaddwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2, t3; + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + t3 = tcg_temp_new_vec_matching(t); + tcg_gen_shli_vec(vece, t1, a, halfbits); + tcg_gen_sari_vec(vece, t1, t1, halfbits); + tcg_gen_shli_vec(vece, t2, b, halfbits); + tcg_gen_sari_vec(vece, t2, t2, halfbits); + tcg_gen_mul_vec(vece, t3, t1, t2); + tcg_gen_add_vec(vece, t, t, t3); +} + +static void gen_vmaddwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1; + + t1 = tcg_temp_new_i32(); + gen_vmulwev_w_h(t1, a, b); + tcg_gen_add_i32(t, t, t1); +} + +static void gen_vmaddwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1; + + t1 = tcg_temp_new_i64(); + gen_vmulwev_d_w(t1, a, b); + tcg_gen_add_i64(t, t, t1); +} + +static void do_vmaddwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shli_vec, INDEX_op_sari_vec, + INDEX_op_mul_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[3] = { + { + .fniv = gen_vmaddwev_s, + .fno = gen_helper_vmaddwev_h_b, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmaddwev_w_h, + .fniv = gen_vmaddwev_s, + .fno = gen_helper_vmaddwev_w_h, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmaddwev_d_w, + .fniv = gen_vmaddwev_s, + .fno = gen_helper_vmaddwev_d_w, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmaddwev_h_b, gvec_vvv, MO_8, do_vmaddwev_s) +TRANS(vmaddwev_w_h, gvec_vvv, MO_16, do_vmaddwev_s) +TRANS(vmaddwev_d_w, gvec_vvv, MO_32, do_vmaddwev_s) + +#define VMADD_Q(NAME, FN, idx1, idx2) \ +static bool trans_## NAME (DisasContext *ctx, arg_vvv *a) \ +{ \ + TCGv_i64 rh, rl, arg1, arg2, th, tl; \ + \ + rh = tcg_temp_new_i64(); \ + rl = tcg_temp_new_i64(); \ + arg1 = tcg_temp_new_i64(); \ + arg2 = tcg_temp_new_i64(); \ + th = tcg_temp_new_i64(); \ + tl = tcg_temp_new_i64(); \ + \ + get_vreg64(arg1, a->vj, idx1); \ + get_vreg64(arg2, a->vk, idx2); \ + get_vreg64(rh, a->vd, 1); \ + get_vreg64(rl, a->vd, 0); \ + \ + tcg_gen_## FN ##_i64(tl, th, arg1, arg2); \ + tcg_gen_add2_i64(rl, rh, rl, rh, tl, th); \ + \ + set_vreg64(rh, a->vd, 1); \ + set_vreg64(rl, a->vd, 0); \ + \ + return true; \ +} + +VMADD_Q(vmaddwev_q_d, muls2, 0, 0) +VMADD_Q(vmaddwod_q_d, muls2, 1, 1) +VMADD_Q(vmaddwev_q_du, mulu2, 0, 0) +VMADD_Q(vmaddwod_q_du, mulu2, 1, 1) +VMADD_Q(vmaddwev_q_du_d, mulus2, 0, 0) +VMADD_Q(vmaddwod_q_du_d, mulus2, 1, 1) + +static void gen_vmaddwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2, t3; + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + t3 = tcg_temp_new_vec_matching(t); + tcg_gen_sari_vec(vece, t1, a, halfbits); + tcg_gen_sari_vec(vece, t2, b, halfbits); + tcg_gen_mul_vec(vece, t3, t1, t2); + tcg_gen_add_vec(vece, t, t, t3); +} + +static void 
gen_vmaddwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1; + + t1 = tcg_temp_new_i32(); + gen_vmulwod_w_h(t1, a, b); + tcg_gen_add_i32(t, t, t1); +} + +static void gen_vmaddwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1; + + t1 = tcg_temp_new_i64(); + gen_vmulwod_d_w(t1, a, b); + tcg_gen_add_i64(t, t, t1); +} + +static void do_vmaddwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_sari_vec, INDEX_op_mul_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[3] = { + { + .fniv = gen_vmaddwod_s, + .fno = gen_helper_vmaddwod_h_b, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmaddwod_w_h, + .fniv = gen_vmaddwod_s, + .fno = gen_helper_vmaddwod_w_h, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmaddwod_d_w, + .fniv = gen_vmaddwod_s, + .fno = gen_helper_vmaddwod_d_w, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmaddwod_h_b, gvec_vvv, MO_8, do_vmaddwod_s) +TRANS(vmaddwod_w_h, gvec_vvv, MO_16, do_vmaddwod_s) +TRANS(vmaddwod_d_w, gvec_vvv, MO_32, do_vmaddwod_s) + +static void gen_vmaddwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2, mask; + + t1 = tcg_temp_new_vec_matching(t); + t2 = tcg_temp_new_vec_matching(b); + mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece)); + tcg_gen_and_vec(vece, t1, a, mask); + tcg_gen_and_vec(vece, t2, b, mask); + tcg_gen_mul_vec(vece, t1, t1, t2); + tcg_gen_add_vec(vece, t, t, t1); +} + +static void gen_vmaddwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1; + + t1 = tcg_temp_new_i32(); + gen_vmulwev_w_hu(t1, a, b); + tcg_gen_add_i32(t, t, t1); +} + +static void gen_vmaddwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1; + + t1 = tcg_temp_new_i64(); + gen_vmulwev_d_wu(t1, a, b); + tcg_gen_add_i64(t, t, t1); +} + +static void do_vmaddwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_mul_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[3] = { + { + .fniv = gen_vmaddwev_u, + .fno = gen_helper_vmaddwev_h_bu, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmaddwev_w_hu, + .fniv = gen_vmaddwev_u, + .fno = gen_helper_vmaddwev_w_hu, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmaddwev_d_wu, + .fniv = gen_vmaddwev_u, + .fno = gen_helper_vmaddwev_d_wu, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmaddwev_h_bu, gvec_vvv, MO_8, do_vmaddwev_u) +TRANS(vmaddwev_w_hu, gvec_vvv, MO_16, do_vmaddwev_u) +TRANS(vmaddwev_d_wu, gvec_vvv, MO_32, do_vmaddwev_u) + +static void gen_vmaddwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2, t3; + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + t3 = tcg_temp_new_vec_matching(t); + tcg_gen_shri_vec(vece, t1, a, halfbits); + tcg_gen_shri_vec(vece, t2, b, halfbits); + tcg_gen_mul_vec(vece, t3, t1, t2); + tcg_gen_add_vec(vece, t, t, t3); +} + +static void gen_vmaddwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1; + + t1 = tcg_temp_new_i32(); + 
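    /*
     * [Editor's note] the scalar multiply-accumulate expanders reuse the
     * plain vmulw* expanders for the product and only add the accumulate
     * step: below, t1 = odd_half(a) * odd_half(b), then t += t1.
     */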
gen_vmulwod_w_hu(t1, a, b); + tcg_gen_add_i32(t, t, t1); +} + +static void gen_vmaddwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1; + + t1 = tcg_temp_new_i64(); + gen_vmulwod_d_wu(t1, a, b); + tcg_gen_add_i64(t, t, t1); +} + +static void do_vmaddwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shri_vec, INDEX_op_mul_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[3] = { + { + .fniv = gen_vmaddwod_u, + .fno = gen_helper_vmaddwod_h_bu, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmaddwod_w_hu, + .fniv = gen_vmaddwod_u, + .fno = gen_helper_vmaddwod_w_hu, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmaddwod_d_wu, + .fniv = gen_vmaddwod_u, + .fno = gen_helper_vmaddwod_d_wu, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmaddwod_h_bu, gvec_vvv, MO_8, do_vmaddwod_u) +TRANS(vmaddwod_w_hu, gvec_vvv, MO_16, do_vmaddwod_u) +TRANS(vmaddwod_d_wu, gvec_vvv, MO_32, do_vmaddwod_u) + +static void gen_vmaddwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2, mask; + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece)); + tcg_gen_and_vec(vece, t1, a, mask); + tcg_gen_shli_vec(vece, t2, b, halfbits); + tcg_gen_sari_vec(vece, t2, t2, halfbits); + tcg_gen_mul_vec(vece, t1, t1, t2); + tcg_gen_add_vec(vece, t, t, t1); +} + +static void gen_vmaddwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t1; + + t1 = tcg_temp_new_i32(); + gen_vmulwev_w_hu_h(t1, a, b); + tcg_gen_add_i32(t, t, t1); +} + +static void gen_vmaddwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1; + + t1 = tcg_temp_new_i64(); + gen_vmulwev_d_wu_w(t1, a, b); + tcg_gen_add_i64(t, t, t1); +} + +static void do_vmaddwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shli_vec, INDEX_op_sari_vec, + INDEX_op_mul_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[3] = { + { + .fniv = gen_vmaddwev_u_s, + .fno = gen_helper_vmaddwev_h_bu_b, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmaddwev_w_hu_h, + .fniv = gen_vmaddwev_u_s, + .fno = gen_helper_vmaddwev_w_hu_h, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmaddwev_d_wu_w, + .fniv = gen_vmaddwev_u_s, + .fno = gen_helper_vmaddwev_d_wu_w, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmaddwev_h_bu_b, gvec_vvv, MO_8, do_vmaddwev_u_s) +TRANS(vmaddwev_w_hu_h, gvec_vvv, MO_16, do_vmaddwev_u_s) +TRANS(vmaddwev_d_wu_w, gvec_vvv, MO_32, do_vmaddwev_u_s) + +static void gen_vmaddwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, t2, t3; + int halfbits = 4 << vece; + + t1 = tcg_temp_new_vec_matching(a); + t2 = tcg_temp_new_vec_matching(b); + t3 = tcg_temp_new_vec_matching(t); + tcg_gen_shri_vec(vece, t1, a, halfbits); + tcg_gen_sari_vec(vece, t2, b, halfbits); + tcg_gen_mul_vec(vece, t3, t1, t2); + tcg_gen_add_vec(vece, t, t, t3); +} + +static void gen_vmaddwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 
b) +{ + TCGv_i32 t1; + + t1 = tcg_temp_new_i32(); + gen_vmulwod_w_hu_h(t1, a, b); + tcg_gen_add_i32(t, t, t1); +} + +static void gen_vmaddwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1; + + t1 = tcg_temp_new_i64(); + gen_vmulwod_d_wu_w(t1, a, b); + tcg_gen_add_i64(t, t, t1); +} + +static void do_vmaddwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shri_vec, INDEX_op_sari_vec, + INDEX_op_mul_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen3 op[3] = { + { + .fniv = gen_vmaddwod_u_s, + .fno = gen_helper_vmaddwod_h_bu_b, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fni4 = gen_vmaddwod_w_hu_h, + .fniv = gen_vmaddwod_u_s, + .fno = gen_helper_vmaddwod_w_hu_h, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fni8 = gen_vmaddwod_d_wu_w, + .fniv = gen_vmaddwod_u_s, + .fno = gen_helper_vmaddwod_d_wu_w, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vmaddwod_h_bu_b, gvec_vvv, MO_8, do_vmaddwod_u_s) +TRANS(vmaddwod_w_hu_h, gvec_vvv, MO_16, do_vmaddwod_u_s) +TRANS(vmaddwod_d_wu_w, gvec_vvv, MO_32, do_vmaddwod_u_s) + +TRANS(vdiv_b, gen_vvv, gen_helper_vdiv_b) +TRANS(vdiv_h, gen_vvv, gen_helper_vdiv_h) +TRANS(vdiv_w, gen_vvv, gen_helper_vdiv_w) +TRANS(vdiv_d, gen_vvv, gen_helper_vdiv_d) +TRANS(vdiv_bu, gen_vvv, gen_helper_vdiv_bu) +TRANS(vdiv_hu, gen_vvv, gen_helper_vdiv_hu) +TRANS(vdiv_wu, gen_vvv, gen_helper_vdiv_wu) +TRANS(vdiv_du, gen_vvv, gen_helper_vdiv_du) +TRANS(vmod_b, gen_vvv, gen_helper_vmod_b) +TRANS(vmod_h, gen_vvv, gen_helper_vmod_h) +TRANS(vmod_w, gen_vvv, gen_helper_vmod_w) +TRANS(vmod_d, gen_vvv, gen_helper_vmod_d) +TRANS(vmod_bu, gen_vvv, gen_helper_vmod_bu) +TRANS(vmod_hu, gen_vvv, gen_helper_vmod_hu) +TRANS(vmod_wu, gen_vvv, gen_helper_vmod_wu) +TRANS(vmod_du, gen_vvv, gen_helper_vmod_du) + +static void gen_vsat_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec max) +{ + TCGv_vec min; + + min = tcg_temp_new_vec_matching(t); + tcg_gen_not_vec(vece, min, max); + tcg_gen_smax_vec(vece, t, a, min); + tcg_gen_smin_vec(vece, t, t, max); +} + +static void do_vsat_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + int64_t imm, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_smax_vec, INDEX_op_smin_vec, 0 + }; + static const GVecGen2s op[4] = { + { + .fniv = gen_vsat_s, + .fno = gen_helper_vsat_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vsat_s, + .fno = gen_helper_vsat_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vsat_s, + .fno = gen_helper_vsat_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vsat_s, + .fno = gen_helper_vsat_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_2s(vd_ofs, vj_ofs, oprsz, maxsz, + tcg_constant_i64((1ll<< imm) -1), &op[vece]); +} + +TRANS(vsat_b, gvec_vv_i, MO_8, do_vsat_s) +TRANS(vsat_h, gvec_vv_i, MO_16, do_vsat_s) +TRANS(vsat_w, gvec_vv_i, MO_32, do_vsat_s) +TRANS(vsat_d, gvec_vv_i, MO_64, do_vsat_s) + +static void gen_vsat_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec max) +{ + tcg_gen_umin_vec(vece, t, a, max); +} + +static void do_vsat_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + int64_t imm, uint32_t oprsz, uint32_t maxsz) +{ + uint64_t max; + static const TCGOpcode vecop_list[] = { + INDEX_op_umin_vec, 0 + }; + static const 
GVecGen2s op[4] = { + { + .fniv = gen_vsat_u, + .fno = gen_helper_vsat_bu, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vsat_u, + .fno = gen_helper_vsat_hu, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vsat_u, + .fno = gen_helper_vsat_wu, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vsat_u, + .fno = gen_helper_vsat_du, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + max = (imm == 0x3f) ? UINT64_MAX : (1ull << (imm + 1)) - 1; + tcg_gen_gvec_2s(vd_ofs, vj_ofs, oprsz, maxsz, + tcg_constant_i64(max), &op[vece]); +} + +TRANS(vsat_bu, gvec_vv_i, MO_8, do_vsat_u) +TRANS(vsat_hu, gvec_vv_i, MO_16, do_vsat_u) +TRANS(vsat_wu, gvec_vv_i, MO_32, do_vsat_u) +TRANS(vsat_du, gvec_vv_i, MO_64, do_vsat_u) + +TRANS(vexth_h_b, gen_vv, gen_helper_vexth_h_b) +TRANS(vexth_w_h, gen_vv, gen_helper_vexth_w_h) +TRANS(vexth_d_w, gen_vv, gen_helper_vexth_d_w) +TRANS(vexth_q_d, gen_vv, gen_helper_vexth_q_d) +TRANS(vexth_hu_bu, gen_vv, gen_helper_vexth_hu_bu) +TRANS(vexth_wu_hu, gen_vv, gen_helper_vexth_wu_hu) +TRANS(vexth_du_wu, gen_vv, gen_helper_vexth_du_wu) +TRANS(vexth_qu_du, gen_vv, gen_helper_vexth_qu_du) + +static void gen_vsigncov(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t1, zero; + + t1 = tcg_temp_new_vec_matching(t); + zero = tcg_constant_vec_matching(t, vece, 0); + + tcg_gen_neg_vec(vece, t1, b); + tcg_gen_cmpsel_vec(TCG_COND_LT, vece, t, a, zero, t1, b); + tcg_gen_cmpsel_vec(TCG_COND_EQ, vece, t, a, zero, zero, t); +} + +static void do_vsigncov(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_neg_vec, INDEX_op_cmpsel_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vsigncov, + .fno = gen_helper_vsigncov_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vsigncov, + .fno = gen_helper_vsigncov_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vsigncov, + .fno = gen_helper_vsigncov_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vsigncov, + .fno = gen_helper_vsigncov_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vsigncov_b, gvec_vvv, MO_8, do_vsigncov) +TRANS(vsigncov_h, gvec_vvv, MO_16, do_vsigncov) +TRANS(vsigncov_w, gvec_vvv, MO_32, do_vsigncov) +TRANS(vsigncov_d, gvec_vvv, MO_64, do_vsigncov) + +TRANS(vmskltz_b, gen_vv, gen_helper_vmskltz_b) +TRANS(vmskltz_h, gen_vv, gen_helper_vmskltz_h) +TRANS(vmskltz_w, gen_vv, gen_helper_vmskltz_w) +TRANS(vmskltz_d, gen_vv, gen_helper_vmskltz_d) +TRANS(vmskgez_b, gen_vv, gen_helper_vmskgez_b) +TRANS(vmsknz_b, gen_vv, gen_helper_vmsknz_b) + +#define EXPAND_BYTE(bit) ((uint64_t)(bit ? 0xff : 0)) + +static uint64_t vldi_get_value(DisasContext *ctx, uint32_t imm) +{ + int mode; + uint64_t data, t; + + /* + * imm bit [11:8] is mode, mode value is 0-12. + * other values are invalid. 
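+     *
+     * [Editor's note] The per-case comments below use Verilog-style
+     * replication: e.g. {2{16'0, imm[7:0], 8'0}} repeats a 32-bit
+     * pattern twice to fill 64 bits.  trans_vldi() then broadcasts the
+     * 64-bit result to the whole vector via tcg_gen_gvec_dup_i64().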
+     */
+    mode = (imm >> 8) & 0xf;
+    t = imm & 0xff;
+    switch (mode) {
+    case 0:
+        /* data: {2{24'0, imm[7:0]}} */
+        data = (t << 32) | t;
+        break;
+    case 1:
+        /* data: {2{16'0, imm[7:0], 8'0}} */
+        data = (t << 40) | (t << 8);
+        break;
+    case 2:
+        /* data: {2{8'0, imm[7:0], 16'0}} */
+        data = (t << 48) | (t << 16);
+        break;
+    case 3:
+        /* data: {2{imm[7:0], 24'0}} */
+        data = (t << 56) | (t << 24);
+        break;
+    case 4:
+        /* data: {4{8'0, imm[7:0]}} */
+        data = (t << 48) | (t << 32) | (t << 16) | t;
+        break;
+    case 5:
+        /* data: {4{imm[7:0], 8'0}} */
+        data = (t << 56) | (t << 40) | (t << 24) | (t << 8);
+        break;
+    case 6:
+        /* data: {2{16'0, imm[7:0], 8'1}} */
+        data = (t << 40) | ((uint64_t)0xff << 32) | (t << 8) | 0xff;
+        break;
+    case 7:
+        /* data: {2{8'0, imm[7:0], 16'1}} */
+        data = (t << 48) | ((uint64_t)0xffff << 32) | (t << 16) | 0xffff;
+        break;
+    case 8:
+        /* data: {8{imm[7:0]}} */
+        data = (t << 56) | (t << 48) | (t << 40) | (t << 32) |
+               (t << 24) | (t << 16) | (t << 8) | t;
+        break;
+    case 9:
+        /* data: {{8{imm[7]}, ..., 8{imm[0]}}} */
+        {
+            uint64_t b0, b1, b2, b3, b4, b5, b6, b7;
+            b0 = t & 0x1;
+            b1 = (t & 0x2) >> 1;
+            b2 = (t & 0x4) >> 2;
+            b3 = (t & 0x8) >> 3;
+            b4 = (t & 0x10) >> 4;
+            b5 = (t & 0x20) >> 5;
+            b6 = (t & 0x40) >> 6;
+            b7 = (t & 0x80) >> 7;
+            data = (EXPAND_BYTE(b7) << 56) |
+                   (EXPAND_BYTE(b6) << 48) |
+                   (EXPAND_BYTE(b5) << 40) |
+                   (EXPAND_BYTE(b4) << 32) |
+                   (EXPAND_BYTE(b3) << 24) |
+                   (EXPAND_BYTE(b2) << 16) |
+                   (EXPAND_BYTE(b1) << 8) |
+                   EXPAND_BYTE(b0);
+        }
+        break;
+    case 10:
+        /* data: {2{imm[7], ~imm[6], {5{imm[6]}}, imm[5:0], 19'0}} */
+        {
+            uint64_t b6, b7;
+            uint64_t t0, t1;
+            b6 = (imm & 0x40) >> 6;
+            b7 = (imm & 0x80) >> 7;
+            t0 = (imm & 0x3f);
+            t1 = (b7 << 6) | ((1 - b6) << 5) | (uint64_t)(b6 ? 0x1f : 0);
+            data = (t1 << 57) | (t0 << 51) | (t1 << 25) | (t0 << 19);
+        }
+        break;
+    case 11:
+        /* data: {32'0, imm[7], ~imm[6], {5{imm[6]}}, imm[5:0], 19'0} */
+        {
+            uint64_t b6, b7;
+            uint64_t t0, t1;
+            b6 = (imm & 0x40) >> 6;
+            b7 = (imm & 0x80) >> 7;
+            t0 = (imm & 0x3f);
+            t1 = (b7 << 6) | ((1 - b6) << 5) | (b6 ? 0x1f : 0);
+            data = (t1 << 25) | (t0 << 19);
+        }
+        break;
+    case 12:
+        /* data: {imm[7], ~imm[6], {8{imm[6]}}, imm[5:0], 48'0} */
+        {
+            uint64_t b6, b7;
+            uint64_t t0, t1;
+            b6 = (imm & 0x40) >> 6;
+            b7 = (imm & 0x80) >> 7;
+            t0 = (imm & 0x3f);
+            t1 = (b7 << 9) | ((1 - b6) << 8) | (b6 ? 0xff : 0);
0xff : 0); + data = (t1 << 54) | (t0 << 48); + } + break; + default: + generate_exception(ctx, EXCCODE_INE); + g_assert_not_reached(); + } + return data; +} + +static bool trans_vldi(DisasContext *ctx, arg_vldi *a) +{ + int sel, vece; + uint64_t value; + CHECK_SXE; + + sel = (a->imm >> 12) & 0x1; + + if (sel) { + value = vldi_get_value(ctx, a->imm); + vece = MO_64; + } else { + value = ((int32_t)(a->imm << 22)) >> 22; + vece = (a->imm >> 10) & 0x3; + } + + tcg_gen_gvec_dup_i64(vece, vec_full_offset(a->vd), 16, ctx->vl/8, + tcg_constant_i64(value)); + return true; +} + +TRANS(vand_v, gvec_vvv, MO_64, tcg_gen_gvec_and) +TRANS(vor_v, gvec_vvv, MO_64, tcg_gen_gvec_or) +TRANS(vxor_v, gvec_vvv, MO_64, tcg_gen_gvec_xor) +TRANS(vnor_v, gvec_vvv, MO_64, tcg_gen_gvec_nor) + +static bool trans_vandn_v(DisasContext *ctx, arg_vvv *a) +{ + uint32_t vd_ofs, vj_ofs, vk_ofs; + + CHECK_SXE; + + vd_ofs = vec_full_offset(a->vd); + vj_ofs = vec_full_offset(a->vj); + vk_ofs = vec_full_offset(a->vk); + + tcg_gen_gvec_andc(MO_64, vd_ofs, vk_ofs, vj_ofs, 16, ctx->vl/8); + return true; +} +TRANS(vorn_v, gvec_vvv, MO_64, tcg_gen_gvec_orc) +TRANS(vandi_b, gvec_vv_i, MO_8, tcg_gen_gvec_andi) +TRANS(vori_b, gvec_vv_i, MO_8, tcg_gen_gvec_ori) +TRANS(vxori_b, gvec_vv_i, MO_8, tcg_gen_gvec_xori) + +static void gen_vnori(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + TCGv_vec t1; + + t1 = tcg_constant_vec_matching(t, vece, imm); + tcg_gen_nor_vec(vece, t, a, t1); +} + +static void gen_vnori_b(TCGv_i64 t, TCGv_i64 a, int64_t imm) +{ + tcg_gen_movi_i64(t, dup_const(MO_8, imm)); + tcg_gen_nor_i64(t, a, t); +} + +static void do_vnori_b(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + int64_t imm, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_nor_vec, 0 + }; + static const GVecGen2i op = { + .fni8 = gen_vnori_b, + .fniv = gen_vnori, + .fnoi = gen_helper_vnori_b, + .opt_opc = vecop_list, + .vece = MO_8 + }; + + tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op); +} + +TRANS(vnori_b, gvec_vv_i, MO_8, do_vnori_b) + +TRANS(vsll_b, gvec_vvv, MO_8, tcg_gen_gvec_shlv) +TRANS(vsll_h, gvec_vvv, MO_16, tcg_gen_gvec_shlv) +TRANS(vsll_w, gvec_vvv, MO_32, tcg_gen_gvec_shlv) +TRANS(vsll_d, gvec_vvv, MO_64, tcg_gen_gvec_shlv) +TRANS(vslli_b, gvec_vv_i, MO_8, tcg_gen_gvec_shli) +TRANS(vslli_h, gvec_vv_i, MO_16, tcg_gen_gvec_shli) +TRANS(vslli_w, gvec_vv_i, MO_32, tcg_gen_gvec_shli) +TRANS(vslli_d, gvec_vv_i, MO_64, tcg_gen_gvec_shli) + +TRANS(vsrl_b, gvec_vvv, MO_8, tcg_gen_gvec_shrv) +TRANS(vsrl_h, gvec_vvv, MO_16, tcg_gen_gvec_shrv) +TRANS(vsrl_w, gvec_vvv, MO_32, tcg_gen_gvec_shrv) +TRANS(vsrl_d, gvec_vvv, MO_64, tcg_gen_gvec_shrv) +TRANS(vsrli_b, gvec_vv_i, MO_8, tcg_gen_gvec_shri) +TRANS(vsrli_h, gvec_vv_i, MO_16, tcg_gen_gvec_shri) +TRANS(vsrli_w, gvec_vv_i, MO_32, tcg_gen_gvec_shri) +TRANS(vsrli_d, gvec_vv_i, MO_64, tcg_gen_gvec_shri) + +TRANS(vsra_b, gvec_vvv, MO_8, tcg_gen_gvec_sarv) +TRANS(vsra_h, gvec_vvv, MO_16, tcg_gen_gvec_sarv) +TRANS(vsra_w, gvec_vvv, MO_32, tcg_gen_gvec_sarv) +TRANS(vsra_d, gvec_vvv, MO_64, tcg_gen_gvec_sarv) +TRANS(vsrai_b, gvec_vv_i, MO_8, tcg_gen_gvec_sari) +TRANS(vsrai_h, gvec_vv_i, MO_16, tcg_gen_gvec_sari) +TRANS(vsrai_w, gvec_vv_i, MO_32, tcg_gen_gvec_sari) +TRANS(vsrai_d, gvec_vv_i, MO_64, tcg_gen_gvec_sari) + +TRANS(vrotr_b, gvec_vvv, MO_8, tcg_gen_gvec_rotrv) +TRANS(vrotr_h, gvec_vvv, MO_16, tcg_gen_gvec_rotrv) +TRANS(vrotr_w, gvec_vvv, MO_32, tcg_gen_gvec_rotrv) +TRANS(vrotr_d, gvec_vvv, MO_64, tcg_gen_gvec_rotrv) +TRANS(vrotri_b, gvec_vv_i, 
MO_8, tcg_gen_gvec_rotri) +TRANS(vrotri_h, gvec_vv_i, MO_16, tcg_gen_gvec_rotri) +TRANS(vrotri_w, gvec_vv_i, MO_32, tcg_gen_gvec_rotri) +TRANS(vrotri_d, gvec_vv_i, MO_64, tcg_gen_gvec_rotri) + +TRANS(vsllwil_h_b, gen_vv_i, gen_helper_vsllwil_h_b) +TRANS(vsllwil_w_h, gen_vv_i, gen_helper_vsllwil_w_h) +TRANS(vsllwil_d_w, gen_vv_i, gen_helper_vsllwil_d_w) +TRANS(vextl_q_d, gen_vv, gen_helper_vextl_q_d) +TRANS(vsllwil_hu_bu, gen_vv_i, gen_helper_vsllwil_hu_bu) +TRANS(vsllwil_wu_hu, gen_vv_i, gen_helper_vsllwil_wu_hu) +TRANS(vsllwil_du_wu, gen_vv_i, gen_helper_vsllwil_du_wu) +TRANS(vextl_qu_du, gen_vv, gen_helper_vextl_qu_du) + +TRANS(vsrlr_b, gen_vvv, gen_helper_vsrlr_b) +TRANS(vsrlr_h, gen_vvv, gen_helper_vsrlr_h) +TRANS(vsrlr_w, gen_vvv, gen_helper_vsrlr_w) +TRANS(vsrlr_d, gen_vvv, gen_helper_vsrlr_d) +TRANS(vsrlri_b, gen_vv_i, gen_helper_vsrlri_b) +TRANS(vsrlri_h, gen_vv_i, gen_helper_vsrlri_h) +TRANS(vsrlri_w, gen_vv_i, gen_helper_vsrlri_w) +TRANS(vsrlri_d, gen_vv_i, gen_helper_vsrlri_d) + +TRANS(vsrar_b, gen_vvv, gen_helper_vsrar_b) +TRANS(vsrar_h, gen_vvv, gen_helper_vsrar_h) +TRANS(vsrar_w, gen_vvv, gen_helper_vsrar_w) +TRANS(vsrar_d, gen_vvv, gen_helper_vsrar_d) +TRANS(vsrari_b, gen_vv_i, gen_helper_vsrari_b) +TRANS(vsrari_h, gen_vv_i, gen_helper_vsrari_h) +TRANS(vsrari_w, gen_vv_i, gen_helper_vsrari_w) +TRANS(vsrari_d, gen_vv_i, gen_helper_vsrari_d) + +TRANS(vsrln_b_h, gen_vvv, gen_helper_vsrln_b_h) +TRANS(vsrln_h_w, gen_vvv, gen_helper_vsrln_h_w) +TRANS(vsrln_w_d, gen_vvv, gen_helper_vsrln_w_d) +TRANS(vsran_b_h, gen_vvv, gen_helper_vsran_b_h) +TRANS(vsran_h_w, gen_vvv, gen_helper_vsran_h_w) +TRANS(vsran_w_d, gen_vvv, gen_helper_vsran_w_d) + +TRANS(vsrlni_b_h, gen_vv_i, gen_helper_vsrlni_b_h) +TRANS(vsrlni_h_w, gen_vv_i, gen_helper_vsrlni_h_w) +TRANS(vsrlni_w_d, gen_vv_i, gen_helper_vsrlni_w_d) +TRANS(vsrlni_d_q, gen_vv_i, gen_helper_vsrlni_d_q) +TRANS(vsrani_b_h, gen_vv_i, gen_helper_vsrani_b_h) +TRANS(vsrani_h_w, gen_vv_i, gen_helper_vsrani_h_w) +TRANS(vsrani_w_d, gen_vv_i, gen_helper_vsrani_w_d) +TRANS(vsrani_d_q, gen_vv_i, gen_helper_vsrani_d_q) + +TRANS(vsrlrn_b_h, gen_vvv, gen_helper_vsrlrn_b_h) +TRANS(vsrlrn_h_w, gen_vvv, gen_helper_vsrlrn_h_w) +TRANS(vsrlrn_w_d, gen_vvv, gen_helper_vsrlrn_w_d) +TRANS(vsrarn_b_h, gen_vvv, gen_helper_vsrarn_b_h) +TRANS(vsrarn_h_w, gen_vvv, gen_helper_vsrarn_h_w) +TRANS(vsrarn_w_d, gen_vvv, gen_helper_vsrarn_w_d) + +TRANS(vsrlrni_b_h, gen_vv_i, gen_helper_vsrlrni_b_h) +TRANS(vsrlrni_h_w, gen_vv_i, gen_helper_vsrlrni_h_w) +TRANS(vsrlrni_w_d, gen_vv_i, gen_helper_vsrlrni_w_d) +TRANS(vsrlrni_d_q, gen_vv_i, gen_helper_vsrlrni_d_q) +TRANS(vsrarni_b_h, gen_vv_i, gen_helper_vsrarni_b_h) +TRANS(vsrarni_h_w, gen_vv_i, gen_helper_vsrarni_h_w) +TRANS(vsrarni_w_d, gen_vv_i, gen_helper_vsrarni_w_d) +TRANS(vsrarni_d_q, gen_vv_i, gen_helper_vsrarni_d_q) + +TRANS(vssrln_b_h, gen_vvv, gen_helper_vssrln_b_h) +TRANS(vssrln_h_w, gen_vvv, gen_helper_vssrln_h_w) +TRANS(vssrln_w_d, gen_vvv, gen_helper_vssrln_w_d) +TRANS(vssran_b_h, gen_vvv, gen_helper_vssran_b_h) +TRANS(vssran_h_w, gen_vvv, gen_helper_vssran_h_w) +TRANS(vssran_w_d, gen_vvv, gen_helper_vssran_w_d) +TRANS(vssrln_bu_h, gen_vvv, gen_helper_vssrln_bu_h) +TRANS(vssrln_hu_w, gen_vvv, gen_helper_vssrln_hu_w) +TRANS(vssrln_wu_d, gen_vvv, gen_helper_vssrln_wu_d) +TRANS(vssran_bu_h, gen_vvv, gen_helper_vssran_bu_h) +TRANS(vssran_hu_w, gen_vvv, gen_helper_vssran_hu_w) +TRANS(vssran_wu_d, gen_vvv, gen_helper_vssran_wu_d) + +TRANS(vssrlni_b_h, gen_vv_i, gen_helper_vssrlni_b_h) +TRANS(vssrlni_h_w, gen_vv_i, 
gen_helper_vssrlni_h_w) +TRANS(vssrlni_w_d, gen_vv_i, gen_helper_vssrlni_w_d) +TRANS(vssrlni_d_q, gen_vv_i, gen_helper_vssrlni_d_q) +TRANS(vssrani_b_h, gen_vv_i, gen_helper_vssrani_b_h) +TRANS(vssrani_h_w, gen_vv_i, gen_helper_vssrani_h_w) +TRANS(vssrani_w_d, gen_vv_i, gen_helper_vssrani_w_d) +TRANS(vssrani_d_q, gen_vv_i, gen_helper_vssrani_d_q) +TRANS(vssrlni_bu_h, gen_vv_i, gen_helper_vssrlni_bu_h) +TRANS(vssrlni_hu_w, gen_vv_i, gen_helper_vssrlni_hu_w) +TRANS(vssrlni_wu_d, gen_vv_i, gen_helper_vssrlni_wu_d) +TRANS(vssrlni_du_q, gen_vv_i, gen_helper_vssrlni_du_q) +TRANS(vssrani_bu_h, gen_vv_i, gen_helper_vssrani_bu_h) +TRANS(vssrani_hu_w, gen_vv_i, gen_helper_vssrani_hu_w) +TRANS(vssrani_wu_d, gen_vv_i, gen_helper_vssrani_wu_d) +TRANS(vssrani_du_q, gen_vv_i, gen_helper_vssrani_du_q) + +TRANS(vssrlrn_b_h, gen_vvv, gen_helper_vssrlrn_b_h) +TRANS(vssrlrn_h_w, gen_vvv, gen_helper_vssrlrn_h_w) +TRANS(vssrlrn_w_d, gen_vvv, gen_helper_vssrlrn_w_d) +TRANS(vssrarn_b_h, gen_vvv, gen_helper_vssrarn_b_h) +TRANS(vssrarn_h_w, gen_vvv, gen_helper_vssrarn_h_w) +TRANS(vssrarn_w_d, gen_vvv, gen_helper_vssrarn_w_d) +TRANS(vssrlrn_bu_h, gen_vvv, gen_helper_vssrlrn_bu_h) +TRANS(vssrlrn_hu_w, gen_vvv, gen_helper_vssrlrn_hu_w) +TRANS(vssrlrn_wu_d, gen_vvv, gen_helper_vssrlrn_wu_d) +TRANS(vssrarn_bu_h, gen_vvv, gen_helper_vssrarn_bu_h) +TRANS(vssrarn_hu_w, gen_vvv, gen_helper_vssrarn_hu_w) +TRANS(vssrarn_wu_d, gen_vvv, gen_helper_vssrarn_wu_d) + +TRANS(vssrlrni_b_h, gen_vv_i, gen_helper_vssrlrni_b_h) +TRANS(vssrlrni_h_w, gen_vv_i, gen_helper_vssrlrni_h_w) +TRANS(vssrlrni_w_d, gen_vv_i, gen_helper_vssrlrni_w_d) +TRANS(vssrlrni_d_q, gen_vv_i, gen_helper_vssrlrni_d_q) +TRANS(vssrarni_b_h, gen_vv_i, gen_helper_vssrarni_b_h) +TRANS(vssrarni_h_w, gen_vv_i, gen_helper_vssrarni_h_w) +TRANS(vssrarni_w_d, gen_vv_i, gen_helper_vssrarni_w_d) +TRANS(vssrarni_d_q, gen_vv_i, gen_helper_vssrarni_d_q) +TRANS(vssrlrni_bu_h, gen_vv_i, gen_helper_vssrlrni_bu_h) +TRANS(vssrlrni_hu_w, gen_vv_i, gen_helper_vssrlrni_hu_w) +TRANS(vssrlrni_wu_d, gen_vv_i, gen_helper_vssrlrni_wu_d) +TRANS(vssrlrni_du_q, gen_vv_i, gen_helper_vssrlrni_du_q) +TRANS(vssrarni_bu_h, gen_vv_i, gen_helper_vssrarni_bu_h) +TRANS(vssrarni_hu_w, gen_vv_i, gen_helper_vssrarni_hu_w) +TRANS(vssrarni_wu_d, gen_vv_i, gen_helper_vssrarni_wu_d) +TRANS(vssrarni_du_q, gen_vv_i, gen_helper_vssrarni_du_q) + +TRANS(vclo_b, gen_vv, gen_helper_vclo_b) +TRANS(vclo_h, gen_vv, gen_helper_vclo_h) +TRANS(vclo_w, gen_vv, gen_helper_vclo_w) +TRANS(vclo_d, gen_vv, gen_helper_vclo_d) +TRANS(vclz_b, gen_vv, gen_helper_vclz_b) +TRANS(vclz_h, gen_vv, gen_helper_vclz_h) +TRANS(vclz_w, gen_vv, gen_helper_vclz_w) +TRANS(vclz_d, gen_vv, gen_helper_vclz_d) + +TRANS(vpcnt_b, gen_vv, gen_helper_vpcnt_b) +TRANS(vpcnt_h, gen_vv, gen_helper_vpcnt_h) +TRANS(vpcnt_w, gen_vv, gen_helper_vpcnt_w) +TRANS(vpcnt_d, gen_vv, gen_helper_vpcnt_d) + +static void do_vbit(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, + void (*func)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec)) +{ + TCGv_vec mask, lsh, t1, one; + + lsh = tcg_temp_new_vec_matching(t); + t1 = tcg_temp_new_vec_matching(t); + mask = tcg_constant_vec_matching(t, vece, (8 << vece) - 1); + one = tcg_constant_vec_matching(t, vece, 1); + + tcg_gen_and_vec(vece, lsh, b, mask); + tcg_gen_shlv_vec(vece, t1, one, lsh); + func(vece, t, a, t1); +} + +static void gen_vbitclr(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + do_vbit(vece, t, a, b, tcg_gen_andc_vec); +} + +static void gen_vbitset(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + 
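+    /*
+     * do_vbit() builds 1 << (b[i] % elem_bits) for each element and then
+     * applies the given bitwise op: tcg_gen_or_vec here sets the bit,
+     * while andc clears it and xor flips it in the sibling helpers.
+     */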
do_vbit(vece, t, a, b, tcg_gen_or_vec); +} + +static void gen_vbitrev(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) +{ + do_vbit(vece, t, a, b, tcg_gen_xor_vec); +} + +static void do_vbitclr(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shlv_vec, INDEX_op_andc_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vbitclr, + .fno = gen_helper_vbitclr_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vbitclr, + .fno = gen_helper_vbitclr_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vbitclr, + .fno = gen_helper_vbitclr_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vbitclr, + .fno = gen_helper_vbitclr_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vbitclr_b, gvec_vvv, MO_8, do_vbitclr) +TRANS(vbitclr_h, gvec_vvv, MO_16, do_vbitclr) +TRANS(vbitclr_w, gvec_vvv, MO_32, do_vbitclr) +TRANS(vbitclr_d, gvec_vvv, MO_64, do_vbitclr) + +static void do_vbiti(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm, + void (*func)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec)) +{ + int lsh; + TCGv_vec t1, one; + + lsh = imm & ((8 << vece) -1); + t1 = tcg_temp_new_vec_matching(t); + one = tcg_constant_vec_matching(t, vece, 1); + + tcg_gen_shli_vec(vece, t1, one, lsh); + func(vece, t, a, t1); +} + +static void gen_vbitclri(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + do_vbiti(vece, t, a, imm, tcg_gen_andc_vec); +} + +static void gen_vbitseti(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + do_vbiti(vece, t, a, imm, tcg_gen_or_vec); +} + +static void gen_vbitrevi(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + do_vbiti(vece, t, a, imm, tcg_gen_xor_vec); +} + +static void do_vbitclri(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + int64_t imm, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shli_vec, INDEX_op_andc_vec, 0 + }; + static const GVecGen2i op[4] = { + { + .fniv = gen_vbitclri, + .fnoi = gen_helper_vbitclri_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vbitclri, + .fnoi = gen_helper_vbitclri_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vbitclri, + .fnoi = gen_helper_vbitclri_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vbitclri, + .fnoi = gen_helper_vbitclri_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]); +} + +TRANS(vbitclri_b, gvec_vv_i, MO_8, do_vbitclri) +TRANS(vbitclri_h, gvec_vv_i, MO_16, do_vbitclri) +TRANS(vbitclri_w, gvec_vv_i, MO_32, do_vbitclri) +TRANS(vbitclri_d, gvec_vv_i, MO_64, do_vbitclri) + +static void do_vbitset(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shlv_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vbitset, + .fno = gen_helper_vbitset_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vbitset, + .fno = gen_helper_vbitset_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vbitset, + .fno = gen_helper_vbitset_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vbitset, + .fno = gen_helper_vbitset_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vbitset_b, 
gvec_vvv, MO_8, do_vbitset) +TRANS(vbitset_h, gvec_vvv, MO_16, do_vbitset) +TRANS(vbitset_w, gvec_vvv, MO_32, do_vbitset) +TRANS(vbitset_d, gvec_vvv, MO_64, do_vbitset) + +static void do_vbitseti(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + int64_t imm, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shli_vec, 0 + }; + static const GVecGen2i op[4] = { + { + .fniv = gen_vbitseti, + .fnoi = gen_helper_vbitseti_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vbitseti, + .fnoi = gen_helper_vbitseti_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vbitseti, + .fnoi = gen_helper_vbitseti_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vbitseti, + .fnoi = gen_helper_vbitseti_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]); +} + +TRANS(vbitseti_b, gvec_vv_i, MO_8, do_vbitseti) +TRANS(vbitseti_h, gvec_vv_i, MO_16, do_vbitseti) +TRANS(vbitseti_w, gvec_vv_i, MO_32, do_vbitseti) +TRANS(vbitseti_d, gvec_vv_i, MO_64, do_vbitseti) + +static void do_vbitrev(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shlv_vec, 0 + }; + static const GVecGen3 op[4] = { + { + .fniv = gen_vbitrev, + .fno = gen_helper_vbitrev_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vbitrev, + .fno = gen_helper_vbitrev_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vbitrev, + .fno = gen_helper_vbitrev_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vbitrev, + .fno = gen_helper_vbitrev_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]); +} + +TRANS(vbitrev_b, gvec_vvv, MO_8, do_vbitrev) +TRANS(vbitrev_h, gvec_vvv, MO_16, do_vbitrev) +TRANS(vbitrev_w, gvec_vvv, MO_32, do_vbitrev) +TRANS(vbitrev_d, gvec_vvv, MO_64, do_vbitrev) + +static void do_vbitrevi(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs, + int64_t imm, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shli_vec, 0 + }; + static const GVecGen2i op[4] = { + { + .fniv = gen_vbitrevi, + .fnoi = gen_helper_vbitrevi_b, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_vbitrevi, + .fnoi = gen_helper_vbitrevi_h, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_vbitrevi, + .fnoi = gen_helper_vbitrevi_w, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_vbitrevi, + .fnoi = gen_helper_vbitrevi_d, + .opt_opc = vecop_list, + .vece = MO_64 + }, + }; + + tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]); +} + +TRANS(vbitrevi_b, gvec_vv_i, MO_8, do_vbitrevi) +TRANS(vbitrevi_h, gvec_vv_i, MO_16, do_vbitrevi) +TRANS(vbitrevi_w, gvec_vv_i, MO_32, do_vbitrevi) +TRANS(vbitrevi_d, gvec_vv_i, MO_64, do_vbitrevi) + +TRANS(vfrstp_b, gen_vvv, gen_helper_vfrstp_b) +TRANS(vfrstp_h, gen_vvv, gen_helper_vfrstp_h) +TRANS(vfrstpi_b, gen_vv_i, gen_helper_vfrstpi_b) +TRANS(vfrstpi_h, gen_vv_i, gen_helper_vfrstpi_h) + +TRANS(vfadd_s, gen_vvv, gen_helper_vfadd_s) +TRANS(vfadd_d, gen_vvv, gen_helper_vfadd_d) +TRANS(vfsub_s, gen_vvv, gen_helper_vfsub_s) +TRANS(vfsub_d, gen_vvv, gen_helper_vfsub_d) +TRANS(vfmul_s, gen_vvv, gen_helper_vfmul_s) +TRANS(vfmul_d, gen_vvv, gen_helper_vfmul_d) +TRANS(vfdiv_s, gen_vvv, gen_helper_vfdiv_s) +TRANS(vfdiv_d, gen_vvv, gen_helper_vfdiv_d) + +TRANS(vfmadd_s, gen_vvvv, 
gen_helper_vfmadd_s) +TRANS(vfmadd_d, gen_vvvv, gen_helper_vfmadd_d) +TRANS(vfmsub_s, gen_vvvv, gen_helper_vfmsub_s) +TRANS(vfmsub_d, gen_vvvv, gen_helper_vfmsub_d) +TRANS(vfnmadd_s, gen_vvvv, gen_helper_vfnmadd_s) +TRANS(vfnmadd_d, gen_vvvv, gen_helper_vfnmadd_d) +TRANS(vfnmsub_s, gen_vvvv, gen_helper_vfnmsub_s) +TRANS(vfnmsub_d, gen_vvvv, gen_helper_vfnmsub_d) + +TRANS(vfmax_s, gen_vvv, gen_helper_vfmax_s) +TRANS(vfmax_d, gen_vvv, gen_helper_vfmax_d) +TRANS(vfmin_s, gen_vvv, gen_helper_vfmin_s) +TRANS(vfmin_d, gen_vvv, gen_helper_vfmin_d) + +TRANS(vfmaxa_s, gen_vvv, gen_helper_vfmaxa_s) +TRANS(vfmaxa_d, gen_vvv, gen_helper_vfmaxa_d) +TRANS(vfmina_s, gen_vvv, gen_helper_vfmina_s) +TRANS(vfmina_d, gen_vvv, gen_helper_vfmina_d) + +TRANS(vflogb_s, gen_vv, gen_helper_vflogb_s) +TRANS(vflogb_d, gen_vv, gen_helper_vflogb_d) + +TRANS(vfclass_s, gen_vv, gen_helper_vfclass_s) +TRANS(vfclass_d, gen_vv, gen_helper_vfclass_d) + +TRANS(vfsqrt_s, gen_vv, gen_helper_vfsqrt_s) +TRANS(vfsqrt_d, gen_vv, gen_helper_vfsqrt_d) +TRANS(vfrecip_s, gen_vv, gen_helper_vfrecip_s) +TRANS(vfrecip_d, gen_vv, gen_helper_vfrecip_d) +TRANS(vfrsqrt_s, gen_vv, gen_helper_vfrsqrt_s) +TRANS(vfrsqrt_d, gen_vv, gen_helper_vfrsqrt_d) + +TRANS(vfcvtl_s_h, gen_vv, gen_helper_vfcvtl_s_h) +TRANS(vfcvth_s_h, gen_vv, gen_helper_vfcvth_s_h) +TRANS(vfcvtl_d_s, gen_vv, gen_helper_vfcvtl_d_s) +TRANS(vfcvth_d_s, gen_vv, gen_helper_vfcvth_d_s) +TRANS(vfcvt_h_s, gen_vvv, gen_helper_vfcvt_h_s) +TRANS(vfcvt_s_d, gen_vvv, gen_helper_vfcvt_s_d) + +TRANS(vfrintrne_s, gen_vv, gen_helper_vfrintrne_s) +TRANS(vfrintrne_d, gen_vv, gen_helper_vfrintrne_d) +TRANS(vfrintrz_s, gen_vv, gen_helper_vfrintrz_s) +TRANS(vfrintrz_d, gen_vv, gen_helper_vfrintrz_d) +TRANS(vfrintrp_s, gen_vv, gen_helper_vfrintrp_s) +TRANS(vfrintrp_d, gen_vv, gen_helper_vfrintrp_d) +TRANS(vfrintrm_s, gen_vv, gen_helper_vfrintrm_s) +TRANS(vfrintrm_d, gen_vv, gen_helper_vfrintrm_d) +TRANS(vfrint_s, gen_vv, gen_helper_vfrint_s) +TRANS(vfrint_d, gen_vv, gen_helper_vfrint_d) + +TRANS(vftintrne_w_s, gen_vv, gen_helper_vftintrne_w_s) +TRANS(vftintrne_l_d, gen_vv, gen_helper_vftintrne_l_d) +TRANS(vftintrz_w_s, gen_vv, gen_helper_vftintrz_w_s) +TRANS(vftintrz_l_d, gen_vv, gen_helper_vftintrz_l_d) +TRANS(vftintrp_w_s, gen_vv, gen_helper_vftintrp_w_s) +TRANS(vftintrp_l_d, gen_vv, gen_helper_vftintrp_l_d) +TRANS(vftintrm_w_s, gen_vv, gen_helper_vftintrm_w_s) +TRANS(vftintrm_l_d, gen_vv, gen_helper_vftintrm_l_d) +TRANS(vftint_w_s, gen_vv, gen_helper_vftint_w_s) +TRANS(vftint_l_d, gen_vv, gen_helper_vftint_l_d) +TRANS(vftintrz_wu_s, gen_vv, gen_helper_vftintrz_wu_s) +TRANS(vftintrz_lu_d, gen_vv, gen_helper_vftintrz_lu_d) +TRANS(vftint_wu_s, gen_vv, gen_helper_vftint_wu_s) +TRANS(vftint_lu_d, gen_vv, gen_helper_vftint_lu_d) +TRANS(vftintrne_w_d, gen_vvv, gen_helper_vftintrne_w_d) +TRANS(vftintrz_w_d, gen_vvv, gen_helper_vftintrz_w_d) +TRANS(vftintrp_w_d, gen_vvv, gen_helper_vftintrp_w_d) +TRANS(vftintrm_w_d, gen_vvv, gen_helper_vftintrm_w_d) +TRANS(vftint_w_d, gen_vvv, gen_helper_vftint_w_d) +TRANS(vftintrnel_l_s, gen_vv, gen_helper_vftintrnel_l_s) +TRANS(vftintrneh_l_s, gen_vv, gen_helper_vftintrneh_l_s) +TRANS(vftintrzl_l_s, gen_vv, gen_helper_vftintrzl_l_s) +TRANS(vftintrzh_l_s, gen_vv, gen_helper_vftintrzh_l_s) +TRANS(vftintrpl_l_s, gen_vv, gen_helper_vftintrpl_l_s) +TRANS(vftintrph_l_s, gen_vv, gen_helper_vftintrph_l_s) +TRANS(vftintrml_l_s, gen_vv, gen_helper_vftintrml_l_s) +TRANS(vftintrmh_l_s, gen_vv, gen_helper_vftintrmh_l_s) +TRANS(vftintl_l_s, gen_vv, gen_helper_vftintl_l_s) 
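+/*
+ * In the vftint* group the rne/rz/rp/rm infixes select the FP-to-integer
+ * rounding mode (nearest-even, toward zero, toward +inf, toward -inf; the
+ * plain forms use the current FCSR mode), and the l/h suffixes convert
+ * only the low/high half of the source vector.
+ */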
+TRANS(vftinth_l_s, gen_vv, gen_helper_vftinth_l_s) + +TRANS(vffint_s_w, gen_vv, gen_helper_vffint_s_w) +TRANS(vffint_d_l, gen_vv, gen_helper_vffint_d_l) +TRANS(vffint_s_wu, gen_vv, gen_helper_vffint_s_wu) +TRANS(vffint_d_lu, gen_vv, gen_helper_vffint_d_lu) +TRANS(vffintl_d_w, gen_vv, gen_helper_vffintl_d_w) +TRANS(vffinth_d_w, gen_vv, gen_helper_vffinth_d_w) +TRANS(vffint_s_l, gen_vvv, gen_helper_vffint_s_l) + +static bool do_cmp(DisasContext *ctx, arg_vvv *a, MemOp mop, TCGCond cond) +{ + uint32_t vd_ofs, vj_ofs, vk_ofs; + + CHECK_SXE; + + vd_ofs = vec_full_offset(a->vd); + vj_ofs = vec_full_offset(a->vj); + vk_ofs = vec_full_offset(a->vk); + + tcg_gen_gvec_cmp(cond, mop, vd_ofs, vj_ofs, vk_ofs, 16, ctx->vl/8); + return true; +} + +static void do_cmpi_vec(TCGCond cond, + unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + tcg_gen_cmp_vec(cond, vece, t, a, tcg_constant_vec_matching(t, vece, imm)); +} + +static void gen_vseqi_s_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + do_cmpi_vec(TCG_COND_EQ, vece, t, a, imm); +} + +static void gen_vslei_s_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + do_cmpi_vec(TCG_COND_LE, vece, t, a, imm); +} + +static void gen_vslti_s_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + do_cmpi_vec(TCG_COND_LT, vece, t, a, imm); +} + +static void gen_vslei_u_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + do_cmpi_vec(TCG_COND_LEU, vece, t, a, imm); +} + +static void gen_vslti_u_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm) +{ + do_cmpi_vec(TCG_COND_LTU, vece, t, a, imm); +} + +#define DO_CMPI_S(NAME) \ +static bool do_## NAME ##_s(DisasContext *ctx, arg_vv_i *a, MemOp mop) \ +{ \ + uint32_t vd_ofs, vj_ofs; \ + \ + CHECK_SXE; \ + \ + static const TCGOpcode vecop_list[] = { \ + INDEX_op_cmp_vec, 0 \ + }; \ + static const GVecGen2i op[4] = { \ + { \ + .fniv = gen_## NAME ##_s_vec, \ + .fnoi = gen_helper_## NAME ##_b, \ + .opt_opc = vecop_list, \ + .vece = MO_8 \ + }, \ + { \ + .fniv = gen_## NAME ##_s_vec, \ + .fnoi = gen_helper_## NAME ##_h, \ + .opt_opc = vecop_list, \ + .vece = MO_16 \ + }, \ + { \ + .fniv = gen_## NAME ##_s_vec, \ + .fnoi = gen_helper_## NAME ##_w, \ + .opt_opc = vecop_list, \ + .vece = MO_32 \ + }, \ + { \ + .fniv = gen_## NAME ##_s_vec, \ + .fnoi = gen_helper_## NAME ##_d, \ + .opt_opc = vecop_list, \ + .vece = MO_64 \ + } \ + }; \ + \ + vd_ofs = vec_full_offset(a->vd); \ + vj_ofs = vec_full_offset(a->vj); \ + \ + tcg_gen_gvec_2i(vd_ofs, vj_ofs, 16, ctx->vl/8, a->imm, &op[mop]); \ + \ + return true; \ +} + +DO_CMPI_S(vseqi) +DO_CMPI_S(vslei) +DO_CMPI_S(vslti) + +#define DO_CMPI_U(NAME) \ +static bool do_## NAME ##_u(DisasContext *ctx, arg_vv_i *a, MemOp mop) \ +{ \ + uint32_t vd_ofs, vj_ofs; \ + \ + CHECK_SXE; \ + \ + static const TCGOpcode vecop_list[] = { \ + INDEX_op_cmp_vec, 0 \ + }; \ + static const GVecGen2i op[4] = { \ + { \ + .fniv = gen_## NAME ##_u_vec, \ + .fnoi = gen_helper_## NAME ##_bu, \ + .opt_opc = vecop_list, \ + .vece = MO_8 \ + }, \ + { \ + .fniv = gen_## NAME ##_u_vec, \ + .fnoi = gen_helper_## NAME ##_hu, \ + .opt_opc = vecop_list, \ + .vece = MO_16 \ + }, \ + { \ + .fniv = gen_## NAME ##_u_vec, \ + .fnoi = gen_helper_## NAME ##_wu, \ + .opt_opc = vecop_list, \ + .vece = MO_32 \ + }, \ + { \ + .fniv = gen_## NAME ##_u_vec, \ + .fnoi = gen_helper_## NAME ##_du, \ + .opt_opc = vecop_list, \ + .vece = MO_64 \ + } \ + }; \ + \ + vd_ofs = vec_full_offset(a->vd); \ + vj_ofs = vec_full_offset(a->vj); \ + \ + tcg_gen_gvec_2i(vd_ofs, vj_ofs, 16, ctx->vl/8, a->imm, 
&op[mop]);                    \
+                                                                             \
+    return true;                                                             \
+}
+
+DO_CMPI_U(vslei)
+DO_CMPI_U(vslti)
+
+TRANS(vseq_b, do_cmp, MO_8, TCG_COND_EQ)
+TRANS(vseq_h, do_cmp, MO_16, TCG_COND_EQ)
+TRANS(vseq_w, do_cmp, MO_32, TCG_COND_EQ)
+TRANS(vseq_d, do_cmp, MO_64, TCG_COND_EQ)
+TRANS(vseqi_b, do_vseqi_s, MO_8)
+TRANS(vseqi_h, do_vseqi_s, MO_16)
+TRANS(vseqi_w, do_vseqi_s, MO_32)
+TRANS(vseqi_d, do_vseqi_s, MO_64)
+
+TRANS(vsle_b, do_cmp, MO_8, TCG_COND_LE)
+TRANS(vsle_h, do_cmp, MO_16, TCG_COND_LE)
+TRANS(vsle_w, do_cmp, MO_32, TCG_COND_LE)
+TRANS(vsle_d, do_cmp, MO_64, TCG_COND_LE)
+TRANS(vslei_b, do_vslei_s, MO_8)
+TRANS(vslei_h, do_vslei_s, MO_16)
+TRANS(vslei_w, do_vslei_s, MO_32)
+TRANS(vslei_d, do_vslei_s, MO_64)
+TRANS(vsle_bu, do_cmp, MO_8, TCG_COND_LEU)
+TRANS(vsle_hu, do_cmp, MO_16, TCG_COND_LEU)
+TRANS(vsle_wu, do_cmp, MO_32, TCG_COND_LEU)
+TRANS(vsle_du, do_cmp, MO_64, TCG_COND_LEU)
+TRANS(vslei_bu, do_vslei_u, MO_8)
+TRANS(vslei_hu, do_vslei_u, MO_16)
+TRANS(vslei_wu, do_vslei_u, MO_32)
+TRANS(vslei_du, do_vslei_u, MO_64)
+
+TRANS(vslt_b, do_cmp, MO_8, TCG_COND_LT)
+TRANS(vslt_h, do_cmp, MO_16, TCG_COND_LT)
+TRANS(vslt_w, do_cmp, MO_32, TCG_COND_LT)
+TRANS(vslt_d, do_cmp, MO_64, TCG_COND_LT)
+TRANS(vslti_b, do_vslti_s, MO_8)
+TRANS(vslti_h, do_vslti_s, MO_16)
+TRANS(vslti_w, do_vslti_s, MO_32)
+TRANS(vslti_d, do_vslti_s, MO_64)
+TRANS(vslt_bu, do_cmp, MO_8, TCG_COND_LTU)
+TRANS(vslt_hu, do_cmp, MO_16, TCG_COND_LTU)
+TRANS(vslt_wu, do_cmp, MO_32, TCG_COND_LTU)
+TRANS(vslt_du, do_cmp, MO_64, TCG_COND_LTU)
+TRANS(vslti_bu, do_vslti_u, MO_8)
+TRANS(vslti_hu, do_vslti_u, MO_16)
+TRANS(vslti_wu, do_vslti_u, MO_32)
+TRANS(vslti_du, do_vslti_u, MO_64)
+
+static bool trans_vfcmp_cond_s(DisasContext *ctx, arg_vvv_fcond *a)
+{
+    uint32_t flags;
+    void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
+    TCGv_i32 vd = tcg_constant_i32(a->vd);
+    TCGv_i32 vj = tcg_constant_i32(a->vj);
+    TCGv_i32 vk = tcg_constant_i32(a->vk);
+
+    CHECK_SXE;
+
+    fn = (a->fcond & 1 ? gen_helper_vfcmp_s_s : gen_helper_vfcmp_c_s);
+    flags = get_fcmp_flags(a->fcond >> 1);
+    fn(cpu_env, vd, vj, vk, tcg_constant_i32(flags));
+
+    return true;
+}
+
+static bool trans_vfcmp_cond_d(DisasContext *ctx, arg_vvv_fcond *a)
+{
+    uint32_t flags;
+    void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
+    TCGv_i32 vd = tcg_constant_i32(a->vd);
+    TCGv_i32 vj = tcg_constant_i32(a->vj);
+    TCGv_i32 vk = tcg_constant_i32(a->vk);
+
+    CHECK_SXE;
+
+    fn = (a->fcond & 1 ?
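+          /* odd fcond encodings use the signaling (s*) helper, even the quiet (c*) one */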
gen_helper_vfcmp_s_d : gen_helper_vfcmp_c_d); + flags = get_fcmp_flags(a->fcond >> 1); + fn(cpu_env, vd, vj, vk, tcg_constant_i32(flags)); + + return true; +} + +static bool trans_vbitsel_v(DisasContext *ctx, arg_vvvv *a) +{ + CHECK_SXE; + + tcg_gen_gvec_bitsel(MO_64, vec_full_offset(a->vd), vec_full_offset(a->va), + vec_full_offset(a->vk), vec_full_offset(a->vj), + 16, ctx->vl/8); + return true; +} + +static void gen_vbitseli(unsigned vece, TCGv_vec a, TCGv_vec b, int64_t imm) +{ + tcg_gen_bitsel_vec(vece, a, a, tcg_constant_vec_matching(a, vece, imm), b); +} + +static bool trans_vbitseli_b(DisasContext *ctx, arg_vv_i *a) +{ + static const GVecGen2i op = { + .fniv = gen_vbitseli, + .fnoi = gen_helper_vbitseli_b, + .vece = MO_8, + .load_dest = true + }; + + CHECK_SXE; + + tcg_gen_gvec_2i(vec_full_offset(a->vd), vec_full_offset(a->vj), + 16, ctx->vl/8, a->imm, &op); + return true; +} + +#define VSET(NAME, COND) \ +static bool trans_## NAME (DisasContext *ctx, arg_cv *a) \ +{ \ + TCGv_i64 t1, al, ah; \ + \ + al = tcg_temp_new_i64(); \ + ah = tcg_temp_new_i64(); \ + t1 = tcg_temp_new_i64(); \ + \ + get_vreg64(ah, a->vj, 1); \ + get_vreg64(al, a->vj, 0); \ + \ + CHECK_SXE; \ + tcg_gen_or_i64(t1, al, ah); \ + tcg_gen_setcondi_i64(COND, t1, t1, 0); \ + tcg_gen_st8_tl(t1, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); \ + \ + return true; \ +} + +VSET(vseteqz_v, TCG_COND_EQ) +VSET(vsetnez_v, TCG_COND_NE) + +TRANS(vsetanyeqz_b, gen_cv, gen_helper_vsetanyeqz_b) +TRANS(vsetanyeqz_h, gen_cv, gen_helper_vsetanyeqz_h) +TRANS(vsetanyeqz_w, gen_cv, gen_helper_vsetanyeqz_w) +TRANS(vsetanyeqz_d, gen_cv, gen_helper_vsetanyeqz_d) +TRANS(vsetallnez_b, gen_cv, gen_helper_vsetallnez_b) +TRANS(vsetallnez_h, gen_cv, gen_helper_vsetallnez_h) +TRANS(vsetallnez_w, gen_cv, gen_helper_vsetallnez_w) +TRANS(vsetallnez_d, gen_cv, gen_helper_vsetallnez_d) + +static bool trans_vinsgr2vr_b(DisasContext *ctx, arg_vr_i *a) +{ + TCGv src = gpr_src(ctx, a->rj, EXT_NONE); + CHECK_SXE; + tcg_gen_st8_i64(src, cpu_env, + offsetof(CPULoongArchState, fpr[a->vd].vreg.B(a->imm))); + return true; +} + +static bool trans_vinsgr2vr_h(DisasContext *ctx, arg_vr_i *a) +{ + TCGv src = gpr_src(ctx, a->rj, EXT_NONE); + CHECK_SXE; + tcg_gen_st16_i64(src, cpu_env, + offsetof(CPULoongArchState, fpr[a->vd].vreg.H(a->imm))); + return true; +} + +static bool trans_vinsgr2vr_w(DisasContext *ctx, arg_vr_i *a) +{ + TCGv src = gpr_src(ctx, a->rj, EXT_NONE); + CHECK_SXE; + tcg_gen_st32_i64(src, cpu_env, + offsetof(CPULoongArchState, fpr[a->vd].vreg.W(a->imm))); + return true; +} + +static bool trans_vinsgr2vr_d(DisasContext *ctx, arg_vr_i *a) +{ + TCGv src = gpr_src(ctx, a->rj, EXT_NONE); + CHECK_SXE; + tcg_gen_st_i64(src, cpu_env, + offsetof(CPULoongArchState, fpr[a->vd].vreg.D(a->imm))); + return true; +} + +static bool trans_vpickve2gr_b(DisasContext *ctx, arg_rv_i *a) +{ + TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE); + CHECK_SXE; + tcg_gen_ld8s_i64(dst, cpu_env, + offsetof(CPULoongArchState, fpr[a->vj].vreg.B(a->imm))); + return true; +} + +static bool trans_vpickve2gr_h(DisasContext *ctx, arg_rv_i *a) +{ + TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE); + CHECK_SXE; + tcg_gen_ld16s_i64(dst, cpu_env, + offsetof(CPULoongArchState, fpr[a->vj].vreg.H(a->imm))); + return true; +} + +static bool trans_vpickve2gr_w(DisasContext *ctx, arg_rv_i *a) +{ + TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE); + CHECK_SXE; + tcg_gen_ld32s_i64(dst, cpu_env, + offsetof(CPULoongArchState, fpr[a->vj].vreg.W(a->imm))); + return true; +} + +static bool 
trans_vpickve2gr_d(DisasContext *ctx, arg_rv_i *a) +{ + TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE); + CHECK_SXE; + tcg_gen_ld_i64(dst, cpu_env, + offsetof(CPULoongArchState, fpr[a->vj].vreg.D(a->imm))); + return true; +} + +static bool trans_vpickve2gr_bu(DisasContext *ctx, arg_rv_i *a) +{ + TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE); + CHECK_SXE; + tcg_gen_ld8u_i64(dst, cpu_env, + offsetof(CPULoongArchState, fpr[a->vj].vreg.B(a->imm))); + return true; +} + +static bool trans_vpickve2gr_hu(DisasContext *ctx, arg_rv_i *a) +{ + TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE); + CHECK_SXE; + tcg_gen_ld16u_i64(dst, cpu_env, + offsetof(CPULoongArchState, fpr[a->vj].vreg.H(a->imm))); + return true; +} + +static bool trans_vpickve2gr_wu(DisasContext *ctx, arg_rv_i *a) +{ + TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE); + CHECK_SXE; + tcg_gen_ld32u_i64(dst, cpu_env, + offsetof(CPULoongArchState, fpr[a->vj].vreg.W(a->imm))); + return true; +} + +static bool trans_vpickve2gr_du(DisasContext *ctx, arg_rv_i *a) +{ + TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE); + CHECK_SXE; + tcg_gen_ld_i64(dst, cpu_env, + offsetof(CPULoongArchState, fpr[a->vj].vreg.D(a->imm))); + return true; +} + +static bool gvec_dup(DisasContext *ctx, arg_vr *a, MemOp mop) +{ + TCGv src = gpr_src(ctx, a->rj, EXT_NONE); + CHECK_SXE; + + tcg_gen_gvec_dup_i64(mop, vec_full_offset(a->vd), + 16, ctx->vl/8, src); + return true; +} + +TRANS(vreplgr2vr_b, gvec_dup, MO_8) +TRANS(vreplgr2vr_h, gvec_dup, MO_16) +TRANS(vreplgr2vr_w, gvec_dup, MO_32) +TRANS(vreplgr2vr_d, gvec_dup, MO_64) + +static bool trans_vreplvei_b(DisasContext *ctx, arg_vv_i *a) +{ + CHECK_SXE; + tcg_gen_gvec_dup_mem(MO_8,vec_full_offset(a->vd), + offsetof(CPULoongArchState, + fpr[a->vj].vreg.B((a->imm))), + 16, ctx->vl/8); + return true; +} + +static bool trans_vreplvei_h(DisasContext *ctx, arg_vv_i *a) +{ + CHECK_SXE; + tcg_gen_gvec_dup_mem(MO_16, vec_full_offset(a->vd), + offsetof(CPULoongArchState, + fpr[a->vj].vreg.H((a->imm))), + 16, ctx->vl/8); + return true; +} +static bool trans_vreplvei_w(DisasContext *ctx, arg_vv_i *a) +{ + CHECK_SXE; + tcg_gen_gvec_dup_mem(MO_32, vec_full_offset(a->vd), + offsetof(CPULoongArchState, + fpr[a->vj].vreg.W((a->imm))), + 16, ctx->vl/8); + return true; +} +static bool trans_vreplvei_d(DisasContext *ctx, arg_vv_i *a) +{ + CHECK_SXE; + tcg_gen_gvec_dup_mem(MO_64, vec_full_offset(a->vd), + offsetof(CPULoongArchState, + fpr[a->vj].vreg.D((a->imm))), + 16, ctx->vl/8); + return true; +} + +static bool gen_vreplve(DisasContext *ctx, arg_vvr *a, int vece, int bit, + void (*func)(TCGv_i64, TCGv_ptr, tcg_target_long)) +{ + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_ptr t1 = tcg_temp_new_ptr(); + TCGv_i64 t2 = tcg_temp_new_i64(); + + CHECK_SXE; + + tcg_gen_andi_i64(t0, gpr_src(ctx, a->rk, EXT_NONE), (LSX_LEN/bit) -1); + tcg_gen_shli_i64(t0, t0, vece); + if (HOST_BIG_ENDIAN) { + tcg_gen_xori_i64(t0, t0, vece << ((LSX_LEN/bit) -1)); + } + + tcg_gen_trunc_i64_ptr(t1, t0); + tcg_gen_add_ptr(t1, t1, cpu_env); + func(t2, t1, vec_full_offset(a->vj)); + tcg_gen_gvec_dup_i64(vece, vec_full_offset(a->vd), 16, ctx->vl/8, t2); + + return true; +} + +TRANS(vreplve_b, gen_vreplve, MO_8, 8, tcg_gen_ld8u_i64) +TRANS(vreplve_h, gen_vreplve, MO_16, 16, tcg_gen_ld16u_i64) +TRANS(vreplve_w, gen_vreplve, MO_32, 32, tcg_gen_ld32u_i64) +TRANS(vreplve_d, gen_vreplve, MO_64, 64, tcg_gen_ld_i64) + +static bool trans_vbsll_v(DisasContext *ctx, arg_vv_i *a) +{ + int ofs; + TCGv_i64 desthigh, destlow, high, low; + + CHECK_SXE; + + desthigh = tcg_temp_new_i64(); + destlow = tcg_temp_new_i64(); 
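+    /*
+     * vbsll.v shifts the full 128-bit vj left by (imm & 0xf) bytes: for
+     * shifts below 64 bits the new high half combines bits of both halves
+     * via extract2, while for larger shifts only the old low half can
+     * still contribute and the new low half becomes zero.
+     */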
+ high = tcg_temp_new_i64(); + low = tcg_temp_new_i64(); + + get_vreg64(low, a->vj, 0); + + ofs = ((a->imm) & 0xf) * 8; + if (ofs < 64) { + get_vreg64(high, a->vj, 1); + tcg_gen_extract2_i64(desthigh, low, high, 64 - ofs); + tcg_gen_shli_i64(destlow, low, ofs); + } else { + tcg_gen_shli_i64(desthigh, low, ofs - 64); + destlow = tcg_constant_i64(0); + } + + set_vreg64(desthigh, a->vd, 1); + set_vreg64(destlow, a->vd, 0); + + return true; +} + +static bool trans_vbsrl_v(DisasContext *ctx, arg_vv_i *a) +{ + TCGv_i64 desthigh, destlow, high, low; + int ofs; + + CHECK_SXE; + + desthigh = tcg_temp_new_i64(); + destlow = tcg_temp_new_i64(); + high = tcg_temp_new_i64(); + low = tcg_temp_new_i64(); + + get_vreg64(high, a->vj, 1); + + ofs = ((a->imm) & 0xf) * 8; + if (ofs < 64) { + get_vreg64(low, a->vj, 0); + tcg_gen_extract2_i64(destlow, low, high, ofs); + tcg_gen_shri_i64(desthigh, high, ofs); + } else { + tcg_gen_shri_i64(destlow, high, ofs - 64); + desthigh = tcg_constant_i64(0); + } + + set_vreg64(desthigh, a->vd, 1); + set_vreg64(destlow, a->vd, 0); + + return true; +} + +TRANS(vpackev_b, gen_vvv, gen_helper_vpackev_b) +TRANS(vpackev_h, gen_vvv, gen_helper_vpackev_h) +TRANS(vpackev_w, gen_vvv, gen_helper_vpackev_w) +TRANS(vpackev_d, gen_vvv, gen_helper_vpackev_d) +TRANS(vpackod_b, gen_vvv, gen_helper_vpackod_b) +TRANS(vpackod_h, gen_vvv, gen_helper_vpackod_h) +TRANS(vpackod_w, gen_vvv, gen_helper_vpackod_w) +TRANS(vpackod_d, gen_vvv, gen_helper_vpackod_d) + +TRANS(vpickev_b, gen_vvv, gen_helper_vpickev_b) +TRANS(vpickev_h, gen_vvv, gen_helper_vpickev_h) +TRANS(vpickev_w, gen_vvv, gen_helper_vpickev_w) +TRANS(vpickev_d, gen_vvv, gen_helper_vpickev_d) +TRANS(vpickod_b, gen_vvv, gen_helper_vpickod_b) +TRANS(vpickod_h, gen_vvv, gen_helper_vpickod_h) +TRANS(vpickod_w, gen_vvv, gen_helper_vpickod_w) +TRANS(vpickod_d, gen_vvv, gen_helper_vpickod_d) + +TRANS(vilvl_b, gen_vvv, gen_helper_vilvl_b) +TRANS(vilvl_h, gen_vvv, gen_helper_vilvl_h) +TRANS(vilvl_w, gen_vvv, gen_helper_vilvl_w) +TRANS(vilvl_d, gen_vvv, gen_helper_vilvl_d) +TRANS(vilvh_b, gen_vvv, gen_helper_vilvh_b) +TRANS(vilvh_h, gen_vvv, gen_helper_vilvh_h) +TRANS(vilvh_w, gen_vvv, gen_helper_vilvh_w) +TRANS(vilvh_d, gen_vvv, gen_helper_vilvh_d) + +TRANS(vshuf_b, gen_vvvv, gen_helper_vshuf_b) +TRANS(vshuf_h, gen_vvv, gen_helper_vshuf_h) +TRANS(vshuf_w, gen_vvv, gen_helper_vshuf_w) +TRANS(vshuf_d, gen_vvv, gen_helper_vshuf_d) +TRANS(vshuf4i_b, gen_vv_i, gen_helper_vshuf4i_b) +TRANS(vshuf4i_h, gen_vv_i, gen_helper_vshuf4i_h) +TRANS(vshuf4i_w, gen_vv_i, gen_helper_vshuf4i_w) +TRANS(vshuf4i_d, gen_vv_i, gen_helper_vshuf4i_d) + +TRANS(vpermi_w, gen_vv_i, gen_helper_vpermi_w) + +TRANS(vextrins_b, gen_vv_i, gen_helper_vextrins_b) +TRANS(vextrins_h, gen_vv_i, gen_helper_vextrins_h) +TRANS(vextrins_w, gen_vv_i, gen_helper_vextrins_w) +TRANS(vextrins_d, gen_vv_i, gen_helper_vextrins_d) + +static bool trans_vld(DisasContext *ctx, arg_vr_i *a) +{ + TCGv addr, temp; + TCGv_i64 rl, rh; + TCGv_i128 val; + + CHECK_SXE; + + addr = gpr_src(ctx, a->rj, EXT_NONE); + val = tcg_temp_new_i128(); + rl = tcg_temp_new_i64(); + rh = tcg_temp_new_i64(); + + if (a->imm) { + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, addr, a->imm); + addr = temp; + } + + tcg_gen_qemu_ld_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE); + tcg_gen_extr_i128_i64(rl, rh, val); + set_vreg64(rh, a->vd, 1); + set_vreg64(rl, a->vd, 0); + + return true; +} + +static bool trans_vst(DisasContext *ctx, arg_vr_i *a) +{ + TCGv addr, temp; + TCGv_i128 val; + TCGv_i64 ah, al; + + CHECK_SXE; + + addr = 
gpr_src(ctx, a->rj, EXT_NONE); + val = tcg_temp_new_i128(); + ah = tcg_temp_new_i64(); + al = tcg_temp_new_i64(); + + if (a->imm) { + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, addr, a->imm); + addr = temp; + } + + get_vreg64(ah, a->vd, 1); + get_vreg64(al, a->vd, 0); + tcg_gen_concat_i64_i128(val, al, ah); + tcg_gen_qemu_st_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE); + + return true; +} + +static bool trans_vldx(DisasContext *ctx, arg_vrr *a) +{ + TCGv addr, src1, src2; + TCGv_i64 rl, rh; + TCGv_i128 val; + + CHECK_SXE; + + addr = tcg_temp_new(); + src1 = gpr_src(ctx, a->rj, EXT_NONE); + src2 = gpr_src(ctx, a->rk, EXT_NONE); + val = tcg_temp_new_i128(); + rl = tcg_temp_new_i64(); + rh = tcg_temp_new_i64(); + + tcg_gen_add_tl(addr, src1, src2); + tcg_gen_qemu_ld_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE); + tcg_gen_extr_i128_i64(rl, rh, val); + set_vreg64(rh, a->vd, 1); + set_vreg64(rl, a->vd, 0); + + return true; +} + +static bool trans_vstx(DisasContext *ctx, arg_vrr *a) +{ + TCGv addr, src1, src2; + TCGv_i64 ah, al; + TCGv_i128 val; + + CHECK_SXE; + + addr = tcg_temp_new(); + src1 = gpr_src(ctx, a->rj, EXT_NONE); + src2 = gpr_src(ctx, a->rk, EXT_NONE); + val = tcg_temp_new_i128(); + ah = tcg_temp_new_i64(); + al = tcg_temp_new_i64(); + + tcg_gen_add_tl(addr, src1, src2); + get_vreg64(ah, a->vd, 1); + get_vreg64(al, a->vd, 0); + tcg_gen_concat_i64_i128(val, al, ah); + tcg_gen_qemu_st_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE); + + return true; +} + +#define VLDREPL(NAME, MO) \ +static bool trans_## NAME (DisasContext *ctx, arg_vr_i *a) \ +{ \ + TCGv addr, temp; \ + TCGv_i64 val; \ + \ + CHECK_SXE; \ + \ + addr = gpr_src(ctx, a->rj, EXT_NONE); \ + val = tcg_temp_new_i64(); \ + \ + if (a->imm) { \ + temp = tcg_temp_new(); \ + tcg_gen_addi_tl(temp, addr, a->imm); \ + addr = temp; \ + } \ + \ + tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, MO); \ + tcg_gen_gvec_dup_i64(MO, vec_full_offset(a->vd), 16, ctx->vl/8, val); \ + \ + return true; \ +} + +VLDREPL(vldrepl_b, MO_8) +VLDREPL(vldrepl_h, MO_16) +VLDREPL(vldrepl_w, MO_32) +VLDREPL(vldrepl_d, MO_64) + +#define VSTELM(NAME, MO, E) \ +static bool trans_## NAME (DisasContext *ctx, arg_vr_ii *a) \ +{ \ + TCGv addr, temp; \ + TCGv_i64 val; \ + \ + CHECK_SXE; \ + \ + addr = gpr_src(ctx, a->rj, EXT_NONE); \ + val = tcg_temp_new_i64(); \ + \ + if (a->imm) { \ + temp = tcg_temp_new(); \ + tcg_gen_addi_tl(temp, addr, a->imm); \ + addr = temp; \ + } \ + \ + tcg_gen_ld_i64(val, cpu_env, \ + offsetof(CPULoongArchState, fpr[a->vd].vreg.E(a->imm2))); \ + tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, MO); \ + \ + return true; \ +} + +VSTELM(vstelm_b, MO_8, B) +VSTELM(vstelm_h, MO_16, H) +VSTELM(vstelm_w, MO_32, W) +VSTELM(vstelm_d, MO_64, D) diff --git a/target/loongarch/insn_trans/trans_memory.c.inc b/target/loongarch/insn_trans/trans_memory.c.inc index d5eb31147c..75cfdf59ad 100644 --- a/target/loongarch/insn_trans/trans_memory.c.inc +++ b/target/loongarch/insn_trans/trans_memory.c.inc @@ -7,21 +7,15 @@ static bool gen_load(DisasContext *ctx, arg_rr_i *a, MemOp mop) { TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); - TCGv temp = NULL; if (a->imm) { - temp = tcg_temp_new(); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); gen_set_gpr(a->rd, dest, EXT_NONE); - - if (temp) { - tcg_temp_free(temp); - } - return true; } @@ -29,20 +23,14 @@ static bool gen_store(DisasContext *ctx, arg_rr_i *a, MemOp mop) { TCGv data = gpr_src(ctx, 
a->rd, EXT_NONE); TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); - TCGv temp = NULL; if (a->imm) { - temp = tcg_temp_new(); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, mop); - - if (temp) { - tcg_temp_free(temp); - } - return true; } @@ -56,7 +44,6 @@ static bool gen_loadx(DisasContext *ctx, arg_rrr *a, MemOp mop) tcg_gen_add_tl(addr, src1, src2); tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); gen_set_gpr(a->rd, dest, EXT_NONE); - tcg_temp_free(addr); return true; } @@ -70,7 +57,6 @@ static bool gen_storex(DisasContext *ctx, arg_rrr *a, MemOp mop) tcg_gen_add_tl(addr, src1, src2); tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, mop); - tcg_temp_free(addr); return true; } @@ -146,21 +132,15 @@ static bool gen_ldptr(DisasContext *ctx, arg_rr_i *a, MemOp mop) { TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); - TCGv temp = NULL; if (a->imm) { - temp = tcg_temp_new(); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); gen_set_gpr(a->rd, dest, EXT_NONE); - - if (temp) { - tcg_temp_free(temp); - } - return true; } @@ -168,20 +148,14 @@ static bool gen_stptr(DisasContext *ctx, arg_rr_i *a, MemOp mop) { TCGv data = gpr_src(ctx, a->rd, EXT_NONE); TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); - TCGv temp = NULL; if (a->imm) { - temp = tcg_temp_new(); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, mop); - - if (temp) { - tcg_temp_free(temp); - } - return true; } diff --git a/target/loongarch/insn_trans/trans_privileged.c.inc b/target/loongarch/insn_trans/trans_privileged.c.inc index 40f82becb0..5a04352b01 100644 --- a/target/loongarch/insn_trans/trans_privileged.c.inc +++ b/target/loongarch/insn_trans/trans_privileged.c.inc @@ -243,7 +243,7 @@ static bool trans_csrwr(DisasContext *ctx, arg_csrwr *a) dest = gpr_dst(ctx, a->rd, EXT_NONE); csr->writefn(dest, cpu_env, src1); } else { - dest = temp_new(ctx); + dest = tcg_temp_new(); tcg_gen_ld_tl(dest, cpu_env, csr->offset); tcg_gen_st_tl(src1, cpu_env, csr->offset); } @@ -291,10 +291,6 @@ static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a) tcg_gen_st_tl(newv, cpu_env, csr->offset); } gen_set_gpr(a->rd, oldv, EXT_NONE); - - tcg_temp_free(temp); - tcg_temp_free(newv); - tcg_temp_free(oldv); return true; } diff --git a/target/loongarch/insn_trans/trans_shift.c.inc b/target/loongarch/insn_trans/trans_shift.c.inc index 5260af2337..bf5428a2ba 100644 --- a/target/loongarch/insn_trans/trans_shift.c.inc +++ b/target/loongarch/insn_trans/trans_shift.c.inc @@ -8,7 +8,6 @@ static void gen_sll_w(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x1f); tcg_gen_shl_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_srl_w(TCGv dest, TCGv src1, TCGv src2) @@ -16,7 +15,6 @@ static void gen_srl_w(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x1f); tcg_gen_shr_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_sra_w(TCGv dest, TCGv src1, TCGv src2) @@ -24,7 +22,6 @@ static void gen_sra_w(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x1f); tcg_gen_sar_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_sll_d(TCGv dest, TCGv src1, TCGv src2) @@ -32,7 +29,6 @@ static void gen_sll_d(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, 
src2, 0x3f); tcg_gen_shl_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_srl_d(TCGv dest, TCGv src1, TCGv src2) @@ -40,7 +36,6 @@ static void gen_srl_d(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x3f); tcg_gen_shr_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_sra_d(TCGv dest, TCGv src1, TCGv src2) @@ -48,7 +43,6 @@ static void gen_sra_d(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x3f); tcg_gen_sar_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_rotr_w(TCGv dest, TCGv src1, TCGv src2) @@ -64,10 +58,6 @@ static void gen_rotr_w(TCGv dest, TCGv src1, TCGv src2) tcg_gen_rotr_i32(t1, t1, t2); tcg_gen_ext_i32_tl(dest, t1); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - tcg_temp_free(t0); } static void gen_rotr_d(TCGv dest, TCGv src1, TCGv src2) @@ -75,7 +65,6 @@ static void gen_rotr_d(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x3f); tcg_gen_rotr_tl(dest, src1, t0); - tcg_temp_free(t0); } static bool trans_srai_w(DisasContext *ctx, arg_srai_w *a) diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode index de7b8f0f3c..c9c3bc2c73 100644 --- a/target/loongarch/insns.decode +++ b/target/loongarch/insns.decode @@ -485,3 +485,814 @@ ldpte 0000 01100100 01 ........ ..... 00000 @j_i ertn 0000 01100100 10000 01110 00000 00000 @empty idle 0000 01100100 10001 ............... @i15 dbcl 0000 00000010 10101 ............... @i15 + +# +# LSX Fields +# + +%i9s3 10:s9 !function=shl_3 +%i10s2 10:s10 !function=shl_2 +%i11s1 10:s11 !function=shl_1 +%i8s3 10:s8 !function=shl_3 +%i8s2 10:s8 !function=shl_2 +%i8s1 10:s8 !function=shl_1 + +# +# LSX Argument sets +# + +&vv vd vj +&cv cd vj +&vvv vd vj vk +&vv_i vd vj imm +&vvvv vd vj vk va +&vvv_fcond vd vj vk fcond +&vr_i vd rj imm +&rv_i rd vj imm +&vr vd rj +&vvr vd vj rk +&vrr vd rj rk +&vr_ii vd rj imm imm2 +&v_i vd imm + +# +# LSX Formats +# +@vv .... ........ ..... ..... vj:5 vd:5 &vv +@cv .... ........ ..... ..... vj:5 .. cd:3 &cv +@vvv .... ........ ..... vk:5 vj:5 vd:5 &vvv +@vv_ui1 .... ........ ..... .... imm:1 vj:5 vd:5 &vv_i +@vv_ui2 .... ........ ..... ... imm:2 vj:5 vd:5 &vv_i +@vv_ui3 .... ........ ..... .. imm:3 vj:5 vd:5 &vv_i +@vv_ui4 .... ........ ..... . imm:4 vj:5 vd:5 &vv_i +@vv_ui5 .... ........ ..... imm:5 vj:5 vd:5 &vv_i +@vv_ui6 .... ........ .... imm:6 vj:5 vd:5 &vv_i +@vv_ui7 .... ........ ... imm:7 vj:5 vd:5 &vv_i +@vv_ui8 .... ........ .. imm:8 vj:5 vd:5 &vv_i +@vv_i5 .... ........ ..... imm:s5 vj:5 vd:5 &vv_i +@vvvv .... ........ va:5 vk:5 vj:5 vd:5 &vvvv +@vvv_fcond .... ........ fcond:5 vk:5 vj:5 vd:5 &vvv_fcond +@vr_ui4 .... ........ ..... . imm:4 rj:5 vd:5 &vr_i +@vr_ui3 .... ........ ..... .. imm:3 rj:5 vd:5 &vr_i +@vr_ui2 .... ........ ..... ... imm:2 rj:5 vd:5 &vr_i +@vr_ui1 .... ........ ..... .... imm:1 rj:5 vd:5 &vr_i +@rv_ui4 .... ........ ..... . imm:4 vj:5 rd:5 &rv_i +@rv_ui3 .... ........ ..... .. imm:3 vj:5 rd:5 &rv_i +@rv_ui2 .... ........ ..... ... imm:2 vj:5 rd:5 &rv_i +@rv_ui1 .... ........ ..... .... imm:1 vj:5 rd:5 &rv_i +@vr .... ........ ..... ..... rj:5 vd:5 &vr +@vvr .... ........ ..... rk:5 vj:5 vd:5 &vvr +@vr_i9 .... ........ . ......... rj:5 vd:5 &vr_i imm=%i9s3 +@vr_i10 .... ........ .......... rj:5 vd:5 &vr_i imm=%i10s2 +@vr_i11 .... ....... ........... rj:5 vd:5 &vr_i imm=%i11s1 +@vr_i12 .... ...... imm:s12 rj:5 vd:5 &vr_i +@vr_i8i1 .... ........ . imm2:1 ........ rj:5 vd:5 &vr_ii imm=%i8s3 +@vr_i8i2 .... ........ 
imm2:2 ........ rj:5 vd:5 &vr_ii imm=%i8s2 +@vr_i8i3 .... ....... imm2:3 ........ rj:5 vd:5 &vr_ii imm=%i8s1 +@vr_i8i4 .... ...... imm2:4 imm:s8 rj:5 vd:5 &vr_ii +@vrr .... ........ ..... rk:5 rj:5 vd:5 &vrr +@v_i13 .... ........ .. imm:13 vd:5 &v_i + +vadd_b 0111 00000000 10100 ..... ..... ..... @vvv +vadd_h 0111 00000000 10101 ..... ..... ..... @vvv +vadd_w 0111 00000000 10110 ..... ..... ..... @vvv +vadd_d 0111 00000000 10111 ..... ..... ..... @vvv +vadd_q 0111 00010010 11010 ..... ..... ..... @vvv +vsub_b 0111 00000000 11000 ..... ..... ..... @vvv +vsub_h 0111 00000000 11001 ..... ..... ..... @vvv +vsub_w 0111 00000000 11010 ..... ..... ..... @vvv +vsub_d 0111 00000000 11011 ..... ..... ..... @vvv +vsub_q 0111 00010010 11011 ..... ..... ..... @vvv + +vaddi_bu 0111 00101000 10100 ..... ..... ..... @vv_ui5 +vaddi_hu 0111 00101000 10101 ..... ..... ..... @vv_ui5 +vaddi_wu 0111 00101000 10110 ..... ..... ..... @vv_ui5 +vaddi_du 0111 00101000 10111 ..... ..... ..... @vv_ui5 +vsubi_bu 0111 00101000 11000 ..... ..... ..... @vv_ui5 +vsubi_hu 0111 00101000 11001 ..... ..... ..... @vv_ui5 +vsubi_wu 0111 00101000 11010 ..... ..... ..... @vv_ui5 +vsubi_du 0111 00101000 11011 ..... ..... ..... @vv_ui5 + +vneg_b 0111 00101001 11000 01100 ..... ..... @vv +vneg_h 0111 00101001 11000 01101 ..... ..... @vv +vneg_w 0111 00101001 11000 01110 ..... ..... @vv +vneg_d 0111 00101001 11000 01111 ..... ..... @vv + +vsadd_b 0111 00000100 01100 ..... ..... ..... @vvv +vsadd_h 0111 00000100 01101 ..... ..... ..... @vvv +vsadd_w 0111 00000100 01110 ..... ..... ..... @vvv +vsadd_d 0111 00000100 01111 ..... ..... ..... @vvv +vsadd_bu 0111 00000100 10100 ..... ..... ..... @vvv +vsadd_hu 0111 00000100 10101 ..... ..... ..... @vvv +vsadd_wu 0111 00000100 10110 ..... ..... ..... @vvv +vsadd_du 0111 00000100 10111 ..... ..... ..... @vvv +vssub_b 0111 00000100 10000 ..... ..... ..... @vvv +vssub_h 0111 00000100 10001 ..... ..... ..... @vvv +vssub_w 0111 00000100 10010 ..... ..... ..... @vvv +vssub_d 0111 00000100 10011 ..... ..... ..... @vvv +vssub_bu 0111 00000100 11000 ..... ..... ..... @vvv +vssub_hu 0111 00000100 11001 ..... ..... ..... @vvv +vssub_wu 0111 00000100 11010 ..... ..... ..... @vvv +vssub_du 0111 00000100 11011 ..... ..... ..... @vvv + +vhaddw_h_b 0111 00000101 01000 ..... ..... ..... @vvv +vhaddw_w_h 0111 00000101 01001 ..... ..... ..... @vvv +vhaddw_d_w 0111 00000101 01010 ..... ..... ..... @vvv +vhaddw_q_d 0111 00000101 01011 ..... ..... ..... @vvv +vhaddw_hu_bu 0111 00000101 10000 ..... ..... ..... @vvv +vhaddw_wu_hu 0111 00000101 10001 ..... ..... ..... @vvv +vhaddw_du_wu 0111 00000101 10010 ..... ..... ..... @vvv +vhaddw_qu_du 0111 00000101 10011 ..... ..... ..... @vvv +vhsubw_h_b 0111 00000101 01100 ..... ..... ..... @vvv +vhsubw_w_h 0111 00000101 01101 ..... ..... ..... @vvv +vhsubw_d_w 0111 00000101 01110 ..... ..... ..... @vvv +vhsubw_q_d 0111 00000101 01111 ..... ..... ..... @vvv +vhsubw_hu_bu 0111 00000101 10100 ..... ..... ..... @vvv +vhsubw_wu_hu 0111 00000101 10101 ..... ..... ..... @vvv +vhsubw_du_wu 0111 00000101 10110 ..... ..... ..... @vvv +vhsubw_qu_du 0111 00000101 10111 ..... ..... ..... @vvv + +vaddwev_h_b 0111 00000001 11100 ..... ..... ..... @vvv +vaddwev_w_h 0111 00000001 11101 ..... ..... ..... @vvv +vaddwev_d_w 0111 00000001 11110 ..... ..... ..... @vvv +vaddwev_q_d 0111 00000001 11111 ..... ..... ..... @vvv +vaddwod_h_b 0111 00000010 00100 ..... ..... ..... @vvv +vaddwod_w_h 0111 00000010 00101 ..... ..... ..... @vvv +vaddwod_d_w 0111 00000010 00110 ..... ..... ..... 
@vvv +vaddwod_q_d 0111 00000010 00111 ..... ..... ..... @vvv +vsubwev_h_b 0111 00000010 00000 ..... ..... ..... @vvv +vsubwev_w_h 0111 00000010 00001 ..... ..... ..... @vvv +vsubwev_d_w 0111 00000010 00010 ..... ..... ..... @vvv +vsubwev_q_d 0111 00000010 00011 ..... ..... ..... @vvv +vsubwod_h_b 0111 00000010 01000 ..... ..... ..... @vvv +vsubwod_w_h 0111 00000010 01001 ..... ..... ..... @vvv +vsubwod_d_w 0111 00000010 01010 ..... ..... ..... @vvv +vsubwod_q_d 0111 00000010 01011 ..... ..... ..... @vvv + +vaddwev_h_bu 0111 00000010 11100 ..... ..... ..... @vvv +vaddwev_w_hu 0111 00000010 11101 ..... ..... ..... @vvv +vaddwev_d_wu 0111 00000010 11110 ..... ..... ..... @vvv +vaddwev_q_du 0111 00000010 11111 ..... ..... ..... @vvv +vaddwod_h_bu 0111 00000011 00100 ..... ..... ..... @vvv +vaddwod_w_hu 0111 00000011 00101 ..... ..... ..... @vvv +vaddwod_d_wu 0111 00000011 00110 ..... ..... ..... @vvv +vaddwod_q_du 0111 00000011 00111 ..... ..... ..... @vvv +vsubwev_h_bu 0111 00000011 00000 ..... ..... ..... @vvv +vsubwev_w_hu 0111 00000011 00001 ..... ..... ..... @vvv +vsubwev_d_wu 0111 00000011 00010 ..... ..... ..... @vvv +vsubwev_q_du 0111 00000011 00011 ..... ..... ..... @vvv +vsubwod_h_bu 0111 00000011 01000 ..... ..... ..... @vvv +vsubwod_w_hu 0111 00000011 01001 ..... ..... ..... @vvv +vsubwod_d_wu 0111 00000011 01010 ..... ..... ..... @vvv +vsubwod_q_du 0111 00000011 01011 ..... ..... ..... @vvv + +vaddwev_h_bu_b 0111 00000011 11100 ..... ..... ..... @vvv +vaddwev_w_hu_h 0111 00000011 11101 ..... ..... ..... @vvv +vaddwev_d_wu_w 0111 00000011 11110 ..... ..... ..... @vvv +vaddwev_q_du_d 0111 00000011 11111 ..... ..... ..... @vvv +vaddwod_h_bu_b 0111 00000100 00000 ..... ..... ..... @vvv +vaddwod_w_hu_h 0111 00000100 00001 ..... ..... ..... @vvv +vaddwod_d_wu_w 0111 00000100 00010 ..... ..... ..... @vvv +vaddwod_q_du_d 0111 00000100 00011 ..... ..... ..... @vvv + +vavg_b 0111 00000110 01000 ..... ..... ..... @vvv +vavg_h 0111 00000110 01001 ..... ..... ..... @vvv +vavg_w 0111 00000110 01010 ..... ..... ..... @vvv +vavg_d 0111 00000110 01011 ..... ..... ..... @vvv +vavg_bu 0111 00000110 01100 ..... ..... ..... @vvv +vavg_hu 0111 00000110 01101 ..... ..... ..... @vvv +vavg_wu 0111 00000110 01110 ..... ..... ..... @vvv +vavg_du 0111 00000110 01111 ..... ..... ..... @vvv +vavgr_b 0111 00000110 10000 ..... ..... ..... @vvv +vavgr_h 0111 00000110 10001 ..... ..... ..... @vvv +vavgr_w 0111 00000110 10010 ..... ..... ..... @vvv +vavgr_d 0111 00000110 10011 ..... ..... ..... @vvv +vavgr_bu 0111 00000110 10100 ..... ..... ..... @vvv +vavgr_hu 0111 00000110 10101 ..... ..... ..... @vvv +vavgr_wu 0111 00000110 10110 ..... ..... ..... @vvv +vavgr_du 0111 00000110 10111 ..... ..... ..... @vvv + +vabsd_b 0111 00000110 00000 ..... ..... ..... @vvv +vabsd_h 0111 00000110 00001 ..... ..... ..... @vvv +vabsd_w 0111 00000110 00010 ..... ..... ..... @vvv +vabsd_d 0111 00000110 00011 ..... ..... ..... @vvv +vabsd_bu 0111 00000110 00100 ..... ..... ..... @vvv +vabsd_hu 0111 00000110 00101 ..... ..... ..... @vvv +vabsd_wu 0111 00000110 00110 ..... ..... ..... @vvv +vabsd_du 0111 00000110 00111 ..... ..... ..... @vvv + +vadda_b 0111 00000101 11000 ..... ..... ..... @vvv +vadda_h 0111 00000101 11001 ..... ..... ..... @vvv +vadda_w 0111 00000101 11010 ..... ..... ..... @vvv +vadda_d 0111 00000101 11011 ..... ..... ..... @vvv + +vmax_b 0111 00000111 00000 ..... ..... ..... @vvv +vmax_h 0111 00000111 00001 ..... ..... ..... @vvv +vmax_w 0111 00000111 00010 ..... ..... ..... @vvv +vmax_d 0111 00000111 00011 ..... 
..... ..... @vvv +vmaxi_b 0111 00101001 00000 ..... ..... ..... @vv_i5 +vmaxi_h 0111 00101001 00001 ..... ..... ..... @vv_i5 +vmaxi_w 0111 00101001 00010 ..... ..... ..... @vv_i5 +vmaxi_d 0111 00101001 00011 ..... ..... ..... @vv_i5 +vmax_bu 0111 00000111 01000 ..... ..... ..... @vvv +vmax_hu 0111 00000111 01001 ..... ..... ..... @vvv +vmax_wu 0111 00000111 01010 ..... ..... ..... @vvv +vmax_du 0111 00000111 01011 ..... ..... ..... @vvv +vmaxi_bu 0111 00101001 01000 ..... ..... ..... @vv_ui5 +vmaxi_hu 0111 00101001 01001 ..... ..... ..... @vv_ui5 +vmaxi_wu 0111 00101001 01010 ..... ..... ..... @vv_ui5 +vmaxi_du 0111 00101001 01011 ..... ..... ..... @vv_ui5 + +vmin_b 0111 00000111 00100 ..... ..... ..... @vvv +vmin_h 0111 00000111 00101 ..... ..... ..... @vvv +vmin_w 0111 00000111 00110 ..... ..... ..... @vvv +vmin_d 0111 00000111 00111 ..... ..... ..... @vvv +vmini_b 0111 00101001 00100 ..... ..... ..... @vv_i5 +vmini_h 0111 00101001 00101 ..... ..... ..... @vv_i5 +vmini_w 0111 00101001 00110 ..... ..... ..... @vv_i5 +vmini_d 0111 00101001 00111 ..... ..... ..... @vv_i5 +vmin_bu 0111 00000111 01100 ..... ..... ..... @vvv +vmin_hu 0111 00000111 01101 ..... ..... ..... @vvv +vmin_wu 0111 00000111 01110 ..... ..... ..... @vvv +vmin_du 0111 00000111 01111 ..... ..... ..... @vvv +vmini_bu 0111 00101001 01100 ..... ..... ..... @vv_ui5 +vmini_hu 0111 00101001 01101 ..... ..... ..... @vv_ui5 +vmini_wu 0111 00101001 01110 ..... ..... ..... @vv_ui5 +vmini_du 0111 00101001 01111 ..... ..... ..... @vv_ui5 + +vmul_b 0111 00001000 01000 ..... ..... ..... @vvv +vmul_h 0111 00001000 01001 ..... ..... ..... @vvv +vmul_w 0111 00001000 01010 ..... ..... ..... @vvv +vmul_d 0111 00001000 01011 ..... ..... ..... @vvv +vmuh_b 0111 00001000 01100 ..... ..... ..... @vvv +vmuh_h 0111 00001000 01101 ..... ..... ..... @vvv +vmuh_w 0111 00001000 01110 ..... ..... ..... @vvv +vmuh_d 0111 00001000 01111 ..... ..... ..... @vvv +vmuh_bu 0111 00001000 10000 ..... ..... ..... @vvv +vmuh_hu 0111 00001000 10001 ..... ..... ..... @vvv +vmuh_wu 0111 00001000 10010 ..... ..... ..... @vvv +vmuh_du 0111 00001000 10011 ..... ..... ..... @vvv + +vmulwev_h_b 0111 00001001 00000 ..... ..... ..... @vvv +vmulwev_w_h 0111 00001001 00001 ..... ..... ..... @vvv +vmulwev_d_w 0111 00001001 00010 ..... ..... ..... @vvv +vmulwev_q_d 0111 00001001 00011 ..... ..... ..... @vvv +vmulwod_h_b 0111 00001001 00100 ..... ..... ..... @vvv +vmulwod_w_h 0111 00001001 00101 ..... ..... ..... @vvv +vmulwod_d_w 0111 00001001 00110 ..... ..... ..... @vvv +vmulwod_q_d 0111 00001001 00111 ..... ..... ..... @vvv +vmulwev_h_bu 0111 00001001 10000 ..... ..... ..... @vvv +vmulwev_w_hu 0111 00001001 10001 ..... ..... ..... @vvv +vmulwev_d_wu 0111 00001001 10010 ..... ..... ..... @vvv +vmulwev_q_du 0111 00001001 10011 ..... ..... ..... @vvv +vmulwod_h_bu 0111 00001001 10100 ..... ..... ..... @vvv +vmulwod_w_hu 0111 00001001 10101 ..... ..... ..... @vvv +vmulwod_d_wu 0111 00001001 10110 ..... ..... ..... @vvv +vmulwod_q_du 0111 00001001 10111 ..... ..... ..... @vvv +vmulwev_h_bu_b 0111 00001010 00000 ..... ..... ..... @vvv +vmulwev_w_hu_h 0111 00001010 00001 ..... ..... ..... @vvv +vmulwev_d_wu_w 0111 00001010 00010 ..... ..... ..... @vvv +vmulwev_q_du_d 0111 00001010 00011 ..... ..... ..... @vvv +vmulwod_h_bu_b 0111 00001010 00100 ..... ..... ..... @vvv +vmulwod_w_hu_h 0111 00001010 00101 ..... ..... ..... @vvv +vmulwod_d_wu_w 0111 00001010 00110 ..... ..... ..... @vvv +vmulwod_q_du_d 0111 00001010 00111 ..... ..... ..... @vvv + +vmadd_b 0111 00001010 10000 ..... 
..... ..... @vvv +vmadd_h 0111 00001010 10001 ..... ..... ..... @vvv +vmadd_w 0111 00001010 10010 ..... ..... ..... @vvv +vmadd_d 0111 00001010 10011 ..... ..... ..... @vvv +vmsub_b 0111 00001010 10100 ..... ..... ..... @vvv +vmsub_h 0111 00001010 10101 ..... ..... ..... @vvv +vmsub_w 0111 00001010 10110 ..... ..... ..... @vvv +vmsub_d 0111 00001010 10111 ..... ..... ..... @vvv + +vmaddwev_h_b 0111 00001010 11000 ..... ..... ..... @vvv +vmaddwev_w_h 0111 00001010 11001 ..... ..... ..... @vvv +vmaddwev_d_w 0111 00001010 11010 ..... ..... ..... @vvv +vmaddwev_q_d 0111 00001010 11011 ..... ..... ..... @vvv +vmaddwod_h_b 0111 00001010 11100 ..... ..... ..... @vvv +vmaddwod_w_h 0111 00001010 11101 ..... ..... ..... @vvv +vmaddwod_d_w 0111 00001010 11110 ..... ..... ..... @vvv +vmaddwod_q_d 0111 00001010 11111 ..... ..... ..... @vvv +vmaddwev_h_bu 0111 00001011 01000 ..... ..... ..... @vvv +vmaddwev_w_hu 0111 00001011 01001 ..... ..... ..... @vvv +vmaddwev_d_wu 0111 00001011 01010 ..... ..... ..... @vvv +vmaddwev_q_du 0111 00001011 01011 ..... ..... ..... @vvv +vmaddwod_h_bu 0111 00001011 01100 ..... ..... ..... @vvv +vmaddwod_w_hu 0111 00001011 01101 ..... ..... ..... @vvv +vmaddwod_d_wu 0111 00001011 01110 ..... ..... ..... @vvv +vmaddwod_q_du 0111 00001011 01111 ..... ..... ..... @vvv +vmaddwev_h_bu_b 0111 00001011 11000 ..... ..... ..... @vvv +vmaddwev_w_hu_h 0111 00001011 11001 ..... ..... ..... @vvv +vmaddwev_d_wu_w 0111 00001011 11010 ..... ..... ..... @vvv +vmaddwev_q_du_d 0111 00001011 11011 ..... ..... ..... @vvv +vmaddwod_h_bu_b 0111 00001011 11100 ..... ..... ..... @vvv +vmaddwod_w_hu_h 0111 00001011 11101 ..... ..... ..... @vvv +vmaddwod_d_wu_w 0111 00001011 11110 ..... ..... ..... @vvv +vmaddwod_q_du_d 0111 00001011 11111 ..... ..... ..... @vvv + +vdiv_b 0111 00001110 00000 ..... ..... ..... @vvv +vdiv_h 0111 00001110 00001 ..... ..... ..... @vvv +vdiv_w 0111 00001110 00010 ..... ..... ..... @vvv +vdiv_d 0111 00001110 00011 ..... ..... ..... @vvv +vdiv_bu 0111 00001110 01000 ..... ..... ..... @vvv +vdiv_hu 0111 00001110 01001 ..... ..... ..... @vvv +vdiv_wu 0111 00001110 01010 ..... ..... ..... @vvv +vdiv_du 0111 00001110 01011 ..... ..... ..... @vvv +vmod_b 0111 00001110 00100 ..... ..... ..... @vvv +vmod_h 0111 00001110 00101 ..... ..... ..... @vvv +vmod_w 0111 00001110 00110 ..... ..... ..... @vvv +vmod_d 0111 00001110 00111 ..... ..... ..... @vvv +vmod_bu 0111 00001110 01100 ..... ..... ..... @vvv +vmod_hu 0111 00001110 01101 ..... ..... ..... @vvv +vmod_wu 0111 00001110 01110 ..... ..... ..... @vvv +vmod_du 0111 00001110 01111 ..... ..... ..... @vvv + +vsat_b 0111 00110010 01000 01 ... ..... ..... @vv_ui3 +vsat_h 0111 00110010 01000 1 .... ..... ..... @vv_ui4 +vsat_w 0111 00110010 01001 ..... ..... ..... @vv_ui5 +vsat_d 0111 00110010 0101 ...... ..... ..... @vv_ui6 +vsat_bu 0111 00110010 10000 01 ... ..... ..... @vv_ui3 +vsat_hu 0111 00110010 10000 1 .... ..... ..... @vv_ui4 +vsat_wu 0111 00110010 10001 ..... ..... ..... @vv_ui5 +vsat_du 0111 00110010 1001 ...... ..... ..... @vv_ui6 + +vexth_h_b 0111 00101001 11101 11000 ..... ..... @vv +vexth_w_h 0111 00101001 11101 11001 ..... ..... @vv +vexth_d_w 0111 00101001 11101 11010 ..... ..... @vv +vexth_q_d 0111 00101001 11101 11011 ..... ..... @vv +vexth_hu_bu 0111 00101001 11101 11100 ..... ..... @vv +vexth_wu_hu 0111 00101001 11101 11101 ..... ..... @vv +vexth_du_wu 0111 00101001 11101 11110 ..... ..... @vv +vexth_qu_du 0111 00101001 11101 11111 ..... ..... @vv + +vsigncov_b 0111 00010010 11100 ..... ..... ..... 
@vvv +vsigncov_h 0111 00010010 11101 ..... ..... ..... @vvv +vsigncov_w 0111 00010010 11110 ..... ..... ..... @vvv +vsigncov_d 0111 00010010 11111 ..... ..... ..... @vvv + +vmskltz_b 0111 00101001 11000 10000 ..... ..... @vv +vmskltz_h 0111 00101001 11000 10001 ..... ..... @vv +vmskltz_w 0111 00101001 11000 10010 ..... ..... @vv +vmskltz_d 0111 00101001 11000 10011 ..... ..... @vv +vmskgez_b 0111 00101001 11000 10100 ..... ..... @vv +vmsknz_b 0111 00101001 11000 11000 ..... ..... @vv + +vldi 0111 00111110 00 ............. ..... @v_i13 + +vand_v 0111 00010010 01100 ..... ..... ..... @vvv +vor_v 0111 00010010 01101 ..... ..... ..... @vvv +vxor_v 0111 00010010 01110 ..... ..... ..... @vvv +vnor_v 0111 00010010 01111 ..... ..... ..... @vvv +vandn_v 0111 00010010 10000 ..... ..... ..... @vvv +vorn_v 0111 00010010 10001 ..... ..... ..... @vvv + +vandi_b 0111 00111101 00 ........ ..... ..... @vv_ui8 +vori_b 0111 00111101 01 ........ ..... ..... @vv_ui8 +vxori_b 0111 00111101 10 ........ ..... ..... @vv_ui8 +vnori_b 0111 00111101 11 ........ ..... ..... @vv_ui8 + +vsll_b 0111 00001110 10000 ..... ..... ..... @vvv +vsll_h 0111 00001110 10001 ..... ..... ..... @vvv +vsll_w 0111 00001110 10010 ..... ..... ..... @vvv +vsll_d 0111 00001110 10011 ..... ..... ..... @vvv +vslli_b 0111 00110010 11000 01 ... ..... ..... @vv_ui3 +vslli_h 0111 00110010 11000 1 .... ..... ..... @vv_ui4 +vslli_w 0111 00110010 11001 ..... ..... ..... @vv_ui5 +vslli_d 0111 00110010 1101 ...... ..... ..... @vv_ui6 + +vsrl_b 0111 00001110 10100 ..... ..... ..... @vvv +vsrl_h 0111 00001110 10101 ..... ..... ..... @vvv +vsrl_w 0111 00001110 10110 ..... ..... ..... @vvv +vsrl_d 0111 00001110 10111 ..... ..... ..... @vvv +vsrli_b 0111 00110011 00000 01 ... ..... ..... @vv_ui3 +vsrli_h 0111 00110011 00000 1 .... ..... ..... @vv_ui4 +vsrli_w 0111 00110011 00001 ..... ..... ..... @vv_ui5 +vsrli_d 0111 00110011 0001 ...... ..... ..... @vv_ui6 + +vsra_b 0111 00001110 11000 ..... ..... ..... @vvv +vsra_h 0111 00001110 11001 ..... ..... ..... @vvv +vsra_w 0111 00001110 11010 ..... ..... ..... @vvv +vsra_d 0111 00001110 11011 ..... ..... ..... @vvv +vsrai_b 0111 00110011 01000 01 ... ..... ..... @vv_ui3 +vsrai_h 0111 00110011 01000 1 .... ..... ..... @vv_ui4 +vsrai_w 0111 00110011 01001 ..... ..... ..... @vv_ui5 +vsrai_d 0111 00110011 0101 ...... ..... ..... @vv_ui6 + +vrotr_b 0111 00001110 11100 ..... ..... ..... @vvv +vrotr_h 0111 00001110 11101 ..... ..... ..... @vvv +vrotr_w 0111 00001110 11110 ..... ..... ..... @vvv +vrotr_d 0111 00001110 11111 ..... ..... ..... @vvv +vrotri_b 0111 00101010 00000 01 ... ..... ..... @vv_ui3 +vrotri_h 0111 00101010 00000 1 .... ..... ..... @vv_ui4 +vrotri_w 0111 00101010 00001 ..... ..... ..... @vv_ui5 +vrotri_d 0111 00101010 0001 ...... ..... ..... @vv_ui6 + +vsllwil_h_b 0111 00110000 10000 01 ... ..... ..... @vv_ui3 +vsllwil_w_h 0111 00110000 10000 1 .... ..... ..... @vv_ui4 +vsllwil_d_w 0111 00110000 10001 ..... ..... ..... @vv_ui5 +vextl_q_d 0111 00110000 10010 00000 ..... ..... @vv +vsllwil_hu_bu 0111 00110000 11000 01 ... ..... ..... @vv_ui3 +vsllwil_wu_hu 0111 00110000 11000 1 .... ..... ..... @vv_ui4 +vsllwil_du_wu 0111 00110000 11001 ..... ..... ..... @vv_ui5 +vextl_qu_du 0111 00110000 11010 00000 ..... ..... @vv + +vsrlr_b 0111 00001111 00000 ..... ..... ..... @vvv +vsrlr_h 0111 00001111 00001 ..... ..... ..... @vvv +vsrlr_w 0111 00001111 00010 ..... ..... ..... @vvv +vsrlr_d 0111 00001111 00011 ..... ..... ..... @vvv +vsrlri_b 0111 00101010 01000 01 ... ..... ..... 
@vv_ui3 +vsrlri_h 0111 00101010 01000 1 .... ..... ..... @vv_ui4 +vsrlri_w 0111 00101010 01001 ..... ..... ..... @vv_ui5 +vsrlri_d 0111 00101010 0101 ...... ..... ..... @vv_ui6 + +vsrar_b 0111 00001111 00100 ..... ..... ..... @vvv +vsrar_h 0111 00001111 00101 ..... ..... ..... @vvv +vsrar_w 0111 00001111 00110 ..... ..... ..... @vvv +vsrar_d 0111 00001111 00111 ..... ..... ..... @vvv +vsrari_b 0111 00101010 10000 01 ... ..... ..... @vv_ui3 +vsrari_h 0111 00101010 10000 1 .... ..... ..... @vv_ui4 +vsrari_w 0111 00101010 10001 ..... ..... ..... @vv_ui5 +vsrari_d 0111 00101010 1001 ...... ..... ..... @vv_ui6 + +vsrln_b_h 0111 00001111 01001 ..... ..... ..... @vvv +vsrln_h_w 0111 00001111 01010 ..... ..... ..... @vvv +vsrln_w_d 0111 00001111 01011 ..... ..... ..... @vvv +vsran_b_h 0111 00001111 01101 ..... ..... ..... @vvv +vsran_h_w 0111 00001111 01110 ..... ..... ..... @vvv +vsran_w_d 0111 00001111 01111 ..... ..... ..... @vvv + +vsrlni_b_h 0111 00110100 00000 1 .... ..... ..... @vv_ui4 +vsrlni_h_w 0111 00110100 00001 ..... ..... ..... @vv_ui5 +vsrlni_w_d 0111 00110100 0001 ...... ..... ..... @vv_ui6 +vsrlni_d_q 0111 00110100 001 ....... ..... ..... @vv_ui7 +vsrani_b_h 0111 00110101 10000 1 .... ..... ..... @vv_ui4 +vsrani_h_w 0111 00110101 10001 ..... ..... ..... @vv_ui5 +vsrani_w_d 0111 00110101 1001 ...... ..... ..... @vv_ui6 +vsrani_d_q 0111 00110101 101 ....... ..... ..... @vv_ui7 + +vsrlrn_b_h 0111 00001111 10001 ..... ..... ..... @vvv +vsrlrn_h_w 0111 00001111 10010 ..... ..... ..... @vvv +vsrlrn_w_d 0111 00001111 10011 ..... ..... ..... @vvv +vsrarn_b_h 0111 00001111 10101 ..... ..... ..... @vvv +vsrarn_h_w 0111 00001111 10110 ..... ..... ..... @vvv +vsrarn_w_d 0111 00001111 10111 ..... ..... ..... @vvv + +vsrlrni_b_h 0111 00110100 01000 1 .... ..... ..... @vv_ui4 +vsrlrni_h_w 0111 00110100 01001 ..... ..... ..... @vv_ui5 +vsrlrni_w_d 0111 00110100 0101 ...... ..... ..... @vv_ui6 +vsrlrni_d_q 0111 00110100 011 ....... ..... ..... @vv_ui7 +vsrarni_b_h 0111 00110101 11000 1 .... ..... ..... @vv_ui4 +vsrarni_h_w 0111 00110101 11001 ..... ..... ..... @vv_ui5 +vsrarni_w_d 0111 00110101 1101 ...... ..... ..... @vv_ui6 +vsrarni_d_q 0111 00110101 111 ....... ..... ..... @vv_ui7 + +vssrln_b_h 0111 00001111 11001 ..... ..... ..... @vvv +vssrln_h_w 0111 00001111 11010 ..... ..... ..... @vvv +vssrln_w_d 0111 00001111 11011 ..... ..... ..... @vvv +vssran_b_h 0111 00001111 11101 ..... ..... ..... @vvv +vssran_h_w 0111 00001111 11110 ..... ..... ..... @vvv +vssran_w_d 0111 00001111 11111 ..... ..... ..... @vvv +vssrln_bu_h 0111 00010000 01001 ..... ..... ..... @vvv +vssrln_hu_w 0111 00010000 01010 ..... ..... ..... @vvv +vssrln_wu_d 0111 00010000 01011 ..... ..... ..... @vvv +vssran_bu_h 0111 00010000 01101 ..... ..... ..... @vvv +vssran_hu_w 0111 00010000 01110 ..... ..... ..... @vvv +vssran_wu_d 0111 00010000 01111 ..... ..... ..... @vvv + +vssrlni_b_h 0111 00110100 10000 1 .... ..... ..... @vv_ui4 +vssrlni_h_w 0111 00110100 10001 ..... ..... ..... @vv_ui5 +vssrlni_w_d 0111 00110100 1001 ...... ..... ..... @vv_ui6 +vssrlni_d_q 0111 00110100 101 ....... ..... ..... @vv_ui7 +vssrani_b_h 0111 00110110 00000 1 .... ..... ..... @vv_ui4 +vssrani_h_w 0111 00110110 00001 ..... ..... ..... @vv_ui5 +vssrani_w_d 0111 00110110 0001 ...... ..... ..... @vv_ui6 +vssrani_d_q 0111 00110110 001 ....... ..... ..... @vv_ui7 +vssrlni_bu_h 0111 00110100 11000 1 .... ..... ..... @vv_ui4 +vssrlni_hu_w 0111 00110100 11001 ..... ..... ..... @vv_ui5 +vssrlni_wu_d 0111 00110100 1101 ...... ..... ..... 
@vv_ui6 +vssrlni_du_q 0111 00110100 111 ....... ..... ..... @vv_ui7 +vssrani_bu_h 0111 00110110 01000 1 .... ..... ..... @vv_ui4 +vssrani_hu_w 0111 00110110 01001 ..... ..... ..... @vv_ui5 +vssrani_wu_d 0111 00110110 0101 ...... ..... ..... @vv_ui6 +vssrani_du_q 0111 00110110 011 ....... ..... ..... @vv_ui7 + +vssrlrn_b_h 0111 00010000 00001 ..... ..... ..... @vvv +vssrlrn_h_w 0111 00010000 00010 ..... ..... ..... @vvv +vssrlrn_w_d 0111 00010000 00011 ..... ..... ..... @vvv +vssrarn_b_h 0111 00010000 00101 ..... ..... ..... @vvv +vssrarn_h_w 0111 00010000 00110 ..... ..... ..... @vvv +vssrarn_w_d 0111 00010000 00111 ..... ..... ..... @vvv +vssrlrn_bu_h 0111 00010000 10001 ..... ..... ..... @vvv +vssrlrn_hu_w 0111 00010000 10010 ..... ..... ..... @vvv +vssrlrn_wu_d 0111 00010000 10011 ..... ..... ..... @vvv +vssrarn_bu_h 0111 00010000 10101 ..... ..... ..... @vvv +vssrarn_hu_w 0111 00010000 10110 ..... ..... ..... @vvv +vssrarn_wu_d 0111 00010000 10111 ..... ..... ..... @vvv + +vssrlrni_b_h 0111 00110101 00000 1 .... ..... ..... @vv_ui4 +vssrlrni_h_w 0111 00110101 00001 ..... ..... ..... @vv_ui5 +vssrlrni_w_d 0111 00110101 0001 ...... ..... ..... @vv_ui6 +vssrlrni_d_q 0111 00110101 001 ....... ..... ..... @vv_ui7 +vssrarni_b_h 0111 00110110 10000 1 .... ..... ..... @vv_ui4 +vssrarni_h_w 0111 00110110 10001 ..... ..... ..... @vv_ui5 +vssrarni_w_d 0111 00110110 1001 ...... ..... ..... @vv_ui6 +vssrarni_d_q 0111 00110110 101 ....... ..... ..... @vv_ui7 +vssrlrni_bu_h 0111 00110101 01000 1 .... ..... ..... @vv_ui4 +vssrlrni_hu_w 0111 00110101 01001 ..... ..... ..... @vv_ui5 +vssrlrni_wu_d 0111 00110101 0101 ...... ..... ..... @vv_ui6 +vssrlrni_du_q 0111 00110101 011 ....... ..... ..... @vv_ui7 +vssrarni_bu_h 0111 00110110 11000 1 .... ..... ..... @vv_ui4 +vssrarni_hu_w 0111 00110110 11001 ..... ..... ..... @vv_ui5 +vssrarni_wu_d 0111 00110110 1101 ...... ..... ..... @vv_ui6 +vssrarni_du_q 0111 00110110 111 ....... ..... ..... @vv_ui7 + +vclo_b 0111 00101001 11000 00000 ..... ..... @vv +vclo_h 0111 00101001 11000 00001 ..... ..... @vv +vclo_w 0111 00101001 11000 00010 ..... ..... @vv +vclo_d 0111 00101001 11000 00011 ..... ..... @vv +vclz_b 0111 00101001 11000 00100 ..... ..... @vv +vclz_h 0111 00101001 11000 00101 ..... ..... @vv +vclz_w 0111 00101001 11000 00110 ..... ..... @vv +vclz_d 0111 00101001 11000 00111 ..... ..... @vv + +vpcnt_b 0111 00101001 11000 01000 ..... ..... @vv +vpcnt_h 0111 00101001 11000 01001 ..... ..... @vv +vpcnt_w 0111 00101001 11000 01010 ..... ..... @vv +vpcnt_d 0111 00101001 11000 01011 ..... ..... @vv + +vbitclr_b 0111 00010000 11000 ..... ..... ..... @vvv +vbitclr_h 0111 00010000 11001 ..... ..... ..... @vvv +vbitclr_w 0111 00010000 11010 ..... ..... ..... @vvv +vbitclr_d 0111 00010000 11011 ..... ..... ..... @vvv +vbitclri_b 0111 00110001 00000 01 ... ..... ..... @vv_ui3 +vbitclri_h 0111 00110001 00000 1 .... ..... ..... @vv_ui4 +vbitclri_w 0111 00110001 00001 ..... ..... ..... @vv_ui5 +vbitclri_d 0111 00110001 0001 ...... ..... ..... @vv_ui6 +vbitset_b 0111 00010000 11100 ..... ..... ..... @vvv +vbitset_h 0111 00010000 11101 ..... ..... ..... @vvv +vbitset_w 0111 00010000 11110 ..... ..... ..... @vvv +vbitset_d 0111 00010000 11111 ..... ..... ..... @vvv +vbitseti_b 0111 00110001 01000 01 ... ..... ..... @vv_ui3 +vbitseti_h 0111 00110001 01000 1 .... ..... ..... @vv_ui4 +vbitseti_w 0111 00110001 01001 ..... ..... ..... @vv_ui5 +vbitseti_d 0111 00110001 0101 ...... ..... ..... @vv_ui6 +vbitrev_b 0111 00010001 00000 ..... ..... ..... 
@vvv +vbitrev_h 0111 00010001 00001 ..... ..... ..... @vvv +vbitrev_w 0111 00010001 00010 ..... ..... ..... @vvv +vbitrev_d 0111 00010001 00011 ..... ..... ..... @vvv +vbitrevi_b 0111 00110001 10000 01 ... ..... ..... @vv_ui3 +vbitrevi_h 0111 00110001 10000 1 .... ..... ..... @vv_ui4 +vbitrevi_w 0111 00110001 10001 ..... ..... ..... @vv_ui5 +vbitrevi_d 0111 00110001 1001 ...... ..... ..... @vv_ui6 + +vfrstp_b 0111 00010010 10110 ..... ..... ..... @vvv +vfrstp_h 0111 00010010 10111 ..... ..... ..... @vvv +vfrstpi_b 0111 00101001 10100 ..... ..... ..... @vv_ui5 +vfrstpi_h 0111 00101001 10101 ..... ..... ..... @vv_ui5 + +vfadd_s 0111 00010011 00001 ..... ..... ..... @vvv +vfadd_d 0111 00010011 00010 ..... ..... ..... @vvv +vfsub_s 0111 00010011 00101 ..... ..... ..... @vvv +vfsub_d 0111 00010011 00110 ..... ..... ..... @vvv +vfmul_s 0111 00010011 10001 ..... ..... ..... @vvv +vfmul_d 0111 00010011 10010 ..... ..... ..... @vvv +vfdiv_s 0111 00010011 10101 ..... ..... ..... @vvv +vfdiv_d 0111 00010011 10110 ..... ..... ..... @vvv + +vfmadd_s 0000 10010001 ..... ..... ..... ..... @vvvv +vfmadd_d 0000 10010010 ..... ..... ..... ..... @vvvv +vfmsub_s 0000 10010101 ..... ..... ..... ..... @vvvv +vfmsub_d 0000 10010110 ..... ..... ..... ..... @vvvv +vfnmadd_s 0000 10011001 ..... ..... ..... ..... @vvvv +vfnmadd_d 0000 10011010 ..... ..... ..... ..... @vvvv +vfnmsub_s 0000 10011101 ..... ..... ..... ..... @vvvv +vfnmsub_d 0000 10011110 ..... ..... ..... ..... @vvvv + +vfmax_s 0111 00010011 11001 ..... ..... ..... @vvv +vfmax_d 0111 00010011 11010 ..... ..... ..... @vvv +vfmin_s 0111 00010011 11101 ..... ..... ..... @vvv +vfmin_d 0111 00010011 11110 ..... ..... ..... @vvv + +vfmaxa_s 0111 00010100 00001 ..... ..... ..... @vvv +vfmaxa_d 0111 00010100 00010 ..... ..... ..... @vvv +vfmina_s 0111 00010100 00101 ..... ..... ..... @vvv +vfmina_d 0111 00010100 00110 ..... ..... ..... @vvv + +vflogb_s 0111 00101001 11001 10001 ..... ..... @vv +vflogb_d 0111 00101001 11001 10010 ..... ..... @vv + +vfclass_s 0111 00101001 11001 10101 ..... ..... @vv +vfclass_d 0111 00101001 11001 10110 ..... ..... @vv + +vfsqrt_s 0111 00101001 11001 11001 ..... ..... @vv +vfsqrt_d 0111 00101001 11001 11010 ..... ..... @vv +vfrecip_s 0111 00101001 11001 11101 ..... ..... @vv +vfrecip_d 0111 00101001 11001 11110 ..... ..... @vv +vfrsqrt_s 0111 00101001 11010 00001 ..... ..... @vv +vfrsqrt_d 0111 00101001 11010 00010 ..... ..... @vv + +vfcvtl_s_h 0111 00101001 11011 11010 ..... ..... @vv +vfcvth_s_h 0111 00101001 11011 11011 ..... ..... @vv +vfcvtl_d_s 0111 00101001 11011 11100 ..... ..... @vv +vfcvth_d_s 0111 00101001 11011 11101 ..... ..... @vv +vfcvt_h_s 0111 00010100 01100 ..... ..... ..... @vvv +vfcvt_s_d 0111 00010100 01101 ..... ..... ..... @vvv + +vfrint_s 0111 00101001 11010 01101 ..... ..... @vv +vfrint_d 0111 00101001 11010 01110 ..... ..... @vv +vfrintrm_s 0111 00101001 11010 10001 ..... ..... @vv +vfrintrm_d 0111 00101001 11010 10010 ..... ..... @vv +vfrintrp_s 0111 00101001 11010 10101 ..... ..... @vv +vfrintrp_d 0111 00101001 11010 10110 ..... ..... @vv +vfrintrz_s 0111 00101001 11010 11001 ..... ..... @vv +vfrintrz_d 0111 00101001 11010 11010 ..... ..... @vv +vfrintrne_s 0111 00101001 11010 11101 ..... ..... @vv +vfrintrne_d 0111 00101001 11010 11110 ..... ..... @vv + +vftint_w_s 0111 00101001 11100 01100 ..... ..... @vv +vftint_l_d 0111 00101001 11100 01101 ..... ..... @vv +vftintrm_w_s 0111 00101001 11100 01110 ..... ..... @vv +vftintrm_l_d 0111 00101001 11100 01111 ..... ..... 
@vv +vftintrp_w_s 0111 00101001 11100 10000 ..... ..... @vv +vftintrp_l_d 0111 00101001 11100 10001 ..... ..... @vv +vftintrz_w_s 0111 00101001 11100 10010 ..... ..... @vv +vftintrz_l_d 0111 00101001 11100 10011 ..... ..... @vv +vftintrne_w_s 0111 00101001 11100 10100 ..... ..... @vv +vftintrne_l_d 0111 00101001 11100 10101 ..... ..... @vv +vftint_wu_s 0111 00101001 11100 10110 ..... ..... @vv +vftint_lu_d 0111 00101001 11100 10111 ..... ..... @vv +vftintrz_wu_s 0111 00101001 11100 11100 ..... ..... @vv +vftintrz_lu_d 0111 00101001 11100 11101 ..... ..... @vv +vftint_w_d 0111 00010100 10011 ..... ..... ..... @vvv +vftintrm_w_d 0111 00010100 10100 ..... ..... ..... @vvv +vftintrp_w_d 0111 00010100 10101 ..... ..... ..... @vvv +vftintrz_w_d 0111 00010100 10110 ..... ..... ..... @vvv +vftintrne_w_d 0111 00010100 10111 ..... ..... ..... @vvv +vftintl_l_s 0111 00101001 11101 00000 ..... ..... @vv +vftinth_l_s 0111 00101001 11101 00001 ..... ..... @vv +vftintrml_l_s 0111 00101001 11101 00010 ..... ..... @vv +vftintrmh_l_s 0111 00101001 11101 00011 ..... ..... @vv +vftintrpl_l_s 0111 00101001 11101 00100 ..... ..... @vv +vftintrph_l_s 0111 00101001 11101 00101 ..... ..... @vv +vftintrzl_l_s 0111 00101001 11101 00110 ..... ..... @vv +vftintrzh_l_s 0111 00101001 11101 00111 ..... ..... @vv +vftintrnel_l_s 0111 00101001 11101 01000 ..... ..... @vv +vftintrneh_l_s 0111 00101001 11101 01001 ..... ..... @vv + +vffint_s_w 0111 00101001 11100 00000 ..... ..... @vv +vffint_s_wu 0111 00101001 11100 00001 ..... ..... @vv +vffint_d_l 0111 00101001 11100 00010 ..... ..... @vv +vffint_d_lu 0111 00101001 11100 00011 ..... ..... @vv +vffintl_d_w 0111 00101001 11100 00100 ..... ..... @vv +vffinth_d_w 0111 00101001 11100 00101 ..... ..... @vv +vffint_s_l 0111 00010100 10000 ..... ..... ..... @vvv + +vseq_b 0111 00000000 00000 ..... ..... ..... @vvv +vseq_h 0111 00000000 00001 ..... ..... ..... @vvv +vseq_w 0111 00000000 00010 ..... ..... ..... @vvv +vseq_d 0111 00000000 00011 ..... ..... ..... @vvv +vseqi_b 0111 00101000 00000 ..... ..... ..... @vv_i5 +vseqi_h 0111 00101000 00001 ..... ..... ..... @vv_i5 +vseqi_w 0111 00101000 00010 ..... ..... ..... @vv_i5 +vseqi_d 0111 00101000 00011 ..... ..... ..... @vv_i5 + +vsle_b 0111 00000000 00100 ..... ..... ..... @vvv +vsle_h 0111 00000000 00101 ..... ..... ..... @vvv +vsle_w 0111 00000000 00110 ..... ..... ..... @vvv +vsle_d 0111 00000000 00111 ..... ..... ..... @vvv +vslei_b 0111 00101000 00100 ..... ..... ..... @vv_i5 +vslei_h 0111 00101000 00101 ..... ..... ..... @vv_i5 +vslei_w 0111 00101000 00110 ..... ..... ..... @vv_i5 +vslei_d 0111 00101000 00111 ..... ..... ..... @vv_i5 +vsle_bu 0111 00000000 01000 ..... ..... ..... @vvv +vsle_hu 0111 00000000 01001 ..... ..... ..... @vvv +vsle_wu 0111 00000000 01010 ..... ..... ..... @vvv +vsle_du 0111 00000000 01011 ..... ..... ..... @vvv +vslei_bu 0111 00101000 01000 ..... ..... ..... @vv_ui5 +vslei_hu 0111 00101000 01001 ..... ..... ..... @vv_ui5 +vslei_wu 0111 00101000 01010 ..... ..... ..... @vv_ui5 +vslei_du 0111 00101000 01011 ..... ..... ..... @vv_ui5 + +vslt_b 0111 00000000 01100 ..... ..... ..... @vvv +vslt_h 0111 00000000 01101 ..... ..... ..... @vvv +vslt_w 0111 00000000 01110 ..... ..... ..... @vvv +vslt_d 0111 00000000 01111 ..... ..... ..... @vvv +vslti_b 0111 00101000 01100 ..... ..... ..... @vv_i5 +vslti_h 0111 00101000 01101 ..... ..... ..... @vv_i5 +vslti_w 0111 00101000 01110 ..... ..... ..... @vv_i5 +vslti_d 0111 00101000 01111 ..... ..... ..... @vv_i5 +vslt_bu 0111 00000000 10000 ..... ..... ..... 
@vvv +vslt_hu 0111 00000000 10001 ..... ..... ..... @vvv +vslt_wu 0111 00000000 10010 ..... ..... ..... @vvv +vslt_du 0111 00000000 10011 ..... ..... ..... @vvv +vslti_bu 0111 00101000 10000 ..... ..... ..... @vv_ui5 +vslti_hu 0111 00101000 10001 ..... ..... ..... @vv_ui5 +vslti_wu 0111 00101000 10010 ..... ..... ..... @vv_ui5 +vslti_du 0111 00101000 10011 ..... ..... ..... @vv_ui5 + +vfcmp_cond_s 0000 11000101 ..... ..... ..... ..... @vvv_fcond +vfcmp_cond_d 0000 11000110 ..... ..... ..... ..... @vvv_fcond + +vbitsel_v 0000 11010001 ..... ..... ..... ..... @vvvv + +vbitseli_b 0111 00111100 01 ........ ..... ..... @vv_ui8 + +vseteqz_v 0111 00101001 11001 00110 ..... 00 ... @cv +vsetnez_v 0111 00101001 11001 00111 ..... 00 ... @cv +vsetanyeqz_b 0111 00101001 11001 01000 ..... 00 ... @cv +vsetanyeqz_h 0111 00101001 11001 01001 ..... 00 ... @cv +vsetanyeqz_w 0111 00101001 11001 01010 ..... 00 ... @cv +vsetanyeqz_d 0111 00101001 11001 01011 ..... 00 ... @cv +vsetallnez_b 0111 00101001 11001 01100 ..... 00 ... @cv +vsetallnez_h 0111 00101001 11001 01101 ..... 00 ... @cv +vsetallnez_w 0111 00101001 11001 01110 ..... 00 ... @cv +vsetallnez_d 0111 00101001 11001 01111 ..... 00 ... @cv + +vinsgr2vr_b 0111 00101110 10111 0 .... ..... ..... @vr_ui4 +vinsgr2vr_h 0111 00101110 10111 10 ... ..... ..... @vr_ui3 +vinsgr2vr_w 0111 00101110 10111 110 .. ..... ..... @vr_ui2 +vinsgr2vr_d 0111 00101110 10111 1110 . ..... ..... @vr_ui1 +vpickve2gr_b 0111 00101110 11111 0 .... ..... ..... @rv_ui4 +vpickve2gr_h 0111 00101110 11111 10 ... ..... ..... @rv_ui3 +vpickve2gr_w 0111 00101110 11111 110 .. ..... ..... @rv_ui2 +vpickve2gr_d 0111 00101110 11111 1110 . ..... ..... @rv_ui1 +vpickve2gr_bu 0111 00101111 00111 0 .... ..... ..... @rv_ui4 +vpickve2gr_hu 0111 00101111 00111 10 ... ..... ..... @rv_ui3 +vpickve2gr_wu 0111 00101111 00111 110 .. ..... ..... @rv_ui2 +vpickve2gr_du 0111 00101111 00111 1110 . ..... ..... @rv_ui1 + +vreplgr2vr_b 0111 00101001 11110 00000 ..... ..... @vr +vreplgr2vr_h 0111 00101001 11110 00001 ..... ..... @vr +vreplgr2vr_w 0111 00101001 11110 00010 ..... ..... @vr +vreplgr2vr_d 0111 00101001 11110 00011 ..... ..... @vr + +vreplve_b 0111 00010010 00100 ..... ..... ..... @vvr +vreplve_h 0111 00010010 00101 ..... ..... ..... @vvr +vreplve_w 0111 00010010 00110 ..... ..... ..... @vvr +vreplve_d 0111 00010010 00111 ..... ..... ..... @vvr +vreplvei_b 0111 00101111 01111 0 .... ..... ..... @vv_ui4 +vreplvei_h 0111 00101111 01111 10 ... ..... ..... @vv_ui3 +vreplvei_w 0111 00101111 01111 110 .. ..... ..... @vv_ui2 +vreplvei_d 0111 00101111 01111 1110 . ..... ..... @vv_ui1 + +vbsll_v 0111 00101000 11100 ..... ..... ..... @vv_ui5 +vbsrl_v 0111 00101000 11101 ..... ..... ..... @vv_ui5 + +vpackev_b 0111 00010001 01100 ..... ..... ..... @vvv +vpackev_h 0111 00010001 01101 ..... ..... ..... @vvv +vpackev_w 0111 00010001 01110 ..... ..... ..... @vvv +vpackev_d 0111 00010001 01111 ..... ..... ..... @vvv +vpackod_b 0111 00010001 10000 ..... ..... ..... @vvv +vpackod_h 0111 00010001 10001 ..... ..... ..... @vvv +vpackod_w 0111 00010001 10010 ..... ..... ..... @vvv +vpackod_d 0111 00010001 10011 ..... ..... ..... @vvv + +vpickev_b 0111 00010001 11100 ..... ..... ..... @vvv +vpickev_h 0111 00010001 11101 ..... ..... ..... @vvv +vpickev_w 0111 00010001 11110 ..... ..... ..... @vvv +vpickev_d 0111 00010001 11111 ..... ..... ..... @vvv +vpickod_b 0111 00010010 00000 ..... ..... ..... @vvv +vpickod_h 0111 00010010 00001 ..... ..... ..... @vvv +vpickod_w 0111 00010010 00010 ..... ..... ..... 
@vvv +vpickod_d 0111 00010010 00011 ..... ..... ..... @vvv + +vilvl_b 0111 00010001 10100 ..... ..... ..... @vvv +vilvl_h 0111 00010001 10101 ..... ..... ..... @vvv +vilvl_w 0111 00010001 10110 ..... ..... ..... @vvv +vilvl_d 0111 00010001 10111 ..... ..... ..... @vvv +vilvh_b 0111 00010001 11000 ..... ..... ..... @vvv +vilvh_h 0111 00010001 11001 ..... ..... ..... @vvv +vilvh_w 0111 00010001 11010 ..... ..... ..... @vvv +vilvh_d 0111 00010001 11011 ..... ..... ..... @vvv + +vshuf_b 0000 11010101 ..... ..... ..... ..... @vvvv +vshuf_h 0111 00010111 10101 ..... ..... ..... @vvv +vshuf_w 0111 00010111 10110 ..... ..... ..... @vvv +vshuf_d 0111 00010111 10111 ..... ..... ..... @vvv +vshuf4i_b 0111 00111001 00 ........ ..... ..... @vv_ui8 +vshuf4i_h 0111 00111001 01 ........ ..... ..... @vv_ui8 +vshuf4i_w 0111 00111001 10 ........ ..... ..... @vv_ui8 +vshuf4i_d 0111 00111001 11 ........ ..... ..... @vv_ui8 + +vpermi_w 0111 00111110 01 ........ ..... ..... @vv_ui8 + +vextrins_d 0111 00111000 00 ........ ..... ..... @vv_ui8 +vextrins_w 0111 00111000 01 ........ ..... ..... @vv_ui8 +vextrins_h 0111 00111000 10 ........ ..... ..... @vv_ui8 +vextrins_b 0111 00111000 11 ........ ..... ..... @vv_ui8 + +vld 0010 110000 ............ ..... ..... @vr_i12 +vst 0010 110001 ............ ..... ..... @vr_i12 +vldx 0011 10000100 00000 ..... ..... ..... @vrr +vstx 0011 10000100 01000 ..... ..... ..... @vrr + +vldrepl_d 0011 00000001 0 ......... ..... ..... @vr_i9 +vldrepl_w 0011 00000010 .......... ..... ..... @vr_i10 +vldrepl_h 0011 0000010 ........... ..... ..... @vr_i11 +vldrepl_b 0011 000010 ............ ..... ..... @vr_i12 +vstelm_d 0011 00010001 0 . ........ ..... ..... @vr_i8i1 +vstelm_w 0011 00010010 .. ........ ..... ..... @vr_i8i2 +vstelm_h 0011 0001010 ... ........ ..... ..... @vr_i8i3 +vstelm_b 0011 000110 .... ........ ..... ..... 
@vr_i8i4 diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h index f01635aed6..7b0f29c942 100644 --- a/target/loongarch/internals.h +++ b/target/loongarch/internals.h @@ -21,6 +21,28 @@ /* Global bit for huge page */ #define LOONGARCH_HGLOBAL_SHIFT 12 +#if HOST_BIG_ENDIAN +#define B(x) B[15 - (x)] +#define H(x) H[7 - (x)] +#define W(x) W[3 - (x)] +#define D(x) D[1 - (x)] +#define UB(x) UB[15 - (x)] +#define UH(x) UH[7 - (x)] +#define UW(x) UW[3 - (x)] +#define UD(x) UD[1 -(x)] +#define Q(x) Q[x] +#else +#define B(x) B[x] +#define H(x) H[x] +#define W(x) W[x] +#define D(x) D[x] +#define UB(x) UB[x] +#define UH(x) UH[x] +#define UW(x) UW[x] +#define UD(x) UD[x] +#define Q(x) Q[x] +#endif + void loongarch_translate_init(void); void loongarch_cpu_dump_state(CPUState *cpu, FILE *f, int flags); @@ -31,6 +53,7 @@ void G_NORETURN do_raise_exception(CPULoongArchState *env, const char *loongarch_exception_name(int32_t exception); +int ieee_ex_to_loongarch(int xcpt); void restore_fp_status(CPULoongArchState *env); #ifndef CONFIG_USER_ONLY diff --git a/target/loongarch/iocsr_helper.c b/target/loongarch/iocsr_helper.c index 505853e17b..dda9845d6c 100644 --- a/target/loongarch/iocsr_helper.c +++ b/target/loongarch/iocsr_helper.c @@ -12,7 +12,6 @@ #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" -#include "tcg/tcg-ldst.h" #define GET_MEMTXATTRS(cas) \ ((MemTxAttrs){.requester_id = env_cpu(cas)->cpu_index}) diff --git a/target/loongarch/loongarch-qmp-cmds.c b/target/loongarch/loongarch-qmp-cmds.c new file mode 100644 index 0000000000..6c25957881 --- /dev/null +++ b/target/loongarch/loongarch-qmp-cmds.c @@ -0,0 +1,37 @@ +/* + * QEMU LoongArch CPU (monitor definitions) + * + * SPDX-FileCopyrightText: 2021 Loongson Technology Corporation Limited + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/qapi-commands-machine-target.h" +#include "cpu.h" + +static void loongarch_cpu_add_definition(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + CpuDefinitionInfoList **cpu_list = user_data; + CpuDefinitionInfo *info = g_new0(CpuDefinitionInfo, 1); + const char *typename = object_class_get_name(oc); + + info->name = g_strndup(typename, + strlen(typename) - strlen("-" TYPE_LOONGARCH_CPU)); + info->q_typename = g_strdup(typename); + + QAPI_LIST_PREPEND(*cpu_list, info); +} + +CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) +{ + CpuDefinitionInfoList *cpu_list = NULL; + GSList *list; + + list = object_class_get_list(TYPE_LOONGARCH_CPU, false); + g_slist_foreach(list, loongarch_cpu_add_definition, &cpu_list); + g_slist_free(list); + + return cpu_list; +} diff --git a/target/loongarch/lsx_helper.c b/target/loongarch/lsx_helper.c new file mode 100644 index 0000000000..9571f0aef0 --- /dev/null +++ b/target/loongarch/lsx_helper.c @@ -0,0 +1,3004 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch LSX helper functions. 
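/*
 * Editorial note (illustration, not part of the patch): the element
 * accessor macros added to internals.h above exist because the VReg
 * union is stored in host byte order. On a big-endian host, the byte
 * that LSX numbers as element 0 lives at the highest array index, so
 * the B/H/W/D accessors mirror the index (e.g. B(x) -> B[15 - (x)]),
 * letting the helpers below be written once for both host endiannesses.
 * A minimal sketch of the same idea, using a hypothetical vreg_byte()
 * helper (not from the patch):
 *
 *     #include <stdint.h>
 *
 *     typedef union { uint8_t b[16]; uint64_t d[2]; } vreg_t;
 *
 *     static uint8_t vreg_byte(const vreg_t *v, int x)
 *     {
 *     #if HOST_BIG_ENDIAN
 *         return v->b[15 - x];   // mirrored: element 0 is the last byte
 *     #else
 *         return v->b[x];        // little-endian hosts index directly
 *     #endif
 *     }
 */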
+ * + * Copyright (c) 2022-2023 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "fpu/softfloat.h" +#include "internals.h" +#include "tcg/tcg.h" + +#define DO_ADD(a, b) (a + b) +#define DO_SUB(a, b) (a - b) + +#define DO_ODD_EVEN(NAME, BIT, E1, E2, DO_OP) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + typedef __typeof(Vd->E1(0)) TD; \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i)); \ + } \ +} + +DO_ODD_EVEN(vhaddw_h_b, 16, H, B, DO_ADD) +DO_ODD_EVEN(vhaddw_w_h, 32, W, H, DO_ADD) +DO_ODD_EVEN(vhaddw_d_w, 64, D, W, DO_ADD) + +void HELPER(vhaddw_q_d)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t vk) +{ + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + VReg *Vk = &(env->fpr[vk].vreg); + + Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0))); +} + +DO_ODD_EVEN(vhsubw_h_b, 16, H, B, DO_SUB) +DO_ODD_EVEN(vhsubw_w_h, 32, W, H, DO_SUB) +DO_ODD_EVEN(vhsubw_d_w, 64, D, W, DO_SUB) + +void HELPER(vhsubw_q_d)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t vk) +{ + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + VReg *Vk = &(env->fpr[vk].vreg); + + Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0))); +} + +DO_ODD_EVEN(vhaddw_hu_bu, 16, UH, UB, DO_ADD) +DO_ODD_EVEN(vhaddw_wu_hu, 32, UW, UH, DO_ADD) +DO_ODD_EVEN(vhaddw_du_wu, 64, UD, UW, DO_ADD) + +void HELPER(vhaddw_qu_du)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t vk) +{ + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + VReg *Vk = &(env->fpr[vk].vreg); + + Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)), + int128_make64((uint64_t)Vk->D(0))); +} + +DO_ODD_EVEN(vhsubw_hu_bu, 16, UH, UB, DO_SUB) +DO_ODD_EVEN(vhsubw_wu_hu, 32, UW, UH, DO_SUB) +DO_ODD_EVEN(vhsubw_du_wu, 64, UD, UW, DO_SUB) + +void HELPER(vhsubw_qu_du)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t vk) +{ + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + VReg *Vk = &(env->fpr[vk].vreg); + + Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)), + int128_make64((uint64_t)Vk->D(0))); +} + +#define DO_EVEN(NAME, BIT, E1, E2, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + typedef __typeof(Vd->E1(0)) TD; \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i) ,(TD)Vk->E2(2 * i)); \ + } \ +} + +#define DO_ODD(NAME, BIT, E1, E2, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + typedef __typeof(Vd->E1(0)) TD; \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i + 1)); \ + } \ +} + +void HELPER(vaddwev_q_d)(void *vd, void *vj, void *vk, uint32_t v) +{ + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + VReg *Vk = (VReg *)vk; + + Vd->Q(0) = int128_add(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0))); +} + +DO_EVEN(vaddwev_h_b, 16, H, B, DO_ADD) +DO_EVEN(vaddwev_w_h, 32, W, H, DO_ADD) +DO_EVEN(vaddwev_d_w, 64, D, W, DO_ADD) + +void 
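/*
 * Editorial note (illustration, not part of the patch): DO_ODD_EVEN
 * implements the horizontal widening pattern -- result lane i combines
 * the odd source lane 2*i+1 of Vj with the even lane 2*i of Vk, each
 * cast to the wider destination type TD before the operation so the
 * add or subtract cannot overflow. DO_EVEN and DO_ODD take both
 * operands from the same parity instead. For the q_d forms there is no
 * C type wider than 128 bits, so those helpers use the Int128 API:
 * int128_makes64() sign-extends a 64-bit value while int128_make64()
 * zero-extends it, which is why the signed helpers use the former and
 * the unsigned ones cast through uint64_t and use the latter. A scalar
 * sketch of one vaddwev_h_b lane:
 *
 *     int16_t addwev_h_b_lane(int8_t j_even, int8_t k_even)
 *     {
 *         return (int16_t)j_even + (int16_t)k_even;  // widen, then add
 *     }
 */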
HELPER(vaddwod_q_d)(void *vd, void *vj, void *vk, uint32_t v) +{ + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + VReg *Vk = (VReg *)vk; + + Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1))); +} + +DO_ODD(vaddwod_h_b, 16, H, B, DO_ADD) +DO_ODD(vaddwod_w_h, 32, W, H, DO_ADD) +DO_ODD(vaddwod_d_w, 64, D, W, DO_ADD) + +void HELPER(vsubwev_q_d)(void *vd, void *vj, void *vk, uint32_t v) +{ + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + VReg *Vk = (VReg *)vk; + + Vd->Q(0) = int128_sub(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0))); +} + +DO_EVEN(vsubwev_h_b, 16, H, B, DO_SUB) +DO_EVEN(vsubwev_w_h, 32, W, H, DO_SUB) +DO_EVEN(vsubwev_d_w, 64, D, W, DO_SUB) + +void HELPER(vsubwod_q_d)(void *vd, void *vj, void *vk, uint32_t v) +{ + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + VReg *Vk = (VReg *)vk; + + Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1))); +} + +DO_ODD(vsubwod_h_b, 16, H, B, DO_SUB) +DO_ODD(vsubwod_w_h, 32, W, H, DO_SUB) +DO_ODD(vsubwod_d_w, 64, D, W, DO_SUB) + +void HELPER(vaddwev_q_du)(void *vd, void *vj, void *vk, uint32_t v) +{ + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + VReg *Vk = (VReg *)vk; + + Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)), + int128_make64((uint64_t)Vk->D(0))); +} + +DO_EVEN(vaddwev_h_bu, 16, UH, UB, DO_ADD) +DO_EVEN(vaddwev_w_hu, 32, UW, UH, DO_ADD) +DO_EVEN(vaddwev_d_wu, 64, UD, UW, DO_ADD) + +void HELPER(vaddwod_q_du)(void *vd, void *vj, void *vk, uint32_t v) +{ + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + VReg *Vk = (VReg *)vk; + + Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)), + int128_make64((uint64_t)Vk->D(1))); +} + +DO_ODD(vaddwod_h_bu, 16, UH, UB, DO_ADD) +DO_ODD(vaddwod_w_hu, 32, UW, UH, DO_ADD) +DO_ODD(vaddwod_d_wu, 64, UD, UW, DO_ADD) + +void HELPER(vsubwev_q_du)(void *vd, void *vj, void *vk, uint32_t v) +{ + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + VReg *Vk = (VReg *)vk; + + Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(0)), + int128_make64((uint64_t)Vk->D(0))); +} + +DO_EVEN(vsubwev_h_bu, 16, UH, UB, DO_SUB) +DO_EVEN(vsubwev_w_hu, 32, UW, UH, DO_SUB) +DO_EVEN(vsubwev_d_wu, 64, UD, UW, DO_SUB) + +void HELPER(vsubwod_q_du)(void *vd, void *vj, void *vk, uint32_t v) +{ + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + VReg *Vk = (VReg *)vk; + + Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)), + int128_make64((uint64_t)Vk->D(1))); +} + +DO_ODD(vsubwod_h_bu, 16, UH, UB, DO_SUB) +DO_ODD(vsubwod_w_hu, 32, UW, UH, DO_SUB) +DO_ODD(vsubwod_d_wu, 64, UD, UW, DO_SUB) + +#define DO_EVEN_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + typedef __typeof(Vd->ES1(0)) TDS; \ + typedef __typeof(Vd->EU1(0)) TDU; \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i) ,(TDS)Vk->ES2(2 * i)); \ + } \ +} + +#define DO_ODD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + typedef __typeof(Vd->ES1(0)) TDS; \ + typedef __typeof(Vd->EU1(0)) TDU; \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i + 1), (TDS)Vk->ES2(2 * i + 1)); \ + } \ +} + +void HELPER(vaddwev_q_du_d)(void *vd, void *vj, void *vk, uint32_t v) +{ + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + VReg *Vk = (VReg *)vk; + + 
Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)), + int128_makes64(Vk->D(0))); +} + +DO_EVEN_U_S(vaddwev_h_bu_b, 16, H, UH, B, UB, DO_ADD) +DO_EVEN_U_S(vaddwev_w_hu_h, 32, W, UW, H, UH, DO_ADD) +DO_EVEN_U_S(vaddwev_d_wu_w, 64, D, UD, W, UW, DO_ADD) + +void HELPER(vaddwod_q_du_d)(void *vd, void *vj, void *vk, uint32_t v) +{ + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + VReg *Vk = (VReg *)vk; + + Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)), + int128_makes64(Vk->D(1))); +} + +DO_ODD_U_S(vaddwod_h_bu_b, 16, H, UH, B, UB, DO_ADD) +DO_ODD_U_S(vaddwod_w_hu_h, 32, W, UW, H, UH, DO_ADD) +DO_ODD_U_S(vaddwod_d_wu_w, 64, D, UD, W, UW, DO_ADD) + +#define DO_VAVG(a, b) ((a >> 1) + (b >> 1) + (a & b & 1)) +#define DO_VAVGR(a, b) ((a >> 1) + (b >> 1) + ((a | b) & 1)) + +#define DO_3OP(NAME, BIT, E, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i)); \ + } \ +} + +DO_3OP(vavg_b, 8, B, DO_VAVG) +DO_3OP(vavg_h, 16, H, DO_VAVG) +DO_3OP(vavg_w, 32, W, DO_VAVG) +DO_3OP(vavg_d, 64, D, DO_VAVG) +DO_3OP(vavgr_b, 8, B, DO_VAVGR) +DO_3OP(vavgr_h, 16, H, DO_VAVGR) +DO_3OP(vavgr_w, 32, W, DO_VAVGR) +DO_3OP(vavgr_d, 64, D, DO_VAVGR) +DO_3OP(vavg_bu, 8, UB, DO_VAVG) +DO_3OP(vavg_hu, 16, UH, DO_VAVG) +DO_3OP(vavg_wu, 32, UW, DO_VAVG) +DO_3OP(vavg_du, 64, UD, DO_VAVG) +DO_3OP(vavgr_bu, 8, UB, DO_VAVGR) +DO_3OP(vavgr_hu, 16, UH, DO_VAVGR) +DO_3OP(vavgr_wu, 32, UW, DO_VAVGR) +DO_3OP(vavgr_du, 64, UD, DO_VAVGR) + +#define DO_VABSD(a, b) ((a > b) ? (a -b) : (b-a)) + +DO_3OP(vabsd_b, 8, B, DO_VABSD) +DO_3OP(vabsd_h, 16, H, DO_VABSD) +DO_3OP(vabsd_w, 32, W, DO_VABSD) +DO_3OP(vabsd_d, 64, D, DO_VABSD) +DO_3OP(vabsd_bu, 8, UB, DO_VABSD) +DO_3OP(vabsd_hu, 16, UH, DO_VABSD) +DO_3OP(vabsd_wu, 32, UW, DO_VABSD) +DO_3OP(vabsd_du, 64, UD, DO_VABSD) + +#define DO_VABS(a) ((a < 0) ? (-a) : (a)) + +#define DO_VADDA(NAME, BIT, E, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = DO_OP(Vj->E(i)) + DO_OP(Vk->E(i)); \ + } \ +} + +DO_VADDA(vadda_b, 8, B, DO_VABS) +DO_VADDA(vadda_h, 16, H, DO_VABS) +DO_VADDA(vadda_w, 32, W, DO_VABS) +DO_VADDA(vadda_d, 64, D, DO_VABS) + +#define DO_MIN(a, b) (a < b ? a : b) +#define DO_MAX(a, b) (a > b ? 
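/*
 * Editorial note (illustration, not part of the patch): DO_VAVG and
 * DO_VAVGR average two lanes without needing a wider type. Shifting
 * each operand right first halves it without overflow; the dropped low
 * bits are then compensated: (a & b & 1) adds 1 only when both low
 * bits were set (truncating average), while ((a | b) & 1) adds 1 when
 * either was set (rounding average). Worked example with a = 5, b = 6:
 *
 *     (5 >> 1) + (6 >> 1) + (5 & 6 & 1)   == 2 + 3 + 0 == 5  // floor(5.5)
 *     (5 >> 1) + (6 >> 1) + ((5 | 6) & 1) == 2 + 3 + 1 == 6  // round(5.5)
 */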
a : b) + +#define VMINMAXI(NAME, BIT, E, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + typedef __typeof(Vd->E(0)) TD; \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = DO_OP(Vj->E(i), (TD)imm); \ + } \ +} + +VMINMAXI(vmini_b, 8, B, DO_MIN) +VMINMAXI(vmini_h, 16, H, DO_MIN) +VMINMAXI(vmini_w, 32, W, DO_MIN) +VMINMAXI(vmini_d, 64, D, DO_MIN) +VMINMAXI(vmaxi_b, 8, B, DO_MAX) +VMINMAXI(vmaxi_h, 16, H, DO_MAX) +VMINMAXI(vmaxi_w, 32, W, DO_MAX) +VMINMAXI(vmaxi_d, 64, D, DO_MAX) +VMINMAXI(vmini_bu, 8, UB, DO_MIN) +VMINMAXI(vmini_hu, 16, UH, DO_MIN) +VMINMAXI(vmini_wu, 32, UW, DO_MIN) +VMINMAXI(vmini_du, 64, UD, DO_MIN) +VMINMAXI(vmaxi_bu, 8, UB, DO_MAX) +VMINMAXI(vmaxi_hu, 16, UH, DO_MAX) +VMINMAXI(vmaxi_wu, 32, UW, DO_MAX) +VMINMAXI(vmaxi_du, 64, UD, DO_MAX) + +#define DO_VMUH(NAME, BIT, E1, E2, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + typedef __typeof(Vd->E1(0)) T; \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E2(i) = ((T)Vj->E2(i)) * ((T)Vk->E2(i)) >> BIT; \ + } \ +} + +void HELPER(vmuh_d)(void *vd, void *vj, void *vk, uint32_t v) +{ + uint64_t l, h1, h2; + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + VReg *Vk = (VReg *)vk; + + muls64(&l, &h1, Vj->D(0), Vk->D(0)); + muls64(&l, &h2, Vj->D(1), Vk->D(1)); + + Vd->D(0) = h1; + Vd->D(1) = h2; +} + +DO_VMUH(vmuh_b, 8, H, B, DO_MUH) +DO_VMUH(vmuh_h, 16, W, H, DO_MUH) +DO_VMUH(vmuh_w, 32, D, W, DO_MUH) + +void HELPER(vmuh_du)(void *vd, void *vj, void *vk, uint32_t v) +{ + uint64_t l, h1, h2; + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + VReg *Vk = (VReg *)vk; + + mulu64(&l, &h1, Vj->D(0), Vk->D(0)); + mulu64(&l, &h2, Vj->D(1), Vk->D(1)); + + Vd->D(0) = h1; + Vd->D(1) = h2; +} + +DO_VMUH(vmuh_bu, 8, UH, UB, DO_MUH) +DO_VMUH(vmuh_hu, 16, UW, UH, DO_MUH) +DO_VMUH(vmuh_wu, 32, UD, UW, DO_MUH) + +#define DO_MUL(a, b) (a * b) + +DO_EVEN(vmulwev_h_b, 16, H, B, DO_MUL) +DO_EVEN(vmulwev_w_h, 32, W, H, DO_MUL) +DO_EVEN(vmulwev_d_w, 64, D, W, DO_MUL) + +DO_ODD(vmulwod_h_b, 16, H, B, DO_MUL) +DO_ODD(vmulwod_w_h, 32, W, H, DO_MUL) +DO_ODD(vmulwod_d_w, 64, D, W, DO_MUL) + +DO_EVEN(vmulwev_h_bu, 16, UH, UB, DO_MUL) +DO_EVEN(vmulwev_w_hu, 32, UW, UH, DO_MUL) +DO_EVEN(vmulwev_d_wu, 64, UD, UW, DO_MUL) + +DO_ODD(vmulwod_h_bu, 16, UH, UB, DO_MUL) +DO_ODD(vmulwod_w_hu, 32, UW, UH, DO_MUL) +DO_ODD(vmulwod_d_wu, 64, UD, UW, DO_MUL) + +DO_EVEN_U_S(vmulwev_h_bu_b, 16, H, UH, B, UB, DO_MUL) +DO_EVEN_U_S(vmulwev_w_hu_h, 32, W, UW, H, UH, DO_MUL) +DO_EVEN_U_S(vmulwev_d_wu_w, 64, D, UD, W, UW, DO_MUL) + +DO_ODD_U_S(vmulwod_h_bu_b, 16, H, UH, B, UB, DO_MUL) +DO_ODD_U_S(vmulwod_w_hu_h, 32, W, UW, H, UH, DO_MUL) +DO_ODD_U_S(vmulwod_d_wu_w, 64, D, UD, W, UW, DO_MUL) + +#define DO_MADD(a, b, c) (a + b * c) +#define DO_MSUB(a, b, c) (a - b * c) + +#define VMADDSUB(NAME, BIT, E, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = DO_OP(Vd->E(i), Vj->E(i) ,Vk->E(i)); \ + } \ +} + +VMADDSUB(vmadd_b, 8, B, DO_MADD) +VMADDSUB(vmadd_h, 16, H, DO_MADD) +VMADDSUB(vmadd_w, 32, W, DO_MADD) +VMADDSUB(vmadd_d, 64, D, DO_MADD) +VMADDSUB(vmsub_b, 8, B, DO_MSUB) +VMADDSUB(vmsub_h, 16, H, DO_MSUB) +VMADDSUB(vmsub_w, 32, W, DO_MSUB) +VMADDSUB(vmsub_d, 64, D, DO_MSUB) + +#define 
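/*
 * Editorial note (illustration, not part of the patch): the vmuh_*
 * helpers return only the high half of the double-width product. For
 * lanes up to 32 bits, DO_VMUH widens to type T, multiplies, and
 * shifts right by BIT; for 64-bit lanes no wider C type is guaranteed,
 * so vmuh_d and vmuh_du call QEMU's muls64() and mulu64(), which yield
 * the 128-bit product as a low/high pair (the low half l is simply
 * discarded). An equivalent sketch, assuming a compiler with __int128:
 *
 *     uint64_t muh_du_lane(uint64_t a, uint64_t b)
 *     {
 *         return (uint64_t)(((unsigned __int128)a * b) >> 64);
 *     }
 */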
VMADDWEV(NAME, BIT, E1, E2, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + typedef __typeof(Vd->E1(0)) TD; \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \ + } \ +} + +VMADDWEV(vmaddwev_h_b, 16, H, B, DO_MUL) +VMADDWEV(vmaddwev_w_h, 32, W, H, DO_MUL) +VMADDWEV(vmaddwev_d_w, 64, D, W, DO_MUL) +VMADDWEV(vmaddwev_h_bu, 16, UH, UB, DO_MUL) +VMADDWEV(vmaddwev_w_hu, 32, UW, UH, DO_MUL) +VMADDWEV(vmaddwev_d_wu, 64, UD, UW, DO_MUL) + +#define VMADDWOD(NAME, BIT, E1, E2, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + typedef __typeof(Vd->E1(0)) TD; \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i + 1), \ + (TD)Vk->E2(2 * i + 1)); \ + } \ +} + +VMADDWOD(vmaddwod_h_b, 16, H, B, DO_MUL) +VMADDWOD(vmaddwod_w_h, 32, W, H, DO_MUL) +VMADDWOD(vmaddwod_d_w, 64, D, W, DO_MUL) +VMADDWOD(vmaddwod_h_bu, 16, UH, UB, DO_MUL) +VMADDWOD(vmaddwod_w_hu, 32, UW, UH, DO_MUL) +VMADDWOD(vmaddwod_d_wu, 64, UD, UW, DO_MUL) + +#define VMADDWEV_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + typedef __typeof(Vd->ES1(0)) TS1; \ + typedef __typeof(Vd->EU1(0)) TU1; \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i), \ + (TS1)Vk->ES2(2 * i)); \ + } \ +} + +VMADDWEV_U_S(vmaddwev_h_bu_b, 16, H, UH, B, UB, DO_MUL) +VMADDWEV_U_S(vmaddwev_w_hu_h, 32, W, UW, H, UH, DO_MUL) +VMADDWEV_U_S(vmaddwev_d_wu_w, 64, D, UD, W, UW, DO_MUL) + +#define VMADDWOD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + typedef __typeof(Vd->ES1(0)) TS1; \ + typedef __typeof(Vd->EU1(0)) TU1; \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i + 1), \ + (TS1)Vk->ES2(2 * i + 1)); \ + } \ +} + +VMADDWOD_U_S(vmaddwod_h_bu_b, 16, H, UH, B, UB, DO_MUL) +VMADDWOD_U_S(vmaddwod_w_hu_h, 32, W, UW, H, UH, DO_MUL) +VMADDWOD_U_S(vmaddwod_d_wu_w, 64, D, UD, W, UW, DO_MUL) + +#define DO_DIVU(N, M) (unlikely(M == 0) ? 0 : N / M) +#define DO_REMU(N, M) (unlikely(M == 0) ? 0 : N % M) +#define DO_DIV(N, M) (unlikely(M == 0) ? 0 :\ + unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M) +#define DO_REM(N, M) (unlikely(M == 0) ? 0 :\ + unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 
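/*
 * Editorial note (illustration, not part of the patch): the ?: guards
 * in DO_DIV and DO_REM make vector division fully defined where C
 * leaves it undefined. M == 0 yields 0 for both quotient and
 * remainder. The (N == -N) test is true only for 0 and for the minimum
 * signed value (INT*_MIN has no representable positive negation in
 * two's complement), so combined with M == -1 it catches the
 * INT_MIN / -1 overflow: DO_DIV returns N and DO_REM returns 0 there
 * (the N == 0 case is harmless, since 0 / -1 is 0 anyway). With 8-bit
 * lanes, for example:
 *
 *     DO_DIV(-128, -1) == -128   // instead of UB on the host
 *     DO_REM(-128, -1) ==    0
 *     DO_DIV(x, 0)     ==    0   // for any x
 */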
0 : N % M) + +#define VDIV(NAME, BIT, E, DO_OP) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i)); \ + } \ +} + +VDIV(vdiv_b, 8, B, DO_DIV) +VDIV(vdiv_h, 16, H, DO_DIV) +VDIV(vdiv_w, 32, W, DO_DIV) +VDIV(vdiv_d, 64, D, DO_DIV) +VDIV(vdiv_bu, 8, UB, DO_DIVU) +VDIV(vdiv_hu, 16, UH, DO_DIVU) +VDIV(vdiv_wu, 32, UW, DO_DIVU) +VDIV(vdiv_du, 64, UD, DO_DIVU) +VDIV(vmod_b, 8, B, DO_REM) +VDIV(vmod_h, 16, H, DO_REM) +VDIV(vmod_w, 32, W, DO_REM) +VDIV(vmod_d, 64, D, DO_REM) +VDIV(vmod_bu, 8, UB, DO_REMU) +VDIV(vmod_hu, 16, UH, DO_REMU) +VDIV(vmod_wu, 32, UW, DO_REMU) +VDIV(vmod_du, 64, UD, DO_REMU) + +#define VSAT_S(NAME, BIT, E) \ +void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + typedef __typeof(Vd->E(0)) TD; \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : \ + Vj->E(i) < (TD)~max ? (TD)~max: Vj->E(i); \ + } \ +} + +VSAT_S(vsat_b, 8, B) +VSAT_S(vsat_h, 16, H) +VSAT_S(vsat_w, 32, W) +VSAT_S(vsat_d, 64, D) + +#define VSAT_U(NAME, BIT, E) \ +void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + typedef __typeof(Vd->E(0)) TD; \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : Vj->E(i); \ + } \ +} + +VSAT_U(vsat_bu, 8, UB) +VSAT_U(vsat_hu, 16, UH) +VSAT_U(vsat_wu, 32, UW) +VSAT_U(vsat_du, 64, UD) + +#define VEXTH(NAME, BIT, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = Vj->E2(i + LSX_LEN/BIT); \ + } \ +} + +void HELPER(vexth_q_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + Vd->Q(0) = int128_makes64(Vj->D(1)); +} + +void HELPER(vexth_qu_du)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + Vd->Q(0) = int128_make64((uint64_t)Vj->D(1)); +} + +VEXTH(vexth_h_b, 16, H, B) +VEXTH(vexth_w_h, 32, W, H) +VEXTH(vexth_d_w, 64, D, W) +VEXTH(vexth_hu_bu, 16, UH, UB) +VEXTH(vexth_wu_hu, 32, UW, UH) +VEXTH(vexth_du_wu, 64, UD, UW) + +#define DO_SIGNCOV(a, b) (a == 0 ? 0 : a < 0 ? 
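/*
 * Editorial note (illustration, not part of the patch): VSAT_S only
 * receives the upper bound max, and derives the matching lower bound
 * for free as ~max, because in two's complement ~max == -(max + 1).
 * VSAT_U needs only the upper clamp. For instance, clamping byte lanes
 * to a 4-bit signed range:
 *
 *     max  = (1 << 3) - 1 ==  7
 *     ~max                == -8
 *     // every lane is saturated into [-8, 7]
 */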
-b : b) + +DO_3OP(vsigncov_b, 8, B, DO_SIGNCOV) +DO_3OP(vsigncov_h, 16, H, DO_SIGNCOV) +DO_3OP(vsigncov_w, 32, W, DO_SIGNCOV) +DO_3OP(vsigncov_d, 64, D, DO_SIGNCOV) + +static uint64_t do_vmskltz_b(int64_t val) +{ + uint64_t m = 0x8080808080808080ULL; + uint64_t c = val & m; + c |= c << 7; + c |= c << 14; + c |= c << 28; + return c >> 56; +} + +void HELPER(vmskltz_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + uint16_t temp = 0; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + temp = do_vmskltz_b(Vj->D(0)); + temp |= (do_vmskltz_b(Vj->D(1)) << 8); + Vd->D(0) = temp; + Vd->D(1) = 0; +} + +static uint64_t do_vmskltz_h(int64_t val) +{ + uint64_t m = 0x8000800080008000ULL; + uint64_t c = val & m; + c |= c << 15; + c |= c << 30; + return c >> 60; +} + +void HELPER(vmskltz_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + uint16_t temp = 0; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + temp = do_vmskltz_h(Vj->D(0)); + temp |= (do_vmskltz_h(Vj->D(1)) << 4); + Vd->D(0) = temp; + Vd->D(1) = 0; +} + +static uint64_t do_vmskltz_w(int64_t val) +{ + uint64_t m = 0x8000000080000000ULL; + uint64_t c = val & m; + c |= c << 31; + return c >> 62; +} + +void HELPER(vmskltz_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + uint16_t temp = 0; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + temp = do_vmskltz_w(Vj->D(0)); + temp |= (do_vmskltz_w(Vj->D(1)) << 2); + Vd->D(0) = temp; + Vd->D(1) = 0; +} + +static uint64_t do_vmskltz_d(int64_t val) +{ + return (uint64_t)val >> 63; +} +void HELPER(vmskltz_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + uint16_t temp = 0; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + temp = do_vmskltz_d(Vj->D(0)); + temp |= (do_vmskltz_d(Vj->D(1)) << 1); + Vd->D(0) = temp; + Vd->D(1) = 0; +} + +void HELPER(vmskgez_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + uint16_t temp = 0; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + temp = do_vmskltz_b(Vj->D(0)); + temp |= (do_vmskltz_b(Vj->D(1)) << 8); + Vd->D(0) = (uint16_t)(~temp); + Vd->D(1) = 0; +} + +static uint64_t do_vmskez_b(uint64_t a) +{ + uint64_t m = 0x7f7f7f7f7f7f7f7fULL; + uint64_t c = ~(((a & m) + m) | a | m); + c |= c << 7; + c |= c << 14; + c |= c << 28; + return c >> 56; +} + +void HELPER(vmsknz_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + uint16_t temp = 0; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + temp = do_vmskez_b(Vj->D(0)); + temp |= (do_vmskez_b(Vj->D(1)) << 8); + Vd->D(0) = (uint16_t)(~temp); + Vd->D(1) = 0; +} + +void HELPER(vnori_b)(void *vd, void *vj, uint64_t imm, uint32_t v) +{ + int i; + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + + for (i = 0; i < LSX_LEN/8; i++) { + Vd->B(i) = ~(Vj->B(i) | (uint8_t)imm); + } +} + +#define VSLLWIL(NAME, BIT, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + typedef __typeof(temp.E1(0)) TD; \ + \ + temp.D(0) = 0; \ + temp.D(1) = 0; \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E1(i) = (TD)Vj->E2(i) << (imm % BIT); \ + } \ + *Vd = temp; \ +} + +void HELPER(vextl_q_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + Vd->Q(0) = int128_makes64(Vj->D(0)); +} + +void HELPER(vextl_qu_du)(CPULoongArchState *env, uint32_t vd, 
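/*
 * Editorial note (illustration, not part of the patch): do_vmskltz_b
 * gathers the eight per-byte sign bits of one 64-bit half into a
 * single byte. The 0x8080808080808080 mask isolates the sign bits;
 * each OR-with-shift step (by 7, then 14, then 28) doubles the number
 * of shifted copies, so the sign bit of byte i (at position 8*i + 7)
 * picks up exactly the offset 49 - 7*i it needs to land at position
 * 56 + i, and c >> 56 reads out the lane mask. For a value whose bytes
 * 0 and 1 are negative, the result is 0x03. do_vmskez_b is the classic
 * SWAR zero-byte detector: with m = 0x7f7f7f7f7f7f7f7f,
 * ((a & m) + m) carries into bit 7 of every byte whose low seven bits
 * are non-zero, so ~(((a & m) + m) | a | m) keeps bit 7 set exactly in
 * the all-zero bytes before the same fold compacts the answer.
 */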
uint32_t vj) +{ + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + Vd->Q(0) = int128_make64(Vj->D(0)); +} + +VSLLWIL(vsllwil_h_b, 16, H, B) +VSLLWIL(vsllwil_w_h, 32, W, H) +VSLLWIL(vsllwil_d_w, 64, D, W) +VSLLWIL(vsllwil_hu_bu, 16, UH, UB) +VSLLWIL(vsllwil_wu_hu, 32, UW, UH) +VSLLWIL(vsllwil_du_wu, 64, UD, UW) + +#define do_vsrlr(E, T) \ +static T do_vsrlr_ ##E(T s1, int sh) \ +{ \ + if (sh == 0) { \ + return s1; \ + } else { \ + return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \ + } \ +} + +do_vsrlr(B, uint8_t) +do_vsrlr(H, uint16_t) +do_vsrlr(W, uint32_t) +do_vsrlr(D, uint64_t) + +#define VSRLR(NAME, BIT, T, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), ((T)Vk->E(i))%BIT); \ + } \ +} + +VSRLR(vsrlr_b, 8, uint8_t, B) +VSRLR(vsrlr_h, 16, uint16_t, H) +VSRLR(vsrlr_w, 32, uint32_t, W) +VSRLR(vsrlr_d, 64, uint64_t, D) + +#define VSRLRI(NAME, BIT, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), imm); \ + } \ +} + +VSRLRI(vsrlri_b, 8, B) +VSRLRI(vsrlri_h, 16, H) +VSRLRI(vsrlri_w, 32, W) +VSRLRI(vsrlri_d, 64, D) + +#define do_vsrar(E, T) \ +static T do_vsrar_ ##E(T s1, int sh) \ +{ \ + if (sh == 0) { \ + return s1; \ + } else { \ + return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \ + } \ +} + +do_vsrar(B, int8_t) +do_vsrar(H, int16_t) +do_vsrar(W, int32_t) +do_vsrar(D, int64_t) + +#define VSRAR(NAME, BIT, T, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = do_vsrar_ ## E(Vj->E(i), ((T)Vk->E(i))%BIT); \ + } \ +} + +VSRAR(vsrar_b, 8, uint8_t, B) +VSRAR(vsrar_h, 16, uint16_t, H) +VSRAR(vsrar_w, 32, uint32_t, W) +VSRAR(vsrar_d, 64, uint64_t, D) + +#define VSRARI(NAME, BIT, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = do_vsrar_ ## E(Vj->E(i), imm); \ + } \ +} + +VSRARI(vsrari_b, 8, B) +VSRARI(vsrari_h, 16, H) +VSRARI(vsrari_w, 32, W) +VSRARI(vsrari_d, 64, D) + +#define R_SHIFT(a, b) (a >> b) + +#define VSRLN(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = R_SHIFT((T)Vj->E2(i),((T)Vk->E2(i)) % BIT); \ + } \ + Vd->D(1) = 0; \ +} + +VSRLN(vsrln_b_h, 16, uint16_t, B, H) +VSRLN(vsrln_h_w, 32, uint32_t, H, W) +VSRLN(vsrln_w_d, 64, uint64_t, W, D) + +#define VSRAN(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = 
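/*
 * Editorial note (illustration, not part of the patch): do_vsrlr and
 * do_vsrar are "rounding" shifts. The term ((s1 >> (sh - 1)) & 0x1) is
 * the last bit shifted out, so adding it rounds the result to nearest,
 * with halves rounded up; do_vsrar is the arithmetic variant on signed
 * types. The sh == 0 case must be special-cased, since sh - 1 would be
 * a negative shift count. Worked example:
 *
 *     do_vsrlr_B(7, 2) == (7 >> 2) + ((7 >> 1) & 1) == 1 + 1 == 2
 *     // 7 / 4 = 1.75, rounded to 2
 */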
R_SHIFT(Vj->E2(i), ((T)Vk->E2(i)) % BIT); \ + } \ + Vd->D(1) = 0; \ +} + +VSRAN(vsran_b_h, 16, uint16_t, B, H) +VSRAN(vsran_h_w, 32, uint32_t, H, W) +VSRAN(vsran_w_d, 64, uint64_t, W, D) + +#define VSRLNI(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i, max; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + temp.D(0) = 0; \ + temp.D(1) = 0; \ + max = LSX_LEN/BIT; \ + for (i = 0; i < max; i++) { \ + temp.E1(i) = R_SHIFT((T)Vj->E2(i), imm); \ + temp.E1(i + max) = R_SHIFT((T)Vd->E2(i), imm); \ + } \ + *Vd = temp; \ +} + +void HELPER(vsrlni_d_q)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t imm) +{ + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + temp.D(0) = 0; + temp.D(1) = 0; + temp.D(0) = int128_getlo(int128_urshift(Vj->Q(0), imm % 128)); + temp.D(1) = int128_getlo(int128_urshift(Vd->Q(0), imm % 128)); + *Vd = temp; +} + +VSRLNI(vsrlni_b_h, 16, uint16_t, B, H) +VSRLNI(vsrlni_h_w, 32, uint32_t, H, W) +VSRLNI(vsrlni_w_d, 64, uint64_t, W, D) + +#define VSRANI(NAME, BIT, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i, max; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + temp.D(0) = 0; \ + temp.D(1) = 0; \ + max = LSX_LEN/BIT; \ + for (i = 0; i < max; i++) { \ + temp.E1(i) = R_SHIFT(Vj->E2(i), imm); \ + temp.E1(i + max) = R_SHIFT(Vd->E2(i), imm); \ + } \ + *Vd = temp; \ +} + +void HELPER(vsrani_d_q)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t imm) +{ + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + temp.D(0) = 0; + temp.D(1) = 0; + temp.D(0) = int128_getlo(int128_rshift(Vj->Q(0), imm % 128)); + temp.D(1) = int128_getlo(int128_rshift(Vd->Q(0), imm % 128)); + *Vd = temp; +} + +VSRANI(vsrani_b_h, 16, B, H) +VSRANI(vsrani_h_w, 32, H, W) +VSRANI(vsrani_w_d, 64, W, D) + +#define VSRLRN(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = do_vsrlr_ ## E2(Vj->E2(i), ((T)Vk->E2(i))%BIT); \ + } \ + Vd->D(1) = 0; \ +} + +VSRLRN(vsrlrn_b_h, 16, uint16_t, B, H) +VSRLRN(vsrlrn_h_w, 32, uint32_t, H, W) +VSRLRN(vsrlrn_w_d, 64, uint64_t, W, D) + +#define VSRARN(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = do_vsrar_ ## E2(Vj->E2(i), ((T)Vk->E2(i))%BIT); \ + } \ + Vd->D(1) = 0; \ +} + +VSRARN(vsrarn_b_h, 16, uint8_t, B, H) +VSRARN(vsrarn_h_w, 32, uint16_t, H, W) +VSRARN(vsrarn_w_d, 64, uint32_t, W, D) + +#define VSRLRNI(NAME, BIT, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i, max; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + temp.D(0) = 0; \ + temp.D(1) = 0; \ + max = LSX_LEN/BIT; \ + for (i = 0; i < max; i++) { \ + temp.E1(i) = do_vsrlr_ ## E2(Vj->E2(i), imm); \ + temp.E1(i + max) = do_vsrlr_ ## E2(Vd->E2(i), imm); \ + } \ + *Vd = temp; \ +} + +void 
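/*
 * Editorial note (illustration, not part of the patch): the *NI
 * (narrow with immediate) helpers pack two vectors into one. The
 * shifted, narrowed elements of Vj fill the low half of the result and
 * those of Vd fill the high half. Because Vd is both a source and the
 * destination, the result is accumulated in a local VReg temp and only
 * copied out at the end with *Vd = temp; writing Vd->E1(i) in place
 * would clobber the not-yet-read Vd->E2(i) source elements. Layout for
 * the b_h form (max == 8):
 *
 *     temp.B(0..7)  = Vj->H(0..7) >> imm   // low half from Vj
 *     temp.B(8..15) = Vd->H(0..7) >> imm   // high half from the old Vd
 */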
HELPER(vsrlrni_d_q)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t imm) +{ + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + Int128 r1, r2; + + if (imm == 0) { + temp.D(0) = int128_getlo(Vj->Q(0)); + temp.D(1) = int128_getlo(Vd->Q(0)); + } else { + r1 = int128_and(int128_urshift(Vj->Q(0), (imm -1)), int128_one()); + r2 = int128_and(int128_urshift(Vd->Q(0), (imm -1)), int128_one()); + + temp.D(0) = int128_getlo(int128_add(int128_urshift(Vj->Q(0), imm), r1)); + temp.D(1) = int128_getlo(int128_add(int128_urshift(Vd->Q(0), imm), r2)); + } + *Vd = temp; +} + +VSRLRNI(vsrlrni_b_h, 16, B, H) +VSRLRNI(vsrlrni_h_w, 32, H, W) +VSRLRNI(vsrlrni_w_d, 64, W, D) + +#define VSRARNI(NAME, BIT, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i, max; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + temp.D(0) = 0; \ + temp.D(1) = 0; \ + max = LSX_LEN/BIT; \ + for (i = 0; i < max; i++) { \ + temp.E1(i) = do_vsrar_ ## E2(Vj->E2(i), imm); \ + temp.E1(i + max) = do_vsrar_ ## E2(Vd->E2(i), imm); \ + } \ + *Vd = temp; \ +} + +void HELPER(vsrarni_d_q)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t imm) +{ + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + Int128 r1, r2; + + if (imm == 0) { + temp.D(0) = int128_getlo(Vj->Q(0)); + temp.D(1) = int128_getlo(Vd->Q(0)); + } else { + r1 = int128_and(int128_rshift(Vj->Q(0), (imm -1)), int128_one()); + r2 = int128_and(int128_rshift(Vd->Q(0), (imm -1)), int128_one()); + + temp.D(0) = int128_getlo(int128_add(int128_rshift(Vj->Q(0), imm), r1)); + temp.D(1) = int128_getlo(int128_add(int128_rshift(Vd->Q(0), imm), r2)); + } + *Vd = temp; +} + +VSRARNI(vsrarni_b_h, 16, B, H) +VSRARNI(vsrarni_h_w, 32, H, W) +VSRARNI(vsrarni_w_d, 64, W, D) + +#define SSRLNS(NAME, T1, T2, T3) \ +static T1 do_ssrlns_ ## NAME(T2 e2, int sa, int sh) \ +{ \ + T1 shft_res; \ + if (sa == 0) { \ + shft_res = e2; \ + } else { \ + shft_res = (((T1)e2) >> sa); \ + } \ + T3 mask; \ + mask = (1ull << sh) -1; \ + if (shft_res > mask) { \ + return mask; \ + } else { \ + return shft_res; \ + } \ +} + +SSRLNS(B, uint16_t, int16_t, uint8_t) +SSRLNS(H, uint32_t, int32_t, uint16_t) +SSRLNS(W, uint64_t, int64_t, uint32_t) + +#define VSSRLN(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = do_ssrlns_ ## E1(Vj->E2(i), (T)Vk->E2(i)% BIT, BIT/2 -1); \ + } \ + Vd->D(1) = 0; \ +} + +VSSRLN(vssrln_b_h, 16, uint16_t, B, H) +VSSRLN(vssrln_h_w, 32, uint32_t, H, W) +VSSRLN(vssrln_w_d, 64, uint64_t, W, D) + +#define SSRANS(E, T1, T2) \ +static T1 do_ssrans_ ## E(T1 e2, int sa, int sh) \ +{ \ + T1 shft_res; \ + if (sa == 0) { \ + shft_res = e2; \ + } else { \ + shft_res = e2 >> sa; \ + } \ + T2 mask; \ + mask = (1ll << sh) -1; \ + if (shft_res > mask) { \ + return mask; \ + } else if (shft_res < -(mask +1)) { \ + return ~mask; \ + } else { \ + return shft_res; \ + } \ +} + +SSRANS(B, int16_t, int8_t) +SSRANS(H, int32_t, int16_t) +SSRANS(W, int64_t, int32_t) + +#define VSSRAN(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = 
&(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = do_ssrans_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \ + } \ + Vd->D(1) = 0; \ +} + +VSSRAN(vssran_b_h, 16, uint16_t, B, H) +VSSRAN(vssran_h_w, 32, uint32_t, H, W) +VSSRAN(vssran_w_d, 64, uint64_t, W, D) + +#define SSRLNU(E, T1, T2, T3) \ +static T1 do_ssrlnu_ ## E(T3 e2, int sa, int sh) \ +{ \ + T1 shft_res; \ + if (sa == 0) { \ + shft_res = e2; \ + } else { \ + shft_res = (((T1)e2) >> sa); \ + } \ + T2 mask; \ + mask = (1ull << sh) -1; \ + if (shft_res > mask) { \ + return mask; \ + } else { \ + return shft_res; \ + } \ +} + +SSRLNU(B, uint16_t, uint8_t, int16_t) +SSRLNU(H, uint32_t, uint16_t, int32_t) +SSRLNU(W, uint64_t, uint32_t, int64_t) + +#define VSSRLNU(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = do_ssrlnu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \ + } \ + Vd->D(1) = 0; \ +} + +VSSRLNU(vssrln_bu_h, 16, uint16_t, B, H) +VSSRLNU(vssrln_hu_w, 32, uint32_t, H, W) +VSSRLNU(vssrln_wu_d, 64, uint64_t, W, D) + +#define SSRANU(E, T1, T2, T3) \ +static T1 do_ssranu_ ## E(T3 e2, int sa, int sh) \ +{ \ + T1 shft_res; \ + if (sa == 0) { \ + shft_res = e2; \ + } else { \ + shft_res = e2 >> sa; \ + } \ + if (e2 < 0) { \ + shft_res = 0; \ + } \ + T2 mask; \ + mask = (1ull << sh) -1; \ + if (shft_res > mask) { \ + return mask; \ + } else { \ + return shft_res; \ + } \ +} + +SSRANU(B, uint16_t, uint8_t, int16_t) +SSRANU(H, uint32_t, uint16_t, int32_t) +SSRANU(W, uint64_t, uint32_t, int64_t) + +#define VSSRANU(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = do_ssranu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \ + } \ + Vd->D(1) = 0; \ +} + +VSSRANU(vssran_bu_h, 16, uint16_t, B, H) +VSSRANU(vssran_hu_w, 32, uint32_t, H, W) +VSSRANU(vssran_wu_d, 64, uint64_t, W, D) + +#define VSSRLNI(NAME, BIT, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E1(i) = do_ssrlns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \ + temp.E1(i + LSX_LEN/BIT) = do_ssrlns_ ## E1(Vd->E2(i), imm, BIT/2 -1);\ + } \ + *Vd = temp; \ +} + +void HELPER(vssrlni_d_q)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t imm) +{ + Int128 shft_res1, shft_res2, mask; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + if (imm == 0) { + shft_res1 = Vj->Q(0); + shft_res2 = Vd->Q(0); + } else { + shft_res1 = int128_urshift(Vj->Q(0), imm); + shft_res2 = int128_urshift(Vd->Q(0), imm); + } + mask = int128_sub(int128_lshift(int128_one(), 63), int128_one()); + + if (int128_ult(mask, shft_res1)) { + Vd->D(0) = int128_getlo(mask); + }else { + Vd->D(0) = int128_getlo(shft_res1); + } + + if (int128_ult(mask, shft_res2)) { + Vd->D(1) = int128_getlo(mask); + }else { + Vd->D(1) = int128_getlo(shft_res2); + } +} + +VSSRLNI(vssrlni_b_h, 16, B, H) +VSSRLNI(vssrlni_h_w, 32, H, W) +VSSRLNI(vssrlni_w_d, 64, W, D) + +#define VSSRANI(NAME, BIT, E1, E2) \ +void 
HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E1(i) = do_ssrans_ ## E1(Vj->E2(i), imm, BIT/2 -1); \ + temp.E1(i + LSX_LEN/BIT) = do_ssrans_ ## E1(Vd->E2(i), imm, BIT/2 -1); \ + } \ + *Vd = temp; \ +} + +void HELPER(vssrani_d_q)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t imm) +{ + Int128 shft_res1, shft_res2, mask, min; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + if (imm == 0) { + shft_res1 = Vj->Q(0); + shft_res2 = Vd->Q(0); + } else { + shft_res1 = int128_rshift(Vj->Q(0), imm); + shft_res2 = int128_rshift(Vd->Q(0), imm); + } + mask = int128_sub(int128_lshift(int128_one(), 63), int128_one()); + min = int128_lshift(int128_one(), 63); + + if (int128_gt(shft_res1, mask)) { + Vd->D(0) = int128_getlo(mask); + } else if (int128_lt(shft_res1, int128_neg(min))) { + Vd->D(0) = int128_getlo(min); + } else { + Vd->D(0) = int128_getlo(shft_res1); + } + + if (int128_gt(shft_res2, mask)) { + Vd->D(1) = int128_getlo(mask); + } else if (int128_lt(shft_res2, int128_neg(min))) { + Vd->D(1) = int128_getlo(min); + } else { + Vd->D(1) = int128_getlo(shft_res2); + } +} + +VSSRANI(vssrani_b_h, 16, B, H) +VSSRANI(vssrani_h_w, 32, H, W) +VSSRANI(vssrani_w_d, 64, W, D) + +#define VSSRLNUI(NAME, BIT, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E1(i) = do_ssrlnu_ ## E1(Vj->E2(i), imm, BIT/2); \ + temp.E1(i + LSX_LEN/BIT) = do_ssrlnu_ ## E1(Vd->E2(i), imm, BIT/2); \ + } \ + *Vd = temp; \ +} + +void HELPER(vssrlni_du_q)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t imm) +{ + Int128 shft_res1, shft_res2, mask; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + if (imm == 0) { + shft_res1 = Vj->Q(0); + shft_res2 = Vd->Q(0); + } else { + shft_res1 = int128_urshift(Vj->Q(0), imm); + shft_res2 = int128_urshift(Vd->Q(0), imm); + } + mask = int128_sub(int128_lshift(int128_one(), 64), int128_one()); + + if (int128_ult(mask, shft_res1)) { + Vd->D(0) = int128_getlo(mask); + }else { + Vd->D(0) = int128_getlo(shft_res1); + } + + if (int128_ult(mask, shft_res2)) { + Vd->D(1) = int128_getlo(mask); + }else { + Vd->D(1) = int128_getlo(shft_res2); + } +} + +VSSRLNUI(vssrlni_bu_h, 16, B, H) +VSSRLNUI(vssrlni_hu_w, 32, H, W) +VSSRLNUI(vssrlni_wu_d, 64, W, D) + +#define VSSRANUI(NAME, BIT, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E1(i) = do_ssranu_ ## E1(Vj->E2(i), imm, BIT/2); \ + temp.E1(i + LSX_LEN/BIT) = do_ssranu_ ## E1(Vd->E2(i), imm, BIT/2); \ + } \ + *Vd = temp; \ +} + +void HELPER(vssrani_du_q)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t imm) +{ + Int128 shft_res1, shft_res2, mask; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + if (imm == 0) { + shft_res1 = Vj->Q(0); + shft_res2 = Vd->Q(0); + } else { + shft_res1 = int128_rshift(Vj->Q(0), imm); + shft_res2 = int128_rshift(Vd->Q(0), imm); + } + + if (int128_lt(Vj->Q(0), int128_zero())) { + shft_res1 = int128_zero(); + } + + if (int128_lt(Vd->Q(0), 
int128_zero())) { + shft_res2 = int128_zero(); + } + + mask = int128_sub(int128_lshift(int128_one(), 64), int128_one()); + + if (int128_ult(mask, shft_res1)) { + Vd->D(0) = int128_getlo(mask); + }else { + Vd->D(0) = int128_getlo(shft_res1); + } + + if (int128_ult(mask, shft_res2)) { + Vd->D(1) = int128_getlo(mask); + }else { + Vd->D(1) = int128_getlo(shft_res2); + } +} + +VSSRANUI(vssrani_bu_h, 16, B, H) +VSSRANUI(vssrani_hu_w, 32, H, W) +VSSRANUI(vssrani_wu_d, 64, W, D) + +#define SSRLRNS(E1, E2, T1, T2, T3) \ +static T1 do_ssrlrns_ ## E1(T2 e2, int sa, int sh) \ +{ \ + T1 shft_res; \ + \ + shft_res = do_vsrlr_ ## E2(e2, sa); \ + T1 mask; \ + mask = (1ull << sh) -1; \ + if (shft_res > mask) { \ + return mask; \ + } else { \ + return shft_res; \ + } \ +} + +SSRLRNS(B, H, uint16_t, int16_t, uint8_t) +SSRLRNS(H, W, uint32_t, int32_t, uint16_t) +SSRLRNS(W, D, uint64_t, int64_t, uint32_t) + +#define VSSRLRN(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = do_ssrlrns_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \ + } \ + Vd->D(1) = 0; \ +} + +VSSRLRN(vssrlrn_b_h, 16, uint16_t, B, H) +VSSRLRN(vssrlrn_h_w, 32, uint32_t, H, W) +VSSRLRN(vssrlrn_w_d, 64, uint64_t, W, D) + +#define SSRARNS(E1, E2, T1, T2) \ +static T1 do_ssrarns_ ## E1(T1 e2, int sa, int sh) \ +{ \ + T1 shft_res; \ + \ + shft_res = do_vsrar_ ## E2(e2, sa); \ + T2 mask; \ + mask = (1ll << sh) -1; \ + if (shft_res > mask) { \ + return mask; \ + } else if (shft_res < -(mask +1)) { \ + return ~mask; \ + } else { \ + return shft_res; \ + } \ +} + +SSRARNS(B, H, int16_t, int8_t) +SSRARNS(H, W, int32_t, int16_t) +SSRARNS(W, D, int64_t, int32_t) + +#define VSSRARN(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = do_ssrarns_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \ + } \ + Vd->D(1) = 0; \ +} + +VSSRARN(vssrarn_b_h, 16, uint16_t, B, H) +VSSRARN(vssrarn_h_w, 32, uint32_t, H, W) +VSSRARN(vssrarn_w_d, 64, uint64_t, W, D) + +#define SSRLRNU(E1, E2, T1, T2, T3) \ +static T1 do_ssrlrnu_ ## E1(T3 e2, int sa, int sh) \ +{ \ + T1 shft_res; \ + \ + shft_res = do_vsrlr_ ## E2(e2, sa); \ + \ + T2 mask; \ + mask = (1ull << sh) -1; \ + if (shft_res > mask) { \ + return mask; \ + } else { \ + return shft_res; \ + } \ +} + +SSRLRNU(B, H, uint16_t, uint8_t, int16_t) +SSRLRNU(H, W, uint32_t, uint16_t, int32_t) +SSRLRNU(W, D, uint64_t, uint32_t, int64_t) + +#define VSSRLRNU(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = do_ssrlrnu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \ + } \ + Vd->D(1) = 0; \ +} + +VSSRLRNU(vssrlrn_bu_h, 16, uint16_t, B, H) +VSSRLRNU(vssrlrn_hu_w, 32, uint32_t, H, W) +VSSRLRNU(vssrlrn_wu_d, 64, uint64_t, W, D) + +#define SSRARNU(E1, E2, T1, T2, T3) \ +static T1 do_ssrarnu_ ## E1(T3 e2, int sa, int sh) \ +{ \ + T1 shft_res; \ + \ + if (e2 < 0) { \ + shft_res = 0; \ + } else { \ + shft_res = 
do_vsrar_ ## E2(e2, sa); \ + } \ + T2 mask; \ + mask = (1ull << sh) -1; \ + if (shft_res > mask) { \ + return mask; \ + } else { \ + return shft_res; \ + } \ +} + +SSRARNU(B, H, uint16_t, uint8_t, int16_t) +SSRARNU(H, W, uint32_t, uint16_t, int32_t) +SSRARNU(W, D, uint64_t, uint32_t, int64_t) + +#define VSSRARNU(NAME, BIT, T, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E1(i) = do_ssrarnu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \ + } \ + Vd->D(1) = 0; \ +} + +VSSRARNU(vssrarn_bu_h, 16, uint16_t, B, H) +VSSRARNU(vssrarn_hu_w, 32, uint32_t, H, W) +VSSRARNU(vssrarn_wu_d, 64, uint64_t, W, D) + +#define VSSRLRNI(NAME, BIT, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E1(i) = do_ssrlrns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \ + temp.E1(i + LSX_LEN/BIT) = do_ssrlrns_ ## E1(Vd->E2(i), imm, BIT/2 -1);\ + } \ + *Vd = temp; \ +} + +#define VSSRLRNI_Q(NAME, sh) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + Int128 shft_res1, shft_res2, mask, r1, r2; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + if (imm == 0) { \ + shft_res1 = Vj->Q(0); \ + shft_res2 = Vd->Q(0); \ + } else { \ + r1 = int128_and(int128_urshift(Vj->Q(0), (imm -1)), int128_one()); \ + r2 = int128_and(int128_urshift(Vd->Q(0), (imm -1)), int128_one()); \ + \ + shft_res1 = (int128_add(int128_urshift(Vj->Q(0), imm), r1)); \ + shft_res2 = (int128_add(int128_urshift(Vd->Q(0), imm), r2)); \ + } \ + \ + mask = int128_sub(int128_lshift(int128_one(), sh), int128_one()); \ + \ + if (int128_ult(mask, shft_res1)) { \ + Vd->D(0) = int128_getlo(mask); \ + }else { \ + Vd->D(0) = int128_getlo(shft_res1); \ + } \ + \ + if (int128_ult(mask, shft_res2)) { \ + Vd->D(1) = int128_getlo(mask); \ + }else { \ + Vd->D(1) = int128_getlo(shft_res2); \ + } \ +} + +VSSRLRNI(vssrlrni_b_h, 16, B, H) +VSSRLRNI(vssrlrni_h_w, 32, H, W) +VSSRLRNI(vssrlrni_w_d, 64, W, D) +VSSRLRNI_Q(vssrlrni_d_q, 63) + +#define VSSRARNI(NAME, BIT, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E1(i) = do_ssrarns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \ + temp.E1(i + LSX_LEN/BIT) = do_ssrarns_ ## E1(Vd->E2(i), imm, BIT/2 -1); \ + } \ + *Vd = temp; \ +} + +void HELPER(vssrarni_d_q)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t imm) +{ + Int128 shft_res1, shft_res2, mask1, mask2, r1, r2; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + if (imm == 0) { + shft_res1 = Vj->Q(0); + shft_res2 = Vd->Q(0); + } else { + r1 = int128_and(int128_rshift(Vj->Q(0), (imm -1)), int128_one()); + r2 = int128_and(int128_rshift(Vd->Q(0), (imm -1)), int128_one()); + + shft_res1 = int128_add(int128_rshift(Vj->Q(0), imm), r1); + shft_res2 = int128_add(int128_rshift(Vd->Q(0), imm), r2); + } + + mask1 = int128_sub(int128_lshift(int128_one(), 63), int128_one()); + mask2 = int128_lshift(int128_one(), 63); + + if (int128_gt(shft_res1, mask1)) { + Vd->D(0) = 
int128_getlo(mask1); + } else if (int128_lt(shft_res1, int128_neg(mask2))) { + Vd->D(0) = int128_getlo(mask2); + } else { + Vd->D(0) = int128_getlo(shft_res1); + } + + if (int128_gt(shft_res2, mask1)) { + Vd->D(1) = int128_getlo(mask1); + } else if (int128_lt(shft_res2, int128_neg(mask2))) { + Vd->D(1) = int128_getlo(mask2); + } else { + Vd->D(1) = int128_getlo(shft_res2); + } +} + +VSSRARNI(vssrarni_b_h, 16, B, H) +VSSRARNI(vssrarni_h_w, 32, H, W) +VSSRARNI(vssrarni_w_d, 64, W, D) + +#define VSSRLRNUI(NAME, BIT, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E1(i) = do_ssrlrnu_ ## E1(Vj->E2(i), imm, BIT/2); \ + temp.E1(i + LSX_LEN/BIT) = do_ssrlrnu_ ## E1(Vd->E2(i), imm, BIT/2); \ + } \ + *Vd = temp; \ +} + +VSSRLRNUI(vssrlrni_bu_h, 16, B, H) +VSSRLRNUI(vssrlrni_hu_w, 32, H, W) +VSSRLRNUI(vssrlrni_wu_d, 64, W, D) +VSSRLRNI_Q(vssrlrni_du_q, 64) + +#define VSSRARNUI(NAME, BIT, E1, E2) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E1(i) = do_ssrarnu_ ## E1(Vj->E2(i), imm, BIT/2); \ + temp.E1(i + LSX_LEN/BIT) = do_ssrarnu_ ## E1(Vd->E2(i), imm, BIT/2); \ + } \ + *Vd = temp; \ +} + +void HELPER(vssrarni_du_q)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t imm) +{ + Int128 shft_res1, shft_res2, mask1, mask2, r1, r2; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + if (imm == 0) { + shft_res1 = Vj->Q(0); + shft_res2 = Vd->Q(0); + } else { + r1 = int128_and(int128_rshift(Vj->Q(0), (imm -1)), int128_one()); + r2 = int128_and(int128_rshift(Vd->Q(0), (imm -1)), int128_one()); + + shft_res1 = int128_add(int128_rshift(Vj->Q(0), imm), r1); + shft_res2 = int128_add(int128_rshift(Vd->Q(0), imm), r2); + } + + if (int128_lt(Vj->Q(0), int128_zero())) { + shft_res1 = int128_zero(); + } + if (int128_lt(Vd->Q(0), int128_zero())) { + shft_res2 = int128_zero(); + } + + mask1 = int128_sub(int128_lshift(int128_one(), 64), int128_one()); + mask2 = int128_lshift(int128_one(), 64); + + if (int128_gt(shft_res1, mask1)) { + Vd->D(0) = int128_getlo(mask1); + } else if (int128_lt(shft_res1, int128_neg(mask2))) { + Vd->D(0) = int128_getlo(mask2); + } else { + Vd->D(0) = int128_getlo(shft_res1); + } + + if (int128_gt(shft_res2, mask1)) { + Vd->D(1) = int128_getlo(mask1); + } else if (int128_lt(shft_res2, int128_neg(mask2))) { + Vd->D(1) = int128_getlo(mask2); + } else { + Vd->D(1) = int128_getlo(shft_res2); + } +} + +VSSRARNUI(vssrarni_bu_h, 16, B, H) +VSSRARNUI(vssrarni_hu_w, 32, H, W) +VSSRARNUI(vssrarni_wu_d, 64, W, D) + +#define DO_2OP(NAME, BIT, E, DO_OP) \ +void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) \ + { \ + Vd->E(i) = DO_OP(Vj->E(i)); \ + } \ +} + +#define DO_CLO_B(N) (clz32(~N & 0xff) - 24) +#define DO_CLO_H(N) (clz32(~N & 0xffff) - 16) +#define DO_CLO_W(N) (clz32(~N)) +#define DO_CLO_D(N) (clz64(~N)) +#define DO_CLZ_B(N) (clz32(N) - 24) +#define DO_CLZ_H(N) (clz32(N) - 16) +#define DO_CLZ_W(N) (clz32(N)) +#define DO_CLZ_D(N) (clz64(N)) + +DO_2OP(vclo_b, 8, UB, DO_CLO_B) +DO_2OP(vclo_h, 16, UH, DO_CLO_H) +DO_2OP(vclo_w, 
32, UW, DO_CLO_W) +DO_2OP(vclo_d, 64, UD, DO_CLO_D) +DO_2OP(vclz_b, 8, UB, DO_CLZ_B) +DO_2OP(vclz_h, 16, UH, DO_CLZ_H) +DO_2OP(vclz_w, 32, UW, DO_CLZ_W) +DO_2OP(vclz_d, 64, UD, DO_CLZ_D) + +#define VPCNT(NAME, BIT, E, FN) \ +void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) \ + { \ + Vd->E(i) = FN(Vj->E(i)); \ + } \ +} + +VPCNT(vpcnt_b, 8, UB, ctpop8) +VPCNT(vpcnt_h, 16, UH, ctpop16) +VPCNT(vpcnt_w, 32, UW, ctpop32) +VPCNT(vpcnt_d, 64, UD, ctpop64) + +#define DO_BITCLR(a, bit) (a & ~(1ull << bit)) +#define DO_BITSET(a, bit) (a | 1ull << bit) +#define DO_BITREV(a, bit) (a ^ (1ull << bit)) + +#define DO_BIT(NAME, BIT, E, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + VReg *Vk = (VReg *)vk; \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i)%BIT); \ + } \ +} + +DO_BIT(vbitclr_b, 8, UB, DO_BITCLR) +DO_BIT(vbitclr_h, 16, UH, DO_BITCLR) +DO_BIT(vbitclr_w, 32, UW, DO_BITCLR) +DO_BIT(vbitclr_d, 64, UD, DO_BITCLR) +DO_BIT(vbitset_b, 8, UB, DO_BITSET) +DO_BIT(vbitset_h, 16, UH, DO_BITSET) +DO_BIT(vbitset_w, 32, UW, DO_BITSET) +DO_BIT(vbitset_d, 64, UD, DO_BITSET) +DO_BIT(vbitrev_b, 8, UB, DO_BITREV) +DO_BIT(vbitrev_h, 16, UH, DO_BITREV) +DO_BIT(vbitrev_w, 32, UW, DO_BITREV) +DO_BIT(vbitrev_d, 64, UD, DO_BITREV) + +#define DO_BITI(NAME, BIT, E, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = DO_OP(Vj->E(i), imm); \ + } \ +} + +DO_BITI(vbitclri_b, 8, UB, DO_BITCLR) +DO_BITI(vbitclri_h, 16, UH, DO_BITCLR) +DO_BITI(vbitclri_w, 32, UW, DO_BITCLR) +DO_BITI(vbitclri_d, 64, UD, DO_BITCLR) +DO_BITI(vbitseti_b, 8, UB, DO_BITSET) +DO_BITI(vbitseti_h, 16, UH, DO_BITSET) +DO_BITI(vbitseti_w, 32, UW, DO_BITSET) +DO_BITI(vbitseti_d, 64, UD, DO_BITSET) +DO_BITI(vbitrevi_b, 8, UB, DO_BITREV) +DO_BITI(vbitrevi_h, 16, UH, DO_BITREV) +DO_BITI(vbitrevi_w, 32, UW, DO_BITREV) +DO_BITI(vbitrevi_d, 64, UD, DO_BITREV) + +#define VFRSTP(NAME, BIT, MASK, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i, m; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + if (Vj->E(i) < 0) { \ + break; \ + } \ + } \ + m = Vk->E(0) & MASK; \ + Vd->E(m) = i; \ +} + +VFRSTP(vfrstp_b, 8, 0xf, B) +VFRSTP(vfrstp_h, 16, 0x7, H) + +#define VFRSTPI(NAME, BIT, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i, m; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + if (Vj->E(i) < 0) { \ + break; \ + } \ + } \ + m = imm % (LSX_LEN/BIT); \ + Vd->E(m) = i; \ +} + +VFRSTPI(vfrstpi_b, 8, B) +VFRSTPI(vfrstpi_h, 16, H) + +static void vec_update_fcsr0_mask(CPULoongArchState *env, + uintptr_t pc, int mask) +{ + int flags = get_float_exception_flags(&env->fp_status); + + set_float_exception_flags(0, &env->fp_status); + + flags &= ~mask; + + if (flags) { + flags = ieee_ex_to_loongarch(flags); + UPDATE_FP_CAUSE(env->fcsr0, flags); + } + + if (GET_FP_ENABLES(env->fcsr0) & flags) { + do_raise_exception(env, EXCCODE_FPE, pc); + } else { + 
UPDATE_FP_FLAGS(env->fcsr0, flags); + } +} + +static void vec_update_fcsr0(CPULoongArchState *env, uintptr_t pc) +{ + vec_update_fcsr0_mask(env, pc, 0); +} + +static inline void vec_clear_cause(CPULoongArchState *env) +{ + SET_FP_CAUSE(env->fcsr0, 0); +} + +#define DO_3OP_F(NAME, BIT, E, FN) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + vec_clear_cause(env); \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = FN(Vj->E(i), Vk->E(i), &env->fp_status); \ + vec_update_fcsr0(env, GETPC()); \ + } \ +} + +DO_3OP_F(vfadd_s, 32, UW, float32_add) +DO_3OP_F(vfadd_d, 64, UD, float64_add) +DO_3OP_F(vfsub_s, 32, UW, float32_sub) +DO_3OP_F(vfsub_d, 64, UD, float64_sub) +DO_3OP_F(vfmul_s, 32, UW, float32_mul) +DO_3OP_F(vfmul_d, 64, UD, float64_mul) +DO_3OP_F(vfdiv_s, 32, UW, float32_div) +DO_3OP_F(vfdiv_d, 64, UD, float64_div) +DO_3OP_F(vfmax_s, 32, UW, float32_maxnum) +DO_3OP_F(vfmax_d, 64, UD, float64_maxnum) +DO_3OP_F(vfmin_s, 32, UW, float32_minnum) +DO_3OP_F(vfmin_d, 64, UD, float64_minnum) +DO_3OP_F(vfmaxa_s, 32, UW, float32_maxnummag) +DO_3OP_F(vfmaxa_d, 64, UD, float64_maxnummag) +DO_3OP_F(vfmina_s, 32, UW, float32_minnummag) +DO_3OP_F(vfmina_d, 64, UD, float64_minnummag) + +#define DO_4OP_F(NAME, BIT, E, FN, flags) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk, uint32_t va) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + VReg *Va = &(env->fpr[va].vreg); \ + \ + vec_clear_cause(env); \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = FN(Vj->E(i), Vk->E(i), Va->E(i), flags, &env->fp_status); \ + vec_update_fcsr0(env, GETPC()); \ + } \ +} + +DO_4OP_F(vfmadd_s, 32, UW, float32_muladd, 0) +DO_4OP_F(vfmadd_d, 64, UD, float64_muladd, 0) +DO_4OP_F(vfmsub_s, 32, UW, float32_muladd, float_muladd_negate_c) +DO_4OP_F(vfmsub_d, 64, UD, float64_muladd, float_muladd_negate_c) +DO_4OP_F(vfnmadd_s, 32, UW, float32_muladd, float_muladd_negate_result) +DO_4OP_F(vfnmadd_d, 64, UD, float64_muladd, float_muladd_negate_result) +DO_4OP_F(vfnmsub_s, 32, UW, float32_muladd, + float_muladd_negate_c | float_muladd_negate_result) +DO_4OP_F(vfnmsub_d, 64, UD, float64_muladd, + float_muladd_negate_c | float_muladd_negate_result) + +#define DO_2OP_F(NAME, BIT, E, FN) \ +void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + vec_clear_cause(env); \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = FN(env, Vj->E(i)); \ + } \ +} + +#define FLOGB(BIT, T) \ +static T do_flogb_## BIT(CPULoongArchState *env, T fj) \ +{ \ + T fp, fd; \ + float_status *status = &env->fp_status; \ + FloatRoundMode old_mode = get_float_rounding_mode(status); \ + \ + set_float_rounding_mode(float_round_down, status); \ + fp = float ## BIT ##_log2(fj, status); \ + fd = float ## BIT ##_round_to_int(fp, status); \ + set_float_rounding_mode(old_mode, status); \ + vec_update_fcsr0_mask(env, GETPC(), float_flag_inexact); \ + return fd; \ +} + +FLOGB(32, uint32_t) +FLOGB(64, uint64_t) + +#define FCLASS(NAME, BIT, E, FN) \ +void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = FN(env, 
Vj->E(i)); \ + } \ +} + +FCLASS(vfclass_s, 32, UW, helper_fclass_s) +FCLASS(vfclass_d, 64, UD, helper_fclass_d) + +#define FSQRT(BIT, T) \ +static T do_fsqrt_## BIT(CPULoongArchState *env, T fj) \ +{ \ + T fd; \ + fd = float ## BIT ##_sqrt(fj, &env->fp_status); \ + vec_update_fcsr0(env, GETPC()); \ + return fd; \ +} + +FSQRT(32, uint32_t) +FSQRT(64, uint64_t) + +#define FRECIP(BIT, T) \ +static T do_frecip_## BIT(CPULoongArchState *env, T fj) \ +{ \ + T fd; \ + fd = float ## BIT ##_div(float ## BIT ##_one, fj, &env->fp_status); \ + vec_update_fcsr0(env, GETPC()); \ + return fd; \ +} + +FRECIP(32, uint32_t) +FRECIP(64, uint64_t) + +#define FRSQRT(BIT, T) \ +static T do_frsqrt_## BIT(CPULoongArchState *env, T fj) \ +{ \ + T fd, fp; \ + fp = float ## BIT ##_sqrt(fj, &env->fp_status); \ + fd = float ## BIT ##_div(float ## BIT ##_one, fp, &env->fp_status); \ + vec_update_fcsr0(env, GETPC()); \ + return fd; \ +} + +FRSQRT(32, uint32_t) +FRSQRT(64, uint64_t) + +DO_2OP_F(vflogb_s, 32, UW, do_flogb_32) +DO_2OP_F(vflogb_d, 64, UD, do_flogb_64) +DO_2OP_F(vfsqrt_s, 32, UW, do_fsqrt_32) +DO_2OP_F(vfsqrt_d, 64, UD, do_fsqrt_64) +DO_2OP_F(vfrecip_s, 32, UW, do_frecip_32) +DO_2OP_F(vfrecip_d, 64, UD, do_frecip_64) +DO_2OP_F(vfrsqrt_s, 32, UW, do_frsqrt_32) +DO_2OP_F(vfrsqrt_d, 64, UD, do_frsqrt_64) + +static uint32_t float16_cvt_float32(uint16_t h, float_status *status) +{ + return float16_to_float32(h, true, status); +} +static uint64_t float32_cvt_float64(uint32_t s, float_status *status) +{ + return float32_to_float64(s, status); +} + +static uint16_t float32_cvt_float16(uint32_t s, float_status *status) +{ + return float32_to_float16(s, true, status); +} +static uint32_t float64_cvt_float32(uint64_t d, float_status *status) +{ + return float64_to_float32(d, status); +} + +void HELPER(vfcvtl_s_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + int i; + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + vec_clear_cause(env); + for (i = 0; i < LSX_LEN/32; i++) { + temp.UW(i) = float16_cvt_float32(Vj->UH(i), &env->fp_status); + vec_update_fcsr0(env, GETPC()); + } + *Vd = temp; +} + +void HELPER(vfcvtl_d_s)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + int i; + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + vec_clear_cause(env); + for (i = 0; i < LSX_LEN/64; i++) { + temp.UD(i) = float32_cvt_float64(Vj->UW(i), &env->fp_status); + vec_update_fcsr0(env, GETPC()); + } + *Vd = temp; +} + +void HELPER(vfcvth_s_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + int i; + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + vec_clear_cause(env); + for (i = 0; i < LSX_LEN/32; i++) { + temp.UW(i) = float16_cvt_float32(Vj->UH(i + 4), &env->fp_status); + vec_update_fcsr0(env, GETPC()); + } + *Vd = temp; +} + +void HELPER(vfcvth_d_s)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + int i; + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + vec_clear_cause(env); + for (i = 0; i < LSX_LEN/64; i++) { + temp.UD(i) = float32_cvt_float64(Vj->UW(i + 2), &env->fp_status); + vec_update_fcsr0(env, GETPC()); + } + *Vd = temp; +} + +void HELPER(vfcvt_h_s)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t vk) +{ + int i; + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + VReg *Vk = &(env->fpr[vk].vreg); + + vec_clear_cause(env); + for(i = 0; i < LSX_LEN/32; i++) { + temp.UH(i + 4) = float32_cvt_float16(Vj->UW(i), 
&env->fp_status); + temp.UH(i) = float32_cvt_float16(Vk->UW(i), &env->fp_status); + vec_update_fcsr0(env, GETPC()); + } + *Vd = temp; +} + +void HELPER(vfcvt_s_d)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t vk) +{ + int i; + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + VReg *Vk = &(env->fpr[vk].vreg); + + vec_clear_cause(env); + for(i = 0; i < LSX_LEN/64; i++) { + temp.UW(i + 2) = float64_cvt_float32(Vj->UD(i), &env->fp_status); + temp.UW(i) = float64_cvt_float32(Vk->UD(i), &env->fp_status); + vec_update_fcsr0(env, GETPC()); + } + *Vd = temp; +} + +void HELPER(vfrint_s)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + int i; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + vec_clear_cause(env); + for (i = 0; i < 4; i++) { + Vd->W(i) = float32_round_to_int(Vj->UW(i), &env->fp_status); + vec_update_fcsr0(env, GETPC()); + } +} + +void HELPER(vfrint_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + int i; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + vec_clear_cause(env); + for (i = 0; i < 2; i++) { + Vd->D(i) = float64_round_to_int(Vj->UD(i), &env->fp_status); + vec_update_fcsr0(env, GETPC()); + } +} + +#define FCVT_2OP(NAME, BIT, E, MODE) \ +void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \ +{ \ + int i; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + vec_clear_cause(env); \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \ + set_float_rounding_mode(MODE, &env->fp_status); \ + Vd->E(i) = float## BIT ## _round_to_int(Vj->E(i), &env->fp_status); \ + set_float_rounding_mode(old_mode, &env->fp_status); \ + vec_update_fcsr0(env, GETPC()); \ + } \ +} + +FCVT_2OP(vfrintrne_s, 32, UW, float_round_nearest_even) +FCVT_2OP(vfrintrne_d, 64, UD, float_round_nearest_even) +FCVT_2OP(vfrintrz_s, 32, UW, float_round_to_zero) +FCVT_2OP(vfrintrz_d, 64, UD, float_round_to_zero) +FCVT_2OP(vfrintrp_s, 32, UW, float_round_up) +FCVT_2OP(vfrintrp_d, 64, UD, float_round_up) +FCVT_2OP(vfrintrm_s, 32, UW, float_round_down) +FCVT_2OP(vfrintrm_d, 64, UD, float_round_down) + +#define FTINT(NAME, FMT1, FMT2, T1, T2, MODE) \ +static T2 do_ftint ## NAME(CPULoongArchState *env, T1 fj) \ +{ \ + T2 fd; \ + FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \ + \ + set_float_rounding_mode(MODE, &env->fp_status); \ + fd = do_## FMT1 ##_to_## FMT2(env, fj); \ + set_float_rounding_mode(old_mode, &env->fp_status); \ + return fd; \ +} + +#define DO_FTINT(FMT1, FMT2, T1, T2) \ +static T2 do_## FMT1 ##_to_## FMT2(CPULoongArchState *env, T1 fj) \ +{ \ + T2 fd; \ + \ + fd = FMT1 ##_to_## FMT2(fj, &env->fp_status); \ + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { \ + if (FMT1 ##_is_any_nan(fj)) { \ + fd = 0; \ + } \ + } \ + vec_update_fcsr0(env, GETPC()); \ + return fd; \ +} + +DO_FTINT(float32, int32, uint32_t, uint32_t) +DO_FTINT(float64, int64, uint64_t, uint64_t) +DO_FTINT(float32, uint32, uint32_t, uint32_t) +DO_FTINT(float64, uint64, uint64_t, uint64_t) +DO_FTINT(float64, int32, uint64_t, uint32_t) +DO_FTINT(float32, int64, uint32_t, uint64_t) + +FTINT(rne_w_s, float32, int32, uint32_t, uint32_t, float_round_nearest_even) +FTINT(rne_l_d, float64, int64, uint64_t, uint64_t, float_round_nearest_even) +FTINT(rp_w_s, float32, int32, uint32_t, uint32_t, float_round_up) +FTINT(rp_l_d, float64, int64, uint64_t, uint64_t, float_round_up) +FTINT(rz_w_s, float32, 
int32, uint32_t, uint32_t, float_round_to_zero) +FTINT(rz_l_d, float64, int64, uint64_t, uint64_t, float_round_to_zero) +FTINT(rm_w_s, float32, int32, uint32_t, uint32_t, float_round_down) +FTINT(rm_l_d, float64, int64, uint64_t, uint64_t, float_round_down) + +DO_2OP_F(vftintrne_w_s, 32, UW, do_ftintrne_w_s) +DO_2OP_F(vftintrne_l_d, 64, UD, do_ftintrne_l_d) +DO_2OP_F(vftintrp_w_s, 32, UW, do_ftintrp_w_s) +DO_2OP_F(vftintrp_l_d, 64, UD, do_ftintrp_l_d) +DO_2OP_F(vftintrz_w_s, 32, UW, do_ftintrz_w_s) +DO_2OP_F(vftintrz_l_d, 64, UD, do_ftintrz_l_d) +DO_2OP_F(vftintrm_w_s, 32, UW, do_ftintrm_w_s) +DO_2OP_F(vftintrm_l_d, 64, UD, do_ftintrm_l_d) +DO_2OP_F(vftint_w_s, 32, UW, do_float32_to_int32) +DO_2OP_F(vftint_l_d, 64, UD, do_float64_to_int64) + +FTINT(rz_wu_s, float32, uint32, uint32_t, uint32_t, float_round_to_zero) +FTINT(rz_lu_d, float64, uint64, uint64_t, uint64_t, float_round_to_zero) + +DO_2OP_F(vftintrz_wu_s, 32, UW, do_ftintrz_wu_s) +DO_2OP_F(vftintrz_lu_d, 64, UD, do_ftintrz_lu_d) +DO_2OP_F(vftint_wu_s, 32, UW, do_float32_to_uint32) +DO_2OP_F(vftint_lu_d, 64, UD, do_float64_to_uint64) + +FTINT(rm_w_d, float64, int32, uint64_t, uint32_t, float_round_down) +FTINT(rp_w_d, float64, int32, uint64_t, uint32_t, float_round_up) +FTINT(rz_w_d, float64, int32, uint64_t, uint32_t, float_round_to_zero) +FTINT(rne_w_d, float64, int32, uint64_t, uint32_t, float_round_nearest_even) + +#define FTINT_W_D(NAME, FN) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + vec_clear_cause(env); \ + for (i = 0; i < 2; i++) { \ + temp.W(i + 2) = FN(env, Vj->UD(i)); \ + temp.W(i) = FN(env, Vk->UD(i)); \ + } \ + *Vd = temp; \ +} + +FTINT_W_D(vftint_w_d, do_float64_to_int32) +FTINT_W_D(vftintrm_w_d, do_ftintrm_w_d) +FTINT_W_D(vftintrp_w_d, do_ftintrp_w_d) +FTINT_W_D(vftintrz_w_d, do_ftintrz_w_d) +FTINT_W_D(vftintrne_w_d, do_ftintrne_w_d) + +FTINT(rml_l_s, float32, int64, uint32_t, uint64_t, float_round_down) +FTINT(rpl_l_s, float32, int64, uint32_t, uint64_t, float_round_up) +FTINT(rzl_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero) +FTINT(rnel_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even) +FTINT(rmh_l_s, float32, int64, uint32_t, uint64_t, float_round_down) +FTINT(rph_l_s, float32, int64, uint32_t, uint64_t, float_round_up) +FTINT(rzh_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero) +FTINT(rneh_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even) + +#define FTINTL_L_S(NAME, FN) \ +void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + vec_clear_cause(env); \ + for (i = 0; i < 2; i++) { \ + temp.D(i) = FN(env, Vj->UW(i)); \ + } \ + *Vd = temp; \ +} + +FTINTL_L_S(vftintl_l_s, do_float32_to_int64) +FTINTL_L_S(vftintrml_l_s, do_ftintrml_l_s) +FTINTL_L_S(vftintrpl_l_s, do_ftintrpl_l_s) +FTINTL_L_S(vftintrzl_l_s, do_ftintrzl_l_s) +FTINTL_L_S(vftintrnel_l_s, do_ftintrnel_l_s) + +#define FTINTH_L_S(NAME, FN) \ +void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + vec_clear_cause(env); \ + for (i = 0; i < 2; i++) { \ + temp.D(i) = FN(env, Vj->UW(i + 2)); \ + } \ + *Vd = temp; \ +} + +FTINTH_L_S(vftinth_l_s, do_float32_to_int64) 
+FTINTH_L_S(vftintrmh_l_s, do_ftintrmh_l_s) +FTINTH_L_S(vftintrph_l_s, do_ftintrph_l_s) +FTINTH_L_S(vftintrzh_l_s, do_ftintrzh_l_s) +FTINTH_L_S(vftintrneh_l_s, do_ftintrneh_l_s) + +#define FFINT(NAME, FMT1, FMT2, T1, T2) \ +static T2 do_ffint_ ## NAME(CPULoongArchState *env, T1 fj) \ +{ \ + T2 fd; \ + \ + fd = FMT1 ##_to_## FMT2(fj, &env->fp_status); \ + vec_update_fcsr0(env, GETPC()); \ + return fd; \ +} + +FFINT(s_w, int32, float32, int32_t, uint32_t) +FFINT(d_l, int64, float64, int64_t, uint64_t) +FFINT(s_wu, uint32, float32, uint32_t, uint32_t) +FFINT(d_lu, uint64, float64, uint64_t, uint64_t) + +DO_2OP_F(vffint_s_w, 32, W, do_ffint_s_w) +DO_2OP_F(vffint_d_l, 64, D, do_ffint_d_l) +DO_2OP_F(vffint_s_wu, 32, UW, do_ffint_s_wu) +DO_2OP_F(vffint_d_lu, 64, UD, do_ffint_d_lu) + +void HELPER(vffintl_d_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + int i; + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + vec_clear_cause(env); + for (i = 0; i < 2; i++) { + temp.D(i) = int32_to_float64(Vj->W(i), &env->fp_status); + vec_update_fcsr0(env, GETPC()); + } + *Vd = temp; +} + +void HELPER(vffinth_d_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj) +{ + int i; + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + vec_clear_cause(env); + for (i = 0; i < 2; i++) { + temp.D(i) = int32_to_float64(Vj->W(i + 2), &env->fp_status); + vec_update_fcsr0(env, GETPC()); + } + *Vd = temp; +} + +void HELPER(vffint_s_l)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t vk) +{ + int i; + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + VReg *Vk = &(env->fpr[vk].vreg); + + vec_clear_cause(env); + for (i = 0; i < 2; i++) { + temp.W(i + 2) = int64_to_float32(Vj->D(i), &env->fp_status); + temp.W(i) = int64_to_float32(Vk->D(i), &env->fp_status); + vec_update_fcsr0(env, GETPC()); + } + *Vd = temp; +} + +#define VSEQ(a, b) (a == b ? -1 : 0) +#define VSLE(a, b) (a <= b ? -1 : 0) +#define VSLT(a, b) (a < b ? 
-1 : 0) + +#define VCMPI(NAME, BIT, E, DO_OP) \ +void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \ +{ \ + int i; \ + VReg *Vd = (VReg *)vd; \ + VReg *Vj = (VReg *)vj; \ + typedef __typeof(Vd->E(0)) TD; \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + Vd->E(i) = DO_OP(Vj->E(i), (TD)imm); \ + } \ +} + +VCMPI(vseqi_b, 8, B, VSEQ) +VCMPI(vseqi_h, 16, H, VSEQ) +VCMPI(vseqi_w, 32, W, VSEQ) +VCMPI(vseqi_d, 64, D, VSEQ) +VCMPI(vslei_b, 8, B, VSLE) +VCMPI(vslei_h, 16, H, VSLE) +VCMPI(vslei_w, 32, W, VSLE) +VCMPI(vslei_d, 64, D, VSLE) +VCMPI(vslei_bu, 8, UB, VSLE) +VCMPI(vslei_hu, 16, UH, VSLE) +VCMPI(vslei_wu, 32, UW, VSLE) +VCMPI(vslei_du, 64, UD, VSLE) +VCMPI(vslti_b, 8, B, VSLT) +VCMPI(vslti_h, 16, H, VSLT) +VCMPI(vslti_w, 32, W, VSLT) +VCMPI(vslti_d, 64, D, VSLT) +VCMPI(vslti_bu, 8, UB, VSLT) +VCMPI(vslti_hu, 16, UH, VSLT) +VCMPI(vslti_wu, 32, UW, VSLT) +VCMPI(vslti_du, 64, UD, VSLT) + +static uint64_t vfcmp_common(CPULoongArchState *env, + FloatRelation cmp, uint32_t flags) +{ + uint64_t ret = 0; + + switch (cmp) { + case float_relation_less: + ret = (flags & FCMP_LT); + break; + case float_relation_equal: + ret = (flags & FCMP_EQ); + break; + case float_relation_greater: + ret = (flags & FCMP_GT); + break; + case float_relation_unordered: + ret = (flags & FCMP_UN); + break; + default: + g_assert_not_reached(); + } + + if (ret) { + ret = -1; + } + + return ret; +} + +#define VFCMP(NAME, BIT, E, FN) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk, uint32_t flags) \ +{ \ + int i; \ + VReg t; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + vec_clear_cause(env); \ + for (i = 0; i < LSX_LEN/BIT ; i++) { \ + FloatRelation cmp; \ + cmp = FN(Vj->E(i), Vk->E(i), &env->fp_status); \ + t.E(i) = vfcmp_common(env, cmp, flags); \ + vec_update_fcsr0(env, GETPC()); \ + } \ + *Vd = t; \ +} + +VFCMP(vfcmp_c_s, 32, UW, float32_compare_quiet) +VFCMP(vfcmp_s_s, 32, UW, float32_compare) +VFCMP(vfcmp_c_d, 64, UD, float64_compare_quiet) +VFCMP(vfcmp_s_d, 64, UD, float64_compare) + +void HELPER(vbitseli_b)(void *vd, void *vj, uint64_t imm, uint32_t v) +{ + int i; + VReg *Vd = (VReg *)vd; + VReg *Vj = (VReg *)vj; + + for (i = 0; i < 16; i++) { + Vd->B(i) = (~Vd->B(i) & Vj->B(i)) | (Vd->B(i) & imm); + } +} + +/* Copy from target/arm/tcg/sve_helper.c */ +static inline bool do_match2(uint64_t n, uint64_t m0, uint64_t m1, int esz) +{ + uint64_t bits = 8 << esz; + uint64_t ones = dup_const(esz, 1); + uint64_t signs = ones << (bits - 1); + uint64_t cmp0, cmp1; + + cmp1 = dup_const(esz, n); + cmp0 = cmp1 ^ m0; + cmp1 = cmp1 ^ m1; + cmp0 = (cmp0 - ones) & ~cmp0; + cmp1 = (cmp1 - ones) & ~cmp1; + return (cmp0 | cmp1) & signs; +} + +#define SETANYEQZ(NAME, MO) \ +void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \ +{ \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + env->cf[cd & 0x7] = do_match2(0, Vj->D(0), Vj->D(1), MO); \ +} +SETANYEQZ(vsetanyeqz_b, MO_8) +SETANYEQZ(vsetanyeqz_h, MO_16) +SETANYEQZ(vsetanyeqz_w, MO_32) +SETANYEQZ(vsetanyeqz_d, MO_64) + +#define SETALLNEZ(NAME, MO) \ +void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \ +{ \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + env->cf[cd & 0x7]= !do_match2(0, Vj->D(0), Vj->D(1), MO); \ +} +SETALLNEZ(vsetallnez_b, MO_8) +SETALLNEZ(vsetallnez_h, MO_16) +SETALLNEZ(vsetallnez_w, MO_32) +SETALLNEZ(vsetallnez_d, MO_64) + +#define VPACKEV(NAME, BIT, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, 
uint32_t vk) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E(2 * i + 1) = Vj->E(2 * i); \ + temp.E(2 *i) = Vk->E(2 * i); \ + } \ + *Vd = temp; \ +} + +VPACKEV(vpackev_b, 16, B) +VPACKEV(vpackev_h, 32, H) +VPACKEV(vpackev_w, 64, W) +VPACKEV(vpackev_d, 128, D) + +#define VPACKOD(NAME, BIT, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E(2 * i + 1) = Vj->E(2 * i + 1); \ + temp.E(2 * i) = Vk->E(2 * i + 1); \ + } \ + *Vd = temp; \ +} + +VPACKOD(vpackod_b, 16, B) +VPACKOD(vpackod_h, 32, H) +VPACKOD(vpackod_w, 64, W) +VPACKOD(vpackod_d, 128, D) + +#define VPICKEV(NAME, BIT, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i); \ + temp.E(i) = Vk->E(2 * i); \ + } \ + *Vd = temp; \ +} + +VPICKEV(vpickev_b, 16, B) +VPICKEV(vpickev_h, 32, H) +VPICKEV(vpickev_w, 64, W) +VPICKEV(vpickev_d, 128, D) + +#define VPICKOD(NAME, BIT, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i + 1); \ + temp.E(i) = Vk->E(2 * i + 1); \ + } \ + *Vd = temp; \ +} + +VPICKOD(vpickod_b, 16, B) +VPICKOD(vpickod_h, 32, H) +VPICKOD(vpickod_w, 64, W) +VPICKOD(vpickod_d, 128, D) + +#define VILVL(NAME, BIT, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E(2 * i + 1) = Vj->E(i); \ + temp.E(2 * i) = Vk->E(i); \ + } \ + *Vd = temp; \ +} + +VILVL(vilvl_b, 16, B) +VILVL(vilvl_h, 32, H) +VILVL(vilvl_w, 64, W) +VILVL(vilvl_d, 128, D) + +#define VILVH(NAME, BIT, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E(2 * i + 1) = Vj->E(i + LSX_LEN/BIT); \ + temp.E(2 * i) = Vk->E(i + LSX_LEN/BIT); \ + } \ + *Vd = temp; \ +} + +VILVH(vilvh_b, 16, B) +VILVH(vilvh_h, 32, H) +VILVH(vilvh_w, 64, W) +VILVH(vilvh_d, 128, D) + +void HELPER(vshuf_b)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t vk, uint32_t va) +{ + int i, m; + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + VReg *Vk = &(env->fpr[vk].vreg); + VReg *Va = &(env->fpr[va].vreg); + + m = LSX_LEN/8; + for (i = 0; i < m ; i++) { + uint64_t k = (uint8_t)Va->B(i) % (2 * m); + temp.B(i) = k < m ? 
Vk->B(k) : Vj->B(k - m); + } + *Vd = temp; +} + +#define VSHUF(NAME, BIT, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t vk) \ +{ \ + int i, m; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + VReg *Vk = &(env->fpr[vk].vreg); \ + \ + m = LSX_LEN/BIT; \ + for (i = 0; i < m; i++) { \ + uint64_t k = ((uint8_t) Vd->E(i)) % (2 * m); \ + temp.E(i) = k < m ? Vk->E(k) : Vj->E(k - m); \ + } \ + *Vd = temp; \ +} + +VSHUF(vshuf_h, 16, H) +VSHUF(vshuf_w, 32, W) +VSHUF(vshuf_d, 64, D) + +#define VSHUF4I(NAME, BIT, E) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int i; \ + VReg temp; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + for (i = 0; i < LSX_LEN/BIT; i++) { \ + temp.E(i) = Vj->E(((i) & 0xfc) + (((imm) >> \ + (2 * ((i) & 0x03))) & 0x03)); \ + } \ + *Vd = temp; \ +} + +VSHUF4I(vshuf4i_b, 8, B) +VSHUF4I(vshuf4i_h, 16, H) +VSHUF4I(vshuf4i_w, 32, W) + +void HELPER(vshuf4i_d)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t imm) +{ + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + VReg temp; + temp.D(0) = (imm & 2 ? Vj : Vd)->D(imm & 1); + temp.D(1) = (imm & 8 ? Vj : Vd)->D((imm >> 2) & 1); + *Vd = temp; +} + +void HELPER(vpermi_w)(CPULoongArchState *env, + uint32_t vd, uint32_t vj, uint32_t imm) +{ + VReg temp; + VReg *Vd = &(env->fpr[vd].vreg); + VReg *Vj = &(env->fpr[vj].vreg); + + temp.W(0) = Vj->W(imm & 0x3); + temp.W(1) = Vj->W((imm >> 2) & 0x3); + temp.W(2) = Vd->W((imm >> 4) & 0x3); + temp.W(3) = Vd->W((imm >> 6) & 0x3); + *Vd = temp; +} + +#define VEXTRINS(NAME, BIT, E, MASK) \ +void HELPER(NAME)(CPULoongArchState *env, \ + uint32_t vd, uint32_t vj, uint32_t imm) \ +{ \ + int ins, extr; \ + VReg *Vd = &(env->fpr[vd].vreg); \ + VReg *Vj = &(env->fpr[vj].vreg); \ + \ + ins = (imm >> 4) & MASK; \ + extr = imm & MASK; \ + Vd->E(ins) = Vj->E(extr); \ +} + +VEXTRINS(vextrins_b, 8, B, 0xf) +VEXTRINS(vextrins_h, 16, H, 0x7) +VEXTRINS(vextrins_w, 32, W, 0x3) +VEXTRINS(vextrins_d, 64, D, 0x1) diff --git a/target/loongarch/machine.c b/target/loongarch/machine.c index b1e523ea72..d8ac99c9a4 100644 --- a/target/loongarch/machine.c +++ b/target/loongarch/machine.c @@ -10,6 +10,72 @@ #include "migration/cpu.h" #include "internals.h" +static const VMStateDescription vmstate_fpu_reg = { + .name = "fpu_reg", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(UD(0), VReg), + VMSTATE_END_OF_LIST() + } +}; + +#define VMSTATE_FPU_REGS(_field, _state, _start) \ + VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, 32, 0, \ + vmstate_fpu_reg, fpr_t) + +static bool fpu_needed(void *opaque) +{ + LoongArchCPU *cpu = opaque; + + return FIELD_EX64(cpu->env.cpucfg[2], CPUCFG2, FP); +} + +static const VMStateDescription vmstate_fpu = { + .name = "cpu/fpu", + .version_id = 1, + .minimum_version_id = 1, + .needed = fpu_needed, + .fields = (VMStateField[]) { + VMSTATE_FPU_REGS(env.fpr, LoongArchCPU, 0), + VMSTATE_UINT32(env.fcsr0, LoongArchCPU), + VMSTATE_BOOL_ARRAY(env.cf, LoongArchCPU, 8), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_lsxh_reg = { + .name = "lsxh_reg", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(UD(1), VReg), + VMSTATE_END_OF_LIST() + } +}; + +#define VMSTATE_LSXH_REGS(_field, _state, _start) \ + VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, 32, 0, \ + vmstate_lsxh_reg, fpr_t) + 
+static bool lsx_needed(void *opaque) +{ + LoongArchCPU *cpu = opaque; + + return FIELD_EX64(cpu->env.cpucfg[2], CPUCFG2, LSX); +} + +static const VMStateDescription vmstate_lsx = { + .name = "cpu/lsx", + .version_id = 1, + .minimum_version_id = 1, + .needed = lsx_needed, + .fields = (VMStateField[]) { + VMSTATE_LSXH_REGS(env.fpr, LoongArchCPU, 0), + VMSTATE_END_OF_LIST() + }, +}; + /* TLB state */ const VMStateDescription vmstate_tlb = { .name = "cpu/tlb", @@ -24,18 +90,13 @@ const VMStateDescription vmstate_tlb = { }; /* LoongArch CPU state */ - const VMStateDescription vmstate_loongarch_cpu = { .name = "cpu", - .version_id = 0, - .minimum_version_id = 0, + .version_id = 1, + .minimum_version_id = 1, .fields = (VMStateField[]) { - VMSTATE_UINTTL_ARRAY(env.gpr, LoongArchCPU, 32), VMSTATE_UINTTL(env.pc, LoongArchCPU), - VMSTATE_UINT64_ARRAY(env.fpr, LoongArchCPU, 32), - VMSTATE_UINT32(env.fcsr0, LoongArchCPU), - VMSTATE_BOOL_ARRAY(env.cf, LoongArchCPU, 8), /* Remaining CSRs */ VMSTATE_UINT64(env.CSR_CRMD, LoongArchCPU), @@ -99,4 +160,9 @@ const VMStateDescription vmstate_loongarch_cpu = { VMSTATE_END_OF_LIST() }, + .subsections = (const VMStateDescription*[]) { + &vmstate_fpu, + &vmstate_lsx, + NULL + } }; diff --git a/target/loongarch/meson.build b/target/loongarch/meson.build index 690633969f..1117a51c52 100644 --- a/target/loongarch/meson.build +++ b/target/loongarch/meson.build @@ -11,11 +11,13 @@ loongarch_tcg_ss.add(files( 'op_helper.c', 'translate.c', 'gdbstub.c', + 'lsx_helper.c', )) loongarch_tcg_ss.add(zlib) loongarch_softmmu_ss = ss.source_set() loongarch_softmmu_ss.add(files( + 'loongarch-qmp-cmds.c', 'machine.c', 'tlb_helper.c', 'constant_timer.c', diff --git a/target/loongarch/op_helper.c b/target/loongarch/op_helper.c index 568c071601..60335a05e2 100644 --- a/target/loongarch/op_helper.c +++ b/target/loongarch/op_helper.c @@ -49,14 +49,16 @@ target_ulong helper_bitswap(target_ulong v) void helper_asrtle_d(CPULoongArchState *env, target_ulong rj, target_ulong rk) { if (rj > rk) { - do_raise_exception(env, EXCCODE_BCE, 0); + env->CSR_BADV = rj; + do_raise_exception(env, EXCCODE_BCE, GETPC()); } } void helper_asrtgt_d(CPULoongArchState *env, target_ulong rj, target_ulong rk) { if (rj <= rk) { - do_raise_exception(env, EXCCODE_BCE, 0); + env->CSR_BADV = rj; + do_raise_exception(env, EXCCODE_BCE, GETPC()); } } diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c index 72a6275665..ae53f5ee9d 100644 --- a/target/loongarch/translate.c +++ b/target/loongarch/translate.c @@ -8,6 +8,8 @@ #include "qemu/osdep.h" #include "cpu.h" #include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" + #include "exec/translator.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" @@ -21,7 +23,6 @@ /* Global register indices */ TCGv cpu_gpr[32], cpu_pc; static TCGv cpu_lladdr, cpu_llval; -TCGv_i64 cpu_fpr[32]; #include "exec/gen-icount.h" @@ -29,16 +30,43 @@ TCGv_i64 cpu_fpr[32]; #define DISAS_EXIT DISAS_TARGET_1 #define DISAS_EXIT_UPDATE DISAS_TARGET_2 +static inline int vec_full_offset(int regno) +{ + return offsetof(CPULoongArchState, fpr[regno]); +} + +static inline void get_vreg64(TCGv_i64 dest, int regno, int index) +{ + tcg_gen_ld_i64(dest, cpu_env, + offsetof(CPULoongArchState, fpr[regno].vreg.D(index))); +} + +static inline void set_vreg64(TCGv_i64 src, int regno, int index) +{ + tcg_gen_st_i64(src, cpu_env, + offsetof(CPULoongArchState, fpr[regno].vreg.D(index))); +} + static inline int plus_1(DisasContext *ctx, int x) { return x + 1; } +static inline int 
shl_1(DisasContext *ctx, int x) +{ + return x << 1; +} + static inline int shl_2(DisasContext *ctx, int x) { return x << 2; } +static inline int shl_3(DisasContext *ctx, int x) +{ + return x << 3; +} + /* * LoongArch the upper 32 bits are undefined ("can be any value"). * QEMU chooses to nanbox, because it is most likely to show guest bugs early. @@ -71,6 +99,7 @@ static void loongarch_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { int64_t bound; + CPULoongArchState *env = cs->env_ptr; DisasContext *ctx = container_of(dcbase, DisasContext, base); ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK; @@ -85,8 +114,9 @@ static void loongarch_tr_init_disas_context(DisasContextBase *dcbase, bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; ctx->base.max_insns = MIN(ctx->base.max_insns, bound); - ctx->ntemp = 0; - memset(ctx->temp, 0, sizeof(ctx->temp)); + if (FIELD_EX64(env->cpucfg[2], CPUCFG2, LSX)) { + ctx->vl = LSX_LEN; + } ctx->zero = tcg_constant_tl(0); } @@ -110,12 +140,6 @@ static void loongarch_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) * * Further, we may provide an extension for word operations. */ -static TCGv temp_new(DisasContext *ctx) -{ - assert(ctx->ntemp < ARRAY_SIZE(ctx->temp)); - return ctx->temp[ctx->ntemp++] = tcg_temp_new(); -} - static TCGv gpr_src(DisasContext *ctx, int reg_num, DisasExtend src_ext) { TCGv t; @@ -128,11 +152,11 @@ static TCGv gpr_src(DisasContext *ctx, int reg_num, DisasExtend src_ext) case EXT_NONE: return cpu_gpr[reg_num]; case EXT_SIGN: - t = temp_new(ctx); + t = tcg_temp_new(); tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]); return t; case EXT_ZERO: - t = temp_new(ctx); + t = tcg_temp_new(); tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]); return t; } @@ -142,7 +166,7 @@ static TCGv gpr_src(DisasContext *ctx, int reg_num, DisasExtend src_ext) static TCGv gpr_dst(DisasContext *ctx, int reg_num, DisasExtend dst_ext) { if (reg_num == 0 || dst_ext) { - return temp_new(ctx); + return tcg_temp_new(); } return cpu_gpr[reg_num]; } @@ -166,6 +190,20 @@ static void gen_set_gpr(int reg_num, TCGv t, DisasExtend dst_ext) } } +static TCGv get_fpr(DisasContext *ctx, int reg_num) +{ + TCGv t = tcg_temp_new(); + tcg_gen_ld_i64(t, cpu_env, + offsetof(CPULoongArchState, fpr[reg_num].vreg.D(0))); + return t; +} + +static void set_fpr(int reg_num, TCGv val) +{ + tcg_gen_st_i64(val, cpu_env, + offsetof(CPULoongArchState, fpr[reg_num].vreg.D(0))); +} + #include "decode-insns.c.inc" #include "insn_trans/trans_arith.c.inc" #include "insn_trans/trans_shift.c.inc" @@ -180,13 +218,14 @@ static void gen_set_gpr(int reg_num, TCGv t, DisasExtend dst_ext) #include "insn_trans/trans_fmemory.c.inc" #include "insn_trans/trans_branch.c.inc" #include "insn_trans/trans_privileged.c.inc" +#include "insn_trans/trans_lsx.c.inc" static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) { CPULoongArchState *env = cs->env_ptr; DisasContext *ctx = container_of(dcbase, DisasContext, base); - ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next); + ctx->opcode = translator_ldl(env, &ctx->base, ctx->base.pc_next); if (!decode(ctx, ctx->opcode)) { qemu_log_mask(LOG_UNIMP, "Error: unknown opcode. 
" @@ -195,12 +234,6 @@ static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) generate_exception(ctx, EXCCODE_INE); } - for (int i = ctx->ntemp - 1; i >= 0; --i) { - tcg_temp_free(ctx->temp[i]); - ctx->temp[i] = NULL; - } - ctx->ntemp = 0; - ctx->base.pc_next += 4; } @@ -245,7 +278,7 @@ static const TranslatorOps loongarch_tr_ops = { .disas_log = loongarch_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; @@ -265,11 +298,6 @@ void loongarch_translate_init(void) regnames[i]); } - for (i = 0; i < 32; i++) { - int off = offsetof(CPULoongArchState, fpr[i]); - cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env, off, fregnames[i]); - } - cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPULoongArchState, pc), "pc"); cpu_lladdr = tcg_global_mem_new(cpu_env, offsetof(CPULoongArchState, lladdr), "lladdr"); diff --git a/target/loongarch/translate.h b/target/loongarch/translate.h index 6d2e382e8b..7f60090580 100644 --- a/target/loongarch/translate.h +++ b/target/loongarch/translate.h @@ -31,10 +31,8 @@ typedef struct DisasContext { uint32_t opcode; uint16_t mem_idx; uint16_t plv; + int vl; /* Vector length */ TCGv zero; - /* Space for 3 operands plus 1 extra for address computation. */ - TCGv temp[4]; - uint8_t ntemp; } DisasContext; void generate_exception(DisasContext *ctx, int excp); diff --git a/target/m68k/cpu-param.h b/target/m68k/cpu-param.h index 44a8d193f0..39dcbcece8 100644 --- a/target/m68k/cpu-param.h +++ b/target/m68k/cpu-param.h @@ -17,6 +17,5 @@ #define TARGET_PAGE_BITS 12 #define TARGET_PHYS_ADDR_SPACE_BITS 32 #define TARGET_VIRT_ADDR_SPACE_BITS 32 -#define NB_MMU_MODES 2 #endif diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h index 3a9cfe2f33..048d5aae2b 100644 --- a/target/m68k/cpu.h +++ b/target/m68k/cpu.h @@ -176,9 +176,9 @@ struct ArchCPU { #ifndef CONFIG_USER_ONLY void m68k_cpu_do_interrupt(CPUState *cpu); bool m68k_cpu_exec_interrupt(CPUState *cpu, int int_req); +hwaddr m68k_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); #endif /* !CONFIG_USER_ONLY */ void m68k_cpu_dump_state(CPUState *cpu, FILE *f, int flags); -hwaddr m68k_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); @@ -581,10 +581,12 @@ static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch) bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); +#ifndef CONFIG_USER_ONLY void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr); +#endif #include "exec/cpu-all.h" diff --git a/target/m68k/gdbstub.c b/target/m68k/gdbstub.c index eb2d030e14..1e5f033a12 100644 --- a/target/m68k/gdbstub.c +++ b/target/m68k/gdbstub.c @@ -19,7 +19,7 @@ */ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" int m68k_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { diff --git a/target/m68k/helper.c b/target/m68k/helper.c index 4621cf2402..3b3a6ea8bd 100644 --- a/target/m68k/helper.c +++ b/target/m68k/helper.c @@ -23,6 +23,7 @@ #include "exec/exec-all.h" #include "exec/gdbstub.h" #include "exec/helper-proto.h" +#include 
"gdbstub/helpers.h" #include "fpu/softfloat.h" #include "qemu/qemu-print.h" diff --git a/target/m68k/m68k-semi.c b/target/m68k/m68k-semi.c index 87b1314925..88ad9ba814 100644 --- a/target/m68k/m68k-semi.c +++ b/target/m68k/m68k-semi.c @@ -20,7 +20,8 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/syscalls.h" +#include "gdbstub/helpers.h" #include "semihosting/syscalls.h" #include "semihosting/softmmu-uaccess.h" #include "hw/boards.h" diff --git a/target/m68k/translate.c b/target/m68k/translate.c index 31178c3b1d..44d852b106 100644 --- a/target/m68k/translate.c +++ b/target/m68k/translate.c @@ -121,35 +121,9 @@ typedef struct DisasContext { int done_mac; int writeback_mask; TCGv writeback[8]; -#define MAX_TO_RELEASE 8 - int release_count; - TCGv release[MAX_TO_RELEASE]; bool ss_active; } DisasContext; -static void init_release_array(DisasContext *s) -{ -#ifdef CONFIG_DEBUG_TCG - memset(s->release, 0, sizeof(s->release)); -#endif - s->release_count = 0; -} - -static void do_release(DisasContext *s) -{ - int i; - for (i = 0; i < s->release_count; i++) { - tcg_temp_free(s->release[i]); - } - init_release_array(s); -} - -static TCGv mark_to_release(DisasContext *s, TCGv tmp) -{ - g_assert(s->release_count < MAX_TO_RELEASE); - return s->release[s->release_count++] = tmp; -} - static TCGv get_areg(DisasContext *s, unsigned regno) { if (s->writeback_mask & (1 << regno)) { @@ -164,7 +138,6 @@ static void delay_set_areg(DisasContext *s, unsigned regno, { if (s->writeback_mask & (1 << regno)) { if (give_temp) { - tcg_temp_free(s->writeback[regno]); s->writeback[regno] = val; } else { tcg_gen_mov_i32(s->writeback[regno], val); @@ -189,7 +162,6 @@ static void do_writebacks(DisasContext *s) do { unsigned regno = ctz32(mask); tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]); - tcg_temp_free(s->writeback[regno]); mask &= mask - 1; } while (mask); } @@ -292,11 +264,7 @@ static void gen_jmp(DisasContext *s, TCGv dest) static void gen_raise_exception(int nr) { - TCGv_i32 tmp; - - tmp = tcg_const_i32(nr); - gen_helper_raise_exception(cpu_env, tmp); - tcg_temp_free_i32(tmp); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(nr)); } static void gen_raise_exception_format2(DisasContext *s, int nr, @@ -336,23 +304,14 @@ static inline void gen_addr_fault(DisasContext *s) static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr, int sign, int index) { - TCGv tmp; - tmp = tcg_temp_new_i32(); - switch(opsize) { + TCGv tmp = tcg_temp_new_i32(); + + switch (opsize) { case OS_BYTE: - if (sign) - tcg_gen_qemu_ld8s(tmp, addr, index); - else - tcg_gen_qemu_ld8u(tmp, addr, index); - break; case OS_WORD: - if (sign) - tcg_gen_qemu_ld16s(tmp, addr, index); - else - tcg_gen_qemu_ld16u(tmp, addr, index); - break; case OS_LONG: - tcg_gen_qemu_ld32u(tmp, addr, index); + tcg_gen_qemu_ld_tl(tmp, addr, index, + opsize | (sign ? 
MO_SIGN : 0) | MO_TE); break; default: g_assert_not_reached(); @@ -364,15 +323,11 @@ static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr, static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val, int index) { - switch(opsize) { + switch (opsize) { case OS_BYTE: - tcg_gen_qemu_st8(val, addr, index); - break; case OS_WORD: - tcg_gen_qemu_st16(val, addr, index); - break; case OS_LONG: - tcg_gen_qemu_st32(val, addr, index); + tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE); break; default: g_assert_not_reached(); @@ -396,8 +351,7 @@ static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val, gen_store(s, opsize, addr, val, index); return store_dummy; } else { - return mark_to_release(s, gen_load(s, opsize, addr, - what == EA_LOADS, index)); + return gen_load(s, opsize, addr, what == EA_LOADS, index); } } @@ -491,7 +445,7 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base) } else { bd = 0; } - tmp = mark_to_release(s, tcg_temp_new()); + tmp = tcg_temp_new(); if ((ext & 0x44) == 0) { /* pre-index */ add = gen_addr_index(s, ext, tmp); @@ -501,7 +455,7 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base) if ((ext & 0x80) == 0) { /* base not suppressed */ if (IS_NULL_QREG(base)) { - base = mark_to_release(s, tcg_const_i32(offset + bd)); + base = tcg_constant_i32(offset + bd); bd = 0; } if (!IS_NULL_QREG(add)) { @@ -517,11 +471,11 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base) add = tmp; } } else { - add = mark_to_release(s, tcg_const_i32(bd)); + add = tcg_constant_i32(bd); } if ((ext & 3) != 0) { /* memory indirect */ - base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s))); + base = gen_load(s, OS_LONG, add, 0, IS_USER(s)); if ((ext & 0x44) == 4) { add = gen_addr_index(s, ext, tmp); tcg_gen_add_i32(tmp, add, base); @@ -546,7 +500,7 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base) } } else { /* brief extension word format */ - tmp = mark_to_release(s, tcg_temp_new()); + tmp = tcg_temp_new(); add = gen_addr_index(s, ext, tmp); if (!IS_NULL_QREG(base)) { tcg_gen_add_i32(tmp, add, base); @@ -609,9 +563,7 @@ static void gen_flush_flags(DisasContext *s) gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1); tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V); tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0); - tcg_temp_free(t0); tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V); - tcg_temp_free(t1); break; case CC_OP_SUBB: @@ -626,9 +578,7 @@ static void gen_flush_flags(DisasContext *s) gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1); tcg_gen_xor_i32(t1, QREG_CC_N, t0); tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0); - tcg_temp_free(t0); tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1); - tcg_temp_free(t1); break; case CC_OP_CMPB: @@ -642,7 +592,6 @@ static void gen_flush_flags(DisasContext *s) tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N); tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N); tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0); - tcg_temp_free(t0); tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z); break; @@ -658,9 +607,7 @@ static void gen_flush_flags(DisasContext *s) break; default: - t0 = tcg_const_i32(s->cc_op); - gen_helper_flush_flags(cpu_env, t0); - tcg_temp_free(t0); + gen_helper_flush_flags(cpu_env, tcg_constant_i32(s->cc_op)); s->cc_op_synced = 1; break; } @@ -676,7 +623,7 @@ static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign) if (opsize == OS_LONG) { tmp = val; } else { - tmp = mark_to_release(s, tcg_temp_new()); + tmp = tcg_temp_new(); gen_ext(tmp, val, 
opsize, sign); } @@ -756,14 +703,12 @@ static void gen_partset_reg(int opsize, TCGv reg, TCGv val) tmp = tcg_temp_new(); tcg_gen_ext8u_i32(tmp, val); tcg_gen_or_i32(reg, reg, tmp); - tcg_temp_free(tmp); break; case OS_WORD: tcg_gen_andi_i32(reg, reg, 0xffff0000); tmp = tcg_temp_new(); tcg_gen_ext16u_i32(tmp, val); tcg_gen_or_i32(reg, reg, tmp); - tcg_temp_free(tmp); break; case OS_LONG: case OS_SINGLE: @@ -802,7 +747,7 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s, return NULL_QREG; } reg = get_areg(s, reg0); - tmp = mark_to_release(s, tcg_temp_new()); + tmp = tcg_temp_new(); if (reg0 == 7 && opsize == OS_BYTE && m68k_feature(s->env, M68K_FEATURE_M68K)) { tcg_gen_subi_i32(tmp, reg, 2); @@ -812,7 +757,7 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s, return tmp; case 5: /* Indirect displacement. */ reg = get_areg(s, reg0); - tmp = mark_to_release(s, tcg_temp_new()); + tmp = tcg_temp_new(); ext = read_im16(env, s); tcg_gen_addi_i32(tmp, reg, (int16_t)ext); return tmp; @@ -823,14 +768,14 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s, switch (reg0) { case 0: /* Absolute short. */ offset = (int16_t)read_im16(env, s); - return mark_to_release(s, tcg_const_i32(offset)); + return tcg_constant_i32(offset); case 1: /* Absolute long. */ offset = read_im32(env, s); - return mark_to_release(s, tcg_const_i32(offset)); + return tcg_constant_i32(offset); case 2: /* pc displacement */ offset = s->pc; offset += (int16_t)read_im16(env, s); - return mark_to_release(s, tcg_const_i32(offset)); + return tcg_constant_i32(offset); case 3: /* pc index+displacement. */ return gen_lea_indexed(env, s, NULL_QREG); case 4: /* Immediate. */ @@ -958,7 +903,7 @@ static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0, default: g_assert_not_reached(); } - return mark_to_release(s, tcg_const_i32(offset)); + return tcg_constant_i32(offset); default: return NULL_QREG; } @@ -997,12 +942,10 @@ static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src) t32 = tcg_temp_new(); tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper)); tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper)); - tcg_temp_free(t32); t64 = tcg_temp_new_i64(); tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower)); tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower)); - tcg_temp_free_i64(t64); } static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, @@ -1015,23 +958,17 @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, tmp = tcg_temp_new(); switch (opsize) { case OS_BYTE: - tcg_gen_qemu_ld8s(tmp, addr, index); - gen_helper_exts32(cpu_env, fp, tmp); - break; case OS_WORD: - tcg_gen_qemu_ld16s(tmp, addr, index); - gen_helper_exts32(cpu_env, fp, tmp); - break; case OS_LONG: - tcg_gen_qemu_ld32u(tmp, addr, index); + tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE); gen_helper_exts32(cpu_env, fp, tmp); break; case OS_SINGLE: - tcg_gen_qemu_ld32u(tmp, addr, index); + tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL); gen_helper_extf32(cpu_env, fp, tmp); break; case OS_DOUBLE: - tcg_gen_qemu_ld64(t64, addr, index); + tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ); gen_helper_extf64(cpu_env, fp, t64); break; case OS_EXTENDED: @@ -1039,11 +976,11 @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); break; } - tcg_gen_qemu_ld32u(tmp, addr, index); + tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL); tcg_gen_shri_i32(tmp, tmp, 16); tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, 
l.upper)); tcg_gen_addi_i32(tmp, addr, 4); - tcg_gen_qemu_ld64(t64, tmp, index); + tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ); tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower)); break; case OS_PACKED: @@ -1056,8 +993,6 @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, default: g_assert_not_reached(); } - tcg_temp_free(tmp); - tcg_temp_free_i64(t64); } static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, @@ -1070,24 +1005,18 @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, tmp = tcg_temp_new(); switch (opsize) { case OS_BYTE: - gen_helper_reds32(tmp, cpu_env, fp); - tcg_gen_qemu_st8(tmp, addr, index); - break; case OS_WORD: - gen_helper_reds32(tmp, cpu_env, fp); - tcg_gen_qemu_st16(tmp, addr, index); - break; case OS_LONG: gen_helper_reds32(tmp, cpu_env, fp); - tcg_gen_qemu_st32(tmp, addr, index); + tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE); break; case OS_SINGLE: gen_helper_redf32(tmp, cpu_env, fp); - tcg_gen_qemu_st32(tmp, addr, index); + tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL); break; case OS_DOUBLE: gen_helper_redf64(t64, cpu_env, fp); - tcg_gen_qemu_st64(t64, addr, index); + tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ); break; case OS_EXTENDED: if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) { @@ -1096,10 +1025,10 @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, } tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper)); tcg_gen_shli_i32(tmp, tmp, 16); - tcg_gen_qemu_st32(tmp, addr, index); + tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL); tcg_gen_addi_i32(tmp, addr, 4); tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower)); - tcg_gen_qemu_st64(t64, tmp, index); + tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ); break; case OS_PACKED: /* @@ -1111,8 +1040,6 @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, default: g_assert_not_reached(); } - tcg_temp_free(tmp); - tcg_temp_free_i64(t64); } static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr, @@ -1168,7 +1095,6 @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode, default: g_assert_not_reached(); } - tcg_temp_free(tmp); } return 0; case 1: /* Address register direct. 
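
Note on the MemOp conversion in the load/store hunks above: the per-size tcg_gen_qemu_ld8s/ld16s/ld32u calls collapse into a single tcg_gen_qemu_ld_tl whose MemOp is built as "opsize | (sign ? MO_SIGN : 0) | MO_TE". That composition only works because OS_BYTE/OS_WORD/OS_LONG coincide with the MemOp size codes, which is exactly what the diff relies on. A minimal sketch of the packing; the enum values below are illustrative stand-ins, not QEMU's actual memop header:

#include <stdio.h>

/* Illustrative MemOp-style encoding: size code in the low bits,
 * sign-extension and endianness as flag bits (values assumed). */
enum {
    MO_8 = 0, MO_16 = 1, MO_32 = 2,  /* log2 of the access size */
    MO_SIZE = 3,                      /* mask for the size code */
    MO_SIGN = 4,                      /* sign-extend the loaded value */
    MO_BE   = 8,                      /* big-endian access */
};

/* m68k operand sizes line up with the size codes. */
enum { OS_BYTE = 0, OS_WORD = 1, OS_LONG = 2 };

int main(void)
{
    int opsize = OS_WORD, sign = 1;
    int memop = opsize | (sign ? MO_SIGN : 0) | MO_BE;

    printf("bytes=%d signed=%d big-endian=%d\n",
           1 << (memop & MO_SIZE), !!(memop & MO_SIGN), !!(memop & MO_BE));
    return 0;
}
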
*/ @@ -1212,41 +1138,34 @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode, } switch (opsize) { case OS_BYTE: - tmp = tcg_const_i32((int8_t)read_im8(env, s)); + tmp = tcg_constant_i32((int8_t)read_im8(env, s)); gen_helper_exts32(cpu_env, fp, tmp); - tcg_temp_free(tmp); break; case OS_WORD: - tmp = tcg_const_i32((int16_t)read_im16(env, s)); + tmp = tcg_constant_i32((int16_t)read_im16(env, s)); gen_helper_exts32(cpu_env, fp, tmp); - tcg_temp_free(tmp); break; case OS_LONG: - tmp = tcg_const_i32(read_im32(env, s)); + tmp = tcg_constant_i32(read_im32(env, s)); gen_helper_exts32(cpu_env, fp, tmp); - tcg_temp_free(tmp); break; case OS_SINGLE: - tmp = tcg_const_i32(read_im32(env, s)); + tmp = tcg_constant_i32(read_im32(env, s)); gen_helper_extf32(cpu_env, fp, tmp); - tcg_temp_free(tmp); break; case OS_DOUBLE: - t64 = tcg_const_i64(read_im64(env, s)); + t64 = tcg_constant_i64(read_im64(env, s)); gen_helper_extf64(cpu_env, fp, t64); - tcg_temp_free_i64(t64); break; case OS_EXTENDED: if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) { gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); break; } - tmp = tcg_const_i32(read_im32(env, s) >> 16); + tmp = tcg_constant_i32(read_im32(env, s) >> 16); tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper)); - tcg_temp_free(tmp); - t64 = tcg_const_i64(read_im64(env, s)); + t64 = tcg_constant_i64(read_im64(env, s)); tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower)); - tcg_temp_free_i64(t64); break; case OS_PACKED: /* @@ -1276,8 +1195,6 @@ static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn, typedef struct { TCGCond tcond; - bool g1; - bool g2; TCGv v1; TCGv v2; } DisasCompare; @@ -1290,7 +1207,6 @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) /* The CC_OP_CMP form can handle most normal comparisons directly. 
*/ if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) { - c->g1 = c->g2 = 1; c->v1 = QREG_CC_N; c->v2 = QREG_CC_V; switch (cond) { @@ -1308,8 +1224,7 @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) goto done; case 10: /* PL */ case 11: /* MI */ - c->g1 = c->g2 = 0; - c->v2 = tcg_const_i32(0); + c->v2 = tcg_constant_i32(0); c->v1 = tmp = tcg_temp_new(); tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V); gen_ext(tmp, tmp, op - CC_OP_CMPB, 1); @@ -1325,9 +1240,7 @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) } } - c->g1 = 1; - c->g2 = 0; - c->v2 = tcg_const_i32(0); + c->v2 = tcg_constant_i32(0); switch (cond) { case 0: /* T */ @@ -1410,7 +1323,6 @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) case 2: /* HI (!C && !Z) -> !(C || Z)*/ case 3: /* LS (C || Z) */ c->v1 = tmp = tcg_temp_new(); - c->g1 = 0; tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2); tcg_gen_or_i32(tmp, tmp, QREG_CC_C); tcond = TCG_COND_NE; @@ -1438,20 +1350,17 @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) case 12: /* GE (!(N ^ V)) */ case 13: /* LT (N ^ V) */ c->v1 = tmp = tcg_temp_new(); - c->g1 = 0; tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V); tcond = TCG_COND_LT; break; case 14: /* GT (!(Z || (N ^ V))) */ case 15: /* LE (Z || (N ^ V)) */ c->v1 = tmp = tcg_temp_new(); - c->g1 = 0; tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2); tcg_gen_neg_i32(tmp, tmp); tmp2 = tcg_temp_new(); tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V); tcg_gen_or_i32(tmp, tmp, tmp2); - tcg_temp_free(tmp2); tcond = TCG_COND_LT; break; } @@ -1463,16 +1372,6 @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) c->tcond = tcond; } -static void free_cond(DisasCompare *c) -{ - if (!c->g1) { - tcg_temp_free(c->v1); - } - if (!c->g2) { - tcg_temp_free(c->v2); - } -} - static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1) { DisasCompare c; @@ -1480,7 +1379,6 @@ static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1) gen_cc_cond(&c, s, cond); update_cc_op(s); tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1); - free_cond(&c); } /* Force a TB lookup after an instruction that changes the CPU state. 
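
gen_cc_cond above leans on the lazy flag representation: QREG_CC_Z holds the last result value, so the Z condition is true exactly when it compares equal to zero, and HI/LS reduce to one setcond plus an OR with the carry. The same predicate in plain C, assuming (as in the flushed-flags state) that the carry is kept as 0 or 1:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Z is represented by the last result value: result == 0  <=>  Z set. */
static bool cond_ls(uint32_t cc_z, uint32_t cc_c)
{
    return (cc_z == 0) || cc_c;          /* LS: C || Z */
}

int main(void)
{
    printf("LS after a zero result: %d\n", cond_ls(0, 0));  /* 1 */
    printf("LS after a carry:       %d\n", cond_ls(7, 1));  /* 1 */
    printf("HI is the negation:     %d\n", !cond_ls(7, 0)); /* 1 */
    return 0;
}
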
*/ @@ -1539,11 +1437,9 @@ DISAS_INSN(scc) tmp = tcg_temp_new(); tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2); - free_cond(&c); tcg_gen_neg_i32(tmp, tmp); DEST_EA(env, insn, OS_BYTE, tmp, NULL); - tcg_temp_free(tmp); } DISAS_INSN(dbcc) @@ -1610,7 +1506,6 @@ DISAS_INSN(mulw) tcg_gen_mul_i32(tmp, tmp, src); tcg_gen_mov_i32(reg, tmp); gen_logic_cc(s, tmp, OS_LONG); - tcg_temp_free(tmp); } DISAS_INSN(divw) @@ -1707,8 +1602,8 @@ static void bcd_add(TCGv dest, TCGv src) * = result with some possible exceeding 0x6 */ - t0 = tcg_const_i32(0x066); - tcg_gen_add_i32(t0, t0, src); + t0 = tcg_temp_new(); + tcg_gen_addi_i32(t0, src, 0x066); t1 = tcg_temp_new(); tcg_gen_add_i32(t1, t0, dest); @@ -1741,7 +1636,6 @@ static void bcd_add(TCGv dest, TCGv src) tcg_gen_andi_i32(t0, t0, 0x22); tcg_gen_add_i32(dest, t0, t0); tcg_gen_add_i32(dest, dest, t0); - tcg_temp_free(t0); /* * remove the exceeding 0x6 @@ -1749,7 +1643,6 @@ static void bcd_add(TCGv dest, TCGv src) */ tcg_gen_sub_i32(dest, t1, dest); - tcg_temp_free(t1); } static void bcd_sub(TCGv dest, TCGv src) @@ -1798,13 +1691,10 @@ static void bcd_sub(TCGv dest, TCGv src) tcg_gen_andi_i32(t2, t2, 0x22); tcg_gen_add_i32(t0, t2, t2); tcg_gen_add_i32(t0, t0, t2); - tcg_temp_free(t2); /* return t1 - t0 */ tcg_gen_sub_i32(dest, t1, t0); - tcg_temp_free(t0); - tcg_temp_free(t1); } static void bcd_flags(TCGv val) @@ -1899,14 +1789,13 @@ DISAS_INSN(nbcd) SRC_EA(env, src, OS_BYTE, 0, &addr); - dest = tcg_const_i32(0); + dest = tcg_temp_new(); + tcg_gen_movi_i32(dest, 0); bcd_sub(dest, src); DEST_EA(env, insn, OS_BYTE, dest, &addr); bcd_flags(dest); - - tcg_temp_free(dest); } DISAS_INSN(addsub) @@ -1945,7 +1834,6 @@ DISAS_INSN(addsub) } else { gen_partset_reg(opsize, DREG(insn, 9), dest); } - tcg_temp_free(dest); } /* Reverse the order of the bits in REG. 
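
The 0x066 constant in bcd_add above is the classic packed-BCD correction: bias every digit by 6 before the binary add, then take the bias back out of each digit that produced no decimal carry (the generated code finds those carries with XORs and removes the bias via the masked multiply-by-3 on 0x22). A plain-C sketch of the same trick for one two-digit byte, using the equivalent shift formulation:

#include <stdio.h>

/* Add two packed-BCD bytes (two digits each); bit 8 is the decimal carry. */
static unsigned bcd_add_byte(unsigned a, unsigned b)
{
    unsigned t = a + 0x066;             /* bias both digits by 6 */
    unsigned s = t + b;                 /* plain binary addition */
    unsigned c = ~(t ^ b ^ s) & 0x110;  /* digits with NO carry out */
    return s - ((c >> 2) | (c >> 3));   /* remove the unused bias (6) */
}

int main(void)
{
    printf("0x45 + 0x38 = 0x%02x\n", bcd_add_byte(0x45, 0x38)); /* 0x83  */
    printf("0x99 + 0x01 = 0x%03x\n", bcd_add_byte(0x99, 0x01)); /* 0x100 */
    return 0;
}
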
*/ @@ -1980,9 +1868,8 @@ DISAS_INSN(bitop_reg) else tcg_gen_andi_i32(src2, DREG(insn, 9), 31); - tmp = tcg_const_i32(1); - tcg_gen_shl_i32(tmp, tmp, src2); - tcg_temp_free(src2); + tmp = tcg_temp_new(); + tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2); tcg_gen_and_i32(QREG_CC_Z, src1, tmp); @@ -2000,11 +1887,9 @@ DISAS_INSN(bitop_reg) default: /* btst */ break; } - tcg_temp_free(tmp); if (op) { DEST_EA(env, insn, opsize, dest, &addr); } - tcg_temp_free(dest); } DISAS_INSN(sats) @@ -2024,7 +1909,6 @@ static void gen_push(DisasContext *s, TCGv val) tcg_gen_subi_i32(tmp, QREG_SP, 4); gen_store(s, OS_LONG, tmp, val, IS_USER(s)); tcg_gen_mov_i32(QREG_SP, tmp); - tcg_temp_free(tmp); } static TCGv mreg(int reg) @@ -2087,7 +1971,7 @@ DISAS_INSN(movem) addr = tcg_temp_new(); tcg_gen_mov_i32(addr, tmp); - incr = tcg_const_i32(opsize_bytes(opsize)); + incr = tcg_constant_i32(opsize_bytes(opsize)); if (is_load) { /* memory to register */ @@ -2100,7 +1984,6 @@ DISAS_INSN(movem) for (i = 0; i < 16; i++) { if (mask & (1 << i)) { tcg_gen_mov_i32(mreg(i), r[i]); - tcg_temp_free(r[i]); } } if (mode == 3) { @@ -2127,7 +2010,6 @@ DISAS_INSN(movem) tmp = tcg_temp_new(); tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr); gen_store(s, opsize, addr, tmp, IS_USER(s)); - tcg_temp_free(tmp); } else { gen_store(s, opsize, addr, mreg(i), IS_USER(s)); } @@ -2143,9 +2025,6 @@ DISAS_INSN(movem) } } } - - tcg_temp_free(incr); - tcg_temp_free(addr); } DISAS_INSN(movep) @@ -2175,22 +2054,20 @@ DISAS_INSN(movep) if (insn & 0x80) { for ( ; i > 0 ; i--) { tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8); - tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s)); + tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB); if (i > 1) { tcg_gen_addi_i32(abuf, abuf, 2); } } } else { for ( ; i > 0 ; i--) { - tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s)); + tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB); tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8); if (i > 1) { tcg_gen_addi_i32(abuf, abuf, 2); } } } - tcg_temp_free(abuf); - tcg_temp_free(dbuf); } DISAS_INSN(bitop_im) @@ -2249,7 +2126,6 @@ DISAS_INSN(bitop_im) break; } DEST_EA(env, insn, opsize, tmp, &addr); - tcg_temp_free(tmp); } } @@ -2272,7 +2148,6 @@ static TCGv gen_get_sr(DisasContext *s) sr = tcg_temp_new(); tcg_gen_andi_i32(sr, QREG_SR, 0xffe0); tcg_gen_or_i32(sr, sr, ccr); - tcg_temp_free(ccr); return sr; } @@ -2332,13 +2207,13 @@ DISAS_INSN(arith_im) opsize = insn_opsize(insn); switch (opsize) { case OS_BYTE: - im = tcg_const_i32((int8_t)read_im8(env, s)); + im = tcg_constant_i32((int8_t)read_im8(env, s)); break; case OS_WORD: - im = tcg_const_i32((int16_t)read_im16(env, s)); + im = tcg_constant_i32((int16_t)read_im16(env, s)); break; case OS_LONG: - im = tcg_const_i32(read_im32(env, s)); + im = tcg_constant_i32(read_im32(env, s)); break; default: g_assert_not_reached(); @@ -2421,8 +2296,6 @@ DISAS_INSN(arith_im) default: abort(); } - tcg_temp_free(im); - tcg_temp_free(dest); } DISAS_INSN(cas) @@ -2478,8 +2351,6 @@ DISAS_INSN(cas) gen_update_cc_cmp(s, load, cmp, opsize); gen_partset_reg(opsize, DREG(ext, 0), load); - tcg_temp_free(load); - switch (extract32(insn, 3, 3)) { case 3: /* Indirect postincrement. 
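
In the cas2w/cas2l translations below, the register numbers become a single tcg_constant_i32: four 3-bit fields packed into one immediate that the helper unpacks again. A sketch of that encoding; pack_regs is a hypothetical illustration of the shifts visible in the diff, not a QEMU function:

#include <stdint.h>
#include <stdio.h>

/* Pack four 3-bit register numbers into one immediate, mirroring
 * REG(ext2,6) | REG(ext1,6) << 3 | REG(ext2,0) << 6 | REG(ext1,0) << 9. */
static uint32_t pack_regs(int a, int b, int c, int d)
{
    return (a & 7) | (b & 7) << 3 | (c & 7) << 6 | (d & 7) << 9;
}

int main(void)
{
    uint32_t regs = pack_regs(2, 3, 4, 5);
    printf("fields: %u %u %u %u\n",
           regs & 7, (regs >> 3) & 7, (regs >> 6) & 7, (regs >> 9) & 7);
    return 0;
}
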
*/ tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize)); @@ -2494,7 +2365,6 @@ DISAS_INSN(cas2w) { uint16_t ext1, ext2; TCGv addr1, addr2; - TCGv regs; /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */ @@ -2526,16 +2396,15 @@ DISAS_INSN(cas2w) * Dc2 = (R2) */ - regs = tcg_const_i32(REG(ext2, 6) | - (REG(ext1, 6) << 3) | - (REG(ext2, 0) << 6) | - (REG(ext1, 0) << 9)); if (tb_cflags(s->base.tb) & CF_PARALLEL) { gen_helper_exit_atomic(cpu_env); } else { + TCGv regs = tcg_constant_i32(REG(ext2, 6) | + (REG(ext1, 6) << 3) | + (REG(ext2, 0) << 6) | + (REG(ext1, 0) << 9)); gen_helper_cas2w(cpu_env, regs, addr1, addr2); } - tcg_temp_free(regs); /* Note that cas2w also assigned to env->cc_op. */ s->cc_op = CC_OP_CMPW; @@ -2577,16 +2446,15 @@ DISAS_INSN(cas2l) * Dc2 = (R2) */ - regs = tcg_const_i32(REG(ext2, 6) | - (REG(ext1, 6) << 3) | - (REG(ext2, 0) << 6) | - (REG(ext1, 0) << 9)); + regs = tcg_constant_i32(REG(ext2, 6) | + (REG(ext1, 6) << 3) | + (REG(ext2, 0) << 6) | + (REG(ext1, 0) << 9)); if (tb_cflags(s->base.tb) & CF_PARALLEL) { gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2); } else { gen_helper_cas2l(cpu_env, regs, addr1, addr2); } - tcg_temp_free(regs); /* Note that cas2l also assigned to env->cc_op. */ s->cc_op = CC_OP_CMPL; @@ -2655,10 +2523,9 @@ DISAS_INSN(negx) * (X, N) = -(src + X); */ - z = tcg_const_i32(0); + z = tcg_constant_i32(0); tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z); tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X); - tcg_temp_free(z); gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1); tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1); @@ -2701,12 +2568,10 @@ DISAS_INSN(clr) int opsize; TCGv zero; - zero = tcg_const_i32(0); - + zero = tcg_constant_i32(0); opsize = insn_opsize(insn); DEST_EA(env, insn, opsize, zero, NULL); gen_logic_cc(s, zero, opsize); - tcg_temp_free(zero); } DISAS_INSN(move_from_ccr) @@ -2732,7 +2597,6 @@ DISAS_INSN(neg) gen_update_cc_add(dest, src1, opsize); tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0); DEST_EA(env, insn, opsize, dest, &addr); - tcg_temp_free(dest); } DISAS_INSN(move_to_ccr) @@ -2767,8 +2631,6 @@ DISAS_INSN(swap) tcg_gen_shli_i32(src1, reg, 16); tcg_gen_shri_i32(src2, reg, 16); tcg_gen_or_i32(reg, src1, src2); - tcg_temp_free(src2); - tcg_temp_free(src1); gen_logic_cc(s, reg, OS_LONG); } @@ -2811,7 +2673,6 @@ DISAS_INSN(ext) else tcg_gen_mov_i32(reg, tmp); gen_logic_cc(s, tmp, OS_LONG); - tcg_temp_free(tmp); } DISAS_INSN(tst) @@ -2856,7 +2717,6 @@ DISAS_INSN(tas) tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80), IS_USER(s), MO_SB); gen_logic_cc(s, src1, OS_BYTE); - tcg_temp_free(src1); switch (mode) { case 3: /* Indirect postincrement. 
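
The tas translation above is now a single tcg_gen_atomic_fetch_or_tl with a constant 0x80: test-and-set reads the byte and sets its top bit in one indivisible step, and the old value feeds the condition codes. A C11 sketch of the primitive's semantics (not the TCG call itself):

#include <stdatomic.h>
#include <stdio.h>

/* TAS: atomically set bit 7 of a byte; the old value is returned so
 * the caller can derive N and Z from it. */
static unsigned char tas(_Atomic unsigned char *p)
{
    return atomic_fetch_or(p, 0x80);
}

int main(void)
{
    _Atomic unsigned char lock = 0;
    unsigned char old = tas(&lock);
    printf("old=0x%02x new=0x%02x\n", old, (unsigned char)lock);
    return 0;
}
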
*/ @@ -2945,7 +2805,6 @@ static void gen_link(DisasContext *s, uint16_t insn, int32_t offset) tcg_gen_mov_i32(reg, tmp); } tcg_gen_addi_i32(QREG_SP, tmp, offset); - tcg_temp_free(tmp); } DISAS_INSN(link) @@ -2976,8 +2835,6 @@ DISAS_INSN(unlk) tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s)); tcg_gen_mov_i32(reg, tmp); tcg_gen_addi_i32(QREG_SP, src, 4); - tcg_temp_free(src); - tcg_temp_free(tmp); } #if defined(CONFIG_SOFTMMU) @@ -3017,10 +2874,8 @@ DISAS_INSN(rtr) tcg_gen_addi_i32(sp, QREG_SP, 2); tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s)); tcg_gen_addi_i32(QREG_SP, sp, 4); - tcg_temp_free(sp); gen_set_sr(s, ccr, true); - tcg_temp_free(ccr); gen_jmp(s, tmp); } @@ -3049,7 +2904,7 @@ DISAS_INSN(jump) } if ((insn & 0x40) == 0) { /* jsr */ - gen_push(s, tcg_const_i32(s->pc)); + gen_push(s, tcg_constant_i32(s->pc)); } gen_jmp(s, tmp); } @@ -3074,7 +2929,7 @@ DISAS_INSN(addsubq) if (imm == 0) { imm = 8; } - val = tcg_const_i32(imm); + val = tcg_constant_i32(imm); dest = tcg_temp_new(); tcg_gen_mov_i32(dest, src); if ((insn & 0x38) == 0x08) { @@ -3099,9 +2954,7 @@ DISAS_INSN(addsubq) } gen_update_cc_add(dest, val, opsize); } - tcg_temp_free(val); DEST_EA(env, insn, opsize, dest, &addr); - tcg_temp_free(dest); } DISAS_INSN(branch) @@ -3120,7 +2973,7 @@ DISAS_INSN(branch) } if (op == 1) { /* bsr */ - gen_push(s, tcg_const_i32(s->pc)); + gen_push(s, tcg_constant_i32(s->pc)); } if (op > 1) { /* Bcc */ @@ -3179,7 +3032,6 @@ DISAS_INSN(or) gen_partset_reg(opsize, DREG(insn, 9), dest); } gen_logic_cc(s, dest, opsize); - tcg_temp_free(dest); } DISAS_INSN(suba) @@ -3194,7 +3046,7 @@ DISAS_INSN(suba) static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize) { - TCGv tmp; + TCGv tmp, zero; gen_flush_flags(s); /* compute old Z */ @@ -3203,18 +3055,18 @@ static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize) * (X, N) = dest - (src + X); */ - tmp = tcg_const_i32(0); - tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp); - tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X); + zero = tcg_constant_i32(0); + tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero); + tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X); gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1); tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1); /* Compute signed-overflow for subtract. */ + tmp = tcg_temp_new(); tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest); tcg_gen_xor_i32(tmp, dest, src); tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp); - tcg_temp_free(tmp); /* Copy the rest of the results into place. 
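
gen_subx and gen_addx above compute the X flag with paired add2/sub2 against a shared zero constant: the high word of each double-word operation is exactly the carry or borrow. The arithmetic in plain C, with a 64-bit intermediate standing in for the two-register TCG result (a sketch, not the generated ops):

#include <stdint.h>
#include <stdio.h>

/* (X, N) = dest - (src + X): the borrow appears in bit 32. */
static uint32_t subx(uint32_t dest, uint32_t src, uint32_t *x)
{
    uint64_t t = (uint64_t)src + *x;   /* add2: src + X */
    uint64_t n = (uint64_t)dest - t;   /* sub2: dest - (src + X) */
    *x = (n >> 32) & 1;                /* borrow out, masked to one bit */
    return (uint32_t)n;
}

int main(void)
{
    uint32_t x = 1;
    uint32_t n = subx(5, 5, &x);       /* 5 - (5 + 1): borrows */
    printf("N=0x%08x X=%u\n", n, x);   /* N=0xffffffff X=1 */
    return 0;
}
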
*/ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */ @@ -3262,9 +3114,6 @@ DISAS_INSN(subx_mem) gen_subx(s, src, dest, opsize); gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s)); - - tcg_temp_free(dest); - tcg_temp_free(src); } DISAS_INSN(mov3q) @@ -3273,12 +3122,12 @@ DISAS_INSN(mov3q) int val; val = (insn >> 9) & 7; - if (val == 0) + if (val == 0) { val = -1; - src = tcg_const_i32(val); + } + src = tcg_constant_i32(val); gen_logic_cc(s, src, OS_LONG); DEST_EA(env, insn, OS_LONG, src, NULL); - tcg_temp_free(src); } DISAS_INSN(cmp) @@ -3338,7 +3187,6 @@ DISAS_INSN(eor) tcg_gen_xor_i32(dest, src, DREG(insn, 9)); gen_logic_cc(s, dest, opsize); DEST_EA(env, insn, opsize, dest, &addr); - tcg_temp_free(dest); } static void do_exg(TCGv reg1, TCGv reg2) @@ -3347,7 +3195,6 @@ static void do_exg(TCGv reg1, TCGv reg2) tcg_gen_mov_i32(temp, reg1); tcg_gen_mov_i32(reg1, reg2); tcg_gen_mov_i32(reg2, temp); - tcg_temp_free(temp); } DISAS_INSN(exg_dd) @@ -3390,7 +3237,6 @@ DISAS_INSN(and) gen_partset_reg(opsize, reg, dest); } gen_logic_cc(s, dest, opsize); - tcg_temp_free(dest); } DISAS_INSN(adda) @@ -3405,7 +3251,7 @@ DISAS_INSN(adda) static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize) { - TCGv tmp; + TCGv tmp, zero; gen_flush_flags(s); /* compute old Z */ @@ -3414,17 +3260,17 @@ static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize) * (X, N) = src + dest + X; */ - tmp = tcg_const_i32(0); - tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp); - tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp); + zero = tcg_constant_i32(0); + tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero); + tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero); gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1); /* Compute signed-overflow for addition. */ + tmp = tcg_temp_new(); tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src); tcg_gen_xor_i32(tmp, dest, src); tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp); - tcg_temp_free(tmp); /* Copy the rest of the results into place. */ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */ @@ -3472,9 +3318,6 @@ DISAS_INSN(addx_mem) gen_addx(s, src, dest, opsize); gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s)); - - tcg_temp_free(dest); - tcg_temp_free(src); } static inline void shift_im(DisasContext *s, uint16_t insn, int opsize) @@ -3508,7 +3351,6 @@ static inline void shift_im(DisasContext *s, uint16_t insn, int opsize) tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1); tcg_gen_sari_i32(t0, reg, bits - count - 1); tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0); - tcg_temp_free(t0); } tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V); } @@ -3561,12 +3403,11 @@ static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize) tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64); /* Note that C=0 if shift count is 0, and we get that for free. 
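
shift_reg above performs the variable shift in a 64-bit temporary so the last bit shifted out is never lost: after a left shift, the low half is the result and bit 32 is the carry, which one extr_i64_i32 splits apart; a count of zero yields C=0 for free, exactly as the comment says. The observation in plain C (left-shift case only; the right-shift path needs the movcond shown above):

#include <stdint.h>
#include <stdio.h>

static uint32_t shl_with_carry(uint32_t v, unsigned count /* 0..32 */,
                               uint32_t *c)
{
    uint64_t t = (uint64_t)v << count;  /* widen first: nothing is lost */
    *c = (uint32_t)(t >> 32) & 1;       /* last bit out; 0 when count==0 */
    return (uint32_t)t;
}

int main(void)
{
    uint32_t c;
    uint32_t r = shl_with_carry(0x80000001u, 1, &c);
    printf("r=0x%08x c=%u\n", r, c);    /* r=0x00000002 c=1 */
    return 0;
}
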
*/ } else { - TCGv zero = tcg_const_i32(0); + TCGv zero = tcg_constant_i32(0); tcg_gen_extrl_i64_i32(QREG_CC_N, t64); tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits); tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C, s32, zero, zero, QREG_CC_C); - tcg_temp_free(zero); } tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1); @@ -3584,10 +3425,9 @@ static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize) * V = ((s ^ t) & (-1 << (bits - 1))) != 0 */ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) { - TCGv_i64 tt = tcg_const_i64(32); + TCGv_i64 tt = tcg_constant_i64(32); /* if shift is greater than 32, use 32 */ tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64); - tcg_temp_free_i64(tt); /* Sign extend the input to 64 bits; re-do the shift. */ tcg_gen_ext_i32_i64(t64, reg); tcg_gen_shl_i64(s64, t64, s64); @@ -3619,10 +3459,6 @@ static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize) gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1); tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N); - tcg_temp_free(s32); - tcg_temp_free_i64(s64); - tcg_temp_free_i64(t64); - /* Write back the result. */ gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N); set_cc_op(s, CC_OP_FLAGS); @@ -3770,7 +3606,7 @@ static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size) { TCGv X, shl, shr, shx, sz, zero; - sz = tcg_const_i32(size); + sz = tcg_constant_i32(size); shr = tcg_temp_new(); shl = tcg_temp_new(); @@ -3781,27 +3617,22 @@ static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size) tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */ tcg_gen_subi_i32(shx, shift, 1); /* shx = shift - 1 */ /* shx = shx < 0 ? size : shx; */ - zero = tcg_const_i32(0); + zero = tcg_constant_i32(0); tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx); - tcg_temp_free(zero); } else { tcg_gen_mov_i32(shr, shift); /* shr = shift */ tcg_gen_movi_i32(shl, size + 1); tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */ tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */ } - tcg_temp_free_i32(sz); /* reg = (reg << shl) | (reg >> shr) | (x << shx); */ tcg_gen_shl_i32(shl, reg, shl); tcg_gen_shr_i32(shr, reg, shr); tcg_gen_or_i32(reg, shl, shr); - tcg_temp_free(shl); - tcg_temp_free(shr); tcg_gen_shl_i32(shx, QREG_CC_X, shx); tcg_gen_or_i32(reg, reg, shx); - tcg_temp_free(shx); /* X = (reg >> size) & 1 */ @@ -3835,7 +3666,6 @@ static TCGv rotate32_x(TCGv reg, TCGv shift, int left) /* rotate */ tcg_gen_rotl_i64(t0, t0, shift64); - tcg_temp_free_i64(shift64); /* result is [reg:..:reg:X] */ @@ -3849,7 +3679,6 @@ static TCGv rotate32_x(TCGv reg, TCGv shift, int left) tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X); tcg_gen_rotr_i64(t0, t0, shift64); - tcg_temp_free_i64(shift64); /* result is value: [X:reg:..:reg] */ @@ -3863,17 +3692,13 @@ static TCGv rotate32_x(TCGv reg, TCGv shift, int left) tcg_gen_shli_i32(hi, hi, 1); } - tcg_temp_free_i64(t0); tcg_gen_or_i32(lo, lo, hi); - tcg_temp_free(hi); /* if shift == 0, register and X are not affected */ - zero = tcg_const_i32(0); + zero = tcg_constant_i32(0); tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X); tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo); - tcg_temp_free(zero); - tcg_temp_free(lo); return X; } @@ -3889,15 +3714,13 @@ DISAS_INSN(rotate_im) tmp = 8; } - shift = tcg_const_i32(tmp); + shift = tcg_constant_i32(tmp); if (insn & 8) { rotate(DREG(insn, 0), shift, left, 32); } else { TCGv X = rotate32_x(DREG(insn, 0), shift, left); rotate_x_flags(DREG(insn, 0), X, 32); - tcg_temp_free(X); } - tcg_temp_free(shift); set_cc_op(s, 
CC_OP_FLAGS); } @@ -3916,15 +3739,13 @@ DISAS_INSN(rotate8_im) tmp = 8; } - shift = tcg_const_i32(tmp); + shift = tcg_constant_i32(tmp); if (insn & 8) { rotate(reg, shift, left, 8); } else { TCGv X = rotate_x(reg, shift, left, 8); rotate_x_flags(reg, X, 8); - tcg_temp_free(X); } - tcg_temp_free(shift); gen_partset_reg(OS_BYTE, DREG(insn, 0), reg); set_cc_op(s, CC_OP_FLAGS); } @@ -3942,15 +3763,13 @@ DISAS_INSN(rotate16_im) tmp = 8; } - shift = tcg_const_i32(tmp); + shift = tcg_constant_i32(tmp); if (insn & 8) { rotate(reg, shift, left, 16); } else { TCGv X = rotate_x(reg, shift, left, 16); rotate_x_flags(reg, X, 16); - tcg_temp_free(X); } - tcg_temp_free(shift); gen_partset_reg(OS_WORD, DREG(insn, 0), reg); set_cc_op(s, CC_OP_FLAGS); } @@ -3982,10 +3801,7 @@ DISAS_INSN(rotate_reg) tcg_gen_remu_i32(t1, t0, t1); X = rotate32_x(DREG(insn, 0), t1, left); rotate_x_flags(DREG(insn, 0), X, 32); - tcg_temp_free(X); } - tcg_temp_free(t1); - tcg_temp_free(t0); set_cc_op(s, CC_OP_FLAGS); } @@ -4016,10 +3832,7 @@ DISAS_INSN(rotate8_reg) tcg_gen_remu_i32(t1, t0, t1); X = rotate_x(reg, t1, left, 8); rotate_x_flags(reg, X, 8); - tcg_temp_free(X); } - tcg_temp_free(t1); - tcg_temp_free(t0); gen_partset_reg(OS_BYTE, DREG(insn, 0), reg); set_cc_op(s, CC_OP_FLAGS); } @@ -4051,10 +3864,7 @@ DISAS_INSN(rotate16_reg) tcg_gen_remu_i32(t1, t0, t1); X = rotate_x(reg, t1, left, 16); rotate_x_flags(reg, X, 16); - tcg_temp_free(X); } - tcg_temp_free(t1); - tcg_temp_free(t0); gen_partset_reg(OS_WORD, DREG(insn, 0), reg); set_cc_op(s, CC_OP_FLAGS); } @@ -4068,15 +3878,13 @@ DISAS_INSN(rotate_mem) SRC_EA(env, src, OS_WORD, 0, &addr); - shift = tcg_const_i32(1); + shift = tcg_constant_i32(1); if (insn & 0x0200) { rotate(src, shift, left, 16); } else { TCGv X = rotate_x(src, shift, left, 16); rotate_x_flags(src, X, 16); - tcg_temp_free(X); } - tcg_temp_free(shift); DEST_EA(env, insn, OS_WORD, src, &addr); set_cc_op(s, CC_OP_FLAGS); } @@ -4117,7 +3925,6 @@ DISAS_INSN(bfext_reg) } else { tcg_gen_shr_i32(dst, tmp, shift); } - tcg_temp_free(shift); } else { /* Immediate width. */ if (ext & 0x800) { @@ -4146,7 +3953,6 @@ DISAS_INSN(bfext_reg) } } - tcg_temp_free(tmp); set_cc_op(s, CC_OP_LOGIC); } @@ -4166,12 +3972,12 @@ DISAS_INSN(bfext_mem) if (ext & 0x20) { len = DREG(ext, 0); } else { - len = tcg_const_i32(extract32(ext, 0, 5)); + len = tcg_constant_i32(extract32(ext, 0, 5)); } if (ext & 0x800) { ofs = DREG(ext, 6); } else { - ofs = tcg_const_i32(extract32(ext, 6, 5)); + ofs = tcg_constant_i32(extract32(ext, 6, 5)); } if (is_sign) { @@ -4181,16 +3987,8 @@ DISAS_INSN(bfext_mem) TCGv_i64 tmp = tcg_temp_new_i64(); gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len); tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp); - tcg_temp_free_i64(tmp); } set_cc_op(s, CC_OP_LOGIC); - - if (!(ext & 0x20)) { - tcg_temp_free(len); - } - if (!(ext & 0x800)) { - tcg_temp_free(ofs); - } } DISAS_INSN(bfop_reg) @@ -4199,14 +3997,8 @@ DISAS_INSN(bfop_reg) TCGv src = DREG(insn, 0); int len = ((extract32(ext, 0, 5) - 1) & 31) + 1; int ofs = extract32(ext, 6, 5); /* big bit-endian */ - TCGv mask, tofs, tlen; - - tofs = NULL; - tlen = NULL; - if ((insn & 0x0f00) == 0x0d00) { /* bfffo */ - tofs = tcg_temp_new(); - tlen = tcg_temp_new(); - } + TCGv mask, tofs = NULL, tlen = NULL; + bool is_bfffo = (insn & 0x0f00) == 0x0d00; if ((ext & 0x820) == 0) { /* Immediate width and offset. 
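
The mask arithmetic in the bfop_reg hunk below is unchanged by the tcg_constant conversion and is worth unpacking: maski = 0x7fffffff >> (len - 1) has zeros over a len-bit field at the top of the word, the source is rotated so the field sits at bit 31, and ror32(maski, ofs) swings the mask back over the field's original (big-endian) position. A worked example in plain C, with ror32 redefined locally for the sketch:

#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t v, unsigned n)
{
    n &= 31;
    return n ? (v >> n) | (v << (32 - n)) : v;
}

int main(void)
{
    int len = 8, ofs = 4;                       /* big-endian bit offset */
    uint32_t maski = 0x7fffffffu >> (len - 1);  /* zeros over top len bits */
    uint32_t mask  = ror32(maski, ofs);         /* zeros over the field */

    printf("~maski = 0x%08x (field at the top)\n", ~maski);  /* 0xff000000 */
    printf("mask   = 0x%08x (field in place)\n",  mask);     /* 0xf00fffff */
    return 0;
}
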
*/ @@ -4217,48 +4009,51 @@ DISAS_INSN(bfop_reg) tcg_gen_rotli_i32(QREG_CC_N, src, ofs); } tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski); - mask = tcg_const_i32(ror32(maski, ofs)); - if (tofs) { - tcg_gen_movi_i32(tofs, ofs); - tcg_gen_movi_i32(tlen, len); + + mask = tcg_constant_i32(ror32(maski, ofs)); + if (is_bfffo) { + tofs = tcg_constant_i32(ofs); + tlen = tcg_constant_i32(len); } } else { TCGv tmp = tcg_temp_new(); + + mask = tcg_temp_new(); if (ext & 0x20) { /* Variable width */ tcg_gen_subi_i32(tmp, DREG(ext, 0), 1); tcg_gen_andi_i32(tmp, tmp, 31); - mask = tcg_const_i32(0x7fffffffu); - tcg_gen_shr_i32(mask, mask, tmp); - if (tlen) { + tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp); + if (is_bfffo) { + tlen = tcg_temp_new(); tcg_gen_addi_i32(tlen, tmp, 1); } } else { /* Immediate width */ - mask = tcg_const_i32(0x7fffffffu >> (len - 1)); - if (tlen) { - tcg_gen_movi_i32(tlen, len); + tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1)); + if (is_bfffo) { + tlen = tcg_constant_i32(len); } } + if (ext & 0x800) { /* Variable offset */ tcg_gen_andi_i32(tmp, DREG(ext, 6), 31); tcg_gen_rotl_i32(QREG_CC_N, src, tmp); tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask); tcg_gen_rotr_i32(mask, mask, tmp); - if (tofs) { - tcg_gen_mov_i32(tofs, tmp); + if (is_bfffo) { + tofs = tmp; } } else { /* Immediate offset (and variable width) */ tcg_gen_rotli_i32(QREG_CC_N, src, ofs); tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask); tcg_gen_rotri_i32(mask, mask, ofs); - if (tofs) { - tcg_gen_movi_i32(tofs, ofs); + if (is_bfffo) { + tofs = tcg_constant_i32(ofs); } } - tcg_temp_free(tmp); } set_cc_op(s, CC_OP_LOGIC); @@ -4271,8 +4066,6 @@ DISAS_INSN(bfop_reg) break; case 0x0d00: /* bfffo */ gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen); - tcg_temp_free(tlen); - tcg_temp_free(tofs); break; case 0x0e00: /* bfset */ tcg_gen_orc_i32(src, src, mask); @@ -4283,7 +4076,6 @@ DISAS_INSN(bfop_reg) default: g_assert_not_reached(); } - tcg_temp_free(mask); } DISAS_INSN(bfop_mem) @@ -4301,12 +4093,12 @@ DISAS_INSN(bfop_mem) if (ext & 0x20) { len = DREG(ext, 0); } else { - len = tcg_const_i32(extract32(ext, 0, 5)); + len = tcg_constant_i32(extract32(ext, 0, 5)); } if (ext & 0x800) { ofs = DREG(ext, 6); } else { - ofs = tcg_const_i32(extract32(ext, 6, 5)); + ofs = tcg_constant_i32(extract32(ext, 6, 5)); } switch (insn & 0x0f00) { @@ -4320,7 +4112,6 @@ DISAS_INSN(bfop_mem) t64 = tcg_temp_new_i64(); gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len); tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64); - tcg_temp_free_i64(t64); break; case 0x0e00: /* bfset */ gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len); @@ -4332,13 +4123,6 @@ DISAS_INSN(bfop_mem) g_assert_not_reached(); } set_cc_op(s, CC_OP_LOGIC); - - if (!(ext & 0x20)) { - tcg_temp_free(len); - } - if (!(ext & 0x800)) { - tcg_temp_free(ofs); - } } DISAS_INSN(bfins_reg) @@ -4408,11 +4192,7 @@ DISAS_INSN(bfins_reg) tcg_gen_rotr_i32(tmp, tmp, rot); tcg_gen_and_i32(dst, dst, mask); tcg_gen_or_i32(dst, dst, tmp); - - tcg_temp_free(rot); - tcg_temp_free(mask); } - tcg_temp_free(tmp); } DISAS_INSN(bfins_mem) @@ -4430,23 +4210,16 @@ DISAS_INSN(bfins_mem) if (ext & 0x20) { len = DREG(ext, 0); } else { - len = tcg_const_i32(extract32(ext, 0, 5)); + len = tcg_constant_i32(extract32(ext, 0, 5)); } if (ext & 0x800) { ofs = DREG(ext, 6); } else { - ofs = tcg_const_i32(extract32(ext, 6, 5)); + ofs = tcg_constant_i32(extract32(ext, 6, 5)); } gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len); set_cc_op(s, CC_OP_LOGIC); - - if (!(ext & 0x20)) { - 
tcg_temp_free(len); - } - if (!(ext & 0x800)) { - tcg_temp_free(ofs); - } } DISAS_INSN(ff1) @@ -4515,9 +4288,7 @@ DISAS_INSN(chk2) tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize)); bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s)); - tcg_temp_free(addr1); bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s)); - tcg_temp_free(addr2); reg = tcg_temp_new(); if (ext & 0x8000) { @@ -4528,9 +4299,6 @@ DISAS_INSN(chk2) gen_flush_flags(s); gen_helper_chk2(cpu_env, reg, bound1, bound2); - tcg_temp_free(reg); - tcg_temp_free(bound1); - tcg_temp_free(bound2); } static void m68k_copy_line(TCGv dst, TCGv src, int index) @@ -4544,18 +4312,14 @@ static void m68k_copy_line(TCGv dst, TCGv src, int index) t1 = tcg_temp_new_i64(); tcg_gen_andi_i32(addr, src, ~15); - tcg_gen_qemu_ld64(t0, addr, index); + tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ); tcg_gen_addi_i32(addr, addr, 8); - tcg_gen_qemu_ld64(t1, addr, index); + tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ); tcg_gen_andi_i32(addr, dst, ~15); - tcg_gen_qemu_st64(t0, addr, index); + tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ); tcg_gen_addi_i32(addr, addr, 8); - tcg_gen_qemu_st64(t1, addr, index); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free(addr); + tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ); } DISAS_INSN(move16_reg) @@ -4576,7 +4340,6 @@ DISAS_INSN(move16_reg) tcg_gen_mov_i32(tmp, AREG(ext, 12)); tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16); tcg_gen_addi_i32(AREG(ext, 12), tmp, 16); - tcg_temp_free(tmp); } DISAS_INSN(move16_mem) @@ -4585,7 +4348,7 @@ DISAS_INSN(move16_mem) TCGv reg, addr; reg = AREG(insn, 0); - addr = tcg_const_i32(read_im32(env, s)); + addr = tcg_constant_i32(read_im32(env, s)); if ((insn >> 3) & 1) { /* MOVE16 (xxx).L, (Ay) */ @@ -4595,8 +4358,6 @@ DISAS_INSN(move16_mem) m68k_copy_line(addr, reg, index); } - tcg_temp_free(addr); - if (((insn >> 3) & 2) == 0) { /* (Ay)+ */ tcg_gen_addi_i32(reg, reg, 16); @@ -4681,7 +4442,6 @@ DISAS_INSN(moves) } else { gen_partset_reg(opsize, reg, tmp); } - tcg_temp_free(tmp); } switch (extract32(insn, 3, 3)) { case 3: /* Indirect postincrement. 
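
m68k_copy_line above spells MOVE16's cache-line copy as two 64-bit loads followed by two 64-bit stores with explicit MO_TEUQ memops, on addresses masked to 16-byte alignment. The same access pattern as a host-side C sketch (the real code of course goes through the guest-memory API and does the masking itself):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Copy one aligned 16-byte line as two 8-byte moves: loads first,
 * then stores, matching the order in m68k_copy_line. */
static void copy_line(void *dst, const void *src)
{
    uint64_t t0, t1;
    memcpy(&t0, (const char *)src + 0, 8);
    memcpy(&t1, (const char *)src + 8, 8);
    memcpy((char *)dst + 0, &t0, 8);
    memcpy((char *)dst + 8, &t1, 8);
}

int main(void)
{
    char a[16] = "0123456789abcdef", b[16];
    copy_line(b, a);
    printf("%.16s\n", b);
    return 0;
}
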
*/ @@ -4778,14 +4538,14 @@ DISAS_INSN(cf_movec) } else { reg = DREG(ext, 12); } - gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg); + gen_helper_cf_movec_to(cpu_env, tcg_constant_i32(ext & 0xfff), reg); gen_exit_tb(s); } DISAS_INSN(m68k_movec) { uint16_t ext; - TCGv reg; + TCGv reg, creg; if (IS_USER(s)) { gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); @@ -4799,10 +4559,11 @@ DISAS_INSN(m68k_movec) } else { reg = DREG(ext, 12); } + creg = tcg_constant_i32(ext & 0xfff); if (insn & 1) { - gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg); + gen_helper_m68k_movec_to(cpu_env, creg, reg); } else { - gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff)); + gen_helper_m68k_movec_from(reg, cpu_env, creg); } gen_exit_tb(s); } @@ -4853,9 +4614,8 @@ DISAS_INSN(pflush) return; } - opmode = tcg_const_i32((insn >> 3) & 3); + opmode = tcg_constant_i32((insn >> 3) & 3); gen_helper_pflush(cpu_env, AREG(insn, 0), opmode); - tcg_temp_free(opmode); } DISAS_INSN(ptest) @@ -4866,9 +4626,8 @@ DISAS_INSN(ptest) gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); return; } - is_read = tcg_const_i32((insn >> 5) & 1); + is_read = tcg_constant_i32((insn >> 5) & 1); gen_helper_ptest(cpu_env, AREG(insn, 0), is_read); - tcg_temp_free(is_read); } #endif @@ -4914,7 +4673,6 @@ static void do_trapcc(DisasContext *s, DisasCompare *c) s->base.is_jmp = DISAS_NEXT; } } - free_cond(c); } DISAS_INSN(trapcc) @@ -4984,8 +4742,7 @@ static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg) tmp = tcg_temp_new(); gen_load_fcr(s, tmp, reg); - tcg_gen_qemu_st32(tmp, addr, index); - tcg_temp_free(tmp); + tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL); } static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg) @@ -4994,9 +4751,8 @@ static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg) TCGv tmp; tmp = tcg_temp_new(); - tcg_gen_qemu_ld32u(tmp, addr, index); + tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL); gen_store_fcr(s, tmp, reg); - tcg_temp_free(tmp); } @@ -5040,9 +4796,8 @@ static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s, gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); return; } - tmp = tcg_const_i32(read_im32(env, s)); + tmp = tcg_constant_i32(read_im32(env, s)); gen_store_fcr(s, tmp, mask); - tcg_temp_free(tmp); return; } break; @@ -5095,7 +4850,6 @@ static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s, tcg_gen_mov_i32(AREG(insn, 0), addr); } } - tcg_temp_free_i32(addr); } static void gen_op_fmovem(CPUM68KState *env, DisasContext *s, @@ -5156,7 +4910,6 @@ static void gen_op_fmovem(CPUM68KState *env, DisasContext *s, if ((insn & 070) == 030 || (insn & 070) == 040) { tcg_gen_mov_i32(AREG(insn, 0), tmp); } - tcg_temp_free(tmp); } /* @@ -5180,11 +4933,9 @@ DISAS_INSN(fpu) case 2: if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) { /* fmovecr */ - TCGv rom_offset = tcg_const_i32(opmode); + TCGv rom_offset = tcg_constant_i32(opmode); cpu_dest = gen_fp_ptr(REG(ext, 7)); gen_helper_fconst(cpu_env, cpu_dest, rom_offset); - tcg_temp_free_ptr(cpu_dest); - tcg_temp_free(rom_offset); return; } break; @@ -5196,7 +4947,6 @@ DISAS_INSN(fpu) gen_addr_fault(s); } gen_helper_ftst(cpu_env, cpu_src); - tcg_temp_free_ptr(cpu_src); return; case 4: /* fmove to control register. */ case 5: /* fmove from control register. 
*/ @@ -5384,7 +5134,6 @@ DISAS_INSN(fpu) case 0x36: case 0x37: { TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0)); gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src); - tcg_temp_free_ptr(cpu_dest2); } break; case 0x38: /* fcmp */ @@ -5396,9 +5145,7 @@ DISAS_INSN(fpu) default: goto undef; } - tcg_temp_free_ptr(cpu_src); gen_helper_ftst(cpu_env, cpu_dest); - tcg_temp_free_ptr(cpu_dest); return; undef: /* FIXME: Is this right for offset addressing modes? */ @@ -5410,9 +5157,7 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) { TCGv fpsr; - c->g1 = 1; - c->v2 = tcg_const_i32(0); - c->g2 = 0; + c->v2 = tcg_constant_i32(0); /* TODO: Raise BSUN exception. */ fpsr = tcg_temp_new(); gen_load_fcr(s, fpsr, M68K_FPSR); @@ -5425,14 +5170,12 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 1: /* EQual Z */ case 17: /* Signaling EQual Z */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); c->tcond = TCG_COND_NE; break; case 2: /* Ordered Greater Than !(A || Z || N) */ case 18: /* Greater Than !(A || Z || N) */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); c->tcond = TCG_COND_EQ; @@ -5440,7 +5183,6 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 3: /* Ordered Greater than or Equal Z || !(A || N) */ case 19: /* Greater than or Equal Z || !(A || N) */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A)); tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N); @@ -5451,7 +5193,6 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 4: /* Ordered Less Than !(!N || A || Z); */ case 20: /* Less Than !(!N || A || Z); */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N); tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z); c->tcond = TCG_COND_EQ; @@ -5459,7 +5200,6 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 5: /* Ordered Less than or Equal Z || (N && !A) */ case 21: /* Less than or Equal Z || (N && !A) */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A)); tcg_gen_andc_i32(c->v1, fpsr, c->v1); @@ -5469,35 +5209,30 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 6: /* Ordered Greater or Less than !(A || Z) */ case 22: /* Greater or Less than !(A || Z) */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z); c->tcond = TCG_COND_EQ; break; case 7: /* Ordered !A */ case 23: /* Greater, Less or Equal !A */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); c->tcond = TCG_COND_EQ; break; case 8: /* Unordered A */ case 24: /* Not Greater, Less or Equal A */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); c->tcond = TCG_COND_NE; break; case 9: /* Unordered or Equal A || Z */ case 25: /* Not Greater or Less then A || Z */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z); c->tcond = TCG_COND_NE; break; case 10: /* Unordered or Greater Than A || !(N || Z)) */ case 26: /* Not Less or Equal A || !(N || Z)) */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z)); tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N); @@ -5508,7 
+5243,6 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 11: /* Unordered or Greater or Equal A || Z || !N */ case 27: /* Not Less Than A || Z || !N */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N); c->tcond = TCG_COND_NE; @@ -5516,7 +5250,6 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 12: /* Unordered or Less Than A || (N && !Z) */ case 28: /* Not Greater than or Equal A || (N && !Z) */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z)); tcg_gen_andc_i32(c->v1, fpsr, c->v1); @@ -5526,14 +5259,12 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 13: /* Unordered or Less or Equal A || Z || N */ case 29: /* Not Greater Than A || Z || N */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); c->tcond = TCG_COND_NE; break; case 14: /* Not Equal !Z */ case 30: /* Signaling Not Equal !Z */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); c->tcond = TCG_COND_EQ; break; @@ -5543,7 +5274,6 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) c->tcond = TCG_COND_ALWAYS; break; } - tcg_temp_free(fpsr); } static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1) @@ -5553,7 +5283,6 @@ static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1) gen_fcc_cond(&c, s, cond); update_cc_op(s); tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1); - free_cond(&c); } DISAS_INSN(fbcc) @@ -5589,11 +5318,9 @@ DISAS_INSN(fscc) tmp = tcg_temp_new(); tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2); - free_cond(&c); tcg_gen_neg_i32(tmp, tmp); DEST_EA(env, insn, OS_BYTE, tmp, NULL); - tcg_temp_free(tmp); } DISAS_INSN(ftrapcc) @@ -5650,9 +5377,8 @@ DISAS_INSN(fsave) if (m68k_feature(s->env, M68K_FEATURE_M68040)) { /* always write IDLE */ - TCGv idle = tcg_const_i32(0x41000000); + TCGv idle = tcg_constant_i32(0x41000000); DEST_EA(env, insn, OS_LONG, idle, NULL); - tcg_temp_free(idle); } else { disas_undef(env, s, insn); } @@ -5781,7 +5507,7 @@ DISAS_INSN(mac) /* Skip the accumulate if the value is already saturated. */ l1 = gen_new_label(); tmp = tcg_temp_new(); - gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc)); + gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc)); gen_op_jmp_nz32(tmp, l1); } #endif @@ -5792,11 +5518,11 @@ DISAS_INSN(mac) tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp); if (s->env->macsr & MACSR_FI) - gen_helper_macsatf(cpu_env, tcg_const_i32(acc)); + gen_helper_macsatf(cpu_env, tcg_constant_i32(acc)); else if (s->env->macsr & MACSR_SU) - gen_helper_macsats(cpu_env, tcg_const_i32(acc)); + gen_helper_macsats(cpu_env, tcg_constant_i32(acc)); else - gen_helper_macsatu(cpu_env, tcg_const_i32(acc)); + gen_helper_macsatu(cpu_env, tcg_constant_i32(acc)); #if 0 /* Disabled because conditional branches clobber temporary vars. */ @@ -5815,7 +5541,7 @@ DISAS_INSN(mac) /* Skip the accumulate if the value is already saturated. 
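
gen_fcc_cond above reduces each FPU predicate to one or two bit tests on the FPSR condition byte (N, Z, I and the NaN bit the comments call A). Two of the cases in plain C; the bit positions below are assumptions written out for the sketch, not quoted from the headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed FPSR condition-code layout for illustration. */
enum {
    CC_NAN = 1u << 24,   /* "A" in the comments above */
    CC_I   = 1u << 25,
    CC_Z   = 1u << 26,
    CC_N   = 1u << 27,
};

static bool f_ogt(uint32_t fpsr)   /* Ordered Greater Than: !(A || Z || N) */
{
    return (fpsr & (CC_NAN | CC_Z | CC_N)) == 0;
}

static bool f_ueq(uint32_t fpsr)   /* Unordered or Equal: A || Z */
{
    return (fpsr & (CC_NAN | CC_Z)) != 0;
}

int main(void)
{
    printf("OGT on a clean compare: %d\n", f_ogt(0));       /* 1 */
    printf("UEQ after a NaN:        %d\n", f_ueq(CC_NAN));  /* 1 */
    return 0;
}
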
*/ l1 = gen_new_label(); tmp = tcg_temp_new(); - gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc)); + gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc)); gen_op_jmp_nz32(tmp, l1); } #endif @@ -5824,18 +5550,18 @@ DISAS_INSN(mac) else tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp); if (s->env->macsr & MACSR_FI) - gen_helper_macsatf(cpu_env, tcg_const_i32(acc)); + gen_helper_macsatf(cpu_env, tcg_constant_i32(acc)); else if (s->env->macsr & MACSR_SU) - gen_helper_macsats(cpu_env, tcg_const_i32(acc)); + gen_helper_macsats(cpu_env, tcg_constant_i32(acc)); else - gen_helper_macsatu(cpu_env, tcg_const_i32(acc)); + gen_helper_macsatu(cpu_env, tcg_constant_i32(acc)); #if 0 /* Disabled because conditional branches clobber temporary vars. */ if (l1 != -1) gen_set_label(l1); #endif } - gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc)); + gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(acc)); if (insn & 0x30) { TCGv rw; @@ -5852,7 +5578,6 @@ DISAS_INSN(mac) case 4: /* Pre-decrement. */ tcg_gen_mov_i32(AREG(insn, 0), addr); } - tcg_temp_free(loadval); } } @@ -5886,8 +5611,8 @@ DISAS_INSN(move_mac) int src; TCGv dest; src = insn & 3; - dest = tcg_const_i32((insn >> 9) & 3); - gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src)); + dest = tcg_constant_i32((insn >> 9) & 3); + gen_helper_mac_move(cpu_env, dest, tcg_constant_i32(src)); gen_mac_clear_flags(); gen_helper_mac_set_flags(cpu_env, dest); } @@ -5912,7 +5637,7 @@ DISAS_INSN(from_mext) TCGv reg; TCGv acc; reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); - acc = tcg_const_i32((insn & 0x400) ? 2 : 0); + acc = tcg_constant_i32((insn & 0x400) ? 2 : 0); if (s->env->macsr & MACSR_FI) gen_helper_get_mac_extf(reg, cpu_env, acc); else @@ -5926,7 +5651,6 @@ DISAS_INSN(macsr_to_ccr) /* Note that X and C are always cleared. */ tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V); gen_helper_set_ccr(cpu_env, tmp); - tcg_temp_free(tmp); set_cc_op(s, CC_OP_FLAGS); } @@ -5948,7 +5672,7 @@ DISAS_INSN(to_mac) } tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum)); gen_mac_clear_flags(); - gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum)); + gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(accnum)); } DISAS_INSN(to_macsr) @@ -5971,7 +5695,7 @@ DISAS_INSN(to_mext) TCGv val; TCGv acc; SRC_EA(env, val, OS_LONG, 0, NULL); - acc = tcg_const_i32((insn & 0x400) ? 2 : 0); + acc = tcg_constant_i32((insn & 0x400) ? 
2 : 0); if (s->env->macsr & MACSR_FI) gen_helper_set_mac_extf(cpu_env, val, acc); else if (s->env->macsr & MACSR_SU) @@ -6287,7 +6011,6 @@ static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->cc_op_synced = 1; dc->done_mac = 0; dc->writeback_mask = 0; - init_release_array(dc); dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS); /* If architectural single step active, limit to 1 */ @@ -6314,7 +6037,6 @@ static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) opcode_table[insn](env, dc, insn); do_writebacks(dc); - do_release(dc); dc->pc_prev = dc->base.pc_next; dc->base.pc_next = dc->pc; @@ -6393,7 +6115,7 @@ static const TranslatorOps m68k_tr_ops = { .disas_log = m68k_tr_disas_log, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/microblaze/cpu-param.h b/target/microblaze/cpu-param.h index 5e54ea0108..9770b0eb52 100644 --- a/target/microblaze/cpu-param.h +++ b/target/microblaze/cpu-param.h @@ -28,6 +28,5 @@ /* FIXME: MB uses variable pages down to 1K but linux only uses 4k. */ #define TARGET_PAGE_BITS 12 -#define NB_MMU_MODES 3 #endif diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c index a2d2f5c340..03c2c4db1f 100644 --- a/target/microblaze/cpu.c +++ b/target/microblaze/cpu.c @@ -30,6 +30,7 @@ #include "exec/exec-all.h" #include "exec/gdbstub.h" #include "fpu/softfloat-helpers.h" +#include "tcg/tcg.h" static const struct { const char *name; @@ -97,7 +98,8 @@ static void mb_cpu_synchronize_from_tb(CPUState *cs, { MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); - cpu->env.pc = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + cpu->env.pc = tb->pc; cpu->env.iflags = tb->flags & IFLAGS_TB_MASK; } diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h index e541fbb0b3..88324d0bc1 100644 --- a/target/microblaze/cpu.h +++ b/target/microblaze/cpu.h @@ -358,13 +358,13 @@ struct ArchCPU { #ifndef CONFIG_USER_ONLY void mb_cpu_do_interrupt(CPUState *cs); bool mb_cpu_exec_interrupt(CPUState *cs, int int_req); +hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, + MemTxAttrs *attrs); #endif /* !CONFIG_USER_ONLY */ G_NORETURN void mb_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); void mb_cpu_dump_state(CPUState *cpu, FILE *f, int flags); -hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, - MemTxAttrs *attrs); int mb_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int mb_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); int mb_cpu_gdb_read_stack_protect(CPUArchState *cpu, GByteArray *buf, int reg); @@ -394,7 +394,7 @@ void mb_tcg_init(void); #define MMU_NOMMU_IDX 0 #define MMU_KERNEL_IDX 1 #define MMU_USER_IDX 2 -/* See NB_MMU_MODES further up the file. */ +/* See NB_MMU_MODES in cpu-defs.h. 
*/ #include "exec/cpu-all.h" diff --git a/target/microblaze/gdbstub.c b/target/microblaze/gdbstub.c index 8143fcae88..29ac6e9c0f 100644 --- a/target/microblaze/gdbstub.c +++ b/target/microblaze/gdbstub.c @@ -19,7 +19,7 @@ */ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" /* * GDB expects SREGs in the following order: diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c index 974f21eb31..ee0d7b81ad 100644 --- a/target/microblaze/translate.c +++ b/target/microblaze/translate.c @@ -101,10 +101,7 @@ static void t_sync_flags(DisasContext *dc) static void gen_raise_exception(DisasContext *dc, uint32_t index) { - TCGv_i32 tmp = tcg_const_i32(index); - - gen_helper_raise_exception(cpu_env, tmp); - tcg_temp_free_i32(tmp); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(index)); dc->base.is_jmp = DISAS_NORETURN; } @@ -117,9 +114,8 @@ static void gen_raise_exception_sync(DisasContext *dc, uint32_t index) static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec) { - TCGv_i32 tmp = tcg_const_i32(esr_ec); + TCGv_i32 tmp = tcg_constant_i32(esr_ec); tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr)); - tcg_temp_free_i32(tmp); gen_raise_exception_sync(dc, EXCP_HW_EXCP); } @@ -262,11 +258,9 @@ static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects, rd = reg_for_write(dc, arg->rd); ra = reg_for_read(dc, arg->ra); - imm = tcg_const_i32(arg->imm); + imm = tcg_constant_i32(arg->imm); fn(rd, ra, imm); - - tcg_temp_free_i32(imm); return true; } @@ -309,24 +303,19 @@ static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects, /* No input carry, but output carry. */ static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 zero = tcg_constant_i32(0); tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero); - - tcg_temp_free_i32(zero); } /* Input and output carry. */ static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 zero = tcg_constant_i32(0); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero); tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero); - - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(zero); } /* Input carry, but no output carry. 
*/ @@ -361,7 +350,6 @@ static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, inb, 31); tcg_gen_sar_i32(out, ina, tmp); - tcg_temp_free_i32(tmp); } static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) @@ -369,7 +357,6 @@ static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, inb, 31); tcg_gen_shr_i32(out, ina, tmp); - tcg_temp_free_i32(tmp); } static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) @@ -377,7 +364,6 @@ static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, inb, 31); tcg_gen_shl_i32(out, ina, tmp); - tcg_temp_free_i32(tmp); } static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm) @@ -436,7 +422,6 @@ static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina); tcg_gen_sub_i32(out, inb, ina); tcg_gen_deposit_i32(out, out, lt, 31, 1); - tcg_temp_free_i32(lt); } static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) @@ -446,7 +431,6 @@ static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina); tcg_gen_sub_i32(out, inb, ina); tcg_gen_deposit_i32(out, out, lt, 31, 1); - tcg_temp_free_i32(lt); } DO_TYPEA(cmp, false, gen_cmp) @@ -513,21 +497,18 @@ static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_muls2_i32(tmp, out, ina, inb); - tcg_temp_free_i32(tmp); } static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mulu2_i32(tmp, out, ina, inb); - tcg_temp_free_i32(tmp); } static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mulsu2_i32(tmp, out, ina, inb); - tcg_temp_free_i32(tmp); } DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32) @@ -563,15 +544,12 @@ static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) /* Input and output carry. */ static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 zero = tcg_constant_i32(0); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_not_i32(tmp, ina); tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero); tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero); - - tcg_temp_free_i32(zero); - tcg_temp_free_i32(tmp); } /* No input or output carry. 
*/ @@ -588,8 +566,6 @@ static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) tcg_gen_not_i32(nota, ina); tcg_gen_add_i32(out, inb, nota); tcg_gen_add_i32(out, out, cpu_msr_c); - - tcg_temp_free_i32(nota); } DO_TYPEA(rsub, true, gen_rsub) @@ -618,8 +594,6 @@ static void gen_src(TCGv_i32 out, TCGv_i32 ina) tcg_gen_mov_i32(tmp, cpu_msr_c); tcg_gen_andi_i32(cpu_msr_c, ina, 1); tcg_gen_extract2_i32(out, ina, tmp, 1); - - tcg_temp_free_i32(tmp); } static void gen_srl(TCGv_i32 out, TCGv_i32 ina) @@ -659,7 +633,6 @@ static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]); tcg_gen_extu_i32_tl(ret, tmp); - tcg_temp_free_i32(tmp); } else if (ra) { tcg_gen_extu_i32_tl(ret, cpu_R[ra]); } else if (rb) { @@ -683,7 +656,6 @@ static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_addi_i32(tmp, cpu_R[ra], imm); tcg_gen_extu_i32_tl(ret, tmp); - tcg_temp_free_i32(tmp); } else { tcg_gen_movi_tl(ret, (uint32_t)imm); } @@ -772,8 +744,6 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop, #endif tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop); - - tcg_temp_free(addr); return true; } @@ -879,7 +849,6 @@ static bool trans_lwx(DisasContext *dc, arg_typea *arg) tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL); tcg_gen_mov_tl(cpu_res_addr, addr); - tcg_temp_free(addr); if (arg->rd) { tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val); @@ -925,8 +894,6 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop, #endif tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop); - - tcg_temp_free(addr); return true; } @@ -1040,7 +1007,6 @@ static bool trans_swx(DisasContext *dc, arg_typea *arg) * In either case, addr is no longer needed. */ tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail); - tcg_temp_free(addr); /* * Compare the value loaded during lwx with current contents of @@ -1053,7 +1019,6 @@ static bool trans_swx(DisasContext *dc, arg_typea *arg) dc->mem_index, MO_TEUL); tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail); - tcg_temp_free_i32(tval); /* Success */ tcg_gen_movi_i32(cpu_msr_c, 0); @@ -1150,13 +1115,11 @@ static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm, } /* Compute the final destination into btarget. */ - zero = tcg_const_i32(0); - next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4); + zero = tcg_constant_i32(0); + next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4); tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget, reg_for_read(dc, ra), zero, cpu_btarget, next); - tcg_temp_free_i32(zero); - tcg_temp_free_i32(next); return true; } @@ -1261,8 +1224,6 @@ static bool trans_mbar(DisasContext *dc, arg_mbar *arg) /* Sleep. */ if (mbar_imm & 16) { - TCGv_i32 tmp_1; - if (trap_userspace(dc, true)) { /* Sleep is a privileged instruction. 
*/ return true; @@ -1270,11 +1231,9 @@ static bool trans_mbar(DisasContext *dc, arg_mbar *arg) t_sync_flags(dc); - tmp_1 = tcg_const_i32(1); - tcg_gen_st_i32(tmp_1, cpu_env, + tcg_gen_st_i32(tcg_constant_i32(1), cpu_env, -offsetof(MicroBlazeCPU, env) +offsetof(CPUState, halted)); - tcg_temp_free_i32(tmp_1); tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4); @@ -1345,7 +1304,6 @@ static void msr_read(DisasContext *dc, TCGv_i32 d) t = tcg_temp_new_i32(); tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC); tcg_gen_or_i32(d, cpu_msr, t); - tcg_temp_free_i32(t); } static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set) @@ -1438,12 +1396,10 @@ static bool trans_mts(DisasContext *dc, arg_mts *arg) case 0x1004: /* TLBHI */ case 0x1005: /* TLBSX */ { - TCGv_i32 tmp_ext = tcg_const_i32(arg->e); - TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7); + TCGv_i32 tmp_ext = tcg_constant_i32(arg->e); + TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7); gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src); - tcg_temp_free_i32(tmp_reg); - tcg_temp_free_i32(tmp_ext); } break; @@ -1467,7 +1423,6 @@ static bool trans_mfs(DisasContext *dc, arg_mfs *arg) TCGv_i64 t64 = tcg_temp_new_i64(); tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear)); tcg_gen_extrh_i64_i32(dest, t64); - tcg_temp_free_i64(t64); } return true; #ifndef CONFIG_USER_ONLY @@ -1498,7 +1453,6 @@ static bool trans_mfs(DisasContext *dc, arg_mfs *arg) TCGv_i64 t64 = tcg_temp_new_i64(); tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear)); tcg_gen_extrl_i64_i32(dest, t64); - tcg_temp_free_i64(t64); } break; case SR_ESR: @@ -1528,12 +1482,10 @@ static bool trans_mfs(DisasContext *dc, arg_mfs *arg) case 0x1004: /* TLBHI */ case 0x1005: /* TLBSX */ { - TCGv_i32 tmp_ext = tcg_const_i32(arg->e); - TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7); + TCGv_i32 tmp_ext = tcg_constant_i32(arg->e); + TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7); gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg); - tcg_temp_free_i32(tmp_reg); - tcg_temp_free_i32(tmp_ext); } break; #endif @@ -1559,8 +1511,6 @@ static void do_rti(DisasContext *dc) tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM); tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM)); tcg_gen_or_i32(cpu_msr, cpu_msr, tmp); - - tcg_temp_free_i32(tmp); } static void do_rtb(DisasContext *dc) @@ -1571,8 +1521,6 @@ static void do_rtb(DisasContext *dc) tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP)); tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM)); tcg_gen_or_i32(cpu_msr, cpu_msr, tmp); - - tcg_temp_free_i32(tmp); } static void do_rte(DisasContext *dc) @@ -1584,8 +1532,6 @@ static void do_rte(DisasContext *dc) tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM)); tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP)); tcg_gen_or_i32(cpu_msr, cpu_msr, tmp); - - tcg_temp_free_i32(tmp); } /* Insns connected to FSL or AXI stream attached devices. 
*/ @@ -1604,10 +1550,8 @@ static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl) tcg_gen_movi_i32(t_id, imm); } - t_ctrl = tcg_const_i32(ctrl); + t_ctrl = tcg_constant_i32(ctrl); gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl); - tcg_temp_free_i32(t_id); - tcg_temp_free_i32(t_ctrl); return true; } @@ -1636,10 +1580,8 @@ static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl) tcg_gen_movi_i32(t_id, imm); } - t_ctrl = tcg_const_i32(ctrl); + t_ctrl = tcg_constant_i32(ctrl); gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra)); - tcg_temp_free_i32(t_id); - tcg_temp_free_i32(t_ctrl); return true; } @@ -1704,7 +1646,6 @@ static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs) } if (dc->r0) { - tcg_temp_free_i32(dc->r0); dc->r0 = NULL; dc->r0_set = false; } @@ -1849,7 +1790,7 @@ static const TranslatorOps mb_tr_ops = { .disas_log = mb_tr_disas_log, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/mips/cpu-defs.c.inc b/target/mips/cpu-defs.c.inc index 480e60aeec..d45f245a67 100644 --- a/target/mips/cpu-defs.c.inc +++ b/target/mips/cpu-defs.c.inc @@ -332,7 +332,11 @@ const mips_def_t mips_defs[] = (0x1 << CP0C0_AR) | (MMU_TYPE_FMT << CP0C0_MT), .CP0_Config1 = MIPS_CONFIG1, .CP0_Config2 = MIPS_CONFIG2, - .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (1 << CP0C3_VInt), + .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (1 << CP0C3_VInt) | + (1 << CP0C3_M), + .CP0_Config4 = MIPS_CONFIG4 | (1 << CP0C4_M), + .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_NFExists), + .CP0_Config7 = 1 << CP0C7_WII, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, @@ -353,7 +357,11 @@ const mips_def_t mips_defs[] = (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA), .CP0_Config2 = MIPS_CONFIG2, - .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (0 << CP0C3_VInt), + .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (0 << CP0C3_VInt) | + (1 << CP0C3_M), + .CP0_Config4 = MIPS_CONFIG4 | (1 << CP0C4_M), + .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_NFExists), + .CP0_Config7 = 1 << CP0C7_WII, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 4, .SYNCI_Step = 32, @@ -392,6 +400,7 @@ const mips_def_t mips_defs[] = .CP0_Config5_rw_bitmask = (1 << CP0C5_K) | (1 << CP0C5_CV) | (1 << CP0C5_MSAEn) | (1 << CP0C5_UFE) | (1 << CP0C5_FRE) | (1 << CP0C5_UFR), + .CP0_Config7 = 1 << CP0C7_WII, .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 0, .SYNCI_Step = 32, diff --git a/target/mips/cpu-param.h b/target/mips/cpu-param.h index f4c76994ea..594c91a156 100644 --- a/target/mips/cpu-param.h +++ b/target/mips/cpu-param.h @@ -29,6 +29,5 @@ #define TARGET_PAGE_BITS_VARY #define TARGET_PAGE_BITS_MIN 12 #endif -#define NB_MMU_MODES 4 #endif diff --git a/target/mips/cpu.c b/target/mips/cpu.c index 05caf54999..01e0fbe10d 100644 --- a/target/mips/cpu.c +++ b/target/mips/cpu.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" #include "qemu/qemu-print.h" +#include "qemu/error-report.h" #include "qapi/error.h" #include "cpu.h" #include "internal.h" @@ -143,11 +144,13 @@ static bool mips_cpu_has_work(CPUState *cs) /* * Prior to MIPS Release 6 it is implementation dependent if non-enabled * interrupts wake-up the CPU, however most of the implementations only - * check for interrupts that can be taken. 
+ * check for interrupts that can be taken. For pre-release 6 CPUs, + * check for CP0 Config7 'Wait IE ignore' bit. */ if ((cs->interrupt_request & CPU_INTERRUPT_HARD) && cpu_mips_hw_interrupts_pending(env)) { if (cpu_mips_hw_interrupts_enabled(env) || + (env->CP0_Config7 & (1 << CP0C7_WII)) || (env->insn_flags & ISA_MIPS_R6)) { has_work = true; } diff --git a/target/mips/cpu.h b/target/mips/cpu.h index caf2b06911..142c55af47 100644 --- a/target/mips/cpu.h +++ b/target/mips/cpu.h @@ -980,6 +980,7 @@ typedef struct CPUArchState { #define CP0C6_DATAPREF 0 int32_t CP0_Config7; int64_t CP0_Config7_rw_bitmask; +#define CP0C7_WII 31 #define CP0C7_NAPCGEN 2 #define CP0C7_UNIMUEN 1 #define CP0C7_VFPUCGEN 0 diff --git a/target/mips/gdbstub.c b/target/mips/gdbstub.c index f1c2a2cf6d..62d7b72407 100644 --- a/target/mips/gdbstub.c +++ b/target/mips/gdbstub.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "fpu_helper.h" int mips_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) diff --git a/target/mips/sysemu/physaddr.c b/target/mips/sysemu/physaddr.c index 2970df8a09..05990aa5bb 100644 --- a/target/mips/sysemu/physaddr.c +++ b/target/mips/sysemu/physaddr.c @@ -70,8 +70,7 @@ static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx) /* is this AM mapped in current execution mode */ return ((adetlb_mask << am) < 0); default: - assert(0); - return TLBRET_BADADDR; + g_assert_not_reached(); }; } diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c index 96e61170e6..da49a93912 100644 --- a/target/mips/tcg/exception.c +++ b/target/mips/tcg/exception.c @@ -82,7 +82,8 @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; - env->active_tc.PC = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + env->active_tc.PC = tb->pc; env->hflags &= ~MIPS_HFLAG_BMASK; env->hflags |= tb->flags & MIPS_HFLAG_BMASK; } diff --git a/target/mips/tcg/ldst_helper.c b/target/mips/tcg/ldst_helper.c index d0bd0267b2..c1a8380e34 100644 --- a/target/mips/tcg/ldst_helper.c +++ b/target/mips/tcg/ldst_helper.c @@ -248,14 +248,14 @@ void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, target_ulong i; for (i = 0; i < base_reglist; i++) { - cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]], + cpu_stl_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx, GETPC()); addr += 4; } } if (do_r31) { - cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC()); + cpu_stl_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC()); } } diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc index 632895cc9e..211d102cf6 100644 --- a/target/mips/tcg/micromips_translate.c.inc +++ b/target/mips/tcg/micromips_translate.c.inc @@ -704,8 +704,8 @@ static void gen_ldst_multiple(DisasContext *ctx, uint32_t opc, int reglist, gen_base_offset_addr(ctx, t0, base, offset); - t1 = tcg_const_tl(reglist); - t2 = tcg_const_i32(ctx->mem_idx); + t1 = tcg_constant_tl(reglist); + t2 = tcg_constant_i32(ctx->mem_idx); save_cpu_state(ctx, 1); switch (opc) { @@ -724,9 +724,6 @@ static void gen_ldst_multiple(DisasContext *ctx, uint32_t opc, int reglist, break; #endif } - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free_i32(t2); } @@ -980,20 +977,24 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd, 
gen_reserved_instruction(ctx); return; } - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL | + ctx->default_tcg_memop_mask); gen_store_gpr(t1, rd); tcg_gen_movi_tl(t1, 4); gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL | + ctx->default_tcg_memop_mask); gen_store_gpr(t1, rd + 1); break; case SWP: gen_load_gpr(t1, rd); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + ctx->default_tcg_memop_mask); tcg_gen_movi_tl(t1, 4); gen_op_addr_add(ctx, t0, t0, t1); gen_load_gpr(t1, rd + 1); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + ctx->default_tcg_memop_mask); break; #ifdef TARGET_MIPS64 case LDP: @@ -1001,25 +1002,27 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd, gen_reserved_instruction(ctx); return; } - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + ctx->default_tcg_memop_mask); gen_store_gpr(t1, rd); tcg_gen_movi_tl(t1, 8); gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + ctx->default_tcg_memop_mask); gen_store_gpr(t1, rd + 1); break; case SDP: gen_load_gpr(t1, rd); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + ctx->default_tcg_memop_mask); tcg_gen_movi_tl(t1, 8); gen_op_addr_add(ctx, t0, t0, t1); gen_load_gpr(t1, rd + 1); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + ctx->default_tcg_memop_mask); break; #endif } - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs) @@ -1067,7 +1070,6 @@ static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs) gen_load_gpr(t0, rt); gen_mtc0(ctx, t0, rs, (ctx->opcode >> 11) & 0x7); - tcg_temp_free(t0); } break; #endif @@ -1276,7 +1278,6 @@ static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs) * mode. 
*/ ctx->base.is_jmp = DISAS_STOP; - tcg_temp_free(t0); } break; case EI: @@ -1293,7 +1294,6 @@ static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs) */ gen_save_pc(ctx->base.pc_next + 4); ctx->base.is_jmp = DISAS_EXIT; - tcg_temp_free(t0); } break; default: diff --git a/target/mips/tcg/mips16e_translate.c.inc b/target/mips/tcg/mips16e_translate.c.inc index 918b15d55c..5cffe0e412 100644 --- a/target/mips/tcg/mips16e_translate.c.inc +++ b/target/mips/tcg/mips16e_translate.c.inc @@ -172,22 +172,26 @@ static void gen_mips16_save(DisasContext *ctx, case 4: gen_base_offset_addr(ctx, t0, 29, 12); gen_load_gpr(t1, 7); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + ctx->default_tcg_memop_mask); /* Fall through */ case 3: gen_base_offset_addr(ctx, t0, 29, 8); gen_load_gpr(t1, 6); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + ctx->default_tcg_memop_mask); /* Fall through */ case 2: gen_base_offset_addr(ctx, t0, 29, 4); gen_load_gpr(t1, 5); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + ctx->default_tcg_memop_mask); /* Fall through */ case 1: gen_base_offset_addr(ctx, t0, 29, 0); gen_load_gpr(t1, 4); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + ctx->default_tcg_memop_mask); } gen_load_gpr(t0, 29); @@ -196,7 +200,8 @@ static void gen_mips16_save(DisasContext *ctx, tcg_gen_movi_tl(t2, -4); \ gen_op_addr_add(ctx, t0, t0, t2); \ gen_load_gpr(t1, reg); \ - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL); \ + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | \ + ctx->default_tcg_memop_mask); \ } while (0) if (do_ra) { @@ -280,9 +285,6 @@ static void gen_mips16_save(DisasContext *ctx, tcg_gen_movi_tl(t2, -framesize); gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } static void gen_mips16_restore(DisasContext *ctx, @@ -301,7 +303,8 @@ static void gen_mips16_restore(DisasContext *ctx, #define DECR_AND_LOAD(reg) do { \ tcg_gen_movi_tl(t2, -4); \ gen_op_addr_add(ctx, t0, t0, t2); \ - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL); \ + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL | \ + ctx->default_tcg_memop_mask); \ gen_store_gpr(t1, reg); \ } while (0) @@ -386,9 +389,6 @@ static void gen_mips16_restore(DisasContext *ctx, tcg_gen_movi_tl(t2, framesize); gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } #if defined(TARGET_MIPS64) diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c index 736283e2af..29b31d70fe 100644 --- a/target/mips/tcg/msa_helper.c +++ b/target/mips/tcg/msa_helper.c @@ -5333,7 +5333,7 @@ void helper_msa_shf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } msa_move_v(pwd, pwx); } @@ -5368,7 +5368,7 @@ void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \ } \ break; \ default: \ - assert(0); \ + g_assert_not_reached(); \ } \ } @@ -5413,7 +5413,7 @@ void helper_msa_ldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } } @@ -5461,7 +5461,7 @@ void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \ } \ break; \ default: \ - assert(0); \ + g_assert_not_reached(); \ } \ } @@ -5511,7 +5511,7 @@ void helper_msa_ ## helper 
## _df(CPUMIPSState *env, uint32_t df, \ } \ break; \ default: \ - assert(0); \ + g_assert_not_reached(); \ } \ } @@ -5557,7 +5557,7 @@ static inline void msa_sld_df(uint32_t df, wr_t *pwd, } break; default: - assert(0); + g_assert_not_reached(); } } @@ -5632,7 +5632,7 @@ void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \ pwd->d[1] = msa_ ## func ## _df(df, pws->d[1], pwt->d[1]); \ break; \ default: \ - assert(0); \ + g_assert_not_reached(); \ } \ } @@ -5771,7 +5771,7 @@ void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \ pwd->d[1] = msa_ ## func ## _df(df, pwd->d[1], pws->d[1], pwt->d[1]); \ break; \ default: \ - assert(0); \ + g_assert_not_reached(); \ } \ } @@ -5811,7 +5811,7 @@ static inline void msa_splat_df(uint32_t df, wr_t *pwd, } break; default: - assert(0); + g_assert_not_reached(); } } @@ -5869,7 +5869,7 @@ void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \ MSA_LOOP_D; \ break; \ default: \ - assert(0); \ + g_assert_not_reached(); \ } \ msa_move_v(pwd, pwx); \ } @@ -6090,7 +6090,7 @@ void helper_msa_insve_df(CPUMIPSState *env, uint32_t df, uint32_t wd, pwd->d[n] = (int64_t)pws->d[0]; break; default: - assert(0); + g_assert_not_reached(); } } @@ -6150,7 +6150,7 @@ void helper_msa_fill_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } } @@ -6565,7 +6565,7 @@ static inline void compare_af(CPUMIPSState *env, wr_t *pwd, wr_t *pws, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, retaddr); @@ -6596,7 +6596,7 @@ static inline void compare_un(CPUMIPSState *env, wr_t *pwd, wr_t *pws, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, retaddr); @@ -6625,7 +6625,7 @@ static inline void compare_eq(CPUMIPSState *env, wr_t *pwd, wr_t *pws, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, retaddr); @@ -6654,7 +6654,7 @@ static inline void compare_ueq(CPUMIPSState *env, wr_t *pwd, wr_t *pws, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, retaddr); @@ -6683,7 +6683,7 @@ static inline void compare_lt(CPUMIPSState *env, wr_t *pwd, wr_t *pws, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, retaddr); @@ -6712,7 +6712,7 @@ static inline void compare_ult(CPUMIPSState *env, wr_t *pwd, wr_t *pws, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, retaddr); @@ -6741,7 +6741,7 @@ static inline void compare_le(CPUMIPSState *env, wr_t *pwd, wr_t *pws, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, retaddr); @@ -6770,7 +6770,7 @@ static inline void compare_ule(CPUMIPSState *env, wr_t *pwd, wr_t *pws, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, retaddr); @@ -6799,7 +6799,7 @@ static inline void compare_or(CPUMIPSState *env, wr_t *pwd, wr_t *pws, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, retaddr); @@ -6828,7 +6828,7 @@ static inline void compare_une(CPUMIPSState *env, wr_t *pwd, wr_t *pws, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, retaddr); @@ -6857,7 +6857,7 @@ static inline void compare_ne(CPUMIPSState *env, wr_t *pwd, wr_t *pws, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, retaddr); @@ -7107,7 +7107,7 @@ void helper_msa_fadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - 
assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7137,7 +7137,7 @@ void helper_msa_fsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7167,7 +7167,7 @@ void helper_msa_fmul_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7198,7 +7198,7 @@ void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7245,7 +7245,7 @@ void helper_msa_fmadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7280,7 +7280,7 @@ void helper_msa_fmsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7317,7 +7317,7 @@ void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7371,7 +7371,7 @@ void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7417,7 +7417,7 @@ void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7526,7 +7526,7 @@ void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } else { - assert(0); + g_assert_not_reached(); } @@ -7555,7 +7555,7 @@ void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, FMAXMIN_A(min, max, pwx->d[0], pws->d[0], pwt->d[0], 64, status); FMAXMIN_A(min, max, pwx->d[1], pws->d[1], pwt->d[1], 64, status); } else { - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7628,7 +7628,7 @@ void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } else { - assert(0); + g_assert_not_reached(); } @@ -7657,7 +7657,7 @@ void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, FMAXMIN_A(max, min, pwx->d[0], pws->d[0], pwt->d[0], 64, status); FMAXMIN_A(max, min, pwx->d[1], pws->d[1], pwt->d[1], 64, status); } else { - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7681,7 +7681,7 @@ void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df, pwd->d[0] = float_class_d(pws->d[0], status); pwd->d[1] = float_class_d(pws->d[1], status); } else { - assert(0); + g_assert_not_reached(); } } @@ -7723,7 +7723,7 @@ void helper_msa_ftrunc_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7753,7 +7753,7 @@ void helper_msa_ftrunc_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7783,7 +7783,7 @@ void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7832,7 +7832,7 @@ void helper_msa_frsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7862,7 +7862,7 @@ void helper_msa_frcp_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + 
g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7892,7 +7892,7 @@ void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7946,7 +7946,7 @@ void helper_msa_flog2_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -7983,7 +7983,7 @@ void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -8019,7 +8019,7 @@ void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -8046,7 +8046,7 @@ void helper_msa_ffql_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } msa_move_v(pwd, pwx); @@ -8072,7 +8072,7 @@ void helper_msa_ffqr_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } msa_move_v(pwd, pwx); @@ -8100,7 +8100,7 @@ void helper_msa_ftint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -8130,7 +8130,7 @@ void helper_msa_ftint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -8166,7 +8166,7 @@ void helper_msa_ffint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); @@ -8196,7 +8196,7 @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, } break; default: - assert(0); + g_assert_not_reached(); } check_msacsr_cause(env, GETPC()); diff --git a/target/mips/tcg/msa_translate.c b/target/mips/tcg/msa_translate.c index 1bcdbb1121..220cd3b048 100644 --- a/target/mips/tcg/msa_translate.c +++ b/target/mips/tcg/msa_translate.c @@ -217,8 +217,6 @@ static void gen_check_zero_element(TCGv tresult, uint8_t df, uint8_t wt, /* if some bit is non-zero then some element is zero */ tcg_gen_setcondi_i64(cond, t0, t0, 0); tcg_gen_trunc_i64_tl(tresult, t0); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int sa, TCGCond cond) @@ -237,7 +235,6 @@ static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int sa, TCGCond cond) tcg_gen_or_i64(t0, msa_wr_d[wt << 1], msa_wr_d[(wt << 1) + 1]); tcg_gen_setcondi_i64(cond, t0, t0, 0); tcg_gen_trunc_i64_tl(bcond, t0); - tcg_temp_free_i64(t0); ctx->btarget = ctx->base.pc_next + (sa << 2) + 4; @@ -545,8 +542,6 @@ static bool trans_CTCMSA(DisasContext *ctx, arg_msa_elm *a) gen_load_gpr(telm, a->ws); gen_helper_msa_ctcmsa(cpu_env, telm, tcg_constant_i32(a->wd)); - tcg_temp_free(telm); - return true; } @@ -563,8 +558,6 @@ static bool trans_CFCMSA(DisasContext *ctx, arg_msa_elm *a) gen_helper_msa_cfcmsa(telm, cpu_env, tcg_constant_i32(a->ws)); gen_store_gpr(telm, a->wd); - tcg_temp_free(telm); - return true; } @@ -782,8 +775,6 @@ static bool trans_msa_ldst(DisasContext *ctx, arg_msa_i *a, gen_base_offset_addr(ctx, taddr, a->ws, a->sa << a->df); gen_msa_ldst(cpu_env, tcg_constant_i32(a->wd), taddr); - tcg_temp_free(taddr); - return true; } diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c index f52244e1b2..be038b5f07 100644 --- a/target/mips/tcg/mxu_translate.c +++ 
b/target/mips/tcg/mxu_translate.c @@ -513,8 +513,6 @@ static void gen_mxu_s32i2m(DisasContext *ctx) } else if (XRa == 16) { gen_store_mxu_cr(t0); } - - tcg_temp_free(t0); } /* @@ -537,8 +535,6 @@ static void gen_mxu_s32m2i(DisasContext *ctx) } gen_store_gpr(t0, Rb); - - tcg_temp_free(t0); } /* @@ -613,9 +609,6 @@ static void gen_mxu_s8ldd(DisasContext *ctx) } gen_store_mxu_gpr(t0, XRa); - - tcg_temp_free(t0); - tcg_temp_free(t1); } /* @@ -664,11 +657,6 @@ static void gen_mxu_d16mul(DisasContext *ctx) } gen_store_mxu_gpr(t3, XRa); gen_store_mxu_gpr(t2, XRd); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); - tcg_temp_free(t3); } /* @@ -741,11 +729,6 @@ static void gen_mxu_d16mac(DisasContext *ctx) } gen_store_mxu_gpr(t3, XRa); gen_store_mxu_gpr(t2, XRd); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); - tcg_temp_free(t3); } /* @@ -821,15 +804,6 @@ static void gen_mxu_q8mul_q8mulsu(DisasContext *ctx) gen_store_mxu_gpr(t0, XRd); gen_store_mxu_gpr(t1, XRa); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); - tcg_temp_free(t3); - tcg_temp_free(t4); - tcg_temp_free(t5); - tcg_temp_free(t6); - tcg_temp_free(t7); } /* @@ -857,12 +831,10 @@ static void gen_mxu_s32ldd_s32lddr(DisasContext *ctx) tcg_gen_ori_tl(t1, t1, 0xFFFFF000); } tcg_gen_add_tl(t1, t0, t1); - tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, MO_TESL ^ (sel * MO_BSWAP)); + tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, (MO_TESL ^ (sel * MO_BSWAP)) | + ctx->default_tcg_memop_mask); gen_store_mxu_gpr(t1, XRa); - - tcg_temp_free(t0); - tcg_temp_free(t1); } @@ -1101,7 +1073,7 @@ static void gen_mxu_D16MAX_D16MIN(DisasContext *ctx) uint32_t XRx = XRb ? XRb : XRc; /* ...and do half-word-wise max/min with one operand 0 */ TCGv_i32 t0 = tcg_temp_new(); - TCGv_i32 t1 = tcg_const_i32(0); + TCGv_i32 t1 = tcg_constant_i32(0); /* the left half-word first */ tcg_gen_andi_i32(t0, mxu_gpr[XRx - 1], 0xFFFF0000); @@ -1125,9 +1097,6 @@ static void gen_mxu_D16MAX_D16MIN(DisasContext *ctx) tcg_gen_shri_i32(t0, t0, 16); /* finally update the destination */ tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0); - - tcg_temp_free(t1); - tcg_temp_free(t0); } else if (unlikely(XRb == XRc)) { /* both operands same -> just set destination to one of them */ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]); @@ -1161,9 +1130,6 @@ static void gen_mxu_D16MAX_D16MIN(DisasContext *ctx) tcg_gen_shri_i32(t0, t0, 16); /* finally update the destination */ tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0); - - tcg_temp_free(t1); - tcg_temp_free(t0); } } @@ -1198,7 +1164,7 @@ static void gen_mxu_Q8MAX_Q8MIN(DisasContext *ctx) uint32_t XRx = XRb ? 
XRb : XRc; /* ...and do byte-wise max/min with one operand 0 */ TCGv_i32 t0 = tcg_temp_new(); - TCGv_i32 t1 = tcg_const_i32(0); + TCGv_i32 t1 = tcg_constant_i32(0); int32_t i; /* the leftmost byte (byte 3) first */ @@ -1226,9 +1192,6 @@ static void gen_mxu_Q8MAX_Q8MIN(DisasContext *ctx) /* finally update the destination */ tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0); } - - tcg_temp_free(t1); - tcg_temp_free(t0); } else if (unlikely(XRb == XRc)) { /* both operands same -> just set destination to one of them */ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]); @@ -1266,9 +1229,6 @@ static void gen_mxu_Q8MAX_Q8MIN(DisasContext *ctx) /* finally update the destination */ tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0); } - - tcg_temp_free(t1); - tcg_temp_free(t0); } } @@ -1384,9 +1344,6 @@ static void gen_mxu_S32ALNI(DisasContext *ctx) tcg_gen_shri_i32(t1, t1, 24); tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1); - - tcg_temp_free(t1); - tcg_temp_free(t0); } break; case MXU_OPTN3_PTN2: @@ -1410,9 +1367,6 @@ static void gen_mxu_S32ALNI(DisasContext *ctx) tcg_gen_shri_i32(t1, t1, 16); tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1); - - tcg_temp_free(t1); - tcg_temp_free(t0); } break; case MXU_OPTN3_PTN3: @@ -1436,9 +1390,6 @@ static void gen_mxu_S32ALNI(DisasContext *ctx) tcg_gen_shri_i32(t1, t1, 8); tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1); - - tcg_temp_free(t1); - tcg_temp_free(t0); } break; case MXU_OPTN3_PTN4: @@ -1598,7 +1549,6 @@ bool decode_ase_mxu(DisasContext *ctx, uint32_t insn) } gen_set_label(l_exit); - tcg_temp_free(t_mxu_cr); } return true; diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc index 812c111e3c..a98dde0d2e 100644 --- a/target/mips/tcg/nanomips_translate.c.inc +++ b/target/mips/tcg/nanomips_translate.c.inc @@ -998,27 +998,23 @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset, TCGv tmp2 = tcg_temp_new(); gen_base_offset_addr(ctx, taddr, base, offset); - tcg_gen_qemu_ld64(tval, taddr, ctx->mem_idx); + tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, MO_TEUQ | MO_ALIGN); if (cpu_is_bigendian(ctx)) { tcg_gen_extr_i64_tl(tmp2, tmp1, tval); } else { tcg_gen_extr_i64_tl(tmp1, tmp2, tval); } gen_store_gpr(tmp1, reg1); - tcg_temp_free(tmp1); gen_store_gpr(tmp2, reg2); - tcg_temp_free(tmp2); tcg_gen_st_i64(tval, cpu_env, offsetof(CPUMIPSState, llval_wp)); - tcg_temp_free_i64(tval); tcg_gen_st_tl(taddr, cpu_env, offsetof(CPUMIPSState, lladdr)); - tcg_temp_free(taddr); } static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset, uint32_t reg1, uint32_t reg2, bool eva) { - TCGv taddr = tcg_temp_local_new(); - TCGv lladdr = tcg_temp_local_new(); + TCGv taddr = tcg_temp_new(); + TCGv lladdr = tcg_temp_new(); TCGv_i64 tval = tcg_temp_new_i64(); TCGv_i64 llval = tcg_temp_new_i64(); TCGv_i64 val = tcg_temp_new_i64(); @@ -1043,7 +1039,8 @@ static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset, tcg_gen_ld_i64(llval, cpu_env, offsetof(CPUMIPSState, llval_wp)); tcg_gen_atomic_cmpxchg_i64(val, taddr, llval, tval, - eva ? MIPS_HFLAG_UM : ctx->mem_idx, MO_64); + eva ? 
MIPS_HFLAG_UM : ctx->mem_idx, + MO_64 | MO_ALIGN); if (reg1 != 0) { tcg_gen_movi_tl(cpu_gpr[reg1], 1); } @@ -1084,9 +1081,6 @@ static void gen_save(DisasContext *ctx, uint8_t rt, uint8_t count, /* adjust stack pointer */ gen_adjust_sp(ctx, -u); - - tcg_temp_free(t0); - tcg_temp_free(va); } static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count, @@ -1110,9 +1104,6 @@ static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count, /* adjust stack pointer */ gen_adjust_sp(ctx, u); - - tcg_temp_free(t0); - tcg_temp_free(va); } static void gen_compute_branch_nm(DisasContext *ctx, uint32_t opc, @@ -1232,8 +1223,6 @@ static void gen_compute_branch_nm(DisasContext *ctx, uint32_t opc, if (insn_bytes == 2) { ctx->hflags |= MIPS_HFLAG_B16; } - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_pool16c_nanomips_insn(DisasContext *ctx) @@ -1358,7 +1347,6 @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) } break; } - tcg_temp_free(t0); #endif } else { gen_slt(ctx, OPC_SLTU, rd, rs, rt); @@ -1381,10 +1369,6 @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) /* operands of same sign, result different sign */ tcg_gen_setcondi_tl(TCG_COND_LT, t0, t1, 0); gen_store_gpr(t0, rd); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } break; case NM_MUL: @@ -1427,7 +1411,6 @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) gen_load_gpr(t0, rt); gen_mtc0(ctx, t0, rs, extract32(ctx->opcode, 11, 3)); - tcg_temp_free(t0); } break; case NM_D_E_MT_VPE: @@ -1467,8 +1450,6 @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) } break; } - - tcg_temp_free(t0); } break; case NM_FORK: @@ -1480,8 +1461,6 @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) gen_load_gpr(t0, rt); gen_load_gpr(t1, rs); gen_helper_fork(t0, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); } break; case NM_MFTR: @@ -1508,7 +1487,6 @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) gen_load_gpr(t0, rs); gen_helper_yield(t0, cpu_env, t0); gen_store_gpr(t0, rt); - tcg_temp_free(t0); } break; #endif @@ -1557,11 +1535,6 @@ static void gen_pool32axf_1_5_nanomips_insn(DisasContext *ctx, uint32_t opc, gen_reserved_instruction(ctx); break; } - - tcg_temp_free_i32(t0); - - tcg_temp_free(v0_t); - tcg_temp_free(v1_t); } @@ -1682,10 +1655,6 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc, gen_reserved_instruction(ctx); break; } - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(v0_t); } static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc, @@ -1802,8 +1771,6 @@ static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc, gen_reserved_instruction(ctx); break; } - - tcg_temp_free_i32(t0); } static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, @@ -1855,10 +1822,8 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_add_i64(t2, t2, t3); - tcg_temp_free_i64(t3); gen_move_low32(cpu_LO[acc], t2); gen_move_high32(cpu_HI[acc], t2); - tcg_temp_free_i64(t2); } break; case NM_MULT: @@ -1878,8 +1843,6 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, tcg_gen_muls2_i32(t2, t3, t2, t3); tcg_gen_ext_i32_tl(cpu_LO[acc], t2); tcg_gen_ext_i32_tl(cpu_HI[acc], t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } break; case NM_EXTRV_W: @@ -1915,10 
+1878,8 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_add_i64(t2, t2, t3); - tcg_temp_free_i64(t3); gen_move_low32(cpu_LO[acc], t2); gen_move_high32(cpu_HI[acc], t2); - tcg_temp_free_i64(t2); } break; case NM_MULTU: @@ -1938,8 +1899,6 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, tcg_gen_mulu2_i32(t2, t3, t2, t3); tcg_gen_ext_i32_tl(cpu_LO[acc], t2); tcg_gen_ext_i32_tl(cpu_HI[acc], t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } break; case NM_EXTRV_R_W: @@ -1982,10 +1941,8 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_sub_i64(t2, t3, t2); - tcg_temp_free_i64(t3); gen_move_low32(cpu_LO[acc], t2); gen_move_high32(cpu_HI[acc], t2); - tcg_temp_free_i64(t2); } break; case NM_EXTRV_RS_W: @@ -2027,10 +1984,8 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_sub_i64(t2, t3, t2); - tcg_temp_free_i64(t3); gen_move_low32(cpu_LO[acc], t2); gen_move_high32(cpu_HI[acc], t2); - tcg_temp_free_i64(t2); } break; case NM_EXTRV_S_H: @@ -2045,12 +2000,6 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, gen_reserved_instruction(ctx); break; } - - tcg_temp_free(t0); - tcg_temp_free(t1); - - tcg_temp_free(v0_t); - tcg_temp_free(v1_t); } static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc, @@ -2162,7 +2111,6 @@ static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc, gen_load_gpr(tv0, rt); gen_helper_insv(v0_t, cpu_env, v0_t, tv0); gen_store_gpr(v0_t, ret); - tcg_temp_free(tv0); } break; case NM_RADDU_W_QB: @@ -2188,9 +2136,6 @@ static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc, gen_reserved_instruction(ctx); break; } - - tcg_temp_free(v0_t); - tcg_temp_free(t0); } static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc, @@ -2243,8 +2188,6 @@ static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc, gen_reserved_instruction(ctx); break; } - tcg_temp_free(t0); - tcg_temp_free(rs_t); } @@ -2304,7 +2247,6 @@ static void gen_pool32axf_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) gen_store_gpr(t0, rt); /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; - tcg_temp_free(t0); } break; case NM_EI: @@ -2317,7 +2259,6 @@ static void gen_pool32axf_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) gen_store_gpr(t0, rt); /* Stop translation as we may have switched the execution mode */ ctx->base.is_jmp = DISAS_STOP; - tcg_temp_free(t0); } break; case NM_RDPGPR: @@ -2374,7 +2315,7 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, /* Unconditional branch */ } else if (rt == 0 && imm != 0) { /* Treat as NOP */ - goto out; + return; } else { cond = TCG_COND_EQ; } @@ -2384,12 +2325,12 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, check_nms(ctx); if (imm >= 32 && !(ctx->hflags & MIPS_HFLAG_64)) { gen_reserved_instruction(ctx); - goto out; + return; } else if (rt == 0 && opc == NM_BBEQZC) { /* Unconditional branch */ } else if (rt == 0 && opc == NM_BBNEZC) { /* Treat as NOP */ - goto out; + return; } else { tcg_gen_shri_tl(t0, t0, imm); tcg_gen_andi_tl(t0, t0, 1); @@ -2404,7 +2345,7 @@ static void 
gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, case NM_BNEIC: if (rt == 0 && imm == 0) { /* Treat as NOP */ - goto out; + return; } else if (rt == 0 && imm != 0) { /* Unconditional branch */ } else { @@ -2434,7 +2375,7 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, default: MIPS_INVAL("Immediate Value Compact branch"); gen_reserved_instruction(ctx); - goto out; + return; } /* branch completion */ @@ -2455,10 +2396,6 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, gen_goto_tb(ctx, 0, ctx->base.pc_next + 4); } - -out: - tcg_temp_free(t0); - tcg_temp_free(t1); } /* P.BALRSC type nanoMIPS R6 branches: BALRSC and BRSC */ @@ -2488,9 +2425,6 @@ static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs, /* unconditional branch to register */ tcg_gen_mov_tl(cpu_PC, btarget); tcg_gen_lookup_and_goto_ptr(); - - tcg_temp_free(t0); - tcg_temp_free(t1); } /* nanoMIPS Branches */ @@ -2540,14 +2474,12 @@ static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc, gen_load_gpr(tbase, rt); tcg_gen_movi_tl(toffset, offset); gen_op_addr_add(ctx, btarget, tbase, toffset); - tcg_temp_free(tbase); - tcg_temp_free(toffset); } break; default: MIPS_INVAL("Compact branch/jump"); gen_reserved_instruction(ctx); - goto out; + return; } if (bcond_compute == 0) { @@ -2559,7 +2491,7 @@ static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc, default: MIPS_INVAL("Compact branch/jump"); gen_reserved_instruction(ctx); - goto out; + return; } } else { /* Conditional compact branch */ @@ -2620,7 +2552,7 @@ static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc, default: MIPS_INVAL("Compact conditional branch/jump"); gen_reserved_instruction(ctx); - goto out; + return; } /* branch completion */ @@ -2633,10 +2565,6 @@ static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc, gen_goto_tb(ctx, 0, ctx->base.pc_next + 4); } - -out: - tcg_temp_free(t0); - tcg_temp_free(t1); } @@ -2664,15 +2592,12 @@ static void gen_compute_branch_cp1_nm(DisasContext *ctx, uint32_t op, default: MIPS_INVAL("cp1 cond branch"); gen_reserved_instruction(ctx); - goto out; + return; } tcg_gen_trunc_i64_tl(bcond, t0); ctx->btarget = btarget; - -out: - tcg_temp_free_i64(t0); } @@ -2709,59 +2634,56 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) break; default: gen_reserved_instruction(ctx); - goto out; + return; } } gen_op_addr_add(ctx, t0, t0, t1); switch (extract32(ctx->opcode, 7, 4)) { case NM_LBX: - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, - MO_SB); + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_SB); gen_store_gpr(t0, rd); break; case NM_LHX: /*case NM_LHXS:*/ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, - MO_TESW); + MO_TESW | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rd); break; case NM_LWX: /*case NM_LWXS:*/ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, - MO_TESL); + MO_TESL | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rd); break; case NM_LBUX: - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, - MO_UB); + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_UB); gen_store_gpr(t0, rd); break; case NM_LHUX: /*case NM_LHUXS:*/ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, - MO_TEUW); + MO_TEUW | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rd); break; case NM_SBX: check_nms(ctx); gen_load_gpr(t1, rd); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, - MO_8); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_8); break; case NM_SHX: /*case NM_SHXS:*/ check_nms(ctx); gen_load_gpr(t1, rd); tcg_gen_qemu_st_tl(t1, t0, 
ctx->mem_idx, - MO_TEUW); + MO_TEUW | ctx->default_tcg_memop_mask); break; case NM_SWX: /*case NM_SWXS:*/ check_nms(ctx); gen_load_gpr(t1, rd); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, - MO_TEUL); + MO_TEUL | ctx->default_tcg_memop_mask); break; case NM_LWC1X: /*case NM_LWC1XS:*/ @@ -2799,10 +2721,6 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) gen_reserved_instruction(ctx); break; } - -out: - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_pool32f_nanomips_insn(DisasContext *ctx) @@ -3439,21 +3357,19 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, case 0: /* PRECR_SRA_PH_W */ { - TCGv_i32 sa_t = tcg_const_i32(rd); + TCGv_i32 sa_t = tcg_constant_i32(rd); gen_helper_precr_sra_ph_w(v1_t, sa_t, v1_t, cpu_gpr[rt]); gen_store_gpr(v1_t, rt); - tcg_temp_free_i32(sa_t); } break; case 1: /* PRECR_SRA_R_PH_W */ { - TCGv_i32 sa_t = tcg_const_i32(rd); + TCGv_i32 sa_t = tcg_constant_i32(rd); gen_helper_precr_sra_r_ph_w(v1_t, sa_t, v1_t, cpu_gpr[rt]); gen_store_gpr(v1_t, rt); - tcg_temp_free_i32(sa_t); } break; } @@ -3536,8 +3452,6 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, tcg_gen_movi_tl(tv0, rd >> 3); tcg_gen_movi_tl(tv1, imm); gen_helper_shilo(tv0, tv1, cpu_env); - tcg_temp_free(tv1); - tcg_temp_free(tv0); } break; case NM_MULEQ_S_W_PHL: @@ -3652,10 +3566,6 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, gen_reserved_instruction(ctx); break; } - - tcg_temp_free(v2_t); - tcg_temp_free(v1_t); - tcg_temp_free(t0); } static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) @@ -3826,8 +3736,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) addr_off); tcg_gen_movi_tl(t0, addr); - tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx, MO_TESL); - tcg_temp_free(t0); + tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx, + MO_TESL | ctx->default_tcg_memop_mask); } break; case NM_SWPC48: @@ -3843,10 +3753,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) tcg_gen_movi_tl(t0, addr); gen_load_gpr(t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL); - - tcg_temp_free(t0); - tcg_temp_free(t1); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, + MO_TEUL | ctx->default_tcg_memop_mask); } break; default: @@ -3908,8 +3816,6 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) gen_load_gpr(t0, rs); tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, imm); gen_store_gpr(t0, rt); - - tcg_temp_free(t0); } break; case NM_ADDIUNEG: @@ -3958,18 +3864,15 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) check_nms(ctx); if (rt != 0) { TCGv t0 = tcg_temp_new(); - TCGv_i32 shift = tcg_const_i32(extract32(ctx->opcode, 0, 5)); - TCGv_i32 shiftx = tcg_const_i32(extract32(ctx->opcode, 7, 4) - << 1); - TCGv_i32 stripe = tcg_const_i32(extract32(ctx->opcode, 6, 1)); + TCGv_i32 shift = + tcg_constant_i32(extract32(ctx->opcode, 0, 5)); + TCGv_i32 shiftx = + tcg_constant_i32(extract32(ctx->opcode, 7, 4) << 1); + TCGv_i32 stripe = + tcg_constant_i32(extract32(ctx->opcode, 6, 1)); gen_load_gpr(t0, rs); gen_helper_rotx(cpu_gpr[rt], t0, shift, shiftx, stripe); - tcg_temp_free(t0); - - tcg_temp_free_i32(shift); - tcg_temp_free_i32(shiftx); - tcg_temp_free_i32(stripe); } break; case NM_P_INS: @@ -4239,8 +4142,6 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) MO_UNALN); break; } - tcg_temp_free(t0); - tcg_temp_free(t1); } break; case NM_P_LL: @@ -4404,7 +4305,7 @@ static int 
decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) TCGv va = tcg_temp_new(); TCGv t1 = tcg_temp_new(); MemOp memop = (extract32(ctx->opcode, 8, 3)) == - NM_P_LS_UAWM ? MO_UNALN : 0; + NM_P_LS_UAWM ? MO_UNALN : MO_ALIGN; count = (count == 0) ? 8 : count; while (counter != count) { @@ -4432,8 +4333,6 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) } counter++; } - tcg_temp_free(va); - tcg_temp_free(t1); } break; default: @@ -4454,7 +4353,6 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) gen_load_gpr(t0, rt); tcg_gen_mov_tl(cpu_gpr[rd], t0); gen_compute_branch_nm(ctx, OPC_BGEZAL, 4, 0, 0, s); - tcg_temp_free(t0); } break; case NM_P_BAL: @@ -4604,9 +4502,8 @@ static int decode_isa_nanomips(CPUMIPSState *env, DisasContext *ctx) /* make sure instructions are on a halfword boundary */ if (ctx->base.pc_next & 0x1) { - TCGv tmp = tcg_const_tl(ctx->base.pc_next); + TCGv tmp = tcg_constant_tl(ctx->base.pc_next); tcg_gen_st_tl(tmp, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); - tcg_temp_free(tmp); generate_exception_end(ctx, EXCP_AdEL); return 2; } @@ -4941,8 +4838,6 @@ static int decode_isa_nanomips(CPUMIPSState *env, DisasContext *ctx) gen_load_gpr(t1, rt); tcg_gen_mov_tl(cpu_gpr[rd], t0); tcg_gen_mov_tl(cpu_gpr[re], t1); - tcg_temp_free(t0); - tcg_temp_free(t1); } break; default: diff --git a/target/mips/tcg/octeon_translate.c b/target/mips/tcg/octeon_translate.c index 6a207d2e7e..103c304d10 100644 --- a/target/mips/tcg/octeon_translate.c +++ b/target/mips/tcg/octeon_translate.c @@ -40,8 +40,6 @@ static bool trans_BBIT(DisasContext *ctx, arg_BBIT *a) ctx->hflags |= MIPS_HFLAG_BC; ctx->btarget = ctx->base.pc_next + 4 + a->offset * 4; ctx->hflags |= MIPS_HFLAG_BDS32; - - tcg_temp_free(t0); return true; } @@ -61,10 +59,6 @@ static bool trans_BADDU(DisasContext *ctx, arg_BADDU *a) tcg_gen_add_tl(t0, t0, t1); tcg_gen_andi_i64(cpu_gpr[a->rd], t0, 0xff); - - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; } @@ -83,10 +77,6 @@ static bool trans_DMUL(DisasContext *ctx, arg_DMUL *a) gen_load_gpr(t1, a->rt); tcg_gen_mul_i64(cpu_gpr[a->rd], t0, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; } @@ -103,8 +93,6 @@ static bool trans_EXTS(DisasContext *ctx, arg_EXTS *a) gen_load_gpr(t0, a->rs); tcg_gen_sextract_tl(t0, t0, a->p, a->lenm1 + 1); gen_store_gpr(t0, a->rt); - tcg_temp_free(t0); - return true; } @@ -121,8 +109,6 @@ static bool trans_CINS(DisasContext *ctx, arg_CINS *a) gen_load_gpr(t0, a->rs); tcg_gen_deposit_z_tl(t0, t0, a->p, a->lenm1 + 1); gen_store_gpr(t0, a->rt); - tcg_temp_free(t0); - return true; } @@ -142,8 +128,6 @@ static bool trans_POP(DisasContext *ctx, arg_POP *a) } tcg_gen_ctpop_tl(t0, t0); gen_store_gpr(t0, a->rd); - tcg_temp_free(t0); - return true; } @@ -167,10 +151,6 @@ static bool trans_SEQNE(DisasContext *ctx, arg_SEQNE *a) } else { tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr[a->rd], t1, t0); } - - tcg_temp_free(t0); - tcg_temp_free(t1); - return true; } @@ -194,8 +174,5 @@ static bool trans_SEQNEI(DisasContext *ctx, arg_SEQNEI *a) } else { tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr[a->rt], t0, imm); } - - tcg_temp_free(t0); - return true; } diff --git a/target/mips/tcg/sysemu/mips-semi.c b/target/mips/tcg/sysemu/mips-semi.c index 85f0567a7f..f3735df7b9 100644 --- a/target/mips/tcg/sysemu/mips-semi.c +++ b/target/mips/tcg/sysemu/mips-semi.c @@ -20,7 +20,8 @@ #include "qemu/osdep.h" #include "cpu.h" #include "qemu/log.h" -#include "exec/gdbstub.h" +#include "gdbstub/syscalls.h" +#include 
"gdbstub/helpers.h" #include "semihosting/softmmu-uaccess.h" #include "semihosting/semihost.h" #include "semihosting/console.h" diff --git a/target/mips/tcg/sysemu/special_helper.c b/target/mips/tcg/sysemu/special_helper.c index 3c5f35c759..93276f789d 100644 --- a/target/mips/tcg/sysemu/special_helper.c +++ b/target/mips/tcg/sysemu/special_helper.c @@ -94,7 +94,7 @@ bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb) CPUMIPSState *env = &cpu->env; if ((env->hflags & MIPS_HFLAG_BMASK) != 0 - && env->active_tc.PC != tb_pc(tb)) { + && !(cs->tcg_cflags & CF_PCREL) && env->active_tc.PC != tb->pc) { env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); env->hflags &= ~MIPS_HFLAG_BMASK; return true; diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c index aa12bb708a..a6ca2e5a3b 100644 --- a/target/mips/tcg/translate.c +++ b/target/mips/tcg/translate.c @@ -1223,6 +1223,7 @@ static const char regnames_LO[][4] = { /* General purpose registers moves. */ void gen_load_gpr(TCGv t, int reg) { + assert(reg >= 0 && reg <= ARRAY_SIZE(cpu_gpr)); if (reg == 0) { tcg_gen_movi_tl(t, 0); } else { @@ -1232,6 +1233,7 @@ void gen_load_gpr(TCGv t, int reg) void gen_store_gpr(TCGv t, int reg) { + assert(reg >= 0 && reg <= ARRAY_SIZE(cpu_gpr)); if (reg != 0) { tcg_gen_mov_tl(cpu_gpr[reg], t); } @@ -1240,6 +1242,7 @@ void gen_store_gpr(TCGv t, int reg) #if defined(TARGET_MIPS64) void gen_load_gpr_hi(TCGv_i64 t, int reg) { + assert(reg >= 0 && reg <= ARRAY_SIZE(cpu_gpr_hi)); if (reg == 0) { tcg_gen_movi_i64(t, 0); } else { @@ -1249,6 +1252,7 @@ void gen_load_gpr_hi(TCGv_i64 t, int reg) void gen_store_gpr_hi(TCGv_i64 t, int reg) { + assert(reg >= 0 && reg <= ARRAY_SIZE(cpu_gpr_hi)); if (reg != 0) { tcg_gen_mov_i64(cpu_gpr_hi[reg], t); } @@ -1274,11 +1278,8 @@ static inline void gen_load_srsgpr(int from, int to) tcg_gen_add_ptr(addr, cpu_env, addr); tcg_gen_ld_tl(t0, addr, sizeof(target_ulong) * from); - tcg_temp_free_ptr(addr); - tcg_temp_free_i32(t2); } gen_store_gpr(t0, to); - tcg_temp_free(t0); } static inline void gen_store_srsgpr(int from, int to) @@ -1297,9 +1298,6 @@ static inline void gen_store_srsgpr(int from, int to) tcg_gen_add_ptr(addr, cpu_env, addr); tcg_gen_st_tl(t0, addr, sizeof(target_ulong) * to); - tcg_temp_free_ptr(addr); - tcg_temp_free_i32(t2); - tcg_temp_free(t0); } } @@ -1396,7 +1394,6 @@ void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg) t64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(t64, t); tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 0, 32); - tcg_temp_free_i64(t64); } static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg) @@ -1414,7 +1411,6 @@ static void gen_store_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg) TCGv_i64 t64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(t64, t); tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 32, 32); - tcg_temp_free_i64(t64); } else { gen_store_fpr32(ctx, t, reg | 1); } @@ -1439,7 +1435,6 @@ void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) t0 = tcg_temp_new_i64(); tcg_gen_shri_i64(t0, t, 32); tcg_gen_deposit_i64(fpu_f64[reg | 1], fpu_f64[reg | 1], t0, 0, 32); - tcg_temp_free_i64(t0); } } @@ -1852,8 +1847,6 @@ static inline void gen_cmp ## type ## _ ## fmt(DisasContext *ctx, int n, \ default: \ abort(); \ } \ - tcg_temp_free_i##bits(fp0); \ - tcg_temp_free_i##bits(fp1); \ } FOP_CONDS(, 0, d, FMT_D, 64) @@ -1946,8 +1939,6 @@ static inline void gen_r6_cmp_ ## fmt(DisasContext *ctx, int n, \ abort(); \ } \ STORE; \ - tcg_temp_free_i ## bits(fp0); \ - 
tcg_temp_free_i ## bits(fp1); \ } FOP_CONDNS(d, FMT_D, 64, gen_store_fpr64(ctx, fp0, fd)) @@ -1958,16 +1949,15 @@ FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(ctx, fp0, fd)) /* load/store instructions. */ #ifdef CONFIG_USER_ONLY -#define OP_LD_ATOMIC(insn, fname) \ +#define OP_LD_ATOMIC(insn, memop) \ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ DisasContext *ctx) \ { \ TCGv t0 = tcg_temp_new(); \ tcg_gen_mov_tl(t0, arg1); \ - tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \ + tcg_gen_qemu_ld_tl(ret, arg1, ctx->mem_idx, memop); \ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \ tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval)); \ - tcg_temp_free(t0); \ } #else #define OP_LD_ATOMIC(insn, fname) \ @@ -1977,9 +1967,9 @@ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ gen_helper_##insn(ret, cpu_env, arg1, tcg_constant_i32(mem_idx)); \ } #endif -OP_LD_ATOMIC(ll, ld32s); +OP_LD_ATOMIC(ll, MO_TESL); #if defined(TARGET_MIPS64) -OP_LD_ATOMIC(lld, ld64); +OP_LD_ATOMIC(lld, MO_TEUQ); #endif #undef OP_LD_ATOMIC @@ -2009,11 +1999,65 @@ static target_ulong pc_relative_pc(DisasContext *ctx) return pc; } +/* LWL or LDL, depending on MemOp. */ +static void gen_lxl(DisasContext *ctx, TCGv reg, TCGv addr, + int mem_idx, MemOp mop) +{ + int sizem1 = memop_size(mop) - 1; + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + /* + * Do a byte access to possibly trigger a page + * fault with the unaligned address. + */ + tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB); + tcg_gen_andi_tl(t1, addr, sizem1); + if (!cpu_is_bigendian(ctx)) { + tcg_gen_xori_tl(t1, t1, sizem1); + } + tcg_gen_shli_tl(t1, t1, 3); + tcg_gen_andi_tl(t0, addr, ~sizem1); + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mop); + tcg_gen_shl_tl(t0, t0, t1); + tcg_gen_shl_tl(t1, tcg_constant_tl(-1), t1); + tcg_gen_andc_tl(t1, reg, t1); + tcg_gen_or_tl(reg, t0, t1); +} + +/* LWR or LDR, depending on MemOp. */ +static void gen_lxr(DisasContext *ctx, TCGv reg, TCGv addr, + int mem_idx, MemOp mop) +{ + int size = memop_size(mop); + int sizem1 = size - 1; + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + /* + * Do a byte access to possibly trigger a page + * fault with the unaligned address. + */ + tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB); + tcg_gen_andi_tl(t1, addr, sizem1); + if (cpu_is_bigendian(ctx)) { + tcg_gen_xori_tl(t1, t1, sizem1); + } + tcg_gen_shli_tl(t1, t1, 3); + tcg_gen_andi_tl(t0, addr, ~sizem1); + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mop); + tcg_gen_shr_tl(t0, t0, t1); + tcg_gen_xori_tl(t1, t1, size * 8 - 1); + tcg_gen_shl_tl(t1, tcg_constant_tl(~1), t1); + tcg_gen_and_tl(t1, reg, t1); + tcg_gen_or_tl(reg, t0, t1); +} + /* Load */ static void gen_ld(DisasContext *ctx, uint32_t opc, int rt, int base, int offset) { - TCGv t0, t1, t2; + TCGv t0, t1; int mem_idx = ctx->mem_idx; if (rt == 0 && ctx->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F | @@ -2048,65 +2092,26 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, break; case OPC_LDL: t1 = tcg_temp_new(); - /* - * Do a byte access to possibly trigger a page - * fault with the unaligned address. 
- */ - tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB); - tcg_gen_andi_tl(t1, t0, 7); - if (!cpu_is_bigendian(ctx)) { - tcg_gen_xori_tl(t1, t1, 7); - } - tcg_gen_shli_tl(t1, t1, 3); - tcg_gen_andi_tl(t0, t0, ~7); - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ); - tcg_gen_shl_tl(t0, t0, t1); - t2 = tcg_const_tl(-1); - tcg_gen_shl_tl(t2, t2, t1); gen_load_gpr(t1, rt); - tcg_gen_andc_tl(t1, t1, t2); - tcg_temp_free(t2); - tcg_gen_or_tl(t0, t0, t1); - tcg_temp_free(t1); - gen_store_gpr(t0, rt); + gen_lxl(ctx, t1, t0, mem_idx, MO_TEUQ); + gen_store_gpr(t1, rt); break; case OPC_LDR: t1 = tcg_temp_new(); - /* - * Do a byte access to possibly trigger a page - * fault with the unaligned address. - */ - tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB); - tcg_gen_andi_tl(t1, t0, 7); - if (cpu_is_bigendian(ctx)) { - tcg_gen_xori_tl(t1, t1, 7); - } - tcg_gen_shli_tl(t1, t1, 3); - tcg_gen_andi_tl(t0, t0, ~7); - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ); - tcg_gen_shr_tl(t0, t0, t1); - tcg_gen_xori_tl(t1, t1, 63); - t2 = tcg_const_tl(0xfffffffffffffffeull); - tcg_gen_shl_tl(t2, t2, t1); gen_load_gpr(t1, rt); - tcg_gen_and_tl(t1, t1, t2); - tcg_temp_free(t2); - tcg_gen_or_tl(t0, t0, t1); - tcg_temp_free(t1); - gen_store_gpr(t0, rt); + gen_lxr(ctx, t1, t0, mem_idx, MO_TEUQ); + gen_store_gpr(t1, rt); break; case OPC_LDPC: - t1 = tcg_const_tl(pc_relative_pc(ctx)); + t1 = tcg_constant_tl(pc_relative_pc(ctx)); gen_op_addr_add(ctx, t0, t0, t1); - tcg_temp_free(t1); tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ); gen_store_gpr(t0, rt); break; #endif case OPC_LWPC: - t1 = tcg_const_tl(pc_relative_pc(ctx)); + t1 = tcg_constant_tl(pc_relative_pc(ctx)); gen_op_addr_add(ctx, t0, t0, t1); - tcg_temp_free(t1); tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL); gen_store_gpr(t0, rt); break; @@ -2153,57 +2158,20 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, /* fall through */ case OPC_LWL: t1 = tcg_temp_new(); - /* - * Do a byte access to possibly trigger a page - * fault with the unaligned address. - */ - tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB); - tcg_gen_andi_tl(t1, t0, 3); - if (!cpu_is_bigendian(ctx)) { - tcg_gen_xori_tl(t1, t1, 3); - } - tcg_gen_shli_tl(t1, t1, 3); - tcg_gen_andi_tl(t0, t0, ~3); - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL); - tcg_gen_shl_tl(t0, t0, t1); - t2 = tcg_const_tl(-1); - tcg_gen_shl_tl(t2, t2, t1); gen_load_gpr(t1, rt); - tcg_gen_andc_tl(t1, t1, t2); - tcg_temp_free(t2); - tcg_gen_or_tl(t0, t0, t1); - tcg_temp_free(t1); - tcg_gen_ext32s_tl(t0, t0); - gen_store_gpr(t0, rt); + gen_lxl(ctx, t1, t0, mem_idx, MO_TEUL); + tcg_gen_ext32s_tl(t1, t1); + gen_store_gpr(t1, rt); break; case OPC_LWRE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LWR: t1 = tcg_temp_new(); - /* - * Do a byte access to possibly trigger a page - * fault with the unaligned address. 
- */ - tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB); - tcg_gen_andi_tl(t1, t0, 3); - if (cpu_is_bigendian(ctx)) { - tcg_gen_xori_tl(t1, t1, 3); - } - tcg_gen_shli_tl(t1, t1, 3); - tcg_gen_andi_tl(t0, t0, ~3); - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL); - tcg_gen_shr_tl(t0, t0, t1); - tcg_gen_xori_tl(t1, t1, 31); - t2 = tcg_const_tl(0xfffffffeull); - tcg_gen_shl_tl(t2, t2, t1); gen_load_gpr(t1, rt); - tcg_gen_and_tl(t1, t1, t2); - tcg_temp_free(t2); - tcg_gen_or_tl(t0, t0, t1); - tcg_temp_free(t1); - tcg_gen_ext32s_tl(t0, t0); - gen_store_gpr(t0, rt); + gen_lxr(ctx, t1, t0, mem_idx, MO_TEUL); + tcg_gen_ext32s_tl(t1, t1); + gen_store_gpr(t1, rt); break; case OPC_LLE: mem_idx = MIPS_HFLAG_UM; @@ -2214,7 +2182,6 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, gen_store_gpr(t0, rt); break; } - tcg_temp_free(t0); } /* Store */ @@ -2273,8 +2240,6 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt, gen_helper_0e2i(swr, t1, t0, mem_idx); break; } - tcg_temp_free(t0); - tcg_temp_free(t1); } @@ -2291,7 +2256,6 @@ static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset, /* compare the address against that of the preceding LL */ gen_base_offset_addr(ctx, addr, base, offset); tcg_gen_brcond_tl(TCG_COND_EQ, addr, cpu_lladdr, l1); - tcg_temp_free(addr); tcg_gen_movi_tl(t0, 0); gen_store_gpr(t0, rt); tcg_gen_br(done); @@ -2304,10 +2268,8 @@ static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset, eva ? MIPS_HFLAG_UM : ctx->mem_idx, tcg_mo); tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_llval); gen_store_gpr(t0, rt); - tcg_temp_free(val); gen_set_label(done); - tcg_temp_free(t0); } /* Load and store */ @@ -2325,7 +2287,6 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL | ctx->default_tcg_memop_mask); gen_store_fpr32(ctx, fp0, ft); - tcg_temp_free_i32(fp0); } break; case OPC_SWC1: @@ -2334,7 +2295,6 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, gen_load_fpr32(ctx, fp0, ft); tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL | ctx->default_tcg_memop_mask); - tcg_temp_free_i32(fp0); } break; case OPC_LDC1: @@ -2343,7 +2303,6 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); gen_store_fpr64(ctx, fp0, ft); - tcg_temp_free_i64(fp0); } break; case OPC_SDC1: @@ -2352,7 +2311,6 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, gen_load_fpr64(ctx, fp0, ft); tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); - tcg_temp_free_i64(fp0); } break; default: @@ -2381,7 +2339,6 @@ static void gen_cop1_ldst(DisasContext *ctx, uint32_t op, int rt, } else { generate_exception_err(ctx, EXCP_CpU, 1); } - tcg_temp_free(t0); } /* Arithmetic with immediate operand */ @@ -2400,7 +2357,7 @@ static void gen_arith_imm(DisasContext *ctx, uint32_t opc, switch (opc) { case OPC_ADDI: { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); @@ -2412,15 +2369,12 @@ static void gen_arith_imm(DisasContext *ctx, uint32_t opc, tcg_gen_xori_tl(t1, t1, ~uimm); tcg_gen_xori_tl(t2, t0, uimm); tcg_gen_and_tl(t1, t1, t2); - tcg_temp_free(t2); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); - tcg_temp_free(t1); /* operands of same sign, result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(l1); tcg_gen_ext32s_tl(t0, t0); gen_store_gpr(t0, rt); - 
tcg_temp_free(t0); } break; case OPC_ADDIU: @@ -2434,7 +2388,7 @@ static void gen_arith_imm(DisasContext *ctx, uint32_t opc, #if defined(TARGET_MIPS64) case OPC_DADDI: { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); @@ -2445,14 +2399,11 @@ static void gen_arith_imm(DisasContext *ctx, uint32_t opc, tcg_gen_xori_tl(t1, t1, ~uimm); tcg_gen_xori_tl(t2, t0, uimm); tcg_gen_and_tl(t1, t1, t2); - tcg_temp_free(t2); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); - tcg_temp_free(t1); /* operands of same sign, result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(l1); gen_store_gpr(t0, rt); - tcg_temp_free(t0); } break; case OPC_DADDIU: @@ -2535,7 +2486,6 @@ static void gen_slt_imm(DisasContext *ctx, uint32_t opc, tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr[rt], t0, uimm); break; } - tcg_temp_free(t0); } /* Shifts with immediate operand */ @@ -2575,7 +2525,6 @@ static void gen_shift_imm(DisasContext *ctx, uint32_t opc, tcg_gen_trunc_tl_i32(t1, t0); tcg_gen_rotri_i32(t1, t1, uimm); tcg_gen_ext_i32_tl(cpu_gpr[rt], t1); - tcg_temp_free_i32(t1); } else { tcg_gen_ext32s_tl(cpu_gpr[rt], t0); } @@ -2611,7 +2560,6 @@ static void gen_shift_imm(DisasContext *ctx, uint32_t opc, break; #endif } - tcg_temp_free(t0); } /* Arithmetic */ @@ -2630,7 +2578,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, switch (opc) { case OPC_ADD: { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); @@ -2642,14 +2590,11 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, tcg_gen_xor_tl(t1, t1, t2); tcg_gen_xor_tl(t2, t0, t2); tcg_gen_andc_tl(t1, t2, t1); - tcg_temp_free(t2); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); - tcg_temp_free(t1); /* operands of same sign, result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(l1); gen_store_gpr(t0, rd); - tcg_temp_free(t0); } break; case OPC_ADDU: @@ -2666,7 +2611,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, break; case OPC_SUB: { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); @@ -2678,9 +2623,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, tcg_gen_xor_tl(t2, t1, t2); tcg_gen_xor_tl(t1, t0, t1); tcg_gen_and_tl(t1, t1, t2); - tcg_temp_free(t2); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); - tcg_temp_free(t1); /* * operands of different sign, first operand and the result * of different sign @@ -2688,7 +2631,6 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(l1); gen_store_gpr(t0, rd); - tcg_temp_free(t0); } break; case OPC_SUBU: @@ -2707,7 +2649,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, #if defined(TARGET_MIPS64) case OPC_DADD: { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); @@ -2718,14 +2660,11 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, tcg_gen_xor_tl(t1, t1, t2); tcg_gen_xor_tl(t2, t0, t2); tcg_gen_andc_tl(t1, t2, t1); - tcg_temp_free(t2); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); - tcg_temp_free(t1); /* operands of same sign, result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(l1); gen_store_gpr(t0, rd); - tcg_temp_free(t0); } break; case OPC_DADDU: @@ -2741,7 +2680,7 @@ static void 
gen_arith(DisasContext *ctx, uint32_t opc, break; case OPC_DSUB: { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); @@ -2752,9 +2691,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, tcg_gen_xor_tl(t2, t1, t2); tcg_gen_xor_tl(t1, t0, t1); tcg_gen_and_tl(t1, t1, t2); - tcg_temp_free(t2); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); - tcg_temp_free(t1); /* * Operands of different sign, first operand and result different * sign. @@ -2762,7 +2699,6 @@ static void gen_arith(DisasContext *ctx, uint32_t opc, generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(l1); gen_store_gpr(t0, rd); - tcg_temp_free(t0); } break; case OPC_DSUBU: @@ -2801,7 +2737,7 @@ static void gen_cond_move(DisasContext *ctx, uint32_t opc, t0 = tcg_temp_new(); gen_load_gpr(t0, rt); - t1 = tcg_const_tl(0); + t1 = tcg_constant_tl(0); t2 = tcg_temp_new(); gen_load_gpr(t2, rs); switch (opc) { @@ -2818,9 +2754,6 @@ static void gen_cond_move(DisasContext *ctx, uint32_t opc, tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr[rd], t0, t1, t2, t1); break; } - tcg_temp_free(t2); - tcg_temp_free(t1); - tcg_temp_free(t0); } /* Logic */ @@ -2899,8 +2832,6 @@ static void gen_slt(DisasContext *ctx, uint32_t opc, tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr[rd], t0, t1); break; } - tcg_temp_free(t0); - tcg_temp_free(t1); } /* Shifts */ @@ -2947,8 +2878,6 @@ static void gen_shift(DisasContext *ctx, uint32_t opc, tcg_gen_andi_i32(t2, t2, 0x1f); tcg_gen_rotr_i32(t2, t3, t2); tcg_gen_ext_i32_tl(cpu_gpr[rd], t2); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } break; #if defined(TARGET_MIPS64) @@ -2970,8 +2899,6 @@ static void gen_shift(DisasContext *ctx, uint32_t opc, break; #endif } - tcg_temp_free(t0); - tcg_temp_free(t1); } /* Arithmetic on HI/LO registers */ @@ -3041,10 +2968,9 @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg) static inline void gen_r6_ld(target_long addr, int reg, int memidx, MemOp memop) { - TCGv t0 = tcg_const_tl(addr); - tcg_gen_qemu_ld_tl(t0, t0, memidx, memop); + TCGv t0 = tcg_temp_new(); + tcg_gen_qemu_ld_tl(t0, tcg_constant_tl(addr), memidx, memop); gen_store_gpr(t0, reg); - tcg_temp_free(t0); } static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc, @@ -3141,8 +3067,6 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_div_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case R6_OPC_MOD: @@ -3160,34 +3084,28 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case R6_OPC_DIVU: { - TCGv t2 = tcg_const_tl(0); - TCGv t3 = tcg_const_tl(1); + TCGv t2 = tcg_constant_tl(0); + TCGv t3 = tcg_constant_tl(1); tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case R6_OPC_MODU: { - TCGv t2 = tcg_const_tl(0); - TCGv t3 = tcg_const_tl(1); + TCGv t2 = tcg_constant_tl(0); + TCGv t3 = tcg_constant_tl(1); tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); tcg_gen_remu_tl(cpu_gpr[rd], 
t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case R6_OPC_MUL: @@ -3198,8 +3116,6 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_trunc_tl_i32(t3, t1); tcg_gen_mul_i32(t2, t2, t3); tcg_gen_ext_i32_tl(cpu_gpr[rd], t2); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } break; case R6_OPC_MUH: @@ -3210,8 +3126,6 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_trunc_tl_i32(t3, t1); tcg_gen_muls2_i32(t2, t3, t2, t3); tcg_gen_ext_i32_tl(cpu_gpr[rd], t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } break; case R6_OPC_MULU: @@ -3222,8 +3136,6 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_trunc_tl_i32(t3, t1); tcg_gen_mul_i32(t2, t2, t3); tcg_gen_ext_i32_tl(cpu_gpr[rd], t2); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } break; case R6_OPC_MUHU: @@ -3234,8 +3146,6 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_trunc_tl_i32(t3, t1); tcg_gen_mulu2_i32(t2, t3, t2, t3); tcg_gen_ext_i32_tl(cpu_gpr[rd], t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } break; #if defined(TARGET_MIPS64) @@ -3251,8 +3161,6 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_movi_tl(t3, 0); tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_div_tl(cpu_gpr[rd], t0, t1); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case R6_OPC_DMOD: @@ -3267,28 +3175,22 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_movi_tl(t3, 0); tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case R6_OPC_DDIVU: { - TCGv t2 = tcg_const_tl(0); - TCGv t3 = tcg_const_tl(1); + TCGv t2 = tcg_constant_tl(0); + TCGv t3 = tcg_constant_tl(1); tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); tcg_gen_divu_i64(cpu_gpr[rd], t0, t1); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case R6_OPC_DMODU: { - TCGv t2 = tcg_const_tl(0); - TCGv t3 = tcg_const_tl(1); + TCGv t2 = tcg_constant_tl(0); + TCGv t3 = tcg_constant_tl(1); tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); tcg_gen_remu_i64(cpu_gpr[rd], t0, t1); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case R6_OPC_DMUL: @@ -3298,7 +3200,6 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) { TCGv t2 = tcg_temp_new(); tcg_gen_muls2_i64(t2, cpu_gpr[rd], t0, t1); - tcg_temp_free(t2); } break; case R6_OPC_DMULU: @@ -3308,18 +3209,14 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) { TCGv t2 = tcg_temp_new(); tcg_gen_mulu2_i64(t2, cpu_gpr[rd], t0, t1); - tcg_temp_free(t2); } break; #endif default: MIPS_INVAL("r6 mul/div"); gen_reserved_instruction(ctx); - goto out; + break; } - out: - tcg_temp_free(t0); - tcg_temp_free(t1); } #if defined(TARGET_MIPS64) @@ -3351,14 +3248,12 @@ static void gen_div1_tx79(DisasContext *ctx, uint32_t opc, int rs, int rt) tcg_gen_rem_tl(cpu_HI[1], t0, t1); tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]); tcg_gen_ext32s_tl(cpu_HI[1], cpu_HI[1]); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case MMI_OPC_DIVU1: { - TCGv t2 = tcg_const_tl(0); - TCGv t3 = tcg_const_tl(1); + TCGv t2 = tcg_constant_tl(0); + TCGv t3 = tcg_constant_tl(1); tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); @@ -3366,18 +3261,13 @@ static void gen_div1_tx79(DisasContext 
*ctx, uint32_t opc, int rs, int rt) tcg_gen_remu_tl(cpu_HI[1], t0, t1); tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]); tcg_gen_ext32s_tl(cpu_HI[1], cpu_HI[1]); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; default: MIPS_INVAL("div1 TX79"); gen_reserved_instruction(ctx); - goto out; + break; } - out: - tcg_temp_free(t0); - tcg_temp_free(t1); } #endif @@ -3414,14 +3304,12 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_rem_tl(cpu_HI[acc], t0, t1); tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]); tcg_gen_ext32s_tl(cpu_HI[acc], cpu_HI[acc]); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case OPC_DIVU: { - TCGv t2 = tcg_const_tl(0); - TCGv t3 = tcg_const_tl(1); + TCGv t2 = tcg_constant_tl(0); + TCGv t3 = tcg_constant_tl(1); tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); @@ -3429,8 +3317,6 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_remu_tl(cpu_HI[acc], t0, t1); tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]); tcg_gen_ext32s_tl(cpu_HI[acc], cpu_HI[acc]); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case OPC_MULT: @@ -3442,8 +3328,6 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_muls2_i32(t2, t3, t2, t3); tcg_gen_ext_i32_tl(cpu_LO[acc], t2); tcg_gen_ext_i32_tl(cpu_HI[acc], t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } break; case OPC_MULTU: @@ -3455,8 +3339,6 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_mulu2_i32(t2, t3, t2, t3); tcg_gen_ext_i32_tl(cpu_LO[acc], t2); tcg_gen_ext_i32_tl(cpu_HI[acc], t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } break; #if defined(TARGET_MIPS64) @@ -3473,19 +3355,15 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_div_tl(cpu_LO[acc], t0, t1); tcg_gen_rem_tl(cpu_HI[acc], t0, t1); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case OPC_DDIVU: { - TCGv t2 = tcg_const_tl(0); - TCGv t3 = tcg_const_tl(1); + TCGv t2 = tcg_constant_tl(0); + TCGv t3 = tcg_constant_tl(1); tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); tcg_gen_divu_i64(cpu_LO[acc], t0, t1); tcg_gen_remu_i64(cpu_HI[acc], t0, t1); - tcg_temp_free(t3); - tcg_temp_free(t2); } break; case OPC_DMULT: @@ -3505,10 +3383,8 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_add_i64(t2, t2, t3); - tcg_temp_free_i64(t3); gen_move_low32(cpu_LO[acc], t2); gen_move_high32(cpu_HI[acc], t2); - tcg_temp_free_i64(t2); } break; case OPC_MADDU: @@ -3523,10 +3399,8 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_add_i64(t2, t2, t3); - tcg_temp_free_i64(t3); gen_move_low32(cpu_LO[acc], t2); gen_move_high32(cpu_HI[acc], t2); - tcg_temp_free_i64(t2); } break; case OPC_MSUB: @@ -3539,10 +3413,8 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_sub_i64(t2, t3, t2); - tcg_temp_free_i64(t3); gen_move_low32(cpu_LO[acc], t2); gen_move_high32(cpu_HI[acc], t2); - tcg_temp_free_i64(t2); } break; case OPC_MSUBU: @@ -3557,20 +3429,15 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_sub_i64(t2, t3, t2); - tcg_temp_free_i64(t3); gen_move_low32(cpu_LO[acc], t2); gen_move_high32(cpu_HI[acc], t2); - 
tcg_temp_free_i64(t2); } break; default: MIPS_INVAL("mul/div"); gen_reserved_instruction(ctx); - goto out; + break; } - out: - tcg_temp_free(t0); - tcg_temp_free(t1); } /* @@ -3625,8 +3492,6 @@ static void gen_mul_txx9(DisasContext *ctx, uint32_t opc, } tcg_gen_ext_i32_tl(cpu_LO[acc], t2); tcg_gen_ext_i32_tl(cpu_HI[acc], t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } break; case MMI_OPC_MULTU1: @@ -3644,8 +3509,6 @@ static void gen_mul_txx9(DisasContext *ctx, uint32_t opc, } tcg_gen_ext_i32_tl(cpu_LO[acc], t2); tcg_gen_ext_i32_tl(cpu_HI[acc], t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } break; case MMI_OPC_MADD1: @@ -3661,13 +3524,11 @@ static void gen_mul_txx9(DisasContext *ctx, uint32_t opc, tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_add_i64(t2, t2, t3); - tcg_temp_free_i64(t3); gen_move_low32(cpu_LO[acc], t2); gen_move_high32(cpu_HI[acc], t2); if (rd) { gen_move_low32(cpu_gpr[rd], t2); } - tcg_temp_free_i64(t2); } break; case MMI_OPC_MADDU1: @@ -3685,24 +3546,18 @@ static void gen_mul_txx9(DisasContext *ctx, uint32_t opc, tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_add_i64(t2, t2, t3); - tcg_temp_free_i64(t3); gen_move_low32(cpu_LO[acc], t2); gen_move_high32(cpu_HI[acc], t2); if (rd) { gen_move_low32(cpu_gpr[rd], t2); } - tcg_temp_free_i64(t2); } break; default: MIPS_INVAL("mul/madd TXx9"); gen_reserved_instruction(ctx); - goto out; + break; } - - out: - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_cl(DisasContext *ctx, uint32_t opc, @@ -3759,26 +3614,8 @@ static void gen_loongson_integer(DisasContext *ctx, uint32_t opc, return; } - switch (opc) { - case OPC_MULT_G_2E: - case OPC_MULT_G_2F: - case OPC_MULTU_G_2E: - case OPC_MULTU_G_2F: -#if defined(TARGET_MIPS64) - case OPC_DMULT_G_2E: - case OPC_DMULT_G_2F: - case OPC_DMULTU_G_2E: - case OPC_DMULTU_G_2F: -#endif - t0 = tcg_temp_new(); - t1 = tcg_temp_new(); - break; - default: - t0 = tcg_temp_local_new(); - t1 = tcg_temp_local_new(); - break; - } - + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); @@ -3942,9 +3779,6 @@ static void gen_loongson_integer(DisasContext *ctx, uint32_t opc, break; #endif } - - tcg_temp_free(t0); - tcg_temp_free(t1); } /* Loongson multimedia instructions */ @@ -3955,21 +3789,10 @@ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) TCGCond cond; opc = MASK_LMMI(ctx->opcode); - switch (opc) { - case OPC_ADD_CP2: - case OPC_SUB_CP2: - case OPC_DADD_CP2: - case OPC_DSUB_CP2: - t0 = tcg_temp_local_new_i64(); - t1 = tcg_temp_local_new_i64(); - break; - default: - t0 = tcg_temp_new_i64(); - t1 = tcg_temp_new_i64(); - break; - } - check_cp1_enabled(ctx); + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); gen_load_fpr64(ctx, t0, rs); gen_load_fpr64(ctx, t1, rt); @@ -4250,7 +4073,6 @@ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) tcg_gen_xor_i64(t1, t1, t2); tcg_gen_xor_i64(t2, t2, t0); tcg_gen_andc_i64(t1, t2, t1); - tcg_temp_free_i64(t2); tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab); generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(lab); @@ -4271,7 +4093,6 @@ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) tcg_gen_xor_i64(t1, t1, t2); tcg_gen_xor_i64(t2, t2, t0); tcg_gen_and_i64(t1, t1, t2); - tcg_temp_free_i64(t2); tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab); generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(lab); @@ -4313,12 +4134,8 @@ static 
void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) tcg_gen_extrl_i64_i32(t32, t64); tcg_gen_deposit_i32(fpu_fcr31, fpu_fcr31, t32, get_fp_bit(cc), 1); - - tcg_temp_free_i32(t32); - tcg_temp_free_i64(t64); } - goto no_rd; - break; + return; default: MIPS_INVAL("loongson_cp2"); gen_reserved_instruction(ctx); @@ -4326,16 +4143,12 @@ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) } gen_store_fpr64(ctx, t0, rd); - -no_rd: - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static void gen_loongson_lswc2(DisasContext *ctx, int rt, int rs, int rd) { - TCGv t0, t1, t2; + TCGv t0, t1; TCGv_i32 fp0; #if defined(TARGET_MIPS64) int lsq_rt1 = ctx->opcode & 0x1f; @@ -4357,7 +4170,6 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, ctx->default_tcg_memop_mask); gen_store_gpr(t1, rt); gen_store_gpr(t0, lsq_rt1); - tcg_temp_free(t1); break; case OPC_GSLQC1: check_cp1_enabled(ctx); @@ -4370,7 +4182,6 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, ctx->default_tcg_memop_mask); gen_store_fpr64(ctx, t1, rt); gen_store_fpr64(ctx, t0, lsq_rt1); - tcg_temp_free(t1); break; case OPC_GSSQ: t1 = tcg_temp_new(); @@ -4382,7 +4193,6 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, gen_load_gpr(t1, lsq_rt1); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); - tcg_temp_free(t1); break; case OPC_GSSQC1: check_cp1_enabled(ctx); @@ -4395,7 +4205,6 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, gen_load_fpr64(ctx, t1, lsq_rt1); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); - tcg_temp_free(t1); break; #endif case OPC_GSSHFL: @@ -4403,109 +4212,41 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, case OPC_GSLWLC1: check_cp1_enabled(ctx); gen_base_offset_addr(ctx, t0, rs, shf_offset); - t1 = tcg_temp_new(); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB); - tcg_gen_andi_tl(t1, t0, 3); - if (!cpu_is_bigendian(ctx)) { - tcg_gen_xori_tl(t1, t1, 3); - } - tcg_gen_shli_tl(t1, t1, 3); - tcg_gen_andi_tl(t0, t0, ~3); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL); - tcg_gen_shl_tl(t0, t0, t1); - t2 = tcg_const_tl(-1); - tcg_gen_shl_tl(t2, t2, t1); fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, rt); + t1 = tcg_temp_new(); tcg_gen_ext_i32_tl(t1, fp0); - tcg_gen_andc_tl(t1, t1, t2); - tcg_temp_free(t2); - tcg_gen_or_tl(t0, t0, t1); - tcg_temp_free(t1); -#if defined(TARGET_MIPS64) - tcg_gen_extrl_i64_i32(fp0, t0); -#else - tcg_gen_ext32s_tl(fp0, t0); -#endif + gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUL); + tcg_gen_trunc_tl_i32(fp0, t1); gen_store_fpr32(ctx, fp0, rt); - tcg_temp_free_i32(fp0); break; case OPC_GSLWRC1: check_cp1_enabled(ctx); gen_base_offset_addr(ctx, t0, rs, shf_offset); - t1 = tcg_temp_new(); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB); - tcg_gen_andi_tl(t1, t0, 3); - if (cpu_is_bigendian(ctx)) { - tcg_gen_xori_tl(t1, t1, 3); - } - tcg_gen_shli_tl(t1, t1, 3); - tcg_gen_andi_tl(t0, t0, ~3); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL); - tcg_gen_shr_tl(t0, t0, t1); - tcg_gen_xori_tl(t1, t1, 31); - t2 = tcg_const_tl(0xfffffffeull); - tcg_gen_shl_tl(t2, t2, t1); fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, rt); + t1 = tcg_temp_new(); tcg_gen_ext_i32_tl(t1, fp0); - tcg_gen_and_tl(t1, t1, t2); - tcg_temp_free(t2); - tcg_gen_or_tl(t0, t0, t1); - tcg_temp_free(t1); -#if defined(TARGET_MIPS64) - tcg_gen_extrl_i64_i32(fp0, t0); -#else - tcg_gen_ext32s_tl(fp0, t0); -#endif + gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUL); + 
tcg_gen_trunc_tl_i32(fp0, t1); gen_store_fpr32(ctx, fp0, rt); - tcg_temp_free_i32(fp0); break; #if defined(TARGET_MIPS64) case OPC_GSLDLC1: check_cp1_enabled(ctx); gen_base_offset_addr(ctx, t0, rs, shf_offset); t1 = tcg_temp_new(); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB); - tcg_gen_andi_tl(t1, t0, 7); - if (!cpu_is_bigendian(ctx)) { - tcg_gen_xori_tl(t1, t1, 7); - } - tcg_gen_shli_tl(t1, t1, 3); - tcg_gen_andi_tl(t0, t0, ~7); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ); - tcg_gen_shl_tl(t0, t0, t1); - t2 = tcg_const_tl(-1); - tcg_gen_shl_tl(t2, t2, t1); gen_load_fpr64(ctx, t1, rt); - tcg_gen_andc_tl(t1, t1, t2); - tcg_temp_free(t2); - tcg_gen_or_tl(t0, t0, t1); - tcg_temp_free(t1); - gen_store_fpr64(ctx, t0, rt); + gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUQ); + gen_store_fpr64(ctx, t1, rt); break; case OPC_GSLDRC1: check_cp1_enabled(ctx); gen_base_offset_addr(ctx, t0, rs, shf_offset); t1 = tcg_temp_new(); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB); - tcg_gen_andi_tl(t1, t0, 7); - if (cpu_is_bigendian(ctx)) { - tcg_gen_xori_tl(t1, t1, 7); - } - tcg_gen_shli_tl(t1, t1, 3); - tcg_gen_andi_tl(t0, t0, ~7); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ); - tcg_gen_shr_tl(t0, t0, t1); - tcg_gen_xori_tl(t1, t1, 63); - t2 = tcg_const_tl(0xfffffffffffffffeull); - tcg_gen_shl_tl(t2, t2, t1); gen_load_fpr64(ctx, t1, rt); - tcg_gen_and_tl(t1, t1, t2); - tcg_temp_free(t2); - tcg_gen_or_tl(t0, t0, t1); - tcg_temp_free(t1); - gen_store_fpr64(ctx, t0, rt); + gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUQ); + gen_store_fpr64(ctx, t1, rt); break; #endif default: @@ -4524,8 +4265,6 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, gen_load_fpr32(ctx, fp0, rt); tcg_gen_ext_i32_tl(t1, fp0); gen_helper_0e2i(swl, t1, t0, ctx->mem_idx); - tcg_temp_free_i32(fp0); - tcg_temp_free(t1); break; case OPC_GSSWRC1: check_cp1_enabled(ctx); @@ -4535,8 +4274,6 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, gen_load_fpr32(ctx, fp0, rt); tcg_gen_ext_i32_tl(t1, fp0); gen_helper_0e2i(swr, t1, t0, ctx->mem_idx); - tcg_temp_free_i32(fp0); - tcg_temp_free(t1); break; #if defined(TARGET_MIPS64) case OPC_GSSDLC1: @@ -4545,7 +4282,6 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, gen_base_offset_addr(ctx, t0, rs, shf_offset); gen_load_fpr64(ctx, t1, rt); gen_helper_0e2i(sdl, t1, t0, ctx->mem_idx); - tcg_temp_free(t1); break; case OPC_GSSDRC1: check_cp1_enabled(ctx); @@ -4553,7 +4289,6 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, gen_base_offset_addr(ctx, t0, rs, shf_offset); gen_load_fpr64(ctx, t1, rt); gen_helper_0e2i(sdr, t1, t0, ctx->mem_idx); - tcg_temp_free(t1); break; #endif default: @@ -4567,7 +4302,6 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, gen_reserved_instruction(ctx); break; } - tcg_temp_free(t0); } /* Loongson EXT LDC2/SDC2 */ @@ -4662,7 +4396,6 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL | ctx->default_tcg_memop_mask); gen_store_fpr32(ctx, fp0, rt); - tcg_temp_free_i32(fp0); break; #if defined(TARGET_MIPS64) case OPC_GSLDXC1: @@ -4679,21 +4412,18 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, t1 = tcg_temp_new(); gen_load_gpr(t1, rt); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_SB); - tcg_temp_free(t1); break; case OPC_GSSHX: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW | ctx->default_tcg_memop_mask); - tcg_temp_free(t1); break; case OPC_GSSWX: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); 
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | ctx->default_tcg_memop_mask); - tcg_temp_free(t1); break; #if defined(TARGET_MIPS64) case OPC_GSSDX: @@ -4701,7 +4431,6 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, gen_load_gpr(t1, rt); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); - tcg_temp_free(t1); break; #endif case OPC_GSSWXC1: @@ -4709,7 +4438,6 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, gen_load_fpr32(ctx, fp0, rt); tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL | ctx->default_tcg_memop_mask); - tcg_temp_free_i32(fp0); break; #if defined(TARGET_MIPS64) case OPC_GSSDXC1: @@ -4717,14 +4445,11 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, gen_load_fpr64(ctx, t1, rt); tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEUQ | ctx->default_tcg_memop_mask); - tcg_temp_free(t1); break; #endif default: break; } - - tcg_temp_free(t0); } /* Traps */ @@ -4834,8 +4559,6 @@ static void gen_trap(DisasContext *ctx, uint32_t opc, generate_exception(ctx, EXCP_TRAP); gen_set_label(l1); } - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) @@ -4916,6 +4639,14 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, break; case OPC_J: case OPC_JAL: + { + /* Jump to immediate */ + int jal_mask = ctx->hflags & MIPS_HFLAG_M16 ? 0xF8000000 + : 0xF0000000; + btgt = ((ctx->base.pc_next + insn_bytes) & jal_mask) + | (uint32_t)offset; + break; + } case OPC_JALX: /* Jump to immediate */ btgt = ((ctx->base.pc_next + insn_bytes) & (int32_t)0xF0000000) | @@ -5101,8 +4832,6 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, if (insn_bytes == 2) { ctx->hflags |= MIPS_HFLAG_B16; } - tcg_temp_free(t0); - tcg_temp_free(t1); } @@ -5171,13 +4900,9 @@ static void gen_bitops(DisasContext *ctx, uint32_t opc, int rt, fail: MIPS_INVAL("bitops"); gen_reserved_instruction(ctx); - tcg_temp_free(t0); - tcg_temp_free(t1); return; } gen_store_gpr(t0, rt); - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) @@ -5195,15 +4920,13 @@ static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) case OPC_WSBH: { TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_const_tl(0x00FF00FF); + TCGv t2 = tcg_constant_tl(0x00FF00FF); tcg_gen_shri_tl(t1, t0, 8); tcg_gen_and_tl(t1, t1, t2); tcg_gen_and_tl(t0, t0, t2); tcg_gen_shli_tl(t0, t0, 8); tcg_gen_or_tl(t0, t0, t1); - tcg_temp_free(t2); - tcg_temp_free(t1); tcg_gen_ext32s_tl(cpu_gpr[rd], t0); } break; @@ -5217,21 +4940,19 @@ static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) case OPC_DSBH: { TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_const_tl(0x00FF00FF00FF00FFULL); + TCGv t2 = tcg_constant_tl(0x00FF00FF00FF00FFULL); tcg_gen_shri_tl(t1, t0, 8); tcg_gen_and_tl(t1, t1, t2); tcg_gen_and_tl(t0, t0, t2); tcg_gen_shli_tl(t0, t0, 8); tcg_gen_or_tl(cpu_gpr[rd], t0, t1); - tcg_temp_free(t2); - tcg_temp_free(t1); } break; case OPC_DSHD: { TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_const_tl(0x0000FFFF0000FFFFULL); + TCGv t2 = tcg_constant_tl(0x0000FFFF0000FFFFULL); tcg_gen_shri_tl(t1, t0, 16); tcg_gen_and_tl(t1, t1, t2); @@ -5241,18 +4962,14 @@ static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) tcg_gen_shri_tl(t1, t0, 32); tcg_gen_shli_tl(t0, t0, 32); tcg_gen_or_tl(cpu_gpr[rd], t0, t1); - tcg_temp_free(t2); - tcg_temp_free(t1); } break; #endif default: MIPS_INVAL("bsfhl"); gen_reserved_instruction(ctx); - tcg_temp_free(t0); return; } - 
tcg_temp_free(t0); } static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs, @@ -5291,7 +5008,6 @@ static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs, tcg_gen_concat_tl_i64(t2, t1, t0); tcg_gen_shri_i64(t2, t2, 32 - bits); gen_move_low32(cpu_gpr[rd], t2); - tcg_temp_free_i64(t2); } break; #if defined(TARGET_MIPS64) @@ -5302,10 +5018,7 @@ static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs, break; #endif } - tcg_temp_free(t1); } - - tcg_temp_free(t0); } void gen_align(DisasContext *ctx, int wordsz, int rd, int rs, int rt, int bp) @@ -5332,7 +5045,6 @@ static void gen_bitswap(DisasContext *ctx, int opc, int rd, int rt) break; #endif } - tcg_temp_free(t0); } #ifndef CONFIG_USER_ONLY @@ -5350,8 +5062,6 @@ static inline void gen_mthc0_entrylo(TCGv arg, target_ulong off) tcg_gen_concat32_i64(t1, t1, t0); #endif tcg_gen_st_i64(t1, cpu_env, off); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t0); } static inline void gen_mthc0_store64(TCGv arg, target_ulong off) @@ -5363,8 +5073,6 @@ static inline void gen_mthc0_store64(TCGv arg, target_ulong off) tcg_gen_ld_i64(t1, cpu_env, off); tcg_gen_concat32_i64(t1, t1, t0); tcg_gen_st_i64(t1, cpu_env, off); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t0); } static inline void gen_mfhc0_entrylo(TCGv arg, target_ulong off) @@ -5378,7 +5086,6 @@ static inline void gen_mfhc0_entrylo(TCGv arg, target_ulong off) tcg_gen_shri_i64(t0, t0, 32); #endif gen_move_low32(arg, t0); - tcg_temp_free_i64(t0); } static inline void gen_mfhc0_load64(TCGv arg, target_ulong off, int shift) @@ -5388,7 +5095,6 @@ static inline void gen_mfhc0_load64(TCGv arg, target_ulong off, int shift) tcg_gen_ld_i64(t0, cpu_env, off); tcg_gen_shri_i64(t0, t0, 32 + shift); gen_move_low32(arg, t0); - tcg_temp_free_i64(t0); } static inline void gen_mfc0_load32(TCGv arg, target_ulong off) @@ -5397,7 +5103,6 @@ static inline void gen_mfc0_load32(TCGv arg, target_ulong off) tcg_gen_ld_i32(t0, cpu_env, off); tcg_gen_ext_i32_tl(arg, t0); - tcg_temp_free_i32(t0); } static inline void gen_mfc0_load64(TCGv arg, target_ulong off) @@ -5412,7 +5117,6 @@ static inline void gen_mtc0_store32(TCGv arg, target_ulong off) tcg_gen_trunc_tl_i32(t0, arg); tcg_gen_st_i32(t0, cpu_env, off); - tcg_temp_free_i32(t0); } #define CP0_CHECK(c) \ @@ -5734,7 +5438,6 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) } #endif gen_move_low32(arg, tmp); - tcg_temp_free_i64(tmp); } register_name = "EntryLo0"; break; @@ -5792,7 +5495,6 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) } #endif gen_move_low32(arg, tmp); - tcg_temp_free_i64(tmp); } register_name = "EntryLo1"; break; @@ -6321,7 +6023,6 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) TCGv_i64 tmp = tcg_temp_new_i64(); tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUMIPSState, CP0_TagLo)); gen_move_low32(arg, tmp); - tcg_temp_free_i64(tmp); } register_name = "TagLo"; break; @@ -8650,7 +8351,7 @@ static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd, int u, int sel, int h) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 && ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) != @@ -8762,7 +8463,7 @@ static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd, case 5: case 6: case 7: - gen_helper_mftc0_configx(t0, cpu_env, tcg_const_tl(sel)); + gen_helper_mftc0_configx(t0, cpu_env, 
tcg_constant_tl(sel)); break; default: goto die; @@ -8842,13 +8543,11 @@ static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd, gen_load_fpr32(ctx, fp0, rt); tcg_gen_ext_i32_tl(t0, fp0); - tcg_temp_free_i32(fp0); } else { TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32h(ctx, fp0, rt); tcg_gen_ext_i32_tl(t0, fp0); - tcg_temp_free_i32(fp0); } break; case 3: @@ -8865,11 +8564,9 @@ static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd, } trace_mips_translate_tr("mftr", rt, u, sel, h); gen_store_gpr(t0, rd); - tcg_temp_free(t0); return; die: - tcg_temp_free(t0); LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h); gen_reserved_instruction(ctx); } @@ -8878,7 +8575,7 @@ static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt, int u, int sel, int h) { int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); gen_load_gpr(t0, rt); if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 && @@ -9046,13 +8743,11 @@ static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt, tcg_gen_trunc_tl_i32(fp0, t0); gen_store_fpr32(ctx, fp0, rd); - tcg_temp_free_i32(fp0); } else { TCGv_i32 fp0 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(fp0, t0); gen_store_fpr32h(ctx, fp0, rd); - tcg_temp_free_i32(fp0); } break; case 3: @@ -9070,11 +8765,9 @@ static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt, } } trace_mips_translate_tr("mttr", rd, u, sel, h); - tcg_temp_free(t0); return; die: - tcg_temp_free(t0); LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h); gen_reserved_instruction(ctx); } @@ -9100,7 +8793,6 @@ static void gen_cp0(CPUMIPSState *env, DisasContext *ctx, uint32_t opc, gen_load_gpr(t0, rt); gen_mtc0(ctx, t0, rd, ctx->opcode & 0x7); - tcg_temp_free(t0); } opn = "mtc0"; break; @@ -9121,7 +8813,6 @@ static void gen_cp0(CPUMIPSState *env, DisasContext *ctx, uint32_t opc, gen_load_gpr(t0, rt); gen_dmtc0(ctx, t0, rd, ctx->opcode & 0x7); - tcg_temp_free(t0); } opn = "dmtc0"; break; @@ -9141,7 +8832,6 @@ static void gen_cp0(CPUMIPSState *env, DisasContext *ctx, uint32_t opc, TCGv t0 = tcg_temp_new(); gen_load_gpr(t0, rt); gen_mthc0(ctx, t0, rd, ctx->opcode & 0x7); - tcg_temp_free(t0); } opn = "mthc0"; break; @@ -9275,7 +8965,7 @@ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, if ((ctx->insn_flags & ISA_MIPS_R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) { gen_reserved_instruction(ctx); - goto out; + return; } if (cc != 0) { @@ -9315,7 +9005,6 @@ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc)); tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 1)); tcg_gen_nand_i32(t0, t0, t1); - tcg_temp_free_i32(t1); tcg_gen_andi_i32(t0, t0, 1); tcg_gen_extu_i32_tl(bcond, t0); } @@ -9326,7 +9015,6 @@ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc)); tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 1)); tcg_gen_or_i32(t0, t0, t1); - tcg_temp_free_i32(t1); tcg_gen_andi_i32(t0, t0, 1); tcg_gen_extu_i32_tl(bcond, t0); } @@ -9341,7 +9029,6 @@ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, tcg_gen_and_i32(t0, t0, t1); tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 3)); tcg_gen_nand_i32(t0, t0, t1); - tcg_temp_free_i32(t1); tcg_gen_andi_i32(t0, t0, 1); tcg_gen_extu_i32_tl(bcond, t0); } @@ -9356,7 +9043,6 @@ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, tcg_gen_or_i32(t0, t0, t1); 
tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 3)); tcg_gen_or_i32(t0, t0, t1); - tcg_temp_free_i32(t1); tcg_gen_andi_i32(t0, t0, 1); tcg_gen_extu_i32_tl(bcond, t0); } @@ -9366,12 +9052,10 @@ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, default: MIPS_INVAL("cp1 cond branch"); gen_reserved_instruction(ctx); - goto out; + return; } ctx->btarget = btarget; ctx->hflags |= MIPS_HFLAG_BDS32; - out: - tcg_temp_free_i32(t0); } /* R6 CP1 Branches */ @@ -9388,7 +9072,7 @@ static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op, "\n", ctx->base.pc_next); #endif gen_reserved_instruction(ctx); - goto out; + return; } gen_load_fpr64(ctx, t0, ft); @@ -9408,7 +9092,7 @@ static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op, default: MIPS_INVAL("cp1 cond branch"); gen_reserved_instruction(ctx); - goto out; + return; } tcg_gen_trunc_i64_tl(bcond, t0); @@ -9423,9 +9107,6 @@ static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op, ctx->hflags |= MIPS_HFLAG_BDS32; break; } - -out: - tcg_temp_free_i64(t0); } /* Coprocessor 1 (FPU) */ @@ -9653,7 +9334,6 @@ static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs) gen_load_fpr32(ctx, fp0, fs); tcg_gen_ext_i32_tl(t0, fp0); - tcg_temp_free_i32(fp0); } gen_store_gpr(t0, rt); break; @@ -9664,7 +9344,6 @@ static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs) tcg_gen_trunc_tl_i32(fp0, t0); gen_store_fpr32(ctx, fp0, fs); - tcg_temp_free_i32(fp0); } break; case OPC_CFC1: @@ -9694,7 +9373,6 @@ static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs) gen_load_fpr32h(ctx, fp0, fs); tcg_gen_ext_i32_tl(t0, fp0); - tcg_temp_free_i32(fp0); } gen_store_gpr(t0, rt); break; @@ -9705,17 +9383,13 @@ static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs) tcg_gen_trunc_tl_i32(fp0, t0); gen_store_fpr32h(ctx, fp0, fs); - tcg_temp_free_i32(fp0); } break; default: MIPS_INVAL("cp1 move"); gen_reserved_instruction(ctx); - goto out; + return; } - - out: - tcg_temp_free(t0); } static void gen_movci(DisasContext *ctx, int rd, int rs, int cc, int tf) @@ -9739,7 +9413,6 @@ static void gen_movci(DisasContext *ctx, int rd, int rs, int cc, int tf) t0 = tcg_temp_new_i32(); tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc)); tcg_gen_brcondi_i32(cond, t0, 0, l1); - tcg_temp_free_i32(t0); gen_load_gpr(cpu_gpr[rd], rs); gen_set_label(l1); } @@ -9762,7 +9435,6 @@ static inline void gen_movcf_s(DisasContext *ctx, int fs, int fd, int cc, gen_load_fpr32(ctx, t0, fs); gen_store_fpr32(ctx, t0, fd); gen_set_label(l1); - tcg_temp_free_i32(t0); } static inline void gen_movcf_d(DisasContext *ctx, int fs, int fd, int cc, @@ -9781,11 +9453,9 @@ static inline void gen_movcf_d(DisasContext *ctx, int fs, int fd, int cc, tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc)); tcg_gen_brcondi_i32(cond, t0, 0, l1); - tcg_temp_free_i32(t0); fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); gen_set_label(l1); } @@ -9813,14 +9483,13 @@ static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd, tcg_gen_brcondi_i32(cond, t0, 0, l2); gen_load_fpr32h(ctx, t0, fs); gen_store_fpr32h(ctx, t0, fd); - tcg_temp_free_i32(t0); gen_set_label(l2); } static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft, int fs) { - TCGv_i32 t1 = tcg_const_i32(0); + TCGv_i32 t1 = tcg_constant_i32(0); TCGv_i32 fp0 = tcg_temp_new_i32(); TCGv_i32 fp1 = tcg_temp_new_i32(); TCGv_i32 fp2 = tcg_temp_new_i32(); @@ -9848,16 +9517,12 @@ static void gen_sel_s(DisasContext 
*ctx, enum fopcode op1, int fd, int ft, } gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp2); - tcg_temp_free_i32(fp1); - tcg_temp_free_i32(fp0); - tcg_temp_free_i32(t1); } static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft, int fs) { - TCGv_i64 t1 = tcg_const_i64(0); + TCGv_i64 t1 = tcg_constant_i64(0); TCGv_i64 fp0 = tcg_temp_new_i64(); TCGv_i64 fp1 = tcg_temp_new_i64(); TCGv_i64 fp2 = tcg_temp_new_i64(); @@ -9885,10 +9550,6 @@ static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft, } gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp2); - tcg_temp_free_i64(fp1); - tcg_temp_free_i64(fp0); - tcg_temp_free_i64(t1); } static void gen_farith(DisasContext *ctx, enum fopcode op1, @@ -9904,9 +9565,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_add_s(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i32(fp1); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_SUB_S: @@ -9917,9 +9576,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_sub_s(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i32(fp1); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_MUL_S: @@ -9930,9 +9587,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_mul_s(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i32(fp1); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_DIV_S: @@ -9943,9 +9598,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_div_s(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i32(fp1); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_SQRT_S: @@ -9955,7 +9608,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_helper_float_sqrt_s(fp0, cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_ABS_S: @@ -9969,7 +9621,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_abs_s(fp0, fp0); } gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_MOV_S: @@ -9978,7 +9629,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_NEG_S: @@ -9992,7 +9642,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_chs_s(fp0, fp0); } gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_ROUND_L_S: @@ -10007,9 +9656,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, } else { gen_helper_float_round_l_s(fp64, cpu_env, fp32); } - tcg_temp_free_i32(fp32); gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(fp64); } break; case OPC_TRUNC_L_S: @@ -10024,9 +9671,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, } else { gen_helper_float_trunc_l_s(fp64, cpu_env, fp32); } - tcg_temp_free_i32(fp32); gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(fp64); } break; case OPC_CEIL_L_S: @@ -10041,9 +9686,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, } else { gen_helper_float_ceil_l_s(fp64, cpu_env, fp32); } - tcg_temp_free_i32(fp32); gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(fp64); } break; case OPC_FLOOR_L_S: @@ -10058,9 
+9701,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, } else { gen_helper_float_floor_l_s(fp64, cpu_env, fp32); } - tcg_temp_free_i32(fp32); gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(fp64); } break; case OPC_ROUND_W_S: @@ -10074,7 +9715,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_round_w_s(fp0, cpu_env, fp0); } gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_TRUNC_W_S: @@ -10088,7 +9728,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_trunc_w_s(fp0, cpu_env, fp0); } gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_CEIL_W_S: @@ -10102,7 +9741,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_ceil_w_s(fp0, cpu_env, fp0); } gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_FLOOR_W_S: @@ -10116,7 +9754,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_floor_w_s(fp0, cpu_env, fp0); } gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_SEL_S: @@ -10147,7 +9784,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, fs); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); gen_set_label(l1); } break; @@ -10162,7 +9798,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, fs); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); gen_set_label(l1); } } @@ -10174,7 +9809,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_helper_float_recip_s(fp0, cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_RSQRT_S: @@ -10184,7 +9818,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_helper_float_rsqrt_s(fp0, cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_MADDF_S: @@ -10198,9 +9831,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp2, fd); gen_helper_float_maddf_s(fp2, cpu_env, fp0, fp1, fp2); gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(fp2); - tcg_temp_free_i32(fp1); - tcg_temp_free_i32(fp0); } break; case OPC_MSUBF_S: @@ -10214,9 +9844,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp2, fd); gen_helper_float_msubf_s(fp2, cpu_env, fp0, fp1, fp2); gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(fp2); - tcg_temp_free_i32(fp1); - tcg_temp_free_i32(fp0); } break; case OPC_RINT_S: @@ -10226,7 +9853,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_helper_float_rint_s(fp0, cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_CLASS_S: @@ -10236,7 +9862,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_helper_float_class_s(fp0, cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_MIN_S: /* OPC_RECIP2_S */ @@ -10249,9 +9874,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp1, ft); gen_helper_float_min_s(fp2, cpu_env, fp0, fp1); gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(fp2); - tcg_temp_free_i32(fp1); - tcg_temp_free_i32(fp0); } else { /* OPC_RECIP2_S */ check_cp1_64bitmode(ctx); @@ -10262,9 +9884,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, 
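Nearly all of these gen_farith() hunks are the same mechanical cleanup: TCG now releases translator temporaries automatically once the instruction has been translated, so the explicit tcg_temp_free_*() calls after each load/helper/store sequence can simply be dropped, and with no frees left to run, the shared out: labels lose their purpose and error paths return directly (as in gen_compute_branch1() above). Condensed from the hunks above, every arm reduces to the same shape:

    case OPC_ADD_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();
            TCGv_i32 fp1 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_load_fpr32(ctx, fp1, ft);
            gen_helper_float_add_s(fp0, cpu_env, fp0, fp1);
            gen_store_fpr32(ctx, fp0, fd);
            /* no tcg_temp_free_i32(): the temps die with the instruction */
        }
        break;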
gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_recip2_s(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i32(fp1); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } } break; @@ -10278,9 +9898,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp1, ft); gen_helper_float_mina_s(fp2, cpu_env, fp0, fp1); gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(fp2); - tcg_temp_free_i32(fp1); - tcg_temp_free_i32(fp0); } else { /* OPC_RECIP1_S */ check_cp1_64bitmode(ctx); @@ -10290,7 +9907,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_helper_float_recip1_s(fp0, cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } } break; @@ -10303,8 +9919,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp1, ft); gen_helper_float_max_s(fp1, cpu_env, fp0, fp1); gen_store_fpr32(ctx, fp1, fd); - tcg_temp_free_i32(fp1); - tcg_temp_free_i32(fp0); } else { /* OPC_RSQRT1_S */ check_cp1_64bitmode(ctx); @@ -10314,7 +9928,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_helper_float_rsqrt1_s(fp0, cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } } break; @@ -10327,8 +9940,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp1, ft); gen_helper_float_maxa_s(fp1, cpu_env, fp0, fp1); gen_store_fpr32(ctx, fp1, fd); - tcg_temp_free_i32(fp1); - tcg_temp_free_i32(fp0); } else { /* OPC_RSQRT2_S */ check_cp1_64bitmode(ctx); @@ -10339,9 +9950,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_load_fpr32(ctx, fp1, ft); gen_helper_float_rsqrt2_s(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i32(fp1); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } } break; @@ -10353,9 +9962,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp32, fs); gen_helper_float_cvtd_s(fp64, cpu_env, fp32); - tcg_temp_free_i32(fp32); gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(fp64); } break; case OPC_CVT_W_S: @@ -10369,7 +9976,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_cvt_w_s(fp0, cpu_env, fp0); } gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_CVT_L_S: @@ -10384,9 +9990,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, } else { gen_helper_float_cvt_l_s(fp64, cpu_env, fp32); } - tcg_temp_free_i32(fp32); gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(fp64); } break; case OPC_CVT_PS_S: @@ -10399,10 +10003,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp32_0, fs); gen_load_fpr32(ctx, fp32_1, ft); tcg_gen_concat_i32_i64(fp64, fp32_1, fp32_0); - tcg_temp_free_i32(fp32_1); - tcg_temp_free_i32(fp32_0); gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(fp64); } break; case OPC_CMP_F_S: @@ -10437,9 +10038,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_add_d(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_SUB_D: @@ -10451,9 +10050,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_sub_d(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_MUL_D: @@ 
-10465,9 +10062,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_mul_d(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_DIV_D: @@ -10479,9 +10074,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_div_d(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_SQRT_D: @@ -10492,7 +10085,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_sqrt_d(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_ABS_D: @@ -10507,7 +10099,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_abs_d(fp0, fp0); } gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_MOV_D: @@ -10517,7 +10108,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_NEG_D: @@ -10532,7 +10122,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_chs_d(fp0, fp0); } gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_ROUND_L_D: @@ -10547,7 +10136,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_round_l_d(fp0, cpu_env, fp0); } gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_TRUNC_L_D: @@ -10562,7 +10150,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_trunc_l_d(fp0, cpu_env, fp0); } gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_CEIL_L_D: @@ -10577,7 +10164,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_ceil_l_d(fp0, cpu_env, fp0); } gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_FLOOR_L_D: @@ -10592,7 +10178,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_floor_l_d(fp0, cpu_env, fp0); } gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_ROUND_W_D: @@ -10607,9 +10192,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, } else { gen_helper_float_round_w_d(fp32, cpu_env, fp64); } - tcg_temp_free_i64(fp64); gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(fp32); } break; case OPC_TRUNC_W_D: @@ -10624,9 +10207,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, } else { gen_helper_float_trunc_w_d(fp32, cpu_env, fp64); } - tcg_temp_free_i64(fp64); gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(fp32); } break; case OPC_CEIL_W_D: @@ -10641,9 +10222,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, } else { gen_helper_float_ceil_w_d(fp32, cpu_env, fp64); } - tcg_temp_free_i64(fp64); gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(fp32); } break; case OPC_FLOOR_W_D: @@ -10658,9 +10237,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, } else { gen_helper_float_floor_w_d(fp32, cpu_env, fp64); } - tcg_temp_free_i64(fp64); gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(fp32); } break; case OPC_SEL_D: @@ -10691,7 +10268,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); 
gen_set_label(l1); } break; @@ -10706,7 +10282,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); gen_set_label(l1); } } @@ -10719,7 +10294,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_recip_d(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_RSQRT_D: @@ -10730,7 +10304,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_rsqrt_d(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_MADDF_D: @@ -10744,9 +10317,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp2, fd); gen_helper_float_maddf_d(fp2, cpu_env, fp0, fp1, fp2); gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(fp2); - tcg_temp_free_i64(fp1); - tcg_temp_free_i64(fp0); } break; case OPC_MSUBF_D: @@ -10760,9 +10330,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp2, fd); gen_helper_float_msubf_d(fp2, cpu_env, fp0, fp1, fp2); gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(fp2); - tcg_temp_free_i64(fp1); - tcg_temp_free_i64(fp0); } break; case OPC_RINT_D: @@ -10772,7 +10339,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_rint_d(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_CLASS_D: @@ -10782,7 +10348,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_class_d(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_MIN_D: /* OPC_RECIP2_D */ @@ -10794,8 +10359,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp1, ft); gen_helper_float_min_d(fp1, cpu_env, fp0, fp1); gen_store_fpr64(ctx, fp1, fd); - tcg_temp_free_i64(fp1); - tcg_temp_free_i64(fp0); } else { /* OPC_RECIP2_D */ check_cp1_64bitmode(ctx); @@ -10806,9 +10369,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_recip2_d(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } } break; @@ -10821,8 +10382,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp1, ft); gen_helper_float_mina_d(fp1, cpu_env, fp0, fp1); gen_store_fpr64(ctx, fp1, fd); - tcg_temp_free_i64(fp1); - tcg_temp_free_i64(fp0); } else { /* OPC_RECIP1_D */ check_cp1_64bitmode(ctx); @@ -10832,7 +10391,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_recip1_d(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } } break; @@ -10845,8 +10403,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp1, ft); gen_helper_float_max_d(fp1, cpu_env, fp0, fp1); gen_store_fpr64(ctx, fp1, fd); - tcg_temp_free_i64(fp1); - tcg_temp_free_i64(fp0); } else { /* OPC_RSQRT1_D */ check_cp1_64bitmode(ctx); @@ -10856,7 +10412,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_rsqrt1_d(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } } break; @@ -10869,8 +10424,6 @@ static void gen_farith(DisasContext *ctx, 
enum fopcode op1, gen_load_fpr64(ctx, fp1, ft); gen_helper_float_maxa_d(fp1, cpu_env, fp0, fp1); gen_store_fpr64(ctx, fp1, fd); - tcg_temp_free_i64(fp1); - tcg_temp_free_i64(fp0); } else { /* OPC_RSQRT2_D */ check_cp1_64bitmode(ctx); @@ -10881,9 +10434,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_rsqrt2_d(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } } break; @@ -10918,9 +10469,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp64, fs); gen_helper_float_cvts_d(fp32, cpu_env, fp64); - tcg_temp_free_i64(fp64); gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(fp32); } break; case OPC_CVT_W_D: @@ -10935,9 +10484,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, } else { gen_helper_float_cvt_w_d(fp32, cpu_env, fp64); } - tcg_temp_free_i64(fp64); gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(fp32); } break; case OPC_CVT_L_D: @@ -10952,7 +10499,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_helper_float_cvt_l_d(fp0, cpu_env, fp0); } gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_CVT_S_W: @@ -10962,7 +10508,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_helper_float_cvts_w(fp0, cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_CVT_D_W: @@ -10973,9 +10518,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp32, fs); gen_helper_float_cvtd_w(fp64, cpu_env, fp32); - tcg_temp_free_i32(fp32); gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(fp64); } break; case OPC_CVT_S_L: @@ -10986,9 +10529,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp64, fs); gen_helper_float_cvts_l(fp32, cpu_env, fp64); - tcg_temp_free_i64(fp64); gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(fp32); } break; case OPC_CVT_D_L: @@ -10999,7 +10540,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_cvtd_l(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_CVT_PS_PW: @@ -11010,7 +10550,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_cvtps_pw(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_ADD_PS: @@ -11022,9 +10561,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_add_ps(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_SUB_PS: @@ -11036,9 +10573,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_sub_ps(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_MUL_PS: @@ -11050,9 +10585,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_mul_ps(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_ABS_PS: @@ -11063,7 +10596,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, 
fp0, fs); gen_helper_float_abs_ps(fp0, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_MOV_PS: @@ -11073,7 +10605,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_NEG_PS: @@ -11084,7 +10615,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_chs_ps(fp0, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_MOVCF_PS: @@ -11103,7 +10633,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); gen_set_label(l1); } break; @@ -11118,7 +10647,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); gen_set_label(l1); } } @@ -11132,9 +10660,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, ft); gen_load_fpr64(ctx, fp1, fs); gen_helper_float_addr_ps(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_MULR_PS: @@ -11146,9 +10672,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, ft); gen_load_fpr64(ctx, fp1, fs); gen_helper_float_mulr_ps(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_RECIP2_PS: @@ -11160,9 +10684,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_recip2_ps(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_RECIP1_PS: @@ -11173,7 +10695,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_recip1_ps(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_RSQRT1_PS: @@ -11184,7 +10705,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_rsqrt1_ps(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_RSQRT2_PS: @@ -11196,9 +10716,7 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_load_fpr64(ctx, fp1, ft); gen_helper_float_rsqrt2_ps(fp0, cpu_env, fp0, fp1); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_CVT_S_PU: @@ -11209,7 +10727,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32h(ctx, fp0, fs); gen_helper_float_cvts_pu(fp0, cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_CVT_PW_PS: @@ -11220,7 +10737,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr64(ctx, fp0, fs); gen_helper_float_cvtpw_ps(fp0, cpu_env, fp0); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_CVT_S_PL: @@ -11231,7 +10747,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp0, fs); gen_helper_float_cvts_pl(fp0, cpu_env, fp0); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_PLL_PS: @@ -11244,8 +10759,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, 
gen_load_fpr32(ctx, fp1, ft); gen_store_fpr32h(ctx, fp0, fd); gen_store_fpr32(ctx, fp1, fd); - tcg_temp_free_i32(fp0); - tcg_temp_free_i32(fp1); } break; case OPC_PLU_PS: @@ -11258,8 +10771,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32h(ctx, fp1, ft); gen_store_fpr32(ctx, fp1, fd); gen_store_fpr32h(ctx, fp0, fd); - tcg_temp_free_i32(fp0); - tcg_temp_free_i32(fp1); } break; case OPC_PUL_PS: @@ -11272,8 +10783,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32(ctx, fp1, ft); gen_store_fpr32(ctx, fp1, fd); gen_store_fpr32h(ctx, fp0, fd); - tcg_temp_free_i32(fp0); - tcg_temp_free_i32(fp1); } break; case OPC_PUU_PS: @@ -11286,8 +10795,6 @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, gen_load_fpr32h(ctx, fp1, ft); gen_store_fpr32(ctx, fp1, fd); gen_store_fpr32h(ctx, fp0, fd); - tcg_temp_free_i32(fp0); - tcg_temp_free_i32(fp1); } break; case OPC_CMP_F_PS: @@ -11345,7 +10852,6 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL); tcg_gen_trunc_tl_i32(fp0, t0); gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(fp0); } break; case OPC_LDXC1: @@ -11355,7 +10861,6 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, TCGv_i64 fp0 = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_LUXC1: @@ -11366,7 +10871,6 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(fp0); } break; case OPC_SWXC1: @@ -11375,7 +10879,6 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, fs); tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL); - tcg_temp_free_i32(fp0); } break; case OPC_SDXC1: @@ -11385,7 +10888,6 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); - tcg_temp_free_i64(fp0); } break; case OPC_SUXC1: @@ -11395,11 +10897,9 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); - tcg_temp_free_i64(fp0); } break; } - tcg_temp_free(t0); } static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, @@ -11409,7 +10909,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, case OPC_ALNV_PS: check_ps(ctx); { - TCGv t0 = tcg_temp_local_new(); + TCGv t0 = tcg_temp_new(); TCGv_i32 fp = tcg_temp_new_i32(); TCGv_i32 fph = tcg_temp_new_i32(); TCGLabel *l1 = gen_new_label(); @@ -11426,7 +10926,6 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, tcg_gen_br(l2); gen_set_label(l1); tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2); - tcg_temp_free(t0); if (cpu_is_bigendian(ctx)) { gen_load_fpr32(ctx, fp, fs); gen_load_fpr32h(ctx, fph, ft); @@ -11439,8 +10938,6 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_store_fpr32h(ctx, fp, fd); } gen_set_label(l2); - tcg_temp_free_i32(fp); - tcg_temp_free_i32(fph); } break; case OPC_MADD_S: @@ -11454,10 +10951,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_load_fpr32(ctx, fp1, ft); gen_load_fpr32(ctx, fp2, fr); gen_helper_float_madd_s(fp2, cpu_env, fp0, fp1, fp2); - tcg_temp_free_i32(fp0); - tcg_temp_free_i32(fp1); gen_store_fpr32(ctx, fp2, fd); - 
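The OPC_ALNV_PS hunk above also swaps tcg_temp_local_new() for tcg_temp_new(): ordinary TCG temporaries now keep their values across branches and labels, so the costlier "local" variant is no longer needed for brcond/label sequences. A minimal sketch of the pattern that used to require a local temp (hypothetical sketch_cross_branch helper; assumes QEMU's TCG front-end API):

    static void sketch_cross_branch(TCGv dest, TCGv src)
    {
        TCGLabel *done = gen_new_label();
        TCGv t = tcg_temp_new();        /* previously: tcg_temp_local_new() */

        tcg_gen_andi_tl(t, src, 3);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t, 0, done);
        tcg_gen_movi_tl(t, 4);          /* clamp the misaligned case */
        gen_set_label(done);
        tcg_gen_mov_tl(dest, t);        /* t is read after a label; a plain
                                           temp used to be dead here */
    }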
tcg_temp_free_i32(fp2); } break; case OPC_MADD_D: @@ -11472,10 +10966,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_madd_d(fp2, cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(fp0); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(fp2); } break; case OPC_MADD_PS: @@ -11489,10 +10980,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_madd_ps(fp2, cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(fp0); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(fp2); } break; case OPC_MSUB_S: @@ -11506,10 +10994,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_load_fpr32(ctx, fp1, ft); gen_load_fpr32(ctx, fp2, fr); gen_helper_float_msub_s(fp2, cpu_env, fp0, fp1, fp2); - tcg_temp_free_i32(fp0); - tcg_temp_free_i32(fp1); gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(fp2); } break; case OPC_MSUB_D: @@ -11524,10 +11009,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_msub_d(fp2, cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(fp0); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(fp2); } break; case OPC_MSUB_PS: @@ -11541,10 +11023,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_msub_ps(fp2, cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(fp0); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(fp2); } break; case OPC_NMADD_S: @@ -11558,10 +11037,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_load_fpr32(ctx, fp1, ft); gen_load_fpr32(ctx, fp2, fr); gen_helper_float_nmadd_s(fp2, cpu_env, fp0, fp1, fp2); - tcg_temp_free_i32(fp0); - tcg_temp_free_i32(fp1); gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(fp2); } break; case OPC_NMADD_D: @@ -11576,10 +11052,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_nmadd_d(fp2, cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(fp0); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(fp2); } break; case OPC_NMADD_PS: @@ -11593,10 +11066,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_nmadd_ps(fp2, cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(fp0); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(fp2); } break; case OPC_NMSUB_S: @@ -11610,10 +11080,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_load_fpr32(ctx, fp1, ft); gen_load_fpr32(ctx, fp2, fr); gen_helper_float_nmsub_s(fp2, cpu_env, fp0, fp1, fp2); - tcg_temp_free_i32(fp0); - tcg_temp_free_i32(fp1); gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(fp2); } break; case OPC_NMSUB_D: @@ -11628,10 +11095,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); gen_helper_float_nmsub_d(fp2, cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(fp0); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(fp2); } break; case OPC_NMSUB_PS: @@ -11645,10 +11109,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, gen_load_fpr64(ctx, fp1, ft); gen_load_fpr64(ctx, fp2, fr); 
gen_helper_float_nmsub_ps(fp2, cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(fp0); - tcg_temp_free_i64(fp1); gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(fp2); } break; default: @@ -11737,7 +11198,6 @@ void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel) gen_reserved_instruction(ctx); break; } - tcg_temp_free(t0); } static inline void clear_branch_hflags(DisasContext *ctx) @@ -11796,11 +11256,9 @@ static void gen_branch(DisasContext *ctx, int insn_bytes) tcg_gen_andi_tl(t0, btarget, 0x1); tcg_gen_trunc_tl_i32(t1, t0); - tcg_temp_free(t0); tcg_gen_andi_i32(hflags, hflags, ~(uint32_t)MIPS_HFLAG_M16); tcg_gen_shli_i32(t1, t1, MIPS_HFLAG_M16_SHIFT); tcg_gen_or_i32(hflags, hflags, t1); - tcg_temp_free_i32(t1); tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1); } else { @@ -11830,7 +11288,7 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, "\n", ctx->base.pc_next); #endif gen_reserved_instruction(ctx); - goto out; + return; } /* Load needed operands and calculate btarget */ @@ -11884,13 +11342,12 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, gen_load_gpr(tbase, rt); gen_op_addr_add(ctx, btarget, tbase, toffset); - tcg_temp_free(tbase); } break; default: MIPS_INVAL("Compact branch/jump"); gen_reserved_instruction(ctx); - goto out; + return; } if (bcond_compute == 0) { @@ -11911,7 +11368,7 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, default: MIPS_INVAL("Compact branch/jump"); gen_reserved_instruction(ctx); - goto out; + return; } /* Generating branch here as compact branches don't have delay slot */ @@ -12001,10 +11458,6 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, /* OPC_BNVC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t4, 0, fs); } - tcg_temp_free(input_overflow); - tcg_temp_free(t4); - tcg_temp_free(t3); - tcg_temp_free(t2); } else if (rs < rt && rs == 0) { /* OPC_BEQZALC, OPC_BNEZALC */ if (opc == OPC_BEQZALC) { @@ -12034,7 +11487,7 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, default: MIPS_INVAL("Compact conditional branch/jump"); gen_reserved_instruction(ctx); - goto out; + return; } /* Generating branch here as compact branches don't have delay slot */ @@ -12043,10 +11496,6 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, ctx->hflags |= MIPS_HFLAG_FBNSLOT; } - -out: - tcg_temp_free(t0); - tcg_temp_free(t1); } void gen_addiupc(DisasContext *ctx, int rx, int imm, @@ -12066,19 +11515,15 @@ void gen_addiupc(DisasContext *ctx, int rx, int imm, if (!is_64_bit) { tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]); } - - tcg_temp_free(t0); } static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base, int16_t offset) { - TCGv_i32 t0 = tcg_const_i32(op); + TCGv_i32 t0 = tcg_constant_i32(op); TCGv t1 = tcg_temp_new(); gen_base_offset_addr(ctx, t1, base, offset); gen_helper_cache(cpu_env, t1, t0); - tcg_temp_free(t1); - tcg_temp_free_i32(t0); } static inline bool is_uhi(DisasContext *ctx, int sdbbp_code) @@ -12106,9 +11551,6 @@ void gen_ldxs(DisasContext *ctx, int base, int index, int rd) tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL); gen_store_gpr(t1, rd); - - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_sync(int stype) @@ -12212,7 +11654,6 @@ static void gen_mips_lx(DisasContext *ctx, uint32_t opc, break; #endif } - tcg_temp_free(t0); } static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, @@ -12423,19 +11864,17 @@ static void gen_mipsdsp_arith(DisasContext *ctx, 
uint32_t op1, uint32_t op2, case OPC_PRECR_SRA_PH_W: check_dsp_r2(ctx); { - TCGv_i32 sa_t = tcg_const_i32(v2); + TCGv_i32 sa_t = tcg_constant_i32(v2); gen_helper_precr_sra_ph_w(cpu_gpr[ret], sa_t, v1_t, cpu_gpr[ret]); - tcg_temp_free_i32(sa_t); break; } case OPC_PRECR_SRA_R_PH_W: check_dsp_r2(ctx); { - TCGv_i32 sa_t = tcg_const_i32(v2); + TCGv_i32 sa_t = tcg_constant_i32(v2); gen_helper_precr_sra_r_ph_w(cpu_gpr[ret], sa_t, v1_t, cpu_gpr[ret]); - tcg_temp_free_i32(sa_t); break; } case OPC_PRECRQ_PH_W: @@ -12622,17 +12061,15 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, case OPC_PRECR_SRA_QH_PW: check_dsp_r2(ctx); { - TCGv_i32 ret_t = tcg_const_i32(ret); + TCGv_i32 ret_t = tcg_constant_i32(ret); gen_helper_precr_sra_qh_pw(v2_t, v1_t, v2_t, ret_t); - tcg_temp_free_i32(ret_t); break; } case OPC_PRECR_SRA_R_QH_PW: check_dsp_r2(ctx); { - TCGv_i32 sa_v = tcg_const_i32(ret); + TCGv_i32 sa_v = tcg_constant_i32(ret); gen_helper_precr_sra_r_qh_pw(v2_t, v1_t, v2_t, sa_v); - tcg_temp_free_i32(sa_v); break; } case OPC_PRECRQ_OB_QH: @@ -12659,9 +12096,6 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, break; #endif } - - tcg_temp_free(v1_t); - tcg_temp_free(v2_t); } static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc, @@ -12901,10 +12335,6 @@ static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc, break; #endif } - - tcg_temp_free(t0); - tcg_temp_free(v1_t); - tcg_temp_free(v2_t); } static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, @@ -13211,10 +12641,6 @@ static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, break; #endif } - - tcg_temp_free_i32(t0); - tcg_temp_free(v1_t); - tcg_temp_free(v2_t); } static void gen_mipsdsp_bitinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, @@ -13351,8 +12777,6 @@ static void gen_mipsdsp_bitinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, break; #endif } - tcg_temp_free(t0); - tcg_temp_free(val_t); } static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, @@ -13535,10 +12959,6 @@ static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, break; #endif } - - tcg_temp_free(t1); - tcg_temp_free(v1_t); - tcg_temp_free(v2_t); } static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx, @@ -13626,7 +13046,6 @@ static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx, break; #endif } - tcg_temp_free(t0); } static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, @@ -13843,10 +13262,6 @@ static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, break; #endif } - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(v1_t); } /* End MIPSDSP functions. 
*/ @@ -14697,9 +14112,6 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) gen_load_gpr(t1, rs); gen_helper_insv(cpu_gpr[rt], cpu_env, t1, t0); - - tcg_temp_free(t0); - tcg_temp_free(t1); break; } default: /* Invalid */ @@ -14969,9 +14381,6 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) gen_load_gpr(t1, rs); gen_helper_dinsv(cpu_gpr[rt], cpu_env, t1, t0); - - tcg_temp_free(t0); - tcg_temp_free(t1); break; } default: /* Invalid */ @@ -15198,8 +14607,6 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) gen_load_gpr(t0, rt); gen_load_gpr(t1, rs); gen_helper_fork(t0, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); } break; case OPC_YIELD: @@ -15210,7 +14617,6 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) gen_load_gpr(t0, rs); gen_helper_yield(t0, cpu_env, t0); gen_store_gpr(t0, rd); - tcg_temp_free(t0); } break; default: @@ -15453,7 +14859,6 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) gen_reserved_instruction(ctx); break; } - tcg_temp_free(t0); } #endif /* !CONFIG_USER_ONLY */ break; @@ -15901,7 +15306,6 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) TCGv t0 = tcg_temp_new(); gen_load_gpr(t0, rs); tcg_gen_addi_tl(cpu_gpr[rt], t0, imm << 16); - tcg_temp_free(t0); } #else gen_reserved_instruction(ctx); @@ -16159,7 +15563,7 @@ static const TranslatorOps mips_tr_ops = { .disas_log = mips_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; diff --git a/target/mips/tcg/translate_addr_const.c b/target/mips/tcg/translate_addr_const.c index 96f483418e..a510da406c 100644 --- a/target/mips/tcg/translate_addr_const.c +++ b/target/mips/tcg/translate_addr_const.c @@ -30,10 +30,6 @@ bool gen_lsa(DisasContext *ctx, int rd, int rt, int rs, int sa) tcg_gen_shli_tl(t0, t0, sa + 1); tcg_gen_add_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - - tcg_temp_free(t1); - tcg_temp_free(t0); - return true; } @@ -54,8 +50,5 @@ bool gen_dlsa(DisasContext *ctx, int rd, int rt, int rs, int sa) gen_load_gpr(t1, rt); tcg_gen_shli_tl(t0, t0, sa + 1); tcg_gen_add_tl(cpu_gpr[rd], t0, t1); - tcg_temp_free(t1); - tcg_temp_free(t0); - return true; } diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c index 4e479c2d10..3a45a1bfea 100644 --- a/target/mips/tcg/tx79_translate.c +++ b/target/mips/tcg/tx79_translate.c @@ -138,10 +138,6 @@ static bool trans_parallel_arith(DisasContext *ctx, arg_r *a, gen_load_gpr_hi(ax, a->rs); gen_load_gpr_hi(bx, a->rt); gen_logic_i64(cpu_gpr_hi[a->rd], ax, bx); - - tcg_temp_free(bx); - tcg_temp_free(ax); - return true; } @@ -247,8 +243,8 @@ static bool trans_parallel_compare(DisasContext *ctx, arg_r *a, return true; } - c0 = tcg_const_tl(0); - c1 = tcg_const_tl(0xffffffff); + c0 = tcg_constant_tl(0); + c1 = tcg_constant_tl(0xffffffff); ax = tcg_temp_new_i64(); bx = tcg_temp_new_i64(); t0 = tcg_temp_new_i64(); @@ -273,15 +269,6 @@ static bool trans_parallel_compare(DisasContext *ctx, arg_r *a, tcg_gen_movcond_i64(cond, t2, t1, t0, c1, c0); tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], cpu_gpr_hi[a->rd], t2, wlen * i, wlen); } - - tcg_temp_free(t2); - tcg_temp_free(t1); - tcg_temp_free(t0); - tcg_temp_free(bx); - tcg_temp_free(ax); - tcg_temp_free(c1); - tcg_temp_free(c0); - return true; } @@ -362,10 +349,6 @@ static bool 
trans_LQ(DisasContext *ctx, arg_i *a) tcg_gen_addi_i64(addr, addr, 8); tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ); gen_store_gpr_hi(t0, a->rt); - - tcg_temp_free(t0); - tcg_temp_free(addr); - return true; } @@ -389,10 +372,6 @@ static bool trans_SQ(DisasContext *ctx, arg_i *a) tcg_gen_addi_i64(addr, addr, 8); gen_load_gpr_hi(t0, a->rt); tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ); - - tcg_temp_free(addr); - tcg_temp_free(t0); - return true; } @@ -458,11 +437,6 @@ static bool trans_PPACW(DisasContext *ctx, arg_r *a) gen_load_gpr_hi(t0, a->rs); /* a1 */ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], a0, t0, 32, 32); - - tcg_temp_free(t0); - tcg_temp_free(b0); - tcg_temp_free(a0); - return true; } @@ -506,10 +480,6 @@ static bool trans_PEXTLx(DisasContext *ctx, arg_r *a, unsigned wlen) tcg_gen_shri_i64(bx, bx, wlen); tcg_gen_shri_i64(ax, ax, wlen); } - - tcg_temp_free(bx); - tcg_temp_free(ax); - return true; } @@ -541,10 +511,6 @@ static bool trans_PEXTLW(DisasContext *ctx, arg_r *a) gen_load_gpr(ax, a->rs); gen_load_gpr(bx, a->rt); gen_pextw(cpu_gpr[a->rd], cpu_gpr_hi[a->rd], ax, bx); - - tcg_temp_free(bx); - tcg_temp_free(ax); - return true; } @@ -564,10 +530,6 @@ static bool trans_PEXTUW(DisasContext *ctx, arg_r *a) gen_load_gpr_hi(ax, a->rs); gen_load_gpr_hi(bx, a->rt); gen_pextw(cpu_gpr[a->rd], cpu_gpr_hi[a->rd], ax, bx); - - tcg_temp_free(bx); - tcg_temp_free(ax); - return true; } @@ -678,8 +640,5 @@ static bool trans_PROT3W(DisasContext *ctx, arg_r *a) tcg_gen_deposit_i64(cpu_gpr[a->rd], cpu_gpr[a->rt], ax, 0, 32); tcg_gen_rotri_i64(cpu_gpr[a->rd], cpu_gpr[a->rd], 32); - - tcg_temp_free(ax); - return true; } diff --git a/target/mips/tcg/vr54xx_translate.c b/target/mips/tcg/vr54xx_translate.c index 3e2c98f2c6..804672f84c 100644 --- a/target/mips/tcg/vr54xx_translate.c +++ b/target/mips/tcg/vr54xx_translate.c @@ -49,11 +49,7 @@ static bool trans_mult_acc(DisasContext *ctx, arg_r *a, gen_helper_mult_acc(t0, cpu_env, t0, t1); gen_store_gpr(t0, a->rd); - - tcg_temp_free(t0); - tcg_temp_free(t1); - - return false; + return true; } TRANS(MACC, trans_mult_acc, gen_helper_macc); diff --git a/target/nios2/cpu-param.h b/target/nios2/cpu-param.h index 177d720864..767bba4b7b 100644 --- a/target/nios2/cpu-param.h +++ b/target/nios2/cpu-param.h @@ -16,6 +16,5 @@ #else # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif -#define NB_MMU_MODES 2 #endif diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c index cff30823da..bc5cbf81c2 100644 --- a/target/nios2/cpu.c +++ b/target/nios2/cpu.c @@ -23,7 +23,7 @@ #include "qapi/error.h" #include "cpu.h" #include "exec/log.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "hw/qdev-properties.h" static void nios2_cpu_set_pc(CPUState *cs, vaddr value) diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h index b1a5549074..20042c4332 100644 --- a/target/nios2/cpu.h +++ b/target/nios2/cpu.h @@ -262,7 +262,6 @@ void nios2_tcg_init(void); void nios2_cpu_do_interrupt(CPUState *cs); void dump_mmu(CPUNios2State *env); void nios2_cpu_dump_state(CPUState *cpu, FILE *f, int flags); -hwaddr nios2_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); G_NORETURN void nios2_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); @@ -288,6 +287,7 @@ static inline int cpu_mmu_index(CPUNios2State *env, bool ifetch) } #ifndef CONFIG_USER_ONLY +hwaddr nios2_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int 
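The vr54xx_translate.c change above is a behavioral fix rather than a cleanup: decodetree translation callbacks return true when an instruction has been recognized and translated, and false to send the caller down its illegal/reserved-instruction path, so the old return false flagged every MACC-family op as unimplemented even though code had already been emitted. The convention, sketched with hypothetical names:

    static bool trans_FOO(DisasContext *ctx, arg_FOO *a)  /* hypothetical insn */
    {
        if (!insn_available(ctx)) {   /* hypothetical feature predicate */
            return false;             /* not decoded: caller raises RI */
        }
        emit_foo(ctx, a);             /* hypothetical code generation */
        return true;                  /* decoded and translated */
    }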
mmu_idx, bool probe, uintptr_t retaddr); diff --git a/target/nios2/nios2-semi.c b/target/nios2/nios2-semi.c index f76e8588c5..3738774976 100644 --- a/target/nios2/nios2-semi.c +++ b/target/nios2/nios2-semi.c @@ -23,7 +23,8 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/syscalls.h" +#include "gdbstub/helpers.h" #include "semihosting/syscalls.h" #include "semihosting/softmmu-uaccess.h" #include "qemu/log.h" diff --git a/target/nios2/translate.c b/target/nios2/translate.c index 7aee65a089..a548e16ed5 100644 --- a/target/nios2/translate.c +++ b/target/nios2/translate.c @@ -233,7 +233,6 @@ static void gen_jumpr(DisasContext *dc, int regno, bool is_call) tcg_gen_andi_tl(test, dest, 3); tcg_gen_brcondi_tl(TCG_COND_NE, test, 0, l); - tcg_temp_free(test); tcg_gen_mov_tl(cpu_pc, dest); if (is_call) { @@ -299,8 +298,12 @@ static void gen_ldx(DisasContext *dc, uint32_t code, uint32_t flags) TCGv data = dest_gpr(dc, instr.b); tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16.s); +#ifdef CONFIG_USER_ONLY + flags |= MO_UNALN; +#else + flags |= MO_ALIGN; +#endif tcg_gen_qemu_ld_tl(data, addr, dc->mem_idx, flags); - tcg_temp_free(addr); } /* Store instructions */ @@ -311,8 +314,12 @@ static void gen_stx(DisasContext *dc, uint32_t code, uint32_t flags) TCGv addr = tcg_temp_new(); tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16.s); +#ifdef CONFIG_USER_ONLY + flags |= MO_UNALN; +#else + flags |= MO_ALIGN; +#endif tcg_gen_qemu_st_tl(val, addr, dc->mem_idx, flags); - tcg_temp_free(addr); } /* Branch instructions */ @@ -500,7 +507,6 @@ static void eret(DisasContext *dc, uint32_t code, uint32_t flags) TCGv tmp = tcg_temp_new(); tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPUNios2State, ctrl[CR_ESTATUS])); gen_helper_eret(cpu_env, tmp, load_gpr(dc, R_EA)); - tcg_temp_free(tmp); } else { gen_helper_eret(cpu_env, load_gpr(dc, R_SSTATUS), load_gpr(dc, R_EA)); } @@ -530,7 +536,6 @@ static void bret(DisasContext *dc, uint32_t code, uint32_t flags) TCGv tmp = tcg_temp_new(); tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPUNios2State, ctrl[CR_BSTATUS])); gen_helper_eret(cpu_env, tmp, load_gpr(dc, R_BA)); - tcg_temp_free(tmp); dc->base.is_jmp = DISAS_NORETURN; #endif @@ -597,8 +602,6 @@ static void rdctl(DisasContext *dc, uint32_t code, uint32_t flags) tcg_gen_ld_tl(t1, cpu_env, offsetof(CPUNios2State, ctrl[CR_IPENDING])); tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUNios2State, ctrl[CR_IENABLE])); tcg_gen_and_tl(dest, t1, t2); - tcg_temp_free(t1); - tcg_temp_free(t2); break; default: tcg_gen_ld_tl(dest, cpu_env, @@ -662,11 +665,9 @@ static void wrctl(DisasContext *dc, uint32_t code, uint32_t flags) tcg_gen_ld_tl(o, cpu_env, ofs); tcg_gen_andi_tl(o, o, ro); tcg_gen_or_tl(n, n, o); - tcg_temp_free(o); } tcg_gen_st_tl(n, cpu_env, ofs); - tcg_temp_free(n); } break; } @@ -753,7 +754,6 @@ static void do_rr_mul_high(DisasContext *dc, uint32_t insn, GenFn4 *fn) fn(discard, dest_gpr(dc, instr.c), load_gpr(dc, instr.a), load_gpr(dc, instr.b)); - tcg_temp_free(discard); } #define gen_rr_mul_high(fname, insn) \ @@ -771,7 +771,6 @@ static void do_rr_shift(DisasContext *dc, uint32_t insn, GenFn3 *fn) tcg_gen_andi_tl(sh, load_gpr(dc, instr.b), 31); fn(dest_gpr(dc, instr.c), load_gpr(dc, instr.a), sh); - tcg_temp_free(sh); } #define gen_rr_shift(fname, insn) \ @@ -990,10 +989,6 @@ static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) instr = &i_type_instructions[op]; instr->handler(dc, code, instr->flags); - - if (dc->sink) { - tcg_temp_free(dc->sink); - } } static void 
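In the nios2 gen_ldx()/gen_stx() hunks above, the alignment policy becomes explicit rather than implied: softmmu builds now request MO_ALIGN so that an unaligned guest access can fault, while user-mode builds keep MO_UNALN, presumably for compatibility with existing binaries. Condensed, both loads and stores share the same shape:

    tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16.s);
    #ifdef CONFIG_USER_ONLY
        flags |= MO_UNALN;   /* linux-user: permit unaligned access */
    #else
        flags |= MO_ALIGN;   /* softmmu: unaligned access faults    */
    #endif
    tcg_gen_qemu_ld_tl(data, addr, dc->mem_idx, flags);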
nios2_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) @@ -1037,7 +1032,7 @@ static const TranslatorOps nios2_tr_ops = { .disas_log = nios2_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/openrisc/cpu-param.h b/target/openrisc/cpu-param.h index 73be699f36..3f08207485 100644 --- a/target/openrisc/cpu-param.h +++ b/target/openrisc/cpu-param.h @@ -12,6 +12,5 @@ #define TARGET_PAGE_BITS 13 #define TARGET_PHYS_ADDR_SPACE_BITS 32 #define TARGET_VIRT_ADDR_SPACE_BITS 32 -#define NB_MMU_MODES 3 #endif diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c index 4c11a1f7ad..61d748cfdc 100644 --- a/target/openrisc/cpu.c +++ b/target/openrisc/cpu.c @@ -22,6 +22,8 @@ #include "qemu/qemu-print.h" #include "cpu.h" #include "exec/exec-all.h" +#include "fpu/softfloat-helpers.h" +#include "tcg/tcg.h" static void openrisc_cpu_set_pc(CPUState *cs, vaddr value) { @@ -43,7 +45,8 @@ static void openrisc_cpu_synchronize_from_tb(CPUState *cs, { OpenRISCCPU *cpu = OPENRISC_CPU(cs); - cpu->env.pc = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + cpu->env.pc = tb->pc; } static void openrisc_restore_state_to_opc(CPUState *cs, @@ -88,6 +91,9 @@ static void openrisc_cpu_reset_hold(Object *obj) s->exception_index = -1; cpu_set_fpcsr(&cpu->env, 0); + set_float_detect_tininess(float_tininess_before_rounding, + &cpu->env.fp_status); + #ifndef CONFIG_USER_ONLY cpu->env.picmr = 0x00000000; cpu->env.picsr = 0x00000000; diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h index 5f60749705..f16e8b3274 100644 --- a/target/openrisc/cpu.h +++ b/target/openrisc/cpu.h @@ -312,7 +312,6 @@ struct ArchCPU { void cpu_openrisc_list(void); void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags); -hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void openrisc_translate_init(void); @@ -321,6 +320,8 @@ int print_insn_or1k(bfd_vma addr, disassemble_info *info); #define cpu_list cpu_openrisc_list #ifndef CONFIG_USER_ONLY +hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); + bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); diff --git a/target/openrisc/fpu_helper.c b/target/openrisc/fpu_helper.c index f9e34fa2cc..8b81d2f62f 100644 --- a/target/openrisc/fpu_helper.c +++ b/target/openrisc/fpu_helper.c @@ -20,8 +20,8 @@ #include "qemu/osdep.h" #include "cpu.h" +#include "exec/exec-all.h" #include "exec/helper-proto.h" -#include "exception.h" #include "fpu/softfloat.h" static int ieee_ex_to_openrisc(int fexcp) @@ -45,6 +45,15 @@ static int ieee_ex_to_openrisc(int fexcp) return ret; } +static G_NORETURN +void do_fpe(CPUOpenRISCState *env, uintptr_t pc) +{ + CPUState *cs = env_cpu(env); + + cs->exception_index = EXCP_FPE; + cpu_loop_exit_restore(cs, pc); +} + void HELPER(update_fpcsr)(CPUOpenRISCState *env) { int tmp = get_float_exception_flags(&env->fp_status); @@ -55,7 +64,7 @@ void HELPER(update_fpcsr)(CPUOpenRISCState *env) if (tmp) { env->fpcsr |= tmp; if (env->fpcsr & FPCSR_FPEE) { - helper_exception(env, EXCP_FPE); + do_fpe(env, GETPC()); } } } diff --git a/target/openrisc/gdbstub.c b/target/openrisc/gdbstub.c index 095bf76c12..d1074a0581 
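The new do_fpe() in openrisc's fpu_helper.c is the idiomatic way to raise a guest exception from inside a TCG helper: capture the host return address with GETPC() in the outermost helper, then let cpu_loop_exit_restore() unwind guest CPU state back to the faulting instruction before leaving the translated code. A minimal sketch (hypothetical raise_guest_fpe name; assumes QEMU's exception plumbing):

    static G_NORETURN void raise_guest_fpe(CPUOpenRISCState *env, uintptr_t pc)
    {
        CPUState *cs = env_cpu(env);

        cs->exception_index = EXCP_FPE;
        cpu_loop_exit_restore(cs, pc);  /* unwinds to the faulting insn */
    }

    /* Call site, in the outermost helper that detected the fault:
     *     raise_guest_fpe(env, GETPC());
     */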
100644 --- a/target/openrisc/gdbstub.c +++ b/target/openrisc/gdbstub.c @@ -19,7 +19,7 @@ */ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" int openrisc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { diff --git a/target/openrisc/interrupt.c b/target/openrisc/interrupt.c index c31c6f12c4..3887812810 100644 --- a/target/openrisc/interrupt.c +++ b/target/openrisc/interrupt.c @@ -21,7 +21,7 @@ #include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #ifndef CONFIG_USER_ONLY #include "hw/loader.h" diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c index 0b8afdbacf..603c26715e 100644 --- a/target/openrisc/mmu.c +++ b/target/openrisc/mmu.c @@ -22,7 +22,7 @@ #include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "hw/loader.h" diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c index ec145960e3..ccdee3b8be 100644 --- a/target/openrisc/sys_helper.c +++ b/target/openrisc/sys_helper.c @@ -29,17 +29,37 @@ #define TO_SPR(group, number) (((group) << 11) + (number)) +static inline bool is_user(CPUOpenRISCState *env) +{ +#ifdef CONFIG_USER_ONLY + return true; +#else + return (env->sr & SR_SM) == 0; +#endif +} + void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) { -#ifndef CONFIG_USER_ONLY OpenRISCCPU *cpu = env_archcpu(env); +#ifndef CONFIG_USER_ONLY CPUState *cs = env_cpu(env); target_ulong mr; int idx; #endif + /* Handle user accessible SPRs first. */ switch (spr) { + case TO_SPR(0, 20): /* FPCSR */ + cpu_set_fpcsr(env, rb); + return; + } + + if (is_user(env)) { + raise_exception(cpu, EXCP_ILLEGAL); + } + #ifndef CONFIG_USER_ONLY + switch (spr) { case TO_SPR(0, 11): /* EVBAR */ env->evbar = rb; break; @@ -187,27 +207,33 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) cpu_openrisc_timer_update(cpu); qemu_mutex_unlock_iothread(); break; -#endif - - case TO_SPR(0, 20): /* FPCSR */ - cpu_set_fpcsr(env, rb); - break; } +#endif } target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd, target_ulong spr) { + OpenRISCCPU *cpu = env_archcpu(env); #ifndef CONFIG_USER_ONLY uint64_t data[TARGET_INSN_START_WORDS]; MachineState *ms = MACHINE(qdev_get_machine()); - OpenRISCCPU *cpu = env_archcpu(env); CPUState *cs = env_cpu(env); int idx; #endif + /* Handle user accessible SPRs first. */ switch (spr) { + case TO_SPR(0, 20): /* FPCSR */ + return env->fpcsr; + } + + if (is_user(env)) { + raise_exception(cpu, EXCP_ILLEGAL); + } + #ifndef CONFIG_USER_ONLY + switch (spr) { case TO_SPR(0, 0): /* VR */ return env->vr; @@ -324,11 +350,8 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd, cpu_openrisc_count_update(cpu); qemu_mutex_unlock_iothread(); return cpu_openrisc_count_get(cpu); -#endif - - case TO_SPR(0, 20): /* FPCSR */ - return env->fpcsr; } +#endif /* for rd is passed in, if rd unchanged, just keep it back. 
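The restructured mtspr/mfspr helpers above settle into a three-stage dispatch: user-accessible SPRs (currently only FPCSR, group 0 number 20) are served before any privilege check, unprivileged access to anything else raises EXCP_ILLEGAL, and the remaining system SPRs are decoded only under !CONFIG_USER_ONLY. In outline (names from the patch; privileged decode elided):

    target_ulong mfspr_outline(CPUOpenRISCState *env, target_ulong rd,
                               target_ulong spr)
    {
        switch (spr) {
        case TO_SPR(0, 20):     /* FPCSR is readable from user mode */
            return env->fpcsr;
        }
        if (is_user(env)) {
            raise_exception(env_archcpu(env), EXCP_ILLEGAL); /* no return */
        }
    #ifndef CONFIG_USER_ONLY
        /* ... privileged SPR decode, as in the hunk above ... */
    #endif
        return rd;              /* unknown SPR: hand rd back unchanged */
    }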
*/ return rd; diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c index 2f3d7c5fd1..43ba0cc1ad 100644 --- a/target/openrisc/translate.c +++ b/target/openrisc/translate.c @@ -206,10 +206,8 @@ static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) tcg_gen_xor_tl(cpu_sr_ov, srca, srcb); tcg_gen_xor_tl(t0, res, srcb); tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov); - tcg_temp_free(t0); tcg_gen_mov_tl(dest, res); - tcg_temp_free(res); gen_ove_cyov(dc); } @@ -224,10 +222,8 @@ static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) tcg_gen_xor_tl(cpu_sr_ov, srca, srcb); tcg_gen_xor_tl(t0, res, srcb); tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov); - tcg_temp_free(t0); tcg_gen_mov_tl(dest, res); - tcg_temp_free(res); gen_ove_cyov(dc); } @@ -243,7 +239,6 @@ static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb); tcg_gen_mov_tl(dest, res); - tcg_temp_free(res); gen_ove_cyov(dc); } @@ -255,7 +250,6 @@ static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb); tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1); tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0); - tcg_temp_free(t0); tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); gen_ove_ov(dc); @@ -278,7 +272,6 @@ static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) Supress the host-side exception by dividing by 1. */ tcg_gen_or_tl(t0, srcb, cpu_sr_ov); tcg_gen_div_tl(dest, srca, t0); - tcg_temp_free(t0); tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); gen_ove_ov(dc); @@ -293,7 +286,6 @@ static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) Supress the host-side exception by dividing by 1. */ tcg_gen_or_tl(t0, srcb, cpu_sr_cy); tcg_gen_divu_tl(dest, srca, t0); - tcg_temp_free(t0); gen_ove_cy(dc); } @@ -314,14 +306,11 @@ static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_muls2_i64(cpu_mac, high, t1, t2); tcg_gen_sari_i64(t1, cpu_mac, 63); tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high); - tcg_temp_free_i64(high); tcg_gen_trunc_i64_tl(cpu_sr_ov, t1); tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); gen_ove_ov(dc); } - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb) @@ -340,12 +329,9 @@ static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_mulu2_i64(cpu_mac, high, t1, t2); tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0); tcg_gen_trunc_i64_tl(cpu_sr_cy, high); - tcg_temp_free_i64(high); gen_ove_cy(dc); } - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb) @@ -362,14 +348,12 @@ static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_add_i64(cpu_mac, cpu_mac, t1); tcg_gen_xor_i64(t1, t1, cpu_mac); tcg_gen_andc_i64(t1, t1, t2); - tcg_temp_free_i64(t2); #if TARGET_LONG_BITS == 32 tcg_gen_extrh_i64_i32(cpu_sr_ov, t1); #else tcg_gen_mov_i64(cpu_sr_ov, t1); #endif - tcg_temp_free_i64(t1); gen_ove_ov(dc); } @@ -382,13 +366,11 @@ static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_extu_tl_i64(t1, srca); tcg_gen_extu_tl_i64(t2, srcb); tcg_gen_mul_i64(t1, t1, t2); - tcg_temp_free_i64(t2); /* Note that overflow is only computed during addition stage. 
*/ tcg_gen_add_i64(cpu_mac, cpu_mac, t1); tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1); tcg_gen_trunc_i64_tl(cpu_sr_cy, t1); - tcg_temp_free_i64(t1); gen_ove_cy(dc); } @@ -407,14 +389,12 @@ static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_sub_i64(cpu_mac, cpu_mac, t1); tcg_gen_xor_i64(t1, t1, cpu_mac); tcg_gen_and_i64(t1, t1, t2); - tcg_temp_free_i64(t2); #if TARGET_LONG_BITS == 32 tcg_gen_extrh_i64_i32(cpu_sr_ov, t1); #else tcg_gen_mov_i64(cpu_sr_ov, t1); #endif - tcg_temp_free_i64(t1); gen_ove_ov(dc); } @@ -432,8 +412,6 @@ static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1); tcg_gen_sub_i64(cpu_mac, cpu_mac, t1); tcg_gen_trunc_i64_tl(cpu_sr_cy, t2); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t1); gen_ove_cy(dc); } @@ -672,7 +650,6 @@ static bool trans_l_lwa(DisasContext *dc, arg_load *a) tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, MO_TEUL); tcg_gen_mov_tl(cpu_lock_addr, ea); tcg_gen_mov_tl(cpu_lock_value, cpu_R(dc, a->d)); - tcg_temp_free(ea); return true; } @@ -684,7 +661,6 @@ static void do_load(DisasContext *dc, arg_load *a, MemOp mop) ea = tcg_temp_new(); tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i); tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, mop); - tcg_temp_free(ea); } static bool trans_l_lwz(DisasContext *dc, arg_load *a) @@ -734,13 +710,11 @@ static bool trans_l_swa(DisasContext *dc, arg_store *a) lab_fail = gen_new_label(); lab_done = gen_new_label(); tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail); - tcg_temp_free(ea); val = tcg_temp_new(); tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value, cpu_R(dc, a->b), dc->mem_idx, MO_TEUL); tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value); - tcg_temp_free(val); tcg_gen_br(lab_done); @@ -757,7 +731,6 @@ static void do_store(DisasContext *dc, arg_store *a, MemOp mop) TCGv t0 = tcg_temp_new(); tcg_gen_addi_tl(t0, cpu_R(dc, a->a), a->i); tcg_gen_qemu_st_tl(cpu_R(dc, a->b), t0, dc->mem_idx, mop); - tcg_temp_free(t0); } static bool trans_l_sw(DisasContext *dc, arg_store *a) @@ -846,46 +819,12 @@ static bool trans_l_xori(DisasContext *dc, arg_rri *a) static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a) { + TCGv spr = tcg_temp_new(); + check_r0_write(dc, a->d); - if (is_user(dc)) { - gen_illegal_exception(dc); - } else { - TCGv spr = tcg_temp_new(); - - if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - if (dc->delayed_branch) { - tcg_gen_mov_tl(cpu_pc, jmp_pc); - tcg_gen_discard_tl(jmp_pc); - } else { - tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4); - } - dc->base.is_jmp = DISAS_EXIT; - } - - tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k); - gen_helper_mfspr(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->d), spr); - tcg_temp_free(spr); - } - return true; -} - -static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a) -{ - if (is_user(dc)) { - gen_illegal_exception(dc); - } else { - TCGv spr; - - if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_start(); - } - /* For SR, we will need to exit the TB to recognize the new - * exception state. For NPC, in theory this counts as a branch - * (although the SPR only exists for use by an ICE). Save all - * of the cpu state first, allowing it to be overwritten. 
- */ + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); if (dc->delayed_branch) { tcg_gen_mov_tl(cpu_pc, jmp_pc); tcg_gen_discard_tl(jmp_pc); @@ -893,12 +832,36 @@ static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a) tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4); } dc->base.is_jmp = DISAS_EXIT; - - spr = tcg_temp_new(); - tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k); - gen_helper_mtspr(cpu_env, spr, cpu_R(dc, a->b)); - tcg_temp_free(spr); } + + tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k); + gen_helper_mfspr(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->d), spr); + return true; +} + +static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a) +{ + TCGv spr = tcg_temp_new(); + + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } + /* + * For SR, we will need to exit the TB to recognize the new + * exception state. For NPC, in theory this counts as a branch + * (although the SPR only exists for use by an ICE). Save all + * of the cpu state first, allowing it to be overwritten. + */ + if (dc->delayed_branch) { + tcg_gen_mov_tl(cpu_pc, jmp_pc); + tcg_gen_discard_tl(jmp_pc); + } else { + tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4); + } + dc->base.is_jmp = DISAS_EXIT; + + tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k); + gen_helper_mtspr(cpu_env, spr, cpu_R(dc, a->b)); return true; } @@ -1349,8 +1312,6 @@ static bool do_dp3(DisasContext *dc, arg_dab_pair *a, load_pair(dc, t1, a->b, a->bp); fn(t0, cpu_env, t0, t1); save_pair(dc, t0, a->d, a->dp); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); gen_helper_update_fpcsr(cpu_env); return true; @@ -1372,7 +1333,6 @@ static bool do_dp2(DisasContext *dc, arg_da_pair *a, load_pair(dc, t0, a->a, a->ap); fn(t0, cpu_env, t0); save_pair(dc, t0, a->d, a->dp); - tcg_temp_free_i64(t0); gen_helper_update_fpcsr(cpu_env); return true; @@ -1399,8 +1359,6 @@ static bool do_dpcmp(DisasContext *dc, arg_ab_pair *a, } else { fn(cpu_sr_f, cpu_env, t0, t1); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); if (inv) { tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1); @@ -1457,7 +1415,6 @@ static bool trans_lf_stod_d(DisasContext *dc, arg_lf_stod_d *a) t0 = tcg_temp_new_i64(); gen_helper_stod(t0, cpu_env, cpu_R(dc, a->a)); save_pair(dc, t0, a->d, a->dp); - tcg_temp_free_i64(t0); gen_helper_update_fpcsr(cpu_env); return true; @@ -1476,7 +1433,6 @@ static bool trans_lf_dtos_d(DisasContext *dc, arg_lf_dtos_d *a) t0 = tcg_temp_new_i64(); load_pair(dc, t0, a->a, a->ap); gen_helper_dtos(cpu_R(dc, a->d), cpu_env, t0); - tcg_temp_free_i64(t0); gen_helper_update_fpcsr(cpu_env); return true; @@ -1502,9 +1458,6 @@ static bool trans_lf_madd_d(DisasContext *dc, arg_dab_pair *a) load_pair(dc, t2, a->b, a->bp); gen_helper_float_madd_d(t0, cpu_env, t0, t1, t2); save_pair(dc, t0, a->d, a->dp); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); gen_helper_update_fpcsr(cpu_env); return true; @@ -1705,7 +1658,7 @@ static const TranslatorOps openrisc_tr_ops = { .disas_log = openrisc_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c index 912b037c63..7dbb47de64 100644 --- a/target/ppc/cpu-models.c +++ b/target/ppc/cpu-models.c @@ -732,6 +732,8 @@ "POWER9 v1.0") POWERPC_DEF("power9_v2.0", CPU_POWERPC_POWER9_DD20, POWER9, "POWER9 v2.0") + POWERPC_DEF("power9_v2.2", CPU_POWERPC_POWER9_DD22, POWER9, + "POWER9 v2.2") 
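/* The generic "power9" alias is repointed at v2.2 further below, and
 * cpu-models.h corrects the DD-level PVR constants to the values real
 * chips report (DD2.2 being 0x004E1202). */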
POWERPC_DEF("power10_v1.0", CPU_POWERPC_POWER10_DD1, POWER10, "POWER10 v1.0") POWERPC_DEF("power10_v2.0", CPU_POWERPC_POWER10_DD20, POWER10, @@ -907,7 +909,7 @@ PowerPCCPUAlias ppc_cpu_aliases[] = { { "power8e", "power8e_v2.1" }, { "power8", "power8_v2.0" }, { "power8nvl", "power8nvl_v1.0" }, - { "power9", "power9_v2.0" }, + { "power9", "power9_v2.2" }, { "power10", "power10_v2.0" }, #endif diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h index 1326493a9a..572b5e553a 100644 --- a/target/ppc/cpu-models.h +++ b/target/ppc/cpu-models.h @@ -348,11 +348,12 @@ enum { CPU_POWERPC_POWER8NVL_BASE = 0x004C0000, CPU_POWERPC_POWER8NVL_v10 = 0x004C0100, CPU_POWERPC_POWER9_BASE = 0x004E0000, - CPU_POWERPC_POWER9_DD1 = 0x004E0100, + CPU_POWERPC_POWER9_DD1 = 0x004E1100, CPU_POWERPC_POWER9_DD20 = 0x004E1200, + CPU_POWERPC_POWER9_DD22 = 0x004E1202, CPU_POWERPC_POWER10_BASE = 0x00800000, - CPU_POWERPC_POWER10_DD1 = 0x00800100, - CPU_POWERPC_POWER10_DD20 = 0x00800200, + CPU_POWERPC_POWER10_DD1 = 0x00801100, + CPU_POWERPC_POWER10_DD20 = 0x00801200, CPU_POWERPC_970_v22 = 0x00390202, CPU_POWERPC_970FX_v10 = 0x00391100, CPU_POWERPC_970FX_v20 = 0x003C0200, diff --git a/target/ppc/cpu-param.h b/target/ppc/cpu-param.h index ea377b7d06..0a0416e0a8 100644 --- a/target/ppc/cpu-param.h +++ b/target/ppc/cpu-param.h @@ -32,6 +32,5 @@ # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif #define TARGET_PAGE_BITS 12 -#define NB_MMU_MODES 10 #endif diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h index 0fbd8b7246..9666f54f65 100644 --- a/target/ppc/cpu-qom.h +++ b/target/ppc/cpu-qom.h @@ -31,6 +31,8 @@ OBJECT_DECLARE_CPU_TYPE(PowerPCCPU, PowerPCCPUClass, POWERPC_CPU) +ObjectClass *ppc_cpu_class_by_name(const char *name); + typedef struct CPUArchState CPUPPCState; typedef struct ppc_tb_t ppc_tb_t; typedef struct ppc_dcr_t ppc_dcr_t; diff --git a/target/ppc/cpu.c b/target/ppc/cpu.c index 1a97b41c6b..424f2e1741 100644 --- a/target/ppc/cpu.c +++ b/target/ppc/cpu.c @@ -67,6 +67,23 @@ uint32_t ppc_get_vscr(CPUPPCState *env) return env->vscr | (sat << VSCR_SAT); } +void ppc_set_cr(CPUPPCState *env, uint64_t cr) +{ + for (int i = 7; i >= 0; i--) { + env->crf[i] = cr & 0xf; + cr >>= 4; + } +} + +uint64_t ppc_get_cr(const CPUPPCState *env) +{ + uint64_t cr = 0; + for (int i = 0; i < 8; i++) { + cr |= (env->crf[i] & 0xf) << (4 * (7 - i)); + } + return cr; +} + /* GDBstub can read and write MSR... 
*/ void ppc_store_msr(CPUPPCState *env, target_ulong value) { diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 3923f174f8..0f9f2e1a0c 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -1124,7 +1124,6 @@ struct CPUArchState { /* used to speed-up TLB assist handlers */ target_ulong nip; /* next instruction pointer */ - uint64_t retxh; /* high part of 128-bit helper return */ /* when a memory exception occurs, the access type is stored here */ int access_type; @@ -1361,12 +1360,12 @@ static inline bool vhyp_cpu_in_nested(PowerPCCPU *cpu) #endif /* CONFIG_USER_ONLY */ void ppc_cpu_dump_state(CPUState *cpu, FILE *f, int flags); -hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int ppc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int ppc_cpu_gdb_read_register_apple(CPUState *cpu, GByteArray *buf, int reg); int ppc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); int ppc_cpu_gdb_write_register_apple(CPUState *cpu, uint8_t *buf, int reg); #ifndef CONFIG_USER_ONLY +hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu); const char *ppc_gdb_get_dynamic_xml(CPUState *cs, const char *xml_name); #endif @@ -2773,6 +2772,8 @@ void dump_mmu(CPUPPCState *env); void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len); void ppc_store_vscr(CPUPPCState *env, uint32_t vscr); uint32_t ppc_get_vscr(CPUPPCState *env); +void ppc_set_cr(CPUPPCState *env, uint64_t cr); +uint64_t ppc_get_cr(const CPUPPCState *env); /*****************************************************************************/ /* Power management enable checks */ diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index abee71d407..05bf73296b 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "disas/dis-asm.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "kvm_ppc.h" #include "sysemu/cpus.h" #include "sysemu/hw_accel.h" @@ -40,7 +40,6 @@ #include "qemu/cutils.h" #include "disas/capstone.h" #include "fpu/softfloat.h" -#include "qapi/qapi-commands-machine-target.h" #include "helper_regs.h" #include "internal.h" @@ -5086,8 +5085,8 @@ static void register_book3s_altivec_sprs(CPUPPCState *env) } spr_register_kvm(env, SPR_VRSAVE, "VRSAVE", - &spr_read_generic, &spr_write_generic, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic32, + &spr_read_generic, &spr_write_generic32, KVM_REG_PPC_VRSAVE, 0x00000000); } @@ -5121,7 +5120,7 @@ static void register_book3s_207_dbg_sprs(CPUPPCState *env) spr_register_kvm_hv(env, SPR_DAWRX0, "DAWRX0", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic32, KVM_REG_PPC_DAWRX, 0x00000000); spr_register_kvm_hv(env, SPR_CIABR, "CIABR", SPR_NOACCESS, SPR_NOACCESS, @@ -5377,7 +5376,7 @@ static void register_book3s_ids_sprs(CPUPPCState *env) spr_register_hv(env, SPR_TSCR, "TSCR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic32, 0x00000000); spr_register_hv(env, SPR_HMER, "HMER", SPR_NOACCESS, SPR_NOACCESS, @@ -5407,7 +5406,7 @@ static void register_book3s_ids_sprs(CPUPPCState *env) spr_register_hv(env, SPR_MMCRC, "MMCRC", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic32, 0x00000000); spr_register_hv(env, SPR_MMCRH, "MMCRH", 
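/* Every conversion to spr_write_generic32 in this file follows the
 * same rule: SPRs that are architecturally 32 bits wide must not keep
 * stale high bits on 64-bit targets. The callback (see translate.c
 * later in this patch) simply zero-extends before the store:
 *
 *     tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
 *     gen_store_spr(sprn, t0);
 *
 * It also moves out of the !defined(CONFIG_USER_ONLY) block there,
 * since the unguarded spr_write_CTRL now calls it. */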
SPR_NOACCESS, SPR_NOACCESS, @@ -5442,7 +5441,7 @@ static void register_book3s_ids_sprs(CPUPPCState *env) spr_register_hv(env, SPR_HDSISR, "HDSISR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic32, 0x00000000); spr_register_hv(env, SPR_HRMOR, "HRMOR", SPR_NOACCESS, SPR_NOACCESS, @@ -5666,7 +5665,7 @@ static void register_power7_book4_sprs(CPUPPCState *env) KVM_REG_PPC_ACOP, 0); spr_register_kvm(env, SPR_BOOKS_PID, "PID", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic32, KVM_REG_PPC_PID, 0); #endif } @@ -5731,7 +5730,7 @@ static void register_power10_dexcr_sprs(CPUPPCState *env) { spr_register(env, SPR_DEXCR, "DEXCR", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic32, 0); spr_register(env, SPR_UDEXCR, "DEXCR", @@ -5742,7 +5741,7 @@ static void register_power10_dexcr_sprs(CPUPPCState *env) spr_register_hv(env, SPR_HDEXCR, "HDEXCR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic32, 0); spr_register(env, SPR_UHDEXCR, "HDEXCR", @@ -6285,9 +6284,26 @@ static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr, bool best) return false; } - if ((pvr & 0x0f00) == (pcc->pvr & 0x0f00)) { - /* Major DD version matches to power9_v1.0 and power9_v2.0 */ + if ((pvr & 0x0f00) != (pcc->pvr & 0x0f00)) { + /* Major DD version does not match */ + return false; + } + + if ((pvr & 0x0f00) == 0x100) { + /* DD1.x always matches power9_v1.0 */ return true; + } else if ((pvr & 0x0f00) == 0x200) { + if ((pvr & 0xf) < 2) { + /* DD2.0, DD2.1 match power9_v2.0 */ + if ((pcc->pvr & 0xf) == 0) { + return true; + } + } else { + /* DD2.2, DD2.3 match power9_v2.2 */ + if ((pcc->pvr & 0xf) == 2) { + return true; + } + } } return false; @@ -6841,7 +6857,7 @@ static const char *ppc_cpu_lookup_alias(const char *alias) return NULL; } -static ObjectClass *ppc_cpu_class_by_name(const char *name) +ObjectClass *ppc_cpu_class_by_name(const char *name) { char *cpu_model, *typename; ObjectClass *oc; @@ -6981,51 +6997,6 @@ void ppc_cpu_list(void) #endif } -static void ppc_cpu_defs_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - CpuDefinitionInfoList **first = user_data; - const char *typename; - CpuDefinitionInfo *info; - - typename = object_class_get_name(oc); - info = g_malloc0(sizeof(*info)); - info->name = g_strndup(typename, - strlen(typename) - strlen(POWERPC_CPU_TYPE_SUFFIX)); - - QAPI_LIST_PREPEND(*first, info); -} - -CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) -{ - CpuDefinitionInfoList *cpu_list = NULL; - GSList *list; - int i; - - list = object_class_get_list(TYPE_POWERPC_CPU, false); - g_slist_foreach(list, ppc_cpu_defs_entry, &cpu_list); - g_slist_free(list); - - for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) { - PowerPCCPUAlias *alias = &ppc_cpu_aliases[i]; - ObjectClass *oc; - CpuDefinitionInfo *info; - - oc = ppc_cpu_class_by_name(alias->model); - if (oc == NULL) { - continue; - } - - info = g_malloc0(sizeof(*info)); - info->name = g_strdup(alias->alias); - info->q_typename = g_strdup(object_class_get_name(oc)); - - QAPI_LIST_PREPEND(cpu_list, info); - } - - return cpu_list; -} - static void ppc_cpu_set_pc(CPUState *cs, vaddr value) { PowerPCCPU *cpu = POWERPC_CPU(cs); diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c index cc024316d5..5967ea07a9 100644 
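The reworked ppc_pvr_match_power9() above is easier to audit when restated as a single predicate. The sketch below is a condensed equivalent of the hunk, not new behavior; power9_pvr_matches and pcc_pvr are illustrative names only:

    static bool power9_pvr_matches(uint32_t pvr, uint32_t pcc_pvr)
    {
        if ((pvr & 0x0f00) != (pcc_pvr & 0x0f00)) {
            return false;                  /* major DD level must match */
        }
        if ((pvr & 0x0f00) == 0x100) {
            return true;                   /* any DD1.x matches power9_v1.0 */
        }
        if ((pvr & 0x0f00) == 0x200) {
            if ((pvr & 0xf) < 2) {
                return (pcc_pvr & 0xf) == 0;   /* DD2.0 and DD2.1: v2.0 */
            }
            return (pcc_pvr & 0xf) == 2;       /* DD2.2 and DD2.3: v2.2 */
        }
        return false;
    }

In other words, minor DD levels are bucketed onto the nearest modeled revision instead of requiring an exact PVR hit.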
--- a/target/ppc/dfp_helper.c +++ b/target/ppc/dfp_helper.c @@ -121,7 +121,7 @@ static void dfp_set_round_mode_from_immediate(uint8_t r, uint8_t rmc, case 3: /* use FPSCR rounding mode */ return; default: - assert(0); /* cannot get here */ + g_assert_not_reached(); } } else { /* r == 1 */ switch (rmc & 3) { @@ -138,7 +138,7 @@ static void dfp_set_round_mode_from_immediate(uint8_t r, uint8_t rmc, rnd = DEC_ROUND_HALF_DOWN; break; default: - assert(0); /* cannot get here */ + g_assert_not_reached(); } } decContextSetRounding(&dfp->context, rnd); diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index 287659c74d..fea9221501 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -1431,13 +1431,16 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp) break; } case POWERPC_EXCP_ALIGN: /* Alignment exception */ - /* Get rS/rD and rA from faulting opcode */ - /* - * Note: the opcode fields will not be set properly for a - * direct store load/store, but nobody cares as nobody - * actually uses direct store segments. - */ - env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; + /* Optional DSISR update was removed from ISA v3.0 */ + if (!(env->insns_flags2 & PPC2_ISA300)) { + /* Get rS/rD and rA from faulting opcode */ + /* + * Note: the opcode fields will not be set properly for a + * direct store load/store, but nobody cares as nobody + * actually uses direct store segments. + */ + env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; + } break; case POWERPC_EXCP_PROGRAM: /* Program exception */ switch (env->error_code & ~0xF) { @@ -2622,7 +2625,7 @@ void helper_scv(CPUPPCState *env, uint32_t lev) } } -void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn) +void helper_pminsn(CPUPPCState *env, uint32_t insn) { CPUState *cs; diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c index a66e16c212..03150a0f10 100644 --- a/target/ppc/fpu_helper.c +++ b/target/ppc/fpu_helper.c @@ -141,62 +141,28 @@ static inline int ppc_float64_get_unbiased_exp(float64 f) return ((f >> 52) & 0x7FF) - 1023; } -/* Classify a floating-point number. */ -enum { - is_normal = 1, - is_zero = 2, - is_denormal = 4, - is_inf = 8, - is_qnan = 16, - is_snan = 32, - is_neg = 64, -}; - -#define COMPUTE_CLASS(tp) \ -static int tp##_classify(tp arg) \ -{ \ - int ret = tp##_is_neg(arg) * is_neg; \ - if (unlikely(tp##_is_any_nan(arg))) { \ - float_status dummy = { }; /* snan_bit_is_one = 0 */ \ - ret |= (tp##_is_signaling_nan(arg, &dummy) \ - ? 
is_snan : is_qnan); \ - } else if (unlikely(tp##_is_infinity(arg))) { \ - ret |= is_inf; \ - } else if (tp##_is_zero(arg)) { \ - ret |= is_zero; \ - } else if (tp##_is_zero_or_denormal(arg)) { \ - ret |= is_denormal; \ - } else { \ - ret |= is_normal; \ - } \ - return ret; \ -} - -COMPUTE_CLASS(float16) -COMPUTE_CLASS(float32) -COMPUTE_CLASS(float64) -COMPUTE_CLASS(float128) - -static void set_fprf_from_class(CPUPPCState *env, int class) -{ - static const uint8_t fprf[6][2] = { - { 0x04, 0x08 }, /* normalized */ - { 0x02, 0x12 }, /* zero */ - { 0x14, 0x18 }, /* denormalized */ - { 0x05, 0x09 }, /* infinity */ - { 0x11, 0x11 }, /* qnan */ - { 0x00, 0x00 }, /* snan -- flags are undefined */ - }; - bool isneg = class & is_neg; - - env->fpscr &= ~FP_FPRF; - env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF; -} - -#define COMPUTE_FPRF(tp) \ -void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \ -{ \ - set_fprf_from_class(env, tp##_classify(arg)); \ +#define COMPUTE_FPRF(tp) \ +void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \ +{ \ + bool neg = tp##_is_neg(arg); \ + target_ulong fprf; \ + if (likely(tp##_is_normal(arg))) { \ + fprf = neg ? 0x08 << FPSCR_FPRF : 0x04 << FPSCR_FPRF; \ + } else if (tp##_is_zero(arg)) { \ + fprf = neg ? 0x12 << FPSCR_FPRF : 0x02 << FPSCR_FPRF; \ + } else if (tp##_is_zero_or_denormal(arg)) { \ + fprf = neg ? 0x18 << FPSCR_FPRF : 0x14 << FPSCR_FPRF; \ + } else if (tp##_is_infinity(arg)) { \ + fprf = neg ? 0x09 << FPSCR_FPRF : 0x05 << FPSCR_FPRF; \ + } else { \ + float_status dummy = { }; /* snan_bit_is_one = 0 */ \ + if (tp##_is_signaling_nan(arg, &dummy)) { \ + fprf = 0x00 << FPSCR_FPRF; \ + } else { \ + fprf = 0x11 << FPSCR_FPRF; \ + } \ + } \ + env->fpscr = (env->fpscr & ~FP_FPRF) | fprf; \ } COMPUTE_FPRF(float16) diff --git a/target/ppc/gdbstub.c b/target/ppc/gdbstub.c index 1a0b9ca82c..63c9abe4f1 100644 --- a/target/ppc/gdbstub.c +++ b/target/ppc/gdbstub.c @@ -20,6 +20,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "internal.h" static int ppc_gdb_register_len_apple(int n) @@ -144,11 +145,7 @@ int ppc_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int n) break; case 66: { - uint32_t cr = 0; - int i; - for (i = 0; i < 8; i++) { - cr |= env->crf[i] << (32 - ((i + 1) * 4)); - } + uint32_t cr = ppc_get_cr(env); gdb_get_reg32(buf, cr); break; } @@ -202,11 +199,7 @@ int ppc_cpu_gdb_read_register_apple(CPUState *cs, GByteArray *buf, int n) break; case 66 + 32: { - uint32_t cr = 0; - int i; - for (i = 0; i < 8; i++) { - cr |= env->crf[i] << (32 - ((i + 1) * 4)); - } + uint32_t cr = ppc_get_cr(env); gdb_get_reg32(buf, cr); break; } @@ -256,10 +249,7 @@ int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) case 66: { uint32_t cr = ldl_p(mem_buf); - int i; - for (i = 0; i < 8; i++) { - env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF; - } + ppc_set_cr(env, cr); break; } case 67: @@ -306,10 +296,7 @@ int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n) case 66 + 32: { uint32_t cr = ldl_p(mem_buf); - int i; - for (i = 0; i < 8; i++) { - env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF; - } + ppc_set_cr(env, cr); break; } case 67 + 32: diff --git a/target/ppc/helper.h b/target/ppc/helper.h index 0beaca5c7a..38efbc351c 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -810,12 +810,3 @@ DEF_HELPER_4(DSCLIQ, void, env, fprp, fprp, i32) DEF_HELPER_1(tbegin, void, env) DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env) - -#ifdef TARGET_PPC64 
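Stepping back to the helper_compute_fprf rewrite in fpu_helper.c above: the open-coded ladder keeps exactly the FPRF encodings the old lookup table produced, namely:

    0x04 / 0x08    positive / negative normalized
    0x02 / 0x12    positive / negative zero
    0x14 / 0x18    positive / negative denormalized
    0x05 / 0x09    positive / negative infinity
    0x11           quiet NaN
    0x00           signaling NaN (FPRF undefined by the ISA)

Only the control flow changes: each class is tested directly and its constant selected in place, rather than building a classification bitmask and indexing a table with ctz32().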
-DEF_HELPER_FLAGS_3(lq_le_parallel, TCG_CALL_NO_WG, i64, env, tl, i32) -DEF_HELPER_FLAGS_3(lq_be_parallel, TCG_CALL_NO_WG, i64, env, tl, i32) -DEF_HELPER_FLAGS_5(stq_le_parallel, TCG_CALL_NO_WG, - void, env, tl, i64, i64, i32) -DEF_HELPER_FLAGS_5(stq_be_parallel, TCG_CALL_NO_WG, - void, env, tl, i64, i64, i32) -#endif diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c index 779e7db513..fb351c303f 100644 --- a/target/ppc/helper_regs.c +++ b/target/ppc/helper_regs.c @@ -448,7 +448,7 @@ void register_non_embedded_sprs(CPUPPCState *env) /* Exception processing */ spr_register_kvm(env, SPR_DSISR, "DSISR", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic32, KVM_REG_PPC_DSISR, 0x00000000); spr_register_kvm(env, SPR_DAR, "DAR", SPR_NOACCESS, SPR_NOACCESS, diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode index f8f589e9fd..4fcf3af8d0 100644 --- a/target/ppc/insn32.decode +++ b/target/ppc/insn32.decode @@ -390,13 +390,19 @@ SETNBCR 011111 ..... ..... ----- 0111100000 - @X_bi ### Move To/From FPSCR -MFFS 111111 ..... 00000 ----- 1001000111 . @X_t_rc -MFFSCE 111111 ..... 00001 ----- 1001000111 - @X_t -MFFSCRN 111111 ..... 10110 ..... 1001000111 - @X_tb -MFFSCDRN 111111 ..... 10100 ..... 1001000111 - @X_tb -MFFSCRNI 111111 ..... 10111 ---.. 1001000111 - @X_imm2 -MFFSCDRNI 111111 ..... 10101 --... 1001000111 - @X_imm3 -MFFSL 111111 ..... 11000 ----- 1001000111 - @X_t +{ + # Before Power ISA v3.0, MFFS bits 11~15 were reserved and should be ignored + MFFS_ISA207 111111 ..... ----- ----- 1001000111 . @X_t_rc + [ + MFFS 111111 ..... 00000 ----- 1001000111 . @X_t_rc + MFFSCE 111111 ..... 00001 ----- 1001000111 - @X_t + MFFSCRN 111111 ..... 10110 ..... 1001000111 - @X_tb + MFFSCDRN 111111 ..... 10100 ..... 1001000111 - @X_tb + MFFSCRNI 111111 ..... 10111 ---.. 1001000111 - @X_imm2 + MFFSCDRNI 111111 ..... 10101 --... 1001000111 - @X_imm3 + MFFSL 111111 ..... 
11000 ----- 1001000111 - @X_t + ] +} ### Decimal Floating-Point Arithmetic Instructions diff --git a/target/ppc/internal.h b/target/ppc/internal.h index 337a362205..901bae6d39 100644 --- a/target/ppc/internal.h +++ b/target/ppc/internal.h @@ -242,9 +242,12 @@ static inline int prot_for_access_type(MMUAccessType access_type) g_assert_not_reached(); } +#ifndef CONFIG_USER_ONLY + /* PowerPC MMU emulation */ typedef struct mmu_ctx_t mmu_ctx_t; + bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, hwaddr *raddrp, int *psizep, int *protp, int mmu_idx, bool guest_visible); @@ -266,6 +269,8 @@ struct mmu_ctx_t { int nx; /* Non-execute area */ }; +#endif /* !CONFIG_USER_ONLY */ + /* Common routines used by software and hardware TLBs emulation */ static inline int pte_is_valid(target_ulong pte0) { diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index 7c25348b7b..a7f2de9d10 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -32,7 +32,6 @@ #include "sysemu/device_tree.h" #include "mmu-hash64.h" -#include "hw/sysbus.h" #include "hw/ppc/spapr.h" #include "hw/ppc/spapr_cpu_core.h" #include "hw/hw.h" @@ -89,6 +88,7 @@ static int cap_ppc_nested_kvm_hv; static int cap_large_decr; static int cap_fwnmi; static int cap_rpt_invalidate; +static int cap_ail_mode_3; static uint32_t debug_inst_opcode; @@ -153,6 +153,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } cap_rpt_invalidate = kvm_vm_check_extension(s, KVM_CAP_PPC_RPT_INVALIDATE); + cap_ail_mode_3 = kvm_vm_check_extension(s, KVM_CAP_PPC_AIL_MODE_3); kvm_ppc_register_host_cpu_type(); return 0; @@ -928,10 +929,7 @@ int kvm_arch_put_registers(CPUState *cs, int level) regs.gpr[i] = env->gpr[i]; } - regs.cr = 0; - for (i = 0; i < 8; i++) { - regs.cr |= (env->crf[i] & 15) << (4 * (7 - i)); - } + regs.cr = ppc_get_cr(env); ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, ®s); if (ret < 0) { @@ -1206,7 +1204,6 @@ int kvm_arch_get_registers(CPUState *cs) PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; struct kvm_regs regs; - uint32_t cr; int i, ret; ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, ®s); @@ -1214,12 +1211,7 @@ int kvm_arch_get_registers(CPUState *cs) return ret; } - cr = regs.cr; - for (i = 7; i >= 0; i--) { - env->crf[i] = cr & 15; - cr >>= 4; - } - + ppc_set_cr(env, regs.cr); env->ctr = regs.ctr; env->lr = regs.lr; cpu_write_xer(env, regs.xer); @@ -2570,6 +2562,11 @@ int kvmppc_has_cap_rpt_invalidate(void) return cap_rpt_invalidate; } +bool kvmppc_supports_ail_3(void) +{ + return cap_ail_mode_3; +} + PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void) { uint32_t host_pvr = mfpvr(); diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h index 5fd9753953..611debc3ce 100644 --- a/target/ppc/kvm_ppc.h +++ b/target/ppc/kvm_ppc.h @@ -76,6 +76,7 @@ int kvmppc_set_cap_nested_kvm_hv(int enable); int kvmppc_get_cap_large_decr(void); int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable); int kvmppc_has_cap_rpt_invalidate(void); +bool kvmppc_supports_ail_3(void); int kvmppc_enable_hwrng(void); int kvmppc_put_books_sregs(PowerPCCPU *cpu); PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void); @@ -396,6 +397,11 @@ static inline int kvmppc_has_cap_rpt_invalidate(void) return false; } +static inline bool kvmppc_supports_ail_3(void) +{ + return false; +} + static inline int kvmppc_enable_hwrng(void) { return -1; diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c index 1578887a8f..46eae65819 100644 --- a/target/ppc/mem_helper.c +++ b/target/ppc/mem_helper.c @@ -367,54 +367,6 @@ target_ulong helper_lscbx(CPUPPCState *env, 
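/* Three notes on the hunks above. First, insn32.decode: in a
 * decodetree overlap group, written { }, members are tried in order,
 * and a trans_* callback that returns false lets decoding fall
 * through to the next member, so the loose MFFS_ISA207 pattern can
 * defer to the precise bracketed set on ISA v3.0 CPUs. Second, kvm.c:
 * cap_ail_mode_3 is probed once at init and exposed through
 * kvmppc_supports_ail_3(), with the usual static inline stub
 * returning false for non-KVM builds. Third, the lq/stq "parallel"
 * helpers and the env->retxh side channel disappear here; presumably
 * the translator now emits 16-byte accesses directly, since TCG can
 * express 128-bit loads and stores. */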
target_ulong addr, uint32_t reg, return i; } -#ifdef TARGET_PPC64 -uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr, - uint32_t opidx) -{ - Int128 ret; - - /* We will have raised EXCP_ATOMIC from the translator. */ - assert(HAVE_ATOMIC128); - ret = cpu_atomic_ldo_le_mmu(env, addr, opidx, GETPC()); - env->retxh = int128_gethi(ret); - return int128_getlo(ret); -} - -uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr, - uint32_t opidx) -{ - Int128 ret; - - /* We will have raised EXCP_ATOMIC from the translator. */ - assert(HAVE_ATOMIC128); - ret = cpu_atomic_ldo_be_mmu(env, addr, opidx, GETPC()); - env->retxh = int128_gethi(ret); - return int128_getlo(ret); -} - -void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr, - uint64_t lo, uint64_t hi, uint32_t opidx) -{ - Int128 val; - - /* We will have raised EXCP_ATOMIC from the translator. */ - assert(HAVE_ATOMIC128); - val = int128_make128(lo, hi); - cpu_atomic_sto_le_mmu(env, addr, val, opidx, GETPC()); -} - -void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr, - uint64_t lo, uint64_t hi, uint32_t opidx) -{ - Int128 val; - - /* We will have raised EXCP_ATOMIC from the translator. */ - assert(HAVE_ATOMIC128); - val = int128_make128(lo, hi); - cpu_atomic_sto_be_mmu(env, addr, val, opidx, GETPC()); -} -#endif - /*****************************************************************************/ /* Altivec extension helpers */ #if HOST_BIG_ENDIAN diff --git a/target/ppc/meson.build b/target/ppc/meson.build index 79beaff147..7929de8360 100644 --- a/target/ppc/meson.build +++ b/target/ppc/meson.build @@ -39,7 +39,7 @@ ppc_softmmu_ss.add(files( 'machine.c', 'mmu-hash32.c', 'mmu_common.c', - 'monitor.c', + 'ppc-qmp-cmds.c', )) ppc_softmmu_ss.add(when: 'CONFIG_TCG', if_true: files( 'mmu_helper.c', diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c index a9bc1522e2..40ddc5c08c 100644 --- a/target/ppc/misc_helper.c +++ b/target/ppc/misc_helper.c @@ -190,13 +190,13 @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val) void helper_store_pidr(CPUPPCState *env, target_ulong val) { - env->spr[SPR_BOOKS_PID] = val; + env->spr[SPR_BOOKS_PID] = (uint32_t)val; tlb_flush(env_cpu(env)); } void helper_store_lpidr(CPUPPCState *env, target_ulong val) { - env->spr[SPR_LPIDR] = val; + env->spr[SPR_LPIDR] = (uint32_t)val; /* * We need to flush the TLB on LPID changes as we only tag HV vs diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc index c3cc919ee4..d900e13cad 100644 --- a/target/ppc/power8-pmu-regs.c.inc +++ b/target/ppc/power8-pmu-regs.c.inc @@ -58,8 +58,6 @@ static bool spr_groupA_write_allowed(DisasContext *ctx) /* * Helper function to avoid code repetition between MMCR0 and * MMCR2 problem state write functions. - * - * 'ret' must be tcg_temp_freed() by the caller. 
*/ static TCGv masked_gprn_for_spr_write(int gprn, int sprn, uint64_t spr_mask) @@ -77,8 +75,6 @@ static TCGv masked_gprn_for_spr_write(int gprn, int sprn, /* Add the masked gprn bits into 'ret' */ tcg_gen_or_tl(ret, ret, t0); - tcg_temp_free(t0); - return ret; } @@ -100,8 +96,6 @@ void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn) gen_load_spr(t0, SPR_POWER_MMCR0); tcg_gen_andi_tl(t0, t0, MMCR0_UREG_MASK); tcg_gen_mov_tl(cpu_gpr[gprn], t0); - - tcg_temp_free(t0); } static void write_MMCR0_common(DisasContext *ctx, TCGv val) @@ -137,8 +131,6 @@ void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn) masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR0, MMCR0_UREG_MASK); write_MMCR0_common(ctx, masked_gprn); - - tcg_temp_free(masked_gprn); } void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn) @@ -164,8 +156,6 @@ void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn) gen_load_spr(t0, SPR_POWER_MMCR2); tcg_gen_andi_tl(t0, t0, MMCR2_UREG_MASK); tcg_gen_mov_tl(cpu_gpr[gprn], t0); - - tcg_temp_free(t0); } void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn) @@ -183,18 +173,14 @@ void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn) masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR2, MMCR2_UREG_MASK); gen_store_spr(SPR_POWER_MMCR2, masked_gprn); - - tcg_temp_free(masked_gprn); } void spr_read_PMC(DisasContext *ctx, int gprn, int sprn) { - TCGv_i32 t_sprn = tcg_const_i32(sprn); + TCGv_i32 t_sprn = tcg_constant_i32(sprn); gen_icount_io_start(ctx); gen_helper_read_pmc(cpu_gpr[gprn], cpu_env, t_sprn); - - tcg_temp_free_i32(t_sprn); } void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn) @@ -224,12 +210,10 @@ void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn) void spr_write_PMC(DisasContext *ctx, int sprn, int gprn) { - TCGv_i32 t_sprn = tcg_const_i32(sprn); + TCGv_i32 t_sprn = tcg_constant_i32(sprn); gen_icount_io_start(ctx); gen_helper_store_pmc(cpu_env, t_sprn, cpu_gpr[gprn]); - - tcg_temp_free_i32(t_sprn); } void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn) diff --git a/target/ppc/power8-pmu.c b/target/ppc/power8-pmu.c index 1381072b9e..64a64865d7 100644 --- a/target/ppc/power8-pmu.c +++ b/target/ppc/power8-pmu.c @@ -272,7 +272,7 @@ void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value) { pmu_update_cycles(env); - env->spr[sprn] = value; + env->spr[sprn] = (uint32_t)value; pmc_update_overflow_timer(env, sprn); } diff --git a/target/ppc/monitor.c b/target/ppc/ppc-qmp-cmds.c similarity index 77% rename from target/ppc/monitor.c rename to target/ppc/ppc-qmp-cmds.c index 8250b1304e..f9acc21056 100644 --- a/target/ppc/monitor.c +++ b/target/ppc/ppc-qmp-cmds.c @@ -1,5 +1,5 @@ /* - * QEMU monitor + * QEMU PPC (monitor definitions) * * Copyright (c) 2003-2004 Fabrice Bellard * @@ -28,18 +28,17 @@ #include "qemu/ctype.h" #include "monitor/hmp-target.h" #include "monitor/hmp.h" +#include "qapi/qapi-commands-machine-target.h" +#include "cpu-models.h" +#include "cpu-qom.h" static target_long monitor_get_ccr(Monitor *mon, const struct MonitorDef *md, int val) { CPUArchState *env = mon_get_cpu_env(mon); unsigned int u; - int i; - u = 0; - for (i = 0; i < 8; i++) { - u |= env->crf[i] << (32 - (4 * (i + 1))); - } + u = ppc_get_cr(env); return u; } @@ -172,3 +171,48 @@ int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval) return -EINVAL; } + +static void ppc_cpu_defs_entry(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + 
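/* The tcg_const_*() to tcg_constant_*() conversions in the PMU hunks
 * above are the template for the rest of this patch: tcg_constant_*()
 * returns an interned, read-only value that needs no tcg_temp_free_*()
 * and must never be used as an output operand, whereas tcg_const_*()
 * allocated a writable temporary and copied the value in. The
 * surrounding file, renamed from monitor.c to ppc-qmp-cmds.c, also
 * absorbs qmp_query_cpu_definitions() from cpu_init.c, which is why
 * ppc_cpu_class_by_name() became public in cpu-qom.h earlier. */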
CpuDefinitionInfoList **first = user_data; + const char *typename; + CpuDefinitionInfo *info; + + typename = object_class_get_name(oc); + info = g_malloc0(sizeof(*info)); + info->name = g_strndup(typename, + strlen(typename) - strlen(POWERPC_CPU_TYPE_SUFFIX)); + + QAPI_LIST_PREPEND(*first, info); +} + +CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) +{ + CpuDefinitionInfoList *cpu_list = NULL; + GSList *list; + int i; + + list = object_class_get_list(TYPE_POWERPC_CPU, false); + g_slist_foreach(list, ppc_cpu_defs_entry, &cpu_list); + g_slist_free(list); + + for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) { + PowerPCCPUAlias *alias = &ppc_cpu_aliases[i]; + ObjectClass *oc; + CpuDefinitionInfo *info; + + oc = ppc_cpu_class_by_name(alias->model); + if (oc == NULL) { + continue; + } + + info = g_malloc0(sizeof(*info)); + info->name = g_strdup(alias->alias); + info->q_typename = g_strdup(object_class_get_name(oc)); + + QAPI_LIST_PREPEND(cpu_list, info); + } + + return cpu_list; +} diff --git a/target/ppc/spr_common.h b/target/ppc/spr_common.h index 8437eb0340..4c0f2bed77 100644 --- a/target/ppc/spr_common.h +++ b/target/ppc/spr_common.h @@ -81,6 +81,7 @@ void _spr_register(CPUPPCState *env, int num, const char *name, void spr_noaccess(DisasContext *ctx, int gprn, int sprn); void spr_read_generic(DisasContext *ctx, int gprn, int sprn); void spr_write_generic(DisasContext *ctx, int sprn, int gprn); +void spr_write_generic32(DisasContext *ctx, int sprn, int gprn); void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn); void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn); void spr_write_PMC(DisasContext *ctx, int sprn, int gprn); @@ -109,7 +110,6 @@ void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn); void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn); #ifndef CONFIG_USER_ONLY -void spr_write_generic32(DisasContext *ctx, int sprn, int gprn); void spr_write_clear(DisasContext *ctx, int sprn, int gprn); void spr_access_nop(DisasContext *ctx, int sprn, int gprn); void spr_read_decr(DisasContext *ctx, int gprn, int sprn); diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 1c17d5a558..9b7884586c 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -263,11 +263,9 @@ static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error) * faulting instruction */ gen_update_nip(ctx, ctx->cia); - t0 = tcg_const_i32(excp); - t1 = tcg_const_i32(error); + t0 = tcg_constant_i32(excp); + t1 = tcg_constant_i32(error); gen_helper_raise_exception_err(cpu_env, t0, t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); ctx->base.is_jmp = DISAS_NORETURN; } @@ -280,9 +278,8 @@ static void gen_exception(DisasContext *ctx, uint32_t excp) * faulting instruction */ gen_update_nip(ctx, ctx->cia); - t0 = tcg_const_i32(excp); + t0 = tcg_constant_i32(excp); gen_helper_raise_exception(cpu_env, t0); - tcg_temp_free_i32(t0); ctx->base.is_jmp = DISAS_NORETURN; } @@ -292,9 +289,8 @@ static void gen_exception_nip(DisasContext *ctx, uint32_t excp, TCGv_i32 t0; gen_update_nip(ctx, nip); - t0 = tcg_const_i32(excp); + t0 = tcg_constant_i32(excp); gen_helper_raise_exception(cpu_env, t0); - tcg_temp_free_i32(t0); ctx->base.is_jmp = DISAS_NORETURN; } @@ -341,7 +337,6 @@ static uint32_t gen_prep_dbgex(DisasContext *ctx) gen_load_spr(t0, SPR_BOOKE_DBSR); tcg_gen_ori_tl(t0, t0, dbsr); gen_store_spr(SPR_BOOKE_DBSR, t0); - tcg_temp_free(t0); return POWERPC_EXCP_DEBUG; } else { return POWERPC_EXCP_TRACE; @@ -391,9 +386,8 @@ void 
spr_noaccess(DisasContext *ctx, int gprn, int sprn) static void spr_load_dump_spr(int sprn) { #ifdef PPC_DUMP_SPR_ACCESSES - TCGv_i32 t0 = tcg_const_i32(sprn); + TCGv_i32 t0 = tcg_constant_i32(sprn); gen_helper_load_dump_spr(cpu_env, t0); - tcg_temp_free_i32(t0); #endif } @@ -406,9 +400,8 @@ void spr_read_generic(DisasContext *ctx, int gprn, int sprn) static void spr_store_dump_spr(int sprn) { #ifdef PPC_DUMP_SPR_ACCESSES - TCGv_i32 t0 = tcg_const_i32(sprn); + TCGv_i32 t0 = tcg_constant_i32(sprn); gen_helper_store_dump_spr(cpu_env, t0); - tcg_temp_free_i32(t0); #endif } @@ -418,9 +411,21 @@ void spr_write_generic(DisasContext *ctx, int sprn, int gprn) spr_store_dump_spr(sprn); } +void spr_write_generic32(DisasContext *ctx, int sprn, int gprn) +{ +#ifdef TARGET_PPC64 + TCGv t0 = tcg_temp_new(); + tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]); + gen_store_spr(sprn, t0); + spr_store_dump_spr(sprn); +#else + spr_write_generic(ctx, sprn, gprn); +#endif +} + void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn) { - spr_write_generic(ctx, sprn, gprn); + spr_write_generic32(ctx, sprn, gprn); /* * SPR_CTRL writes must force a new translation block, @@ -431,19 +436,6 @@ void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn) } #if !defined(CONFIG_USER_ONLY) -void spr_write_generic32(DisasContext *ctx, int sprn, int gprn) -{ -#ifdef TARGET_PPC64 - TCGv t0 = tcg_temp_new(); - tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]); - gen_store_spr(sprn, t0); - tcg_temp_free(t0); - spr_store_dump_spr(sprn); -#else - spr_write_generic(ctx, sprn, gprn); -#endif -} - void spr_write_clear(DisasContext *ctx, int sprn, int gprn) { TCGv t0 = tcg_temp_new(); @@ -452,8 +444,6 @@ void spr_write_clear(DisasContext *ctx, int sprn, int gprn) tcg_gen_neg_tl(t1, cpu_gpr[gprn]); tcg_gen_and_tl(t0, t0, t1); gen_store_spr(sprn, t0); - tcg_temp_free(t0); - tcg_temp_free(t1); } void spr_access_nop(DisasContext *ctx, int sprn, int gprn) @@ -483,9 +473,6 @@ void spr_read_xer(DisasContext *ctx, int gprn, int sprn) tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32); tcg_gen_or_tl(dst, dst, t0); } - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } void spr_write_xer(DisasContext *ctx, int sprn, int gprn) @@ -685,30 +672,26 @@ void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn) void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn) { - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); + TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2); gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn) { - TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4); + TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4); gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn) { - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2); + TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2); gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn) { - TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4); + TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4); gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } /* DBAT0U...DBAT7U */ @@ -729,30 +712,26 @@ void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn) void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn) { - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2); + 
TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2); gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn) { - TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4); + TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4); gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn) { - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2); + TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2); gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn) { - TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4); + TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4); gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } /* SDR1 */ @@ -784,7 +763,6 @@ void spr_write_hior(DisasContext *ctx, int sprn, int gprn) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL); tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix)); - tcg_temp_free(t0); } void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn) { @@ -855,7 +833,6 @@ void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF); gen_helper_store_40x_pid(cpu_env, t0); - tcg_temp_free(t0); } void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn) @@ -878,7 +855,6 @@ void spr_write_pir(DisasContext *ctx, int sprn, int gprn) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF); gen_store_spr(SPR_PIR, t0); - tcg_temp_free(t0); } #endif @@ -888,7 +864,6 @@ void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn) TCGv_i32 t0 = tcg_temp_new_i32(); tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr)); tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0); - tcg_temp_free_i32(t0); } void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn) @@ -896,7 +871,6 @@ void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn) TCGv_i32 t0 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]); tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr)); - tcg_temp_free_i32(t0); } #if !defined(CONFIG_USER_ONLY) @@ -908,7 +882,6 @@ void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn) tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]); tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix)); gen_store_spr(sprn, t0); - tcg_temp_free(t0); } void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn) @@ -933,7 +906,6 @@ void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn) tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]); tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs])); gen_store_spr(sprn, t0); - tcg_temp_free(t0); } #endif @@ -968,10 +940,6 @@ void spr_write_amr(DisasContext *ctx, int sprn, int gprn) tcg_gen_or_tl(t0, t0, t2); gen_store_spr(SPR_AMR, t0); spr_store_dump_spr(SPR_AMR); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } void spr_write_uamor(DisasContext *ctx, int sprn, int gprn) @@ -999,10 +967,6 @@ void spr_write_uamor(DisasContext *ctx, int sprn, int gprn) tcg_gen_or_tl(t0, t0, t2); gen_store_spr(SPR_UAMOR, t0); spr_store_dump_spr(SPR_UAMOR); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } void spr_write_iamr(DisasContext *ctx, int sprn, int gprn) @@ -1030,10 +994,6 @@ void spr_write_iamr(DisasContext *ctx, int sprn, int gprn) tcg_gen_or_tl(t0, 
t0, t2); gen_store_spr(SPR_IAMR, t0); spr_store_dump_spr(SPR_IAMR); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } #endif #endif @@ -1054,7 +1014,6 @@ void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn) tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE); gen_store_spr(sprn, t0); - tcg_temp_free(t0); } void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn) @@ -1063,7 +1022,6 @@ void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn) tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE); gen_store_spr(sprn, t0); - tcg_temp_free(t0); } void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn) @@ -1073,7 +1031,6 @@ void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn) tcg_gen_andi_tl(t0, cpu_gpr[gprn], ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC)); gen_store_spr(sprn, t0); - tcg_temp_free(t0); } void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn) @@ -1083,14 +1040,15 @@ void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn) void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn) { - TCGv_i32 t0 = tcg_const_i32(sprn); + TCGv_i32 t0 = tcg_constant_i32(sprn); gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } + void spr_write_eplc(DisasContext *ctx, int sprn, int gprn) { gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]); } + void spr_write_epsc(DisasContext *ctx, int sprn, int gprn) { gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]); @@ -1106,7 +1064,6 @@ void spr_write_mas73(DisasContext *ctx, int sprn, int gprn) gen_store_spr(SPR_BOOKE_MAS3, val); tcg_gen_shri_tl(val, cpu_gpr[gprn], 32); gen_store_spr(SPR_BOOKE_MAS7, val); - tcg_temp_free(val); } void spr_read_mas73(DisasContext *ctx, int gprn, int sprn) @@ -1117,8 +1074,6 @@ void spr_read_mas73(DisasContext *ctx, int gprn, int sprn) tcg_gen_shli_tl(mas7, mas7, 32); gen_load_spr(mas3, SPR_BOOKE_MAS3); tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7); - tcg_temp_free(mas3); - tcg_temp_free(mas7); } #endif @@ -1127,29 +1082,21 @@ void spr_read_mas73(DisasContext *ctx, int gprn, int sprn) static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn, int bit, int sprn, int cause) { - TCGv_i32 t1 = tcg_const_i32(bit); - TCGv_i32 t2 = tcg_const_i32(sprn); - TCGv_i32 t3 = tcg_const_i32(cause); + TCGv_i32 t1 = tcg_constant_i32(bit); + TCGv_i32 t2 = tcg_constant_i32(sprn); + TCGv_i32 t3 = tcg_constant_i32(cause); gen_helper_fscr_facility_check(cpu_env, t1, t2, t3); - - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); } static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn, int bit, int sprn, int cause) { - TCGv_i32 t1 = tcg_const_i32(bit); - TCGv_i32 t2 = tcg_const_i32(sprn); - TCGv_i32 t3 = tcg_const_i32(cause); + TCGv_i32 t1 = tcg_constant_i32(bit); + TCGv_i32 t2 = tcg_constant_i32(sprn); + TCGv_i32 t3 = tcg_constant_i32(cause); gen_helper_msr_facility_check(cpu_env, t1, t2, t3); - - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); } void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn) @@ -1160,9 +1107,6 @@ void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn) gen_load_spr(spr, sprn - 1); tcg_gen_shri_tl(spr_up, spr, 32); tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up); - - tcg_temp_free(spr); - tcg_temp_free(spr_up); } void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn) @@ -1172,8 +1116,6 @@ void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn) gen_load_spr(spr, 
sprn - 1); tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32); gen_store_spr(sprn - 1, spr); - - tcg_temp_free(spr); } #if !defined(CONFIG_USER_ONLY) @@ -1185,7 +1127,6 @@ void spr_write_hmer(DisasContext *ctx, int sprn, int gprn) tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer); gen_store_spr(sprn, hmer); spr_store_dump_spr(sprn); - tcg_temp_free(hmer); } void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn) @@ -1269,8 +1210,6 @@ void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn) gen_load_spr(t0, sprn + 16); tcg_gen_ext32u_tl(cpu_gpr[gprn], t0); - - tcg_temp_free(t0); } #endif @@ -1447,17 +1386,12 @@ static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf) tcg_gen_trunc_tl_i32(t, t0); tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so); tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free_i32(t); } static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf) { - TCGv t0 = tcg_const_tl(arg1); + TCGv t0 = tcg_constant_tl(arg1); gen_op_cmp(arg0, t0, s, crf); - tcg_temp_free(t0); } static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf) @@ -1473,15 +1407,12 @@ static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf) tcg_gen_ext32u_tl(t1, arg1); } gen_op_cmp(t0, t1, s, crf); - tcg_temp_free(t1); - tcg_temp_free(t0); } static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf) { - TCGv t0 = tcg_const_tl(arg1); + TCGv t0 = tcg_constant_tl(arg1); gen_op_cmp32(arg0, t0, s, crf); - tcg_temp_free(t0); } static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg) @@ -1525,10 +1456,6 @@ static void gen_cmprb(DisasContext *ctx) tcg_gen_or_i32(crf, crf, src2lo); } tcg_gen_shli_i32(crf, crf, CRF_GT_BIT); - tcg_temp_free_i32(src1); - tcg_temp_free_i32(src2); - tcg_temp_free_i32(src2lo); - tcg_temp_free_i32(src2hi); } #if defined(TARGET_PPC64) @@ -1551,12 +1478,10 @@ static void gen_isel(DisasContext *ctx) tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]); tcg_gen_andi_tl(t0, t0, mask); - zr = tcg_const_tl(0); + zr = tcg_constant_tl(0); tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr, rA(ctx->opcode) ? 
cpu_gpr[rA(ctx->opcode)] : zr, cpu_gpr[rB(ctx->opcode)]); - tcg_temp_free(zr); - tcg_temp_free(t0); } /* cmpb: PowerPC 2.05 specification */ @@ -1580,7 +1505,6 @@ static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, } else { tcg_gen_andc_tl(cpu_ov, cpu_ov, t0); } - tcg_temp_free(t0); if (NARROW_MODE(ctx)) { tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1); if (is_isa300(ctx)) { @@ -1613,7 +1537,6 @@ static inline void gen_op_arith_compute_ca32(DisasContext *ctx, } tcg_gen_xor_tl(t0, t0, res); tcg_gen_extract_tl(ca32, t0, 32, 1); - tcg_temp_free(t0); } /* Common add function */ @@ -1642,13 +1565,12 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_add_tl(t0, t0, ca); } tcg_gen_xor_tl(ca, t0, t1); /* bits changed w/ carry */ - tcg_temp_free(t1); tcg_gen_extract_tl(ca, ca, 32, 1); if (is_isa300(ctx)) { tcg_gen_mov_tl(ca32, ca); } } else { - TCGv zero = tcg_const_tl(0); + TCGv zero = tcg_constant_tl(0); if (add_ca) { tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero); tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero); @@ -1656,7 +1578,6 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero); } gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0); - tcg_temp_free(zero); } } else { tcg_gen_add_tl(t0, arg1, arg2); @@ -1674,7 +1595,6 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, if (t0 != ret) { tcg_gen_mov_tl(ret, t0); - tcg_temp_free(t0); } } /* Add functions with two operands */ @@ -1691,12 +1611,11 @@ static void glue(gen_, name)(DisasContext *ctx) \ add_ca, compute_ca, compute_ov) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ - TCGv t0 = tcg_const_tl(const_val); \ + TCGv t0 = tcg_constant_tl(const_val); \ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], t0, \ ca, glue(ca, 32), \ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ - tcg_temp_free(t0); \ } /* add add. addo addo. 
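 * All of them expand through gen_op_arith_add() above, where the
 * narrow-mode carry is recovered without branches from the identity
 * that bit 32 of (a ^ b ^ (a + b)) is the carry out of bit 31; that
 * is the xor-then-extract pair applied to the ca temporary.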
*/ @@ -1719,10 +1638,9 @@ GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1) /* addic addic.*/ static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0) { - TCGv c = tcg_const_tl(SIMM(ctx->opcode)); + TCGv c = tcg_constant_tl(SIMM(ctx->opcode)); gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0); - tcg_temp_free(c); } static void gen_addic(DisasContext *ctx) @@ -1769,10 +1687,6 @@ static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1, } tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); } - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, ret); @@ -1797,10 +1711,9 @@ GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1); #define GEN_DIVE(name, hlpr, compute_ov) \ static void gen_##name(DisasContext *ctx) \ { \ - TCGv_i32 t0 = tcg_const_i32(compute_ov); \ + TCGv_i32 t0 = tcg_constant_i32(compute_ov); \ gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \ - tcg_temp_free_i32(t0); \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \ } \ @@ -1844,10 +1757,6 @@ static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1, } tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, ret); @@ -1894,19 +1803,13 @@ static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_rem_i32(t3, t0, t1); tcg_gen_ext_i32_tl(ret, t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } else { - TCGv_i32 t2 = tcg_const_i32(1); - TCGv_i32 t3 = tcg_const_i32(0); + TCGv_i32 t2 = tcg_constant_i32(1); + TCGv_i32 t3 = tcg_constant_i32(0); tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1); - tcg_gen_remu_i32(t3, t0, t1); - tcg_gen_extu_i32_tl(ret, t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); + tcg_gen_remu_i32(t0, t0, t1); + tcg_gen_extu_i32_tl(ret, t0); } - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); } #define GEN_INT_ARITH_MODW(name, opc3, sign) \ @@ -1940,18 +1843,12 @@ static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_movi_i64(t3, 0); tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_rem_i64(ret, t0, t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } else { - TCGv_i64 t2 = tcg_const_i64(1); - TCGv_i64 t3 = tcg_const_i64(0); + TCGv_i64 t2 = tcg_constant_i64(1); + TCGv_i64 t3 = tcg_constant_i64(0); tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1); tcg_gen_remu_i64(ret, t0, t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } #define GEN_INT_ARITH_MODD(name, opc3, sign) \ @@ -1976,8 +1873,6 @@ static void gen_mulhw(DisasContext *ctx) tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_muls2_i32(t0, t1, t0, t1); tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -1993,8 +1888,6 @@ static void gen_mulhwu(DisasContext *ctx) tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_mulu2_i32(t0, t1, t0, t1); tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); if (unlikely(Rc(ctx->opcode) != 0)) { 
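/* One subtlety in gen_op_arith_modw() above: t2 and t3 are now
 * interned constants, and constants must never be written, so the
 * unsigned remainder is redirected into the scratch temporary t0
 * (rather than the old t3) before being extended into ret. */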
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -2010,8 +1903,6 @@ static void gen_mullw(DisasContext *ctx) tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); #else tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); @@ -2044,8 +1935,6 @@ static void gen_mullwo(DisasContext *ctx) } tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -2065,7 +1954,6 @@ static void gen_mulhd(DisasContext *ctx) TCGv lo = tcg_temp_new(); tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); - tcg_temp_free(lo); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -2077,7 +1965,6 @@ static void gen_mulhdu(DisasContext *ctx) TCGv lo = tcg_temp_new(); tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); - tcg_temp_free(lo); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -2110,9 +1997,6 @@ static void gen_mulldo(DisasContext *ctx) } tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -2148,9 +2032,7 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, } tcg_gen_xor_tl(t1, arg2, inv1); /* add without carry */ tcg_gen_add_tl(t0, t0, inv1); - tcg_temp_free(inv1); tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changes w/ carry */ - tcg_temp_free(t1); tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1); if (is_isa300(ctx)) { tcg_gen_mov_tl(cpu_ca32, cpu_ca); @@ -2158,12 +2040,10 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, } else if (add_ca) { TCGv zero, inv1 = tcg_temp_new(); tcg_gen_not_tl(inv1, arg1); - zero = tcg_const_tl(0); + zero = tcg_constant_tl(0); tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero); tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero); gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0); - tcg_temp_free(zero); - tcg_temp_free(inv1); } else { tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1); tcg_gen_sub_tl(t0, arg2, arg1); @@ -2190,7 +2070,6 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, if (t0 != ret) { tcg_gen_mov_tl(ret, t0); - tcg_temp_free(t0); } } /* Sub functions with Two operands functions */ @@ -2206,11 +2085,10 @@ static void glue(gen_, name)(DisasContext *ctx) \ add_ca, compute_ca, compute_ov) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ - TCGv t0 = tcg_const_tl(const_val); \ + TCGv t0 = tcg_constant_tl(const_val); \ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], t0, \ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ - tcg_temp_free(t0); \ } /* subf subf. subfo subfo. */ GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0) @@ -2231,19 +2109,17 @@ GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1) /* subfic */ static void gen_subfic(DisasContext *ctx) { - TCGv c = tcg_const_tl(SIMM(ctx->opcode)); + TCGv c = tcg_constant_tl(SIMM(ctx->opcode)); gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], c, 0, 1, 0, 0); - tcg_temp_free(c); } /* neg neg. nego nego. 
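 * Negation reuses the subtract path: gen_op_arith_neg() below feeds
 * gen_op_arith_subf() a tcg_constant_tl(0) in place of the old freed
 * zero temporary.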
*/ static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov) { - TCGv zero = tcg_const_tl(0); + TCGv zero = tcg_constant_tl(0); gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], zero, 0, 0, compute_ov, Rc(ctx->opcode)); - tcg_temp_free(zero); } static void gen_neg(DisasContext *ctx) @@ -2306,7 +2182,6 @@ static void gen_cntlzw(DisasContext *ctx) tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]); tcg_gen_clzi_i32(t, t, 32); tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t); - tcg_temp_free_i32(t); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); @@ -2321,7 +2196,6 @@ static void gen_cnttzw(DisasContext *ctx) tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]); tcg_gen_ctzi_i32(t, t, 32); tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t); - tcg_temp_free_i32(t); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); @@ -2342,10 +2216,9 @@ GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER); #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) static void gen_pause(DisasContext *ctx) { - TCGv_i32 t0 = tcg_const_i32(0); + TCGv_i32 t0 = tcg_constant_i32(0); tcg_gen_st_i32(t0, cpu_env, -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); - tcg_temp_free_i32(t0); /* Stop translation, this gives other CPUs a chance to run */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); @@ -2424,7 +2297,6 @@ static void gen_or(DisasContext *ctx) tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL); tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50); gen_store_spr(SPR_PPR, t0); - tcg_temp_free(t0); } #if !defined(CONFIG_USER_ONLY) /* @@ -2539,7 +2411,6 @@ static void gen_prtyw(DisasContext *ctx) tcg_gen_shri_tl(t0, ra, 8); tcg_gen_xor_tl(ra, ra, t0); tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL); - tcg_temp_free(t0); } #if defined(TARGET_PPC64) @@ -2556,7 +2427,6 @@ static void gen_prtyd(DisasContext *ctx) tcg_gen_shri_tl(t0, ra, 8); tcg_gen_xor_tl(ra, ra, t0); tcg_gen_andi_tl(ra, ra, 1); - tcg_temp_free(t0); } #endif @@ -2645,7 +2515,6 @@ static void gen_rlwimi(DisasContext *ctx) tcg_gen_trunc_tl_i32(t0, t_rs); tcg_gen_rotli_i32(t0, t0, sh); tcg_gen_extu_i32_tl(t1, t0); - tcg_temp_free_i32(t0); } else { #if defined(TARGET_PPC64) tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32); @@ -2658,7 +2527,6 @@ static void gen_rlwimi(DisasContext *ctx) tcg_gen_andi_tl(t1, t1, mask); tcg_gen_andi_tl(t_ra, t_ra, ~mask); tcg_gen_or_tl(t_ra, t_ra, t1); - tcg_temp_free(t1); } if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, t_ra); @@ -2702,7 +2570,6 @@ static void gen_rlwinm(DisasContext *ctx) tcg_gen_rotli_i32(t0, t0, sh); tcg_gen_andi_i32(t0, t0, mask); tcg_gen_extu_i32_tl(t_ra, t0); - tcg_temp_free_i32(t0); } } else { #if defined(TARGET_PPC64) @@ -2749,15 +2616,12 @@ static void gen_rlwnm(DisasContext *ctx) tcg_gen_andi_i32(t0, t0, 0x1f); tcg_gen_rotl_i32(t1, t1, t0); tcg_gen_extu_i32_tl(t_ra, t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); } else { #if defined(TARGET_PPC64) TCGv_i64 t0 = tcg_temp_new_i64(); tcg_gen_andi_i64(t0, t_rb, 0x1f); tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32); tcg_gen_rotl_i64(t_ra, t_ra, t0); - tcg_temp_free_i64(t0); #else g_assert_not_reached(); #endif @@ -2865,7 +2729,6 @@ static void gen_rldnm(DisasContext *ctx, int mb, int me) t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, t_rb, 0x3f); tcg_gen_rotl_tl(t_ra, t_rs, t0); - tcg_temp_free(t0); tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me)); if (unlikely(Rc(ctx->opcode) != 0)) { @@ -2912,7 +2775,6 @@ static void gen_rldimi(DisasContext *ctx, int 
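/* An aside on gen_pause() above: cpu_env points at the CPUPPCState
 * embedded inside PowerPCCPU, so subtracting offsetof(PowerPCCPU, env)
 * walks back to the start of the object, whose first member is the
 * CPUState; adding offsetof(CPUState, halted) then lets generated code
 * store the halt flag directly, without a helper call. */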
mbn, int shn) tcg_gen_andi_tl(t1, t1, mask); tcg_gen_andi_tl(t_ra, t_ra, ~mask); tcg_gen_or_tl(t_ra, t_ra, t1); - tcg_temp_free(t1); } if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, t_ra); @@ -2941,8 +2803,6 @@ static void gen_slw(DisasContext *ctx) t1 = tcg_temp_new(); tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f); tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t1); - tcg_temp_free(t0); tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); @@ -2978,7 +2838,6 @@ static void gen_srawi(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1); tcg_gen_and_tl(cpu_ca, cpu_ca, t0); - tcg_temp_free(t0); tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0); if (is_isa300(ctx)) { tcg_gen_mov_tl(cpu_ca32, cpu_ca); @@ -3009,8 +2868,6 @@ static void gen_srw(DisasContext *ctx) t1 = tcg_temp_new(); tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f); tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t1); - tcg_temp_free(t0); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } @@ -3030,8 +2887,6 @@ static void gen_sld(DisasContext *ctx) t1 = tcg_temp_new(); tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f); tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t1); - tcg_temp_free(t0); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } @@ -3064,7 +2919,6 @@ static inline void gen_sradi(DisasContext *ctx, int n) t0 = tcg_temp_new(); tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1); tcg_gen_and_tl(cpu_ca, cpu_ca, t0); - tcg_temp_free(t0); tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0); if (is_isa300(ctx)) { tcg_gen_mov_tl(cpu_ca32, cpu_ca); @@ -3123,8 +2977,6 @@ static void gen_srd(DisasContext *ctx) t1 = tcg_temp_new(); tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f); tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t1); - tcg_temp_free(t0); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } @@ -3296,7 +3148,6 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ } #define GEN_LDX(name, ldop, opc2, opc3, type) \ @@ -3314,7 +3165,6 @@ static void glue(gen_, name##epx)(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\ - tcg_temp_free(EA); \ } GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02) @@ -3342,7 +3192,6 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ } #define GEN_STX(name, stop, opc2, opc3, type) \ GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE) @@ -3360,7 +3209,6 @@ static void glue(gen_, name##epx)(DisasContext *ctx) \ gen_addr_reg_index(ctx, EA); \ tcg_gen_qemu_st_tl( \ cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop); \ - tcg_temp_free(EA); \ } GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06) @@ -3410,11 +3258,9 @@ static void gen_lmw(DisasContext *ctx) } gen_set_access_type(ctx, ACCESS_INT); t0 = tcg_temp_new(); - t1 = tcg_const_i32(rD(ctx->opcode)); + t1 = tcg_constant_i32(rD(ctx->opcode)); gen_addr_imm_index(ctx, t0, 0); gen_helper_lmw(cpu_env, t0, t1); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); } /* stmw */ 
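Two mechanical conversions account for most of the hunks in this file. First, tcg_const_*() allocated a fresh temporary, emitted a movi into it, and left the caller responsible for freeing it; tcg_constant_*() instead returns an interned, read-only value that must never be freed or written to. Second, TCG now reclaims ordinary temporaries automatically once the current guest instruction has been translated, so the explicit tcg_temp_free*() calls are dead code. A minimal sketch of the before/after shape, with gen_helper_foo standing in for an arbitrary helper (the name is illustrative, not from this patch):

    /* Old idiom: a mutable temp holding a constant, freed by hand. */
    TCGv_i32 t0 = tcg_const_i32(32);
    gen_helper_foo(cpu_env, t0);
    tcg_temp_free_i32(t0);

    /* New idiom: an interned constant plus an auto-reclaimed temp.
     * Neither value is freed; plain temps die with the instruction,
     * and constants are shared, so they must never be a destination. */
    TCGv_i32 c = tcg_constant_i32(32);
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_add_i32(t1, c, c);
    gen_helper_foo(cpu_env, t1);

The same reasoning applies to the _i64, _tl and _ptr variants throughout; the stmw hunk below is a representative instance.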
@@ -3429,11 +3275,9 @@ static void gen_stmw(DisasContext *ctx) } gen_set_access_type(ctx, ACCESS_INT); t0 = tcg_temp_new(); - t1 = tcg_const_i32(rS(ctx->opcode)); + t1 = tcg_constant_i32(rS(ctx->opcode)); gen_addr_imm_index(ctx, t0, 0); gen_helper_stmw(cpu_env, t0, t1); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); } /*** Integer load and store strings ***/ @@ -3469,12 +3313,9 @@ static void gen_lswi(DisasContext *ctx) gen_set_access_type(ctx, ACCESS_INT); t0 = tcg_temp_new(); gen_addr_register(ctx, t0); - t1 = tcg_const_i32(nb); - t2 = tcg_const_i32(start); + t1 = tcg_constant_i32(nb); + t2 = tcg_constant_i32(start); gen_helper_lsw(cpu_env, t0, t1, t2); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); } /* lswx */ @@ -3490,14 +3331,10 @@ static void gen_lswx(DisasContext *ctx) gen_set_access_type(ctx, ACCESS_INT); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); - t1 = tcg_const_i32(rD(ctx->opcode)); - t2 = tcg_const_i32(rA(ctx->opcode)); - t3 = tcg_const_i32(rB(ctx->opcode)); + t1 = tcg_constant_i32(rD(ctx->opcode)); + t2 = tcg_constant_i32(rA(ctx->opcode)); + t3 = tcg_constant_i32(rB(ctx->opcode)); gen_helper_lswx(cpu_env, t0, t1, t2, t3); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } /* stswi */ @@ -3517,12 +3354,9 @@ static void gen_stswi(DisasContext *ctx) if (nb == 0) { nb = 32; } - t1 = tcg_const_i32(nb); - t2 = tcg_const_i32(rS(ctx->opcode)); + t1 = tcg_constant_i32(nb); + t2 = tcg_constant_i32(rS(ctx->opcode)); gen_helper_stsw(cpu_env, t0, t1, t2); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); } /* stswx */ @@ -3541,11 +3375,8 @@ static void gen_stswx(DisasContext *ctx) t1 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(t1, cpu_xer); tcg_gen_andi_i32(t1, t1, 0x7F); - t2 = tcg_const_i32(rS(ctx->opcode)); + t2 = tcg_constant_i32(rS(ctx->opcode)); gen_helper_stsw(cpu_env, t0, t1, t2); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); } /*** Memory synchronisation ***/ @@ -3620,7 +3451,6 @@ static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) gen_helper_check_tlb_flush_local(cpu_env); } gen_set_label(l); - tcg_temp_free_i32(t); } #else static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { } @@ -3653,7 +3483,6 @@ static void gen_load_locked(DisasContext *ctx, MemOp memop) tcg_gen_mov_tl(cpu_reserve, t0); tcg_gen_mov_tl(cpu_reserve_val, gpr); tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); - tcg_temp_free(t0); } #define LARX(name, memop) \ @@ -3687,10 +3516,6 @@ static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop, /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */ tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1)); tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u); - - tcg_temp_free(t); - tcg_temp_free(t2); - tcg_temp_free(u); } static void gen_ld_atomic(DisasContext *ctx, MemOp memop) @@ -3753,9 +3578,6 @@ static void gen_ld_atomic(DisasContext *ctx, MemOp memop) cpu_gpr[(rt + 2) & 31], t0); tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop); tcg_gen_mov_tl(dst, t0); - - tcg_temp_free(t0); - tcg_temp_free(t1); } break; @@ -3785,7 +3607,6 @@ static void gen_ld_atomic(DisasContext *ctx, MemOp memop) /* invoke data storage error handler */ gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); } - tcg_temp_free(EA); if (need_serial) { /* Restart with exclusive lock. 
*/ @@ -3861,20 +3682,12 @@ static void gen_st_atomic(DisasContext *ctx, MemOp memop) tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2); tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop); tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop); - - tcg_temp_free(ea_plus_s); - tcg_temp_free(s2); - tcg_temp_free(s); - tcg_temp_free(t2); - tcg_temp_free(t); } break; default: /* invoke data storage error handler */ gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); } - tcg_temp_free(discard); - tcg_temp_free(EA); } static void gen_stwat(DisasContext *ctx) @@ -3899,7 +3712,6 @@ static void gen_conditional_store(DisasContext *ctx, MemOp memop) gen_set_access_type(ctx, ACCESS_RES); gen_addr_reg_index(ctx, t0); tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1); - tcg_temp_free(t0); t0 = tcg_temp_new(); tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val, @@ -3909,7 +3721,6 @@ static void gen_conditional_store(DisasContext *ctx, MemOp memop) tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT); tcg_gen_or_tl(t0, t0, cpu_so); tcg_gen_trunc_tl_i32(cpu_crf[0], t0); - tcg_temp_free(t0); tcg_gen_br(l2); gen_set_label(l1); @@ -3946,6 +3757,7 @@ static void gen_lqarx(DisasContext *ctx) { int rd = rD(ctx->opcode); TCGv EA, hi, lo; + TCGv_i128 t16; if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) || (rd == rB(ctx->opcode)))) { @@ -3961,39 +3773,9 @@ static void gen_lqarx(DisasContext *ctx) lo = cpu_gpr[rd + 1]; hi = cpu_gpr[rd]; - if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { - if (HAVE_ATOMIC128) { - TCGv_i32 oi = tcg_temp_new_i32(); - if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN, - ctx->mem_idx)); - gen_helper_lq_le_parallel(lo, cpu_env, EA, oi); - } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN, - ctx->mem_idx)); - gen_helper_lq_be_parallel(lo, cpu_env, EA, oi); - } - tcg_temp_free_i32(oi); - tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh)); - } else { - /* Restart with exclusive lock. 
*/ - gen_helper_exit_atomic(cpu_env); - ctx->base.is_jmp = DISAS_NORETURN; - tcg_temp_free(EA); - return; - } - } else if (ctx->le_mode) { - tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEUQ | MO_ALIGN_16); - tcg_gen_mov_tl(cpu_reserve, EA); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEUQ); - } else { - tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEUQ | MO_ALIGN_16); - tcg_gen_mov_tl(cpu_reserve, EA); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEUQ); - } - tcg_temp_free(EA); + t16 = tcg_temp_new_i128(); + tcg_gen_qemu_ld_i128(t16, EA, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN)); + tcg_gen_extr_i128_i64(lo, hi, t16); tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val)); tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2)); @@ -4020,7 +3802,6 @@ static void gen_stqcx_(DisasContext *ctx) gen_addr_reg_index(ctx, EA); tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail); - tcg_temp_free(EA); cmp = tcg_temp_new_i128(); val = tcg_temp_new_i128(); @@ -4032,23 +3813,19 @@ static void gen_stqcx_(DisasContext *ctx) tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN)); - tcg_temp_free_i128(cmp); t0 = tcg_temp_new(); t1 = tcg_temp_new(); tcg_gen_extr_i128_i64(t1, t0, val); - tcg_temp_free_i128(val); tcg_gen_xor_tl(t1, t1, cpu_reserve_val2); tcg_gen_xor_tl(t0, t0, cpu_reserve_val); tcg_gen_or_tl(t0, t0, t1); - tcg_temp_free(t1); tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0); tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT); tcg_gen_or_tl(t0, t0, cpu_so); tcg_gen_trunc_tl_i32(cpu_crf[0], t0); - tcg_temp_free(t0); tcg_gen_br(lab_over); gen_set_label(lab_fail); @@ -4142,10 +3919,9 @@ static void gen_wait(DisasContext *ctx) * to occur. */ if (wc == 0) { - TCGv_i32 t0 = tcg_const_i32(1); + TCGv_i32 t0 = tcg_constant_i32(1); tcg_gen_st_i32(t0, cpu_env, -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); - tcg_temp_free_i32(t0); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); } @@ -4189,9 +3965,8 @@ static void gen_doze(DisasContext *ctx) TCGv_i32 t; CHK_HV(ctx); - t = tcg_const_i32(PPC_PM_DOZE); + t = tcg_constant_i32(PPC_PM_DOZE); gen_helper_pminsn(cpu_env, t); - tcg_temp_free_i32(t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); #endif /* defined(CONFIG_USER_ONLY) */ @@ -4205,9 +3980,8 @@ static void gen_nap(DisasContext *ctx) TCGv_i32 t; CHK_HV(ctx); - t = tcg_const_i32(PPC_PM_NAP); + t = tcg_constant_i32(PPC_PM_NAP); gen_helper_pminsn(cpu_env, t); - tcg_temp_free_i32(t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); #endif /* defined(CONFIG_USER_ONLY) */ @@ -4221,9 +3995,8 @@ static void gen_stop(DisasContext *ctx) TCGv_i32 t; CHK_HV(ctx); - t = tcg_const_i32(PPC_PM_STOP); + t = tcg_constant_i32(PPC_PM_STOP); gen_helper_pminsn(cpu_env, t); - tcg_temp_free_i32(t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); #endif /* defined(CONFIG_USER_ONLY) */ @@ -4237,9 +4010,8 @@ static void gen_sleep(DisasContext *ctx) TCGv_i32 t; CHK_HV(ctx); - t = tcg_const_i32(PPC_PM_SLEEP); + t = tcg_constant_i32(PPC_PM_SLEEP); gen_helper_pminsn(cpu_env, t); - tcg_temp_free_i32(t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); #endif /* 
defined(CONFIG_USER_ONLY) */ @@ -4253,9 +4025,8 @@ static void gen_rvwinkle(DisasContext *ctx) TCGv_i32 t; CHK_HV(ctx); - t = tcg_const_i32(PPC_PM_RVWINKLE); + t = tcg_constant_i32(PPC_PM_RVWINKLE); gen_helper_pminsn(cpu_env, t); - tcg_temp_free_i32(t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); #endif /* defined(CONFIG_USER_ONLY) */ @@ -4309,7 +4080,6 @@ static void pmu_count_insns(DisasContext *ctx) } gen_set_label(l); - tcg_temp_free(t0); } else { gen_helper_insns_inc(cpu_env, tcg_constant_i32(ctx->base.num_insns)); } @@ -4324,8 +4094,6 @@ static void pmu_count_insns(DisasContext *ctx) gen_load_spr(t0, SPR_POWER_PMC5); tcg_gen_addi_tl(t0, t0, ctx->base.num_insns); gen_store_spr(SPR_POWER_PMC5, t0); - - tcg_temp_free(t0); #endif /* #if !defined(CONFIG_USER_ONLY) */ } #else @@ -4415,7 +4183,7 @@ static void gen_bcond(DisasContext *ctx, int type) TCGv target; if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) { - target = tcg_temp_local_new(); + target = tcg_temp_new(); if (type == BCOND_CTR) { tcg_gen_mov_tl(target, cpu_ctr); } else if (type == BCOND_TAR) { @@ -4451,8 +4219,6 @@ static void gen_bcond(DisasContext *ctx, int type) */ if (unlikely(!is_book3s_arch2x(ctx))) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - tcg_temp_free(temp); - tcg_temp_free(target); return; } @@ -4480,7 +4246,6 @@ static void gen_bcond(DisasContext *ctx, int type) tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1); } } - tcg_temp_free(temp); } if ((bo & 0x10) == 0) { /* Test CR */ @@ -4495,7 +4260,6 @@ static void gen_bcond(DisasContext *ctx, int type) tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask); tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1); } - tcg_temp_free_i32(temp); } gen_update_cfar(ctx, ctx->cia); if (type == BCOND_IM) { @@ -4512,7 +4276,6 @@ static void gen_bcond(DisasContext *ctx, int type) tcg_gen_andi_tl(cpu_nip, target, ~3); } gen_lookup_and_goto_ptr(ctx); - tcg_temp_free(target); } if ((bo & 0x14) != 0x14) { /* fallthrough case */ @@ -4570,8 +4333,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ tcg_gen_andi_i32(t0, t0, bitmask); \ tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask); \ tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1); \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_i32(t1); \ } /* crand */ @@ -4721,10 +4482,9 @@ static void gen_tw(DisasContext *ctx) if (check_unconditional_trap(ctx)) { return; } - t0 = tcg_const_i32(TO(ctx->opcode)); + t0 = tcg_constant_i32(TO(ctx->opcode)); gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); - tcg_temp_free_i32(t0); } /* twi */ @@ -4736,11 +4496,9 @@ static void gen_twi(DisasContext *ctx) if (check_unconditional_trap(ctx)) { return; } - t0 = tcg_const_tl(SIMM(ctx->opcode)); - t1 = tcg_const_i32(TO(ctx->opcode)); + t0 = tcg_constant_tl(SIMM(ctx->opcode)); + t1 = tcg_constant_i32(TO(ctx->opcode)); gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); } #if defined(TARGET_PPC64) @@ -4752,10 +4510,9 @@ static void gen_td(DisasContext *ctx) if (check_unconditional_trap(ctx)) { return; } - t0 = tcg_const_i32(TO(ctx->opcode)); + t0 = tcg_constant_i32(TO(ctx->opcode)); gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); - tcg_temp_free_i32(t0); } /* tdi */ @@ -4767,11 +4524,9 @@ static void gen_tdi(DisasContext *ctx) if (check_unconditional_trap(ctx)) { return; } - t0 = tcg_const_tl(SIMM(ctx->opcode)); - t1 = 
tcg_const_i32(TO(ctx->opcode)); + t0 = tcg_constant_tl(SIMM(ctx->opcode)); + t1 = tcg_constant_i32(TO(ctx->opcode)); gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); } #endif @@ -4792,8 +4547,6 @@ static void gen_mcrxr(DisasContext *ctx) tcg_gen_shli_i32(dst, dst, 1); tcg_gen_or_i32(dst, dst, t0); tcg_gen_or_i32(dst, dst, t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); tcg_gen_movi_tl(cpu_so, 0); tcg_gen_movi_tl(cpu_ov, 0); @@ -4817,8 +4570,6 @@ static void gen_mcrxrx(DisasContext *ctx) tcg_gen_or_tl(t1, t1, cpu_ca32); tcg_gen_or_tl(t0, t0, t1); tcg_gen_trunc_tl_i32(dst, t0); - tcg_temp_free(t0); - tcg_temp_free(t1); } #endif @@ -4853,7 +4604,6 @@ static void gen_mfcr(DisasContext *ctx) tcg_gen_shli_i32(t0, t0, 4); tcg_gen_or_i32(t0, t0, cpu_crf[7]); tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); - tcg_temp_free_i32(t0); } } @@ -4950,7 +4700,6 @@ static void gen_mtcrf(DisasContext *ctx) tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]); tcg_gen_shri_i32(temp, temp, crn * 4); tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf); - tcg_temp_free_i32(temp); } } else { TCGv_i32 temp = tcg_temp_new_i32(); @@ -4961,7 +4710,6 @@ static void gen_mtcrf(DisasContext *ctx) tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf); } } - tcg_temp_free_i32(temp); } } @@ -5008,9 +4756,6 @@ static void gen_mtmsrd(DisasContext *ctx) /* Must stop the translation as machine state (may have) changed */ ctx->base.is_jmp = DISAS_EXIT_UPDATE; - - tcg_temp_free(t0); - tcg_temp_free(t1); #endif /* !defined(CONFIG_USER_ONLY) */ } #endif /* defined(TARGET_PPC64) */ @@ -5050,9 +4795,6 @@ static void gen_mtmsr(DisasContext *ctx) /* Must stop the translation as machine state (may have) changed */ ctx->base.is_jmp = DISAS_EXIT_UPDATE; - - tcg_temp_free(t0); - tcg_temp_free(t1); #endif } @@ -5125,8 +4867,6 @@ static void gen_setb(DisasContext *ctx) tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4); tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0); tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); - - tcg_temp_free_i32(t0); } #endif @@ -5141,7 +4881,6 @@ static void gen_dcbf(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_qemu_ld8u(ctx, t0, t0); - tcg_temp_free(t0); } /* dcbfep (external PID dcbf) */ @@ -5154,7 +4893,6 @@ static void gen_dcbfep(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB)); - tcg_temp_free(t0); } /* dcbi (Supervisor only) */ @@ -5173,8 +4911,6 @@ static void gen_dcbi(DisasContext *ctx) /* XXX: specification says this should be treated as a store by the MMU */ gen_qemu_ld8u(ctx, val, EA); gen_qemu_st8(ctx, val, EA); - tcg_temp_free(val); - tcg_temp_free(EA); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5187,7 +4923,6 @@ static void gen_dcbst(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_qemu_ld8u(ctx, t0, t0); - tcg_temp_free(t0); } /* dcbstep (dcbstep External PID version) */ @@ -5199,7 +4934,6 @@ static void gen_dcbstep(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB)); - tcg_temp_free(t0); } /* dcbt */ @@ -5250,7 +4984,14 @@ static void gen_dcbtls(DisasContext *ctx) gen_load_spr(t0, SPR_Exxx_L1CSR0); tcg_gen_ori_tl(t0, t0, L1CSR0_CUL); gen_store_spr(SPR_Exxx_L1CSR0, t0); - tcg_temp_free(t0); +} + +/* dcblc */ +static void gen_dcblc(DisasContext *ctx) +{ + /* + * interpreted as no-op + */ } /* dcbz */ 
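The dcblc handler added just above is the missing half of the e500 cache-locking pair: dcbtls fetches and locks a data-cache line, dcblc releases the lock. QEMU does not model a real cache, so both can be accepted and ignored; what matters is that a guest using cache locking no longer takes an illegal-instruction exception on dcblc. Roughly what such a guest might emit (illustrative assembly, not from this patch):

    /*
     *   dcbtls 0, r3, r4    # fetch and lock the line at GPR[r3] + GPR[r4]
     *   ... use the pinned line as fast scratch memory ...
     *   dcblc  0, r3, r4    # release the lock on that line
     */

The matching GEN_HANDLER_E(dcblc, ...) entry appears in the opcode table further down, next to the existing dcbtls entry. The dcbz and dcbzep hunks that follow make the usual tcg_constant_i32() conversion for the opcode mask passed to the helper.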
@@ -5261,11 +5002,9 @@ static void gen_dcbz(DisasContext *ctx) gen_set_access_type(ctx, ACCESS_CACHE); tcgv_addr = tcg_temp_new(); - tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000); + tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000); gen_addr_reg_index(ctx, tcgv_addr); gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op); - tcg_temp_free(tcgv_addr); - tcg_temp_free_i32(tcgv_op); } /* dcbzep */ @@ -5276,11 +5015,9 @@ static void gen_dcbzep(DisasContext *ctx) gen_set_access_type(ctx, ACCESS_CACHE); tcgv_addr = tcg_temp_new(); - tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000); + tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000); gen_addr_reg_index(ctx, tcgv_addr); gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op); - tcg_temp_free(tcgv_addr); - tcg_temp_free_i32(tcgv_op); } /* dst / dstt */ @@ -5318,7 +5055,6 @@ static void gen_icbi(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_icbi(cpu_env, t0); - tcg_temp_free(t0); } /* icbiep */ @@ -5329,7 +5065,6 @@ static void gen_icbiep(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_icbiep(cpu_env, t0); - tcg_temp_free(t0); } /* Optional: */ @@ -5355,9 +5090,8 @@ static void gen_mfsr(DisasContext *ctx) TCGv t0; CHK_SV(ctx); - t0 = tcg_const_tl(SR(ctx->opcode)); + t0 = tcg_constant_tl(SR(ctx->opcode)); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5373,7 +5107,6 @@ static void gen_mfsrin(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5386,9 +5119,8 @@ static void gen_mtsr(DisasContext *ctx) TCGv t0; CHK_SV(ctx); - t0 = tcg_const_tl(SR(ctx->opcode)); + t0 = tcg_constant_tl(SR(ctx->opcode)); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5404,7 +5136,6 @@ static void gen_mtsrin(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5420,9 +5151,8 @@ static void gen_mfsr_64b(DisasContext *ctx) TCGv t0; CHK_SV(ctx); - t0 = tcg_const_tl(SR(ctx->opcode)); + t0 = tcg_constant_tl(SR(ctx->opcode)); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5438,7 +5168,6 @@ static void gen_mfsrin_64b(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5451,9 +5180,8 @@ static void gen_mtsr_64b(DisasContext *ctx) TCGv t0; CHK_SV(ctx); - t0 = tcg_const_tl(SR(ctx->opcode)); + t0 = tcg_constant_tl(SR(ctx->opcode)); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5469,7 +5197,6 @@ static void gen_mtsrin_64b(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5523,7 +5250,6 @@ static void gen_eciwx(DisasContext *ctx) gen_addr_reg_index(ctx, t0); tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx, DEF_MEMOP(MO_UL | MO_ALIGN)); - 
tcg_temp_free(t0); } /* ecowx */ @@ -5536,7 +5262,6 @@ static void gen_ecowx(DisasContext *ctx) gen_addr_reg_index(ctx, t0); tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx, DEF_MEMOP(MO_UL | MO_ALIGN)); - tcg_temp_free(t0); } /* 602 - 603 - G2 TLB management */ @@ -5584,7 +5309,6 @@ static void gen_tlbiva(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5594,8 +5318,8 @@ static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3, { TCGv t0, t1; - t0 = tcg_temp_local_new(); - t1 = tcg_temp_local_new(); + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); switch (opc3 & 0x0D) { case 0x05: @@ -5702,8 +5426,6 @@ static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3, } else { tcg_gen_mul_tl(cpu_gpr[rt], t0, t1); } - tcg_temp_free(t0); - tcg_temp_free(t1); if (unlikely(Rc) != 0) { /* Update Rc0 */ gen_set_Rc0(ctx, cpu_gpr[rt]); @@ -5812,9 +5534,8 @@ static void gen_mfdcr(DisasContext *ctx) TCGv dcrn; CHK_SV(ctx); - dcrn = tcg_const_tl(SPR(ctx->opcode)); + dcrn = tcg_constant_tl(SPR(ctx->opcode)); gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn); - tcg_temp_free(dcrn); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5827,9 +5548,8 @@ static void gen_mtdcr(DisasContext *ctx) TCGv dcrn; CHK_SV(ctx); - dcrn = tcg_const_tl(SPR(ctx->opcode)); + dcrn = tcg_constant_tl(SPR(ctx->opcode)); gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]); - tcg_temp_free(dcrn); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5882,9 +5602,7 @@ static void gen_dcread(DisasContext *ctx) gen_addr_reg_index(ctx, EA); val = tcg_temp_new(); gen_qemu_ld32u(ctx, val, EA); - tcg_temp_free(val); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA); - tcg_temp_free(EA); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6002,7 +5720,6 @@ static void gen_tlbsx_40x(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); if (Rc(ctx->opcode)) { TCGLabel *l1 = gen_new_label(); tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); @@ -6052,10 +5769,9 @@ static void gen_tlbre_440(DisasContext *ctx) case 1: case 2: { - TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode)); + TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode)); gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env, t0, cpu_gpr[rA(ctx->opcode)]); - tcg_temp_free_i32(t0); } break; default: @@ -6077,7 +5793,6 @@ static void gen_tlbsx_440(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); if (Rc(ctx->opcode)) { TCGLabel *l1 = gen_new_label(); tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); @@ -6100,10 +5815,9 @@ static void gen_tlbwe_440(DisasContext *ctx) case 1: case 2: { - TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode)); + TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode)); gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); - tcg_temp_free_i32(t0); } break; default: @@ -6137,14 +5851,11 @@ static void gen_tlbsx_booke206(DisasContext *ctx) CHK_SV(ctx); if (rA(ctx->opcode)) { t0 = tcg_temp_new(); - tcg_gen_mov_tl(t0, cpu_gpr[rD(ctx->opcode)]); + tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); } else { - t0 = tcg_const_tl(0); + t0 = cpu_gpr[rB(ctx->opcode)]; } - - tcg_gen_add_tl(t0, t0, cpu_gpr[rB(ctx->opcode)]); gen_helper_booke206_tlbsx(cpu_env, t0); - tcg_temp_free(t0); 
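/*
 * Note on the gen_tlbsx_booke206 hunk above: it is a behavior fix, not
 * just temp cleanup.  The architected lookup address is
 *
 *     EA = ((rA == 0) ? 0 : GPR[rA]) + GPR[rB]
 *
 * but the old code seeded t0 from GPR[rD], the destination field, before
 * adding GPR[rB].  The new code adds rA and rB into a fresh temp, or
 * passes cpu_gpr[rB] straight through when rA is 0, which is safe
 * because the helper only reads its argument.
 */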
#endif /* defined(CONFIG_USER_ONLY) */ } @@ -6170,7 +5881,6 @@ static void gen_tlbivax_booke206(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_booke206_tlbivax(cpu_env, t0); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6199,8 +5909,6 @@ static void gen_tlbilx_booke206(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); break; } - - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6218,7 +5926,6 @@ static void gen_wrtee(DisasContext *ctx) tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE)); tcg_gen_or_tl(cpu_msr, cpu_msr, t0); gen_ppc_maybe_interrupt(ctx); - tcg_temp_free(t0); /* * Stop translation to have a chance to raise an exception if we * just set msr_ee to 1 @@ -6250,10 +5957,9 @@ static void gen_wrteei(DisasContext *ctx) /* dlmzb */ static void gen_dlmzb(DisasContext *ctx) { - TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode)); + TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode)); gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); - tcg_temp_free_i32(t0); } /* mbar replaces eieio on 440 */ @@ -6290,7 +5996,6 @@ static void gen_maddld(DisasContext *ctx) tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]); - tcg_temp_free_i64(t1); } /* maddhd maddhdu */ @@ -6311,9 +6016,6 @@ static void gen_maddhd_maddhdu(DisasContext *ctx) } tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi, cpu_gpr[rC(ctx->opcode)], t1); - tcg_temp_free_i64(lo); - tcg_temp_free_i64(hi); - tcg_temp_free_i64(t1); } #endif /* defined(TARGET_PPC64) */ @@ -6664,9 +6366,6 @@ static void gen_brh(DisasContext *ctx) tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask); tcg_gen_shli_i64(t1, t1, 8); tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } #endif @@ -6824,6 +6523,7 @@ GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE), GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206), +GEN_HANDLER_E(dcblc, 0x1F, 0x06, 0x0c, 0x02000001, PPC_BOOKE, PPC2_BOOKE206), GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ), GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC), @@ -7618,8 +7318,6 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) { ctx->base.is_jmp = DISAS_TOO_MANY; } - - translator_loop_temp_check(&ctx->base); } static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) @@ -7707,7 +7405,7 @@ static const TranslatorOps ppc_tr_ops = { .disas_log = ppc_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; diff --git a/target/ppc/translate/dfp-impl.c.inc b/target/ppc/translate/dfp-impl.c.inc index f9f1d58d44..62911e04c7 100644 --- a/target/ppc/translate/dfp-impl.c.inc +++ b/target/ppc/translate/dfp-impl.c.inc @@ -20,9 +20,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ if (unlikely(a->rc)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(ra); 
\ - tcg_temp_free_ptr(rb); \ return true; \ } @@ -36,8 +33,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ rb = gen_fprp_ptr(a->rb); \ gen_helper_##NAME(cpu_crf[a->bf], \ cpu_env, ra, rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ return true; \ } @@ -50,7 +45,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ rb = gen_fprp_ptr(a->rb); \ gen_helper_##NAME(cpu_crf[a->bf], \ cpu_env, tcg_constant_i32(a->uim), rb);\ - tcg_temp_free_ptr(rb); \ return true; \ } @@ -63,7 +57,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ ra = gen_fprp_ptr(a->fra); \ gen_helper_##NAME(cpu_crf[a->bf], \ cpu_env, ra, tcg_constant_i32(a->dm)); \ - tcg_temp_free_ptr(ra); \ return true; \ } @@ -81,8 +74,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ if (unlikely(a->rc)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ return true; \ } @@ -100,9 +91,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ if (unlikely(a->rc)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ return true; \ } @@ -118,8 +106,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ if (unlikely(a->rc)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ return true; \ } @@ -136,8 +122,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ if (unlikely(a->rc)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rx); \ return true; \ } @@ -205,8 +189,6 @@ static bool trans_DCFFIXQQ(DisasContext *ctx, arg_DCFFIXQQ *a) rt = gen_fprp_ptr(a->frtp); rb = gen_avr_ptr(a->vrb); gen_helper_DCFFIXQQ(cpu_env, rt, rb); - tcg_temp_free_ptr(rt); - tcg_temp_free_ptr(rb); return true; } @@ -222,8 +204,6 @@ static bool trans_DCTFIXQQ(DisasContext *ctx, arg_DCTFIXQQ *a) rt = gen_avr_ptr(a->vrt); rb = gen_fprp_ptr(a->frbp); gen_helper_DCTFIXQQ(cpu_env, rt, rb); - tcg_temp_free_ptr(rt); - tcg_temp_free_ptr(rb); return true; } diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc index 1ba56cbed5..f47f1a50e8 100644 --- a/target/ppc/translate/fixedpoint-impl.c.inc +++ b/target/ppc/translate/fixedpoint-impl.c.inc @@ -42,8 +42,6 @@ static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update, if (update) { tcg_gen_mov_tl(cpu_gpr[ra], ea); } - tcg_temp_free(ea); - return true; } @@ -74,7 +72,7 @@ static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed) #if defined(TARGET_PPC64) TCGv ea; TCGv_i64 low_addr_gpr, high_addr_gpr; - MemOp mop; + TCGv_i128 t16; REQUIRE_INSNS_FLAGS(ctx, 64BX); @@ -103,53 +101,15 @@ static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed) low_addr_gpr = cpu_gpr[a->rt + 1]; high_addr_gpr = cpu_gpr[a->rt]; } + t16 = tcg_temp_new_i128(); - if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { - if (HAVE_ATOMIC128) { - mop = DEF_MEMOP(MO_128); - TCGv_i32 oi = tcg_constant_i32(make_memop_idx(mop, ctx->mem_idx)); - if (store) { - if (ctx->le_mode) { - gen_helper_stq_le_parallel(cpu_env, ea, low_addr_gpr, - high_addr_gpr, oi); - } else { - gen_helper_stq_be_parallel(cpu_env, ea, high_addr_gpr, - low_addr_gpr, oi); - - } - } else { - if (ctx->le_mode) { - gen_helper_lq_le_parallel(low_addr_gpr, cpu_env, ea, oi); - tcg_gen_ld_i64(high_addr_gpr, cpu_env, - offsetof(CPUPPCState, retxh)); - } else { - gen_helper_lq_be_parallel(high_addr_gpr, cpu_env, ea, 
oi); - tcg_gen_ld_i64(low_addr_gpr, cpu_env, - offsetof(CPUPPCState, retxh)); - } - } - } else { - /* Restart with exclusive lock. */ - gen_helper_exit_atomic(cpu_env); - ctx->base.is_jmp = DISAS_NORETURN; - } + if (store) { + tcg_gen_concat_i64_i128(t16, low_addr_gpr, high_addr_gpr); + tcg_gen_qemu_st_i128(t16, ea, ctx->mem_idx, DEF_MEMOP(MO_128)); } else { - mop = DEF_MEMOP(MO_UQ); - if (store) { - tcg_gen_qemu_st_i64(low_addr_gpr, ea, ctx->mem_idx, mop); - } else { - tcg_gen_qemu_ld_i64(low_addr_gpr, ea, ctx->mem_idx, mop); - } - - gen_addr_add(ctx, ea, ea, 8); - - if (store) { - tcg_gen_qemu_st_i64(high_addr_gpr, ea, ctx->mem_idx, mop); - } else { - tcg_gen_qemu_ld_i64(high_addr_gpr, ea, ctx->mem_idx, mop); - } + tcg_gen_qemu_ld_i128(t16, ea, ctx->mem_idx, DEF_MEMOP(MO_128)); + tcg_gen_extr_i128_i64(low_addr_gpr, high_addr_gpr, t16); } - tcg_temp_free(ea); #else qemu_build_not_reached(); #endif @@ -389,8 +349,6 @@ static bool do_set_bool_cond(DisasContext *ctx, arg_X_bi *a, bool neg, bool rev) if (neg) { tcg_gen_neg_tl(cpu_gpr[a->rt], cpu_gpr[a->rt]); } - tcg_temp_free(temp); - return true; } @@ -437,9 +395,6 @@ static void do_cntzdm(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 mask, int64_t trail) } tcg_gen_ctpop_i64(dst, t0); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static bool trans_CNTLZDM(DisasContext *ctx, arg_X *a) @@ -492,38 +447,35 @@ static bool trans_PEXTD(DisasContext *ctx, arg_X *a) static bool trans_ADDG6S(DisasContext *ctx, arg_X *a) { - const uint64_t carry_bits = 0x1111111111111111ULL; - TCGv t0, t1, carry, zero = tcg_constant_tl(0); + const target_ulong carry_bits = (target_ulong)-1 / 0xf; + TCGv in1, in2, carryl, carryh, tmp; + TCGv zero = tcg_constant_tl(0); REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206); - t0 = tcg_temp_new(); - t1 = tcg_const_tl(0); - carry = tcg_const_tl(0); + in1 = cpu_gpr[a->ra]; + in2 = cpu_gpr[a->rb]; + tmp = tcg_temp_new(); + carryl = tcg_temp_new(); + carryh = tcg_temp_new(); - for (int i = 0; i < 16; i++) { - tcg_gen_shri_tl(t0, cpu_gpr[a->ra], i * 4); - tcg_gen_andi_tl(t0, t0, 0xf); - tcg_gen_add_tl(t1, t1, t0); + /* Addition with carry. */ + tcg_gen_add2_tl(carryl, carryh, in1, zero, in2, zero); + /* Addition without carry. */ + tcg_gen_xor_tl(tmp, in1, in2); + /* Difference between the two is carry in to each bit. */ + tcg_gen_xor_tl(carryl, carryl, tmp); - tcg_gen_shri_tl(t0, cpu_gpr[a->rb], i * 4); - tcg_gen_andi_tl(t0, t0, 0xf); - tcg_gen_add_tl(t1, t1, t0); - - tcg_gen_andi_tl(t1, t1, 0x10); - tcg_gen_setcond_tl(TCG_COND_NE, t1, t1, zero); - - tcg_gen_shli_tl(t0, t1, i * 4); - tcg_gen_or_tl(carry, carry, t0); - } - - tcg_gen_xori_tl(carry, carry, (target_long)carry_bits); - tcg_gen_muli_tl(cpu_gpr[a->rt], carry, 6); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(carry); + /* + * The carry-out that we're looking for is the carry-in to + * the next nibble. Shift the double-word down one nibble, + * which puts all of the bits back into one word. + */ + tcg_gen_extract2_tl(carryl, carryl, carryh, 4); + /* Invert, isolate the carry bits, and produce 6's. 
*/ + tcg_gen_andc_tl(carryl, tcg_constant_tl(carry_bits), carryl); + tcg_gen_muli_tl(cpu_gpr[a->rt], carryl, 6); return true; } @@ -564,9 +516,6 @@ static bool do_hash(DisasContext *ctx, arg_X *a, bool priv, ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->rt)); helper(cpu_env, ea, cpu_gpr[a->ra], cpu_gpr[a->rb]); - - tcg_temp_free(ea); - return true; } diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc index 8d5cf0f982..874774eade 100644 --- a/target/ppc/translate/fp-impl.c.inc +++ b/target/ppc/translate/fp-impl.c.inc @@ -21,7 +21,6 @@ static void gen_set_cr1_from_fpscr(DisasContext *ctx) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(tmp, cpu_fpscr); tcg_gen_shri_i32(cpu_crf[1], tmp, 28); - tcg_temp_free_i32(tmp); } #else static void gen_set_cr1_from_fpscr(DisasContext *ctx) @@ -58,10 +57,6 @@ static void gen_f##name(DisasContext *ctx) \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ - tcg_temp_free_i64(t2); \ - tcg_temp_free_i64(t3); \ } #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \ @@ -92,9 +87,6 @@ static void gen_f##name(DisasContext *ctx) \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ - tcg_temp_free_i64(t2); \ } #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \ _GEN_FLOAT_AB(name, 0x3F, op2, inval, set_fprf, type); \ @@ -124,9 +116,6 @@ static void gen_f##name(DisasContext *ctx) \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ - tcg_temp_free_i64(t2); \ } #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \ _GEN_FLOAT_AC(name, 0x3F, op2, inval, set_fprf, type); \ @@ -154,8 +143,6 @@ static void gen_f##name(DisasContext *ctx) \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ } #define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \ @@ -179,8 +166,6 @@ static void gen_f##name(DisasContext *ctx) \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ } /* fadd - fadds */ @@ -218,8 +203,6 @@ static void gen_frsqrtes(DisasContext *ctx) if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static bool trans_FSEL(DisasContext *ctx, arg_A *a) @@ -242,11 +225,6 @@ static bool trans_FSEL(DisasContext *ctx, arg_A *a) if (a->rc) { gen_set_cr1_from_fpscr(ctx); } - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - return true; } @@ -273,10 +251,6 @@ static bool do_helper_fsqrt(DisasContext *ctx, arg_A_tb *a, if (unlikely(a->rc != 0)) { gen_set_cr1_from_fpscr(ctx); } - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -343,8 +317,6 @@ static void gen_ftdiv(DisasContext *ctx) get_fpr(t0, rA(ctx->opcode)); get_fpr(t1, rB(ctx->opcode)); gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], t0, t1); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static void gen_ftsqrt(DisasContext *ctx) @@ -357,7 +329,6 @@ static void gen_ftsqrt(DisasContext *ctx) t0 = tcg_temp_new_i64(); get_fpr(t0, rB(ctx->opcode)); gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], t0); - tcg_temp_free_i64(t0); } @@ -377,14 +348,11 @@ static void gen_fcmpo(DisasContext *ctx) t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); gen_reset_fpstatus(); - crf = 
tcg_const_i32(crfD(ctx->opcode)); + crf = tcg_constant_i32(crfD(ctx->opcode)); get_fpr(t0, rA(ctx->opcode)); get_fpr(t1, rB(ctx->opcode)); gen_helper_fcmpo(cpu_env, t0, t1, crf); - tcg_temp_free_i32(crf); gen_helper_float_check_status(cpu_env); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } /* fcmpu */ @@ -400,14 +368,11 @@ static void gen_fcmpu(DisasContext *ctx) t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); gen_reset_fpstatus(); - crf = tcg_const_i32(crfD(ctx->opcode)); + crf = tcg_constant_i32(crfD(ctx->opcode)); get_fpr(t0, rA(ctx->opcode)); get_fpr(t1, rB(ctx->opcode)); gen_helper_fcmpu(cpu_env, t0, t1, crf); - tcg_temp_free_i32(crf); gen_helper_float_check_status(cpu_env); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } /*** Floating-point move ***/ @@ -429,8 +394,6 @@ static void gen_fabs(DisasContext *ctx) if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } /* fmr - fmr. */ @@ -448,7 +411,6 @@ static void gen_fmr(DisasContext *ctx) if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); } /* fnabs */ @@ -469,8 +431,6 @@ static void gen_fnabs(DisasContext *ctx) if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } /* fneg */ @@ -491,8 +451,6 @@ static void gen_fneg(DisasContext *ctx) if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } /* fcpsgn: PowerPC 2.05 specification */ @@ -516,9 +474,6 @@ static void gen_fcpsgn(DisasContext *ctx) if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } static void gen_fmrgew(DisasContext *ctx) @@ -538,9 +493,6 @@ static void gen_fmrgew(DisasContext *ctx) get_fpr(t0, rA(ctx->opcode)); tcg_gen_deposit_i64(t1, t0, b0, 0, 32); set_fpr(rD(ctx->opcode), t1); - tcg_temp_free_i64(b0); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static void gen_fmrgow(DisasContext *ctx) @@ -559,9 +511,6 @@ static void gen_fmrgow(DisasContext *ctx) get_fpr(t1, rA(ctx->opcode)); tcg_gen_deposit_i64(t2, t0, t1, 32, 32); set_fpr(rD(ctx->opcode), t2); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } /*** Floating-Point status & ctrl register ***/ @@ -587,16 +536,13 @@ static void gen_mcrfs(DisasContext *ctx) tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp); tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf); - tcg_temp_free(tmp); tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr); /* Only the exception bits (including FX) should be cleared if read */ tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS)); /* FEX and VX need to be updated, so don't set fpscr directly */ - tmask = tcg_const_i32(1 << nibble); + tmask = tcg_constant_i32(1 << nibble); gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask); - tcg_temp_free_i32(tmask); - tcg_temp_free_i64(tnew_fpscr); } static TCGv_i64 place_from_fpscr(int rt, uint64_t mask) @@ -608,8 +554,6 @@ static TCGv_i64 place_from_fpscr(int rt, uint64_t mask) tcg_gen_andi_i64(fpscr_masked, fpscr, mask); set_fpr(rt, fpscr_masked); - tcg_temp_free_i64(fpscr_masked); - return fpscr; } @@ -622,24 +566,33 @@ static void store_fpscr_masked(TCGv_i64 fpscr, uint64_t clear_mask, tcg_gen_andi_i64(fpscr_masked, fpscr, ~clear_mask); tcg_gen_or_i64(fpscr_masked, fpscr_masked, set_mask); gen_helper_store_fpscr(cpu_env, fpscr_masked, st_mask); +} - 
tcg_temp_free_i64(fpscr_masked); +static bool trans_MFFS_ISA207(DisasContext *ctx, arg_X_t_rc *a) +{ + if (!(ctx->insns_flags2 & PPC2_ISA300)) { + /* + * Before Power ISA v3.0, MFFS bits 11~15 were reserved, so any instruction + * with OPCD=63 and XO=583 should be decoded as MFFS. + */ + return trans_MFFS(ctx, a); + } + /* + * For Power ISA v3.0+, return false and let the pattern group + * select the correct instruction. + */ + return false; } static bool trans_MFFS(DisasContext *ctx, arg_X_t_rc *a) { - TCGv_i64 fpscr; - REQUIRE_FPU(ctx); gen_reset_fpstatus(); - fpscr = place_from_fpscr(a->rt, UINT64_MAX); + place_from_fpscr(a->rt, UINT64_MAX); if (a->rc) { gen_set_cr1_from_fpscr(ctx); } - - tcg_temp_free_i64(fpscr); - return true; } @@ -647,15 +600,11 @@ static bool trans_MFFSCE(DisasContext *ctx, arg_X_t *a) { TCGv_i64 fpscr; - REQUIRE_INSNS_FLAGS2(ctx, ISA300); REQUIRE_FPU(ctx); gen_reset_fpstatus(); fpscr = place_from_fpscr(a->rt, UINT64_MAX); store_fpscr_masked(fpscr, FP_ENABLES, tcg_constant_i64(0), 0x0003); - - tcg_temp_free_i64(fpscr); - return true; } @@ -663,7 +612,6 @@ static bool trans_MFFSCRN(DisasContext *ctx, arg_X_tb *a) { TCGv_i64 t1, fpscr; - REQUIRE_INSNS_FLAGS2(ctx, ISA300); REQUIRE_FPU(ctx); t1 = tcg_temp_new_i64(); @@ -673,10 +621,6 @@ static bool trans_MFFSCRN(DisasContext *ctx, arg_X_tb *a) gen_reset_fpstatus(); fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); store_fpscr_masked(fpscr, FP_RN, t1, 0x0001); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(fpscr); - return true; } @@ -684,7 +628,6 @@ static bool trans_MFFSCDRN(DisasContext *ctx, arg_X_tb *a) { TCGv_i64 t1, fpscr; - REQUIRE_INSNS_FLAGS2(ctx, ISA300); REQUIRE_FPU(ctx); t1 = tcg_temp_new_i64(); @@ -694,10 +637,6 @@ static bool trans_MFFSCDRN(DisasContext *ctx, arg_X_tb *a) gen_reset_fpstatus(); fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(fpscr); - return true; } @@ -705,7 +644,6 @@ static bool trans_MFFSCRNI(DisasContext *ctx, arg_X_imm2 *a) { TCGv_i64 t1, fpscr; - REQUIRE_INSNS_FLAGS2(ctx, ISA300); REQUIRE_FPU(ctx); t1 = tcg_temp_new_i64(); @@ -714,10 +652,6 @@ static bool trans_MFFSCRNI(DisasContext *ctx, arg_X_imm2 *a) gen_reset_fpstatus(); fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); store_fpscr_masked(fpscr, FP_RN, t1, 0x0001); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(fpscr); - return true; } @@ -725,7 +659,6 @@ static bool trans_MFFSCDRNI(DisasContext *ctx, arg_X_imm3 *a) { TCGv_i64 t1, fpscr; - REQUIRE_INSNS_FLAGS2(ctx, ISA300); REQUIRE_FPU(ctx); t1 = tcg_temp_new_i64(); @@ -734,26 +667,15 @@ static bool trans_MFFSCDRNI(DisasContext *ctx, arg_X_imm3 *a) gen_reset_fpstatus(); fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(fpscr); - return true; } static bool trans_MFFSL(DisasContext *ctx, arg_X_t *a) { - TCGv_i64 fpscr; - - REQUIRE_INSNS_FLAGS2(ctx, ISA300); REQUIRE_FPU(ctx); gen_reset_fpstatus(); - fpscr = place_from_fpscr(a->rt, - FP_DRN | FP_STATUS | FP_ENABLES | FP_NI | FP_RN); - - tcg_temp_free_i64(fpscr); - + place_from_fpscr(a->rt, FP_DRN | FP_STATUS | FP_ENABLES | FP_NI | FP_RN); return true; } @@ -769,10 +691,7 @@ static void gen_mtfsb0(DisasContext *ctx) crb = 31 - crbD(ctx->opcode); gen_reset_fpstatus(); if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) { - TCGv_i32 t0; - t0 = tcg_const_i32(crb); -
gen_helper_fpscr_clrbit(cpu_env, t0); - tcg_temp_free_i32(t0); + gen_helper_fpscr_clrbit(cpu_env, tcg_constant_i32(crb)); } if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); @@ -792,10 +711,7 @@ static void gen_mtfsb1(DisasContext *ctx) crb = 31 - crbD(ctx->opcode); /* XXX: we pretend we can only do IEEE floating-point computations */ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) { - TCGv_i32 t0; - t0 = tcg_const_i32(crb); - gen_helper_fpscr_setbit(cpu_env, t0); - tcg_temp_free_i32(t0); + gen_helper_fpscr_setbit(cpu_env, tcg_constant_i32(crb)); } if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); @@ -823,22 +739,22 @@ static void gen_mtfsf(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); return; } - if (l) { - t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff); + if (!l) { + t0 = tcg_constant_i32(flm << (w * 8)); + } else if (ctx->insns_flags2 & PPC2_ISA205) { + t0 = tcg_constant_i32(0xffff); } else { - t0 = tcg_const_i32(flm << (w * 8)); + t0 = tcg_constant_i32(0xff); } t1 = tcg_temp_new_i64(); get_fpr(t1, rB(ctx->opcode)); gen_helper_store_fpscr(cpu_env, t1, t0); - tcg_temp_free_i32(t0); if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); } /* We can raise a deferred exception */ gen_helper_fpscr_check_status(cpu_env); - tcg_temp_free_i64(t1); } /* mtfsfi */ @@ -859,11 +775,9 @@ static void gen_mtfsfi(DisasContext *ctx) return; } sh = (8 * w) + 7 - bf; - t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh)); - t1 = tcg_const_i32(1 << sh); + t0 = tcg_constant_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh)); + t1 = tcg_constant_i32(1 << sh); gen_helper_store_fpscr(cpu_env, t0, t1); - tcg_temp_free_i64(t0); - tcg_temp_free_i32(t1); if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); @@ -877,7 +791,6 @@ static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_qemu_ld_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL)); gen_helper_todouble(dest, tmp); - tcg_temp_free_i32(tmp); } /* lfdepx (external PID lfdx) */ @@ -896,8 +809,6 @@ static void gen_lfdepx(DisasContext *ctx) gen_addr_reg_index(ctx, EA); tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UQ)); set_fpr(rD(ctx->opcode), t0); - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } /* lfdp */ @@ -930,8 +841,6 @@ static void gen_lfdp(DisasContext *ctx) gen_qemu_ld64_i64(ctx, t0, EA); set_fpr(rD(ctx->opcode) + 1, t0); } - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } /* lfdpx */ @@ -964,8 +873,6 @@ static void gen_lfdpx(DisasContext *ctx) gen_qemu_ld64_i64(ctx, t0, EA); set_fpr(rD(ctx->opcode) + 1, t0); } - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } /* lfiwax */ @@ -986,9 +893,6 @@ static void gen_lfiwax(DisasContext *ctx) gen_qemu_ld32s(ctx, t0, EA); tcg_gen_ext_tl_i64(t1, t0); set_fpr(rD(ctx->opcode), t1); - tcg_temp_free(EA); - tcg_temp_free(t0); - tcg_temp_free_i64(t1); } /* lfiwzx */ @@ -1006,8 +910,6 @@ static void gen_lfiwzx(DisasContext *ctx) gen_addr_reg_index(ctx, EA); gen_qemu_ld32u_i64(ctx, t0, EA); set_fpr(rD(ctx->opcode), t0); - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } #define GEN_STXF(name, stop, opc2, opc3, type) \ @@ -1025,8 +927,6 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ gen_addr_reg_index(ctx, EA); \ get_fpr(t0, rS(ctx->opcode)); \ 
gen_qemu_##stop(ctx, t0, EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ } static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr) @@ -1034,7 +934,6 @@ static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr) TCGv_i32 tmp = tcg_temp_new_i32(); gen_helper_tosingle(tmp, src); tcg_gen_qemu_st_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL)); - tcg_temp_free_i32(tmp); } /* stfdepx (external PID lfdx) */ @@ -1053,8 +952,6 @@ static void gen_stfdepx(DisasContext *ctx) gen_addr_reg_index(ctx, EA); get_fpr(t0, rD(ctx->opcode)); tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_UQ)); - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } /* stfdp */ @@ -1087,8 +984,6 @@ static void gen_stfdp(DisasContext *ctx) get_fpr(t0, rD(ctx->opcode) + 1); gen_qemu_st64_i64(ctx, t0, EA); } - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } /* stfdpx */ @@ -1121,8 +1016,6 @@ static void gen_stfdpx(DisasContext *ctx) get_fpr(t0, rD(ctx->opcode) + 1); gen_qemu_st64_i64(ctx, t0, EA); } - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } /* Optional: */ @@ -1131,7 +1024,6 @@ static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2) TCGv t0 = tcg_temp_new(); tcg_gen_trunc_i64_tl(t0, arg1), gen_qemu_st32(ctx, t0, arg2); - tcg_temp_free(t0); } /* stfiwx */ GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX); @@ -1169,8 +1061,6 @@ static bool do_lsfpsd(DisasContext *ctx, int rt, int ra, TCGv displ, if (update) { tcg_gen_mov_tl(cpu_gpr[ra], ea); } - tcg_temp_free_i64(t0); - tcg_temp_free(ea); return true; } diff --git a/target/ppc/translate/spe-impl.c.inc b/target/ppc/translate/spe-impl.c.inc index 2e6e799a25..f4a858487d 100644 --- a/target/ppc/translate/spe-impl.c.inc +++ b/target/ppc/translate/spe-impl.c.inc @@ -23,7 +23,6 @@ static inline void gen_evmra(DisasContext *ctx) /* spe_acc := tmp */ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc)); - tcg_temp_free_i64(tmp); /* rD := rA */ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); @@ -96,8 +95,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \ tcg_opi(t0, t0, rB(ctx->opcode)); \ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \ - \ - tcg_temp_free_i32(t0); \ } GEN_SPEOP_TCG_LOGIC_IMM2(evslwi, tcg_gen_shli_i32); GEN_SPEOP_TCG_LOGIC_IMM2(evsrwiu, tcg_gen_shri_i32); @@ -122,8 +119,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \ tcg_op(t0, t0); \ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \ - \ - tcg_temp_free_i32(t0); \ } GEN_SPEOP_ARITH1(evabs, tcg_gen_abs_i32); @@ -159,16 +154,13 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t1, cpu_gprh[rB(ctx->opcode)]); \ tcg_op(t0, t0, t1); \ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \ - \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_i32(t1); \ } static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { TCGLabel *l1 = gen_new_label(); TCGLabel *l2 = gen_new_label(); - TCGv_i32 t0 = tcg_temp_local_new_i32(); + TCGv_i32 t0 = tcg_temp_new_i32(); /* No error here: 6 bits are used */ tcg_gen_andi_i32(t0, arg2, 0x3F); @@ -178,14 +170,13 @@ static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) gen_set_label(l1); tcg_gen_movi_i32(ret, 0); gen_set_label(l2); - tcg_temp_free_i32(t0); } GEN_SPEOP_ARITH2(evsrwu, gen_op_evsrwu); static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { TCGLabel *l1 = gen_new_label(); 
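/*
 * gen_op_evsrwu and the evsrws/evslw twins below keep t0 live across the
 * brcondi/gen_set_label control flow.  Under the old TCG rules that
 * required tcg_temp_local_new_i32(), because ordinary temps did not
 * survive a branch; temps are branch-safe now, so the hunks below simply
 * downgrade the locals to tcg_temp_new_i32().  The same change appears
 * earlier for gen_bcond and gen_405_mulladd_insn, and below for gen_evsel.
 */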
TCGLabel *l2 = gen_new_label(); - TCGv_i32 t0 = tcg_temp_local_new_i32(); + TCGv_i32 t0 = tcg_temp_new_i32(); /* No error here: 6 bits are used */ tcg_gen_andi_i32(t0, arg2, 0x3F); @@ -195,14 +186,13 @@ static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) gen_set_label(l1); tcg_gen_movi_i32(ret, 0); gen_set_label(l2); - tcg_temp_free_i32(t0); } GEN_SPEOP_ARITH2(evsrws, gen_op_evsrws); static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { TCGLabel *l1 = gen_new_label(); TCGLabel *l2 = gen_new_label(); - TCGv_i32 t0 = tcg_temp_local_new_i32(); + TCGv_i32 t0 = tcg_temp_new_i32(); /* No error here: 6 bits are used */ tcg_gen_andi_i32(t0, arg2, 0x3F); @@ -212,7 +202,6 @@ static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) gen_set_label(l1); tcg_gen_movi_i32(ret, 0); gen_set_label(l2); - tcg_temp_free_i32(t0); } GEN_SPEOP_ARITH2(evslw, gen_op_evslw); static inline void gen_op_evrlw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) @@ -220,7 +209,6 @@ static inline void gen_op_evrlw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) TCGv_i32 t0 = tcg_temp_new_i32(); tcg_gen_andi_i32(t0, arg2, 0x1F); tcg_gen_rotl_i32(ret, arg1, t0); - tcg_temp_free_i32(t0); } GEN_SPEOP_ARITH2(evrlw, gen_op_evrlw); static inline void gen_evmergehi(DisasContext *ctx) @@ -257,8 +245,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rB(ctx->opcode)]); \ tcg_op(t0, t0, rA(ctx->opcode)); \ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \ - \ - tcg_temp_free_i32(t0); \ } GEN_SPEOP_ARITH_IMM2(evaddiw, tcg_gen_addi_i32); GEN_SPEOP_ARITH_IMM2(evsubifw, tcg_gen_subi_i32); @@ -341,7 +327,6 @@ static inline void gen_evmergelohi(DisasContext *ctx) tcg_gen_mov_tl(tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], tmp); - tcg_temp_free(tmp); } else { tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); @@ -378,7 +363,7 @@ static inline void gen_evsel(DisasContext *ctx) TCGLabel *l2 = gen_new_label(); TCGLabel *l3 = gen_new_label(); TCGLabel *l4 = gen_new_label(); - TCGv_i32 t0 = tcg_temp_local_new_i32(); + TCGv_i32 t0 = tcg_temp_new_i32(); tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 3); tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1); @@ -394,7 +379,6 @@ static inline void gen_evsel(DisasContext *ctx) gen_set_label(l3); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); gen_set_label(l4); - tcg_temp_free_i32(t0); } static void gen_evsel0(DisasContext *ctx) @@ -456,9 +440,6 @@ static inline void gen_evmwumi(DisasContext *ctx) tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */ gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */ - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static inline void gen_evmwumia(DisasContext *ctx) @@ -477,7 +458,6 @@ static inline void gen_evmwumia(DisasContext *ctx) /* acc := rD */ gen_load_gpr64(tmp, rD(ctx->opcode)); tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc)); - tcg_temp_free_i64(tmp); } static inline void gen_evmwumiaa(DisasContext *ctx) @@ -509,9 +489,6 @@ static inline void gen_evmwumiaa(DisasContext *ctx) /* rD := acc */ gen_store_gpr64(rD(ctx->opcode), acc); - - tcg_temp_free_i64(acc); - tcg_temp_free_i64(tmp); } static inline void gen_evmwsmi(DisasContext *ctx) @@ -535,9 +512,6 @@ static inline void gen_evmwsmi(DisasContext *ctx) tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * 
rB */ gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */ - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static inline void gen_evmwsmia(DisasContext *ctx) @@ -556,8 +530,6 @@ static inline void gen_evmwsmia(DisasContext *ctx) /* acc := rD */ gen_load_gpr64(tmp, rD(ctx->opcode)); tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc)); - - tcg_temp_free_i64(tmp); } static inline void gen_evmwsmiaa(DisasContext *ctx) @@ -589,9 +561,6 @@ static inline void gen_evmwsmiaa(DisasContext *ctx) /* rD := acc */ gen_store_gpr64(rD(ctx->opcode), acc); - - tcg_temp_free_i64(acc); - tcg_temp_free_i64(tmp); } GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); //// @@ -644,7 +613,6 @@ static inline void gen_op_evldd(DisasContext *ctx, TCGv addr) TCGv_i64 t0 = tcg_temp_new_i64(); gen_qemu_ld64_i64(ctx, t0, addr); gen_store_gpr64(rD(ctx->opcode), t0); - tcg_temp_free_i64(t0); } static inline void gen_op_evldw(DisasContext *ctx, TCGv addr) @@ -668,7 +636,6 @@ static inline void gen_op_evldh(DisasContext *ctx, TCGv addr) gen_addr_add(ctx, addr, addr, 2); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0); - tcg_temp_free(t0); } static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr) @@ -678,7 +645,6 @@ static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr) tcg_gen_shli_tl(t0, t0, 16); tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0); - tcg_temp_free(t0); } static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr) @@ -687,7 +653,6 @@ static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr) gen_qemu_ld16u(ctx, t0, addr); tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0); - tcg_temp_free(t0); } static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr) @@ -696,7 +661,6 @@ static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr) gen_qemu_ld16s(ctx, t0, addr); tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0); - tcg_temp_free(t0); } static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr) @@ -707,7 +671,6 @@ static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr) gen_addr_add(ctx, addr, addr, 2); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16); - tcg_temp_free(t0); } static inline void gen_op_evlwhou(DisasContext *ctx, TCGv addr) @@ -730,7 +693,6 @@ static inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr) gen_qemu_ld32u(ctx, t0, addr); tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0); - tcg_temp_free(t0); } static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr) @@ -743,7 +705,6 @@ static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr) gen_qemu_ld16u(ctx, t0, addr); tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16); tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0); - tcg_temp_free(t0); } static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr) @@ -751,7 +712,6 @@ static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr) TCGv_i64 t0 = tcg_temp_new_i64(); gen_load_gpr64(t0, rS(ctx->opcode)); gen_qemu_st64_i64(ctx, t0, addr); - tcg_temp_free_i64(t0); } static inline void gen_op_evstdw(DisasContext *ctx, TCGv addr) @@ -771,7 +731,6 @@ static inline void gen_op_evstdh(DisasContext *ctx, TCGv addr) gen_addr_add(ctx, addr, addr, 2); tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16); 
gen_qemu_st16(ctx, t0, addr); - tcg_temp_free(t0); gen_addr_add(ctx, addr, addr, 2); gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr); } @@ -784,7 +743,6 @@ static inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr) gen_addr_add(ctx, addr, addr, 2); tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16); gen_qemu_st16(ctx, t0, addr); - tcg_temp_free(t0); } static inline void gen_op_evstwho(DisasContext *ctx, TCGv addr) @@ -820,7 +778,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ gen_addr_reg_index(ctx, t0); \ } \ gen_op_##name(ctx, t0); \ - tcg_temp_free(t0); \ } GEN_SPEOP_LDST(evldd, 0x00, 3); @@ -923,7 +880,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \ gen_helper_##name(t0, cpu_env, t0); \ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \ - tcg_temp_free_i32(t0); \ } #define GEN_SPEFPUOP_CONV_32_64(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -939,8 +895,6 @@ static inline void gen_##name(DisasContext *ctx) \ gen_load_gpr64(t0, rB(ctx->opcode)); \ gen_helper_##name(t1, cpu_env, t0); \ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i32(t1); \ } #define GEN_SPEFPUOP_CONV_64_32(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -956,8 +910,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \ gen_helper_##name(t0, cpu_env, t1); \ gen_store_gpr64(rD(ctx->opcode), t0); \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i32(t1); \ } #define GEN_SPEFPUOP_CONV_64_64(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -971,7 +923,6 @@ static inline void gen_##name(DisasContext *ctx) \ gen_load_gpr64(t0, rB(ctx->opcode)); \ gen_helper_##name(t0, cpu_env, t0); \ gen_store_gpr64(rD(ctx->opcode), t0); \ - tcg_temp_free_i64(t0); \ } #define GEN_SPEFPUOP_ARITH2_32_32(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -982,9 +933,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \ gen_helper_##name(t0, cpu_env, t0, t1); \ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \ - \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_i32(t1); \ } #define GEN_SPEFPUOP_ARITH2_64_64(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -1000,8 +948,6 @@ static inline void gen_##name(DisasContext *ctx) \ gen_load_gpr64(t1, rB(ctx->opcode)); \ gen_helper_##name(t0, cpu_env, t0, t1); \ gen_store_gpr64(rD(ctx->opcode), t0); \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ } #define GEN_SPEFPUOP_COMP_32(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -1012,9 +958,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \ - \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_i32(t1); \ } #define GEN_SPEFPUOP_COMP_64(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -1029,8 +972,6 @@ static inline void gen_##name(DisasContext *ctx) \ gen_load_gpr64(t0, rA(ctx->opcode)); \ gen_load_gpr64(t1, rB(ctx->opcode)); \ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ } /* Single precision floating-point vectors operations */ diff --git a/target/ppc/translate/storage-ctrl-impl.c.inc b/target/ppc/translate/storage-ctrl-impl.c.inc index 6ea1d22ef9..faa7b04bbc 100644 --- 
a/target/ppc/translate/storage-ctrl-impl.c.inc +++ b/target/ppc/translate/storage-ctrl-impl.c.inc @@ -212,7 +212,6 @@ static bool do_tlbie(DisasContext *ctx, arg_X_tlbie *a, bool local) TCGv t0 = tcg_temp_new(); tcg_gen_ext32u_tl(t0, cpu_gpr[rb]); gen_helper_tlbie(cpu_env, t0); - tcg_temp_free(t0); #if defined(TARGET_PPC64) /* @@ -240,7 +239,6 @@ static bool do_tlbie(DisasContext *ctx, arg_X_tlbie *a, bool local) tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH); tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); - tcg_temp_free_i32(t1); return true; #endif diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc index 7741f2eb49..c8712dd7d8 100644 --- a/target/ppc/translate/vmx-impl.c.inc +++ b/target/ppc/translate/vmx-impl.c.inc @@ -45,8 +45,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ gen_qemu_ld64_i64(ctx, avr, EA); \ set_avr64(rD(ctx->opcode), avr, false); \ } \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(avr); \ } #define GEN_VR_STX(name, opc2, opc3) \ @@ -80,8 +78,6 @@ static void gen_st##name(DisasContext *ctx) \ get_avr64(avr, rD(ctx->opcode), false); \ gen_qemu_st64_i64(ctx, avr, EA); \ } \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(avr); \ } #define GEN_VR_LVE(name, opc2, opc3, size) \ @@ -101,8 +97,6 @@ static void gen_lve##name(DisasContext *ctx) \ } \ rs = gen_avr_ptr(rS(ctx->opcode)); \ gen_helper_lve##name(cpu_env, rs, EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_ptr(rs); \ } #define GEN_VR_STVE(name, opc2, opc3, size) \ @@ -122,8 +116,6 @@ static void gen_stve##name(DisasContext *ctx) \ } \ rs = gen_avr_ptr(rS(ctx->opcode)); \ gen_helper_stve##name(cpu_env, rs, EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_ptr(rs); \ } GEN_VR_LDX(lvx, 0x07, 0x03); @@ -157,8 +149,6 @@ static void gen_mfvscr(DisasContext *ctx) gen_helper_mfvscr(t, cpu_env); tcg_gen_extu_i32_i64(avr, t); set_avr64(rD(ctx->opcode), avr, false); - tcg_temp_free_i32(t); - tcg_temp_free_i64(avr); } static void gen_mtvscr(DisasContext *ctx) @@ -179,63 +169,58 @@ static void gen_mtvscr(DisasContext *ctx) tcg_gen_ld_i32(val, cpu_env, bofs); gen_helper_mtvscr(cpu_env, val); - tcg_temp_free_i32(val); +} + +static void gen_vx_vmul10(DisasContext *ctx, bool add_cin, bool ret_carry) +{ + TCGv_i64 t0; + TCGv_i64 t1; + TCGv_i64 t2; + TCGv_i64 avr; + TCGv_i64 ten, z; + + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + avr = tcg_temp_new_i64(); + ten = tcg_constant_i64(10); + z = tcg_constant_i64(0); + + if (add_cin) { + get_avr64(avr, rA(ctx->opcode), false); + tcg_gen_mulu2_i64(t0, t1, avr, ten); + get_avr64(avr, rB(ctx->opcode), false); + tcg_gen_andi_i64(t2, avr, 0xF); + tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); + set_avr64(rD(ctx->opcode), avr, false); + } else { + get_avr64(avr, rA(ctx->opcode), false); + tcg_gen_mulu2_i64(avr, t2, avr, ten); + set_avr64(rD(ctx->opcode), avr, false); + } + + if (ret_carry) { + get_avr64(avr, rA(ctx->opcode), true); + tcg_gen_mulu2_i64(t0, t1, avr, ten); + tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); + set_avr64(rD(ctx->opcode), avr, false); + set_avr64(rD(ctx->opcode), z, true); + } else { + get_avr64(avr, rA(ctx->opcode), true); + tcg_gen_mul_i64(t0, avr, ten); + tcg_gen_add_i64(avr, t0, t2); + set_avr64(rD(ctx->opcode), avr, true); + } } #define GEN_VX_VMUL10(name, add_cin, ret_carry) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ 
\ - TCGv_i64 t0; \ - TCGv_i64 t1; \ - TCGv_i64 t2; \ - TCGv_i64 avr; \ - TCGv_i64 ten, z; \ - \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - \ - t0 = tcg_temp_new_i64(); \ - t1 = tcg_temp_new_i64(); \ - t2 = tcg_temp_new_i64(); \ - avr = tcg_temp_new_i64(); \ - ten = tcg_const_i64(10); \ - z = tcg_const_i64(0); \ - \ - if (add_cin) { \ - get_avr64(avr, rA(ctx->opcode), false); \ - tcg_gen_mulu2_i64(t0, t1, avr, ten); \ - get_avr64(avr, rB(ctx->opcode), false); \ - tcg_gen_andi_i64(t2, avr, 0xF); \ - tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); \ - set_avr64(rD(ctx->opcode), avr, false); \ - } else { \ - get_avr64(avr, rA(ctx->opcode), false); \ - tcg_gen_mulu2_i64(avr, t2, avr, ten); \ - set_avr64(rD(ctx->opcode), avr, false); \ - } \ - \ - if (ret_carry) { \ - get_avr64(avr, rA(ctx->opcode), true); \ - tcg_gen_mulu2_i64(t0, t1, avr, ten); \ - tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); \ - set_avr64(rD(ctx->opcode), avr, false); \ - set_avr64(rD(ctx->opcode), z, true); \ - } else { \ - get_avr64(avr, rA(ctx->opcode), true); \ - tcg_gen_mul_i64(t0, avr, ten); \ - tcg_gen_add_i64(avr, t0, t2); \ - set_avr64(rD(ctx->opcode), avr, true); \ - } \ - \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ - tcg_temp_free_i64(t2); \ - tcg_temp_free_i64(avr); \ - tcg_temp_free_i64(ten); \ - tcg_temp_free_i64(z); \ -} \ + static void glue(gen_, name)(DisasContext *ctx) \ + { gen_vx_vmul10(ctx, add_cin, ret_carry); } GEN_VX_VMUL10(vmul10uq, 0, 0); GEN_VX_VMUL10(vmul10euq, 1, 0); @@ -279,9 +264,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(rd, ra, rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXFORM_TRANS(name, opc2, opc3) \ @@ -306,9 +288,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(cpu_env, rd, ra, rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXFORM3(name, opc2, opc3) \ @@ -324,10 +303,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rc = gen_avr_ptr(rC(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(rd, ra, rb, rc); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rc); \ - tcg_temp_free_ptr(rd); \ } /* @@ -400,7 +375,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ } \ rb = gen_avr_ptr(rB(ctx->opcode)); \ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \ - tcg_temp_free_ptr(rb); \ } GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0); @@ -457,9 +431,6 @@ static void trans_vmrgew(DisasContext *ctx) get_avr64(avr, VA, false); tcg_gen_deposit_i64(avr, avr, tmp, 0, 32); set_avr64(VT, avr, false); - - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(avr); } static void trans_vmrgow(DisasContext *ctx) @@ -480,10 +451,6 @@ static void trans_vmrgow(DisasContext *ctx) get_avr64(t1, VA, false); tcg_gen_deposit_i64(avr, t0, t1, 32, 32); set_avr64(VT, avr, false); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(avr); } /* @@ -518,10 +485,6 @@ static void trans_lvsl(DisasContext *ctx) */ tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL); set_avr64(VT, result, false); - - tcg_temp_free_i64(result); - tcg_temp_free_i64(sh); - tcg_temp_free(EA); } /* @@ -557,10 +520,6 @@ static void trans_lvsr(DisasContext *ctx) */ tcg_gen_subfi_i64(result, 
0x18191a1b1c1d1e1fULL, sh); set_avr64(VT, result, false); - - tcg_temp_free_i64(result); - tcg_temp_free_i64(sh); - tcg_temp_free(EA); } /* @@ -603,11 +562,6 @@ static void trans_vsl(DisasContext *ctx) tcg_gen_shl_i64(avr, avr, sh); tcg_gen_or_i64(avr, avr, carry); set_avr64(VT, avr, true); - - tcg_temp_free_i64(avr); - tcg_temp_free_i64(sh); - tcg_temp_free_i64(carry); - tcg_temp_free_i64(tmp); } /* @@ -649,11 +603,6 @@ static void trans_vsr(DisasContext *ctx) tcg_gen_shr_i64(avr, avr, sh); tcg_gen_or_i64(avr, avr, carry); set_avr64(VT, avr, false); - - tcg_temp_free_i64(avr); - tcg_temp_free_i64(sh); - tcg_temp_free_i64(carry); - tcg_temp_free_i64(tmp); } /* @@ -722,13 +671,6 @@ static void trans_vgbbd(DisasContext *ctx) for (j = 0; j < 2; j++) { set_avr64(VT, result[j], j); } - - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(tcg_mask); - tcg_temp_free_i64(result[0]); - tcg_temp_free_i64(result[1]); - tcg_temp_free_i64(avr[0]); - tcg_temp_free_i64(avr[1]); } /* @@ -753,8 +695,6 @@ static void trans_vclzw(DisasContext *ctx) tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4); } - - tcg_temp_free_i32(tmp); } /* @@ -779,8 +719,6 @@ static void trans_vclzd(DisasContext *ctx) get_avr64(avr, VB, false); tcg_gen_clzi_i64(avr, avr, 64); set_avr64(VT, avr, false); - - tcg_temp_free_i64(avr); } GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2); @@ -849,9 +787,6 @@ static TCGv_vec do_vrl_mask_vec(unsigned vece, TCGv_vec vrb) /* negate the mask */ tcg_gen_xor_vec(vece, t0, t0, t2); - tcg_temp_free_vec(t1); - tcg_temp_free_vec(t2); - return t0; } @@ -870,9 +805,6 @@ static void gen_vrlnm_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra, /* Rotate and mask */ tcg_gen_rotlv_vec(vece, vrt, vra, n); tcg_gen_and_vec(vece, vrt, vrt, mask); - - tcg_temp_free_vec(n); - tcg_temp_free_vec(mask); } static bool do_vrlnm(DisasContext *ctx, arg_VX *a, int vece) @@ -926,10 +858,6 @@ static void gen_vrlmi_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra, /* Rotate and insert */ tcg_gen_rotlv_vec(vece, tmp, vra, n); tcg_gen_bitsel_vec(vece, vrt, mask, tmp, vrt); - - tcg_temp_free_vec(n); - tcg_temp_free_vec(tmp); - tcg_temp_free_vec(mask); } static bool do_vrlmi(DisasContext *ctx, arg_VX *a, int vece) @@ -978,7 +906,6 @@ static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right, hi = tcg_temp_new_i64(); lo = tcg_temp_new_i64(); t0 = tcg_temp_new_i64(); - t1 = tcg_const_i64(0); get_avr64(lo, a->vra, false); get_avr64(hi, a->vra, true); @@ -989,7 +916,10 @@ static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right, if (right) { tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo); if (alg) { + t1 = tcg_temp_new_i64(); tcg_gen_sari_i64(t1, lo, 63); + } else { + t1 = zero; } tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi); } else { @@ -1024,13 +954,6 @@ static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right, } tcg_gen_or_i64(hi, hi, lo); set_avr64(a->vrt, hi, !right); - - tcg_temp_free_i64(hi); - tcg_temp_free_i64(lo); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(n); - return true; } @@ -1083,11 +1006,6 @@ static void do_vrlq_mask(TCGv_i64 mh, TCGv_i64 ml, TCGv_i64 b, TCGv_i64 e) tcg_gen_xor_i64(mh, mh, t0); tcg_gen_xor_i64(ml, ml, t0); - - tcg_temp_free_i64(th); - tcg_temp_free_i64(tl); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask, @@ -1149,14 +1067,6 @@ static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask, 
set_avr64(a->vrt, t0, true); set_avr64(a->vrt, t1, false); - - tcg_temp_free_i64(ah); - tcg_temp_free_i64(al); - tcg_temp_free_i64(vrb); - tcg_temp_free_i64(n); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -1174,7 +1084,6 @@ static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t, \ glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b); \ tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t); \ tcg_gen_or_vec(VECE, sat, sat, x); \ - tcg_temp_free_vec(x); \ } \ static void glue(gen_, NAME)(DisasContext *ctx) \ { \ @@ -1266,9 +1175,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##opname(cpu_env, rd, ra, rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXRFORM(name, opc2, opc3) \ @@ -1325,10 +1231,6 @@ static void do_vcmp_rc(int vrt) tcg_gen_or_i64(tmp, set, clr); tcg_gen_extrl_i64_i32(cpu_crf[6], tmp); - - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(set); - tcg_temp_free_i64(clr); } static bool do_vcmp(DisasContext *ctx, arg_VC *a, TCGCond cond, int vece) @@ -1377,9 +1279,6 @@ static void gen_vcmpnez_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) tcg_gen_or_vec(vece, t, t, t0); tcg_gen_or_vec(vece, t, t, t1); - - tcg_temp_free_vec(t0); - tcg_temp_free_vec(t1); } static bool do_vcmpnez(DisasContext *ctx, arg_VC *a, int vece) @@ -1453,11 +1352,6 @@ static bool trans_VCMPEQUQ(DisasContext *ctx, arg_VC *a) tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa); tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2); } - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - return true; } @@ -1489,11 +1383,6 @@ static bool do_vcmpgtq(DisasContext *ctx, arg_VC *a, bool sign) tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa); tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2); } - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - return true; } @@ -1508,8 +1397,8 @@ static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign) REQUIRE_INSNS_FLAGS2(ctx, ISA310); REQUIRE_VECTOR(ctx); - vra = tcg_temp_local_new_i64(); - vrb = tcg_temp_local_new_i64(); + vra = tcg_temp_new_i64(); + vrb = tcg_temp_new_i64(); gt = gen_new_label(); lt = gen_new_label(); done = gen_new_label(); @@ -1536,9 +1425,6 @@ static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign) tcg_gen_br(done); gen_set_label(done); - tcg_temp_free_i64(vra); - tcg_temp_free_i64(vrb); - return true; } @@ -1581,8 +1467,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(rd, rb); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \ @@ -1597,8 +1481,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(cpu_env, rd, rb); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4) \ @@ -1612,8 +1494,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(rd, rb); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4) \ @@ -1626,7 +1506,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ } \ rb = gen_avr_ptr(rB(ctx->opcode)); \ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb); \ - tcg_temp_free_ptr(rb); \ } 
GEN_VXFORM_NOA(vupkhsb, 7, 8); GEN_VXFORM_NOA(vupkhsh, 7, 9); @@ -1655,7 +1534,6 @@ static void gen_vprtyb_vec(unsigned vece, TCGv_vec t, TCGv_vec b) tcg_gen_xor_vec(vece, b, tmp, b); } tcg_gen_and_vec(vece, t, b, tcg_constant_vec_matching(t, vece, 1)); - tcg_temp_free_vec(tmp); } /* vprtybw */ @@ -1746,13 +1624,10 @@ static void glue(gen_, name)(DisasContext *ctx) \ gen_exception(ctx, POWERPC_EXCP_VPU); \ return; \ } \ - uimm = tcg_const_i32(UIMM5(ctx->opcode)); \ + uimm = tcg_constant_i32(UIMM5(ctx->opcode)); \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(cpu_env, rd, rb, uimm); \ - tcg_temp_free_i32(uimm); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \ @@ -1773,9 +1648,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(rd, rb, t0); \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8); @@ -1922,12 +1794,6 @@ static bool trans_VGNB(DisasContext *ctx, arg_VX_n *a) tcg_gen_shri_i64(lo, lo, nbits); tcg_gen_or_i64(hi, hi, lo); tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], hi); - - tcg_temp_free_i64(hi); - tcg_temp_free_i64(lo); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -1950,11 +1816,6 @@ static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right, tcg_gen_subfi_tl(rc, 32 - size, rc); } gen_helper(cpu_env, vrt, vra, vrb, rc); - - tcg_temp_free_ptr(vrt); - tcg_temp_free_ptr(vra); - tcg_temp_free_ptr(vrb); - tcg_temp_free(rc); return true; } @@ -1983,31 +1844,22 @@ static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra, } gen_helper(cpu_env, t, rb, idx); - - tcg_temp_free_ptr(t); - tcg_temp_free(idx); - return true; } static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra, int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) { - bool ok; TCGv_i64 val; val = tcg_temp_new_i64(); get_avr64(val, vrb, true); - ok = do_vinsx(ctx, vrt, size, right, ra, val, gen_helper); - - tcg_temp_free_i64(val); - return ok; + return do_vinsx(ctx, vrt, size, right, ra, val, gen_helper); } static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) { - bool ok; TCGv_i64 val; REQUIRE_INSNS_FLAGS2(ctx, ISA310); @@ -2016,10 +1868,7 @@ static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, val = tcg_temp_new_i64(); tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]); - ok = do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper); - - tcg_temp_free_i64(val); - return ok; + return do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper); } static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, @@ -2035,7 +1884,6 @@ static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) { - bool ok; TCGv_i64 val; REQUIRE_INSNS_FLAGS2(ctx, ISA310); @@ -2059,11 +1907,8 @@ static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size, val = tcg_temp_new_i64(); tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]); - ok = do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val, - gen_helper); - - tcg_temp_free_i64(val); - return ok; + return do_vinsx(ctx, a->vrt, size, false, 
tcg_constant_tl(a->uim), val, + gen_helper); } static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size, @@ -2120,12 +1965,8 @@ static void gen_vsldoi(DisasContext *ctx) ra = gen_avr_ptr(rA(ctx->opcode)); rb = gen_avr_ptr(rB(ctx->opcode)); rd = gen_avr_ptr(rD(ctx->opcode)); - sh = tcg_const_i32(VSH(ctx->opcode)); + sh = tcg_constant_i32(VSH(ctx->opcode)); gen_helper_vsldoi(rd, ra, rb, sh); - tcg_temp_free_ptr(ra); - tcg_temp_free_ptr(rb); - tcg_temp_free_ptr(rd); - tcg_temp_free_i32(sh); } static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a) @@ -2148,16 +1989,10 @@ static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a) tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh); tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh); - - tcg_temp_free_i64(t2); } set_avr64(a->vrt, t0, true); set_avr64(a->vrt, t1, false); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -2181,16 +2016,10 @@ static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a) tcg_gen_extract2_i64(t0, t0, t1, a->sh); tcg_gen_extract2_i64(t1, t1, t2, a->sh); - - tcg_temp_free_i64(t2); } set_avr64(a->vrt, t0, false); set_avr64(a->vrt, t1, true); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -2223,15 +2052,13 @@ static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a) tcg_gen_sari_i64(tmp, tmp, 63); set_avr64(a->vrt, tmp, false); set_avr64(a->vrt, tmp, true); - - tcg_temp_free_i64(tmp); return true; } static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece) { const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece, - mask = dup_const(vece, 1 << (elem_width - 1)); + mask = dup_const(vece, 1ULL << (elem_width - 1)); uint64_t i, j; TCGv_i64 lo, hi, t0, t1; @@ -2278,12 +2105,6 @@ static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece) tcg_gen_shri_i64(hi, hi, 64 - elem_count_half); tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half); tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo); - - tcg_temp_free_i64(hi); - tcg_temp_free_i64(lo); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -2304,9 +2125,6 @@ static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a) get_avr64(tmp, a->vrb, true); tcg_gen_shri_i64(tmp, tmp, 63); tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp); - - tcg_temp_free_i64(tmp); - return true; } @@ -2367,12 +2185,6 @@ static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece) set_avr64(a->vrt, lo, false); set_avr64(a->vrt, hi, true); - - tcg_temp_free_i64(hi); - tcg_temp_free_i64(lo); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -2394,9 +2206,6 @@ static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a) tcg_gen_sextract_i64(tmp, tmp, 0, 1); set_avr64(a->vrt, tmp, false); set_avr64(a->vrt, tmp, true); - - tcg_temp_free_i64(tmp); - return true; } @@ -2427,28 +2236,25 @@ static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a) static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece) { - TCGv_i64 rt, vrb, mask; - rt = tcg_const_i64(0); - vrb = tcg_temp_new_i64(); + TCGv_i64 r[2], mask; + + r[0] = tcg_temp_new_i64(); + r[1] = tcg_temp_new_i64(); mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1))); for (int i = 0; i < 2; i++) { - get_avr64(vrb, a->vrb, i); + get_avr64(r[i], a->vrb, i); if (a->mp) { - tcg_gen_and_i64(vrb, mask, vrb); + tcg_gen_and_i64(r[i], mask, r[i]); } else { - tcg_gen_andc_i64(vrb, mask, vrb); + tcg_gen_andc_i64(r[i], mask, r[i]); } - tcg_gen_ctpop_i64(vrb, vrb); - tcg_gen_add_i64(rt, rt, vrb); + tcg_gen_ctpop_i64(r[i], r[i]); } - 
tcg_gen_shli_i64(rt, rt, TARGET_LONG_BITS - 8 + vece); - tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], rt); - - tcg_temp_free_i64(vrb); - tcg_temp_free_i64(rt); - + tcg_gen_add_i64(r[0], r[0], r[1]); + tcg_gen_shli_i64(r[0], r[0], TARGET_LONG_BITS - 8 + vece); + tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], r[0]); return true; } @@ -2473,12 +2279,7 @@ static bool do_vstri(DisasContext *ctx, arg_VX_tb_rc *a, } else { TCGv_i32 discard = tcg_temp_new_i32(); gen_helper(discard, vrt, vrb); - tcg_temp_free_i32(discard); } - - tcg_temp_free_ptr(vrt); - tcg_temp_free_ptr(vrb); - return true; } @@ -2531,12 +2332,6 @@ static bool do_vclrb(DisasContext *ctx, arg_VX *a, bool right) get_avr64(tmp, a->vra, false); tcg_gen_and_i64(tmp, tmp, ml); set_avr64(a->vrt, tmp, false); - - tcg_temp_free_i64(rb); - tcg_temp_free_i64(mh); - tcg_temp_free_i64(ml); - tcg_temp_free_i64(tmp); - return true; } @@ -2560,10 +2355,6 @@ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ } else { \ gen_helper_##name0(cpu_env, rd, ra, rb, rc); \ } \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rc); \ - tcg_temp_free_ptr(rd); \ } GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23) @@ -2579,11 +2370,6 @@ static bool do_va_helper(DisasContext *ctx, arg_VA *a, vrb = gen_avr_ptr(a->vrb); vrc = gen_avr_ptr(a->rc); gen_helper(vrt, vra, vrb, vrc); - tcg_temp_free_ptr(vrt); - tcg_temp_free_ptr(vra); - tcg_temp_free_ptr(vrb); - tcg_temp_free_ptr(vrc); - return true; } @@ -2654,11 +2440,6 @@ static bool do_va_env_helper(DisasContext *ctx, arg_VA *a, vrb = gen_avr_ptr(a->vrb); vrc = gen_avr_ptr(a->rc); gen_helper(cpu_env, vrt, vra, vrb, vrc); - tcg_temp_free_ptr(vrt); - tcg_temp_free_ptr(vra); - tcg_temp_free_ptr(vrb); - tcg_temp_free_ptr(vrc); - return true; } @@ -2751,8 +2532,6 @@ static bool trans_VEXTSD2Q(DisasContext *ctx, arg_VX_tb *a) set_avr64(a->vrt, tmp, false); tcg_gen_sari_i64(tmp, tmp, 63); set_avr64(a->vrt, tmp, true); - - tcg_temp_free_i64(tmp); return true; } @@ -2796,14 +2575,9 @@ static void gen_##op(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ \ - ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \ + ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \ \ gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \ - \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ - tcg_temp_free_i32(ps); \ } #define GEN_BCD2(op) \ @@ -2820,13 +2594,9 @@ static void gen_##op(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ \ - ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \ + ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \ \ gen_helper_##op(cpu_crf[6], rd, rb, ps); \ - \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ - tcg_temp_free_i32(ps); \ } GEN_BCD(bcdadd) @@ -2933,8 +2703,6 @@ static void gen_vsbox(DisasContext *ctx) ra = gen_avr_ptr(rA(ctx->opcode)); rd = gen_avr_ptr(rD(ctx->opcode)); gen_helper_vsbox(rd, ra); - tcg_temp_free_ptr(ra); - tcg_temp_free_ptr(rd); } GEN_VXFORM(vcipher, 4, 20) @@ -2958,11 +2726,8 @@ static void gen_##op(DisasContext *ctx) \ } \ ra = gen_avr_ptr(rA(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ - st_six = tcg_const_i32(rB(ctx->opcode)); \ + st_six = tcg_constant_i32(rB(ctx->opcode)); \ gen_helper_##op(rd, ra, st_six); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rd); \ - tcg_temp_free_i32(st_six); \ } VSHASIGMA(vshasigmaw) @@ -3077,12 +2842,6 @@ static bool trans_VMSUMUDM(DisasContext *ctx, arg_VA *a) set_avr64(a->vrt, rl, false); set_avr64(a->vrt, rh, 
true); - - tcg_temp_free_i64(rl); - tcg_temp_free_i64(rh); - tcg_temp_free_i64(src1); - tcg_temp_free_i64(src2); - return true; } @@ -3128,14 +2887,6 @@ static bool trans_VMSUMCUD(DisasContext *ctx, arg_VA *a) /* Discard 64 more bits to complete the CHOP128(temp >> 128) */ set_avr64(a->vrt, tmp0, false); set_avr64(a->vrt, zero, true); - - tcg_temp_free_i64(tmp0); - tcg_temp_free_i64(tmp1); - tcg_temp_free_i64(prod1h); - tcg_temp_free_i64(prod1l); - tcg_temp_free_i64(prod0h); - tcg_temp_free_i64(prod0l); - return true; } @@ -3149,10 +2900,6 @@ static bool do_vx_helper(DisasContext *ctx, arg_VX *a, rb = gen_avr_ptr(a->vrb); rd = gen_avr_ptr(a->vrt); gen_helper(rd, ra, rb); - tcg_temp_free_ptr(ra); - tcg_temp_free_ptr(rb); - tcg_temp_free_ptr(rd); - return true; } @@ -3237,12 +2984,6 @@ static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even, gen_mul(vrt0, vrt1, vra, vrb); set_avr64(a->vrt, vrt0, false); set_avr64(a->vrt, vrt1, true); - - tcg_temp_free_i64(vra); - tcg_temp_free_i64(vrb); - tcg_temp_free_i64(vrt0); - tcg_temp_free_i64(vrt1); - return true; } @@ -3302,10 +3043,6 @@ static void do_vx_vmulhw_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign) tcg_gen_shri_i64(lh, lh, 32); tcg_gen_deposit_i64(t, hh, lh, 0, 32); - - tcg_temp_free_i64(hh); - tcg_temp_free_i64(lh); - tcg_temp_free_i64(temp); } static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign) @@ -3318,8 +3055,6 @@ static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign) } else { tcg_gen_mulu2_i64(tlow, t, a, b); } - - tcg_temp_free_i64(tlow); } static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign, @@ -3344,13 +3079,7 @@ static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign, set_avr64(a->vrt, vrt, i); } - - tcg_temp_free_i64(vra); - tcg_temp_free_i64(vrb); - tcg_temp_free_i64(vrt); - return true; - } TRANS(VMULHSW, do_vx_mulh, true , do_vx_vmulhw_i64) @@ -3368,7 +3097,6 @@ static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, gen_shr_vec(vece, b, b, 1); tcg_gen_add_vec(vece, t, a, b); tcg_gen_add_vec(vece, t, t, tmp); - tcg_temp_free_vec(tmp); } QEMU_FLATTEN @@ -3538,8 +3266,6 @@ static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) \ tcg_gen_movi_i32(t1, 0); \ tcg_gen_movcond_i32(TCG_COND_NE, b, t0, t1, t0, b); \ DIV(t, a, b); \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_i32(t1); \ } #define DIVU64(NAME, DIV) \ @@ -3564,8 +3290,6 @@ static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) \ tcg_gen_movi_i64(t1, 0); \ tcg_gen_movcond_i64(TCG_COND_NE, b, t0, t1, t0, b); \ DIV(t, a, b); \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ } DIVS32(do_divsw, tcg_gen_div_i32) @@ -3596,9 +3320,6 @@ static void do_dives_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) /* if quotient doesn't fit in 32 bits the result is undefined */ tcg_gen_extrl_i64_i32(t, val1); - - tcg_temp_free_i64(val1); - tcg_temp_free_i64(val2); } static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) @@ -3617,9 +3338,6 @@ static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) /* if quotient doesn't fit in 32 bits the result is undefined */ tcg_gen_extrl_i64_i32(t, val1); - - tcg_temp_free_i64(val1); - tcg_temp_free_i64(val2); } DIVS32(do_divesw, do_dives_i32) diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc index 4deb29ee42..0f5b0056f1 100644 --- a/target/ppc/translate/vsx-impl.c.inc +++ b/target/ppc/translate/vsx-impl.c.inc @@ -40,8 +40,6 @@ static void gen_##name(DisasContext *ctx) \ gen_qemu_##operation(ctx, t0, EA); \ 
set_cpu_vsr(xT(ctx->opcode), t0, true); \ /* NOTE: cpu_vsrl is undefined */ \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ } VSX_LOAD_SCALAR(lxsdx, ld64_i64) @@ -68,8 +66,6 @@ static void gen_lxvd2x(DisasContext *ctx) tcg_gen_addi_tl(EA, EA, 8); gen_qemu_ld64_i64(ctx, t0, EA); set_cpu_vsr(xT(ctx->opcode), t0, false); - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } static void gen_lxvw4x(DisasContext *ctx) @@ -99,8 +95,6 @@ static void gen_lxvw4x(DisasContext *ctx) tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ); tcg_gen_shri_i64(t1, t0, 32); tcg_gen_deposit_i64(xtl, t1, t0, 32, 32); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } else { tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); @@ -108,9 +102,6 @@ static void gen_lxvw4x(DisasContext *ctx) } set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), xtl, false); - tcg_temp_free(EA); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); } static void gen_lxvwsx(DisasContext *ctx) @@ -138,9 +129,6 @@ static void gen_lxvwsx(DisasContext *ctx) data = tcg_temp_new_i32(); tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UL)); tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(xT(ctx->opcode)), 16, 16, data); - - tcg_temp_free(EA); - tcg_temp_free_i32(data); } static void gen_lxvdsx(DisasContext *ctx) @@ -161,15 +149,12 @@ static void gen_lxvdsx(DisasContext *ctx) data = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UQ)); tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(xT(ctx->opcode)), 16, 16, data); - - tcg_temp_free(EA); - tcg_temp_free_i64(data); } static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl, TCGv_i64 inh, TCGv_i64 inl) { - TCGv_i64 mask = tcg_const_i64(0x00FF00FF00FF00FF); + TCGv_i64 mask = tcg_constant_i64(0x00FF00FF00FF00FF); TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64(); @@ -186,10 +171,6 @@ static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl, tcg_gen_shri_i64(t1, inl, 8); tcg_gen_and_i64(t1, t1, mask); tcg_gen_or_i64(outl, t0, t1); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(mask); } static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl, @@ -204,10 +185,8 @@ static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl, tcg_gen_deposit_i64(outh, outh, hi, 32, 32); tcg_gen_shri_i64(outl, lo, 32); tcg_gen_deposit_i64(outl, outl, lo, 32, 32); - - tcg_temp_free_i64(hi); - tcg_temp_free_i64(lo); } + static void gen_lxvh8x(DisasContext *ctx) { TCGv EA; @@ -232,9 +211,6 @@ static void gen_lxvh8x(DisasContext *ctx) } set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), xtl, false); - tcg_temp_free(EA); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); } static void gen_lxvb16x(DisasContext *ctx) @@ -257,9 +233,6 @@ static void gen_lxvb16x(DisasContext *ctx) tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ); set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), xtl, false); - tcg_temp_free(EA); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); } #ifdef TARGET_PPC64 @@ -285,8 +258,6 @@ static void gen_##name(DisasContext *ctx) \ gen_set_access_type(ctx, ACCESS_INT); \ gen_addr_register(ctx, EA); \ gen_helper_##name(cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \ - tcg_temp_free(EA); \ - tcg_temp_free_ptr(xt); \ } VSX_VECTOR_LOAD_STORE_LENGTH(lxvl) @@ -310,8 +281,6 @@ static void gen_##name(DisasContext *ctx) \ gen_addr_reg_index(ctx, EA); \ get_cpu_vsr(t0, xS(ctx->opcode), true); \ gen_qemu_##operation(ctx, t0, EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ } 
VSX_STORE_SCALAR(stxsdx, st64_i64) @@ -338,8 +307,6 @@ static void gen_stxvd2x(DisasContext *ctx) tcg_gen_addi_tl(EA, EA, 8); get_cpu_vsr(t0, xS(ctx->opcode), false); gen_qemu_st64_i64(ctx, t0, EA); - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } static void gen_stxvw4x(DisasContext *ctx) @@ -370,16 +337,11 @@ static void gen_stxvw4x(DisasContext *ctx) tcg_gen_shri_i64(t0, xsl, 32); tcg_gen_deposit_i64(t1, t0, xsl, 32, 32); tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } else { tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ); } - tcg_temp_free(EA); - tcg_temp_free_i64(xsh); - tcg_temp_free_i64(xsl); } static void gen_stxvh8x(DisasContext *ctx) @@ -407,16 +369,11 @@ static void gen_stxvh8x(DisasContext *ctx) tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEUQ); - tcg_temp_free_i64(outh); - tcg_temp_free_i64(outl); } else { tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ); } - tcg_temp_free(EA); - tcg_temp_free_i64(xsh); - tcg_temp_free_i64(xsl); } static void gen_stxvb16x(DisasContext *ctx) @@ -439,9 +396,6 @@ static void gen_stxvb16x(DisasContext *ctx) tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ); - tcg_temp_free(EA); - tcg_temp_free_i64(xsh); - tcg_temp_free_i64(xsl); } static void gen_mfvsrwz(DisasContext *ctx) @@ -462,8 +416,6 @@ static void gen_mfvsrwz(DisasContext *ctx) get_cpu_vsr(xsh, xS(ctx->opcode), true); tcg_gen_ext32u_i64(tmp, xsh); tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp); - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(xsh); } static void gen_mtvsrwa(DisasContext *ctx) @@ -484,8 +436,6 @@ static void gen_mtvsrwa(DisasContext *ctx) tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32s_i64(xsh, tmp); set_cpu_vsr(xT(ctx->opcode), xsh, true); - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(xsh); } static void gen_mtvsrwz(DisasContext *ctx) @@ -506,8 +456,6 @@ static void gen_mtvsrwz(DisasContext *ctx) tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32u_i64(xsh, tmp); set_cpu_vsr(xT(ctx->opcode), xsh, true); - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(xsh); } #if defined(TARGET_PPC64) @@ -528,7 +476,6 @@ static void gen_mfvsrd(DisasContext *ctx) t0 = tcg_temp_new_i64(); get_cpu_vsr(t0, xS(ctx->opcode), true); tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0); - tcg_temp_free_i64(t0); } static void gen_mtvsrd(DisasContext *ctx) @@ -548,7 +495,6 @@ static void gen_mtvsrd(DisasContext *ctx) t0 = tcg_temp_new_i64(); tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]); set_cpu_vsr(xT(ctx->opcode), t0, true); - tcg_temp_free_i64(t0); } static void gen_mfvsrld(DisasContext *ctx) @@ -568,7 +514,6 @@ static void gen_mfvsrld(DisasContext *ctx) t0 = tcg_temp_new_i64(); get_cpu_vsr(t0, xS(ctx->opcode), false); tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0); - tcg_temp_free_i64(t0); } static void gen_mtvsrdd(DisasContext *ctx) @@ -596,7 +541,6 @@ static void gen_mtvsrdd(DisasContext *ctx) tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]); set_cpu_vsr(xT(ctx->opcode), t0, false); - tcg_temp_free_i64(t0); } static void gen_mtvsrws(DisasContext *ctx) @@ -619,7 +563,6 @@ static void gen_mtvsrws(DisasContext *ctx) cpu_gpr[rA(ctx->opcode)], 32, 32); set_cpu_vsr(xT(ctx->opcode), t0, false); 
set_cpu_vsr(xT(ctx->opcode), t0, true); - tcg_temp_free_i64(t0); } #endif @@ -666,14 +609,11 @@ static void glue(gen_, name)(DisasContext *ctx) \ tcg_gen_and_i64(xa, xa, sgm); \ tcg_gen_andc_i64(xb, xb, sgm); \ tcg_gen_or_i64(xb, xb, xa); \ - tcg_temp_free_i64(xa); \ break; \ } \ } \ set_cpu_vsr(xT(ctx->opcode), xb, true); \ set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \ - tcg_temp_free_i64(xb); \ - tcg_temp_free_i64(sgm); \ } VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP) @@ -717,15 +657,10 @@ static void glue(gen_, name)(DisasContext *ctx) \ tcg_gen_and_i64(xah, tmp, sgm); \ tcg_gen_andc_i64(xbh, xbh, sgm); \ tcg_gen_or_i64(xbh, xbh, xah); \ - tcg_temp_free_i64(xah); \ break; \ } \ set_cpu_vsr(xt, xbh, true); \ set_cpu_vsr(xt, xbl, false); \ - tcg_temp_free_i64(xbl); \ - tcg_temp_free_i64(xbh); \ - tcg_temp_free_i64(sgm); \ - tcg_temp_free_i64(tmp); \ } VSX_SCALAR_MOVE_QP(xsabsqp, OP_ABS, SGN_MASK_DP) @@ -870,11 +805,7 @@ static void gen_##name(DisasContext *ctx) \ } else { \ ignored = tcg_temp_new_i32(); \ gen_helper_##name(ignored, cpu_env, xt, xa, xb); \ - tcg_temp_free_i32(ignored); \ } \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(xa); \ - tcg_temp_free_ptr(xb); \ } VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX) @@ -894,15 +825,11 @@ static bool trans_XSCVQPDP(DisasContext *ctx, arg_X_tb_rc *a) REQUIRE_INSNS_FLAGS2(ctx, ISA300); REQUIRE_VSX(ctx); - ro = tcg_const_i32(a->rc); + ro = tcg_constant_i32(a->rc); xt = gen_avr_ptr(a->rt); xb = gen_avr_ptr(a->rb); gen_helper_XSCVQPDP(cpu_env, ro, xt, xb); - tcg_temp_free_i32(ro); - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xb); - return true; } @@ -917,9 +844,6 @@ static bool do_helper_env_X_tb(DisasContext *ctx, arg_X_tb *a, xt = gen_avr_ptr(a->rt); xb = gen_avr_ptr(a->rb); gen_helper(cpu_env, xt, xb); - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xb); - return true; } @@ -936,9 +860,8 @@ static void gen_##name(DisasContext *ctx) \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ - opc = tcg_const_i32(ctx->opcode); \ + opc = tcg_constant_i32(ctx->opcode); \ gen_helper_##name(cpu_env, opc); \ - tcg_temp_free_i32(opc); \ } #define GEN_VSX_HELPER_X3(name, op1, op2, inval, type) \ @@ -953,9 +876,6 @@ static void gen_##name(DisasContext *ctx) \ xa = gen_vsr_ptr(xA(ctx->opcode)); \ xb = gen_vsr_ptr(xB(ctx->opcode)); \ gen_helper_##name(cpu_env, xt, xa, xb); \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(xa); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_X2(name, op1, op2, inval, type) \ @@ -969,8 +889,6 @@ static void gen_##name(DisasContext *ctx) \ xt = gen_vsr_ptr(xT(ctx->opcode)); \ xb = gen_vsr_ptr(xB(ctx->opcode)); \ gen_helper_##name(cpu_env, xt, xb); \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type) \ @@ -982,13 +900,10 @@ static void gen_##name(DisasContext *ctx) \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ - opc = tcg_const_i32(ctx->opcode); \ + opc = tcg_constant_i32(ctx->opcode); \ xa = gen_vsr_ptr(xA(ctx->opcode)); \ xb = gen_vsr_ptr(xB(ctx->opcode)); \ gen_helper_##name(cpu_env, opc, xa, xb); \ - tcg_temp_free_i32(opc); \ - tcg_temp_free_ptr(xa); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_X1(name, op1, op2, inval, type) \ @@ -1000,11 +915,9 @@ static void gen_##name(DisasContext *ctx) \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ - opc = tcg_const_i32(ctx->opcode); \ + opc = tcg_constant_i32(ctx->opcode); \ xb = gen_vsr_ptr(xB(ctx->opcode)); \ gen_helper_##name(cpu_env, opc, xb); \ - tcg_temp_free_i32(opc); 
\ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_R3(name, op1, op2, inval, type) \ @@ -1016,15 +929,11 @@ static void gen_##name(DisasContext *ctx) \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ - opc = tcg_const_i32(ctx->opcode); \ + opc = tcg_constant_i32(ctx->opcode); \ xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \ xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \ gen_helper_##name(cpu_env, opc, xt, xa, xb); \ - tcg_temp_free_i32(opc); \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(xa); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_R2(name, op1, op2, inval, type) \ @@ -1036,13 +945,10 @@ static void gen_##name(DisasContext *ctx) \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ - opc = tcg_const_i32(ctx->opcode); \ + opc = tcg_constant_i32(ctx->opcode); \ xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \ gen_helper_##name(cpu_env, opc, xt, xb); \ - tcg_temp_free_i32(opc); \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type) \ @@ -1054,13 +960,10 @@ static void gen_##name(DisasContext *ctx) \ gen_exception(ctx, POWERPC_EXCP_VSXU); \ return; \ } \ - opc = tcg_const_i32(ctx->opcode); \ + opc = tcg_constant_i32(ctx->opcode); \ xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \ gen_helper_##name(cpu_env, opc, xa, xb); \ - tcg_temp_free_i32(opc); \ - tcg_temp_free_ptr(xa); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \ @@ -1078,8 +981,6 @@ static void gen_##name(DisasContext *ctx) \ gen_helper_##name(t1, cpu_env, t0); \ set_cpu_vsr(xT(ctx->opcode), t1, true); \ set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ } GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX) @@ -1291,8 +1192,6 @@ static bool do_XX2_bf_uim(DisasContext *ctx, arg_XX2_bf_uim *a, bool vsr, REQUIRE_VSX(ctx); xb = vsr ? 
gen_vsr_ptr(a->xb) : gen_avr_ptr(a->xb); gen_helper(cpu_env, tcg_constant_i32(a->bf), tcg_constant_i32(a->uim), xb); - tcg_temp_free_ptr(xb); - return true; } @@ -1314,9 +1213,6 @@ bool trans_XSCVSPDPN(DisasContext *ctx, arg_XX2 *a) set_cpu_vsr(a->xt, tmp, true); set_cpu_vsr(a->xt, tcg_constant_i64(0), false); - - tcg_temp_free_i64(tmp); - return true; } @@ -1413,11 +1309,6 @@ static bool trans_XXPERM(DisasContext *ctx, arg_XX3 *a) xb = gen_vsr_ptr(a->xb); gen_helper_VPERM(xt, xa, xt, xb); - - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xa); - tcg_temp_free_ptr(xb); - return true; } @@ -1433,11 +1324,6 @@ static bool trans_XXPERMR(DisasContext *ctx, arg_XX3 *a) xb = gen_vsr_ptr(a->xb); gen_helper_VPERMR(xt, xa, xt, xb); - - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xa); - tcg_temp_free_ptr(xb); - return true; } @@ -1458,8 +1344,6 @@ static bool trans_XXPERMDI(DisasContext *ctx, arg_XX3_dm *a) set_cpu_vsr(a->xt, t0, true); set_cpu_vsr(a->xt, t1, false); - - tcg_temp_free_i64(t1); } else { get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0); set_cpu_vsr(a->xt, t0, true); @@ -1467,9 +1351,6 @@ static bool trans_XXPERMDI(DisasContext *ctx, arg_XX3_dm *a) get_cpu_vsr(t0, a->xb, (a->dm & 1) == 0); set_cpu_vsr(a->xt, t0, false); } - - tcg_temp_free_i64(t0); - return true; } @@ -1486,12 +1367,6 @@ static bool trans_XXPERMX(DisasContext *ctx, arg_8RR_XX4_uim3 *a) xc = gen_vsr_ptr(a->xc); gen_helper_XXPERMX(xt, xa, xb, xc, tcg_constant_tl(a->uim3)); - - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xa); - tcg_temp_free_ptr(xb); - tcg_temp_free_ptr(xc); - return true; } @@ -1514,10 +1389,6 @@ static bool do_xxgenpcv(DisasContext *ctx, arg_X_imm5 *a, vrb = gen_avr_ptr(a->vrb); fn[a->imm](xt, vrb); - - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(vrb); - return true; } @@ -1550,12 +1421,6 @@ static bool do_xsmadd(DisasContext *ctx, int tgt, int src1, int src2, int src3, s3 = gen_vsr_ptr(src3); gen_helper(cpu_env, t, s1, s2, s3); - - tcg_temp_free_ptr(t); - tcg_temp_free_ptr(s1); - tcg_temp_free_ptr(s2); - tcg_temp_free_ptr(s3); - return true; } @@ -1636,10 +1501,6 @@ static void gen_##name(DisasContext *ctx) \ s3 = gen_vsr_ptr(xB(ctx->opcode)); \ } \ gen_helper_##name(cpu_env, xt, s1, s2, s3); \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(s1); \ - tcg_temp_free_ptr(s2); \ - tcg_temp_free_ptr(s3); \ } GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX) @@ -1673,11 +1534,6 @@ static void gen_xxbrd(DisasContext *ctx) tcg_gen_bswap64_i64(xtl, xbl); set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static void gen_xxbrh(DisasContext *ctx) @@ -1701,11 +1557,6 @@ static void gen_xxbrh(DisasContext *ctx) gen_bswap16x8(xth, xtl, xbh, xbl); set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static void gen_xxbrq(DisasContext *ctx) @@ -1733,12 +1584,6 @@ static void gen_xxbrq(DisasContext *ctx) set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_gen_mov_i64(xth, t0); set_cpu_vsr(xT(ctx->opcode), xth, true); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static void gen_xxbrw(DisasContext *ctx) @@ -1762,11 +1607,6 @@ static void gen_xxbrw(DisasContext *ctx) gen_bswap32x4(xth, xtl, xbh, xbl); set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), 
xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } #define VSX_LOGICAL(name, vece, tcg_op) \ @@ -1813,11 +1653,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ set_cpu_vsr(xT(ctx->opcode), tmp, true); \ tcg_gen_deposit_i64(tmp, b1, a1, 32, 32); \ set_cpu_vsr(xT(ctx->opcode), tmp, false); \ - tcg_temp_free_i64(a0); \ - tcg_temp_free_i64(a1); \ - tcg_temp_free_i64(b0); \ - tcg_temp_free_i64(b1); \ - tcg_temp_free_i64(tmp); \ } VSX_XXMRG(xxmrghw, 1) @@ -1974,13 +1809,6 @@ static bool trans_XVTLSBB(DisasContext *ctx, arg_XX2_bf_xb *a) tcg_gen_or_i64(t0, all_false, all_true); tcg_gen_extrl_i64_i32(cpu_crf[a->bf], t0); - - tcg_temp_free_i64(xb); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(all_true); - tcg_temp_free_i64(all_false); - return true; } @@ -2012,7 +1840,6 @@ static void gen_xxsldwi(DisasContext *ctx) get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xtl, xtl, t0); - tcg_temp_free_i64(t0); break; } case 2: { @@ -2032,16 +1859,12 @@ static void gen_xxsldwi(DisasContext *ctx) get_cpu_vsr(t0, xB(ctx->opcode), false); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xtl, xtl, t0); - tcg_temp_free_i64(t0); break; } } set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); } static bool do_vsx_extract_insert(DisasContext *ctx, arg_XX2_uim *a, @@ -2064,10 +1887,7 @@ static bool do_vsx_extract_insert(DisasContext *ctx, arg_XX2_uim *a, xt = gen_vsr_ptr(a->xt); xb = gen_vsr_ptr(a->xb); gen_helper(xt, xb, tcg_constant_i32(a->uim)); - tcg_temp_free_ptr(xb); - tcg_temp_free_ptr(xt); } - return true; } @@ -2086,7 +1906,6 @@ static void gen_xsxexpdp(DisasContext *ctx) t0 = tcg_temp_new_i64(); get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_extract_i64(rt, t0, 52, 11); - tcg_temp_free_i64(t0); } static void gen_xsxexpqp(DisasContext *ctx) @@ -2108,10 +1927,6 @@ static void gen_xsxexpqp(DisasContext *ctx) set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_movi_i64(xtl, 0); set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); - - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); } static void gen_xsiexpdp(DisasContext *ctx) @@ -2133,8 +1948,6 @@ static void gen_xsiexpdp(DisasContext *ctx) tcg_gen_or_i64(xth, xth, t0); set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(xth); } static void gen_xsiexpqp(DisasContext *ctx) @@ -2167,13 +1980,6 @@ static void gen_xsiexpqp(DisasContext *ctx) set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_mov_i64(xtl, xal); set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xah); - tcg_temp_free_i64(xal); - tcg_temp_free_i64(xbh); } static void gen_xsxsigdp(DisasContext *ctx) @@ -2188,8 +1994,8 @@ static void gen_xsxsigdp(DisasContext *ctx) exp = tcg_temp_new_i64(); t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); - zr = tcg_const_i64(0); - nan = tcg_const_i64(2047); + zr = tcg_constant_i64(0); + nan = tcg_constant_i64(2047); get_cpu_vsr(t1, xB(ctx->opcode), true); tcg_gen_extract_i64(exp, t1, 52, 11); @@ -2198,12 +2004,6 @@ static void gen_xsxsigdp(DisasContext *ctx) tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); get_cpu_vsr(t1, xB(ctx->opcode), true); tcg_gen_deposit_i64(rt, t0, t1, 0, 52); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); 
- tcg_temp_free_i64(exp); - tcg_temp_free_i64(zr); - tcg_temp_free_i64(nan); } static void gen_xsxsigqp(DisasContext *ctx) @@ -2226,8 +2026,8 @@ static void gen_xsxsigqp(DisasContext *ctx) get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false); exp = tcg_temp_new_i64(); t0 = tcg_temp_new_i64(); - zr = tcg_const_i64(0); - nan = tcg_const_i64(32767); + zr = tcg_constant_i64(0); + nan = tcg_constant_i64(32767); tcg_gen_extract_i64(exp, xbh, 48, 15); tcg_gen_movi_i64(t0, 0x0001000000000000); @@ -2237,15 +2037,6 @@ static void gen_xsxsigqp(DisasContext *ctx) set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_mov_i64(xtl, xbl); set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(exp); - tcg_temp_free_i64(zr); - tcg_temp_free_i64(nan); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } #endif @@ -2285,14 +2076,6 @@ static void gen_xviexpsp(DisasContext *ctx) tcg_gen_shli_i64(t0, t0, 23); tcg_gen_or_i64(xtl, xtl, t0); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xah); - tcg_temp_free_i64(xal); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static void gen_xviexpdp(DisasContext *ctx) @@ -2324,13 +2107,6 @@ static void gen_xviexpdp(DisasContext *ctx) tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xah); - tcg_temp_free_i64(xal); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static void gen_xvxexpsp(DisasContext *ctx) @@ -2357,11 +2133,6 @@ static void gen_xvxexpsp(DisasContext *ctx) tcg_gen_shri_i64(xtl, xbl, 23); tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static void gen_xvxexpdp(DisasContext *ctx) @@ -2386,11 +2157,6 @@ static void gen_xvxexpdp(DisasContext *ctx) set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_extract_i64(xtl, xbl, 52, 11); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static bool trans_XVXSIGSP(DisasContext *ctx, arg_XX2 *a) @@ -2404,10 +2170,6 @@ static bool trans_XVXSIGSP(DisasContext *ctx, arg_XX2 *a) b = gen_vsr_ptr(a->xb); gen_helper_XVXSIGSP(t, b); - - tcg_temp_free_ptr(t); - tcg_temp_free_ptr(b); - return true; } @@ -2431,8 +2193,8 @@ static void gen_xvxsigdp(DisasContext *ctx) get_cpu_vsr(xbl, xB(ctx->opcode), false); exp = tcg_temp_new_i64(); t0 = tcg_temp_new_i64(); - zr = tcg_const_i64(0); - nan = tcg_const_i64(2047); + zr = tcg_constant_i64(0); + nan = tcg_constant_i64(2047); tcg_gen_extract_i64(exp, xbh, 52, 11); tcg_gen_movi_i64(t0, 0x0010000000000000); @@ -2447,15 +2209,6 @@ static void gen_xvxsigdp(DisasContext *ctx) tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(exp); - tcg_temp_free_i64(zr); - tcg_temp_free_i64(nan); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ, @@ -2510,9 +2263,6 @@ static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ, set_cpu_vsr(rt2, xt, ctx->le_mode); } } - - tcg_temp_free(ea); - tcg_temp_free_i64(xt); return true; } @@ 
-2577,10 +2327,6 @@ static bool do_lstxsd(DisasContext *ctx, int rt, int ra, TCGv displ, bool store) set_cpu_vsr(rt + 32, xt, true); set_cpu_vsr(rt + 32, tcg_constant_i64(0), false); } - - tcg_temp_free(ea); - tcg_temp_free_i64(xt); - return true; } @@ -2620,10 +2366,6 @@ static bool do_lstxssp(DisasContext *ctx, int rt, int ra, TCGv displ, bool store set_cpu_vsr(rt + 32, xt, true); set_cpu_vsr(rt + 32, tcg_constant_i64(0), false); } - - tcg_temp_free(ea); - tcg_temp_free_i64(xt); - return true; } @@ -2684,9 +2426,6 @@ static bool do_lstrm(DisasContext *ctx, arg_X *a, MemOp mop, bool store) set_cpu_vsr(a->rt, xt, false); set_cpu_vsr(a->rt, tcg_constant_i64(0), true); } - - tcg_temp_free(ea); - tcg_temp_free_i64(xt); return true; } @@ -2710,7 +2449,8 @@ static void gen_xxeval_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c, TCGv_i64 conj, disj; conj = tcg_temp_new_i64(); - disj = tcg_const_i64(0); + disj = tcg_temp_new_i64(); + tcg_gen_movi_i64(disj, 0); /* Iterate over set bits from the least to the most significant bit */ while (imm) { @@ -2741,9 +2481,6 @@ static void gen_xxeval_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c, } tcg_gen_mov_i64(t, disj); - - tcg_temp_free_i64(conj); - tcg_temp_free_i64(disj); } static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, @@ -2756,8 +2493,9 @@ static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, int bit; TCGv_vec disj, conj; - disj = tcg_const_zeros_vec_matching(t); conj = tcg_temp_new_vec_matching(t); + disj = tcg_temp_new_vec_matching(t); + tcg_gen_dupi_vec(vece, disj, 0); /* Iterate over set bits from the least to the most significant bit */ while (imm) { @@ -2788,9 +2526,6 @@ static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, } tcg_gen_mov_vec(t, disj); - - tcg_temp_free_vec(disj); - tcg_temp_free_vec(conj); } static bool trans_XXEVAL(DisasContext *ctx, arg_8RR_XX4_imm *a) @@ -2813,7 +2548,7 @@ static bool trans_XXEVAL(DisasContext *ctx, arg_8RR_XX4_imm *a) /* Equivalent functions that can be implemented with a single gen_gvec */ switch (a->imm) { - case 0b00000000: /* true */ + case 0b00000000: /* false */ set_cpu_vsr(a->xt, tcg_constant_i64(0), true); set_cpu_vsr(a->xt, tcg_constant_i64(0), false); break; @@ -2925,7 +2660,6 @@ static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, TCGv_vec tmp = tcg_temp_new_vec_matching(c); tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1); tcg_gen_bitsel_vec(vece, t, tmp, b, a); - tcg_temp_free_vec(tmp); } static bool do_xxblendv(DisasContext *ctx, arg_8RR_XX4 *a, unsigned vece) @@ -2987,11 +2721,6 @@ static bool do_helper_XX3(DisasContext *ctx, arg_XX3 *a, xb = gen_vsr_ptr(a->xb); helper(cpu_env, xt, xa, xb); - - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xa); - tcg_temp_free_ptr(xb); - return true; } @@ -3013,11 +2742,6 @@ static bool do_helper_X(arg_X *a, rb = gen_avr_ptr(a->rb); helper(cpu_env, rt, ra, rb); - - tcg_temp_free_ptr(rt); - tcg_temp_free_ptr(ra); - tcg_temp_free_ptr(rb); - return true; } @@ -3047,10 +2771,6 @@ static bool trans_XVCVSPBF16(DisasContext *ctx, arg_XX2 *a) xb = gen_vsr_ptr(a->xb); gen_helper_XVCVSPBF16(cpu_env, xt, xb); - - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xb); - return true; } @@ -3114,9 +2834,6 @@ static bool do_ger(DisasContext *ctx, arg_MMIRR_XX3 *a, mask = ger_pack_masks(a->pmsk, a->ymsk, a->xmsk); helper(cpu_env, xa, xb, xt, tcg_constant_i32(mask)); - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xa); - tcg_temp_free_ptr(xb); return true; } diff --git 
a/target/riscv/arch_dump.c b/target/riscv/arch_dump.c index 736a232956..434c8a3dbb 100644 --- a/target/riscv/arch_dump.c +++ b/target/riscv/arch_dump.c @@ -1,4 +1,5 @@ -/* Support for writing ELF notes for RISC-V architectures +/* + * Support for writing ELF notes for RISC-V architectures * * Copyright (C) 2021 Huawei Technologies Co., Ltd * @@ -180,8 +181,8 @@ int cpu_get_dump_info(ArchDumpInfo *info, info->d_class = ELFCLASS32; #endif - info->d_endian = (env->mstatus & MSTATUS_UBE) != 0 - ? ELFDATA2MSB : ELFDATA2LSB; + info->d_endian = (env->mstatus & MSTATUS_UBE) != 0 ? + ELFDATA2MSB : ELFDATA2LSB; return 0; } diff --git a/target/riscv/cpu-param.h b/target/riscv/cpu-param.h index ebaf26d26d..b2a9396dec 100644 --- a/target/riscv/cpu-param.h +++ b/target/riscv/cpu-param.h @@ -27,6 +27,5 @@ * - S mode HLV/HLVX/HSV 0b101 * - M mode HLV/HLVX/HSV 0b111 */ -#define NB_MMU_MODES 8 #endif diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h new file mode 100644 index 0000000000..04af50983e --- /dev/null +++ b/target/riscv/cpu-qom.h @@ -0,0 +1,71 @@ +/* + * QEMU RISC-V CPU QOM header + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef RISCV_CPU_QOM_H +#define RISCV_CPU_QOM_H + +#include "hw/core/cpu.h" +#include "qom/object.h" + +#define TYPE_RISCV_CPU "riscv-cpu" +#define TYPE_RISCV_DYNAMIC_CPU "riscv-dynamic-cpu" + +#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU +#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX) +#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU + +#define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any") +#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32") +#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64") +#define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128") +#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex") +#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c") +#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31") +#define TYPE_RISCV_CPU_SIFIVE_E34 RISCV_CPU_TYPE_NAME("sifive-e34") +#define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51") +#define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34") +#define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54") +#define TYPE_RISCV_CPU_THEAD_C906 RISCV_CPU_TYPE_NAME("thead-c906") +#define TYPE_RISCV_CPU_VEYRON_V1 RISCV_CPU_TYPE_NAME("veyron-v1") +#define TYPE_RISCV_CPU_HOST RISCV_CPU_TYPE_NAME("host") + +#if defined(TARGET_RISCV32) +# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE32 +#elif defined(TARGET_RISCV64) +# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE64 +#endif + +typedef struct CPUArchState CPURISCVState; + +OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU) + +/** + * RISCVCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * + * A RISCV CPU model. 
+ */ +struct RISCVCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + DeviceRealize parent_realize; + ResettablePhases parent_phases; +}; +#endif /* RISCV_CPU_QOM_H */ diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 93b52b826c..db0875fb43 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -28,12 +28,14 @@ #include "time_helper.h" #include "exec/exec-all.h" #include "qapi/error.h" +#include "qapi/visitor.h" #include "qemu/error-report.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "fpu/softfloat-helpers.h" #include "sysemu/kvm.h" #include "kvm_riscv.h" +#include "tcg/tcg.h" /* RISC-V CPU definitions */ @@ -46,15 +48,14 @@ static const char riscv_single_letter_exts[] = "IEMAFDQCPVH"; struct isa_ext_data { const char *name; - bool multi_letter; int min_version; int ext_enable_offset; }; -#define ISA_EXT_DATA_ENTRY(_name, _m_letter, _min_ver, _prop) \ -{#_name, _m_letter, _min_ver, offsetof(struct RISCVCPUConfig, _prop)} +#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ + {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)} -/** +/* * Here are the ordering rules of extension naming defined by RISC-V * specification : * 1. All extensions should be separated from other multi-letter extensions @@ -70,58 +71,73 @@ struct isa_ext_data { * 4. Non-standard extensions (starts with 'X') must be listed after all * standard extensions. They must be separated from other multi-letter * extensions by an underscore. + * + * Single letter extensions are checked in riscv_cpu_validate_misa_priv() + * instead. */ static const struct isa_ext_data isa_edata_arr[] = { - ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h), - ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_10_0, ext_v), - ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr), - ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei), - ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause), - ISA_EXT_DATA_ENTRY(zawrs, true, PRIV_VERSION_1_12_0, ext_zawrs), - ISA_EXT_DATA_ENTRY(zfh, true, PRIV_VERSION_1_11_0, ext_zfh), - ISA_EXT_DATA_ENTRY(zfhmin, true, PRIV_VERSION_1_12_0, ext_zfhmin), - ISA_EXT_DATA_ENTRY(zfinx, true, PRIV_VERSION_1_12_0, ext_zfinx), - ISA_EXT_DATA_ENTRY(zdinx, true, PRIV_VERSION_1_12_0, ext_zdinx), - ISA_EXT_DATA_ENTRY(zba, true, PRIV_VERSION_1_12_0, ext_zba), - ISA_EXT_DATA_ENTRY(zbb, true, PRIV_VERSION_1_12_0, ext_zbb), - ISA_EXT_DATA_ENTRY(zbc, true, PRIV_VERSION_1_12_0, ext_zbc), - ISA_EXT_DATA_ENTRY(zbkb, true, PRIV_VERSION_1_12_0, ext_zbkb), - ISA_EXT_DATA_ENTRY(zbkc, true, PRIV_VERSION_1_12_0, ext_zbkc), - ISA_EXT_DATA_ENTRY(zbkx, true, PRIV_VERSION_1_12_0, ext_zbkx), - ISA_EXT_DATA_ENTRY(zbs, true, PRIV_VERSION_1_12_0, ext_zbs), - ISA_EXT_DATA_ENTRY(zk, true, PRIV_VERSION_1_12_0, ext_zk), - ISA_EXT_DATA_ENTRY(zkn, true, PRIV_VERSION_1_12_0, ext_zkn), - ISA_EXT_DATA_ENTRY(zknd, true, PRIV_VERSION_1_12_0, ext_zknd), - ISA_EXT_DATA_ENTRY(zkne, true, PRIV_VERSION_1_12_0, ext_zkne), - ISA_EXT_DATA_ENTRY(zknh, true, PRIV_VERSION_1_12_0, ext_zknh), - ISA_EXT_DATA_ENTRY(zkr, true, PRIV_VERSION_1_12_0, ext_zkr), - ISA_EXT_DATA_ENTRY(zks, true, PRIV_VERSION_1_12_0, ext_zks), - ISA_EXT_DATA_ENTRY(zksed, true, PRIV_VERSION_1_12_0, ext_zksed), - ISA_EXT_DATA_ENTRY(zksh, true, PRIV_VERSION_1_12_0, ext_zksh), - ISA_EXT_DATA_ENTRY(zkt, true, PRIV_VERSION_1_12_0, ext_zkt), - ISA_EXT_DATA_ENTRY(zve32f, true, PRIV_VERSION_1_12_0, ext_zve32f), - ISA_EXT_DATA_ENTRY(zve64f, true, PRIV_VERSION_1_12_0, ext_zve64f), - 
ISA_EXT_DATA_ENTRY(zhinx, true, PRIV_VERSION_1_12_0, ext_zhinx), - ISA_EXT_DATA_ENTRY(zhinxmin, true, PRIV_VERSION_1_12_0, ext_zhinxmin), - ISA_EXT_DATA_ENTRY(smaia, true, PRIV_VERSION_1_12_0, ext_smaia), - ISA_EXT_DATA_ENTRY(ssaia, true, PRIV_VERSION_1_12_0, ext_ssaia), - ISA_EXT_DATA_ENTRY(sscofpmf, true, PRIV_VERSION_1_12_0, ext_sscofpmf), - ISA_EXT_DATA_ENTRY(sstc, true, PRIV_VERSION_1_12_0, ext_sstc), - ISA_EXT_DATA_ENTRY(svinval, true, PRIV_VERSION_1_12_0, ext_svinval), - ISA_EXT_DATA_ENTRY(svnapot, true, PRIV_VERSION_1_12_0, ext_svnapot), - ISA_EXT_DATA_ENTRY(svpbmt, true, PRIV_VERSION_1_12_0, ext_svpbmt), - ISA_EXT_DATA_ENTRY(xtheadba, true, PRIV_VERSION_1_11_0, ext_xtheadba), - ISA_EXT_DATA_ENTRY(xtheadbb, true, PRIV_VERSION_1_11_0, ext_xtheadbb), - ISA_EXT_DATA_ENTRY(xtheadbs, true, PRIV_VERSION_1_11_0, ext_xtheadbs), - ISA_EXT_DATA_ENTRY(xtheadcmo, true, PRIV_VERSION_1_11_0, ext_xtheadcmo), - ISA_EXT_DATA_ENTRY(xtheadcondmov, true, PRIV_VERSION_1_11_0, ext_xtheadcondmov), - ISA_EXT_DATA_ENTRY(xtheadfmemidx, true, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), - ISA_EXT_DATA_ENTRY(xtheadfmv, true, PRIV_VERSION_1_11_0, ext_xtheadfmv), - ISA_EXT_DATA_ENTRY(xtheadmac, true, PRIV_VERSION_1_11_0, ext_xtheadmac), - ISA_EXT_DATA_ENTRY(xtheadmemidx, true, PRIV_VERSION_1_11_0, ext_xtheadmemidx), - ISA_EXT_DATA_ENTRY(xtheadmempair, true, PRIV_VERSION_1_11_0, ext_xtheadmempair), - ISA_EXT_DATA_ENTRY(xtheadsync, true, PRIV_VERSION_1_11_0, ext_xtheadsync), - ISA_EXT_DATA_ENTRY(xventanacondops, true, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), + ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom), + ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz), + ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), + ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr), + ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei), + ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), + ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), + ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), + ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), + ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), + ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), + ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), + ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), + ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), + ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), + ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), + ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), + ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), + ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), + ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), + ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), + ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), + ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), + ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), + ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), + ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), + ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), + ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), + ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), + ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), + ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), + ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), + ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), + ISA_EXT_DATA_ENTRY(zksh, 
PRIV_VERSION_1_12_0, ext_zksh), + ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), + ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), + ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), + ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), + ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), + ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), + ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), + ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), + ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), + ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), + ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), + ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), + ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), + ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), + ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), + ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), + ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), + ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), + ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), + ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), + ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), + ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), + ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), + ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), + ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), + ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), + ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), + ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), }; static bool isa_ext_is_enabled(RISCVCPU *cpu, @@ -141,29 +157,29 @@ static void isa_ext_update_enabled(RISCVCPU *cpu, } const char * const riscv_int_regnames[] = { - "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", - "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", - "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", - "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", - "x28/t3", "x29/t4", "x30/t5", "x31/t6" + "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", + "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", + "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", + "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", + "x28/t3", "x29/t4", "x30/t5", "x31/t6" }; const char * const riscv_int_regnamesh[] = { - "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", - "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", - "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", - "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", - "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", - "x30h/t5h", "x31h/t6h" + "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", + "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", + "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", + "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", + "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", + "x30h/t5h", "x31h/t6h" }; const char * 
const riscv_fpr_regnames[] = { - "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", - "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", - "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", - "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", - "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", - "f30/ft10", "f31/ft11" + "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", + "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", + "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", + "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", + "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", + "f30/ft10", "f31/ft11" }; static const char * const riscv_excp_names[] = { @@ -212,7 +228,7 @@ static const char * const riscv_intr_names[] = { "reserved" }; -static void register_cpu_props(DeviceState *dev); +static void riscv_cpu_add_user_properties(Object *obj); const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) { @@ -241,6 +257,89 @@ static void set_vext_version(CPURISCVState *env, int vext_ver) env->vext_ver = vext_ver; } +#ifndef CONFIG_USER_ONLY +static uint8_t satp_mode_from_str(const char *satp_mode_str) +{ + if (!strncmp(satp_mode_str, "mbare", 5)) { + return VM_1_10_MBARE; + } + + if (!strncmp(satp_mode_str, "sv32", 4)) { + return VM_1_10_SV32; + } + + if (!strncmp(satp_mode_str, "sv39", 4)) { + return VM_1_10_SV39; + } + + if (!strncmp(satp_mode_str, "sv48", 4)) { + return VM_1_10_SV48; + } + + if (!strncmp(satp_mode_str, "sv57", 4)) { + return VM_1_10_SV57; + } + + if (!strncmp(satp_mode_str, "sv64", 4)) { + return VM_1_10_SV64; + } + + g_assert_not_reached(); +} + +uint8_t satp_mode_max_from_map(uint32_t map) +{ + /* map here has at least one bit set, so no problem with clz */ + return 31 - __builtin_clz(map); +} + +const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) +{ + if (is_32_bit) { + switch (satp_mode) { + case VM_1_10_SV32: + return "sv32"; + case VM_1_10_MBARE: + return "none"; + } + } else { + switch (satp_mode) { + case VM_1_10_SV64: + return "sv64"; + case VM_1_10_SV57: + return "sv57"; + case VM_1_10_SV48: + return "sv48"; + case VM_1_10_SV39: + return "sv39"; + case VM_1_10_MBARE: + return "none"; + } + } + + g_assert_not_reached(); +} + +static void set_satp_mode_max_supported(RISCVCPU *cpu, + uint8_t satp_mode) +{ + bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; + const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; + + for (int i = 0; i <= satp_mode; ++i) { + if (valid_vm[i]) { + cpu->cfg.satp_mode.supported |= (1 << i); + } + } +} + +/* Set the satp mode to the max supported */ +static void set_satp_mode_default_map(RISCVCPU *cpu) +{ + cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; +} +#endif + static void riscv_any_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; @@ -249,8 +348,14 @@ static void riscv_any_cpu_init(Object *obj) #elif defined(TARGET_RISCV64) set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); #endif + +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), + riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 
+ VM_1_10_SV32 : VM_1_10_SV57); +#endif + set_priv_version(env, PRIV_VERSION_1_12_0); - register_cpu_props(DEVICE(obj)); } #if defined(TARGET_RISCV64) @@ -259,17 +364,22 @@ static void rv64_base_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; /* We set this in the realise function */ set_misa(env, MXL_RV64, 0); - register_cpu_props(DEVICE(obj)); + riscv_cpu_add_user_properties(obj); /* Set latest version of privileged specification */ set_priv_version(env, PRIV_VERSION_1_12_0); +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); +#endif } static void rv64_sifive_u_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); - register_cpu_props(DEVICE(obj)); set_priv_version(env, PRIV_VERSION_1_10_0); +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); +#endif } static void rv64_sifive_e_cpu_init(Object *obj) @@ -278,9 +388,11 @@ static void rv64_sifive_e_cpu_init(Object *obj) RISCVCPU *cpu = RISCV_CPU(obj); set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); - register_cpu_props(DEVICE(obj)); set_priv_version(env, PRIV_VERSION_1_10_0); cpu->cfg.mmu = false; +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_MBARE); +#endif } static void rv64_thead_c906_cpu_init(Object *obj) @@ -288,14 +400,9 @@ static void rv64_thead_c906_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU); set_priv_version(env, PRIV_VERSION_1_11_0); - cpu->cfg.ext_g = true; - cpu->cfg.ext_c = true; - cpu->cfg.ext_u = true; - cpu->cfg.ext_s = true; - cpu->cfg.ext_icsr = true; cpu->cfg.ext_zfh = true; cpu->cfg.mmu = true; cpu->cfg.ext_xtheadba = true; @@ -310,6 +417,46 @@ static void rv64_thead_c906_cpu_init(Object *obj) cpu->cfg.ext_xtheadsync = true; cpu->cfg.mvendorid = THEAD_VENDOR_ID; +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_SV39); +#endif +} + +static void rv64_veyron_v1_cpu_init(Object *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + RISCVCPU *cpu = RISCV_CPU(obj); + + set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH); + env->priv_ver = PRIV_VERSION_1_12_0; + + /* Enable ISA extensions */ + cpu->cfg.mmu = true; + cpu->cfg.ext_icbom = true; + cpu->cfg.cbom_blocksize = 64; + cpu->cfg.cboz_blocksize = 64; + cpu->cfg.ext_icboz = true; + cpu->cfg.ext_smaia = true; + cpu->cfg.ext_ssaia = true; + cpu->cfg.ext_sscofpmf = true; + cpu->cfg.ext_sstc = true; + cpu->cfg.ext_svinval = true; + cpu->cfg.ext_svnapot = true; + cpu->cfg.ext_svpbmt = true; + cpu->cfg.ext_smstateen = true; + cpu->cfg.ext_zba = true; + cpu->cfg.ext_zbb = true; + cpu->cfg.ext_zbc = true; + cpu->cfg.ext_zbs = true; + cpu->cfg.ext_XVentanaCondOps = true; + + cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; + cpu->cfg.marchid = VEYRON_V1_MARCHID; + cpu->cfg.mimpid = VEYRON_V1_MIMPID; + +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_SV48); +#endif } static void rv128_base_cpu_init(Object *obj) @@ -323,9 +470,12 @@ static void rv128_base_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; /* We set this in the realise function */ set_misa(env, MXL_RV128, 0); - register_cpu_props(DEVICE(obj)); + riscv_cpu_add_user_properties(obj); /* Set latest version of privileged specification */ set_priv_version(env, PRIV_VERSION_1_12_0); +#ifndef 
CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); +#endif } #else static void rv32_base_cpu_init(Object *obj) @@ -333,17 +483,22 @@ static void rv32_base_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; /* We set this in the realise function */ set_misa(env, MXL_RV32, 0); - register_cpu_props(DEVICE(obj)); + riscv_cpu_add_user_properties(obj); /* Set latest version of privileged specification */ set_priv_version(env, PRIV_VERSION_1_12_0); +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); +#endif } static void rv32_sifive_u_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); - register_cpu_props(DEVICE(obj)); set_priv_version(env, PRIV_VERSION_1_10_0); +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); +#endif } static void rv32_sifive_e_cpu_init(Object *obj) @@ -352,9 +507,11 @@ static void rv32_sifive_e_cpu_init(Object *obj) RISCVCPU *cpu = RISCV_CPU(obj); set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); - register_cpu_props(DEVICE(obj)); set_priv_version(env, PRIV_VERSION_1_10_0); cpu->cfg.mmu = false; +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_MBARE); +#endif } static void rv32_ibex_cpu_init(Object *obj) @@ -363,9 +520,11 @@ static void rv32_ibex_cpu_init(Object *obj) RISCVCPU *cpu = RISCV_CPU(obj); set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); - register_cpu_props(DEVICE(obj)); set_priv_version(env, PRIV_VERSION_1_11_0); cpu->cfg.mmu = false; +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_MBARE); +#endif cpu->cfg.epmp = true; } @@ -375,9 +534,11 @@ static void rv32_imafcu_nommu_cpu_init(Object *obj) RISCVCPU *cpu = RISCV_CPU(obj); set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); - register_cpu_props(DEVICE(obj)); set_priv_version(env, PRIV_VERSION_1_10_0); cpu->cfg.mmu = false; +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_MBARE); +#endif } #endif @@ -390,7 +551,7 @@ static void riscv_host_cpu_init(Object *obj) #elif defined(TARGET_RISCV64) set_misa(env, MXL_RV64, 0); #endif - register_cpu_props(DEVICE(obj)); + riscv_cpu_add_user_properties(obj); } #endif @@ -420,7 +581,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) #if !defined(CONFIG_USER_ONLY) if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s %d\n", "V = ", riscv_cpu_virt_enabled(env)); + qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); } #endif qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); @@ -533,10 +694,12 @@ static void riscv_cpu_synchronize_from_tb(CPUState *cs, CPURISCVState *env = &cpu->env; RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + if (xl == MXL_RV32) { - env->pc = (int32_t)tb_pc(tb); + env->pc = (int32_t) tb->pc; } else { - env->pc = tb_pc(tb); + env->pc = tb->pc; } } @@ -613,6 +776,11 @@ static void riscv_cpu_reset_hold(Object *obj) env->bins = 0; env->two_stage_lookup = false; + env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | + (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0); + env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | + (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0); + /* Initialized default priorities of local interrupts. 
*/ for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { iprio = riscv_cpu_default_priority(i); @@ -628,7 +796,7 @@ static void riscv_cpu_reset_hold(Object *obj) i++; } /* mmte is supposed to have pm.current hardwired to 1 */ - env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT); + env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); #endif env->xl = riscv_cpu_mxl(env); riscv_cpu_update_mask(env); @@ -637,7 +805,7 @@ static void riscv_cpu_reset_hold(Object *obj) set_default_nan_mode(1, &env->fp_status); #ifndef CONFIG_USER_ONLY - if (riscv_feature(env, RISCV_FEATURE_DEBUG)) { + if (cpu->cfg.debug) { riscv_trigger_init(env); } @@ -668,91 +836,126 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) /* * Check consistency between chosen extensions while setting - * cpu->cfg accordingly, doing a set_misa() in the end. + * cpu->cfg accordingly. */ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) { CPURISCVState *env = &cpu->env; - uint32_t ext = 0; /* Do some ISA extension error checking */ - if (cpu->cfg.ext_g && !(cpu->cfg.ext_i && cpu->cfg.ext_m && - cpu->cfg.ext_a && cpu->cfg.ext_f && - cpu->cfg.ext_d && - cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) { + if (riscv_has_ext(env, RVG) && + !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) && + riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) && + riscv_has_ext(env, RVD) && + cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) { warn_report("Setting G will also set IMAFD_Zicsr_Zifencei"); - cpu->cfg.ext_i = true; - cpu->cfg.ext_m = true; - cpu->cfg.ext_a = true; - cpu->cfg.ext_f = true; - cpu->cfg.ext_d = true; cpu->cfg.ext_icsr = true; cpu->cfg.ext_ifencei = true; + + env->misa_ext |= RVI | RVM | RVA | RVF | RVD; + env->misa_ext_mask = env->misa_ext; } - if (cpu->cfg.ext_i && cpu->cfg.ext_e) { + if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { error_setg(errp, "I and E extensions are incompatible"); return; } - if (!cpu->cfg.ext_i && !cpu->cfg.ext_e) { + if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) { error_setg(errp, "Either I or E extension must be set"); return; } - if (cpu->cfg.ext_s && !cpu->cfg.ext_u) { + if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) { error_setg(errp, "Setting S extension without U extension is illegal"); return; } - if (cpu->cfg.ext_h && !cpu->cfg.ext_i) { + if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) { error_setg(errp, "H depends on an I base integer ISA with 32 x registers"); return; } - if (cpu->cfg.ext_h && !cpu->cfg.ext_s) { + if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) { error_setg(errp, "H extension implicitly requires S-mode"); return; } - if (cpu->cfg.ext_f && !cpu->cfg.ext_icsr) { + if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) { error_setg(errp, "F extension requires Zicsr"); return; } - if ((cpu->cfg.ext_zawrs) && !cpu->cfg.ext_a) { + if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) { error_setg(errp, "Zawrs extension requires A extension"); return; } - if ((cpu->cfg.ext_zfh || cpu->cfg.ext_zfhmin) && !cpu->cfg.ext_f) { + if (cpu->cfg.ext_zfh) { + cpu->cfg.ext_zfhmin = true; + } + + if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) { error_setg(errp, "Zfh/Zfhmin extensions require F extension"); return; } - if (cpu->cfg.ext_d && !cpu->cfg.ext_f) { + if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) { error_setg(errp, "D extension requires F extension"); return; } - if (cpu->cfg.ext_v && !cpu->cfg.ext_d) { - error_setg(errp, "V extension requires D extension"); + /* The V vector extension 
depends on the Zve64d extension */ + if (riscv_has_ext(env, RVV)) { + cpu->cfg.ext_zve64d = true; + } + + /* The Zve64d extension depends on the Zve64f extension */ + if (cpu->cfg.ext_zve64d) { + cpu->cfg.ext_zve64f = true; + } + + /* The Zve64f extension depends on the Zve32f extension */ + if (cpu->cfg.ext_zve64f) { + cpu->cfg.ext_zve32f = true; + } + + if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) { + error_setg(errp, "Zve64d/V extensions require D extension"); return; } - if ((cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) && !cpu->cfg.ext_f) { + if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) { error_setg(errp, "Zve32f/Zve64f extensions require F extension"); return; } + if (cpu->cfg.ext_zvfh) { + cpu->cfg.ext_zvfhmin = true; + } + + if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) { + error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension"); + return; + } + + if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { + error_setg(errp, "Zvfh extension requires Zfhmin extension"); + return; + } + /* Set the ISA extensions, checks should have happened above */ - if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinx || - cpu->cfg.ext_zhinxmin) { - cpu->cfg.ext_zfinx = true; + if (cpu->cfg.ext_zhinx) { + cpu->cfg.ext_zhinxmin = true; + } + + if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { + error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx"); + return; + } if (cpu->cfg.ext_zfinx) { @@ -760,13 +963,66 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) error_setg(errp, "Zfinx extension requires Zicsr"); return; } - if (cpu->cfg.ext_f) { + if (riscv_has_ext(env, RVF)) { error_setg(errp, - "Zfinx cannot be supported together with F extension"); + "Zfinx cannot be supported together with F extension"); return; } } + if (cpu->cfg.ext_zce) { + cpu->cfg.ext_zca = true; + cpu->cfg.ext_zcb = true; + cpu->cfg.ext_zcmp = true; + cpu->cfg.ext_zcmt = true; + if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { + cpu->cfg.ext_zcf = true; + } + } + + if (riscv_has_ext(env, RVC)) { + cpu->cfg.ext_zca = true; + if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { + cpu->cfg.ext_zcf = true; + } + if (riscv_has_ext(env, RVD)) { + cpu->cfg.ext_zcd = true; + } + } + + if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { + error_setg(errp, "Zcf extension is only relevant to RV32"); + return; + } + + if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { + error_setg(errp, "Zcf extension requires F extension"); + return; + } + + if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { + error_setg(errp, "Zcd extension requires D extension"); + return; + } + + if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || + cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { + error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " + "extension"); + return; + } + + if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { + error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " + "Zcd extension"); + return; + } + + if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) { + error_setg(errp, "Zcmt extension requires Zicsr extension"); + return; + } + if (cpu->cfg.ext_zk) { cpu->cfg.ext_zkn = true; cpu->cfg.ext_zkr = true; @@ -790,81 +1046,135 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) cpu->cfg.ext_zksh = true; } - if (cpu->cfg.ext_i) { - ext |= RVI; - } - if (cpu->cfg.ext_e) { - ext |= RVE; - } - if (cpu->cfg.ext_m) { - ext |= RVM; - } - if (cpu->cfg.ext_a) { - 
ext |= RVA; - } - if (cpu->cfg.ext_f) { - ext |= RVF; - } - if (cpu->cfg.ext_d) { - ext |= RVD; - } - if (cpu->cfg.ext_c) { - ext |= RVC; - } - if (cpu->cfg.ext_s) { - ext |= RVS; - } - if (cpu->cfg.ext_u) { - ext |= RVU; - } - if (cpu->cfg.ext_h) { - ext |= RVH; - } - if (cpu->cfg.ext_v) { + if (riscv_has_ext(env, RVV)) { int vext_version = VEXT_VERSION_1_00_0; - ext |= RVV; if (!is_power_of_2(cpu->cfg.vlen)) { error_setg(errp, - "Vector extension VLEN must be power of 2"); + "Vector extension VLEN must be power of 2"); return; } if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) { error_setg(errp, - "Vector extension implementation only supports VLEN " - "in the range [128, %d]", RV_VLEN_MAX); + "Vector extension implementation only supports VLEN " + "in the range [128, %d]", RV_VLEN_MAX); return; } if (!is_power_of_2(cpu->cfg.elen)) { error_setg(errp, - "Vector extension ELEN must be power of 2"); + "Vector extension ELEN must be power of 2"); return; } - if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) { - error_setg(errp, - "Vector extension implementation only supports ELEN " - "in the range [8, 64]"); - return; - } - if (cpu->cfg.vext_spec) { - if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) { - vext_version = VEXT_VERSION_1_00_0; - } else { + if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) { error_setg(errp, - "Unsupported vector spec version '%s'", - cpu->cfg.vext_spec); + "Vector extension implementation only supports ELEN " + "in the range [8, 64]"); return; } - } else { - qemu_log("vector version is not specified, " - "use the default value v1.0\n"); + if (cpu->cfg.vext_spec) { + if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) { + vext_version = VEXT_VERSION_1_00_0; + } else { + error_setg(errp, + "Unsupported vector spec version '%s'", + cpu->cfg.vext_spec); + return; + } + } else { + qemu_log("vector version is not specified, " + "use the default value v1.0\n"); + } + set_vext_version(env, vext_version); } - set_vext_version(env, vext_version); - } - if (cpu->cfg.ext_j) { - ext |= RVJ; +} + +#ifndef CONFIG_USER_ONLY +static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) +{ + bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; + uint8_t satp_mode_map_max; + uint8_t satp_mode_supported_max = + satp_mode_max_from_map(cpu->cfg.satp_mode.supported); + + if (cpu->cfg.satp_mode.map == 0) { + if (cpu->cfg.satp_mode.init == 0) { + /* If unset by the user, we fallback to the default satp mode. */ + set_satp_mode_default_map(cpu); + } else { + /* + * Find the lowest level that was disabled and then enable the + * first valid level below which can be found in + * valid_vm_1_10_32/64. + */ + for (int i = 1; i < 16; ++i) { + if ((cpu->cfg.satp_mode.init & (1 << i)) && + (cpu->cfg.satp_mode.supported & (1 << i))) { + for (int j = i - 1; j >= 0; --j) { + if (cpu->cfg.satp_mode.supported & (1 << j)) { + cpu->cfg.satp_mode.map |= (1 << j); + break; + } + } + break; + } + } + } } - set_misa(env, env->misa_mxl, ext); + satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); + + /* Make sure the user asked for a supported configuration (HW and qemu) */ + if (satp_mode_map_max > satp_mode_supported_max) { + error_setg(errp, "satp_mode %s is higher than hw max capability %s", + satp_mode_str(satp_mode_map_max, rv32), + satp_mode_str(satp_mode_supported_max, rv32)); + return; + } + + /* + * Make sure the user did not ask for an invalid configuration as per + * the specification. 
+ */ + if (!rv32) { + for (int i = satp_mode_map_max - 1; i >= 0; --i) { + if (!(cpu->cfg.satp_mode.map & (1 << i)) && + (cpu->cfg.satp_mode.init & (1 << i)) && + (cpu->cfg.satp_mode.supported & (1 << i))) { + error_setg(errp, "cannot disable %s satp mode if %s " + "is enabled", satp_mode_str(i, false), + satp_mode_str(satp_mode_map_max, false)); + return; + } + } + } + + /* Finally expand the map so that all valid modes are set */ + for (int i = satp_mode_map_max - 1; i >= 0; --i) { + if (cpu->cfg.satp_mode.supported & (1 << i)) { + cpu->cfg.satp_mode.map |= (1 << i); + } + } +} +#endif + +static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) +{ +#ifndef CONFIG_USER_ONLY + Error *local_err = NULL; + + riscv_cpu_satp_mode_finalize(cpu, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } +#endif +} + +static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) +{ + if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) { + error_setg(errp, "H extension requires priv spec 1.12.0"); + return; + } } static void riscv_cpu_realize(DeviceState *dev, Error **errp) @@ -902,6 +1212,12 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) set_priv_version(env, priv_version); } + riscv_cpu_validate_misa_priv(env, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + /* Force disable extensions if priv spec version does not match */ for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) && @@ -919,24 +1235,13 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } } - if (cpu->cfg.mmu) { - riscv_set_feature(env, RISCV_FEATURE_MMU); - } - - if (cpu->cfg.pmp) { - riscv_set_feature(env, RISCV_FEATURE_PMP); - + if (cpu->cfg.epmp && !cpu->cfg.pmp) { /* * Enhanced PMP should only be available * on harts with PMP support */ - if (cpu->cfg.epmp) { - riscv_set_feature(env, RISCV_FEATURE_EPMP); - } - } - - if (cpu->cfg.debug) { - riscv_set_feature(env, RISCV_FEATURE_DEBUG); + error_setg(errp, "Invalid configuration: EPMP requires PMP support"); + return; } @@ -977,6 +1282,12 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } #endif + riscv_cpu_finalize_features(cpu, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + riscv_cpu_register_gdb_regs_for_features(cs); qemu_init_vcpu(cs); @@ -986,6 +1297,52 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } #ifndef CONFIG_USER_ONLY +static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVSATPMap *satp_map = opaque; + uint8_t satp = satp_mode_from_str(name); + bool value; + + value = satp_map->map & (1 << satp); + + visit_type_bool(v, name, &value, errp); +} + +static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVSATPMap *satp_map = opaque; + uint8_t satp = satp_mode_from_str(name); + bool value; + + if (!visit_type_bool(v, name, &value, errp)) { + return; + } + + satp_map->map = deposit32(satp_map->map, satp, 1, value); + satp_map->init |= 1 << satp; +} + +static void riscv_add_satp_mode_properties(Object *obj) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + + if (cpu->env.misa_mxl == MXL_RV32) { + object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, + cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + } else { + object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, + cpu_riscv_set_satp, NULL, 
&cpu->cfg.satp_mode); + object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, + cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, + cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, + cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + } +} + static void riscv_cpu_set_irq(void *opaque, int irq, int level) { RISCVCPU *cpu = RISCV_CPU(opaque); @@ -1007,7 +1364,7 @@ static void riscv_cpu_set_irq(void *opaque, int irq, int level) if (kvm_enabled()) { kvm_riscv_set_irq(cpu, irq, level); } else { - riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level)); + riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); } break; case IRQ_S_EXT: @@ -1015,7 +1372,7 @@ kvm_riscv_set_irq(cpu, irq, level); } else { env->external_seip = level; - riscv_cpu_update_mip(cpu, 1 << irq, + riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level | env->software_seip)); } break; @@ -1041,7 +1398,7 @@ } /* Update mip.SGEIP bit */ - riscv_cpu_update_mip(cpu, MIP_SGEIP, + riscv_cpu_update_mip(env, MIP_SGEIP, BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); } else { g_assert_not_reached(); @@ -1066,20 +1423,98 @@ static void riscv_cpu_init(Object *obj) #endif /* CONFIG_USER_ONLY */ } +typedef struct RISCVCPUMisaExtConfig { + const char *name; + const char *description; + target_ulong misa_bit; + bool enabled; +} RISCVCPUMisaExtConfig; + +static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; + target_ulong misa_bit = misa_ext_cfg->misa_bit; + RISCVCPU *cpu = RISCV_CPU(obj); + CPURISCVState *env = &cpu->env; + bool value; + + if (!visit_type_bool(v, name, &value, errp)) { + return; + } + + if (value) { + env->misa_ext |= misa_bit; + env->misa_ext_mask |= misa_bit; + } else { + env->misa_ext &= ~misa_bit; + env->misa_ext_mask &= ~misa_bit; + } +} + +static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; + target_ulong misa_bit = misa_ext_cfg->misa_bit; + RISCVCPU *cpu = RISCV_CPU(obj); + CPURISCVState *env = &cpu->env; + bool value; + + value = env->misa_ext & misa_bit; + + visit_type_bool(v, name, &value, errp); +} + +static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = { + {.name = "a", .description = "Atomic instructions", + .misa_bit = RVA, .enabled = true}, + {.name = "c", .description = "Compressed instructions", + .misa_bit = RVC, .enabled = true}, + {.name = "d", .description = "Double-precision floating point", + .misa_bit = RVD, .enabled = true}, + {.name = "f", .description = "Single-precision floating point", + .misa_bit = RVF, .enabled = true}, + {.name = "i", .description = "Base integer instruction set", + .misa_bit = RVI, .enabled = true}, + {.name = "e", .description = "Base integer instruction set (embedded)", + .misa_bit = RVE, .enabled = false}, + {.name = "m", .description = "Integer multiplication and division", + .misa_bit = RVM, .enabled = true}, + {.name = "s", .description = "Supervisor-level instructions", + .misa_bit = RVS, .enabled = true}, + {.name = "u", .description = "User-level instructions", + .misa_bit = RVU, .enabled = true}, + {.name = "h", .description = "Hypervisor", + .misa_bit = RVH, .enabled = true}, + {.name = "x-j", 
.description = "Dynamic translated languages", + .misa_bit = RVJ, .enabled = false}, + {.name = "v", .description = "Vector operations", + .misa_bit = RVV, .enabled = false}, + {.name = "g", .description = "General purpose (IMAFD_Zicsr_Zifencei)", + .misa_bit = RVG, .enabled = false}, +}; + +static void riscv_cpu_add_misa_properties(Object *cpu_obj) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) { + const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i]; + + object_property_add(cpu_obj, misa_cfg->name, "bool", + cpu_get_misa_ext_cfg, + cpu_set_misa_ext_cfg, + NULL, (void *)misa_cfg); + object_property_set_description(cpu_obj, misa_cfg->name, + misa_cfg->description); + object_property_set_bool(cpu_obj, misa_cfg->name, + misa_cfg->enabled, NULL); + } +} + static Property riscv_cpu_extensions[] = { /* Defaults for standard extensions */ - DEFINE_PROP_BOOL("i", RISCVCPU, cfg.ext_i, true), - DEFINE_PROP_BOOL("e", RISCVCPU, cfg.ext_e, false), - DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, false), - DEFINE_PROP_BOOL("m", RISCVCPU, cfg.ext_m, true), - DEFINE_PROP_BOOL("a", RISCVCPU, cfg.ext_a, true), - DEFINE_PROP_BOOL("f", RISCVCPU, cfg.ext_f, true), - DEFINE_PROP_BOOL("d", RISCVCPU, cfg.ext_d, true), - DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true), - DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true), - DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true), - DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false), - DEFINE_PROP_BOOL("h", RISCVCPU, cfg.ext_h, true), DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16), DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false), DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true), @@ -1090,6 +1525,7 @@ static Property riscv_cpu_extensions[] = { DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false), DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false), DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false), + DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false), DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true), @@ -1099,6 +1535,8 @@ static Property riscv_cpu_extensions[] = { DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), + DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true), + DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false), DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false), DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false), @@ -1126,6 +1564,11 @@ static Property riscv_cpu_extensions[] = { DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false), DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false), + DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true), + DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), + DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true), + DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), + DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false), /* Vendor-specific custom extensions */ @@ -1143,56 +1586,47 @@ static Property riscv_cpu_extensions[] = { DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false), /* These are experimental so mark with 'x-' */ - DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false), + DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false), + + DEFINE_PROP_BOOL("x-zca", RISCVCPU, cfg.ext_zca, false), + DEFINE_PROP_BOOL("x-zcb", RISCVCPU, cfg.ext_zcb, 
false), + DEFINE_PROP_BOOL("x-zcd", RISCVCPU, cfg.ext_zcd, false), + DEFINE_PROP_BOOL("x-zce", RISCVCPU, cfg.ext_zce, false), + DEFINE_PROP_BOOL("x-zcf", RISCVCPU, cfg.ext_zcf, false), + DEFINE_PROP_BOOL("x-zcmp", RISCVCPU, cfg.ext_zcmp, false), + DEFINE_PROP_BOOL("x-zcmt", RISCVCPU, cfg.ext_zcmt, false), + /* ePMP 0.9.3 */ DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false), DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false), + DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false), + DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false), + DEFINE_PROP_END_OF_LIST(), }; /* - * Register CPU props based on env.misa_ext. If a non-zero - * value was set, register only the required cpu->cfg.ext_* - * properties and leave. env.misa_ext = 0 means that we want - * all the default properties to be registered. + * Add CPU properties with user-facing flags. + * + * This will overwrite existing env->misa_ext values with the + * defaults set via riscv_cpu_add_misa_properties(). */ -static void register_cpu_props(DeviceState *dev) +static void riscv_cpu_add_user_properties(Object *obj) { - RISCVCPU *cpu = RISCV_CPU(OBJECT(dev)); - uint32_t misa_ext = cpu->env.misa_ext; Property *prop; + DeviceState *dev = DEVICE(obj); - /* - * If misa_ext is not zero, set cfg properties now to - * allow them to be read during riscv_cpu_realize() - * later on. - */ - if (cpu->env.misa_ext != 0) { - cpu->cfg.ext_i = misa_ext & RVI; - cpu->cfg.ext_e = misa_ext & RVE; - cpu->cfg.ext_m = misa_ext & RVM; - cpu->cfg.ext_a = misa_ext & RVA; - cpu->cfg.ext_f = misa_ext & RVF; - cpu->cfg.ext_d = misa_ext & RVD; - cpu->cfg.ext_v = misa_ext & RVV; - cpu->cfg.ext_c = misa_ext & RVC; - cpu->cfg.ext_s = misa_ext & RVS; - cpu->cfg.ext_u = misa_ext & RVU; - cpu->cfg.ext_h = misa_ext & RVH; - cpu->cfg.ext_j = misa_ext & RVJ; - - /* - * We don't want to set the default riscv_cpu_extensions - * in this case. - */ - return; - } + riscv_cpu_add_misa_properties(obj); for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { qdev_property_add_static(dev, prop); } + +#ifndef CONFIG_USER_ONLY + riscv_add_satp_mode_properties(obj); +#endif } static Property riscv_cpu_properties[] = { @@ -1210,6 +1644,12 @@ static Property riscv_cpu_properties[] = { DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), + + /* + * write_misa() is marked as experimental for now so mark + * it with -x and default to 'false'. 
+ */ + DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), DEFINE_PROP_END_OF_LIST(), }; @@ -1243,6 +1683,13 @@ static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) } #ifndef CONFIG_USER_ONLY +static int64_t riscv_get_arch_id(CPUState *cs) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + + return cpu->env.mhartid; +} + #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps riscv_sysemu_ops = { @@ -1297,6 +1744,7 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data) cc->disas_set_info = riscv_cpu_disas_set_info; #ifndef CONFIG_USER_ONLY cc->sysemu_ops = &riscv_sysemu_ops; + cc->get_arch_id = riscv_get_arch_id; #endif cc->gdb_arch_name = riscv_gdb_arch_name; cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml; @@ -1305,15 +1753,15 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data) device_class_set_props(dc, riscv_cpu_properties); } -static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, int max_str_len) +static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, + int max_str_len) { char *old = *isa_str; char *new = *isa_str; int i; for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { - if (isa_edata_arr[i].multi_letter && - isa_ext_is_enabled(cpu, &isa_edata_arr[i])) { + if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) { new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL); g_free(old); old = new; @@ -1377,6 +1825,13 @@ void riscv_cpu_list(void) .instance_init = initfn \ } +#define DEFINE_DYNAMIC_CPU(type_name, initfn) \ + { \ + .name = type_name, \ + .parent = TYPE_RISCV_DYNAMIC_CPU, \ + .instance_init = initfn \ + } + static const TypeInfo riscv_cpu_type_infos[] = { { .name = TYPE_RISCV_CPU, @@ -1388,23 +1843,29 @@ static const TypeInfo riscv_cpu_type_infos[] = { .class_size = sizeof(RISCVCPUClass), .class_init = riscv_cpu_class_init, }, - DEFINE_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), + { + .name = TYPE_RISCV_DYNAMIC_CPU, + .parent = TYPE_RISCV_CPU, + .abstract = true, + }, + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), #if defined(CONFIG_KVM) DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init), #endif #if defined(TARGET_RISCV32) - DEFINE_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init), #elif defined(TARGET_RISCV64) - DEFINE_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), + DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), #endif }; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 7128438d8e..de7e43126a 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -27,6 +27,8 @@ #include "qom/object.h" #include "qemu/int128.h" #include "cpu_bits.h" +#include "qapi/qapi-types-common.h" +#include "cpu-qom.h" #define TCG_GUEST_DEFAULT_MO 0 @@ -36,38 +38,9 @@ */ #define TARGET_INSN_START_EXTRA_WORDS 1 -#define TYPE_RISCV_CPU "riscv-cpu" - 
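/*
 * Editorial note: riscv_isa_string_ext() above builds the canonical ISA
 * string by walking isa_edata_arr[], which this patch keeps sorted per the
 * RISC-V extension-naming rules, and appending "_<name>" for every enabled
 * multi-letter extension. A standalone sketch of the same idea; the
 * ext_names/ext_enabled tables are hypothetical, not QEMU's:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *ext_names[] = { "zicsr", "zifencei", "zba", "zbb" };
    const int ext_enabled[] = { 1, 1, 0, 1 };
    char isa[64] = "rv64imafdc";  /* single-letter extensions come first */

    for (size_t i = 0; i < sizeof(ext_names) / sizeof(ext_names[0]); i++) {
        if (ext_enabled[i]) {
            strcat(isa, "_");     /* multi-letter names are '_'-separated */
            strcat(isa, ext_names[i]);
        }
    }
    printf("%s\n", isa);          /* prints rv64imafdc_zicsr_zifencei_zbb */
    return 0;
}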
-#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU -#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX) -#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU - -#define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any") -#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32") -#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64") -#define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128") -#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex") -#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c") -#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31") -#define TYPE_RISCV_CPU_SIFIVE_E34 RISCV_CPU_TYPE_NAME("sifive-e34") -#define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51") -#define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34") -#define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54") -#define TYPE_RISCV_CPU_THEAD_C906 RISCV_CPU_TYPE_NAME("thead-c906") -#define TYPE_RISCV_CPU_HOST RISCV_CPU_TYPE_NAME("host") - -#if defined(TARGET_RISCV32) -# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE32 -#elif defined(TARGET_RISCV64) -# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE64 -#endif - #define RV(x) ((target_ulong)1 << (x - 'A')) -/* - * Consider updating register_cpu_props() when adding - * new MISA bits here. - */ +/* Consider updating misa_ext_cfgs[] when adding new MISA bits here */ #define RVI RV('I') #define RVE RV('E') /* E and I are mutually exclusive */ #define RVM RV('M') @@ -80,18 +53,8 @@ #define RVU RV('U') #define RVH RV('H') #define RVJ RV('J') +#define RVG RV('G') -/* S extension denotes that Supervisor mode exists, however it is possible - to have a core that support S mode but does not have an MMU and there - is currently no bit in misa to indicate whether an MMU exists or not - so a cpu features bitfield is required, likewise for optional PMP support */ -enum { - RISCV_FEATURE_MMU, - RISCV_FEATURE_PMP, - RISCV_FEATURE_EPMP, - RISCV_FEATURE_MISA, - RISCV_FEATURE_DEBUG -}; /* Privileged specification version */ enum { @@ -109,12 +72,18 @@ enum { TRANSLATE_G_STAGE_FAIL }; +/* Extension context status */ +typedef enum { + EXT_STATUS_DISABLED = 0, + EXT_STATUS_INITIAL, + EXT_STATUS_CLEAN, + EXT_STATUS_DIRTY, +} RISCVExtStatus; + #define MMU_USER_IDX 3 #define MAX_RISCV_PMPS (16) -typedef struct CPUArchState CPURISCVState; - #if !defined(CONFIG_USER_ONLY) #include "pmp.h" #include "debug.h" @@ -134,7 +103,7 @@ FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11) typedef struct PMUCTRState { /* Current value of a counter */ target_ulong mhpmcounter_val; - /* Current value of a counter in RV32*/ + /* Current value of a counter in RV32 */ target_ulong mhpmcounterh_val; /* Snapshot values of counter */ target_ulong mhpmcounter_prev; @@ -148,7 +117,6 @@ typedef struct PMUCTRState { struct CPUArchState { target_ulong gpr[32]; target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */ - uint64_t fpr[32]; /* assume both F and D extensions */ /* vector coprocessor state. 
*/ uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16); @@ -163,7 +131,10 @@ struct CPUArchState { target_ulong load_res; target_ulong load_val; + /* Floating-Point state */ + uint64_t fpr[32]; /* assume both F and D extensions */ target_ulong frm; + float_status fp_status; target_ulong badaddr; target_ulong bins; @@ -184,7 +155,7 @@ struct CPUArchState { /* 128-bit helpers upper part return value */ target_ulong retxh; - uint32_t features; + target_ulong jvt; #ifdef CONFIG_USER_ONLY uint32_t elf_flags; @@ -193,7 +164,7 @@ struct CPUArchState { #ifndef CONFIG_USER_ONLY target_ulong priv; /* This contains QEMU specific information about the virt state. */ - target_ulong virt; + bool virt_enabled; target_ulong geilen; uint64_t resetvec; @@ -288,8 +259,10 @@ struct CPUArchState { target_ulong satp_hs; uint64_t mstatus_hs; - /* Signals whether the current exception occurred with two-stage address - translation active. */ + /* + * Signals whether the current exception occurred with two-stage address + * translation active. + */ bool two_stage_lookup; /* * Signals whether the current exception occurred while doing two-stage @@ -305,10 +278,10 @@ struct CPUArchState { /* PMU counter state */ PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS]; - /* PMU event selector configured values. First three are unused*/ + /* PMU event selector configured values. First three are unused */ target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS]; - /* PMU event selector configured values for RV32*/ + /* PMU event selector configured values for RV32 */ target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS]; target_ulong sscratch; @@ -379,8 +352,6 @@ struct CPUArchState { target_ulong cur_pmmask; target_ulong cur_pmbase; - float_status fp_status; - /* Fields from here on are preserved across CPU reset. */ QEMUTimer *stimer; /* Internal timer for S-mode interrupt */ QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */ @@ -397,37 +368,22 @@ struct CPUArchState { uint64_t kvm_timer_frequency; }; -OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU) - -/** - * RISCVCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. +/* + * map is a 16-bit bitmap: the most significant set bit in map is the maximum + * satp mode that is supported. It may be chosen by the user and must respect + * what qemu implements (valid_1_10_32/64) and what the hw is capable of + * (supported bitmap below). * - * A RISCV CPU model. + * init is a 16-bit bitmap used to make sure the user selected a correct + * configuration as per the specification. + * + * supported is a 16-bit bitmap used to reflect the hw capabilities. 
*/ -struct RISCVCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - DeviceRealize parent_realize; - ResettablePhases parent_phases; -}; +typedef struct { + uint16_t map, init, supported; +} RISCVSATPMap; struct RISCVCPUConfig { - bool ext_i; - bool ext_e; - bool ext_g; - bool ext_m; - bool ext_a; - bool ext_f; - bool ext_d; - bool ext_c; - bool ext_s; - bool ext_u; - bool ext_h; - bool ext_j; - bool ext_v; bool ext_zba; bool ext_zbb; bool ext_zbc; @@ -435,6 +391,13 @@ struct RISCVCPUConfig { bool ext_zbkc; bool ext_zbkx; bool ext_zbs; + bool ext_zca; + bool ext_zcb; + bool ext_zcd; + bool ext_zce; + bool ext_zcf; + bool ext_zcmp; + bool ext_zcmt; bool ext_zk; bool ext_zkn; bool ext_zknd; @@ -447,9 +410,13 @@ struct RISCVCPUConfig { bool ext_zkt; bool ext_ifencei; bool ext_icsr; + bool ext_icbom; + bool ext_icboz; + bool ext_zicond; bool ext_zihintpause; bool ext_smstateen; bool ext_sstc; + bool ext_svadu; bool ext_svinval; bool ext_svnapot; bool ext_svpbmt; @@ -462,7 +429,10 @@ struct RISCVCPUConfig { bool ext_zhinxmin; bool ext_zve32f; bool ext_zve64f; + bool ext_zve64d; bool ext_zmmul; + bool ext_zvfh; + bool ext_zvfhmin; bool ext_smaia; bool ext_ssaia; bool ext_sscofpmf; @@ -494,26 +464,33 @@ struct RISCVCPUConfig { char *vext_spec; uint16_t vlen; uint16_t elen; + uint16_t cbom_blocksize; + uint16_t cboz_blocksize; bool mmu; bool pmp; bool epmp; bool debug; + bool misa_w; bool short_isa_string; + +#ifndef CONFIG_USER_ONLY + RISCVSATPMap satp_mode; +#endif }; typedef struct RISCVCPUConfig RISCVCPUConfig; -/** +/* * RISCVCPU: * @env: #CPURISCVState * * A RISCV CPU. */ struct ArchCPU { - /*< private >*/ + /* < private > */ CPUState parent_obj; - /*< public >*/ + /* < public > */ CPUNegativeOffsetState neg; CPURISCVState env; @@ -535,16 +512,6 @@ static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) return (env->misa_ext & ext) != 0; } -static inline bool riscv_feature(CPURISCVState *env, int feature) -{ - return env->features & (1ULL << feature); -} - -static inline void riscv_set_feature(CPURISCVState *env, int feature) -{ - env->features |= (1ULL << feature); -} - #include "cpu_user.h" extern const char * const riscv_int_regnames[]; @@ -569,22 +536,14 @@ bool riscv_cpu_fp_enabled(CPURISCVState *env); target_ulong riscv_cpu_get_geilen(CPURISCVState *env); void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen); bool riscv_cpu_vector_enabled(CPURISCVState *env); -bool riscv_cpu_virt_enabled(CPURISCVState *env); void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable); -bool riscv_cpu_two_stage_lookup(int mmu_idx); int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch); -hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, int mmu_idx, - uintptr_t retaddr); + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); -void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, - vaddr addr, unsigned size, - MMUAccessType access_type, - int mmu_idx, MemTxAttrs attrs, - MemTxResult response, uintptr_t retaddr); char *riscv_isa_string(RISCVCPU *cpu); void riscv_cpu_list(void); @@ -592,10 +551,17 @@ void riscv_cpu_list(void); #define cpu_mmu_index riscv_cpu_mmu_index #ifndef CONFIG_USER_ONLY +void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned 
size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr); +hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request); void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env); int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts); -uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value); +uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, + uint64_t value); #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */ void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *), void *arg); @@ -606,6 +572,8 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv, target_ulong new_val, target_ulong write_mask), void *rmw_fn_arg); + +RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit); #endif void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv); @@ -616,33 +584,29 @@ G_NORETURN void riscv_raise_exception(CPURISCVState *env, target_ulong riscv_cpu_get_fflags(CPURISCVState *env); void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong); -#define TB_FLAGS_PRIV_MMU_MASK 3 -#define TB_FLAGS_PRIV_HYP_ACCESS_MASK (1 << 2) -#define TB_FLAGS_MSTATUS_FS MSTATUS_FS -#define TB_FLAGS_MSTATUS_VS MSTATUS_VS - #include "exec/cpu-all.h" FIELD(TB_FLAGS, MEM_IDX, 0, 3) -FIELD(TB_FLAGS, LMUL, 3, 3) -FIELD(TB_FLAGS, SEW, 6, 3) -/* Skip MSTATUS_VS (0x600) bits */ -FIELD(TB_FLAGS, VL_EQ_VLMAX, 11, 1) -FIELD(TB_FLAGS, VILL, 12, 1) -/* Skip MSTATUS_FS (0x6000) bits */ -/* Is a Hypervisor instruction load/store allowed? */ -FIELD(TB_FLAGS, HLSX, 15, 1) -FIELD(TB_FLAGS, MSTATUS_HS_FS, 16, 2) -FIELD(TB_FLAGS, MSTATUS_HS_VS, 18, 2) +FIELD(TB_FLAGS, FS, 3, 2) +/* Vector flags */ +FIELD(TB_FLAGS, VS, 5, 2) +FIELD(TB_FLAGS, LMUL, 7, 3) +FIELD(TB_FLAGS, SEW, 10, 3) +FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1) +FIELD(TB_FLAGS, VILL, 14, 1) +FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1) /* The combination of MXL/SXL/UXL that applies to the current cpu mode. 
*/ -FIELD(TB_FLAGS, XL, 20, 2) +FIELD(TB_FLAGS, XL, 16, 2) /* If PointerMasking should be applied */ -FIELD(TB_FLAGS, PM_MASK_ENABLED, 22, 1) -FIELD(TB_FLAGS, PM_BASE_ENABLED, 23, 1) -FIELD(TB_FLAGS, VTA, 24, 1) -FIELD(TB_FLAGS, VMA, 25, 1) +FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1) +FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1) +FIELD(TB_FLAGS, VTA, 20, 1) +FIELD(TB_FLAGS, VMA, 21, 1) /* Native debug itrigger */ -FIELD(TB_FLAGS, ITRIGGER, 26, 1) +FIELD(TB_FLAGS, ITRIGGER, 22, 1) +/* Virtual mode enabled */ +FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1) +FIELD(TB_FLAGS, PRIV, 24, 2) #ifdef TARGET_RISCV32 #define riscv_cpu_mxl(env) ((void)(env), MXL_RV32) @@ -654,6 +618,11 @@ static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env) #endif #define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env))) +static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env) +{ + return &env_archcpu(env)->cfg; +} + #if defined(TARGET_RISCV32) #define cpu_recompute_xl(env) ((void)(env), MXL_RV32) #else @@ -674,7 +643,7 @@ static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env) case PRV_U: xl = get_field(env->mstatus, MSTATUS64_UXL); break; - default: /* PRV_S | PRV_H */ + default: /* PRV_S */ xl = get_field(env->mstatus, MSTATUS64_SXL); break; } @@ -790,7 +759,7 @@ enum { CSR_TABLE_SIZE = 0x1000 }; -/** +/* * The event id are encoded based on the encoding specified in the * SBI specification v0.3 */ @@ -806,9 +775,14 @@ enum riscv_pmu_event_idx { /* CSR function table */ extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE]; +extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[]; + void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops); void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops); void riscv_cpu_register_gdb_regs_for_features(CPUState *cs); +uint8_t satp_mode_max_from_map(uint32_t map); +const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit); + #endif /* RISCV_CPU_H */ diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index 8b0d7e20ea..59f0ffd9e1 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -9,6 +9,9 @@ (((uint64_t)(val) * ((mask) & ~((mask) << 1))) & \ (uint64_t)(mask))) +/* Extension context status mask */ +#define EXT_STATUS_MASK 0x3ULL + /* Floating point round mode */ #define FSR_RD_SHIFT 5 #define FSR_RD (0x7 << FSR_RD_SHIFT) @@ -319,6 +322,7 @@ #define SMSTATEEN_MAX_COUNT 4 #define SMSTATEEN0_CS (1ULL << 0) #define SMSTATEEN0_FCSR (1ULL << 1) +#define SMSTATEEN0_JVT (1ULL << 2) #define SMSTATEEN0_HSCONTXT (1ULL << 57) #define SMSTATEEN0_IMSIC (1ULL << 58) #define SMSTATEEN0_AIA (1ULL << 59) @@ -523,6 +527,9 @@ /* Crypto Extension */ #define CSR_SEED 0x015 +/* Zcmt Extension */ +#define CSR_JVT 0x017 + /* mstatus CSR bits */ #define MSTATUS_UIE 0x00000001 #define MSTATUS_SIE 0x00000002 @@ -604,12 +611,9 @@ typedef enum { /* Privilege modes */ #define PRV_U 0 #define PRV_S 1 -#define PRV_H 2 /* Reserved */ +#define PRV_RESERVED 2 #define PRV_M 3 -/* Virtulisation Register Fields */ -#define VIRT_ONOFF 1 - /* RV32 satp CSR field masks */ #define SATP32_MODE 0x80000000 #define SATP32_ASID 0x7fc00000 @@ -640,6 +644,7 @@ typedef enum { #define PTE_SOFT 0x300 /* Reserved for Software */ #define PTE_PBMT 0x6000000000000000ULL /* Page-based memory types */ #define PTE_N 0x8000000000000000ULL /* NAPOT translation */ +#define PTE_RESERVED 0x1FC0000000000000ULL /* Reserved bits */ #define PTE_ATTR (PTE_N | PTE_PBMT) /* All attributes bits */ /* Page table PPN shift amount */ @@ -730,27 +735,22 @@ typedef enum RISCVException { #define MIE_SSIE 
(1 << IRQ_S_SOFT) #define MIE_USIE (1 << IRQ_U_SOFT) -/* General PointerMasking CSR bits*/ +/* General PointerMasking CSR bits */ #define PM_ENABLE 0x00000001ULL #define PM_CURRENT 0x00000002ULL #define PM_INSN 0x00000004ULL -#define PM_XS_MASK 0x00000003ULL - -/* PointerMasking XS bits values */ -#define PM_EXT_DISABLE 0x00000000ULL -#define PM_EXT_INITIAL 0x00000001ULL -#define PM_EXT_CLEAN 0x00000002ULL -#define PM_EXT_DIRTY 0x00000003ULL /* Execution enviornment configuration bits */ #define MENVCFG_FIOM BIT(0) #define MENVCFG_CBIE (3UL << 4) #define MENVCFG_CBCFE BIT(6) #define MENVCFG_CBZE BIT(7) +#define MENVCFG_HADE (1ULL << 61) #define MENVCFG_PBMTE (1ULL << 62) #define MENVCFG_STCE (1ULL << 63) /* For RV32 */ +#define MENVCFGH_HADE BIT(29) #define MENVCFGH_PBMTE BIT(30) #define MENVCFGH_STCE BIT(31) @@ -763,10 +763,12 @@ typedef enum RISCVException { #define HENVCFG_CBIE MENVCFG_CBIE #define HENVCFG_CBCFE MENVCFG_CBCFE #define HENVCFG_CBZE MENVCFG_CBZE +#define HENVCFG_HADE MENVCFG_HADE #define HENVCFG_PBMTE MENVCFG_PBMTE #define HENVCFG_STCE MENVCFG_STCE /* For RV32 */ +#define HENVCFGH_HADE MENVCFGH_HADE #define HENVCFGH_PBMTE MENVCFGH_PBMTE #define HENVCFGH_STCE MENVCFGH_STCE @@ -776,7 +778,7 @@ typedef enum RISCVException { #define S_OFFSET 5ULL #define M_OFFSET 8ULL -#define PM_XS_BITS (PM_XS_MASK << XS_OFFSET) +#define PM_XS_BITS (EXT_STATUS_MASK << XS_OFFSET) #define U_PM_ENABLE (PM_ENABLE << U_OFFSET) #define U_PM_CURRENT (PM_CURRENT << U_OFFSET) #define U_PM_INSN (PM_INSN << U_OFFSET) @@ -894,4 +896,7 @@ typedef enum RISCVException { #define MHPMEVENT_IDX_MASK 0xFFFFF #define MHPMEVENT_SSCOF_RESVD 16 +/* JVT CSR bits */ +#define JVT_MODE 0x3F +#define JVT_BASE (~0x3F) #endif diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index 3a9472a2ff..57d04385f1 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -21,6 +21,7 @@ #include "qemu/log.h" #include "qemu/main-loop.h" #include "cpu.h" +#include "internals.h" #include "pmu.h" #include "exec/exec-all.h" #include "instmap.h" @@ -36,7 +37,26 @@ int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch) #ifdef CONFIG_USER_ONLY return 0; #else - return env->priv; + bool virt = env->virt_enabled; + int mode = env->priv; + + /* All priv -> mmu_idx mapping are here */ + if (!ifetch) { + uint64_t status = env->mstatus; + + if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) { + mode = get_field(env->mstatus, MSTATUS_MPP); + virt = get_field(env->mstatus, MSTATUS_MPV); + if (virt) { + status = env->vsstatus; + } + } + if (mode == PRV_S && get_field(status, MSTATUS_SUM)) { + mode = MMUIdx_S_SUM; + } + } + + return mode | (virt ? MMU_2STAGE_BIT : 0); #endif } @@ -45,13 +65,13 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, { CPUState *cs = env_cpu(env); RISCVCPU *cpu = RISCV_CPU(cs); - + RISCVExtStatus fs, vs; uint32_t flags = 0; *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc; *cs_base = 0; - if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) { + if (cpu->cfg.ext_zve32f) { /* * If env->vl equals to VLMAX, we can use generic vector operation * expanders (GVEC) to accerlate the vector operations. 
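The rewritten riscv_cpu_mmu_index() above folds MPRV, SUM and the virtualization state into the MMU index up front, so the page-table walker no longer has to re-derive them per access. The MMUIdx_* constants and the mmuidx_priv()/mmuidx_sum()/mmuidx_2stage() helpers used in the hunks below come from the newly included internals.h rather than from this diff; the following self-contained sketch assumes a plausible encoding (two privilege bits plus a two-stage flag bit) purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Assumed encoding, modelled on target/riscv/internals.h; treat the
 * exact values as illustrative rather than authoritative. */
enum {
    MMUIdx_U       = 0,
    MMUIdx_S       = 1,
    MMUIdx_S_SUM   = 2,
    MMUIdx_M       = 3,
    MMU_2STAGE_BIT = (1 << 2),
};

enum { PRV_U = 0, PRV_S = 1, PRV_M = 3 };

static int mmuidx_priv(int mmu_idx)
{
    int ret = mmu_idx & 3;
    return ret == MMUIdx_S_SUM ? PRV_S : ret;
}

static bool mmuidx_sum(int mmu_idx)
{
    return (mmu_idx & 3) == MMUIdx_S_SUM;
}

static bool mmuidx_2stage(int mmu_idx)
{
    return mmu_idx & MMU_2STAGE_BIT;
}

int main(void)
{
    /* VS-mode supervisor access with mstatus.SUM set: S privilege,
     * SUM honoured, and a two-stage (guest) page-table walk. */
    int idx = MMUIdx_S_SUM | MMU_2STAGE_BIT;

    printf("priv=%d sum=%d 2stage=%d\n",
           mmuidx_priv(idx), mmuidx_sum(idx), (int)mmuidx_2stage(idx));
    return 0;
}

Giving SUM its own index also means a kernel toggling mstatus.SUM simply selects a different TLB rather than forcing a flush.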
@@ -68,48 +88,44 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill); flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew); flags = FIELD_DP32(flags, TB_FLAGS, LMUL, - FIELD_EX64(env->vtype, VTYPE, VLMUL)); + FIELD_EX64(env->vtype, VTYPE, VLMUL)); flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); flags = FIELD_DP32(flags, TB_FLAGS, VTA, - FIELD_EX64(env->vtype, VTYPE, VTA)); + FIELD_EX64(env->vtype, VTYPE, VTA)); flags = FIELD_DP32(flags, TB_FLAGS, VMA, - FIELD_EX64(env->vtype, VTYPE, VMA)); + FIELD_EX64(env->vtype, VTYPE, VMA)); + flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0); } else { flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); } #ifdef CONFIG_USER_ONLY - flags |= TB_FLAGS_MSTATUS_FS; - flags |= TB_FLAGS_MSTATUS_VS; + fs = EXT_STATUS_DIRTY; + vs = EXT_STATUS_DIRTY; #else + flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv); + flags |= cpu_mmu_index(env, 0); - if (riscv_cpu_fp_enabled(env)) { - flags |= env->mstatus & MSTATUS_FS; + fs = get_field(env->mstatus, MSTATUS_FS); + vs = get_field(env->mstatus, MSTATUS_VS); + + if (env->virt_enabled) { + flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1); + /* + * Merge DISABLED and !DIRTY states using MIN. + * We will set both fields when dirtying. + */ + fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS)); + vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS)); } - if (riscv_cpu_vector_enabled(env)) { - flags |= env->mstatus & MSTATUS_VS; - } - - if (riscv_has_ext(env, RVH)) { - if (env->priv == PRV_M || - (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) || - (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) && - get_field(env->hstatus, HSTATUS_HU))) { - flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1); - } - - flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS, - get_field(env->mstatus_hs, MSTATUS_FS)); - - flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS, - get_field(env->mstatus_hs, MSTATUS_VS)); - } - if (riscv_feature(env, RISCV_FEATURE_DEBUG) && !icount_enabled()) { + if (cpu->cfg.debug && !icount_enabled()) { flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled); } #endif + flags = FIELD_DP32(flags, TB_FLAGS, FS, fs); + flags = FIELD_DP32(flags, TB_FLAGS, VS, vs); flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl); if (env->cur_pmmask < (env->xl == MXL_RV32 ? 
UINT32_MAX : UINT64_MAX)) { flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1); @@ -230,75 +246,75 @@ int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero) * ---------------------------------------------------------------- */ static const uint8_t default_iprio[64] = { - /* Custom interrupts 48 to 63 */ - [63] = IPRIO_MMAXIPRIO, - [62] = IPRIO_MMAXIPRIO, - [61] = IPRIO_MMAXIPRIO, - [60] = IPRIO_MMAXIPRIO, - [59] = IPRIO_MMAXIPRIO, - [58] = IPRIO_MMAXIPRIO, - [57] = IPRIO_MMAXIPRIO, - [56] = IPRIO_MMAXIPRIO, - [55] = IPRIO_MMAXIPRIO, - [54] = IPRIO_MMAXIPRIO, - [53] = IPRIO_MMAXIPRIO, - [52] = IPRIO_MMAXIPRIO, - [51] = IPRIO_MMAXIPRIO, - [50] = IPRIO_MMAXIPRIO, - [49] = IPRIO_MMAXIPRIO, - [48] = IPRIO_MMAXIPRIO, + /* Custom interrupts 48 to 63 */ + [63] = IPRIO_MMAXIPRIO, + [62] = IPRIO_MMAXIPRIO, + [61] = IPRIO_MMAXIPRIO, + [60] = IPRIO_MMAXIPRIO, + [59] = IPRIO_MMAXIPRIO, + [58] = IPRIO_MMAXIPRIO, + [57] = IPRIO_MMAXIPRIO, + [56] = IPRIO_MMAXIPRIO, + [55] = IPRIO_MMAXIPRIO, + [54] = IPRIO_MMAXIPRIO, + [53] = IPRIO_MMAXIPRIO, + [52] = IPRIO_MMAXIPRIO, + [51] = IPRIO_MMAXIPRIO, + [50] = IPRIO_MMAXIPRIO, + [49] = IPRIO_MMAXIPRIO, + [48] = IPRIO_MMAXIPRIO, - /* Custom interrupts 24 to 31 */ - [31] = IPRIO_MMAXIPRIO, - [30] = IPRIO_MMAXIPRIO, - [29] = IPRIO_MMAXIPRIO, - [28] = IPRIO_MMAXIPRIO, - [27] = IPRIO_MMAXIPRIO, - [26] = IPRIO_MMAXIPRIO, - [25] = IPRIO_MMAXIPRIO, - [24] = IPRIO_MMAXIPRIO, + /* Custom interrupts 24 to 31 */ + [31] = IPRIO_MMAXIPRIO, + [30] = IPRIO_MMAXIPRIO, + [29] = IPRIO_MMAXIPRIO, + [28] = IPRIO_MMAXIPRIO, + [27] = IPRIO_MMAXIPRIO, + [26] = IPRIO_MMAXIPRIO, + [25] = IPRIO_MMAXIPRIO, + [24] = IPRIO_MMAXIPRIO, - [47] = IPRIO_DEFAULT_UPPER, - [23] = IPRIO_DEFAULT_UPPER + 1, - [46] = IPRIO_DEFAULT_UPPER + 2, - [45] = IPRIO_DEFAULT_UPPER + 3, - [22] = IPRIO_DEFAULT_UPPER + 4, - [44] = IPRIO_DEFAULT_UPPER + 5, + [47] = IPRIO_DEFAULT_UPPER, + [23] = IPRIO_DEFAULT_UPPER + 1, + [46] = IPRIO_DEFAULT_UPPER + 2, + [45] = IPRIO_DEFAULT_UPPER + 3, + [22] = IPRIO_DEFAULT_UPPER + 4, + [44] = IPRIO_DEFAULT_UPPER + 5, - [43] = IPRIO_DEFAULT_UPPER + 6, - [21] = IPRIO_DEFAULT_UPPER + 7, - [42] = IPRIO_DEFAULT_UPPER + 8, - [41] = IPRIO_DEFAULT_UPPER + 9, - [20] = IPRIO_DEFAULT_UPPER + 10, - [40] = IPRIO_DEFAULT_UPPER + 11, + [43] = IPRIO_DEFAULT_UPPER + 6, + [21] = IPRIO_DEFAULT_UPPER + 7, + [42] = IPRIO_DEFAULT_UPPER + 8, + [41] = IPRIO_DEFAULT_UPPER + 9, + [20] = IPRIO_DEFAULT_UPPER + 10, + [40] = IPRIO_DEFAULT_UPPER + 11, - [11] = IPRIO_DEFAULT_M, - [3] = IPRIO_DEFAULT_M + 1, - [7] = IPRIO_DEFAULT_M + 2, + [11] = IPRIO_DEFAULT_M, + [3] = IPRIO_DEFAULT_M + 1, + [7] = IPRIO_DEFAULT_M + 2, - [9] = IPRIO_DEFAULT_S, - [1] = IPRIO_DEFAULT_S + 1, - [5] = IPRIO_DEFAULT_S + 2, + [9] = IPRIO_DEFAULT_S, + [1] = IPRIO_DEFAULT_S + 1, + [5] = IPRIO_DEFAULT_S + 2, - [12] = IPRIO_DEFAULT_SGEXT, + [12] = IPRIO_DEFAULT_SGEXT, - [10] = IPRIO_DEFAULT_VS, - [2] = IPRIO_DEFAULT_VS + 1, - [6] = IPRIO_DEFAULT_VS + 2, + [10] = IPRIO_DEFAULT_VS, + [2] = IPRIO_DEFAULT_VS + 1, + [6] = IPRIO_DEFAULT_VS + 2, - [39] = IPRIO_DEFAULT_LOWER, - [19] = IPRIO_DEFAULT_LOWER + 1, - [38] = IPRIO_DEFAULT_LOWER + 2, - [37] = IPRIO_DEFAULT_LOWER + 3, - [18] = IPRIO_DEFAULT_LOWER + 4, - [36] = IPRIO_DEFAULT_LOWER + 5, + [39] = IPRIO_DEFAULT_LOWER, + [19] = IPRIO_DEFAULT_LOWER + 1, + [38] = IPRIO_DEFAULT_LOWER + 2, + [37] = IPRIO_DEFAULT_LOWER + 3, + [18] = IPRIO_DEFAULT_LOWER + 4, + [36] = IPRIO_DEFAULT_LOWER + 5, - [35] = IPRIO_DEFAULT_LOWER + 6, - [17] = IPRIO_DEFAULT_LOWER + 7, - [34] = 
IPRIO_DEFAULT_LOWER + 8, - [33] = IPRIO_DEFAULT_LOWER + 9, - [16] = IPRIO_DEFAULT_LOWER + 10, - [32] = IPRIO_DEFAULT_LOWER + 11, + [35] = IPRIO_DEFAULT_LOWER + 6, + [17] = IPRIO_DEFAULT_LOWER + 7, + [34] = IPRIO_DEFAULT_LOWER + 8, + [33] = IPRIO_DEFAULT_LOWER + 9, + [16] = IPRIO_DEFAULT_LOWER + 10, + [32] = IPRIO_DEFAULT_LOWER + 11, }; uint8_t riscv_cpu_default_priority(int irq) @@ -314,7 +330,6 @@ static int riscv_cpu_pending_to_irq(CPURISCVState *env, int extirq, unsigned int extirq_def_prio, uint64_t pending, uint8_t *iprio) { - RISCVCPU *cpu = env_archcpu(env); int irq, best_irq = RISCV_EXCP_NONE; unsigned int prio, best_prio = UINT_MAX; @@ -323,7 +338,8 @@ static int riscv_cpu_pending_to_irq(CPURISCVState *env, } irq = ctz64(pending); - if (!((extirq == IRQ_M_EXT) ? cpu->cfg.ext_smaia : cpu->cfg.ext_ssaia)) { + if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia : + riscv_cpu_cfg(env)->ext_ssaia)) { return irq; } @@ -391,7 +407,7 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env) uint64_t irqs, pending, mie, hsie, vsie; /* Determine interrupt enable state of all privilege modes */ - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { mie = 1; hsie = 1; vsie = (env->priv < PRV_S) || @@ -452,7 +468,7 @@ bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request) bool riscv_cpu_fp_enabled(CPURISCVState *env) { if (env->mstatus & MSTATUS_FS) { - if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) { + if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) { return false; } return true; @@ -465,7 +481,7 @@ bool riscv_cpu_fp_enabled(CPURISCVState *env) bool riscv_cpu_vector_enabled(CPURISCVState *env) { if (env->mstatus & MSTATUS_VS) { - if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) { + if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) { return false; } return true; @@ -483,7 +499,7 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) if (riscv_has_ext(env, RVF)) { mstatus_mask |= MSTATUS_FS; } - bool current_virt = riscv_cpu_virt_enabled(env); + bool current_virt = env->virt_enabled; g_assert(riscv_has_ext(env, RVH)); @@ -558,27 +574,15 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen) env->geilen = geilen; } -bool riscv_cpu_virt_enabled(CPURISCVState *env) -{ - if (!riscv_has_ext(env, RVH)) { - return false; - } - - return get_field(env->virt, VIRT_ONOFF); -} - +/* This function can only be called to set virt when RVH is enabled */ void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable) { - if (!riscv_has_ext(env, RVH)) { - return; - } - /* Flush the TLB on all virt mode changes. */ - if (get_field(env->virt, VIRT_ONOFF) != enable) { + if (env->virt_enabled != enable) { tlb_flush(env_cpu(env)); } - env->virt = set_field(env->virt, VIRT_ONOFF, enable); + env->virt_enabled = enable; if (enable) { /* @@ -590,15 +594,10 @@ void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable) * * To solve this, we check and inject interrupt after setting V=1. 
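 *
 * (riscv_cpu_update_mip() with mask and value both 0 leaves mip itself
 * untouched; the call below is made purely for its side effect of
 * re-evaluating pending interrupts, VS-level ones in particular,
 * against the new virtualization state.)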
*/ - riscv_cpu_update_mip(env_archcpu(env), 0, 0); + riscv_cpu_update_mip(env, 0, 0); } } -bool riscv_cpu_two_stage_lookup(int mmu_idx) -{ - return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK; -} - int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts) { CPURISCVState *env = &cpu->env; @@ -610,13 +609,13 @@ int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts) } } -uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value) +uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, + uint64_t value) { - CPURISCVState *env = &cpu->env; - CPUState *cs = CPU(cpu); + CPUState *cs = env_cpu(env); uint64_t gein, vsgein = 0, vstip = 0, old = env->mip; - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { gein = get_field(env->hstatus, HSTATUS_VGEIN); vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0; } @@ -659,12 +658,8 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv, void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) { - if (newpriv > PRV_M) { - g_assert_not_reached(); - } - if (newpriv == PRV_H) { - newpriv = PRV_U; - } + g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED); + if (icount_enabled() && newpriv != env->priv) { riscv_itrigger_update_priv(env); } @@ -706,7 +701,7 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot, pmp_priv_t pmp_priv; int pmp_index = -1; - if (!riscv_feature(env, RISCV_FEATURE_PMP)) { + if (!riscv_cpu_cfg(env)->pmp) { *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TRANSLATE_SUCCESS; } @@ -729,7 +724,8 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot, return TRANSLATE_SUCCESS; } -/* get_physical_address - get the physical address for this virtual address +/* + * get_physical_address - get the physical address for this virtual address * * Do a page table walk to obtain the physical address corresponding to a * virtual address. Returns 0 if the translation was successful @@ -739,7 +735,7 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot, * @env: CPURISCVState * @physical: This will be set to the calculated physical address * @prot: The returned protection attributes - * @addr: The virtual address to be translated + * @addr: The virtual address or guest physical address to be translated * @fault_pte_addr: If not NULL, this will be set to fault pte address * when a error occurs on pte address translation. * This will already be shifted to match htval. @@ -751,21 +747,22 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot, * @is_debug: Is this access from a debugger or the monitor? */ static int get_physical_address(CPURISCVState *env, hwaddr *physical, - int *prot, target_ulong addr, + int *ret_prot, vaddr addr, target_ulong *fault_pte_addr, int access_type, int mmu_idx, bool first_stage, bool two_stage, bool is_debug) { - /* NOTE: the env->pc value visible here will not be + /* + * NOTE: the env->pc value visible here will not be * correct, but the value visible to the exception handler - * (riscv_cpu_do_interrupt) is correct */ + * (riscv_cpu_do_interrupt) is correct + */ MemTxResult res; MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; - int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK; + int mode = mmuidx_priv(mmu_idx); bool use_background = false; hwaddr ppn; - RISCVCPU *cpu = env_archcpu(env); int napot_bits = 0; target_ulong napot_mask; @@ -776,42 +773,20 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, * was called. 
Background registers will be used if the guest has * forced a two stage translation to be on (in HS or M mode). */ - if (!riscv_cpu_virt_enabled(env) && two_stage) { + if (!env->virt_enabled && two_stage) { use_background = true; } - /* MPRV does not affect the virtual-machine load/store - instructions, HLV, HLVX, and HSV. */ - if (riscv_cpu_two_stage_lookup(mmu_idx)) { - mode = get_field(env->hstatus, HSTATUS_SPVP); - } else if (mode == PRV_M && access_type != MMU_INST_FETCH) { - if (get_field(env->mstatus, MSTATUS_MPRV)) { - mode = get_field(env->mstatus, MSTATUS_MPP); - } - } - - if (first_stage == false) { - /* We are in stage 2 translation, this is similar to stage 1. */ - /* Stage 2 is always taken as U-mode */ - mode = PRV_U; - } - - if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) { + if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) { *physical = addr; - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TRANSLATE_SUCCESS; } - *prot = 0; + *ret_prot = 0; hwaddr base; - int levels, ptidxbits, ptesize, vm, sum, mxr, widened; - - if (first_stage == true) { - mxr = get_field(env->mstatus, MSTATUS_MXR); - } else { - mxr = get_field(env->vsstatus, MSTATUS_MXR); - } + int levels, ptidxbits, ptesize, vm, widened; if (first_stage == true) { if (use_background) { @@ -842,8 +817,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, } widened = 2; } - /* status.SUM will be ignored if execute on background */ - sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug; + switch (vm) { case VM_1_10_SV32: levels = 2; ptidxbits = 10; ptesize = 4; break; @@ -855,7 +829,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, levels = 5; ptidxbits = 9; ptesize = 8; break; case VM_1_10_MBARE: *physical = addr; - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TRANSLATE_SUCCESS; default: g_assert_not_reached(); @@ -863,20 +837,37 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, CPUState *cs = env_cpu(env); int va_bits = PGSHIFT + levels * ptidxbits + widened; - target_ulong mask, masked_msbs; - if (TARGET_LONG_BITS > (va_bits - 1)) { - mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1; + if (first_stage == true) { + target_ulong mask, masked_msbs; + + if (TARGET_LONG_BITS > (va_bits - 1)) { + mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1; + } else { + mask = 0; + } + masked_msbs = (addr >> (va_bits - 1)) & mask; + + if (masked_msbs != 0 && masked_msbs != mask) { + return TRANSLATE_FAIL; + } } else { - mask = 0; + if (vm != VM_1_10_SV32 && addr >> va_bits != 0) { + return TRANSLATE_FAIL; + } } - masked_msbs = (addr >> (va_bits - 1)) & mask; - if (masked_msbs != 0 && masked_msbs != mask) { - return TRANSLATE_FAIL; + bool pbmte = env->menvcfg & MENVCFG_PBMTE; + bool hade = env->menvcfg & MENVCFG_HADE; + + if (first_stage && two_stage && env->virt_enabled) { + pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE); + hade = hade && (env->henvcfg & HENVCFG_HADE); } int ptshift = (levels - 1) * ptidxbits; + target_ulong pte; + hwaddr pte_addr; int i; #if !TCG_OVERSIZED_GUEST @@ -893,7 +884,6 @@ restart: } /* check that physical address of PTE is legal */ - hwaddr pte_addr; if (two_stage && first_stage) { int vbase_prot; @@ -902,7 +892,7 @@ restart: /* Do the second stage translation on the base PTE address. 
*/ int vbase_ret = get_physical_address(env, &vbase, &vbase_prot, base, NULL, MMU_DATA_LOAD, - mmu_idx, false, true, + MMUIdx_U, false, true, is_debug); if (vbase_ret != TRANSLATE_SUCCESS) { @@ -925,7 +915,6 @@ restart: return TRANSLATE_PMP_FAIL; } - target_ulong pte; if (riscv_cpu_mxl(env) == MXL_RV32) { pte = address_space_ldl(cs->as, pte_addr, attrs, &res); } else { @@ -938,128 +927,168 @@ restart: if (riscv_cpu_sxl(env) == MXL_RV32) { ppn = pte >> PTE_PPN_SHIFT; - } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) { - ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT; } else { - ppn = pte >> PTE_PPN_SHIFT; - if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) { + if (pte & PTE_RESERVED) { return TRANSLATE_FAIL; } + + if (!pbmte && (pte & PTE_PBMT)) { + return TRANSLATE_FAIL; + } + + if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) { + return TRANSLATE_FAIL; + } + + ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT; } if (!(pte & PTE_V)) { /* Invalid PTE */ return TRANSLATE_FAIL; - } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) { + } + if (pte & (PTE_R | PTE_W | PTE_X)) { + goto leaf; + } + + /* Inner PTE, continue walking */ + if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) { return TRANSLATE_FAIL; - } else if (!(pte & (PTE_R | PTE_W | PTE_X))) { - /* Inner PTE, continue walking */ - if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) { + } + base = ppn << PGSHIFT; + } + + /* No leaf pte at any translation level. */ + return TRANSLATE_FAIL; + + leaf: + if (ppn & ((1ULL << ptshift) - 1)) { + /* Misaligned PPN */ + return TRANSLATE_FAIL; + } + if (!pbmte && (pte & PTE_PBMT)) { + /* Reserved without Svpbmt. */ + return TRANSLATE_FAIL; + } + + /* Check for reserved combinations of RWX flags. */ + switch (pte & (PTE_R | PTE_W | PTE_X)) { + case PTE_W: + case PTE_W | PTE_X: + return TRANSLATE_FAIL; + } + + int prot = 0; + if (pte & PTE_R) { + prot |= PAGE_READ; + } + if (pte & PTE_W) { + prot |= PAGE_WRITE; + } + if (pte & PTE_X) { + bool mxr; + + if (first_stage == true) { + mxr = get_field(env->mstatus, MSTATUS_MXR); + } else { + mxr = get_field(env->vsstatus, MSTATUS_MXR); + } + if (mxr) { + prot |= PAGE_READ; + } + prot |= PAGE_EXEC; + } + + if (pte & PTE_U) { + if (mode != PRV_U) { + if (!mmuidx_sum(mmu_idx)) { return TRANSLATE_FAIL; } - base = ppn << PGSHIFT; - } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) { - /* Reserved leaf PTE flags: PTE_W */ - return TRANSLATE_FAIL; - } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) { - /* Reserved leaf PTE flags: PTE_W + PTE_X */ - return TRANSLATE_FAIL; - } else if ((pte & PTE_U) && ((mode != PRV_U) && - (!sum || access_type == MMU_INST_FETCH))) { - /* User PTE flags when not U mode and mstatus.SUM is not set, - or the access type is an instruction fetch */ - return TRANSLATE_FAIL; - } else if (!(pte & PTE_U) && (mode != PRV_S)) { - /* Supervisor PTE flags when not S mode */ - return TRANSLATE_FAIL; - } else if (ppn & ((1ULL << ptshift) - 1)) { - /* Misaligned PPN */ - return TRANSLATE_FAIL; - } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) || - ((pte & PTE_X) && mxr))) { - /* Read access check failed */ - return TRANSLATE_FAIL; - } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) { - /* Write access check failed */ - return TRANSLATE_FAIL; - } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) { - /* Fetch access check failed */ - return TRANSLATE_FAIL; - } else { - /* if necessary, set accessed and dirty bits. 
*/ - target_ulong updated_pte = pte | PTE_A | + /* SUM allows only read+write, not execute. */ + prot &= PAGE_READ | PAGE_WRITE; + } + } else if (mode != PRV_S) { + /* Supervisor PTE flags when not S mode */ + return TRANSLATE_FAIL; + } + + if (!((prot >> access_type) & 1)) { + /* Access check failed */ + return TRANSLATE_FAIL; + } + + /* If necessary, set accessed and dirty bits. */ + target_ulong updated_pte = pte | PTE_A | (access_type == MMU_DATA_STORE ? PTE_D : 0); - /* Page table updates need to be atomic with MTTCG enabled */ - if (updated_pte != pte) { - /* - * - if accessed or dirty bits need updating, and the PTE is - * in RAM, then we do so atomically with a compare and swap. - * - if the PTE is in IO space or ROM, then it can't be updated - * and we return TRANSLATE_FAIL. - * - if the PTE changed by the time we went to update it, then - * it is no longer valid and we must re-walk the page table. - */ - MemoryRegion *mr; - hwaddr l = sizeof(target_ulong), addr1; - mr = address_space_translate(cs->as, pte_addr, - &addr1, &l, false, MEMTXATTRS_UNSPECIFIED); - if (memory_region_is_ram(mr)) { - target_ulong *pte_pa = - qemu_map_ram_ptr(mr->ram_block, addr1); + /* Page table updates need to be atomic with MTTCG enabled */ + if (updated_pte != pte && !is_debug) { + if (!hade) { + return TRANSLATE_FAIL; + } + + /* + * - if accessed or dirty bits need updating, and the PTE is + * in RAM, then we do so atomically with a compare and swap. + * - if the PTE is in IO space or ROM, then it can't be updated + * and we return TRANSLATE_FAIL. + * - if the PTE changed by the time we went to update it, then + * it is no longer valid and we must re-walk the page table. + */ + MemoryRegion *mr; + hwaddr l = sizeof(target_ulong), addr1; + mr = address_space_translate(cs->as, pte_addr, &addr1, &l, + false, MEMTXATTRS_UNSPECIFIED); + if (memory_region_is_ram(mr)) { + target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1); #if TCG_OVERSIZED_GUEST - /* MTTCG is not enabled on oversized TCG guests so - * page table updates do not need to be atomic */ - *pte_pa = pte = updated_pte; + /* + * MTTCG is not enabled on oversized TCG guests so + * page table updates do not need to be atomic + */ + *pte_pa = pte = updated_pte; #else - target_ulong old_pte = - qatomic_cmpxchg(pte_pa, pte, updated_pte); - if (old_pte != pte) { - goto restart; - } else { - pte = updated_pte; - } + target_ulong old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte); + if (old_pte != pte) { + goto restart; + } + pte = updated_pte; #endif - } else { - /* misconfigured PTE in ROM (AD bits are not preset) or - * PTE is in IO space and can't be updated atomically */ - return TRANSLATE_FAIL; - } - } - - /* for superpage mappings, make a fake leaf PTE for the TLB's - benefit. 
*/ - target_ulong vpn = addr >> PGSHIFT; - - if (cpu->cfg.ext_svnapot && (pte & PTE_N)) { - napot_bits = ctzl(ppn) + 1; - if ((i != (levels - 1)) || (napot_bits != 4)) { - return TRANSLATE_FAIL; - } - } - - napot_mask = (1 << napot_bits) - 1; - *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) | - (vpn & (((target_ulong)1 << ptshift) - 1)) - ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK); - - /* set permissions on the TLB entry */ - if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) { - *prot |= PAGE_READ; - } - if ((pte & PTE_X)) { - *prot |= PAGE_EXEC; - } - /* add write permission on stores or if the page is already dirty, - so that we TLB miss on later writes to update the dirty bit */ - if ((pte & PTE_W) && - (access_type == MMU_DATA_STORE || (pte & PTE_D))) { - *prot |= PAGE_WRITE; - } - return TRANSLATE_SUCCESS; + } else { + /* + * Misconfigured PTE in ROM (AD bits are not preset) or + * PTE is in IO space and can't be updated atomically. + */ + return TRANSLATE_FAIL; } } - return TRANSLATE_FAIL; + + /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */ + target_ulong vpn = addr >> PGSHIFT; + + if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) { + napot_bits = ctzl(ppn) + 1; + if ((i != (levels - 1)) || (napot_bits != 4)) { + return TRANSLATE_FAIL; + } + } + + napot_mask = (1 << napot_bits) - 1; + *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) | + (vpn & (((target_ulong)1 << ptshift) - 1)) + ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK); + + /* + * Remove write permission unless this is a store, or the page is + * already dirty, so that we TLB miss on later writes to update + * the dirty bit. + */ + if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) { + prot &= ~PAGE_WRITE; + } + *ret_prot = prot; + + return TRANSLATE_SUCCESS; } static void raise_mmu_exception(CPURISCVState *env, target_ulong address, @@ -1087,7 +1116,7 @@ static void raise_mmu_exception(CPURISCVState *env, target_ulong address, switch (access_type) { case MMU_INST_FETCH: - if (riscv_cpu_virt_enabled(env) && !first_stage) { + if (env->virt_enabled && !first_stage) { cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT; } else { cs->exception_index = page_fault_exceptions ? @@ -1107,7 +1136,8 @@ static void raise_mmu_exception(CPURISCVState *env, target_ulong address, cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT; } else { cs->exception_index = page_fault_exceptions ? 
- RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT; + RISCV_EXCP_STORE_PAGE_FAULT : + RISCV_EXCP_STORE_AMO_ACCESS_FAULT; } break; default: @@ -1127,11 +1157,11 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) int mmu_idx = cpu_mmu_index(&cpu->env, false); if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx, - true, riscv_cpu_virt_enabled(env), true)) { + true, env->virt_enabled, true)) { return -1; } - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL, 0, mmu_idx, false, true, true)) { return -1; @@ -1159,8 +1189,7 @@ void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, } env->badaddr = addr; - env->two_stage_lookup = riscv_cpu_virt_enabled(env) || - riscv_cpu_two_stage_lookup(mmu_idx); + env->two_stage_lookup = mmuidx_2stage(mmu_idx); env->two_stage_indirect_lookup = false; cpu_loop_exit_restore(cs, retaddr); } @@ -1185,8 +1214,7 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, g_assert_not_reached(); } env->badaddr = addr; - env->two_stage_lookup = riscv_cpu_virt_enabled(env) || - riscv_cpu_two_stage_lookup(mmu_idx); + env->two_stage_lookup = mmuidx_2stage(mmu_idx); env->two_stage_indirect_lookup = false; cpu_loop_exit_restore(cs, retaddr); } @@ -1224,7 +1252,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, int prot, prot2, prot_pmp; bool pmp_violation = false; bool first_stage_error = true; - bool two_stage_lookup = false; + bool two_stage_lookup = mmuidx_2stage(mmu_idx); bool two_stage_indirect_error = false; int ret = TRANSLATE_FAIL; int mode = mmu_idx; @@ -1236,22 +1264,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n", __func__, address, access_type, mmu_idx); - /* MPRV does not affect the virtual-machine load/store - instructions, HLV, HLVX, and HSV. 
*/ - if (riscv_cpu_two_stage_lookup(mmu_idx)) { - mode = get_field(env->hstatus, HSTATUS_SPVP); - } else if (mode == PRV_M && access_type != MMU_INST_FETCH && - get_field(env->mstatus, MSTATUS_MPRV)) { - mode = get_field(env->mstatus, MSTATUS_MPP); - if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) { - two_stage_lookup = true; - } - } - pmu_tlb_fill_incr_ctr(cpu, access_type); - if (riscv_cpu_virt_enabled(env) || - ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) && - access_type != MMU_INST_FETCH)) { + if (two_stage_lookup) { /* Two stage lookup */ ret = get_physical_address(env, &pa, &prot, address, &env->guest_phys_fault_addr, access_type, @@ -1278,13 +1292,14 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, im_address = pa; ret = get_physical_address(env, &pa, &prot2, im_address, NULL, - access_type, mmu_idx, false, true, + access_type, MMUIdx_U, false, true, false); qemu_log_mask(CPU_LOG_MMU, - "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical " - HWADDR_FMT_plx " prot %d\n", - __func__, im_address, ret, pa, prot2); + "%s 2nd-stage address=%" VADDR_PRIx + " ret %d physical " + HWADDR_FMT_plx " prot %d\n", + __func__, im_address, ret, pa, prot2); prot &= prot2; @@ -1346,9 +1361,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, return false; } else { raise_mmu_exception(env, address, access_type, pmp_violation, - first_stage_error, - riscv_cpu_virt_enabled(env) || - riscv_cpu_two_stage_lookup(mmu_idx), + first_stage_error, two_stage_lookup, two_stage_indirect_error); cpu_loop_exit_restore(cs, retaddr); } @@ -1577,7 +1590,8 @@ void riscv_cpu_do_interrupt(CPUState *cs) bool write_gva = false; uint64_t s; - /* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide + /* + * cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide * so we mask off the MSB and separate into trap type and cause. */ bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG); @@ -1654,9 +1668,9 @@ void riscv_cpu_do_interrupt(CPUState *cs) if (env->priv == PRV_M) { cause = RISCV_EXCP_M_ECALL; - } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) { + } else if (env->priv == PRV_S && env->virt_enabled) { cause = RISCV_EXCP_VS_ECALL; - } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) { + } else if (env->priv == PRV_S && !env->virt_enabled) { cause = RISCV_EXCP_S_ECALL; } else if (env->priv == PRV_U) { cause = RISCV_EXCP_U_ECALL; @@ -1679,7 +1693,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) if (riscv_has_ext(env, RVH)) { uint64_t hdeleg = async ? env->hideleg : env->hedeleg; - if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) { + if (env->virt_enabled && ((hdeleg >> cause) & 1)) { /* Trap to VS mode */ /* * See if we need to adjust cause. Yes if its VS mode interrupt @@ -1690,14 +1704,12 @@ void riscv_cpu_do_interrupt(CPUState *cs) cause = cause - 1; } write_gva = false; - } else if (riscv_cpu_virt_enabled(env)) { + } else if (env->virt_enabled) { /* Trap into HS mode, from virt */ riscv_cpu_swap_hypervisor_regs(env); env->hstatus = set_field(env->hstatus, HSTATUS_SPVP, env->priv); - env->hstatus = set_field(env->hstatus, HSTATUS_SPV, - riscv_cpu_virt_enabled(env)); - + env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true); htval = env->guest_phys_fault_addr; @@ -1721,17 +1733,17 @@ void riscv_cpu_do_interrupt(CPUState *cs) env->htval = htval; env->htinst = tinst; env->pc = (env->stvec >> 2 << 2) + - ((async && (env->stvec & 3) == 1) ? 
cause * 4 : 0); + ((async && (env->stvec & 3) == 1) ? cause * 4 : 0); riscv_cpu_set_mode(env, PRV_S); } else { /* handle the trap in M-mode */ if (riscv_has_ext(env, RVH)) { - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { riscv_cpu_swap_hypervisor_regs(env); } env->mstatus = set_field(env->mstatus, MSTATUS_MPV, - riscv_cpu_virt_enabled(env)); - if (riscv_cpu_virt_enabled(env) && tval) { + env->virt_enabled); + if (env->virt_enabled && tval) { env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1); } @@ -1752,11 +1764,12 @@ void riscv_cpu_do_interrupt(CPUState *cs) env->mtval2 = mtval2; env->mtinst = tinst; env->pc = (env->mtvec >> 2 << 2) + - ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0); + ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0); riscv_cpu_set_mode(env, PRV_M); } - /* NOTE: it is not necessary to yield load reservations here. It is only + /* + * NOTE: it is not necessary to yield load reservations here. It is only * necessary for an SC from "another hart" to cause a load reservation * to be yielded. Refer to the memory consistency model section of the * RISC-V ISA Specification. diff --git a/target/riscv/cpu_vendorid.h b/target/riscv/cpu_vendorid.h index a5aa249bc9..96b6b9c2cb 100644 --- a/target/riscv/cpu_vendorid.h +++ b/target/riscv/cpu_vendorid.h @@ -3,4 +3,8 @@ #define THEAD_VENDOR_ID 0x5b7 +#define VEYRON_V1_MARCHID 0x8000000000010000 +#define VEYRON_V1_MIMPID 0x111 +#define VEYRON_V1_MVENDORID 0x61f + #endif /* TARGET_RISCV_CPU_VENDORID_H */ diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 1b0a0c1693..4451bd1263 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -25,6 +25,7 @@ #include "time_helper.h" #include "qemu/main-loop.h" #include "exec/exec-all.h" +#include "exec/tb-flush.h" #include "sysemu/cpu-timers.h" #include "qemu/guest-random.h" #include "qapi/error.h" @@ -42,14 +43,11 @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops) /* Predicates */ #if !defined(CONFIG_USER_ONLY) -static RISCVException smstateen_acc_ok(CPURISCVState *env, int index, - uint64_t bit) +RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit) { - bool virt = riscv_cpu_virt_enabled(env); - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + bool virt = env->virt_enabled; - if (env->priv == PRV_M || !cpu->cfg.ext_smstateen) { + if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) { return RISCV_EXCP_NONE; } @@ -81,7 +79,7 @@ static RISCVException fs(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) if (!env->debugger && !riscv_cpu_fp_enabled(env) && - !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) { + !riscv_cpu_cfg(env)->ext_zfinx) { return RISCV_EXCP_ILLEGAL_INST; } #endif @@ -90,11 +88,7 @@ static RISCVException fs(CPURISCVState *env, int csrno) static RISCVException vs(CPURISCVState *env, int csrno) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - - if (env->misa_ext & RVV || - cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) { + if (riscv_cpu_cfg(env)->ext_zve32f) { #if !defined(CONFIG_USER_ONLY) if (!env->debugger && !riscv_cpu_vector_enabled(env)) { return RISCV_EXCP_ILLEGAL_INST; @@ -108,8 +102,7 @@ static RISCVException vs(CPURISCVState *env, int csrno) static RISCVException ctr(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); int ctr_index; target_ulong ctr_mask; int base_csrno = CSR_CYCLE; @@ -134,11 +127,15 @@ static RISCVException ctr(CPURISCVState 
*env, int csrno) skip_ext_pmu_check: + if (env->debugger) { + return RISCV_EXCP_NONE; + } + if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) { return RISCV_EXCP_ILLEGAL_INST; } - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { if (!get_field(env->hcounteren, ctr_mask) || (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; @@ -163,11 +160,26 @@ static RISCVException ctr32(CPURISCVState *env, int csrno) return ctr(env, csrno); } +static RISCVException zcmt(CPURISCVState *env, int csrno) +{ + if (!riscv_cpu_cfg(env)->ext_zcmt) { + return RISCV_EXCP_ILLEGAL_INST; + } + +#if !defined(CONFIG_USER_ONLY) + RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_JVT); + if (ret != RISCV_EXCP_NONE) { + return ret; + } +#endif + + return RISCV_EXCP_NONE; +} + #if !defined(CONFIG_USER_ONLY) static RISCVException mctr(CPURISCVState *env, int csrno) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + int pmu_num = riscv_cpu_cfg(env)->pmu_num; int ctr_index; int base_csrno = CSR_MHPMCOUNTER3; @@ -176,8 +188,8 @@ static RISCVException mctr(CPURISCVState *env, int csrno) base_csrno += 0x80; } ctr_index = csrno - base_csrno; - if (!cpu->cfg.pmu_num || ctr_index >= cpu->cfg.pmu_num) { - /* The PMU is not enabled or counter is out of range*/ + if (!pmu_num || ctr_index >= pmu_num) { + /* The PMU is not enabled or counter is out of range */ return RISCV_EXCP_ILLEGAL_INST; } @@ -195,10 +207,7 @@ static RISCVException mctr32(CPURISCVState *env, int csrno) static RISCVException sscofpmf(CPURISCVState *env, int csrno) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - - if (!cpu->cfg.ext_sscofpmf) { + if (!riscv_cpu_cfg(env)->ext_sscofpmf) { return RISCV_EXCP_ILLEGAL_INST; } @@ -222,9 +231,7 @@ static RISCVException any32(CPURISCVState *env, int csrno) static int aia_any(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_smaia) { + if (!riscv_cpu_cfg(env)->ext_smaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -233,9 +240,7 @@ static int aia_any(CPURISCVState *env, int csrno) static int aia_any32(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_smaia) { + if (!riscv_cpu_cfg(env)->ext_smaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -262,9 +267,7 @@ static int smode32(CPURISCVState *env, int csrno) static int aia_smode(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_ssaia) { + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -273,9 +276,7 @@ static int aia_smode(CPURISCVState *env, int csrno) static int aia_smode32(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_ssaia) { + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -321,10 +322,7 @@ static RISCVException umode32(CPURISCVState *env, int csrno) static RISCVException mstateen(CPURISCVState *env, int csrno) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - - if (!cpu->cfg.ext_smstateen) { + if (!riscv_cpu_cfg(env)->ext_smstateen) { return RISCV_EXCP_ILLEGAL_INST; } @@ -333,20 +331,26 @@ static RISCVException mstateen(CPURISCVState *env, int csrno) static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - - if (!cpu->cfg.ext_smstateen) { + if (!riscv_cpu_cfg(env)->ext_smstateen) { return RISCV_EXCP_ILLEGAL_INST; } + RISCVException ret = 
hmode(env, csrno); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + if (env->debugger) { + return RISCV_EXCP_NONE; + } + if (env->priv < PRV_M) { if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) { return RISCV_EXCP_ILLEGAL_INST; } } - return hmode(env, csrno); + return RISCV_EXCP_NONE; } static RISCVException hstateen(CPURISCVState *env, int csrno) @@ -361,15 +365,22 @@ static RISCVException hstateenh(CPURISCVState *env, int csrno) static RISCVException sstateen(CPURISCVState *env, int csrno) { - bool virt = riscv_cpu_virt_enabled(env); + bool virt = env->virt_enabled; int index = csrno - CSR_SSTATEEN0; - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - if (!cpu->cfg.ext_smstateen) { + if (!riscv_cpu_cfg(env)->ext_smstateen) { return RISCV_EXCP_ILLEGAL_INST; } + RISCVException ret = smode(env, csrno); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + if (env->debugger) { + return RISCV_EXCP_NONE; + } + if (env->priv < PRV_M) { if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) { return RISCV_EXCP_ILLEGAL_INST; @@ -382,9 +393,86 @@ static RISCVException sstateen(CPURISCVState *env, int csrno) } } + return RISCV_EXCP_NONE; +} + +static RISCVException sstc(CPURISCVState *env, int csrno) +{ + bool hmode_check = false; + + if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) { + hmode_check = true; + } + + RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + if (env->debugger) { + return RISCV_EXCP_NONE; + } + + if (env->priv == PRV_M) { + return RISCV_EXCP_NONE; + } + + /* + * No need of separate function for rv32 as menvcfg stores both menvcfg + * menvcfgh for RV32. 
+ */ + if (!(get_field(env->mcounteren, COUNTEREN_TM) && + get_field(env->menvcfg, MENVCFG_STCE))) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if (env->virt_enabled) { + if (!(get_field(env->hcounteren, COUNTEREN_TM) && + get_field(env->henvcfg, HENVCFG_STCE))) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + } + + return RISCV_EXCP_NONE; +} + +static RISCVException sstc_32(CPURISCVState *env, int csrno) +{ + if (riscv_cpu_mxl(env) != MXL_RV32) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return sstc(env, csrno); +} + +static RISCVException satp(CPURISCVState *env, int csrno) +{ + if (env->priv == PRV_S && !env->virt_enabled && + get_field(env->mstatus, MSTATUS_TVM)) { + return RISCV_EXCP_ILLEGAL_INST; + } + if (env->priv == PRV_S && env->virt_enabled && + get_field(env->hstatus, HSTATUS_VTVM)) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + return smode(env, csrno); } +static RISCVException hgatp(CPURISCVState *env, int csrno) +{ + if (env->priv == PRV_S && !env->virt_enabled && + get_field(env->mstatus, MSTATUS_TVM)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return hmode(env, csrno); +} + /* Checks if PointerMasking registers could be accessed */ static RISCVException pointer_masking(CPURISCVState *env, int csrno) { @@ -397,9 +485,7 @@ static RISCVException pointer_masking(CPURISCVState *env, int csrno) static int aia_hmode(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_ssaia) { + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -408,9 +494,7 @@ static int aia_hmode(CPURISCVState *env, int csrno) static int aia_hmode32(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_ssaia) { + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -419,7 +503,16 @@ static int aia_hmode32(CPURISCVState *env, int csrno) static RISCVException pmp(CPURISCVState *env, int csrno) { - if (riscv_feature(env, RISCV_FEATURE_PMP)) { + if (riscv_cpu_cfg(env)->pmp) { + if (csrno <= CSR_PMPCFG3) { + uint32_t reg_index = csrno - CSR_PMPCFG0; + + /* TODO: RV128 restriction check */ + if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) { + return RISCV_EXCP_ILLEGAL_INST; + } + } + return RISCV_EXCP_NONE; } @@ -428,7 +521,7 @@ static RISCVException pmp(CPURISCVState *env, int csrno) static RISCVException epmp(CPURISCVState *env, int csrno) { - if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) { + if (riscv_cpu_cfg(env)->epmp) { return RISCV_EXCP_NONE; } @@ -437,7 +530,7 @@ static RISCVException epmp(CPURISCVState *env, int csrno) static RISCVException debug(CPURISCVState *env, int csrno) { - if (riscv_feature(env, RISCV_FEATURE_DEBUG)) { + if (riscv_cpu_cfg(env)->debug) { return RISCV_EXCP_NONE; } @@ -447,13 +540,15 @@ static RISCVException debug(CPURISCVState *env, int csrno) static RISCVException seed(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_zkr) { + if (!riscv_cpu_cfg(env)->ext_zkr) { return RISCV_EXCP_ILLEGAL_INST; } #if !defined(CONFIG_USER_ONLY) + if (env->debugger) { + return RISCV_EXCP_NONE; + } + /* * With a CSR read-write instruction: * 1) The seed CSR is always available in machine mode as normal. 
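Besides gating on riscv_cpu_cfg(env)->pmp, the extended pmp() predicate above now rejects odd pmpcfg indices on RV64: pmpcfg1 and pmpcfg3 only exist when MXLEN is 32, since each 64-bit pmpcfg register covers the range of two RV32 registers. A tiny sketch of just that index rule (the hunk's TODO leaves the corresponding RV128 rule open):

#include <stdbool.h>
#include <stdio.h>

/* pmpcfg0..pmpcfg3 exist on RV32; on RV64 only the even-numbered
 * registers are legal, each covering 8 pmpNcfg fields instead of 4. */
static bool pmpcfg_index_valid(unsigned reg_index, unsigned mxlen)
{
    return (reg_index & 1) == 0 || mxlen == 32;
}

int main(void)
{
    for (unsigned i = 0; i < 4; i++) {
        printf("pmpcfg%u: rv32=%-7s rv64=%s\n", i,
               pmpcfg_index_valid(i, 32) ? "ok" : "illegal",
               pmpcfg_index_valid(i, 64) ? "ok" : "illegal");
    }
    return 0;
}

An illegal index makes the predicate return RISCV_EXCP_ILLEGAL_INST before the read/write handlers ever run, which is the usual division of labour for csr_ops predicates.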
@@ -465,7 +560,7 @@ static RISCVException seed(CPURISCVState *env, int csrno) */ if (env->priv == PRV_M) { return RISCV_EXCP_NONE; - } else if (riscv_cpu_virt_enabled(env)) { + } else if (env->virt_enabled) { if (env->mseccfg & MSECCFG_SSEED) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } else { @@ -572,7 +667,7 @@ static RISCVException read_vl(CPURISCVState *env, int csrno, static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env_archcpu(env)->cfg.vlen >> 3; + *val = riscv_cpu_cfg(env)->vlen >> 3; return RISCV_EXCP_NONE; } @@ -627,7 +722,7 @@ static RISCVException write_vstart(CPURISCVState *env, int csrno, * The vstart CSR is defined to have only enough writable bits * to hold the largest element index, i.e. lg2(VLEN) bits. */ - env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen)); + env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlen)); return RISCV_EXCP_NONE; } @@ -806,7 +901,7 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, counter.mhpmcounter_val; if (get_field(env->mcountinhibit, BIT(ctr_idx))) { - /** + /* * Counter should not increment if inhibit bit is set. We can't really * stop the icount counting. Just return the counter value written by * the supervisor to indicate that counter was not incremented. @@ -820,7 +915,7 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, } } - /** + /* * The kernel computes the perf delta by subtracting the current value from * the value it initialized previously (ctr_val). */ @@ -893,7 +988,7 @@ static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val) static RISCVException read_time(CPURISCVState *env, int csrno, target_ulong *val) { - uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0; + uint64_t delta = env->virt_enabled ? env->htimedelta : 0; if (!env->rdtime_fn) { return RISCV_EXCP_ILLEGAL_INST; @@ -906,7 +1001,7 @@ static RISCVException read_time(CPURISCVState *env, int csrno, static RISCVException read_timeh(CPURISCVState *env, int csrno, target_ulong *val) { - uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0; + uint64_t delta = env->virt_enabled ? env->htimedelta : 0; if (!env->rdtime_fn) { return RISCV_EXCP_ILLEGAL_INST; @@ -916,54 +1011,8 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException sstc(CPURISCVState *env, int csrno) -{ - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - bool hmode_check = false; - - if (!cpu->cfg.ext_sstc || !env->rdtime_fn) { - return RISCV_EXCP_ILLEGAL_INST; - } - - if (env->priv == PRV_M) { - return RISCV_EXCP_NONE; - } - - /* - * No need of separate function for rv32 as menvcfg stores both menvcfg - * menvcfgh for RV32. - */ - if (!(get_field(env->mcounteren, COUNTEREN_TM) && - get_field(env->menvcfg, MENVCFG_STCE))) { - return RISCV_EXCP_ILLEGAL_INST; - } - - if (riscv_cpu_virt_enabled(env)) { - if (!(get_field(env->hcounteren, COUNTEREN_TM) && - get_field(env->henvcfg, HENVCFG_STCE))) { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } - } - - if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) { - hmode_check = true; - } - - return hmode_check ? 
hmode(env, csrno) : smode(env, csrno); -} - -static RISCVException sstc_32(CPURISCVState *env, int csrno) -{ - if (riscv_cpu_mxl(env) != MXL_RV32) { - return RISCV_EXCP_ILLEGAL_INST; - } - - return sstc(env, csrno); -} - static RISCVException read_vstimecmp(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->vstimecmp; @@ -971,7 +1020,7 @@ static RISCVException read_vstimecmp(CPURISCVState *env, int csrno, } static RISCVException read_vstimecmph(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->vstimecmp >> 32; @@ -979,29 +1028,25 @@ static RISCVException read_vstimecmph(CPURISCVState *env, int csrno, } static RISCVException write_vstimecmp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { - RISCVCPU *cpu = env_archcpu(env); - if (riscv_cpu_mxl(env) == MXL_RV32) { env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val); } else { env->vstimecmp = val; } - riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp, + riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, env->htimedelta, MIP_VSTIP); return RISCV_EXCP_NONE; } static RISCVException write_vstimecmph(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { - RISCVCPU *cpu = env_archcpu(env); - env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val); - riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp, + riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, env->htimedelta, MIP_VSTIP); return RISCV_EXCP_NONE; @@ -1010,7 +1055,7 @@ static RISCVException write_vstimecmph(CPURISCVState *env, int csrno, static RISCVException read_stimecmp(CPURISCVState *env, int csrno, target_ulong *val) { - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { *val = env->vstimecmp; } else { *val = env->stimecmp; @@ -1020,9 +1065,9 @@ static RISCVException read_stimecmp(CPURISCVState *env, int csrno, } static RISCVException read_stimecmph(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { *val = env->vstimecmp >> 32; } else { *val = env->stimecmp >> 32; @@ -1032,11 +1077,9 @@ static RISCVException read_stimecmph(CPURISCVState *env, int csrno, } static RISCVException write_stimecmp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { - RISCVCPU *cpu = env_archcpu(env); - - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { if (env->hvictl & HVICTL_VTI) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } @@ -1049,17 +1092,15 @@ static RISCVException write_stimecmp(CPURISCVState *env, int csrno, env->stimecmp = val; } - riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP); + riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP); return RISCV_EXCP_NONE; } static RISCVException write_stimecmph(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { - RISCVCPU *cpu = env_archcpu(env); - - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { if (env->hvictl & HVICTL_VTI) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } @@ -1067,7 +1108,7 @@ static RISCVException write_stimecmph(CPURISCVState *env, int csrno, } env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val); - riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP); + riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP); return RISCV_EXCP_NONE; } @@ -1120,19 +1161,20 @@ static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE 
| SSTATUS_SPIE | static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP | SIP_LCOFIP; static const target_ulong hip_writable_mask = MIP_VSSIP; -static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP; +static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | + MIP_VSEIP; static const target_ulong vsip_writable_mask = MIP_VSSIP; -static const char valid_vm_1_10_32[16] = { - [VM_1_10_MBARE] = 1, - [VM_1_10_SV32] = 1 +const bool valid_vm_1_10_32[16] = { + [VM_1_10_MBARE] = true, + [VM_1_10_SV32] = true }; -static const char valid_vm_1_10_64[16] = { - [VM_1_10_MBARE] = 1, - [VM_1_10_SV39] = 1, - [VM_1_10_SV48] = 1, - [VM_1_10_SV57] = 1 +const bool valid_vm_1_10_64[16] = { + [VM_1_10_MBARE] = true, + [VM_1_10_SV39] = true, + [VM_1_10_SV48] = true, + [VM_1_10_SV57] = true }; /* Machine Information Registers */ @@ -1152,30 +1194,21 @@ static RISCVException write_ignore(CPURISCVState *env, int csrno, static RISCVException read_mvendorid(CPURISCVState *env, int csrno, target_ulong *val) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - - *val = cpu->cfg.mvendorid; + *val = riscv_cpu_cfg(env)->mvendorid; return RISCV_EXCP_NONE; } static RISCVException read_marchid(CPURISCVState *env, int csrno, target_ulong *val) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - - *val = cpu->cfg.marchid; + *val = riscv_cpu_cfg(env)->marchid; return RISCV_EXCP_NONE; } static RISCVException read_mimpid(CPURISCVState *env, int csrno, target_ulong *val) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - - *val = cpu->cfg.mimpid; + *val = riscv_cpu_cfg(env)->mimpid; return RISCV_EXCP_NONE; } @@ -1215,13 +1248,36 @@ static RISCVException read_mstatus(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static int validate_vm(CPURISCVState *env, target_ulong vm) +static bool validate_vm(CPURISCVState *env, target_ulong vm) { - if (riscv_cpu_mxl(env) == MXL_RV32) { - return valid_vm_1_10_32[vm & 0xf]; - } else { - return valid_vm_1_10_64[vm & 0xf]; + return (vm & 0xf) <= + satp_mode_max_from_map(riscv_cpu_cfg(env)->satp_mode.map); +} + +static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp, + target_ulong val) +{ + bool valid = false; + target_ulong new_mpp = get_field(val, MSTATUS_MPP); + + switch (new_mpp) { + case PRV_M: + valid = true; + break; + case PRV_S: + valid = riscv_has_ext(env, RVS); + break; + case PRV_U: + valid = riscv_has_ext(env, RVU); + break; } + + /* Leave the field unchanged if the new_mpp value is invalid */ + if (!valid) { + val = set_field(val, MSTATUS_MPP, old_mpp); + } + + return val; } static RISCVException write_mstatus(CPURISCVState *env, int csrno, @@ -1231,9 +1287,14 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, uint64_t mask = 0; RISCVMXL xl = riscv_cpu_mxl(env); + /* + * The MPP field has been made WARL since priv version 1.11. However, + legalizing it will not break any software running on 1.10. 
+ */ + val = legalize_mpp(env, get_field(mstatus, MSTATUS_MPP), val); + /* flush tlb on mstatus fields that affect VM */ - if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV | - MSTATUS_MPRV | MSTATUS_SUM)) { + if ((val ^ mstatus) & MSTATUS_MXR) { tlb_flush(env_cpu(env)); } mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE | @@ -1281,10 +1342,6 @@ static RISCVException write_mstatush(CPURISCVState *env, int csrno, uint64_t valh = (uint64_t)val << 32; uint64_t mask = MSTATUS_MPV | MSTATUS_GVA; - if ((valh ^ env->mstatus) & (MSTATUS_MPV)) { - tlb_flush(env_cpu(env)); - } - env->mstatus = (env->mstatus & ~mask) | (valh & mask); return RISCV_EXCP_NONE; @@ -1293,7 +1350,8 @@ static RISCVException write_mstatush(CPURISCVState *env, int csrno, static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno, Int128 *val) { - *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus)); + *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, + env->mstatus)); return RISCV_EXCP_NONE; } @@ -1329,7 +1387,7 @@ static RISCVException read_misa(CPURISCVState *env, int csrno, static RISCVException write_misa(CPURISCVState *env, int csrno, target_ulong val) { - if (!riscv_feature(env, RISCV_FEATURE_MISA)) { + if (!riscv_cpu_cfg(env)->misa_w) { /* drop write to misa */ return RISCV_EXCP_NONE; } @@ -1342,7 +1400,8 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, /* 'E' excludes all other extensions */ if (val & RVE) { - /* when we support 'E' we can do "val = RVE;" however + /* + * when we support 'E' we can do "val = RVE;" however * for now we just drop writes if 'E' is present. */ return RISCV_EXCP_NONE; @@ -1356,15 +1415,13 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, /* Mask extensions that are not supported by this hart */ val &= env->misa_ext_mask; - /* Mask extensions that are not supported by QEMU */ - val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV); - /* 'D' depends on 'F', so clear 'D' if 'F' is not present */ if ((val & RVD) && !(val & RVF)) { val &= ~RVD; } - /* Suppress 'C' if next instruction is not aligned + /* + * Suppress 'C' if next instruction is not aligned * TODO: this should check next_pc */ if ((val & RVC) && (GETPC() & ~3) != 0) { @@ -1526,7 +1583,7 @@ static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val) static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno) { - if (!riscv_cpu_virt_enabled(env)) { + if (!env->virt_enabled) { return csrno; } @@ -1683,7 +1740,7 @@ static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val, done: if (ret) { - return (riscv_cpu_virt_enabled(env) && virt) ? + return (env->virt_enabled && virt) ? RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; } return RISCV_EXCP_NONE; @@ -1737,7 +1794,7 @@ static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val, done: if (ret) { - return (riscv_cpu_virt_enabled(env) && virt) ? + return (env->virt_enabled && virt) ? 
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; } return RISCV_EXCP_NONE; @@ -1833,28 +1890,28 @@ static RISCVException write_mscratch(CPURISCVState *env, int csrno, } static RISCVException read_mepc(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->mepc; return RISCV_EXCP_NONE; } static RISCVException write_mepc(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { env->mepc = val; return RISCV_EXCP_NONE; } static RISCVException read_mcause(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->mcause; return RISCV_EXCP_NONE; } static RISCVException write_mcause(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { env->mcause = val; return RISCV_EXCP_NONE; @@ -1876,19 +1933,22 @@ static RISCVException write_mtval(CPURISCVState *env, int csrno, /* Execution environment configuration setup */ static RISCVException read_menvcfg(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->menvcfg; return RISCV_EXCP_NONE; } static RISCVException write_menvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { + const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE; if (riscv_cpu_mxl(env) == MXL_RV64) { - mask |= MENVCFG_PBMTE | MENVCFG_STCE; + mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | + (cfg->ext_sstc ? MENVCFG_STCE : 0) | + (cfg->ext_svadu ? MENVCFG_HADE : 0); } env->menvcfg = (env->menvcfg & ~mask) | (val & mask); @@ -1896,16 +1956,19 @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno, } static RISCVException read_menvcfgh(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->menvcfg >> 32; return RISCV_EXCP_NONE; } static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { - uint64_t mask = MENVCFG_PBMTE | MENVCFG_STCE; + const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); + uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | + (cfg->ext_sstc ? MENVCFG_STCE : 0) | + (cfg->ext_svadu ? 
MENVCFG_HADE : 0); uint64_t valh = (uint64_t)val << 32; env->menvcfg = (env->menvcfg & ~mask) | (valh & mask); @@ -1914,7 +1977,7 @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, } static RISCVException read_senvcfg(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { RISCVException ret; @@ -1928,7 +1991,7 @@ static RISCVException read_senvcfg(CPURISCVState *env, int csrno, } static RISCVException write_senvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE; RISCVException ret; @@ -1943,7 +2006,7 @@ static RISCVException write_senvcfg(CPURISCVState *env, int csrno, } static RISCVException read_henvcfg(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { RISCVException ret; @@ -1952,12 +2015,18 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno, return ret; } - *val = env->henvcfg; + /* + * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0 + * henvcfg.stce is read_only 0 when menvcfg.stce = 0 + * henvcfg.hade is read_only 0 when menvcfg.hade = 0 + */ + *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) | + env->menvcfg); return RISCV_EXCP_NONE; } static RISCVException write_henvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE; RISCVException ret; @@ -1968,7 +2037,7 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno, } if (riscv_cpu_mxl(env) == MXL_RV64) { - mask |= HENVCFG_PBMTE | HENVCFG_STCE; + mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE); } env->henvcfg = (env->henvcfg & ~mask) | (val & mask); @@ -1977,7 +2046,7 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno, } static RISCVException read_henvcfgh(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { RISCVException ret; @@ -1986,14 +2055,16 @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno, return ret; } - *val = env->henvcfg >> 32; + *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) | + env->menvcfg)) >> 32; return RISCV_EXCP_NONE; } static RISCVException write_henvcfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { - uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE; + uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | + HENVCFG_HADE); uint64_t valh = (uint64_t)val << 32; RISCVException ret; @@ -2034,13 +2105,13 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno, } static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val); } static RISCVException read_mstateenh(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32; @@ -2061,7 +2132,7 @@ static RISCVException write_mstateenh(CPURISCVState *env, int csrno, } static RISCVException write_mstateen0h(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; @@ -2069,7 +2140,7 @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno, } static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ 
-2106,7 +2177,7 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno, } static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -2145,7 +2216,7 @@ static RISCVException write_hstateen0h(CPURISCVState *env, int csrno, } static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -2153,7 +2224,7 @@ static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno, static RISCVException read_sstateen(CPURISCVState *env, int csrno, target_ulong *val) { - bool virt = riscv_cpu_virt_enabled(env); + bool virt = env->virt_enabled; int index = csrno - CSR_SSTATEEN0; *val = env->sstateen[index] & env->mstateen[index]; @@ -2167,7 +2238,7 @@ static RISCVException read_sstateen(CPURISCVState *env, int csrno, static RISCVException write_sstateen(CPURISCVState *env, int csrno, uint64_t mask, target_ulong new_val) { - bool virt = riscv_cpu_virt_enabled(env); + bool virt = env->virt_enabled; int index = csrno - CSR_SSTATEEN0; uint64_t wr_mask; uint64_t *reg; @@ -2201,7 +2272,6 @@ static RISCVException rmw_mip64(CPURISCVState *env, int csrno, uint64_t *ret_val, uint64_t new_val, uint64_t wr_mask) { - RISCVCPU *cpu = env_archcpu(env); uint64_t old_mip, mask = wr_mask & delegable_ints; uint32_t gin; @@ -2210,14 +2280,14 @@ static RISCVException rmw_mip64(CPURISCVState *env, int csrno, new_val |= env->external_seip * MIP_SEIP; } - if (cpu->cfg.ext_sstc && (env->priv == PRV_M) && + if (riscv_cpu_cfg(env)->ext_sstc && (env->priv == PRV_M) && get_field(env->menvcfg, MENVCFG_STCE)) { /* sstc extension forbids STIP & VSTIP to be writeable in mip */ mask = mask & ~(MIP_STIP | MIP_VSTIP); } if (mask) { - old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask)); + old_mip = riscv_cpu_update_mip(env, mask, (new_val & mask)); } else { old_mip = env->mip; } @@ -2363,7 +2433,7 @@ static RISCVException rmw_sie64(CPURISCVState *env, int csrno, RISCVException ret; uint64_t mask = env->mideleg & S_MODE_INTERRUPTS; - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { if (env->hvictl & HVICTL_VTI) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } @@ -2573,7 +2643,7 @@ static RISCVException rmw_sip64(CPURISCVState *env, int csrno, RISCVException ret; uint64_t mask = env->mideleg & sip_writable_mask; - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { if (env->hvictl & HVICTL_VTI) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } @@ -2624,26 +2694,21 @@ static RISCVException rmw_siph(CPURISCVState *env, int csrno, static RISCVException read_satp(CPURISCVState *env, int csrno, target_ulong *val) { - if (!riscv_feature(env, RISCV_FEATURE_MMU)) { + if (!riscv_cpu_cfg(env)->mmu) { *val = 0; return RISCV_EXCP_NONE; } - - if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { - return RISCV_EXCP_ILLEGAL_INST; - } else { - *val = env->satp; - } - + *val = env->satp; return RISCV_EXCP_NONE; } static RISCVException write_satp(CPURISCVState *env, int csrno, target_ulong val) { - target_ulong vm, mask; + target_ulong mask; + bool vm; - if (!riscv_feature(env, RISCV_FEATURE_MMU)) { + if (!riscv_cpu_cfg(env)->mmu) { return RISCV_EXCP_NONE; } @@ -2656,18 +2721,14 @@ static RISCVException write_satp(CPURISCVState *env, int csrno, } if (vm && mask) { - if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { - return 
RISCV_EXCP_ILLEGAL_INST; - } else { - /* - * The ISA defines SATP.MODE=Bare as "no translation", but we still - * pass these through QEMU's TLB emulation as it improves - * performance. Flushing the TLB on SATP writes with paging - * enabled avoids leaking those invalid cached mappings. - */ - tlb_flush(env_cpu(env)); - env->satp = val; - } + /* + * The ISA defines SATP.MODE=Bare as "no translation", but we still + * pass these through QEMU's TLB emulation as it improves + * performance. Flushing the TLB on SATP writes with paging + * enabled avoids leaking those invalid cached mappings. + */ + tlb_flush(env_cpu(env)); + env->satp = val; } return RISCV_EXCP_NONE; } @@ -2765,7 +2826,7 @@ static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val) int irq; uint8_t iprio; - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { return read_vstopi(env, CSR_VSTOPI, val); } @@ -2805,7 +2866,8 @@ static RISCVException write_hstatus(CPURISCVState *env, int csrno, { env->hstatus = val; if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) { - qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options."); + qemu_log_mask(LOG_UNIMP, + "QEMU does not support mixed HSXLEN options."); } if (get_field(val, HSTATUS_VSBE) != 0) { qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests."); @@ -2976,7 +3038,7 @@ static RISCVException write_hgeie(CPURISCVState *env, int csrno, val &= ((((target_ulong)1) << env->geilen) - 1) << 1; env->hgeie = val; /* Update mip.SGEIP bit */ - riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP, + riscv_cpu_update_mip(env, MIP_SGEIP, BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); return RISCV_EXCP_NONE; } @@ -3045,8 +3107,6 @@ static RISCVException read_htimedelta(CPURISCVState *env, int csrno, static RISCVException write_htimedelta(CPURISCVState *env, int csrno, target_ulong val) { - RISCVCPU *cpu = env_archcpu(env); - if (!env->rdtime_fn) { return RISCV_EXCP_ILLEGAL_INST; } @@ -3057,8 +3117,8 @@ static RISCVException write_htimedelta(CPURISCVState *env, int csrno, env->htimedelta = val; } - if (cpu->cfg.ext_sstc && env->rdtime_fn) { - riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp, + if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) { + riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, env->htimedelta, MIP_VSTIP); } @@ -3079,16 +3139,14 @@ static RISCVException read_htimedeltah(CPURISCVState *env, int csrno, static RISCVException write_htimedeltah(CPURISCVState *env, int csrno, target_ulong val) { - RISCVCPU *cpu = env_archcpu(env); - if (!env->rdtime_fn) { return RISCV_EXCP_ILLEGAL_INST; } env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val); - if (cpu->cfg.ext_sstc && env->rdtime_fn) { - riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp, + if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) { + riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, env->htimedelta, MIP_VSTIP); } @@ -3114,7 +3172,7 @@ static int read_hvipriox(CPURISCVState *env, int first_index, /* First index has to be a multiple of number of irqs per register */ if (first_index % num_irqs) { - return (riscv_cpu_virt_enabled(env)) ? + return (env->virt_enabled) ? RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; } @@ -3140,7 +3198,7 @@ static int write_hvipriox(CPURISCVState *env, int first_index, /* First index has to be a multiple of number of irqs per register */ if (first_index % num_irqs) { - return (riscv_cpu_virt_enabled(env)) ? + return (env->virt_enabled) ? 
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; } @@ -3338,30 +3396,18 @@ static RISCVException read_mseccfg(CPURISCVState *env, int csrno, } static RISCVException write_mseccfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { mseccfg_csr_write(env, val); return RISCV_EXCP_NONE; } -static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index) -{ - /* TODO: RV128 restriction check */ - if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) { - return false; - } - return true; -} - static RISCVException read_pmpcfg(CPURISCVState *env, int csrno, target_ulong *val) { uint32_t reg_index = csrno - CSR_PMPCFG0; - if (!check_pmp_reg_index(env, reg_index)) { - return RISCV_EXCP_ILLEGAL_INST; - } - *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0); + *val = pmpcfg_csr_read(env, reg_index); return RISCV_EXCP_NONE; } @@ -3370,10 +3416,7 @@ static RISCVException write_pmpcfg(CPURISCVState *env, int csrno, { uint32_t reg_index = csrno - CSR_PMPCFG0; - if (!check_pmp_reg_index(env, reg_index)) { - return RISCV_EXCP_ILLEGAL_INST; - } - pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val); + pmpcfg_csr_write(env, reg_index, val); return RISCV_EXCP_NONE; } @@ -3491,16 +3534,16 @@ static RISCVException write_mmte(CPURISCVState *env, int csrno, target_ulong wpri_val = val & MMTE_MASK; if (val != wpri_val) { - qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", - "MMTE: WPRI violation written 0x", val, - "vs expected 0x", wpri_val); + qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" + TARGET_FMT_lx "\n", "MMTE: WPRI violation written 0x", + val, "vs expected 0x", wpri_val); } /* for machine mode pm.current is hardwired to 1 */ wpri_val |= MMTE_M_PM_CURRENT; /* hardwiring pm.instruction bit to 0, since it's not supported yet */ wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN); - env->mmte = wpri_val | PM_EXT_DIRTY; + env->mmte = wpri_val | EXT_STATUS_DIRTY; riscv_cpu_update_mask(env); /* Set XS and SD bits, since PM CSRs are dirty */ @@ -3522,9 +3565,9 @@ static RISCVException write_smte(CPURISCVState *env, int csrno, target_ulong wpri_val = val & SMTE_MASK; if (val != wpri_val) { - qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", - "SMTE: WPRI violation written 0x", val, - "vs expected 0x", wpri_val); + qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" + TARGET_FMT_lx "\n", "SMTE: WPRI violation written 0x", + val, "vs expected 0x", wpri_val); } /* if pm.current==0 we can't modify current PM CSRs */ @@ -3550,9 +3593,9 @@ static RISCVException write_umte(CPURISCVState *env, int csrno, target_ulong wpri_val = val & UMTE_MASK; if (val != wpri_val) { - qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", - "UMTE: WPRI violation written 0x", val, - "vs expected 0x", wpri_val); + qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" + TARGET_FMT_lx "\n", "UMTE: WPRI violation written 0x", + val, "vs expected 0x", wpri_val); } if (check_pm_current_disabled(env, csrno)) { @@ -3580,7 +3623,7 @@ static RISCVException write_mpmmask(CPURISCVState *env, int csrno, if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) { env->cur_pmmask = val; } - env->mmte |= PM_EXT_DIRTY; + env->mmte |= EXT_STATUS_DIRTY; /* Set XS and SD bits, since PM CSRs are dirty */ mstatus = env->mstatus | MSTATUS_XS; @@ -3608,7 +3651,7 @@ static RISCVException write_spmmask(CPURISCVState *env, int csrno, if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) { env->cur_pmmask = val; } - env->mmte |= 
PM_EXT_DIRTY; + env->mmte |= EXT_STATUS_DIRTY; /* Set XS and SD bits, since PM CSRs are dirty */ mstatus = env->mstatus | MSTATUS_XS; @@ -3636,7 +3679,7 @@ static RISCVException write_upmmask(CPURISCVState *env, int csrno, if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) { env->cur_pmmask = val; } - env->mmte |= PM_EXT_DIRTY; + env->mmte |= EXT_STATUS_DIRTY; /* Set XS and SD bits, since PM CSRs are dirty */ mstatus = env->mstatus | MSTATUS_XS; @@ -3660,7 +3703,7 @@ static RISCVException write_mpmbase(CPURISCVState *env, int csrno, if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) { env->cur_pmbase = val; } - env->mmte |= PM_EXT_DIRTY; + env->mmte |= EXT_STATUS_DIRTY; /* Set XS and SD bits, since PM CSRs are dirty */ mstatus = env->mstatus | MSTATUS_XS; @@ -3688,7 +3731,7 @@ static RISCVException write_spmbase(CPURISCVState *env, int csrno, if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) { env->cur_pmbase = val; } - env->mmte |= PM_EXT_DIRTY; + env->mmte |= EXT_STATUS_DIRTY; /* Set XS and SD bits, since PM CSRs are dirty */ mstatus = env->mstatus | MSTATUS_XS; @@ -3716,7 +3759,7 @@ static RISCVException write_upmbase(CPURISCVState *env, int csrno, if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) { env->cur_pmbase = val; } - env->mmte |= PM_EXT_DIRTY; + env->mmte |= EXT_STATUS_DIRTY; /* Set XS and SD bits, since PM CSRs are dirty */ mstatus = env->mstatus | MSTATUS_XS; @@ -3772,31 +3815,39 @@ static RISCVException rmw_seed(CPURISCVState *env, int csrno, static inline RISCVException riscv_csrrw_check(CPURISCVState *env, int csrno, - bool write_mask, - RISCVCPU *cpu) + bool write_mask) { /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */ - int read_only = get_field(csrno, 0xC00) == 3; + bool read_only = get_field(csrno, 0xC00) == 3; int csr_min_priv = csr_ops[csrno].min_priv_ver; - /* ensure the CSR extension is enabled. */ - if (!cpu->cfg.ext_icsr) { + /* ensure the CSR extension is enabled */ + if (!riscv_cpu_cfg(env)->ext_icsr) { return RISCV_EXCP_ILLEGAL_INST; } - if (env->priv_ver < csr_min_priv) { - return RISCV_EXCP_ILLEGAL_INST; - } - - /* check predicate */ + /* ensure CSR is implemented by checking predicate */ if (!csr_ops[csrno].predicate) { return RISCV_EXCP_ILLEGAL_INST; } + /* privileged spec version check */ + if (env->priv_ver < csr_min_priv) { + return RISCV_EXCP_ILLEGAL_INST; + } + + /* read / write check */ if (write_mask && read_only) { return RISCV_EXCP_ILLEGAL_INST; } + /* + * The predicate() not only does an existence check but also performs + * access control checks which can trigger, for example, a virtual + * instruction exception in some cases. When a read-only CSR is written in + * those cases, an illegal instruction exception should be triggered instead + * of a virtual instruction exception. Hence this comes after the read / write check. + */ RISCVException ret = csr_ops[csrno].predicate(env, csrno); if (ret != RISCV_EXCP_NONE) { return ret; } @@ -3806,7 +3857,7 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env, int csr_priv, effective_priv = env->priv; if (riscv_has_ext(env, RVH) && env->priv == PRV_S && - !riscv_cpu_virt_enabled(env)) { + !env->virt_enabled) { /* * We are in HS mode. Add 1 to the effective privilege level to * allow us to access the Hypervisor CSRs. 
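Note: the riscv_csrrw_check() hunk above deliberately runs the read-only check before calling predicate(). Restated as a compact sketch (illustrative only, using the same names as the patch), the consolidated order is:

    static RISCVException csr_check_order(CPURISCVState *env, int csrno,
                                          bool write_mask)
    {
        if (!riscv_cpu_cfg(env)->ext_icsr) {                /* 1: Zicsr on */
            return RISCV_EXCP_ILLEGAL_INST;
        }
        if (!csr_ops[csrno].predicate) {                    /* 2: implemented */
            return RISCV_EXCP_ILLEGAL_INST;
        }
        if (env->priv_ver < csr_ops[csrno].min_priv_ver) {  /* 3: priv spec */
            return RISCV_EXCP_ILLEGAL_INST;
        }
        if (write_mask && get_field(csrno, 0xC00) == 3) {   /* 4: read-only */
            return RISCV_EXCP_ILLEGAL_INST;
        }
        return csr_ops[csrno].predicate(env, csrno);        /* 5: access ctl */
    }

With this order a write to a read-only CSR fails with an illegal instruction exception even from VS-mode, where the predicate alone would have raised a virtual instruction fault.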
@@ -3816,7 +3867,7 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env, csr_priv = get_field(csrno, 0x300); if (!env->debugger && (effective_priv < csr_priv)) { - if (csr_priv == (PRV_S + 1) && riscv_cpu_virt_enabled(env)) { + if (csr_priv == (PRV_S + 1) && env->virt_enabled) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } return RISCV_EXCP_ILLEGAL_INST; @@ -3871,9 +3922,7 @@ RISCVException riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask) { - RISCVCPU *cpu = env_archcpu(env); - - RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu); + RISCVException ret = riscv_csrrw_check(env, csrno, write_mask); if (ret != RISCV_EXCP_NONE) { return ret; } @@ -3926,9 +3975,8 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, Int128 new_value, Int128 write_mask) { RISCVException ret; - RISCVCPU *cpu = env_archcpu(env); - ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu); + ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask)); if (ret != RISCV_EXCP_NONE) { return ret; } @@ -3941,7 +3989,8 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, * Fall back to 64-bit version for now, if the 128-bit alternative isn't * at all defined. * Note, some CSRs don't need to extend to MXLEN (64 upper bits non - * significant), for those, this fallback is correctly handling the accesses + * significant), for those, this fallback is correctly handling the + * accesses */ target_ulong old_value; ret = riscv_csrrw_do64(env, csrno, &old_value, @@ -3973,7 +4022,24 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, return ret; } -/* Control and Status Register function table */ +static RISCVException read_jvt(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->jvt; + return RISCV_EXCP_NONE; +} + +static RISCVException write_jvt(CPURISCVState *env, int csrno, + target_ulong val) +{ + env->jvt = val; + return RISCV_EXCP_NONE; +} + +/* + * Control and Status Register function table + * riscv_csr_operations::predicate() must be provided for an implemented CSR + */ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { /* User Floating-Point CSRs */ [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags }, @@ -4003,6 +4069,9 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { /* Crypto Extension */ [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed }, + /* Zcmt Extension */ + [CSR_JVT] = {"jvt", zcmt, read_jvt, write_jvt}, + #if !defined(CONFIG_USER_ONLY) /* Machine Timers and Counters */ [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter, @@ -4137,11 +4206,11 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { /* Supervisor Trap Setup */ [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, - NULL, read_sstatus_i128 }, - [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie }, - [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec }, + NULL, read_sstatus_i128 }, + [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie }, + [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec }, [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, - write_scounteren }, + write_scounteren }, /* Supervisor Trap Handling */ [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch, @@ -4162,7 +4231,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { .min_priv_ver = PRIV_VERSION_1_12_0 }, /* Supervisor Protection and Translation */ - [CSR_SATP] = { "satp", smode, read_satp, write_satp }, + [CSR_SATP] = { "satp", satp, read_satp, write_satp }, /* Supervisor-Level 
Window to Indirectly Accessed Registers (AIA) */ [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect }, @@ -4199,7 +4268,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { .min_priv_ver = PRIV_VERSION_1_12_0 }, [CSR_HGEIP] = { "hgeip", hmode, read_hgeip, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp, + [CSR_HGATP] = { "hgatp", hgatp, read_hgatp, write_hgatp, .min_priv_ver = PRIV_VERSION_1_12_0 }, [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, write_htimedelta, diff --git a/target/riscv/debug.c b/target/riscv/debug.c index b091293069..75ee1c4971 100644 --- a/target/riscv/debug.c +++ b/target/riscv/debug.c @@ -282,8 +282,8 @@ static target_ulong type2_mcontrol_validate(CPURISCVState *env, /* validate size encoding */ size = type2_breakpoint_size(env, ctrl); if (access_size[size] == -1) { - qemu_log_mask(LOG_UNIMP, "access size %d is not supported, using SIZE_ANY\n", - size); + qemu_log_mask(LOG_UNIMP, "access size %d is not supported, using " + "SIZE_ANY\n", size); } else { val |= (ctrl & TYPE2_SIZELO); if (riscv_cpu_mxl(env) == MXL_RV64) { @@ -411,8 +411,8 @@ static target_ulong type6_mcontrol6_validate(CPURISCVState *env, /* validate size encoding */ size = extract32(ctrl, 16, 4); if (access_size[size] == -1) { - qemu_log_mask(LOG_UNIMP, "access size %d is not supported, using SIZE_ANY\n", - size); + qemu_log_mask(LOG_UNIMP, "access size %d is not supported, using " + "SIZE_ANY\n", size); } else { val |= (ctrl & TYPE6_SIZE); } @@ -515,7 +515,7 @@ itrigger_set_count(CPURISCVState *env, int index, int value) static bool check_itrigger_priv(CPURISCVState *env, int index) { target_ulong tdata1 = env->tdata1[index]; - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { /* check VU/VS bit against current privilege level */ return (get_field(tdata1, ITRIGGER_VS) == env->priv) || (get_field(tdata1, ITRIGGER_VU) == env->priv); @@ -696,7 +696,8 @@ target_ulong tdata_csr_read(CPURISCVState *env, int tdata_index) int trigger_type; switch (tdata_index) { case TDATA1: - trigger_type = extract_trigger_type(env, env->tdata1[env->trigger_cur]); + trigger_type = extract_trigger_type(env, + env->tdata1[env->trigger_cur]); if ((trigger_type == TRIGGER_TYPE_INST_CNT) && icount_enabled()) { return deposit64(env->tdata1[env->trigger_cur], 10, 14, itrigger_get_adjust_count(env)); @@ -787,7 +788,7 @@ bool riscv_cpu_debug_check_breakpoint(CPUState *cs) switch (trigger_type) { case TRIGGER_TYPE_AD_MATCH: /* type 2 trigger cannot be fired in VU/VS mode */ - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { return false; } @@ -806,7 +807,7 @@ bool riscv_cpu_debug_check_breakpoint(CPUState *cs) pc = env->tdata2[i]; if ((ctrl & TYPE6_EXEC) && (bp->pc == pc)) { - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { /* check VU/VS bit against current privilege level */ if ((ctrl >> 23) & BIT(env->priv)) { return true; @@ -845,7 +846,7 @@ bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) switch (trigger_type) { case TRIGGER_TYPE_AD_MATCH: /* type 2 trigger cannot be fired in VU/VS mode */ - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { return false; } @@ -880,7 +881,7 @@ bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) } if ((wp->flags & flags) && (wp->vaddr == addr)) { - if (riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { /* check VU/VS bit against current privilege level */ if ((ctrl >> 23) & BIT(env->priv)) { return true; diff --git 
a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c index 449d236df6..5dd14d8390 100644 --- a/target/riscv/fpu_helper.c +++ b/target/riscv/fpu_helper.c @@ -248,8 +248,8 @@ uint64_t helper_fmin_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) float32 frs1 = check_nanbox_s(env, rs1); float32 frs2 = check_nanbox_s(env, rs2); return nanbox_s(env, env->priv_ver < PRIV_VERSION_1_11_0 ? - float32_minnum(frs1, frs2, &env->fp_status) : - float32_minimum_number(frs1, frs2, &env->fp_status)); + float32_minnum(frs1, frs2, &env->fp_status) : + float32_minimum_number(frs1, frs2, &env->fp_status)); } uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) @@ -257,8 +257,8 @@ uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) float32 frs1 = check_nanbox_s(env, rs1); float32 frs2 = check_nanbox_s(env, rs2); return nanbox_s(env, env->priv_ver < PRIV_VERSION_1_11_0 ? - float32_maxnum(frs1, frs2, &env->fp_status) : - float32_maximum_number(frs1, frs2, &env->fp_status)); + float32_maxnum(frs1, frs2, &env->fp_status) : + float32_maximum_number(frs1, frs2, &env->fp_status)); } uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t rs1) @@ -361,15 +361,15 @@ uint64_t helper_fdiv_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) uint64_t helper_fmin_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return env->priv_ver < PRIV_VERSION_1_11_0 ? - float64_minnum(frs1, frs2, &env->fp_status) : - float64_minimum_number(frs1, frs2, &env->fp_status); + float64_minnum(frs1, frs2, &env->fp_status) : + float64_minimum_number(frs1, frs2, &env->fp_status); } uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { return env->priv_ver < PRIV_VERSION_1_11_0 ? - float64_maxnum(frs1, frs2, &env->fp_status) : - float64_maximum_number(frs1, frs2, &env->fp_status); + float64_maxnum(frs1, frs2, &env->fp_status) : + float64_maximum_number(frs1, frs2, &env->fp_status); } uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1) @@ -481,8 +481,8 @@ uint64_t helper_fmin_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2) float16 frs1 = check_nanbox_h(env, rs1); float16 frs2 = check_nanbox_h(env, rs2); return nanbox_h(env, env->priv_ver < PRIV_VERSION_1_11_0 ? - float16_minnum(frs1, frs2, &env->fp_status) : - float16_minimum_number(frs1, frs2, &env->fp_status)); + float16_minnum(frs1, frs2, &env->fp_status) : + float16_minimum_number(frs1, frs2, &env->fp_status)); } uint64_t helper_fmax_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2) @@ -490,8 +490,8 @@ uint64_t helper_fmax_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2) float16 frs1 = check_nanbox_h(env, rs1); float16 frs2 = check_nanbox_h(env, rs2); return nanbox_h(env, env->priv_ver < PRIV_VERSION_1_11_0 ? - float16_maxnum(frs1, frs2, &env->fp_status) : - float16_maximum_number(frs1, frs2, &env->fp_status)); + float16_maxnum(frs1, frs2, &env->fp_status) : + float16_maximum_number(frs1, frs2, &env->fp_status)); } uint64_t helper_fsqrt_h(CPURISCVState *env, uint64_t rs1) diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c index 6e7bbdbd5e..524bede865 100644 --- a/target/riscv/gdbstub.c +++ b/target/riscv/gdbstub.c @@ -18,6 +18,7 @@ #include "qemu/osdep.h" #include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "cpu.h" struct TypeSize { @@ -127,43 +128,9 @@ static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n) return 0; } -/* - * Convert register index number passed by GDB to the correspond - * vector CSR number. 
Vector CSRs are defined after vector registers - * in dynamic generated riscv-vector.xml, thus the starting register index - * of vector CSRs is 32. - * Return 0 if register index number is out of range. - */ -static int riscv_gdb_vector_csrno(int num_regs) -{ - /* - * The order of vector CSRs in the switch case - * should match with the order defined in csr_ops[]. - */ - switch (num_regs) { - case 32: - return CSR_VSTART; - case 33: - return CSR_VXSAT; - case 34: - return CSR_VXRM; - case 35: - return CSR_VCSR; - case 36: - return CSR_VL; - case 37: - return CSR_VTYPE; - case 38: - return CSR_VLENB; - default: - /* Unknown register. */ - return 0; - } -} - static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n) { - uint16_t vlenb = env_archcpu(env)->cfg.vlen >> 3; + uint16_t vlenb = riscv_cpu_cfg(env)->vlen >> 3; if (n < 32) { int i; int cnt = 0; @@ -174,25 +141,12 @@ static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n) return cnt; } - int csrno = riscv_gdb_vector_csrno(n); - - if (!csrno) { - return 0; - } - - target_ulong val = 0; - int result = riscv_csrrw_debug(env, csrno, &val, 0, 0); - - if (result == RISCV_EXCP_NONE) { - return gdb_get_regl(buf, val); - } - return 0; } static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n) { - uint16_t vlenb = env_archcpu(env)->cfg.vlen >> 3; + uint16_t vlenb = riscv_cpu_cfg(env)->vlen >> 3; if (n < 32) { int i; for (i = 0; i < vlenb; i += 8) { @@ -201,19 +155,6 @@ static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n) return vlenb; } - int csrno = riscv_gdb_vector_csrno(n); - - if (!csrno) { - return 0; - } - - target_ulong val = ldtul_p(mem_buf); - int result = riscv_csrrw_debug(env, csrno, NULL, val, -1); - - if (result == RISCV_EXCP_NONE) { - return sizeof(target_ulong); - } - return 0; } @@ -262,7 +203,7 @@ static int riscv_gdb_set_virtual(CPURISCVState *cs, uint8_t *mem_buf, int n) if (n == 0) { #ifndef CONFIG_USER_ONLY cs->priv = ldtul_p(mem_buf) & 0x3; - if (cs->priv == PRV_H) { + if (cs->priv == PRV_RESERVED) { cs->priv = PRV_S; } #endif @@ -280,6 +221,10 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) int bitsize = 16 << env->misa_mxl_max; int i; +#if !defined(CONFIG_USER_ONLY) + env->debugger = true; +#endif + /* Until gdb knows about 128-bit registers */ if (bitsize > 64) { bitsize = 64; @@ -290,6 +235,9 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) g_string_append_printf(s, ""); for (i = 0; i < CSR_TABLE_SIZE; i++) { + if (env->priv_ver < csr_ops[i].min_priv_ver) { + continue; + } predicate = csr_ops[i].predicate; if (predicate && (predicate(env, i) == RISCV_EXCP_NONE)) { if (csr_ops[i].name) { @@ -305,6 +253,11 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) g_string_append_printf(s, ""); cpu->dyn_csr_xml = g_string_free(s, false); + +#if !defined(CONFIG_USER_ONLY) + env->debugger = false; +#endif + return CSR_TABLE_SIZE; } @@ -349,21 +302,6 @@ static int ricsv_gen_dynamic_vector_xml(CPUState *cs, int base_reg) num_regs++; } - /* Define vector CSRs */ - const char *vector_csrs[7] = { - "vstart", "vxsat", "vxrm", "vcsr", - "vl", "vtype", "vlenb" - }; - - for (i = 0; i < 7; i++) { - g_string_append_printf(s, - "", - vector_csrs[i], TARGET_LONG_BITS, base_reg++); - num_regs++; - } - g_string_append_printf(s, ""); cpu->dyn_vreg_xml = g_string_free(s, false); @@ -382,9 +320,10 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) 32, "riscv-32bit-fpu.xml", 0); } if (env->misa_ext & 
RVV) { - gdb_register_coprocessor(cs, riscv_gdb_get_vector, riscv_gdb_set_vector, - ricsv_gen_dynamic_vector_xml(cs, - cs->gdb_num_regs), + int base_reg = cs->gdb_num_regs; + gdb_register_coprocessor(cs, riscv_gdb_get_vector, + riscv_gdb_set_vector, + ricsv_gen_dynamic_vector_xml(cs, base_reg), "riscv-vector.xml", 0); } switch (env->misa_mxl_max) { @@ -403,7 +342,10 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) g_assert_not_reached(); } - gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr, - riscv_gen_dynamic_csr_xml(cs, cs->gdb_num_regs), - "riscv-csr.xml", 0); + if (cpu->cfg.ext_icsr) { + int base_reg = cs->gdb_num_regs; + gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr, + riscv_gen_dynamic_csr_xml(cs, base_reg), + "riscv-csr.xml", 0); + } } diff --git a/target/riscv/helper.h b/target/riscv/helper.h index 0497370afd..98e97810fd 100644 --- a/target/riscv/helper.h +++ b/target/riscv/helper.h @@ -97,6 +97,11 @@ DEF_HELPER_FLAGS_2(fcvt_h_l, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fcvt_h_lu, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fclass_h, TCG_CALL_NO_RWG_SE, tl, env, i64) +/* Cache-block operations */ +DEF_HELPER_2(cbo_clean_flush, void, env, tl) +DEF_HELPER_2(cbo_inval, void, env, tl) +DEF_HELPER_2(cbo_zero, void, env, tl) + /* Special functions */ DEF_HELPER_2(csrr, tl, env, int) DEF_HELPER_3(csrw, void, env, int, tl) @@ -118,8 +123,16 @@ DEF_HELPER_1(itrigger_match, void, env) #ifndef CONFIG_USER_ONLY DEF_HELPER_1(hyp_tlb_flush, void, env) DEF_HELPER_1(hyp_gvma_tlb_flush, void, env) -DEF_HELPER_2(hyp_hlvx_hu, tl, env, tl) -DEF_HELPER_2(hyp_hlvx_wu, tl, env, tl) +DEF_HELPER_FLAGS_2(hyp_hlv_bu, TCG_CALL_NO_WG, tl, env, tl) +DEF_HELPER_FLAGS_2(hyp_hlv_hu, TCG_CALL_NO_WG, tl, env, tl) +DEF_HELPER_FLAGS_2(hyp_hlv_wu, TCG_CALL_NO_WG, tl, env, tl) +DEF_HELPER_FLAGS_2(hyp_hlv_d, TCG_CALL_NO_WG, tl, env, tl) +DEF_HELPER_FLAGS_2(hyp_hlvx_hu, TCG_CALL_NO_WG, tl, env, tl) +DEF_HELPER_FLAGS_2(hyp_hlvx_wu, TCG_CALL_NO_WG, tl, env, tl) +DEF_HELPER_FLAGS_3(hyp_hsv_b, TCG_CALL_NO_WG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(hyp_hsv_h, TCG_CALL_NO_WG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(hyp_hsv_w, TCG_CALL_NO_WG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(hyp_hsv_d, TCG_CALL_NO_WG, void, env, tl, tl) #endif /* Vector functions */ @@ -1137,3 +1150,6 @@ DEF_HELPER_FLAGS_1(aes64im, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_3(sm4ed, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) DEF_HELPER_FLAGS_3(sm4ks, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) + +/* Zce helper */ +DEF_HELPER_FLAGS_2(cm_jalt, TCG_CALL_NO_WG, tl, env, i32) diff --git a/target/riscv/insn16.decode b/target/riscv/insn16.decode index ccfe59f294..b96c534e73 100644 --- a/target/riscv/insn16.decode +++ b/target/riscv/insn16.decode @@ -21,6 +21,8 @@ %rs1_3 7:3 !function=ex_rvc_register %rs2_3 2:3 !function=ex_rvc_register %rs2_5 2:5 +%r1s 7:3 !function=ex_sreg_register +%r2s 2:3 !function=ex_sreg_register # Immediates: %imm_ci 12:s1 2:5 @@ -43,6 +45,11 @@ %imm_addi16sp 12:s1 3:2 5:1 2:1 6:1 !function=ex_shift_4 %imm_lui 12:s1 2:5 !function=ex_shift_12 +%uimm_cl_b 5:1 6:1 +%uimm_cl_h 5:1 !function=ex_shift_1 +%spimm 2:2 !function=ex_shift_4 +%urlist 4:4 +%index 2:8 # Argument sets imported from insn32.decode: &empty !extern @@ -53,7 +60,11 @@ &b imm rs2 rs1 !extern &u imm rd !extern &shift shamt rs1 rd !extern +&r2 rd rs1 !extern +&r2_s rs1 rs2 !extern +&cmpp urlist spimm +&cmjt index # Formats 16: @cr .... ..... ..... .. &r rs2=%rs2_5 rs1=%rd %rd @@ -89,6 +100,15 @@ @c_andi ... . .. ... ..... .. 
&i imm=%imm_ci rs1=%rs1_3 rd=%rs1_3 +@cu ... ... ... .. ... .. &r2 rs1=%rs1_3 rd=%rs1_3 +@cl_b ... . .. ... .. ... .. &i imm=%uimm_cl_b rs1=%rs1_3 rd=%rs2_3 +@cl_h ... . .. ... .. ... .. &i imm=%uimm_cl_h rs1=%rs1_3 rd=%rs2_3 +@cs_b ... . .. ... .. ... .. &s imm=%uimm_cl_b rs1=%rs1_3 rs2=%rs2_3 +@cs_h ... . .. ... .. ... .. &s imm=%uimm_cl_h rs1=%rs1_3 rs2=%rs2_3 +@cm_pp ... ... ........ .. &cmpp %urlist %spimm +@cm_mv ... ... ... .. ... .. &r2_s rs2=%r2s rs1=%r1s +@cm_jt ... ... ........ .. &cmjt %index + # *** RV32/64C Standard Extension (Quadrant 0) *** { # Opcode of all zeros is illegal; rd != 0, nzuimm == 0 is reserved. @@ -97,23 +117,23 @@ } { lq 001 ... ... .. ... 00 @cl_q - fld 001 ... ... .. ... 00 @cl_d + c_fld 001 ... ... .. ... 00 @cl_d } lw 010 ... ... .. ... 00 @cl_w { sq 101 ... ... .. ... 00 @cs_q - fsd 101 ... ... .. ... 00 @cs_d + c_fsd 101 ... ... .. ... 00 @cs_d } sw 110 ... ... .. ... 00 @cs_w # *** RV32C and RV64C specific Standard Extension (Quadrant 0) *** { ld 011 ... ... .. ... 00 @cl_d - flw 011 ... ... .. ... 00 @cl_w + c_flw 011 ... ... .. ... 00 @cl_w } { sd 111 ... ... .. ... 00 @cs_d - fsw 111 ... ... .. ... 00 @cs_w + c_fsw 111 ... ... .. ... 00 @cs_w } # *** RV32/64C Standard Extension (Quadrant 1) *** @@ -148,7 +168,7 @@ addw 100 1 11 ... 01 ... 01 @cs_2 slli 000 . ..... ..... 10 @c_shift2 { lq 001 ... ... .. ... 10 @c_lqsp - fld 001 . ..... ..... 10 @c_ldsp + c_fld 001 . ..... ..... 10 @c_ldsp } { illegal 010 - 00000 ----- 10 # c.lwsp, RES rd=0 @@ -166,7 +186,19 @@ slli 000 . ..... ..... 10 @c_shift2 } { sq 101 ... ... .. ... 10 @c_sqsp - fsd 101 ...... ..... 10 @c_sdsp + c_fsd 101 ...... ..... 10 @c_sdsp + + # *** RV64 and RV32 Zcmp/Zcmt Extension *** + [ + cm_push 101 11000 .... .. 10 @cm_pp + cm_pop 101 11010 .... .. 10 @cm_pp + cm_popret 101 11110 .... .. 10 @cm_pp + cm_popretz 101 11100 .... .. 10 @cm_pp + cm_mva01s 101 011 ... 11 ... 10 @cm_mv + cm_mvsa01 101 011 ... 01 ... 10 @cm_mv + + cm_jalt 101 000 ........ 10 @cm_jt + ] } sw 110 . ..... ..... 10 @c_swsp @@ -174,9 +206,23 @@ sw 110 . ..... ..... 10 @c_swsp { c64_illegal 011 - 00000 ----- 10 # c.ldsp, RES rd=0 ld 011 . ..... ..... 10 @c_ldsp - flw 011 . ..... ..... 10 @c_lwsp + c_flw 011 . ..... ..... 10 @c_lwsp } { sd 111 . ..... ..... 10 @c_sdsp - fsw 111 . ..... ..... 10 @c_swsp + c_fsw 111 . ..... ..... 10 @c_swsp } + +# *** RV64 and RV32 Zcb Extension *** +c_zext_b 100 111 ... 11 000 01 @cu +c_sext_b 100 111 ... 11 001 01 @cu +c_zext_h 100 111 ... 11 010 01 @cu +c_sext_h 100 111 ... 11 011 01 @cu +c_zext_w 100 111 ... 11 100 01 @cu +c_not 100 111 ... 11 101 01 @cu +c_mul 100 111 ... 10 ... 01 @cs_2 +c_lbu 100 000 ... .. ... 00 @cl_b +c_lhu 100 001 ... 0. ... 00 @cl_h +c_lh 100 001 ... 1. ... 00 @cl_h +c_sb 100 010 ... .. ... 00 @cs_b +c_sh 100 011 ... 0. ... 00 @cs_h diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index b7e7613ea2..73d5d1b045 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -134,6 +134,7 @@ addi ............ ..... 000 ..... 0010011 @i slti ............ ..... 010 ..... 0010011 @i sltiu ............ ..... 011 ..... 0010011 @i xori ............ ..... 100 ..... 0010011 @i +# cbo.prefetch_{i,r,m} instructions are ori with rd=x0 and not decoded. ori ............ ..... 110 ..... 0010011 @i andi ............ ..... 111 ..... 0010011 @i slli 00000. ...... ..... 001 ..... 0010011 @sh @@ -179,7 +180,20 @@ sraw 0100000 ..... ..... 101 ..... 0111011 @r # *** RV128I Base Instruction Set (in addition to RV64I) *** ldu ............ ..... 
111 ..... 0000011 @i -lq ............ ..... 010 ..... 0001111 @i +{ + [ + # *** RV32 Zicbom Standard Extension *** + cbo_clean 0000000 00001 ..... 010 00000 0001111 @sfence_vm + cbo_flush 0000000 00010 ..... 010 00000 0001111 @sfence_vm + cbo_inval 0000000 00000 ..... 010 00000 0001111 @sfence_vm + + # *** RV32 Zicboz Standard Extension *** + cbo_zero 0000000 00100 ..... 010 00000 0001111 @sfence_vm + ] + + # *** RVI128 lq *** + lq ............ ..... 010 ..... 0001111 @i +} sq ............ ..... 100 ..... 0100011 @s addid ............ ..... 000 ..... 1011011 @i sllid 000000 ...... ..... 001 ..... 1011011 @sh6 @@ -890,3 +904,7 @@ sm3p1 00 01000 01001 ..... 001 ..... 0010011 @r2 # *** RV32 Zksed Standard Extension *** sm4ed .. 11000 ..... ..... 000 ..... 0110011 @k_aes sm4ks .. 11010 ..... ..... 000 ..... 0110011 @k_aes + +# *** RV32 Zicond Standard Extension *** +czero_eqz 0000111 ..... ..... 101 ..... 0110011 @r +czero_nez 0000111 ..... ..... 111 ..... 0110011 @r diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc index 59501b2780..7c2837194c 100644 --- a/target/riscv/insn_trans/trans_privileged.c.inc +++ b/target/riscv/insn_trans/trans_privileged.c.inc @@ -52,7 +52,7 @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a) * that no exception will be raised when fetching them. */ - if (semihosting_enabled(ctx->mem_idx < PRV_S) && + if (semihosting_enabled(ctx->priv == PRV_U) && (pre_addr & TARGET_PAGE_MASK) == (post_addr & TARGET_PAGE_MASK)) { pre = opcode_at(&ctx->base, pre_addr); ebreak = opcode_at(&ctx->base, ebreak_addr); @@ -77,6 +77,9 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a) #ifndef CONFIG_USER_ONLY if (has_ext(ctx, RVS)) { decode_save_opc(ctx); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } gen_helper_sret(cpu_pc, cpu_env); exit_tb(ctx); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; @@ -93,6 +96,9 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a) { #ifndef CONFIG_USER_ONLY decode_save_opc(ctx); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } gen_helper_mret(cpu_pc, cpu_env); exit_tb(ctx); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc index 990bc94b98..e4dcc7c991 100644 --- a/target/riscv/insn_trans/trans_rvb.c.inc +++ b/target/riscv/insn_trans/trans_rvb.c.inc @@ -64,7 +64,6 @@ static void gen_clzw(TCGv ret, TCGv arg1) TCGv t = tcg_temp_new(); tcg_gen_shli_tl(t, arg1, 32); tcg_gen_clzi_tl(ret, t, 32); - tcg_temp_free(t); } static bool trans_clz(DisasContext *ctx, arg_clz *a) @@ -161,8 +160,6 @@ static void gen_bset(TCGv ret, TCGv arg1, TCGv shamt) gen_sbop_mask(t, shamt); tcg_gen_or_tl(ret, arg1, t); - - tcg_temp_free(t); } static bool trans_bset(DisasContext *ctx, arg_bset *a) @@ -183,8 +180,6 @@ static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt) gen_sbop_mask(t, shamt); tcg_gen_andc_tl(ret, arg1, t); - - tcg_temp_free(t); } static bool trans_bclr(DisasContext *ctx, arg_bclr *a) @@ -205,8 +200,6 @@ static void gen_binv(TCGv ret, TCGv arg1, TCGv shamt) gen_sbop_mask(t, shamt); tcg_gen_xor_tl(ret, arg1, t); - - tcg_temp_free(t); } static bool trans_binv(DisasContext *ctx, arg_binv *a) @@ -252,9 +245,6 @@ static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2) /* sign-extend 64-bits */ tcg_gen_ext_i32_tl(ret, t1); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); } static bool trans_ror(DisasContext *ctx, arg_ror *a) @@ -270,8 +260,6 @@ static 
void gen_roriw(TCGv ret, TCGv arg1, target_long shamt) tcg_gen_trunc_tl_i32(t1, arg1); tcg_gen_rotri_i32(t1, t1, shamt); tcg_gen_ext_i32_tl(ret, t1); - - tcg_temp_free_i32(t1); } static bool trans_rori(DisasContext *ctx, arg_rori *a) @@ -294,9 +282,6 @@ static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2) /* sign-extend 64-bits */ tcg_gen_ext_i32_tl(ret, t1); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); } static bool trans_rol(DisasContext *ctx, arg_rol *a) @@ -340,8 +325,6 @@ static void gen_orc_b(TCGv ret, TCGv source1) /* Replicate the lsb of each byte across the byte. */ tcg_gen_muli_tl(ret, tmp, 0xff); - - tcg_temp_free(tmp); } static bool trans_orc_b(DisasContext *ctx, arg_orc_b *a) @@ -357,8 +340,6 @@ static void gen_sh##SHAMT##add(TCGv ret, TCGv arg1, TCGv arg2) \ \ tcg_gen_shli_tl(t, arg1, SHAMT); \ tcg_gen_add_tl(ret, t, arg2); \ - \ - tcg_temp_free(t); \ } GEN_SHADD(1) @@ -446,8 +427,6 @@ static void gen_sh##SHAMT##add_uw(TCGv ret, TCGv arg1, TCGv arg2) \ \ tcg_gen_shli_tl(t, t, SHAMT); \ tcg_gen_add_tl(ret, t, arg2); \ - \ - tcg_temp_free(t); \ } GEN_SHADD_UW(1) @@ -472,7 +451,6 @@ static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2) TCGv t = tcg_temp_new(); tcg_gen_ext32u_tl(t, arg1); tcg_gen_add_tl(ret, t, arg2); - tcg_temp_free(t); } static bool trans_add_uw(DisasContext *ctx, arg_add_uw *a) @@ -531,7 +509,6 @@ static void gen_packh(TCGv ret, TCGv src1, TCGv src2) tcg_gen_ext8u_tl(t, src2); tcg_gen_deposit_tl(ret, src1, t, 8, TARGET_LONG_BITS - 8); - tcg_temp_free(t); } static void gen_packw(TCGv ret, TCGv src1, TCGv src2) @@ -540,7 +517,6 @@ static void gen_packw(TCGv ret, TCGv src1, TCGv src2) tcg_gen_ext16s_tl(t, src2); tcg_gen_deposit_tl(ret, src1, t, 16, TARGET_LONG_BITS - 16); - tcg_temp_free(t); } static bool trans_brev8(DisasContext *ctx, arg_brev8 *a) diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc index 6e3159b797..2c51e01c40 100644 --- a/target/riscv/insn_trans/trans_rvd.c.inc +++ b/target/riscv/insn_trans/trans_rvd.c.inc @@ -31,6 +31,12 @@ } \ } while (0) +#define REQUIRE_ZCD(ctx) do { \ + if (!ctx->cfg_ptr->ext_zcd) { \ + return false; \ + } \ +} while (0) + static bool trans_fld(DisasContext *ctx, arg_fld *a) { TCGv addr; @@ -59,6 +65,18 @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a) return true; } +static bool trans_c_fld(DisasContext *ctx, arg_fld *a) +{ + REQUIRE_ZCD(ctx); + return trans_fld(ctx, a); +} + +static bool trans_c_fsd(DisasContext *ctx, arg_fsd *a) +{ + REQUIRE_ZCD(ctx); + return trans_fsd(ctx, a); +} + static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a) { REQUIRE_FPU; @@ -250,7 +268,6 @@ static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a) TCGv_i64 t0 = tcg_temp_new_i64(); tcg_gen_not_i64(t0, src2); tcg_gen_deposit_i64(dest, t0, src1, 0, 63); - tcg_temp_free_i64(t0); } gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); @@ -273,7 +290,6 @@ static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a) TCGv_i64 t0 = tcg_temp_new_i64(); tcg_gen_andi_i64(t0, src2, INT64_MIN); tcg_gen_xor_i64(dest, src1, t0); - tcg_temp_free_i64(t0); } gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc index 965e1f8d11..b2de4fcf3f 100644 --- a/target/riscv/insn_trans/trans_rvf.c.inc +++ b/target/riscv/insn_trans/trans_rvf.c.inc @@ -19,7 +19,7 @@ */ #define REQUIRE_FPU do {\ - if (ctx->mstatus_fs == 0) \ + if (ctx->mstatus_fs == EXT_STATUS_DISABLED) \ if (!ctx->cfg_ptr->ext_zfinx) \ 
return false; \ } while (0) @@ -30,6 +30,12 @@ } \ } while (0) +#define REQUIRE_ZCF(ctx) do { \ + if (!ctx->cfg_ptr->ext_zcf) { \ + return false; \ + } \ +} while (0) + static bool trans_flw(DisasContext *ctx, arg_flw *a) { TCGv_i64 dest; @@ -61,6 +67,18 @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a) return true; } +static bool trans_c_flw(DisasContext *ctx, arg_flw *a) +{ + REQUIRE_ZCF(ctx); + return trans_flw(ctx, a); +} + +static bool trans_c_fsw(DisasContext *ctx, arg_fsw *a) +{ + REQUIRE_ZCF(ctx); + return trans_fsw(ctx, a); +} + static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a) { REQUIRE_FPU; @@ -233,9 +251,6 @@ static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a) /* This formulation retains the nanboxing of rs2 in normal 'F'. */ tcg_gen_deposit_i64(dest, rs2, rs1, 0, 31); - - tcg_temp_free_i64(rs1); - tcg_temp_free_i64(rs2); } else { tcg_gen_deposit_i64(dest, src2, src1, 0, 31); tcg_gen_ext32s_i64(dest, dest); @@ -281,15 +296,12 @@ static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a) tcg_gen_nor_i64(rs2, rs2, mask); tcg_gen_and_i64(dest, mask, rs1); tcg_gen_or_i64(dest, dest, rs2); - - tcg_temp_free_i64(rs2); } /* sign-extended instead of nanboxing for the result if zfinx is enabled */ if (ctx->cfg_ptr->ext_zfinx) { tcg_gen_ext32s_i64(dest, dest); } gen_set_fpr_hs(ctx, a->rd, dest); - tcg_temp_free_i64(rs1); mark_fs_dirty(ctx); return true; } @@ -329,14 +341,11 @@ static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a) */ tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(31, 1)); tcg_gen_xor_i64(dest, rs1, dest); - - tcg_temp_free_i64(rs2); } /* sign-extended instead of nanboxing for the result if zfinx is enabled */ if (ctx->cfg_ptr->ext_zfinx) { tcg_gen_ext32s_i64(dest, dest); } - tcg_temp_free_i64(rs1); gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; } diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc index 9248b48c36..3e9322130f 100644 --- a/target/riscv/insn_trans/trans_rvh.c.inc +++ b/target/riscv/insn_trans/trans_rvh.c.inc @@ -16,156 +16,131 @@ * this program. If not, see <http://www.gnu.org/licenses/>.
*/ -#ifndef CONFIG_USER_ONLY -static bool check_access(DisasContext *ctx) -{ - if (!ctx->hlsx) { - if (ctx->virt_enabled) { - generate_exception(ctx, RISCV_EXCP_VIRT_INSTRUCTION_FAULT); - } else { - generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST); - } - return false; - } - return true; -} -#endif - -static bool do_hlv(DisasContext *ctx, arg_r2 *a, MemOp mop) -{ #ifdef CONFIG_USER_ONLY - return false; +#define do_hlv(ctx, a, func) false +#define do_hsv(ctx, a, func) false #else - decode_save_opc(ctx); - if (check_access(ctx)) { - TCGv dest = dest_gpr(ctx, a->rd); - TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE); - int mem_idx = ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK; - tcg_gen_qemu_ld_tl(dest, addr, mem_idx, mop); - gen_set_gpr(ctx, a->rd, dest); - } - return true; -#endif +static void gen_helper_hyp_hlv_b(TCGv r, TCGv_env e, TCGv a) +{ + gen_helper_hyp_hlv_bu(r, e, a); + tcg_gen_ext8s_tl(r, r); } +static void gen_helper_hyp_hlv_h(TCGv r, TCGv_env e, TCGv a) +{ + gen_helper_hyp_hlv_hu(r, e, a); + tcg_gen_ext16s_tl(r, r); +} + +static void gen_helper_hyp_hlv_w(TCGv r, TCGv_env e, TCGv a) +{ + gen_helper_hyp_hlv_wu(r, e, a); + tcg_gen_ext32s_tl(r, r); +} + +static bool do_hlv(DisasContext *ctx, arg_r2 *a, + void (*func)(TCGv, TCGv_env, TCGv)) +{ + TCGv dest = dest_gpr(ctx, a->rd); + TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE); + + decode_save_opc(ctx); + func(dest, cpu_env, addr); + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool do_hsv(DisasContext *ctx, arg_r2_s *a, + void (*func)(TCGv_env, TCGv, TCGv)) +{ + TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE); + TCGv data = get_gpr(ctx, a->rs2, EXT_NONE); + + decode_save_opc(ctx); + func(cpu_env, addr, data); + return true; +} +#endif /* CONFIG_USER_ONLY */ + static bool trans_hlv_b(DisasContext *ctx, arg_hlv_b *a) { REQUIRE_EXT(ctx, RVH); - return do_hlv(ctx, a, MO_SB); + return do_hlv(ctx, a, gen_helper_hyp_hlv_b); } static bool trans_hlv_h(DisasContext *ctx, arg_hlv_h *a) { REQUIRE_EXT(ctx, RVH); - return do_hlv(ctx, a, MO_TESW); + return do_hlv(ctx, a, gen_helper_hyp_hlv_h); } static bool trans_hlv_w(DisasContext *ctx, arg_hlv_w *a) { REQUIRE_EXT(ctx, RVH); - return do_hlv(ctx, a, MO_TESL); + return do_hlv(ctx, a, gen_helper_hyp_hlv_w); } static bool trans_hlv_bu(DisasContext *ctx, arg_hlv_bu *a) { REQUIRE_EXT(ctx, RVH); - return do_hlv(ctx, a, MO_UB); + return do_hlv(ctx, a, gen_helper_hyp_hlv_bu); } static bool trans_hlv_hu(DisasContext *ctx, arg_hlv_hu *a) { REQUIRE_EXT(ctx, RVH); - return do_hlv(ctx, a, MO_TEUW); -} - -static bool do_hsv(DisasContext *ctx, arg_r2_s *a, MemOp mop) -{ -#ifdef CONFIG_USER_ONLY - return false; -#else - decode_save_opc(ctx); - if (check_access(ctx)) { - TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE); - TCGv data = get_gpr(ctx, a->rs2, EXT_NONE); - int mem_idx = ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK; - tcg_gen_qemu_st_tl(data, addr, mem_idx, mop); - } - return true; -#endif + return do_hlv(ctx, a, gen_helper_hyp_hlv_hu); } static bool trans_hsv_b(DisasContext *ctx, arg_hsv_b *a) { REQUIRE_EXT(ctx, RVH); - return do_hsv(ctx, a, MO_SB); + return do_hsv(ctx, a, gen_helper_hyp_hsv_b); } static bool trans_hsv_h(DisasContext *ctx, arg_hsv_h *a) { REQUIRE_EXT(ctx, RVH); - return do_hsv(ctx, a, MO_TESW); + return do_hsv(ctx, a, gen_helper_hyp_hsv_h); } static bool trans_hsv_w(DisasContext *ctx, arg_hsv_w *a) { REQUIRE_EXT(ctx, RVH); - return do_hsv(ctx, a, MO_TESL); + return do_hsv(ctx, a, gen_helper_hyp_hsv_w); } static bool trans_hlv_wu(DisasContext *ctx, arg_hlv_wu *a) { REQUIRE_64BIT(ctx); 
REQUIRE_EXT(ctx, RVH); - return do_hlv(ctx, a, MO_TEUL); + return do_hlv(ctx, a, gen_helper_hyp_hlv_wu); } static bool trans_hlv_d(DisasContext *ctx, arg_hlv_d *a) { REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVH); - return do_hlv(ctx, a, MO_TEUQ); + return do_hlv(ctx, a, gen_helper_hyp_hlv_d); } static bool trans_hsv_d(DisasContext *ctx, arg_hsv_d *a) { REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVH); - return do_hsv(ctx, a, MO_TEUQ); + return do_hsv(ctx, a, gen_helper_hyp_hsv_d); } -#ifndef CONFIG_USER_ONLY -static bool do_hlvx(DisasContext *ctx, arg_r2 *a, - void (*func)(TCGv, TCGv_env, TCGv)) -{ - decode_save_opc(ctx); - if (check_access(ctx)) { - TCGv dest = dest_gpr(ctx, a->rd); - TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE); - func(dest, cpu_env, addr); - gen_set_gpr(ctx, a->rd, dest); - } - return true; -} -#endif - static bool trans_hlvx_hu(DisasContext *ctx, arg_hlvx_hu *a) { REQUIRE_EXT(ctx, RVH); -#ifndef CONFIG_USER_ONLY - return do_hlvx(ctx, a, gen_helper_hyp_hlvx_hu); -#else - return false; -#endif + return do_hlv(ctx, a, gen_helper_hyp_hlvx_hu); } static bool trans_hlvx_wu(DisasContext *ctx, arg_hlvx_wu *a) { REQUIRE_EXT(ctx, RVH); -#ifndef CONFIG_USER_ONLY - return do_hlvx(ctx, a, gen_helper_hyp_hlvx_wu); -#else - return false; -#endif + return do_hlv(ctx, a, gen_helper_hyp_hlvx_wu); } static bool trans_hfence_gvma(DisasContext *ctx, arg_sfence_vma *a) diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc index 4496f21266..c70c495fc5 100644 --- a/target/riscv/insn_trans/trans_rvi.c.inc +++ b/target/riscv/insn_trans/trans_rvi.c.inc @@ -56,13 +56,12 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a) tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2); gen_set_pc(ctx, cpu_pc); - if (!has_ext(ctx, RVC)) { + if (!ctx->cfg_ptr->ext_zca) { TCGv t0 = tcg_temp_new(); misaligned = gen_new_label(); tcg_gen_andi_tl(t0, cpu_pc, 0x2); tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned); - tcg_temp_free(t0); } gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn); @@ -108,8 +107,6 @@ static TCGCond gen_compare_i128(bool bz, TCGv rl, tcg_gen_xor_tl(tmp, ah, bh); tcg_gen_and_tl(rl, rl, tmp); tcg_gen_xor_tl(rl, rh, rl); - - tcg_temp_free(tmp); } break; @@ -128,8 +125,6 @@ static TCGCond gen_compare_i128(bool bz, TCGv rl, /* seed third word with 1, which will be result */ tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero); tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero); - - tcg_temp_free(tmp); } break; @@ -140,8 +135,6 @@ static TCGCond gen_compare_i128(bool bz, TCGv rl, if (invert) { cond = tcg_invert_cond(cond); } - - tcg_temp_free(rh); return cond; } @@ -169,8 +162,6 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond) cond = gen_compare_i128(a->rs2 == 0, tmp, src1, src1h, src2, src2h, cond); tcg_gen_brcondi_tl(cond, tmp, 0, l); - - tcg_temp_free(tmp); } else { tcg_gen_brcond_tl(cond, src1, src2, l); } @@ -178,7 +169,7 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond) gen_set_label(l); /* branch taken */ - if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) { + if (!ctx->cfg_ptr->ext_zca && ((ctx->base.pc_next + a->imm) & 0x3)) { /* misaligned */ gen_exception_inst_addr_mis(ctx); } else { @@ -254,8 +245,6 @@ static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop) } gen_set_gpr128(ctx, a->rd, destl, desth); - - tcg_temp_free(addrl); return true; } @@ -344,8 +333,6 @@ static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop) tcg_gen_addi_tl(addrl, addrl, 8); tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, 
MO_TEUQ); } - - tcg_temp_free(addrl); return true; } @@ -568,14 +555,6 @@ static void gen_sll_i128(TCGv destl, TCGv desth, tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll); tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1); - - tcg_temp_free(ls); - tcg_temp_free(rs); - tcg_temp_free(hs); - tcg_temp_free(ll); - tcg_temp_free(lr); - tcg_temp_free(h0); - tcg_temp_free(h1); } static bool trans_sll(DisasContext *ctx, arg_sll *a) @@ -618,14 +597,6 @@ static void gen_srl_i128(TCGv destl, TCGv desth, tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0); tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1); - - tcg_temp_free(ls); - tcg_temp_free(rs); - tcg_temp_free(hs); - tcg_temp_free(ll); - tcg_temp_free(lr); - tcg_temp_free(h0); - tcg_temp_free(h1); } static bool trans_srl(DisasContext *ctx, arg_srl *a) @@ -659,14 +630,6 @@ static void gen_sra_i128(TCGv destl, TCGv desth, tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0); tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1); - - tcg_temp_free(ls); - tcg_temp_free(rs); - tcg_temp_free(hs); - tcg_temp_free(ll); - tcg_temp_free(lr); - tcg_temp_free(h0); - tcg_temp_free(h1); } static bool trans_sra(DisasContext *ctx, arg_sra *a) diff --git a/target/riscv/insn_trans/trans_rvk.c.inc b/target/riscv/insn_trans/trans_rvk.c.inc index 90f4eeff60..6600c710a7 100644 --- a/target/riscv/insn_trans/trans_rvk.c.inc +++ b/target/riscv/insn_trans/trans_rvk.c.inc @@ -161,9 +161,6 @@ static bool gen_sha256(DisasContext *ctx, arg_r2 *a, DisasExtend ext, tcg_gen_ext_i32_tl(dest, t1); gen_set_gpr(ctx, a->rd, dest); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); return true; } @@ -212,9 +209,6 @@ static bool gen_sha512_rv32(DisasContext *ctx, arg_r *a, DisasExtend ext, tcg_gen_trunc_i64_tl(dest, t1); gen_set_gpr(ctx, a->rd, dest); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); return true; } @@ -271,9 +265,6 @@ static bool gen_sha512h_rv32(DisasContext *ctx, arg_r *a, DisasExtend ext, tcg_gen_trunc_i64_tl(dest, t1); gen_set_gpr(ctx, a->rd, dest); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); return true; } @@ -310,9 +301,6 @@ static bool gen_sha512_rv64(DisasContext *ctx, arg_r2 *a, DisasExtend ext, tcg_gen_trunc_i64_tl(dest, t1); gen_set_gpr(ctx, a->rd, dest); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); return true; } @@ -359,9 +347,6 @@ static bool gen_sm3(DisasContext *ctx, arg_r2 *a, int32_t b, int32_t c) tcg_gen_xor_i32(t1, t1, t0); tcg_gen_ext_i32_tl(dest, t1); gen_set_gpr(ctx, a->rd, dest); - - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); return true; } diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc index ec7f705aab..2f0fd1f700 100644 --- a/target/riscv/insn_trans/trans_rvm.c.inc +++ b/target/riscv/insn_trans/trans_rvm.c.inc @@ -45,9 +45,6 @@ static void gen_mulhu_i128(TCGv r2, TCGv r3, TCGv al, TCGv ah, TCGv bl, TCGv bh) tcg_gen_mulu2_tl(tmpl, tmph, ah, bh); tcg_gen_add2_tl(r2, r3, r2, r3, tmpl, tmph); - - tcg_temp_free(tmpl); - tcg_temp_free(tmph); } static void gen_mul_i128(TCGv rl, TCGv rh, @@ -63,10 +60,6 @@ static void gen_mul_i128(TCGv rl, TCGv rh, tcg_gen_add2_tl(rh, tmpx, rh, zero, tmpl, tmph); tcg_gen_mulu2_tl(tmpl, tmph, rs1h, rs2l); tcg_gen_add2_tl(rh, tmph, rh, tmpx, tmpl, tmph); - - tcg_temp_free(tmpl); - tcg_temp_free(tmph); - tcg_temp_free(tmpx); } static bool trans_mul(DisasContext *ctx, arg_mul *a) @@ -92,11 +85,6 @@ static void 
gen_mulh_i128(TCGv rl, TCGv rh, tcg_gen_and_tl(t1h, t1h, rs1h); tcg_gen_sub2_tl(t0l, t0h, rl, rh, t0l, t0h); tcg_gen_sub2_tl(rl, rh, t0l, t0h, t1l, t1h); - - tcg_temp_free(t0l); - tcg_temp_free(t0h); - tcg_temp_free(t1l); - tcg_temp_free(t1h); } static void gen_mulh(TCGv ret, TCGv s1, TCGv s2) @@ -104,7 +92,6 @@ static void gen_mulh(TCGv ret, TCGv s1, TCGv s2) TCGv discard = tcg_temp_new(); tcg_gen_muls2_tl(discard, ret, s1, s2); - tcg_temp_free(discard); } static void gen_mulh_w(TCGv ret, TCGv s1, TCGv s2) @@ -132,9 +119,6 @@ static void gen_mulhsu_i128(TCGv rl, TCGv rh, tcg_gen_and_tl(t0l, t0h, rs2l); tcg_gen_and_tl(t0h, t0h, rs2h); tcg_gen_sub2_tl(rl, rh, rl, rh, t0l, t0h); - - tcg_temp_free(t0l); - tcg_temp_free(t0h); } static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2) @@ -147,9 +131,6 @@ static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2) tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1); tcg_gen_and_tl(rl, rl, arg2); tcg_gen_sub_tl(ret, rh, rl); - - tcg_temp_free(rl); - tcg_temp_free(rh); } static void gen_mulhsu_w(TCGv ret, TCGv arg1, TCGv arg2) @@ -160,8 +141,6 @@ static void gen_mulhsu_w(TCGv ret, TCGv arg1, TCGv arg2) tcg_gen_ext32s_tl(t1, arg1); tcg_gen_ext32u_tl(t2, arg2); tcg_gen_mul_tl(ret, t1, t2); - tcg_temp_free(t1); - tcg_temp_free(t2); tcg_gen_sari_tl(ret, ret, 32); } @@ -177,7 +156,6 @@ static void gen_mulhu(TCGv ret, TCGv s1, TCGv s2) TCGv discard = tcg_temp_new(); tcg_gen_mulu2_tl(discard, ret, s1, s2); - tcg_temp_free(discard); } static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a) @@ -223,9 +201,6 @@ static void gen_div(TCGv ret, TCGv source1, TCGv source2) tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, temp2); tcg_gen_div_tl(ret, temp1, temp2); - - tcg_temp_free(temp1); - tcg_temp_free(temp2); } static bool trans_div(DisasContext *ctx, arg_div *a) @@ -258,9 +233,6 @@ static void gen_divu(TCGv ret, TCGv source1, TCGv source2) tcg_gen_movcond_tl(TCG_COND_EQ, temp1, source2, zero, max, source1); tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, source2); tcg_gen_divu_tl(ret, temp1, temp2); - - tcg_temp_free(temp1); - tcg_temp_free(temp2); } static bool trans_divu(DisasContext *ctx, arg_divu *a) @@ -306,9 +278,6 @@ static void gen_rem(TCGv ret, TCGv source1, TCGv source2) /* If div by zero, the required result is the original dividend. */ tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp1); - - tcg_temp_free(temp1); - tcg_temp_free(temp2); } static bool trans_rem(DisasContext *ctx, arg_rem *a) @@ -342,8 +311,6 @@ static void gen_remu(TCGv ret, TCGv source1, TCGv source2) /* If div by zero, the required result is the original dividend. 
*/ tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp); - - tcg_temp_free(temp); } static bool trans_remu(DisasContext *ctx, arg_remu *a) diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index bbb5c3a7b5..6c07eebc52 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -29,21 +29,22 @@ static inline bool is_overlapped(const int8_t astart, int8_t asize, static bool require_rvv(DisasContext *s) { - return s->mstatus_vs != 0; + return s->mstatus_vs != EXT_STATUS_DISABLED; } static bool require_rvf(DisasContext *s) { - if (s->mstatus_fs == 0) { + if (s->mstatus_fs == EXT_STATUS_DISABLED) { return false; } switch (s->sew) { case MO_16: + return s->cfg_ptr->ext_zvfh; case MO_32: - return has_ext(s, RVF); + return s->cfg_ptr->ext_zve32f; case MO_64: - return has_ext(s, RVD); + return s->cfg_ptr->ext_zve64d; default: return false; } @@ -51,63 +52,38 @@ static bool require_rvf(DisasContext *s) static bool require_scale_rvf(DisasContext *s) { - if (s->mstatus_fs == 0) { + if (s->mstatus_fs == EXT_STATUS_DISABLED) { return false; } switch (s->sew) { case MO_8: + return s->cfg_ptr->ext_zvfh; case MO_16: - return has_ext(s, RVF); + return s->cfg_ptr->ext_zve32f; case MO_32: - return has_ext(s, RVD); + return s->cfg_ptr->ext_zve64d; default: return false; } } -static bool require_zve32f(DisasContext *s) +static bool require_scale_rvfmin(DisasContext *s) { - /* RVV + Zve32f = RVV. */ - if (has_ext(s, RVV)) { - return true; + if (s->mstatus_fs == EXT_STATUS_DISABLED) { + return false; } - /* Zve32f doesn't support FP64. (Section 18.2) */ - return s->cfg_ptr->ext_zve32f ? s->sew <= MO_32 : true; -} - -static bool require_scale_zve32f(DisasContext *s) -{ - /* RVV + Zve32f = RVV. */ - if (has_ext(s, RVV)) { - return true; + switch (s->sew) { + case MO_8: + return s->cfg_ptr->ext_zvfhmin; + case MO_16: + return s->cfg_ptr->ext_zve32f; + case MO_32: + return s->cfg_ptr->ext_zve64d; + default: + return false; } - - /* Zve32f doesn't support FP64. (Section 18.2) */ - return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true; -} - -static bool require_zve64f(DisasContext *s) -{ - /* RVV + Zve64f = RVV. */ - if (has_ext(s, RVV)) { - return true; - } - - /* Zve64f doesn't support FP64. (Section 18.2) */ - return s->cfg_ptr->ext_zve64f ? s->sew <= MO_32 : true; -} - -static bool require_scale_zve64f(DisasContext *s) -{ - /* RVV + Zve64f = RVV. */ - if (has_ext(s, RVV)) { - return true; - } - - /* Zve64f doesn't support FP64. (Section 18.2) */ - return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true; } /* Destination vector register group cannot overlap source mask register. 
*/ @@ -173,9 +149,7 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2) { TCGv s1, dst; - if (!require_rvv(s) || - !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f || - s->cfg_ptr->ext_zve64f)) { + if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) { return false; } @@ -198,11 +172,6 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2) gen_set_pc_imm(s, s->pc_succ_insn); lookup_and_goto_ptr(s); s->base.is_jmp = DISAS_NORETURN; - - if (rd == 0 && rs1 == 0) { - tcg_temp_free(s1); - } - return true; } @@ -210,9 +179,7 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2) { TCGv dst; - if (!require_rvv(s) || - !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f || - s->cfg_ptr->ext_zve64f)) { + if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) { return false; } @@ -242,8 +209,8 @@ static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a) static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a) { - TCGv s1 = tcg_const_tl(a->rs1); - TCGv s2 = tcg_const_tl(a->zimm); + TCGv s1 = tcg_constant_tl(a->rs1); + TCGv s2 = tcg_constant_tl(a->zimm); return do_vsetivli(s, a->rd, s1, s2); } @@ -271,8 +238,8 @@ static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew) { int8_t emul = eew - s->sew + s->lmul; return (emul >= -3 && emul <= 3) && - require_align(vd, emul) && - require_nf(vd, nf, emul); + require_align(vd, emul) && + require_nf(vd, nf, emul); } /* @@ -315,13 +282,12 @@ static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf, require_nf(vd, nf, s->lmul); /* - * All Zve* extensions support all vector load and store instructions, - * except Zve64* extensions do not support EEW=64 for index values - * when XLEN=32. (Section 18.2) + * V extension supports all vector load and store instructions, + * except V extension does not support EEW=64 for index values + * when XLEN=32. (Section 18.3) */ if (get_xl(s) == MXL_RV32) { - ret &= (!has_ext(s, RVV) && - s->cfg_ptr->ext_zve64f ? eew != MO_64 : true); + ret &= (eew != MO_64); } return ret; @@ -349,7 +315,7 @@ static bool vext_check_ld_index(DisasContext *s, int vd, int vs2, int8_t seg_vd; int8_t emul = eew - s->sew + s->lmul; bool ret = vext_check_st_index(s, vd, vs2, nf, eew) && - require_vm(vm, vd); + require_vm(vm, vd); /* Each segment register group has to follow overlap rules. 
*/ for (int i = 0; i < nf; ++i) { @@ -379,8 +345,8 @@ static bool vext_check_ld_index(DisasContext *s, int vd, int vs2, static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm) { return require_vm(vm, vd) && - require_align(vd, s->lmul) && - require_align(vs, s->lmul); + require_align(vd, s->lmul) && + require_align(vs, s->lmul); } /* @@ -399,7 +365,7 @@ static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm) static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm) { return vext_check_ss(s, vd, vs2, vm) && - require_align(vs1, s->lmul); + require_align(vs1, s->lmul); } static bool vext_check_ms(DisasContext *s, int vd, int vs) @@ -430,7 +396,7 @@ static bool vext_check_ms(DisasContext *s, int vd, int vs) static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2) { bool ret = vext_check_ms(s, vd, vs2) && - require_align(vs1, s->lmul); + require_align(vs1, s->lmul); if (vd != vs1) { ret &= require_noover(vd, 0, vs1, s->lmul); } @@ -494,14 +460,14 @@ static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2, static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm) { return vext_wide_check_common(s, vd, vm) && - require_align(vs, s->lmul) && - require_noover(vd, s->lmul + 1, vs, s->lmul); + require_align(vs, s->lmul) && + require_noover(vd, s->lmul + 1, vs, s->lmul); } static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm) { return vext_wide_check_common(s, vd, vm) && - require_align(vs, s->lmul + 1); + require_align(vs, s->lmul + 1); } /* @@ -519,8 +485,8 @@ static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm) static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm) { return vext_check_ds(s, vd, vs2, vm) && - require_align(vs1, s->lmul) && - require_noover(vd, s->lmul + 1, vs1, s->lmul); + require_align(vs1, s->lmul) && + require_noover(vd, s->lmul + 1, vs1, s->lmul); } /* @@ -541,7 +507,7 @@ static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm) static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm) { return vext_check_ds(s, vd, vs1, vm) && - require_align(vs2, s->lmul + 1); + require_align(vs2, s->lmul + 1); } static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm) @@ -569,7 +535,7 @@ static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm) static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm) { return vext_check_sd(s, vd, vs2, vm) && - require_align(vs1, s->lmul); + require_align(vs1, s->lmul); } /* @@ -581,7 +547,7 @@ static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm) */ static bool vext_check_reduction(DisasContext *s, int vs2) { - return require_align(vs2, s->lmul) && (s->vstart == 0); + return require_align(vs2, s->lmul) && s->vstart_eq_zero; } /* @@ -673,9 +639,6 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data, fn(dest, mask, base, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - if (!is_store) { mark_vs_dirty(s); } @@ -838,9 +801,6 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, fn(dest, mask, base, stride, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - if (!is_store) { mark_vs_dirty(s); } @@ -949,10 +909,6 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, fn(dest, mask, base, index, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(index); - if (!is_store) { mark_vs_dirty(s); } @@ -1092,8 
+1048,6 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data, fn(dest, mask, base, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); mark_vs_dirty(s); gen_set_label(over); return true; @@ -1154,8 +1108,6 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf, fn(dest, base, cpu_env, desc); - tcg_temp_free_ptr(dest); - if (!is_store) { mark_vs_dirty(s); } @@ -1311,9 +1263,6 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm, fn(dest, mask, src1, src2, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(src2); mark_vs_dirty(s); gen_set_label(over); return true; @@ -1344,7 +1293,6 @@ do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn, gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), src1, MAXSZ(s), MAXSZ(s)); - tcg_temp_free_i64(src1); mark_vs_dirty(s); return true; } @@ -1479,9 +1427,6 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm, fn(dest, mask, src1, src2, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(src2); mark_vs_dirty(s); gen_set_label(over); return true; @@ -1850,7 +1795,6 @@ do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn, gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), src1, MAXSZ(s), MAXSZ(s)); - tcg_temp_free_i32(src1); mark_vs_dirty(s); return true; } @@ -2027,8 +1971,7 @@ static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a) * are not included for EEW=64 in Zve64*. (Section 18.2) */ return opivv_check(s, a) && - (!has_ext(s, RVV) && - s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); + (!has_ext(s, RVV) ? s->sew != MO_64 : true); } static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a) @@ -2041,8 +1984,7 @@ static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a) * are not included for EEW=64 in Zve64*. (Section 18.2) */ return opivx_check(s, a) && - (!has_ext(s, RVV) && - s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); + (!has_ext(s, RVV) ? s->sew != MO_64 : true); } GEN_OPIVV_GVEC_TRANS(vmul_vv, mul) @@ -2145,7 +2087,6 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a) tcg_gen_ext_tl_i64(s1_i64, s1); tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), s1_i64); - tcg_temp_free_i64(s1_i64); } else { tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), s1); @@ -2166,9 +2107,6 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a) s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); fns[s->sew](dest, s1_i64, cpu_env, desc); - - tcg_temp_free_ptr(dest); - tcg_temp_free_i64(s1_i64); } mark_vs_dirty(s); @@ -2210,7 +2148,6 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a) tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); fns[s->sew](dest, s1, cpu_env, desc); - tcg_temp_free_ptr(dest); mark_vs_dirty(s); gen_set_label(over); } @@ -2259,8 +2196,7 @@ static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a) * for EEW=64 in Zve64*. (Section 18.2) */ return opivv_check(s, a) && - (!has_ext(s, RVV) && - s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); + (!has_ext(s, RVV) ? s->sew != MO_64 : true); } static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a) @@ -2271,8 +2207,7 @@ static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a) * for EEW=64 in Zve64*. (Section 18.2) */ return opivx_check(s, a) && - (!has_ext(s, RVV) && - s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); + (!has_ext(s, RVV) ? 
s->sew != MO_64 : true); } GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check) @@ -2335,9 +2270,7 @@ static bool opfvv_check(DisasContext *s, arg_rmrr *a) return require_rvv(s) && require_rvf(s) && vext_check_isa_ill(s) && - vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) && - require_zve32f(s) && - require_zve64f(s); + vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm); } /* OPFVV without GVEC IR */ @@ -2407,10 +2340,6 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, fn(dest, mask, t1, src2, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(src2); - tcg_temp_free_i64(t1); mark_vs_dirty(s); gen_set_label(over); return true; @@ -2425,9 +2354,7 @@ static bool opfvf_check(DisasContext *s, arg_rmrr *a) return require_rvv(s) && require_rvf(s) && vext_check_isa_ill(s) && - vext_check_ss(s, a->rd, a->rs2, a->vm) && - require_zve32f(s) && - require_zve64f(s); + vext_check_ss(s, a->rd, a->rs2, a->vm); } /* OPFVF without GVEC IR */ @@ -2465,9 +2392,7 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a) require_scale_rvf(s) && (s->sew != MO_8) && vext_check_isa_ill(s) && - vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm); } /* OPFVV with WIDEN */ @@ -2510,9 +2435,7 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a) require_scale_rvf(s) && (s->sew != MO_8) && vext_check_isa_ill(s) && - vext_check_ds(s, a->rd, a->rs2, a->vm) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + vext_check_ds(s, a->rd, a->rs2, a->vm); } /* OPFVF with WIDEN */ @@ -2544,9 +2467,7 @@ static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a) require_scale_rvf(s) && (s->sew != MO_8) && vext_check_isa_ill(s) && - vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm); } /* WIDEN OPFVV with WIDEN */ @@ -2589,9 +2510,7 @@ static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a) require_scale_rvf(s) && (s->sew != MO_8) && vext_check_isa_ill(s) && - vext_check_dd(s, a->rd, a->rs2, a->vm) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + vext_check_dd(s, a->rd, a->rs2, a->vm); } /* WIDEN OPFVF with WIDEN */ @@ -2668,9 +2587,7 @@ static bool opfv_check(DisasContext *s, arg_rmr *a) require_rvf(s) && vext_check_isa_ill(s) && /* OPFV instructions ignore vs1 check */ - vext_check_ss(s, a->rd, a->rs2, a->vm) && - require_zve32f(s) && - require_zve64f(s); + vext_check_ss(s, a->rd, a->rs2, a->vm); } static bool do_opfv(DisasContext *s, arg_rmr *a, @@ -2735,9 +2652,7 @@ static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a) return require_rvv(s) && require_rvf(s) && vext_check_isa_ill(s) && - vext_check_mss(s, a->rd, a->rs1, a->rs2) && - require_zve32f(s) && - require_zve64f(s); + vext_check_mss(s, a->rd, a->rs1, a->rs2); } GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check) @@ -2750,9 +2665,7 @@ static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a) return require_rvv(s) && require_rvf(s) && vext_check_isa_ill(s) && - vext_check_ms(s, a->rd, a->rs2) && - require_zve32f(s) && - require_zve64f(s); + vext_check_ms(s, a->rd, a->rs2); } GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check) @@ -2773,9 +2686,7 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a) if (require_rvv(s) && require_rvf(s) && vext_check_isa_ill(s) && - require_align(a->rd, s->lmul) && - require_zve32f(s) && - require_zve64f(s)) { + require_align(a->rd, s->lmul)) { gen_set_rm(s, 
RISCV_FRM_DYN); TCGv_i64 t1; @@ -2814,11 +2725,9 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a) fns[s->sew - 1](dest, t1, cpu_env, desc); - tcg_temp_free_ptr(dest); mark_vs_dirty(s); gen_set_label(over); } - tcg_temp_free_i64(t1); return true; } return false; @@ -2860,18 +2769,14 @@ static bool opfv_widen_check(DisasContext *s, arg_rmr *a) static bool opxfv_widen_check(DisasContext *s, arg_rmr *a) { return opfv_widen_check(s, a) && - require_rvf(s) && - require_zve32f(s) && - require_zve64f(s); + require_rvf(s); } static bool opffv_widen_check(DisasContext *s, arg_rmr *a) { return opfv_widen_check(s, a) && - require_scale_rvf(s) && - (s->sew != MO_8) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + require_scale_rvfmin(s) && + (s->sew != MO_8); } #define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \ @@ -2922,9 +2827,7 @@ static bool opfxv_widen_check(DisasContext *s, arg_rmr *a) require_scale_rvf(s) && vext_check_isa_ill(s) && /* OPFV widening instructions ignore vs1 check */ - vext_check_ds(s, a->rd, a->rs2, a->vm) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + vext_check_ds(s, a->rd, a->rs2, a->vm); } #define GEN_OPFXV_WIDEN_TRANS(NAME) \ @@ -2979,18 +2882,21 @@ static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a) { return opfv_narrow_check(s, a) && require_rvf(s) && - (s->sew != MO_64) && - require_zve32f(s) && - require_zve64f(s); + (s->sew != MO_64); } static bool opffv_narrow_check(DisasContext *s, arg_rmr *a) +{ + return opfv_narrow_check(s, a) && + require_scale_rvfmin(s) && + (s->sew != MO_8); +} + +static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a) { return opfv_narrow_check(s, a) && require_scale_rvf(s) && - (s->sew != MO_8) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + (s->sew != MO_8); } #define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \ @@ -3030,7 +2936,7 @@ GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w, GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w, RISCV_FRM_DYN) /* Reuse the helper function from vfncvt.f.f.w */ -GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_narrow_check, vfncvt_f_f_w, +GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_rod_narrow_check, vfncvt_f_f_w, RISCV_FRM_ROD) static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a) @@ -3039,9 +2945,7 @@ static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a) require_scale_rvf(s) && vext_check_isa_ill(s) && /* OPFV narrowing instructions ignore vs1 check */ - vext_check_sd(s, a->rd, a->rs2, a->vm) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + vext_check_sd(s, a->rd, a->rs2, a->vm); } #define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \ @@ -3115,9 +3019,7 @@ GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check) static bool freduction_check(DisasContext *s, arg_rmrr *a) { return reduction_check(s, a) && - require_rvf(s) && - require_zve32f(s) && - require_zve64f(s); + require_rvf(s); } GEN_OPFVV_TRANS(vfredusum_vs, freduction_check) @@ -3181,7 +3083,7 @@ static bool trans_vcpop_m(DisasContext *s, arg_rmr *a) { if (require_rvv(s) && vext_check_isa_ill(s) && - s->vstart == 0) { + s->vstart_eq_zero) { TCGv_ptr src2, mask; TCGv dst; TCGv_i32 desc; @@ -3200,10 +3102,6 @@ static bool trans_vcpop_m(DisasContext *s, arg_rmr *a) gen_helper_vcpop_m(dst, mask, src2, cpu_env, desc); gen_set_gpr(s, a->rd, dst); - - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(src2); - return true; } return false; @@ -3214,7 +3112,7 @@ static bool trans_vfirst_m(DisasContext *s, arg_rmr *a) { if 
(require_rvv(s) && vext_check_isa_ill(s) && - s->vstart == 0) { + s->vstart_eq_zero) { TCGv_ptr src2, mask; TCGv dst; TCGv_i32 desc; @@ -3233,17 +3131,16 @@ static bool trans_vfirst_m(DisasContext *s, arg_rmr *a) gen_helper_vfirst_m(dst, mask, src2, cpu_env, desc); gen_set_gpr(s, a->rd, dst); - - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(src2); return true; } return false; } -/* vmsbf.m set-before-first mask bit */ -/* vmsif.m set-includ-first mask bit */ -/* vmsof.m set-only-first mask bit */ +/* + * vmsbf.m set-before-first mask bit + * vmsif.m set-including-first mask bit + * vmsof.m set-only-first mask bit + */ #define GEN_M_TRANS(NAME) \ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ { \ @@ -3251,7 +3148,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ vext_check_isa_ill(s) && \ require_vm(a->vm, a->rd) && \ (a->rd != a->rs2) && \ - (s->vstart == 0)) { \ + s->vstart_eq_zero) { \ uint32_t data = 0; \ gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \ TCGLabel *over = gen_new_label(); \ @@ -3292,7 +3189,7 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a) !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) && require_vm(a->vm, a->rd) && require_align(a->rd, s->lmul) && - (s->vstart == 0)) { + s->vstart_eq_zero) { uint32_t data = 0; TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); @@ -3430,8 +3327,6 @@ static void vec_element_loadx(DisasContext *s, TCGv_i64 dest, /* Perform the load. */ load_element(dest, base, vreg_ofs(s, vreg), s->sew, false); - tcg_temp_free_ptr(base); - tcg_temp_free_i32(ofs); /* Flush out-of-range indexing to zero. */ t_vlmax = tcg_constant_i64(vlmax); @@ -3440,8 +3335,6 @@ static void vec_element_loadx(DisasContext *s, TCGv_i64 dest, tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx, t_vlmax, dest, t_zero); - - tcg_temp_free_i64(t_idx); } static void vec_element_loadi(DisasContext *s, TCGv_i64 dest, @@ -3501,9 +3394,6 @@ static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a) vec_element_loadi(s, t1, a->rs2, 0, true); tcg_gen_trunc_i64_tl(dest, t1); gen_set_gpr(s, a->rd, dest); - tcg_temp_free_i64(t1); - tcg_temp_free(dest); - return true; } return false; @@ -3531,7 +3421,6 @@ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a) s1 = get_gpr(s, a->rs1, EXT_NONE); tcg_gen_ext_tl_i64(t1, s1); vec_element_storei(s, a->rd, 0, t1); - tcg_temp_free_i64(t1); mark_vs_dirty(s); gen_set_label(over); return true; @@ -3544,9 +3433,7 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a) { if (require_rvv(s) && require_rvf(s) && - vext_check_isa_ill(s) && - require_zve32f(s) && - require_zve64f(s)) { + vext_check_isa_ill(s)) { gen_set_rm(s, RISCV_FRM_DYN); unsigned int ofs = (8 << s->sew); @@ -3572,9 +3459,7 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a) { if (require_rvv(s) && require_rvf(s) && - vext_check_isa_ill(s) && - require_zve32f(s) && - require_zve64f(s)) { + vext_check_isa_ill(s)) { gen_set_rm(s, RISCV_FRM_DYN); /* The instructions ignore LMUL and vector register group. 
*/ @@ -3590,7 +3475,6 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a) do_nanbox(s, t1, cpu_fpr[a->rs1]); vec_element_storei(s, a->rd, 0, t1); - tcg_temp_free_i64(t1); mark_vs_dirty(s); gen_set_label(over); return true; @@ -3625,17 +3509,13 @@ GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check) static bool fslideup_check(DisasContext *s, arg_rmrr *a) { return slideup_check(s, a) && - require_rvf(s) && - require_zve32f(s) && - require_zve64f(s); + require_rvf(s); } static bool fslidedown_check(DisasContext *s, arg_rmrr *a) { return slidedown_check(s, a) && - require_rvf(s) && - require_zve32f(s) && - require_zve64f(s); + require_rvf(s); } GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check) @@ -3703,7 +3583,6 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a) tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), dest); - tcg_temp_free_i64(dest); mark_vs_dirty(s); } else { static gen_helper_opivx * const fns[4] = { @@ -3759,7 +3638,7 @@ static bool vcompress_vm_check(DisasContext *s, arg_r *a) require_align(a->rs2, s->lmul) && (a->rd != a->rs2) && !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1) && - (s->vstart == 0); + s->vstart_eq_zero; } static bool trans_vcompress_vm(DisasContext *s, arg_r *a) @@ -3798,7 +3677,7 @@ static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ QEMU_IS_ALIGNED(a->rd, LEN) && \ QEMU_IS_ALIGNED(a->rs2, LEN)) { \ uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \ - if (s->vstart == 0) { \ + if (s->vstart_eq_zero) { \ /* EEW = 8 */ \ tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \ vreg_ofs(s, a->rs2), maxsz, maxsz); \ diff --git a/target/riscv/insn_trans/trans_rvzce.c.inc b/target/riscv/insn_trans/trans_rvzce.c.inc new file mode 100644 index 0000000000..a727169a4b --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzce.c.inc @@ -0,0 +1,311 @@ +/* + * RISC-V translation routines for the Zc[b,mp,mt] Standard Extensions. + * + * Copyright (c) 2021-2022 PLCT Lab + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#define REQUIRE_ZCB(ctx) do { \ + if (!ctx->cfg_ptr->ext_zcb) \ + return false; \ +} while (0) + +#define REQUIRE_ZCMP(ctx) do { \ + if (!ctx->cfg_ptr->ext_zcmp) \ + return false; \ +} while (0) + +#define REQUIRE_ZCMT(ctx) do { \ + if (!ctx->cfg_ptr->ext_zcmt) \ + return false; \ +} while (0) + +static bool trans_c_zext_b(DisasContext *ctx, arg_c_zext_b *a) +{ + REQUIRE_ZCB(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext8u_tl); +} + +static bool trans_c_zext_h(DisasContext *ctx, arg_c_zext_h *a) +{ + REQUIRE_ZCB(ctx); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext16u_tl); +} + +static bool trans_c_sext_b(DisasContext *ctx, arg_c_sext_b *a) +{ + REQUIRE_ZCB(ctx); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext8s_tl); +} + +static bool trans_c_sext_h(DisasContext *ctx, arg_c_sext_h *a) +{ + REQUIRE_ZCB(ctx); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext16s_tl); +} + +static bool trans_c_zext_w(DisasContext *ctx, arg_c_zext_w *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZCB(ctx); + REQUIRE_ZBA(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext32u_tl); +} + +static bool trans_c_not(DisasContext *ctx, arg_c_not *a) +{ + REQUIRE_ZCB(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_not_tl); +} + +static bool trans_c_mul(DisasContext *ctx, arg_c_mul *a) +{ + REQUIRE_ZCB(ctx); + REQUIRE_M_OR_ZMMUL(ctx); + return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl, NULL); +} + +static bool trans_c_lbu(DisasContext *ctx, arg_c_lbu *a) +{ + REQUIRE_ZCB(ctx); + return gen_load(ctx, a, MO_UB); +} + +static bool trans_c_lhu(DisasContext *ctx, arg_c_lhu *a) +{ + REQUIRE_ZCB(ctx); + return gen_load(ctx, a, MO_UW); +} + +static bool trans_c_lh(DisasContext *ctx, arg_c_lh *a) +{ + REQUIRE_ZCB(ctx); + return gen_load(ctx, a, MO_SW); +} + +static bool trans_c_sb(DisasContext *ctx, arg_c_sb *a) +{ + REQUIRE_ZCB(ctx); + return gen_store(ctx, a, MO_UB); +} + +static bool trans_c_sh(DisasContext *ctx, arg_c_sh *a) +{ + REQUIRE_ZCB(ctx); + return gen_store(ctx, a, MO_UW); +} + +#define X_S0 8 +#define X_S1 9 +#define X_Sn 16 + +static uint32_t decode_push_pop_list(DisasContext *ctx, target_ulong rlist) +{ + uint32_t reg_bitmap = 0; + + if (has_ext(ctx, RVE) && rlist > 6) { + return 0; + } + + switch (rlist) { + case 15: + reg_bitmap |= 1 << (X_Sn + 11) ; + reg_bitmap |= 1 << (X_Sn + 10) ; + /* FALL THROUGH */ + case 14: + reg_bitmap |= 1 << (X_Sn + 9) ; + /* FALL THROUGH */ + case 13: + reg_bitmap |= 1 << (X_Sn + 8) ; + /* FALL THROUGH */ + case 12: + reg_bitmap |= 1 << (X_Sn + 7) ; + /* FALL THROUGH */ + case 11: + reg_bitmap |= 1 << (X_Sn + 6) ; + /* FALL THROUGH */ + case 10: + reg_bitmap |= 1 << (X_Sn + 5) ; + /* FALL THROUGH */ + case 9: + reg_bitmap |= 1 << (X_Sn + 4) ; + /* FALL THROUGH */ + case 8: + reg_bitmap |= 1 << (X_Sn + 3) ; + /* FALL THROUGH */ + case 7: + reg_bitmap |= 1 << (X_Sn + 2) ; + /* FALL THROUGH */ + case 6: + reg_bitmap |= 1 << X_S1 ; + /* FALL THROUGH */ + case 5: + reg_bitmap |= 1 << X_S0; + /* FALL THROUGH */ + case 4: + reg_bitmap |= 1 << xRA; + break; + default: + break; + } + + return reg_bitmap; +} + +static bool gen_pop(DisasContext *ctx, arg_cmpp *a, bool ret, bool ret_val) +{ + REQUIRE_ZCMP(ctx); + + uint32_t reg_bitmap = decode_push_pop_list(ctx, a->urlist); + if (reg_bitmap == 0) { + return false; + } + + MemOp memop = get_ol(ctx) == MXL_RV32 ? 
MO_TEUL : MO_TEUQ; + int reg_size = memop_size(memop); + target_ulong stack_adj = ROUND_UP(ctpop32(reg_bitmap) * reg_size, 16) + + a->spimm; + TCGv sp = dest_gpr(ctx, xSP); + TCGv addr = tcg_temp_new(); + int i; + + tcg_gen_addi_tl(addr, sp, stack_adj - reg_size); + + for (i = X_Sn + 11; i >= 0; i--) { + if (reg_bitmap & (1 << i)) { + TCGv dest = dest_gpr(ctx, i); + tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop); + gen_set_gpr(ctx, i, dest); + tcg_gen_subi_tl(addr, addr, reg_size); + } + } + + tcg_gen_addi_tl(sp, sp, stack_adj); + gen_set_gpr(ctx, xSP, sp); + + if (ret_val) { + gen_set_gpr(ctx, xA0, ctx->zero); + } + + if (ret) { + TCGv ret_addr = get_gpr(ctx, xRA, EXT_NONE); + gen_set_pc(ctx, ret_addr); + tcg_gen_lookup_and_goto_ptr(); + ctx->base.is_jmp = DISAS_NORETURN; + } + + return true; +} + +static bool trans_cm_push(DisasContext *ctx, arg_cm_push *a) +{ + REQUIRE_ZCMP(ctx); + + uint32_t reg_bitmap = decode_push_pop_list(ctx, a->urlist); + if (reg_bitmap == 0) { + return false; + } + + MemOp memop = get_ol(ctx) == MXL_RV32 ? MO_TEUL : MO_TEUQ; + int reg_size = memop_size(memop); + target_ulong stack_adj = ROUND_UP(ctpop32(reg_bitmap) * reg_size, 16) + + a->spimm; + TCGv sp = dest_gpr(ctx, xSP); + TCGv addr = tcg_temp_new(); + int i; + + tcg_gen_subi_tl(addr, sp, reg_size); + + for (i = X_Sn + 11; i >= 0; i--) { + if (reg_bitmap & (1 << i)) { + TCGv val = get_gpr(ctx, i, EXT_NONE); + tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, memop); + tcg_gen_subi_tl(addr, addr, reg_size); + } + } + + tcg_gen_subi_tl(sp, sp, stack_adj); + gen_set_gpr(ctx, xSP, sp); + + return true; +} + +static bool trans_cm_pop(DisasContext *ctx, arg_cm_pop *a) +{ + return gen_pop(ctx, a, false, false); +} + +static bool trans_cm_popret(DisasContext *ctx, arg_cm_popret *a) +{ + return gen_pop(ctx, a, true, false); +} + +static bool trans_cm_popretz(DisasContext *ctx, arg_cm_popret *a) +{ + return gen_pop(ctx, a, true, true); +} + +static bool trans_cm_mva01s(DisasContext *ctx, arg_cm_mva01s *a) +{ + REQUIRE_ZCMP(ctx); + + TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE); + TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + gen_set_gpr(ctx, xA0, src1); + gen_set_gpr(ctx, xA1, src2); + + return true; +} + +static bool trans_cm_mvsa01(DisasContext *ctx, arg_cm_mvsa01 *a) +{ + REQUIRE_ZCMP(ctx); + + if (a->rs1 == a->rs2) { + return false; + } + + TCGv a0 = get_gpr(ctx, xA0, EXT_NONE); + TCGv a1 = get_gpr(ctx, xA1, EXT_NONE); + + gen_set_gpr(ctx, a->rs1, a0); + gen_set_gpr(ctx, a->rs2, a1); + + return true; +} + +static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a) +{ + REQUIRE_ZCMT(ctx); + + /* + * Update pc to current for the non-unwinding exception + * that might come from cpu_ld*_code() in the helper. + */ + tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); + gen_helper_cm_jalt(cpu_pc, cpu_env, tcg_constant_i32(a->index)); + + /* c.jt vs c.jalt depends on the index. 
*/ + if (a->index >= 32) { + gen_set_gpri(ctx, xRA, ctx->pc_succ_insn); + } + + tcg_gen_lookup_and_goto_ptr(); + ctx->base.is_jmp = DISAS_NORETURN; + return true; +} diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc index 2ad5716312..74dde37ff7 100644 --- a/target/riscv/insn_trans/trans_rvzfh.c.inc +++ b/target/riscv/insn_trans/trans_rvzfh.c.inc @@ -28,15 +28,14 @@ } \ } while (0) -#define REQUIRE_ZFH_OR_ZFHMIN(ctx) do { \ - if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin)) { \ +#define REQUIRE_ZFHMIN(ctx) do { \ + if (!ctx->cfg_ptr->ext_zfhmin) { \ return false; \ } \ } while (0) -#define REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx) do { \ - if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin || \ - ctx->cfg_ptr->ext_zhinx || ctx->cfg_ptr->ext_zhinxmin)) { \ +#define REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx) do { \ + if (!(ctx->cfg_ptr->ext_zfhmin || ctx->cfg_ptr->ext_zhinxmin)) { \ return false; \ } \ } while (0) @@ -47,12 +46,12 @@ static bool trans_flh(DisasContext *ctx, arg_flh *a) TCGv t0; REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN(ctx); + REQUIRE_ZFHMIN(ctx); decode_save_opc(ctx); t0 = get_gpr(ctx, a->rs1, EXT_NONE); if (a->imm) { - TCGv temp = temp_new(ctx); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, t0, a->imm); t0 = temp; } @@ -70,7 +69,7 @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a) TCGv t0; REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN(ctx); + REQUIRE_ZFHMIN(ctx); decode_save_opc(ctx); t0 = get_gpr(ctx, a->rs1, EXT_NONE); @@ -257,9 +256,6 @@ static bool trans_fsgnj_h(DisasContext *ctx, arg_fsgnj_h *a) /* This formulation retains the nanboxing of rs2 in normal 'Zfh'. */ tcg_gen_deposit_i64(dest, rs2, rs1, 0, 15); - - tcg_temp_free_i64(rs1); - tcg_temp_free_i64(rs2); } else { tcg_gen_deposit_i64(dest, src2, src1, 0, 15); tcg_gen_ext16s_i64(dest, dest); @@ -303,20 +299,16 @@ static bool trans_fsgnjn_h(DisasContext *ctx, arg_fsgnjn_h *a) * Replace bit 15 in rs1 with inverse in rs2. * This formulation retains the nanboxing of rs1. 
*/ - mask = tcg_const_i64(~MAKE_64BIT_MASK(15, 1)); + mask = tcg_constant_i64(~MAKE_64BIT_MASK(15, 1)); tcg_gen_not_i64(rs2, rs2); tcg_gen_andc_i64(rs2, rs2, mask); tcg_gen_and_i64(dest, mask, rs1); tcg_gen_or_i64(dest, dest, rs2); - - tcg_temp_free_i64(mask); - tcg_temp_free_i64(rs2); } /* sign-extended instead of nanboxing for the result if zfinx is enabled */ if (ctx->cfg_ptr->ext_zfinx) { tcg_gen_ext16s_i64(dest, dest); } - tcg_temp_free_i64(rs1); mark_fs_dirty(ctx); return true; } @@ -356,14 +348,11 @@ static bool trans_fsgnjx_h(DisasContext *ctx, arg_fsgnjx_h *a) */ tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(15, 1)); tcg_gen_xor_i64(dest, rs1, dest); - - tcg_temp_free_i64(rs2); } /* sign-extended instead of nanboxing for the result if zfinx is enabled */ if (ctx->cfg_ptr->ext_zfinx) { tcg_gen_ext16s_i64(dest, dest); } - tcg_temp_free_i64(rs1); mark_fs_dirty(ctx); return true; } @@ -401,7 +390,7 @@ static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a) static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a) { REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx); + REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx); TCGv_i64 dest = dest_fpr(ctx, a->rd); TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); @@ -418,7 +407,7 @@ static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a) static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a) { REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx); + REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx); REQUIRE_ZDINX_OR_D(ctx); TCGv_i64 dest = dest_fpr(ctx, a->rd); @@ -436,7 +425,7 @@ static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a) static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a) { REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx); + REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx); TCGv_i64 dest = dest_fpr(ctx, a->rd); TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); @@ -452,7 +441,7 @@ static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a) static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a) { REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx); + REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx); REQUIRE_ZDINX_OR_D(ctx); TCGv_i64 dest = dest_fpr(ctx, a->rd); @@ -585,7 +574,7 @@ static bool trans_fcvt_h_wu(DisasContext *ctx, arg_fcvt_h_wu *a) static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a) { REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN(ctx); + REQUIRE_ZFHMIN(ctx); TCGv dest = dest_gpr(ctx, a->rd); @@ -605,7 +594,7 @@ static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a) static bool trans_fmv_h_x(DisasContext *ctx, arg_fmv_h_x *a) { REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN(ctx); + REQUIRE_ZFHMIN(ctx); TCGv t0 = get_gpr(ctx, a->rs1, EXT_ZERO); diff --git a/target/riscv/insn_trans/trans_rvzicbo.c.inc b/target/riscv/insn_trans/trans_rvzicbo.c.inc new file mode 100644 index 0000000000..7df9c30b58 --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzicbo.c.inc @@ -0,0 +1,57 @@ +/* + * RISC-V translation routines for the RISC-V CBO Extension. + * + * Copyright (c) 2021 Philipp Tomsich, philipp.tomsich@vrull.eu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+ * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define REQUIRE_ZICBOM(ctx) do { \ + if (!ctx->cfg_ptr->ext_icbom) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZICBOZ(ctx) do { \ + if (!ctx->cfg_ptr->ext_icboz) { \ + return false; \ + } \ +} while (0) + +static bool trans_cbo_clean(DisasContext *ctx, arg_cbo_clean *a) +{ + REQUIRE_ZICBOM(ctx); + gen_helper_cbo_clean_flush(cpu_env, cpu_gpr[a->rs1]); + return true; +} + +static bool trans_cbo_flush(DisasContext *ctx, arg_cbo_flush *a) +{ + REQUIRE_ZICBOM(ctx); + gen_helper_cbo_clean_flush(cpu_env, cpu_gpr[a->rs1]); + return true; +} + +static bool trans_cbo_inval(DisasContext *ctx, arg_cbo_inval *a) +{ + REQUIRE_ZICBOM(ctx); + gen_helper_cbo_inval(cpu_env, cpu_gpr[a->rs1]); + return true; +} + +static bool trans_cbo_zero(DisasContext *ctx, arg_cbo_zero *a) +{ + REQUIRE_ZICBOZ(ctx); + gen_helper_cbo_zero(cpu_env, cpu_gpr[a->rs1]); + return true; +} diff --git a/target/riscv/insn_trans/trans_rvzicond.c.inc b/target/riscv/insn_trans/trans_rvzicond.c.inc new file mode 100644 index 0000000000..c8e43fa325 --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzicond.c.inc @@ -0,0 +1,55 @@ +/* + * RISC-V translation routines for the Zicond Standard Extension. + * + * Copyright (c) 2020-2023 PLCT Lab + * Copyright (c) 2022 VRULL GmbH. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define REQUIRE_ZICOND(ctx) do { \ + if (!ctx->cfg_ptr->ext_zicond) { \ + return false; \ + } \ +} while (0) + +/* Emits "$rd = ($rs2 <cond> $zero) ?
$zero : $rs1" */ +static void gen_czero(TCGv dest, TCGv src1, TCGv src2, TCGCond cond) +{ + TCGv zero = tcg_constant_tl(0); + tcg_gen_movcond_tl(cond, dest, src2, zero, zero, src1); +} + +static void gen_czero_eqz(TCGv dest, TCGv src1, TCGv src2) +{ + gen_czero(dest, src1, src2, TCG_COND_EQ); +} + +static void gen_czero_nez(TCGv dest, TCGv src1, TCGv src2) +{ + gen_czero(dest, src1, src2, TCG_COND_NE); +} + +static bool trans_czero_eqz(DisasContext *ctx, arg_r *a) +{ + REQUIRE_ZICOND(ctx); + + return gen_logic(ctx, a, gen_czero_eqz); +} + +static bool trans_czero_nez(DisasContext *ctx, arg_r *a) +{ + REQUIRE_ZICOND(ctx); + + return gen_logic(ctx, a, gen_czero_nez); +} diff --git a/target/riscv/insn_trans/trans_xthead.c.inc b/target/riscv/insn_trans/trans_xthead.c.inc index be87c34f56..3e13b1d74d 100644 --- a/target/riscv/insn_trans/trans_xthead.c.inc +++ b/target/riscv/insn_trans/trans_xthead.c.inc @@ -100,10 +100,7 @@ static TCGv get_th_address_indexed(DisasContext *ctx, int rs1, int rs2, tcg_gen_shli_tl(offs, src2, imm2); } - TCGv addr = get_address_indexed(ctx, rs1, offs); - - tcg_temp_free(offs); - return addr; + return get_address_indexed(ctx, rs1, offs); } /* XTheadBa */ @@ -120,7 +117,6 @@ static void gen_th_addsl##SHAMT(TCGv ret, TCGv arg1, TCGv arg2) \ TCGv t = tcg_temp_new(); \ tcg_gen_shli_tl(t, arg2, SHAMT); \ tcg_gen_add_tl(ret, t, arg1); \ - tcg_temp_free(t); \ } GEN_TH_ADDSL(1) @@ -204,7 +200,6 @@ static bool gen_th_ff0(DisasContext *ctx, arg_th_ff0 *a, DisasExtend ext) gen_clz(dest, t); } - tcg_temp_free(t); gen_set_gpr(ctx, a->rd, dest); return true; @@ -268,25 +263,13 @@ static bool trans_th_tst(DisasContext *ctx, arg_th_tst *a) /* XTheadCmo */ -static inline int priv_level(DisasContext *ctx) -{ -#ifdef CONFIG_USER_ONLY - return PRV_U; -#else - /* Priv level is part of mem_idx. */ - return ctx->mem_idx & TB_FLAGS_PRIV_MMU_MASK; -#endif -} - /* Test if priv level is M, S, or U (cannot fail). */ #define REQUIRE_PRIV_MSU(ctx) /* Test if priv level is M or S. 
*/ #define REQUIRE_PRIV_MS(ctx) \ do { \ - int priv = priv_level(ctx); \ - if (!(priv == PRV_M || \ - priv == PRV_S)) { \ + if (ctx->priv == PRV_U) { \ return false; \ } \ } while (0) @@ -469,7 +452,6 @@ static bool trans_th_fmv_hw_x(DisasContext *ctx, arg_th_fmv_hw_x *a) tcg_gen_extu_tl_i64(t1, src1); tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd], t1, 32, 32); - tcg_temp_free_i64(t1); mark_fs_dirty(ctx); return true; } @@ -489,7 +471,6 @@ static bool trans_th_fmv_x_hw(DisasContext *ctx, arg_th_fmv_x_hw *a) tcg_gen_extract_i64(t1, cpu_fpr[a->rs1], 32, 32); tcg_gen_trunc_i64_tl(dst, t1); gen_set_gpr(ctx, a->rd, dst); - tcg_temp_free_i64(t1); mark_fs_dirty(ctx); return true; } @@ -511,15 +492,12 @@ static bool gen_th_mac(DisasContext *ctx, arg_r *a, extend_operand_func(tmp, src1); extend_operand_func(tmp2, src2); tcg_gen_mul_tl(tmp, tmp, tmp2); - tcg_temp_free(tmp2); } else { tcg_gen_mul_tl(tmp, src1, src2); } accumulate_func(dest, src0, tmp); gen_set_gpr(ctx, a->rd, dest); - tcg_temp_free(tmp); - return true; } @@ -594,8 +572,6 @@ static bool gen_load_inc(DisasContext *ctx, arg_th_meminc *a, MemOp memop, tcg_gen_addi_tl(rs1, rs1, imm); gen_set_gpr(ctx, a->rd, rd); gen_set_gpr(ctx, a->rs1, rs1); - - tcg_temp_free(addr); return true; } @@ -615,8 +591,6 @@ static bool gen_store_inc(DisasContext *ctx, arg_th_meminc *a, MemOp memop, tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop); tcg_gen_addi_tl(rs1, rs1, imm); gen_set_gpr(ctx, a->rs1, rs1); - - tcg_temp_free(addr); return true; } @@ -950,11 +924,6 @@ static bool gen_loadpair_tl(DisasContext *ctx, arg_th_pair *a, MemOp memop, tcg_gen_qemu_ld_tl(t2, addr2, ctx->mem_idx, memop); gen_set_gpr(ctx, a->rd1, t1); gen_set_gpr(ctx, a->rd2, t2); - - tcg_temp_free(t1); - tcg_temp_free(t2); - tcg_temp_free(addr1); - tcg_temp_free(addr2); return true; } @@ -980,10 +949,6 @@ static bool trans_th_lwud(DisasContext *ctx, arg_th_pair *a) static bool gen_storepair_tl(DisasContext *ctx, arg_th_pair *a, MemOp memop, int shamt) { - if (a->rs == a->rd1 || a->rs == a->rd2 || a->rd1 == a->rd2) { - return false; - } - TCGv data1 = get_gpr(ctx, a->rd1, EXT_NONE); TCGv data2 = get_gpr(ctx, a->rd2, EXT_NONE); TCGv addr1 = tcg_temp_new(); @@ -995,9 +960,6 @@ static bool gen_storepair_tl(DisasContext *ctx, arg_th_pair *a, MemOp memop, tcg_gen_qemu_st_tl(data1, addr1, ctx->mem_idx, memop); tcg_gen_qemu_st_tl(data2, addr2, ctx->mem_idx, memop); - - tcg_temp_free(addr1); - tcg_temp_free(addr2); return true; } diff --git a/target/riscv/insn_trans/trans_xventanacondops.c.inc b/target/riscv/insn_trans/trans_xventanacondops.c.inc index 16849e6d4e..38c15f2825 100644 --- a/target/riscv/insn_trans/trans_xventanacondops.c.inc +++ b/target/riscv/insn_trans/trans_xventanacondops.c.inc @@ -1,7 +1,7 @@ /* * RISC-V translation routines for the XVentanaCondOps extension. * - * Copyright (c) 2021-2022 VRULL GmbH. + * Copyright (c) 2021-2023 VRULL GmbH. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -16,24 +16,12 @@ * this program. If not, see <http://www.gnu.org/licenses/>.
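The rewrite just below works because vt.maskc/vt.maskcn have exactly the semantics of Zicond's czero.eqz/czero.nez, so both extensions can share the gen_czero helpers. Reference semantics in plain C (a sketch, not the generated TCG):

    /* czero.eqz / vt.maskc:  rd = (rs2 == 0) ? 0 : rs1 */
    static uint64_t ref_czero_eqz(uint64_t rs1, uint64_t rs2)
    {
        return rs2 == 0 ? 0 : rs1;
    }

    /* czero.nez / vt.maskcn: rd = (rs2 != 0) ? 0 : rs1 */
    static uint64_t ref_czero_nez(uint64_t rs1, uint64_t rs2)
    {
        return rs2 != 0 ? 0 : rs1;
    }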
*/ -static bool gen_vt_condmask(DisasContext *ctx, arg_r *a, TCGCond cond) -{ - TCGv dest = dest_gpr(ctx, a->rd); - TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE); - TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); - - tcg_gen_movcond_tl(cond, dest, src2, ctx->zero, src1, ctx->zero); - - gen_set_gpr(ctx, a->rd, dest); - return true; -} - static bool trans_vt_maskc(DisasContext *ctx, arg_r *a) { - return gen_vt_condmask(ctx, a, TCG_COND_NE); + return gen_logic(ctx, a, gen_czero_eqz); } static bool trans_vt_maskcn(DisasContext *ctx, arg_r *a) { - return gen_vt_condmask(ctx, a, TCG_COND_EQ); + return gen_logic(ctx, a, gen_czero_nez); } diff --git a/target/riscv/internals.h b/target/riscv/internals.h index 5620fbffb6..b5f823c7ec 100644 --- a/target/riscv/internals.h +++ b/target/riscv/internals.h @@ -21,6 +21,41 @@ #include "hw/registerfields.h" +/* + * The current MMU Modes are: + * - U 0b000 + * - S 0b001 + * - S+SUM 0b010 + * - M 0b011 + * - U+2STAGE 0b100 + * - S+2STAGE 0b101 + * - S+SUM+2STAGE 0b110 + */ +#define MMUIdx_U 0 +#define MMUIdx_S 1 +#define MMUIdx_S_SUM 2 +#define MMUIdx_M 3 +#define MMU_2STAGE_BIT (1 << 2) + +static inline int mmuidx_priv(int mmu_idx) +{ + int ret = mmu_idx & 3; + if (ret == MMUIdx_S_SUM) { + ret = PRV_S; + } + return ret; +} + +static inline bool mmuidx_sum(int mmu_idx) +{ + return (mmu_idx & 3) == MMUIdx_S_SUM; +} + +static inline bool mmuidx_2stage(int mmu_idx) +{ + return mmu_idx & MMU_2STAGE_BIT; +} + /* share data between vector helpers and decode code */ FIELD(VDATA, VM, 0, 1) FIELD(VDATA, LMUL, 1, 3) diff --git a/target/riscv/m128_helper.c b/target/riscv/m128_helper.c index 7bf115b85e..e6a4f6120a 100644 --- a/target/riscv/m128_helper.c +++ b/target/riscv/m128_helper.c @@ -24,8 +24,8 @@ #include "exec/helper-proto.h" target_ulong HELPER(divu_i128)(CPURISCVState *env, - target_ulong ul, target_ulong uh, - target_ulong vl, target_ulong vh) + target_ulong ul, target_ulong uh, + target_ulong vl, target_ulong vh) { target_ulong ql, qh; Int128 q; @@ -44,8 +44,8 @@ target_ulong HELPER(divu_i128)(CPURISCVState *env, } target_ulong HELPER(remu_i128)(CPURISCVState *env, - target_ulong ul, target_ulong uh, - target_ulong vl, target_ulong vh) + target_ulong ul, target_ulong uh, + target_ulong vl, target_ulong vh) { target_ulong rl, rh; Int128 r; @@ -64,8 +64,8 @@ target_ulong HELPER(remu_i128)(CPURISCVState *env, } target_ulong HELPER(divs_i128)(CPURISCVState *env, - target_ulong ul, target_ulong uh, - target_ulong vl, target_ulong vh) + target_ulong ul, target_ulong uh, + target_ulong vl, target_ulong vh) { target_ulong qh, ql; Int128 q; @@ -89,8 +89,8 @@ target_ulong HELPER(divs_i128)(CPURISCVState *env, } target_ulong HELPER(rems_i128)(CPURISCVState *env, - target_ulong ul, target_ulong uh, - target_ulong vl, target_ulong vh) + target_ulong ul, target_ulong uh, + target_ulong vl, target_ulong vh) { target_ulong rh, rl; Int128 r; diff --git a/target/riscv/machine.c b/target/riscv/machine.c index c6ce318cce..3ce2970785 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -27,9 +27,8 @@ static bool pmp_needed(void *opaque) { RISCVCPU *cpu = opaque; - CPURISCVState *env = &cpu->env; - return riscv_feature(env, RISCV_FEATURE_PMP); + return cpu->cfg.pmp; } static int pmp_post_load(void *opaque, int version_id) @@ -137,15 +136,15 @@ static const VMStateDescription vmstate_vector = { .minimum_version_id = 2, .needed = vector_needed, .fields = (VMStateField[]) { - VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64), - VMSTATE_UINTTL(env.vxrm, 
RISCVCPU), - VMSTATE_UINTTL(env.vxsat, RISCVCPU), - VMSTATE_UINTTL(env.vl, RISCVCPU), - VMSTATE_UINTTL(env.vstart, RISCVCPU), - VMSTATE_UINTTL(env.vtype, RISCVCPU), - VMSTATE_BOOL(env.vill, RISCVCPU), - VMSTATE_END_OF_LIST() - } + VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64), + VMSTATE_UINTTL(env.vxrm, RISCVCPU), + VMSTATE_UINTTL(env.vxsat, RISCVCPU), + VMSTATE_UINTTL(env.vl, RISCVCPU), + VMSTATE_UINTTL(env.vstart, RISCVCPU), + VMSTATE_UINTTL(env.vtype, RISCVCPU), + VMSTATE_BOOL(env.vill, RISCVCPU), + VMSTATE_END_OF_LIST() + } }; static bool pointermasking_needed(void *opaque) @@ -226,9 +225,8 @@ static const VMStateDescription vmstate_kvmtimer = { static bool debug_needed(void *opaque) { RISCVCPU *cpu = opaque; - CPURISCVState *env = &cpu->env; - return riscv_feature(env, RISCV_FEATURE_DEBUG); + return cpu->cfg.debug; } static int debug_post_load(void *opaque, int version_id) @@ -331,10 +329,28 @@ static const VMStateDescription vmstate_pmu_ctr_state = { } }; +static bool jvt_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + + return cpu->cfg.ext_zcmt; +} + +static const VMStateDescription vmstate_jvt = { + .name = "cpu/jvt", + .version_id = 1, + .minimum_version_id = 1, + .needed = jvt_needed, + .fields = (VMStateField[]) { + VMSTATE_UINTTL(env.jvt, RISCVCPU), + VMSTATE_END_OF_LIST() + } +}; + const VMStateDescription vmstate_riscv_cpu = { .name = "cpu", - .version_id = 6, - .minimum_version_id = 6, + .version_id = 8, + .minimum_version_id = 8, .post_load = riscv_cpu_post_load, .fields = (VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32), @@ -353,9 +369,8 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_UINT32(env.misa_ext, RISCVCPU), VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU), VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU), - VMSTATE_UINT32(env.features, RISCVCPU), VMSTATE_UINTTL(env.priv, RISCVCPU), - VMSTATE_UINTTL(env.virt, RISCVCPU), + VMSTATE_BOOL(env.virt_enabled, RISCVCPU), VMSTATE_UINT64(env.resetvec, RISCVCPU), VMSTATE_UINTTL(env.mhartid, RISCVCPU), VMSTATE_UINT64(env.mstatus, RISCVCPU), @@ -398,6 +413,7 @@ const VMStateDescription vmstate_riscv_cpu = { &vmstate_envcfg, &vmstate_debug, &vmstate_smstateen, + &vmstate_jvt, NULL } }; diff --git a/target/riscv/meson.build b/target/riscv/meson.build index 5dee37a242..e1ff6d9b95 100644 --- a/target/riscv/meson.build +++ b/target/riscv/meson.build @@ -19,7 +19,8 @@ riscv_ss.add(files( 'bitmanip_helper.c', 'translate.c', 'm128_helper.c', - 'crypto_helper.c' + 'crypto_helper.c', + 'zce_helper.c' )) riscv_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'), if_false: files('kvm-stub.c')) @@ -31,7 +32,8 @@ riscv_softmmu_ss.add(files( 'monitor.c', 'machine.c', 'pmu.c', - 'time_helper.c' + 'time_helper.c', + 'riscv-qmp-cmds.c', )) target_arch += {'riscv': riscv_ss} diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c index 236f93b9f5..f36ddfa967 100644 --- a/target/riscv/monitor.c +++ b/target/riscv/monitor.c @@ -218,7 +218,7 @@ void hmp_info_mem(Monitor *mon, const QDict *qdict) return; } - if (!riscv_feature(env, RISCV_FEATURE_MMU)) { + if (!riscv_cpu_cfg(env)->mmu) { monitor_printf(mon, "S-mode MMU unavailable\n"); return; } diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index 48f918b71b..f563dc3981 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -3,6 +3,7 @@ * * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu * Copyright (c) 2017-2018 SiFive, Inc. 
+ * Copyright (c) 2022 VRULL GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -19,6 +20,7 @@ #include "qemu/osdep.h" #include "cpu.h" +#include "internals.h" #include "qemu/main-loop.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" @@ -123,6 +125,140 @@ target_ulong helper_csrrw_i128(CPURISCVState *env, int csr, return int128_getlo(rv); } + +/* + * check_zicbo_envcfg + * + * Raise virtual exceptions and illegal instruction exceptions for + * Zicbo[mz] instructions based on the settings of [mhs]envcfg as + * specified in section 2.5.1 of the CMO specification. + */ +static void check_zicbo_envcfg(CPURISCVState *env, target_ulong envbits, + uintptr_t ra) +{ +#ifndef CONFIG_USER_ONLY + if ((env->priv < PRV_M) && !get_field(env->menvcfg, envbits)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra); + } + + if (env->virt_enabled && + (((env->priv <= PRV_S) && !get_field(env->henvcfg, envbits)) || + ((env->priv < PRV_S) && !get_field(env->senvcfg, envbits)))) { + riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, ra); + } + + if ((env->priv < PRV_S) && !get_field(env->senvcfg, envbits)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra); + } +#endif +} + +void helper_cbo_zero(CPURISCVState *env, target_ulong address) +{ + RISCVCPU *cpu = env_archcpu(env); + uint16_t cbozlen = cpu->cfg.cboz_blocksize; + int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); + void *mem; + + check_zicbo_envcfg(env, MENVCFG_CBZE, ra); + + /* Mask off low-bits to align-down to the cache-block. */ + address &= ~(cbozlen - 1); + + /* + * cbo.zero requires MMU_DATA_STORE access. Do a probe_write() + * to raise any exceptions, including PMP. + */ + mem = probe_write(env, address, cbozlen, mmu_idx, ra); + + if (likely(mem)) { + memset(mem, 0, cbozlen); + } else { + /* + * This means that we're dealing with an I/O page. Section 4.2 + * of cmobase v1.0.1 says: + * + * "Cache-block zero instructions store zeros independently + * of whether data from the underlying memory locations are + * cacheable." + * + * Write zeros in address + cbozlen regardless of not being + * a RAM page. + */ + for (int i = 0; i < cbozlen; i++) { + cpu_stb_mmuidx_ra(env, address + i, 0, mmu_idx, ra); + } + } +} + +/* + * check_zicbom_access + * + * Check access permissions (LOAD, STORE or FETCH as specified in + * section 2.5.2 of the CMO specification) for Zicbom, raising + * either store page-fault (non-virtualized) or store guest-page + * fault (virtualized). + */ +static void check_zicbom_access(CPURISCVState *env, + target_ulong address, + uintptr_t ra) +{ + RISCVCPU *cpu = env_archcpu(env); + int mmu_idx = cpu_mmu_index(env, false); + uint16_t cbomlen = cpu->cfg.cbom_blocksize; + void *phost; + int ret; + + /* Mask off low-bits to align-down to the cache-block. */ + address &= ~(cbomlen - 1); + + /* + * Section 2.5.2 of cmobase v1.0.1: + * + * "A cache-block management instruction is permitted to + * access the specified cache block whenever a load instruction + * or store instruction is permitted to access the corresponding + * physical addresses. If neither a load instruction nor store + * instruction is permitted to access the physical addresses, + * but an instruction fetch is permitted to access the physical + * addresses, whether a cache-block management instruction is + * permitted to access the cache block is UNSPECIFIED." 
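helper_cbo_zero() above first gates on [mhs]envcfg, then aligns the address down to its cache block before probing. A sketch of the align-down step, assuming a 64-byte cboz_blocksize (values illustrative):

    /* Align an address down to the start of its cache block. */
    static uint64_t cbo_block_base(uint64_t address, uint16_t cbozlen)
    {
        return address & ~(uint64_t)(cbozlen - 1);
    }

    /* cbo_block_base(0x80001234, 64) == 0x80001200: the whole 64-byte
     * block containing the address is zeroed. */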
+ */ + ret = probe_access_flags(env, address, cbomlen, MMU_DATA_LOAD, + mmu_idx, true, &phost, ra); + if (ret != TLB_INVALID_MASK) { + /* Success: readable */ + return; + } + + /* + * Since not readable, must be writable. On failure, store + * fault/store guest amo fault will be raised by + * riscv_cpu_tlb_fill(). PMP exceptions will be caught + * there as well. + */ + probe_write(env, address, cbomlen, mmu_idx, ra); +} + +void helper_cbo_clean_flush(CPURISCVState *env, target_ulong address) +{ + uintptr_t ra = GETPC(); + check_zicbo_envcfg(env, MENVCFG_CBCFE, ra); + check_zicbom_access(env, address, ra); + + /* We don't emulate the cache-hierarchy, so we're done. */ +} + +void helper_cbo_inval(CPURISCVState *env, target_ulong address) +{ + uintptr_t ra = GETPC(); + check_zicbo_envcfg(env, MENVCFG_CBIE, ra); + check_zicbom_access(env, address, ra); + + /* We don't emulate the cache-hierarchy, so we're done. */ +} + #ifndef CONFIG_USER_ONLY target_ulong helper_sret(CPURISCVState *env) @@ -143,8 +279,7 @@ target_ulong helper_sret(CPURISCVState *env) riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } - if (riscv_has_ext(env, RVH) && riscv_cpu_virt_enabled(env) && - get_field(env->hstatus, HSTATUS_VTSR)) { + if (env->virt_enabled && get_field(env->hstatus, HSTATUS_VTSR)) { riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC()); } @@ -159,7 +294,7 @@ target_ulong helper_sret(CPURISCVState *env) } env->mstatus = mstatus; - if (riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) { + if (riscv_has_ext(env, RVH) && !env->virt_enabled) { /* We support Hypervisor extensions and virtualisation is disabled */ target_ulong hstatus = env->hstatus; @@ -195,7 +330,7 @@ target_ulong helper_mret(CPURISCVState *env) uint64_t mstatus = env->mstatus; target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP); - if (riscv_feature(env, RISCV_FEATURE_PMP) && + if (riscv_cpu_cfg(env)->pmp && !pmp_get_num_rules(env) && (prev_priv != PRV_M)) { riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC()); } @@ -204,7 +339,8 @@ target_ulong helper_mret(CPURISCVState *env) mstatus = set_field(mstatus, MSTATUS_MIE, get_field(mstatus, MSTATUS_MPIE)); mstatus = set_field(mstatus, MSTATUS_MPIE, 1); - mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U); + mstatus = set_field(mstatus, MSTATUS_MPP, + riscv_has_ext(env, RVU) ?
PRV_U : PRV_M); mstatus = set_field(mstatus, MSTATUS_MPV, 0); if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) { mstatus = set_field(mstatus, MSTATUS_MPRV, 0); @@ -231,10 +367,10 @@ void helper_wfi(CPURISCVState *env) bool prv_s = env->priv == PRV_S; if (((prv_s || (!rvs && prv_u)) && get_field(env->mstatus, MSTATUS_TW)) || - (rvs && prv_u && !riscv_cpu_virt_enabled(env))) { + (rvs && prv_u && !env->virt_enabled)) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); - } else if (riscv_cpu_virt_enabled(env) && (prv_u || - (prv_s && get_field(env->hstatus, HSTATUS_VTW)))) { + } else if (env->virt_enabled && + (prv_u || (prv_s && get_field(env->hstatus, HSTATUS_VTW)))) { riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC()); } else { cs->halted = 1; @@ -246,12 +382,12 @@ void helper_wfi(CPURISCVState *env) void helper_tlb_flush(CPURISCVState *env) { CPUState *cs = env_cpu(env); - if (!(env->priv >= PRV_S) || - (env->priv == PRV_S && - get_field(env->mstatus, MSTATUS_TVM))) { + if (!env->virt_enabled && + (env->priv == PRV_U || + (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)))) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); - } else if (riscv_has_ext(env, RVH) && riscv_cpu_virt_enabled(env) && - get_field(env->hstatus, HSTATUS_VTVM)) { + } else if (env->virt_enabled && + (env->priv == PRV_U || get_field(env->hstatus, HSTATUS_VTVM))) { riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC()); } else { tlb_flush(cs); @@ -268,12 +404,12 @@ void helper_hyp_tlb_flush(CPURISCVState *env) { CPUState *cs = env_cpu(env); - if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) { + if (env->virt_enabled) { riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC()); } if (env->priv == PRV_M || - (env->priv == PRV_S && !riscv_cpu_virt_enabled(env))) { + (env->priv == PRV_S && !env->virt_enabled)) { tlb_flush(cs); return; } @@ -283,7 +419,7 @@ void helper_hyp_tlb_flush(CPURISCVState *env) void helper_hyp_gvma_tlb_flush(CPURISCVState *env) { - if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env) && + if (env->priv == PRV_S && !env->virt_enabled && get_field(env->mstatus, MSTATUS_TVM)) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } @@ -291,18 +427,118 @@ void helper_hyp_gvma_tlb_flush(CPURISCVState *env) helper_hyp_tlb_flush(env); } -target_ulong helper_hyp_hlvx_hu(CPURISCVState *env, target_ulong address) +static int check_access_hlsv(CPURISCVState *env, bool x, uintptr_t ra) { - int mmu_idx = cpu_mmu_index(env, true) | TB_FLAGS_PRIV_HYP_ACCESS_MASK; + if (env->priv == PRV_M) { + /* always allowed */ + } else if (env->virt_enabled) { + riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, ra); + } else if (env->priv == PRV_U && !get_field(env->hstatus, HSTATUS_HU)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra); + } - return cpu_lduw_mmuidx_ra(env, address, mmu_idx, GETPC()); + int mode = get_field(env->hstatus, HSTATUS_SPVP); + if (!x && mode == PRV_S && get_field(env->vsstatus, MSTATUS_SUM)) { + mode = MMUIdx_S_SUM; + } + return mode | MMU_2STAGE_BIT; } -target_ulong helper_hyp_hlvx_wu(CPURISCVState *env, target_ulong address) +target_ulong helper_hyp_hlv_bu(CPURISCVState *env, target_ulong addr) { - int mmu_idx = cpu_mmu_index(env, true) | TB_FLAGS_PRIV_HYP_ACCESS_MASK; + uintptr_t ra = GETPC(); + int mmu_idx = check_access_hlsv(env, false, ra); + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); - return cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC()); + return 
cpu_ldb_mmu(env, addr, oi, ra); +} + +target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr) +{ + uintptr_t ra = GETPC(); + int mmu_idx = check_access_hlsv(env, false, ra); + MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx); + + return cpu_ldw_mmu(env, addr, oi, ra); +} + +target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr) +{ + uintptr_t ra = GETPC(); + int mmu_idx = check_access_hlsv(env, false, ra); + MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx); + + return cpu_ldl_mmu(env, addr, oi, ra); +} + +target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr) +{ + uintptr_t ra = GETPC(); + int mmu_idx = check_access_hlsv(env, false, ra); + MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx); + + return cpu_ldq_mmu(env, addr, oi, ra); +} + +void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val) +{ + uintptr_t ra = GETPC(); + int mmu_idx = check_access_hlsv(env, false, ra); + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + + cpu_stb_mmu(env, addr, val, oi, ra); +} + +void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val) +{ + uintptr_t ra = GETPC(); + int mmu_idx = check_access_hlsv(env, false, ra); + MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx); + + cpu_stw_mmu(env, addr, val, oi, ra); +} + +void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val) +{ + uintptr_t ra = GETPC(); + int mmu_idx = check_access_hlsv(env, false, ra); + MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx); + + cpu_stl_mmu(env, addr, val, oi, ra); +} + +void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val) +{ + uintptr_t ra = GETPC(); + int mmu_idx = check_access_hlsv(env, false, ra); + MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx); + + cpu_stq_mmu(env, addr, val, oi, ra); +} + +/* + * TODO: These implementations are not quite correct. They perform the + * access using execute permission just fine, but the final PMP check + * is supposed to have read permission as well. Without replicating + * a fair fraction of cputlb.c, fixing this requires adding new mmu_idx + * which would imply that exact check in tlb_fill. 
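The index returned by check_access_hlsv() composes the MMUIdx_* encodings from internals.h with MMU_2STAGE_BIT. A worked example with assumed CSR state (a sketch mirroring the code above):

    /* HLV from V=0 U-mode, with hstatus.HU = 1, hstatus.SPVP = 1 and
     * vsstatus.SUM = 1, on a non-execute (x == false) access: */
    int mode = get_field(env->hstatus, HSTATUS_SPVP);   /* == PRV_S == 1 */
    if (!x && mode == PRV_S && get_field(env->vsstatus, MSTATUS_SUM)) {
        mode = MMUIdx_S_SUM;                            /* == 2 */
    }
    int mmu_idx = mode | MMU_2STAGE_BIT;  /* 2 | 4 == 6: S+SUM+2STAGE */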
+ */ +target_ulong helper_hyp_hlvx_hu(CPURISCVState *env, target_ulong addr) +{ + uintptr_t ra = GETPC(); + int mmu_idx = check_access_hlsv(env, true, ra); + MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx); + + return cpu_ldw_code_mmu(env, addr, oi, GETPC()); +} + +target_ulong helper_hyp_hlvx_wu(CPURISCVState *env, target_ulong addr) +{ + uintptr_t ra = GETPC(); + int mmu_idx = check_access_hlsv(env, true, ra); + MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx); + + return cpu_ldl_code_mmu(env, addr, oi, ra); } #endif /* !CONFIG_USER_ONLY */ diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c index 4bc4113531..1f5aca42e8 100644 --- a/target/riscv/pmp.c +++ b/target/riscv/pmp.c @@ -27,7 +27,7 @@ #include "exec/exec-all.h" static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index, - uint8_t val); + uint8_t val); static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index); static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index); @@ -88,7 +88,7 @@ static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val) if (pmp_index < MAX_RISCV_PMPS) { bool locked = true; - if (riscv_feature(env, RISCV_FEATURE_EPMP)) { + if (riscv_cpu_cfg(env)->epmp) { /* mseccfg.RLB is set */ if (MSECCFG_RLB_ISSET(env)) { locked = false; @@ -129,18 +129,19 @@ static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val) } } -static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea) +static void pmp_decode_napot(target_ulong a, target_ulong *sa, + target_ulong *ea) { /* - aaaa...aaa0 8-byte NAPOT range - aaaa...aa01 16-byte NAPOT range - aaaa...a011 32-byte NAPOT range - ... - aa01...1111 2^XLEN-byte NAPOT range - a011...1111 2^(XLEN+1)-byte NAPOT range - 0111...1111 2^(XLEN+2)-byte NAPOT range - 1111...1111 Reserved - */ + * aaaa...aaa0 8-byte NAPOT range + * aaaa...aa01 16-byte NAPOT range + * aaaa...a011 32-byte NAPOT range + * ... + * aa01...1111 2^XLEN-byte NAPOT range + * a011...1111 2^(XLEN+1)-byte NAPOT range + * 0111...1111 2^(XLEN+2)-byte NAPOT range + * 1111...1111 Reserved + */ a = (a << 2) | 0x3; *sa = a & (a + 1); *ea = a | (a + 1); @@ -205,7 +206,8 @@ void pmp_update_rule_nums(CPURISCVState *env) } } -/* Convert cfg/addr reg values here into simple 'sa' --> start address and 'ea' +/* + * Convert cfg/addr reg values here into simple 'sa' --> start address and 'ea' * end address values. * This function is called relatively infrequently whereas the check that * an address is within a pmp rule is called often, so optimise that one @@ -216,12 +218,13 @@ static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index) pmp_update_rule_nums(env); } -static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr) +static int pmp_is_in_range(CPURISCVState *env, int pmp_index, + target_ulong addr) { int result = 0; - if ((addr >= env->pmp_state.addr[pmp_index].sa) - && (addr <= env->pmp_state.addr[pmp_index].ea)) { + if ((addr >= env->pmp_state.addr[pmp_index].sa) && + (addr <= env->pmp_state.addr[pmp_index].ea)) { result = 1; } else { result = 0; @@ -234,12 +237,13 @@ static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr) * Check if the address has required RWX privs when no PMP entry is matched. 
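To make the pmp_decode_napot() table above concrete, a worked example (register value illustrative):

    /* pmpaddr = 0b10011 matches the "aaaa...a011" row, a 32-byte range:
     *   a  = (0b10011 << 2) | 0x3 = 0b1001111
     *   sa = a & (a + 1) = 0b1000000 = 0x40
     *   ea = a | (a + 1) = 0b1011111 = 0x5f
     * so the rule covers bytes 0x40..0x5f. */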
*/ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr, - target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs, - target_ulong mode) + target_ulong size, pmp_priv_t privs, + pmp_priv_t *allowed_privs, + target_ulong mode) { bool ret; - if (riscv_feature(env, RISCV_FEATURE_EPMP)) { + if (riscv_cpu_cfg(env)->epmp) { if (MSECCFG_MMWP_ISSET(env)) { /* * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set @@ -265,7 +269,7 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr, } } - if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) { + if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) { /* * Privileged spec v1.10 states if HW doesn't implement any PMP entry * or no PMP entry matches an M-Mode access, the access succeeds. @@ -297,8 +301,8 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr, * Return negative value if no match */ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, - target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs, - target_ulong mode) + target_ulong size, pmp_priv_t privs, + pmp_priv_t *allowed_privs, target_ulong mode) { int i = 0; int ret = -1; @@ -315,7 +319,7 @@ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, } if (size == 0) { - if (riscv_feature(env, RISCV_FEATURE_MMU)) { + if (riscv_cpu_cfg(env)->mmu) { /* * If size is unknown (0), assume that all bytes * from addr to the end of the page will be accessed. @@ -328,8 +332,10 @@ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, pmp_size = size; } - /* 1.10 draft priv spec states there is an implicit order - from low to high */ + /* + * 1.10 draft priv spec states there is an implicit order + * from low to high + */ for (i = 0; i < MAX_RISCV_PMPS; i++) { s = pmp_is_in_range(env, i, addr); e = pmp_is_in_range(env, i, addr + pmp_size - 1); @@ -466,7 +472,7 @@ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, * Handle a write to a pmpcfg CSR */ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index, - target_ulong val) + target_ulong val) { int i; uint8_t cfg_val; @@ -508,7 +514,7 @@ target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index) * Handle a write to a pmpaddr CSR */ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, - target_ulong val) + target_ulong val) { trace_pmpaddr_csr_write(env->mhartid, addr_index, val); @@ -608,13 +614,13 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index, return TARGET_PAGE_SIZE; } else { /* - * At this point we have a tlb_size that is the smallest possible size - * That fits within a TARGET_PAGE_SIZE and the PMP region. - * - * If the size is less then TARGET_PAGE_SIZE we drop the size to 1. - * This means the result isn't cached in the TLB and is only used for - * a single translation. - */ + * At this point we have a tlb_size that is the smallest possible size + * that fits within a TARGET_PAGE_SIZE and the PMP region. + * + * If the size is less than TARGET_PAGE_SIZE we drop the size to 1. + * This means the result isn't cached in the TLB and is only used for + * a single translation.
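Concretely (illustrative addresses, 4 KiB pages):

    target_ulong tlb_sa = 0x1000, tlb_ea = 0x1fff;   /* one 4 KiB page   */
    target_ulong pmp_sa = 0x1000, pmp_ea = 0x17ff;   /* 2 KiB PMP region */
    /* The rule spans only half of the page, so pmp_get_tlb_size() returns
     * 1 and the translation is used once rather than cached page-wide. */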
+ */ return 1; } } diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h index da32c61c85..b296ea1fc6 100644 --- a/target/riscv/pmp.h +++ b/target/riscv/pmp.h @@ -63,18 +63,19 @@ typedef struct { } pmp_table_t; void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index, - target_ulong val); + target_ulong val); target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index); void mseccfg_csr_write(CPURISCVState *env, target_ulong val); target_ulong mseccfg_csr_read(CPURISCVState *env); void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, - target_ulong val); + target_ulong val); target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index); int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, - target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs, - target_ulong mode); + target_ulong size, pmp_priv_t privs, + pmp_priv_t *allowed_privs, + target_ulong mode); target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index, target_ulong tlb_sa, target_ulong tlb_ea); void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index); diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c index b8e56d2b7b..db06b3882f 100644 --- a/target/riscv/pmu.c +++ b/target/riscv/pmu.c @@ -35,7 +35,7 @@ */ void riscv_pmu_generate_fdt_node(void *fdt, int num_ctrs, char *pmu_name) { - uint32_t fdt_event_ctr_map[20] = {}; + uint32_t fdt_event_ctr_map[15] = {}; uint32_t cmask; /* All the programmable counters can map to any event */ @@ -109,7 +109,7 @@ static int riscv_pmu_incr_ctr_rv32(RISCVCPU *cpu, uint32_t ctr_idx) CPURISCVState *env = &cpu->env; target_ulong max_val = UINT32_MAX; PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; - bool virt_on = riscv_cpu_virt_enabled(env); + bool virt_on = env->virt_enabled; /* Privilege mode filtering */ if ((env->priv == PRV_M && @@ -133,7 +133,7 @@ static int riscv_pmu_incr_ctr_rv32(RISCVCPU *cpu, uint32_t ctr_idx) /* Generate interrupt only if OF bit is clear */ if (!(env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_OF)) { env->mhpmeventh_val[ctr_idx] |= MHPMEVENTH_BIT_OF; - riscv_cpu_update_mip(cpu, MIP_LCOFIP, BOOL_TO_MASK(1)); + riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1)); } } else { counter->mhpmcounterh_val++; @@ -150,7 +150,7 @@ static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx) CPURISCVState *env = &cpu->env; PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; uint64_t max_val = UINT64_MAX; - bool virt_on = riscv_cpu_virt_enabled(env); + bool virt_on = env->virt_enabled; /* Privilege mode filtering */ if ((env->priv == PRV_M && @@ -172,7 +172,7 @@ static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx) /* Generate interrupt only if OF bit is clear */ if (!(env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_OF)) { env->mhpmevent_val[ctr_idx] |= MHPMEVENT_BIT_OF; - riscv_cpu_update_mip(cpu, MIP_LCOFIP, BOOL_TO_MASK(1)); + riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1)); } } else { counter->mhpmcounter_val++; @@ -223,7 +223,7 @@ bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env, return true; } - cpu = RISCV_CPU(env_cpu(env)); + cpu = env_archcpu(env); if (!cpu->pmu_event_ctr_map) { return false; } @@ -249,7 +249,7 @@ bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, uint32_t target_ctr) return true; } - cpu = RISCV_CPU(env_cpu(env)); + cpu = env_archcpu(env); if (!cpu->pmu_event_ctr_map) { return false; } @@ -289,7 +289,7 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value, uint32_t ctr_idx) { uint32_t event_idx; - RISCVCPU *cpu = RISCV_CPU(env_cpu(env)); + 
RISCVCPU *cpu = env_archcpu(env); if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->pmu_event_ctr_map) { return -1; @@ -371,7 +371,7 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu, /* Generate interrupt only if OF bit is clear */ if (!(*mhpmevent_val & of_bit_mask)) { *mhpmevent_val |= of_bit_mask; - riscv_cpu_update_mip(cpu, MIP_LCOFIP, BOOL_TO_MASK(1)); + riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1)); } } } @@ -390,7 +390,7 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx) { uint64_t overflow_delta, overflow_at; int64_t overflow_ns, overflow_left = 0; - RISCVCPU *cpu = RISCV_CPU(env_cpu(env)); + RISCVCPU *cpu = env_archcpu(env); PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf) { @@ -419,7 +419,8 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx) } else { return -1; } - overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns; + overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + overflow_ns; if (overflow_at > INT64_MAX) { overflow_left += overflow_at - INT64_MAX; diff --git a/target/riscv/riscv-qmp-cmds.c b/target/riscv/riscv-qmp-cmds.c new file mode 100644 index 0000000000..5ecff1afb3 --- /dev/null +++ b/target/riscv/riscv-qmp-cmds.c @@ -0,0 +1,57 @@ +/* + * QEMU CPU QMP commands for RISC-V + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
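riscv_cpu_add_definition() below backs the QMP command { "execute": "query-cpu-definitions" }; it derives the user-visible CPU name by trimming the QOM type suffix. A sketch of the arithmetic, assuming TYPE_RISCV_CPU expands to "riscv-cpu":

    const char *typename = "rv64-riscv-cpu";    /* illustrative QOM name */
    /* strlen("rv64-riscv-cpu") - strlen("-riscv-cpu") = 4 chars kept */
    char *name = g_strndup(typename, strlen(typename) - strlen("-riscv-cpu"));
    /* name == "rv64" */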
+ */ + +#include "qemu/osdep.h" + +#include "qapi/qapi-commands-machine-target.h" +#include "cpu-qom.h" + +static void riscv_cpu_add_definition(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + CpuDefinitionInfoList **cpu_list = user_data; + CpuDefinitionInfo *info = g_malloc0(sizeof(*info)); + const char *typename = object_class_get_name(oc); + ObjectClass *dyn_class; + + info->name = g_strndup(typename, + strlen(typename) - strlen("-" TYPE_RISCV_CPU)); + info->q_typename = g_strdup(typename); + + dyn_class = object_class_dynamic_cast(oc, TYPE_RISCV_DYNAMIC_CPU); + info->q_static = dyn_class == NULL; + + QAPI_LIST_PREPEND(*cpu_list, info); +} + +CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) +{ + CpuDefinitionInfoList *cpu_list = NULL; + GSList *list = object_class_get_list(TYPE_RISCV_CPU, false); + + g_slist_foreach(list, riscv_cpu_add_definition, &cpu_list); + g_slist_free(list); + + return cpu_list; +} diff --git a/target/riscv/sbi_ecall_interface.h b/target/riscv/sbi_ecall_interface.h index 77574ed4cb..43899d08f6 100644 --- a/target/riscv/sbi_ecall_interface.h +++ b/target/riscv/sbi_ecall_interface.h @@ -28,7 +28,7 @@ #define SBI_EXT_RFENCE 0x52464E43 #define SBI_EXT_HSM 0x48534D -/* SBI function IDs for BASE extension*/ +/* SBI function IDs for BASE extension */ #define SBI_EXT_BASE_GET_SPEC_VERSION 0x0 #define SBI_EXT_BASE_GET_IMP_ID 0x1 #define SBI_EXT_BASE_GET_IMP_VERSION 0x2 @@ -37,13 +37,13 @@ #define SBI_EXT_BASE_GET_MARCHID 0x5 #define SBI_EXT_BASE_GET_MIMPID 0x6 -/* SBI function IDs for TIME extension*/ +/* SBI function IDs for TIME extension */ #define SBI_EXT_TIME_SET_TIMER 0x0 -/* SBI function IDs for IPI extension*/ +/* SBI function IDs for IPI extension */ #define SBI_EXT_IPI_SEND_IPI 0x0 -/* SBI function IDs for RFENCE extension*/ +/* SBI function IDs for RFENCE extension */ #define SBI_EXT_RFENCE_REMOTE_FENCE_I 0x0 #define SBI_EXT_RFENCE_REMOTE_SFENCE_VMA 0x1 #define SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID 0x2 diff --git a/target/riscv/time_helper.c b/target/riscv/time_helper.c index b654f91af9..8d245bed3a 100644 --- a/target/riscv/time_helper.c +++ b/target/riscv/time_helper.c @@ -27,25 +27,24 @@ static void riscv_vstimer_cb(void *opaque) RISCVCPU *cpu = opaque; CPURISCVState *env = &cpu->env; env->vstime_irq = 1; - riscv_cpu_update_mip(cpu, 0, BOOL_TO_MASK(1)); + riscv_cpu_update_mip(env, 0, BOOL_TO_MASK(1)); } static void riscv_stimer_cb(void *opaque) { RISCVCPU *cpu = opaque; - riscv_cpu_update_mip(cpu, MIP_STIP, BOOL_TO_MASK(1)); + riscv_cpu_update_mip(&cpu->env, MIP_STIP, BOOL_TO_MASK(1)); } /* * Called when timecmp is written to update the QEMU timer or immediately * trigger timer interrupt if mtimecmp <= current timer value. 
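A worked example of the two paths in riscv_timer_write_timecmp() (numbers illustrative, assuming a 10 MHz timebase, i.e. 100 ns per tick):

    /* rtc_r = 1000, timecmp = 1500: diff = 500 ticks, so the QEMU timer
     * is armed ~50 us ahead and [VS|S]TIP stays clear until it fires.
     * rtc_r = 1000, timecmp =  900: timecmp <= rtc_r, so the interrupt
     * is raised immediately via riscv_cpu_update_mip(); no timer runs. */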
*/ -void riscv_timer_write_timecmp(RISCVCPU *cpu, QEMUTimer *timer, +void riscv_timer_write_timecmp(CPURISCVState *env, QEMUTimer *timer, uint64_t timecmp, uint64_t delta, uint32_t timer_irq) { uint64_t diff, ns_diff, next; - CPURISCVState *env = &cpu->env; RISCVAclintMTimerState *mtimer = env->rdtime_fn_arg; uint32_t timebase_freq = mtimer->timebase_freq; uint64_t rtc_r = env->rdtime_fn(env->rdtime_fn_arg) + delta; @@ -57,9 +56,9 @@ void riscv_timer_write_timecmp(RISCVCPU *cpu, QEMUTimer *timer, */ if (timer_irq == MIP_VSTIP) { env->vstime_irq = 1; - riscv_cpu_update_mip(cpu, 0, BOOL_TO_MASK(1)); + riscv_cpu_update_mip(env, 0, BOOL_TO_MASK(1)); } else { - riscv_cpu_update_mip(cpu, MIP_STIP, BOOL_TO_MASK(1)); + riscv_cpu_update_mip(env, MIP_STIP, BOOL_TO_MASK(1)); } return; } @@ -67,9 +66,9 @@ void riscv_timer_write_timecmp(RISCVCPU *cpu, QEMUTimer *timer, /* Clear the [VS|S]TIP bit in mip */ if (timer_irq == MIP_VSTIP) { env->vstime_irq = 0; - riscv_cpu_update_mip(cpu, 0, BOOL_TO_MASK(0)); + riscv_cpu_update_mip(env, 0, BOOL_TO_MASK(0)); } else { - riscv_cpu_update_mip(cpu, timer_irq, BOOL_TO_MASK(0)); + riscv_cpu_update_mip(env, timer_irq, BOOL_TO_MASK(0)); } /* diff --git a/target/riscv/time_helper.h b/target/riscv/time_helper.h index 7b3cdcc350..cacd79b80c 100644 --- a/target/riscv/time_helper.h +++ b/target/riscv/time_helper.h @@ -22,7 +22,7 @@ #include "cpu.h" #include "qemu/timer.h" -void riscv_timer_write_timecmp(RISCVCPU *cpu, QEMUTimer *timer, +void riscv_timer_write_timecmp(CPURISCVState *env, QEMUTimer *timer, uint64_t timecmp, uint64_t delta, uint32_t timer_irq); void riscv_timer_init(RISCVCPU *cpu); diff --git a/target/riscv/translate.c b/target/riscv/translate.c index 772f9d7973..928da0d3f0 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -64,22 +64,22 @@ typedef struct DisasContext { RISCVMXL xl; uint32_t misa_ext; uint32_t opcode; - uint32_t mstatus_fs; - uint32_t mstatus_vs; - uint32_t mstatus_hs_fs; - uint32_t mstatus_hs_vs; + RISCVExtStatus mstatus_fs; + RISCVExtStatus mstatus_vs; uint32_t mem_idx; - /* Remember the rounding mode encoded in the previous fp instruction, - which we have already installed into env->fp_status. Or -1 for - no previous fp instruction. Note that we exit the TB when writing - to any system register, which includes CSR_FRM, so we do not have - to reset this known value. */ + uint32_t priv; + /* + * Remember the rounding mode encoded in the previous fp instruction, + * which we have already installed into env->fp_status. Or -1 for + * no previous fp instruction. Note that we exit the TB when writing + * to any system register, which includes CSR_FRM, so we do not have + * to reset this known value. + */ int frm; RISCVMXL ol; bool virt_inst_excp; bool virt_enabled; const RISCVCPUConfig *cfg_ptr; - bool hlsx; /* vector extension */ bool vill; /* @@ -99,16 +99,10 @@ typedef struct DisasContext { uint8_t vta; uint8_t vma; bool cfg_vta_all_1s; - target_ulong vstart; + bool vstart_eq_zero; bool vl_eq_vlmax; - uint8_t ntemp; CPUState *cs; TCGv zero; - /* Space for 3 operands plus 1 extra for address computation. 
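The temp[]/ftemp[] pools deleted here, like the many tcg_temp_free*() calls removed throughout this patch, are obsolete: TCG now reclaims temporaries automatically at the end of each translated instruction. The replacement idiom is simply (a sketch mirroring the new get_gpr() below):

    TCGv t = tcg_temp_new();
    tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]);
    /* use t; no tcg_temp_free(t) -- the translator frees it per insn */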
*/ - TCGv temp[4]; - /* Space for 4 operands(1 dest and <=3 src) for float point computation */ - TCGv_i64 ftemp[4]; - uint8_t nftemp; /* PointerMasking extension */ bool pm_mask_enabled; bool pm_base_enabled; @@ -207,12 +201,10 @@ static void gen_nanbox_h(TCGv_i64 out, TCGv_i64 in) */ static void gen_check_nanbox_h(TCGv_i64 out, TCGv_i64 in) { - TCGv_i64 t_max = tcg_const_i64(0xffffffffffff0000ull); - TCGv_i64 t_nan = tcg_const_i64(0xffffffffffff7e00ull); + TCGv_i64 t_max = tcg_constant_i64(0xffffffffffff0000ull); + TCGv_i64 t_nan = tcg_constant_i64(0xffffffffffff7e00ull); tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan); - tcg_temp_free_i64(t_max); - tcg_temp_free_i64(t_nan); } static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in) @@ -315,12 +307,6 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) * * Further, we may provide an extension for word operations. */ -static TCGv temp_new(DisasContext *ctx) -{ - assert(ctx->ntemp < ARRAY_SIZE(ctx->temp)); - return ctx->temp[ctx->ntemp++] = tcg_temp_new(); -} - static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext) { TCGv t; @@ -335,11 +321,11 @@ static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext) case EXT_NONE: break; case EXT_SIGN: - t = temp_new(ctx); + t = tcg_temp_new(); tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]); return t; case EXT_ZERO: - t = temp_new(ctx); + t = tcg_temp_new(); tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]); return t; default: @@ -367,7 +353,7 @@ static TCGv get_gprh(DisasContext *ctx, int reg_num) static TCGv dest_gpr(DisasContext *ctx, int reg_num) { if (reg_num == 0 || get_olen(ctx) < TARGET_LONG_BITS) { - return temp_new(ctx); + return tcg_temp_new(); } return cpu_gpr[reg_num]; } @@ -375,7 +361,7 @@ static TCGv dest_gpr(DisasContext *ctx, int reg_num) static TCGv dest_gprh(DisasContext *ctx, int reg_num) { if (reg_num == 0) { - return temp_new(ctx); + return tcg_temp_new(); } return cpu_gprh[reg_num]; } @@ -431,12 +417,6 @@ static void gen_set_gpr128(DisasContext *ctx, int reg_num, TCGv rl, TCGv rh) } } -static TCGv_i64 ftemp_new(DisasContext *ctx) -{ - assert(ctx->nftemp < ARRAY_SIZE(ctx->ftemp)); - return ctx->ftemp[ctx->nftemp++] = tcg_temp_new_i64(); -} - static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num) { if (!ctx->cfg_ptr->ext_zfinx) { @@ -450,7 +430,7 @@ static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num) case MXL_RV32: #ifdef TARGET_RISCV32 { - TCGv_i64 t = ftemp_new(ctx); + TCGv_i64 t = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(t, cpu_gpr[reg_num]); return t; } @@ -476,7 +456,7 @@ static TCGv_i64 get_fpr_d(DisasContext *ctx, int reg_num) switch (get_xl(ctx)) { case MXL_RV32: { - TCGv_i64 t = ftemp_new(ctx); + TCGv_i64 t = tcg_temp_new_i64(); tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]); return t; } @@ -496,12 +476,12 @@ static TCGv_i64 dest_fpr(DisasContext *ctx, int reg_num) } if (reg_num == 0) { - return ftemp_new(ctx); + return tcg_temp_new_i64(); } switch (get_xl(ctx)) { case MXL_RV32: - return ftemp_new(ctx); + return tcg_temp_new_i64(); #ifdef TARGET_RISCV64 case MXL_RV64: return cpu_gpr[reg_num]; @@ -511,7 +491,7 @@ static TCGv_i64 dest_fpr(DisasContext *ctx, int reg_num) } } -/* assume t is nanboxing (for normal) or sign-extended (for zfinx) */ +/* assume it is nanboxing (for normal) or sign-extended (for zfinx) */ static void gen_set_fpr_hs(DisasContext *ctx, int reg_num, TCGv_i64 t) { if (!ctx->cfg_ptr->ext_zfinx) { @@ -569,7 +549,7 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) /* check 
misaligned: */ next_pc = ctx->base.pc_next + imm; - if (!has_ext(ctx, RVC)) { + if (!ctx->cfg_ptr->ext_zca) { if ((next_pc & 0x3) != 0) { gen_exception_inst_addr_mis(ctx); return; @@ -584,7 +564,7 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) /* Compute a canonical address from a register plus offset. */ static TCGv get_address(DisasContext *ctx, int rs1, int imm) { - TCGv addr = temp_new(ctx); + TCGv addr = tcg_temp_new(); TCGv src1 = get_gpr(ctx, rs1, EXT_NONE); tcg_gen_addi_tl(addr, src1, imm); @@ -602,7 +582,7 @@ static TCGv get_address(DisasContext *ctx, int rs1, int imm) /* Compute a canonical address from a register plus reg offset. */ static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs) { - TCGv addr = temp_new(ctx); + TCGv addr = tcg_temp_new(); TCGv src1 = get_gpr(ctx, rs1, EXT_NONE); tcg_gen_add_tl(addr, src1, offs); @@ -618,8 +598,7 @@ static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs) } #ifndef CONFIG_USER_ONLY -/* The states of mstatus_fs are: - * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty +/* * We will have already diagnosed disabled state, * and need to turn initial/clean into dirty. */ @@ -631,26 +610,20 @@ static void mark_fs_dirty(DisasContext *ctx) return; } - if (ctx->mstatus_fs != MSTATUS_FS) { + if (ctx->mstatus_fs != EXT_STATUS_DIRTY) { /* Remember the state change for the rest of the TB. */ - ctx->mstatus_fs = MSTATUS_FS; + ctx->mstatus_fs = EXT_STATUS_DIRTY; tmp = tcg_temp_new(); tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS); tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); - tcg_temp_free(tmp); - } - if (ctx->virt_enabled && ctx->mstatus_hs_fs != MSTATUS_FS) { - /* Remember the stage change for the rest of the TB. */ - ctx->mstatus_hs_fs = MSTATUS_FS; - - tmp = tcg_temp_new(); - tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); - tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS); - tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); - tcg_temp_free(tmp); + if (ctx->virt_enabled) { + tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); + tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS); + tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); + } } } #else @@ -658,8 +631,7 @@ static inline void mark_fs_dirty(DisasContext *ctx) { } #endif #ifndef CONFIG_USER_ONLY -/* The states of mstatus_vs are: - * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty +/* * We will have already diagnosed disabled state, * and need to turn initial/clean into dirty. */ @@ -667,26 +639,20 @@ static void mark_vs_dirty(DisasContext *ctx) { TCGv tmp; - if (ctx->mstatus_vs != MSTATUS_VS) { + if (ctx->mstatus_vs != EXT_STATUS_DIRTY) { /* Remember the state change for the rest of the TB. */ - ctx->mstatus_vs = MSTATUS_VS; + ctx->mstatus_vs = EXT_STATUS_DIRTY; tmp = tcg_temp_new(); tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS); tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); - tcg_temp_free(tmp); - } - if (ctx->virt_enabled && ctx->mstatus_hs_vs != MSTATUS_VS) { - /* Remember the stage change for the rest of the TB. 
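The EXT_STATUS_DIRTY value used in mark_fs_dirty()/mark_vs_dirty() comes from the RISCVExtStatus enum that replaces raw MSTATUS_FS/MSTATUS_VS masks in DisasContext; its assumed definition mirrors the four states the deleted comments spelled out (0 = disabled, 1 = initial, 2 = clean, 3 = dirty):

    typedef enum {
        EXT_STATUS_DISABLED = 0,
        EXT_STATUS_INITIAL,
        EXT_STATUS_CLEAN,
        EXT_STATUS_DIRTY,
    } RISCVExtStatus;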
*/ - ctx->mstatus_hs_vs = MSTATUS_VS; - - tmp = tcg_temp_new(); - tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); - tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS); - tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); - tcg_temp_free(tmp); + if (ctx->virt_enabled) { + tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); + tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS); + tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); + } } } #else @@ -770,8 +736,8 @@ EX_SH(12) } while (0) #define REQUIRE_EITHER_EXT(ctx, A, B) do { \ - if (!ctx->cfg_ptr->ext_##A && \ - !ctx->cfg_ptr->ext_##B) { \ + if (!ctx->cfg_ptr->ext_##A && \ + !ctx->cfg_ptr->ext_##B) { \ return false; \ } \ } while (0) @@ -781,6 +747,11 @@ static int ex_rvc_register(DisasContext *ctx, int reg) { return 8 + reg; } +static int ex_sreg_register(DisasContext *ctx, int reg) +{ + return reg < 2 ? reg + 8 : reg + 16; +} + static int ex_rvc_shiftli(DisasContext *ctx, int imm) { /* For RV128 a shamt of 0 means a shift by 64. */ @@ -1037,7 +1008,6 @@ static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext, f128(dest, desth, src1, src1h, ext2); gen_set_gpr128(ctx, a->rd, dest, desth); } - tcg_temp_free(ext2); return true; } @@ -1103,7 +1073,9 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) #include "insn_trans/trans_rvh.c.inc" #include "insn_trans/trans_rvv.c.inc" #include "insn_trans/trans_rvb.c.inc" +#include "insn_trans/trans_rvzicond.c.inc" #include "insn_trans/trans_rvzawrs.c.inc" +#include "insn_trans/trans_rvzicbo.c.inc" #include "insn_trans/trans_rvzfh.c.inc" #include "insn_trans/trans_rvk.c.inc" #include "insn_trans/trans_privileged.c.inc" @@ -1114,6 +1086,8 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) /* Include the auto-generated decoder for 16 bit insn */ #include "decode-insn16.c.inc" +#include "insn_trans/trans_rvzce.c.inc" + /* Include decoders for factored-out extensions */ #include "decode-XVentanaCondOps.c.inc" @@ -1145,7 +1119,11 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) if (insn_len(opcode) == 2) { ctx->opcode = opcode; ctx->pc_succ_insn = ctx->base.pc_next + 2; - if (has_ext(ctx, RVC) && decode_insn16(ctx, opcode)) { + /* + * The Zca extension is added as a way to refer to instructions in the C + * extension that do not include the floating-point loads and stores. + */ + if (ctx->cfg_ptr->ext_zca && decode_insn16(ctx, opcode)) { return; } } else { @@ -1175,40 +1153,26 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) uint32_t tb_flags = ctx->base.tb->flags; ctx->pc_succ_insn = ctx->base.pc_first; + ctx->priv = FIELD_EX32(tb_flags, TB_FLAGS, PRIV); ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX); - ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS; - ctx->mstatus_vs = tb_flags & TB_FLAGS_MSTATUS_VS; + ctx->mstatus_fs = FIELD_EX32(tb_flags, TB_FLAGS, FS); + ctx->mstatus_vs = FIELD_EX32(tb_flags, TB_FLAGS, VS); ctx->priv_ver = env->priv_ver; -#if !defined(CONFIG_USER_ONLY) - if (riscv_has_ext(env, RVH)) { - ctx->virt_enabled = riscv_cpu_virt_enabled(env); - } else { - ctx->virt_enabled = false; - } -#else - ctx->virt_enabled = false; -#endif + ctx->virt_enabled = FIELD_EX32(tb_flags, TB_FLAGS, VIRT_ENABLED); ctx->misa_ext = env->misa_ext; ctx->frm = -1; /* unknown rounding mode */ ctx->cfg_ptr = &(cpu->cfg); - ctx->mstatus_hs_fs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_FS); - ctx->mstatus_hs_vs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_VS); -
ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX); ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL); ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW); ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3); ctx->vta = FIELD_EX32(tb_flags, TB_FLAGS, VTA) && cpu->cfg.rvv_ta_all_1s; ctx->vma = FIELD_EX32(tb_flags, TB_FLAGS, VMA) && cpu->cfg.rvv_ma_all_1s; ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s; - ctx->vstart = env->vstart; + ctx->vstart_eq_zero = FIELD_EX32(tb_flags, TB_FLAGS, VSTART_EQ_ZERO); ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX); ctx->misa_mxl_max = env->misa_mxl_max; ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL); ctx->cs = cs; - ctx->ntemp = 0; - memset(ctx->temp, 0, sizeof(ctx->temp)); - ctx->nftemp = 0; - memset(ctx->ftemp, 0, sizeof(ctx->ftemp)); ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED); ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED); ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER); @@ -1233,23 +1197,11 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) DisasContext *ctx = container_of(dcbase, DisasContext, base); CPURISCVState *env = cpu->env_ptr; uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next); - int i; ctx->ol = ctx->xl; decode_opc(env, ctx, opcode16); ctx->base.pc_next = ctx->pc_succ_insn; - for (i = ctx->ntemp - 1; i >= 0; --i) { - tcg_temp_free(ctx->temp[i]); - ctx->temp[i] = NULL; - } - ctx->ntemp = 0; - for (i = ctx->nftemp - 1; i >= 0; --i) { - tcg_temp_free_i64(ctx->ftemp[i]); - ctx->ftemp[i] = NULL; - } - ctx->nftemp = 0; - /* Only the first insn within a TB is allowed to cross a page boundary. */ if (ctx->base.is_jmp == DISAS_NEXT) { if (ctx->itrigger || !is_same_page(&ctx->base, ctx->base.pc_next)) { @@ -1261,7 +1213,7 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) uint16_t next_insn = cpu_lduw_code(env, ctx->base.pc_next); int len = insn_len(next_insn); - if (!is_same_page(&ctx->base, ctx->base.pc_next + len)) { + if (!is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) { ctx->base.is_jmp = DISAS_TOO_MANY; } } @@ -1294,8 +1246,8 @@ static void riscv_tr_disas_log(const DisasContextBase *dcbase, fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first)); #ifndef CONFIG_USER_ONLY - fprintf(logfile, "Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n", - env->priv, env->virt); + fprintf(logfile, "Priv: "TARGET_FMT_ld"; Virt: %d\n", + env->priv, env->virt_enabled); #endif target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size); } @@ -1309,7 +1261,7 @@ static const TranslatorOps riscv_tr_ops = { .disas_log = riscv_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index 3073c54871..f4d0438988 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -50,10 +50,7 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, } } - if ((sew > cpu->cfg.elen) - || vill - || (ediv != 0) - || (reserved != 0)) { + if ((sew > cpu->cfg.elen) || vill || (ediv != 0) || (reserved != 0)) { /* only set vill bit. 
*/ env->vill = 1; env->vtype = 0; @@ -267,8 +264,30 @@ GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw) GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl) GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq) +static void vext_set_tail_elems_1s(CPURISCVState *env, target_ulong vl, + void *vd, uint32_t desc, uint32_t nf, + uint32_t esz, uint32_t max_elems) +{ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); + uint32_t vlenb = riscv_cpu_cfg(env)->vlen >> 3; + uint32_t vta = vext_vta(desc); + uint32_t registers_used; + int k; + + for (k = 0; k < nf; ++k) { + vext_set_elems_1s(vd, vta, (k * max_elems + vl) * esz, + (k * max_elems + max_elems) * esz); + } + + if (nf * max_elems % total_elems != 0) { + registers_used = ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; + vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, + registers_used * vlenb); + } +} + /* - *** stride: access vector element from strided memory + * stride: access vector element from strided memory */ static void vext_ldst_stride(void *vd, void *v0, target_ulong base, @@ -281,8 +300,6 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base, uint32_t nf = vext_nf(desc); uint32_t max_elems = vext_max_elems(desc, log2_esz); uint32_t esz = 1 << log2_esz; - uint32_t total_elems = vext_get_total_elems(env, desc, esz); - uint32_t vta = vext_vta(desc); uint32_t vma = vext_vma(desc); for (i = env->vstart; i < env->vl; i++, env->vstart++) { @@ -301,18 +318,8 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base, } } env->vstart = 0; - /* set tail elements to 1s */ - for (k = 0; k < nf; ++k) { - vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz, - (k * max_elems + max_elems) * esz); - } - if (nf * max_elems % total_elems != 0) { - uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; - uint32_t registers_used = - ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; - vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, - registers_used * vlenb); - } + + vext_set_tail_elems_1s(env, env->vl, vd, desc, nf, esz, max_elems); } #define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN) \ @@ -346,10 +353,10 @@ GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w) GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d) /* - *** unit-stride: access elements stored contiguously in memory + * unit-stride: access elements stored contiguously in memory */ -/* unmasked unit-stride load and store operation*/ +/* unmasked unit-stride load and store operation */ static void vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uint32_t evl, @@ -359,8 +366,6 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, uint32_t nf = vext_nf(desc); uint32_t max_elems = vext_max_elems(desc, log2_esz); uint32_t esz = 1 << log2_esz; - uint32_t total_elems = vext_get_total_elems(env, desc, esz); - uint32_t vta = vext_vta(desc); /* load bytes from guest memory */ for (i = env->vstart; i < evl; i++, env->vstart++) { @@ -372,23 +377,13 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, } } env->vstart = 0; - /* set tail elements to 1s */ - for (k = 0; k < nf; ++k) { - vext_set_elems_1s(vd, vta, (k * max_elems + evl) * esz, - (k * max_elems + max_elems) * esz); - } - if (nf * max_elems % total_elems != 0) { - uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; - uint32_t registers_used = - ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; - vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, - registers_used * vlenb); - } + + vext_set_tail_elems_1s(env, evl, vd, desc, nf, esz, max_elems); } /* 
- * masked unit-stride load and store operation will be a special case of stride, - * stride = NF * sizeof (MTYPE) + * masked unit-stride load and store operation will be a special case of + * stride, stride = NF * sizeof (MTYPE) */ #define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN) \ @@ -434,7 +429,7 @@ GEN_VEXT_ST_US(vse32_v, int32_t, ste_w) GEN_VEXT_ST_US(vse64_v, int64_t, ste_d) /* - *** unit stride mask load and store, EEW = 1 + * unit stride mask load and store, EEW = 1 */ void HELPER(vlm_v)(void *vd, void *v0, target_ulong base, CPURISCVState *env, uint32_t desc) @@ -455,7 +450,7 @@ void HELPER(vsm_v)(void *vd, void *v0, target_ulong base, } /* - *** index: access vector element from indexed memory + * index: access vector element from indexed memory */ typedef target_ulong vext_get_index_addr(target_ulong base, uint32_t idx, void *vs2); @@ -484,8 +479,6 @@ vext_ldst_index(void *vd, void *v0, target_ulong base, uint32_t vm = vext_vm(desc); uint32_t max_elems = vext_max_elems(desc, log2_esz); uint32_t esz = 1 << log2_esz; - uint32_t total_elems = vext_get_total_elems(env, desc, esz); - uint32_t vta = vext_vta(desc); uint32_t vma = vext_vma(desc); /* load bytes from guest memory */ @@ -505,18 +498,8 @@ vext_ldst_index(void *vd, void *v0, target_ulong base, } } env->vstart = 0; - /* set tail elements to 1s */ - for (k = 0; k < nf; ++k) { - vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz, - (k * max_elems + max_elems) * esz); - } - if (nf * max_elems % total_elems != 0) { - uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; - uint32_t registers_used = - ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; - vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, - registers_used * vlenb); - } + + vext_set_tail_elems_1s(env, env->vl, vd, desc, nf, esz, max_elems); } #define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN) \ @@ -571,7 +554,7 @@ GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w) GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d) /* - *** unit-stride fault-only-fisrt load instructions + * unit-stride fault-only-first load instructions */ static inline void vext_ldff(void *vd, void *v0, target_ulong base, @@ -585,12 +568,10 @@ vext_ldff(void *vd, void *v0, target_ulong base, uint32_t vm = vext_vm(desc); uint32_t max_elems = vext_max_elems(desc, log2_esz); uint32_t esz = 1 << log2_esz; - uint32_t total_elems = vext_get_total_elems(env, desc, esz); - uint32_t vta = vext_vta(desc); uint32_t vma = vext_vma(desc); target_ulong addr, offset, remain; - /* probe every access*/ + /* probe every access */ for (i = env->vstart; i < env->vl; i++) { if (!vm && !vext_elem_mask(v0, i)) { continue; } @@ -647,18 +628,8 @@ ProbeSuccess: } } env->vstart = 0; - /* set tail elements to 1s */ - for (k = 0; k < nf; ++k) { - vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz, - (k * max_elems + max_elems) * esz); - } - if (nf * max_elems % total_elems != 0) { - uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; - uint32_t registers_used = - ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; - vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, - registers_used * vlenb); - } + + vext_set_tail_elems_1s(env, env->vl, vd, desc, nf, esz, max_elems); } #define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN) \ @@ -689,7 +660,7 @@ GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d) #define DO_MINU(N, M) DO_MIN((UMTYPE)N, (UMTYPE)M) /* - *** load and store whole register instructions + * load and store whole register instructions */ static void vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, @@
-697,7 +668,7 @@ vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, { uint32_t i, k, off, pos; uint32_t nf = vext_nf(desc); - uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; + uint32_t vlenb = riscv_cpu_cfg(env)->vlen >> 3; uint32_t max_elems = vlenb >> log2_esz; k = env->vstart / max_elems; @@ -707,7 +678,8 @@ vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, /* load/store rest of elements of current segment pointed by vstart */ for (pos = off; pos < max_elems; pos++, env->vstart++) { target_ulong addr = base + ((pos + k * max_elems) << log2_esz); - ldst_elem(env, adjust_addr(env, addr), pos + k * max_elems, vd, ra); + ldst_elem(env, adjust_addr(env, addr), pos + k * max_elems, vd, + ra); } k++; } @@ -762,7 +734,7 @@ GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b) GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b) /* - *** Vector Integer Arithmetic Instructions + * Vector Integer Arithmetic Instructions */ /* expand macro args before macro */ @@ -1142,7 +1114,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ \ *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\ } \ - env->vstart = 0; \ + env->vstart = 0; \ /* set tail elements to 1s */ \ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ } @@ -1167,7 +1139,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ { \ uint32_t vl = env->vl; \ uint32_t vm = vext_vm(desc); \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ \ @@ -1178,8 +1150,10 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ vext_set_elem_mask(vd, i, DO_OP(s2, s1, carry)); \ } \ env->vstart = 0; \ - /* mask destination register are always tail-agnostic */ \ - /* set tail elements to 1s */ \ + /* + * mask destination register are always tail-agnostic + * set tail elements to 1s + */ \ if (vta_all_1s) { \ for (; i < total_elems; i++) { \ vext_set_elem_mask(vd, i, 1); \ @@ -1203,7 +1177,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ { \ uint32_t vl = env->vl; \ uint32_t vm = vext_vm(desc); \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ \ @@ -1214,8 +1188,10 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ DO_OP(s2, (ETYPE)(target_long)s1, carry)); \ } \ env->vstart = 0; \ - /* mask destination register are always tail-agnostic */ \ - /* set tail elements to 1s */ \ + /* + * mask destination register are always tail-agnostic + * set tail elements to 1s + */ \ if (vta_all_1s) { \ for (; i < total_elems; i++) { \ vext_set_elem_mask(vd, i, 1); \ @@ -1331,10 +1307,13 @@ GEN_VEXT_SHIFT_VV(vsra_vv_h, uint16_t, int16_t, H2, H2, DO_SRL, 0xf) GEN_VEXT_SHIFT_VV(vsra_vv_w, uint32_t, int32_t, H4, H4, DO_SRL, 0x1f) GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f) -/* generate the helpers for shift instructions with one vector and one scalar */ +/* + * generate the helpers for shift instructions with one vector and one scalar + */ #define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK) \ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ - void *vs2, CPURISCVState *env, uint32_t desc) \ + void *vs2, CPURISCVState *env, \ + uint32_t desc) \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ @@ -1402,7 +1381,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ { 
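The VADC/VMADC templates in this hunk thread a carry bit from mask register v0 into each element-wise add. A scalar model of the vadc.vvm datapath (sketch; vadc32 is an invented name, and the one-bit-per-element LSB-first mask layout matches what vext_elem_mask() assumes):

#include <stdint.h>
#include <stdio.h>

/* vd[i] = vs2[i] + vs1[i] + v0.bit[i] -- the core of the VADC helpers. */
static void vadc32(int32_t *vd, const int32_t *vs2, const int32_t *vs1,
                   const uint8_t *v0, long vl)
{
    for (long i = 0; i < vl; i++) {
        int carry = (v0[i / 8] >> (i % 8)) & 1;
        vd[i] = vs2[i] + vs1[i] + carry;
    }
}

int main(void)
{
    int32_t d[4], a[4] = {1, 2, 3, 4}, b[4] = {10, 10, 10, 10};
    uint8_t mask = 0x5;                    /* carry into elements 0 and 2 */
    vadc32(d, a, b, &mask, 4);
    printf("%d %d %d %d\n", d[0], d[1], d[2], d[3]);   /* 12 12 14 14 */
    return 0;
}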
\ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ @@ -1420,8 +1399,10 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ vext_set_elem_mask(vd, i, DO_OP(s2, s1)); \ } \ env->vstart = 0; \ - /* mask destination register are always tail-agnostic */ \ - /* set tail elements to 1s */ \ + /* + * mask destination register are always tail-agnostic + * set tail elements to 1s + */ \ if (vta_all_1s) { \ for (; i < total_elems; i++) { \ vext_set_elem_mask(vd, i, 1); \ @@ -1465,7 +1446,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ @@ -1483,8 +1464,10 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ DO_OP(s2, (ETYPE)(target_long)s1)); \ } \ env->vstart = 0; \ - /* mask destination register are always tail-agnostic */ \ - /* set tail elements to 1s */ \ + /* + * mask destination register are always tail-agnostic + * set tail elements to 1s + */ \ if (vta_all_1s) { \ for (; i < total_elems; i++) { \ vext_set_elem_mask(vd, i, 1); \ @@ -1761,9 +1744,9 @@ GEN_VEXT_VX(vmulhsu_vx_d, 8) /* Vector Integer Divide Instructions */ #define DO_DIVU(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) : N / M) #define DO_REMU(N, M) (unlikely(M == 0) ? N : N % M) -#define DO_DIV(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) :\ +#define DO_DIV(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) : \ unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M) -#define DO_REM(N, M) (unlikely(M == 0) ? N :\ +#define DO_REM(N, M) (unlikely(M == 0) ? N : \ unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 
0 : N % M) RVVCALL(OPIVV2, vdivu_vv_b, OP_UUU_B, H1, H1, H1, DO_DIVU) @@ -1872,7 +1855,7 @@ GEN_VEXT_VX(vwmulsu_vx_h, 4) GEN_VEXT_VX(vwmulsu_vx_w, 8) /* Vector Single-Width Integer Multiply-Add Instructions */ -#define OPIVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ +#define OPIVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \ { \ TX1 s1 = *((T1 *)vs1 + HS1(i)); \ @@ -2103,7 +2086,7 @@ GEN_VEXT_VMERGE_VX(vmerge_vxm_w, int32_t, H4) GEN_VEXT_VMERGE_VX(vmerge_vxm_d, int64_t, H8) /* - *** Vector Fixed-Point Arithmetic Instructions + * Vector Fixed-Point Arithmetic Instructions */ /* Vector Single-Width Saturating Add and Subtract */ @@ -2185,7 +2168,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ do_##NAME, ESZ); \ } -static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b) +static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, + uint8_t b) { uint8_t res = a + b; if (res < a) { @@ -2303,7 +2287,8 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2, /* generate helpers for fixed point instructions with OPIVX format */ #define GEN_VEXT_VX_RM(NAME, ESZ) \ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ - void *vs2, CPURISCVState *env, uint32_t desc) \ + void *vs2, CPURISCVState *env, \ + uint32_t desc) \ { \ vext_vx_rm_2(vd, v0, s1, vs2, env, desc, \ do_##NAME, ESZ); \ @@ -2328,7 +2313,8 @@ static inline int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) return res; } -static inline int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, int16_t b) +static inline int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, + int16_t b) { int16_t res = a + b; if ((res ^ a) & (res ^ b) & INT16_MIN) { @@ -2338,7 +2324,8 @@ static inline int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, int16_t b) return res; } -static inline int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) +static inline int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, + int32_t b) { int32_t res = a + b; if ((res ^ a) & (res ^ b) & INT32_MIN) { @@ -2348,7 +2335,8 @@ static inline int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) return res; } -static inline int64_t sadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) +static inline int64_t sadd64(CPURISCVState *env, int vxrm, int64_t a, + int64_t b) { int64_t res = a + b; if ((res ^ a) & (res ^ b) & INT64_MIN) { @@ -2376,7 +2364,8 @@ GEN_VEXT_VX_RM(vsadd_vx_h, 2) GEN_VEXT_VX_RM(vsadd_vx_w, 4) GEN_VEXT_VX_RM(vsadd_vx_d, 8) -static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b) +static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, + uint8_t b) { uint8_t res = a - b; if (res > a) { @@ -2447,7 +2436,8 @@ static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) return res; } -static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b) +static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, + int16_t b) { int16_t res = a - b; if ((res ^ a) & (a ^ b) & INT16_MIN) { @@ -2457,7 +2447,8 @@ static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b) return res; } -static inline int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) +static inline int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, + int32_t b) { int32_t res = a - b; if ((res ^ a) & (a ^ b) & INT32_MIN) { @@ -2467,7 +2458,8 @@ static inline int32_t ssub32(CPURISCVState *env, int vxrm, 
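All the sadd/ssub variants in this stretch detect signed overflow with the same xor trick: an addition overflowed iff the result differs in sign from both operands, hence (res ^ a) & (res ^ b) & INT_MIN; the subtraction variant swaps one operand, giving (res ^ a) & (a ^ b). In standalone 32-bit form (sat_add32 is an invented name; the env/vxrm plumbing and the vxsat CSR write are omitted):

#include <stdint.h>
#include <assert.h>

/* Saturating signed add, same overflow test as sadd32() above. */
static int32_t sat_add32(int32_t a, int32_t b, int *sat)
{
    int32_t res = (int32_t)((uint32_t)a + (uint32_t)b);  /* wrap, no UB */
    if ((res ^ a) & (res ^ b) & INT32_MIN) {
        *sat = 1;
        return a < 0 ? INT32_MIN : INT32_MAX;
    }
    return res;
}

int main(void)
{
    int sat = 0;
    assert(sat_add32(INT32_MAX, 1, &sat) == INT32_MAX && sat);
    sat = 0;
    assert(sat_add32(-5, 3, &sat) == -2 && !sat);
    return 0;
}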
int32_t a, int32_t b) return res; } -static inline int64_t ssub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) +static inline int64_t ssub64(CPURISCVState *env, int vxrm, int64_t a, + int64_t b) { int64_t res = a - b; if ((res ^ a) & (a ^ b) & INT64_MIN) { @@ -2523,7 +2515,8 @@ static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift) return 0; /* round-down (truncate) */ } -static inline int32_t aadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) +static inline int32_t aadd32(CPURISCVState *env, int vxrm, int32_t a, + int32_t b) { int64_t res = (int64_t)a + b; uint8_t round = get_round(vxrm, res, 1); @@ -2531,7 +2524,8 @@ static inline int32_t aadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) return (res >> 1) + round; } -static inline int64_t aadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) +static inline int64_t aadd64(CPURISCVState *env, int vxrm, int64_t a, + int64_t b) { int64_t res = a + b; uint8_t round = get_round(vxrm, res, 1); @@ -2596,7 +2590,8 @@ GEN_VEXT_VX_RM(vaaddu_vx_h, 2) GEN_VEXT_VX_RM(vaaddu_vx_w, 4) GEN_VEXT_VX_RM(vaaddu_vx_d, 8) -static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) +static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, + int32_t b) { int64_t res = (int64_t)a - b; uint8_t round = get_round(vxrm, res, 1); @@ -2604,7 +2599,8 @@ static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) return (res >> 1) + round; } -static inline int64_t asub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) +static inline int64_t asub64(CPURISCVState *env, int vxrm, int64_t a, + int64_t b) { int64_t res = (int64_t)a - b; uint8_t round = get_round(vxrm, res, 1); @@ -2677,7 +2673,7 @@ static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) res = (int16_t)a * (int16_t)b; round = get_round(vxrm, res, 7); - res = (res >> 7) + round; + res = (res >> 7) + round; if (res > INT8_MAX) { env->vxsat = 0x1; @@ -2697,7 +2693,7 @@ static int16_t vsmul16(CPURISCVState *env, int vxrm, int16_t a, int16_t b) res = (int32_t)a * (int32_t)b; round = get_round(vxrm, res, 15); - res = (res >> 15) + round; + res = (res >> 15) + round; if (res > INT16_MAX) { env->vxsat = 0x1; @@ -2717,7 +2713,7 @@ static int32_t vsmul32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) res = (int64_t)a * (int64_t)b; round = get_round(vxrm, res, 31); - res = (res >> 31) + round; + res = (res >> 31) + round; if (res > INT32_MAX) { env->vxsat = 0x1; @@ -2784,7 +2780,7 @@ vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b) uint8_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; + res = (a >> shift) + round; return res; } static inline uint16_t @@ -2888,7 +2884,7 @@ vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b) int16_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; + res = (a >> shift) + round; if (res > INT8_MAX) { env->vxsat = 0x1; return INT8_MAX; @@ -2907,7 +2903,7 @@ vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b) int32_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; + res = (a >> shift) + round; if (res > INT16_MAX) { env->vxsat = 0x1; return INT16_MAX; @@ -2926,7 +2922,7 @@ vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b) int64_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; + res = (a >> shift) + round; if (res > INT32_MAX) { env->vxsat = 0x1; return INT32_MAX; @@ -2959,7 +2955,7 @@ vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, 
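get_round() returns only the 0/1 increment to add after the shift; for vxrm=0 (round-to-nearest-up) that increment is simply the last bit shifted out. The widen-then-shift pattern of aadd32() under that one rounding mode, as a plain function (avg_add_rnu is an invented name; the other three vxrm modes are left out):

#include <stdint.h>
#include <stdio.h>

/* aadd.vv for SEW=32 with vxrm=rnu: average with round-half-up,
 * overflow avoided by computing in 64 bits. */
static int32_t avg_add_rnu(int32_t a, int32_t b)
{
    int64_t res = (int64_t)a + b;
    int round = res & 1;                 /* get_round(0, res, 1) */
    return (int32_t)((res >> 1) + round);
}

int main(void)
{
    printf("%d\n", avg_add_rnu(3, 4));    /* 3.5 rounds up to 4 */
    printf("%d\n", avg_add_rnu(-3, -4));  /* -3.5 rounds up to -3 */
    return 0;
}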
uint8_t b) uint16_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; + res = (a >> shift) + round; if (res > UINT8_MAX) { env->vxsat = 0x1; return UINT8_MAX; @@ -2975,7 +2971,7 @@ vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b) uint32_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; + res = (a >> shift) + round; if (res > UINT16_MAX) { env->vxsat = 0x1; return UINT16_MAX; @@ -2991,7 +2987,7 @@ vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b) uint64_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; + res = (a >> shift) + round; if (res > UINT32_MAX) { env->vxsat = 0x1; return UINT32_MAX; @@ -3015,7 +3011,7 @@ GEN_VEXT_VX_RM(vnclipu_wx_h, 2) GEN_VEXT_VX_RM(vnclipu_wx_w, 4) /* - *** Vector Float Point Arithmetic Instructions + * Vector Float Point Arithmetic Instructions */ /* Vector Single-Width Floating-Point Add/Subtract Instructions */ #define OPFVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ @@ -3078,7 +3074,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ uint32_t total_elems = \ - vext_get_total_elems(env, desc, ESZ); \ + vext_get_total_elems(env, desc, ESZ); \ uint32_t vta = vext_vta(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ @@ -3144,13 +3140,13 @@ GEN_VEXT_VF(vfrsub_vf_d, 8) static uint32_t vfwadd16(uint16_t a, uint16_t b, float_status *s) { return float32_add(float16_to_float32(a, true, s), - float16_to_float32(b, true, s), s); + float16_to_float32(b, true, s), s); } static uint64_t vfwadd32(uint32_t a, uint32_t b, float_status *s) { return float64_add(float32_to_float64(a, s), - float32_to_float64(b, s), s); + float32_to_float64(b, s), s); } @@ -3166,13 +3162,13 @@ GEN_VEXT_VF(vfwadd_vf_w, 8) static uint32_t vfwsub16(uint16_t a, uint16_t b, float_status *s) { return float32_sub(float16_to_float32(a, true, s), - float16_to_float32(b, true, s), s); + float16_to_float32(b, true, s), s); } static uint64_t vfwsub32(uint32_t a, uint32_t b, float_status *s) { return float64_sub(float32_to_float64(a, s), - float32_to_float64(b, s), s); + float32_to_float64(b, s), s); } @@ -3276,13 +3272,13 @@ GEN_VEXT_VF(vfrdiv_vf_d, 8) static uint32_t vfwmul16(uint16_t a, uint16_t b, float_status *s) { return float32_mul(float16_to_float32(a, true, s), - float16_to_float32(b, true, s), s); + float16_to_float32(b, true, s), s); } static uint64_t vfwmul32(uint32_t a, uint32_t b, float_status *s) { return float64_mul(float32_to_float64(a, s), - float32_to_float64(b, s), s); + float32_to_float64(b, s), s); } RVVCALL(OPFVV2, vfwmul_vv_h, WOP_UUU_H, H4, H2, H2, vfwmul16) @@ -3297,7 +3293,7 @@ GEN_VEXT_VF(vfwmul_vf_w, 8) /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */ #define OPFVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ static void do_##NAME(void *vd, void *vs1, void *vs2, int i, \ - CPURISCVState *env) \ + CPURISCVState *env) \ { \ TX1 s1 = *((T1 *)vs1 + HS1(i)); \ TX2 s2 = *((T2 *)vs2 + HS2(i)); \ @@ -3329,7 +3325,7 @@ GEN_VEXT_VV_ENV(vfmacc_vv_d, 8) #define OPFVF3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \ - CPURISCVState *env) \ + CPURISCVState *env) \ { \ TX2 s2 = *((T2 *)vs2 + HS2(i)); \ TD d = *((TD *)vd + HD(i)); \ @@ -3345,20 +3341,20 @@ GEN_VEXT_VF(vfmacc_vf_d, 8) static uint16_t fnmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s) { - return float16_muladd(a, b, d, - float_muladd_negate_c | 
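The vnclip/vnclipu helpers above pair the same rounded right shift with saturation to the destination width, setting vxsat when a value is clipped. A hedged integer-only restatement of vnclipu8 for the rnu mode (nclipu8 is an invented name):

#include <stdint.h>
#include <assert.h>

/* Narrow 16 -> 8 bits with a rounded right shift and unsigned
 * saturation; real code also sets env->vxsat on clip. */
static uint8_t nclipu8(uint16_t a, uint8_t b, int *sat)
{
    uint8_t shift = b & 0xf;
    uint32_t round = shift ? (a >> (shift - 1)) & 1 : 0;  /* rnu increment */
    uint32_t res = (uint32_t)(a >> shift) + round;

    if (res > UINT8_MAX) {
        *sat = 1;
        return UINT8_MAX;
    }
    return (uint8_t)res;
}

int main(void)
{
    int sat = 0;
    assert(nclipu8(0x0300, 4, &sat) == 0x30 && !sat);
    assert(nclipu8(0x0300, 0, &sat) == UINT8_MAX && sat);
    return 0;
}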
float_muladd_negate_product, s); + return float16_muladd(a, b, d, float_muladd_negate_c | + float_muladd_negate_product, s); } static uint32_t fnmacc32(uint32_t a, uint32_t b, uint32_t d, float_status *s) { - return float32_muladd(a, b, d, - float_muladd_negate_c | float_muladd_negate_product, s); + return float32_muladd(a, b, d, float_muladd_negate_c | + float_muladd_negate_product, s); } static uint64_t fnmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s) { - return float64_muladd(a, b, d, - float_muladd_negate_c | float_muladd_negate_product, s); + return float64_muladd(a, b, d, float_muladd_negate_c | + float_muladd_negate_product, s); } RVVCALL(OPFVV3, vfnmacc_vv_h, OP_UUU_H, H2, H2, H2, fnmacc16) @@ -3460,20 +3456,20 @@ GEN_VEXT_VF(vfmadd_vf_d, 8) static uint16_t fnmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s) { - return float16_muladd(d, b, a, - float_muladd_negate_c | float_muladd_negate_product, s); + return float16_muladd(d, b, a, float_muladd_negate_c | + float_muladd_negate_product, s); } static uint32_t fnmadd32(uint32_t a, uint32_t b, uint32_t d, float_status *s) { - return float32_muladd(d, b, a, - float_muladd_negate_c | float_muladd_negate_product, s); + return float32_muladd(d, b, a, float_muladd_negate_c | + float_muladd_negate_product, s); } static uint64_t fnmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s) { - return float64_muladd(d, b, a, - float_muladd_negate_c | float_muladd_negate_product, s); + return float64_muladd(d, b, a, float_muladd_negate_c | + float_muladd_negate_product, s); } RVVCALL(OPFVV3, vfnmadd_vv_h, OP_UUU_H, H2, H2, H2, fnmadd16) @@ -3549,13 +3545,13 @@ GEN_VEXT_VF(vfnmsub_vf_d, 8) static uint32_t fwmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s) { return float32_muladd(float16_to_float32(a, true, s), - float16_to_float32(b, true, s), d, 0, s); + float16_to_float32(b, true, s), d, 0, s); } static uint64_t fwmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s) { return float64_muladd(float32_to_float64(a, s), - float32_to_float64(b, s), d, 0, s); + float32_to_float64(b, s), d, 0, s); } RVVCALL(OPFVV3, vfwmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwmacc16) @@ -3570,15 +3566,16 @@ GEN_VEXT_VF(vfwmacc_vf_w, 8) static uint32_t fwnmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s) { return float32_muladd(float16_to_float32(a, true, s), - float16_to_float32(b, true, s), d, - float_muladd_negate_c | float_muladd_negate_product, s); + float16_to_float32(b, true, s), d, + float_muladd_negate_c | float_muladd_negate_product, + s); } static uint64_t fwnmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s) { - return float64_muladd(float32_to_float64(a, s), - float32_to_float64(b, s), d, - float_muladd_negate_c | float_muladd_negate_product, s); + return float64_muladd(float32_to_float64(a, s), float32_to_float64(b, s), + d, float_muladd_negate_c | + float_muladd_negate_product, s); } RVVCALL(OPFVV3, vfwnmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwnmacc16) @@ -3593,15 +3590,15 @@ GEN_VEXT_VF(vfwnmacc_vf_w, 8) static uint32_t fwmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s) { return float32_muladd(float16_to_float32(a, true, s), - float16_to_float32(b, true, s), d, - float_muladd_negate_c, s); + float16_to_float32(b, true, s), d, + float_muladd_negate_c, s); } static uint64_t fwmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s) { return float64_muladd(float32_to_float64(a, s), - float32_to_float64(b, s), d, - float_muladd_negate_c, s); + float32_to_float64(b, s), d, + 
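Reflowing aside, the flag combinations in these fused multiply-add helpers are easy to misread: in softfloat terms float_muladd_negate_product negates a*b and float_muladd_negate_c negates the addend, so fnmacc computes -(a*b) - d in a single fused step. An unfused double-precision reference (ref_nmacc is an invented name; the real code fuses the operation and handles rounding and NaNs):

#include <stdio.h>

/* Unfused reference for the flag combinations used above:
 *   0                           -> a * b + d    (vfmacc)
 *   negate_c                    -> a * b - d    (vfmsac)
 *   negate_product              -> -(a * b) + d (vfnmsac)
 *   negate_c | negate_product   -> -(a * b) - d (vfnmacc)
 */
static double ref_nmacc(double a, double b, double d)
{
    return -(a * b) - d;
}

int main(void)
{
    printf("%g\n", ref_nmacc(2.0, 3.0, 1.0));   /* -7 */
    return 0;
}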
float_muladd_negate_c, s); } RVVCALL(OPFVV3, vfwmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwmsac16) @@ -3616,15 +3613,15 @@ GEN_VEXT_VF(vfwmsac_vf_w, 8) static uint32_t fwnmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s) { return float32_muladd(float16_to_float32(a, true, s), - float16_to_float32(b, true, s), d, - float_muladd_negate_product, s); + float16_to_float32(b, true, s), d, + float_muladd_negate_product, s); } static uint64_t fwnmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s) { return float64_muladd(float32_to_float64(a, s), - float32_to_float64(b, s), d, - float_muladd_negate_product, s); + float32_to_float64(b, s), d, + float_muladd_negate_product, s); } RVVCALL(OPFVV3, vfwnmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwnmsac16) @@ -3642,9 +3639,9 @@ GEN_VEXT_VF(vfwnmsac_vf_w, 8) #define OP_UU_W uint32_t, uint32_t, uint32_t #define OP_UU_D uint64_t, uint64_t, uint64_t -#define OPFVV1(NAME, TD, T2, TX2, HD, HS2, OP) \ +#define OPFVV1(NAME, TD, T2, TX2, HD, HS2, OP) \ static void do_##NAME(void *vd, void *vs2, int i, \ - CPURISCVState *env) \ + CPURISCVState *env) \ { \ TX2 s2 = *((T2 *)vs2 + HS2(i)); \ *((TD *)vd + HD(i)) = OP(s2, &env->fp_status); \ @@ -3652,7 +3649,7 @@ static void do_##NAME(void *vd, void *vs2, int i, \ #define GEN_VEXT_V_ENV(NAME, ESZ) \ void HELPER(NAME)(void *vd, void *v0, void *vs2, \ - CPURISCVState *env, uint32_t desc) \ + CPURISCVState *env, uint32_t desc) \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ @@ -3729,9 +3726,9 @@ static uint64_t frsqrt7(uint64_t f, int exp_size, int frac_size) } int idx = ((exp & 1) << (precision - 1)) | - (frac >> (frac_size - precision + 1)); + (frac >> (frac_size - precision + 1)); uint64_t out_frac = (uint64_t)(lookup_table[idx]) << - (frac_size - precision); + (frac_size - precision); uint64_t out_exp = (3 * MAKE_64BIT_MASK(0, exp_size - 1) + ~exp) / 2; uint64_t val = 0; @@ -3753,9 +3750,9 @@ static float16 frsqrt7_h(float16 f, float_status *s) * frsqrt7(-subnormal) = canonical NaN */ if (float16_is_signaling_nan(f, s) || - (float16_is_infinity(f) && sign) || - (float16_is_normal(f) && sign) || - (float16_is_zero_or_denormal(f) && !float16_is_zero(f) && sign)) { + (float16_is_infinity(f) && sign) || + (float16_is_normal(f) && sign) || + (float16_is_zero_or_denormal(f) && !float16_is_zero(f) && sign)) { s->float_exception_flags |= float_flag_invalid; return float16_default_nan(s); } @@ -3793,9 +3790,9 @@ static float32 frsqrt7_s(float32 f, float_status *s) * frsqrt7(-subnormal) = canonical NaN */ if (float32_is_signaling_nan(f, s) || - (float32_is_infinity(f) && sign) || - (float32_is_normal(f) && sign) || - (float32_is_zero_or_denormal(f) && !float32_is_zero(f) && sign)) { + (float32_is_infinity(f) && sign) || + (float32_is_normal(f) && sign) || + (float32_is_zero_or_denormal(f) && !float32_is_zero(f) && sign)) { s->float_exception_flags |= float_flag_invalid; return float32_default_nan(s); } @@ -3833,9 +3830,9 @@ static float64 frsqrt7_d(float64 f, float_status *s) * frsqrt7(-subnormal) = canonical NaN */ if (float64_is_signaling_nan(f, s) || - (float64_is_infinity(f) && sign) || - (float64_is_normal(f) && sign) || - (float64_is_zero_or_denormal(f) && !float64_is_zero(f) && sign)) { + (float64_is_infinity(f) && sign) || + (float64_is_normal(f) && sign) || + (float64_is_zero_or_denormal(f) && !float64_is_zero(f) && sign)) { s->float_exception_flags |= float_flag_invalid; return float64_default_nan(s); } @@ -3923,18 +3920,18 @@ static uint64_t frec7(uint64_t f, int exp_size, int frac_size, 
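For reference, frsqrt7() above is a 7-bit estimate: the exponent's low bit plus the top six fraction bits index a 128-entry lookup table. A sketch of just the index formation, mirroring the reindented expression in the hunk (rsqrt7_index and the example inputs are invented):

#include <stdint.h>
#include <stdio.h>

/* How frsqrt7() builds its table index: precision is 7 bits, and the
 * exponent's parity selects which half of the table is used. */
static int rsqrt7_index(uint64_t exp, uint64_t frac, int frac_size)
{
    const int precision = 7;
    return (int)(((exp & 1) << (precision - 1)) |
                 (frac >> (frac_size - precision + 1)));
}

int main(void)
{
    /* hypothetical float32 input: exp = 130, frac = 0x2a0000 */
    printf("idx = %d\n", rsqrt7_index(130, 0x2a0000, 23));
    return 0;
}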
((s->float_rounding_mode == float_round_up) && sign)) { /* Return greatest/negative finite value. */ return (sign << (exp_size + frac_size)) | - (MAKE_64BIT_MASK(frac_size, exp_size) - 1); + (MAKE_64BIT_MASK(frac_size, exp_size) - 1); } else { /* Return +-inf. */ return (sign << (exp_size + frac_size)) | - MAKE_64BIT_MASK(frac_size, exp_size); + MAKE_64BIT_MASK(frac_size, exp_size); } } } int idx = frac >> (frac_size - precision); uint64_t out_frac = (uint64_t)(lookup_table[idx]) << - (frac_size - precision); + (frac_size - precision); uint64_t out_exp = 2 * MAKE_64BIT_MASK(0, exp_size - 1) + ~exp; if (out_exp == 0 || out_exp == UINT64_MAX) { @@ -4178,7 +4175,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ @@ -4197,8 +4194,10 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ DO_OP(s2, s1, &env->fp_status)); \ } \ env->vstart = 0; \ - /* mask destination register are always tail-agnostic */ \ - /* set tail elements to 1s */ \ + /* + * mask destination register are always tail-agnostic + * set tail elements to 1s + */ \ if (vta_all_1s) { \ for (; i < total_elems; i++) { \ vext_set_elem_mask(vd, i, 1); \ @@ -4216,7 +4215,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ @@ -4234,8 +4233,10 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ DO_OP(s2, (ETYPE)s1, &env->fp_status)); \ } \ env->vstart = 0; \ - /* mask destination register are always tail-agnostic */ \ - /* set tail elements to 1s */ \ + /* + * mask destination register are always tail-agnostic + * set tail elements to 1s + */ \ if (vta_all_1s) { \ for (; i < total_elems; i++) { \ vext_set_elem_mask(vd, i, 1); \ @@ -4448,8 +4449,8 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ - *((ETYPE *)vd + H(i)) \ - = (!vm && !vext_elem_mask(v0, i) ? s2 : s1); \ + *((ETYPE *)vd + H(i)) = \ + (!vm && !vext_elem_mask(v0, i) ? s2 : s1); \ } \ env->vstart = 0; \ /* set tail elements to 1s */ \ @@ -4498,7 +4499,9 @@ GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8) #define WOP_UU_B uint16_t, uint8_t, uint8_t #define WOP_UU_H uint32_t, uint16_t, uint16_t #define WOP_UU_W uint64_t, uint32_t, uint32_t -/* vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer.*/ +/* + * vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer. + */ RVVCALL(OPFVV1, vfwcvt_xu_f_v_h, WOP_UU_H, H4, H2, float16_to_uint32) RVVCALL(OPFVV1, vfwcvt_xu_f_v_w, WOP_UU_W, H8, H4, float32_to_uint64) GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h, 4) @@ -4510,7 +4513,9 @@ RVVCALL(OPFVV1, vfwcvt_x_f_v_w, WOP_UU_W, H8, H4, float32_to_int64) GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 4) GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 8) -/* vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width float */ +/* + * vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width float. 
+ */ RVVCALL(OPFVV1, vfwcvt_f_xu_v_b, WOP_UU_B, H2, H1, uint8_to_float16) RVVCALL(OPFVV1, vfwcvt_f_xu_v_h, WOP_UU_H, H4, H2, uint16_to_float32) RVVCALL(OPFVV1, vfwcvt_f_xu_v_w, WOP_UU_W, H8, H4, uint32_to_float64) @@ -4527,8 +4532,7 @@ GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 4) GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 8) /* - * vfwcvt.f.f.v vd, vs2, vm - * Convert single-width float to double-width float. + * vfwcvt.f.f.v vd, vs2, vm # Convert single-width float to double-width float. */ static uint32_t vfwcvtffv16(uint16_t a, float_status *s) { @@ -4561,7 +4565,9 @@ GEN_VEXT_V_ENV(vfncvt_x_f_w_b, 1) GEN_VEXT_V_ENV(vfncvt_x_f_w_h, 2) GEN_VEXT_V_ENV(vfncvt_x_f_w_w, 4) -/* vfncvt.f.xu.v vd, vs2, vm # Convert double-width unsigned integer to float */ +/* + * vfncvt.f.xu.v vd, vs2, vm # Convert double-width unsigned integer to float. + */ RVVCALL(OPFVV1, vfncvt_f_xu_w_h, NOP_UU_H, H2, H4, uint32_to_float16) RVVCALL(OPFVV1, vfncvt_f_xu_w_w, NOP_UU_W, H4, H8, uint64_to_float32) GEN_VEXT_V_ENV(vfncvt_f_xu_w_h, 2) @@ -4585,12 +4591,13 @@ GEN_VEXT_V_ENV(vfncvt_f_f_w_h, 2) GEN_VEXT_V_ENV(vfncvt_f_f_w_w, 4) /* - *** Vector Reduction Operations + * Vector Reduction Operations */ /* Vector Single-Width Integer Reduction Instructions */ #define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP) \ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ - void *vs2, CPURISCVState *env, uint32_t desc) \ + void *vs2, CPURISCVState *env, \ + uint32_t desc) \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ @@ -4710,14 +4717,20 @@ GEN_VEXT_FRED(vfredosum_vs_w, uint32_t, uint32_t, H4, H4, float32_add) GEN_VEXT_FRED(vfredosum_vs_d, uint64_t, uint64_t, H8, H8, float64_add) /* Maximum value */ -GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maximum_number) -GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, float32_maximum_number) -GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maximum_number) +GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, + float16_maximum_number) +GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, + float32_maximum_number) +GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, + float64_maximum_number) /* Minimum value */ -GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minimum_number) -GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minimum_number) -GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minimum_number) +GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, + float16_minimum_number) +GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, + float32_minimum_number) +GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, + float64_minimum_number) /* Vector Widening Floating-Point Add Instructions */ static uint32_t fwadd16(uint32_t a, uint16_t b, float_status *s) @@ -4738,7 +4751,7 @@ GEN_VEXT_FRED(vfwredosum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16) GEN_VEXT_FRED(vfwredosum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32) /* - *** Vector Mask Operations + * Vector Mask Operations */ /* Vector Mask-Register Logical Instructions */ #define GEN_VEXT_MASK_VV(NAME, OP) \ @@ -4747,7 +4760,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ uint32_t desc) \ { \ uint32_t vl = env->vl; \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ int a, b; \ @@ -4758,10 +4771,10 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ vext_set_elem_mask(vd, i, OP(b, a)); \ } \ env->vstart = 0; \ - 
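The mask-register helpers below (vcpop_m, vfirst_m, vmsetm) all walk the mask one bit per element. For instance, vfirst.m returns the index of the first set mask bit, or -1 when none is set; equivalent scalar logic for the unmasked case (vfirst here is an invented standalone name, and the byte-wise LSB-first layout is an assumption matching little-endian hosts):

#include <stdint.h>
#include <assert.h>

/* Scalar model of HELPER(vfirst_m), unmasked case. */
static long vfirst(const uint8_t *mask, long vl)
{
    for (long i = 0; i < vl; i++) {
        if ((mask[i / 8] >> (i % 8)) & 1) {
            return i;
        }
    }
    return -1;
}

int main(void)
{
    uint8_t m[2] = { 0x00, 0x04 };   /* first set bit is element 10 */
    assert(vfirst(m, 16) == 10);
    assert(vfirst(m, 8) == -1);
    return 0;
}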
/* mask destination register are always tail- \ - * agnostic \ + /* + * mask destination register are always tail-agnostic + * set tail elements to 1s */ \ - /* set tail elements to 1s */ \ if (vta_all_1s) { \ for (; i < total_elems; i++) { \ vext_set_elem_mask(vd, i, 1); \ @@ -4804,7 +4817,7 @@ target_ulong HELPER(vcpop_m)(void *v0, void *vs2, CPURISCVState *env, return cnt; } -/* vfirst find-first-set mask bit*/ +/* vfirst find-first-set mask bit */ target_ulong HELPER(vfirst_m)(void *v0, void *vs2, CPURISCVState *env, uint32_t desc) { @@ -4834,7 +4847,7 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env, { uint32_t vm = vext_vm(desc); uint32_t vl = env->vl; - uint32_t total_elems = env_archcpu(env)->cfg.vlen; + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; uint32_t vta_all_1s = vext_vta_all_1s(desc); uint32_t vma = vext_vma(desc); int i; @@ -4869,8 +4882,10 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env, } } env->vstart = 0; - /* mask destination register are always tail-agnostic */ - /* set tail elements to 1s */ + /* + * mask destination register are always tail-agnostic + * set tail elements to 1s + */ if (vta_all_1s) { for (; i < total_elems; i++) { vext_set_elem_mask(vd, i, 1); @@ -4962,7 +4977,7 @@ GEN_VEXT_VID_V(vid_v_w, uint32_t, H4) GEN_VEXT_VID_V(vid_v_d, uint64_t, H8) /* - *** Vector Permutation Instructions + * Vector Permutation Instructions */ /* Vector Slide Instructions */ @@ -5039,7 +5054,8 @@ GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8) #define GEN_VEXT_VSLIE1UP(BITWIDTH, H) \ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \ - void *vs2, CPURISCVState *env, uint32_t desc) \ + void *vs2, CPURISCVState *env, \ + uint32_t desc) \ { \ typedef uint##BITWIDTH##_t ETYPE; \ uint32_t vm = vext_vm(desc); \ @@ -5087,7 +5103,8 @@ GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, 64) #define GEN_VEXT_VSLIDE1DOWN(BITWIDTH, H) \ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \ - void *vs2, CPURISCVState *env, uint32_t desc) \ + void *vs2, CPURISCVState *env, \ + uint32_t desc) \ { \ typedef uint##BITWIDTH##_t ETYPE; \ uint32_t vm = vext_vm(desc); \ diff --git a/target/riscv/zce_helper.c b/target/riscv/zce_helper.c new file mode 100644 index 0000000000..b433bda16d --- /dev/null +++ b/target/riscv/zce_helper.c @@ -0,0 +1,55 @@ +/* + * RISC-V Zcmt Extension Helper for QEMU. + * + * Copyright (c) 2021-2022 PLCT Lab + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" + +target_ulong HELPER(cm_jalt)(CPURISCVState *env, uint32_t index) +{ + +#if !defined(CONFIG_USER_ONLY) + RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_JVT); + if (ret != RISCV_EXCP_NONE) { + riscv_raise_exception(env, ret, 0); + } +#endif + + target_ulong target; + target_ulong val = env->jvt; + int xlen = riscv_cpu_xlen(env); + uint8_t mode = get_field(val, JVT_MODE); + target_ulong base = val & JVT_BASE; + target_ulong t0; + + if (mode != 0) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, 0); + } + + if (xlen == 32) { + t0 = base + (index << 2); + target = cpu_ldl_code(env, t0); + } else { + t0 = base + (index << 3); + target = cpu_ldq_code(env, t0); + } + + return target & ~0x1; +} diff --git a/target/rx/cpu-param.h b/target/rx/cpu-param.h index b156ad1ca0..521d669bdf 100644 --- a/target/rx/cpu-param.h +++ b/target/rx/cpu-param.h @@ -25,6 +25,4 @@ #define TARGET_PHYS_ADDR_SPACE_BITS 32 #define TARGET_VIRT_ADDR_SPACE_BITS 32 -#define NB_MMU_MODES 1 - #endif diff --git a/target/rx/cpu.c b/target/rx/cpu.c index 219ef28e46..67452e310c 100644 --- a/target/rx/cpu.c +++ b/target/rx/cpu.c @@ -44,7 +44,8 @@ static void rx_cpu_synchronize_from_tb(CPUState *cs, { RXCPU *cpu = RX_CPU(cs); - cpu->env.pc = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + cpu->env.pc = tb->pc; } static void rx_restore_state_to_opc(CPUState *cs, diff --git a/target/rx/cpu.h b/target/rx/cpu.h index 5655dffeff..555d230f24 100644 --- a/target/rx/cpu.h +++ b/target/rx/cpu.h @@ -123,11 +123,11 @@ const char *rx_crname(uint8_t cr); #ifndef CONFIG_USER_ONLY void rx_cpu_do_interrupt(CPUState *cpu); bool rx_cpu_exec_interrupt(CPUState *cpu, int int_req); +hwaddr rx_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); #endif /* !CONFIG_USER_ONLY */ void rx_cpu_dump_state(CPUState *cpu, FILE *f, int flags); int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -hwaddr rx_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void rx_translate_init(void); void rx_cpu_list(void); diff --git a/target/rx/gdbstub.c b/target/rx/gdbstub.c index 7eb2059a84..d7e0e6689b 100644 --- a/target/rx/gdbstub.c +++ b/target/rx/gdbstub.c @@ -17,7 +17,7 @@ */ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" int rx_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { diff --git a/target/rx/helper.c b/target/rx/helper.c index f34945e7e2..dad5fb4976 100644 --- a/target/rx/helper.c +++ b/target/rx/helper.c @@ -144,9 +144,9 @@ bool rx_cpu_exec_interrupt(CPUState *cs, int interrupt_request) return false; } -#endif /* !CONFIG_USER_ONLY */ - hwaddr rx_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { return addr; } + +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/rx/translate.c b/target/rx/translate.c index 87a3f54adb..70fad98e93 100644 --- a/target/rx/translate.c +++ b/target/rx/translate.c @@ -429,7 +429,6 @@ static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a) mem = tcg_temp_new(); tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz); rx_gen_st(a->sz, cpu_regs[a->rs], mem); - tcg_temp_free(mem); return true; } @@ -440,7 +439,6 @@ static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a) mem = tcg_temp_new(); tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz); rx_gen_ld(a->sz, cpu_regs[a->rd], mem); - tcg_temp_free(mem); 
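The cm_jalt helper in the new zce_helper.c above implements the Zcmt jump table: jvt supplies a mode field and a table base, the immediate indexes an XLEN-sized entry fetched from guest memory, and bit 0 of the fetched address is cleared. A host-side RV32 model (cm_jalt_target, the table contents, and the low-6-bit mode mask are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

/* RV32 model of the table walk in HELPER(cm_jalt).  In QEMU the entry is
 * fetched with cpu_ldl_code(); here a plain array stands in for the
 * guest-memory jump vector table. */
static uint32_t cm_jalt_target(uint32_t jvt, const uint32_t *table,
                               uint32_t index)
{
    uint32_t mode = jvt & 0x3f;        /* JVT.mode: low 6 bits (assumed) */
    if (mode != 0) {
        return 0;                      /* real code raises illegal-insn */
    }
    /* entry address would be (jvt & ~0x3fu) + (index << 2) */
    return table[index] & ~1u;         /* clear bit 0 of the target */
}

int main(void)
{
    uint32_t tbl[2] = { 0x80000101, 0x80000200 };
    printf("0x%08x\n", cm_jalt_target(0, tbl, 0));   /* 0x80000100 */
    return 0;
}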
return true; } @@ -458,12 +456,10 @@ static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a) static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a) { TCGv imm, mem; - imm = tcg_const_i32(a->imm); + imm = tcg_constant_i32(a->imm); mem = tcg_temp_new(); tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz); rx_gen_st(a->sz, imm, mem); - tcg_temp_free(imm); - tcg_temp_free(mem); return true; } @@ -474,7 +470,6 @@ static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a) mem = tcg_temp_new(); rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb); rx_gen_ld(a->sz, cpu_regs[a->rd], mem); - tcg_temp_free(mem); return true; } @@ -485,7 +480,6 @@ static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a) mem = tcg_temp_new(); rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb); rx_gen_st(a->sz, cpu_regs[a->rs], mem); - tcg_temp_free(mem); return true; } @@ -521,9 +515,7 @@ static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a) rx_gen_ld(a->sz, tmp, addr); addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd); rx_gen_st(a->sz, tmp, addr); - tcg_temp_free(tmp); } - tcg_temp_free(mem); return true; } @@ -541,7 +533,6 @@ static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a) if (a->ad == 0) { tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); } - tcg_temp_free(val); return true; } @@ -559,7 +550,6 @@ static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a) tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); } tcg_gen_mov_i32(cpu_regs[a->rs], val); - tcg_temp_free(val); return true; } @@ -571,7 +561,6 @@ static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a) mem = tcg_temp_new(); tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz); rx_gen_ldu(a->sz, cpu_regs[a->rd], mem); - tcg_temp_free(mem); return true; } @@ -592,7 +581,6 @@ static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a) mem = tcg_temp_new(); rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb); rx_gen_ldu(a->sz, cpu_regs[a->rd], mem); - tcg_temp_free(mem); return true; } @@ -610,7 +598,6 @@ static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a) tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); } tcg_gen_mov_i32(cpu_regs[a->rs], val); - tcg_temp_free(val); return true; } @@ -635,7 +622,6 @@ static bool trans_POPC(DisasContext *ctx, arg_POPC *a) val = tcg_temp_new(); pop(val); move_to_cr(ctx, val, a->cr); - tcg_temp_free(val); return true; } @@ -663,7 +649,6 @@ static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a) tcg_gen_mov_i32(val, cpu_regs[a->rs]); tcg_gen_subi_i32(cpu_sp, cpu_sp, 4); rx_gen_st(a->sz, val, cpu_sp); - tcg_temp_free(val); return true; } @@ -677,8 +662,6 @@ static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a) rx_gen_ld(a->sz, val, addr); tcg_gen_subi_i32(cpu_sp, cpu_sp, 4); rx_gen_st(a->sz, val, cpu_sp); - tcg_temp_free(mem); - tcg_temp_free(val); return true; } @@ -689,7 +672,6 @@ static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a) val = tcg_temp_new(); move_from_cr(ctx, val, a->cr, ctx->pc); push(val); - tcg_temp_free(val); return true; } @@ -717,7 +699,6 @@ static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a) tcg_gen_mov_i32(tmp, cpu_regs[a->rs]); tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]); tcg_gen_mov_i32(cpu_regs[a->rd], tmp); - tcg_temp_free(tmp); return true; } @@ -741,7 +722,6 @@ static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a) } tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd], 0, mi_to_mop(a->mi)); - tcg_temp_free(mem); return true; } @@ -749,12 +729,10 @@ static inline void stcond(TCGCond cond, int 
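The bulk of this translate.c churn is mechanical: tcg_const_i32() handed back a writable temporary that had to be freed explicitly, while tcg_constant_i32() returns an interned read-only value with no free, and the remaining tcg_temp_free() calls go away because TCG temporaries are now released automatically at the end of translation. The recurring shape of the conversion, as a sketch (gen_helper_foo is a placeholder for any helper call, in QEMU's tcg/tcg-op.h context):

/* before: value temp allocated and freed by hand */
TCGv imm = tcg_const_i32(a->imm);
gen_helper_foo(cpu_env, imm);
tcg_temp_free(imm);

/* after: interned constant, never written, never freed */
TCGv imm = tcg_constant_i32(a->imm);
gen_helper_foo(cpu_env, imm);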
rd, int imm) { TCGv z; TCGv _imm; - z = tcg_const_i32(0); - _imm = tcg_const_i32(imm); + z = tcg_constant_i32(0); + _imm = tcg_constant_i32(imm); tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z, _imm, cpu_regs[rd]); - tcg_temp_free(z); - tcg_temp_free(_imm); } /* stz #imm,rd */ @@ -785,12 +763,9 @@ static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a) tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0); addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd); rx_gen_st(a->sz, val, addr); - tcg_temp_free(val); - tcg_temp_free(mem); } else { tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0); } - tcg_temp_free(dc.temp); return true; } @@ -840,9 +815,8 @@ static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2) static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2) { - TCGv imm = tcg_const_i32(src2); + TCGv imm = tcg_constant_i32(src2); opr(cpu_regs[dst], cpu_regs[src], imm); - tcg_temp_free(imm); } static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx, @@ -852,7 +826,6 @@ static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx, mem = tcg_temp_new(); val = rx_load_source(ctx, mem, ld, mi, src); opr(cpu_regs[dst], cpu_regs[dst], val); - tcg_temp_free(mem); } static void rx_and(TCGv ret, TCGv arg1, TCGv arg2) @@ -994,16 +967,14 @@ static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a) /* ret = arg1 + arg2 + psw_c */ static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2) { - TCGv z; - z = tcg_const_i32(0); + TCGv z = tcg_constant_i32(0); tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z); tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z); - tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1); - tcg_gen_xor_i32(z, arg1, arg2); - tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z); + tcg_gen_xor_i32(cpu_psw_z, arg1, arg2); + tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z); + tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); tcg_gen_mov_i32(ret, cpu_psw_s); - tcg_temp_free(z); } /* adc #imm, rd */ @@ -1034,15 +1005,13 @@ static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a) /* ret = arg1 + arg2 */ static void rx_add(TCGv ret, TCGv arg1, TCGv arg2) { - TCGv z; - z = tcg_const_i32(0); + TCGv z = tcg_constant_i32(0); tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z); - tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1); - tcg_gen_xor_i32(z, arg1, arg2); - tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z); + tcg_gen_xor_i32(cpu_psw_z, arg1, arg2); + tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z); + tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); tcg_gen_mov_i32(ret, cpu_psw_s); - tcg_temp_free(z); } /* add #uimm4, rd */ @@ -1071,24 +1040,23 @@ static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a) /* ret = arg1 - arg2 */ static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2) { - TCGv temp; tcg_gen_sub_i32(cpu_psw_s, arg1, arg2); - tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2); tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1); - temp = tcg_temp_new_i32(); - tcg_gen_xor_i32(temp, arg1, arg2); - tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, temp); - tcg_temp_free_i32(temp); + tcg_gen_xor_i32(cpu_psw_z, arg1, arg2); + tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z); + tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); /* CMP not required return */ if (ret) { tcg_gen_mov_i32(ret, cpu_psw_s); } } + static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2) { rx_sub(NULL, arg1, arg2); } + /* ret = arg1 - arg2 - !psw_c */ /* -> ret = arg1 + ~arg2 + psw_c */ 
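The reworked rx_add()/rx_adc() drop their scratch temp by staging the operand xor in cpu_psw_z before the final Z move; the value they compute is the standard signed-overflow predicate, V = MSB of (sum ^ a) & ~(a ^ b), i.e. both addends share a sign that the sum lacks. Checked in plain C (add_overflows is an invented name):

#include <assert.h>
#include <stdint.h>

/* Same overflow test as rx_add()/rx_adc(), on host integers. */
static int add_overflows(uint32_t a, uint32_t b)
{
    uint32_t s = a + b;
    return ((s ^ a) & ~(a ^ b) & 0x80000000u) != 0;
}

int main(void)
{
    assert(add_overflows(0x7fffffffu, 1));            /* INT_MAX + 1 */
    assert(!add_overflows(0x7fffffffu, 0xffffffffu)); /* INT_MAX + (-1) */
    assert(add_overflows(0x80000000u, 0xffffffffu));  /* INT_MIN + (-1) */
    return 0;
}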
static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2) @@ -1097,7 +1065,6 @@ static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2) temp = tcg_temp_new(); tcg_gen_not_i32(temp, arg2); rx_adc(ret, arg1, temp); - tcg_temp_free(temp); } /* cmp #imm4, rs2 */ @@ -1157,23 +1124,11 @@ static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a) return true; } -static void rx_abs(TCGv ret, TCGv arg1) -{ - TCGv neg; - TCGv zero; - neg = tcg_temp_new(); - zero = tcg_const_i32(0); - tcg_gen_neg_i32(neg, arg1); - tcg_gen_movcond_i32(TCG_COND_LT, ret, arg1, zero, neg, arg1); - tcg_temp_free(neg); - tcg_temp_free(zero); -} - /* abs rd */ /* abs rs, rd */ static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a) { - rx_gen_op_rr(rx_abs, a->rd, a->rs); + rx_gen_op_rr(tcg_gen_abs_i32, a->rd, a->rs); return true; } @@ -1233,13 +1188,12 @@ static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a) /* emul #imm, rd */ static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a) { - TCGv imm = tcg_const_i32(a->imm); + TCGv imm = tcg_constant_i32(a->imm); if (a->rd > 14) { qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd); } tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], cpu_regs[a->rd], imm); - tcg_temp_free(imm); return true; } @@ -1255,20 +1209,18 @@ static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a) val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs); tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], cpu_regs[a->rd], val); - tcg_temp_free(mem); return true; } /* emulu #imm, rd */ static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a) { - TCGv imm = tcg_const_i32(a->imm); + TCGv imm = tcg_constant_i32(a->imm); if (a->rd > 14) { qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd); } tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], cpu_regs[a->rd], imm); - tcg_temp_free(imm); return true; } @@ -1284,7 +1236,6 @@ static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a) val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs); tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], cpu_regs[a->rd], val); - tcg_temp_free(mem); return true; } @@ -1362,10 +1313,10 @@ static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a) done = gen_new_label(); /* if (cpu_regs[a->rs]) { */ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift); - count = tcg_const_i32(32); + count = tcg_temp_new(); tmp = tcg_temp_new(); tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31); - tcg_gen_sub_i32(count, count, tmp); + tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp); tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count); tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp); tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0); @@ -1381,8 +1332,6 @@ static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a) gen_set_label(done); tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]); tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]); - tcg_temp_free(count); - tcg_temp_free(tmp); return true; } @@ -1436,7 +1385,6 @@ static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith) tcg_gen_movi_i32(cpu_psw_o, 0); tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]); tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]); - tcg_temp_free(count); } /* shar #imm:5, rd */ @@ -1480,7 +1428,6 @@ static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a) tcg_gen_mov_i32(cpu_psw_c, tmp); tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]); tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]); - tcg_temp_free(tmp); return true; } @@ -1570,7 +1517,6 @@ static bool trans_REVW(DisasContext *ctx, arg_REVW *a) 
tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8); tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff); tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp); - tcg_temp_free(tmp); return true; } @@ -1592,7 +1538,6 @@ static void rx_bcnd_main(DisasContext *ctx, int cd, int dst) gen_set_label(t); gen_goto_tb(ctx, 1, ctx->pc + dst); gen_set_label(done); - tcg_temp_free(dc.temp); break; case 14: /* always true case */ @@ -1640,9 +1585,8 @@ static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a) static inline void rx_save_pc(DisasContext *ctx) { - TCGv pc = tcg_const_i32(ctx->base.pc_next); + TCGv pc = tcg_constant_i32(ctx->base.pc_next); push(pc); - tcg_temp_free(pc); } /* jmp rs */ @@ -1724,9 +1668,8 @@ static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a) #define STRING(op) \ do { \ - TCGv size = tcg_const_i32(a->sz); \ + TCGv size = tcg_constant_i32(a->sz); \ gen_helper_##op(cpu_env, size); \ - tcg_temp_free(size); \ } while (0) /* suntile. */ @@ -1767,8 +1710,6 @@ static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2) tcg_gen_sari_i64(tmp1, tmp1, 16); tcg_gen_mul_i64(ret, tmp0, tmp1); tcg_gen_shli_i64(ret, ret, 16); - tcg_temp_free_i64(tmp0); - tcg_temp_free_i64(tmp1); } static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2) @@ -1782,8 +1723,6 @@ static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2) tcg_gen_ext16s_i64(tmp1, tmp1); tcg_gen_mul_i64(ret, tmp0, tmp1); tcg_gen_shli_i64(ret, ret, 16); - tcg_temp_free_i64(tmp0); - tcg_temp_free_i64(tmp1); } /* mulhi rs,rs2 */ @@ -1807,7 +1746,6 @@ static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a) tmp = tcg_temp_new_i64(); rx_mul64hi(tmp, a->rs, a->rs2); tcg_gen_add_i64(cpu_acc, cpu_acc, tmp); - tcg_temp_free_i64(tmp); return true; } @@ -1818,7 +1756,6 @@ static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a) tmp = tcg_temp_new_i64(); rx_mul64lo(tmp, a->rs, a->rs2); tcg_gen_add_i64(cpu_acc, cpu_acc, tmp); - tcg_temp_free_i64(tmp); return true; } @@ -1836,7 +1773,6 @@ static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a) rd64 = tcg_temp_new_i64(); tcg_gen_extract_i64(rd64, cpu_acc, 16, 32); tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64); - tcg_temp_free_i64(rd64); return true; } @@ -1847,7 +1783,6 @@ static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a) rs64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]); tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32); - tcg_temp_free_i64(rs64); return true; } @@ -1858,16 +1793,14 @@ static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a) rs64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]); tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32); - tcg_temp_free_i64(rs64); return true; } /* racw #imm */ static bool trans_RACW(DisasContext *ctx, arg_RACW *a) { - TCGv imm = tcg_const_i32(a->imm + 1); + TCGv imm = tcg_constant_i32(a->imm + 1); gen_helper_racw(cpu_env, imm); - tcg_temp_free(imm); return true; } @@ -1876,15 +1809,13 @@ static bool trans_SAT(DisasContext *ctx, arg_SAT *a) { TCGv tmp, z; tmp = tcg_temp_new(); - z = tcg_const_i32(0); + z = tcg_constant_i32(0); /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */ tcg_gen_sari_i32(tmp, cpu_psw_s, 31); /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */ tcg_gen_xori_i32(tmp, tmp, 0x80000000); tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd], cpu_psw_o, z, tmp, cpu_regs[a->rd]); - tcg_temp_free(tmp); - tcg_temp_free(z); return true; } @@ -1900,10 +1831,9 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a) static bool cat3(trans_, name, _ir)(DisasContext *ctx, \ cat3(arg_, name, 
_ir) * a) \ { \ - TCGv imm = tcg_const_i32(li(ctx, 0)); \ + TCGv imm = tcg_constant_i32(li(ctx, 0)); \ gen_helper_##op(cpu_regs[a->rd], cpu_env, \ cpu_regs[a->rd], imm); \ - tcg_temp_free(imm); \ return true; \ } \ static bool cat3(trans_, name, _mr)(DisasContext *ctx, \ @@ -1914,7 +1844,6 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a) val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \ gen_helper_##op(cpu_regs[a->rd], cpu_env, \ cpu_regs[a->rd], val); \ - tcg_temp_free(mem); \ return true; \ } @@ -1925,7 +1854,6 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a) mem = tcg_temp_new(); \ val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \ gen_helper_##op(cpu_regs[a->rd], cpu_env, val); \ - tcg_temp_free(mem); \ return true; \ } @@ -1937,9 +1865,8 @@ FOP(FDIV, fdiv) /* fcmp #imm, rd */ static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a) { - TCGv imm = tcg_const_i32(li(ctx, 0)); + TCGv imm = tcg_constant_i32(li(ctx, 0)); gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm); - tcg_temp_free(imm); return true; } @@ -1951,7 +1878,6 @@ static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a) mem = tcg_temp_new(); val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); gen_helper_fcmp(cpu_env, cpu_regs[a->rd], val); - tcg_temp_free(mem); return true; } @@ -1966,7 +1892,6 @@ static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a) mem = tcg_temp_new(); val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs); gen_helper_itof(cpu_regs[a->rd], cpu_env, val); - tcg_temp_free(mem); return true; } @@ -1977,7 +1902,6 @@ static void rx_bsetm(TCGv mem, TCGv mask) rx_gen_ld(MO_8, val, mem); tcg_gen_or_i32(val, val, mask); rx_gen_st(MO_8, val, mem); - tcg_temp_free(val); } static void rx_bclrm(TCGv mem, TCGv mask) @@ -1987,7 +1911,6 @@ static void rx_bclrm(TCGv mem, TCGv mask) rx_gen_ld(MO_8, val, mem); tcg_gen_andc_i32(val, val, mask); rx_gen_st(MO_8, val, mem); - tcg_temp_free(val); } static void rx_btstm(TCGv mem, TCGv mask) @@ -1998,7 +1921,6 @@ static void rx_btstm(TCGv mem, TCGv mask) tcg_gen_and_i32(val, val, mask); tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0); tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c); - tcg_temp_free(val); } static void rx_bnotm(TCGv mem, TCGv mask) @@ -2008,7 +1930,6 @@ static void rx_bnotm(TCGv mem, TCGv mask) rx_gen_ld(MO_8, val, mem); tcg_gen_xor_i32(val, val, mask); rx_gen_st(MO_8, val, mem); - tcg_temp_free(val); } static void rx_bsetr(TCGv reg, TCGv mask) @@ -2028,7 +1949,6 @@ static inline void rx_btstr(TCGv reg, TCGv mask) tcg_gen_and_i32(t0, reg, mask); tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0); tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c); - tcg_temp_free(t0); } static inline void rx_bnotr(TCGv reg, TCGv mask) @@ -2042,49 +1962,41 @@ static inline void rx_bnotr(TCGv reg, TCGv mask) { \ TCGv mask, mem, addr; \ mem = tcg_temp_new(); \ - mask = tcg_const_i32(1 << a->imm); \ + mask = tcg_constant_i32(1 << a->imm); \ addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \ cat3(rx_, op, m)(addr, mask); \ - tcg_temp_free(mask); \ - tcg_temp_free(mem); \ return true; \ } \ static bool cat3(trans_, name, _ir)(DisasContext *ctx, \ cat3(arg_, name, _ir) * a) \ { \ TCGv mask; \ - mask = tcg_const_i32(1 << a->imm); \ + mask = tcg_constant_i32(1 << a->imm); \ cat3(rx_, op, r)(cpu_regs[a->rd], mask); \ - tcg_temp_free(mask); \ return true; \ } \ static bool cat3(trans_, name, _rr)(DisasContext *ctx, \ cat3(arg_, name, _rr) * a) \ { \ TCGv mask, b; \ - mask = tcg_const_i32(1); \ + mask = tcg_temp_new(); \ b = tcg_temp_new(); \ tcg_gen_andi_i32(b, 
cpu_regs[a->rs], 31); \ - tcg_gen_shl_i32(mask, mask, b); \ + tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \ cat3(rx_, op, r)(cpu_regs[a->rd], mask); \ - tcg_temp_free(mask); \ - tcg_temp_free(b); \ return true; \ } \ static bool cat3(trans_, name, _rm)(DisasContext *ctx, \ cat3(arg_, name, _rm) * a) \ { \ TCGv mask, mem, addr, b; \ - mask = tcg_const_i32(1); \ + mask = tcg_temp_new(); \ b = tcg_temp_new(); \ tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \ - tcg_gen_shl_i32(mask, mask, b); \ + tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \ mem = tcg_temp_new(); \ addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \ cat3(rx_, op, m)(addr, mask); \ - tcg_temp_free(mem); \ - tcg_temp_free(mask); \ - tcg_temp_free(b); \ return true; \ } @@ -2103,8 +2015,6 @@ static inline void bmcnd_op(TCGv val, TCGCond cond, int pos) tcg_gen_andi_i32(val, val, ~(1 << pos)); tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0); tcg_gen_deposit_i32(val, val, bit, pos, 1); - tcg_temp_free(bit); - tcg_temp_free(dc.temp); } /* bmcnd #imm, dsp[rd] */ @@ -2117,8 +2027,6 @@ static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a) rx_gen_ld(MO_8, val, addr); bmcnd_op(val, a->cd, a->imm); rx_gen_st(MO_8, val, addr); - tcg_temp_free(val); - tcg_temp_free(mem); return true; } @@ -2208,9 +2116,8 @@ static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a) { TCGv imm; - imm = tcg_const_i32(a->imm); + imm = tcg_constant_i32(a->imm); move_to_cr(ctx, imm, a->cr); - tcg_temp_free(imm); return true; } @@ -2238,7 +2145,6 @@ static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a) tcg_gen_mov_i32(psw, cpu_bpsw); gen_helper_set_psw_rte(cpu_env, psw); ctx->base.is_jmp = DISAS_EXIT; - tcg_temp_free(psw); } return true; } @@ -2253,7 +2159,6 @@ static bool trans_RTE(DisasContext *ctx, arg_RTE *a) pop(psw); gen_helper_set_psw_rte(cpu_env, psw); ctx->base.is_jmp = DISAS_EXIT; - tcg_temp_free(psw); } return true; } @@ -2273,10 +2178,9 @@ static bool trans_INT(DisasContext *ctx, arg_INT *a) TCGv vec; tcg_debug_assert(a->imm < 0x100); - vec = tcg_const_i32(a->imm); + vec = tcg_constant_i32(a->imm); tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next); gen_helper_rxint(cpu_env, vec); - tcg_temp_free(vec); ctx->base.is_jmp = DISAS_NORETURN; return true; } @@ -2363,7 +2267,7 @@ static const TranslatorOps rx_tr_ops = { .disas_log = rx_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/s390x/cpu-param.h b/target/s390x/cpu-param.h index bf951a002e..84ca08626b 100644 --- a/target/s390x/cpu-param.h +++ b/target/s390x/cpu-param.h @@ -12,6 +12,5 @@ #define TARGET_PAGE_BITS 12 #define TARGET_PHYS_ADDR_SPACE_BITS 64 #define TARGET_VIRT_ADDR_SPACE_BITS 64 -#define NB_MMU_MODES 4 #endif diff --git a/target/s390x/cpu-sysemu.c b/target/s390x/cpu-sysemu.c index 948e4bd3e0..97d6c760a8 100644 --- a/target/s390x/cpu-sysemu.c +++ b/target/s390x/cpu-sysemu.c @@ -21,6 +21,7 @@ */ #include "qemu/osdep.h" +#include "qemu/error-report.h" #include "qapi/error.h" #include "cpu.h" #include "s390x-internal.h" diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c index b10a8541ff..df167493c3 100644 --- a/target/s390x/cpu.c +++ b/target/s390x/cpu.c @@ -41,6 +41,26 @@ #define CR0_RESET 0xE0UL #define CR14_RESET 0xC2000000UL; +#ifndef CONFIG_USER_ONLY +static bool is_early_exception_psw(uint64_t mask, uint64_t addr) +{ + if (mask & PSW_MASK_RESERVED) { + return true; + } + + switch (mask & 
(PSW_MASK_32 | PSW_MASK_64)) { + case 0: + return addr & ~0xffffffULL; + case PSW_MASK_32: + return addr & ~0x7fffffffULL; + case PSW_MASK_32 | PSW_MASK_64: + return false; + default: /* PSW_MASK_64 */ + return true; + } +} +#endif + void s390_cpu_set_psw(CPUS390XState *env, uint64_t mask, uint64_t addr) { #ifndef CONFIG_USER_ONLY @@ -57,6 +77,12 @@ void s390_cpu_set_psw(CPUS390XState *env, uint64_t mask, uint64_t addr) env->cc_op = (mask >> 44) & 3; #ifndef CONFIG_USER_ONLY + if (is_early_exception_psw(mask, addr)) { + env->int_pgm_ilen = 0; + trigger_pgm_exception(env, PGM_SPECIFICATION); + return; + } + if ((old_mask ^ mask) & PSW_MASK_PER) { s390_cpu_recompute_watchpoints(env_cpu(env)); } @@ -114,6 +140,13 @@ static bool s390_cpu_has_work(CPUState *cs) return s390_cpu_has_int(cpu); } +static void s390_query_cpu_fast(CPUState *cpu, CpuInfoFast *value) +{ + S390CPU *s390_cpu = S390_CPU(cpu); + + value->u.s390x.cpu_state = s390_cpu->env.cpu_state; +} + /* S390CPUClass::reset() */ static void s390_cpu_reset(CPUState *s, cpu_reset_type type) { @@ -306,6 +339,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = s390_cpu_class_by_name, cc->has_work = s390_cpu_has_work; cc->dump_state = s390_cpu_dump_state; + cc->query_cpu_fast = s390_query_cpu_fast; cc->set_pc = s390_cpu_set_pc; cc->get_pc = s390_cpu_get_pc; cc->gdb_read_register = s390_cpu_gdb_read_register; diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h index 7d6d01325b..f130c29f83 100644 --- a/target/s390x/cpu.h +++ b/target/s390x/cpu.h @@ -29,6 +29,7 @@ #include "cpu_models.h" #include "exec/cpu-defs.h" #include "qemu/cpu-float.h" +#include "tcg/tcg_s390x.h" #define ELF_MACHINE_UNAME "S390X" @@ -75,9 +76,6 @@ struct CPUArchState { float_status fpu_status; /* passed to softfloat lib */ - /* The low part of a 128-bit return, or remainder of a divide. */ - uint64_t retxl; - PSW psw; S390CrashReason crash_reason; @@ -87,6 +85,7 @@ struct CPUArchState { uint64_t cc_vr; uint64_t ex_value; + uint64_t ex_target; uint64_t __excp_addr; uint64_t psa; @@ -292,6 +291,7 @@ extern const VMStateDescription vmstate_s390_cpu; #define PSW_MASK_32 0x0000000080000000ULL #define PSW_MASK_SHORT_ADDR 0x000000007fffffffULL #define PSW_MASK_SHORT_CTRL 0xffffffff80000000ULL +#define PSW_MASK_RESERVED 0xb80800fe7fffffffULL #undef PSW_ASC_PRIMARY #undef PSW_ASC_ACCREG @@ -381,6 +381,14 @@ static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch) static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) { + if (env->psw.addr & 1) { + /* + * Instructions must be at even addresses. + * This needs to be checked before address translation. 
+ */ + env->int_pgm_ilen = 2; /* see s390_cpu_tlb_fill() */ + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, 0); + } *pc = env->psw.addr; *cs_base = env->ex_value; *flags = (env->psw.mask >> FLAG_MASK_PSW_SHIFT) & FLAG_MASK_PSW; diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c index 065ec6d66c..457b5cb10c 100644 --- a/target/s390x/cpu_models.c +++ b/target/s390x/cpu_models.c @@ -17,6 +17,7 @@ #include "sysemu/kvm.h" #include "sysemu/tcg.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "qapi/visitor.h" #include "qemu/module.h" #include "qemu/hw-version.h" diff --git a/target/s390x/diag.c b/target/s390x/diag.c index 76b01dcd68..e5f0df19e7 100644 --- a/target/s390x/diag.c +++ b/target/s390x/diag.c @@ -22,6 +22,8 @@ #include "hw/s390x/pv.h" #include "sysemu/kvm.h" #include "kvm/kvm_s390x.h" +#include "qemu/error-report.h" + int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3) { diff --git a/target/s390x/gdbstub.c b/target/s390x/gdbstub.c index a5d69d0e0b..6fbfd41bc8 100644 --- a/target/s390x/gdbstub.c +++ b/target/s390x/gdbstub.c @@ -23,6 +23,7 @@ #include "s390x-internal.h" #include "exec/exec-all.h" #include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/bitops.h" #include "sysemu/hw_accel.h" #include "sysemu/tcg.h" @@ -205,12 +206,8 @@ static int cpu_write_c_reg(CPUS390XState *env, uint8_t *mem_buf, int n) #define S390_VIRT_CPUTM_REGNUM 1 #define S390_VIRT_BEA_REGNUM 2 #define S390_VIRT_PREFIX_REGNUM 3 -#define S390_VIRT_PP_REGNUM 4 -#define S390_VIRT_PFT_REGNUM 5 -#define S390_VIRT_PFS_REGNUM 6 -#define S390_VIRT_PFC_REGNUM 7 /* total number of registers in s390-virt.xml */ -#define S390_NUM_VIRT_REGS 8 +#define S390_NUM_VIRT_REGS 4 static int cpu_read_virt_reg(CPUS390XState *env, GByteArray *mem_buf, int n) { @@ -223,14 +220,6 @@ static int cpu_read_virt_reg(CPUS390XState *env, GByteArray *mem_buf, int n) return gdb_get_regl(mem_buf, env->gbea); case S390_VIRT_PREFIX_REGNUM: return gdb_get_regl(mem_buf, env->psa); - case S390_VIRT_PP_REGNUM: - return gdb_get_regl(mem_buf, env->pp); - case S390_VIRT_PFT_REGNUM: - return gdb_get_regl(mem_buf, env->pfault_token); - case S390_VIRT_PFS_REGNUM: - return gdb_get_regl(mem_buf, env->pfault_select); - case S390_VIRT_PFC_REGNUM: - return gdb_get_regl(mem_buf, env->pfault_compare); default: return 0; } @@ -255,19 +244,51 @@ static int cpu_write_virt_reg(CPUS390XState *env, uint8_t *mem_buf, int n) env->psa = ldtul_p(mem_buf); cpu_synchronize_post_init(env_cpu(env)); return 8; - case S390_VIRT_PP_REGNUM: + default: + return 0; + } +} + +/* the values represent the positions in s390-virt-kvm.xml */ +#define S390_VIRT_KVM_PP_REGNUM 0 +#define S390_VIRT_KVM_PFT_REGNUM 1 +#define S390_VIRT_KVM_PFS_REGNUM 2 +#define S390_VIRT_KVM_PFC_REGNUM 3 +/* total number of registers in s390-virt-kvm.xml */ +#define S390_NUM_VIRT_KVM_REGS 4 + +static int cpu_read_virt_kvm_reg(CPUS390XState *env, GByteArray *mem_buf, int n) +{ + switch (n) { + case S390_VIRT_KVM_PP_REGNUM: + return gdb_get_regl(mem_buf, env->pp); + case S390_VIRT_KVM_PFT_REGNUM: + return gdb_get_regl(mem_buf, env->pfault_token); + case S390_VIRT_KVM_PFS_REGNUM: + return gdb_get_regl(mem_buf, env->pfault_select); + case S390_VIRT_KVM_PFC_REGNUM: + return gdb_get_regl(mem_buf, env->pfault_compare); + default: + return 0; + } +} + +static int cpu_write_virt_kvm_reg(CPUS390XState *env, uint8_t *mem_buf, int n) +{ + switch (n) { + case S390_VIRT_KVM_PP_REGNUM: env->pp = ldtul_p(mem_buf); cpu_synchronize_post_init(env_cpu(env)); return 8; - 
case S390_VIRT_PFT_REGNUM: + case S390_VIRT_KVM_PFT_REGNUM: env->pfault_token = ldtul_p(mem_buf); cpu_synchronize_post_init(env_cpu(env)); return 8; - case S390_VIRT_PFS_REGNUM: + case S390_VIRT_KVM_PFS_REGNUM: env->pfault_select = ldtul_p(mem_buf); cpu_synchronize_post_init(env_cpu(env)); return 8; - case S390_VIRT_PFC_REGNUM: + case S390_VIRT_KVM_PFC_REGNUM: env->pfault_compare = ldtul_p(mem_buf); cpu_synchronize_post_init(env_cpu(env)); return 8; @@ -320,10 +341,15 @@ void s390_cpu_gdb_init(CPUState *cs) cpu_write_c_reg, S390_NUM_C_REGS, "s390-cr.xml", 0); + gdb_register_coprocessor(cs, cpu_read_virt_reg, + cpu_write_virt_reg, + S390_NUM_VIRT_REGS, "s390-virt.xml", 0); + if (kvm_enabled()) { - gdb_register_coprocessor(cs, cpu_read_virt_reg, - cpu_write_virt_reg, - S390_NUM_VIRT_REGS, "s390-virt.xml", 0); + gdb_register_coprocessor(cs, cpu_read_virt_kvm_reg, + cpu_write_virt_kvm_reg, + S390_NUM_VIRT_KVM_REGS, "s390-virt-kvm.xml", + 0); } #endif } diff --git a/target/s390x/helper.c b/target/s390x/helper.c index 473c8e51b0..2b363aa959 100644 --- a/target/s390x/helper.c +++ b/target/s390x/helper.c @@ -21,7 +21,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "s390x-internal.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/timer.h" #include "hw/s390x/ioinst.h" #include "hw/s390x/pv.h" diff --git a/target/s390x/helper.h b/target/s390x/helper.h index 341bc51ec2..7529e725f2 100644 --- a/target/s390x/helper.h +++ b/target/s390x/helper.h @@ -108,10 +108,6 @@ DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64) DEF_HELPER_FLAGS_2(srnm, TCG_CALL_NO_WG, void, env, i64) DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64) DEF_HELPER_2(stfle, i32, env, i64) -DEF_HELPER_FLAGS_2(lpq, TCG_CALL_NO_WG, i64, env, i64) -DEF_HELPER_FLAGS_2(lpq_parallel, TCG_CALL_NO_WG, i64, env, i64) -DEF_HELPER_FLAGS_4(stpq, TCG_CALL_NO_WG, void, env, i64, i64, i64) -DEF_HELPER_FLAGS_4(stpq_parallel, TCG_CALL_NO_WG, void, env, i64, i64, i64) DEF_HELPER_4(mvcos, i32, env, i64, i64, i64) DEF_HELPER_4(cu12, i32, env, i32, i32, i32) DEF_HELPER_4(cu14, i32, env, i32, i32, i32) diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h index 5d4361d35b..825252d728 100644 --- a/target/s390x/s390x-internal.h +++ b/target/s390x/s390x-internal.h @@ -11,6 +11,7 @@ #define S390X_INTERNAL_H #include "cpu.h" +#include "fpu/softfloat.h" #ifndef CONFIG_USER_ONLY typedef struct LowCore { @@ -299,7 +300,7 @@ uint32_t set_cc_nz_f128(float128 v); uint8_t s390_softfloat_exc_to_ieee(unsigned int exc); int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3); void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode); -int float_comp_to_cc(CPUS390XState *env, int float_compare); +int float_comp_to_cc(CPUS390XState *env, FloatRelation float_compare); #define DCMASK_ZERO 0x0c00 #define DCMASK_NORMAL 0x0300 diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c index bc767f0443..228aa9f237 100644 --- a/target/s390x/tcg/excp_helper.c +++ b/target/s390x/tcg/excp_helper.c @@ -85,8 +85,8 @@ void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc) /* * Unaligned accesses are only diagnosed with MO_ALIGN. At the moment, - * this is only for the atomic operations, for which we want to raise a - * specification exception. + * this is only for the atomic and relative long operations, for which we want + * to raise a specification exception. 
*/ static G_NORETURN void do_unaligned_access(CPUState *cs, uintptr_t retaddr) @@ -212,7 +212,8 @@ static void do_program_interrupt(CPUS390XState *env) LowCore *lowcore; int ilen = env->int_pgm_ilen; - assert(ilen == 2 || ilen == 4 || ilen == 6); + assert((env->int_pgm_code == PGM_SPECIFICATION && ilen == 0) || + ilen == 2 || ilen == 4 || ilen == 6); switch (env->int_pgm_code) { case PGM_PER: diff --git a/target/s390x/tcg/insn-data.h.inc b/target/s390x/tcg/insn-data.h.inc index 9d2d35f084..bcc70d99ba 100644 --- a/target/s390x/tcg/insn-data.h.inc +++ b/target/s390x/tcg/insn-data.h.inc @@ -199,8 +199,8 @@ C(0xe55c, CHSI, SIL, GIE, m1_32s, i2, 0, 0, 0, cmps64) C(0xe558, CGHSI, SIL, GIE, m1_64, i2, 0, 0, 0, cmps64) /* COMPARE HALFWORD RELATIVE LONG */ - C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_32s, 0, 0, 0, cmps32) - C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmps64) + C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_16s, 0, 0, 0, cmps32) + C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_16s, 0, 0, 0, cmps64) /* COMPARE HIGH */ C(0xb9cd, CHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmps32) C(0xb9dd, CHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmps32) @@ -410,12 +410,12 @@ /* LOAD */ C(0x1800, LR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, 0) - C(0x5800, L, RX_a, Z, 0, a2, new, r1_32, ld32s, 0) - C(0xe358, LY, RXY_a, LD, 0, a2, new, r1_32, ld32s, 0) + D(0x5800, L, RX_a, Z, 0, a2, new, r1_32, ld32s, 0, 0) + D(0xe358, LY, RXY_a, LD, 0, a2, new, r1_32, ld32s, 0, 0) C(0xb904, LGR, RRE, Z, 0, r2_o, 0, r1, mov2, 0) C(0xb914, LGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, 0) - C(0xe304, LG, RXY_a, Z, 0, a2, r1, 0, ld64, 0) - C(0xe314, LGF, RXY_a, Z, 0, a2, r1, 0, ld32s, 0) + D(0xe304, LG, RXY_a, Z, 0, a2, r1, 0, ld64, 0, 0) + D(0xe314, LGF, RXY_a, Z, 0, a2, r1, 0, ld32s, 0, 0) F(0x2800, LDR, RR_a, Z, 0, f2, 0, f1, mov2, 0, IF_AFP1 | IF_AFP2) F(0x6800, LD, RX_a, Z, 0, m2_64, 0, f1, mov2, 0, IF_AFP1) F(0xed65, LDY, RXY_a, LD, 0, m2_64, 0, f1, mov2, 0, IF_AFP1) @@ -426,9 +426,9 @@ /* LOAD IMMEDIATE */ C(0xc001, LGFI, RIL_a, EI, 0, i2, 0, r1, mov2, 0) /* LOAD RELATIVE LONG */ - C(0xc40d, LRL, RIL_b, GIE, 0, ri2, new, r1_32, ld32s, 0) - C(0xc408, LGRL, RIL_b, GIE, 0, ri2, r1, 0, ld64, 0) - C(0xc40c, LGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32s, 0) + D(0xc40d, LRL, RIL_b, GIE, 0, ri2, new, r1_32, ld32s, 0, MO_ALIGN) + D(0xc408, LGRL, RIL_b, GIE, 0, ri2, r1, 0, ld64, 0, MO_ALIGN) + D(0xc40c, LGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32s, 0, MO_ALIGN) /* LOAD ADDRESS */ C(0x4100, LA, RX_a, Z, 0, a2, 0, r1, mov2, 0) C(0xe371, LAY, RXY_a, LD, 0, a2, 0, r1, mov2, 0) @@ -456,9 +456,9 @@ C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32) C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64) C(0xb912, LTGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, s64) - C(0xe312, LT, RXY_a, EI, 0, a2, new, r1_32, ld32s, s64) - C(0xe302, LTG, RXY_a, EI, 0, a2, r1, 0, ld64, s64) - C(0xe332, LTGF, RXY_a, GIE, 0, a2, r1, 0, ld32s, s64) + D(0xe312, LT, RXY_a, EI, 0, a2, new, r1_32, ld32s, s64, 0) + D(0xe302, LTG, RXY_a, EI, 0, a2, r1, 0, ld64, s64, 0) + D(0xe332, LTGF, RXY_a, GIE, 0, a2, r1, 0, ld32s, s64, 0) F(0xb302, LTEBR, RRE, Z, 0, e2, 0, cond_e1e2, mov2, f32, IF_BFP) F(0xb312, LTDBR, RRE, Z, 0, f2, 0, f1, mov2, f64, IF_BFP) F(0xb342, LTXBR, RRE, Z, x2h, x2l, 0, x1_P, movx, f128, IF_BFP) @@ -502,16 +502,16 @@ C(0xc405, LHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16s, 0) C(0xc404, LGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16s, 0) /* LOAD HIGH */ - C(0xe3ca, LFH, RXY_a, HW, 0, a2, new, r1_32h, ld32u, 0) + D(0xe3ca, LFH, RXY_a, HW, 0, a2, new, r1_32h, ld32u, 0, 0) /* LOAD HIGH AND
TRAP */ C(0xe3c8, LFHAT, RXY_a, LAT, 0, m2_32u, r1, 0, lfhat, 0) /* LOAD LOGICAL */ C(0xb916, LLGFR, RRE, Z, 0, r2_32u, 0, r1, mov2, 0) - C(0xe316, LLGF, RXY_a, Z, 0, a2, r1, 0, ld32u, 0) + D(0xe316, LLGF, RXY_a, Z, 0, a2, r1, 0, ld32u, 0, 0) /* LOAD LOGICAL AND TRAP */ C(0xe39d, LLGFAT, RXY_a, LAT, 0, a2, r1, 0, llgfat, 0) /* LOAD LOGICAL RELATIVE LONG */ - C(0xc40e, LLGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32u, 0) + D(0xc40e, LLGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32u, 0, MO_ALIGN) /* LOAD LOGICAL CHARACTER */ C(0xb994, LLCR, RRE, EI, 0, r2_8u, 0, r1_32, mov2, 0) C(0xb984, LLGCR, RRE, EI, 0, r2_8u, 0, r1, mov2, 0) @@ -570,7 +570,7 @@ D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL) D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEUQ) /* LOAD PAIR FROM QUADWORD */ - C(0xe38f, LPQ, RXY_a, Z, 0, a2, r1_P, 0, lpq, 0) + C(0xe38f, LPQ, RXY_a, Z, 0, a2, 0, r1_D64, lpq, 0) /* LOAD POSITIVE */ C(0x1000, LPR, RR_a, Z, 0, r2_32s, new, r1_32, abs, abs32) C(0xb900, LPGR, RRE, Z, 0, r2, r1, 0, abs, abs64) @@ -606,7 +606,7 @@ F(0xed04, LDEB, RXE, Z, 0, m2_32u, new, f1, ldeb, 0, IF_BFP) F(0xed05, LXDB, RXE, Z, 0, m2_64, new_x, x1, lxdb, 0, IF_BFP) F(0xed06, LXEB, RXE, Z, 0, m2_32u, new_x, x1, lxeb, 0, IF_BFP) - F(0xb324, LDER, RXE, Z, 0, e2, new, f1, lde, 0, IF_AFP1) + F(0xb324, LDER, RRE, Z, 0, e2, new, f1, lde, 0, IF_AFP1) F(0xed24, LDE, RXE, Z, 0, m2_32u, new, f1, lde, 0, IF_AFP1) /* LOAD ROUNDED */ F(0xb344, LEDBR, RRF_e, Z, 0, f2, new, e1, ledb, 0, IF_BFP) @@ -840,16 +840,16 @@ F(0xed15, SQDB, RXE, Z, 0, m2_64, new, f1, sqdb, 0, IF_BFP) /* STORE */ - C(0x5000, ST, RX_a, Z, r1_o, a2, 0, 0, st32, 0) - C(0xe350, STY, RXY_a, LD, r1_o, a2, 0, 0, st32, 0) - C(0xe324, STG, RXY_a, Z, r1_o, a2, 0, 0, st64, 0) - F(0x6000, STD, RX_a, Z, f1, a2, 0, 0, st64, 0, IF_AFP1) - F(0xed67, STDY, RXY_a, LD, f1, a2, 0, 0, st64, 0, IF_AFP1) - F(0x7000, STE, RX_a, Z, e1, a2, 0, 0, st32, 0, IF_AFP1) - F(0xed66, STEY, RXY_a, LD, e1, a2, 0, 0, st32, 0, IF_AFP1) + D(0x5000, ST, RX_a, Z, r1_o, a2, 0, 0, st32, 0, 0) + D(0xe350, STY, RXY_a, LD, r1_o, a2, 0, 0, st32, 0, 0) + D(0xe324, STG, RXY_a, Z, r1_o, a2, 0, 0, st64, 0, 0) + E(0x6000, STD, RX_a, Z, f1, a2, 0, 0, st64, 0, 0, IF_AFP1) + E(0xed67, STDY, RXY_a, LD, f1, a2, 0, 0, st64, 0, 0, IF_AFP1) + E(0x7000, STE, RX_a, Z, e1, a2, 0, 0, st32, 0, 0, IF_AFP1) + E(0xed66, STEY, RXY_a, LD, e1, a2, 0, 0, st32, 0, 0, IF_AFP1) /* STORE RELATIVE LONG */ - C(0xc40f, STRL, RIL_b, GIE, r1_o, ri2, 0, 0, st32, 0) - C(0xc40b, STGRL, RIL_b, GIE, r1_o, ri2, 0, 0, st64, 0) + D(0xc40f, STRL, RIL_b, GIE, r1_o, ri2, 0, 0, st32, 0, MO_ALIGN) + D(0xc40b, STGRL, RIL_b, GIE, r1_o, ri2, 0, 0, st64, 0, MO_ALIGN) /* STORE CHARACTER */ C(0x4200, STC, RX_a, Z, r1_o, a2, 0, 0, st8, 0) C(0xe372, STCY, RXY_a, LD, r1_o, a2, 0, 0, st8, 0) @@ -867,7 +867,7 @@ /* STORE HALFWORD RELATIVE LONG */ C(0xc407, STHRL, RIL_b, GIE, r1_o, ri2, 0, 0, st16, 0) /* STORE HIGH */ - C(0xe3cb, STFH, RXY_a, HW, r1_sr32, a2, 0, 0, st32, 0) + D(0xe3cb, STFH, RXY_a, HW, r1_sr32, a2, 0, 0, st32, 0, 0) /* STORE ON CONDITION */ D(0xebf3, STOC, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 0) D(0xebe3, STOCG, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 1) diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c index e51a0db0fe..d02ec861d8 100644 --- a/target/s390x/tcg/mem_helper.c +++ b/target/s390x/tcg/mem_helper.c @@ -26,6 +26,7 @@ #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" +#include "hw/core/tcg-cpu-ops.h" #include "qemu/int128.h" #include "qemu/atomic128.h" #include "trace.h" @@ 
-145,11 +146,10 @@ static inline int s390_probe_access(CPUArchState *env, target_ulong addr, int mmu_idx, bool nonfault, void **phost, uintptr_t ra) { - int flags = probe_access_flags(env, addr, access_type, mmu_idx, + int flags = probe_access_flags(env, addr, 0, access_type, mmu_idx, nonfault, phost, ra); if (unlikely(flags & TLB_INVALID_MASK)) { - assert(!nonfault); #ifdef CONFIG_USER_ONLY /* Address is in TEC in system mode; see s390_cpu_record_sigsegv. */ env->__excp_addr = addr & TARGET_PAGE_MASK; @@ -1737,6 +1737,11 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2, bool parallel) { uint32_t mem_idx = cpu_mmu_index(env, false); + MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128, mem_idx); + MemOpIdx oi8 = make_memop_idx(MO_TE | MO_64, mem_idx); + MemOpIdx oi4 = make_memop_idx(MO_TE | MO_32, mem_idx); + MemOpIdx oi2 = make_memop_idx(MO_TE | MO_16, mem_idx); + MemOpIdx oi1 = make_memop_idx(MO_8, mem_idx); uintptr_t ra = GETPC(); uint32_t fc = extract32(env->regs[0], 0, 8); uint32_t sc = extract32(env->regs[0], 8, 8); @@ -1775,34 +1780,30 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, max = 3; #endif if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) || - (HAVE_ATOMIC128 ? 0 : sc > max)) { + (HAVE_ATOMIC128_RW ? 0 : sc > max)) { cpu_loop_exit_atomic(env_cpu(env), ra); } } - /* All loads happen before all stores. For simplicity, load the entire - store value area from the parameter list. */ - svh = cpu_ldq_data_ra(env, pl + 16, ra); - svl = cpu_ldq_data_ra(env, pl + 24, ra); + /* + * All loads happen before all stores. For simplicity, load the entire + * store value area from the parameter list. + */ + svh = cpu_ldq_mmu(env, pl + 16, oi8, ra); + svl = cpu_ldq_mmu(env, pl + 24, oi8, ra); switch (fc) { case 0: { - uint32_t nv = cpu_ldl_data_ra(env, pl, ra); + uint32_t nv = cpu_ldl_mmu(env, pl, oi4, ra); uint32_t cv = env->regs[r3]; uint32_t ov; if (parallel) { -#ifdef CONFIG_USER_ONLY - uint32_t *haddr = g2h(env_cpu(env), a1); - ov = qatomic_cmpxchg__nocheck(haddr, cv, nv); -#else - MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx); - ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra); -#endif + ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi4, ra); } else { - ov = cpu_ldl_data_ra(env, a1, ra); - cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra); + ov = cpu_ldl_mmu(env, a1, oi4, ra); + cpu_stl_mmu(env, a1, (ov == cv ? nv : ov), oi4, ra); } cc = (ov != cv); env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov); @@ -1811,21 +1812,20 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, case 1: { - uint64_t nv = cpu_ldq_data_ra(env, pl, ra); + uint64_t nv = cpu_ldq_mmu(env, pl, oi8, ra); uint64_t cv = env->regs[r3]; uint64_t ov; if (parallel) { #ifdef CONFIG_ATOMIC64 - MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN, mem_idx); - ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra); + ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi8, ra); #else /* Note that we asserted !parallel above. */ g_assert_not_reached(); #endif } else { - ov = cpu_ldq_data_ra(env, a1, ra); - cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra); + ov = cpu_ldq_mmu(env, a1, oi8, ra); + cpu_stq_mmu(env, a1, (ov == cv ? 
nv : ov), oi8, ra); } cc = (ov != cv); env->regs[r3] = ov; @@ -1834,27 +1834,19 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, case 2: { - uint64_t nvh = cpu_ldq_data_ra(env, pl, ra); - uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra); - Int128 nv = int128_make128(nvl, nvh); + Int128 nv = cpu_ld16_mmu(env, pl, oi16, ra); Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]); Int128 ov; if (!parallel) { - uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra); - uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra); - - ov = int128_make128(ol, oh); + ov = cpu_ld16_mmu(env, a1, oi16, ra); cc = !int128_eq(ov, cv); if (cc) { nv = ov; } - - cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra); - cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra); + cpu_st16_mmu(env, a1, nv, oi16, ra); } else if (HAVE_CMPXCHG128) { - MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx); - ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); + ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi16, ra); cc = !int128_eq(ov, cv); } else { /* Note that we asserted !parallel above. */ @@ -1876,29 +1868,19 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, if (cc == 0) { switch (sc) { case 0: - cpu_stb_data_ra(env, a2, svh >> 56, ra); + cpu_stb_mmu(env, a2, svh >> 56, oi1, ra); break; case 1: - cpu_stw_data_ra(env, a2, svh >> 48, ra); + cpu_stw_mmu(env, a2, svh >> 48, oi2, ra); break; case 2: - cpu_stl_data_ra(env, a2, svh >> 32, ra); + cpu_stl_mmu(env, a2, svh >> 32, oi4, ra); break; case 3: - cpu_stq_data_ra(env, a2, svh, ra); + cpu_stq_mmu(env, a2, svh, oi8, ra); break; case 4: - if (!parallel) { - cpu_stq_data_ra(env, a2 + 0, svh, ra); - cpu_stq_data_ra(env, a2 + 8, svl, ra); - } else if (HAVE_ATOMIC128) { - MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx); - Int128 sv = int128_make128(svl, svh); - cpu_atomic_sto_be_mmu(env, a2, sv, oi, ra); - } else { - /* Note that we asserted !parallel above. 
*/ - g_assert_not_reached(); - } + cpu_st16_mmu(env, a2, int128_make128(svl, svh), oi16, ra); break; default: g_assert_not_reached(); @@ -2398,67 +2380,6 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr) } #endif -/* load pair from quadword */ -uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr) -{ - uintptr_t ra = GETPC(); - uint64_t hi, lo; - - check_alignment(env, addr, 16, ra); - hi = cpu_ldq_data_ra(env, addr + 0, ra); - lo = cpu_ldq_data_ra(env, addr + 8, ra); - - env->retxl = lo; - return hi; -} - -uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr) -{ - uintptr_t ra = GETPC(); - uint64_t hi, lo; - int mem_idx; - MemOpIdx oi; - Int128 v; - - assert(HAVE_ATOMIC128); - - mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx); - v = cpu_atomic_ldo_be_mmu(env, addr, oi, ra); - hi = int128_gethi(v); - lo = int128_getlo(v); - - env->retxl = lo; - return hi; -} - -/* store pair to quadword */ -void HELPER(stpq)(CPUS390XState *env, uint64_t addr, - uint64_t low, uint64_t high) -{ - uintptr_t ra = GETPC(); - - check_alignment(env, addr, 16, ra); - cpu_stq_data_ra(env, addr + 0, high, ra); - cpu_stq_data_ra(env, addr + 8, low, ra); -} - -void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr, - uint64_t low, uint64_t high) -{ - uintptr_t ra = GETPC(); - int mem_idx; - MemOpIdx oi; - Int128 v; - - assert(HAVE_ATOMIC128); - - mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx); - v = int128_make128(low, high); - cpu_atomic_sto_be_mmu(env, addr, v, oi, ra); -} - /* Execute instruction. This instruction executes an insn modified with the contents of r1. It does not change the executed instruction in memory; it does not change the program counter. @@ -2468,8 +2389,16 @@ void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr, */ void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr) { - uint64_t insn = cpu_lduw_code(env, addr); - uint8_t opc = insn >> 8; + uint64_t insn; + uint8_t opc; + + /* EXECUTE targets must be at even addresses. */ + if (addr & 1) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); + } + + insn = cpu_lduw_code(env, addr); + opc = insn >> 8; /* Or in the contents of R1[56:63]. */ insn |= r1 & 0xff; @@ -2530,6 +2459,7 @@ void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr) that ex_value is non-zero, which flags that we are in a state that requires such execution. 
*/ env->ex_value = insn | ilen; + env->ex_target = addr; } uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src, diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index faa6f737ba..3eb3708d55 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -156,8 +156,6 @@ struct DisasContext { typedef struct { TCGCond cond:8; bool is_64; - bool g1; - bool g2; union { struct { TCGv_i64 a, b; } s64; struct { TCGv_i32 a, b; } s32; @@ -308,8 +306,6 @@ static TCGv_i128 load_freg_128(int reg) TCGv_i128 r = tcg_temp_new_i128(); tcg_gen_concat_i64_i128(r, l, h); - tcg_temp_free_i64(h); - tcg_temp_free_i64(l); return r; } @@ -339,11 +335,6 @@ static void store_freg32_i64(int reg, TCGv_i64 v) tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg)); } -static void return_low128(TCGv_i64 dest) -{ - tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl)); -} - static void update_psw_addr(DisasContext *s) { /* psw.addr */ @@ -422,7 +413,7 @@ static int get_mem_index(DisasContext *s) case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT: return MMU_HOME_IDX; default: - tcg_abort(); + g_assert_not_reached(); break; } #endif @@ -656,7 +647,7 @@ static void gen_op_calc_cc(DisasContext *s) gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr); break; default: - tcg_abort(); + g_assert_not_reached(); } /* We now have cc in cc_op as constant */ @@ -722,7 +713,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER); c->u.s32.a = cc_op; c->u.s32.b = cc_op; - c->g1 = c->g2 = true; c->is_64 = false; return; } @@ -839,7 +829,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) /* Load up the arguments of the comparison. */ c->is_64 = true; - c->g1 = c->g2 = false; switch (old_cc_op) { case CC_OP_LTGT0_32: c->is_64 = false; @@ -861,13 +850,11 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) case CC_OP_FLOGR: c->u.s64.a = cc_dst; c->u.s64.b = tcg_constant_i64(0); - c->g1 = true; break; case CC_OP_LTGT_64: case CC_OP_LTUGTU_64: c->u.s64.a = cc_src; c->u.s64.b = cc_dst; - c->g1 = c->g2 = true; break; case CC_OP_TM_32: @@ -882,7 +869,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) case CC_OP_SUBU: c->is_64 = true; c->u.s64.b = tcg_constant_i64(0); - c->g1 = true; switch (mask) { case 8 | 2: case 4 | 1: /* result */ @@ -900,7 +886,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) case CC_OP_STATIC: c->is_64 = false; c->u.s32.a = cc_op; - c->g1 = true; switch (mask) { case 0x8 | 0x4 | 0x2: /* cc != 3 */ cond = TCG_COND_NE; @@ -916,7 +901,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) break; case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */ cond = TCG_COND_EQ; - c->g1 = false; c->u.s32.a = tcg_temp_new_i32(); c->u.s32.b = tcg_constant_i32(0); tcg_gen_andi_i32(c->u.s32.a, cc_op, 1); @@ -935,7 +919,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) break; case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */ cond = TCG_COND_NE; - c->g1 = false; c->u.s32.a = tcg_temp_new_i32(); c->u.s32.b = tcg_constant_i32(0); tcg_gen_andi_i32(c->u.s32.a, cc_op, 1); @@ -959,7 +942,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) default: /* CC is masked by something else: (8 >> cc) & mask. 
*/ cond = TCG_COND_NE; - c->g1 = false; c->u.s32.a = tcg_temp_new_i32(); c->u.s32.b = tcg_constant_i32(0); tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op); @@ -974,24 +956,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) c->cond = cond; } -static void free_compare(DisasCompare *c) -{ - if (!c->g1) { - if (c->is_64) { - tcg_temp_free_i64(c->u.s64.a); - } else { - tcg_temp_free_i32(c->u.s32.a); - } - } - if (!c->g2) { - if (c->is_64) { - tcg_temp_free_i64(c->u.s64.b); - } else { - tcg_temp_free_i32(c->u.s32.b); - } - } -} - /* ====================================================================== */ /* Define the insn format enumeration. */ #define F0(N) FMT_##N, @@ -1092,7 +1056,6 @@ static const DisasFormatInfo format_info[] = { them, and store them back. See the "in1", "in2", "prep", "wout" sets of routines below for more details. */ typedef struct { - bool g_out, g_out2, g_in1, g_in2; TCGv_i64 out, out2, in1, in2; TCGv_i64 addr1; TCGv_i128 out_128, in1_128, in2_128; @@ -1292,17 +1255,14 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, TCGv_i64 z = tcg_constant_i64(0); tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b); tcg_gen_extu_i32_i64(t1, t0); - tcg_temp_free_i32(t0); tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next); per_branch_cond(s, TCG_COND_NE, t1, z); - tcg_temp_free_i64(t1); } ret = DISAS_PC_UPDATED; } egress: - free_compare(c); return ret; } @@ -1462,11 +1422,11 @@ static DisasJumpType op_andi(DisasContext *s, DisasOps *o) int shift = s->insn->data & 0xff; int size = s->insn->data >> 8; uint64_t mask = ((1ull << size) - 1) << shift; + TCGv_i64 t = tcg_temp_new_i64(); - assert(!o->g_in2); - tcg_gen_shli_i64(o->in2, o->in2, shift); - tcg_gen_ori_i64(o->in2, o->in2, ~mask); - tcg_gen_and_i64(o->out, o->in1, o->in2); + tcg_gen_shli_i64(t, o->in2, shift); + tcg_gen_ori_i64(t, t, ~mask); + tcg_gen_and_i64(o->out, o->in1, t); /* Produce the CC from only the bits manipulated. */ tcg_gen_andi_i64(cc_dst, o->out, mask); @@ -1555,7 +1515,6 @@ static void save_link_info(DisasContext *s, DisasOps *o) tcg_gen_extu_i32_i64(t, cc_op); tcg_gen_shli_i64(t, t, 28); tcg_gen_or_i64(o->out, o->out, t); - tcg_temp_free_i64(t); } static DisasJumpType op_bal(DisasContext *s, DisasOps *o) @@ -1570,18 +1529,51 @@ static DisasJumpType op_bal(DisasContext *s, DisasOps *o) } } +/* + * Disassemble the target of a branch. The results are returned in a form + * suitable for passing into help_branch(): + * + * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd + * branches, whose DisasContext *S contains the relative immediate field RI, + * are considered fixed. All the other branches are considered computed. + * - int IMM is the value of RI. + * - TCGv_i64 CDEST is the address of the computed target. + */ +#define disas_jdest(s, ri, is_imm, imm, cdest) do { \ + if (have_field(s, ri)) { \ + if (unlikely(s->ex_value)) { \ + cdest = tcg_temp_new_i64(); \ + tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\ + tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2); \ + is_imm = false; \ + } else { \ + is_imm = true; \ + } \ + } else { \ + is_imm = false; \ + } \ + imm = is_imm ? 
get_field(s, ri) : 0; \ +} while (false) + static DisasJumpType op_basi(DisasContext *s, DisasOps *o) { + DisasCompare c; + bool is_imm; + int imm; + pc_to_link_info(o->out, s, s->pc_tmp); - return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2); + + disas_jdest(s, i2, is_imm, imm, o->in2); + disas_jcc(s, &c, 0xf); + return help_branch(s, &c, is_imm, imm, o->in2); } static DisasJumpType op_bc(DisasContext *s, DisasOps *o) { int m1 = get_field(s, m1); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; + int imm; /* BCR with R2 = 0 causes no branching */ if (have_field(s, r2) && get_field(s, r2) == 0) { @@ -1598,6 +1590,7 @@ static DisasJumpType op_bc(DisasContext *s, DisasOps *o) return DISAS_NEXT; } + disas_jdest(s, i2, is_imm, imm, o->in2); disas_jcc(s, &c, m1); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1605,15 +1598,13 @@ static DisasJumpType op_bc(DisasContext *s, DisasOps *o) static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; TCGv_i64 t; + int imm; c.cond = TCG_COND_NE; c.is_64 = false; - c.g1 = false; - c.g2 = false; t = tcg_temp_new_i64(); tcg_gen_subi_i64(t, regs[r1], 1); @@ -1621,8 +1612,8 @@ static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) c.u.s32.a = tcg_temp_new_i32(); c.u.s32.b = tcg_constant_i32(0); tcg_gen_extrl_i64_i32(c.u.s32.a, t); - tcg_temp_free_i64(t); + disas_jdest(s, i2, is_imm, imm, o->in2); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1635,8 +1626,6 @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) c.cond = TCG_COND_NE; c.is_64 = false; - c.g1 = false; - c.g2 = false; t = tcg_temp_new_i64(); tcg_gen_shri_i64(t, regs[r1], 32); @@ -1645,7 +1634,6 @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) c.u.s32.a = tcg_temp_new_i32(); c.u.s32.b = tcg_constant_i32(0); tcg_gen_extrl_i64_i32(c.u.s32.a, t); - tcg_temp_free_i64(t); return help_branch(s, &c, 1, imm, o->in2); } @@ -1653,19 +1641,18 @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; + int imm; c.cond = TCG_COND_NE; c.is_64 = true; - c.g1 = true; - c.g2 = false; tcg_gen_subi_i64(regs[r1], regs[r1], 1); c.u.s64.a = regs[r1]; c.u.s64.b = tcg_constant_i64(0); + disas_jdest(s, i2, is_imm, imm, o->in2); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1673,15 +1660,13 @@ static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); int r3 = get_field(s, r3); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; TCGv_i64 t; + int imm; c.cond = (s->insn->data ? 
TCG_COND_LE : TCG_COND_GT); c.is_64 = false; - c.g1 = false; - c.g2 = false; t = tcg_temp_new_i64(); tcg_gen_add_i64(t, regs[r1], regs[r3]); @@ -1690,8 +1675,8 @@ static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) tcg_gen_extrl_i64_i32(c.u.s32.a, t); tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]); store_reg32_i64(r1, t); - tcg_temp_free_i64(t); + disas_jdest(s, i2, is_imm, imm, o->in2); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1699,25 +1684,23 @@ static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); int r3 = get_field(s, r3); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; + int imm; c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT); c.is_64 = true; if (r1 == (r3 | 1)) { c.u.s64.b = load_reg(r3 | 1); - c.g2 = false; } else { c.u.s64.b = regs[r3 | 1]; - c.g2 = true; } tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]); c.u.s64.a = regs[r1]; - c.g1 = true; + disas_jdest(s, i2, is_imm, imm, o->in2); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1731,14 +1714,13 @@ static DisasJumpType op_cj(DisasContext *s, DisasOps *o) if (s->insn->data) { c.cond = tcg_unsigned_cond(c.cond); } - c.is_64 = c.g1 = c.g2 = true; + c.is_64 = true; c.u.s64.a = o->in1; c.u.s64.b = o->in2; - is_imm = have_field(s, i4); - if (is_imm) { - imm = get_field(s, i4); - } else { + o->out = NULL; + disas_jdest(s, i4, is_imm, imm, o->out); + if (!is_imm && !o->out) { imm = 0; o->out = get_address(s, 0, get_field(s, b4), get_field(s, d4)); @@ -2012,11 +1994,9 @@ static DisasJumpType op_cksm(DisasContext *s, DisasOps *o) gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]); set_cc_static(s); tcg_gen_extr_i128_i64(o->out, len, pair); - tcg_temp_free_i128(pair); tcg_gen_add_i64(regs[r2], regs[r2], len); tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len); - tcg_temp_free_i64(len); return DISAS_NEXT; } @@ -2025,32 +2005,24 @@ static DisasJumpType op_clc(DisasContext *s, DisasOps *o) { int l = get_field(s, l1); TCGv_i32 vl; + MemOp mop; switch (l + 1) { case 1: - tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s)); - tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s)); - break; case 2: - tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s)); - tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s)); - break; case 4: - tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s)); - tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s)); - break; case 8: - tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s)); - tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s)); - break; + mop = ctz32(l + 1) | MO_TE; + tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop); + tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop); + gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst); + return DISAS_NEXT; default: vl = tcg_constant_i32(l); gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2); set_cc_static(s); return DISAS_NEXT; } - gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst); - return DISAS_NEXT; } static DisasJumpType op_clcl(DisasContext *s, DisasOps *o) @@ -2118,7 +2090,6 @@ static DisasJumpType op_clm(DisasContext *s, DisasOps *o) tcg_gen_extrl_i64_i32(t1, o->in1); gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2); set_cc_static(s); - tcg_temp_free_i32(t1); return DISAS_NEXT; } @@ -2128,7 +2099,6 @@ static DisasJumpType op_clst(DisasContext *s, DisasOps *o) gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2); tcg_gen_extr_i128_i64(o->in2, o->in1, pair); - tcg_temp_free_i128(pair); set_cc_static(s); 
return DISAS_NEXT; @@ -2140,7 +2110,6 @@ static DisasJumpType op_cps(DisasContext *s, DisasOps *o) tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull); tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull); tcg_gen_or_i64(o->out, o->out, t); - tcg_temp_free_i64(t); return DISAS_NEXT; } @@ -2156,14 +2125,12 @@ static DisasJumpType op_cs(DisasContext *s, DisasOps *o) addr = get_address(s, 0, b2, d2); tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1, get_mem_index(s), s->insn->data | MO_ALIGN); - tcg_temp_free_i64(addr); /* Are the memory and expected values (un)equal? Note that this setcond produces the output CC value, thus the NE sense of the test. */ cc = tcg_temp_new_i64(); tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out); tcg_gen_extrl_i64_i32(cc_op, cc); - tcg_temp_free_i64(cc); set_cc_static(s); return DISAS_NEXT; @@ -2223,7 +2190,6 @@ static DisasJumpType op_csp(DisasContext *s, DisasOps *o) tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE)); tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2, get_mem_index(s), mop | MO_ALIGN); - tcg_temp_free_i64(addr); /* Are the memory and expected values (un)equal? */ cc = tcg_temp_new_i64(); @@ -2237,14 +2203,12 @@ static DisasJumpType op_csp(DisasContext *s, DisasOps *o) } else { tcg_gen_mov_i64(o->out, old); } - tcg_temp_free_i64(old); /* If the comparison was equal, and the LSB of R2 was set, then we need to flush the TLB (for all cpus). */ tcg_gen_xori_i64(cc, cc, 1); tcg_gen_and_i64(cc, cc, o->in2); tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab); - tcg_temp_free_i64(cc); gen_helper_purge(cpu_env); gen_set_label(lab); @@ -2259,9 +2223,7 @@ static DisasJumpType op_cvd(DisasContext *s, DisasOps *o) TCGv_i32 t2 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(t2, o->in1); gen_helper_cvd(t1, t2); - tcg_temp_free_i32(t2); - tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s)); - tcg_temp_free_i64(t1); + tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ); return DISAS_NEXT; } @@ -2363,7 +2325,6 @@ static DisasJumpType op_divs64(DisasContext *s, DisasOps *o) gen_helper_divs64(t, cpu_env, o->in1, o->in2); tcg_gen_extr_i128_i64(o->out2, o->out, t); - tcg_temp_free_i128(t); return DISAS_NEXT; } @@ -2373,7 +2334,6 @@ static DisasJumpType op_divu64(DisasContext *s, DisasOps *o) gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2); tcg_gen_extr_i128_i64(o->out2, o->out, t); - tcg_temp_free_i128(t); return DISAS_NEXT; } @@ -2428,8 +2388,6 @@ static DisasJumpType op_epsw(DisasContext *s, DisasOps *o) if (r2 != 0) { store_reg32_i64(r2, psw_mask); } - - tcg_temp_free_i64(t); return DISAS_NEXT; } @@ -2523,7 +2481,7 @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o) switch (m3) { case 0xf: /* Effectively a 32-bit load. */ - tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL); len = 32; goto one_insert; @@ -2531,7 +2489,7 @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o) case 0x6: case 0x3: /* Effectively a 16-bit load. */ - tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW); len = 16; goto one_insert; @@ -2540,7 +2498,7 @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o) case 0x2: case 0x1: /* Effectively an 8-bit load. 
*/ - tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB); len = 8; goto one_insert; @@ -2556,7 +2514,7 @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o) ccm = 0; while (m3) { if (m3 & 0x8) { - tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB); tcg_gen_addi_i64(o->in2, o->in2, 1); tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8); ccm |= 0xffull << pos; @@ -2569,7 +2527,6 @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o) tcg_gen_movi_i64(tmp, ccm); gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -2592,8 +2549,6 @@ static DisasJumpType op_ipm(DisasContext *s, DisasOps *o) tcg_gen_extu_i32_i64(t2, cc_op); tcg_gen_deposit_i64(t1, t1, t2, 4, 60); tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); return DISAS_NEXT; } @@ -2815,43 +2770,46 @@ static DisasJumpType op_llgt(DisasContext *s, DisasOps *o) static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB); return DISAS_NEXT; } static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB); return DISAS_NEXT; } static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW); return DISAS_NEXT; } static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW); return DISAS_NEXT; } static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s), + MO_TESL | s->insn->data); return DISAS_NEXT; } static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s), + MO_TEUL | s->insn->data); return DISAS_NEXT; } static DisasJumpType op_ld64(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), + MO_TEUQ | s->insn->data); return DISAS_NEXT; } @@ -2869,7 +2827,7 @@ static DisasJumpType op_lat(DisasContext *s, DisasOps *o) static DisasJumpType op_lgat(DisasContext *s, DisasOps *o) { TCGLabel *lab = gen_new_label(); - tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ); /* The value is stored even in case of trap. */ tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab); gen_trap(s); @@ -2891,7 +2849,8 @@ static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o) static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o) { TCGLabel *lab = gen_new_label(); - tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s)); + + tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL); /* The value is stored even in case of trap. 
*/ tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab); gen_trap(s); @@ -2925,21 +2884,17 @@ static DisasJumpType op_loc(DisasContext *s, DisasOps *o) if (c.is_64) { tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b, o->in2, o->in1); - free_compare(&c); } else { TCGv_i32 t32 = tcg_temp_new_i32(); TCGv_i64 t, z; tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b); - free_compare(&c); t = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(t, t32); - tcg_temp_free_i32(t32); z = tcg_constant_i64(0); tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1); - tcg_temp_free_i64(t); } return DISAS_NEXT; @@ -2983,21 +2938,21 @@ static DisasJumpType op_lpp(DisasContext *s, DisasOps *o) static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o) { - TCGv_i64 t1, t2; + TCGv_i64 mask, addr; per_breaking_event(s); - t1 = tcg_temp_new_i64(); - t2 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), - MO_TEUL | MO_ALIGN_8); - tcg_gen_addi_i64(o->in2, o->in2, 4); - tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s)); - /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */ - tcg_gen_shli_i64(t1, t1, 32); - gen_helper_load_psw(cpu_env, t1, t2); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); + /* + * Convert the short PSW into the normal PSW, similar to what + * s390_cpu_load_normal() does. + */ + mask = tcg_temp_new_i64(); + addr = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8); + tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR); + tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL); + tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW); + gen_helper_load_psw(cpu_env, mask, addr); return DISAS_NORETURN; } @@ -3012,10 +2967,8 @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o) tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8); tcg_gen_addi_i64(o->in2, o->in2, 8); - tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ); gen_helper_load_psw(cpu_env, t1, t2); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); return DISAS_NORETURN; } #endif @@ -3038,25 +2991,22 @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) /* Only one register to read. */ t1 = tcg_temp_new_i64(); if (unlikely(r1 == r3)) { - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL); store_reg32_i64(r1, t1); - tcg_temp_free(t1); return DISAS_NEXT; } /* First load the values of the first and last registers to trigger possible page faults. */ t2 = tcg_temp_new_i64(); - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL); tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15)); - tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL); store_reg32_i64(r1, t1); store_reg32_i64(r3, t2); /* Only two registers to read. */ if (((r1 + 1) & 15) == r3) { - tcg_temp_free(t2); - tcg_temp_free(t1); return DISAS_NEXT; } @@ -3066,12 +3016,9 @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) while (r1 != r3) { r1 = (r1 + 1) & 15; tcg_gen_add_i64(o->in2, o->in2, t2); - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL); store_reg32_i64(r1, t1); } - tcg_temp_free(t2); - tcg_temp_free(t1); - return DISAS_NEXT; } @@ -3084,25 +3031,22 @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) /* Only one register to read. 
*/ t1 = tcg_temp_new_i64(); if (unlikely(r1 == r3)) { - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL); store_reg32h_i64(r1, t1); - tcg_temp_free(t1); return DISAS_NEXT; } /* First load the values of the first and last registers to trigger possible page faults. */ t2 = tcg_temp_new_i64(); - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL); tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15)); - tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL); store_reg32h_i64(r1, t1); store_reg32h_i64(r3, t2); /* Only two registers to read. */ if (((r1 + 1) & 15) == r3) { - tcg_temp_free(t2); - tcg_temp_free(t1); return DISAS_NEXT; } @@ -3112,12 +3056,9 @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) while (r1 != r3) { r1 = (r1 + 1) & 15; tcg_gen_add_i64(o->in2, o->in2, t2); - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL); store_reg32h_i64(r1, t1); } - tcg_temp_free(t2); - tcg_temp_free(t1); - return DISAS_NEXT; } @@ -3129,7 +3070,7 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) /* Only one register to read. */ if (unlikely(r1 == r3)) { - tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ); return DISAS_NEXT; } @@ -3137,15 +3078,13 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) possible page faults. */ t1 = tcg_temp_new_i64(); t2 = tcg_temp_new_i64(); - tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ); tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15)); - tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ); tcg_gen_mov_i64(regs[r1], t1); - tcg_temp_free(t2); /* Only two registers to read. */ if (((r1 + 1) & 15) == r3) { - tcg_temp_free(t1); return DISAS_NEXT; } @@ -3155,10 +3094,8 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) while (r1 != r3) { r1 = (r1 + 1) & 15; tcg_gen_add_i64(o->in2, o->in2, t1); - tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ); } - tcg_temp_free(t1); - return DISAS_NEXT; } @@ -3180,8 +3117,6 @@ static DisasJumpType op_lpd(DisasContext *s, DisasOps *o) a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2)); tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN); tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN); - tcg_temp_free_i64(a1); - tcg_temp_free_i64(a2); /* ... and indicate that we performed them while interlocked. 
*/ gen_op_movi_cc(s, 0); @@ -3190,15 +3125,9 @@ static DisasJumpType op_lpd(DisasContext *s, DisasOps *o) static DisasJumpType op_lpq(DisasContext *s, DisasOps *o) { - if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { - gen_helper_lpq(o->out, cpu_env, o->in2); - } else if (HAVE_ATOMIC128) { - gen_helper_lpq_parallel(o->out, cpu_env, o->in2); - } else { - gen_helper_exit_atomic(cpu_env); - return DISAS_NORETURN; - } - return_low128(o->out2); + o->out_128 = tcg_temp_new_i128(); + tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s), + MO_TE | MO_128 | MO_ALIGN); return DISAS_NEXT; } @@ -3253,9 +3182,7 @@ static DisasJumpType op_mc(DisasContext *s, DisasOps *o) static DisasJumpType op_mov2(DisasContext *s, DisasOps *o) { o->out = o->in2; - o->g_out = o->g_in2; o->in2 = NULL; - o->g_in2 = false; return DISAS_NEXT; } @@ -3265,9 +3192,7 @@ static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o) TCGv ar1 = tcg_temp_new_i64(); o->out = o->in2; - o->g_out = o->g_in2; o->in2 = NULL; - o->g_in2 = false; switch (s->base.tb->flags & FLAG_MASK_ASC) { case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT: @@ -3289,8 +3214,6 @@ static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o) } tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1])); - tcg_temp_free_i64(ar1); - return DISAS_NEXT; } @@ -3298,11 +3221,8 @@ static DisasJumpType op_movx(DisasContext *s, DisasOps *o) { o->out = o->in1; o->out2 = o->in2; - o->g_out = o->g_in1; - o->g_out2 = o->g_in2; o->in1 = NULL; o->in2 = NULL; - o->g_in1 = o->g_in2 = false; return DISAS_NEXT; } @@ -3509,7 +3429,6 @@ static DisasJumpType op_maeb(DisasContext *s, DisasOps *o) { TCGv_i64 r3 = load_freg32_i64(get_field(s, r3)); gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3); - tcg_temp_free_i64(r3); return DISAS_NEXT; } @@ -3517,7 +3436,6 @@ static DisasJumpType op_madb(DisasContext *s, DisasOps *o) { TCGv_i64 r3 = load_freg(get_field(s, r3)); gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3); - tcg_temp_free_i64(r3); return DISAS_NEXT; } @@ -3525,7 +3443,6 @@ static DisasJumpType op_mseb(DisasContext *s, DisasOps *o) { TCGv_i64 r3 = load_freg32_i64(get_field(s, r3)); gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3); - tcg_temp_free_i64(r3); return DISAS_NEXT; } @@ -3533,7 +3450,6 @@ static DisasJumpType op_msdb(DisasContext *s, DisasOps *o) { TCGv_i64 r3 = load_freg(get_field(s, r3)); gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3); - tcg_temp_free_i64(r3); return DISAS_NEXT; } @@ -3544,7 +3460,6 @@ static DisasJumpType op_nabs(DisasContext *s, DisasOps *o) tcg_gen_neg_i64(n, o->in2); tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2); - tcg_temp_free_i64(n); return DISAS_NEXT; } @@ -3621,10 +3536,10 @@ static DisasJumpType op_ori(DisasContext *s, DisasOps *o) int shift = s->insn->data & 0xff; int size = s->insn->data >> 8; uint64_t mask = ((1ull << size) - 1) << shift; + TCGv_i64 t = tcg_temp_new_i64(); - assert(!o->g_in2); - tcg_gen_shli_i64(o->in2, o->in2, shift); - tcg_gen_or_i64(o->out, o->in1, o->in2); + tcg_gen_shli_i64(t, o->in2, shift); + tcg_gen_or_i64(o->out, o->in1, t); /* Produce the CC from only the bits manipulated. */ tcg_gen_andi_i64(cc_dst, o->out, mask); @@ -3804,12 +3719,15 @@ static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o) int i3 = get_field(s, i3); int i4 = get_field(s, i4); int i5 = get_field(s, i5); + TCGv_i64 orig_out; uint64_t mask; /* If this is a test-only form, arrange to discard the result. 
*/ if (i3 & 0x80) { + tcg_debug_assert(o->out != NULL); + orig_out = o->out; o->out = tcg_temp_new_i64(); - o->g_out = false; + tcg_gen_mov_i64(o->out, orig_out); } i3 &= 63; @@ -3879,9 +3797,6 @@ static DisasJumpType op_rll32(DisasContext *s, DisasOps *o) tcg_gen_extrl_i64_i32(t2, o->in2); tcg_gen_rotl_i32(to, t1, t2); tcg_gen_extu_i32_i64(o->out, to); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(to); return DISAS_NEXT; } @@ -4022,27 +3937,24 @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o) } else { tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab); } - free_compare(&c); r1 = get_field(s, r1); a = get_address(s, 0, get_field(s, b2), get_field(s, d2)); switch (s->insn->data) { case 1: /* STOCG */ - tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s)); + tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ); break; case 0: /* STOC */ - tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s)); + tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL); break; case 2: /* STOCFH */ h = tcg_temp_new_i64(); tcg_gen_shri_i64(h, regs[r1], 32); - tcg_gen_qemu_st32(h, a, get_mem_index(s)); - tcg_temp_free_i64(h); + tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL); break; default: g_assert_not_reached(); } - tcg_temp_free_i64(a); gen_set_label(lab); return DISAS_NEXT; @@ -4059,9 +3971,6 @@ static DisasJumpType op_sla(DisasContext *s, DisasOps *o) t = o->in1; } gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2); - if (s->insn->data == 31) { - tcg_temp_free_i64(t); - } tcg_gen_shl_i64(o->out, o->in1, o->in2); /* The arithmetic left shift is curious in that it does not affect the sign bit. Copy that over from the source unchanged. */ @@ -4128,8 +4037,6 @@ static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o) tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc)); tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3); tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc)); - - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -4162,7 +4069,7 @@ static DisasJumpType op_ectg(DisasContext *s, DisasOps *o) gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0); /* load the third operand into r3 before modifying anything */ - tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ); /* subtract CPU timer from first operand and store in GR0 */ gen_helper_stpt(tmp, cpu_env); @@ -4170,8 +4077,6 @@ static DisasJumpType op_ectg(DisasContext *s, DisasOps *o) /* store second operand in GR1 */ tcg_gen_mov_i64(regs[1], o->in2); - - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -4189,9 +4094,23 @@ static DisasJumpType op_sske(DisasContext *s, DisasOps *o) return DISAS_NEXT; } +static void gen_check_psw_mask(DisasContext *s) +{ + TCGv_i64 reserved = tcg_temp_new_i64(); + TCGLabel *ok = gen_new_label(); + + tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED); + tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok); + gen_program_exception(s, PGM_SPECIFICATION); + gen_set_label(ok); +} + static DisasJumpType op_ssm(DisasContext *s, DisasOps *o) { tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8); + + gen_check_psw_mask(s); + /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. 
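gen_check_psw_mask() adds a runtime guard: SSM (and STOSM/STNSM further down) can load arbitrary guest bytes into the PSW system mask, so the translated code itself must test the reserved bits and raise a specification exception. The branch-over-exception shape it uses recurs throughout this translator; here `reserved` is a temp already masked with PSW_MASK_RESERVED:

    TCGLabel *ok = gen_new_label();
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);  /* fast path */
    gen_program_exception(s, PGM_SPECIFICATION);        /* reserved bit set */
    gen_set_label(ok);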
*/ s->exit_to_mainloop = true; return DISAS_TOO_MANY; @@ -4228,12 +4147,9 @@ static DisasJumpType op_stcke(DisasContext *s, DisasOps *o) tcg_gen_shri_i64(c1, c1, 8); tcg_gen_ori_i64(c2, c2, 0x10000); tcg_gen_or_i64(c2, c2, todpr); - tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s)); + tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ); tcg_gen_addi_i64(o->in2, o->in2, 8); - tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s)); - tcg_temp_free_i64(c1); - tcg_temp_free_i64(c2); - tcg_temp_free_i64(todpr); + tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ); /* ??? We don't implement clock states. */ gen_op_movi_cc(s, 0); return DISAS_NEXT; @@ -4446,8 +4362,7 @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) restart, we'll have the wrong SYSTEM MASK in place. */ t = tcg_temp_new_i64(); tcg_gen_shri_i64(t, psw_mask, 56); - tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s)); - tcg_temp_free_i64(t); + tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB); if (s->fields.op == 0xac) { tcg_gen_andi_i64(psw_mask, psw_mask, @@ -4456,6 +4371,8 @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56); } + gen_check_psw_mask(s); + /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ s->exit_to_mainloop = true; return DISAS_TOO_MANY; @@ -4482,25 +4399,27 @@ static DisasJumpType op_stfle(DisasContext *s, DisasOps *o) static DisasJumpType op_st8(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s)); + tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB); return DISAS_NEXT; } static DisasJumpType op_st16(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s)); + tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW); return DISAS_NEXT; } static DisasJumpType op_st32(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s)); + tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s), + MO_TEUL | s->insn->data); return DISAS_NEXT; } static DisasJumpType op_st64(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s)); + tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), + MO_TEUQ | s->insn->data); return DISAS_NEXT; } @@ -4524,7 +4443,7 @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) case 0xf: /* Effectively a 32-bit store. */ tcg_gen_shri_i64(tmp, o->in1, pos); - tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s)); + tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL); break; case 0xc: @@ -4532,7 +4451,7 @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) case 0x3: /* Effectively a 16-bit store. */ tcg_gen_shri_i64(tmp, o->in1, pos); - tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s)); + tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW); break; case 0x8: @@ -4541,7 +4460,7 @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) case 0x1: /* Effectively an 8-bit store. 
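The store conversions mirror the loads. The basic correspondence, again with val/addr/mem_idx as stand-in names:

    tcg_gen_qemu_st_i64(val, addr, mem_idx, MO_UB);    /* was ..._st8  */
    tcg_gen_qemu_st_i64(val, addr, mem_idx, MO_TEUW);  /* was ..._st16 */
    tcg_gen_qemu_st_i64(val, addr, mem_idx, MO_TEUL);  /* was ..._st32 */
    tcg_gen_qemu_st_i64(val, addr, mem_idx, MO_TEUQ);  /* was ..._st64 */

One wrinkle in op_st32/op_st64 above: the MemOp is ORed with s->insn->data, which, if I read the decode-table encoding right, lets individual table entries request extra flags such as MO_ALIGN for the aligned variants without needing separate callbacks.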
*/ tcg_gen_shri_i64(tmp, o->in1, pos); - tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s)); + tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB); break; default: @@ -4550,7 +4469,7 @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) while (m3) { if (m3 & 0x8) { tcg_gen_shri_i64(tmp, o->in1, pos); - tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s)); + tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB); tcg_gen_addi_i64(o->in2, o->in2, 1); } m3 = (m3 << 1) & 0xf; @@ -4558,7 +4477,6 @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) } break; } - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -4570,11 +4488,8 @@ static DisasJumpType op_stm(DisasContext *s, DisasOps *o) TCGv_i64 tsize = tcg_constant_i64(size); while (1) { - if (size == 8) { - tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s)); - } else { - tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s)); - } + tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s), + size == 8 ? MO_TEUQ : MO_TEUL); if (r1 == r3) { break; } @@ -4595,28 +4510,23 @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) while (1) { tcg_gen_shl_i64(t, regs[r1], t32); - tcg_gen_qemu_st32(t, o->in2, get_mem_index(s)); + tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL); if (r1 == r3) { break; } tcg_gen_add_i64(o->in2, o->in2, t4); r1 = (r1 + 1) & 15; } - - tcg_temp_free_i64(t); return DISAS_NEXT; } static DisasJumpType op_stpq(DisasContext *s, DisasOps *o) { - if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { - gen_helper_stpq(cpu_env, o->in2, o->out2, o->out); - } else if (HAVE_ATOMIC128) { - gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out); - } else { - gen_helper_exit_atomic(cpu_env); - return DISAS_NORETURN; - } + TCGv_i128 t16 = tcg_temp_new_i128(); + + tcg_gen_concat_i64_i128(t16, o->out2, o->out); + tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s), + MO_TE | MO_128 | MO_ALIGN); return DISAS_NEXT; } @@ -4790,7 +4700,6 @@ static DisasJumpType op_tre(DisasContext *s, DisasOps *o) gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2); tcg_gen_extr_i128_i64(o->out2, o->out, pair); - tcg_temp_free_i128(pair); set_cc_static(s); return DISAS_NEXT; } @@ -4836,7 +4745,6 @@ static DisasJumpType op_trXX(DisasContext *s, DisasOps *o) } gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes); - tcg_temp_free_i32(tst); set_cc_static(s); return DISAS_NEXT; } @@ -4909,28 +4817,28 @@ static DisasJumpType op_xc(DisasContext *s, DisasOps *o) l++; while (l >= 8) { - tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s)); + tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ); l -= 8; if (l > 0) { tcg_gen_addi_i64(o->addr1, o->addr1, 8); } } if (l >= 4) { - tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s)); + tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL); l -= 4; if (l > 0) { tcg_gen_addi_i64(o->addr1, o->addr1, 4); } } if (l >= 2) { - tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s)); + tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW); l -= 2; if (l > 0) { tcg_gen_addi_i64(o->addr1, o->addr1, 2); } } if (l) { - tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s)); + tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB); } gen_op_movi_cc(s, 0); return DISAS_NEXT; @@ -4955,10 +4863,10 @@ static DisasJumpType op_xori(DisasContext *s, DisasOps *o) int shift = s->insn->data & 0xff; int size = s->insn->data >> 8; uint64_t mask = ((1ull << size) - 1) << shift; + TCGv_i64 t = tcg_temp_new_i64(); - assert(!o->g_in2); - tcg_gen_shli_i64(o->in2, o->in2, shift); - 
tcg_gen_xor_i64(o->out, o->in1, o->in2); + tcg_gen_shli_i64(t, o->in2, shift); + tcg_gen_xor_i64(o->out, o->in1, t); /* Produce the CC from only the bits manipulated. */ tcg_gen_andi_i64(cc_dst, o->out, mask); @@ -4989,15 +4897,14 @@ static DisasJumpType op_xi(DisasContext *s, DisasOps *o) static DisasJumpType op_zero(DisasContext *s, DisasOps *o) { - o->out = tcg_const_i64(0); + o->out = tcg_constant_i64(0); return DISAS_NEXT; } static DisasJumpType op_zero2(DisasContext *s, DisasOps *o) { - o->out = tcg_const_i64(0); + o->out = tcg_constant_i64(0); o->out2 = o->out; - o->g_out2 = true; return DISAS_NEXT; } @@ -5265,7 +5172,6 @@ static void prep_new_x(DisasContext *s, DisasOps *o) static void prep_r1(DisasContext *s, DisasOps *o) { o->out = regs[get_field(s, r1)]; - o->g_out = true; } #define SPEC_prep_r1 0 @@ -5274,7 +5180,6 @@ static void prep_r1_P(DisasContext *s, DisasOps *o) int r1 = get_field(s, r1); o->out = regs[r1]; o->out2 = regs[r1 + 1]; - o->g_out = o->g_out2 = true; } #define SPEC_prep_r1_P SPEC_r1_even @@ -5343,7 +5248,6 @@ static void wout_r1_D32(DisasContext *s, DisasOps *o) store_reg32_i64(r1 + 1, o->out); tcg_gen_shri_i64(t, o->out, 32); store_reg32_i64(r1, t); - tcg_temp_free_i64(t); } #define SPEC_wout_r1_D32 SPEC_r1_even @@ -5423,13 +5327,13 @@ static void wout_cond_e1e2(DisasContext *s, DisasOps *o) static void wout_m1_8(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s)); + tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB); } #define SPEC_wout_m1_8 0 static void wout_m1_16(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s)); + tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW); } #define SPEC_wout_m1_16 0 @@ -5443,7 +5347,7 @@ static void wout_m1_16a(DisasContext *s, DisasOps *o) static void wout_m1_32(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s)); + tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL); } #define SPEC_wout_m1_32 0 @@ -5457,7 +5361,7 @@ static void wout_m1_32a(DisasContext *s, DisasOps *o) static void wout_m1_64(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s)); + tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ); } #define SPEC_wout_m1_64 0 @@ -5471,7 +5375,7 @@ static void wout_m1_64a(DisasContext *s, DisasOps *o) static void wout_m2_32(DisasContext *s, DisasOps *o) { - tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s)); + tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL); } #define SPEC_wout_m2_32 0 @@ -5499,7 +5403,6 @@ static void in1_r1(DisasContext *s, DisasOps *o) static void in1_r1_o(DisasContext *s, DisasOps *o) { o->in1 = regs[get_field(s, r1)]; - o->g_in1 = true; } #define SPEC_in1_r1_o 0 @@ -5533,7 +5436,6 @@ static void in1_r1p1(DisasContext *s, DisasOps *o) static void in1_r1p1_o(DisasContext *s, DisasOps *o) { o->in1 = regs[get_field(s, r1) + 1]; - o->g_in1 = true; } #define SPEC_in1_r1p1_o SPEC_r1_even @@ -5588,7 +5490,6 @@ static void in1_r3(DisasContext *s, DisasOps *o) static void in1_r3_o(DisasContext *s, DisasOps *o) { o->in1 = regs[get_field(s, r3)]; - o->g_in1 = true; } #define SPEC_in1_r3_o 0 @@ -5669,7 +5570,7 @@ static void in1_m1_8u(DisasContext *s, DisasOps *o) { in1_la1(s, o); o->in1 = tcg_temp_new_i64(); - tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB); } #define SPEC_in1_m1_8u 0 @@ -5677,7 +5578,7 @@ static void 
in1_m1_16s(DisasContext *s, DisasOps *o) { in1_la1(s, o); o->in1 = tcg_temp_new_i64(); - tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW); } #define SPEC_in1_m1_16s 0 @@ -5685,7 +5586,7 @@ static void in1_m1_16u(DisasContext *s, DisasOps *o) { in1_la1(s, o); o->in1 = tcg_temp_new_i64(); - tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW); } #define SPEC_in1_m1_16u 0 @@ -5693,7 +5594,7 @@ static void in1_m1_32s(DisasContext *s, DisasOps *o) { in1_la1(s, o); o->in1 = tcg_temp_new_i64(); - tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL); } #define SPEC_in1_m1_32s 0 @@ -5701,7 +5602,7 @@ static void in1_m1_32u(DisasContext *s, DisasOps *o) { in1_la1(s, o); o->in1 = tcg_temp_new_i64(); - tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL); } #define SPEC_in1_m1_32u 0 @@ -5709,7 +5610,7 @@ static void in1_m1_64(DisasContext *s, DisasOps *o) { in1_la1(s, o); o->in1 = tcg_temp_new_i64(); - tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ); } #define SPEC_in1_m1_64 0 @@ -5719,7 +5620,6 @@ static void in1_m1_64(DisasContext *s, DisasOps *o) static void in2_r1_o(DisasContext *s, DisasOps *o) { o->in2 = regs[get_field(s, r1)]; - o->g_in2 = true; } #define SPEC_in2_r1_o 0 @@ -5754,7 +5654,6 @@ static void in2_r2(DisasContext *s, DisasOps *o) static void in2_r2_o(DisasContext *s, DisasOps *o) { o->in2 = regs[get_field(s, r2)]; - o->g_in2 = true; } #define SPEC_in2_r2_o 0 @@ -5886,9 +5785,23 @@ static void in2_a2(DisasContext *s, DisasOps *o) } #define SPEC_in2_a2 0 +static TCGv gen_ri2(DisasContext *s) +{ + TCGv ri2 = NULL; + bool is_imm; + int imm; + + disas_jdest(s, i2, is_imm, imm, ri2); + if (is_imm) { + ri2 = tcg_constant_i64(s->base.pc_next + imm * 2); + } + + return ri2; +} + static void in2_ri2(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2); + o->in2 = gen_ri2(s); } #define SPEC_in2_ri2 0 @@ -5898,7 +5811,7 @@ static void in2_sh(DisasContext *s, DisasOps *o) int d2 = get_field(s, d2); if (b2 == 0) { - o->in2 = tcg_const_i64(d2 & 0x3f); + o->in2 = tcg_constant_i64(d2 & 0x3f); } else { o->in2 = get_address(s, 0, b2, d2); tcg_gen_andi_i64(o->in2, o->in2, 0x3f); @@ -5909,35 +5822,35 @@ static void in2_sh(DisasContext *s, DisasOps *o) static void in2_m2_8u(DisasContext *s, DisasOps *o) { in2_a2(s, o); - tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB); } #define SPEC_in2_m2_8u 0 static void in2_m2_16s(DisasContext *s, DisasOps *o) { in2_a2(s, o); - tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW); } #define SPEC_in2_m2_16s 0 static void in2_m2_16u(DisasContext *s, DisasOps *o) { in2_a2(s, o); - tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW); } #define SPEC_in2_m2_16u 0 static void in2_m2_32s(DisasContext *s, DisasOps *o) { in2_a2(s, o); - tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL); } #define SPEC_in2_m2_32s 0 static void in2_m2_32u(DisasContext *s, DisasOps *o) { in2_a2(s, o); - tcg_gen_qemu_ld32u(o->in2, 
o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL); } #define SPEC_in2_m2_32u 0 @@ -5953,14 +5866,14 @@ static void in2_m2_32ua(DisasContext *s, DisasOps *o) static void in2_m2_64(DisasContext *s, DisasOps *o) { in2_a2(s, o); - tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ); } #define SPEC_in2_m2_64 0 static void in2_m2_64w(DisasContext *s, DisasOps *o) { in2_a2(s, o); - tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ); gen_addi_and_wrap_i64(s, o->in2, o->in2, 0); } #define SPEC_in2_m2_64w 0 @@ -5974,76 +5887,86 @@ static void in2_m2_64a(DisasContext *s, DisasOps *o) #define SPEC_in2_m2_64a 0 #endif +static void in2_mri2_16s(DisasContext *s, DisasOps *o) +{ + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW); +} +#define SPEC_in2_mri2_16s 0 + static void in2_mri2_16u(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW); } #define SPEC_in2_mri2_16u 0 static void in2_mri2_32s(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s), + MO_TESL | MO_ALIGN); } #define SPEC_in2_mri2_32s 0 static void in2_mri2_32u(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s), + MO_TEUL | MO_ALIGN); } #define SPEC_in2_mri2_32u 0 static void in2_mri2_64(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), + MO_TEUQ | MO_ALIGN); } #define SPEC_in2_mri2_64 0 static void in2_i2(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64(get_field(s, i2)); + o->in2 = tcg_constant_i64(get_field(s, i2)); } #define SPEC_in2_i2 0 static void in2_i2_8u(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64((uint8_t)get_field(s, i2)); + o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2)); } #define SPEC_in2_i2_8u 0 static void in2_i2_16u(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64((uint16_t)get_field(s, i2)); + o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2)); } #define SPEC_in2_i2_16u 0 static void in2_i2_32u(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64((uint32_t)get_field(s, i2)); + o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2)); } #define SPEC_in2_i2_32u 0 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o) { uint64_t i2 = (uint16_t)get_field(s, i2); - o->in2 = tcg_const_i64(i2 << s->insn->data); + o->in2 = tcg_constant_i64(i2 << s->insn->data); } #define SPEC_in2_i2_16u_shl 0 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o) { uint64_t i2 = (uint32_t)get_field(s, i2); - o->in2 = tcg_const_i64(i2 << s->insn->data); + o->in2 = tcg_constant_i64(i2 << s->insn->data); } #define SPEC_in2_i2_32u_shl 0 #ifndef CONFIG_USER_ONLY static void in2_insn(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64(s->fields.raw_insn); + o->in2 = tcg_constant_i64(s->fields.raw_insn); } #define SPEC_in2_insn 0 #endif @@ -6469,31 +6392,6 @@ static DisasJumpType translate_one(CPUS390XState *env, 
DisasContext *s) } } - /* Free any temporaries created by the helpers. */ - if (o.out && !o.g_out) { - tcg_temp_free_i64(o.out); - } - if (o.out2 && !o.g_out2) { - tcg_temp_free_i64(o.out2); - } - if (o.in1 && !o.g_in1) { - tcg_temp_free_i64(o.in1); - } - if (o.in2 && !o.g_in2) { - tcg_temp_free_i64(o.in2); - } - if (o.addr1) { - tcg_temp_free_i64(o.addr1); - } - if (o.out_128) { - tcg_temp_free_i128(o.out_128); - } - if (o.in1_128) { - tcg_temp_free_i128(o.in1_128); - } - if (o.in2_128) { - tcg_temp_free_i128(o.in2_128); - } /* io should be the last instruction in tb when icount is enabled */ if (unlikely(icount && ret == DISAS_NEXT)) { ret = DISAS_TOO_MANY; @@ -6619,7 +6517,7 @@ static const TranslatorOps s390x_tr_ops = { .disas_log = s390x_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc; diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc index 3fadc82e5c..43dfbfd03f 100644 --- a/target/s390x/tcg/translate_vx.c.inc +++ b/target/s390x/tcg/translate_vx.c.inc @@ -183,8 +183,6 @@ static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr, /* generate the final ptr by adding cpu_env */ tcg_gen_trunc_i64_ptr(ptr, tmp); tcg_gen_add_ptr(ptr, ptr, cpu_env); - - tcg_temp_free_i64(tmp); } #define gen_gvec_2(v1, v2, gen) \ @@ -272,13 +270,6 @@ static void gen_gvec128_3_i64(gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a, fn(dl, dh, al, ah, bl, bh); write_vec_element_i64(dh, d, 0, ES_64); write_vec_element_i64(dl, d, 1, ES_64); - - tcg_temp_free_i64(dh); - tcg_temp_free_i64(dl); - tcg_temp_free_i64(ah); - tcg_temp_free_i64(al); - tcg_temp_free_i64(bh); - tcg_temp_free_i64(bl); } typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, @@ -305,15 +296,6 @@ static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a, fn(dl, dh, al, ah, bl, bh, cl, ch); write_vec_element_i64(dh, d, 0, ES_64); write_vec_element_i64(dl, d, 1, ES_64); - - tcg_temp_free_i64(dh); - tcg_temp_free_i64(dl); - tcg_temp_free_i64(ah); - tcg_temp_free_i64(al); - tcg_temp_free_i64(bh); - tcg_temp_free_i64(bl); - tcg_temp_free_i64(ch); - tcg_temp_free_i64(cl); } static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, @@ -351,7 +333,6 @@ static DisasJumpType op_vge(DisasContext *s, DisasOps *o) tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); write_vec_element_i64(tmp, get_field(s, v1), enr, es); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -386,7 +367,6 @@ static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o) write_vec_element_i64(t, get_field(s, v1), 0, ES_64); tcg_gen_movi_i64(t, generate_byte_mask(i2)); write_vec_element_i64(t, get_field(s, v1), 1, ES_64); - tcg_temp_free_i64(t); } return DISAS_NEXT; } @@ -427,8 +407,6 @@ static DisasJumpType op_vl(DisasContext *s, DisasOps *o) tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ); write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); - tcg_temp_free(t0); - tcg_temp_free(t1); return DISAS_NEXT; } @@ -451,7 +429,6 @@ static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o) tmp = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); gen_gvec_dup_i64(es, get_field(s, v1), tmp); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -469,7 +446,6 @@ static DisasJumpType 
op_vlebr(DisasContext *s, DisasOps *o) tmp = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es); write_vec_element_i64(tmp, get_field(s, v1), enr, es); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -486,7 +462,6 @@ static DisasJumpType op_vlbrrep(DisasContext *s, DisasOps *o) tmp = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es); gen_gvec_dup_i64(es, get_field(s, v1), tmp); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -518,7 +493,6 @@ static DisasJumpType op_vllebrz(DisasContext *s, DisasOps *o) write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64); write_vec_element_i64(tcg_constant_i64(0), get_field(s, v1), 1, ES_64); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -572,9 +546,6 @@ static DisasJumpType op_vlbr(DisasContext *s, DisasOps *o) write: write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); - - tcg_temp_free(t0); - tcg_temp_free(t1); return DISAS_NEXT; } @@ -592,7 +563,6 @@ static DisasJumpType op_vle(DisasContext *s, DisasOps *o) tmp = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); write_vec_element_i64(tmp, get_field(s, v1), enr, es); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -647,8 +617,6 @@ static DisasJumpType op_vler(DisasContext *s, DisasOps *o) write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); - tcg_temp_free(t0); - tcg_temp_free(t1); return DISAS_NEXT; } @@ -688,8 +656,6 @@ static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o) default: g_assert_not_reached(); } - tcg_temp_free_ptr(ptr); - return DISAS_NEXT; } @@ -730,7 +696,6 @@ static DisasJumpType op_vllez(DisasContext *s, DisasOps *o) tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es); gen_gvec_dup_imm(es, get_field(s, v1), 0); write_vec_element_i64(t, get_field(s, v1), enr, es); - tcg_temp_free_i64(t); return DISAS_NEXT; } @@ -768,9 +733,6 @@ static DisasJumpType op_vlm(DisasContext *s, DisasOps *o) /* Store the last element, loaded first */ write_vec_element_i64(t0, v1, 1, ES_64); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); return DISAS_NEXT; } @@ -794,8 +756,6 @@ static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o) tcg_gen_addi_ptr(a0, cpu_env, v1_offs); gen_helper_vll(cpu_env, a0, o->addr1, bytes); - tcg_temp_free_i64(bytes); - tcg_temp_free_ptr(a0); return DISAS_NEXT; } @@ -835,8 +795,6 @@ static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o) default: g_assert_not_reached(); } - tcg_temp_free_ptr(ptr); - return DISAS_NEXT; } @@ -856,7 +814,6 @@ static DisasJumpType op_vll(DisasContext *s, DisasOps *o) tcg_gen_addi_i64(o->in2, o->in2, 1); tcg_gen_addi_ptr(a0, cpu_env, v1_offs); gen_helper_vll(cpu_env, a0, o->addr1, o->in2); - tcg_temp_free_ptr(a0); return DISAS_NEXT; } @@ -898,7 +855,6 @@ static DisasJumpType op_vmr(DisasContext *s, DisasOps *o) write_vec_element_i64(tmp, v1, dst_idx, es); } } - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -974,7 +930,6 @@ static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) } write_vec_element_i64(tmp, v1, dst_idx, dst_es); } - tcg_temp_free_i64(tmp); } else { gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]); } @@ -1004,8 +959,6 @@ static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o) read_vec_element_i64(t1, get_field(s, v3), i3, ES_64); write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); 
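The long runs of deleted tcg_temp_free_* calls in this file, and in translate.c above, all follow from one core change: translator-allocated temporaries are now released automatically once the current instruction has been translated. Freeing them by hand is dead weight, and the g_in/g_out bookkeeping that told translate_one() which values were globals (and therefore must not be freed) can go with it. In sketch form:

    TCGv_i64 t = tcg_temp_new_i64();   /* released by the translator core */
    tcg_gen_movi_i64(t, 42);
    /* no tcg_temp_free_i64(t): explicit frees are no longer required */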
return DISAS_NEXT; } @@ -1057,7 +1010,6 @@ static DisasJumpType op_vsce(DisasContext *s, DisasOps *o) read_vec_element_i64(tmp, get_field(s, v1), enr, es); tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -1098,7 +1050,6 @@ static DisasJumpType op_vseg(DisasContext *s, DisasOps *o) write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64); read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN); write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -1116,7 +1067,6 @@ static DisasJumpType op_vst(DisasContext *s, DisasOps *o) gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64); tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -1134,7 +1084,6 @@ static DisasJumpType op_vstebr(DisasContext *s, DisasOps *o) tmp = tcg_temp_new_i64(); read_vec_element_i64(tmp, get_field(s, v1), enr, es); tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -1189,9 +1138,6 @@ write: tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_LEUQ); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_LEUQ); - - tcg_temp_free(t0); - tcg_temp_free(t1); return DISAS_NEXT; } @@ -1209,7 +1155,6 @@ static DisasJumpType op_vste(DisasContext *s, DisasOps *o) tmp = tcg_temp_new_i64(); read_vec_element_i64(tmp, get_field(s, v1), enr, es); tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -1251,9 +1196,6 @@ static DisasJumpType op_vster(DisasContext *s, DisasOps *o) tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_TEUQ); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ); - - tcg_temp_free(t0); - tcg_temp_free(t1); return DISAS_NEXT; } @@ -1284,7 +1226,6 @@ static DisasJumpType op_vstm(DisasContext *s, DisasOps *o) } gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); } - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -1297,7 +1238,6 @@ static DisasJumpType op_vstl(DisasContext *s, DisasOps *o) tcg_gen_addi_i64(o->in2, o->in2, 1); tcg_gen_addi_ptr(a0, cpu_env, v1_offs); gen_helper_vstl(cpu_env, a0, o->addr1, o->in2); - tcg_temp_free_ptr(a0); return DISAS_NEXT; } @@ -1335,7 +1275,6 @@ static DisasJumpType op_vup(DisasContext *s, DisasOps *o) write_vec_element_i64(tmp, v1, dst_idx, dst_es); } } - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -1377,10 +1316,6 @@ static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es) /* Isolate and shift the carry into position */ tcg_gen_and_i64(d, d, msb_mask); tcg_gen_shri_i64(d, d, msb_bit_nr); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) @@ -1399,7 +1334,6 @@ static void gen_acc_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) tcg_gen_add_i32(t, a, b); tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b); - tcg_temp_free_i32(t); } static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) @@ -1408,7 +1342,6 @@ static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) tcg_gen_add_i64(t, a, b); tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b); - tcg_temp_free_i64(t); } static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, @@ -1422,9 +1355,6 @@ static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, tcg_gen_add2_i64(tl, 
th, th, zero, ah, zero); tcg_gen_add2_i64(tl, dl, tl, th, bh, zero); tcg_gen_mov_i64(dh, zero); - - tcg_temp_free_i64(th); - tcg_temp_free_i64(tl); } static DisasJumpType op_vacc(DisasContext *s, DisasOps *o) @@ -1460,8 +1390,6 @@ static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, tcg_gen_extract_i64(tl, cl, 0, 1); tcg_gen_add2_i64(dl, dh, al, ah, bl, bh); tcg_gen_add2_i64(dl, dh, dl, dh, tl, zero); - - tcg_temp_free_i64(tl); } static DisasJumpType op_vac(DisasContext *s, DisasOps *o) @@ -1490,9 +1418,6 @@ static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, tcg_gen_add2_i64(tl, th, th, zero, ah, zero); tcg_gen_add2_i64(tl, dl, tl, th, bh, zero); tcg_gen_mov_i64(dh, zero); - - tcg_temp_free_i64(tl); - tcg_temp_free_i64(th); } static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o) @@ -1533,9 +1458,6 @@ static void gen_avg_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) tcg_gen_addi_i64(t0, t0, 1); tcg_gen_shri_i64(t0, t0, 1); tcg_gen_extrl_i64_i32(d, t0); - - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) @@ -1550,10 +1472,6 @@ static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) tcg_gen_add2_i64(dl, dh, al, ah, bl, bh); gen_addi2_i64(dl, dh, dl, dh, 1); tcg_gen_extract2_i64(dl, dl, dh, 1); - - tcg_temp_free_i64(dh); - tcg_temp_free_i64(ah); - tcg_temp_free_i64(bh); } static DisasJumpType op_vavg(DisasContext *s, DisasOps *o) @@ -1586,9 +1504,6 @@ static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) tcg_gen_addi_i64(t0, t0, 1); tcg_gen_shri_i64(t0, t0, 1); tcg_gen_extrl_i64_i32(d, t0); - - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) @@ -1599,8 +1514,6 @@ static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) tcg_gen_add2_i64(dl, dh, al, zero, bl, zero); gen_addi2_i64(dl, dh, dl, dh, 1); tcg_gen_extract2_i64(dl, dl, dh, 1); - - tcg_temp_free_i64(dh); } static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o) @@ -1635,9 +1548,6 @@ static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o) } gen_gvec_dup_imm(ES_32, get_field(s, v1), 0); write_vec_element_i32(sum, get_field(s, v1), 1, ES_32); - - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(sum); return DISAS_NEXT; } @@ -1682,9 +1592,6 @@ static DisasJumpType op_vc(DisasContext *s, DisasOps *o) read_vec_element_i64(high, get_field(s, v1), 0, ES_64); read_vec_element_i64(low, get_field(s, v1), 1, ES_64); gen_op_update2_cc_i64(s, CC_OP_VC, low, high); - - tcg_temp_free_i64(low); - tcg_temp_free_i64(high); } return DISAS_NEXT; } @@ -1853,8 +1760,6 @@ static void gen_mal_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) tcg_gen_mul_i32(t0, a, b); tcg_gen_add_i32(d, t0, c); - - tcg_temp_free_i32(t0); } static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) @@ -1869,10 +1774,6 @@ static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) tcg_gen_mul_i64(t0, t0, t1); tcg_gen_add_i64(t0, t0, t2); tcg_gen_extrh_i64_i32(d, t0); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) @@ -1887,10 +1788,6 @@ static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) tcg_gen_mul_i64(t0, t0, t1); tcg_gen_add_i64(t0, t0, t2); tcg_gen_extrh_i64_i32(d, t0); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } static DisasJumpType op_vma(DisasContext *s, DisasOps *o) @@ -1974,7 +1871,6 @@ static void gen_mh_i32(TCGv_i32 d, 
TCGv_i32 a, TCGv_i32 b) TCGv_i32 t = tcg_temp_new_i32(); tcg_gen_muls2_i32(t, d, a, b); - tcg_temp_free_i32(t); } static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) @@ -1982,7 +1878,6 @@ static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) TCGv_i32 t = tcg_temp_new_i32(); tcg_gen_mulu2_i32(t, d, a, b); - tcg_temp_free_i32(t); } static DisasJumpType op_vm(DisasContext *s, DisasOps *o) @@ -2099,11 +1994,6 @@ static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o) /* Store final result into v1. */ write_vec_element_i64(h1, get_field(s, v1), 0, ES_64); write_vec_element_i64(l1, get_field(s, v1), 1, ES_64); - - tcg_temp_free_i64(l1); - tcg_temp_free_i64(h1); - tcg_temp_free_i64(l2); - tcg_temp_free_i64(h2); return DISAS_NEXT; } @@ -2169,8 +2059,6 @@ static void gen_rim_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c) tcg_gen_and_i32(t, t, b); tcg_gen_andc_i32(d, d, b); tcg_gen_or_i32(d, d, t); - - tcg_temp_free_i32(t); } static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c) @@ -2181,8 +2069,6 @@ static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c) tcg_gen_and_i64(t, t, b); tcg_gen_andc_i64(d, d, b); tcg_gen_or_i64(d, d, t); - - tcg_temp_free_i64(t); } static DisasJumpType op_verim(DisasContext *s, DisasOps *o) @@ -2291,7 +2177,6 @@ static DisasJumpType op_ves(DisasContext *s, DisasOps *o) default: g_assert_not_reached(); } - tcg_temp_free_i32(shift); } return DISAS_NEXT; } @@ -2311,7 +2196,6 @@ static DisasJumpType gen_vsh_by_byte(DisasContext *s, DisasOps *o, read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); tcg_gen_andi_i64(shift, shift, byte ? 0x78 : 7); gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), shift, 0, gen); - tcg_temp_free_i64(shift); } return DISAS_NEXT; } @@ -2367,10 +2251,6 @@ static DisasJumpType op_vsld(DisasContext *s, DisasOps *o) write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); return DISAS_NEXT; } @@ -2397,10 +2277,6 @@ static DisasJumpType op_vsrd(DisasContext *s, DisasOps *o) write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); return DISAS_NEXT; } @@ -2445,9 +2321,6 @@ static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, /* "invert" the result: -1 -> 0; 0 -> 1 */ tcg_gen_addi_i64(dl, th, 1); tcg_gen_mov_i64(dh, zero); - - tcg_temp_free_i64(th); - tcg_temp_free_i64(tl); } static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o) @@ -2482,8 +2355,6 @@ static void gen_sbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, tcg_gen_not_i64(tl, bl); tcg_gen_not_i64(th, bh); gen_ac2_i64(dl, dh, al, ah, tl, th, cl, ch); - tcg_temp_free_i64(tl); - tcg_temp_free_i64(th); } static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o) @@ -2508,9 +2379,6 @@ static void gen_sbcbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, tcg_gen_not_i64(tl, bl); tcg_gen_not_i64(th, bh); gen_accc2_i64(dl, dh, al, ah, tl, th, cl, ch); - - tcg_temp_free_i64(tl); - tcg_temp_free_i64(th); } static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o) @@ -2550,8 +2418,6 @@ static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o) } write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64); } - tcg_temp_free_i64(sum); - tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -2580,10 +2446,6 @@ static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o) } 
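Several of the helpers above (gen_acc2_i64, gen_ac2_i64, gen_avg_i64, gen_scbi2_i64) lean on the same trick: tcg_gen_add2_i64 computes a full double-width sum from two 64-bit pairs, so pairing each operand with a constant zero yields the carry-out for free. A minimal sketch, with a/b/lo/carry as illustrative names:

    TCGv_i64 zero = tcg_constant_i64(0);
    /* {carry:lo} = {0:a} + {0:b}; carry is the 64-bit add's carry-out */
    tcg_gen_add2_i64(lo, carry, a, zero, b, zero);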
write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64); write_vec_element_i64(suml, get_field(s, v1), 1, ES_64); - - tcg_temp_free_i64(sumh); - tcg_temp_free_i64(suml); - tcg_temp_free_i64(tmpl); return DISAS_NEXT; } @@ -2611,8 +2473,6 @@ static DisasJumpType op_vsum(DisasContext *s, DisasOps *o) } write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32); } - tcg_temp_free_i32(sum); - tcg_temp_free_i32(tmp); return DISAS_NEXT; } @@ -3399,9 +3259,6 @@ static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o) read_vec_element_i64(tmp, v2, 1, ES_64); write_vec_element_i64(tmp, v1, 1, ES_64); } - - tcg_temp_free_i64(tmp); - return DISAS_NEXT; } diff --git a/target/sh4/cpu-param.h b/target/sh4/cpu-param.h index 98a02509bb..a7cdb7edb6 100644 --- a/target/sh4/cpu-param.h +++ b/target/sh4/cpu-param.h @@ -16,6 +16,5 @@ #else # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif -#define NB_MMU_MODES 2 #endif diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index f0934b20fa..61769ffdfa 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -26,6 +26,7 @@ #include "migration/vmstate.h" #include "exec/exec-all.h" #include "fpu/softfloat-helpers.h" +#include "tcg/tcg.h" static void superh_cpu_set_pc(CPUState *cs, vaddr value) { @@ -46,7 +47,8 @@ static void superh_cpu_synchronize_from_tb(CPUState *cs, { SuperHCPU *cpu = SUPERH_CPU(cs); - cpu->env.pc = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + cpu->env.pc = tb->pc; cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK; } @@ -73,7 +75,7 @@ static bool superh_io_recompile_replay_branch(CPUState *cs, CPUSH4State *env = &cpu->env; if ((env->flags & (TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND)) - && env->pc != tb_pc(tb)) { + && !(cs->tcg_cflags & CF_PCREL) && env->pc != tb->pc) { env->pc -= 2; env->flags &= ~(TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND); return true; diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h index 727b829598..02bfd612ea 100644 --- a/target/sh4/cpu.h +++ b/target/sh4/cpu.h @@ -214,7 +214,6 @@ struct ArchCPU { void superh_cpu_dump_state(CPUState *cpu, FILE *f, int flags); -hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int superh_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int superh_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, @@ -225,6 +224,7 @@ void sh4_translate_init(void); void sh4_cpu_list(void); #if !defined(CONFIG_USER_ONLY) +hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); diff --git a/target/sh4/gdbstub.c b/target/sh4/gdbstub.c index 3488f68e32..d8e199fc06 100644 --- a/target/sh4/gdbstub.c +++ b/target/sh4/gdbstub.c @@ -19,7 +19,7 @@ */ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" /* Hint: Use "set architecture sh4" in GDB to see fpu registers */ /* FIXME: We should use XML for this. */ diff --git a/target/sh4/translate.c b/target/sh4/translate.c index 7db3468b01..d9accfa1e7 100644 --- a/target/sh4/translate.c +++ b/target/sh4/translate.c @@ -17,8 +17,6 @@ * License along with this library; if not, see . 
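The superh_cpu_dump_state() hunk just below is a genuine bug fix rather than API churn: the function receives an explicit FILE *f, but three of its printouts went through qemu_printf(), which always writes to the default stream, so register dumps requested on another stream landed in the wrong place. The shape of the fix:

    qemu_printf("r%d=0x%08x\n", i, env->gregs[i]);      /* before: f ignored */
    qemu_fprintf(f, "r%d=0x%08x\n", i, env->gregs[i]);  /* after: honours f */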
*/ -#define DEBUG_DISAS - #include "qemu/osdep.h" #include "cpu.h" #include "disas/disas.h" @@ -171,16 +169,16 @@ void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags) qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n", env->sgr, env->dbr, env->delayed_pc, env->fpul); for (i = 0; i < 24; i += 4) { - qemu_printf("r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n", - i, env->gregs[i], i + 1, env->gregs[i + 1], - i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]); + qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n", + i, env->gregs[i], i + 1, env->gregs[i + 1], + i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]); } if (env->flags & TB_FLAG_DELAY_SLOT) { - qemu_printf("in delay slot (delayed_pc=0x%08x)\n", - env->delayed_pc); + qemu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n", + env->delayed_pc); } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) { - qemu_printf("in conditional delay slot (delayed_pc=0x%08x)\n", - env->delayed_pc); + qemu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n", + env->delayed_pc); } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) { qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n", env->delayed_pc); @@ -196,7 +194,6 @@ static void gen_read_sr(TCGv dst) tcg_gen_or_i32(dst, dst, t0); tcg_gen_shli_i32(t0, cpu_sr_t, SR_T); tcg_gen_or_i32(dst, cpu_sr, t0); - tcg_temp_free_i32(t0); } static void gen_write_sr(TCGv src) @@ -499,7 +496,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4); tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0x5000: /* mov.l @(disp,Rm),Rn */ @@ -508,7 +504,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4); tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0xe000: /* mov #imm,Rn */ @@ -529,16 +524,16 @@ static void _decode_opc(DisasContext * ctx) return; case 0x9000: /* mov.w @(disp,PC),Rn */ { - TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2); - tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW); - tcg_temp_free(addr); + TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2); + tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, + MO_TESW | MO_ALIGN); } return; case 0xd000: /* mov.l @(disp,PC),Rn */ { - TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3); - tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL); - tcg_temp_free(addr); + TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3); + tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, + MO_TESL | MO_ALIGN); } return; case 0x7000: /* add #imm,Rn */ @@ -590,7 +585,6 @@ static void _decode_opc(DisasContext * ctx) /* might cause re-execution */ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB); tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */ - tcg_temp_free(addr); } return; case 0x2005: /* mov.w Rm,@-Rn */ @@ -600,7 +594,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW | UNALIGN(ctx)); tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(addr); } return; case 0x2006: /* mov.l Rm,@-Rn */ @@ -610,7 +603,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL | UNALIGN(ctx)); tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(addr); } return; case 0x6004: /* mov.b @Rm+,Rn */ @@ -635,7 +627,6 @@ static void _decode_opc(DisasContext * ctx) 
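Two orthogonal changes are interleaved in these SH-4 hunks. First, tcg_const_i32() (a freshly allocated, writable temp preloaded with a value, which the caller had to free) becomes tcg_constant_i32() (an interned, read-only constant with nothing to free). Second, accesses the architecture requires to be naturally aligned, such as the PC-relative mov.w/mov.l loads, now pass MO_ALIGN so an unaligned guest address faults instead of being silently honoured. Combined, the mov.w @(disp,PC),Rn case becomes:

    TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
    /* constant: never written, never freed */
    tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW | MO_ALIGN);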
TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB); - tcg_temp_free(addr); } return; case 0x0005: /* mov.w Rm,@(R0,Rn) */ @@ -644,7 +635,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0x0006: /* mov.l Rm,@(R0,Rn) */ @@ -653,7 +643,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0x000c: /* mov.b @(R0,Rm),Rn */ @@ -661,7 +650,6 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB); - tcg_temp_free(addr); } return; case 0x000d: /* mov.w @(R0,Rm),Rn */ @@ -670,7 +658,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0x000e: /* mov.l @(R0,Rm),Rn */ @@ -679,7 +666,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0x6008: /* swap.b Rm,Rn */ @@ -687,7 +673,6 @@ static void _decode_opc(DisasContext * ctx) TCGv low = tcg_temp_new(); tcg_gen_bswap16_i32(low, REG(B7_4), 0); tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16); - tcg_temp_free(low); } return; case 0x6009: /* swap.w Rm,Rn */ @@ -701,8 +686,6 @@ static void _decode_opc(DisasContext * ctx) low = tcg_temp_new(); tcg_gen_shri_i32(low, REG(B11_8), 16); tcg_gen_or_i32(REG(B11_8), high, low); - tcg_temp_free(low); - tcg_temp_free(high); } return; case 0x300c: /* add Rm,Rn */ @@ -711,13 +694,11 @@ static void _decode_opc(DisasContext * ctx) case 0x300e: /* addc Rm,Rn */ { TCGv t0, t1; - t0 = tcg_const_tl(0); + t0 = tcg_constant_tl(0); t1 = tcg_temp_new(); tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0); tcg_gen_add2_i32(REG(B11_8), cpu_sr_t, REG(B11_8), t0, t1, cpu_sr_t); - tcg_temp_free(t0); - tcg_temp_free(t1); } return; case 0x300f: /* addv Rm,Rn */ @@ -730,11 +711,8 @@ static void _decode_opc(DisasContext * ctx) t2 = tcg_temp_new(); tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8)); tcg_gen_andc_i32(cpu_sr_t, t1, t2); - tcg_temp_free(t2); tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31); - tcg_temp_free(t1); tcg_gen_mov_i32(REG(B7_4), t0); - tcg_temp_free(t0); } return; case 0x2009: /* and Rm,Rn */ @@ -764,8 +742,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_andc_i32(cmp1, cmp1, cmp2); tcg_gen_andi_i32(cmp1, cmp1, 0x80808080); tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0); - tcg_temp_free(cmp2); - tcg_temp_free(cmp1); } return; case 0x2007: /* div0s Rm,Rn */ @@ -778,7 +754,7 @@ static void _decode_opc(DisasContext * ctx) TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); - TCGv zero = tcg_const_i32(0); + TCGv zero = tcg_constant_i32(0); /* shift left arg1, saving the bit being pushed out and inserting T on the right */ @@ -801,11 +777,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_xor_i32(t1, t1, t0); tcg_gen_xori_i32(cpu_sr_t, t1, 1); tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1); - - tcg_temp_free(zero); - tcg_temp_free(t2); - tcg_temp_free(t1); - tcg_temp_free(t0); } return; case 
0x300d: /* dmuls.l Rm,Rn */ @@ -830,12 +801,12 @@ static void _decode_opc(DisasContext * ctx) { TCGv arg0, arg1; arg0 = tcg_temp_new(); - tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, + MO_TESL | MO_ALIGN); arg1 = tcg_temp_new(); - tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, + MO_TESL | MO_ALIGN); gen_helper_macl(cpu_env, arg0, arg1); - tcg_temp_free(arg1); - tcg_temp_free(arg0); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); } @@ -844,12 +815,12 @@ static void _decode_opc(DisasContext * ctx) { TCGv arg0, arg1; arg0 = tcg_temp_new(); - tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, + MO_TESL | MO_ALIGN); arg1 = tcg_temp_new(); - tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, + MO_TESL | MO_ALIGN); gen_helper_macw(cpu_env, arg0, arg1); - tcg_temp_free(arg1); - tcg_temp_free(arg0); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2); } @@ -865,8 +836,6 @@ static void _decode_opc(DisasContext * ctx) arg1 = tcg_temp_new(); tcg_gen_ext16s_i32(arg1, REG(B11_8)); tcg_gen_mul_i32(cpu_macl, arg0, arg1); - tcg_temp_free(arg1); - tcg_temp_free(arg0); } return; case 0x200e: /* mulu.w Rm,Rn */ @@ -877,8 +846,6 @@ static void _decode_opc(DisasContext * ctx) arg1 = tcg_temp_new(); tcg_gen_ext16u_i32(arg1, REG(B11_8)); tcg_gen_mul_i32(cpu_macl, arg0, arg1); - tcg_temp_free(arg1); - tcg_temp_free(arg0); } return; case 0x600b: /* neg Rm,Rn */ @@ -886,13 +853,12 @@ static void _decode_opc(DisasContext * ctx) return; case 0x600a: /* negc Rm,Rn */ { - TCGv t0 = tcg_const_i32(0); + TCGv t0 = tcg_constant_i32(0); tcg_gen_add2_i32(REG(B11_8), cpu_sr_t, REG(B7_4), t0, cpu_sr_t, t0); tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t, t0, t0, REG(B11_8), cpu_sr_t); tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1); - tcg_temp_free(t0); } return; case 0x6007: /* not Rm,Rn */ @@ -921,10 +887,6 @@ static void _decode_opc(DisasContext * ctx) /* select between the two cases */ tcg_gen_movi_i32(t0, 0); tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } return; case 0x400d: /* shld Rm,Rn */ @@ -947,10 +909,6 @@ static void _decode_opc(DisasContext * ctx) /* select between the two cases */ tcg_gen_movi_i32(t0, 0); tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } return; case 0x3008: /* sub Rm,Rn */ @@ -959,14 +917,12 @@ static void _decode_opc(DisasContext * ctx) case 0x300a: /* subc Rm,Rn */ { TCGv t0, t1; - t0 = tcg_const_tl(0); + t0 = tcg_constant_tl(0); t1 = tcg_temp_new(); tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0); tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t, REG(B11_8), t0, t1, cpu_sr_t); tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1); - tcg_temp_free(t0); - tcg_temp_free(t1); } return; case 0x300b: /* subv Rm,Rn */ @@ -979,11 +935,8 @@ static void _decode_opc(DisasContext * ctx) t2 = tcg_temp_new(); tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4)); tcg_gen_and_i32(t1, t1, t2); - tcg_temp_free(t2); tcg_gen_shri_i32(cpu_sr_t, t1, 31); - tcg_temp_free(t1); tcg_gen_mov_i32(REG(B11_8), t0); - tcg_temp_free(t0); } return; case 0x2008: /* tst Rm,Rn */ @@ -991,7 +944,6 @@ static void _decode_opc(DisasContext * ctx) TCGv val = tcg_temp_new(); 
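The addc/negc/subc rewrites above share one idea: model the T bit as a carry through the double-word add/sub primitives instead of helper calls. For addc Rm,Rn (Rn = Rn + Rm + T, with T receiving the carry-out), two chained adds collect the carries in the high halves; `zero` here is the tcg_constant_tl(0) the code names t0:

    TCGv zero = tcg_constant_tl(0);
    TCGv t1 = tcg_temp_new();
    /* t1 = Rm + T; the first carry lands in cpu_sr_t */
    tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, zero, REG(B7_4), zero);
    /* Rn = Rn + t1; the second carry accumulates into T */
    tcg_gen_add2_i32(REG(B11_8), cpu_sr_t, REG(B11_8), zero, t1, cpu_sr_t);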
tcg_gen_and_i32(val, REG(B7_4), REG(B11_8)); tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0); - tcg_temp_free(val); } return; case 0x200a: /* xor Rm,Rn */ @@ -1013,33 +965,36 @@ static void _decode_opc(DisasContext * ctx) if (ctx->tbflags & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp, XHACK(B7_4)); - tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEUQ); - tcg_temp_free_i64(fp); + tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, + MO_TEUQ | MO_ALIGN); } else { - tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, + MO_TEUL | MO_ALIGN); } return; case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->tbflags & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ); + tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, + MO_TEUQ | MO_ALIGN); gen_store_fpr64(ctx, fp, XHACK(B11_8)); - tcg_temp_free_i64(fp); } else { - tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL); + tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, + MO_TEUL | MO_ALIGN); } return; case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->tbflags & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ); + tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, + MO_TEUQ | MO_ALIGN); gen_store_fpr64(ctx, fp, XHACK(B11_8)); - tcg_temp_free_i64(fp); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8); } else { - tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL); + tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, + MO_TEUL | MO_ALIGN); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); } return; @@ -1051,14 +1006,14 @@ static void _decode_opc(DisasContext * ctx) TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp, XHACK(B7_4)); tcg_gen_subi_i32(addr, REG(B11_8), 8); - tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ); - tcg_temp_free_i64(fp); + tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, + MO_TEUQ | MO_ALIGN); } else { tcg_gen_subi_i32(addr, REG(B11_8), 4); - tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, + MO_TEUL | MO_ALIGN); } tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(addr); } return; case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */ @@ -1068,13 +1023,13 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_add_i32(addr, REG(B7_4), REG(0)); if (ctx->tbflags & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEUQ); + tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, + MO_TEUQ | MO_ALIGN); gen_store_fpr64(ctx, fp, XHACK(B11_8)); - tcg_temp_free_i64(fp); } else { - tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL); + tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, + MO_TEUL | MO_ALIGN); } - tcg_temp_free(addr); } return; case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */ @@ -1085,12 +1040,12 @@ static void _decode_opc(DisasContext * ctx) if (ctx->tbflags & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp, XHACK(B7_4)); - tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ); - tcg_temp_free_i64(fp); + tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, + MO_TEUQ | MO_ALIGN); } else { - tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, + MO_TEUL | MO_ALIGN); } - tcg_temp_free(addr); } return; case 0xf000: /* fadd Rm,Rn - FPSCR: 
R[PR,Enable.O/U/I]/W[Cause,Flag] */ @@ -1132,8 +1087,6 @@ static void _decode_opc(DisasContext * ctx) return; } gen_store_fpr64(ctx, fp0, B11_8); - tcg_temp_free_i64(fp0); - tcg_temp_free_i64(fp1); } else { switch (ctx->opcode & 0xf00f) { case 0xf000: /* fadd Rm,Rn */ @@ -1185,8 +1138,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB); tcg_gen_andi_i32(val, val, B7_0); tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB); - tcg_temp_free(val); - tcg_temp_free(addr); } return; case 0x8b00: /* bf label */ @@ -1217,23 +1168,20 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0); tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB); - tcg_temp_free(addr); } return; case 0xc500: /* mov.w @(disp,GBR),R0 */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2); - tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW); - tcg_temp_free(addr); + tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | MO_ALIGN); } return; case 0xc600: /* mov.l @(disp,GBR),R0 */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4); - tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL); - tcg_temp_free(addr); + tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL | MO_ALIGN); } return; case 0xc000: /* mov.b R0,@(disp,GBR) */ @@ -1241,23 +1189,20 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0); tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB); - tcg_temp_free(addr); } return; case 0xc100: /* mov.w R0,@(disp,GBR) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2); - tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW); - tcg_temp_free(addr); + tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | MO_ALIGN); } return; case 0xc200: /* mov.l R0,@(disp,GBR) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4); - tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL); - tcg_temp_free(addr); + tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL | MO_ALIGN); } return; case 0x8000: /* mov.b R0,@(disp,Rn) */ @@ -1265,7 +1210,6 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0); tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB); - tcg_temp_free(addr); } return; case 0x8100: /* mov.w R0,@(disp,Rn) */ @@ -1274,7 +1218,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2); tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0x8400: /* mov.b @(disp,Rn),R0 */ @@ -1282,7 +1225,6 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0); tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB); - tcg_temp_free(addr); } return; case 0x8500: /* mov.w @(disp,Rn),R0 */ @@ -1291,7 +1233,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2); tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0xc700: /* mova @(disp,PC),R0 */ @@ -1310,8 +1251,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB); tcg_gen_ori_i32(val, val, B7_0); tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB); - tcg_temp_free(val); - tcg_temp_free(addr); } return; case 0xc300: /* trapa #imm */ @@ -1319,9 +1258,8 @@ static void 
_decode_opc(DisasContext * ctx) TCGv imm; CHECK_NOT_DELAY_SLOT gen_save_cpu_state(ctx, true); - imm = tcg_const_i32(B7_0); + imm = tcg_constant_i32(B7_0); gen_helper_trapa(cpu_env, imm); - tcg_temp_free(imm); ctx->base.is_jmp = DISAS_NORETURN; } return; @@ -1330,7 +1268,6 @@ static void _decode_opc(DisasContext * ctx) TCGv val = tcg_temp_new(); tcg_gen_andi_i32(val, REG(0), B7_0); tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0); - tcg_temp_free(val); } return; case 0xcc00: /* tst.b #imm,@(R0,GBR) */ @@ -1340,7 +1277,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB); tcg_gen_andi_i32(val, val, B7_0); tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0); - tcg_temp_free(val); } return; case 0xca00: /* xor #imm,R0 */ @@ -1355,8 +1291,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB); tcg_gen_xori_i32(val, val, B7_0); tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB); - tcg_temp_free(val); - tcg_temp_free(addr); } return; } @@ -1368,7 +1302,8 @@ static void _decode_opc(DisasContext * ctx) return; case 0x4087: /* ldc.l @Rm+,Rn_BANK */ CHECK_PRIVILEGED - tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, + MO_TESL | MO_ALIGN); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); return; case 0x0082: /* stc Rm_BANK,Rn */ @@ -1380,9 +1315,9 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 4); - tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, + MO_TEUL | MO_ALIGN); tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(addr); } return; } @@ -1430,7 +1365,6 @@ static void _decode_opc(DisasContext * ctx) TCGv val = tcg_temp_new(); tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3); gen_write_sr(val); - tcg_temp_free(val); ctx->base.is_jmp = DISAS_STOP; } return; @@ -1438,10 +1372,10 @@ static void _decode_opc(DisasContext * ctx) CHECK_PRIVILEGED { TCGv val = tcg_temp_new(); - tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, + MO_TESL | MO_ALIGN); tcg_gen_andi_i32(val, val, 0x700083f3); gen_write_sr(val); - tcg_temp_free(val); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); ctx->base.is_jmp = DISAS_STOP; } @@ -1457,10 +1391,8 @@ static void _decode_opc(DisasContext * ctx) TCGv val = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 4); gen_read_sr(val); - tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN); tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(val); - tcg_temp_free(addr); } return; #define LD(reg,ldnum,ldpnum,prechk) \ @@ -1470,7 +1402,8 @@ static void _decode_opc(DisasContext * ctx) return; \ case ldpnum: \ prechk \ - tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \ + tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, \ + MO_TESL | MO_ALIGN); \ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \ return; #define ST(reg,stnum,stpnum,prechk) \ @@ -1483,9 +1416,9 @@ static void _decode_opc(DisasContext * ctx) { \ TCGv addr = tcg_temp_new(); \ tcg_gen_subi_i32(addr, REG(B11_8), 4); \ - tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \ + tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, \ + MO_TEUL | MO_ALIGN); \ tcg_gen_mov_i32(REG(B11_8), addr); \ - tcg_temp_free(addr); \ } \ return; #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \ @@ 
-1511,10 +1444,10 @@ static void _decode_opc(DisasContext * ctx) CHECK_FPU_ENABLED { TCGv addr = tcg_temp_new(); - tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, + MO_TESL | MO_ALIGN); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); gen_helper_ld_fpscr(cpu_env, addr); - tcg_temp_free(addr); ctx->base.is_jmp = DISAS_STOP; } return; @@ -1530,19 +1463,18 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff); addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 4); - tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN); tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(addr); - tcg_temp_free(val); } return; case 0x00c3: /* movca.l R0,@Rm */ { TCGv val = tcg_temp_new(); - tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL); + tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, + MO_TEUL | MO_ALIGN); gen_helper_movcal(cpu_env, REG(B11_8), val); - tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL); - tcg_temp_free(val); + tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, + MO_TEUL | MO_ALIGN); } ctx->has_movcal = 1; return; @@ -1584,12 +1516,13 @@ static void _decode_opc(DisasContext * ctx) cpu_lock_addr, fail); tmp = tcg_temp_new(); tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value, - REG(0), ctx->memidx, MO_TEUL); + REG(0), ctx->memidx, + MO_TEUL | MO_ALIGN); tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value); - tcg_temp_free(tmp); } else { tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail); - tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL); + tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, + MO_TEUL | MO_ALIGN); tcg_gen_movi_i32(cpu_sr_t, 1); } tcg_gen_br(done); @@ -1614,12 +1547,13 @@ static void _decode_opc(DisasContext * ctx) if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) { TCGv tmp = tcg_temp_new(); tcg_gen_mov_i32(tmp, REG(B11_8)); - tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, + MO_TESL | MO_ALIGN); tcg_gen_mov_i32(cpu_lock_value, REG(0)); tcg_gen_mov_i32(cpu_lock_addr, tmp); - tcg_temp_free(tmp); } else { - tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL); + tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, + MO_TESL | MO_ALIGN); tcg_gen_movi_i32(cpu_lock_addr, 0); } return; @@ -1653,7 +1587,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31); tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1); tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp); - tcg_temp_free(tmp); } return; case 0x4025: /* rotcr Rn */ @@ -1663,7 +1596,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1); tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1); tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp); - tcg_temp_free(tmp); } return; case 0x4004: /* rotl Rn */ @@ -1706,13 +1638,9 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16); return; case 0x401b: /* tas.b @Rn */ - { - TCGv val = tcg_const_i32(0x80); - tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val, - ctx->memidx, MO_UB); - tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0); - tcg_temp_free(val); - } + tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8), + tcg_constant_i32(0x80), ctx->memidx, MO_UB); + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0); return; case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */ CHECK_FPU_ENABLED @@ -1732,7 
+1660,6 @@ static void _decode_opc(DisasContext * ctx) fp = tcg_temp_new_i64(); gen_helper_float_DT(fp, cpu_env, cpu_fpul); gen_store_fpr64(ctx, fp, B11_8); - tcg_temp_free_i64(fp); } else { gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul); @@ -1748,7 +1675,6 @@ static void _decode_opc(DisasContext * ctx) fp = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp, B11_8); gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp); - tcg_temp_free_i64(fp); } else { gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8)); @@ -1772,7 +1698,6 @@ static void _decode_opc(DisasContext * ctx) gen_load_fpr64(ctx, fp, B11_8); gen_helper_fsqrt_DT(fp, cpu_env, fp); gen_store_fpr64(ctx, fp, B11_8); - tcg_temp_free_i64(fp); } else { gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8)); } @@ -1798,7 +1723,6 @@ static void _decode_opc(DisasContext * ctx) TCGv_i64 fp = tcg_temp_new_i64(); gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul); gen_store_fpr64(ctx, fp, B11_8); - tcg_temp_free_i64(fp); } return; case 0xf0bd: /* fcnvds DRn,FPUL */ @@ -1807,18 +1731,15 @@ static void _decode_opc(DisasContext * ctx) TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp, B11_8); gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp); - tcg_temp_free_i64(fp); } return; case 0xf0ed: /* fipr FVm,FVn */ CHECK_FPU_ENABLED CHECK_FPSCR_PR_1 { - TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3); - TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3); + TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3); + TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3); gen_helper_fipr(cpu_env, m, n); - tcg_temp_free(m); - tcg_temp_free(n); return; } break; @@ -1829,9 +1750,8 @@ static void _decode_opc(DisasContext * ctx) if ((ctx->opcode & 0x0300) != 0x0100) { goto do_illegal; } - TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3); + TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3); gen_helper_ftrv(cpu_env, n); - tcg_temp_free(n); return; } break; @@ -2034,7 +1954,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) } op_dst = B11_8; op_opc = INDEX_op_xor_i32; - op_arg = tcg_const_i32(-1); + op_arg = tcg_constant_i32(-1); break; case 0x7000 ... 0x700f: /* add #imm,Rn */ @@ -2042,7 +1962,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) goto fail; } op_opc = INDEX_op_add_i32; - op_arg = tcg_const_i32(B7_0s); + op_arg = tcg_constant_i32(B7_0s); break; case 0x3000: /* cmp/eq Rm,Rn */ @@ -2088,7 +2008,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) goto fail; } op_opc = INDEX_op_setcond_i32; - op_arg = tcg_const_i32(0); + op_arg = tcg_constant_i32(0); NEXT_INSN; if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */ @@ -2220,11 +2140,6 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) g_assert_not_reached(); } - /* If op_src is not a valid register, then op_arg was a constant. */ - if (op_src < 0 && op_arg) { - tcg_temp_free_i32(op_arg); - } - /* The entire region has been translated. 
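The deletion just above is the pattern for this whole patch: tcg_const_*() produced a mutable temporary that the caller had to free, while tcg_constant_*() returns a pooled, immutable value with no ownership to manage. A minimal sketch of the new idiom, assuming a QEMU translator build; gen_trapa_example is an illustrative name, and gen_helper_trapa is the sh4 helper already used in this file:

#include "tcg/tcg-op.h"                 /* assumption: QEMU translator TU */

static void gen_trapa_example(TCGv_env env, int imm)
{
    /* tcg_constant_i32() yields a pooled, read-only value: never write
     * to it and never free it; the constant pool owns its lifetime */
    gen_helper_trapa(env, tcg_constant_i32(imm));
}

Ordinary tcg_temp_new_*() temporaries are likewise reclaimed automatically at the end of translation now, which is why these hunks drop the tcg_temp_free_*() calls wholesale rather than moving them.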
*/ ctx->envflags &= ~TB_FLAG_GUSA_MASK; ctx->base.pc_next = pc_end; @@ -2374,7 +2289,7 @@ static const TranslatorOps sh4_tr_ops = { .disas_log = sh4_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; diff --git a/target/sparc/cpu-param.h b/target/sparc/cpu-param.h index 72ddc4a34f..cb11980404 100644 --- a/target/sparc/cpu-param.h +++ b/target/sparc/cpu-param.h @@ -16,13 +16,11 @@ # else # define TARGET_VIRT_ADDR_SPACE_BITS 44 # endif -# define NB_MMU_MODES 6 #else # define TARGET_LONG_BITS 32 # define TARGET_PAGE_BITS 12 /* 4k */ # define TARGET_PHYS_ADDR_SPACE_BITS 36 # define TARGET_VIRT_ADDR_SPACE_BITS 32 -# define NB_MMU_MODES 3 #endif #endif diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c index 1734ef8dc6..e329a7aece 100644 --- a/target/sparc/cpu.c +++ b/target/sparc/cpu.c @@ -25,6 +25,7 @@ #include "exec/exec-all.h" #include "hw/qdev-properties.h" #include "qapi/visitor.h" +#include "tcg/tcg.h" //#define DEBUG_FEATURES @@ -707,7 +708,8 @@ static void sparc_cpu_synchronize_from_tb(CPUState *cs, { SPARCCPU *cpu = SPARC_CPU(cs); - cpu->env.pc = tb_pc(tb); + tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); + cpu->env.pc = tb->pc; cpu->env.npc = tb->cs_base; } diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h index e478c5eb16..fb98843dad 100644 --- a/target/sparc/cpu.h +++ b/target/sparc/cpu.h @@ -197,8 +197,7 @@ enum { #define FSR_FTT2 (1ULL << 16) #define FSR_FTT1 (1ULL << 15) #define FSR_FTT0 (1ULL << 14) -//gcc warns about constant overflow for ~FSR_FTT_MASK -//#define FSR_FTT_MASK (FSR_FTT2 | FSR_FTT1 | FSR_FTT0) +#define FSR_FTT_MASK (FSR_FTT2 | FSR_FTT1 | FSR_FTT0) #ifdef TARGET_SPARC64 #define FSR_FTT_NMASK 0xfffffffffffe3fffULL #define FSR_FTT_CEXC_NMASK 0xfffffffffffe3fe0ULL @@ -569,10 +568,11 @@ struct ArchCPU { #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_sparc_cpu; + +hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); #endif void sparc_cpu_do_interrupt(CPUState *cpu); -hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int sparc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, diff --git a/target/sparc/gdbstub.c b/target/sparc/gdbstub.c index 5d1e808e8c..a1c8fdc4d5 100644 --- a/target/sparc/gdbstub.c +++ b/target/sparc/gdbstub.c @@ -19,7 +19,7 @@ */ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #ifdef TARGET_ABI32 #define gdb_get_rega(buf, val) gdb_get_reg32(buf, val) diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c index a53580d9e4..981a47d8bb 100644 --- a/target/sparc/ldst_helper.c +++ b/target/sparc/ldst_helper.c @@ -593,6 +593,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, #if defined(DEBUG_MXCC) || defined(DEBUG_ASI) uint32_t last_addr = addr; #endif + MemOpIdx oi; do_check_align(env, addr, size - 1, GETPC()); switch (asi) { @@ -692,19 +693,20 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */ break; case ASI_KERNELTXT: /* Supervisor code access */ + oi = make_memop_idx(memop, cpu_mmu_index(env, true)); switch (size) { case 1: - ret = cpu_ldub_code(env, addr); + ret = cpu_ldb_code_mmu(env, addr, oi, GETPC()); break; case 2: - ret 
= cpu_lduw_code(env, addr); + ret = cpu_ldw_code_mmu(env, addr, oi, GETPC()); break; default: case 4: - ret = cpu_ldl_code(env, addr); + ret = cpu_ldl_code_mmu(env, addr, oi, GETPC()); break; case 8: - ret = cpu_ldq_code(env, addr); + ret = cpu_ldq_code_mmu(env, addr, oi, GETPC()); break; } break; @@ -1332,25 +1334,13 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, ret = cpu_ldb_mmu(env, addr, oi, GETPC()); break; case 2: - if (asi & 8) { - ret = cpu_ldw_le_mmu(env, addr, oi, GETPC()); - } else { - ret = cpu_ldw_be_mmu(env, addr, oi, GETPC()); - } + ret = cpu_ldw_mmu(env, addr, oi, GETPC()); break; case 4: - if (asi & 8) { - ret = cpu_ldl_le_mmu(env, addr, oi, GETPC()); - } else { - ret = cpu_ldl_be_mmu(env, addr, oi, GETPC()); - } + ret = cpu_ldl_mmu(env, addr, oi, GETPC()); break; case 8: - if (asi & 8) { - ret = cpu_ldq_le_mmu(env, addr, oi, GETPC()); - } else { - ret = cpu_ldq_be_mmu(env, addr, oi, GETPC()); - } + ret = cpu_ldq_mmu(env, addr, oi, GETPC()); break; default: g_assert_not_reached(); diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c index 158ec2ae8f..453498c670 100644 --- a/target/sparc/mmu_helper.c +++ b/target/sparc/mmu_helper.c @@ -64,10 +64,9 @@ static const int perm_table[2][8] = { } }; -static int get_physical_address(CPUSPARCState *env, hwaddr *physical, - int *prot, int *access_index, MemTxAttrs *attrs, - target_ulong address, int rw, int mmu_idx, - target_ulong *page_size) +static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full, + int *access_index, target_ulong address, + int rw, int mmu_idx) { int access_perms = 0; hwaddr pde_ptr; @@ -80,20 +79,20 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, is_user = mmu_idx == MMU_USER_IDX; if (mmu_idx == MMU_PHYS_IDX) { - *page_size = TARGET_PAGE_SIZE; + full->lg_page_size = TARGET_PAGE_BITS; /* Boot mode: instruction fetches are taken from PROM */ if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) { - *physical = env->prom_addr | (address & 0x7ffffULL); - *prot = PAGE_READ | PAGE_EXEC; + full->phys_addr = env->prom_addr | (address & 0x7ffffULL); + full->prot = PAGE_READ | PAGE_EXEC; return 0; } - *physical = address; - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + full->phys_addr = address; + full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return 0; } *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1); - *physical = 0xffffffffffff0000ULL; + full->phys_addr = 0xffffffffffff0000ULL; /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ /* Context base + context number */ @@ -157,16 +156,17 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, case 2: /* L3 PTE */ page_offset = 0; } - *page_size = TARGET_PAGE_SIZE; + full->lg_page_size = TARGET_PAGE_BITS; break; case 2: /* L2 PTE */ page_offset = address & 0x3f000; - *page_size = 0x40000; + full->lg_page_size = 18; } break; case 2: /* L1 PTE */ page_offset = address & 0xfff000; - *page_size = 0x1000000; + full->lg_page_size = 24; + break; } } @@ -188,16 +188,16 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, } /* the page can be put in the TLB */ - *prot = perm_table[is_user][access_perms]; + full->prot = perm_table[is_user][access_perms]; if (!(pde & PG_MODIFIED_MASK)) { /* only set write access if already dirty... 
otherwise wait for dirty access */ - *prot &= ~PAGE_WRITE; + full->prot &= ~PAGE_WRITE; } /* Even if large ptes, we map only one 4KB page in the cache to avoid filling it too fast */ - *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset; + full->phys_addr = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset; return error_code; } @@ -208,11 +208,9 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; - hwaddr paddr; + CPUTLBEntryFull full = {}; target_ulong vaddr; - target_ulong page_size; - int error_code = 0, prot, access_index; - MemTxAttrs attrs = {}; + int error_code = 0, access_index; /* * TODO: If we ever need tlb_vaddr_to_host for this target, @@ -223,16 +221,15 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, assert(!probe); address &= TARGET_PAGE_MASK; - error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, - address, access_type, - mmu_idx, &page_size); + error_code = get_physical_address(env, &full, &access_index, + address, access_type, mmu_idx); vaddr = address; if (likely(error_code == 0)) { qemu_log_mask(CPU_LOG_MMU, "Translate at %" VADDR_PRIx " -> " HWADDR_FMT_plx ", vaddr " TARGET_FMT_lx "\n", - address, paddr, vaddr); - tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); + address, full.phys_addr, vaddr); + tlb_set_page_full(cs, mmu_idx, vaddr, &full); return true; } @@ -247,8 +244,8 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, permissions. If no mapping is available, redirect accesses to neverland. Fake/overridden mappings will be flushed when switching to normal mode. */ - prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE); + full.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + tlb_set_page_full(cs, mmu_idx, vaddr, &full); return true; } else { if (access_type == MMU_INST_FETCH) { @@ -545,8 +542,7 @@ static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw) return sfsr; } -static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, - int *prot, MemTxAttrs *attrs, +static int get_physical_address_data(CPUSPARCState *env, CPUTLBEntryFull *full, target_ulong address, int rw, int mmu_idx) { CPUState *cs = env_cpu(env); @@ -579,11 +575,12 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, for (i = 0; i < 64; i++) { /* ctx match, vaddr match, valid? */ - if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { + if (ultrasparc_tag_match(&env->dtlb[i], address, context, + &full->phys_addr)) { int do_fault = 0; if (TTE_IS_IE(env->dtlb[i].tte)) { - attrs->byte_swap = true; + full->attrs.byte_swap = true; } /* access ok? 
*/ @@ -616,9 +613,9 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, } if (!do_fault) { - *prot = PAGE_READ; + full->prot = PAGE_READ; if (TTE_IS_W_OK(env->dtlb[i].tte)) { - *prot |= PAGE_WRITE; + full->prot |= PAGE_WRITE; } TTE_SET_USED(env->dtlb[i].tte); @@ -645,8 +642,7 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, return 1; } -static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, - int *prot, MemTxAttrs *attrs, +static int get_physical_address_code(CPUSPARCState *env, CPUTLBEntryFull *full, target_ulong address, int mmu_idx) { CPUState *cs = env_cpu(env); @@ -681,7 +677,7 @@ static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, for (i = 0; i < 64; i++) { /* ctx match, vaddr match, valid? */ if (ultrasparc_tag_match(&env->itlb[i], - address, context, physical)) { + address, context, &full->phys_addr)) { /* access ok? */ if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) { /* Fault status register */ @@ -708,7 +704,7 @@ static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, return 1; } - *prot = PAGE_EXEC; + full->prot = PAGE_EXEC; TTE_SET_USED(env->itlb[i].tte); return 0; } @@ -722,14 +718,13 @@ static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, return 1; } -static int get_physical_address(CPUSPARCState *env, hwaddr *physical, - int *prot, int *access_index, MemTxAttrs *attrs, - target_ulong address, int rw, int mmu_idx, - target_ulong *page_size) +static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full, + int *access_index, target_ulong address, + int rw, int mmu_idx) { /* ??? We treat everything as a small page, then explicitly flush everything when an entry is evicted. */ - *page_size = TARGET_PAGE_SIZE; + full->lg_page_size = TARGET_PAGE_BITS; /* safety net to catch wrong softmmu index use from dynamic code */ if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { @@ -747,17 +742,15 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, } if (mmu_idx == MMU_PHYS_IDX) { - *physical = ultrasparc_truncate_physical(address); - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + full->phys_addr = ultrasparc_truncate_physical(address); + full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return 0; } if (rw == 2) { - return get_physical_address_code(env, physical, prot, attrs, address, - mmu_idx); + return get_physical_address_code(env, full, address, mmu_idx); } else { - return get_physical_address_data(env, physical, prot, attrs, address, - rw, mmu_idx); + return get_physical_address_data(env, full, address, rw, mmu_idx); } } @@ -768,25 +761,17 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; - target_ulong vaddr; - hwaddr paddr; - target_ulong page_size; - MemTxAttrs attrs = {}; - int error_code = 0, prot, access_index; + CPUTLBEntryFull full = {}; + int error_code = 0, access_index; address &= TARGET_PAGE_MASK; - error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, - address, access_type, - mmu_idx, &page_size); + error_code = get_physical_address(env, &full, &access_index, + address, access_type, mmu_idx); if (likely(error_code == 0)) { - vaddr = address; - - trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl, + trace_mmu_helper_mmu_fault(address, full.phys_addr, mmu_idx, env->tl, env->dmmu.mmu_primary_context, env->dmmu.mmu_secondary_context); - - tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx, 
- page_size); + tlb_set_page_full(cs, mmu_idx, address, &full); return true; } if (probe) { @@ -888,12 +873,14 @@ void dump_mmu(CPUSPARCState *env) static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys, target_ulong addr, int rw, int mmu_idx) { - target_ulong page_size; - int prot, access_index; - MemTxAttrs attrs = {}; + CPUTLBEntryFull full = {}; + int access_index, ret; - return get_physical_address(env, phys, &prot, &access_index, &attrs, addr, - rw, mmu_idx, &page_size); + ret = get_physical_address(env, &full, &access_index, addr, rw, mmu_idx); + if (ret == 0) { + *phys = full.phys_addr; + } + return ret; } #if defined(TARGET_SPARC64) @@ -924,7 +911,6 @@ hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) return phys_addr; } -#ifndef CONFIG_USER_ONLY G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, @@ -942,4 +928,3 @@ G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr); } -#endif /* !CONFIG_USER_ONLY */ diff --git a/target/sparc/translate.c b/target/sparc/translate.c index 150aeecd14..9377798490 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -34,8 +34,6 @@ #include "asi.h" -#define DEBUG_DISAS - #define DYNAMIC_PC 1 /* dynamic pc value */ #define JUMP_PC 2 /* dynamic pc value which takes only two values according to jump_pc[T2] */ @@ -84,10 +82,6 @@ typedef struct DisasContext { uint32_t cc_op; /* current CC operation */ sparc_def_t *def; - TCGv_i32 t32[3]; - TCGv ttl[5]; - int n_t32; - int n_ttl; #ifdef TARGET_SPARC64 int fprs_dirty; int asi; @@ -97,7 +91,6 @@ typedef struct DisasContext { typedef struct { TCGCond cond; bool is_bool; - bool g1, g2; TCGv c1, c2; } DisasCompare; @@ -131,22 +124,6 @@ static int sign_extend(int x, int len) #define IS_IMM (insn & (1<<13)) -static inline TCGv_i32 get_temp_i32(DisasContext *dc) -{ - TCGv_i32 t; - assert(dc->n_t32 < ARRAY_SIZE(dc->t32)); - dc->t32[dc->n_t32++] = t = tcg_temp_new_i32(); - return t; -} - -static inline TCGv get_temp_tl(DisasContext *dc) -{ - TCGv t; - assert(dc->n_ttl < ARRAY_SIZE(dc->ttl)); - dc->ttl[dc->n_ttl++] = t = tcg_temp_new(); - return t; -} - static inline void gen_update_fprs_dirty(DisasContext *dc, int rd) { #if defined(TARGET_SPARC64) @@ -163,7 +140,7 @@ static inline void gen_update_fprs_dirty(DisasContext *dc, int rd) /* floating point registers moves */ static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src) { - TCGv_i32 ret = get_temp_i32(dc); + TCGv_i32 ret = tcg_temp_new_i32(); if (src & 1) { tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]); } else { @@ -179,13 +156,12 @@ static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v) tcg_gen_extu_i32_i64(t, v); tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t, (dst & 1 ? 
0 : 32), 32); - tcg_temp_free_i64(t); gen_update_fprs_dirty(dc, dst); } static TCGv_i32 gen_dest_fpr_F(DisasContext *dc) { - return get_temp_i32(dc); + return tcg_temp_new_i32(); } static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src) @@ -301,7 +277,7 @@ static inline TCGv gen_load_gpr(DisasContext *dc, int reg) assert(reg < 32); return cpu_regs[reg]; } else { - TCGv t = get_temp_tl(dc); + TCGv t = tcg_temp_new(); tcg_gen_movi_tl(t, 0); return t; } @@ -321,7 +297,7 @@ static inline TCGv gen_dest_gpr(DisasContext *dc, int reg) assert(reg < 32); return cpu_regs[reg]; } else { - return get_temp_tl(dc); + return tcg_temp_new(); } } @@ -399,11 +375,6 @@ static TCGv_i32 gen_add32_carry32(void) carry_32 = tcg_temp_new_i32(); tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32); -#if TARGET_LONG_BITS == 64 - tcg_temp_free_i32(cc_src1_32); - tcg_temp_free_i32(cc_src2_32); -#endif - return carry_32; } @@ -425,11 +396,6 @@ static TCGv_i32 gen_sub32_carry32(void) carry_32 = tcg_temp_new_i32(); tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32); -#if TARGET_LONG_BITS == 64 - tcg_temp_free_i32(cc_src1_32); - tcg_temp_free_i32(cc_src2_32); -#endif - return carry_32; } @@ -460,7 +426,6 @@ static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1, generated the carry in the first place. */ carry = tcg_temp_new(); tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2); - tcg_temp_free(carry); goto add_done; } carry_32 = gen_add32_carry32(); @@ -489,11 +454,6 @@ static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1, tcg_gen_add_tl(dst, src1, src2); tcg_gen_add_tl(dst, dst, carry); - tcg_temp_free_i32(carry_32); -#if TARGET_LONG_BITS == 64 - tcg_temp_free(carry); -#endif - add_done: if (update_cc) { tcg_gen_mov_tl(cpu_cc_src, src1); @@ -545,7 +505,6 @@ static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, generated the carry in the first place. 
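The add2 trick above deserves a gloss: the previous addition (cpu_cc_src + cpu_cc_src2) is redone in the low half of a double-width add purely so that its carry-out feeds the high half, which then computes src1 + src2 + C in one op. A plain-C model, with addx32 as an illustrative name:

#include <stdint.h>
#include <stdio.h>

/* model of the add2 idiom in gen_op_addx_int: the low half regenerates
 * the earlier add, and its carry-out flows into the high half */
static uint32_t addx32(uint32_t cc_src, uint32_t cc_src2,
                       uint32_t src1, uint32_t src2)
{
    uint64_t lo = (uint64_t)cc_src + cc_src2;   /* recovers the old carry */
    return src1 + src2 + (uint32_t)(lo >> 32);  /* high half of the add2 */
}

int main(void)
{
    /* the previous add 0xffffffff + 1 carried out, so 5 addx 7 is 13 */
    printf("%u\n", addx32(0xffffffffu, 1u, 5u, 7u));
    return 0;
}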
*/ carry = tcg_temp_new(); tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2); - tcg_temp_free(carry); goto sub_done; } carry_32 = gen_sub32_carry32(); @@ -568,11 +527,6 @@ static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, tcg_gen_sub_tl(dst, src1, src2); tcg_gen_sub_tl(dst, dst, carry); - tcg_temp_free_i32(carry_32); -#if TARGET_LONG_BITS == 64 - tcg_temp_free(carry); -#endif - sub_done: if (update_cc) { tcg_gen_mov_tl(cpu_cc_src, src1); @@ -594,13 +548,12 @@ static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2) if (!(env->y & 1)) T1 = 0; */ - zero = tcg_const_tl(0); + zero = tcg_constant_tl(0); tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff); tcg_gen_andi_tl(r_temp, cpu_y, 0x1); tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff); tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero, zero, cpu_cc_src2); - tcg_temp_free(zero); // b2 = T0 & 1; // env->y = (b2 << 31) | (env->y >> 1); @@ -611,14 +564,12 @@ static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2) gen_mov_reg_N(t0, cpu_psr); gen_mov_reg_V(r_temp, cpu_psr); tcg_gen_xor_tl(t0, t0, r_temp); - tcg_temp_free(r_temp); // T0 = (b1 << 31) | (T0 >> 1); // src1 = T0; tcg_gen_shli_tl(t0, t0, 31); tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0); - tcg_temp_free(t0); tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2); @@ -646,9 +597,6 @@ static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext) } tcg_gen_mul_i64(dst, t0, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_gen_shri_i64(cpu_y, dst, 32); #endif } @@ -686,7 +634,6 @@ static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src) tcg_gen_xor_tl(dst, dst, t0); gen_mov_reg_Z(t0, src); tcg_gen_or_tl(dst, dst, t0); - tcg_temp_free(t0); } // N ^ V @@ -696,7 +643,6 @@ static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src) gen_mov_reg_V(t0, src); gen_mov_reg_N(dst, src); tcg_gen_xor_tl(dst, dst, t0); - tcg_temp_free(t0); } // C | Z @@ -706,7 +652,6 @@ static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src) gen_mov_reg_Z(t0, src); gen_mov_reg_C(dst, src); tcg_gen_or_tl(dst, dst, t0); - tcg_temp_free(t0); } // C @@ -811,7 +756,6 @@ static inline void gen_op_eval_fbne(TCGv dst, TCGv src, gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_or_tl(dst, dst, t0); - tcg_temp_free(t0); } // 1 or 2: FCC0 ^ FCC1 @@ -822,7 +766,6 @@ static inline void gen_op_eval_fblg(TCGv dst, TCGv src, gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_xor_tl(dst, dst, t0); - tcg_temp_free(t0); } // 1 or 3: FCC0 @@ -840,7 +783,6 @@ static inline void gen_op_eval_fbl(TCGv dst, TCGv src, gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_andc_tl(dst, dst, t0); - tcg_temp_free(t0); } // 2 or 3: FCC1 @@ -858,7 +800,6 @@ static inline void gen_op_eval_fbg(TCGv dst, TCGv src, gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_andc_tl(dst, t0, dst); - tcg_temp_free(t0); } // 3: FCC0 & FCC1 @@ -869,7 +810,6 @@ static inline void gen_op_eval_fbu(TCGv dst, TCGv src, gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_and_tl(dst, dst, t0); - tcg_temp_free(t0); } // 0: !(FCC0 | FCC1) @@ -881,7 +821,6 @@ static inline void gen_op_eval_fbe(TCGv dst, TCGv src, gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_or_tl(dst, dst, t0); tcg_gen_xori_tl(dst, dst, 0x1); - tcg_temp_free(t0); } // 0 or 3: !(FCC0 ^ FCC1) @@ -893,7 +832,6 @@ static inline 
void gen_op_eval_fbue(TCGv dst, TCGv src, gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_xor_tl(dst, dst, t0); tcg_gen_xori_tl(dst, dst, 0x1); - tcg_temp_free(t0); } // 0 or 2: !FCC0 @@ -913,7 +851,6 @@ static inline void gen_op_eval_fbuge(TCGv dst, TCGv src, gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_andc_tl(dst, dst, t0); tcg_gen_xori_tl(dst, dst, 0x1); - tcg_temp_free(t0); } // 0 or 1: !FCC1 @@ -933,7 +870,6 @@ static inline void gen_op_eval_fbule(TCGv dst, TCGv src, gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_andc_tl(dst, t0, dst); tcg_gen_xori_tl(dst, dst, 0x1); - tcg_temp_free(t0); } // !3: !(FCC0 & FCC1) @@ -945,7 +881,6 @@ static inline void gen_op_eval_fbo(TCGv dst, TCGv src, gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_and_tl(dst, dst, t0); tcg_gen_xori_tl(dst, dst, 0x1); - tcg_temp_free(t0); } static inline void gen_branch2(DisasContext *dc, target_ulong pc1, @@ -991,11 +926,9 @@ static void gen_branch_n(DisasContext *dc, target_ulong pc1) tcg_gen_mov_tl(cpu_pc, cpu_npc); tcg_gen_addi_tl(cpu_npc, cpu_npc, 4); - t = tcg_const_tl(pc1); - z = tcg_const_tl(0); + t = tcg_constant_tl(pc1); + z = tcg_constant_tl(0); tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc); - tcg_temp_free(t); - tcg_temp_free(z); dc->pc = DYNAMIC_PC; } @@ -1003,15 +936,11 @@ static void gen_branch_n(DisasContext *dc, target_ulong pc1) static inline void gen_generic_branch(DisasContext *dc) { - TCGv npc0 = tcg_const_tl(dc->jump_pc[0]); - TCGv npc1 = tcg_const_tl(dc->jump_pc[1]); - TCGv zero = tcg_const_tl(0); + TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]); + TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]); + TCGv zero = tcg_constant_tl(0); tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1); - - tcg_temp_free(npc0); - tcg_temp_free(npc1); - tcg_temp_free(zero); } /* call this function before using the condition register as it may @@ -1050,20 +979,14 @@ static inline void save_state(DisasContext *dc) static void gen_exception(DisasContext *dc, int which) { - TCGv_i32 t; - save_state(dc); - t = tcg_const_i32(which); - gen_helper_raise_exception(cpu_env, t); - tcg_temp_free_i32(t); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(which)); dc->base.is_jmp = DISAS_NORETURN; } static void gen_check_align(TCGv addr, int mask) { - TCGv_i32 r_mask = tcg_const_i32(mask); - gen_helper_check_align(cpu_env, addr, r_mask); - tcg_temp_free_i32(r_mask); + gen_helper_check_align(cpu_env, addr, tcg_constant_i32(mask)); } static inline void gen_mov_pc_npc(DisasContext *dc) @@ -1086,16 +1009,6 @@ static inline void gen_op_next_insn(void) tcg_gen_addi_tl(cpu_npc, cpu_npc, 4); } -static void free_compare(DisasCompare *cmp) -{ - if (!cmp->g1) { - tcg_temp_free(cmp->c1); - } - if (!cmp->g2) { - tcg_temp_free(cmp->c2); - } -} - static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond, DisasContext *dc) { @@ -1155,17 +1068,14 @@ static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond, cmp->cond = logic_cond[cond]; do_compare_dst_0: cmp->is_bool = false; - cmp->g2 = false; - cmp->c2 = tcg_const_tl(0); + cmp->c2 = tcg_constant_tl(0); #ifdef TARGET_SPARC64 if (!xcc) { - cmp->g1 = false; cmp->c1 = tcg_temp_new(); tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst); break; } #endif - cmp->g1 = true; cmp->c1 = cpu_cc_dst; break; @@ -1187,7 +1097,6 @@ static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond, if (!xcc) { /* Note that sign-extension works for unsigned compares as long as both operands are sign-extended. 
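That sign-extension claim is easy to verify in isolation: sign extension is monotone for both the signed and the unsigned ordering of 32-bit values, so a handful of boundary cases exercise it:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t v[] = { 0u, 1u, 0x7fffffffu, 0x80000000u,
                     0xfffffffeu, 0xffffffffu };
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 6; j++) {
            uint64_t a = (uint64_t)(int64_t)(int32_t)v[i];  /* sign-extend */
            uint64_t b = (uint64_t)(int64_t)(int32_t)v[j];
            assert((a < b) == (v[i] < v[j]));               /* unsigned kept */
            assert(((int64_t)a < (int64_t)b) ==
                   ((int32_t)v[i] < (int32_t)v[j]));        /* signed kept */
        }
    }
    puts("sign-extension preserves both orderings");
    return 0;
}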
*/ - cmp->g1 = cmp->g2 = false; cmp->c1 = tcg_temp_new(); cmp->c2 = tcg_temp_new(); tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src); @@ -1195,7 +1104,6 @@ static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond, break; } #endif - cmp->g1 = cmp->g2 = true; cmp->c1 = cpu_cc_src; cmp->c2 = cpu_cc_src2; break; @@ -1212,9 +1120,8 @@ static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond, /* We're going to generate a boolean result. */ cmp->cond = TCG_COND_NE; cmp->is_bool = true; - cmp->g1 = cmp->g2 = false; cmp->c1 = r_dst = tcg_temp_new(); - cmp->c2 = tcg_const_tl(0); + cmp->c2 = tcg_constant_tl(0); switch (cond) { case 0x0: @@ -1278,9 +1185,8 @@ static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond) /* For now we still generate a straight boolean result. */ cmp->cond = TCG_COND_NE; cmp->is_bool = true; - cmp->g1 = cmp->g2 = false; cmp->c1 = r_dst = tcg_temp_new(); - cmp->c2 = tcg_const_tl(0); + cmp->c2 = tcg_constant_tl(0); switch (cc) { default: @@ -1362,8 +1268,6 @@ static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond, } else { tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2); } - - free_compare(&cmp); } static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond) @@ -1377,8 +1281,6 @@ static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond) } else { tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2); } - - free_compare(&cmp); } #ifdef TARGET_SPARC64 @@ -1398,10 +1300,8 @@ static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src) { cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]); cmp->is_bool = false; - cmp->g1 = true; - cmp->g2 = false; cmp->c1 = r_src; - cmp->c2 = tcg_const_tl(0); + cmp->c2 = tcg_constant_tl(0); } static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src) @@ -1411,8 +1311,6 @@ static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src) /* The interface is to return a boolean in r_dst. 
*/ tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2); - - free_compare(&cmp); } #endif @@ -1999,15 +1897,14 @@ static void gen_swap(DisasContext *dc, TCGv dst, TCGv src, TCGv addr, int mmu_idx, MemOp memop) { gen_address_mask(dc, addr); - tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop); + tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN); } static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx) { - TCGv m1 = tcg_const_tl(0xff); + TCGv m1 = tcg_constant_tl(0xff); gen_address_mask(dc, addr); tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB); - tcg_temp_free(m1); } /* asi moves */ @@ -2256,12 +2153,12 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, break; case GET_ASI_DIRECT: gen_address_mask(dc, addr); - tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop); + tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN); break; default: { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(memop); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN); save_state(dc); #ifdef TARGET_SPARC64 @@ -2271,11 +2168,8 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, TCGv_i64 t64 = tcg_temp_new_i64(); gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop); tcg_gen_trunc_i64_tl(dst, t64); - tcg_temp_free_i64(t64); } #endif - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); } break; } @@ -2305,7 +2199,7 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, /* fall through */ case GET_ASI_DIRECT: gen_address_mask(dc, addr); - tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop); + tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN); break; #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) case GET_ASI_BCOPY: @@ -2317,7 +2211,7 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, { TCGv saddr = tcg_temp_new(); TCGv daddr = tcg_temp_new(); - TCGv four = tcg_const_tl(4); + TCGv four = tcg_constant_tl(4); TCGv_i32 tmp = tcg_temp_new_i32(); int i; @@ -2331,18 +2225,13 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, tcg_gen_add_tl(saddr, saddr, four); tcg_gen_add_tl(daddr, daddr, four); } - - tcg_temp_free(saddr); - tcg_temp_free(daddr); - tcg_temp_free(four); - tcg_temp_free_i32(tmp); } break; #endif default: { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN); save_state(dc); #ifdef TARGET_SPARC64 @@ -2352,11 +2241,8 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, TCGv_i64 t64 = tcg_temp_new_i64(); tcg_gen_extu_tl_i64(t64, src); gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop); - tcg_temp_free_i64(t64); } #endif - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); /* A write to a TLB register may alter page maps. End the TB. */ dc->npc = DYNAMIC_PC; @@ -2395,9 +2281,8 @@ static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv, case GET_ASI_DIRECT: oldv = tcg_temp_new(); tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd), - da.mem_idx, da.memop); + da.mem_idx, da.memop | MO_ALIGN); gen_store_gpr(dc, rd, oldv); - tcg_temp_free(oldv); break; default: /* ??? Should be DAE_invalid_asi. 
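gen_cas_asi above now emits the cmpxchg with MO_ALIGN, but the compare-and-swap shape itself is unchanged. A host-side model of casa's register behavior using C11 atomics; nothing here is QEMU API:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    _Atomic uint32_t mem = 42;
    uint32_t rs2 = 42, rd = 7;      /* compare value and swap-in value */

    /* casa: rd receives the old memory word, and memory is updated only
     * on a match; atomic_compare_exchange_strong writes the observed
     * value back into 'expected', so it doubles as the old-value output */
    uint32_t expected = rs2;
    atomic_compare_exchange_strong(&mem, &expected, rd);
    printf("mem=%u old=%u\n", (unsigned)mem, (unsigned)expected);
    return 0;                       /* prints mem=7 old=42 */
}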
*/ @@ -2422,22 +2307,18 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) if (tb_cflags(dc->base.tb) & CF_PARALLEL) { gen_helper_exit_atomic(cpu_env); } else { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(MO_UB); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(MO_UB); TCGv_i64 s64, t64; save_state(dc); t64 = tcg_temp_new_i64(); gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop); - s64 = tcg_const_i64(0xff); + s64 = tcg_constant_i64(0xff); gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop); - tcg_temp_free_i64(s64); - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); tcg_gen_trunc_i64_tl(dst, t64); - tcg_temp_free_i64(t64); /* End the TB. */ dc->npc = DYNAMIC_PC; @@ -2464,7 +2345,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, switch (size) { case 4: d32 = gen_dest_fpr_F(dc); - tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop); + tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN); gen_store_fpr_F(dc, rd, d32); break; case 8: @@ -2478,7 +2359,6 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop | MO_ALIGN_4); tcg_gen_mov_i64(cpu_fpr[rd / 2], d64); - tcg_temp_free_i64(d64); break; default: g_assert_not_reached(); @@ -2496,7 +2376,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, /* The first operation checks required alignment. */ memop = da.memop | MO_ALIGN_64; - eight = tcg_const_tl(8); + eight = tcg_constant_tl(8); for (i = 0; ; ++i) { tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da.mem_idx, memop); @@ -2506,7 +2386,6 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, tcg_gen_add_tl(addr, addr, eight); memop = da.memop; } - tcg_temp_free(eight); } else { gen_exception(dc, TT_ILL_INSN); } @@ -2516,7 +2395,8 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, /* Valid for lddfa only. 
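Across both the sh4 and sparc hunks, guest memory ops gain an explicit MO_ALIGN now that alignment checking is requested per access rather than implied by an aligned-only build. A sketch of the resulting idiom, assuming a QEMU translator build; gen_ld32_aligned is an illustrative name:

#include "tcg/tcg-op.h"                 /* assumption: QEMU translator TU */

/* an explicitly alignment-checked 32-bit big-endian load: under softmmu
 * a misaligned address reaches the target's do_unaligned_access hook
 * instead of being silently accepted */
static void gen_ld32_aligned(TCGv_i32 dst, TCGv addr, int memidx)
{
    tcg_gen_qemu_ld_i32(dst, addr, memidx, MO_TEUL | MO_ALIGN);
}

The MO_ALIGN_4 and MO_ALIGN_64 variants seen in the block-transfer hunks request a specific alignment independent of the access size.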
*/ if (size == 8) { gen_address_mask(dc, addr); - tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop); + tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, + da.memop | MO_ALIGN); } else { gen_exception(dc, TT_ILL_INSN); } @@ -2524,8 +2404,8 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, default: { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(da.memop); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN); save_state(dc); /* According to the table in the UA2011 manual, the only @@ -2538,7 +2418,6 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop); d32 = gen_dest_fpr_F(dc); tcg_gen_extrl_i64_i32(d32, d64); - tcg_temp_free_i64(d64); gen_store_fpr_F(dc, rd, d32); break; case 8: @@ -2550,13 +2429,10 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, tcg_gen_addi_tl(addr, addr, 8); gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop); tcg_gen_mov_i64(cpu_fpr[rd / 2], d64); - tcg_temp_free_i64(d64); break; default: g_assert_not_reached(); } - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); } break; } @@ -2577,7 +2453,7 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, switch (size) { case 4: d32 = gen_load_fpr_F(dc, rd); - tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop); + tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN); break; case 8: tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, @@ -2610,7 +2486,7 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, /* The first operation checks required alignment. */ memop = da.memop | MO_ALIGN_64; - eight = tcg_const_tl(8); + eight = tcg_constant_tl(8); for (i = 0; ; ++i) { tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da.mem_idx, memop); @@ -2620,7 +2496,6 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, tcg_gen_add_tl(addr, addr, eight); memop = da.memop; } - tcg_temp_free(eight); } else { gen_exception(dc, TT_ILL_INSN); } @@ -2630,7 +2505,8 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, /* Valid for stdfa only. */ if (size == 8) { gen_address_mask(dc, addr); - tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop); + tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, + da.memop | MO_ALIGN); } else { gen_exception(dc, TT_ILL_INSN); } @@ -2667,7 +2543,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) TCGv_i64 tmp = tcg_temp_new_i64(); gen_address_mask(dc, addr); - tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop); + tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN); /* Note that LE ldda acts as if each 32-bit register result is byte swapped. Having just performed one @@ -2677,7 +2553,6 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) } else { tcg_gen_extr32_i64(hi, lo, tmp); } - tcg_temp_free_i64(tmp); } break; @@ -2687,14 +2562,12 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) real hardware allows others. This can be seen with e.g. FreeBSD 10.3 wrt ASI_IC_TAG. */ { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(da.memop); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(da.memop); TCGv_i64 tmp = tcg_temp_new_i64(); save_state(dc); gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop); - tcg_temp_free_i32(r_asi); - tcg_temp_free_i32(r_mop); /* See above. 
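The note being referenced: a little-endian ldda can be done as one byte-swapped 64-bit load provided the two 32-bit halves are written back in swapped order. The underlying identity, checkable in isolation:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* bswap64(H:L) == bswap32(L):bswap32(H), so after one swapped 64-bit
     * load, extracting the halves in reverse order gives the same
     * registers as two swapped 32-bit loads would */
    uint64_t x = 0x0123456789abcdefULL;
    uint64_t s = __builtin_bswap64(x);
    assert((uint32_t)(s >> 32) == __builtin_bswap32((uint32_t)x));
    assert((uint32_t)s == __builtin_bswap32((uint32_t)(x >> 32)));
    puts("swap identity holds");
    return 0;
}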
*/ if ((da.memop & MO_BSWAP) == MO_TE) { @@ -2702,7 +2575,6 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) } else { tcg_gen_extr32_i64(hi, lo, tmp); } - tcg_temp_free_i64(tmp); } break; } @@ -2741,8 +2613,7 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, tcg_gen_concat32_i64(t64, hi, lo); } gen_address_mask(dc, addr); - tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop); - tcg_temp_free_i64(t64); + tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); } break; @@ -2750,8 +2621,8 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, /* ??? In theory we've handled all of the ASIs that are valid for stda, and this should raise DAE_invalid_asi. */ { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(da.memop); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(da.memop); TCGv_i64 t64 = tcg_temp_new_i64(); /* See above. */ @@ -2763,9 +2634,6 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, save_state(dc); gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop); - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); - tcg_temp_free_i64(t64); } break; } @@ -2783,9 +2651,8 @@ static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv, case GET_ASI_DIRECT: oldv = tcg_temp_new(); tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd), - da.mem_idx, da.memop); + da.mem_idx, da.memop | MO_ALIGN); gen_store_gpr(dc, rd, oldv); - tcg_temp_free(oldv); break; default: /* ??? Should be DAE_invalid_asi. */ @@ -2808,27 +2675,23 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) switch (da.type) { case GET_ASI_EXCP: - tcg_temp_free_i64(t64); return; case GET_ASI_DIRECT: gen_address_mask(dc, addr); - tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop); + tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); break; default: { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(MO_UQ); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(MO_UQ); save_state(dc); gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop); - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); } break; } tcg_gen_extr_i64_i32(lo, hi, t64); - tcg_temp_free_i64(t64); gen_store_gpr(dc, rd | 1, lo); gen_store_gpr(dc, rd, hi); } @@ -2847,7 +2710,7 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, break; case GET_ASI_DIRECT: gen_address_mask(dc, addr); - tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop); + tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); break; case GET_ASI_BFILL: /* Store 32 bytes of T64 to ADDR. */ @@ -2857,7 +2720,7 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, as a cacheline-style operation. 
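A plain-C model of the fill loop that follows: the address is masked down to an 8-byte boundary and the 64-bit value is stored four times, 32 bytes in all. bfill32 is an illustrative name, and the model uses host byte order where the real store honors the memop:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* model of the ASI_M_BFILL loop: addr & -8, then four 8-byte stores */
static void bfill32(uint8_t *mem, uint32_t addr, uint64_t v)
{
    uint32_t p = addr & ~7u;
    for (int i = 0; i < 4; i++) {
        memcpy(mem + p + 8 * i, &v, 8);
    }
}

int main(void)
{
    uint8_t ram[64] = { 0 };
    bfill32(ram, 13, 0x1122334455667788ULL);    /* fills bytes 8..39 */
    printf("ram[8]=%02x ram[39]=%02x\n", ram[8], ram[39]);
    return 0;
}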
*/ { TCGv d_addr = tcg_temp_new(); - TCGv eight = tcg_const_tl(8); + TCGv eight = tcg_constant_tl(8); int i; tcg_gen_andi_tl(d_addr, addr, -8); @@ -2865,25 +2728,18 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop); tcg_gen_add_tl(d_addr, d_addr, eight); } - - tcg_temp_free(d_addr); - tcg_temp_free(eight); } break; default: { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(MO_UQ); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(MO_UQ); save_state(dc); gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop); - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); } break; } - - tcg_temp_free_i64(t64); } #endif @@ -2897,7 +2753,7 @@ static TCGv get_src2(DisasContext *dc, unsigned int insn) { if (IS_IMM) { /* immediate */ target_long simm = GET_FIELDs(insn, 19, 31); - TCGv t = get_temp_tl(dc); + TCGv t = tcg_temp_new(); tcg_gen_movi_tl(t, simm); return t; } else { /* register */ @@ -2921,18 +2777,15 @@ static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs) TCGv_i64 c64 = tcg_temp_new_i64(); tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2); tcg_gen_extrl_i64_i32(c32, c64); - tcg_temp_free_i64(c64); } s1 = gen_load_fpr_F(dc, rs); s2 = gen_load_fpr_F(dc, rd); dst = gen_dest_fpr_F(dc); - zero = tcg_const_i32(0); + zero = tcg_constant_i32(0); tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2); - tcg_temp_free_i32(c32); - tcg_temp_free_i32(zero); gen_store_fpr_F(dc, rd, dst); } @@ -2978,17 +2831,14 @@ static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env) TCGv_ptr r_tl_tmp = tcg_temp_new_ptr(); tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl); tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp); - tcg_temp_free_ptr(r_tl_tmp); } - - tcg_temp_free_i32(r_tl); } #endif static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, int width, bool cc, bool left) { - TCGv lo1, lo2, t1, t2; + TCGv lo1, lo2; uint64_t amask, tabl, tabr; int shift, imask, omask; @@ -3055,10 +2905,8 @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, tcg_gen_shli_tl(lo1, lo1, shift); tcg_gen_shli_tl(lo2, lo2, shift); - t1 = tcg_const_tl(tabl); - t2 = tcg_const_tl(tabr); - tcg_gen_shr_tl(lo1, t1, lo1); - tcg_gen_shr_tl(lo2, t2, lo2); + tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1); + tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2); tcg_gen_andi_tl(dst, lo1, omask); tcg_gen_andi_tl(lo2, lo2, omask); @@ -3077,15 +2925,10 @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, lo2 |= -(s1 == s2) dst &= lo2 */ - tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2); - tcg_gen_neg_tl(t1, t1); - tcg_gen_or_tl(lo2, lo2, t1); + tcg_gen_setcond_tl(TCG_COND_EQ, lo1, s1, s2); + tcg_gen_neg_tl(lo1, lo1); + tcg_gen_or_tl(lo2, lo2, lo1); tcg_gen_and_tl(dst, dst, lo2); - - tcg_temp_free(lo1); - tcg_temp_free(lo2); - tcg_temp_free(t1); - tcg_temp_free(t2); } static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left) @@ -3098,8 +2941,6 @@ static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left) tcg_gen_neg_tl(tmp, tmp); } tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3); - - tcg_temp_free(tmp); } static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2) @@ -3121,10 +2962,6 @@ static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2) tcg_gen_shri_tl(t2, t2, 1); tcg_gen_or_tl(dst, t1, t2); - - tcg_temp_free(t1); - tcg_temp_free(t2); - tcg_temp_free(shift); } #endif @@ -3253,7 +3090,7 @@ static void disas_sparc_insn(DisasContext * 
dc, unsigned int insn) case 2: /* FPU & Logical Operations */ { unsigned int xop = GET_FIELD(insn, 7, 12); - TCGv cpu_dst = get_temp_tl(dc); + TCGv cpu_dst = tcg_temp_new(); TCGv cpu_tmp0; if (xop == 0x3a) { /* generate trap */ @@ -3288,7 +3125,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) l1 = gen_new_label(); tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond), cmp.c1, cmp.c2, l1); - free_compare(&cmp); } mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc) @@ -3325,7 +3161,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_raise_exception(cpu_env, trap); - tcg_temp_free_i32(trap); if (cond == 8) { /* An unconditional trap ends the TB. */ @@ -3376,7 +3211,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) TCGv_i32 r_const; r_tickptr = tcg_temp_new_ptr(); - r_const = tcg_const_i32(dc->mem_idx); + r_const = tcg_constant_i32(dc->mem_idx); tcg_gen_ld_ptr(r_tickptr, cpu_env, offsetof(CPUSPARCState, tick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { @@ -3384,8 +3219,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr, r_const); - tcg_temp_free_ptr(r_tickptr); - tcg_temp_free_i32(r_const); gen_store_gpr(dc, rd, cpu_dst); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { /* I/O operations in icount mode must end the TB */ @@ -3430,7 +3263,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) TCGv_i32 r_const; r_tickptr = tcg_temp_new_ptr(); - r_const = tcg_const_i32(dc->mem_idx); + r_const = tcg_constant_i32(dc->mem_idx); tcg_gen_ld_ptr(r_tickptr, cpu_env, offsetof(CPUSPARCState, stick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { @@ -3438,8 +3271,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr, r_const); - tcg_temp_free_ptr(r_tickptr); - tcg_temp_free_i32(r_const); gen_store_gpr(dc, rd, cpu_dst); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { /* I/O operations in icount mode must end the TB */ @@ -3513,7 +3344,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) if (!supervisor(dc)) { goto priv_insn; } - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); #ifdef TARGET_SPARC64 rs1 = GET_FIELD(insn, 13, 17); switch (rs1) { @@ -3525,7 +3356,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_ld_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tpc)); - tcg_temp_free_ptr(r_tsptr); } break; case 1: // tnpc @@ -3536,7 +3366,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_ld_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tnpc)); - tcg_temp_free_ptr(r_tsptr); } break; case 2: // tstate @@ -3547,7 +3376,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_ld_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tstate)); - tcg_temp_free_ptr(r_tsptr); } break; case 3: // tt @@ -3557,7 +3385,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tt)); - tcg_temp_free_ptr(r_tsptr); } break; case 4: // tick @@ -3566,7 +3393,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) TCGv_i32 r_const; r_tickptr = tcg_temp_new_ptr(); - r_const = tcg_const_i32(dc->mem_idx); + r_const = 
tcg_constant_i32(dc->mem_idx); tcg_gen_ld_ptr(r_tickptr, cpu_env, offsetof(CPUSPARCState, tick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { @@ -3574,8 +3401,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_get_count(cpu_tmp0, cpu_env, r_tickptr, r_const); - tcg_temp_free_ptr(r_tickptr); - tcg_temp_free_i32(r_const); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { /* I/O operations in icount mode must end the TB */ dc->base.is_jmp = DISAS_EXIT; @@ -3847,7 +3672,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) cpu_src1 = get_src1(dc, insn); \ gen_compare_reg(&cmp, cond, cpu_src1); \ gen_fmov##sz(dc, &cmp, rd, rs2); \ - free_compare(&cmp); \ } while (0) if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */ @@ -3871,7 +3695,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) cond = GET_FIELD_SP(insn, 14, 17); \ gen_fcompare(&cmp, fcc, cond); \ gen_fmov##sz(dc, &cmp, rd, rs2); \ - free_compare(&cmp); \ } while (0) case 0x001: /* V9 fmovscc %fcc0 */ @@ -3921,7 +3744,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) cond = GET_FIELD_SP(insn, 14, 17); \ gen_compare(&cmp, xcc, cond, dc); \ gen_fmov##sz(dc, &cmp, rd, rs2); \ - free_compare(&cmp); \ } while (0) case 0x101: /* V9 fmovscc %icc */ @@ -4031,7 +3853,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); cpu_src2 = gen_load_gpr(dc, rs2); - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); if (insn & (1 << 12)) { tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); } else { @@ -4053,7 +3875,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); cpu_src2 = gen_load_gpr(dc, rs2); - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); if (insn & (1 << 12)) { tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0); @@ -4077,7 +3899,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); cpu_src2 = gen_load_gpr(dc, rs2); - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); if (insn & (1 << 12)) { tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0); @@ -4263,7 +4085,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) simm = GET_FIELDs(insn, 20, 31); tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f); } else { /* register */ - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0); } @@ -4274,7 +4096,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) simm = GET_FIELDs(insn, 20, 31); tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f); } else { /* register */ - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0); } @@ -4285,7 +4107,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) simm = GET_FIELDs(insn, 20, 31); tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f); } else { /* register */ - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0); } @@ -4294,7 +4116,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) #endif case 0x30: { - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); switch(rd) { case 0: /* wry */ 
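The xor on the next line is not an artifact: SPARC defines the value written by wr as r[rs1] xor operand2, not operand2 alone, so writing %y really does combine both sources. A tiny check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* wr %rs1, %rs2, %y writes rs1 ^ rs2 (V8 WRY semantics), which is
     * why the translator XORs the sources before the store */
    uint32_t rs1 = 0x12340000, rs2 = 0x00005678;
    printf("y = %08x\n", rs1 ^ rs2);    /* y = 12345678 */
    return 0;
}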
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); @@ -4393,7 +4215,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_set_limit(r_tickptr, cpu_tick_cmpr); - tcg_temp_free_ptr(r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4417,7 +4238,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_set_count(r_tickptr, cpu_tmp0); - tcg_temp_free_ptr(r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4441,7 +4261,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_set_limit(r_tickptr, cpu_stick_cmpr); - tcg_temp_free_ptr(r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4479,7 +4298,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) goto illegal_insn; } #else - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); gen_helper_wrpsr(cpu_env, cpu_tmp0); tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); @@ -4495,7 +4314,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) { if (!supervisor(dc)) goto priv_insn; - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); #ifdef TARGET_SPARC64 switch (rd) { @@ -4507,7 +4326,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_st_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tpc)); - tcg_temp_free_ptr(r_tsptr); } break; case 1: // tnpc @@ -4518,7 +4336,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_st_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tnpc)); - tcg_temp_free_ptr(r_tsptr); } break; case 2: // tstate @@ -4530,7 +4347,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_gen_st_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tstate)); - tcg_temp_free_ptr(r_tsptr); } break; case 3: // tt @@ -4541,7 +4357,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_st32_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tt)); - tcg_temp_free_ptr(r_tsptr); } break; case 4: // tick @@ -4557,7 +4372,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_set_count(r_tickptr, cpu_tmp0); - tcg_temp_free_ptr(r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4653,7 +4467,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) CHECK_IU_FEATURE(dc, HYPV); if (!hypervisor(dc)) goto priv_insn; - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); switch (rd) { case 0: // hpstate @@ -4688,7 +4502,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_set_limit(r_tickptr, cpu_hstick_cmpr); - tcg_temp_free_ptr(r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4733,7 +4546,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_gen_movcond_tl(cmp.cond, dst, cmp.c1, cmp.c2, cpu_src2, dst); - free_compare(&cmp); gen_store_gpr(dc, rd, dst); break; } @@ -4765,7 +4577,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_gen_movcond_tl(cmp.cond, dst, cmp.c1, cmp.c2, cpu_src2, dst); - free_compare(&cmp); gen_store_gpr(dc, rd, dst); break; } @@ -5227,7 +5038,7 @@ static 
void disas_sparc_insn(DisasContext * dc, unsigned int insn) } else if (xop == 0x39) { /* V9 return */ save_state(dc); cpu_src1 = get_src1(dc, insn); - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 19, 31); tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm); @@ -5249,7 +5060,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) #endif } else { cpu_src1 = get_src1(dc, insn); - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 19, 31); tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm); @@ -5344,7 +5155,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) unsigned int xop = GET_FIELD(insn, 7, 12); /* ??? gen_address_mask prevents us from using a source register directly. Always generate a temporary. */ - TCGv cpu_addr = get_temp_tl(dc); + TCGv cpu_addr = tcg_temp_new(); tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn)); if (xop == 0x3c || xop == 0x3e) { @@ -5368,15 +5179,18 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) switch (xop) { case 0x0: /* ld, V9 lduw, load unsigned word */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, + dc->mem_idx, MO_TEUL | MO_ALIGN); break; case 0x1: /* ldub, load unsigned byte */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, + dc->mem_idx, MO_UB); break; case 0x2: /* lduh, load unsigned halfword */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, + dc->mem_idx, MO_TEUW | MO_ALIGN); break; case 0x3: /* ldd, load double word */ if (rd & 1) @@ -5386,23 +5200,24 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_address_mask(dc, cpu_addr); t64 = tcg_temp_new_i64(); - tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld_i64(t64, cpu_addr, + dc->mem_idx, MO_TEUQ | MO_ALIGN); tcg_gen_trunc_i64_tl(cpu_val, t64); tcg_gen_ext32u_tl(cpu_val, cpu_val); gen_store_gpr(dc, rd + 1, cpu_val); tcg_gen_shri_i64(t64, t64, 32); tcg_gen_trunc_i64_tl(cpu_val, t64); - tcg_temp_free_i64(t64); tcg_gen_ext32u_tl(cpu_val, cpu_val); } break; case 0x9: /* ldsb, load signed byte */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB); break; case 0xa: /* ldsh, load signed halfword */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, + dc->mem_idx, MO_TESW | MO_ALIGN); break; case 0xd: /* ldstub */ gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx); @@ -5456,11 +5271,13 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) #ifdef TARGET_SPARC64 case 0x08: /* V9 ldsw */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, + dc->mem_idx, MO_TESL | MO_ALIGN); break; case 0x0b: /* V9 ldx */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, + dc->mem_idx, MO_TEUQ | MO_ALIGN); break; case 0x18: /* V9 ldswa */ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL); @@ -5511,7 +5328,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_address_mask(dc, cpu_addr); cpu_dst_32 = gen_dest_fpr_F(dc); tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr, - 
dc->mem_idx, MO_TEUL); + dc->mem_idx, MO_TEUL | MO_ALIGN); gen_store_fpr_F(dc, rd, cpu_dst_32); break; case 0x21: /* ldfsr, V9 ldxfsr */ @@ -5520,15 +5337,14 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) if (rd == 1) { TCGv_i64 t64 = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(t64, cpu_addr, - dc->mem_idx, MO_TEUQ); + dc->mem_idx, MO_TEUQ | MO_ALIGN); gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64); - tcg_temp_free_i64(t64); break; } #endif - cpu_dst_32 = get_temp_i32(dc); + cpu_dst_32 = tcg_temp_new_i32(); tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr, - dc->mem_idx, MO_TEUL); + dc->mem_idx, MO_TEUL | MO_ALIGN); gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32); break; case 0x22: /* ldqf, load quad fpreg */ @@ -5542,8 +5358,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx, MO_TEUQ | MO_ALIGN_4); gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64); - tcg_temp_free_i64(cpu_src1_64); - tcg_temp_free_i64(cpu_src2_64); break; case 0x23: /* lddf, load double fpreg */ gen_address_mask(dc, cpu_addr); @@ -5562,15 +5376,17 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) switch (xop) { case 0x4: /* st, store word */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st_tl(cpu_val, cpu_addr, + dc->mem_idx, MO_TEUL | MO_ALIGN); break; case 0x5: /* stb, store byte */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB); break; case 0x6: /* sth, store halfword */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st_tl(cpu_val, cpu_addr, + dc->mem_idx, MO_TEUW | MO_ALIGN); break; case 0x7: /* std, store double word */ if (rd & 1) @@ -5583,8 +5399,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) lo = gen_load_gpr(dc, rd + 1); t64 = tcg_temp_new_i64(); tcg_gen_concat_tl_i64(t64, lo, cpu_val); - tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx); - tcg_temp_free_i64(t64); + tcg_gen_qemu_st_i64(t64, cpu_addr, + dc->mem_idx, MO_TEUQ | MO_ALIGN); } break; #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) @@ -5607,7 +5423,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) #ifdef TARGET_SPARC64 case 0x0e: /* V9 stx */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st_tl(cpu_val, cpu_addr, + dc->mem_idx, MO_TEUQ | MO_ALIGN); break; case 0x1e: /* V9 stxa */ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ); @@ -5625,18 +5442,20 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_address_mask(dc, cpu_addr); cpu_src1_32 = gen_load_fpr_F(dc, rd); tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr, - dc->mem_idx, MO_TEUL); + dc->mem_idx, MO_TEUL | MO_ALIGN); break; case 0x25: /* stfsr, V9 stxfsr */ { #ifdef TARGET_SPARC64 gen_address_mask(dc, cpu_addr); if (rd == 1) { - tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr, + dc->mem_idx, MO_TEUQ | MO_ALIGN); break; } #endif - tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr, + dc->mem_idx, MO_TEUL | MO_ALIGN); } break; case 0x26: @@ -5747,46 +5566,31 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) dc->npc = dc->npc + 4; } jmp_insn: - goto egress; + return; illegal_insn: gen_exception(dc, TT_ILL_INSN); - goto egress; + return; unimp_flush: 
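
The load/store hunks above all follow one shape: the size-specific legacy helpers (tcg_gen_qemu_ld32u, tcg_gen_qemu_st16, and friends) become the generic tcg_gen_qemu_ld_tl/tcg_gen_qemu_st_tl, which take an explicit MemOp, so width, signedness, target endianness, and alignment checking are now visible at the call site. A representative pair, mirroring the lduw case (sketch only):

    /* Before: 32-bit zero-extending load, width implied by the name,
     * no alignment check requested. */
    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);

    /* After: MO_TEUL = target-endian unsigned 32-bit; MO_ALIGN asks the
     * memory core to fault on unaligned addresses, as SPARC requires. */
    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_TEUL | MO_ALIGN);
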
     gen_exception(dc, TT_UNIMP_FLUSH);
-    goto egress;
+    return;
 #if !defined(CONFIG_USER_ONLY)
 priv_insn:
     gen_exception(dc, TT_PRIV_INSN);
-    goto egress;
+    return;
 #endif
 nfpu_insn:
     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
-    goto egress;
+    return;
 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
     gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
-    goto egress;
+    return;
 #endif
 #ifndef TARGET_SPARC64
 ncp_insn:
     gen_exception(dc, TT_NCP_INSN);
-    goto egress;
+    return;
 #endif
- egress:
-    if (dc->n_t32 != 0) {
-        int i;
-        for (i = dc->n_t32 - 1; i >= 0; --i) {
-            tcg_temp_free_i32(dc->t32[i]);
-        }
-        dc->n_t32 = 0;
-    }
-    if (dc->n_ttl != 0) {
-        int i;
-        for (i = dc->n_ttl - 1; i >= 0; --i) {
-            tcg_temp_free(dc->ttl[i]);
-        }
-        dc->n_ttl = 0;
-    }
 }
 
 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
@@ -5904,7 +5708,7 @@ static const TranslatorOps sparc_tr_ops = {
     .disas_log = sparc_tr_disas_log,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                            target_ulong pc, void *host_pc)
 {
     DisasContext dc = {};
diff --git a/target/tricore/cpu-param.h b/target/tricore/cpu-param.h
index 2727913047..e29d551dd6 100644
--- a/target/tricore/cpu-param.h
+++ b/target/tricore/cpu-param.h
@@ -12,6 +12,5 @@
 #define TARGET_PAGE_BITS 14
 #define TARGET_PHYS_ADDR_SPACE_BITS 32
 #define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 3
 
 #endif
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index 594cd1efd5..d0a9272961 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -55,7 +55,8 @@ static void tricore_cpu_synchronize_from_tb(CPUState *cs,
     TriCoreCPU *cpu = TRICORE_CPU(cs);
     CPUTriCoreState *env = &cpu->env;
 
-    env->PC = tb_pc(tb);
+    tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
+    env->PC = tb->pc;
 }
 
 static void tricore_restore_state_to_opc(CPUState *cs,
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
index 3b9c533a7c..47d0ffb745 100644
--- a/target/tricore/cpu.h
+++ b/target/tricore/cpu.h
@@ -25,10 +25,6 @@
 #include "qemu/cpu-float.h"
 #include "tricore-defs.h"
 
-struct tricore_boot_info;
-
-typedef struct tricore_def_t tricore_def_t;
-
 typedef struct CPUArchState {
     /* GPR Register */
     uint32_t gpr_a[16];
@@ -179,16 +175,9 @@ typedef struct CPUArchState {
     uint32_t M3CNT;
     /* Floating Point Registers */
     float_status fp_status;
-    /* QEMU */
-    int error_code;
-    uint32_t hflags;
 
     /* CPU State */
     /* Internal CPU feature flags. */
     uint64_t features;
-
-    const tricore_def_t *cpu_model;
-    void *irq[8];
-    struct QEMUTimer *timer; /* Internal timer */
 } CPUTriCoreState;
 
 /**
diff --git a/target/tricore/gdbstub.c b/target/tricore/gdbstub.c
index 3a27a7e65d..e8f8e5e6ea 100644
--- a/target/tricore/gdbstub.c
+++ b/target/tricore/gdbstub.c
@@ -18,7 +18,7 @@
  */
 
 #include "qemu/osdep.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
 
 #define LCX_REGNUM 32
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 7ac34efd76..2646cb3eb5 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -124,9 +124,8 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
 /* Makros for generating helpers */
 
 #define gen_helper_1arg(name, arg) do { \
-    TCGv_i32 helper_tmp = tcg_const_i32(arg); \
+    TCGv_i32 helper_tmp = tcg_constant_i32(arg); \
     gen_helper_##name(cpu_env, helper_tmp); \
-    tcg_temp_free_i32(helper_tmp); \
 } while (0)
 
 #define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \
@@ -137,9 +136,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     tcg_gen_ext16s_tl(arg01, arg0); \
     tcg_gen_ext16s_tl(arg11, arg1); \
     gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
-    tcg_temp_free(arg00); \
-    tcg_temp_free(arg01); \
-    tcg_temp_free(arg11); \
 } while (0)
 
 #define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \
@@ -152,10 +148,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     tcg_gen_sari_tl(arg11, arg1, 16); \
     tcg_gen_ext16s_tl(arg10, arg1); \
     gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
-    tcg_temp_free(arg00); \
-    tcg_temp_free(arg01); \
-    tcg_temp_free(arg10); \
-    tcg_temp_free(arg11); \
 } while (0)
 
 #define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \
@@ -168,10 +160,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     tcg_gen_sari_tl(arg10, arg1, 16); \
     tcg_gen_ext16s_tl(arg11, arg1); \
     gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
-    tcg_temp_free(arg00); \
-    tcg_temp_free(arg01); \
-    tcg_temp_free(arg10); \
-    tcg_temp_free(arg11); \
 } while (0)
 
 #define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \
@@ -182,9 +170,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     tcg_gen_ext16s_tl(arg00, arg0); \
     tcg_gen_sari_tl(arg11, arg1, 16); \
     gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
-    tcg_temp_free(arg00); \
-    tcg_temp_free(arg01); \
-    tcg_temp_free(arg11); \
 } while (0)
 
 #define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do { \
@@ -194,9 +179,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     tcg_gen_concat_i32_i64(arg1, al1, ah1); \
     gen_helper_##name(ret, arg1, arg2); \
     tcg_gen_extr_i64_i32(rl, rh, ret); \
- \
-    tcg_temp_free_i64(ret); \
-    tcg_temp_free_i64(arg1); \
 } while (0)
 
 #define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do { \
@@ -204,8 +186,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
 \
     gen_helper_##name(ret, cpu_env, arg1, arg2); \
     tcg_gen_extr_i64_i32(rl, rh, ret); \
- \
-    tcg_temp_free_i64(ret); \
 } while (0)
 
 #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
@@ -229,7 +209,6 @@ static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
     TCGv temp = tcg_temp_new();
     tcg_gen_addi_tl(temp, r2, con);
     tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
-    tcg_temp_free(temp);
 }
 
 static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
@@ -238,7 +217,6 @@ static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
     TCGv temp = tcg_temp_new();
     tcg_gen_addi_tl(temp, r2, con);
     tcg_gen_qemu_st_tl(r1,
temp, ctx->mem_idx, mop); - tcg_temp_free(temp); } static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) @@ -247,8 +225,6 @@ static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) tcg_gen_concat_i32_i64(temp, rl, rh); tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEUQ); - - tcg_temp_free_i64(temp); } static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, @@ -257,7 +233,6 @@ static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, base, con); gen_st_2regs_64(rh, rl, temp, ctx); - tcg_temp_free(temp); } static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) @@ -267,8 +242,6 @@ static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEUQ); /* write back to two 32 bit regs */ tcg_gen_extr_i64_i32(rl, rh, temp); - - tcg_temp_free_i64(temp); } static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, @@ -277,7 +250,6 @@ static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, base, con); gen_ld_2regs_64(rh, rl, temp, ctx); - tcg_temp_free(temp); } static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, @@ -287,7 +259,6 @@ static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, tcg_gen_addi_tl(temp, r2, off); tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop); tcg_gen_mov_tl(r2, temp); - tcg_temp_free(temp); } static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, @@ -297,7 +268,6 @@ static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, tcg_gen_addi_tl(temp, r2, off); tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop); tcg_gen_mov_tl(r2, temp); - tcg_temp_free(temp); } /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */ @@ -317,9 +287,6 @@ static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea) tcg_gen_or_tl(temp, temp, temp2); /* M(EA, word) = temp; */ tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } /* tmp = M(EA, word); @@ -332,8 +299,6 @@ static void gen_swap(DisasContext *ctx, int reg, TCGv ea) tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL); tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL); tcg_gen_mov_tl(cpu_gpr_d[reg], temp); - - tcg_temp_free(temp); } static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea) @@ -345,9 +310,6 @@ static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea) cpu_gpr_d[reg], temp); tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL); tcg_gen_mov_tl(cpu_gpr_d[reg], temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea) @@ -362,10 +324,6 @@ static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea) tcg_gen_or_tl(temp2, temp2, temp3); tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL); tcg_gen_mov_tl(cpu_gpr_d[reg], temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); } @@ -447,9 +405,6 @@ static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, result); - - tcg_temp_free(result); - tcg_temp_free(t0); } static inline void @@ -476,11 +431,6 @@ gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ 
tcg_gen_mov_i64(ret, result); - - tcg_temp_free(temp); - tcg_temp_free_i64(result); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static inline void @@ -527,11 +477,6 @@ gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp); /* calc SAV bit */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free(temp4); } /* ret = r2 + (r1 * r3); */ @@ -564,17 +509,12 @@ static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_madd32_d(ret, r1, r2, temp); - tcg_temp_free(temp); } static inline void @@ -603,11 +543,6 @@ gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, /* write back the result */ tcg_gen_mov_tl(ret_low, t3); tcg_gen_mov_tl(ret_high, t4); - - tcg_temp_free(t1); - tcg_temp_free(t2); - tcg_temp_free(t3); - tcg_temp_free(t4); } static inline void @@ -638,108 +573,98 @@ gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static inline void gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); + TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_extr_i64_i32(temp, temp2, temp64); gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2, tcg_gen_add_tl, tcg_gen_add_tl); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); + TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case 
MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_extr_i64_i32(temp, temp2, temp64); gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2, tcg_gen_sub_tl, tcg_gen_add_tl); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); TCGv_i64 temp64_3 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high); @@ -751,11 +676,6 @@ gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, gen_add64_d(temp64_2, temp64_3, temp64); /* write back result */ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); - tcg_temp_free_i64(temp64_3); } static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2); @@ -764,23 +684,24 @@ static inline void gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); + TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv temp3 = tcg_temp_new(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_extr_i64_i32(temp, temp2, temp64); @@ -792,12 +713,6 @@ gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); /* combine av bits */ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free_i64(temp64); - } static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2); @@ -806,23 +721,24 @@ static inline void gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); + TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv temp3 = tcg_temp_new(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + 
GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_extr_i64_i32(temp, temp2, temp64); @@ -834,34 +750,28 @@ gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); /* combine av bits */ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free_i64(temp64); - } static inline void gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */ @@ -872,10 +782,6 @@ gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); } @@ -883,89 +789,77 @@ static inline void gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); TCGv_i64 temp64_3 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n); break; } tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); gen_add64_d(temp64_3, temp64_2, temp64); /* write back result */ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); - tcg_temp_free_i64(temp64_3); } static inline void gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mulm_h, 
temp64, r2, r3, t_n); break; } tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); } static inline void gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); } static inline void @@ -977,38 +871,32 @@ gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); + TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_helper_addsur_h(ret, cpu_env, temp64, temp, temp2); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } @@ -1016,26 +904,23 @@ static inline void gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); } static inline void @@ -1047,54 +932,46 @@ gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = 
tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); + TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_helper_addsur_h_ssov(ret, cpu_env, temp64, temp, temp2); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) { - TCGv temp = tcg_const_i32(n); - gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, temp); - tcg_temp_free(temp); + TCGv t_n = tcg_constant_i32(n); + gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, t_n); } static inline void gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) { - TCGv temp = tcg_const_i32(n); - gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, temp); - tcg_temp_free(temp); + TCGv t_n = tcg_constant_i32(n); + gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, t_n); } static inline void @@ -1145,13 +1022,6 @@ gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static inline void @@ -1169,9 +1039,6 @@ gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) tcg_gen_sub_tl(temp, temp, temp2); } gen_add_d(ret, arg1, temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -1189,9 +1056,6 @@ gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) tcg_gen_sub_tl(temp, temp, temp2); } gen_adds(ret, arg1, temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -1219,12 +1083,6 @@ gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, gen_add64_d(t3, t1, t2); /* write back result */ tcg_gen_extr_i64_i32(rl, rh, t3); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -1251,11 +1109,6 @@ gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, gen_helper_add64_ssov(t1, cpu_env, t1, t2); tcg_gen_extr_i64_i32(rl, rh, t1); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } static inline void @@ -1294,9 +1147,6 @@ gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_shli_tl(temp, temp, 31); /* negate v bit, if special condition */ tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } /* write back result */ tcg_gen_extr_i64_i32(rl, rh, t4); @@ -1307,11 +1157,6 @@ gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); - tcg_temp_free_i64(t4); } static inline void @@ -1330,10 +1175,6 @@ gen_madds32_q(TCGv 
ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, tcg_gen_sari_i64(t2, t2, up_shift - n); gen_helper_madd32_q_add_ssov(ret, cpu_env, t1, t2); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static inline void @@ -1341,15 +1182,13 @@ gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, TCGv arg3, uint32_t n) { TCGv_i64 r1 = tcg_temp_new_i64(); - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high); - gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp); + gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, t_n); tcg_gen_extr_i64_i32(rl, rh, r1); - - tcg_temp_free_i64(r1); - tcg_temp_free(temp); } + /* ret = r2 - (r1 * r3); */ static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) { @@ -1381,17 +1220,12 @@ static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_msub32_d(ret, r1, r2, temp); - tcg_temp_free(temp); } static inline void @@ -1420,20 +1254,14 @@ gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, /* write back the result */ tcg_gen_mov_tl(ret_low, t3); tcg_gen_mov_tl(ret_high, t4); - - tcg_temp_free(t1); - tcg_temp_free(t2); - tcg_temp_free(t3); - tcg_temp_free(t4); } static inline void gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void @@ -1462,27 +1290,22 @@ gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static inline void gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2) { - TCGv temp = tcg_const_i32(r2); + TCGv temp = tcg_constant_i32(r2); gen_add_d(ret, r1, temp); - tcg_temp_free(temp); } + /* calculate the carry bit too */ static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2) { @@ -1505,16 +1328,12 @@ static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, result); - - tcg_temp_free(result); - tcg_temp_free(t0); } static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_add_CC(ret, r1, temp); - tcg_temp_free(temp); } static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2) @@ -1541,17 +1360,12 @@ static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, result); - - tcg_temp_free(result); - tcg_temp_free(t0); - tcg_temp_free(carry); } static inline void gen_addci_CC(TCGv ret, TCGv r1, 
int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_addc_CC(ret, r1, temp); - tcg_temp_free(temp); } static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, @@ -1561,7 +1375,7 @@ static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, TCGv temp2 = tcg_temp_new(); TCGv result = tcg_temp_new(); TCGv mask = tcg_temp_new(); - TCGv t0 = tcg_const_i32(0); + TCGv t0 = tcg_constant_i32(0); /* create mask for sticky bits */ tcg_gen_setcond_tl(cond, mask, r4, t0); @@ -1585,20 +1399,13 @@ static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV); /* write back result */ tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1); - - tcg_temp_free(t0); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(result); - tcg_temp_free(mask); } static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2, TCGv r3, TCGv r4) { - TCGv temp = tcg_const_i32(r2); + TCGv temp = tcg_constant_i32(r2); gen_cond_add(cond, r1, temp, r3, r4); - tcg_temp_free(temp); } static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2) @@ -1620,9 +1427,6 @@ static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, result); - - tcg_temp_free(temp); - tcg_temp_free(result); } static inline void @@ -1649,11 +1453,6 @@ gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_i64(ret, result); - - tcg_temp_free(temp); - tcg_temp_free_i64(result); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2) @@ -1677,9 +1476,6 @@ static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, result); - - tcg_temp_free(result); - tcg_temp_free(temp); } static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2) @@ -1687,7 +1483,6 @@ static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2) TCGv temp = tcg_temp_new(); tcg_gen_not_tl(temp, r2); gen_addc_CC(ret, r1, temp); - tcg_temp_free(temp); } static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, @@ -1697,7 +1492,7 @@ static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, TCGv temp2 = tcg_temp_new(); TCGv result = tcg_temp_new(); TCGv mask = tcg_temp_new(); - TCGv t0 = tcg_const_i32(0); + TCGv t0 = tcg_constant_i32(0); /* create mask for sticky bits */ tcg_gen_setcond_tl(cond, mask, r4, t0); @@ -1721,64 +1516,57 @@ static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV); /* write back result */ tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1); - - tcg_temp_free(t0); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(result); - tcg_temp_free(mask); } static inline void gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); + TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, 
temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_extr_i64_i32(temp, temp2, temp64); gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2, tcg_gen_sub_tl, tcg_gen_sub_tl); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); + TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv temp3 = tcg_temp_new(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_extr_i64_i32(temp, temp2, temp64); @@ -1790,100 +1578,83 @@ gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); /* combine av bits */ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free_i64(temp64); } static inline void gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); TCGv_i64 temp64_3 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n); break; } tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); gen_sub64_d(temp64_3, temp64_2, temp64); /* write back result */ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); - tcg_temp_free_i64(temp64_3); } static inline void gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n); break; } tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - - 
tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); } static inline void gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); } static inline void @@ -1895,35 +1666,29 @@ gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); } static inline void @@ -1935,33 +1700,26 @@ gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) { - TCGv temp = tcg_const_i32(n); + TCGv temp = tcg_constant_i32(n); gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp); - tcg_temp_free(temp); } static inline void gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) { - TCGv temp = tcg_const_i32(n); + TCGv temp = tcg_constant_i32(n); gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, temp); - tcg_temp_free(temp); } static inline void gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, uint32_t up_shift) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); TCGv temp3 = tcg_temp_new(); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); @@ -1997,14 +1755,6 @@ gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); - tcg_temp_free_i64(t4); } static inline void @@ -2022,9 +1772,6 @@ gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) tcg_gen_sub_tl(temp, temp, temp2); } gen_sub_d(ret, arg1, temp); - - 
tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -2042,9 +1789,6 @@ gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) tcg_gen_sub_tl(temp, temp, temp2); } gen_subs(ret, arg1, temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -2072,12 +1816,6 @@ gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, gen_sub64_d(t3, t1, t2); /* write back result */ tcg_gen_extr_i64_i32(rl, rh, t3); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -2104,11 +1842,6 @@ gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, gen_helper_sub64_ssov(t1, cpu_env, t1, t2); tcg_gen_extr_i64_i32(rl, rh, t1); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } static inline void @@ -2147,9 +1880,6 @@ gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_shli_tl(temp, temp, 31); /* negate v bit, if special condition */ tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } /* write back result */ tcg_gen_extr_i64_i32(rl, rh, t4); @@ -2160,11 +1890,6 @@ gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); - tcg_temp_free_i64(t4); } static inline void @@ -2188,11 +1913,6 @@ gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, tcg_gen_add_i64(t3, t3, t4); gen_helper_msub32_q_sub_ssov(ret, cpu_env, t1, t3); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); - tcg_temp_free_i64(t4); } static inline void @@ -2200,65 +1920,60 @@ gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, TCGv arg3, uint32_t n) { TCGv_i64 r1 = tcg_temp_new_i64(); - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high); - gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp); + gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, t_n); tcg_gen_extr_i64_i32(rl, rh, r1); - - tcg_temp_free_i64(r1); - tcg_temp_free(temp); } static inline void gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); + TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_extr_i64_i32(temp, temp2, temp64); gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2, tcg_gen_add_tl, tcg_gen_sub_tl); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); TCGv_i64 temp64 = 
tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); TCGv_i64 temp64_3 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high); @@ -2270,63 +1985,56 @@ gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, gen_sub64_d(temp64_2, temp64_3, temp64); /* write back result */ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); - tcg_temp_free_i64(temp64_3); } static inline void gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); + TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_helper_subadr_h(ret, cpu_env, temp64, temp, temp2); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); + TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv temp3 = tcg_temp_new(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_extr_i64_i32(temp, temp2, temp64); @@ -2338,33 +2046,28 @@ gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); /* combine av bits */ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free_i64(temp64); } static inline void gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, 
t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */ @@ -2375,39 +2078,32 @@ gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); } static inline void gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) { - TCGv temp = tcg_const_i32(n); + TCGv t_n = tcg_constant_i32(n); + TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); break; case MODE_LU: - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); break; case MODE_UL: - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); break; case MODE_UU: - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_helper_subadr_h_ssov(ret, cpu_env, temp64, temp, temp2); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void gen_abs(TCGv ret, TCGv r1) @@ -2449,23 +2145,18 @@ static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, result); - - tcg_temp_free(temp); - tcg_temp_free(result); } static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_absdif(ret, r1, temp); - tcg_temp_free(temp); } static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_helper_absdif_ssov(ret, cpu_env, r1, temp); - tcg_temp_free(temp); } static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2) @@ -2486,16 +2177,12 @@ static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2) tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc SAV bit */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free(high); - tcg_temp_free(low); } static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_mul_i32s(ret, r1, temp); - tcg_temp_free(temp); } static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) @@ -2515,9 +2202,8 @@ static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_mul_i64s(ret_low, ret_high, r1, temp); - tcg_temp_free(temp); } static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) @@ -2537,43 +2223,38 @@ static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_mul_i64u(ret_low, ret_high, r1, temp); - tcg_temp_free(temp); } static inline void 
gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_helper_mul_ssov(ret, cpu_env, r1, temp); - tcg_temp_free(temp); } static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_helper_mul_suov(ret, cpu_env, r1, temp); - tcg_temp_free(temp); } + /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */ static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp); - tcg_temp_free(temp); } static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp); - tcg_temp_free(temp); } static void gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift) { - TCGv temp = tcg_temp_new(); TCGv_i64 temp_64 = tcg_temp_new_i64(); TCGv_i64 temp2_64 = tcg_temp_new_i64(); @@ -2626,9 +2307,6 @@ gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift) } /* calc sav overflow bit */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - tcg_temp_free(temp); - tcg_temp_free_i64(temp_64); - tcg_temp_free_i64(temp2_64); } static void @@ -2651,8 +2329,6 @@ gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc sav overflow bit */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free(temp); } static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) @@ -2679,8 +2355,6 @@ static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* cut halfword off */ tcg_gen_andi_tl(ret, ret, 0xffff0000); - - tcg_temp_free(temp); } static inline void @@ -2691,16 +2365,14 @@ gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, r3); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - tcg_temp_free_i64(temp64); } static inline void gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void @@ -2711,30 +2383,26 @@ gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, r3); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - tcg_temp_free_i64(temp64); } static inline void gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp); - tcg_temp_free(temp); } static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp); - tcg_temp_free(temp); } static 
inline void @@ -2745,16 +2413,14 @@ gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, r3); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - tcg_temp_free_i64(temp64); } static inline void gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void @@ -2765,39 +2431,25 @@ gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, r3); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - tcg_temp_free_i64(temp64); } static inline void gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low) { - TCGv sat_neg = tcg_const_i32(low); - TCGv temp = tcg_const_i32(up); - - /* sat_neg = (arg < low ) ? low : arg; */ - tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg); - - /* ret = (sat_neg > up ) ? up : sat_neg; */ - tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg); - - tcg_temp_free(sat_neg); - tcg_temp_free(temp); + tcg_gen_smax_tl(ret, arg, tcg_constant_i32(low)); + tcg_gen_smin_tl(ret, ret, tcg_constant_i32(up)); } static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up) { - TCGv temp = tcg_const_i32(up); - /* sat_neg = (arg > up ) ? 
up : arg; */ - tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg); - tcg_temp_free(temp); + tcg_gen_umin_tl(ret, arg, tcg_constant_i32(up)); } static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count) @@ -2826,9 +2478,6 @@ static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount) gen_shi(temp_low, temp_low, shiftcount); gen_shi(ret, temp_high, shiftcount); tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16); - - tcg_temp_free(temp_low); - tcg_temp_free(temp_high); } } @@ -2837,7 +2486,6 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count) uint32_t msk, msk_start; TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); - TCGv t_0 = tcg_const_i32(0); if (shift_count == 0) { /* Clear PSW.C and PSW.V */ @@ -2852,8 +2500,8 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count) /* clear PSW.V */ tcg_gen_movi_tl(cpu_PSW_V, 0); } else if (shift_count > 0) { - TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count); - TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count); + TCGv t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count); + TCGv t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count); /* calc carry */ msk_start = 32 - shift_count; @@ -2868,9 +2516,6 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count) tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV); /* do shift */ tcg_gen_shli_tl(ret, r1, shift_count); - - tcg_temp_free(t_max); - tcg_temp_free(t_min); } else { /* clear PSW.V */ tcg_gen_movi_tl(cpu_PSW_V, 0); @@ -2885,10 +2530,6 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count) tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc sav overflow bit */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(t_0); } static void gen_shas(TCGv ret, TCGv r1, TCGv r2) @@ -2898,9 +2539,8 @@ static void gen_shas(TCGv ret, TCGv r1, TCGv r2) static void gen_shasi(TCGv ret, TCGv r1, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_shas(ret, r1, temp); - tcg_temp_free(temp); } static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count) @@ -2917,9 +2557,6 @@ static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count) tcg_gen_shli_tl(low, r1, shift_count); tcg_gen_shli_tl(ret, high, shift_count); tcg_gen_deposit_tl(ret, ret, low, 0, 16); - - tcg_temp_free(low); - tcg_temp_free(high); } else { low = tcg_temp_new(); high = tcg_temp_new(); @@ -2928,11 +2565,7 @@ static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count) tcg_gen_sari_tl(low, low, -shift_count); tcg_gen_sari_tl(ret, r1, -shift_count); tcg_gen_deposit_tl(ret, ret, low, 0, 16); - - tcg_temp_free(low); - tcg_temp_free(high); } - } /* ret = {ret[30:0], (r1 cond r2)}; */ @@ -2944,16 +2577,12 @@ static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2) tcg_gen_shli_tl(temp, ret, 1); tcg_gen_setcond_tl(cond, temp2, r1, r2); tcg_gen_or_tl(ret, temp, temp2); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_sh_cond(cond, ret, r1, temp); - tcg_temp_free(temp); } static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2) @@ -2963,16 +2592,14 @@ static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2) static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_helper_add_ssov(ret, cpu_env, r1, temp); - tcg_temp_free(temp); } static inline void 
gen_addsui(TCGv ret, TCGv r1, int32_t con) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_helper_add_suov(ret, cpu_env, r1, temp); - tcg_temp_free(temp); } static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2) @@ -3002,9 +2629,6 @@ static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2, (*op2)(temp1 , ret, temp1); tcg_gen_deposit_tl(ret, ret, temp1, 0, 1); - - tcg_temp_free(temp1); - tcg_temp_free(temp2); } /* ret = r1[pos1] op1 r2[pos2]; */ @@ -3023,9 +2647,6 @@ static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2, (*op1)(ret, temp1, temp2); tcg_gen_andi_tl(ret, ret, 0x1); - - tcg_temp_free(temp1); - tcg_temp_free(temp2); } static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2, @@ -3041,18 +2662,14 @@ static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2, (*op)(temp, temp, temp2); /* ret = {ret[31:1], temp} */ tcg_gen_deposit_tl(ret, ret, temp, 0, 1); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con, void(*op)(TCGv, TCGv, TCGv)) { - TCGv temp = tcg_const_i32(con); + TCGv temp = tcg_constant_i32(con); gen_accumulating_cond(cond, ret, r1, temp, op); - tcg_temp_free(temp); } /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/ @@ -3089,11 +2706,6 @@ static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con) tcg_gen_or_tl(ret, b0, b1); tcg_gen_or_tl(ret, ret, b2); tcg_gen_or_tl(ret, ret, b3); - - tcg_temp_free(b0); - tcg_temp_free(b1); - tcg_temp_free(b2); - tcg_temp_free(b3); } static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con) @@ -3111,10 +2723,8 @@ static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con) /* combine them */ tcg_gen_or_tl(ret, h0, h1); - - tcg_temp_free(h0); - tcg_temp_free(h1); } + /* mask = ((1 << width) -1) << pos; ret = (r1 & ~mask) | (r2 << pos) & mask); */ static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos) @@ -3132,10 +2742,6 @@ static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos) tcg_gen_and_tl(temp, temp, mask); tcg_gen_andc_tl(temp2, r1, mask); tcg_gen_or_tl(ret, temp, temp2); - - tcg_temp_free(mask); - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1) @@ -3144,8 +2750,6 @@ static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1) gen_helper_bsplit(temp, r1); tcg_gen_extr_i64_i32(rl, rh, temp); - - tcg_temp_free_i64(temp); } static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1) @@ -3154,8 +2758,6 @@ static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1) gen_helper_unpack(temp, r1); tcg_gen_extr_i64_i32(rl, rh, temp); - - tcg_temp_free_i64(temp); } static inline void @@ -3169,8 +2771,6 @@ gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2) gen_helper_dvinit_b_131(ret, cpu_env, r1, r2); } tcg_gen_extr_i64_i32(rl, rh, ret); - - tcg_temp_free_i64(ret); } static inline void @@ -3184,8 +2784,6 @@ gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2) gen_helper_dvinit_h_131(ret, cpu_env, r1, r2); } tcg_gen_extr_i64_i32(rl, rh, ret); - - tcg_temp_free_i64(ret); } static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high) @@ -3200,7 +2798,6 @@ static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high) /* calc SAV bit */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); tcg_gen_movi_tl(cpu_PSW_V, 0); - tcg_temp_free(temp); } static void gen_calc_usb_mulr_h(TCGv arg) @@ -3215,7 +2812,6 @@ static void gen_calc_usb_mulr_h(TCGv arg) 
tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* clear V bit */ tcg_gen_movi_tl(cpu_PSW_V, 0); - tcg_temp_free(temp); } /* helpers for generating program flow micro-ops */ @@ -3239,15 +2835,12 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) static void generate_trap(DisasContext *ctx, int class, int tin) { - TCGv_i32 classtemp = tcg_const_i32(class); - TCGv_i32 tintemp = tcg_const_i32(tin); + TCGv_i32 classtemp = tcg_constant_i32(class); + TCGv_i32 tintemp = tcg_constant_i32(tin); gen_save_pc(ctx->base.pc_next); gen_helper_raise_exception_sync(cpu_env, classtemp, tintemp); ctx->base.is_jmp = DISAS_NORETURN; - - tcg_temp_free(classtemp); - tcg_temp_free(tintemp); } static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1, @@ -3265,9 +2858,8 @@ static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1, static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1, int r2, int16_t address) { - TCGv temp = tcg_const_i32(r2); + TCGv temp = tcg_constant_i32(r2); gen_branch_cond(ctx, cond, r1, temp, address); - tcg_temp_free(temp); } static void gen_loop(DisasContext *ctx, int r1, int32_t offset) @@ -3289,8 +2881,6 @@ static void gen_fcall_save_ctx(DisasContext *ctx) tcg_gen_qemu_st_tl(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL); tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn); tcg_gen_mov_tl(cpu_gpr_a[10], temp); - - tcg_temp_free(temp); } static void gen_fret(DisasContext *ctx) @@ -3303,8 +2893,6 @@ static void gen_fret(DisasContext *ctx) tcg_gen_mov_tl(cpu_PC, temp); tcg_gen_exit_tb(NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; - - tcg_temp_free(temp); } static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, @@ -3350,13 +2938,11 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, temp = tcg_temp_new(); tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant); gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); - tcg_temp_free(temp); break; case OPC1_16_SBRN_JNZ_T: temp = tcg_temp_new(); tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant); gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); - tcg_temp_free(temp); break; /* SBR-format jumps */ case OPC1_16_SBR_JEQ: @@ -3474,7 +3060,6 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset); } - tcg_temp_free(temp); break; /* BRN format */ case OPCM_32_BRN_JTT: @@ -3488,7 +3073,6 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, } else { gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); } - tcg_temp_free(temp); break; /* BRR Format */ case OPCM_32_BRR_EQ_NEQ: @@ -3553,8 +3137,6 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset); } - tcg_temp_free(temp); - tcg_temp_free(temp2); break; case OPCM_32_BRR_JNZ: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JNZ_A) { @@ -3605,20 +3187,16 @@ static void decode_src_opc(DisasContext *ctx, int op1) cpu_gpr_d[15]); break; case OPC1_16_SRC_CMOV: - temp = tcg_const_tl(0); - temp2 = tcg_const_tl(const4); + temp = tcg_constant_tl(0); + temp2 = tcg_constant_tl(const4); tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, temp2, cpu_gpr_d[r1]); - tcg_temp_free(temp); - tcg_temp_free(temp2); break; case OPC1_16_SRC_CMOVN: - temp = tcg_const_tl(0); - temp2 = tcg_const_tl(const4); + temp = 
tcg_constant_tl(0); + temp2 = tcg_constant_tl(const4); tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, temp2, cpu_gpr_d[r1]); - tcg_temp_free(temp); - tcg_temp_free(temp2); break; case OPC1_16_SRC_EQ: tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], @@ -3682,16 +3260,14 @@ static void decode_srr_opc(DisasContext *ctx, int op1) tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC1_16_SRR_CMOV: - temp = tcg_const_tl(0); + temp = tcg_constant_tl(0); tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, cpu_gpr_d[r2], cpu_gpr_d[r1]); - tcg_temp_free(temp); break; case OPC1_16_SRR_CMOVN: - temp = tcg_const_tl(0); + temp = tcg_constant_tl(0); tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, cpu_gpr_d[r2], cpu_gpr_d[r1]); - tcg_temp_free(temp); break; case OPC1_16_SRR_EQ: tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], @@ -3931,17 +3507,14 @@ static void decode_sr_accu(DisasContext *ctx) { uint32_t op2; uint32_t r1; - TCGv temp; r1 = MASK_OP_SR_S1D(ctx->opcode); op2 = MASK_OP_SR_OP2(ctx->opcode); switch (op2) { case OPC2_16_SR_RSUB: - /* overflow only if r1 = -0x80000000 */ - temp = tcg_const_i32(-0x80000000); - /* calc V bit */ - tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp); + /* calc V bit -- overflow only if r1 = -0x80000000 */ + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], -0x80000000); tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); /* calc SV bit */ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); @@ -3952,7 +3525,6 @@ static void decode_sr_accu(DisasContext *ctx) tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV); /* calc sav */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - tcg_temp_free(temp); break; case OPC2_16_SR_SAT_B: gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80); @@ -4047,7 +3619,6 @@ static void decode_16Bit_opc(DisasContext *ctx) temp = tcg_temp_new(); tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16); tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp); - tcg_temp_free(temp); break; /* SLRO-format */ case OPC1_16_SLRO_LD_A: @@ -4219,7 +3790,7 @@ static void decode_abs_ldw(DisasContext *ctx) address = MASK_OP_ABS_OFF18(ctx->opcode); op2 = MASK_OP_ABS_OP2(ctx->opcode); - temp = tcg_const_i32(EA_ABS_FORMAT(address)); + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); switch (op2) { case OPC2_32_ABS_LD_A: @@ -4239,8 +3810,6 @@ static void decode_abs_ldw(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - - tcg_temp_free(temp); } static void decode_abs_ldb(DisasContext *ctx) @@ -4254,7 +3823,7 @@ static void decode_abs_ldb(DisasContext *ctx) address = MASK_OP_ABS_OFF18(ctx->opcode); op2 = MASK_OP_ABS_OP2(ctx->opcode); - temp = tcg_const_i32(EA_ABS_FORMAT(address)); + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); switch (op2) { case OPC2_32_ABS_LD_B: @@ -4272,8 +3841,6 @@ static void decode_abs_ldb(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - - tcg_temp_free(temp); } static void decode_abs_ldst_swap(DisasContext *ctx) @@ -4287,7 +3854,7 @@ static void decode_abs_ldst_swap(DisasContext *ctx) address = MASK_OP_ABS_OFF18(ctx->opcode); op2 = MASK_OP_ABS_OP2(ctx->opcode); - temp = tcg_const_i32(EA_ABS_FORMAT(address)); + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); switch (op2) { case OPC2_32_ABS_LDMST: @@ -4299,8 +3866,6 @@ static void decode_abs_ldst_swap(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - - tcg_temp_free(temp); } static void 
decode_abs_ldst_context(DisasContext *ctx) @@ -4340,7 +3905,7 @@ static void decode_abs_store(DisasContext *ctx) address = MASK_OP_ABS_OFF18(ctx->opcode); op2 = MASK_OP_ABS_OP2(ctx->opcode); - temp = tcg_const_i32(EA_ABS_FORMAT(address)); + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); switch (op2) { case OPC2_32_ABS_ST_A: @@ -4360,7 +3925,6 @@ static void decode_abs_store(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); } static void decode_abs_storeb_h(DisasContext *ctx) @@ -4374,7 +3938,7 @@ static void decode_abs_storeb_h(DisasContext *ctx) address = MASK_OP_ABS_OFF18(ctx->opcode); op2 = MASK_OP_ABS_OP2(ctx->opcode); - temp = tcg_const_i32(EA_ABS_FORMAT(address)); + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); switch (op2) { case OPC2_32_ABS_ST_B: @@ -4386,7 +3950,6 @@ static void decode_abs_storeb_h(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); } /* Bit-format */ @@ -4486,7 +4049,6 @@ static void decode_bit_insert(DisasContext *ctx) tcg_gen_not_tl(temp, temp); } tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1); - tcg_temp_free(temp); } static void decode_bit_logical_t2(DisasContext *ctx) @@ -4604,7 +4166,6 @@ static void decode_bit_sh_logic1(DisasContext *ctx) } tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1); tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp); - tcg_temp_free(temp); } static void decode_bit_sh_logic2(DisasContext *ctx) @@ -4645,7 +4206,6 @@ static void decode_bit_sh_logic2(DisasContext *ctx) } tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1); tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp); - tcg_temp_free(temp); } /* BO-format */ @@ -4743,7 +4303,6 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); tcg_gen_mov_tl(cpu_gpr_a[r2], temp); - tcg_temp_free(temp); break; case OPC2_32_BO_ST_DA_SHORTOFF: CHECK_REG_PAIR(r1); @@ -4761,7 +4320,6 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); tcg_gen_mov_tl(cpu_gpr_a[r2], temp); - tcg_temp_free(temp); break; case OPC2_32_BO_ST_H_SHORTOFF: gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); @@ -4778,7 +4336,6 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) temp = tcg_temp_new(); tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW); - tcg_temp_free(temp); break; case OPC2_32_BO_ST_Q_POSTINC: temp = tcg_temp_new(); @@ -4786,13 +4343,11 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); - tcg_temp_free(temp); break; case OPC2_32_BO_ST_Q_PREINC: temp = tcg_temp_new(); tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW); - tcg_temp_free(temp); break; case OPC2_32_BO_ST_W_SHORTOFF: gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL); @@ -4815,7 +4370,7 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) uint32_t op2; uint32_t off10; int32_t r1, r2; - TCGv temp, temp2, temp3; + TCGv temp, temp2, t_off10; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); @@ -4824,7 +4379,7 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) temp = 
tcg_temp_new(); temp2 = tcg_temp_new(); - temp3 = tcg_const_i32(off10); + t_off10 = tcg_constant_i32(off10); CHECK_REG_PAIR(r2); tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]); tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); @@ -4838,7 +4393,7 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) case OPC2_32_BO_CACHEA_WI_CIRC: case OPC2_32_BO_CACHEA_W_CIRC: case OPC2_32_BO_CACHEA_I_CIRC: - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_ST_A_BR: tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); @@ -4846,7 +4401,7 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_ST_A_CIRC: tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_ST_B_BR: tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); @@ -4854,7 +4409,7 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_ST_B_CIRC: tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_ST_D_BR: CHECK_REG_PAIR(r1); @@ -4869,7 +4424,7 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) tcg_gen_rem_tl(temp, temp, temp2); tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_ST_DA_BR: CHECK_REG_PAIR(r1); @@ -4884,7 +4439,7 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) tcg_gen_rem_tl(temp, temp, temp2); tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_ST_H_BR: tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); @@ -4892,7 +4447,7 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_ST_H_CIRC: tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_ST_Q_BR: tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); @@ -4902,7 +4457,7 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) case OPC2_32_BO_ST_Q_CIRC: tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_ST_W_BR: tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); @@ -4910,14 +4465,11 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_ST_W_CIRC: tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; default: generate_trap(ctx, 
TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); } static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) @@ -4982,7 +4534,6 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); tcg_gen_mov_tl(cpu_gpr_a[r2], temp); - tcg_temp_free(temp); break; case OPC2_32_BO_LD_DA_SHORTOFF: CHECK_REG_PAIR(r1); @@ -5000,7 +4551,6 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); tcg_gen_mov_tl(cpu_gpr_a[r2], temp); - tcg_temp_free(temp); break; case OPC2_32_BO_LD_H_SHORTOFF: gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW); @@ -5059,8 +4609,7 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) uint32_t op2; uint32_t off10; int r1, r2; - - TCGv temp, temp2, temp3; + TCGv temp, temp2, t_off10; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); @@ -5069,7 +4618,7 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) temp = tcg_temp_new(); temp2 = tcg_temp_new(); - temp3 = tcg_const_i32(off10); + t_off10 = tcg_constant_i32(off10); CHECK_REG_PAIR(r2); tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]); tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); @@ -5082,7 +4631,7 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_LD_A_CIRC: tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_LD_B_BR: tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); @@ -5090,7 +4639,7 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_LD_B_CIRC: tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_LD_BU_BR: tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); @@ -5098,7 +4647,7 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_LD_BU_CIRC: tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_LD_D_BR: CHECK_REG_PAIR(r1); @@ -5113,7 +4662,7 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) tcg_gen_rem_tl(temp, temp, temp2); tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_LD_DA_BR: CHECK_REG_PAIR(r1); @@ -5128,7 +4677,7 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) tcg_gen_rem_tl(temp, temp, temp2); tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_LD_H_BR: tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); 
@@ -5136,7 +4685,7 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_LD_H_CIRC: tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_LD_HU_BR: tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); @@ -5144,7 +4693,7 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_LD_HU_CIRC: tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_LD_Q_BR: tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); @@ -5154,7 +4703,7 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) case OPC2_32_BO_LD_Q_CIRC: tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_LD_W_BR: tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); @@ -5162,14 +4711,11 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_LD_W_CIRC: tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); } static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx) @@ -5178,7 +4724,7 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx) uint32_t off10; int r1, r2; - TCGv temp, temp2; + TCGv temp; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); @@ -5187,7 +4733,6 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx) temp = tcg_temp_new(); - temp2 = tcg_temp_new(); switch (op2) { case OPC2_32_BO_LDLCX_SHORTOFF: @@ -5260,8 +4805,6 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); } static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) @@ -5269,8 +4812,7 @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) uint32_t op2; uint32_t off10; int r1, r2; - - TCGv temp, temp2, temp3; + TCGv temp, temp2, t_off10; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); @@ -5279,7 +4821,7 @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) temp = tcg_temp_new(); temp2 = tcg_temp_new(); - temp3 = tcg_const_i32(off10); + t_off10 = tcg_constant_i32(off10); CHECK_REG_PAIR(r2); tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]); tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); @@ -5291,7 +4833,7 @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_LDMST_CIRC: gen_ldmst(ctx, r1, temp2); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_SWAP_W_BR: gen_swap(ctx, r1, temp2); @@ -5299,7 +4841,7 @@ static void 
decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_SWAP_W_CIRC: gen_swap(ctx, r1, temp2); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_CMPSWAP_W_BR: gen_cmpswap(ctx, r1, temp2); @@ -5307,7 +4849,7 @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_CMPSWAP_W_CIRC: gen_cmpswap(ctx, r1, temp2); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; case OPC2_32_BO_SWAPMSK_W_BR: gen_swapmsk(ctx, r1, temp2); @@ -5315,15 +4857,11 @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) break; case OPC2_32_BO_SWAPMSK_W_CIRC: gen_swapmsk(ctx, r1, temp2); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); } static void decode_bol_opc(DisasContext *ctx, int32_t op1) @@ -5341,13 +4879,11 @@ static void decode_bol_opc(DisasContext *ctx, int32_t op1) temp = tcg_temp_new(); tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address); tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL); - tcg_temp_free(temp); break; case OPC1_32_BOL_LD_W_LONGOFF: temp = tcg_temp_new(); tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address); tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL); - tcg_temp_free(temp); break; case OPC1_32_BOL_LEA_LONGOFF: tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address); @@ -5474,7 +5010,6 @@ static void decode_rc_logical_shift(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); } static void decode_rc_accumulator(DisasContext *ctx) @@ -5674,7 +5209,6 @@ static void decode_rc_accumulator(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); } static void decode_rc_serviceroutine(DisasContext *ctx) @@ -5762,9 +5296,8 @@ static void decode_rcpw_insert(DisasContext *ctx) case OPC2_32_RCPW_INSERT: /* if pos + width > 32 undefined result */ if (pos + width <= 32) { - temp = tcg_const_i32(const4); + temp = tcg_constant_i32(const4); tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width); - tcg_temp_free(temp); } break; default: @@ -5807,14 +5340,10 @@ static void decode_rcrw_insert(DisasContext *ctx) tcg_gen_movi_tl(temp2, const4); tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f); gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], temp2, temp, temp3); - - tcg_temp_free(temp3); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); } /* RCR format */ @@ -5843,20 +5372,16 @@ static void decode_rcr_cond_select(DisasContext *ctx) cpu_gpr_d[r3]); break; case OPC2_32_RCR_SEL: - temp = tcg_const_i32(0); - temp2 = tcg_const_i32(const9); + temp = tcg_constant_i32(0); + temp2 = tcg_constant_i32(const9); tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp2); - tcg_temp_free(temp); - tcg_temp_free(temp2); break; case OPC2_32_RCR_SELN: - temp = tcg_const_i32(0); - temp2 = tcg_const_i32(const9); + temp = tcg_constant_i32(0); + temp2 = tcg_constant_i32(const9); tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp2); - tcg_temp_free(temp); - tcg_temp_free(temp2); break; 
default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -6236,8 +5761,6 @@ static void decode_rr_accumulator(DisasContext *ctx) tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]); tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp); - - tcg_temp_free(temp); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } @@ -6377,13 +5900,10 @@ static void decode_rr_logical_shift(DisasContext *ctx) { uint32_t op2; int r3, r2, r1; - TCGv temp; r3 = MASK_OP_RR_D(ctx->opcode); r2 = MASK_OP_RR_S2(ctx->opcode); r1 = MASK_OP_RR_S1(ctx->opcode); - - temp = tcg_temp_new(); op2 = MASK_OP_RR_OP2(ctx->opcode); switch (op2) { @@ -6448,7 +5968,6 @@ static void decode_rr_logical_shift(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); } static void decode_rr_address(DisasContext *ctx) @@ -6471,14 +5990,12 @@ static void decode_rr_address(DisasContext *ctx) temp = tcg_temp_new(); tcg_gen_shli_tl(temp, cpu_gpr_d[r1], n); tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r2], temp); - tcg_temp_free(temp); break; case OPC2_32_RR_ADDSC_AT: temp = tcg_temp_new(); tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 3); tcg_gen_add_tl(temp, cpu_gpr_a[r2], temp); tcg_gen_andi_tl(cpu_gpr_a[r3], temp, 0xFFFFFFFC); - tcg_temp_free(temp); break; case OPC2_32_RR_EQ_A: tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], @@ -6598,10 +6115,6 @@ static void decode_rr_divide(DisasContext *ctx) /* write result */ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24); tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); break; case OPC2_32_RR_DVINIT_H: CHECK_REG_PAIR(r3); @@ -6631,9 +6144,6 @@ static void decode_rr_divide(DisasContext *ctx) /* write result */ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 16); tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); break; case OPC2_32_RR_DVINIT: temp = tcg_temp_new(); @@ -6655,8 +6165,6 @@ static void decode_rr_divide(DisasContext *ctx) tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); /* sign extend to high reg */ tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31); - tcg_temp_free(temp); - tcg_temp_free(temp2); break; case OPC2_32_RR_DVINIT_U: /* overflow = (D[b] == 0) */ @@ -6748,7 +6256,7 @@ static void decode_rr1_mul(DisasContext *ctx) r1 = MASK_OP_RR1_S1(ctx->opcode); r2 = MASK_OP_RR1_S2(ctx->opcode); r3 = MASK_OP_RR1_D(ctx->opcode); - n = tcg_const_i32(MASK_OP_RR1_N(ctx->opcode)); + n = tcg_constant_i32(MASK_OP_RR1_N(ctx->opcode)); op2 = MASK_OP_RR1_OP2(ctx->opcode); switch (op2) { @@ -6758,7 +6266,6 @@ static void decode_rr1_mul(DisasContext *ctx) GEN_HELPER_LL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MUL_H_32_LU: temp64 = tcg_temp_new_i64(); @@ -6766,7 +6273,6 @@ static void decode_rr1_mul(DisasContext *ctx) GEN_HELPER_LU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MUL_H_32_UL: temp64 = tcg_temp_new_i64(); @@ -6774,7 +6280,6 @@ static void decode_rr1_mul(DisasContext *ctx) GEN_HELPER_UL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); - 
tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MUL_H_32_UU: temp64 = tcg_temp_new_i64(); @@ -6782,7 +6287,6 @@ static void decode_rr1_mul(DisasContext *ctx) GEN_HELPER_UU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MULM_H_64_LL: temp64 = tcg_temp_new_i64(); @@ -6793,7 +6297,6 @@ static void decode_rr1_mul(DisasContext *ctx) tcg_gen_movi_tl(cpu_PSW_V, 0); /* reset AV bit */ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MULM_H_64_LU: temp64 = tcg_temp_new_i64(); @@ -6804,7 +6307,6 @@ static void decode_rr1_mul(DisasContext *ctx) tcg_gen_movi_tl(cpu_PSW_V, 0); /* reset AV bit */ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MULM_H_64_UL: temp64 = tcg_temp_new_i64(); @@ -6815,7 +6317,6 @@ static void decode_rr1_mul(DisasContext *ctx) tcg_gen_movi_tl(cpu_PSW_V, 0); /* reset AV bit */ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MULM_H_64_UU: temp64 = tcg_temp_new_i64(); @@ -6826,8 +6327,7 @@ static void decode_rr1_mul(DisasContext *ctx) tcg_gen_movi_tl(cpu_PSW_V, 0); /* reset AV bit */ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MULR_H_16_LL: GEN_HELPER_LL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n); @@ -6848,7 +6347,6 @@ static void decode_rr1_mul(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(n); } static void decode_rr1_mulq(DisasContext *ctx) @@ -6918,8 +6416,6 @@ static void decode_rr1_mulq(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); } /* RR2 format */ @@ -7009,7 +6505,6 @@ static void decode_rrpw_extract_insert(DisasContext *ctx) tcg_gen_movi_tl(temp, ((1u << width) - 1) << pos); tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos); tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp); - tcg_temp_free(temp); } break; @@ -7055,16 +6550,14 @@ static void decode_rrr_cond_select(DisasContext *ctx) cpu_gpr_d[r3]); break; case OPC2_32_RRR_SEL: - temp = tcg_const_i32(0); + temp = tcg_constant_i32(0); tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2]); - tcg_temp_free(temp); break; case OPC2_32_RRR_SELN: - temp = tcg_const_i32(0); + temp = tcg_constant_i32(0); tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2]); - tcg_temp_free(temp); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -7414,7 +6907,7 @@ static void decode_rrr1_maddq_h(DisasContext *ctx) r4 = MASK_OP_RRR1_D(ctx->opcode); n = MASK_OP_RRR1_N(ctx->opcode); - temp = tcg_const_i32(n); + temp = tcg_temp_new(); temp2 = tcg_temp_new(); switch (op2) { @@ -7577,8 +7070,6 @@ static void decode_rrr1_maddq_h(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); } static void decode_rrr1_maddsu_h(DisasContext *ctx) @@ -7898,7 +7389,7 @@ static void decode_rrr1_msubq_h(DisasContext *ctx) r4 = MASK_OP_RRR1_D(ctx->opcode); n = MASK_OP_RRR1_N(ctx->opcode); - temp = tcg_const_i32(n); + temp = tcg_temp_new(); temp2 = tcg_temp_new(); switch (op2) { @@ -8061,8 +7552,6 @@ static void decode_rrr1_msubq_h(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - 
tcg_temp_free(temp); - tcg_temp_free(temp2); } static void decode_rrr1_msubad_h(DisasContext *ctx) @@ -8257,7 +7746,6 @@ static void decode_rrrr_extract_insert(DisasContext *ctx) */ tcg_gen_movcond_tl(TCG_COND_EQ, msw, tmp_pos, zero, zero, msw); tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, msw); - tcg_temp_free(msw); } break; case OPC2_32_RRRR_EXTR: @@ -8285,8 +7773,6 @@ static void decode_rrrr_extract_insert(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(tmp_pos); - tcg_temp_free(tmp_width); } /* RRRW format */ @@ -8332,8 +7818,6 @@ static void decode_rrrw_extract_insert(DisasContext *ctx) tcg_gen_shl_tl(temp2, temp2, temp); tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp); tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2); - - tcg_temp_free(temp2); break; case OPC2_32_RRRW_INSERT: temp2 = tcg_temp_new(); @@ -8341,13 +7825,10 @@ static void decode_rrrw_extract_insert(DisasContext *ctx) tcg_gen_movi_tl(temp, width); tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f); gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2); - - tcg_temp_free(temp2); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); } /* SYS Format*/ @@ -8400,7 +7881,6 @@ static void decode_sys_interrupts(DisasContext *ctx) gen_set_label(l1); tcg_gen_exit_tb(NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; - tcg_temp_free(tmp); } else { /* generate privilege trap */ } @@ -8477,24 +7957,19 @@ static void decode_32Bit_opc(DisasContext *ctx) case OPC1_32_ABS_STOREQ: address = MASK_OP_ABS_OFF18(ctx->opcode); r1 = MASK_OP_ABS_S1D(ctx->opcode); - temp = tcg_const_i32(EA_ABS_FORMAT(address)); + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); temp2 = tcg_temp_new(); tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16); tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW); - - tcg_temp_free(temp2); - tcg_temp_free(temp); break; case OPC1_32_ABS_LD_Q: address = MASK_OP_ABS_OFF18(ctx->opcode); r1 = MASK_OP_ABS_S1D(ctx->opcode); - temp = tcg_const_i32(EA_ABS_FORMAT(address)); + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); - - tcg_temp_free(temp); break; case OPC1_32_ABS_LEA: address = MASK_OP_ABS_OFF18(ctx->opcode); @@ -8507,16 +7982,13 @@ static void decode_32Bit_opc(DisasContext *ctx) b = MASK_OP_ABSB_B(ctx->opcode); bpos = MASK_OP_ABSB_BPOS(ctx->opcode); - temp = tcg_const_i32(EA_ABS_FORMAT(address)); + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); temp2 = tcg_temp_new(); tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB); tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos)); tcg_gen_ori_tl(temp2, temp2, (b << bpos)); tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB); - - tcg_temp_free(temp); - tcg_temp_free(temp2); break; /* B-format */ case OPC1_32_B_CALL: @@ -8637,7 +8109,7 @@ static void decode_32Bit_opc(DisasContext *ctx) r2 = MASK_OP_RCRR_S3(ctx->opcode); r3 = MASK_OP_RCRR_D(ctx->opcode); const16 = MASK_OP_RCRR_CONST4(ctx->opcode); - temp = tcg_const_i32(const16); + temp = tcg_constant_i32(const16); temp2 = tcg_temp_new(); /* width*/ temp3 = tcg_temp_new(); /* pos */ @@ -8647,10 +8119,6 @@ static void decode_32Bit_opc(DisasContext *ctx) tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f); gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); break; /* RCRW Format */ case OPCM_32_RCRW_MASK_INSERT: @@ -8881,7 +8349,7 @@ static const TranslatorOps tricore_tr_ops = { }; -void 
gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext ctx; diff --git a/target/xtensa/core-dc232b.c b/target/xtensa/core-dc232b.c index c982d09c24..9aba2667e3 100644 --- a/target/xtensa/core-dc232b.c +++ b/target/xtensa/core-dc232b.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "qemu/timer.h" diff --git a/target/xtensa/core-dc233c.c b/target/xtensa/core-dc233c.c index 595ab9a90f..9b0a625063 100644 --- a/target/xtensa/core-dc233c.c +++ b/target/xtensa/core-dc233c.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "core-dc233c/core-isa.h" diff --git a/target/xtensa/core-de212.c b/target/xtensa/core-de212.c index 50c995ba79..b08fe22e65 100644 --- a/target/xtensa/core-de212.c +++ b/target/xtensa/core-de212.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "core-de212/core-isa.h" diff --git a/target/xtensa/core-de233_fpu.c b/target/xtensa/core-de233_fpu.c index 41af8057fb..8845cdb592 100644 --- a/target/xtensa/core-de233_fpu.c +++ b/target/xtensa/core-de233_fpu.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "core-de233_fpu/core-isa.h" diff --git a/target/xtensa/core-dsp3400.c b/target/xtensa/core-dsp3400.c index 81e425c568..c0f94b9e27 100644 --- a/target/xtensa/core-dsp3400.c +++ b/target/xtensa/core-dsp3400.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "core-dsp3400/core-isa.h" diff --git a/target/xtensa/core-fsf.c b/target/xtensa/core-fsf.c index 3327c50b4f..310be8d61f 100644 --- a/target/xtensa/core-fsf.c +++ b/target/xtensa/core-fsf.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "core-fsf/core-isa.h" diff --git a/target/xtensa/core-lx106.c b/target/xtensa/core-lx106.c index 7a771d09a6..7f71d088f3 100644 --- a/target/xtensa/core-lx106.c +++ b/target/xtensa/core-lx106.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "core-lx106/core-isa.h" diff --git a/target/xtensa/core-sample_controller.c b/target/xtensa/core-sample_controller.c index fd5de5576b..8867001aac 100644 --- a/target/xtensa/core-sample_controller.c +++ b/target/xtensa/core-sample_controller.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "core-sample_controller/core-isa.h" diff --git a/target/xtensa/core-test_kc705_be.c b/target/xtensa/core-test_kc705_be.c index 294c16f2f4..bd082f49aa 100644 --- a/target/xtensa/core-test_kc705_be.c +++ b/target/xtensa/core-test_kc705_be.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "core-test_kc705_be/core-isa.h" diff --git a/target/xtensa/core-test_mmuhifi_c3.c b/target/xtensa/core-test_mmuhifi_c3.c 
index c0e5d32d1e..3090dd01ed 100644 --- a/target/xtensa/core-test_mmuhifi_c3.c +++ b/target/xtensa/core-test_mmuhifi_c3.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "core-test_mmuhifi_c3/core-isa.h" diff --git a/target/xtensa/cpu-param.h b/target/xtensa/cpu-param.h index b53e9a3e08..b1da0555de 100644 --- a/target/xtensa/cpu-param.h +++ b/target/xtensa/cpu-param.h @@ -16,6 +16,5 @@ #else #define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif -#define NB_MMU_MODES 4 #endif diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c index 2dc8f2d232..acaf8c905f 100644 --- a/target/xtensa/cpu.c +++ b/target/xtensa/cpu.c @@ -35,6 +35,9 @@ #include "qemu/module.h" #include "migration/vmstate.h" #include "hw/qdev-clock.h" +#ifndef CONFIG_USER_ONLY +#include "exec/memory.h" +#endif static void xtensa_cpu_set_pc(CPUState *cs, vaddr value) diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h index 579adcb769..b7a54711a6 100644 --- a/target/xtensa/cpu.h +++ b/target/xtensa/cpu.h @@ -576,9 +576,9 @@ void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr); +hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); #endif void xtensa_cpu_dump_state(CPUState *cpu, FILE *f, int flags); -hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void xtensa_count_regs(const XtensaConfig *config, unsigned *n_regs, unsigned *n_core_regs); int xtensa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); diff --git a/target/xtensa/gdbstub.c b/target/xtensa/gdbstub.c index b6696063e5..4b3bfb7e59 100644 --- a/target/xtensa/gdbstub.c +++ b/target/xtensa/gdbstub.c @@ -19,7 +19,7 @@ */ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/log.h" enum { diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c index 2aa9777a8e..dbeb97a953 100644 --- a/target/xtensa/helper.c +++ b/target/xtensa/helper.c @@ -29,7 +29,7 @@ #include "qemu/log.h" #include "cpu.h" #include "exec/exec-all.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "exec/helper-proto.h" #include "qemu/error-report.h" #include "qemu/qemu-print.h" diff --git a/target/xtensa/import_core.sh b/target/xtensa/import_core.sh index b4c15556c2..17dfec8957 100755 --- a/target/xtensa/import_core.sh +++ b/target/xtensa/import_core.sh @@ -41,7 +41,7 @@ tar -xf "$OVERLAY" -O binutils/xtensa-modules.c | \ cat < "${TARGET}.c" #include "qemu/osdep.h" #include "cpu.h" -#include "exec/gdbstub.h" +#include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "core-$NAME/core-isa.h" diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c index 77bcd71030..728aeebebf 100644 --- a/target/xtensa/translate.c +++ b/target/xtensa/translate.c @@ -57,7 +57,6 @@ struct DisasContext { bool sar_5bit; bool sar_m32_5bit; - bool sar_m32_allocated; TCGv_i32 sar_m32; unsigned window; @@ -284,14 +283,7 @@ static void init_sar_tracker(DisasContext *dc) { dc->sar_5bit = false; dc->sar_m32_5bit = false; - dc->sar_m32_allocated = false; -} - -static void reset_sar_tracker(DisasContext *dc) -{ - if (dc->sar_m32_allocated) { - tcg_temp_free(dc->sar_m32); - } + dc->sar_m32 = NULL; } static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa) @@ -306,9 +298,8 @@ static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa) static void 
gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa) { - if (!dc->sar_m32_allocated) { - dc->sar_m32 = tcg_temp_local_new_i32(); - dc->sar_m32_allocated = true; + if (!dc->sar_m32) { + dc->sar_m32 = tcg_temp_new_i32(); } tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f); tcg_gen_sub_i32(cpu_SR[SAR], tcg_constant_i32(32), dc->sar_m32); @@ -1074,10 +1065,10 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) if (i == 0 || arg_copy[i].resource != resource) { resource = arg_copy[i].resource; if (arg_copy[i].arg->num_bits <= 32) { - temp = tcg_temp_local_new_i32(); + temp = tcg_temp_new_i32(); tcg_gen_mov_i32(temp, arg_copy[i].arg->in); } else if (arg_copy[i].arg->num_bits <= 64) { - temp = tcg_temp_local_new_i64(); + temp = tcg_temp_new_i64(); tcg_gen_mov_i64(temp, arg_copy[i].arg->in); } else { g_assert_not_reached(); @@ -1111,16 +1102,6 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) ops->translate(dc, pslot->arg, ops->par); } - for (i = 0; i < n_arg_copy; ++i) { - if (arg_copy[i].arg->num_bits <= 32) { - tcg_temp_free_i32(arg_copy[i].temp); - } else if (arg_copy[i].arg->num_bits <= 64) { - tcg_temp_free_i64(arg_copy[i].temp); - } else { - g_assert_not_reached(); - } - } - if (dc->base.is_jmp == DISAS_NEXT) { gen_postprocess(dc, 0); dc->op_flags = 0; @@ -1187,7 +1168,7 @@ static void xtensa_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu) DisasContext *dc = container_of(dcbase, DisasContext, base); if (dc->icount) { - dc->next_icount = tcg_temp_local_new_i32(); + dc->next_icount = tcg_temp_new_i32(); } } @@ -1247,11 +1228,6 @@ static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); - reset_sar_tracker(dc); - if (dc->icount) { - tcg_temp_free(dc->next_icount); - } - switch (dc->base.is_jmp) { case DISAS_NORETURN: break; @@ -1279,7 +1255,7 @@ static const TranslatorOps xtensa_translator_ops = { .disas_log = xtensa_tr_disas_log, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc) { DisasContext dc = {}; @@ -1379,14 +1355,13 @@ static void translate_addx(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, arg[1].in, par[0]); tcg_gen_add_i32(arg[0].out, tmp, arg[2].in); - tcg_temp_free(tmp); } static void translate_all(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { uint32_t shift = par[1]; - TCGv_i32 mask = tcg_const_i32(((1 << shift) - 1) << arg[1].imm); + TCGv_i32 mask = tcg_constant_i32(((1 << shift) - 1) << arg[1].imm); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, arg[1].in, mask); @@ -1398,8 +1373,6 @@ static void translate_all(DisasContext *dc, const OpcodeArg arg[], tcg_gen_shri_i32(tmp, tmp, arg[1].imm + shift); tcg_gen_deposit_i32(arg[0].out, arg[0].out, tmp, arg[0].imm, 1); - tcg_temp_free(mask); - tcg_temp_free(tmp); } static void translate_and(DisasContext *dc, const OpcodeArg arg[], @@ -1414,7 +1387,6 @@ static void translate_ball(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, arg[0].in, arg[1].in); gen_brcond(dc, par[0], tmp, arg[1].in, arg[2].imm); - tcg_temp_free(tmp); } static void translate_bany(DisasContext *dc, const OpcodeArg arg[], @@ -1423,7 +1395,6 @@ static void translate_bany(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, arg[0].in, arg[1].in); 
gen_brcondi(dc, par[0], tmp, 0, arg[2].imm); - tcg_temp_free(tmp); } static void translate_b(DisasContext *dc, const OpcodeArg arg[], @@ -1435,22 +1406,16 @@ static void translate_b(DisasContext *dc, const OpcodeArg arg[], static void translate_bb(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { -#if TARGET_BIG_ENDIAN - TCGv_i32 bit = tcg_const_i32(0x80000000u); -#else - TCGv_i32 bit = tcg_const_i32(0x00000001u); -#endif TCGv_i32 tmp = tcg_temp_new_i32(); + tcg_gen_andi_i32(tmp, arg[1].in, 0x1f); -#if TARGET_BIG_ENDIAN - tcg_gen_shr_i32(bit, bit, tmp); -#else - tcg_gen_shl_i32(bit, bit, tmp); -#endif - tcg_gen_and_i32(tmp, arg[0].in, bit); + if (TARGET_BIG_ENDIAN) { + tcg_gen_shr_i32(tmp, tcg_constant_i32(0x80000000u), tmp); + } else { + tcg_gen_shl_i32(tmp, tcg_constant_i32(0x00000001u), tmp); + } + tcg_gen_and_i32(tmp, arg[0].in, tmp); gen_brcondi(dc, par[0], tmp, 0, arg[2].imm); - tcg_temp_free(tmp); - tcg_temp_free(bit); } static void translate_bbi(DisasContext *dc, const OpcodeArg arg[], @@ -1463,7 +1428,6 @@ static void translate_bbi(DisasContext *dc, const OpcodeArg arg[], tcg_gen_andi_i32(tmp, arg[0].in, 0x00000001u << arg[1].imm); #endif gen_brcondi(dc, par[0], tmp, 0, arg[2].imm); - tcg_temp_free(tmp); } static void translate_bi(DisasContext *dc, const OpcodeArg arg[], @@ -1504,8 +1468,6 @@ static void translate_boolean(DisasContext *dc, const OpcodeArg arg[], tcg_gen_shri_i32(tmp2, arg[2].in, arg[2].imm); op[par[0]](tmp1, tmp1, tmp2); tcg_gen_deposit_i32(arg[0].out, arg[0].out, tmp1, arg[0].imm, 1); - tcg_temp_free(tmp1); - tcg_temp_free(tmp2); } static void translate_bp(DisasContext *dc, const OpcodeArg arg[], @@ -1515,7 +1477,6 @@ static void translate_bp(DisasContext *dc, const OpcodeArg arg[], tcg_gen_andi_i32(tmp, arg[0].in, 1 << arg[0].imm); gen_brcondi(dc, par[0], tmp, 0, arg[1].imm); - tcg_temp_free(tmp); } static void translate_call0(DisasContext *dc, const OpcodeArg arg[], @@ -1528,9 +1489,8 @@ static void translate_call0(DisasContext *dc, const OpcodeArg arg[], static void translate_callw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 tmp = tcg_const_i32(arg[0].imm); + TCGv_i32 tmp = tcg_constant_i32(arg[0].imm); gen_callw_slot(dc, par[0], tmp, adjust_jump_slot(dc, arg[0].imm, 0)); - tcg_temp_free(tmp); } static void translate_callx0(DisasContext *dc, const OpcodeArg arg[], @@ -1540,7 +1500,6 @@ static void translate_callx0(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mov_i32(tmp, arg[0].in); tcg_gen_movi_i32(cpu_R[0], dc->base.pc_next); gen_jump(dc, tmp); - tcg_temp_free(tmp); } static void translate_callxw(DisasContext *dc, const OpcodeArg arg[], @@ -1550,19 +1509,16 @@ static void translate_callxw(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mov_i32(tmp, arg[0].in); gen_callw_slot(dc, par[0], tmp, -1); - tcg_temp_free(tmp); } static void translate_clamps(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 tmp1 = tcg_const_i32(-1u << arg[2].imm); - TCGv_i32 tmp2 = tcg_const_i32((1 << arg[2].imm) - 1); + TCGv_i32 tmp1 = tcg_constant_i32(-1u << arg[2].imm); + TCGv_i32 tmp2 = tcg_constant_i32((1 << arg[2].imm) - 1); - tcg_gen_smax_i32(tmp1, tmp1, arg[1].in); - tcg_gen_smin_i32(arg[0].out, tmp1, tmp2); - tcg_temp_free(tmp1); - tcg_temp_free(tmp2); + tcg_gen_smax_i32(arg[0].out, tmp1, arg[1].in); + tcg_gen_smin_i32(arg[0].out, arg[0].out, tmp2); } static void translate_clrb_expstate(DisasContext *dc, const OpcodeArg arg[], @@ -1581,10 +1537,9 @@ static void translate_clrex(DisasContext *dc, 
const OpcodeArg arg[], static void translate_const16(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 c = tcg_const_i32(arg[1].imm); + TCGv_i32 c = tcg_constant_i32(arg[1].imm); tcg_gen_deposit_i32(arg[0].out, c, arg[0].in, 16, 16); - tcg_temp_free(c); } static void translate_dcache(DisasContext *dc, const OpcodeArg arg[], @@ -1594,9 +1549,7 @@ static void translate_dcache(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 res = tcg_temp_new_i32(); tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm); - tcg_gen_qemu_ld8u(res, addr, dc->cring); - tcg_temp_free(addr); - tcg_temp_free(res); + tcg_gen_qemu_ld_i32(res, addr, dc->cring, MO_UB); } static void translate_depbits(DisasContext *dc, const OpcodeArg arg[], @@ -1647,7 +1600,6 @@ static void translate_extui(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shri_i32(tmp, arg[1].in, arg[2].imm); tcg_gen_andi_i32(arg[0].out, tmp, maskimm); - tcg_temp_free(tmp); } static void translate_getex(DisasContext *dc, const OpcodeArg arg[], @@ -1658,7 +1610,6 @@ static void translate_getex(DisasContext *dc, const OpcodeArg arg[], tcg_gen_extract_i32(tmp, cpu_SR[ATOMCTL], 8, 1); tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], arg[0].in, 8, 1); tcg_gen_mov_i32(arg[0].out, tmp); - tcg_temp_free(tmp); } static void translate_icache(DisasContext *dc, const OpcodeArg arg[], @@ -1670,7 +1621,6 @@ static void translate_icache(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movi_i32(cpu_pc, dc->pc); tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm); gen_helper_itlb_hit_test(cpu_env, addr); - tcg_temp_free(addr); #endif } @@ -1705,7 +1655,6 @@ static void translate_l32e(DisasContext *dc, const OpcodeArg arg[], tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm); mop = gen_load_store_alignment(dc, MO_TEUL, addr); tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, mop); - tcg_temp_free(addr); } #ifdef CONFIG_USER_ONLY @@ -1736,7 +1685,6 @@ static void translate_l32ex(DisasContext *dc, const OpcodeArg arg[], tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->cring, mop); tcg_gen_mov_i32(cpu_exclusive_addr, addr); tcg_gen_mov_i32(cpu_exclusive_val, arg[0].out); - tcg_temp_free(addr); } static void translate_ldst(DisasContext *dc, const OpcodeArg arg[], @@ -1759,7 +1707,6 @@ static void translate_ldst(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mb(TCG_BAR_LDAQ | TCG_MO_ALL); } } - tcg_temp_free(addr); } static void translate_lct(DisasContext *dc, const OpcodeArg arg[], @@ -1774,13 +1721,12 @@ static void translate_l32r(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp; if (dc->base.tb->flags & XTENSA_TBFLAG_LITBASE) { - tmp = tcg_const_i32(arg[1].raw_imm - 1); - tcg_gen_add_i32(tmp, cpu_SR[LITBASE], tmp); + tmp = tcg_temp_new(); + tcg_gen_addi_i32(tmp, cpu_SR[LITBASE], arg[1].raw_imm - 1); } else { - tmp = tcg_const_i32(arg[1].imm); + tmp = tcg_constant_i32(arg[1].imm); } - tcg_gen_qemu_ld32u(arg[0].out, tmp, dc->cring); - tcg_temp_free(tmp); + tcg_gen_qemu_ld_i32(arg[0].out, tmp, dc->cring, MO_TEUL); } static void translate_loop(DisasContext *dc, const OpcodeArg arg[], @@ -1866,19 +1812,12 @@ static void translate_mac16(DisasContext *dc, const OpcodeArg arg[], lo, hi); } tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]); - - tcg_temp_free_i32(lo); - tcg_temp_free_i32(hi); } - tcg_temp_free(m1); - tcg_temp_free(m2); } if (ld_offset) { tcg_gen_mov_i32(arg[1].out, vaddr); tcg_gen_mov_i32(cpu_SR[MR + arg[0].imm], mem32); } - tcg_temp_free(vaddr); - tcg_temp_free(mem32); } static void translate_memw(DisasContext *dc, const 
OpcodeArg arg[], @@ -1942,7 +1881,6 @@ static void translate_movp(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movcond_i32(par[0], arg[0].out, tmp, zero, arg[1].in, arg[0].in); - tcg_temp_free(tmp); } static void translate_movsp(DisasContext *dc, const OpcodeArg arg[], @@ -1965,8 +1903,6 @@ static void translate_mul16(DisasContext *dc, const OpcodeArg arg[], tcg_gen_ext16u_i32(v2, arg[2].in); } tcg_gen_mul_i32(arg[0].out, v1, v2); - tcg_temp_free(v2); - tcg_temp_free(v1); } static void translate_mull(DisasContext *dc, const OpcodeArg arg[], @@ -1985,7 +1921,6 @@ static void translate_mulh(DisasContext *dc, const OpcodeArg arg[], } else { tcg_gen_mulu2_i32(lo, arg[0].out, arg[1].in, arg[2].in); } - tcg_temp_free(lo); } static void translate_neg(DisasContext *dc, const OpcodeArg arg[], @@ -2112,15 +2047,14 @@ static uint32_t test_exceptions_retw(DisasContext *dc, const OpcodeArg arg[], static void translate_retw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 tmp = tcg_const_i32(1); - tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]); + TCGv_i32 tmp = tcg_temp_new(); + tcg_gen_shl_i32(tmp, tcg_constant_i32(1), cpu_SR[WINDOW_BASE]); tcg_gen_andc_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp); tcg_gen_movi_i32(tmp, dc->pc); tcg_gen_deposit_i32(tmp, tmp, cpu_R[0], 0, 30); gen_helper_retw(cpu_env, cpu_R[0]); gen_jump(dc, tmp); - tcg_temp_free(tmp); } static void translate_rfde(DisasContext *dc, const OpcodeArg arg[], @@ -2146,10 +2080,10 @@ static void translate_rfi(DisasContext *dc, const OpcodeArg arg[], static void translate_rfw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 tmp = tcg_const_i32(1); + TCGv_i32 tmp = tcg_temp_new(); tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); - tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]); + tcg_gen_shl_i32(tmp, tcg_constant_i32(1), cpu_SR[WINDOW_BASE]); if (par[0]) { tcg_gen_andc_i32(cpu_SR[WINDOW_START], @@ -2159,7 +2093,6 @@ static void translate_rfw(DisasContext *dc, const OpcodeArg arg[], cpu_SR[WINDOW_START], tmp); } - tcg_temp_free(tmp); gen_helper_restore_owb(cpu_env); gen_jump(dc, cpu_SR[EPC1]); } @@ -2209,7 +2142,6 @@ static void translate_rsr_ptevaddr(DisasContext *dc, const OpcodeArg arg[], tcg_gen_shri_i32(tmp, cpu_SR[EXCVADDR], 10); tcg_gen_or_i32(tmp, tmp, cpu_SR[PTEVADDR]); tcg_gen_andi_i32(arg[0].out, tmp, 0xfffffffc); - tcg_temp_free(tmp); #endif } @@ -2273,8 +2205,8 @@ static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr) static void translate_s32c1i(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 tmp = tcg_temp_local_new_i32(); - TCGv_i32 addr = tcg_temp_local_new_i32(); + TCGv_i32 tmp = tcg_temp_new_i32(); + TCGv_i32 addr = tcg_temp_new_i32(); MemOp mop; tcg_gen_mov_i32(tmp, arg[0].in); @@ -2283,8 +2215,6 @@ static void translate_s32c1i(DisasContext *dc, const OpcodeArg arg[], gen_check_atomctl(dc, addr); tcg_gen_atomic_cmpxchg_i32(arg[0].out, addr, cpu_SR[SCOMPARE1], tmp, dc->cring, mop); - tcg_temp_free(addr); - tcg_temp_free(tmp); } static void translate_s32e(DisasContext *dc, const OpcodeArg arg[], @@ -2296,15 +2226,14 @@ static void translate_s32e(DisasContext *dc, const OpcodeArg arg[], tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm); mop = gen_load_store_alignment(dc, MO_TEUL, addr); tcg_gen_qemu_st_tl(arg[0].in, addr, dc->ring, mop); - tcg_temp_free(addr); } static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 prev = tcg_temp_new_i32(); - TCGv_i32 addr = 
tcg_temp_local_new_i32(); - TCGv_i32 res = tcg_temp_local_new_i32(); + TCGv_i32 addr = tcg_temp_new_i32(); + TCGv_i32 res = tcg_temp_new_i32(); TCGLabel *label = gen_new_label(); MemOp mop; @@ -2322,9 +2251,6 @@ static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[], gen_set_label(label); tcg_gen_extract_i32(arg[0].out, cpu_SR[ATOMCTL], 8, 1); tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], res, 8, 1); - tcg_temp_free(prev); - tcg_temp_free(addr); - tcg_temp_free(res); } static void translate_salt(DisasContext *dc, const OpcodeArg arg[], @@ -2348,7 +2274,6 @@ static void translate_sext(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, arg[1].in, shift); tcg_gen_sari_i32(arg[0].out, tmp, shift); - tcg_temp_free(tmp); } } @@ -2388,8 +2313,6 @@ static void translate_simcall(DisasContext *dc, const OpcodeArg arg[], tcg_gen_extu_i32_i64(tmp, reg); \ tcg_gen_##cmd##_i64(v, v, tmp); \ tcg_gen_extrl_i64_i32(arg[0].out, v); \ - tcg_temp_free_i64(v); \ - tcg_temp_free_i64(tmp); \ } while (0) #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR]) @@ -2401,12 +2324,11 @@ static void translate_sll(DisasContext *dc, const OpcodeArg arg[], tcg_gen_shl_i32(arg[0].out, arg[1].in, dc->sar_m32); } else { TCGv_i64 v = tcg_temp_new_i64(); - TCGv_i32 s = tcg_const_i32(32); - tcg_gen_sub_i32(s, s, cpu_SR[SAR]); + TCGv_i32 s = tcg_temp_new(); + tcg_gen_subfi_i32(s, 32, cpu_SR[SAR]); tcg_gen_andi_i32(s, s, 0x3f); tcg_gen_extu_i32_i64(v, arg[1].in); gen_shift_reg(shl, s); - tcg_temp_free(s); } } @@ -2473,7 +2395,6 @@ static void translate_ssa8b(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, arg[0].in, 3); gen_left_shift_sar(dc, tmp); - tcg_temp_free(tmp); } static void translate_ssa8l(DisasContext *dc, const OpcodeArg arg[], @@ -2482,7 +2403,6 @@ static void translate_ssa8l(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, arg[0].in, 3); gen_right_shift_sar(dc, tmp); - tcg_temp_free(tmp); } static void translate_ssai(DisasContext *dc, const OpcodeArg arg[], @@ -2515,7 +2435,6 @@ static void translate_subx(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, arg[1].in, par[0]); tcg_gen_sub_i32(arg[0].out, tmp, arg[2].in); - tcg_temp_free(tmp); } static void translate_waiti(DisasContext *dc, const OpcodeArg arg[], @@ -2767,7 +2686,6 @@ static void translate_xsr(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mov_i32(tmp, arg[0].in); tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]); tcg_gen_mov_i32(cpu_SR[par[0]], tmp); - tcg_temp_free(tmp); } else { tcg_gen_movi_i32(arg[0].out, 0); } @@ -2782,7 +2700,6 @@ static void translate_xsr_mask(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mov_i32(tmp, arg[0].in); tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]); tcg_gen_andi_i32(cpu_SR[par[0]], tmp, par[2]); - tcg_temp_free(tmp); } else { tcg_gen_movi_i32(arg[0].out, 0); } @@ -2802,7 +2719,6 @@ static void translate_xsr_ccount(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mov_i32(tmp, cpu_SR[par[0]]); gen_helper_wsr_ccount(cpu_env, arg[0].in); tcg_gen_mov_i32(arg[0].out, tmp); - tcg_temp_free(tmp); #endif } @@ -2820,7 +2736,6 @@ static void translate_xsr_ccount(DisasContext *dc, const OpcodeArg arg[], } \ translate_wsr_##name(dc, arg, par); \ tcg_gen_mov_i32(arg[0].out, tmp); \ - tcg_temp_free(tmp); \ } gen_translate_xsr(acchi) @@ -6307,16 +6222,6 @@ static inline void put_f32_o1_i3(const OpcodeArg *arg, const OpcodeArg 
*arg32, (o0 >= 0 && arg[o0].num_bits == 64)) { if (o0 >= 0) { tcg_gen_extu_i32_i64(arg[o0].out, arg32[o0].out); - tcg_temp_free_i32(arg32[o0].out); - } - if (i0 >= 0) { - tcg_temp_free_i32(arg32[i0].in); - } - if (i1 >= 0) { - tcg_temp_free_i32(arg32[i1].in); - } - if (i2 >= 0) { - tcg_temp_free_i32(arg32[i2].in); } } } @@ -6440,9 +6345,6 @@ static void translate_compare_d(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movcond_i32(TCG_COND_NE, arg[0].out, res, zero, set_br, clr_br); - tcg_temp_free(res); - tcg_temp_free(set_br); - tcg_temp_free(clr_br); } static void translate_compare_s(DisasContext *dc, const OpcodeArg arg[], @@ -6473,9 +6375,6 @@ static void translate_compare_s(DisasContext *dc, const OpcodeArg arg[], arg[0].out, res, zero, set_br, clr_br); put_f32_i2(arg, arg32, 1, 2); - tcg_temp_free(res); - tcg_temp_free(set_br); - tcg_temp_free(clr_br); } static void translate_const_d(DisasContext *dc, const OpcodeArg arg[], @@ -6594,7 +6493,6 @@ static void translate_ldsti(DisasContext *dc, const OpcodeArg arg[], if (par[1]) { tcg_gen_mov_i32(arg[1].out, addr); } - tcg_temp_free(addr); } static void translate_ldstx(DisasContext *dc, const OpcodeArg arg[], @@ -6613,7 +6511,6 @@ static void translate_ldstx(DisasContext *dc, const OpcodeArg arg[], if (par[1]) { tcg_gen_mov_i32(arg[1].out, addr); } - tcg_temp_free(addr); } static void translate_fpu2k_madd_s(DisasContext *dc, const OpcodeArg arg[], @@ -6649,7 +6546,6 @@ static void translate_movcond_d(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movcond_i64(par[0], arg[0].out, arg2, zero, arg[1].in, arg[0].in); - tcg_temp_free_i64(arg2); } static void translate_movcond_s(DisasContext *dc, const OpcodeArg arg[], @@ -6678,8 +6574,6 @@ static void translate_movp_d(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movcond_i64(par[0], arg[0].out, tmp2, zero, arg[1].in, arg[0].in); - tcg_temp_free_i32(tmp1); - tcg_temp_free_i64(tmp2); } static void translate_movp_s(DisasContext *dc, const OpcodeArg arg[], @@ -6693,7 +6587,6 @@ static void translate_movp_s(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movcond_i32(par[0], arg[0].out, tmp, zero, arg[1].in, arg[0].in); - tcg_temp_free(tmp); } else { translate_movp_d(dc, arg, par); } @@ -7025,7 +6918,6 @@ static void translate_cvtd_s(DisasContext *dc, const OpcodeArg arg[], tcg_gen_extrl_i64_i32(v, arg[1].in); gen_helper_cvtd_s(arg[0].out, cpu_env, v); - tcg_temp_free_i32(v); } static void translate_cvts_d(DisasContext *dc, const OpcodeArg arg[], @@ -7035,7 +6927,6 @@ static void translate_cvts_d(DisasContext *dc, const OpcodeArg arg[], gen_helper_cvts_d(v, cpu_env, arg[1].in); tcg_gen_extu_i32_i64(arg[0].out, v); - tcg_temp_free_i32(v); } static void translate_ldsti_d(DisasContext *dc, const OpcodeArg arg[], @@ -7063,9 +6954,6 @@ static void translate_ldsti_d(DisasContext *dc, const OpcodeArg arg[], tcg_gen_addi_i32(arg[1].out, arg[1].in, arg[2].imm); } } - if (par[1]) { - tcg_temp_free(addr); - } } static void translate_ldsti_s(DisasContext *dc, const OpcodeArg arg[], @@ -7098,9 +6986,6 @@ static void translate_ldsti_s(DisasContext *dc, const OpcodeArg arg[], tcg_gen_addi_i32(arg[1].out, arg[1].in, arg[2].imm); } } - if (par[1]) { - tcg_temp_free(addr); - } } static void translate_ldstx_d(DisasContext *dc, const OpcodeArg arg[], @@ -7128,9 +7013,6 @@ static void translate_ldstx_d(DisasContext *dc, const OpcodeArg arg[], tcg_gen_add_i32(arg[1].out, arg[1].in, arg[2].in); } } - if (par[1]) { - tcg_temp_free(addr); - } } static void translate_ldstx_s(DisasContext *dc, const OpcodeArg arg[], 
@@ -7163,9 +7045,6 @@ static void translate_ldstx_s(DisasContext *dc, const OpcodeArg arg[], tcg_gen_add_i32(arg[1].out, arg[1].in, arg[2].in); } } - if (par[1]) { - tcg_temp_free(addr); - } } static void translate_madd_d(DisasContext *dc, const OpcodeArg arg[], diff --git a/tcg/aarch64/tcg-target-con-set.h b/tcg/aarch64/tcg-target-con-set.h index d6c6866878..3fdee26a3d 100644 --- a/tcg/aarch64/tcg-target-con-set.h +++ b/tcg/aarch64/tcg-target-con-set.h @@ -10,11 +10,10 @@ * tcg-target-con-str.h; the constraint combination is inclusive or. */ C_O0_I1(r) -C_O0_I2(lZ, l) C_O0_I2(r, rA) C_O0_I2(rZ, r) C_O0_I2(w, r) -C_O1_I1(r, l) +C_O0_I3(rZ, rZ, r) C_O1_I1(r, r) C_O1_I1(w, r) C_O1_I1(w, w) @@ -33,4 +32,5 @@ C_O1_I2(w, w, wO) C_O1_I2(w, w, wZ) C_O1_I3(w, w, w, w) C_O1_I4(r, r, rA, rZ, rZ) +C_O2_I1(r, r, r) C_O2_I4(r, r, rZ, rZ, rA, rMZ) diff --git a/tcg/aarch64/tcg-target-con-str.h b/tcg/aarch64/tcg-target-con-str.h index 00adb64594..fb1a845b4f 100644 --- a/tcg/aarch64/tcg-target-con-str.h +++ b/tcg/aarch64/tcg-target-con-str.h @@ -9,7 +9,6 @@ * REGS(letter, register_mask) */ REGS('r', ALL_GENERAL_REGS) -REGS('l', ALL_QLDST_REGS) REGS('w', ALL_VECTOR_REGS) /* diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index a091326f84..261ad25210 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -40,11 +40,12 @@ static const int tcg_target_reg_alloc_order[] = { TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, - TCG_REG_X16, TCG_REG_X17, TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, + /* X16 reserved as temporary */ + /* X17 reserved as temporary */ /* X18 reserved by system */ /* X19 reserved for AREG0 */ /* X29 reserved as fp */ @@ -71,15 +72,12 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) return TCG_REG_X0 + slot; } -#define TCG_REG_TMP TCG_REG_X30 -#define TCG_VEC_TMP TCG_REG_V31 +#define TCG_REG_TMP0 TCG_REG_X16 +#define TCG_REG_TMP1 TCG_REG_X17 +#define TCG_REG_TMP2 TCG_REG_X30 +#define TCG_VEC_TMP0 TCG_REG_V31 #ifndef CONFIG_SOFTMMU -/* Note that XZR cannot be encoded in the address base register slot, - as that actaully encodes SP. So if we need to zero-extend the guest - address, via the address index register slot, we need to load even - a zero guest base into a register. */ -#define USE_GUEST_BASE (guest_base != 0 || TARGET_LONG_BITS == 32) #define TCG_REG_GUEST_BASE TCG_REG_X28 #endif @@ -134,14 +132,6 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, #define ALL_GENERAL_REGS 0xffffffffu #define ALL_VECTOR_REGS 0xffffffff00000000ull -#ifdef CONFIG_SOFTMMU -#define ALL_QLDST_REGS \ - (ALL_GENERAL_REGS & ~((1 << TCG_REG_X0) | (1 << TCG_REG_X1) | \ - (1 << TCG_REG_X2) | (1 << TCG_REG_X3))) -#else -#define ALL_QLDST_REGS ALL_GENERAL_REGS -#endif - /* Match a constant valid for addition (12-bit, optionally shifted). */ static inline bool is_aimm(uint64_t val) { @@ -395,6 +385,10 @@ typedef enum { I3305_LDR_v64 = 0x5c000000, I3305_LDR_v128 = 0x9c000000, + /* Load/store exclusive. */ + I3306_LDXP = 0xc8600000, + I3306_STXP = 0xc8200000, + /* Load/store register. Described here as 3.3.12, but the helper that emits them can transform to 3.3.10 or 3.3.13. */ I3312_STRB = 0x38000000 | LDST_ST << 22 | MO_8 << 30, @@ -459,6 +453,9 @@ typedef enum { I3406_ADR = 0x10000000, I3406_ADRP = 0x90000000, + /* Add/subtract extended register instructions. 
*/ + I3501_ADD = 0x0b200000, + /* Add/subtract shifted register instructions (without a shift). */ I3502_ADD = 0x0b000000, I3502_ADDS = 0x2b000000, @@ -629,6 +626,12 @@ static void tcg_out_insn_3305(TCGContext *s, AArch64Insn insn, tcg_out32(s, insn | (imm19 & 0x7ffff) << 5 | rt); } +static void tcg_out_insn_3306(TCGContext *s, AArch64Insn insn, TCGReg rs, + TCGReg rt, TCGReg rt2, TCGReg rn) +{ + tcg_out32(s, insn | rs << 16 | rt2 << 10 | rn << 5 | rt); +} + static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rt, int imm19) { @@ -711,6 +714,14 @@ static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn, tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd); } +static inline void tcg_out_insn_3501(TCGContext *s, AArch64Insn insn, + TCGType sf, TCGReg rd, TCGReg rn, + TCGReg rm, int opt, int imm3) +{ + tcg_out32(s, insn | sf << 31 | rm << 16 | opt << 13 | + imm3 << 10 | rn << 5 | rd); +} + /* This function is for both 3.5.2 (Add/Subtract shifted register), for the rare occasion when we actually want to supply a shift amount. */ static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn, @@ -989,7 +1000,7 @@ static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg r, TCGReg base, intptr_t offset) { - TCGReg temp = TCG_REG_TMP; + TCGReg temp = TCG_REG_TMP0; if (offset < -0xffffff || offset > 0xffffff) { tcg_out_movi(s, TCG_TYPE_PTR, temp, offset); @@ -1106,6 +1117,11 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, tcg_out_insn(s, 3305, LDR, 0, rd); } +static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) +{ + return false; +} + static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, tcg_target_long imm) { @@ -1136,8 +1152,8 @@ static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, TCGReg rd, } /* Worst-case scenario, move offset to temp register, use reg offset. 
*/ - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset); - tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP); + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, offset); + tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP0); } static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) @@ -1353,8 +1369,8 @@ static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *target) if (offset == sextract64(offset, 0, 26)) { tcg_out_insn(s, 3206, BL, offset); } else { - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target); - tcg_out_insn(s, 3207, BLR, TCG_REG_TMP); + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, (intptr_t)target); + tcg_out_insn(s, 3207, BLR, TCG_REG_TMP0); } } @@ -1419,6 +1435,26 @@ static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits, tcg_out_sbfm(s, ext, rd, rn, 0, bits); } +static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn) +{ + tcg_out_sxt(s, type, MO_8, rd, rn); +} + +static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn) +{ + tcg_out_sxt(s, type, MO_16, rd, rn); +} + +static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_sxt(s, TCG_TYPE_I64, MO_32, rd, rn); +} + +static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_ext32s(s, rd, rn); +} + static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits, TCGReg rd, TCGReg rn) { @@ -1427,6 +1463,31 @@ static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits, tcg_out_ubfm(s, 0, rd, rn, 0, bits); } +static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_uxt(s, MO_8, rd, rn); +} + +static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_uxt(s, MO_16, rd, rn); +} + +static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_movr(s, TCG_TYPE_I32, rd, rn); +} + +static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_ext32u(s, rd, rn); +} + +static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_mov(s, TCG_TYPE_I32, rd, rn); +} + static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, TCGReg rn, int64_t aimm) { @@ -1446,7 +1507,7 @@ static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl, AArch64Insn insn; if (rl == ah || (!const_bh && rl == bh)) { - rl = TCG_REG_TMP; + rl = TCG_REG_TMP0; } if (const_bl) { @@ -1463,7 +1524,7 @@ static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl, possibility of adding 0+const in the low part, and the immediate add instructions encode XSP not XZR. Don't try anything more elaborate here than loading another zero. */ - al = TCG_REG_TMP; + al = TCG_REG_TMP0; tcg_out_movi(s, ext, al, 0); } tcg_out_insn_3401(s, insn, ext, rl, al, bl); @@ -1504,7 +1565,7 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d, { TCGReg a1 = a0; if (is_ctz) { - a1 = TCG_REG_TMP; + a1 = TCG_REG_TMP0; tcg_out_insn(s, 3507, RBIT, ext, a1, a0); } if (const_b && b == (ext ? 
64 : 32)) { @@ -1513,7 +1574,7 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d, AArch64Insn sel = I3506_CSEL; tcg_out_cmp(s, ext, a0, 0, 1); - tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP, a1); + tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP0, a1); if (const_b) { if (b == -1) { @@ -1526,348 +1587,358 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d, b = d; } } - tcg_out_insn_3506(s, sel, ext, d, TCG_REG_TMP, b, TCG_COND_NE); + tcg_out_insn_3506(s, sel, ext, d, TCG_REG_TMP0, b, TCG_COND_NE); } } -static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target) +typedef struct { + TCGReg base; + TCGReg index; + TCGType index_ext; + TCGAtomAlign aa; +} HostAddress; + +bool tcg_target_has_memory_bswap(MemOp memop) { - ptrdiff_t offset = tcg_pcrel_diff(s, target); - tcg_debug_assert(offset == sextract64(offset, 0, 21)); - tcg_out_insn(s, 3406, ADR, rd, offset); + return false; } -#ifdef CONFIG_SOFTMMU -/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * MemOpIdx oi, uintptr_t ra) - */ -static void * const qemu_ld_helpers[MO_SIZE + 1] = { - [MO_8] = helper_ret_ldub_mmu, -#if HOST_BIG_ENDIAN - [MO_16] = helper_be_lduw_mmu, - [MO_32] = helper_be_ldul_mmu, - [MO_64] = helper_be_ldq_mmu, -#else - [MO_16] = helper_le_lduw_mmu, - [MO_32] = helper_le_ldul_mmu, - [MO_64] = helper_le_ldq_mmu, -#endif -}; - -/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, MemOpIdx oi, - * uintptr_t ra) - */ -static void * const qemu_st_helpers[MO_SIZE + 1] = { - [MO_8] = helper_ret_stb_mmu, -#if HOST_BIG_ENDIAN - [MO_16] = helper_be_stw_mmu, - [MO_32] = helper_be_stl_mmu, - [MO_64] = helper_be_stq_mmu, -#else - [MO_16] = helper_le_stw_mmu, - [MO_32] = helper_le_stl_mmu, - [MO_64] = helper_le_stq_mmu, -#endif +static const TCGLdstHelperParam ldst_helper_param = { + .ntmp = 1, .tmp = { TCG_REG_TMP0 } }; static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - MemOpIdx oi = lb->oi; - MemOp opc = get_memop(oi); - MemOp size = opc & MO_SIZE; + MemOp opc = get_memop(lb->oi); if (!reloc_pc19(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { return false; } - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0); - tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, oi); - tcg_out_adr(s, TCG_REG_X3, lb->raddr); + tcg_out_ld_helper_args(s, lb, &ldst_helper_param); tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]); - if (opc & MO_SIGN) { - tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0); - } else { - tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0); - } - + tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param); tcg_out_goto(s, lb->raddr); return true; } static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - MemOpIdx oi = lb->oi; - MemOp opc = get_memop(oi); - MemOp size = opc & MO_SIZE; + MemOp opc = get_memop(lb->oi); if (!reloc_pc19(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { return false; } - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0); - tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); - tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg); - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, oi); - tcg_out_adr(s, TCG_REG_X4, lb->raddr); + tcg_out_st_helper_args(s, lb, &ldst_helper_param); tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE]); tcg_out_goto(s, lb->raddr); return true; } -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, - TCGType 
ext, TCGReg data_reg, TCGReg addr_reg, - tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) +/* + * For softmmu, perform the TLB load and compare. + * For useronly, perform any required alignment tests. + * In both cases, return a TCGLabelQemuLdst structure if the slow path + * is required and fill in @h with the host address for the fast path. + */ +static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, + TCGReg addr_reg, MemOpIdx oi, + bool is_ld) { - TCGLabelQemuLdst *label = new_ldst_label(s); + TCGType addr_type = s->addr_type; + TCGLabelQemuLdst *ldst = NULL; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; + unsigned a_mask; - label->is_ld = is_ld; - label->oi = oi; - label->type = ext; - label->datalo_reg = data_reg; - label->addrlo_reg = addr_reg; - label->raddr = tcg_splitwx_to_rx(raddr); - label->label_ptr[0] = label_ptr; -} + h->aa = atom_and_align_for_opc(s, opc, + have_lse2 ? MO_ATOM_WITHIN16 + : MO_ATOM_IFALIGN, + s_bits == MO_128); + a_mask = (1 << h->aa.align) - 1; -/* We expect to use a 7-bit scaled negative offset from ENV. */ -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -512); - -/* These offsets are built into the LDP below. */ -QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); -QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); - -/* Load and compare a TLB entry, emitting the conditional jump to the - slow path for the failure case, which will be patched later when finalizing - the slow path. Generated code returns the host addend in X1, - clobbers X0,X2,X3,TMP. */ -static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, - tcg_insn_unit **label_ptr, int mem_index, - bool is_read) -{ - unsigned a_bits = get_alignment_bits(opc); - unsigned s_bits = opc & MO_SIZE; - unsigned a_mask = (1u << a_bits) - 1; +#ifdef CONFIG_SOFTMMU unsigned s_mask = (1u << s_bits) - 1; - TCGReg x3; + unsigned mem_index = get_mmuidx(oi); + TCGReg addr_adj; TCGType mask_type; uint64_t compare_mask; - mask_type = (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32 + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addr_reg; + + mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32 ? TCG_TYPE_I64 : TCG_TYPE_I32); - /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {x0,x1}. */ - tcg_out_insn(s, 3314, LDP, TCG_REG_X0, TCG_REG_X1, TCG_AREG0, + /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {tmp0,tmp1}. */ + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -512); + QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); + QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); + tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0, TLB_MASK_TABLE_OFS(mem_index), 1, 0); /* Extract the TLB index from the address into X0. */ tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64, - TCG_REG_X0, TCG_REG_X0, addr_reg, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + TCG_REG_TMP0, TCG_REG_TMP0, addr_reg, + s->page_bits - CPU_TLB_ENTRY_BITS); - /* Add the tlb_table pointer, creating the CPUTLBEntry address into X1. */ - tcg_out_insn(s, 3502, ADD, 1, TCG_REG_X1, TCG_REG_X1, TCG_REG_X0); + /* Add the tlb_table pointer, forming the CPUTLBEntry address in TMP1. */ + tcg_out_insn(s, 3502, ADD, 1, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP0); - /* Load the tlb comparator into X0, and the fast path addend into X1. */ - tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_X0, TCG_REG_X1, is_read - ? 
offsetof(CPUTLBEntry, addr_read) - : offsetof(CPUTLBEntry, addr_write)); - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_X1, TCG_REG_X1, + /* Load the tlb comparator into TMP0, and the fast path addend into TMP1. */ + tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP1, + is_ld ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write)); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, offsetof(CPUTLBEntry, addend)); - /* For aligned accesses, we check the first byte and include the alignment - bits within the address. For unaligned access, we check that we don't - cross pages using the address of the last byte of the access. */ - if (a_bits >= s_bits) { - x3 = addr_reg; + /* + * For aligned accesses, we check the first byte and include the alignment + * bits within the address. For unaligned access, we check that we don't + * cross pages using the address of the last byte of the access. + */ + if (a_mask >= s_mask) { + addr_adj = addr_reg; } else { - tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64, - TCG_REG_X3, addr_reg, s_mask - a_mask); - x3 = TCG_REG_X3; + addr_adj = TCG_REG_TMP2; + tcg_out_insn(s, 3401, ADDI, addr_type, + addr_adj, addr_reg, s_mask - a_mask); } - compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; + compare_mask = (uint64_t)s->page_mask | a_mask; - /* Store the page mask part of the address into X3. */ - tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64, - TCG_REG_X3, x3, compare_mask); + /* Store the page mask part of the address into TMP2. */ + tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_TMP2, + addr_adj, compare_mask); /* Perform the address comparison. */ - tcg_out_cmp(s, TARGET_LONG_BITS == 64, TCG_REG_X0, TCG_REG_X3, 0); + tcg_out_cmp(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 0); /* If not equal, we jump to the slow path. */ - *label_ptr = s->code_ptr; + ldst->label_ptr[0] = s->code_ptr; tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); -} + h->base = TCG_REG_TMP1; + h->index = addr_reg; + h->index_ext = addr_type; #else -static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg, - unsigned a_bits) -{ - unsigned a_mask = (1 << a_bits) - 1; - TCGLabelQemuLdst *label = new_ldst_label(s); + if (a_mask) { + ldst = new_ldst_label(s); - label->is_ld = is_ld; - label->addrlo_reg = addr_reg; + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addr_reg; - /* tst addr, #mask */ - tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask); + /* tst addr, #mask */ + tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask); - label->label_ptr[0] = s->code_ptr; - - /* b.ne slow_path */ - tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); - - label->raddr = tcg_splitwx_to_rx(s->code_ptr); -} - -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - if (!reloc_pc19(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; + /* b.ne slow_path */ + ldst->label_ptr[0] = s->code_ptr; + tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); } - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_X1, l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0); + if (guest_base || addr_type == TCG_TYPE_I32) { + h->base = TCG_REG_GUEST_BASE; + h->index = addr_reg; + h->index_ext = addr_type; + } else { + h->base = addr_reg; + h->index = TCG_REG_XZR; + h->index_ext = TCG_TYPE_I64; + } +#endif - /* "Tail call" to the helper, with the return address back inline. */ - tcg_out_adr(s, TCG_REG_LR, l->raddr); - tcg_out_goto_long(s, (const void *)(l->is_ld ? 
helper_unaligned_ld - : helper_unaligned_st)); - return true; + return ldst; } -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} -#endif /* CONFIG_SOFTMMU */ - static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, - TCGReg data_r, TCGReg addr_r, - TCGType otype, TCGReg off_r) + TCGReg data_r, HostAddress h) { switch (memop & MO_SSIZE) { case MO_UB: - tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r); + tcg_out_ldst_r(s, I3312_LDRB, data_r, h.base, h.index_ext, h.index); break; case MO_SB: tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW, - data_r, addr_r, otype, off_r); + data_r, h.base, h.index_ext, h.index); break; case MO_UW: - tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r); + tcg_out_ldst_r(s, I3312_LDRH, data_r, h.base, h.index_ext, h.index); break; case MO_SW: tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW), - data_r, addr_r, otype, off_r); + data_r, h.base, h.index_ext, h.index); break; case MO_UL: - tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r); + tcg_out_ldst_r(s, I3312_LDRW, data_r, h.base, h.index_ext, h.index); break; case MO_SL: - tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r); + tcg_out_ldst_r(s, I3312_LDRSWX, data_r, h.base, h.index_ext, h.index); break; case MO_UQ: - tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r); + tcg_out_ldst_r(s, I3312_LDRX, data_r, h.base, h.index_ext, h.index); break; default: - tcg_abort(); + g_assert_not_reached(); } } static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, - TCGReg data_r, TCGReg addr_r, - TCGType otype, TCGReg off_r) + TCGReg data_r, HostAddress h) { switch (memop & MO_SIZE) { case MO_8: - tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r); + tcg_out_ldst_r(s, I3312_STRB, data_r, h.base, h.index_ext, h.index); break; case MO_16: - tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r); + tcg_out_ldst_r(s, I3312_STRH, data_r, h.base, h.index_ext, h.index); break; case MO_32: - tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r); + tcg_out_ldst_r(s, I3312_STRW, data_r, h.base, h.index_ext, h.index); break; case MO_64: - tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r); + tcg_out_ldst_r(s, I3312_STRX, data_r, h.base, h.index_ext, h.index); break; default: - tcg_abort(); + g_assert_not_reached(); } } static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - MemOpIdx oi, TCGType ext) + MemOpIdx oi, TCGType data_type) { - MemOp memop = get_memop(oi); - const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; + TCGLabelQemuLdst *ldst; + HostAddress h; - /* Byte swapping is left to middle-end expansion. 
*/ - tcg_debug_assert((memop & MO_BSWAP) == 0); + ldst = prepare_host_addr(s, &h, addr_reg, oi, true); + tcg_out_qemu_ld_direct(s, get_memop(oi), data_type, data_reg, h); -#ifdef CONFIG_SOFTMMU - unsigned mem_index = get_mmuidx(oi); - tcg_insn_unit *label_ptr; - - tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1); - tcg_out_qemu_ld_direct(s, memop, ext, data_reg, - TCG_REG_X1, otype, addr_reg); - add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg, - s->code_ptr, label_ptr); -#else /* !CONFIG_SOFTMMU */ - unsigned a_bits = get_alignment_bits(memop); - if (a_bits) { - tcg_out_test_alignment(s, true, addr_reg, a_bits); + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = data_reg; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - if (USE_GUEST_BASE) { - tcg_out_qemu_ld_direct(s, memop, ext, data_reg, - TCG_REG_GUEST_BASE, otype, addr_reg); - } else { - tcg_out_qemu_ld_direct(s, memop, ext, data_reg, - addr_reg, TCG_TYPE_I64, TCG_REG_XZR); - } -#endif /* CONFIG_SOFTMMU */ } static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - MemOpIdx oi) + MemOpIdx oi, TCGType data_type) { - MemOp memop = get_memop(oi); - const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; + TCGLabelQemuLdst *ldst; + HostAddress h; - /* Byte swapping is left to middle-end expansion. */ - tcg_debug_assert((memop & MO_BSWAP) == 0); + ldst = prepare_host_addr(s, &h, addr_reg, oi, false); + tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h); -#ifdef CONFIG_SOFTMMU - unsigned mem_index = get_mmuidx(oi); - tcg_insn_unit *label_ptr; - - tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0); - tcg_out_qemu_st_direct(s, memop, data_reg, - TCG_REG_X1, otype, addr_reg); - add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64, - data_reg, addr_reg, s->code_ptr, label_ptr); -#else /* !CONFIG_SOFTMMU */ - unsigned a_bits = get_alignment_bits(memop); - if (a_bits) { - tcg_out_test_alignment(s, false, addr_reg, a_bits); + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = data_reg; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - if (USE_GUEST_BASE) { - tcg_out_qemu_st_direct(s, memop, data_reg, - TCG_REG_GUEST_BASE, otype, addr_reg); +} + +static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg addr_reg, MemOpIdx oi, bool is_ld) +{ + TCGLabelQemuLdst *ldst; + HostAddress h; + TCGReg base; + bool use_pair; + + ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld); + + /* Compose the final address, as LDP/STP have no indexing. */ + if (h.index == TCG_REG_XZR) { + base = h.base; } else { - tcg_out_qemu_st_direct(s, memop, data_reg, - addr_reg, TCG_TYPE_I64, TCG_REG_XZR); + base = TCG_REG_TMP2; + if (h.index_ext == TCG_TYPE_I32) { + /* add base, base, index, uxtw */ + tcg_out_insn(s, 3501, ADD, TCG_TYPE_I64, base, + h.base, h.index, MO_32, 0); + } else { + /* add base, base, index */ + tcg_out_insn(s, 3502, ADD, 1, base, h.base, h.index); + } + } + + use_pair = h.aa.atom < MO_128 || have_lse2; + + if (!use_pair) { + tcg_insn_unit *branch = NULL; + TCGReg ll, lh, sl, sh; + + /* + * If we have already checked for 16-byte alignment, that's all + * we need. Otherwise we have determined that misaligned atomicity + * may be handled with two 8-byte loads. + */ + if (h.aa.align < MO_128) { + /* + * TODO: align should be MO_64, so we only need test bit 3, + * which means we could use TBNZ instead of ANDS+B_C. 
+ */ + tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, 15); + branch = s->code_ptr; + tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); + use_pair = true; + } + + if (is_ld) { + /* + * 16-byte atomicity without LSE2 requires LDXP+STXP loop: + * ldxp lo, hi, [base] + * stxp t0, lo, hi, [base] + * cbnz t0, .-8 + * Require no overlap between data{lo,hi} and base. + */ + if (datalo == base || datahi == base) { + tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_TMP2, base); + base = TCG_REG_TMP2; + } + ll = sl = datalo; + lh = sh = datahi; + } else { + /* + * 16-byte atomicity without LSE2 requires LDXP+STXP loop: + * 1: ldxp t0, t1, [base] + * stxp t0, lo, hi, [base] + * cbnz t0, 1b + */ + tcg_debug_assert(base != TCG_REG_TMP0 && base != TCG_REG_TMP1); + ll = TCG_REG_TMP0; + lh = TCG_REG_TMP1; + sl = datalo; + sh = datahi; + } + + tcg_out_insn(s, 3306, LDXP, TCG_REG_XZR, ll, lh, base); + tcg_out_insn(s, 3306, STXP, TCG_REG_TMP0, sl, sh, base); + tcg_out_insn(s, 3201, CBNZ, 0, TCG_REG_TMP0, -2); + + if (use_pair) { + /* "b .+8", branching across the one insn of use_pair. */ + tcg_out_insn(s, 3206, B, 2); + reloc_pc19(branch, tcg_splitwx_to_rx(s->code_ptr)); + } + } + + if (use_pair) { + if (is_ld) { + tcg_out_insn(s, 3314, LDP, datalo, datahi, base, 0, 1, 0); + } else { + tcg_out_insn(s, 3314, STP, datalo, datahi, base, 0, 1, 0); + } + } + + if (ldst) { + ldst->type = TCG_TYPE_I128; + ldst->datalo_reg = datalo; + ldst->datahi_reg = datahi; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } -#endif /* CONFIG_SOFTMMU */ } static const tcg_insn_unit *tb_ret_addr; @@ -1895,7 +1966,7 @@ static void tcg_out_goto_tb(TCGContext *s, int which) set_jmp_insn_offset(s, which); tcg_out32(s, I3206_B); - tcg_out_insn(s, 3207, BR, TCG_REG_TMP); + tcg_out_insn(s, 3207, BR, TCG_REG_TMP0); set_jmp_reset_offset(s, which); } @@ -1914,7 +1985,7 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n, ptrdiff_t i_offset = i_addr - jmp_rx; /* Note that we asserted this in range in tcg_out_goto_tb. 
*/ - insn = deposit32(I3305_LDR | TCG_REG_TMP, 5, 19, i_offset >> 2); + insn = deposit32(I3305_LDR | TCG_REG_TMP0, 5, 19, i_offset >> 2); } qatomic_set((uint32_t *)jmp_rw, insn); flush_idcache_range(jmp_rx, jmp_rw, 4); @@ -2108,13 +2179,13 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_rem_i64: case INDEX_op_rem_i32: - tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP, a1, a2); - tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1); + tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP0, a1, a2); + tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP0, a2, a1); break; case INDEX_op_remu_i64: case INDEX_op_remu_i32: - tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP, a1, a2); - tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1); + tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP0, a1, a2); + tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP0, a2, a1); break; case INDEX_op_shl_i64: @@ -2158,8 +2229,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, if (c2) { tcg_out_rotl(s, ext, a0, a1, a2); } else { - tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2); - tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP); + tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP0, TCG_REG_XZR, a2); + tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP0); } break; @@ -2197,13 +2268,25 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]); break; - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: tcg_out_qemu_ld(s, a0, a1, a2, ext); break; - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, REG0(0), a1, a2); + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: + tcg_out_qemu_st(s, REG0(0), a1, a2, ext); + break; + case INDEX_op_qemu_ld_a32_i128: + case INDEX_op_qemu_ld_a64_i128: + tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true); + break; + case INDEX_op_qemu_st_a32_i128: + case INDEX_op_qemu_st_a64_i128: + tcg_out_qemu_ldst_i128(s, REG0(0), REG0(1), a2, args[3], false); break; case INDEX_op_bswap64_i64: @@ -2212,7 +2295,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_bswap32_i64: tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1); if (a2 & TCG_BSWAP_OS) { - tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a0); + tcg_out_ext32s(s, a0, a0); } break; case INDEX_op_bswap32_i32: @@ -2223,38 +2306,13 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1); if (a2 & TCG_BSWAP_OS) { /* Output must be sign-extended. */ - tcg_out_sxt(s, ext, MO_16, a0, a0); + tcg_out_ext16s(s, ext, a0, a0); } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { /* Output must be zero-extended, but input isn't. 
*/ - tcg_out_uxt(s, MO_16, a0, a0); + tcg_out_ext16u(s, a0, a0); } break; - case INDEX_op_ext8s_i64: - case INDEX_op_ext8s_i32: - tcg_out_sxt(s, ext, MO_8, a0, a1); - break; - case INDEX_op_ext16s_i64: - case INDEX_op_ext16s_i32: - tcg_out_sxt(s, ext, MO_16, a0, a1); - break; - case INDEX_op_ext_i32_i64: - case INDEX_op_ext32s_i64: - tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1); - break; - case INDEX_op_ext8u_i64: - case INDEX_op_ext8u_i32: - tcg_out_uxt(s, MO_8, a0, a1); - break; - case INDEX_op_ext16u_i64: - case INDEX_op_ext16u_i32: - tcg_out_uxt(s, MO_16, a0, a1); - break; - case INDEX_op_extu_i32_i64: - case INDEX_op_ext32u_i64: - tcg_out_movr(s, TCG_TYPE_I32, a0, a1); - break; - case INDEX_op_deposit_i64: case INDEX_op_deposit_i32: tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]); @@ -2310,6 +2368,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_call: /* Always emitted via tcg_out_call. */ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ + case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ + case INDEX_op_ext8s_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16s_i64: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16u_i64: + case INDEX_op_ext16u_i32: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: default: g_assert_not_reached(); } @@ -2573,8 +2644,8 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, break; } } - tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0); - a2 = TCG_VEC_TMP; + tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP0, 0); + a2 = TCG_VEC_TMP0; } if (is_scalar) { insn = cmp_scalar_insn[cond]; @@ -2851,12 +2922,22 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_movcond_i64: return C_O1_I4(r, r, rA, rZ, rZ); - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: - return C_O1_I1(r, l); - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: - return C_O0_I2(lZ, l); + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: + return C_O1_I1(r, r); + case INDEX_op_qemu_ld_a32_i128: + case INDEX_op_qemu_ld_a64_i128: + return C_O2_I1(r, r, r); + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: + return C_O0_I2(rZ, r); + case INDEX_op_qemu_st_a32_i128: + case INDEX_op_qemu_st_a64_i128: + return C_O0_I3(rZ, rZ, r); case INDEX_op_deposit_i32: case INDEX_op_deposit_i64: @@ -2952,9 +3033,11 @@ static void tcg_target_init(TCGContext *s) s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */ - tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); + tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0); } /* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)). 
*/ @@ -2999,10 +3082,14 @@ static void tcg_target_qemu_prologue(TCGContext *s) CPU_TEMP_BUF_NLONGS * sizeof(long)); #if !defined(CONFIG_SOFTMMU) - if (USE_GUEST_BASE) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); - } + /* + * Note that XZR cannot be encoded in the address base register slot, + * as that actually encodes SP. Depending on the guest, we may need + * to zero-extend the guest address via the address index register slot, + * therefore we need to load even a zero guest base into a register. + */ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); #endif tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h index c0b0f614ba..ce64de06e5 100644 --- a/tcg/aarch64/tcg-target.h +++ b/tcg/aarch64/tcg-target.h @@ -13,8 +13,9 @@ #ifndef AARCH64_TCG_TARGET_H #define AARCH64_TCG_TARGET_H +#include "host/cpuinfo.h" + #define TCG_TARGET_INSN_UNIT_SIZE 4 -#define TCG_TARGET_TLB_DISPLACEMENT_BITS 24 #define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) typedef enum { @@ -57,6 +58,9 @@ typedef enum { #define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN #define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL +#define have_lse (cpuinfo & CPUINFO_LSE) +#define have_lse2 (cpuinfo & CPUINFO_LSE2) + /* optional instructions */ #define TCG_TARGET_HAS_div_i32 1 #define TCG_TARGET_HAS_rem_i32 1 @@ -126,6 +130,17 @@ typedef enum { #define TCG_TARGET_HAS_muluh_i64 1 #define TCG_TARGET_HAS_mulsh_i64 1 +/* + * Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load, + * which requires writable pages. We must defer to the helper for user-only, + * but in system mode all ram is writable for the host.
+ */ +#ifdef CONFIG_USER_ONLY +#define TCG_TARGET_HAS_qemu_ldst_i128 have_lse2 +#else +#define TCG_TARGET_HAS_qemu_ldst_i128 1 +#endif + #define TCG_TARGET_HAS_v64 1 #define TCG_TARGET_HAS_v128 1 #define TCG_TARGET_HAS_v256 0 @@ -151,7 +166,6 @@ typedef enum { #define TCG_TARGET_HAS_cmpsel_vec 0 #define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_HAS_MEMORY_BSWAP 0 #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h index b8849b2478..229ae258ac 100644 --- a/tcg/arm/tcg-target-con-set.h +++ b/tcg/arm/tcg-target-con-set.h @@ -12,19 +12,19 @@ C_O0_I1(r) C_O0_I2(r, r) C_O0_I2(r, rIN) -C_O0_I2(s, s) +C_O0_I2(q, q) C_O0_I2(w, r) -C_O0_I3(s, s, s) -C_O0_I3(S, p, s) +C_O0_I3(q, q, q) +C_O0_I3(Q, p, q) C_O0_I4(r, r, rI, rI) -C_O0_I4(S, p, s, s) -C_O1_I1(r, l) +C_O0_I4(Q, p, q, q) +C_O1_I1(r, q) C_O1_I1(r, r) C_O1_I1(w, r) C_O1_I1(w, w) C_O1_I1(w, wr) C_O1_I2(r, 0, rZ) -C_O1_I2(r, l, l) +C_O1_I2(r, q, q) C_O1_I2(r, r, r) C_O1_I2(r, r, rI) C_O1_I2(r, r, rIK) @@ -39,8 +39,8 @@ C_O1_I2(w, w, wZ) C_O1_I3(w, w, w, w) C_O1_I4(r, r, r, rI, rI) C_O1_I4(r, r, rIN, rIK, 0) -C_O2_I1(e, p, l) -C_O2_I2(e, p, l, l) +C_O2_I1(e, p, q) +C_O2_I2(e, p, q, q) C_O2_I2(r, r, r, r) C_O2_I4(r, r, r, r, rIN, rIK) C_O2_I4(r, r, rI, rI, rIN, rIK) diff --git a/tcg/arm/tcg-target-con-str.h b/tcg/arm/tcg-target-con-str.h index 24b4b59feb..f83f1d3919 100644 --- a/tcg/arm/tcg-target-con-str.h +++ b/tcg/arm/tcg-target-con-str.h @@ -10,9 +10,8 @@ */ REGS('e', ALL_GENERAL_REGS & 0x5555) /* even regs */ REGS('r', ALL_GENERAL_REGS) -REGS('l', ALL_QLOAD_REGS) -REGS('s', ALL_QSTORE_REGS) -REGS('S', ALL_QSTORE_REGS & 0x5555) /* even qstore */ +REGS('q', ALL_QLDST_REGS) +REGS('Q', ALL_QLDST_REGS & 0x5555) /* even qldst */ REGS('w', ALL_VECTOR_REGS) /* diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index d06ac60c15..20cc1cc477 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -353,23 +353,16 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, #define ALL_VECTOR_REGS 0xffff0000u /* - * r0-r2 will be overwritten when reading the tlb entry (softmmu only) - * and r0-r1 doing the byte swapping, so don't use these. - * r3 is removed for softmmu to avoid clashes with helper arguments. + * r0-r3 will be overwritten when reading the tlb entry (softmmu only); + * r14 will be overwritten by the BLNE branching to the slow path. 
*/ #ifdef CONFIG_SOFTMMU -#define ALL_QLOAD_REGS \ +#define ALL_QLDST_REGS \ (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \ (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \ (1 << TCG_REG_R14))) -#define ALL_QSTORE_REGS \ - (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \ - (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \ - ((TARGET_LONG_BITS == 64) << TCG_REG_R3))) #else -#define ALL_QLOAD_REGS ALL_GENERAL_REGS -#define ALL_QSTORE_REGS \ - (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1))) +#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_R14)) #endif /* @@ -690,8 +683,8 @@ tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm) tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1); } -static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, - TCGReg rn, int imm8) +static void __attribute__((unused)) +tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0); } @@ -958,28 +951,52 @@ static void tcg_out_udiv(TCGContext *s, ARMCond cond, tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); } -static void tcg_out_ext8s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) +static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn) { /* sxtb */ - tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn); + tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn); } -static void __attribute__((unused)) -tcg_out_ext8u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) +static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn) { - tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff); + tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff); } -static void tcg_out_ext16s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) +static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn) { /* sxth */ - tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn); + tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn); } -static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) +static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn) { /* uxth */ - tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn); + tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn); +} + +static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn) +{ + g_assert_not_reached(); +} + +static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn) +{ + g_assert_not_reached(); +} + +static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) +{ + g_assert_not_reached(); +} + +static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) +{ + g_assert_not_reached(); +} + +static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn) +{ + g_assert_not_reached(); } static void tcg_out_bswap16(TCGContext *s, ARMCond cond, @@ -1301,143 +1318,131 @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn, tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf); } +typedef struct { + ARMCond cond; + TCGReg base; + int index; + bool index_scratch; + TCGAtomAlign aa; +} HostAddress; + +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return false; +} + +static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg) +{ + /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. 
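+ *
+ * Schematically, the fast path ends with
+ *      blne  slow_path          @ taken only on TLB miss / misalignment
+ * and a taken BLNE deposits the return address in R14 (LR), so no
+ * extra instruction is needed here to materialize l->raddr.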
*/ + return TCG_REG_R14; +} + +static const TCGLdstHelperParam ldst_helper_param = { + .ra_gen = ldst_ra_gen, + .ntmp = 1, + .tmp = { TCG_REG_TMP }, +}; + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + MemOp opc = get_memop(lb->oi); + + if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + tcg_out_ld_helper_args(s, lb, &ldst_helper_param); + tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]); + tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param); + + tcg_out_goto(s, COND_AL, lb->raddr); + return true; +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + MemOp opc = get_memop(lb->oi); + + if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + tcg_out_st_helper_args(s, lb, &ldst_helper_param); + + /* Tail-call to the helper, which will return to the fast path. */ + tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]); + return true; +} + +static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, + TCGReg addrlo, TCGReg addrhi, + MemOpIdx oi, bool is_ld) +{ + TCGLabelQemuLdst *ldst = NULL; + MemOp opc = get_memop(oi); + unsigned a_mask; + #ifdef CONFIG_SOFTMMU -/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * int mmu_idx, uintptr_t ra) - */ -static void * const qemu_ld_helpers[MO_SSIZE + 1] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_SB] = helper_ret_ldsb_mmu, -#if HOST_BIG_ENDIAN - [MO_UW] = helper_be_lduw_mmu, - [MO_UL] = helper_be_ldul_mmu, - [MO_UQ] = helper_be_ldq_mmu, - [MO_SW] = helper_be_ldsw_mmu, - [MO_SL] = helper_be_ldul_mmu, + *h = (HostAddress){ + .cond = COND_AL, + .base = addrlo, + .index = TCG_REG_R1, + .index_scratch = true, + }; #else - [MO_UW] = helper_le_lduw_mmu, - [MO_UL] = helper_le_ldul_mmu, - [MO_UQ] = helper_le_ldq_mmu, - [MO_SW] = helper_le_ldsw_mmu, - [MO_SL] = helper_le_ldul_mmu, + *h = (HostAddress){ + .cond = COND_AL, + .base = addrlo, + .index = guest_base ? TCG_REG_GUEST_BASE : -1, + .index_scratch = false, + }; #endif -}; -/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, int mmu_idx, uintptr_t ra) - */ -static void * const qemu_st_helpers[MO_SIZE + 1] = { - [MO_8] = helper_ret_stb_mmu, -#if HOST_BIG_ENDIAN - [MO_16] = helper_be_stw_mmu, - [MO_32] = helper_be_stl_mmu, - [MO_64] = helper_be_stq_mmu, -#else - [MO_16] = helper_le_stw_mmu, - [MO_32] = helper_le_stl_mmu, - [MO_64] = helper_le_stq_mmu, -#endif -}; + h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); + a_mask = (1 << h->aa.align) - 1; -/* Helper routines for marshalling helper function arguments into - * the correct registers and stack. - * argreg is where we want to put this argument, arg is the argument itself. - * Return value is the updated argreg ready for the next call. - * Note that argreg 0..3 is real registers, 4+ on stack. - * - * We provide routines for arguments which are: immediate, 32 bit - * value in register, 16 and 8 bit values in register (which must be zero - * extended before use) and 64 bit value in a lo:hi register pair. 
- */ -#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG) \ -static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg) \ -{ \ - if (argreg < 4) { \ - MOV_ARG(s, COND_AL, argreg, arg); \ - } else { \ - int ofs = (argreg - 4) * 4; \ - EXT_ARG; \ - tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE); \ - tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs); \ - } \ - return argreg + 1; \ -} - -DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32, - (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP)) -DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u, - (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP)) -DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u, - (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP)) -DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, ) - -static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg, - TCGReg arglo, TCGReg arghi) -{ - /* 64 bit arguments must go in even/odd register pairs - * and in 8-aligned stack slots. - */ - if (argreg & 1) { - argreg++; - } - if (argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) { - tcg_out_strd_8(s, COND_AL, arglo, - TCG_REG_CALL_STACK, (argreg - 4) * 4); - return argreg + 2; - } else { - argreg = tcg_out_arg_reg32(s, argreg, arglo); - argreg = tcg_out_arg_reg32(s, argreg, arghi); - return argreg; - } -} - -#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS) - -/* We expect to use an 9-bit sign-magnitude negative offset from ENV. */ -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256); - -/* These offsets are built into the LDRD below. */ -QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); -QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); - -/* Load and compare a TLB entry, leaving the flags set. Returns the register - containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */ - -static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, - MemOp opc, int mem_index, bool is_load) -{ - int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) - : offsetof(CPUTLBEntry, addr_write)); +#ifdef CONFIG_SOFTMMU + int mem_index = get_mmuidx(oi); + int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write); int fast_off = TLB_MASK_TABLE_OFS(mem_index); unsigned s_mask = (1 << (opc & MO_SIZE)) - 1; - unsigned a_mask = (1 << get_alignment_bits(opc)) - 1; TCGReg t_addr; + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addrlo; + ldst->addrhi_reg = addrhi; + /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */ + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256); + QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); + QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); /* Extract the tlb index from the address into R0. */ tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, - SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)); + SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS)); /* * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. * Load the tlb comparator into R2/R3 and the fast path addend into R1. 
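 *
 * For a 32-bit guest address with cmp_off == 0 this is roughly
 *      ldr   r2, [r1, r0]!      @ comparator; r1 updated to the entry
 * while the 64-bit case fetches the comparator pair into r2/r3 with
 * ldrd (a sketch of the cases handled just below).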
*/ if (cmp_off == 0) { - if (TARGET_LONG_BITS == 64) { - tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); - } else { + if (s->addr_type == TCG_TYPE_I32) { tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); + } else { + tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); } } else { tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); - if (TARGET_LONG_BITS == 64) { - tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); - } else { + if (s->addr_type == TCG_TYPE_I32) { tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); + } else { + tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); } } @@ -1463,8 +1468,8 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, addrlo, s_mask - a_mask); } - if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) { - tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask)); + if (use_armv7_instructions && s->page_bits <= 16) { + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask)); tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, t_addr, TCG_REG_TMP, 0); tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0); @@ -1474,228 +1479,76 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); } tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, - SHIFT_IMM_LSR(TARGET_PAGE_BITS)); + SHIFT_IMM_LSR(s->page_bits)); tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, - SHIFT_IMM_LSL(TARGET_PAGE_BITS)); + SHIFT_IMM_LSL(s->page_bits)); } - if (TARGET_LONG_BITS == 64) { + if (s->addr_type != TCG_TYPE_I32) { tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); } - - return TCG_REG_R1; -} - -/* Record the context of a call to the out of line helper code for the slow - path for a load or store, so that we can later generate the correct - helper code. */ -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, - TCGReg datalo, TCGReg datahi, TCGReg addrlo, - TCGReg addrhi, tcg_insn_unit *raddr, - tcg_insn_unit *label_ptr) -{ - TCGLabelQemuLdst *label = new_ldst_label(s); - - label->is_ld = is_ld; - label->oi = oi; - label->datalo_reg = datalo; - label->datahi_reg = datahi; - label->addrlo_reg = addrlo; - label->addrhi_reg = addrhi; - label->raddr = tcg_splitwx_to_rx(raddr); - label->label_ptr[0] = label_ptr; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) -{ - TCGReg argreg, datalo, datahi; - MemOpIdx oi = lb->oi; - MemOp opc = get_memop(oi); - - if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; - } - - argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0); - if (TARGET_LONG_BITS == 64) { - argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg); - } else { - argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg); - } - argreg = tcg_out_arg_imm32(s, argreg, oi); - argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); - - /* Use the canonical unsigned helpers and minimize icache usage. 
*/ - tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]); - - datalo = lb->datalo_reg; - datahi = lb->datahi_reg; - switch (opc & MO_SSIZE) { - case MO_SB: - tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0); - break; - case MO_SW: - tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0); - break; - default: - tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); - break; - case MO_UQ: - if (datalo != TCG_REG_R1) { - tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); - tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); - } else if (datahi != TCG_REG_R0) { - tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); - tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); - } else { - tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0); - tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); - tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP); - } - break; - } - - tcg_out_goto(s, COND_AL, lb->raddr); - return true; -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) -{ - TCGReg argreg, datalo, datahi; - MemOpIdx oi = lb->oi; - MemOp opc = get_memop(oi); - - if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; - } - - argreg = TCG_REG_R0; - argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0); - if (TARGET_LONG_BITS == 64) { - argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg); - } else { - argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg); - } - - datalo = lb->datalo_reg; - datahi = lb->datahi_reg; - switch (opc & MO_SIZE) { - case MO_8: - argreg = tcg_out_arg_reg8(s, argreg, datalo); - break; - case MO_16: - argreg = tcg_out_arg_reg16(s, argreg, datalo); - break; - case MO_32: - default: - argreg = tcg_out_arg_reg32(s, argreg, datalo); - break; - case MO_64: - argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi); - break; - } - - argreg = tcg_out_arg_imm32(s, argreg, oi); - argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); - - /* Tail-call to the helper, which will return to the fast path. */ - tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]); - return true; -} #else + if (a_mask) { + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addrlo; + ldst->addrhi_reg = addrhi; -static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, - TCGReg addrhi, unsigned a_bits) -{ - unsigned a_mask = (1 << a_bits) - 1; - TCGLabelQemuLdst *label = new_ldst_label(s); - - label->is_ld = is_ld; - label->addrlo_reg = addrlo; - label->addrhi_reg = addrhi; - - /* We are expecting a_bits to max out at 7, and can easily support 8. */ - tcg_debug_assert(a_mask <= 0xff); - /* tst addr, #mask */ - tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); - - /* blne slow_path */ - label->label_ptr[0] = s->code_ptr; - tcg_out_bl_imm(s, COND_NE, 0); - - label->raddr = tcg_splitwx_to_rx(s->code_ptr); -} - -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - if (!reloc_pc24(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; + /* We are expecting alignment to max out at 7 */ + tcg_debug_assert(a_mask <= 0xff); + /* tst addr, #mask */ + tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); } +#endif - if (TARGET_LONG_BITS == 64) { - /* 64-bit target address is aligned into R2:R3. 
*/ - if (l->addrhi_reg != TCG_REG_R2) { - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg); - } else if (l->addrlo_reg != TCG_REG_R3) { - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg); - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg); - } else { - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, TCG_REG_R2); - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, TCG_REG_R3); - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, TCG_REG_R1); - } - } else { - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg); - } - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_AREG0); - - /* - * Tail call to the helper, with the return address back inline, - * just for the clarity of the debugging traceback -- the helper - * cannot return. We have used BLNE to arrive here, so LR is - * already set. - */ - tcg_out_goto(s, COND_AL, (const void *) - (l->is_ld ? helper_unaligned_ld : helper_unaligned_st)); - return true; + return ldst; } -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, + TCGReg datahi, HostAddress h) { - return tcg_out_fail_alignment(s, l); -} + TCGReg base; -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} -#endif /* SOFTMMU */ - -static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, - TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addend, - bool scratch_addend) -{ /* Byte swapping is left to middle-end expansion. */ tcg_debug_assert((opc & MO_BSWAP) == 0); switch (opc & MO_SSIZE) { case MO_UB: - tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend); + if (h.index < 0) { + tcg_out_ld8_12(s, h.cond, datalo, h.base, 0); + } else { + tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index); + } break; case MO_SB: - tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend); + if (h.index < 0) { + tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0); + } else { + tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index); + } break; case MO_UW: - tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend); + if (h.index < 0) { + tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0); + } else { + tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index); + } break; case MO_SW: - tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend); + if (h.index < 0) { + tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0); + } else { + tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index); + } break; case MO_UL: - tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend); + if (h.index < 0) { + tcg_out_ld32_12(s, h.cond, datalo, h.base, 0); + } else { + tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index); + } break; case MO_UQ: /* We used pair allocation for datalo, so already should be aligned. */ @@ -1703,182 +1556,103 @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, tcg_debug_assert(datahi == datalo + 1); /* LDRD requires alignment; double-check that. */ if (get_alignment_bits(opc) >= MO_64) { + if (h.index < 0) { + tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0); + break; + } /* * Rm (the second address op) must not overlap Rt or Rt + 1. * Since datalo is aligned, we can simplify the test via alignment. * Flip the two address arguments if that works. 
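 *
 * For example, with datalo/datahi = r4/r5, "ldrd r4, r5, [Rn, Rm]"
 * requires Rm to be neither r4 nor r5; because r4 is even, the single
 * test (Rm & ~1) != r4 rules out both registers at once.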
*/ - if ((addend & ~1) != datalo) { - tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend); + if ((h.index & ~1) != datalo) { + tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index); break; } - if ((addrlo & ~1) != datalo) { - tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo); + if ((h.base & ~1) != datalo) { + tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base); break; } } - if (scratch_addend) { - tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo); - tcg_out_ld32_12(s, COND_AL, datahi, addend, 4); + if (h.index < 0) { + base = h.base; + if (datalo == h.base) { + tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base); + base = TCG_REG_TMP; + } + } else if (h.index_scratch) { + tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base); + tcg_out_ld32_12(s, h.cond, datahi, h.index, 4); + break; } else { - tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP, - addend, addrlo, SHIFT_IMM_LSL(0)); - tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0); - tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4); + tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP, + h.base, h.index, SHIFT_IMM_LSL(0)); + base = TCG_REG_TMP; } + tcg_out_ld32_12(s, h.cond, datalo, base, 0); + tcg_out_ld32_12(s, h.cond, datahi, base, 4); break; default: g_assert_not_reached(); } } -#ifndef CONFIG_SOFTMMU -static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, - TCGReg datahi, TCGReg addrlo) +static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addrhi, + MemOpIdx oi, TCGType data_type) { - /* Byte swapping is left to middle-end expansion. */ - tcg_debug_assert((opc & MO_BSWAP) == 0); + MemOp opc = get_memop(oi); + TCGLabelQemuLdst *ldst; + HostAddress h; - switch (opc & MO_SSIZE) { - case MO_UB: - tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0); - break; - case MO_SB: - tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0); - break; - case MO_UW: - tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0); - break; - case MO_SW: - tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0); - break; - case MO_UL: - tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); - break; - case MO_UQ: - /* We used pair allocation for datalo, so already should be aligned. */ - tcg_debug_assert((datalo & 1) == 0); - tcg_debug_assert(datahi == datalo + 1); - /* LDRD requires alignment; double-check that. */ - if (get_alignment_bits(opc) >= MO_64) { - tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0); - } else if (datalo == addrlo) { - tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4); - tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); - } else { - tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); - tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4); - } - break; - default: - g_assert_not_reached(); - } -} -#endif + ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true); + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = datalo; + ldst->datahi_reg = datahi; -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) -{ - TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); - MemOpIdx oi; - MemOp opc; -#ifdef CONFIG_SOFTMMU - int mem_index; - TCGReg addend; - tcg_insn_unit *label_ptr; -#else - unsigned a_bits; -#endif + /* + * This is a conditional BL only to load a pointer within this + * opcode into LR for the slow path. We will not be using + * the value for a tail call. + */ + ldst->label_ptr[0] = s->code_ptr; + tcg_out_bl_imm(s, COND_NE, 0); - datalo = *args++; - datahi = (is64 ? *args++ : 0); - addrlo = *args++; - addrhi = (TARGET_LONG_BITS == 64 ?
*args++ : 0); - oi = *args++; - opc = get_memop(oi); - -#ifdef CONFIG_SOFTMMU - mem_index = get_mmuidx(oi); - addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1); - - /* This a conditional BL only to load a pointer within this opcode into LR - for the slow path. We will not be using the value for a tail call. */ - label_ptr = s->code_ptr; - tcg_out_bl_imm(s, COND_NE, 0); - - tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true); - - add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, - s->code_ptr, label_ptr); -#else /* !CONFIG_SOFTMMU */ - a_bits = get_alignment_bits(opc); - if (a_bits) { - tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); - } - if (guest_base) { - tcg_out_qemu_ld_index(s, opc, datalo, datahi, - addrlo, TCG_REG_GUEST_BASE, false); + tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } else { - tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo); - } -#endif -} - -static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc, - TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addend, - bool scratch_addend) -{ - /* Byte swapping is left to middle-end expansion. */ - tcg_debug_assert((opc & MO_BSWAP) == 0); - - switch (opc & MO_SIZE) { - case MO_8: - tcg_out_st8_r(s, cond, datalo, addrlo, addend); - break; - case MO_16: - tcg_out_st16_r(s, cond, datalo, addrlo, addend); - break; - case MO_32: - tcg_out_st32_r(s, cond, datalo, addrlo, addend); - break; - case MO_64: - /* We used pair allocation for datalo, so already should be aligned. */ - tcg_debug_assert((datalo & 1) == 0); - tcg_debug_assert(datahi == datalo + 1); - /* STRD requires alignment; double-check that. */ - if (get_alignment_bits(opc) >= MO_64) { - tcg_out_strd_r(s, cond, datalo, addrlo, addend); - } else if (scratch_addend) { - tcg_out_st32_rwb(s, cond, datalo, addend, addrlo); - tcg_out_st32_12(s, cond, datahi, addend, 4); - } else { - tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP, - addend, addrlo, SHIFT_IMM_LSL(0)); - tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0); - tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4); - } - break; - default: - g_assert_not_reached(); + tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); } } -#ifndef CONFIG_SOFTMMU static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, - TCGReg datahi, TCGReg addrlo) + TCGReg datahi, HostAddress h) { /* Byte swapping is left to middle-end expansion. */ tcg_debug_assert((opc & MO_BSWAP) == 0); switch (opc & MO_SIZE) { case MO_8: - tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0); + if (h.index < 0) { + tcg_out_st8_12(s, h.cond, datalo, h.base, 0); + } else { + tcg_out_st8_r(s, h.cond, datalo, h.base, h.index); + } break; case MO_16: - tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0); + if (h.index < 0) { + tcg_out_st16_8(s, h.cond, datalo, h.base, 0); + } else { + tcg_out_st16_r(s, h.cond, datalo, h.base, h.index); + } break; case MO_32: - tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); + if (h.index < 0) { + tcg_out_st32_12(s, h.cond, datalo, h.base, 0); + } else { + tcg_out_st32_r(s, h.cond, datalo, h.base, h.index); + } break; case MO_64: /* We used pair allocation for datalo, so already should be aligned. */ @@ -1886,63 +1660,50 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, tcg_debug_assert(datahi == datalo + 1); /* STRD requires alignment; double-check that. 
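 *
 * (STRD has stricter address-alignment requirements than a pair of
 * plain word stores, so it is used only when the memop guarantees
 * the access is sufficiently aligned.)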
*/ if (get_alignment_bits(opc) >= MO_64) { - tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0); + if (h.index < 0) { + tcg_out_strd_8(s, h.cond, datalo, h.base, 0); + } else { + tcg_out_strd_r(s, h.cond, datalo, h.base, h.index); + } + } else if (h.index_scratch) { + tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base); + tcg_out_st32_12(s, h.cond, datahi, h.index, 4); } else { - tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); - tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4); + tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP, + h.base, h.index, SHIFT_IMM_LSL(0)); + tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0); + tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4); } break; default: g_assert_not_reached(); } } -#endif -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) +static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addrhi, + MemOpIdx oi, TCGType data_type) { - TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); - MemOpIdx oi; - MemOp opc; -#ifdef CONFIG_SOFTMMU - int mem_index; - TCGReg addend; - tcg_insn_unit *label_ptr; -#else - unsigned a_bits; -#endif + MemOp opc = get_memop(oi); + TCGLabelQemuLdst *ldst; + HostAddress h; - datalo = *args++; - datahi = (is64 ? *args++ : 0); - addrlo = *args++; - addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0); - oi = *args++; - opc = get_memop(oi); + ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false); + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = datalo; + ldst->datahi_reg = datahi; -#ifdef CONFIG_SOFTMMU - mem_index = get_mmuidx(oi); - addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0); + h.cond = COND_EQ; + tcg_out_qemu_st_direct(s, opc, datalo, datahi, h); - tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, - addrlo, addend, true); - - /* The conditional call must come last, as we're going to return here. */ - label_ptr = s->code_ptr; - tcg_out_bl_imm(s, COND_NE, 0); - - add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi, - s->code_ptr, label_ptr); -#else /* !CONFIG_SOFTMMU */ - a_bits = get_alignment_bits(opc); - if (a_bits) { - tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); - } - if (guest_base) { - tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi, - addrlo, TCG_REG_GUEST_BASE, false); + /* The conditional call is last, as we're going to return here. 
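+ *
+ * Schematically, the emitted fast path for a store is
+ *      streq data, [base, index]   @ performed only on a TLB hit
+ *      blne  slow_path             @ miss: helper performs the store
+ *  raddr:                          @ both paths resume here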
*/ + ldst->label_ptr[0] = s->code_ptr; + tcg_out_bl_imm(s, COND_NE, 0); + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } else { - tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo); + tcg_out_qemu_st_direct(s, opc, datalo, datahi, h); } -#endif } static void tcg_out_epilogue(TCGContext *s); @@ -2224,17 +1985,36 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, ARITH_MOV, args[0], 0, 0); break; - case INDEX_op_qemu_ld_i32: - tcg_out_qemu_ld(s, args, 0); + case INDEX_op_qemu_ld_a32_i32: + tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, args, 1); + case INDEX_op_qemu_ld_a64_i32: + tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], + args[3], TCG_TYPE_I32); break; - case INDEX_op_qemu_st_i32: - tcg_out_qemu_st(s, args, 0); + case INDEX_op_qemu_ld_a32_i64: + tcg_out_qemu_ld(s, args[0], args[1], args[2], -1, + args[3], TCG_TYPE_I64); break; - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, args, 1); + case INDEX_op_qemu_ld_a64_i64: + tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], + args[4], TCG_TYPE_I64); + break; + + case INDEX_op_qemu_st_a32_i32: + tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); + break; + case INDEX_op_qemu_st_a64_i32: + tcg_out_qemu_st(s, args[0], -1, args[1], args[2], + args[3], TCG_TYPE_I32); + break; + case INDEX_op_qemu_st_a32_i64: + tcg_out_qemu_st(s, args[0], args[1], args[2], -1, + args[3], TCG_TYPE_I64); + break; + case INDEX_op_qemu_st_a64_i64: + tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], + args[4], TCG_TYPE_I64); break; case INDEX_op_bswap16_i32: @@ -2244,16 +2024,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_bswap32(s, COND_AL, args[0], args[1]); break; - case INDEX_op_ext8s_i32: - tcg_out_ext8s(s, COND_AL, args[0], args[1]); - break; - case INDEX_op_ext16s_i32: - tcg_out_ext16s(s, COND_AL, args[0], args[1]); - break; - case INDEX_op_ext16u_i32: - tcg_out_ext16u(s, COND_AL, args[0], args[1]); - break; - case INDEX_op_deposit_i32: tcg_out_deposit(s, COND_AL, args[0], args[2], args[3], args[4], const_args[2]); @@ -2301,8 +2071,12 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_call: /* Always emitted via tcg_out_call. */ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ + case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ + case INDEX_op_ext8u_i32: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16u_i32: default: - tcg_abort(); + g_assert_not_reached(); } } @@ -2381,14 +2155,22 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_setcond2_i32: return C_O1_I4(r, r, r, rI, rI); - case INDEX_op_qemu_ld_i32: - return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l); - case INDEX_op_qemu_ld_i64: - return TARGET_LONG_BITS == 32 ? C_O2_I1(e, p, l) : C_O2_I2(e, p, l, l); - case INDEX_op_qemu_st_i32: - return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s); - case INDEX_op_qemu_st_i64: - return TARGET_LONG_BITS == 32 ? 
C_O0_I3(S, p, s) : C_O0_I4(S, p, s, s); + case INDEX_op_qemu_ld_a32_i32: + return C_O1_I1(r, q); + case INDEX_op_qemu_ld_a64_i32: + return C_O1_I2(r, q, q); + case INDEX_op_qemu_ld_a32_i64: + return C_O2_I1(e, p, q); + case INDEX_op_qemu_ld_a64_i64: + return C_O2_I2(e, p, q, q); + case INDEX_op_qemu_st_a32_i32: + return C_O0_I2(q, q); + case INDEX_op_qemu_st_a64_i32: + return C_O0_I3(q, q, q); + case INDEX_op_qemu_st_a32_i64: + return C_O0_I3(Q, p, q); + case INDEX_op_qemu_st_a64_i64: + return C_O0_I4(Q, p, q, q); case INDEX_op_st_vec: return C_O0_I2(w, r); @@ -2585,6 +2367,11 @@ static void tcg_out_movi(TCGContext *s, TCGType type, tcg_out_movi32(s, COND_AL, ret, arg); } +static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) +{ + return false; +} + static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, tcg_target_long imm) { diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h index def2a189e6..c649db72a6 100644 --- a/tcg/arm/tcg-target.h +++ b/tcg/arm/tcg-target.h @@ -31,7 +31,6 @@ extern int arm_arch; #define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7) #define TCG_TARGET_INSN_UNIT_SIZE 4 -#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16 #define MAX_CODE_GEN_BUFFER_SIZE UINT32_MAX typedef enum { @@ -125,6 +124,8 @@ extern bool use_neon_instructions; #define TCG_TARGET_HAS_rem_i32 0 #define TCG_TARGET_HAS_qemu_st8_i32 0 +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + #define TCG_TARGET_HAS_v64 use_neon_instructions #define TCG_TARGET_HAS_v128 use_neon_instructions #define TCG_TARGET_HAS_v256 0 @@ -150,7 +151,6 @@ extern bool use_neon_instructions; #define TCG_TARGET_HAS_cmpsel_vec 0 #define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_HAS_MEMORY_BSWAP 0 #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 883ced8168..ae54e5fbf3 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -91,6 +91,8 @@ static const int tcg_target_reg_alloc_order[] = { #endif }; +#define TCG_TMP_VEC TCG_REG_XMM5 + static const int tcg_target_call_iarg_regs[] = { #if TCG_TARGET_REG_BITS == 64 #if defined(_WIN64) @@ -158,41 +160,14 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) # define SOFTMMU_RESERVE_REGS 0 #endif -/* The host compiler should supply <cpuid.h> to enable runtime features - detection, as we're not going to go so far as our own inline assembly. - If not available, default values will be assumed. */ -#if defined(CONFIG_CPUID_H) -#include "qemu/cpuid.h" -#endif - /* For 64-bit, we always know that CMOV is available. */ #if TCG_TARGET_REG_BITS == 64 -# define have_cmov 1 -#elif defined(CONFIG_CPUID_H) -static bool have_cmov; +# define have_cmov true #else -# define have_cmov 0 -#endif - -/* We need these symbols in tcg-target.h, and we can't properly conditionalize - it there. Therefore we always define the variable.
*/ -bool have_bmi1; -bool have_popcnt; -bool have_avx1; -bool have_avx2; -bool have_avx512bw; -bool have_avx512dq; -bool have_avx512vbmi2; -bool have_avx512vl; -bool have_movbe; - -#ifdef CONFIG_CPUID_H -static bool have_bmi2; -static bool have_lzcnt; -#else -# define have_bmi2 0 -# define have_lzcnt 0 +# define have_cmov (cpuinfo & CPUINFO_CMOV) #endif +#define have_bmi2 (cpuinfo & CPUINFO_BMI2) +#define have_lzcnt (cpuinfo & CPUINFO_LZCNT) static const tcg_insn_unit *tb_ret_addr; @@ -218,7 +193,7 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, tcg_patch8(code_ptr, value); break; default: - tcg_abort(); + g_assert_not_reached(); } return true; } @@ -346,6 +321,8 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) #define OPC_PCMPGTW (0x65 | P_EXT | P_DATA16) #define OPC_PCMPGTD (0x66 | P_EXT | P_DATA16) #define OPC_PCMPGTQ (0x37 | P_EXT38 | P_DATA16) +#define OPC_PEXTRD (0x16 | P_EXT3A | P_DATA16) +#define OPC_PINSRD (0x22 | P_EXT3A | P_DATA16) #define OPC_PMAXSB (0x3c | P_EXT38 | P_DATA16) #define OPC_PMAXSW (0xee | P_EXT | P_DATA16) #define OPC_PMAXSD (0x3d | P_EXT38 | P_DATA16) @@ -460,6 +437,7 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) #define OPC_VPTERNLOGQ (0x25 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX) #define OPC_VZEROUPPER (0x77 | P_EXT) #define OPC_XCHG_ax_r32 (0x90) +#define OPC_XCHG_EvGv (0x87) #define OPC_GRP3_Eb (0xf6) #define OPC_GRP3_Ev (0xf7) @@ -1078,12 +1056,19 @@ static void tcg_out_movi(TCGContext *s, TCGType type, } } +static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm(s, OPC_XCHG_EvGv + rexw, r1, r2); + return true; +} + static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, tcg_target_long imm) { /* This function is only used for passing structs by reference. */ - tcg_debug_assert(TCG_TARGET_REG_BITS == 32); - tcg_out_modrm_offset(s, OPC_LEA, rd, rs, imm); + tcg_debug_assert(imm == (int32_t)imm); + tcg_out_modrm_offset(s, OPC_LEA | P_REXW, rd, rs, imm); } static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val) @@ -1095,7 +1080,7 @@ static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val) tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0); tcg_out32(s, val); } else { - tcg_abort(); + g_assert_not_reached(); } } @@ -1259,43 +1244,63 @@ static inline void tcg_out_rolw_8(TCGContext *s, int reg) tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8); } -static inline void tcg_out_ext8u(TCGContext *s, int dest, int src) +static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src) { /* movzbl */ tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64); tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src); } -static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw) +static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; /* movsbl */ tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64); tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src); } -static inline void tcg_out_ext16u(TCGContext *s, int dest, int src) +static void tcg_out_ext16u(TCGContext *s, TCGReg dest, TCGReg src) { /* movzwl */ tcg_out_modrm(s, OPC_MOVZWL, dest, src); } -static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw) +static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { + int rexw = type == TCG_TYPE_I32 ? 
0 : P_REXW; /* movsw[lq] */ tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src); } -static inline void tcg_out_ext32u(TCGContext *s, int dest, int src) +static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src) { /* 32-bit mov zero extends. */ tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src); } -static inline void tcg_out_ext32s(TCGContext *s, int dest, int src) +static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src) { + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); tcg_out_modrm(s, OPC_MOVSLQ, dest, src); } +static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src) +{ + tcg_out_ext32s(s, dest, src); +} + +static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src) +{ + if (dest != src) { + tcg_out_ext32u(s, dest, src); + } +} + +static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg dest, TCGReg src) +{ + tcg_out_ext32u(s, dest, src); +} + static inline void tcg_out_bswap64(TCGContext *s, int reg) { tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0); @@ -1359,7 +1364,7 @@ static void tgen_arithi(TCGContext *s, int c, int r0, return; } - tcg_abort(); + g_assert_not_reached(); } static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val) @@ -1369,8 +1374,8 @@ static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val) } } -/* Use SMALL != 0 to force a short forward branch. */ -static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int small) +/* Set SMALL to force a short forward branch. */ +static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, bool small) { int32_t val, val1; @@ -1385,9 +1390,7 @@ static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int small) } tcg_out8(s, val1); } else { - if (small) { - tcg_abort(); - } + tcg_debug_assert(!small); if (opc == -1) { tcg_out8(s, OPC_JMP_long); tcg_out32(s, val - 5); @@ -1525,7 +1528,7 @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args, label_this, small); break; default: - tcg_abort(); + g_assert_not_reached(); } tcg_out_label(s, label_next); } @@ -1727,160 +1730,97 @@ static void tcg_out_nopn(TCGContext *s, int n) tcg_out8(s, 0x90); } -#if defined(CONFIG_SOFTMMU) -/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * int mmu_idx, uintptr_t ra) - */ -static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_LEUW] = helper_le_lduw_mmu, - [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEUQ] = helper_le_ldq_mmu, - [MO_BEUW] = helper_be_lduw_mmu, - [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEUQ] = helper_be_ldq_mmu, -}; - -/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, int mmu_idx, uintptr_t ra) - */ -static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_UB] = helper_ret_stb_mmu, - [MO_LEUW] = helper_le_stw_mmu, - [MO_LEUL] = helper_le_stl_mmu, - [MO_LEUQ] = helper_le_stq_mmu, - [MO_BEUW] = helper_be_stw_mmu, - [MO_BEUL] = helper_be_stl_mmu, - [MO_BEUQ] = helper_be_stq_mmu, -}; - -/* Perform the TLB load and compare. - - Inputs: - ADDRLO and ADDRHI contain the low and high part of the address. - - MEM_INDEX and S_BITS are the memory context and log2 size of the load. - - WHICH is the offset into the CPUTLBEntry structure of the slot to read. - This should be offsetof addr_read or addr_write. - - Outputs: - LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses) - positions of the displacements of forward jumps to the TLB miss case. - - Second argument register is loaded with the low part of the address. 
- In the TLB hit case, it has been adjusted as indicated by the TLB - and so is a host address. In the TLB miss case, it continues to - hold a guest address. - - First argument register is clobbered. */ - -static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, - int mem_index, MemOp opc, - tcg_insn_unit **label_ptr, int which) +/* Test register R vs immediate bits I, setting Z flag for EQ/NE. */ +static void __attribute__((unused)) +tcg_out_testi(TCGContext *s, TCGReg r, uint32_t i) { - const TCGReg r0 = TCG_REG_L0; - const TCGReg r1 = TCG_REG_L1; - TCGType ttype = TCG_TYPE_I32; - TCGType tlbtype = TCG_TYPE_I32; - int trexw = 0, hrexw = 0, tlbrexw = 0; - unsigned a_bits = get_alignment_bits(opc); - unsigned s_bits = opc & MO_SIZE; - unsigned a_mask = (1 << a_bits) - 1; - unsigned s_mask = (1 << s_bits) - 1; - target_ulong tlb_mask; - - if (TCG_TARGET_REG_BITS == 64) { - if (TARGET_LONG_BITS == 64) { - ttype = TCG_TYPE_I64; - trexw = P_REXW; - } - if (TCG_TYPE_PTR == TCG_TYPE_I64) { - hrexw = P_REXW; - if (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32) { - tlbtype = TCG_TYPE_I64; - tlbrexw = P_REXW; - } - } - } - - tcg_out_mov(s, tlbtype, r0, addrlo); - tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); - - tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0, - TLB_MASK_TABLE_OFS(mem_index) + - offsetof(CPUTLBDescFast, mask)); - - tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0, - TLB_MASK_TABLE_OFS(mem_index) + - offsetof(CPUTLBDescFast, table)); - - /* If the required alignment is at least as large as the access, simply - copy the address and mask. For lesser alignments, check that we don't - cross pages for the complete access. */ - if (a_bits >= s_bits) { - tcg_out_mov(s, ttype, r1, addrlo); + /* + * This is used for testing alignment, so we can usually use testb. + * For i686, we have to use testl for %esi/%edi. + */ + if (i <= 0xff && (TCG_TARGET_REG_BITS == 64 || r < 4)) { + tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, r); + tcg_out8(s, i); } else { - tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask); + tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, r); + tcg_out32(s, i); } - tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask; - tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0); +} - /* cmp 0(r0), r1 */ - tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, which); +typedef struct { + TCGReg base; + int index; + int ofs; + int seg; + TCGAtomAlign aa; +} HostAddress; - /* Prepare for both the fast path add of the tlb addend, and the slow - path function argument setup. */ - tcg_out_mov(s, ttype, r1, addrlo); +bool tcg_target_has_memory_bswap(MemOp memop) +{ + TCGAtomAlign aa; - /* jne slow_path */ - tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); - label_ptr[0] = s->code_ptr; - s->code_ptr += 4; - - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { - /* cmp 4(r0), addrhi */ - tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, which + 4); - - /* jne slow_path */ - tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); - label_ptr[1] = s->code_ptr; - s->code_ptr += 4; + if (!have_movbe) { + return false; + } + if ((memop & MO_SIZE) < MO_128) { + return true; } - /* TLB Hit. */ - - /* add addend(r0), r1 */ - tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0, - offsetof(CPUTLBEntry, addend)); + /* + * Reject 16-byte memop with 16-byte atomicity, i.e. VMOVDQA, + * but do allow a pair of 64-bit operations, i.e. MOVBEQ. 
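+ *
+ * That is, a 16-byte big-endian access may still be emitted as two
+ * byte-swapping 8-byte operations, roughly
+ *      movbe  rax, [addr + 8]
+ *      movbe  rdx, [addr]
+ * which is why only accesses requiring 16-byte atomicity are rejected.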
+ */ + aa = atom_and_align_for_opc(tcg_ctx, memop, MO_ATOM_IFALIGN, true); + return aa.atom < MO_128; } /* - * Record the context of a call to the out of line helper code for the slow path - * for a load or store, so that we can later generate the correct helper code + * Because i686 has no register parameters and because x86_64 has xchg + * to handle addr/data register overlap, we have placed all input arguments + * before we might need a scratch reg. + * + * Even then, a scratch is only needed for l->raddr. Rather than expose + * a general-purpose scratch when we don't actually know it's available, + * use the ra_gen hook to load into RAX if needed. */ -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, - MemOpIdx oi, - TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - tcg_insn_unit *raddr, - tcg_insn_unit **label_ptr) +#if TCG_TARGET_REG_BITS == 64 +static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg) { - TCGLabelQemuLdst *label = new_ldst_label(s); - - label->is_ld = is_ld; - label->oi = oi; - label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32; - label->datalo_reg = datalo; - label->datahi_reg = datahi; - label->addrlo_reg = addrlo; - label->addrhi_reg = addrhi; - label->raddr = tcg_splitwx_to_rx(raddr); - label->label_ptr[0] = label_ptr[0]; - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { - label->label_ptr[1] = label_ptr[1]; + if (arg < 0) { + arg = TCG_REG_RAX; } + tcg_out_movi(s, TCG_TYPE_PTR, arg, (uintptr_t)l->raddr); + return arg; +} +static const TCGLdstHelperParam ldst_helper_param = { + .ra_gen = ldst_ra_gen +}; +#else +static const TCGLdstHelperParam ldst_helper_param = { }; +#endif + +static void tcg_out_vec_to_pair(TCGContext *s, TCGType type, + TCGReg l, TCGReg h, TCGReg v) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + + /* vpmov{d,q} %v, %l */ + tcg_out_vex_modrm(s, OPC_MOVD_EyVy + rexw, v, 0, l); + /* vpextr{d,q} $1, %v, %h */ + tcg_out_vex_modrm(s, OPC_PEXTRD + rexw, v, 0, h); + tcg_out8(s, 1); +} + +static void tcg_out_pair_to_vec(TCGContext *s, TCGType type, + TCGReg v, TCGReg l, TCGReg h) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + + /* vmov{d,q} %l, %v */ + tcg_out_vex_modrm(s, OPC_MOVD_VyEy + rexw, v, 0, l); + /* vpinsr{d,q} $1, %h, %v, %v */ + tcg_out_vex_modrm(s, OPC_PINSRD + rexw, v, v, h); + tcg_out8(s, 1); } /* @@ -1888,82 +1828,19 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, */ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { - MemOpIdx oi = l->oi; - MemOp opc = get_memop(oi); - TCGReg data_reg; + MemOp opc = get_memop(l->oi); tcg_insn_unit **label_ptr = &l->label_ptr[0]; - int rexw = (l->type == TCG_TYPE_I64 ?
P_REXW : 0); /* resolve label address */ tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4); - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { + if (label_ptr[1]) { tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4); } - if (TCG_TARGET_REG_BITS == 32) { - int ofs = 0; + tcg_out_ld_helper_args(s, l, &ldst_helper_param); + tcg_out_branch(s, 1, qemu_ld_helpers[opc & MO_SIZE]); + tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param); - tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); - ofs += 4; - - tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); - ofs += 4; - - if (TARGET_LONG_BITS == 64) { - tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); - ofs += 4; - } - - tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs); - ofs += 4; - - tcg_out_sti(s, TCG_TYPE_PTR, (uintptr_t)l->raddr, TCG_REG_ESP, ofs); - } else { - tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); - /* The second argument is already loaded with addrlo. */ - tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi); - tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3], - (uintptr_t)l->raddr); - } - - tcg_out_branch(s, 1, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); - - data_reg = l->datalo_reg; - switch (opc & MO_SSIZE) { - case MO_SB: - tcg_out_ext8s(s, data_reg, TCG_REG_EAX, rexw); - break; - case MO_SW: - tcg_out_ext16s(s, data_reg, TCG_REG_EAX, rexw); - break; -#if TCG_TARGET_REG_BITS == 64 - case MO_SL: - tcg_out_ext32s(s, data_reg, TCG_REG_EAX); - break; -#endif - case MO_UB: - case MO_UW: - /* Note that the helpers have zero-extended to tcg_target_long. */ - case MO_UL: - tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); - break; - case MO_UQ: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX); - } else if (data_reg == TCG_REG_EDX) { - /* xchg %edx, %eax */ - tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0); - tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX); - } else { - tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); - tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX); - } - break; - default: - tcg_abort(); - } - - /* Jump to the code corresponding to next IR of qemu_st */ tcg_out_jmp(s, l->raddr); return true; } @@ -1973,157 +1850,30 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) */ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { - MemOpIdx oi = l->oi; - MemOp opc = get_memop(oi); - MemOp s_bits = opc & MO_SIZE; + MemOp opc = get_memop(l->oi); tcg_insn_unit **label_ptr = &l->label_ptr[0]; - TCGReg retaddr; /* resolve label address */ tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4); - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { + if (label_ptr[1]) { tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4); } - if (TCG_TARGET_REG_BITS == 32) { - int ofs = 0; + tcg_out_st_helper_args(s, l, &ldst_helper_param); + tcg_out_branch(s, 1, qemu_st_helpers[opc & MO_SIZE]); - tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); - ofs += 4; - - tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); - ofs += 4; - - if (TARGET_LONG_BITS == 64) { - tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); - ofs += 4; - } - - tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs); - ofs += 4; - - if (s_bits == MO_64) { - tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs); - ofs += 4; - } - - tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs); - ofs += 4; - - retaddr = TCG_REG_EAX; - 
tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); - tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs); - } else { - tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); - /* The second argument is already loaded with addrlo. */ - tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), - tcg_target_call_iarg_regs[2], l->datalo_reg); - tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi); - - if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) { - retaddr = tcg_target_call_iarg_regs[4]; - tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); - } else { - retaddr = TCG_REG_RAX; - tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); - tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, - TCG_TARGET_CALL_STACK_OFFSET); - } - } - - /* "Tail call" to the helper, with the return address back inline. */ - tcg_out_push(s, retaddr); - tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); - return true; -} -#else - -static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, - TCGReg addrhi, unsigned a_bits) -{ - unsigned a_mask = (1 << a_bits) - 1; - TCGLabelQemuLdst *label; - - /* - * We are expecting a_bits to max out at 7, so we can usually use testb. - * For i686, we have to use testl for %esi/%edi. - */ - if (a_mask <= 0xff && (TCG_TARGET_REG_BITS == 64 || addrlo < 4)) { - tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, addrlo); - tcg_out8(s, a_mask); - } else { - tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, addrlo); - tcg_out32(s, a_mask); - } - - /* jne slow_path */ - tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); - - label = new_ldst_label(s); - label->is_ld = is_ld; - label->addrlo_reg = addrlo; - label->addrhi_reg = addrhi; - label->raddr = tcg_splitwx_to_rx(s->code_ptr + 4); - label->label_ptr[0] = s->code_ptr; - - s->code_ptr += 4; -} - -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - /* resolve label address */ - tcg_patch32(l->label_ptr[0], s->code_ptr - l->label_ptr[0] - 4); - - if (TCG_TARGET_REG_BITS == 32) { - int ofs = 0; - - tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); - ofs += 4; - - tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); - ofs += 4; - if (TARGET_LONG_BITS == 64) { - tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); - ofs += 4; - } - - tcg_out_pushi(s, (uintptr_t)l->raddr); - } else { - tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1], - l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); - - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, (uintptr_t)l->raddr); - tcg_out_push(s, TCG_REG_RAX); - } - - /* "Tail call" to the helper, with the return address back inline. */ - tcg_out_jmp(s, (const void *)(l->is_ld ? 
helper_unaligned_ld - : helper_unaligned_st)); + tcg_out_jmp(s, l->raddr); return true; } -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} +#ifndef CONFIG_SOFTMMU +static HostAddress x86_guest_base = { + .index = -1 +}; -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -#if TCG_TARGET_REG_BITS == 32 -# define x86_guest_base_seg 0 -# define x86_guest_base_index -1 -# define x86_guest_base_offset guest_base -#else -static int x86_guest_base_seg; -static int x86_guest_base_index = -1; -static int32_t x86_guest_base_offset; -# if defined(__x86_64__) && defined(__linux__) -# include <asm/prctl.h> -# include <sys/prctl.h> +#if defined(__x86_64__) && defined(__linux__) +# include <asm/prctl.h> +# include <sys/prctl.h> int arch_prctl(int code, unsigned long addr); static inline int setup_guest_base_seg(void) { @@ -2132,8 +1882,9 @@ static inline int setup_guest_base_seg(void) } return 0; } -# elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__) -# include <machine/sysarch.h> +#elif defined(__x86_64__) && \ + (defined (__FreeBSD__) || defined (__FreeBSD_kernel__)) +# include <machine/sysarch.h> static inline int setup_guest_base_seg(void) { if (sysarch(AMD64_SET_GSBASE, &guest_base) == 0) { @@ -2141,21 +1892,141 @@ static inline int setup_guest_base_seg(void) } return 0; } -# else +#else static inline int setup_guest_base_seg(void) { return 0; } -# endif +#endif /* setup_guest_base_seg */ +#endif /* !SOFTMMU */ + +/* + * For softmmu, perform the TLB load and compare. + * For useronly, perform any required alignment tests. + * In both cases, return a TCGLabelQemuLdst structure if the slow path + * is required and fill in @h with the host address for the fast path. + */ +static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, + TCGReg addrlo, TCGReg addrhi, + MemOpIdx oi, bool is_ld) +{ + TCGLabelQemuLdst *ldst = NULL; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; + unsigned a_mask; + +#ifdef CONFIG_SOFTMMU + h->index = TCG_REG_L0; + h->ofs = 0; + h->seg = 0; +#else + *h = x86_guest_base; #endif + h->base = addrlo; + h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128); + a_mask = (1 << h->aa.align) - 1; + +#ifdef CONFIG_SOFTMMU + int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write); + TCGType ttype = TCG_TYPE_I32; + TCGType tlbtype = TCG_TYPE_I32; + int trexw = 0, hrexw = 0, tlbrexw = 0; + unsigned mem_index = get_mmuidx(oi); + unsigned s_mask = (1 << s_bits) - 1; + int tlb_mask; + + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addrlo; + ldst->addrhi_reg = addrhi; + + if (TCG_TARGET_REG_BITS == 64) { + ttype = s->addr_type; + trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW); + if (TCG_TYPE_PTR == TCG_TYPE_I64) { + hrexw = P_REXW; + if (s->page_bits + s->tlb_dyn_max_bits > 32) { + tlbtype = TCG_TYPE_I64; + tlbrexw = P_REXW; + } + } + } + + tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo); + tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0, + s->page_bits - CPU_TLB_ENTRY_BITS); + + tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0, + TLB_MASK_TABLE_OFS(mem_index) + + offsetof(CPUTLBDescFast, mask)); + + tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0, + TLB_MASK_TABLE_OFS(mem_index) + + offsetof(CPUTLBDescFast, table)); + + /* + * If the required alignment is at least as large as the access, simply + * copy the address and mask.
For lesser alignments, check that we don't + * cross pages for the complete access. + */ + if (a_mask >= s_mask) { + tcg_out_mov(s, ttype, TCG_REG_L1, addrlo); + } else { + tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1, + addrlo, s_mask - a_mask); + } + tlb_mask = s->page_mask | a_mask; + tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0); + + /* cmp 0(TCG_REG_L0), TCG_REG_L1 */ + tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, + TCG_REG_L1, TCG_REG_L0, cmp_ofs); + + /* jne slow_path */ + tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); + ldst->label_ptr[0] = s->code_ptr; + s->code_ptr += 4; + + if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) { + /* cmp 4(TCG_REG_L0), addrhi */ + tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, TCG_REG_L0, cmp_ofs + 4); + + /* jne slow_path */ + tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); + ldst->label_ptr[1] = s->code_ptr; + s->code_ptr += 4; + } + + /* TLB Hit. */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0, + offsetof(CPUTLBEntry, addend)); +#else + if (a_mask) { + ldst = new_ldst_label(s); + + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addrlo; + ldst->addrhi_reg = addrhi; + + tcg_out_testi(s, addrlo, a_mask); + /* jne slow_path */ + tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); + ldst->label_ptr[0] = s->code_ptr; + s->code_ptr += 4; + } +#endif + + return ldst; +} static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg base, int index, intptr_t ofs, - int seg, bool is64, MemOp memop) + HostAddress h, TCGType type, MemOp memop) { bool use_movbe = false; - int rexw = is64 * P_REXW; + int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW); int movop = OPC_MOVL_GvEv; /* Do big-endian loads with movbe. */ @@ -2167,134 +2038,171 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, switch (memop & MO_SSIZE) { case MO_UB: - tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo, - base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, OPC_MOVZBL + h.seg, datalo, + h.base, h.index, 0, h.ofs); break; case MO_SB: - tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + seg, datalo, - base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + h.seg, datalo, + h.base, h.index, 0, h.ofs); break; case MO_UW: if (use_movbe) { /* There is no extending movbe; only low 16-bits are modified. */ - if (datalo != base && datalo != index) { + if (datalo != h.base && datalo != h.index) { /* XOR breaks dependency chains. 
*/ tgen_arithr(s, ARITH_XOR, datalo, datalo); - tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg, - datalo, base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg, + datalo, h.base, h.index, 0, h.ofs); } else { - tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg, - datalo, base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg, + datalo, h.base, h.index, 0, h.ofs); tcg_out_ext16u(s, datalo, datalo); } } else { - tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo, - base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, OPC_MOVZWL + h.seg, datalo, + h.base, h.index, 0, h.ofs); } break; case MO_SW: if (use_movbe) { - tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg, - datalo, base, index, 0, ofs); - tcg_out_ext16s(s, datalo, datalo, rexw); + tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg, + datalo, h.base, h.index, 0, h.ofs); + tcg_out_ext16s(s, type, datalo, datalo); } else { - tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg, - datalo, base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + h.seg, + datalo, h.base, h.index, 0, h.ofs); } break; case MO_UL: - tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, movop + h.seg, datalo, + h.base, h.index, 0, h.ofs); break; #if TCG_TARGET_REG_BITS == 64 case MO_SL: if (use_movbe) { - tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + seg, datalo, - base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + h.seg, datalo, + h.base, h.index, 0, h.ofs); tcg_out_ext32s(s, datalo, datalo); } else { - tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo, - base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + h.seg, datalo, + h.base, h.index, 0, h.ofs); } break; #endif case MO_UQ: if (TCG_TARGET_REG_BITS == 64) { - tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo, - base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo, + h.base, h.index, 0, h.ofs); + break; + } + if (use_movbe) { + TCGReg t = datalo; + datalo = datahi; + datahi = t; + } + if (h.base == datalo || h.index == datalo) { + tcg_out_modrm_sib_offset(s, OPC_LEA, datahi, + h.base, h.index, 0, h.ofs); + tcg_out_modrm_offset(s, movop + h.seg, datalo, datahi, 0); + tcg_out_modrm_offset(s, movop + h.seg, datahi, datahi, 4); } else { + tcg_out_modrm_sib_offset(s, movop + h.seg, datalo, + h.base, h.index, 0, h.ofs); + tcg_out_modrm_sib_offset(s, movop + h.seg, datahi, + h.base, h.index, 0, h.ofs + 4); + } + break; + + case MO_128: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + + /* + * Without 16-byte atomicity, use integer regs. + * That is where we want the data, and it allows bswaps. 
+ */ + if (h.aa.atom < MO_128) { if (use_movbe) { TCGReg t = datalo; datalo = datahi; datahi = t; } - if (base != datalo) { - tcg_out_modrm_sib_offset(s, movop + seg, datalo, - base, index, 0, ofs); - tcg_out_modrm_sib_offset(s, movop + seg, datahi, - base, index, 0, ofs + 4); + if (h.base == datalo || h.index == datalo) { + tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, datahi, + h.base, h.index, 0, h.ofs); + tcg_out_modrm_offset(s, movop + P_REXW + h.seg, + datalo, datahi, 0); + tcg_out_modrm_offset(s, movop + P_REXW + h.seg, + datahi, datahi, 8); } else { - tcg_out_modrm_sib_offset(s, movop + seg, datahi, - base, index, 0, ofs + 4); - tcg_out_modrm_sib_offset(s, movop + seg, datalo, - base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo, + h.base, h.index, 0, h.ofs); + tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datahi, + h.base, h.index, 0, h.ofs + 8); } + break; } + + /* + * With 16-byte atomicity, a vector load is required. + * If we already have 16-byte alignment, then VMOVDQA always works. + * Else if VMOVDQU has atomicity with dynamic alignment, use that. + * Else we require a runtime test for alignment for VMOVDQA; + * use VMOVDQU on the unaligned nonatomic path for simplicity. + */ + if (h.aa.align >= MO_128) { + tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_VxWx + h.seg, + TCG_TMP_VEC, 0, + h.base, h.index, 0, h.ofs); + } else if (cpuinfo & CPUINFO_ATOMIC_VMOVDQU) { + tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQU_VxWx + h.seg, + TCG_TMP_VEC, 0, + h.base, h.index, 0, h.ofs); + } else { + TCGLabel *l1 = gen_new_label(); + TCGLabel *l2 = gen_new_label(); + + tcg_out_testi(s, h.base, 15); + tcg_out_jxx(s, JCC_JNE, l1, true); + + tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_VxWx + h.seg, + TCG_TMP_VEC, 0, + h.base, h.index, 0, h.ofs); + tcg_out_jxx(s, JCC_JMP, l2, true); + + tcg_out_label(s, l1); + tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQU_VxWx + h.seg, + TCG_TMP_VEC, 0, + h.base, h.index, 0, h.ofs); + tcg_out_label(s, l2); + } + tcg_out_vec_to_pair(s, TCG_TYPE_I64, datalo, datahi, TCG_TMP_VEC); break; + default: g_assert_not_reached(); } } -/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and - EAX. It will be useful once fixed registers globals are less - common. */ -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) +static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addrhi, + MemOpIdx oi, TCGType data_type) { - TCGReg datalo, datahi, addrlo; - TCGReg addrhi __attribute__((unused)); - MemOpIdx oi; - MemOp opc; -#if defined(CONFIG_SOFTMMU) - int mem_index; - tcg_insn_unit *label_ptr[2]; -#else - unsigned a_bits; -#endif + TCGLabelQemuLdst *ldst; + HostAddress h; - datalo = *args++; - datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0); - addrlo = *args++; - addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0); - oi = *args++; - opc = get_memop(oi); + ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true); + tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, get_memop(oi)); -#if defined(CONFIG_SOFTMMU) - mem_index = get_mmuidx(oi); - - tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc, - label_ptr, offsetof(CPUTLBEntry, addr_read)); - - /* TLB Hit. */
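
(Annotation, not part of the patch: the MO_128 path above picks between VMOVDQA, VMOVDQU and a runtime alignment test. A hedged sketch of the same dispatch with SSE2 intrinsics; atomic_unaligned_ok stands in for cpuinfo & CPUINFO_ATOMIC_VMOVDQU, and plain C cannot of course express the atomicity guarantee the emitted sequence relies on.)

    #include <emmintrin.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool atomic_unaligned_ok;  /* stand-in for cpuinfo & CPUINFO_ATOMIC_VMOVDQU */

    static __m128i load16(const void *p)
    {
        /* a compile-time guarantee (h.aa.align >= MO_128) would call
           _mm_load_si128 directly, with no tests at all */
        if (atomic_unaligned_ok) {
            return _mm_loadu_si128(p);   /* VMOVDQU is atomic on this CPU */
        }
        if (((uintptr_t)p & 15) == 0) {
            return _mm_load_si128(p);    /* runtime-aligned: VMOVDQA */
        }
        return _mm_loadu_si128(p);       /* unaligned, nonatomic fallback */
    }

    int main(void)
    {
        _Alignas(16) uint8_t buf[32] = { 1, 2, 3 };
        __m128i v = load16(buf + 1);     /* exercises the unaligned path */
        (void)v;
        return 0;
    }
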
- tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc); - - /* Record the current context of a load into ldst label */ - add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi, - s->code_ptr, label_ptr); -#else - a_bits = get_alignment_bits(opc); - if (a_bits) { - tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = datalo; + ldst->datahi_reg = datahi; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - - tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index, - x86_guest_base_offset, x86_guest_base_seg, - is64, opc); -#endif } static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg base, int index, intptr_t ofs, - int seg, MemOp memop) + HostAddress h, MemOp memop) { bool use_movbe = false; int movop = OPC_MOVL_EvGv; @@ -2313,78 +2221,111 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, case MO_8: /* This is handled with constraints on INDEX_op_qemu_st8_i32. */ tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4); - tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg, - datalo, base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + h.seg, + datalo, h.base, h.index, 0, h.ofs); break; case MO_16: - tcg_out_modrm_sib_offset(s, movop + P_DATA16 + seg, datalo, - base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, movop + P_DATA16 + h.seg, datalo, + h.base, h.index, 0, h.ofs); break; case MO_32: - tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, movop + h.seg, datalo, + h.base, h.index, 0, h.ofs); break; case MO_64: if (TCG_TARGET_REG_BITS == 64) { - tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo, - base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo, + h.base, h.index, 0, h.ofs); } else { if (use_movbe) { TCGReg t = datalo; datalo = datahi; datahi = t; } - tcg_out_modrm_sib_offset(s, movop + seg, datalo, - base, index, 0, ofs); - tcg_out_modrm_sib_offset(s, movop + seg, datahi, - base, index, 0, ofs + 4); + tcg_out_modrm_sib_offset(s, movop + h.seg, datalo, + h.base, h.index, 0, h.ofs); + tcg_out_modrm_sib_offset(s, movop + h.seg, datahi, + h.base, h.index, 0, h.ofs + 4); } break; + + case MO_128: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + + /* + * Without 16-byte atomicity, use integer regs. + * That is where we have the data, and it allows bswaps. + */ + if (h.aa.atom < MO_128) { + if (use_movbe) { + TCGReg t = datalo; + datalo = datahi; + datahi = t; + } + tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo, + h.base, h.index, 0, h.ofs); + tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datahi, + h.base, h.index, 0, h.ofs + 8); + break; + } + + /* + * With 16-byte atomicity, a vector store is required. + * If we already have 16-byte alignment, then VMOVDQA always works. + * Else if VMOVDQU has atomicity with dynamic alignment, use that. + * Else we require a runtime test for alignment for VMOVDQA; + * use VMOVDQU on the unaligned nonatomic path for simplicity. 
+ */ + tcg_out_pair_to_vec(s, TCG_TYPE_I64, TCG_TMP_VEC, datalo, datahi); + if (h.aa.align >= MO_128) { + tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_WxVx + h.seg, + TCG_TMP_VEC, 0, + h.base, h.index, 0, h.ofs); + } else if (cpuinfo & CPUINFO_ATOMIC_VMOVDQU) { + tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQU_WxVx + h.seg, + TCG_TMP_VEC, 0, + h.base, h.index, 0, h.ofs); + } else { + TCGLabel *l1 = gen_new_label(); + TCGLabel *l2 = gen_new_label(); + + tcg_out_testi(s, h.base, 15); + tcg_out_jxx(s, JCC_JNE, l1, true); + + tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_WxVx + h.seg, + TCG_TMP_VEC, 0, + h.base, h.index, 0, h.ofs); + tcg_out_jxx(s, JCC_JMP, l2, true); + + tcg_out_label(s, l1); + tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQU_WxVx + h.seg, + TCG_TMP_VEC, 0, + h.base, h.index, 0, h.ofs); + tcg_out_label(s, l2); + } + break; + default: g_assert_not_reached(); } } -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) +static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addrhi, + MemOpIdx oi, TCGType data_type) { - TCGReg datalo, datahi, addrlo; - TCGReg addrhi __attribute__((unused)); - MemOpIdx oi; - MemOp opc; -#if defined(CONFIG_SOFTMMU) - int mem_index; - tcg_insn_unit *label_ptr[2]; -#else - unsigned a_bits; -#endif + TCGLabelQemuLdst *ldst; + HostAddress h; - datalo = *args++; - datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0); - addrlo = *args++; - addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0); - oi = *args++; - opc = get_memop(oi); + ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false); + tcg_out_qemu_st_direct(s, datalo, datahi, h, get_memop(oi)); -#if defined(CONFIG_SOFTMMU) - mem_index = get_mmuidx(oi); - - tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc, - label_ptr, offsetof(CPUTLBEntry, addr_write)); - - /* TLB Hit. 
*/ - tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc); - - /* Record the current context of a store into ldst label */ - add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi, - s->code_ptr, label_ptr); -#else - a_bits = get_alignment_bits(opc); - if (a_bits) { - tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = datalo; + ldst->datahi_reg = datahi; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - - tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index, - x86_guest_base_offset, x86_guest_base_seg, opc); -#endif } static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) @@ -2671,31 +2612,64 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0); break; - OP_32_64(ext8s): - tcg_out_ext8s(s, a0, a1, rexw); + case INDEX_op_qemu_ld_a64_i32: + if (TCG_TARGET_REG_BITS == 32) { + tcg_out_qemu_ld(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32); + break; + } + /* fall through */ + case INDEX_op_qemu_ld_a32_i32: + tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32); break; - OP_32_64(ext16s): - tcg_out_ext16s(s, a0, a1, rexw); + case INDEX_op_qemu_ld_a32_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); + } else { + tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64); + } break; - OP_32_64(ext8u): - tcg_out_ext8u(s, a0, a1); + case INDEX_op_qemu_ld_a64_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); + } else { + tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); + } break; - OP_32_64(ext16u): - tcg_out_ext16u(s, a0, a1); + case INDEX_op_qemu_ld_a32_i128: + case INDEX_op_qemu_ld_a64_i128: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128); break; - case INDEX_op_qemu_ld_i32: - tcg_out_qemu_ld(s, args, 0); + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st8_a64_i32: + if (TCG_TARGET_REG_BITS == 32) { + tcg_out_qemu_st(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32); + break; + } + /* fall through */ + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st8_a32_i32: + tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, args, 1); + case INDEX_op_qemu_st_a32_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); + } else { + tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64); + } break; - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st8_i32: - tcg_out_qemu_st(s, args, 0); + case INDEX_op_qemu_st_a64_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); + } else { + tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); + } break; - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, args, 1); + case INDEX_op_qemu_st_a32_i128: + case INDEX_op_qemu_st_a64_i128: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128); break; OP_32_64(mulu2): @@ -2765,15 +2739,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_bswap64_i64: tcg_out_bswap64(s, a0); break; - case INDEX_op_extu_i32_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_extrl_i64_i32: - tcg_out_ext32u(s, a0, a1); - break; - case INDEX_op_ext_i32_i64: - case INDEX_op_ext32s_i64: - tcg_out_ext32s(s, a0, a1); - break; case INDEX_op_extrh_i64_i32: tcg_out_shifti(s, SHIFT_SHR + 
P_REXW, a0, 32); break; @@ -2790,7 +2755,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, /* load bits 0..15 */ tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0); } else { - tcg_abort(); + g_assert_not_reached(); } break; @@ -2823,7 +2788,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, if (a1 < 4 && a0 < 8) { tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4); } else { - tcg_out_ext16s(s, a0, a1, 0); + tcg_out_ext16s(s, TCG_TYPE_I32, a0, a1); tcg_out_shifti(s, SHIFT_SAR, a0, 8); } break; @@ -2842,8 +2807,21 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_call: /* Always emitted via tcg_out_call. */ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ + case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ + case INDEX_op_ext8s_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: default: - tcg_abort(); + g_assert_not_reached(); } #undef OP_32_64 @@ -3366,26 +3344,38 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_clz_i64: return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r); - case INDEX_op_qemu_ld_i32: - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - ? C_O1_I1(r, L) : C_O1_I2(r, L, L)); + case INDEX_op_qemu_ld_a32_i32: + return C_O1_I1(r, L); + case INDEX_op_qemu_ld_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O1_I2(r, L, L); - case INDEX_op_qemu_st_i32: - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - ? C_O0_I2(L, L) : C_O0_I3(L, L, L)); - case INDEX_op_qemu_st8_i32: - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - ? C_O0_I2(s, L) : C_O0_I3(s, L, L)); + case INDEX_op_qemu_st_a32_i32: + return C_O0_I2(L, L); + case INDEX_op_qemu_st_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L); + case INDEX_op_qemu_st8_a32_i32: + return C_O0_I2(s, L); + case INDEX_op_qemu_st8_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(s, L) : C_O0_I3(s, L, L); - case INDEX_op_qemu_ld_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L) - : C_O2_I2(r, r, L, L)); + case INDEX_op_qemu_ld_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I1(r, r, L); + case INDEX_op_qemu_ld_a64_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I2(r, r, L, L); - case INDEX_op_qemu_st_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(L, L, L) - : C_O0_I4(L, L, L, L)); + case INDEX_op_qemu_st_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L); + case INDEX_op_qemu_st_a64_i64: + return TCG_TARGET_REG_BITS == 64 ? 
C_O0_I2(L, L) : C_O0_I4(L, L, L, L); + + case INDEX_op_qemu_ld_a32_i128: + case INDEX_op_qemu_ld_a64_i128: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + return C_O2_I1(r, r, L); + case INDEX_op_qemu_st_a32_i128: + case INDEX_op_qemu_st_a64_i128: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + return C_O0_I3(L, L, L); case INDEX_op_brcond2_i32: return C_O0_I4(r, r, ri, ri); @@ -3651,6 +3641,7 @@ static void expand_vec_sari(TCGType type, unsigned vece, break; case MO_64: + t1 = tcg_temp_new_vec(type); if (imm <= 32) { /* * We can emulate a small sign extend by performing an arithmetic @@ -3659,24 +3650,22 @@ static void expand_vec_sari(TCGType type, unsigned vece, * does not, so we have to bound the smaller shift -- we get the * same result in the high half either way. */ - t1 = tcg_temp_new_vec(type); tcg_gen_sari_vec(MO_32, t1, v1, MIN(imm, 31)); tcg_gen_shri_vec(MO_64, v0, v1, imm); vec_gen_4(INDEX_op_x86_blend_vec, type, MO_32, tcgv_vec_arg(v0), tcgv_vec_arg(v0), tcgv_vec_arg(t1), 0xaa); - tcg_temp_free_vec(t1); } else { /* Otherwise we will need to use a compare vs 0 to produce * the sign-extend, shift and merge. */ - t1 = tcg_const_zeros_vec(type); - tcg_gen_cmp_vec(TCG_COND_GT, MO_64, t1, t1, v1); + tcg_gen_cmp_vec(TCG_COND_GT, MO_64, t1, + tcg_constant_vec(type, MO_64, 0), v1); tcg_gen_shri_vec(MO_64, v0, v1, imm); tcg_gen_shli_vec(MO_64, t1, t1, 64 - imm); tcg_gen_or_vec(MO_64, v0, v0, t1); - tcg_temp_free_vec(t1); } + tcg_temp_free_vec(t1); break; default: @@ -4079,18 +4068,18 @@ static void tcg_target_qemu_prologue(TCGContext *s) (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4 + stack_addend); #else -# if !defined(CONFIG_SOFTMMU) && TCG_TARGET_REG_BITS == 64 +# if !defined(CONFIG_SOFTMMU) if (guest_base) { int seg = setup_guest_base_seg(); if (seg != 0) { - x86_guest_base_seg = seg; + x86_guest_base.seg = seg; } else if (guest_base == (int32_t)guest_base) { - x86_guest_base_offset = guest_base; + x86_guest_base.ofs = guest_base; } else { /* Choose R12 because, as a base, it requires a SIB byte. */ - x86_guest_base_index = TCG_REG_R12; - tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base_index, guest_base); - tcg_regset_set_reg(s->reserved_regs, x86_guest_base_index); + x86_guest_base.index = TCG_REG_R12; + tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base.index, guest_base); + tcg_regset_set_reg(s->reserved_regs, x86_guest_base.index); } } # endif @@ -4128,70 +4117,6 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count) static void tcg_target_init(TCGContext *s) { -#ifdef CONFIG_CPUID_H - unsigned a, b, c, d, b7 = 0, c7 = 0; - unsigned max = __get_cpuid_max(0, 0); - - if (max >= 7) { - /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */ - __cpuid_count(7, 0, a, b7, c7, d); - have_bmi1 = (b7 & bit_BMI) != 0; - have_bmi2 = (b7 & bit_BMI2) != 0; - } - - if (max >= 1) { - __cpuid(1, a, b, c, d); -#ifndef have_cmov - /* For 32-bit, 99% certainty that we're running on hardware that - supports cmov, but we still need to check. In case cmov is not - available, we'll use a small forward branch. */ - have_cmov = (d & bit_CMOV) != 0; -#endif - - /* MOVBE is only available on Intel Atom and Haswell CPUs, so we - need to probe for it. */ - have_movbe = (c & bit_MOVBE) != 0; - have_popcnt = (c & bit_POPCNT) != 0; - - /* There are a number of things we must check before we can be - sure of not hitting invalid opcode. 
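
(Annotation, not part of the patch: stepping back to the expand_vec_sari hunk above, the imm > 32 fallback produces the sign by comparing against zero, then shifts and merges. A standalone sketch of one 64-bit lane; the reference check in main() assumes the compiler's signed >> is arithmetic, which holds for GCC and Clang.)

    #include <assert.h>
    #include <stdint.h>

    static int64_t sari64_emulated(int64_t v, unsigned imm)  /* 32 < imm < 64 */
    {
        uint64_t sign = 0 > v ? ~UINT64_C(0) : 0;     /* cmp_vec(GT, 0, v) */
        uint64_t lo = (uint64_t)v >> imm;             /* shri_vec */
        return (int64_t)(lo | (sign << (64 - imm)));  /* shli_vec + or_vec */
    }

    int main(void)
    {
        int64_t neg = -123456789, pos = 123456789;
        for (unsigned imm = 33; imm < 64; imm++) {
            assert(sari64_emulated(neg, imm) == neg >> imm);
            assert(sari64_emulated(pos, imm) == pos >> imm);
        }
        return 0;
    }
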
*/ - if (c & bit_OSXSAVE) { - unsigned xcrl, xcrh; - /* The xgetbv instruction is not available to older versions of - * the assembler, so we encode the instruction manually. - */ - asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (0)); - if ((xcrl & 6) == 6) { - have_avx1 = (c & bit_AVX) != 0; - have_avx2 = (b7 & bit_AVX2) != 0; - - /* - * There are interesting instructions in AVX512, so long - * as we have AVX512VL, which indicates support for EVEX - * on sizes smaller than 512 bits. We are required to - * check that OPMASK and all extended ZMM state are enabled - * even if we're not using them -- the insns will fault. - */ - if ((xcrl & 0xe0) == 0xe0 - && (b7 & bit_AVX512F) - && (b7 & bit_AVX512VL)) { - have_avx512vl = true; - have_avx512bw = (b7 & bit_AVX512BW) != 0; - have_avx512dq = (b7 & bit_AVX512DQ) != 0; - have_avx512vbmi2 = (c7 & bit_AVX512VBMI2) != 0; - } - } - } - } - - max = __get_cpuid_max(0x8000000, 0); - if (max >= 1) { - __cpuid(0x80000001, a, b, c, d); - /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs. */ - have_lzcnt = (c & bit_LZCNT) != 0; - } -#endif /* CONFIG_CPUID_H */ - tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; if (TCG_TARGET_REG_BITS == 64) { tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; @@ -4221,6 +4146,20 @@ static void tcg_target_init(TCGContext *s) s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); + tcg_regset_set_reg(s->reserved_regs, TCG_TMP_VEC); +#ifdef _WIN64 + /* These are call saved, and we don't save them, so don't use them. */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM6); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM7); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM8); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM9); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM10); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM11); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM12); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM13); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM14); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM15); +#endif } typedef struct { diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h index d4f2a6f8c2..1468f8ef25 100644 --- a/tcg/i386/tcg-target.h +++ b/tcg/i386/tcg-target.h @@ -25,8 +25,9 @@ #ifndef I386_TCG_TARGET_H #define I386_TCG_TARGET_H +#include "host/cpuinfo.h" + #define TCG_TARGET_INSN_UNIT_SIZE 1 -#define TCG_TARGET_TLB_DISPLACEMENT_BITS 31 #ifdef __x86_64__ # define TCG_TARGET_REG_BITS 64 @@ -111,15 +112,21 @@ typedef enum { # define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF #endif -extern bool have_bmi1; -extern bool have_popcnt; -extern bool have_avx1; -extern bool have_avx2; -extern bool have_avx512bw; -extern bool have_avx512dq; -extern bool have_avx512vbmi2; -extern bool have_avx512vl; -extern bool have_movbe; +#define have_bmi1 (cpuinfo & CPUINFO_BMI1) +#define have_popcnt (cpuinfo & CPUINFO_POPCNT) +#define have_avx1 (cpuinfo & CPUINFO_AVX1) +#define have_avx2 (cpuinfo & CPUINFO_AVX2) +#define have_movbe (cpuinfo & CPUINFO_MOVBE) + +/* + * There are interesting instructions in AVX512, so long as we have AVX512VL, + * which indicates support for EVEX on sizes smaller than 512 bits. 
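
(Annotation, not part of the patch: the macros defined below require AVX512F together with AVX512VL before any of the narrower-width AVX512 insns are used. A sketch of equivalent gating via the GCC/Clang __builtin_cpu_supports builtin rather than QEMU's cached cpuinfo word; the feature-name strings are assumed to be accepted by the compiler in use.)

    #include <stdbool.h>
    #include <stdio.h>

    static bool have_avx512vl_like(void)
    {
        __builtin_cpu_init();
        return __builtin_cpu_supports("avx512f")
            && __builtin_cpu_supports("avx512vl");
    }

    int main(void)
    {
        printf("avx512vl usable: %d\n", have_avx512vl_like());
        return 0;
    }
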
+ */ +#define have_avx512vl ((cpuinfo & CPUINFO_AVX512VL) && \ + (cpuinfo & CPUINFO_AVX512F)) +#define have_avx512bw ((cpuinfo & CPUINFO_AVX512BW) && have_avx512vl) +#define have_avx512dq ((cpuinfo & CPUINFO_AVX512DQ) && have_avx512vl) +#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl) /* optional instructions */ #define TCG_TARGET_HAS_div2_i32 1 @@ -153,9 +160,9 @@ extern bool have_movbe; #define TCG_TARGET_HAS_mulsh_i32 0 #if TCG_TARGET_REG_BITS == 64 -/* Keep target addresses zero-extended in a register. */ -#define TCG_TARGET_HAS_extrl_i64_i32 (TARGET_LONG_BITS == 32) -#define TCG_TARGET_HAS_extrh_i64_i32 (TARGET_LONG_BITS == 32) +/* Keep 32-bit values zero-extended in a register. */ +#define TCG_TARGET_HAS_extrl_i64_i32 1 +#define TCG_TARGET_HAS_extrh_i64_i32 1 #define TCG_TARGET_HAS_div2_i64 1 #define TCG_TARGET_HAS_rot_i64 1 #define TCG_TARGET_HAS_ext8s_i64 1 @@ -193,6 +200,9 @@ extern bool have_movbe; #define TCG_TARGET_HAS_qemu_st8_i32 1 #endif +#define TCG_TARGET_HAS_qemu_ldst_i128 \ + (TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA)) + /* We do not support older SSE systems, only beginning with AVX1. */ #define TCG_TARGET_HAS_v64 have_avx1 #define TCG_TARGET_HAS_v128 have_avx1 @@ -239,9 +249,6 @@ extern bool have_movbe; #include "tcg/tcg-mo.h" #define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) - -#define TCG_TARGET_HAS_MEMORY_BSWAP have_movbe - #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h index 172c107289..c2bde44613 100644 --- a/tcg/loongarch64/tcg-target-con-set.h +++ b/tcg/loongarch64/tcg-target-con-set.h @@ -17,9 +17,7 @@ C_O0_I1(r) C_O0_I2(rZ, r) C_O0_I2(rZ, rZ) -C_O0_I2(LZ, L) C_O1_I1(r, r) -C_O1_I1(r, L) C_O1_I2(r, r, rC) C_O1_I2(r, r, ri) C_O1_I2(r, r, rI) diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h index 541ff47fa9..6e9ccca3ad 100644 --- a/tcg/loongarch64/tcg-target-con-str.h +++ b/tcg/loongarch64/tcg-target-con-str.h @@ -14,7 +14,6 @@ * REGS(letter, register_mask) */ REGS('r', ALL_GENERAL_REGS) -REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) /* * Define constraint letters for constants: diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index c5f55afd68..0bae922982 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -30,6 +30,7 @@ */ #include "../tcg-ldst.c.inc" +#include <asm/hwcap.h> #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { @@ -133,18 +134,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define TCG_CT_CONST_C12 0x1000 #define TCG_CT_CONST_WSZ 0x2000 -#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) -/* - * For softmmu, we need to avoid conflicts with the first 5 - * argument registers to call the helper. Some of these are - * also used for the tlb lookup. 
- */ -#ifdef CONFIG_SOFTMMU -#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_A0, 5) -#else -#define SOFTMMU_RESERVE_REGS 0 -#endif - +#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) { @@ -419,6 +409,11 @@ static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd, } } +static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) +{ + return false; +} + static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, tcg_target_long imm) { @@ -441,12 +436,12 @@ static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31); } -static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg) +static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { tcg_out_opc_sext_b(s, ret, arg); } -static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg) +static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { tcg_out_opc_sext_h(s, ret, arg); } @@ -456,6 +451,23 @@ static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) tcg_out_opc_addi_w(s, ret, arg, 0); } +static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (ret != arg) { + tcg_out_ext32s(s, ret, arg); + } +} + +static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_ext32u(s, ret, arg); +} + +static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_ext32s(s, ret, arg); +} + static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, TCGReg a0, TCGReg a1, TCGReg a2, bool c2, bool is_32bit) @@ -772,392 +784,246 @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, * Load/store helpers for SoftMMU, and qemu_ld/st implementations */ -#if defined(CONFIG_SOFTMMU) -/* - * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * MemOpIdx oi, uintptr_t ra) - */ -static void * const qemu_ld_helpers[4] = { - [MO_8] = helper_ret_ldub_mmu, - [MO_16] = helper_le_lduw_mmu, - [MO_32] = helper_le_ldul_mmu, - [MO_64] = helper_le_ldq_mmu, -}; - -/* - * helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, MemOpIdx oi, - * uintptr_t ra) - */ -static void * const qemu_st_helpers[4] = { - [MO_8] = helper_ret_stb_mmu, - [MO_16] = helper_le_stw_mmu, - [MO_32] = helper_le_stl_mmu, - [MO_64] = helper_le_stq_mmu, -}; - -/* We expect to use a 12-bit negative offset from ENV. */ -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); - static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) { tcg_out_opc_b(s, 0); return reloc_br_sd10k16(s->code_ptr - 1, target); } -/* - * Emits common code for TLB addend lookup, that eventually loads the - * addend in TCG_REG_TMP2. 
- */ -static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, MemOpIdx oi, - tcg_insn_unit **label_ptr, bool is_load) +static const TCGLdstHelperParam ldst_helper_param = { + .ntmp = 1, .tmp = { TCG_REG_TMP0 } +}; + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { + MemOp opc = get_memop(l->oi); + + /* resolve label address */ + if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + tcg_out_ld_helper_args(s, l, &ldst_helper_param); + tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false); + tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param); + return tcg_out_goto(s, l->raddr); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + MemOp opc = get_memop(l->oi); + + /* resolve label address */ + if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + tcg_out_st_helper_args(s, l, &ldst_helper_param); + tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false); + return tcg_out_goto(s, l->raddr); +} + +typedef struct { + TCGReg base; + TCGReg index; + TCGAtomAlign aa; +} HostAddress; + +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return false; +} + +/* + * For softmmu, perform the TLB load and compare. + * For useronly, perform any required alignment tests. + * In both cases, return a TCGLabelQemuLdst structure if the slow path + * is required and fill in @h with the host address for the fast path. + */ +static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, + TCGReg addr_reg, MemOpIdx oi, + bool is_ld) +{ + TCGType addr_type = s->addr_type; + TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); + MemOp a_bits; + + h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); + a_bits = h->aa.align; + +#ifdef CONFIG_SOFTMMU unsigned s_bits = opc & MO_SIZE; - unsigned a_bits = get_alignment_bits(opc); - tcg_target_long compare_mask; int mem_index = get_mmuidx(oi); int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addr_reg; + + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); - tcg_out_opc_srli_d(s, TCG_REG_TMP2, addrl, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg, + s->page_bits - CPU_TLB_ENTRY_BITS); tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); /* Load the tlb comparator and the addend. */ - tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2, - is_load ? offsetof(CPUTLBEntry, addr_read) - : offsetof(CPUTLBEntry, addr_write)); + tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, + is_ld ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write)); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, offsetof(CPUTLBEntry, addend)); - /* We don't support unaligned accesses. */ + /* + * For aligned accesses, we check the first byte and include the alignment + * bits within the address. For unaligned access, we check that we don't + * cross pages using the address of the last byte of the access. 
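
(Annotation, not part of the patch: a standalone model of the comparator math described above; see the emitted addi/bstrins sequence that follows. tlb_compare_hits and its parameters are hypothetical names for illustration.)

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Probe the last byte when a_bits < s_bits, then clear bits
       [a_bits, page_bits) as the bstrins does.  A hit therefore requires
       both "same page" and "alignment bits clear". */
    static bool tlb_compare_hits(uint64_t addr, uint64_t page_addr,
                                 unsigned a_bits, unsigned s_bits,
                                 unsigned page_bits)
    {
        uint64_t a_mask = (UINT64_C(1) << a_bits) - 1;
        uint64_t s_mask = (UINT64_C(1) << s_bits) - 1;
        uint64_t probe = a_bits < s_bits ? addr + (s_mask - a_mask) : addr;
        uint64_t keep = ~(((UINT64_C(1) << page_bits) - 1) & ~a_mask);
        return (probe & keep) == page_addr;   /* vs. the TLB comparator */
    }

    int main(void)
    {
        /* 8-byte access, 4-byte alignment, 4k pages */
        assert(tlb_compare_hits(0xff8, 0, 2, 3, 12));    /* fits in page 0 */
        assert(!tlb_compare_hits(0xffc, 0, 2, 3, 12));   /* crosses into page 1 */
        return 0;
    }
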
+ */ if (a_bits < s_bits) { - a_bits = s_bits; + unsigned a_mask = (1u << a_bits) - 1; + unsigned s_mask = (1u << s_bits) - 1; + tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask); + } else { + tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg); } - /* Clear the non-page, non-alignment bits from the address. */ - compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1); - tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); - tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addrl); + tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO, + a_bits, s->page_bits - 1); /* Compare masked address with the TLB entry. */ - label_ptr[0] = s->code_ptr; + ldst->label_ptr[0] = s->code_ptr; tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); - /* TLB Hit - addend in TCG_REG_TMP2, ready for use. */ -} - -static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi, - TCGType type, - TCGReg datalo, TCGReg addrlo, - void *raddr, tcg_insn_unit **label_ptr) -{ - TCGLabelQemuLdst *label = new_ldst_label(s); - - label->is_ld = is_ld; - label->oi = oi; - label->type = type; - label->datalo_reg = datalo; - label->datahi_reg = 0; /* unused */ - label->addrlo_reg = addrlo; - label->addrhi_reg = 0; /* unused */ - label->raddr = tcg_splitwx_to_rx(raddr); - label->label_ptr[0] = label_ptr[0]; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - MemOpIdx oi = l->oi; - MemOp opc = get_memop(oi); - MemOp size = opc & MO_SIZE; - TCGType type = l->type; - - /* resolve label address */ - if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; - } - - /* call load helper */ - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg); - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A2, oi); - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, (tcg_target_long)l->raddr); - - tcg_out_call_int(s, qemu_ld_helpers[size], false); - - switch (opc & MO_SSIZE) { - case MO_SB: - tcg_out_ext8s(s, l->datalo_reg, TCG_REG_A0); - break; - case MO_SW: - tcg_out_ext16s(s, l->datalo_reg, TCG_REG_A0); - break; - case MO_SL: - tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0); - break; - case MO_UL: - if (type == TCG_TYPE_I32) { - /* MO_UL loads of i32 should be sign-extended too */ - tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0); - break; - } - /* fallthrough */ - default: - tcg_out_mov(s, type, l->datalo_reg, TCG_REG_A0); - break; - } - - return tcg_out_goto(s, l->raddr); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - MemOpIdx oi = l->oi; - MemOp opc = get_memop(oi); - MemOp size = opc & MO_SIZE; - - /* resolve label address */ - if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; - } - - /* call store helper */ - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg); - switch (size) { - case MO_8: - tcg_out_ext8u(s, TCG_REG_A2, l->datalo_reg); - break; - case MO_16: - tcg_out_ext16u(s, TCG_REG_A2, l->datalo_reg); - break; - case MO_32: - tcg_out_ext32u(s, TCG_REG_A2, l->datalo_reg); - break; - case MO_64: - tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_A2, l->datalo_reg); - break; - default: - g_assert_not_reached(); - break; - } - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, oi); - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A4, (tcg_target_long)l->raddr); - - tcg_out_call_int(s, qemu_st_helpers[size], false); - - return tcg_out_goto(s, l->raddr); -} + h->index = TCG_REG_TMP2; #else + if (a_bits) { 
+ ldst = new_ldst_label(s); -/* - * Alignment helpers for user-mode emulation - */ + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addr_reg; -static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg, - unsigned a_bits) -{ - TCGLabelQemuLdst *l = new_ldst_label(s); + /* + * Without micro-architecture details, we don't know which of + * bstrpick or andi is faster, so use bstrpick as it's not + * constrained by imm field width. Not to say alignments >= 2^12 + * are going to happen any time soon. + */ + tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); - l->is_ld = is_ld; - l->addrlo_reg = addr_reg; - - /* - * Without micro-architecture details, we don't know which of bstrpick or - * andi is faster, so use bstrpick as it's not constrained by imm field - * width. (Not to say alignments >= 2^12 are going to happen any time - * soon, though) - */ - tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); - - l->label_ptr[0] = s->code_ptr; - tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); - - l->raddr = tcg_splitwx_to_rx(s->code_ptr); -} - -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - /* resolve label address */ - if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; + ldst->label_ptr[0] = s->code_ptr; + tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); } - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); + h->index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; +#endif - /* tail call, with the return address back inline. */ - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr); - tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld - : helper_unaligned_st), true); - return true; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -#endif /* CONFIG_SOFTMMU */ - -/* - * `ext32u` the address register into the temp register given, - * if target is 32-bit, no-op otherwise. - * - * Returns the address register ready for use with TLB addend. - */ -static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s, - TCGReg addr, TCGReg tmp) -{ - if (TARGET_LONG_BITS == 32) { - tcg_out_ext32u(s, tmp, addr); - return tmp; + if (addr_type == TCG_TYPE_I32) { + h->base = TCG_REG_TMP0; + tcg_out_ext32u(s, h->base, addr_reg); + } else { + h->base = addr_reg; } - return addr; + + return ldst; } -static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj, - TCGReg rk, MemOp opc, TCGType type) +static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type, + TCGReg rd, HostAddress h) { /* Byte swapping is left to middle-end expansion. 
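
(Annotation, not part of the patch: because tcg_target_has_memory_bswap() returns false above, a big-endian guest access on this little-endian host is expanded by the middle-end into a host-order load plus an explicit swap, roughly as sketched below; __builtin_bswap32 is assumed available, as on GCC/Clang.)

    #include <stdint.h>
    #include <string.h>

    /* Roughly what a big-endian 32-bit guest load becomes on this backend. */
    static uint32_t ld32_be(const void *p)
    {
        uint32_t x;
        memcpy(&x, p, sizeof(x));      /* the ldx_w emitted by the fast path */
        return __builtin_bswap32(x);   /* the separate bswap32 op */
    }
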
*/ tcg_debug_assert((opc & MO_BSWAP) == 0); switch (opc & MO_SSIZE) { case MO_UB: - tcg_out_opc_ldx_bu(s, rd, rj, rk); + tcg_out_opc_ldx_bu(s, rd, h.base, h.index); break; case MO_SB: - tcg_out_opc_ldx_b(s, rd, rj, rk); + tcg_out_opc_ldx_b(s, rd, h.base, h.index); break; case MO_UW: - tcg_out_opc_ldx_hu(s, rd, rj, rk); + tcg_out_opc_ldx_hu(s, rd, h.base, h.index); break; case MO_SW: - tcg_out_opc_ldx_h(s, rd, rj, rk); + tcg_out_opc_ldx_h(s, rd, h.base, h.index); break; case MO_UL: if (type == TCG_TYPE_I64) { - tcg_out_opc_ldx_wu(s, rd, rj, rk); + tcg_out_opc_ldx_wu(s, rd, h.base, h.index); break; } /* fallthrough */ case MO_SL: - tcg_out_opc_ldx_w(s, rd, rj, rk); + tcg_out_opc_ldx_w(s, rd, h.base, h.index); break; case MO_UQ: - tcg_out_opc_ldx_d(s, rd, rj, rk); + tcg_out_opc_ldx_d(s, rd, h.base, h.index); break; default: g_assert_not_reached(); } } -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type) +static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, + MemOpIdx oi, TCGType data_type) { - TCGReg addr_regl; - TCGReg data_regl; - MemOpIdx oi; - MemOp opc; -#if defined(CONFIG_SOFTMMU) - tcg_insn_unit *label_ptr[1]; -#else - unsigned a_bits; -#endif - TCGReg base; + TCGLabelQemuLdst *ldst; + HostAddress h; - data_regl = *args++; - addr_regl = *args++; - oi = *args++; - opc = get_memop(oi); + ldst = prepare_host_addr(s, &h, addr_reg, oi, true); + tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h); -#if defined(CONFIG_SOFTMMU) - tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1); - base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); - tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type); - add_qemu_ldst_label(s, 1, oi, type, - data_regl, addr_regl, - s->code_ptr, label_ptr); -#else - a_bits = get_alignment_bits(opc); - if (a_bits) { - tcg_out_test_alignment(s, true, addr_regl, a_bits); + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = data_reg; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); - TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; - tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type); -#endif } -static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data, - TCGReg rj, TCGReg rk, MemOp opc) +static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc, + TCGReg rd, HostAddress h) { /* Byte swapping is left to middle-end expansion. 
*/ tcg_debug_assert((opc & MO_BSWAP) == 0); switch (opc & MO_SIZE) { case MO_8: - tcg_out_opc_stx_b(s, data, rj, rk); + tcg_out_opc_stx_b(s, rd, h.base, h.index); break; case MO_16: - tcg_out_opc_stx_h(s, data, rj, rk); + tcg_out_opc_stx_h(s, rd, h.base, h.index); break; case MO_32: - tcg_out_opc_stx_w(s, data, rj, rk); + tcg_out_opc_stx_w(s, rd, h.base, h.index); break; case MO_64: - tcg_out_opc_stx_d(s, data, rj, rk); + tcg_out_opc_stx_d(s, rd, h.base, h.index); break; default: g_assert_not_reached(); } } -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) +static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, + MemOpIdx oi, TCGType data_type) { - TCGReg addr_regl; - TCGReg data_regl; - MemOpIdx oi; - MemOp opc; -#if defined(CONFIG_SOFTMMU) - tcg_insn_unit *label_ptr[1]; -#else - unsigned a_bits; -#endif - TCGReg base; + TCGLabelQemuLdst *ldst; + HostAddress h; - data_regl = *args++; - addr_regl = *args++; - oi = *args++; - opc = get_memop(oi); + ldst = prepare_host_addr(s, &h, addr_reg, oi, false); + tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h); -#if defined(CONFIG_SOFTMMU) - tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0); - base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); - tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc); - add_qemu_ldst_label(s, 0, oi, - 0, /* type param is unused for stores */ - data_regl, addr_regl, - s->code_ptr, label_ptr); -#else - a_bits = get_alignment_bits(opc); - if (a_bits) { - tcg_out_test_alignment(s, false, addr_regl, a_bits); + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = data_reg; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); - TCGReg guest_base_reg = USE_GUEST_BASE ? 
TCG_GUEST_BASE_REG : TCG_REG_ZERO; - tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc); -#endif } /* @@ -1246,37 +1112,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); break; - case INDEX_op_ext8s_i32: - case INDEX_op_ext8s_i64: - tcg_out_ext8s(s, a0, a1); - break; - - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - tcg_out_ext8u(s, a0, a1); - break; - - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - tcg_out_ext16s(s, a0, a1); - break; - - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - tcg_out_ext16u(s, a0, a1); - break; - - case INDEX_op_ext32u_i64: - case INDEX_op_extu_i32_i64: - tcg_out_ext32u(s, a0, a1); - break; - - case INDEX_op_ext32s_i64: - case INDEX_op_extrl_i64_i32: - case INDEX_op_ext_i32_i64: - tcg_out_ext32s(s, a0, a1); - break; - case INDEX_op_extrh_i64_i32: tcg_out_opc_srai_d(s, a0, a1, 32); break; @@ -1361,7 +1196,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_bswap16_i64: tcg_out_opc_revb_2h(s, a0, a1); if (a2 & TCG_BSWAP_OS) { - tcg_out_ext16s(s, a0, a0); + tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0); } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { tcg_out_ext16u(s, a0, a0); } @@ -1609,17 +1444,21 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); break; - case INDEX_op_qemu_ld_i32: - tcg_out_qemu_ld(s, args, TCG_TYPE_I32); + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, args, TCG_TYPE_I64); + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: + tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); break; - case INDEX_op_qemu_st_i32: - tcg_out_qemu_st(s, args); + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, args); + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: + tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); break; case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ @@ -1627,6 +1466,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_call: /* Always emitted via tcg_out_call. */ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ + case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. 
*/ + case INDEX_op_ext8s_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: default: g_assert_not_reached(); } @@ -1645,16 +1497,16 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_st32_i64: case INDEX_op_st_i32: case INDEX_op_st_i64: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: return C_O0_I2(rZ, r); case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: return C_O0_I2(rZ, rZ); - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: - return C_O0_I2(LZ, L); - case INDEX_op_ext8s_i32: case INDEX_op_ext8s_i64: case INDEX_op_ext8u_i32: @@ -1690,12 +1542,12 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_ld32u_i64: case INDEX_op_ld_i32: case INDEX_op_ld_i64: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: return C_O1_I1(r, r); - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: - return C_O1_I1(r, L); - case INDEX_op_andc_i32: case INDEX_op_andc_i64: case INDEX_op_orc_i32: @@ -1846,6 +1698,14 @@ static void tcg_target_qemu_prologue(TCGContext *s) static void tcg_target_init(TCGContext *s) { + unsigned long hwcap = qemu_getauxval(AT_HWCAP); + + /* Server and desktop class cpus have UAL; embedded cpus do not. */ + if (!(hwcap & HWCAP_LOONGARCH_UAL)) { + error_report("TCG: unaligned access support required; exiting"); + exit(EXIT_FAILURE); + } + tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h index 17b8193aa5..482901ac15 100644 --- a/tcg/loongarch64/tcg-target.h +++ b/tcg/loongarch64/tcg-target.h @@ -168,11 +168,10 @@ typedef enum { #define TCG_TARGET_HAS_muls2_i64 0 #define TCG_TARGET_HAS_muluh_i64 1 #define TCG_TARGET_HAS_mulsh_i64 1 +#define TCG_TARGET_HAS_qemu_ldst_i128 0 #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_NEED_LDST_LABELS -#define TCG_TARGET_HAS_MEMORY_BSWAP 0 - #endif /* LOONGARCH_TCG_TARGET_H */ diff --git a/tcg/meson.build b/tcg/meson.build index c4c63b19d4..bdc185a485 100644 --- a/tcg/meson.build +++ b/tcg/meson.build @@ -6,13 +6,14 @@ tcg_ss.add(files( 'tcg.c', 'tcg-common.c', 'tcg-op.c', + 'tcg-op-ldst.c', 'tcg-op-gvec.c', 'tcg-op-vec.c', )) if get_option('tcg_interpreter') libffi = dependency('libffi', version: '>=3.0', required: true, - method: 'pkg-config', kwargs: static_kwargs) + method: 'pkg-config') specific_ss.add(libffi) specific_ss.add(files('tci.c')) endif diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h index fe3e868a2f..864034f468 100644 --- a/tcg/mips/tcg-target-con-set.h +++ b/tcg/mips/tcg-target-con-set.h @@ -12,15 +12,13 @@ C_O0_I1(r) C_O0_I2(rZ, r) C_O0_I2(rZ, rZ) -C_O0_I2(SZ, S) -C_O0_I3(SZ, S, S) -C_O0_I3(SZ, SZ, S) +C_O0_I3(rZ, r, r) +C_O0_I3(rZ, rZ, r) C_O0_I4(rZ, rZ, rZ, rZ) -C_O0_I4(SZ, SZ, S, S) -C_O1_I1(r, L) +C_O0_I4(rZ, rZ, r, r) C_O1_I1(r, r) C_O1_I2(r, 0, rZ) -C_O1_I2(r, L, L) +C_O1_I2(r, r, r) C_O1_I2(r, r, ri) C_O1_I2(r, r, rI) C_O1_I2(r, r, rIK) @@ -30,7 +28,6 @@ C_O1_I2(r, rZ, rN) C_O1_I2(r, rZ, rZ) C_O1_I4(r, rZ, rZ, rZ, 0) C_O1_I4(r, rZ, rZ, rZ, rZ) -C_O2_I1(r, r, L) 
-C_O2_I2(r, r, L, L) +C_O2_I1(r, r, r) C_O2_I2(r, r, r, r) C_O2_I4(r, r, rZ, rZ, rN, rN) diff --git a/tcg/mips/tcg-target-con-str.h b/tcg/mips/tcg-target-con-str.h index e4b2965c72..413c280a7a 100644 --- a/tcg/mips/tcg-target-con-str.h +++ b/tcg/mips/tcg-target-con-str.h @@ -9,8 +9,6 @@ * REGS(letter, register_mask) */ REGS('r', ALL_GENERAL_REGS) -REGS('L', ALL_QLOAD_REGS) -REGS('S', ALL_QSTORE_REGS) /* * Define constraint letters for constants: diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index 80748d892e..3274d9aace 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -25,22 +25,15 @@ */ #include "../tcg-ldst.c.inc" - -#if HOST_BIG_ENDIAN -# define MIPS_BE 1 -#else -# define MIPS_BE 0 -#endif +#include "../tcg-pool.c.inc" #if TCG_TARGET_REG_BITS == 32 -# define LO_OFF (MIPS_BE * 4) +# define LO_OFF (HOST_BIG_ENDIAN * 4) # define HI_OFF (4 - LO_OFF) #else -/* To assert at compile-time that these values are never used - for TCG_TARGET_REG_BITS == 64. */ -int link_error(void); -# define LO_OFF link_error() -# define HI_OFF link_error() +/* Assert at compile-time that these values are never used for 64-bit. */ +# define LO_OFF ({ qemu_build_not_reached(); 0; }) +# define HI_OFF ({ qemu_build_not_reached(); 0; }) #endif #ifdef CONFIG_DEBUG_TCG @@ -86,7 +79,12 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { #define TCG_TMP3 TCG_REG_T7 #ifndef CONFIG_SOFTMMU -#define TCG_GUEST_BASE_REG TCG_REG_S1 +#define TCG_GUEST_BASE_REG TCG_REG_S7 +#endif +#if TCG_TARGET_REG_BITS == 64 +#define TCG_REG_TB TCG_REG_S6 +#else +#define TCG_REG_TB (qemu_build_not_reached(), TCG_REG_ZERO) #endif /* check if we really need so many registers :P */ @@ -163,9 +161,18 @@ static bool reloc_pc16(tcg_insn_unit *src_rw, const tcg_insn_unit *target) static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { - tcg_debug_assert(type == R_MIPS_PC16); - tcg_debug_assert(addend == 0); - return reloc_pc16(code_ptr, (const tcg_insn_unit *)value); + value += addend; + switch (type) { + case R_MIPS_PC16: + return reloc_pc16(code_ptr, (const tcg_insn_unit *)value); + case R_MIPS_16: + if (value != (int16_t)value) { + return false; + } + *code_ptr = deposit32(*code_ptr, 0, 16, value); + return true; + } + g_assert_not_reached(); } #define TCG_CT_CONST_ZERO 0x100 @@ -176,20 +183,6 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, #define TCG_CT_CONST_WSZ 0x2000 /* word size */ #define ALL_GENERAL_REGS 0xffffffffu -#define NOA0_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_A0)) - -#ifdef CONFIG_SOFTMMU -#define ALL_QLOAD_REGS \ - (NOA0_REGS & ~((TCG_TARGET_REG_BITS < TARGET_LONG_BITS) << TCG_REG_A2)) -#define ALL_QSTORE_REGS \ - (NOA0_REGS & ~(TCG_TARGET_REG_BITS < TARGET_LONG_BITS \ - ? (1 << TCG_REG_A2) | (1 << TCG_REG_A3) \ - : (1 << TCG_REG_A1))) -#else -#define ALL_QLOAD_REGS NOA0_REGS -#define ALL_QSTORE_REGS NOA0_REGS -#endif - static bool is_p2m1(tcg_target_long val) { @@ -368,8 +361,6 @@ typedef enum { /* Aliases for convenience. */ ALIAS_PADD = sizeof(void *) == 4 ? OPC_ADDU : OPC_DADDU, ALIAS_PADDI = sizeof(void *) == 4 ? OPC_ADDIU : OPC_DADDIU, - ALIAS_TSRL = TARGET_LONG_BITS == 32 || TCG_TARGET_REG_BITS == 32 - ? 
OPC_SRL : OPC_DSRL, } MIPSInsn; /* @@ -497,6 +488,11 @@ static void tcg_out_nop(TCGContext *s) tcg_out32(s, 0); } +static void tcg_out_nop_fill(tcg_insn_unit *p, int count) +{ + memset(p, 0, count * sizeof(tcg_insn_unit)); +} + static void tcg_out_dsll(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) { tcg_out_opc_sa64(s, OPC_DSLL, OPC_DSLL32, rd, rt, sa); @@ -521,35 +517,175 @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) return true; } -static void tcg_out_movi(TCGContext *s, TCGType type, - TCGReg ret, tcg_target_long arg) +static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg) { - if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { - arg = (int32_t)arg; - } if (arg == (int16_t)arg) { tcg_out_opc_imm(s, OPC_ADDIU, ret, TCG_REG_ZERO, arg); - return; + return true; } if (arg == (uint16_t)arg) { tcg_out_opc_imm(s, OPC_ORI, ret, TCG_REG_ZERO, arg); + return true; + } + if (arg == (int32_t)arg && (arg & 0xffff) == 0) { + tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16); + return true; + } + return false; +} + +static bool tcg_out_movi_two(TCGContext *s, TCGReg ret, tcg_target_long arg) +{ + /* + * All signed 32-bit constants are loadable with two immediates, + * and everything else requires more work. + */ + if (arg == (int32_t)arg) { + if (!tcg_out_movi_one(s, ret, arg)) { + tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16); + tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg & 0xffff); + } + return true; + } + return false; +} + +static void tcg_out_movi_pool(TCGContext *s, TCGReg ret, + tcg_target_long arg, TCGReg tbreg) +{ + new_pool_label(s, arg, R_MIPS_16, s->code_ptr, tcg_tbrel_diff(s, NULL)); + tcg_out_opc_imm(s, OPC_LD, ret, tbreg, 0); +} + +static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, + tcg_target_long arg, TCGReg tbreg) +{ + tcg_target_long tmp; + int sh, lo; + + if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { + arg = (int32_t)arg; + } + + /* Load all 32-bit constants. */ + if (tcg_out_movi_two(s, ret, arg)) { return; } - if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) { - tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16); - } else { - tcg_out_movi(s, TCG_TYPE_I32, ret, arg >> 31 >> 1); - if (arg & 0xffff0000ull) { - tcg_out_dsll(s, ret, ret, 16); - tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg >> 16); - tcg_out_dsll(s, ret, ret, 16); - } else { - tcg_out_dsll(s, ret, ret, 32); + assert(TCG_TARGET_REG_BITS == 64); + + /* Load addresses within 2GB of TB with 1 or 3 insns. */ + tmp = tcg_tbrel_diff(s, (void *)arg); + if (tmp == (int16_t)tmp) { + tcg_out_opc_imm(s, OPC_DADDIU, ret, tbreg, tmp); + return; + } + if (tcg_out_movi_two(s, ret, tmp)) { + tcg_out_opc_reg(s, OPC_DADDU, ret, ret, tbreg); + return; + } + + /* + * Load bitmasks with a right-shift. This is good for things + * like 0x0fff_ffff_ffff_fff0: ADDUI r,0,0xff00 + DSRL r,r,4. + * or similarly using LUI. For this to work, bit 31 must be set. + */ + if (arg > 0 && (int32_t)arg < 0) { + sh = clz64(arg); + if (tcg_out_movi_one(s, ret, arg << sh)) { + tcg_out_dsrl(s, ret, ret, sh); + return; } } - if (arg & 0xffff) { - tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg & 0xffff); + + /* + * Load slightly larger constants using left-shift. + * Limit this sequence to 3 insns to avoid too much expansion. + */ + sh = ctz64(arg); + if (sh && tcg_out_movi_two(s, ret, arg >> sh)) { + tcg_out_dsll(s, ret, ret, sh); + return; } + + /* + * Load slightly larger constants using left-shift and add/or. 
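
(Annotation, not part of the patch: the decomposition built in the code that follows, arg = (tmp << sh) + lo, as a standalone sketch. decompose() is a hypothetical helper, __builtin_ctzll assumes GCC/Clang, and the real code additionally requires tmp to be loadable by tcg_out_movi_one.)

    #include <assert.h>
    #include <stdint.h>

    /* lo is the low 16 bits, negated via DADDIU when bits 15 and 16 are
       both set so that the remaining tmp gets a larger trailing-zero shift. */
    static void decompose(int64_t arg, int64_t *tmp, int *sh, int64_t *lo)
    {
        *lo = arg & 0xffff;
        if ((arg & 0x18000) == 0x18000) {
            *lo = (int16_t)arg;              /* negative immediate */
        }
        *tmp = arg - *lo;                    /* low 16 bits now zero */
        *sh = __builtin_ctzll((uint64_t)*tmp);   /* assumes tmp != 0 */
        *tmp >>= *sh;
    }

    int main(void)
    {
        int64_t arg = INT64_C(0x123456789a8123), tmp, lo;
        int sh;
        decompose(arg, &tmp, &sh, &lo);
        assert(((tmp << sh) + lo) == arg);   /* movi tmp; DSLL sh; ORI/DADDIU lo */
        return 0;
    }
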
+ * Prefer addi with a negative immediate when that would produce + * a larger shift. For this to work, bits 15 and 16 must be set. + */ + lo = arg & 0xffff; + if (lo) { + if ((arg & 0x18000) == 0x18000) { + lo = (int16_t)arg; + } + tmp = arg - lo; + sh = ctz64(tmp); + tmp >>= sh; + if (tcg_out_movi_one(s, ret, tmp)) { + tcg_out_dsll(s, ret, ret, sh); + tcg_out_opc_imm(s, lo < 0 ? OPC_DADDIU : OPC_ORI, ret, ret, lo); + return; + } + } + + /* Otherwise, put 64-bit constants into the constant pool. */ + tcg_out_movi_pool(s, ret, arg, tbreg); +} + +static void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long arg) +{ + TCGReg tbreg = TCG_TARGET_REG_BITS == 64 ? TCG_REG_TB : 0; + tcg_out_movi_int(s, type, ret, arg, tbreg); +} + +static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) +{ + tcg_debug_assert(TCG_TARGET_HAS_ext8s_i32); + tcg_out_opc_reg(s, OPC_SEB, rd, TCG_REG_ZERO, rs); +} + +static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_opc_imm(s, OPC_ANDI, rd, rs, 0xff); +} + +static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) +{ + tcg_debug_assert(TCG_TARGET_HAS_ext16s_i32); + tcg_out_opc_reg(s, OPC_SEH, rd, TCG_REG_ZERO, rs); +} + +static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_opc_imm(s, OPC_ANDI, rd, rs, 0xffff); +} + +static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + tcg_out_opc_sa(s, OPC_SLL, rd, rs, 0); +} + +static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs) +{ + if (rd != rs) { + tcg_out_ext32s(s, rd, rs); + } +} + +static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_ext32u(s, rd, rs); +} + +static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_ext32s(s, rd, rs); +} + +static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) +{ + return false; } static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, @@ -635,6 +771,7 @@ static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg) static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) { + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); if (use_mips32r2_instructions) { tcg_out_opc_bf(s, OPC_DEXT, ret, arg, 31, 0); } else { @@ -798,7 +935,7 @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, break; default: - tcg_abort(); + g_assert_not_reached(); break; } } @@ -855,7 +992,7 @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, break; default: - tcg_abort(); + g_assert_not_reached(); break; } @@ -1013,9 +1150,19 @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) { - /* Note that the ABI requires the called function's address to be - loaded into T9, even if a direct branch is in range. */ - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg); + /* + * Note that __mips_abicalls requires the called function's address + * to be loaded into $25 (t9), even if a direct branch is in range. + * + * For n64, always drop the pointer into the constant pool. + * We can re-use helper addresses often and do not want any + * of the longer sequences tcg_out_movi may try. 
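+     * (tcg_out_movi_pool emits exactly one LD from the pool.)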
+ */ + if (sizeof(uintptr_t) == 8) { + tcg_out_movi_pool(s, TCG_REG_T9, (uintptr_t)arg, TCG_REG_TB); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg); + } /* But do try a direct branch, allowing the cpu better insn prefetch. */ if (tail) { @@ -1036,252 +1183,29 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, tcg_out_nop(s); } -#if defined(CONFIG_SOFTMMU) -static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_SB] = helper_ret_ldsb_mmu, - [MO_LEUW] = helper_le_lduw_mmu, - [MO_LESW] = helper_le_ldsw_mmu, - [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEUQ] = helper_le_ldq_mmu, - [MO_BEUW] = helper_be_lduw_mmu, - [MO_BESW] = helper_be_ldsw_mmu, - [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEUQ] = helper_be_ldq_mmu, -#if TCG_TARGET_REG_BITS == 64 - [MO_LESL] = helper_le_ldsl_mmu, - [MO_BESL] = helper_be_ldsl_mmu, -#endif +/* We have four temps, we might as well expose three of them. */ +static const TCGLdstHelperParam ldst_helper_param = { + .ntmp = 3, .tmp = { TCG_TMP0, TCG_TMP1, TCG_TMP2 } }; -static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_UB] = helper_ret_stb_mmu, - [MO_LEUW] = helper_le_stw_mmu, - [MO_LEUL] = helper_le_stl_mmu, - [MO_LEUQ] = helper_le_stq_mmu, - [MO_BEUW] = helper_be_stw_mmu, - [MO_BEUL] = helper_be_stl_mmu, - [MO_BEUQ] = helper_be_stq_mmu, -}; - -/* Helper routines for marshalling helper function arguments into - * the correct registers and stack. - * I is where we want to put this argument, and is updated and returned - * for the next call. ARG is the argument itself. - * - * We provide routines for arguments which are: immediate, 32 bit - * value in register, 16 and 8 bit values in register (which must be zero - * extended before use) and 64 bit value in a lo:hi register pair. - */ - -static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg) -{ - if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { - tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg); - } else { - /* For N32 and N64, the initial offset is different. But there - we also have 8 argument register so we don't run out here. */ - tcg_debug_assert(TCG_TARGET_REG_BITS == 32); - tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i); - } - return i + 1; -} - -static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg) -{ - TCGReg tmp = TCG_TMP0; - if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { - tmp = tcg_target_call_iarg_regs[i]; - } - tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff); - return tcg_out_call_iarg_reg(s, i, tmp); -} - -static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg) -{ - TCGReg tmp = TCG_TMP0; - if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { - tmp = tcg_target_call_iarg_regs[i]; - } - tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff); - return tcg_out_call_iarg_reg(s, i, tmp); -} - -static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg) -{ - TCGReg tmp = TCG_TMP0; - if (arg == 0) { - tmp = TCG_REG_ZERO; - } else { - if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { - tmp = tcg_target_call_iarg_regs[i]; - } - tcg_out_movi(s, TCG_TYPE_REG, tmp, arg); - } - return tcg_out_call_iarg_reg(s, i, tmp); -} - -static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah) -{ - tcg_debug_assert(TCG_TARGET_REG_BITS == 32); - i = (i + 1) & ~1; - i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al)); - i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah)); - return i; -} - -/* We expect to use a 16-bit negative offset from ENV. 
*/ -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); - -/* - * Perform the tlb comparison operation. - * The complete host address is placed in BASE. - * Clobbers TMP0, TMP1, TMP2, TMP3. - */ -static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, - TCGReg addrh, MemOpIdx oi, - tcg_insn_unit *label_ptr[2], bool is_load) -{ - MemOp opc = get_memop(oi); - unsigned a_bits = get_alignment_bits(opc); - unsigned s_bits = opc & MO_SIZE; - unsigned a_mask = (1 << a_bits) - 1; - unsigned s_mask = (1 << s_bits) - 1; - int mem_index = get_mmuidx(oi); - int fast_off = TLB_MASK_TABLE_OFS(mem_index); - int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); - int table_off = fast_off + offsetof(CPUTLBDescFast, table); - int add_off = offsetof(CPUTLBEntry, addend); - int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) - : offsetof(CPUTLBEntry, addr_write)); - target_ulong tlb_mask; - - /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off); - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off); - - /* Extract the TLB index from the address into TMP3. */ - tcg_out_opc_sa(s, ALIAS_TSRL, TCG_TMP3, addrl, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); - tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0); - - /* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */ - tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1); - - /* Load the (low-half) tlb comparator. */ - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF); - } else { - tcg_out_ldst(s, (TARGET_LONG_BITS == 64 ? OPC_LD - : TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW), - TCG_TMP0, TCG_TMP3, cmp_off); - } - - /* Zero extend a 32-bit guest address for a 64-bit host. */ - if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { - tcg_out_ext32u(s, base, addrl); - addrl = base; - } - - /* - * Mask the page bits, keeping the alignment bits to compare against. - * For unaligned accesses, compare against the end of the access to - * verify that it does not cross a page boundary. - */ - tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask; - tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, tlb_mask); - if (a_mask >= s_mask) { - tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl); - } else { - tcg_out_opc_imm(s, ALIAS_PADDI, TCG_TMP2, addrl, s_mask - a_mask); - tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2); - } - - if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { - /* Load the tlb addend for the fast path. */ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off); - } - - label_ptr[0] = s->code_ptr; - tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0); - - /* Load and test the high half tlb comparator. */ - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - /* delay slot */ - tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF); - - /* Load the tlb addend for the fast path. 
*/ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off); - - label_ptr[1] = s->code_ptr; - tcg_out_opc_br(s, OPC_BNE, addrh, TCG_TMP0); - } - - /* delay slot */ - tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrl); -} - -static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi, - TCGType ext, - TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - void *raddr, tcg_insn_unit *label_ptr[2]) -{ - TCGLabelQemuLdst *label = new_ldst_label(s); - - label->is_ld = is_ld; - label->oi = oi; - label->type = ext; - label->datalo_reg = datalo; - label->datahi_reg = datahi; - label->addrlo_reg = addrlo; - label->addrhi_reg = addrhi; - label->raddr = tcg_splitwx_to_rx(raddr); - label->label_ptr[0] = label_ptr[0]; - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - label->label_ptr[1] = label_ptr[1]; - } -} - static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr); - MemOpIdx oi = l->oi; - MemOp opc = get_memop(oi); - TCGReg v0; - int i; + MemOp opc = get_memop(l->oi); /* resolve label address */ if (!reloc_pc16(l->label_ptr[0], tgt_rx) - || (TCG_TARGET_REG_BITS < TARGET_LONG_BITS - && !reloc_pc16(l->label_ptr[1], tgt_rx))) { + || (l->label_ptr[1] && !reloc_pc16(l->label_ptr[1], tgt_rx))) { return false; } - i = 1; - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg); - } else { - i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg); - } - i = tcg_out_call_iarg_imm(s, i, oi); - i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr); - tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)], false); - /* delay slot */ - tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); + tcg_out_ld_helper_args(s, l, &ldst_helper_param); - v0 = l->datalo_reg; - if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) { - /* We eliminated V0 from the possible output registers, so it - cannot be clobbered here. So we must move V1 first. 
*/ - if (MIPS_BE) { - tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1); - v0 = l->datahi_reg; - } else { - tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1); - } - } + tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false); + /* delay slot */ + tcg_out_nop(s); + + tcg_out_ld_helper_ret(s, l, true, &ldst_helper_param); tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO); if (!reloc_pc16(s->code_ptr - 1, l->raddr)) { @@ -1289,195 +1213,215 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) } /* delay slot */ - if (TCG_TARGET_REG_BITS == 64 && l->type == TCG_TYPE_I32) { - /* we always sign-extend 32-bit loads */ - tcg_out_opc_sa(s, OPC_SLL, v0, TCG_REG_V0, 0); - } else { - tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO); - } + tcg_out_nop(s); return true; } static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr); - MemOpIdx oi = l->oi; - MemOp opc = get_memop(oi); - MemOp s_bits = opc & MO_SIZE; - int i; + MemOp opc = get_memop(l->oi); /* resolve label address */ if (!reloc_pc16(l->label_ptr[0], tgt_rx) - || (TCG_TARGET_REG_BITS < TARGET_LONG_BITS - && !reloc_pc16(l->label_ptr[1], tgt_rx))) { + || (l->label_ptr[1] && !reloc_pc16(l->label_ptr[1], tgt_rx))) { return false; } - i = 1; - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg); - } else { - i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg); - } - switch (s_bits) { - case MO_8: - i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg); - break; - case MO_16: - i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg); - break; - case MO_32: - i = tcg_out_call_iarg_reg(s, i, l->datalo_reg); - break; - case MO_64: - if (TCG_TARGET_REG_BITS == 32) { - i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg); - } else { - i = tcg_out_call_iarg_reg(s, i, l->datalo_reg); - } - break; - default: - tcg_abort(); - } - i = tcg_out_call_iarg_imm(s, i, oi); + tcg_out_st_helper_args(s, l, &ldst_helper_param); - /* Tail call to the store helper. Thus force the return address - computation to take place in the return address register. */ - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr); - i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA); - tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true); + tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false); /* delay slot */ - tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); + tcg_out_nop(s); + + tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO); + if (!reloc_pc16(s->code_ptr - 1, l->raddr)) { + return false; + } + + /* delay slot */ + tcg_out_nop(s); return true; } -#else +typedef struct { + TCGReg base; + TCGAtomAlign aa; +} HostAddress; -static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, - TCGReg addrhi, unsigned a_bits) +bool tcg_target_has_memory_bswap(MemOp memop) { - unsigned a_mask = (1 << a_bits) - 1; - TCGLabelQemuLdst *l = new_ldst_label(s); - - l->is_ld = is_ld; - l->addrlo_reg = addrlo; - l->addrhi_reg = addrhi; - - /* We are expecting a_bits to max out at 7, much lower than ANDI. 
*/ - tcg_debug_assert(a_bits < 16); - tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask); - - l->label_ptr[0] = s->code_ptr; - if (use_mips32r6_instructions) { - tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0); - } else { - tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO); - tcg_out_nop(s); - } - - l->raddr = tcg_splitwx_to_rx(s->code_ptr); + return false; } -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) +/* + * For softmmu, perform the TLB load and compare. + * For useronly, perform any required alignment tests. + * In both cases, return a TCGLabelQemuLdst structure if the slow path + * is required and fill in @h with the host address for the fast path. + */ +static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, + TCGReg addrlo, TCGReg addrhi, + MemOpIdx oi, bool is_ld) { - void *target; + TCGType addr_type = s->addr_type; + TCGLabelQemuLdst *ldst = NULL; + MemOp opc = get_memop(oi); + MemOp a_bits; + unsigned s_bits = opc & MO_SIZE; + unsigned a_mask; + TCGReg base; - if (!reloc_pc16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; - } + h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); + a_bits = h->aa.align; + a_mask = (1 << a_bits) - 1; - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - /* A0 is env, A1 is skipped, A2:A3 is the uint64_t address. */ - TCGReg a2 = MIPS_BE ? l->addrhi_reg : l->addrlo_reg; - TCGReg a3 = MIPS_BE ? l->addrlo_reg : l->addrhi_reg; +#ifdef CONFIG_SOFTMMU + unsigned s_mask = (1 << s_bits) - 1; + int mem_index = get_mmuidx(oi); + int fast_off = TLB_MASK_TABLE_OFS(mem_index); + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); + int table_off = fast_off + offsetof(CPUTLBDescFast, table); + int add_off = offsetof(CPUTLBEntry, addend); + int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write); - if (a3 != TCG_REG_A2) { - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2); - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3); - } else if (a2 != TCG_REG_A3) { - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3); - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2); - } else { - tcg_out_mov(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A2); - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, TCG_REG_A3); - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, TCG_TMP0); - } + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addrlo; + ldst->addrhi_reg = addrhi; + + /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off); + + /* Extract the TLB index from the address into TMP3. */ + if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo, + s->page_bits - CPU_TLB_ENTRY_BITS); } else { - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); + tcg_out_dsrl(s, TCG_TMP3, addrlo, + s->page_bits - CPU_TLB_ENTRY_BITS); + } + tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0); + + /* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */ + tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1); + + if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) { + /* Load the tlb comparator. */ + tcg_out_ld(s, addr_type, TCG_TMP0, TCG_TMP3, cmp_off); + /* Load the tlb addend for the fast path. 
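This overwrites TMP3; the CPUTLBEntry pointer is dead once the addend is in hand.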
*/ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off); + } else { + /* Load the low half of the tlb comparator. */ + tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF); } - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); /* - * Tail call to the helper, with the return address back inline. - * We have arrived here via BNEL, so $31 is already set. + * Mask the page bits, keeping the alignment bits to compare against. + * For unaligned accesses, compare against the end of the access to + * verify that it does not cross a page boundary. */ - target = (l->is_ld ? helper_unaligned_ld : helper_unaligned_st); - tcg_out_call_int(s, target, true); - return true; -} + tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask); + if (a_mask < s_mask) { + if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { + tcg_out_opc_imm(s, OPC_ADDIU, TCG_TMP2, addrlo, s_mask - a_mask); + } else { + tcg_out_opc_imm(s, OPC_DADDIU, TCG_TMP2, addrlo, s_mask - a_mask); + } + tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2); + } else { + tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo); + } -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} + /* Zero extend a 32-bit guest address for a 64-bit host. */ + if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { + tcg_out_ext32u(s, TCG_TMP2, addrlo); + addrlo = TCG_TMP2; + } -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); + ldst->label_ptr[0] = s->code_ptr; + tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0); + + /* Load and test the high half tlb comparator. */ + if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) { + /* delay slot */ + tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF); + + /* Load the tlb addend for the fast path. */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off); + + ldst->label_ptr[1] = s->code_ptr; + tcg_out_opc_br(s, OPC_BNE, addrhi, TCG_TMP0); + } + + /* delay slot */ + base = TCG_TMP3; + tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addrlo); +#else + if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) { + ldst = new_ldst_label(s); + + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addrlo; + ldst->addrhi_reg = addrhi; + + /* We are expecting a_bits to max out at 7, much lower than ANDI. 
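(ANDI takes a 16-bit immediate, hence the assert just below.)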
*/ + tcg_debug_assert(a_bits < 16); + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask); + + ldst->label_ptr[0] = s->code_ptr; + if (use_mips32r6_instructions) { + tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0); + } else { + tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO); + tcg_out_nop(s); + } + } + + base = addrlo; + if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { + tcg_out_ext32u(s, TCG_REG_A0, base); + base = TCG_REG_A0; + } + if (guest_base) { + if (guest_base == (int16_t)guest_base) { + tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base); + } else { + tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base, + TCG_GUEST_BASE_REG); + } + base = TCG_REG_A0; + } +#endif + + h->base = base; + return ldst; } -#endif /* SOFTMMU */ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, MemOp opc, bool is_64) + TCGReg base, MemOp opc, TCGType type) { - switch (opc & (MO_SSIZE | MO_BSWAP)) { + switch (opc & MO_SSIZE) { case MO_UB: tcg_out_opc_imm(s, OPC_LBU, lo, base, 0); break; case MO_SB: tcg_out_opc_imm(s, OPC_LB, lo, base, 0); break; - case MO_UW | MO_BSWAP: - tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0); - tcg_out_bswap16(s, lo, TCG_TMP1, TCG_BSWAP_IZ | TCG_BSWAP_OZ); - break; case MO_UW: tcg_out_opc_imm(s, OPC_LHU, lo, base, 0); break; - case MO_SW | MO_BSWAP: - tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0); - tcg_out_bswap16(s, lo, TCG_TMP1, TCG_BSWAP_IZ | TCG_BSWAP_OS); - break; case MO_SW: tcg_out_opc_imm(s, OPC_LH, lo, base, 0); break; - case MO_UL | MO_BSWAP: - if (TCG_TARGET_REG_BITS == 64 && is_64) { - if (use_mips32r2_instructions) { - tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); - tcg_out_bswap32(s, lo, lo, TCG_BSWAP_IZ | TCG_BSWAP_OZ); - } else { - tcg_out_bswap_subr(s, bswap32u_addr); - /* delay slot */ - tcg_out_opc_imm(s, OPC_LWU, TCG_TMP0, base, 0); - tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3); - } - break; - } - /* FALLTHRU */ - case MO_SL | MO_BSWAP: - if (use_mips32r2_instructions) { - tcg_out_opc_imm(s, OPC_LW, lo, base, 0); - tcg_out_bswap32(s, lo, lo, 0); - } else { - tcg_out_bswap_subr(s, bswap32_addr); - /* delay slot */ - tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0); - tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_TMP3); - } - break; case MO_UL: - if (TCG_TARGET_REG_BITS == 64 && is_64) { + if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) { tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); break; } @@ -1485,40 +1429,11 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, case MO_SL: tcg_out_opc_imm(s, OPC_LW, lo, base, 0); break; - case MO_UQ | MO_BSWAP: - if (TCG_TARGET_REG_BITS == 64) { - if (use_mips32r2_instructions) { - tcg_out_opc_imm(s, OPC_LD, lo, base, 0); - tcg_out_bswap64(s, lo, lo); - } else { - tcg_out_bswap_subr(s, bswap64_addr); - /* delay slot */ - tcg_out_opc_imm(s, OPC_LD, TCG_TMP0, base, 0); - tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3); - } - } else if (use_mips32r2_instructions) { - tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0); - tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 4); - tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0); - tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1); - tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16); - tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? 
hi : lo, TCG_TMP1, 16); - } else { - tcg_out_bswap_subr(s, bswap32_addr); - /* delay slot */ - tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0); - tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 4); - tcg_out_bswap_subr(s, bswap32_addr); - /* delay slot */ - tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3); - tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3); - } - break; case MO_UQ: /* Prefer to load from offset 0 first, but allow for overlap. */ if (TCG_TARGET_REG_BITS == 64) { tcg_out_opc_imm(s, OPC_LD, lo, base, 0); - } else if (MIPS_BE ? hi != base : lo == base) { + } else if (HOST_BIG_ENDIAN ? hi != base : lo == base) { tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF); tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF); } else { @@ -1527,36 +1442,31 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, } break; default: - tcg_abort(); + g_assert_not_reached(); } } static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, MemOp opc, bool is_64) + TCGReg base, MemOp opc, TCGType type) { - const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR; - const MIPSInsn lw2 = MIPS_BE ? OPC_LWR : OPC_LWL; - const MIPSInsn ld1 = MIPS_BE ? OPC_LDL : OPC_LDR; - const MIPSInsn ld2 = MIPS_BE ? OPC_LDR : OPC_LDL; + const MIPSInsn lw1 = HOST_BIG_ENDIAN ? OPC_LWL : OPC_LWR; + const MIPSInsn lw2 = HOST_BIG_ENDIAN ? OPC_LWR : OPC_LWL; + const MIPSInsn ld1 = HOST_BIG_ENDIAN ? OPC_LDL : OPC_LDR; + const MIPSInsn ld2 = HOST_BIG_ENDIAN ? OPC_LDR : OPC_LDL; + bool sgn = opc & MO_SIGN; - bool sgn = (opc & MO_SIGN); - - switch (opc & (MO_SSIZE | MO_BSWAP)) { - case MO_SW | MO_BE: - case MO_UW | MO_BE: - tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 0); - tcg_out_opc_imm(s, OPC_LBU, lo, base, 1); - if (use_mips32r2_instructions) { - tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8); - } else { - tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8); - tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, TCG_TMP1); - } - break; - - case MO_SW | MO_LE: - case MO_UW | MO_LE: - if (use_mips32r2_instructions && lo != base) { + switch (opc & MO_SIZE) { + case MO_16: + if (HOST_BIG_ENDIAN) { + tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 0); + tcg_out_opc_imm(s, OPC_LBU, lo, base, 1); + if (use_mips32r2_instructions) { + tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8); + } else { + tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8); + tcg_out_opc_reg(s, OPC_OR, lo, lo, TCG_TMP0); + } + } else if (use_mips32r2_instructions && lo != base) { tcg_out_opc_imm(s, OPC_LBU, lo, base, 0); tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 1); tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8); @@ -1568,81 +1478,23 @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, } break; - case MO_SL: - case MO_UL: + case MO_32: tcg_out_opc_imm(s, lw1, lo, base, 0); tcg_out_opc_imm(s, lw2, lo, base, 3); - if (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn) { + if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64 && !sgn) { tcg_out_ext32u(s, lo, lo); } break; - case MO_UL | MO_BSWAP: - case MO_SL | MO_BSWAP: - if (use_mips32r2_instructions) { - tcg_out_opc_imm(s, lw1, lo, base, 0); - tcg_out_opc_imm(s, lw2, lo, base, 3); - tcg_out_bswap32(s, lo, lo, - TCG_TARGET_REG_BITS == 64 && is_64 - ? (sgn ? TCG_BSWAP_OS : TCG_BSWAP_OZ) : 0); - } else { - const tcg_insn_unit *subr = - (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn - ? 
bswap32u_addr : bswap32_addr); - - tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0); - tcg_out_bswap_subr(s, subr); - /* delay slot */ - tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 3); - tcg_out_mov(s, is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, lo, TCG_TMP3); - } - break; - - case MO_UQ: + case MO_64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_opc_imm(s, ld1, lo, base, 0); tcg_out_opc_imm(s, ld2, lo, base, 7); } else { - tcg_out_opc_imm(s, lw1, MIPS_BE ? hi : lo, base, 0 + 0); - tcg_out_opc_imm(s, lw2, MIPS_BE ? hi : lo, base, 0 + 3); - tcg_out_opc_imm(s, lw1, MIPS_BE ? lo : hi, base, 4 + 0); - tcg_out_opc_imm(s, lw2, MIPS_BE ? lo : hi, base, 4 + 3); - } - break; - - case MO_UQ | MO_BSWAP: - if (TCG_TARGET_REG_BITS == 64) { - if (use_mips32r2_instructions) { - tcg_out_opc_imm(s, ld1, lo, base, 0); - tcg_out_opc_imm(s, ld2, lo, base, 7); - tcg_out_bswap64(s, lo, lo); - } else { - tcg_out_opc_imm(s, ld1, TCG_TMP0, base, 0); - tcg_out_bswap_subr(s, bswap64_addr); - /* delay slot */ - tcg_out_opc_imm(s, ld2, TCG_TMP0, base, 7); - tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3); - } - } else if (use_mips32r2_instructions) { - tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0); - tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3); - tcg_out_opc_imm(s, lw1, TCG_TMP1, base, 4 + 0); - tcg_out_opc_imm(s, lw2, TCG_TMP1, base, 4 + 3); - tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0); - tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1); - tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16); - tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16); - } else { - tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0); - tcg_out_bswap_subr(s, bswap32_addr); - /* delay slot */ - tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3); - tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 4 + 0); - tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3); - tcg_out_bswap_subr(s, bswap32_addr); - /* delay slot */ - tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 4 + 3); - tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3); + tcg_out_opc_imm(s, lw1, HOST_BIG_ENDIAN ? hi : lo, base, 0 + 0); + tcg_out_opc_imm(s, lw2, HOST_BIG_ENDIAN ? hi : lo, base, 0 + 3); + tcg_out_opc_imm(s, lw1, HOST_BIG_ENDIAN ? lo : hi, base, 4 + 0); + tcg_out_opc_imm(s, lw2, HOST_BIG_ENDIAN ? lo : hi, base, 4 + 3); } break; @@ -1651,270 +1503,115 @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, } } -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) +static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addrhi, + MemOpIdx oi, TCGType data_type) { - TCGReg addr_regl, addr_regh __attribute__((unused)); - TCGReg data_regl, data_regh; - MemOpIdx oi; - MemOp opc; -#if defined(CONFIG_SOFTMMU) - tcg_insn_unit *label_ptr[2]; -#else -#endif - unsigned a_bits, s_bits; - TCGReg base = TCG_REG_A0; + MemOp opc = get_memop(oi); + TCGLabelQemuLdst *ldst; + HostAddress h; - data_regl = *args++; - data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); - addr_regl = *args++; - addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); - oi = *args++; - opc = get_memop(oi); - a_bits = get_alignment_bits(opc); - s_bits = opc & MO_SIZE; + ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true); - /* - * R6 removes the left/right instructions but requires the - * system to support misaligned memory accesses. 
- */ -#if defined(CONFIG_SOFTMMU) - tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1); - if (use_mips32r6_instructions || a_bits >= s_bits) { - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); + if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) { + tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, data_type); } else { - tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64); + tcg_out_qemu_ld_unalign(s, datalo, datahi, h.base, opc, data_type); } - add_qemu_ldst_label(s, 1, oi, - (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), - data_regl, data_regh, addr_regl, addr_regh, - s->code_ptr, label_ptr); -#else - if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { - tcg_out_ext32u(s, base, addr_regl); - addr_regl = base; + + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = datalo; + ldst->datahi_reg = datahi; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - if (guest_base == 0 && data_regl != addr_regl) { - base = addr_regl; - } else if (guest_base == (int16_t)guest_base) { - tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base); - } else { - tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); - } - if (use_mips32r6_instructions) { - if (a_bits) { - tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); - } - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); - } else { - if (a_bits && a_bits != s_bits) { - tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); - } - if (a_bits >= s_bits) { - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); - } else { - tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64); - } - } -#endif } static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, TCGReg base, MemOp opc) { - /* Don't clutter the code below with checks to avoid bswapping ZERO. */ - if ((lo | hi) == 0) { - opc &= ~MO_BSWAP; - } - - switch (opc & (MO_SIZE | MO_BSWAP)) { + switch (opc & MO_SIZE) { case MO_8: tcg_out_opc_imm(s, OPC_SB, lo, base, 0); break; - - case MO_16 | MO_BSWAP: - tcg_out_bswap16(s, TCG_TMP1, lo, 0); - lo = TCG_TMP1; - /* FALLTHRU */ case MO_16: tcg_out_opc_imm(s, OPC_SH, lo, base, 0); break; - - case MO_32 | MO_BSWAP: - tcg_out_bswap32(s, TCG_TMP3, lo, 0); - lo = TCG_TMP3; - /* FALLTHRU */ case MO_32: tcg_out_opc_imm(s, OPC_SW, lo, base, 0); break; - - case MO_64 | MO_BSWAP: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_bswap64(s, TCG_TMP3, lo); - tcg_out_opc_imm(s, OPC_SD, TCG_TMP3, base, 0); - } else if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? lo : hi); - tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? hi : lo); - tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16); - tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16); - tcg_out_opc_imm(s, OPC_SW, TCG_TMP0, base, 0); - tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, 4); - } else { - tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi, 0); - tcg_out_opc_imm(s, OPC_SW, TCG_TMP3, base, 0); - tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo, 0); - tcg_out_opc_imm(s, OPC_SW, TCG_TMP3, base, 4); - } - break; case MO_64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_opc_imm(s, OPC_SD, lo, base, 0); } else { - tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? hi : lo, base, 0); - tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? lo : hi, base, 4); + tcg_out_opc_imm(s, OPC_SW, HOST_BIG_ENDIAN ? hi : lo, base, 0); + tcg_out_opc_imm(s, OPC_SW, HOST_BIG_ENDIAN ? 
lo : hi, base, 4); } break; - default: - tcg_abort(); + g_assert_not_reached(); } } static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi, TCGReg base, MemOp opc) { - const MIPSInsn sw1 = MIPS_BE ? OPC_SWL : OPC_SWR; - const MIPSInsn sw2 = MIPS_BE ? OPC_SWR : OPC_SWL; - const MIPSInsn sd1 = MIPS_BE ? OPC_SDL : OPC_SDR; - const MIPSInsn sd2 = MIPS_BE ? OPC_SDR : OPC_SDL; + const MIPSInsn sw1 = HOST_BIG_ENDIAN ? OPC_SWL : OPC_SWR; + const MIPSInsn sw2 = HOST_BIG_ENDIAN ? OPC_SWR : OPC_SWL; + const MIPSInsn sd1 = HOST_BIG_ENDIAN ? OPC_SDL : OPC_SDR; + const MIPSInsn sd2 = HOST_BIG_ENDIAN ? OPC_SDR : OPC_SDL; - /* Don't clutter the code below with checks to avoid bswapping ZERO. */ - if ((lo | hi) == 0) { - opc &= ~MO_BSWAP; - } - - switch (opc & (MO_SIZE | MO_BSWAP)) { - case MO_16 | MO_BE: + switch (opc & MO_SIZE) { + case MO_16: tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8); - tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 0); - tcg_out_opc_imm(s, OPC_SB, lo, base, 1); + tcg_out_opc_imm(s, OPC_SB, HOST_BIG_ENDIAN ? TCG_TMP0 : lo, base, 0); + tcg_out_opc_imm(s, OPC_SB, HOST_BIG_ENDIAN ? lo : TCG_TMP0, base, 1); break; - case MO_16 | MO_LE: - tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8); - tcg_out_opc_imm(s, OPC_SB, lo, base, 0); - tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 1); - break; - - case MO_32 | MO_BSWAP: - tcg_out_bswap32(s, TCG_TMP3, lo, 0); - lo = TCG_TMP3; - /* fall through */ case MO_32: tcg_out_opc_imm(s, sw1, lo, base, 0); tcg_out_opc_imm(s, sw2, lo, base, 3); break; - case MO_64 | MO_BSWAP: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_bswap64(s, TCG_TMP3, lo); - lo = TCG_TMP3; - } else if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? hi : lo); - tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? lo : hi); - tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16); - tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16); - hi = MIPS_BE ? TCG_TMP0 : TCG_TMP1; - lo = MIPS_BE ? TCG_TMP1 : TCG_TMP0; - } else { - tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi, 0); - tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 0 + 0); - tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 0 + 3); - tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo, 0); - tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 4 + 0); - tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 4 + 3); - break; - } - /* fall through */ case MO_64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_opc_imm(s, sd1, lo, base, 0); tcg_out_opc_imm(s, sd2, lo, base, 7); } else { - tcg_out_opc_imm(s, sw1, MIPS_BE ? hi : lo, base, 0 + 0); - tcg_out_opc_imm(s, sw2, MIPS_BE ? hi : lo, base, 0 + 3); - tcg_out_opc_imm(s, sw1, MIPS_BE ? lo : hi, base, 4 + 0); - tcg_out_opc_imm(s, sw2, MIPS_BE ? lo : hi, base, 4 + 3); + tcg_out_opc_imm(s, sw1, HOST_BIG_ENDIAN ? hi : lo, base, 0 + 0); + tcg_out_opc_imm(s, sw2, HOST_BIG_ENDIAN ? hi : lo, base, 0 + 3); + tcg_out_opc_imm(s, sw1, HOST_BIG_ENDIAN ? lo : hi, base, 4 + 0); + tcg_out_opc_imm(s, sw2, HOST_BIG_ENDIAN ? 
lo : hi, base, 4 + 3); } break; default: - tcg_abort(); + g_assert_not_reached(); } } -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) + +static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addrhi, + MemOpIdx oi, TCGType data_type) { - TCGReg addr_regl, addr_regh __attribute__((unused)); - TCGReg data_regl, data_regh; - MemOpIdx oi; - MemOp opc; -#if defined(CONFIG_SOFTMMU) - tcg_insn_unit *label_ptr[2]; -#endif - unsigned a_bits, s_bits; - TCGReg base = TCG_REG_A0; + MemOp opc = get_memop(oi); + TCGLabelQemuLdst *ldst; + HostAddress h; - data_regl = *args++; - data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); - addr_regl = *args++; - addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); - oi = *args++; - opc = get_memop(oi); - a_bits = get_alignment_bits(opc); - s_bits = opc & MO_SIZE; + ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false); - /* - * R6 removes the left/right instructions but requires the - * system to support misaligned memory accesses. - */ -#if defined(CONFIG_SOFTMMU) - tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0); - if (use_mips32r6_instructions || a_bits >= s_bits) { - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); + if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) { + tcg_out_qemu_st_direct(s, datalo, datahi, h.base, opc); } else { - tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc); + tcg_out_qemu_st_unalign(s, datalo, datahi, h.base, opc); } - add_qemu_ldst_label(s, 0, oi, - (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), - data_regl, data_regh, addr_regl, addr_regh, - s->code_ptr, label_ptr); -#else - if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { - tcg_out_ext32u(s, base, addr_regl); - addr_regl = base; + + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = datalo; + ldst->datahi_reg = datahi; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - if (guest_base == 0) { - base = addr_regl; - } else if (guest_base == (int16_t)guest_base) { - tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base); - } else { - tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); - } - if (use_mips32r6_instructions) { - if (a_bits) { - tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); - } - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); - } else { - if (a_bits && a_bits != s_bits) { - tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); - } - if (a_bits >= s_bits) { - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); - } else { - tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc); - } - } -#endif } static void tcg_out_mb(TCGContext *s, TCGArg a0) @@ -1962,27 +1659,61 @@ static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6, static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) { - TCGReg b0 = TCG_REG_ZERO; + TCGReg base = TCG_REG_ZERO; + int16_t lo = 0; - if (a0 & ~0xffff) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff); - b0 = TCG_REG_V0; + if (a0) { + intptr_t ofs; + if (TCG_TARGET_REG_BITS == 64) { + ofs = tcg_tbrel_diff(s, (void *)a0); + lo = ofs; + if (ofs == lo) { + base = TCG_REG_TB; + } else { + base = TCG_REG_V0; + tcg_out_movi(s, TCG_TYPE_PTR, base, ofs - lo); + tcg_out_opc_reg(s, ALIAS_PADD, base, base, TCG_REG_TB); + } + } else { + ofs = a0; + lo = ofs; + base = TCG_REG_V0; + tcg_out_movi(s, TCG_TYPE_PTR, base, ofs - lo); + } } if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) { tcg_out_movi(s, TCG_TYPE_PTR, 
TCG_TMP0, (uintptr_t)tb_ret_addr); tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); } - tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff); + /* delay slot */ + tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_V0, base, lo); } static void tcg_out_goto_tb(TCGContext *s, int which) { + intptr_t ofs = get_jmp_target_addr(s, which); + TCGReg base, dest; + /* indirect jump method */ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO, - get_jmp_target_addr(s, which)); - tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); + if (TCG_TARGET_REG_BITS == 64) { + dest = TCG_REG_TB; + base = TCG_REG_TB; + ofs = tcg_tbrel_diff(s, (void *)ofs); + } else { + dest = TCG_TMP0; + base = TCG_REG_ZERO; + } + tcg_out_ld(s, TCG_TYPE_PTR, dest, base, ofs); + tcg_out_opc_reg(s, OPC_JR, 0, dest, 0); + /* delay slot */ tcg_out_nop(s); + set_jmp_reset_offset(s, which); + if (TCG_TARGET_REG_BITS == 64) { + /* For the unlinked case, need to reset TCG_REG_TB. */ + tcg_out_ldst(s, ALIAS_PADDI, TCG_REG_TB, TCG_REG_TB, + -tcg_current_code_size(s)); + } } void tb_target_set_jmp_target(const TranslationBlock *tb, int n, @@ -2013,7 +1744,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_goto_ptr: /* jmp to the given host address (could be epilogue) */ tcg_out_opc_reg(s, OPC_JR, 0, a0, 0); - tcg_out_nop(s); + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0); + } else { + tcg_out_nop(s); + } break; case INDEX_op_br: tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, @@ -2245,13 +1980,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_not_i64: i1 = OPC_NOR; goto do_unary; - case INDEX_op_ext8s_i32: - case INDEX_op_ext8s_i64: - i1 = OPC_SEB; - goto do_unary; - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - i1 = OPC_SEH; do_unary: tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1); break; @@ -2272,15 +2000,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_extrh_i64_i32: tcg_out_dsra(s, a0, a1, 32); break; - case INDEX_op_ext32s_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extrl_i64_i32: - tcg_out_opc_sa(s, OPC_SLL, a0, a1, 0); - break; - case INDEX_op_ext32u_i64: - case INDEX_op_extu_i32_i64: - tcg_out_ext32u(s, a0, a1); - break; case INDEX_op_sar_i32: i1 = OPC_SRAV, i2 = OPC_SRA; @@ -2389,17 +2108,52 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]); break; - case INDEX_op_qemu_ld_i32: - tcg_out_qemu_ld(s, args, false); + case INDEX_op_qemu_ld_a64_i32: + if (TCG_TARGET_REG_BITS == 32) { + tcg_out_qemu_ld(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32); + break; + } + /* fall through */ + case INDEX_op_qemu_ld_a32_i32: + tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, args, true); + case INDEX_op_qemu_ld_a32_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); + } else { + tcg_out_qemu_ld(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64); + } break; - case INDEX_op_qemu_st_i32: - tcg_out_qemu_st(s, args, false); + case INDEX_op_qemu_ld_a64_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); + } else { + tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); + } break; - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, args, true); + + case INDEX_op_qemu_st_a64_i32: + if (TCG_TARGET_REG_BITS == 32) { + tcg_out_qemu_st(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32); + break; + } + /* fall through */ + case INDEX_op_qemu_st_a32_i32: + 
tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32); + break; + case INDEX_op_qemu_st_a32_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); + } else { + tcg_out_qemu_st(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64); + } + break; + case INDEX_op_qemu_st_a64_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); + } else { + tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); + } break; case INDEX_op_add2_i32: @@ -2419,8 +2173,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_call: /* Always emitted via tcg_out_call. */ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ + case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ + case INDEX_op_ext8s_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: default: - tcg_abort(); + g_assert_not_reached(); } } @@ -2544,20 +2309,23 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_brcond2_i32: return C_O0_I4(rZ, rZ, rZ, rZ); - case INDEX_op_qemu_ld_i32: - return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 - ? C_O1_I1(r, L) : C_O1_I2(r, L, L)); - case INDEX_op_qemu_st_i32: - return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 - ? C_O0_I2(SZ, S) : C_O0_I3(SZ, S, S)); - case INDEX_op_qemu_ld_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) - : TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, L) - : C_O2_I2(r, r, L, L)); - case INDEX_op_qemu_st_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(SZ, S) - : TARGET_LONG_BITS == 32 ? C_O0_I3(SZ, SZ, S) - : C_O0_I4(SZ, SZ, S, S)); + case INDEX_op_qemu_ld_a32_i32: + return C_O1_I1(r, r); + case INDEX_op_qemu_ld_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r); + case INDEX_op_qemu_st_a32_i32: + return C_O0_I2(rZ, r); + case INDEX_op_qemu_st_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, r, r); + case INDEX_op_qemu_ld_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r); + case INDEX_op_qemu_ld_a64_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r); + case INDEX_op_qemu_st_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, rZ, r); + case INDEX_op_qemu_st_a64_i64: + return (TCG_TARGET_REG_BITS == 64 ? 
C_O0_I2(rZ, r) + : C_O0_I4(rZ, rZ, r, r)); default: g_assert_not_reached(); @@ -2565,15 +2333,15 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) } static const int tcg_target_callee_save_regs[] = { - TCG_REG_S0, /* used for the global env (TCG_AREG0) */ + TCG_REG_S0, TCG_REG_S1, TCG_REG_S2, TCG_REG_S3, TCG_REG_S4, TCG_REG_S5, - TCG_REG_S6, - TCG_REG_S7, - TCG_REG_S8, + TCG_REG_S6, /* used for the tb base (TCG_REG_TB) */ + TCG_REG_S7, /* used for guest_base */ + TCG_REG_S8, /* used for the global env (TCG_AREG0) */ TCG_REG_RA, /* should be last for ABI compliance */ }; @@ -2694,12 +2462,25 @@ static void tcg_target_qemu_prologue(TCGContext *s) } #ifndef CONFIG_SOFTMMU - if (guest_base) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); + if (guest_base != (int16_t)guest_base) { + /* + * The function call abi for n32 and n64 will have loaded $25 (t9) + * with the address of the prologue, so we can use that instead + * of TCG_REG_TB. + */ +#if TCG_TARGET_REG_BITS == 64 && !defined(__mips_abicalls) +# error "Unknown mips abi" +#endif + tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, + TCG_TARGET_REG_BITS == 64 ? TCG_REG_T9 : 0); tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); } #endif + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]); + } + /* Call generated code */ tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0); /* delay slot */ @@ -2880,6 +2661,9 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return address */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */ + if (TCG_TARGET_REG_BITS == 64) { + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tc->tc_ptr */ + } } typedef struct { diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h index 68b11e4d48..e4806f6ff5 100644 --- a/tcg/mips/tcg-target.h +++ b/tcg/mips/tcg-target.h @@ -36,7 +36,6 @@ #endif #define TCG_TARGET_INSN_UNIT_SIZE 4 -#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16 #define TCG_TARGET_NB_REGS 32 #define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) @@ -76,7 +75,7 @@ typedef enum { TCG_REG_RA, TCG_REG_CALL_STACK = TCG_REG_SP, - TCG_AREG0 = TCG_REG_S0, + TCG_AREG0 = TCG_REG_S8, } TCGReg; /* used for function call generation */ @@ -84,13 +83,14 @@ typedef enum { #if _MIPS_SIM == _ABIO32 # define TCG_TARGET_CALL_STACK_OFFSET 16 # define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN +# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF #else # define TCG_TARGET_CALL_STACK_OFFSET 0 # define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL +# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL #endif #define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL #define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN -#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL /* MOVN/MOVZ instructions detection */ #if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \ @@ -203,9 +203,10 @@ extern bool use_mips32r2_instructions; #define TCG_TARGET_HAS_ext16u_i64 0 /* andi rt, rs, 0xffff */ #endif -#define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_HAS_MEMORY_BSWAP 1 +#define TCG_TARGET_HAS_qemu_ldst_i128 0 +#define TCG_TARGET_DEFAULT_MO 0 #define TCG_TARGET_NEED_LDST_LABELS +#define TCG_TARGET_NEED_POOL_LABELS #endif diff --git a/tcg/optimize.c b/tcg/optimize.c index 763bca9ea6..bf975a3a6c 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -190,7 +190,7 @@ static TCGTemp 
*find_better_copy(TCGContext *s, TCGTemp *ts)
         } else if (i->kind > ts->kind) {
             if (i->kind == TEMP_GLOBAL) {
                 g = i;
-            } else if (i->kind == TEMP_LOCAL) {
+            } else if (i->kind == TEMP_TB) {
                 l = i;
             }
         }
@@ -453,9 +453,7 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
         return (uint64_t)x % ((uint64_t)y ? : 1);

     default:
-        fprintf(stderr,
-                "Unrecognized operation %d in do_constant_folding.\n", op);
-        tcg_abort();
+        g_assert_not_reached();
     }
 }

@@ -493,7 +491,7 @@ static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
     case TCG_COND_GTU:
         return x > y;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }

@@ -521,7 +519,7 @@ static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
     case TCG_COND_GTU:
         return x > y;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }

@@ -541,7 +539,7 @@ static bool do_constant_folding_cond_eq(TCGCond c)
     case TCG_COND_EQ:
         return 1;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }

@@ -2186,13 +2184,22 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(orc):
             done = fold_orc(&ctx, op);
             break;
-        case INDEX_op_qemu_ld_i32:
-        case INDEX_op_qemu_ld_i64:
+        case INDEX_op_qemu_ld_a32_i32:
+        case INDEX_op_qemu_ld_a64_i32:
+        case INDEX_op_qemu_ld_a32_i64:
+        case INDEX_op_qemu_ld_a64_i64:
+        case INDEX_op_qemu_ld_a32_i128:
+        case INDEX_op_qemu_ld_a64_i128:
             done = fold_qemu_ld(&ctx, op);
             break;
-        case INDEX_op_qemu_st_i32:
-        case INDEX_op_qemu_st8_i32:
-        case INDEX_op_qemu_st_i64:
+        case INDEX_op_qemu_st8_a32_i32:
+        case INDEX_op_qemu_st8_a64_i32:
+        case INDEX_op_qemu_st_a32_i32:
+        case INDEX_op_qemu_st_a64_i32:
+        case INDEX_op_qemu_st_a32_i64:
+        case INDEX_op_qemu_st_a64_i64:
+        case INDEX_op_qemu_st_a32_i128:
+        case INDEX_op_qemu_st_a64_i128:
             done = fold_qemu_st(&ctx, op);
             break;
         CASE_OP_32_64(rem):
diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h
index a1a345883d..bbd7b21247 100644
--- a/tcg/ppc/tcg-target-con-set.h
+++ b/tcg/ppc/tcg-target-con-set.h
@@ -12,18 +12,16 @@ C_O0_I1(r)
 C_O0_I2(r, r)
 C_O0_I2(r, ri)
-C_O0_I2(S, S)
 C_O0_I2(v, r)
-C_O0_I3(S, S, S)
+C_O0_I3(r, r, r)
+C_O0_I3(o, m, r)
 C_O0_I4(r, r, ri, ri)
-C_O0_I4(S, S, S, S)
-C_O1_I1(r, L)
 C_O1_I1(r, r)
 C_O1_I1(v, r)
 C_O1_I1(v, v)
 C_O1_I1(v, vr)
 C_O1_I2(r, 0, rZ)
-C_O1_I2(r, L, L)
 C_O1_I2(r, rI, ri)
 C_O1_I2(r, rI, rT)
 C_O1_I2(r, r, r)
@@ -36,7 +34,8 @@ C_O1_I2(v, v, v)
 C_O1_I3(v, v, v, v)
 C_O1_I4(r, r, ri, rZ, rZ)
 C_O1_I4(r, r, r, ri, ri)
-C_O2_I1(L, L, L)
-C_O2_I2(L, L, L, L)
+C_O2_I1(r, r, r)
+C_O2_I1(o, m, r)
+C_O2_I2(r, r, r, r)
 C_O2_I4(r, r, rI, rZM, r, r)
 C_O2_I4(r, r, r, r, rI, rZM)
diff --git a/tcg/ppc/tcg-target-con-str.h b/tcg/ppc/tcg-target-con-str.h
index 298ca20d5b..20846901de 100644
--- a/tcg/ppc/tcg-target-con-str.h
+++ b/tcg/ppc/tcg-target-con-str.h
@@ -9,20 +9,14 @@
 * REGS(letter, register_mask)
 */
 REGS('r', ALL_GENERAL_REGS)
+REGS('o', ALL_GENERAL_REGS & 0xAAAAAAAAu) /* odd registers */
 REGS('v', ALL_VECTOR_REGS)
-REGS('A', 1u << TCG_REG_R3)
-REGS('B', 1u << TCG_REG_R4)
-REGS('C', 1u << TCG_REG_R5)
-REGS('D', 1u << TCG_REG_R6)
-REGS('L', ALL_QLOAD_REGS)
-REGS('S', ALL_QSTORE_REGS)

 /*
 * Define constraint letters for constants:
 * CONST(letter, TCG_CT_CONST_* bit set)
 */
 CONST('I', TCG_CT_CONST_S16)
-CONST('J', TCG_CT_CONST_U16)
 CONST('M', TCG_CT_CONST_MONE)
 CONST('T', TCG_CT_CONST_S32)
 CONST('U', TCG_CT_CONST_U32)
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index afadf9a1e3..d47a9e3478 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -46,17 +46,18 @@
 #if TCG_TARGET_REG_BITS == 64
 # define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_EXTEND
+# define TCG_TARGET_CALL_RET_I128  TCG_CALL_RET_NORMAL
 #else
 # define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_NORMAL
+# define TCG_TARGET_CALL_RET_I128  TCG_CALL_RET_BY_REF
 #endif
 #ifdef _CALL_SYSV
 # define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_EVEN
+# define TCG_TARGET_CALL_ARG_I128  TCG_CALL_ARG_BY_REF
 #else
 # define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_NORMAL
+# define TCG_TARGET_CALL_ARG_I128  TCG_CALL_ARG_NORMAL
 #endif
-/* Note sysv arg alignment applies only to 2-word types, not more. */
-#define TCG_TARGET_CALL_ARG_I128   TCG_CALL_ARG_NORMAL
-#define TCG_TARGET_CALL_RET_I128   TCG_CALL_RET_NORMAL

 /* For some memory operations, we need a scratch that isn't R0.  For the AIX
    calling convention, we can re-use the TOC register since we'll be reloading
@@ -67,6 +68,7 @@
 #else
 # define TCG_REG_TMP1  TCG_REG_R12
 #endif
+#define TCG_REG_TMP2   TCG_REG_R11

 #define TCG_VEC_TMP1   TCG_REG_V0
 #define TCG_VEC_TMP2   TCG_REG_V1
@@ -81,7 +83,6 @@
 #define SZR  (TCG_TARGET_REG_BITS / 8)

 #define TCG_CT_CONST_S16   0x100
-#define TCG_CT_CONST_U16   0x200
 #define TCG_CT_CONST_S32   0x400
 #define TCG_CT_CONST_U32   0x800
 #define TCG_CT_CONST_ZERO  0x1000
@@ -91,18 +92,6 @@
 #define ALL_GENERAL_REGS  0xffffffffu
 #define ALL_VECTOR_REGS   0xffffffff00000000ull

-#ifdef CONFIG_SOFTMMU
-#define ALL_QLOAD_REGS \
-    (ALL_GENERAL_REGS & \
-     ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | (1 << TCG_REG_R5)))
-#define ALL_QSTORE_REGS \
-    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | \
-                          (1 << TCG_REG_R5) | (1 << TCG_REG_R6)))
-#else
-#define ALL_QLOAD_REGS  (ALL_GENERAL_REGS & ~(1 << TCG_REG_R3))
-#define ALL_QSTORE_REGS ALL_QLOAD_REGS
-#endif
-
 TCGPowerISA have_isa;
 static bool have_isel;
 bool have_altivec;
@@ -280,8 +269,6 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
     if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
         return 1;
-    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
-        return 1;
     } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
         return 1;
     } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
@@ -308,25 +295,27 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
 #define B      OPCD( 18)
 #define BC     OPCD( 16)
+
 #define LBZ    OPCD( 34)
 #define LHZ    OPCD( 40)
 #define LHA    OPCD( 42)
 #define LWZ    OPCD( 32)
 #define LWZUX  XO31( 55)
-#define STB    OPCD( 38)
-#define STH    OPCD( 44)
-#define STW    OPCD( 36)
-
-#define STD    XO62(  0)
-#define STDU   XO62(  1)
-#define STDX   XO31(149)
-
 #define LD     XO58(  0)
 #define LDX    XO31( 21)
 #define LDU    XO58(  1)
 #define LDUX   XO31( 53)
 #define LWA    XO58(  2)
 #define LWAX   XO31(341)
+#define LQ     OPCD( 56)
+
+#define STB    OPCD( 38)
+#define STH    OPCD( 44)
+#define STW    OPCD( 36)
+#define STD    XO62(  0)
+#define STDU   XO62(  1)
+#define STDX   XO31(149)
+#define STQ    XO62(  2)

 #define ADDIC  OPCD( 12)
 #define ADDI   OPCD( 14)
@@ -774,31 +763,54 @@ static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
     tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
 }

-static inline void tcg_out_ext8s(TCGContext *s, TCGReg dst, TCGReg src)
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
 {
     tcg_out32(s, EXTSB | RA(dst) | RS(src));
 }

-static inline void tcg_out_ext16s(TCGContext *s, TCGReg dst, TCGReg src)
+static void tcg_out_ext8u(TCGContext *s, TCGReg dst, TCGReg src)
+{
+    tcg_out32(s, ANDI | SAI(src, dst, 0xff));
+}
+
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
 {
     tcg_out32(s, EXTSH | RA(dst) | RS(src));
 }

-static inline void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
+static void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
 {
     tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
 }

-static inline void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
+static void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
 {
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
     tcg_out32(s, EXTSW | RA(dst) | RS(src));
 }

-static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
+static void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
 {
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
     tcg_out_rld(s, RLDICL, dst, src, 0, 32);
 }

+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
+{
+    tcg_out_ext32s(s, dst, src);
+}
+
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
+{
+    tcg_out_ext32u(s, dst, src);
+}
+
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+    tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
+}
+
 static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
 {
     tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
@@ -837,7 +849,7 @@ static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
     if (have_isa_3_10) {
         tcg_out32(s, BRH | RA(dst) | RS(src));
         if (flags & TCG_BSWAP_OS) {
-            tcg_out_ext16s(s, dst, dst);
+            tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst);
         } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
             tcg_out_ext16u(s, dst, dst);
         }
@@ -856,7 +868,7 @@ static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
     tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);

     if (flags & TCG_BSWAP_OS) {
-        tcg_out_ext16s(s, dst, tmp);
+        tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp);
     } else {
         tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
     }
@@ -1130,6 +1142,11 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
     }
 }

+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+    return false;
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -1509,7 +1526,7 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
         break;

     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
     op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
@@ -1680,7 +1697,7 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
         break;

     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
@@ -1834,7 +1851,7 @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
         break;

     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
@@ -1947,96 +1964,167 @@ static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
     [MO_BSWAP | MO_UQ] = STDBRX,
 };

-static const uint32_t qemu_exts_opc[4] = {
-    EXTSB, EXTSH, EXTSW, 0
-};
-
-#if defined (CONFIG_SOFTMMU)
-/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
- *                                 int mmu_idx, uintptr_t ra)
- */
-static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
-    [MO_UB] = helper_ret_ldub_mmu,
-    [MO_LEUW] = helper_le_lduw_mmu,
-    [MO_LEUL] = helper_le_ldul_mmu,
-    [MO_LEUQ] = helper_le_ldq_mmu,
-    [MO_BEUW] = helper_be_lduw_mmu,
-    [MO_BEUL] = helper_be_ldul_mmu,
-    [MO_BEUQ] = helper_be_ldq_mmu,
-};
-
-/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
- *                                 uintxx_t val, int mmu_idx, uintptr_t ra)
- */
-static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
-    [MO_UB] = helper_ret_stb_mmu,
-    [MO_LEUW] = helper_le_stw_mmu,
-    [MO_LEUL] = helper_le_stl_mmu,
-    [MO_LEUQ] = helper_le_stq_mmu,
-    [MO_BEUW] = helper_be_stw_mmu,
-    [MO_BEUL] = helper_be_stl_mmu,
-    [MO_BEUQ] = helper_be_stq_mmu,
-};
-
-/* We expect to use a 16-bit negative offset from ENV. */
-QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
-QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
-
-/* Perform the TLB load and compare.  Places the result of the comparison
-   in CR7, loads the addend of the TLB into R3, and returns the register
-   containing the guest address (zero-extended into R4).  Clobbers R0 and R2. */
-
-static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
-                               TCGReg addrlo, TCGReg addrhi,
-                               int mem_index, bool is_read)
+static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
 {
-    int cmp_off
-        = (is_read
-           ? offsetof(CPUTLBEntry, addr_read)
-           : offsetof(CPUTLBEntry, addr_write));
+    if (arg < 0) {
+        arg = TCG_REG_TMP1;
+    }
+    tcg_out32(s, MFSPR | RT(arg) | LR);
+    return arg;
+}
+
+/*
+ * For the purposes of ppc32 sorting 4 input registers into 4 argument
+ * registers, there is an outside chance we would require 3 temps.
+ */
+static const TCGLdstHelperParam ldst_helper_param = {
+    .ra_gen = ldst_ra_gen,
+    .ntmp = 3,
+    .tmp = { TCG_REG_TMP1, TCG_REG_TMP2, TCG_REG_R0 }
+};
+
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+{
+    MemOp opc = get_memop(lb->oi);
+
+    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
+        return false;
+    }
+
+    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
+    tcg_out_call_int(s, LK, qemu_ld_helpers[opc & MO_SIZE]);
+    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
+
+    tcg_out_b(s, 0, lb->raddr);
+    return true;
+}
+
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+{
+    MemOp opc = get_memop(lb->oi);
+
+    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
+        return false;
+    }
+
+    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
+    tcg_out_call_int(s, LK, qemu_st_helpers[opc & MO_SIZE]);
+
+    tcg_out_b(s, 0, lb->raddr);
+    return true;
+}
+
+typedef struct {
+    TCGReg base;
+    TCGReg index;
+    TCGAtomAlign aa;
+} HostAddress;
+
+bool tcg_target_has_memory_bswap(MemOp memop)
+{
+    TCGAtomAlign aa;
+
+    if ((memop & MO_SIZE) <= MO_64) {
+        return true;
+    }
+
+    /*
+     * Reject 16-byte memop with 16-byte atomicity,
+     * but do allow a pair of 64-bit operations.
+     */
+    aa = atom_and_align_for_opc(tcg_ctx, memop, MO_ATOM_IFALIGN, true);
+    return aa.atom <= MO_64;
+}
+
+/*
+ * For softmmu, perform the TLB load and compare.
+ * For useronly, perform any required alignment tests.
+ * In both cases, return a TCGLabelQemuLdst structure if the slow path
+ * is required and fill in @h with the host address for the fast path.
+ */
+static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
+                                           TCGReg addrlo, TCGReg addrhi,
+                                           MemOpIdx oi, bool is_ld)
+{
+    TCGLabelQemuLdst *ldst = NULL;
+    MemOp opc = get_memop(oi);
+    MemOp a_bits, s_bits;
+
+    /*
+     * Book II, Section 1.4, Single-Copy Atomicity, specifies:
+     *
+     * Before 3.0, "An access that is not atomic is performed as a set of
+     * smaller disjoint atomic accesses. In general, the number and alignment
+     * of these accesses are implementation-dependent." Thus MO_ATOM_IFALIGN.
+     *
+     * As of 3.0, "the non-atomic access is performed as described in
+     * the corresponding list", which matches MO_ATOM_SUBALIGN.
+     */
+    s_bits = opc & MO_SIZE;
+    h->aa = atom_and_align_for_opc(s, opc,
+                                   have_isa_3_00 ? MO_ATOM_SUBALIGN
+                                                 : MO_ATOM_IFALIGN,
+                                   s_bits == MO_128);
+    a_bits = h->aa.align;
+
+#ifdef CONFIG_SOFTMMU
+    int mem_index = get_mmuidx(oi);
+    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
+                        : offsetof(CPUTLBEntry, addr_write);
     int fast_off = TLB_MASK_TABLE_OFS(mem_index);
     int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
     int table_off = fast_off + offsetof(CPUTLBDescFast, table);
-    unsigned s_bits = opc & MO_SIZE;
-    unsigned a_bits = get_alignment_bits(opc);
+
+    ldst = new_ldst_label(s);
+    ldst->is_ld = is_ld;
+    ldst->oi = oi;
+    ldst->addrlo_reg = addrlo;
+    ldst->addrhi_reg = addrhi;

     /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);
+    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
+    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);

     /* Extract the page index, shifted into place for tlb index. */
     if (TCG_TARGET_REG_BITS == 32) {
-        tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
-                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+        tcg_out_shri32(s, TCG_REG_R0, addrlo,
+                       s->page_bits - CPU_TLB_ENTRY_BITS);
     } else {
-        tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
-                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+        tcg_out_shri64(s, TCG_REG_R0, addrlo,
+                       s->page_bits - CPU_TLB_ENTRY_BITS);
     }
-    tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));
+    tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));

-    /* Load the TLB comparator. */
+    /* Load the (low part) TLB comparator into TMP2. */
     if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
         uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
                         ? LWZUX : LDUX);
-        tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
+        tcg_out32(s, lxu | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
     } else {
-        tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
+        tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
         if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
-            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
+            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2,
+                       TCG_REG_TMP1, cmp_off + 4 * HOST_BIG_ENDIAN);
         } else {
-            tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
+            tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
         }
     }

-    /* Load the TLB addend for use on the fast path.  Do this asap
-       to minimize any load use delay. */
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
-               offsetof(CPUTLBEntry, addend));
+    /*
+     * Load the TLB addend for use on the fast path.
+     * Do this asap to minimize any load use delay.
+     */
+    if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
+                   offsetof(CPUTLBEntry, addend));
+    }

-    /* Clear the non-page, non-alignment bits from the address */
+    /* Clear the non-page, non-alignment bits from the address in R0. */
     if (TCG_TARGET_REG_BITS == 32) {
-        /* We don't support unaligned accesses on 32-bits.
+        /*
+         * We don't support unaligned accesses on 32-bits.
          * Preserve the bottom bits and thus trigger a comparison
          * failure on unaligned accesses.
          */
@@ -2044,11 +2132,12 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
             a_bits = s_bits;
         }
         tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
-                    (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
+                    (32 - a_bits) & 31, 31 - s->page_bits);
     } else {
         TCGReg t = addrlo;

-        /* If the access is unaligned, we need to make sure we fail if we
+        /*
+         * If the access is unaligned, we need to make sure we fail if we
          * cross a page boundary.  The trick is to add the access size-1
          * to the address before masking the low bits.  That will make the
          * address overflow to the next page if we cross a page boundary,
@@ -2064,371 +2153,221 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
         /* Mask the address for the requested alignment. */
         if (TARGET_LONG_BITS == 32) {
             tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
-                        (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
-            /* Zero-extend the address for use in the final address. */
-            tcg_out_ext32u(s, TCG_REG_R4, addrlo);
-            addrlo = TCG_REG_R4;
+                        (32 - a_bits) & 31, 31 - s->page_bits);
         } else if (a_bits == 0) {
-            tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
+            tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits);
         } else {
             tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
-                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
-            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
+                        64 - s->page_bits, s->page_bits - a_bits);
+            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0);
         }
     }

     if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
+        /* Low part comparison into cr7. */
+        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
                     0, 7, TCG_TYPE_I32);
-        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
+
+        /* Load the high part TLB comparator into TMP2. */
+        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
+                   cmp_off + 4 * !HOST_BIG_ENDIAN);
+
+        /* Load addend, deferred for this case. */
+        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
+                   offsetof(CPUTLBEntry, addend));
+
+        /* High part comparison into cr6. */
+        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2, 0, 6, TCG_TYPE_I32);
+
+        /* Combine comparisons into cr7. */
         tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
     } else {
-        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
+        /* Full comparison into cr7. */
+        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
                     0, 7, TCG_TYPE_TL);
     }
-    return addrlo;
-}
-
-/* Record the context of a call to the out of line helper code for the slow
-   path for a load or store, so that we can later generate the correct
-   helper code.  */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
-                                TCGReg datalo_reg, TCGReg datahi_reg,
-                                TCGReg addrlo_reg, TCGReg addrhi_reg,
-                                tcg_insn_unit *raddr, tcg_insn_unit *lptr)
-{
-    TCGLabelQemuLdst *label = new_ldst_label(s);
-
-    label->is_ld = is_ld;
-    label->oi = oi;
-    label->datalo_reg = datalo_reg;
-    label->datahi_reg = datahi_reg;
-    label->addrlo_reg = addrlo_reg;
-    label->addrhi_reg = addrhi_reg;
-    label->raddr = tcg_splitwx_to_rx(raddr);
-    label->label_ptr[0] = lptr;
-}
-
-static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
-{
-    MemOpIdx oi = lb->oi;
-    MemOp opc = get_memop(oi);
-    TCGReg hi, lo, arg = TCG_REG_R3;
-
-    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
-        return false;
-    }
-
-    tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
-
-    lo = lb->addrlo_reg;
-    hi = lb->addrhi_reg;
-    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-        arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
-        tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
-        tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
-    } else {
-        /* If the address needed to be zero-extended, we'll have already
-           placed it in R4.  The only remaining case is 64-bit guest. */
-        tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
-    }
-
-    tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
-    tcg_out32(s, MFSPR | RT(arg) | LR);
-
-    tcg_out_call_int(s, LK, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
-
-    lo = lb->datalo_reg;
-    hi = lb->datahi_reg;
-    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
-        tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
-        tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
-    } else if (opc & MO_SIGN) {
-        uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
-        tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
-    } else {
-        tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
-    }
-
-    tcg_out_b(s, 0, lb->raddr);
-    return true;
-}
-
-static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
-{
-    MemOpIdx oi = lb->oi;
-    MemOp opc = get_memop(oi);
-    MemOp s_bits = opc & MO_SIZE;
-    TCGReg hi, lo, arg = TCG_REG_R3;
-
-    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
-        return false;
-    }
-
-    tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
-
-    lo = lb->addrlo_reg;
-    hi = lb->addrhi_reg;
-    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-        arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
-        tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
-        tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
-    } else {
-        /* If the address needed to be zero-extended, we'll have already
-           placed it in R4.  The only remaining case is 64-bit guest. */
-        tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
-    }
-
-    lo = lb->datalo_reg;
-    hi = lb->datahi_reg;
-    if (TCG_TARGET_REG_BITS == 32) {
-        switch (s_bits) {
-        case MO_64:
-            arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
-            tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
-            /* FALLTHRU */
-        case MO_32:
-            tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
-            break;
-        default:
-            tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
-            break;
-        }
-    } else {
-        if (s_bits == MO_64) {
-            tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
-        } else {
-            tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
-        }
-    }
-
-    tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
-    tcg_out32(s, MFSPR | RT(arg) | LR);
-
-    tcg_out_call_int(s, LK, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
-
-    tcg_out_b(s, 0, lb->raddr);
-    return true;
-}
-#else
-
-static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
-                                   TCGReg addrhi, unsigned a_bits)
-{
-    unsigned a_mask = (1 << a_bits) - 1;
-    TCGLabelQemuLdst *label = new_ldst_label(s);
-
-    label->is_ld = is_ld;
-    label->addrlo_reg = addrlo;
-    label->addrhi_reg = addrhi;
-
-    /* We are expecting a_bits to max out at 7, much lower than ANDI. */
-    tcg_debug_assert(a_bits < 16);
-    tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, a_mask));
-
-    label->label_ptr[0] = s->code_ptr;
-    tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
-
-    label->raddr = tcg_splitwx_to_rx(s->code_ptr);
-}
-
-static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
-{
-    if (!reloc_pc14(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
-        return false;
-    }
-
-    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-        TCGReg arg = TCG_REG_R4;
-
-        arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
-        if (l->addrlo_reg != arg) {
-            tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
-            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
-        } else if (l->addrhi_reg != arg + 1) {
-            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
-            tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
-        } else {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, arg);
-            tcg_out_mov(s, TCG_TYPE_I32, arg, arg + 1);
-            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, TCG_REG_R0);
-        }
-    } else {
-        tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R4, l->addrlo_reg);
-    }
-    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, TCG_AREG0);
-
-    /* "Tail call" to the helper, with the return address back inline. */
-    tcg_out_call_int(s, 0, (const void *)(l->is_ld ? helper_unaligned_ld
-                                          : helper_unaligned_st));
-    return true;
-}
-
-static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
-    return tcg_out_fail_alignment(s, l);
-}
-
-static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
-    return tcg_out_fail_alignment(s, l);
-}
-
-#endif /* SOFTMMU */
-
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
-{
-    TCGReg datalo, datahi, addrlo, rbase;
-    TCGReg addrhi __attribute__((unused));
-    MemOpIdx oi;
-    MemOp opc, s_bits;
-#ifdef CONFIG_SOFTMMU
-    int mem_index;
-    tcg_insn_unit *label_ptr;
-#else
-    unsigned a_bits;
-#endif
-
-    datalo = *args++;
-    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
-    addrlo = *args++;
-    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-    s_bits = opc & MO_SIZE;
-
-#ifdef CONFIG_SOFTMMU
-    mem_index = get_mmuidx(oi);
-    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
     /* Load a pointer into the current opcode w/conditional branch-link. */
-    label_ptr = s->code_ptr;
+    ldst->label_ptr[0] = s->code_ptr;
     tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);

-    rbase = TCG_REG_R3;
-#else  /* !CONFIG_SOFTMMU */
-    a_bits = get_alignment_bits(opc);
+    h->base = TCG_REG_TMP1;
+#else
     if (a_bits) {
-        tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
-    }
-    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
-    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
-        tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
-        addrlo = TCG_REG_TMP1;
+        ldst = new_ldst_label(s);
+        ldst->is_ld = is_ld;
+        ldst->oi = oi;
+        ldst->addrlo_reg = addrlo;
+        ldst->addrhi_reg = addrhi;
+
+        /* We are expecting a_bits to max out at 7, much lower than ANDI. */
+        tcg_debug_assert(a_bits < 16);
+        tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
+
+        ldst->label_ptr[0] = s->code_ptr;
+        tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
     }
+
+    h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
 #endif

-    if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
+    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+        /* Zero-extend the guest address for use in the host address. */
+        tcg_out_ext32u(s, TCG_REG_R0, addrlo);
+        h->index = TCG_REG_R0;
+    } else {
+        h->index = addrlo;
+    }
+
+    return ldst;
+}
+
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
+{
+    MemOp opc = get_memop(oi);
+    TCGLabelQemuLdst *ldst;
+    HostAddress h;
+
+    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
+
+    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
         if (opc & MO_BSWAP) {
-            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
-            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
-            tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
-        } else if (rbase != 0) {
-            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
-            tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
-            tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
-        } else if (addrlo == datahi) {
-            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
-            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
+            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
+            tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
+            tcg_out32(s, LWBRX | TAB(datahi, h.base, TCG_REG_R0));
+        } else if (h.base != 0) {
+            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
+            tcg_out32(s, LWZX | TAB(datahi, h.base, h.index));
+            tcg_out32(s, LWZX | TAB(datalo, h.base, TCG_REG_R0));
+        } else if (h.index == datahi) {
+            tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
+            tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
         } else {
-            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
-            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
+            tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
+            tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
         }
     } else {
         uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
         if (!have_isa_2_06 && insn == LDBRX) {
-            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
-            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
-            tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
+            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
+            tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
+            tcg_out32(s, LWBRX | TAB(TCG_REG_R0, h.base, TCG_REG_R0));
             tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
         } else if (insn) {
-            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
+            tcg_out32(s, insn | TAB(datalo, h.base, h.index));
         } else {
             insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
-            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
-            insn = qemu_exts_opc[s_bits];
-            tcg_out32(s, insn | RA(datalo) | RS(datalo));
+            tcg_out32(s, insn | TAB(datalo, h.base, h.index));
+            tcg_out_movext(s, TCG_TYPE_REG, datalo,
+                           TCG_TYPE_REG, opc & MO_SSIZE, datalo);
         }
     }

-#ifdef CONFIG_SOFTMMU
-    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
-                        s->code_ptr, label_ptr);
-#endif
+    if (ldst) {
+        ldst->type = data_type;
+        ldst->datalo_reg = datalo;
+        ldst->datahi_reg = datahi;
+        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+    }
 }

-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg datalo, datahi, addrlo, rbase;
-    TCGReg addrhi __attribute__((unused));
-    MemOpIdx oi;
-    MemOp opc, s_bits;
-#ifdef CONFIG_SOFTMMU
-    int mem_index;
-    tcg_insn_unit *label_ptr;
-#else
-    unsigned a_bits;
-#endif
+    MemOp opc = get_memop(oi);
+    TCGLabelQemuLdst *ldst;
+    HostAddress h;

-    datalo = *args++;
-    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
-    addrlo = *args++;
-    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-    s_bits = opc & MO_SIZE;
+    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);

-#ifdef CONFIG_SOFTMMU
-    mem_index = get_mmuidx(oi);
-    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
-
-    /* Load a pointer into the current opcode w/conditional branch-link. */
-    label_ptr = s->code_ptr;
-    tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
-
-    rbase = TCG_REG_R3;
-#else  /* !CONFIG_SOFTMMU */
-    a_bits = get_alignment_bits(opc);
-    if (a_bits) {
-        tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
-    }
-    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
-    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
-        tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
-        addrlo = TCG_REG_TMP1;
-    }
-#endif
-
-    if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
+    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
         if (opc & MO_BSWAP) {
-            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
-            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
-            tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
-        } else if (rbase != 0) {
-            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
-            tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
-            tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
+            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
+            tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
+            tcg_out32(s, STWBRX | SAB(datahi, h.base, TCG_REG_R0));
+        } else if (h.base != 0) {
+            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
+            tcg_out32(s, STWX | SAB(datahi, h.base, h.index));
+            tcg_out32(s, STWX | SAB(datalo, h.base, TCG_REG_R0));
         } else {
-            tcg_out32(s, STW | TAI(datahi, addrlo, 0));
-            tcg_out32(s, STW | TAI(datalo, addrlo, 4));
+            tcg_out32(s, STW | TAI(datahi, h.index, 0));
+            tcg_out32(s, STW | TAI(datalo, h.index, 4));
         }
     } else {
         uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
         if (!have_isa_2_06 && insn == STDBRX) {
-            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
-            tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
+            tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
+            tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, h.index, 4));
             tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
-            tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
+            tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP1));
         } else {
-            tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
+            tcg_out32(s, insn | SAB(datalo, h.base, h.index));
         }
     }

-#ifdef CONFIG_SOFTMMU
-    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
-                        s->code_ptr, label_ptr);
-#endif
+    if (ldst) {
+        ldst->type = data_type;
+        ldst->datalo_reg = datalo;
+        ldst->datahi_reg = datahi;
+        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+    }
+}
+
+static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
+{
+    TCGLabelQemuLdst *ldst;
+    HostAddress h;
+    bool need_bswap;
+    uint32_t insn;
+    TCGReg index;
+
+    ldst = prepare_host_addr(s, &h, addr_reg, -1, oi, is_ld);
+
+    /* Compose the final address, as LQ/STQ have no indexing. */
+    index = h.index;
+    if (h.base != 0) {
+        index = TCG_REG_TMP1;
+        tcg_out32(s, ADD | TAB(index, h.base, h.index));
+    }
+    need_bswap = get_memop(oi) & MO_BSWAP;
+
+    if (h.aa.atom == MO_128) {
+        tcg_debug_assert(!need_bswap);
+        tcg_debug_assert(datalo & 1);
+        tcg_debug_assert(datahi == datalo - 1);
+        insn = is_ld ? LQ : STQ;
+        tcg_out32(s, insn | TAI(datahi, index, 0));
+    } else {
+        TCGReg d1, d2;
+
+        if (HOST_BIG_ENDIAN ^ need_bswap) {
+            d1 = datahi, d2 = datalo;
+        } else {
+            d1 = datalo, d2 = datahi;
+        }
+
+        if (need_bswap) {
+            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 8);
+            insn = is_ld ? LDBRX : STDBRX;
+            tcg_out32(s, insn | TAB(d1, 0, index));
+            tcg_out32(s, insn | TAB(d2, index, TCG_REG_R0));
+        } else {
+            insn = is_ld ? LD : STD;
+            tcg_out32(s, insn | TAI(d1, index, 0));
+            tcg_out32(s, insn | TAI(d2, index, 8));
+        }
+    }
+
+    if (ldst) {
+        ldst->type = TCG_TYPE_I128;
+        ldst->datalo_reg = datalo;
+        ldst->datahi_reg = datahi;
+        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+    }
 }

 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
@@ -2625,7 +2564,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld8s_i64:
         tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
-        tcg_out_ext8s(s, args[0], args[0]);
+        tcg_out_ext8s(s, TCG_TYPE_REG, args[0], args[0]);
         break;
     case INDEX_op_ld16u_i32:
     case INDEX_op_ld16u_i64:
@@ -2960,33 +2899,72 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
         break;

-    case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args, false);
+    case INDEX_op_qemu_ld_a64_i32:
+        if (TCG_TARGET_REG_BITS == 32) {
+            tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
+                            args[3], TCG_TYPE_I32);
+            break;
+        }
+        /* fall through */
+    case INDEX_op_qemu_ld_a32_i32:
+        tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
         break;
-    case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args, true);
+    case INDEX_op_qemu_ld_a32_i64:
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
+                            args[2], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
+                            args[3], TCG_TYPE_I64);
+        }
         break;
-    case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, args, false);
+    case INDEX_op_qemu_ld_a64_i64:
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
+                            args[2], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
+                            args[4], TCG_TYPE_I64);
+        }
         break;
-    case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args, true);
+    case INDEX_op_qemu_ld_a32_i128:
+    case INDEX_op_qemu_ld_a64_i128:
+        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
         break;

-    case INDEX_op_ext8s_i32:
-    case INDEX_op_ext8s_i64:
-        tcg_out_ext8s(s, args[0], args[1]);
+    case INDEX_op_qemu_st_a64_i32:
+        if (TCG_TARGET_REG_BITS == 32) {
+            tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
+                            args[3], TCG_TYPE_I32);
+            break;
+        }
+        /* fall through */
+    case INDEX_op_qemu_st_a32_i32:
+        tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
         break;
-    case INDEX_op_ext16s_i32:
-    case INDEX_op_ext16s_i64:
-        tcg_out_ext16s(s, args[0], args[1]);
+    case INDEX_op_qemu_st_a32_i64:
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
+                            args[2], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
+                            args[3], TCG_TYPE_I64);
+        }
         break;
-    case INDEX_op_ext_i32_i64:
-    case INDEX_op_ext32s_i64:
-        tcg_out_ext32s(s, args[0], args[1]);
+    case INDEX_op_qemu_st_a64_i64:
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
+                            args[2], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
+                            args[4], TCG_TYPE_I64);
+        }
         break;
-    case INDEX_op_extu_i32_i64:
-        tcg_out_ext32u(s, args[0], args[1]);
+    case INDEX_op_qemu_st_a32_i128:
+    case INDEX_op_qemu_st_a64_i128:
+        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
         break;

     case INDEX_op_setcond_i32:
@@ -3124,8 +3102,21 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_call:      /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb:   /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb:   /* Always emitted via tcg_out_goto_tb. */
+    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
+    case INDEX_op_ext8s_i64:
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext8u_i64:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16s_i64:
+    case INDEX_op_ext16u_i32:
+    case INDEX_op_ext16u_i64:
+    case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
+    case INDEX_op_ext_i32_i64:
+    case INDEX_op_extu_i32_i64:
+    case INDEX_op_extrl_i64_i32:
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
@@ -3790,25 +3781,30 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_sub2_i32:
         return C_O2_I4(r, r, rI, rZM, r, r);

-    case INDEX_op_qemu_ld_i32:
-        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
-                ? C_O1_I1(r, L)
-                : C_O1_I2(r, L, L));
+    case INDEX_op_qemu_ld_a32_i32:
+        return C_O1_I1(r, r);
+    case INDEX_op_qemu_ld_a64_i32:
+        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
+    case INDEX_op_qemu_ld_a32_i64:
+        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
+    case INDEX_op_qemu_ld_a64_i64:
+        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);

-    case INDEX_op_qemu_st_i32:
-        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
-                ? C_O0_I2(S, S)
-                : C_O0_I3(S, S, S));
+    case INDEX_op_qemu_st_a32_i32:
+        return C_O0_I2(r, r);
+    case INDEX_op_qemu_st_a64_i32:
+        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
+    case INDEX_op_qemu_st_a32_i64:
+        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
+    case INDEX_op_qemu_st_a64_i64:
+        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r);

-    case INDEX_op_qemu_ld_i64:
-        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
-                : TARGET_LONG_BITS == 32 ? C_O2_I1(L, L, L)
-                : C_O2_I2(L, L, L, L));
-
-    case INDEX_op_qemu_st_i64:
-        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(S, S)
-                : TARGET_LONG_BITS == 32 ? C_O0_I3(S, S, S)
-                : C_O0_I4(S, S, S, S));
+    case INDEX_op_qemu_ld_a32_i128:
+    case INDEX_op_qemu_ld_a64_i128:
+        return C_O2_I1(o, m, r);
+    case INDEX_op_qemu_st_a32_i128:
+    case INDEX_op_qemu_st_a64_i128:
+        return C_O0_I3(o, m, r);

     case INDEX_op_add_vec:
     case INDEX_op_sub_vec:
@@ -3958,7 +3954,8 @@ static void tcg_target_init(TCGContext *s)
 #if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
 #endif
-    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
     tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
     tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
     if (USE_REG_TB) {
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index af81c5a57f..40f20b0c1a 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -34,7 +34,6 @@

 #define TCG_TARGET_NB_REGS 64
 #define TCG_TARGET_INSN_UNIT_SIZE 4
-#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16

 typedef enum {
     TCG_REG_R0,  TCG_REG_R1,  TCG_REG_R2,  TCG_REG_R3,
@@ -149,6 +148,9 @@ extern bool have_vsx;
 #define TCG_TARGET_HAS_mulsh_i64        1
 #endif

+#define TCG_TARGET_HAS_qemu_ldst_i128   \
+    (TCG_TARGET_REG_BITS == 64 && have_isa_2_07)
+
 /*
  * While technically Altivec could support V64, it has no 64-bit store
  * instruction and substituting two 32-bit stores makes the generated
@@ -179,7 +181,6 @@ extern bool have_vsx;
 #define TCG_TARGET_HAS_cmpsel_vec       0

 #define TCG_TARGET_DEFAULT_MO (0)
-#define TCG_TARGET_HAS_MEMORY_BSWAP     1

 #define TCG_TARGET_NEED_LDST_LABELS
 #define TCG_TARGET_NEED_POOL_LABELS
diff --git a/tcg/region.c b/tcg/region.c
index 88d6bb273f..bef4c4756f 100644
--- a/tcg/region.c
+++ b/tcg/region.c
@@ -28,6 +28,7 @@
 #include "qemu/mprotect.h"
 #include "qemu/memalign.h"
 #include "qemu/cacheinfo.h"
+#include "qemu/qtree.h"
 #include "qapi/error.h"
 #include "exec/exec-all.h"
 #include "tcg/tcg.h"
@@ -36,7 +37,7 @@

 struct tcg_region_tree {
     QemuMutex lock;
-    GTree *tree;
+    QTree *tree;
     /* padding to avoid false sharing is computed at run-time */
 };

@@ -163,7 +164,7 @@ static void tcg_region_trees_init(void)
         struct tcg_region_tree *rt = region_trees + i * tree_size;

         qemu_mutex_init(&rt->lock);
-        rt->tree = g_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
+        rt->tree = q_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
     }
 }

@@ -202,7 +203,7 @@ void tcg_tb_insert(TranslationBlock *tb)

     g_assert(rt != NULL);
     qemu_mutex_lock(&rt->lock);
-    g_tree_insert(rt->tree, &tb->tc, tb);
+    q_tree_insert(rt->tree, &tb->tc, tb);
     qemu_mutex_unlock(&rt->lock);
 }

@@ -212,7 +213,7 @@ void tcg_tb_remove(TranslationBlock *tb)

     g_assert(rt != NULL);
     qemu_mutex_lock(&rt->lock);
-    g_tree_remove(rt->tree, &tb->tc);
+    q_tree_remove(rt->tree, &tb->tc);
     qemu_mutex_unlock(&rt->lock);
 }

@@ -232,7 +233,7 @@ TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
     }

     qemu_mutex_lock(&rt->lock);
-    tb = g_tree_lookup(rt->tree, &s);
+    tb = q_tree_lookup(rt->tree, &s);
     qemu_mutex_unlock(&rt->lock);
     return tb;
 }
@@ -267,7 +268,7 @@ void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
     for (i = 0; i < region.n; i++) {
         struct tcg_region_tree *rt = region_trees + i * tree_size;

-        g_tree_foreach(rt->tree, func, user_data);
+        q_tree_foreach(rt->tree, func, user_data);
     }
     tcg_region_tree_unlock_all();
 }
@@ -281,7 +282,7 @@ size_t tcg_nb_tbs(void)
     for (i = 0; i < region.n; i++) {
         struct tcg_region_tree *rt = region_trees + i * tree_size;

-        nb_tbs += g_tree_nnodes(rt->tree);
+        nb_tbs += q_tree_nnodes(rt->tree);
     }
     tcg_region_tree_unlock_all();
     return nb_tbs;
@@ -296,8 +297,8 @@ static void tcg_region_tree_reset_all(void)
         struct tcg_region_tree *rt = region_trees + i * tree_size;

         /* Increment the refcount first so that destroy acts as a reset */
-        g_tree_ref(rt->tree);
-        g_tree_destroy(rt->tree);
+        q_tree_ref(rt->tree);
+        q_tree_destroy(rt->tree);
     }
     tcg_region_tree_unlock_all();
 }
diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index cf0ac4d751..aac5ceee2b 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -10,21 +10,14 @@
 * tcg-target-con-str.h; the constraint combination is inclusive or.
 */
 C_O0_I1(r)
-C_O0_I2(LZ, L)
 C_O0_I2(rZ, r)
 C_O0_I2(rZ, rZ)
-C_O0_I3(LZ, L, L)
-C_O0_I3(LZ, LZ, L)
-C_O0_I4(LZ, LZ, L, L)
-C_O0_I4(rZ, rZ, rZ, rZ)
-C_O1_I1(r, L)
 C_O1_I1(r, r)
-C_O1_I2(r, L, L)
 C_O1_I2(r, r, ri)
 C_O1_I2(r, r, rI)
+C_O1_I2(r, r, rJ)
 C_O1_I2(r, rZ, rN)
 C_O1_I2(r, rZ, rZ)
-C_O1_I4(r, rZ, rZ, rZ, rZ)
-C_O2_I1(r, r, L)
-C_O2_I2(r, r, L, L)
+C_N1_I2(r, r, rM)
+C_O1_I4(r, r, rI, rM, rM)
 C_O2_I4(r, r, rZ, rZ, rM, rM)
diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h
index 8d8afaee53..d5c419dff1 100644
--- a/tcg/riscv/tcg-target-con-str.h
+++ b/tcg/riscv/tcg-target-con-str.h
@@ -9,13 +9,13 @@
 * REGS(letter, register_mask)
 */
 REGS('r', ALL_GENERAL_REGS)
-REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)

 /*
 * Define constraint letters for constants:
 * CONST(letter, TCG_CT_CONST_* bit set)
 */
 CONST('I', TCG_CT_CONST_S12)
+CONST('J', TCG_CT_CONST_J12)
 CONST('N', TCG_CT_CONST_N12)
 CONST('M', TCG_CT_CONST_M12)
 CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index 558de127ef..c0257124fa 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -113,6 +113,20 @@ static const int tcg_target_call_iarg_regs[] = {
     TCG_REG_A7,
 };

+#ifndef have_zbb
+bool have_zbb;
+#endif
+#if defined(__riscv_arch_test) && defined(__riscv_zba)
+# define have_zba true
+#else
+static bool have_zba;
+#endif
+#if defined(__riscv_arch_test) && defined(__riscv_zicond)
+# define have_zicond true
+#else
+static bool have_zicond;
+#endif
+
 static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 {
     tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
@@ -124,28 +138,11 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 #define TCG_CT_CONST_S12   0x200
 #define TCG_CT_CONST_N12   0x400
 #define TCG_CT_CONST_M12   0x800
+#define TCG_CT_CONST_J12   0x1000

-#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
-/*
- * For softmmu, we need to avoid conflicts with the first 5
- * argument registers to call the helper.  Some of these are
- * also used for the tlb lookup.
- */
-#ifdef CONFIG_SOFTMMU
-#define SOFTMMU_RESERVE_REGS  MAKE_64BIT_MASK(TCG_REG_A0, 5)
-#else
-#define SOFTMMU_RESERVE_REGS  0
-#endif
+#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)

-
-static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
-{
-    if (TCG_TARGET_REG_BITS == 32) {
-        return sextract32(val, pos, len);
-    } else {
-        return sextract64(val, pos, len);
-    }
-}
+#define sextreg  sextract64

 /* test if a constant matches the constraint */
 static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
@@ -172,12 +169,19 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
     }
     /*
      * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
-     * Used by addsub2, which may need the negative operation,
+     * Used by addsub2 and movcond, which may need the negative value,
      * and requires the modified constant to be representable.
      */
     if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
         return 1;
     }
+    /*
+     * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff].
+     * Used to map ANDN back to ANDI, etc.
+     */
+    if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) {
+        return 1;
+    }
     return 0;
 }

@@ -235,7 +239,6 @@ typedef enum {
     OPC_XOR = 0x4033,
     OPC_XORI = 0x4013,

-#if TCG_TARGET_REG_BITS == 64
     OPC_ADDIW = 0x1b,
     OPC_ADDW = 0x3b,
     OPC_DIVUW = 0x200503b,
@@ -250,26 +253,37 @@ typedef enum {
     OPC_SRLIW = 0x501b,
     OPC_SRLW = 0x503b,
     OPC_SUBW = 0x4000003b,
-#else
-    /* Simplify code throughout by defining aliases for RV32. */
-    OPC_ADDIW = OPC_ADDI,
-    OPC_ADDW = OPC_ADD,
-    OPC_DIVUW = OPC_DIVU,
-    OPC_DIVW = OPC_DIV,
-    OPC_MULW = OPC_MUL,
-    OPC_REMUW = OPC_REMU,
-    OPC_REMW = OPC_REM,
-    OPC_SLLIW = OPC_SLLI,
-    OPC_SLLW = OPC_SLL,
-    OPC_SRAIW = OPC_SRAI,
-    OPC_SRAW = OPC_SRA,
-    OPC_SRLIW = OPC_SRLI,
-    OPC_SRLW = OPC_SRL,
-    OPC_SUBW = OPC_SUB,
-#endif

     OPC_FENCE = 0x0000000f,
     OPC_NOP = OPC_ADDI, /* nop = addi r0,r0,0 */
+
+    /* Zba: Bit manipulation extension, address generation */
+    OPC_ADD_UW = 0x0800003b,
+
+    /* Zbb: Bit manipulation extension, basic bit manipulation */
+    OPC_ANDN = 0x40007033,
+    OPC_CLZ = 0x60001013,
+    OPC_CLZW = 0x6000101b,
+    OPC_CPOP = 0x60201013,
+    OPC_CPOPW = 0x6020101b,
+    OPC_CTZ = 0x60101013,
+    OPC_CTZW = 0x6010101b,
+    OPC_ORN = 0x40006033,
+    OPC_REV8 = 0x6b805013,
+    OPC_ROL = 0x60001033,
+    OPC_ROLW = 0x6000103b,
+    OPC_ROR = 0x60005033,
+    OPC_RORW = 0x6000503b,
+    OPC_RORI = 0x60005013,
+    OPC_RORIW = 0x6000501b,
+    OPC_SEXT_B = 0x60401013,
+    OPC_SEXT_H = 0x60501013,
+    OPC_XNOR = 0x40004033,
+    OPC_ZEXT_H = 0x0800403b,
+
+    /* Zicond: integer conditional operations */
+    OPC_CZERO_EQZ = 0x0e005033,
+    OPC_CZERO_NEZ = 0x0e007033,
 } RISCVInsn;

/*
@@ -500,7 +514,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
     tcg_target_long lo, hi, tmp;
     int shift, ret;

-    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
+    if (type == TCG_TYPE_I32) {
         val = (int32_t)val;
     }

@@ -511,7 +525,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
     }

     hi = val - lo;
-    if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
+    if (val == (int32_t)val) {
         tcg_out_opc_upper(s, OPC_LUI, rd, hi);
         if (lo != 0) {
             tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
@@ -519,7 +533,6 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
         return;
     }

-    /* We can only be here if TCG_TARGET_REG_BITS != 32 */
     tmp = tcg_pcrel_diff(s, (void *)val);
     if (tmp == (int32_t)tmp) {
         tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
@@ -561,6 +574,11 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
     tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
 }

+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+    return false;
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -575,26 +593,42 @@ static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)

 static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
 {
-    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
-    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
+    if (have_zbb) {
+        tcg_out_opc_reg(s, OPC_ZEXT_H, ret, arg, TCG_REG_ZERO);
+    } else {
+        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
+        tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
+    }
 }

 static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
 {
-    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
-    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
+    if (have_zba) {
+        tcg_out_opc_reg(s, OPC_ADD_UW, ret, arg, TCG_REG_ZERO);
+    } else {
+        tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
+        tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
+    }
 }

-static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
 {
-    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
-    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
+    if (have_zbb) {
+        tcg_out_opc_imm(s, OPC_SEXT_B, ret, arg, 0);
+    } else {
+        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
+        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
+    }
 }

-static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
 {
-    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
-    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
+    if (have_zbb) {
+        tcg_out_opc_imm(s, OPC_SEXT_H, ret, arg, 0);
+    } else {
+        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
+        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
+    }
 }

 static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
@@ -602,6 +636,23 @@ static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
     tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
 }

+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+    if (ret != arg) {
+        tcg_out_ext32s(s, ret, arg);
+    }
+}
+
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+    tcg_out_ext32u(s, ret, arg);
+}
+
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+    tcg_out_ext32s(s, ret, arg);
+}
+
 static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                          TCGReg addr, intptr_t offset)
 {
@@ -646,15 +697,15 @@ static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                        TCGReg arg1, intptr_t arg2)
 {
-    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
-    tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
+    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD;
+    tcg_out_ldst(s, insn, arg, arg1, arg2);
 }

 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                        TCGReg arg1, intptr_t arg2)
 {
-    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
-    tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
+    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD;
+    tcg_out_ldst(s, insn, arg, arg1, arg2);
 }

 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
@@ -761,64 +812,271 @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
     tcg_out_opc_branch(s, op, arg1, arg2, 0);
 }

-static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
-                            TCGReg arg1, TCGReg arg2)
+#define SETCOND_INV    TCG_TARGET_NB_REGS
+#define SETCOND_NEZ    (SETCOND_INV << 1)
+#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)
+
+static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg arg1, tcg_target_long arg2, bool c2)
 {
+    int flags = 0;
+
     switch (cond) {
-    case TCG_COND_EQ:
-        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
-        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
-        break;
-    case TCG_COND_NE:
-        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
-        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
-        break;
-    case TCG_COND_LT:
-        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
-        break;
-    case TCG_COND_GE:
-        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
-        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
-        break;
-    case TCG_COND_LE:
-        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
-        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
-        break;
-    case TCG_COND_GT:
-        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
-        break;
-    case TCG_COND_LTU:
-        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
-        break;
-    case TCG_COND_GEU:
-        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
-        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
-        break;
-    case TCG_COND_LEU:
-        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
-        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
-        break;
-    case TCG_COND_GTU:
-        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
+    case TCG_COND_EQ:     /* -> NE  */
+    case TCG_COND_GE:     /* -> LT  */
+    case TCG_COND_GEU:    /* -> LTU */
+    case TCG_COND_GT:     /* -> LE  */
+    case TCG_COND_GTU:    /* -> LEU */
+        cond = tcg_invert_cond(cond);
+        flags ^= SETCOND_INV;
         break;
     default:
-        g_assert_not_reached();
-        break;
-    }
+        break;
+    }
+
+    switch (cond) {
+    case TCG_COND_LE:
+    case TCG_COND_LEU:
+        /*
+         * If we have a constant input, the most efficient way to implement
+         * LE is by adding 1 and using LT.  Watch out for wrap around for LEU.
+         * We don't need to care for this for LE because the constant input
+         * is constrained to signed 12-bit, and 0x800 is representable in the
+         * temporary register.
+         */
+        if (c2) {
+            if (cond == TCG_COND_LEU) {
+                /* unsigned <= -1 is true */
+                if (arg2 == -1) {
+                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
+                    return ret;
+                }
+                cond = TCG_COND_LTU;
+            } else {
+                cond = TCG_COND_LT;
+            }
+            tcg_debug_assert(arg2 <= 0x7ff);
+            if (++arg2 == 0x800) {
+                tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
+                arg2 = TCG_REG_TMP0;
+                c2 = false;
+            }
+        } else {
+            TCGReg tmp = arg2;
+            arg2 = arg1;
+            arg1 = tmp;
+            cond = tcg_swap_cond(cond);    /* LE -> GE */
+            cond = tcg_invert_cond(cond);  /* GE -> LT */
+            flags ^= SETCOND_INV;
+        }
+        break;
+    default:
+        break;
+    }
+
+    switch (cond) {
+    case TCG_COND_NE:
+        flags |= SETCOND_NEZ;
+        if (!c2) {
+            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
+        } else if (arg2 == 0) {
+            ret = arg1;
+        } else {
+            tcg_out_opc_imm(s, OPC_XORI, ret, arg1, arg2);
+        }
+        break;
+
+    case TCG_COND_LT:
+        if (c2) {
+            tcg_out_opc_imm(s, OPC_SLTI, ret, arg1, arg2);
+        } else {
+            tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
+        }
+        break;
+
+    case TCG_COND_LTU:
+        if (c2) {
+            tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, arg2);
+        } else {
+            tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
+        }
+        break;
+
+    default:
+        g_assert_not_reached();
+    }
+
+    return ret | flags;
 }

-static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
-                            TCGReg bl, TCGReg bh, TCGLabel *l)
+static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
+                            TCGReg arg1, tcg_target_long arg2, bool c2)
 {
-    /* todo */
-    g_assert_not_reached();
+    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
+
+    if (tmpflags != ret) {
+        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
+
+        switch (tmpflags & SETCOND_FLAGS) {
+        case SETCOND_INV:
+            /* Intermediate result is boolean: simply invert. */
+            tcg_out_opc_imm(s, OPC_XORI, ret, tmp, 1);
+            break;
+        case SETCOND_NEZ:
+            /* Intermediate result is zero/non-zero: test != 0. */
+            tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
+            break;
+        case SETCOND_NEZ | SETCOND_INV:
+            /* Intermediate result is zero/non-zero: test == 0. */
+            tcg_out_opc_imm(s, OPC_SLTIU, ret, tmp, 1);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    }
 }

-static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
-                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
+static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne,
+                                   int val1, bool c_val1,
+                                   int val2, bool c_val2)
 {
-    /* todo */
-    g_assert_not_reached();
+    if (val1 == 0) {
+        if (c_val2) {
+            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val2);
+            val2 = TCG_REG_TMP1;
+        }
+        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, val2, test_ne);
+        return;
+    }
+
+    if (val2 == 0) {
+        if (c_val1) {
+            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1);
+            val1 = TCG_REG_TMP1;
+        }
+        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, val1, test_ne);
+        return;
+    }
+
+    if (c_val2) {
+        if (c_val1) {
+            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1 - val2);
+        } else {
+            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val1, -val2);
+        }
+        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, TCG_REG_TMP1, test_ne);
+        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val2);
+        return;
+    }
+
+    if (c_val1) {
+        tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val2, -val1);
+        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, TCG_REG_TMP1, test_ne);
+        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val1);
+        return;
+    }
+
+    tcg_out_opc_reg(s, OPC_CZERO_NEZ, TCG_REG_TMP1, val2, test_ne);
+    tcg_out_opc_reg(s, OPC_CZERO_EQZ, TCG_REG_TMP0, val1, test_ne);
+    tcg_out_opc_reg(s, OPC_OR, ret, TCG_REG_TMP0, TCG_REG_TMP1);
+}
+
+static void tcg_out_movcond_br1(TCGContext *s, TCGCond cond, TCGReg ret,
+                                TCGReg cmp1, TCGReg cmp2,
+                                int val, bool c_val)
+{
+    RISCVInsn op;
+    int disp = 8;
+
+    tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_brcond_to_riscv));
+    op = tcg_brcond_to_riscv[cond].op;
+    tcg_debug_assert(op != 0);
+
+    if (tcg_brcond_to_riscv[cond].swap) {
+        tcg_out_opc_branch(s, op, cmp2, cmp1, disp);
+    } else {
+        tcg_out_opc_branch(s, op, cmp1, cmp2, disp);
+    }
+    if (c_val) {
+        tcg_out_opc_imm(s, OPC_ADDI, ret, TCG_REG_ZERO, val);
+    } else {
+        tcg_out_opc_imm(s, OPC_ADDI, ret, val, 0);
+    }
+}
+
+static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret,
+                                TCGReg cmp1, TCGReg cmp2,
+                                int val1, bool c_val1,
+                                int val2, bool c_val2)
+{
+    TCGReg tmp;
+
+    /* TCG optimizer reorders to prefer ret matching val2. */
+    if (!c_val2 && ret == val2) {
+        cond = tcg_invert_cond(cond);
+        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val1, c_val1);
+        return;
+    }
+
+    if (!c_val1 && ret == val1) {
+        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val2, c_val2);
+        return;
+    }
+
+    tmp = (ret == cmp1 || ret == cmp2 ? TCG_REG_TMP1 : ret);
+    if (c_val1) {
+        tcg_out_movi(s, TCG_TYPE_REG, tmp, val1);
+    } else {
+        tcg_out_mov(s, TCG_TYPE_REG, tmp, val1);
+    }
+    tcg_out_movcond_br1(s, cond, tmp, cmp1, cmp2, val2, c_val2);
+    tcg_out_mov(s, TCG_TYPE_REG, ret, tmp);
+}
+
+static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
+                            TCGReg cmp1, int cmp2, bool c_cmp2,
+                            TCGReg val1, bool c_val1,
+                            TCGReg val2, bool c_val2)
+{
+    int tmpflags;
+    TCGReg t;
+
+    if (!have_zicond && (!c_cmp2 || cmp2 == 0)) {
+        tcg_out_movcond_br2(s, cond, ret, cmp1, cmp2,
+                            val1, c_val1, val2, c_val2);
+        return;
+    }
+
+    tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, cmp1, cmp2, c_cmp2);
+    t = tmpflags & ~SETCOND_FLAGS;
+
+    if (have_zicond) {
+        if (tmpflags & SETCOND_INV) {
+            tcg_out_movcond_zicond(s, ret, t, val2, c_val2, val1, c_val1);
+        } else {
+            tcg_out_movcond_zicond(s, ret, t, val1, c_val1, val2, c_val2);
+        }
+    } else {
+        cond = tmpflags & SETCOND_INV ? TCG_COND_EQ : TCG_COND_NE;
+        tcg_out_movcond_br2(s, cond, ret, t, TCG_REG_ZERO,
+                            val1, c_val1, val2, c_val2);
+    }
+}
+
+static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
+                         TCGReg ret, TCGReg src1, int src2, bool c_src2)
+{
+    tcg_out_opc_imm(s, insn, ret, src1, 0);
+
+    if (!c_src2 || src2 != (type == TCG_TYPE_I32 ? 32 : 64)) {
+        /*
+         * The requested zero result does not match the insn, so adjust.
+         * Note that constraints put 'ret' in a new register, so the
+         * computation above did not clobber either 'src1' or 'src2'.
+         */
+        tcg_out_movcond(s, TCG_COND_EQ, ret, src1, 0, true,
+                        src2, c_src2, ret, false);
+    }
 }

 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
@@ -831,20 +1089,18 @@ static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
     if (offset == sextreg(offset, 0, 20)) {
         /* short jump: -2097150 to 2097152 */
         tcg_out_opc_jump(s, OPC_JAL, link, offset);
-    } else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) {
+    } else if (offset == (int32_t)offset) {
         /* long jump: -2147483646 to 2147483648 */
         tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
         tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
         ret = reloc_call(s->code_ptr - 2, arg);
         tcg_debug_assert(ret == true);
-    } else if (TCG_TARGET_REG_BITS == 64) {
+    } else {
         /* far jump: 64-bit */
         tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
         tcg_target_long base = (tcg_target_long)arg - imm;
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
         tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
-    } else {
-        g_assert_not_reached();
     }
 }

@@ -877,56 +1133,6 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
  * Load/store and TLB
  */

-#if defined(CONFIG_SOFTMMU)
-/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
- *                                     MemOpIdx oi, uintptr_t ra)
- */
-static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
-    [MO_UB] = helper_ret_ldub_mmu,
-    [MO_SB] = helper_ret_ldsb_mmu,
-#if HOST_BIG_ENDIAN
-    [MO_UW] = helper_be_lduw_mmu,
-    [MO_SW] = helper_be_ldsw_mmu,
-    [MO_UL] = helper_be_ldul_mmu,
-#if TCG_TARGET_REG_BITS == 64
-    [MO_SL] = helper_be_ldsl_mmu,
-#endif
-    [MO_UQ] = helper_be_ldq_mmu,
-#else
-    [MO_UW] = helper_le_lduw_mmu,
-    [MO_SW] = helper_le_ldsw_mmu,
-    [MO_UL] = helper_le_ldul_mmu,
-#if TCG_TARGET_REG_BITS == 64
-    [MO_SL] = helper_le_ldsl_mmu,
-#endif
-    [MO_UQ] = helper_le_ldq_mmu,
-#endif
-};
-
-/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
- *                                     uintxx_t val, MemOpIdx oi,
- *                                     uintptr_t ra)
- */
-static void * const qemu_st_helpers[MO_SIZE + 1] = {
-    [MO_8] = helper_ret_stb_mmu,
-#if HOST_BIG_ENDIAN
-    [MO_16] = helper_be_stw_mmu,
-    [MO_32] = helper_be_stl_mmu,
-    [MO_64] = helper_be_stq_mmu,
-#else
-    [MO_16] = helper_le_stw_mmu,
-    [MO_32] = helper_le_stl_mmu,
-    [MO_64] = helper_le_stq_mmu,
-#endif
-};
-
-/* We don't support oversize guests */
-QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);
-
-/* We expect to use a 12-bit negative offset from ENV. */
*/ -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); - static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) { tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0); @@ -934,93 +1140,19 @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) tcg_debug_assert(ok); } -static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl, - TCGReg addrh, MemOpIdx oi, - tcg_insn_unit **label_ptr, bool is_load) +bool tcg_target_has_memory_bswap(MemOp memop) { - MemOp opc = get_memop(oi); - unsigned s_bits = opc & MO_SIZE; - unsigned a_bits = get_alignment_bits(opc); - tcg_target_long compare_mask; - int mem_index = get_mmuidx(oi); - int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); - int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); - int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); - TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0; - - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs); - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs); - - tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); - tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); - tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); - - /* Load the tlb comparator and the addend. */ - tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2, - is_load ? offsetof(CPUTLBEntry, addr_read) - : offsetof(CPUTLBEntry, addr_write)); - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, - offsetof(CPUTLBEntry, addend)); - - /* We don't support unaligned accesses. */ - if (a_bits < s_bits) { - a_bits = s_bits; - } - /* Clear the non-page, non-alignment bits from the address. */ - compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1); - if (compare_mask == sextreg(compare_mask, 0, 12)) { - tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask); - } else { - tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); - tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl); - } - - /* Compare masked address with the TLB entry. */ - label_ptr[0] = s->code_ptr; - tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0); - - /* TLB Hit - translate address using addend. */ - if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { - tcg_out_ext32u(s, TCG_REG_TMP0, addrl); - addrl = TCG_REG_TMP0; - } - tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl); - return TCG_REG_TMP0; + return false; } -static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi, - TCGType ext, - TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - void *raddr, tcg_insn_unit **label_ptr) -{ - TCGLabelQemuLdst *label = new_ldst_label(s); - - label->is_ld = is_ld; - label->oi = oi; - label->type = ext; - label->datalo_reg = datalo; - label->datahi_reg = datahi; - label->addrlo_reg = addrlo; - label->addrhi_reg = addrhi; - label->raddr = tcg_splitwx_to_rx(raddr); - label->label_ptr[0] = label_ptr[0]; -} +/* We have three temps, we might as well expose them. 
*/ +static const TCGLdstHelperParam ldst_helper_param = { + .ntmp = 3, .tmp = { TCG_REG_TMP0, TCG_REG_TMP1, TCG_REG_TMP2 } +}; static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { - MemOpIdx oi = l->oi; - MemOp opc = get_memop(oi); - TCGReg a0 = tcg_target_call_iarg_regs[0]; - TCGReg a1 = tcg_target_call_iarg_regs[1]; - TCGReg a2 = tcg_target_call_iarg_regs[2]; - TCGReg a3 = tcg_target_call_iarg_regs[3]; - - /* We don't support oversize guests */ - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - g_assert_not_reached(); - } + MemOp opc = get_memop(l->oi); /* resolve label address */ if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { @@ -1028,13 +1160,9 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) } /* call load helper */ - tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0); - tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg); - tcg_out_movi(s, TCG_TYPE_PTR, a2, oi); - tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr); - + tcg_out_ld_helper_args(s, l, &ldst_helper_param); tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false); - tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0); + tcg_out_ld_helper_ret(s, l, true, &ldst_helper_param); tcg_out_goto(s, l->raddr); return true; @@ -1042,19 +1170,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { - MemOpIdx oi = l->oi; - MemOp opc = get_memop(oi); - MemOp s_bits = opc & MO_SIZE; - TCGReg a0 = tcg_target_call_iarg_regs[0]; - TCGReg a1 = tcg_target_call_iarg_regs[1]; - TCGReg a2 = tcg_target_call_iarg_regs[2]; - TCGReg a3 = tcg_target_call_iarg_regs[3]; - TCGReg a4 = tcg_target_call_iarg_regs[4]; - - /* We don't support oversize guests */ - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - g_assert_not_reached(); - } + MemOp opc = get_memop(l->oi); /* resolve label address */ if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { @@ -1062,168 +1178,188 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) } /* call store helper */ - tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0); - tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg); - switch (s_bits) { - case MO_8: - tcg_out_ext8u(s, a2, a2); - break; - case MO_16: - tcg_out_ext16u(s, a2, a2); - break; - default: - break; - } - tcg_out_movi(s, TCG_TYPE_PTR, a3, oi); - tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr); - + tcg_out_st_helper_args(s, l, &ldst_helper_param); tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false); tcg_out_goto(s, l->raddr); return true; } -#else -static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg, - unsigned a_bits) +/* + * For softmmu, perform the TLB load and compare. + * For useronly, perform any required alignment tests. + * In both cases, return a TCGLabelQemuLdst structure if the slow path + * is required and fill in @h with the host address for the fast path. 
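+ * (For this backend the host address is a single base register, returned
+ * via @pbase; a NULL return means no slow path is required and the access
+ * may be issued directly.)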
+ */ +static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, + TCGReg addr_reg, MemOpIdx oi, + bool is_ld) { - unsigned a_mask = (1 << a_bits) - 1; - TCGLabelQemuLdst *l = new_ldst_label(s); + TCGLabelQemuLdst *ldst = NULL; + MemOp opc = get_memop(oi); + TCGAtomAlign aa; + unsigned a_mask; - l->is_ld = is_ld; - l->addrlo_reg = addr_reg; + aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); + a_mask = (1u << aa.align) - 1; - /* We are expecting a_bits to max out at 7, so we can always use andi. */ - tcg_debug_assert(a_bits < 12); - tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask); +#ifdef CONFIG_SOFTMMU + unsigned s_bits = opc & MO_SIZE; + unsigned s_mask = (1u << s_bits) - 1; + int mem_index = get_mmuidx(oi); + int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); + int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); + int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); + int compare_mask; + TCGReg addr_adj; - l->label_ptr[0] = s->code_ptr; - tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0); + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addr_reg; - l->raddr = tcg_splitwx_to_rx(s->code_ptr); -} + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - /* resolve label address */ - if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; + tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg, + s->page_bits - CPU_TLB_ENTRY_BITS); + tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); + + /* + * For aligned accesses, we check the first byte and include the alignment + * bits within the address. For unaligned access, we check that we don't + * cross pages using the address of the last byte of the access. + */ + addr_adj = addr_reg; + if (a_mask < s_mask) { + addr_adj = TCG_REG_TMP0; + tcg_out_opc_imm(s, TARGET_LONG_BITS == 32 ? OPC_ADDIW : OPC_ADDI, + addr_adj, addr_reg, s_mask - a_mask); + } + compare_mask = s->page_mask | a_mask; + if (compare_mask == sextreg(compare_mask, 0, 12)) { + tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask); + } else { + tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); + tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj); } - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); + /* Load the tlb comparator and the addend. */ + tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2, + is_ld ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write)); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, + offsetof(CPUTLBEntry, addend)); - /* tail call, with the return address back inline. */ - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr); - tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld - : helper_unaligned_st), true); - return true; + /* Compare masked address with the TLB entry. */ + ldst->label_ptr[0] = s->code_ptr; + tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0); + + /* TLB Hit - translate address using addend. 
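+     * For a 32-bit guest the address must first be zero-extended;
+     * with Zba, add.uw performs that zero-extension and the add of
+     * the addend in one instruction.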
*/ + if (TARGET_LONG_BITS == 64) { + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2); + } else if (have_zba) { + tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2); + } else { + tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg); + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, TCG_REG_TMP2); + } + *pbase = TCG_REG_TMP0; +#else + TCGReg base; + + if (a_mask) { + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addr_reg; + + /* We are expecting alignment max 7, so we can always use andi. */ + tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12)); + tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask); + + ldst->label_ptr[0] = s->code_ptr; + tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0); + } + + if (guest_base != 0) { + base = TCG_REG_TMP0; + if (TARGET_LONG_BITS == 64) { + tcg_out_opc_reg(s, OPC_ADD, base, addr_reg, TCG_GUEST_BASE_REG); + } else if (have_zba) { + tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg, TCG_GUEST_BASE_REG); + } else { + tcg_out_ext32u(s, base, addr_reg); + tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG); + } + } else if (TARGET_LONG_BITS == 64) { + base = addr_reg; + } else { + base = TCG_REG_TMP0; + tcg_out_ext32u(s, base, addr_reg); + } + *pbase = base; +#endif + + return ldst; } -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -#endif /* CONFIG_SOFTMMU */ - -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, MemOp opc, bool is_64) +static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val, + TCGReg base, MemOp opc, TCGType type) { /* Byte swapping is left to middle-end expansion. */ tcg_debug_assert((opc & MO_BSWAP) == 0); switch (opc & (MO_SSIZE)) { case MO_UB: - tcg_out_opc_imm(s, OPC_LBU, lo, base, 0); + tcg_out_opc_imm(s, OPC_LBU, val, base, 0); break; case MO_SB: - tcg_out_opc_imm(s, OPC_LB, lo, base, 0); + tcg_out_opc_imm(s, OPC_LB, val, base, 0); break; case MO_UW: - tcg_out_opc_imm(s, OPC_LHU, lo, base, 0); + tcg_out_opc_imm(s, OPC_LHU, val, base, 0); break; case MO_SW: - tcg_out_opc_imm(s, OPC_LH, lo, base, 0); + tcg_out_opc_imm(s, OPC_LH, val, base, 0); break; case MO_UL: - if (TCG_TARGET_REG_BITS == 64 && is_64) { - tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); + if (type == TCG_TYPE_I64) { + tcg_out_opc_imm(s, OPC_LWU, val, base, 0); break; } /* FALLTHRU */ case MO_SL: - tcg_out_opc_imm(s, OPC_LW, lo, base, 0); + tcg_out_opc_imm(s, OPC_LW, val, base, 0); break; case MO_UQ: - /* Prefer to load from offset 0 first, but allow for overlap. 
*/ - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_opc_imm(s, OPC_LD, lo, base, 0); - } else if (lo != base) { - tcg_out_opc_imm(s, OPC_LW, lo, base, 0); - tcg_out_opc_imm(s, OPC_LW, hi, base, 4); - } else { - tcg_out_opc_imm(s, OPC_LW, hi, base, 4); - tcg_out_opc_imm(s, OPC_LW, lo, base, 0); - } + tcg_out_opc_imm(s, OPC_LD, val, base, 0); break; default: g_assert_not_reached(); } } -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) +static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, + MemOpIdx oi, TCGType data_type) { - TCGReg addr_regl, addr_regh __attribute__((unused)); - TCGReg data_regl, data_regh; - MemOpIdx oi; - MemOp opc; -#if defined(CONFIG_SOFTMMU) - tcg_insn_unit *label_ptr[1]; -#else - unsigned a_bits; -#endif + TCGLabelQemuLdst *ldst; TCGReg base; - data_regl = *args++; - data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); - addr_regl = *args++; - addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); - oi = *args++; - opc = get_memop(oi); + ldst = prepare_host_addr(s, &base, addr_reg, oi, true); + tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type); -#if defined(CONFIG_SOFTMMU) - base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1); - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); - add_qemu_ldst_label(s, 1, oi, - (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), - data_regl, data_regh, addr_regl, addr_regh, - s->code_ptr, label_ptr); -#else - a_bits = get_alignment_bits(opc); - if (a_bits) { - tcg_out_test_alignment(s, true, addr_regl, a_bits); + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = data_reg; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - base = addr_regl; - if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { - tcg_out_ext32u(s, TCG_REG_TMP0, base); - base = TCG_REG_TMP0; - } - if (guest_base != 0) { - tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base); - base = TCG_REG_TMP0; - } - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); -#endif } -static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, +static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val, TCGReg base, MemOp opc) { /* Byte swapping is left to middle-end expansion. */ @@ -1231,70 +1367,36 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, switch (opc & (MO_SSIZE)) { case MO_8: - tcg_out_opc_store(s, OPC_SB, base, lo, 0); + tcg_out_opc_store(s, OPC_SB, base, val, 0); break; case MO_16: - tcg_out_opc_store(s, OPC_SH, base, lo, 0); + tcg_out_opc_store(s, OPC_SH, base, val, 0); break; case MO_32: - tcg_out_opc_store(s, OPC_SW, base, lo, 0); + tcg_out_opc_store(s, OPC_SW, base, val, 0); break; case MO_64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_opc_store(s, OPC_SD, base, lo, 0); - } else { - tcg_out_opc_store(s, OPC_SW, base, lo, 0); - tcg_out_opc_store(s, OPC_SW, base, hi, 4); - } + tcg_out_opc_store(s, OPC_SD, base, val, 0); break; default: g_assert_not_reached(); } } -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) +static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, + MemOpIdx oi, TCGType data_type) { - TCGReg addr_regl, addr_regh __attribute__((unused)); - TCGReg data_regl, data_regh; - MemOpIdx oi; - MemOp opc; -#if defined(CONFIG_SOFTMMU) - tcg_insn_unit *label_ptr[1]; -#else - unsigned a_bits; -#endif + TCGLabelQemuLdst *ldst; TCGReg base; - data_regl = *args++; - data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? 
*args++ : 0); - addr_regl = *args++; - addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); - oi = *args++; - opc = get_memop(oi); + ldst = prepare_host_addr(s, &base, addr_reg, oi, false); + tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi)); -#if defined(CONFIG_SOFTMMU) - base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0); - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); - add_qemu_ldst_label(s, 0, oi, - (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), - data_regl, data_regh, addr_regl, addr_regh, - s->code_ptr, label_ptr); -#else - a_bits = get_alignment_bits(opc); - if (a_bits) { - tcg_out_test_alignment(s, false, addr_regl, a_bits); + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = data_reg; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - base = addr_regl; - if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { - tcg_out_ext32u(s, TCG_REG_TMP0, base); - base = TCG_REG_TMP0; - } - if (guest_base != 0) { - tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base); - base = TCG_REG_TMP0; - } - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); -#endif } static const tcg_insn_unit *tb_ret_addr; @@ -1459,6 +1561,31 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, } break; + case INDEX_op_andc_i32: + case INDEX_op_andc_i64: + if (c2) { + tcg_out_opc_imm(s, OPC_ANDI, a0, a1, ~a2); + } else { + tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2); + } + break; + case INDEX_op_orc_i32: + case INDEX_op_orc_i64: + if (c2) { + tcg_out_opc_imm(s, OPC_ORI, a0, a1, ~a2); + } else { + tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2); + } + break; + case INDEX_op_eqv_i32: + case INDEX_op_eqv_i64: + if (c2) { + tcg_out_opc_imm(s, OPC_XORI, a0, a1, ~a2); + } else { + tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2); + } + break; + case INDEX_op_not_i32: case INDEX_op_not_i64: tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1); @@ -1551,6 +1678,80 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, } break; + case INDEX_op_rotl_i32: + if (c2) { + tcg_out_opc_imm(s, OPC_RORIW, a0, a1, -a2 & 0x1f); + } else { + tcg_out_opc_reg(s, OPC_ROLW, a0, a1, a2); + } + break; + case INDEX_op_rotl_i64: + if (c2) { + tcg_out_opc_imm(s, OPC_RORI, a0, a1, -a2 & 0x3f); + } else { + tcg_out_opc_reg(s, OPC_ROL, a0, a1, a2); + } + break; + + case INDEX_op_rotr_i32: + if (c2) { + tcg_out_opc_imm(s, OPC_RORIW, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_reg(s, OPC_RORW, a0, a1, a2); + } + break; + case INDEX_op_rotr_i64: + if (c2) { + tcg_out_opc_imm(s, OPC_RORI, a0, a1, a2 & 0x3f); + } else { + tcg_out_opc_reg(s, OPC_ROR, a0, a1, a2); + } + break; + + case INDEX_op_bswap64_i64: + tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0); + break; + case INDEX_op_bswap32_i32: + a2 = 0; + /* fall through */ + case INDEX_op_bswap32_i64: + tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0); + if (a2 & TCG_BSWAP_OZ) { + tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32); + } else { + tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32); + } + break; + case INDEX_op_bswap16_i64: + case INDEX_op_bswap16_i32: + tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0); + if (a2 & TCG_BSWAP_OZ) { + tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48); + } else { + tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48); + } + break; + + case INDEX_op_ctpop_i32: + tcg_out_opc_imm(s, OPC_CPOPW, a0, a1, 0); + break; + case INDEX_op_ctpop_i64: + tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0); + break; + + case INDEX_op_clz_i32: + tcg_out_cltz(s, TCG_TYPE_I32, OPC_CLZW, a0, a1, a2, c2); + break; + case INDEX_op_clz_i64: + tcg_out_cltz(s, TCG_TYPE_I64, OPC_CLZ, a0, a1, a2, c2); + break; + case 
INDEX_op_ctz_i32: + tcg_out_cltz(s, TCG_TYPE_I32, OPC_CTZW, a0, a1, a2, c2); + break; + case INDEX_op_ctz_i64: + tcg_out_cltz(s, TCG_TYPE_I64, OPC_CTZ, a0, a1, a2, c2); + break; + case INDEX_op_add2_i32: tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], const_args[4], const_args[5], false, true); @@ -1572,60 +1773,33 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_brcond_i64: tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); break; - case INDEX_op_brcond2_i32: - tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5])); - break; case INDEX_op_setcond_i32: case INDEX_op_setcond_i64: - tcg_out_setcond(s, args[3], a0, a1, a2); - break; - case INDEX_op_setcond2_i32: - tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]); + tcg_out_setcond(s, args[3], a0, a1, a2, c2); break; - case INDEX_op_qemu_ld_i32: - tcg_out_qemu_ld(s, args, false); - break; - case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, args, true); - break; - case INDEX_op_qemu_st_i32: - tcg_out_qemu_st(s, args, false); - break; - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, args, true); + case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: + tcg_out_movcond(s, args[5], a0, a1, a2, c2, + args[3], const_args[3], args[4], const_args[4]); break; - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - tcg_out_ext8u(s, a0, a1); + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); break; - - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - tcg_out_ext16u(s, a0, a1); + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: + tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); break; - - case INDEX_op_ext32u_i64: - case INDEX_op_extu_i32_i64: - tcg_out_ext32u(s, a0, a1); + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); break; - - case INDEX_op_ext8s_i32: - case INDEX_op_ext8s_i64: - tcg_out_ext8s(s, a0, a1); - break; - - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - tcg_out_ext16s(s, a0, a1); - break; - - case INDEX_op_ext32s_i64: - case INDEX_op_extrl_i64_i32: - case INDEX_op_ext_i32_i64: - tcg_out_ext32s(s, a0, a1); + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: + tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); break; case INDEX_op_extrh_i64_i32: @@ -1651,6 +1825,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_call: /* Always emitted via tcg_out_call. */ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ + case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. 
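(The register allocator expands these through the backend's tcg_out_ext* functions, so they can never reach this switch.)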
*/ + case INDEX_op_ext8s_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: default: g_assert_not_reached(); } @@ -1692,6 +1879,13 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_extrl_i64_i32: case INDEX_op_extrh_i64_i32: case INDEX_op_ext_i32_i64: + case INDEX_op_bswap16_i32: + case INDEX_op_bswap32_i32: + case INDEX_op_bswap16_i64: + case INDEX_op_bswap32_i64: + case INDEX_op_bswap64_i64: + case INDEX_op_ctpop_i32: + case INDEX_op_ctpop_i64: return C_O1_I1(r, r); case INDEX_op_st8_i32: @@ -1711,8 +1905,18 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_and_i64: case INDEX_op_or_i64: case INDEX_op_xor_i64: + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: return C_O1_I2(r, r, rI); + case INDEX_op_andc_i32: + case INDEX_op_andc_i64: + case INDEX_op_orc_i32: + case INDEX_op_orc_i64: + case INDEX_op_eqv_i32: + case INDEX_op_eqv_i64: + return C_O1_I2(r, r, rJ); + case INDEX_op_sub_i32: case INDEX_op_sub_i64: return C_O1_I2(r, rZ, rN); @@ -1724,7 +1928,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_divu_i32: case INDEX_op_rem_i32: case INDEX_op_remu_i32: - case INDEX_op_setcond_i32: case INDEX_op_mul_i64: case INDEX_op_mulsh_i64: case INDEX_op_muluh_i64: @@ -1732,47 +1935,50 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_divu_i64: case INDEX_op_rem_i64: case INDEX_op_remu_i64: - case INDEX_op_setcond_i64: return C_O1_I2(r, rZ, rZ); case INDEX_op_shl_i32: case INDEX_op_shr_i32: case INDEX_op_sar_i32: + case INDEX_op_rotl_i32: + case INDEX_op_rotr_i32: case INDEX_op_shl_i64: case INDEX_op_shr_i64: case INDEX_op_sar_i64: + case INDEX_op_rotl_i64: + case INDEX_op_rotr_i64: return C_O1_I2(r, r, ri); + case INDEX_op_clz_i32: + case INDEX_op_clz_i64: + case INDEX_op_ctz_i32: + case INDEX_op_ctz_i64: + return C_N1_I2(r, r, rM); + case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: return C_O0_I2(rZ, rZ); + case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: + return C_O1_I4(r, r, rI, rM, rM); + case INDEX_op_add2_i32: case INDEX_op_add2_i64: case INDEX_op_sub2_i32: case INDEX_op_sub2_i64: return C_O2_I4(r, r, rZ, rZ, rM, rM); - case INDEX_op_brcond2_i32: - return C_O0_I4(rZ, rZ, rZ, rZ); - - case INDEX_op_setcond2_i32: - return C_O1_I4(r, rZ, rZ, rZ, rZ); - - case INDEX_op_qemu_ld_i32: - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - ? C_O1_I1(r, L) : C_O1_I2(r, L, L)); - case INDEX_op_qemu_st_i32: - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - ? C_O0_I2(LZ, L) : C_O0_I3(LZ, L, L)); - case INDEX_op_qemu_ld_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L) - : C_O2_I2(r, r, L, L)); - case INDEX_op_qemu_st_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(LZ, L) - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 
C_O0_I3(LZ, LZ, L)
-                : C_O0_I4(LZ, LZ, L, L));
+    case INDEX_op_qemu_ld_a32_i32:
+    case INDEX_op_qemu_ld_a64_i32:
+    case INDEX_op_qemu_ld_a32_i64:
+    case INDEX_op_qemu_ld_a64_i64:
+        return C_O1_I1(r, r);
+    case INDEX_op_qemu_st_a32_i32:
+    case INDEX_op_qemu_st_a64_i32:
+    case INDEX_op_qemu_st_a32_i64:
+    case INDEX_op_qemu_st_a64_i64:
+        return C_O0_I2(rZ, r);

     default:
         g_assert_not_reached();
@@ -1845,12 +2051,64 @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
 }

+static volatile sig_atomic_t got_sigill;
+
+static void sigill_handler(int signo, siginfo_t *si, void *data)
+{
+    /* Skip the faulty instruction */
+    ucontext_t *uc = (ucontext_t *)data;
+    uc->uc_mcontext.__gregs[REG_PC] += 4;
+
+    got_sigill = 1;
+}
+
+static void tcg_target_detect_isa(void)
+{
+#if !defined(have_zba) || !defined(have_zbb) || !defined(have_zicond)
+    /*
+     * TODO: It is expected that this will be determinable via
+     * linux riscv_hwprobe syscall, not yet merged.
+     * In the meantime, test via sigill.
+     */
+
+    struct sigaction sa_old, sa_new;
+
+    memset(&sa_new, 0, sizeof(sa_new));
+    sa_new.sa_flags = SA_SIGINFO;
+    sa_new.sa_sigaction = sigill_handler;
+    sigaction(SIGILL, &sa_new, &sa_old);
+
+#ifndef have_zba
+    /* Probe for Zba: add.uw zero,zero,zero. */
+    got_sigill = 0;
+    asm volatile(".insn r 0x3b, 0, 0x04, zero, zero, zero" : : : "memory");
+    have_zba = !got_sigill;
+#endif
+
+#ifndef have_zbb
+    /* Probe for Zbb: andn zero,zero,zero. */
+    got_sigill = 0;
+    asm volatile(".insn r 0x33, 7, 0x20, zero, zero, zero" : : : "memory");
+    have_zbb = !got_sigill;
+#endif
+
+#ifndef have_zicond
+    /* Probe for Zicond: czero.eqz zero,zero,zero. */
+    got_sigill = 0;
+    asm volatile(".insn r 0x33, 5, 0x07, zero, zero, zero" : : : "memory");
+    have_zicond = !got_sigill;
+#endif
+
+    sigaction(SIGILL, &sa_old, NULL);
+#endif
+}
+
 static void tcg_target_init(TCGContext *s)
 {
+    tcg_target_detect_isa();
+
     tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
-    if (TCG_TARGET_REG_BITS == 64) {
-        tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
-    }
+    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;

     tcg_target_call_clobber_regs = -1u;
     tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index 0deb33701f..54fdff0caa 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -25,14 +25,16 @@
 #ifndef RISCV_TCG_TARGET_H
 #define RISCV_TCG_TARGET_H

-#if __riscv_xlen == 32
-# define TCG_TARGET_REG_BITS 32
-#elif __riscv_xlen == 64
-# define TCG_TARGET_REG_BITS 64
+/*
+ * We don't support oversize guests.
+ * Since we will only build tcg once, this in turn requires a 64-bit host.
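+ * (An "oversize" guest is one whose TARGET_LONG_BITS exceeds
+ * TCG_TARGET_REG_BITS; dropping that case is what allows the 32-bit
+ * riscv host support to be removed.)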
+ */ +#if __riscv_xlen != 64 +#error "unsupported code generation mode" #endif +#define TCG_TARGET_REG_BITS 64 #define TCG_TARGET_INSN_UNIT_SIZE 4 -#define TCG_TARGET_TLB_DISPLACEMENT_BITS 20 #define TCG_TARGET_NB_REGS 32 #define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) @@ -83,21 +85,22 @@ typedef enum { #define TCG_TARGET_STACK_ALIGN 16 #define TCG_TARGET_CALL_STACK_OFFSET 0 #define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL -#if TCG_TARGET_REG_BITS == 32 -#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN -#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN -#else #define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL #define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL -#endif #define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL +#if defined(__riscv_arch_test) && defined(__riscv_zbb) +# define have_zbb true +#else +extern bool have_zbb; +#endif + /* optional instructions */ -#define TCG_TARGET_HAS_movcond_i32 0 +#define TCG_TARGET_HAS_movcond_i32 1 #define TCG_TARGET_HAS_div_i32 1 #define TCG_TARGET_HAS_rem_i32 1 #define TCG_TARGET_HAS_div2_i32 0 -#define TCG_TARGET_HAS_rot_i32 0 +#define TCG_TARGET_HAS_rot_i32 have_zbb #define TCG_TARGET_HAS_deposit_i32 0 #define TCG_TARGET_HAS_extract_i32 0 #define TCG_TARGET_HAS_sextract_i32 0 @@ -106,34 +109,33 @@ typedef enum { #define TCG_TARGET_HAS_sub2_i32 1 #define TCG_TARGET_HAS_mulu2_i32 0 #define TCG_TARGET_HAS_muls2_i32 0 -#define TCG_TARGET_HAS_muluh_i32 (TCG_TARGET_REG_BITS == 32) -#define TCG_TARGET_HAS_mulsh_i32 (TCG_TARGET_REG_BITS == 32) +#define TCG_TARGET_HAS_muluh_i32 0 +#define TCG_TARGET_HAS_mulsh_i32 0 #define TCG_TARGET_HAS_ext8s_i32 1 #define TCG_TARGET_HAS_ext16s_i32 1 #define TCG_TARGET_HAS_ext8u_i32 1 #define TCG_TARGET_HAS_ext16u_i32 1 -#define TCG_TARGET_HAS_bswap16_i32 0 -#define TCG_TARGET_HAS_bswap32_i32 0 +#define TCG_TARGET_HAS_bswap16_i32 have_zbb +#define TCG_TARGET_HAS_bswap32_i32 have_zbb #define TCG_TARGET_HAS_not_i32 1 #define TCG_TARGET_HAS_neg_i32 1 -#define TCG_TARGET_HAS_andc_i32 0 -#define TCG_TARGET_HAS_orc_i32 0 -#define TCG_TARGET_HAS_eqv_i32 0 +#define TCG_TARGET_HAS_andc_i32 have_zbb +#define TCG_TARGET_HAS_orc_i32 have_zbb +#define TCG_TARGET_HAS_eqv_i32 have_zbb #define TCG_TARGET_HAS_nand_i32 0 #define TCG_TARGET_HAS_nor_i32 0 -#define TCG_TARGET_HAS_clz_i32 0 -#define TCG_TARGET_HAS_ctz_i32 0 -#define TCG_TARGET_HAS_ctpop_i32 0 +#define TCG_TARGET_HAS_clz_i32 have_zbb +#define TCG_TARGET_HAS_ctz_i32 have_zbb +#define TCG_TARGET_HAS_ctpop_i32 have_zbb #define TCG_TARGET_HAS_brcond2 1 #define TCG_TARGET_HAS_setcond2 1 #define TCG_TARGET_HAS_qemu_st8_i32 0 -#if TCG_TARGET_REG_BITS == 64 -#define TCG_TARGET_HAS_movcond_i64 0 +#define TCG_TARGET_HAS_movcond_i64 1 #define TCG_TARGET_HAS_div_i64 1 #define TCG_TARGET_HAS_rem_i64 1 #define TCG_TARGET_HAS_div2_i64 0 -#define TCG_TARGET_HAS_rot_i64 0 +#define TCG_TARGET_HAS_rot_i64 have_zbb #define TCG_TARGET_HAS_deposit_i64 0 #define TCG_TARGET_HAS_extract_i64 0 #define TCG_TARGET_HAS_sextract_i64 0 @@ -146,32 +148,31 @@ typedef enum { #define TCG_TARGET_HAS_ext8u_i64 1 #define TCG_TARGET_HAS_ext16u_i64 1 #define TCG_TARGET_HAS_ext32u_i64 1 -#define TCG_TARGET_HAS_bswap16_i64 0 -#define TCG_TARGET_HAS_bswap32_i64 0 -#define TCG_TARGET_HAS_bswap64_i64 0 +#define TCG_TARGET_HAS_bswap16_i64 have_zbb +#define TCG_TARGET_HAS_bswap32_i64 have_zbb +#define TCG_TARGET_HAS_bswap64_i64 have_zbb #define TCG_TARGET_HAS_not_i64 1 #define TCG_TARGET_HAS_neg_i64 1 -#define TCG_TARGET_HAS_andc_i64 0 -#define TCG_TARGET_HAS_orc_i64 0 -#define TCG_TARGET_HAS_eqv_i64 0 +#define 
TCG_TARGET_HAS_andc_i64 have_zbb +#define TCG_TARGET_HAS_orc_i64 have_zbb +#define TCG_TARGET_HAS_eqv_i64 have_zbb #define TCG_TARGET_HAS_nand_i64 0 #define TCG_TARGET_HAS_nor_i64 0 -#define TCG_TARGET_HAS_clz_i64 0 -#define TCG_TARGET_HAS_ctz_i64 0 -#define TCG_TARGET_HAS_ctpop_i64 0 +#define TCG_TARGET_HAS_clz_i64 have_zbb +#define TCG_TARGET_HAS_ctz_i64 have_zbb +#define TCG_TARGET_HAS_ctpop_i64 have_zbb #define TCG_TARGET_HAS_add2_i64 1 #define TCG_TARGET_HAS_sub2_i64 1 #define TCG_TARGET_HAS_mulu2_i64 0 #define TCG_TARGET_HAS_muls2_i64 0 #define TCG_TARGET_HAS_muluh_i64 1 #define TCG_TARGET_HAS_mulsh_i64 1 -#endif + +#define TCG_TARGET_HAS_qemu_ldst_i128 0 #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS -#define TCG_TARGET_HAS_MEMORY_BSWAP 0 - #endif diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h index 15f1c55103..cbad91b2b5 100644 --- a/tcg/s390x/tcg-target-con-set.h +++ b/tcg/s390x/tcg-target-con-set.h @@ -10,12 +10,11 @@ * tcg-target-con-str.h; the constraint combination is inclusive or. */ C_O0_I1(r) -C_O0_I2(L, L) C_O0_I2(r, r) C_O0_I2(r, ri) C_O0_I2(r, rA) C_O0_I2(v, r) -C_O1_I1(r, L) +C_O0_I3(o, m, r) C_O1_I1(r, r) C_O1_I1(v, r) C_O1_I1(v, v) @@ -38,6 +37,7 @@ C_O1_I2(v, v, v) C_O1_I3(v, v, v, v) C_O1_I4(r, r, ri, rI, r) C_O1_I4(r, r, rA, rI, r) +C_O2_I1(o, m, r) C_O2_I2(o, m, 0, r) C_O2_I2(o, m, r, r) C_O2_I3(o, m, 0, 1, r) diff --git a/tcg/s390x/tcg-target-con-str.h b/tcg/s390x/tcg-target-con-str.h index 6fa64a1ed6..25675b449e 100644 --- a/tcg/s390x/tcg-target-con-str.h +++ b/tcg/s390x/tcg-target-con-str.h @@ -9,7 +9,6 @@ * REGS(letter, register_mask) */ REGS('r', ALL_GENERAL_REGS) -REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) REGS('v', ALL_VECTOR_REGS) REGS('o', 0xaaaa) /* odd numbered general regs */ diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc index 844532156b..503126cd66 100644 --- a/tcg/s390x/tcg-target.c.inc +++ b/tcg/s390x/tcg-target.c.inc @@ -44,18 +44,6 @@ #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16) #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) -/* - * For softmmu, we need to avoid conflicts with the first 3 - * argument registers to perform the tlb lookup, and to call - * the helper function. - */ -#ifdef CONFIG_SOFTMMU -#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_R2, 3) -#else -#define SOFTMMU_RESERVE_REGS 0 -#endif - - /* Several places within the instruction set 0 means "no register" rather than TCG_REG_R0. 
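   (The RX/RXY memory formats encode a base or index field of 0 as
   "no register", which is why TCG_REG_NONE below is defined as 0.)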
*/ #define TCG_REG_NONE 0 @@ -149,6 +137,7 @@ typedef enum S390Opcode { RRE_ALGR = 0xb90a, RRE_ALCR = 0xb998, RRE_ALCGR = 0xb988, + RRE_ALGFR = 0xb91a, RRE_CGR = 0xb920, RRE_CLGR = 0xb921, RRE_DLGR = 0xb987, @@ -254,6 +243,7 @@ typedef enum S390Opcode { RXY_LLGF = 0xe316, RXY_LLGH = 0xe391, RXY_LMG = 0xeb04, + RXY_LPQ = 0xe38f, RXY_LRV = 0xe31e, RXY_LRVG = 0xe30f, RXY_LRVH = 0xe31f, @@ -264,6 +254,7 @@ typedef enum S390Opcode { RXY_STG = 0xe324, RXY_STHY = 0xe370, RXY_STMG = 0xeb24, + RXY_STPQ = 0xe38e, RXY_STRV = 0xe33e, RXY_STRVG = 0xe32f, RXY_STRVH = 0xe33f, @@ -449,33 +440,6 @@ static const uint8_t tcg_cond_to_ltr_cond[] = { [TCG_COND_GEU] = S390_CC_ALWAYS, }; -#ifdef CONFIG_SOFTMMU -static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_SB] = helper_ret_ldsb_mmu, - [MO_LEUW] = helper_le_lduw_mmu, - [MO_LESW] = helper_le_ldsw_mmu, - [MO_LEUL] = helper_le_ldul_mmu, - [MO_LESL] = helper_le_ldsl_mmu, - [MO_LEUQ] = helper_le_ldq_mmu, - [MO_BEUW] = helper_be_lduw_mmu, - [MO_BESW] = helper_be_ldsw_mmu, - [MO_BEUL] = helper_be_ldul_mmu, - [MO_BESL] = helper_be_ldsl_mmu, - [MO_BEUQ] = helper_be_ldq_mmu, -}; - -static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_UB] = helper_ret_stb_mmu, - [MO_LEUW] = helper_le_stw_mmu, - [MO_LEUL] = helper_le_stl_mmu, - [MO_LEUQ] = helper_le_stq_mmu, - [MO_BEUW] = helper_be_stw_mmu, - [MO_BEUL] = helper_be_stl_mmu, - [MO_BEUQ] = helper_be_stq_mmu, -}; -#endif - static const tcg_insn_unit *tb_ret_addr; uint64_t s390_facilities[3]; @@ -1076,6 +1040,11 @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, return false; } +static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) +{ + return false; +} + static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, tcg_target_long imm) { @@ -1092,36 +1061,51 @@ static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src, tcg_out16(s, (ofs << 8) | (RIEf_RISBG & 0xff)); } -static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) +static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { tcg_out_insn(s, RRE, LGBR, dest, src); } -static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) +static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src) { tcg_out_insn(s, RRE, LLGCR, dest, src); } -static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) +static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { tcg_out_insn(s, RRE, LGHR, dest, src); } -static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) +static void tcg_out_ext16u(TCGContext *s, TCGReg dest, TCGReg src) { tcg_out_insn(s, RRE, LLGHR, dest, src); } -static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src) +static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src) { tcg_out_insn(s, RRE, LGFR, dest, src); } -static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src) +static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src) { tcg_out_insn(s, RRE, LLGFR, dest, src); } +static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src) +{ + tcg_out_ext32s(s, dest, src); +} + +static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src) +{ + tcg_out_ext32u(s, dest, src); +} + +static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg dest, TCGReg src) +{ + tcg_out_mov(s, TCG_TYPE_I32, dest, src); +} + static void tgen_andi_risbg(TCGContext *s, 
TCGReg out, TCGReg in, uint64_t val) { int msb, lsb; @@ -1149,15 +1133,15 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) /* Look for the zero-extensions. */ if ((val & valid) == 0xffffffff) { - tgen_ext32u(s, dest, dest); + tcg_out_ext32u(s, dest, dest); return; } if ((val & valid) == 0xff) { - tgen_ext8u(s, TCG_TYPE_I64, dest, dest); + tcg_out_ext8u(s, dest, dest); return; } if ((val & valid) == 0xffff) { - tgen_ext16u(s, TCG_TYPE_I64, dest, dest); + tcg_out_ext16u(s, dest, dest); return; } @@ -1440,7 +1424,7 @@ static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) /* With MIE3, and bit 0 of m4 set, we get the complete result. */ if (HAVE_FACILITY(MISC_INSN_EXT3)) { if (type == TCG_TYPE_I32) { - tgen_ext32u(s, dest, src); + tcg_out_ext32u(s, dest, src); src = dest; } tcg_out_insn(s, RRFc, POPCNT, dest, src, 8); @@ -1586,146 +1570,223 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest, tcg_out_call_int(s, dest); } +typedef struct { + TCGReg base; + TCGReg index; + int disp; + TCGAtomAlign aa; +} HostAddress; + +bool tcg_target_has_memory_bswap(MemOp memop) +{ + TCGAtomAlign aa; + + if ((memop & MO_SIZE) <= MO_64) { + return true; + } + + /* + * Reject 16-byte memop with 16-byte atomicity, + * but do allow a pair of 64-bit operations. + */ + aa = atom_and_align_for_opc(tcg_ctx, memop, MO_ATOM_IFALIGN, true); + return aa.atom <= MO_64; +} + static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, - TCGReg base, TCGReg index, int disp) + HostAddress h) { switch (opc & (MO_SSIZE | MO_BSWAP)) { case MO_UB: - tcg_out_insn(s, RXY, LLGC, data, base, index, disp); + tcg_out_insn(s, RXY, LLGC, data, h.base, h.index, h.disp); break; case MO_SB: - tcg_out_insn(s, RXY, LGB, data, base, index, disp); + tcg_out_insn(s, RXY, LGB, data, h.base, h.index, h.disp); break; case MO_UW | MO_BSWAP: /* swapped unsigned halfword load with upper bits zeroed */ - tcg_out_insn(s, RXY, LRVH, data, base, index, disp); - tgen_ext16u(s, TCG_TYPE_I64, data, data); + tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp); + tcg_out_ext16u(s, data, data); break; case MO_UW: - tcg_out_insn(s, RXY, LLGH, data, base, index, disp); + tcg_out_insn(s, RXY, LLGH, data, h.base, h.index, h.disp); break; case MO_SW | MO_BSWAP: /* swapped sign-extended halfword load */ - tcg_out_insn(s, RXY, LRVH, data, base, index, disp); - tgen_ext16s(s, TCG_TYPE_I64, data, data); + tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp); + tcg_out_ext16s(s, TCG_TYPE_REG, data, data); break; case MO_SW: - tcg_out_insn(s, RXY, LGH, data, base, index, disp); + tcg_out_insn(s, RXY, LGH, data, h.base, h.index, h.disp); break; case MO_UL | MO_BSWAP: /* swapped unsigned int load with upper bits zeroed */ - tcg_out_insn(s, RXY, LRV, data, base, index, disp); - tgen_ext32u(s, data, data); + tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp); + tcg_out_ext32u(s, data, data); break; case MO_UL: - tcg_out_insn(s, RXY, LLGF, data, base, index, disp); + tcg_out_insn(s, RXY, LLGF, data, h.base, h.index, h.disp); break; case MO_SL | MO_BSWAP: /* swapped sign-extended int load */ - tcg_out_insn(s, RXY, LRV, data, base, index, disp); - tgen_ext32s(s, data, data); + tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp); + tcg_out_ext32s(s, data, data); break; case MO_SL: - tcg_out_insn(s, RXY, LGF, data, base, index, disp); + tcg_out_insn(s, RXY, LGF, data, h.base, h.index, h.disp); break; case MO_UQ | MO_BSWAP: - tcg_out_insn(s, RXY, LRVG, data, base, index, 
disp); + tcg_out_insn(s, RXY, LRVG, data, h.base, h.index, h.disp); break; case MO_UQ: - tcg_out_insn(s, RXY, LG, data, base, index, disp); + tcg_out_insn(s, RXY, LG, data, h.base, h.index, h.disp); break; default: - tcg_abort(); + g_assert_not_reached(); } } static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, - TCGReg base, TCGReg index, int disp) + HostAddress h) { switch (opc & (MO_SIZE | MO_BSWAP)) { case MO_UB: - if (disp >= 0 && disp < 0x1000) { - tcg_out_insn(s, RX, STC, data, base, index, disp); + if (h.disp >= 0 && h.disp < 0x1000) { + tcg_out_insn(s, RX, STC, data, h.base, h.index, h.disp); } else { - tcg_out_insn(s, RXY, STCY, data, base, index, disp); + tcg_out_insn(s, RXY, STCY, data, h.base, h.index, h.disp); } break; case MO_UW | MO_BSWAP: - tcg_out_insn(s, RXY, STRVH, data, base, index, disp); + tcg_out_insn(s, RXY, STRVH, data, h.base, h.index, h.disp); break; case MO_UW: - if (disp >= 0 && disp < 0x1000) { - tcg_out_insn(s, RX, STH, data, base, index, disp); + if (h.disp >= 0 && h.disp < 0x1000) { + tcg_out_insn(s, RX, STH, data, h.base, h.index, h.disp); } else { - tcg_out_insn(s, RXY, STHY, data, base, index, disp); + tcg_out_insn(s, RXY, STHY, data, h.base, h.index, h.disp); } break; case MO_UL | MO_BSWAP: - tcg_out_insn(s, RXY, STRV, data, base, index, disp); + tcg_out_insn(s, RXY, STRV, data, h.base, h.index, h.disp); break; case MO_UL: - if (disp >= 0 && disp < 0x1000) { - tcg_out_insn(s, RX, ST, data, base, index, disp); + if (h.disp >= 0 && h.disp < 0x1000) { + tcg_out_insn(s, RX, ST, data, h.base, h.index, h.disp); } else { - tcg_out_insn(s, RXY, STY, data, base, index, disp); + tcg_out_insn(s, RXY, STY, data, h.base, h.index, h.disp); } break; case MO_UQ | MO_BSWAP: - tcg_out_insn(s, RXY, STRVG, data, base, index, disp); + tcg_out_insn(s, RXY, STRVG, data, h.base, h.index, h.disp); break; case MO_UQ: - tcg_out_insn(s, RXY, STG, data, base, index, disp); + tcg_out_insn(s, RXY, STG, data, h.base, h.index, h.disp); break; default: - tcg_abort(); + g_assert_not_reached(); } } -#if defined(CONFIG_SOFTMMU) -/* We're expecting to use a 20-bit negative offset on the tlb memory ops. */ -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19)); +static const TCGLdstHelperParam ldst_helper_param = { + .ntmp = 1, .tmp = { TCG_TMP0 } +}; -/* Load and compare a TLB entry, leaving the flags set. Loads the TLB - addend into R2. Returns a register with the santitized guest address. 
*/ -static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, - int mem_index, bool is_ld) +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - unsigned s_bits = opc & MO_SIZE; - unsigned a_bits = get_alignment_bits(opc); + MemOp opc = get_memop(lb->oi); + + if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, + (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) { + return false; + } + + tcg_out_ld_helper_args(s, lb, &ldst_helper_param); + tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]); + tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param); + + tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); + return true; +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + MemOp opc = get_memop(lb->oi); + + if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, + (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) { + return false; + } + + tcg_out_st_helper_args(s, lb, &ldst_helper_param); + tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE]); + + tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); + return true; +} + +/* + * For softmmu, perform the TLB load and compare. + * For useronly, perform any required alignment tests. + * In both cases, return a TCGLabelQemuLdst structure if the slow path + * is required and fill in @h with the host address for the fast path. + */ +static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, + TCGReg addr_reg, MemOpIdx oi, + bool is_ld) +{ + TCGLabelQemuLdst *ldst = NULL; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; + unsigned a_mask; + + h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128); + a_mask = (1 << h->aa.align) - 1; + +#ifdef CONFIG_SOFTMMU unsigned s_mask = (1 << s_bits) - 1; - unsigned a_mask = (1 << a_bits) - 1; + int mem_index = get_mmuidx(oi); int fast_off = TLB_MASK_TABLE_OFS(mem_index); int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); int table_off = fast_off + offsetof(CPUTLBDescFast, table); int ofs, a_off; uint64_t tlb_mask; - tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); - tcg_out_insn(s, RXY, NG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, mask_off); - tcg_out_insn(s, RXY, AG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, table_off); + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addr_reg; - /* For aligned accesses, we check the first byte and include the alignment - bits within the address. For unaligned access, we check that we don't - cross pages using the address of the last byte of the access. */ - a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask); - tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; + tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE, + s->page_bits - CPU_TLB_ENTRY_BITS); + + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19)); + tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off); + tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off); + + /* + * For aligned accesses, we check the first byte and include the alignment + * bits within the address. For unaligned access, we check that we don't + * cross pages using the address of the last byte of the access. + */ + a_off = (a_mask >= s_mask ? 
0 : s_mask - a_mask); + tlb_mask = (uint64_t)s->page_mask | a_mask; if (a_off == 0) { - tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask); + tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask); } else { - tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off); - tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask); + tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off); + tgen_andi(s, TCG_TYPE_TL, TCG_REG_R0, tlb_mask); } if (is_ld) { @@ -1734,224 +1795,161 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, ofs = offsetof(CPUTLBEntry, addr_write); } if (TARGET_LONG_BITS == 32) { - tcg_out_insn(s, RX, C, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs); + tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs); } else { - tcg_out_insn(s, RXY, CG, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs); + tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs); } - tcg_out_insn(s, RXY, LG, TCG_REG_R2, TCG_REG_R2, TCG_REG_NONE, + tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); + ldst->label_ptr[0] = s->code_ptr++; + + h->index = TCG_TMP0; + tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE, offsetof(CPUTLBEntry, addend)); if (TARGET_LONG_BITS == 32) { - tgen_ext32u(s, TCG_REG_R3, addr_reg); - return TCG_REG_R3; + tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg); + h->base = TCG_REG_NONE; + } else { + h->base = addr_reg; } - return addr_reg; -} - -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, - TCGReg data, TCGReg addr, - tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) -{ - TCGLabelQemuLdst *label = new_ldst_label(s); - - label->is_ld = is_ld; - label->oi = oi; - label->datalo_reg = data; - label->addrlo_reg = addr; - label->raddr = tcg_splitwx_to_rx(raddr); - label->label_ptr[0] = label_ptr; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) -{ - TCGReg addr_reg = lb->addrlo_reg; - TCGReg data_reg = lb->datalo_reg; - MemOpIdx oi = lb->oi; - MemOp opc = get_memop(oi); - - if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, - (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) { - return false; - } - - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); - if (TARGET_LONG_BITS == 64) { - tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg); - } - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi); - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr); - tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]); - tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2); - - tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); - return true; -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) -{ - TCGReg addr_reg = lb->addrlo_reg; - TCGReg data_reg = lb->datalo_reg; - MemOpIdx oi = lb->oi; - MemOp opc = get_memop(oi); - - if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, - (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) { - return false; - } - - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); - if (TARGET_LONG_BITS == 64) { - tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg); - } - switch (opc & MO_SIZE) { - case MO_UB: - tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); - break; - case MO_UW: - tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); - break; - case MO_UL: - tgen_ext32u(s, TCG_REG_R4, data_reg); - break; - case MO_UQ: - tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); - break; - default: - tcg_abort(); - } - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi); - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr); - tcg_out_call_int(s, 
qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); - - tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); - return true; -} + h->disp = 0; #else -static void tcg_out_test_alignment(TCGContext *s, bool is_ld, - TCGReg addrlo, unsigned a_bits) -{ - unsigned a_mask = (1 << a_bits) - 1; - TCGLabelQemuLdst *l = new_ldst_label(s); + if (a_mask) { + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addr_reg; - l->is_ld = is_ld; - l->addrlo_reg = addrlo; + /* We are expecting a_bits to max out at 7, much lower than TMLL. */ + tcg_debug_assert(a_mask <= 0xffff); + tcg_out_insn(s, RI, TMLL, addr_reg, a_mask); - /* We are expecting a_bits to max out at 7, much lower than TMLL. */ - tcg_debug_assert(a_bits < 16); - tcg_out_insn(s, RI, TMLL, addrlo, a_mask); - - tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */ - l->label_ptr[0] = s->code_ptr; - s->code_ptr += 1; - - l->raddr = tcg_splitwx_to_rx(s->code_ptr); -} - -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - if (!patch_reloc(l->label_ptr[0], R_390_PC16DBL, - (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) { - return false; + tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */ + ldst->label_ptr[0] = s->code_ptr++; } - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); - - /* "Tail call" to the helper, with the return address back inline. */ - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R14, (uintptr_t)l->raddr); - tgen_gotoi(s, S390_CC_ALWAYS, (const void *)(l->is_ld ? helper_unaligned_ld - : helper_unaligned_st)); - return true; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, - TCGReg *index_reg, tcg_target_long *disp) -{ + h->base = addr_reg; if (TARGET_LONG_BITS == 32) { - tgen_ext32u(s, TCG_TMP0, *addr_reg); - *addr_reg = TCG_TMP0; + tcg_out_ext32u(s, TCG_TMP0, addr_reg); + h->base = TCG_TMP0; } if (guest_base < 0x80000) { - *index_reg = TCG_REG_NONE; - *disp = guest_base; + h->index = TCG_REG_NONE; + h->disp = guest_base; } else { - *index_reg = TCG_GUEST_BASE_REG; - *disp = 0; + h->index = TCG_GUEST_BASE_REG; + h->disp = 0; } +#endif + + return ldst; } -#endif /* CONFIG_SOFTMMU */ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, - MemOpIdx oi) + MemOpIdx oi, TCGType data_type) { - MemOp opc = get_memop(oi); -#ifdef CONFIG_SOFTMMU - unsigned mem_index = get_mmuidx(oi); - tcg_insn_unit *label_ptr; - TCGReg base_reg; + TCGLabelQemuLdst *ldst; + HostAddress h; - base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1); + ldst = prepare_host_addr(s, &h, addr_reg, oi, true); + tcg_out_qemu_ld_direct(s, get_memop(oi), data_reg, h); - tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); - label_ptr = s->code_ptr; - s->code_ptr += 1; - - tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); - - add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr); -#else - TCGReg index_reg; - tcg_target_long disp; - unsigned a_bits = get_alignment_bits(opc); - - if (a_bits) { - tcg_out_test_alignment(s, true, addr_reg, a_bits); + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = data_reg; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); - tcg_out_qemu_ld_direct(s, opc, data_reg, 
addr_reg, index_reg, disp); -#endif } static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, - MemOpIdx oi) + MemOpIdx oi, TCGType data_type) { - MemOp opc = get_memop(oi); -#ifdef CONFIG_SOFTMMU - unsigned mem_index = get_mmuidx(oi); - tcg_insn_unit *label_ptr; - TCGReg base_reg; + TCGLabelQemuLdst *ldst; + HostAddress h; - base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0); + ldst = prepare_host_addr(s, &h, addr_reg, oi, false); + tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h); - tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); - label_ptr = s->code_ptr; - s->code_ptr += 1; - - tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); - - add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr); -#else - TCGReg index_reg; - tcg_target_long disp; - unsigned a_bits = get_alignment_bits(opc); - - if (a_bits) { - tcg_out_test_alignment(s, false, addr_reg, a_bits); + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = data_reg; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); + } +} + +static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg addr_reg, MemOpIdx oi, bool is_ld) +{ + TCGLabel *l1 = NULL, *l2 = NULL; + TCGLabelQemuLdst *ldst; + HostAddress h; + bool need_bswap; + bool use_pair; + S390Opcode insn; + + ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld); + + use_pair = h.aa.atom < MO_128; + need_bswap = get_memop(oi) & MO_BSWAP; + + if (!use_pair) { + /* + * Atomicity requires we use LPQ. If we've already checked for + * 16-byte alignment, that's all we need. If we arrive with + * lesser alignment, we have determined that less than 16-byte + * alignment can be satisfied with two 8-byte loads. + */ + if (h.aa.align < MO_128) { + use_pair = true; + l1 = gen_new_label(); + l2 = gen_new_label(); + + tcg_out_insn(s, RI, TMLL, addr_reg, 15); + tgen_branch(s, 7, l1); /* CC in {1,2,3} */ + } + + tcg_debug_assert(!need_bswap); + tcg_debug_assert(datalo & 1); + tcg_debug_assert(datahi == datalo - 1); + insn = is_ld ? RXY_LPQ : RXY_STPQ; + tcg_out_insn_RXY(s, insn, datahi, h.base, h.index, h.disp); + + if (use_pair) { + tgen_branch(s, S390_CC_ALWAYS, l2); + tcg_out_label(s, l1); + } + } + if (use_pair) { + TCGReg d1, d2; + + if (need_bswap) { + d1 = datalo, d2 = datahi; + insn = is_ld ? RXY_LRVG : RXY_STRVG; + } else { + d1 = datahi, d2 = datalo; + insn = is_ld ? 
RXY_LG : RXY_STG; + } + + if (h.base == d1 || h.index == d1) { + tcg_out_insn(s, RXY, LAY, TCG_TMP0, h.base, h.index, h.disp); + h.base = TCG_TMP0; + h.index = TCG_REG_NONE; + h.disp = 0; + } + tcg_out_insn_RXY(s, insn, d1, h.base, h.index, h.disp); + tcg_out_insn_RXY(s, insn, d2, h.base, h.index, h.disp + 8); + } + if (l2) { + tcg_out_label(s, l2); + } + + if (ldst) { + ldst->type = TCG_TYPE_I128; + ldst->datalo_reg = datalo; + ldst->datahi_reg = datahi; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); - tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp); -#endif } static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) @@ -2233,19 +2231,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, } break; - case INDEX_op_ext8s_i32: - tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]); - break; - case INDEX_op_ext16s_i32: - tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]); - break; - case INDEX_op_ext8u_i32: - tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]); - break; - case INDEX_op_ext16u_i32: - tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]); - break; - case INDEX_op_bswap16_i32: a0 = args[0], a1 = args[1], a2 = args[2]; tcg_out_insn(s, RRE, LRVR, a0, a1); @@ -2272,9 +2257,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, a0 = args[0], a1 = args[1], a2 = args[2]; tcg_out_insn(s, RRE, LRVR, a0, a1); if (a2 & TCG_BSWAP_OS) { - tgen_ext32s(s, a0, a0); + tcg_out_ext32s(s, a0, a0); } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { - tgen_ext32u(s, a0, a0); + tcg_out_ext32u(s, a0, a0); } break; @@ -2312,14 +2297,29 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, args[2], const_args[2], args[3], const_args[3], args[4]); break; - case INDEX_op_qemu_ld_i32: - /* ??? Technically we can use a non-extending instruction. 
*/ - case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, args[0], args[1], args[2]); + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32); break; - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, args[0], args[1], args[2]); + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: + tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64); + break; + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32); + break; + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: + tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64); + break; + case INDEX_op_qemu_ld_a32_i128: + case INDEX_op_qemu_ld_a64_i128: + tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true); + break; + case INDEX_op_qemu_st_a32_i128: + case INDEX_op_qemu_st_a64_i128: + tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false); break; case INDEX_op_ld16s_i64: @@ -2537,27 +2537,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, } break; - case INDEX_op_ext8s_i64: - tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]); - break; - case INDEX_op_ext16s_i64: - tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]); - break; - case INDEX_op_ext_i32_i64: - case INDEX_op_ext32s_i64: - tgen_ext32s(s, args[0], args[1]); - break; - case INDEX_op_ext8u_i64: - tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]); - break; - case INDEX_op_ext16u_i64: - tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]); - break; - case INDEX_op_extu_i32_i64: - case INDEX_op_ext32u_i64: - tgen_ext32u(s, args[0], args[1]); - break; - case INDEX_op_add2_i64: if (const_args[4]) { if ((int64_t)args[4] >= 0) { @@ -2644,8 +2623,21 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_call: /* Always emitted via tcg_out_call. */ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ + case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. 
*/ + case INDEX_op_ext8s_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: default: - tcg_abort(); + g_assert_not_reached(); } } @@ -3200,12 +3192,22 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_ctpop_i64: return C_O1_I1(r, r); - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: - return C_O1_I1(r, L); - case INDEX_op_qemu_st_i64: - case INDEX_op_qemu_st_i32: - return C_O0_I2(L, L); + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: + return C_O1_I1(r, r); + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + return C_O0_I2(r, r); + case INDEX_op_qemu_ld_a32_i128: + case INDEX_op_qemu_ld_a64_i128: + return C_O2_I1(o, m, r); + case INDEX_op_qemu_st_a32_i128: + case INDEX_op_qemu_st_a64_i128: + return C_O0_I3(o, m, r); case INDEX_op_deposit_i32: case INDEX_op_deposit_i64: diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h index a05b473117..9a405003b9 100644 --- a/tcg/s390x/tcg-target.h +++ b/tcg/s390x/tcg-target.h @@ -26,7 +26,6 @@ #define S390_TCG_TARGET_H #define TCG_TARGET_INSN_UNIT_SIZE 2 -#define TCG_TARGET_TLB_DISPLACEMENT_BITS 19 /* We have a +- 4GB range on the branches; leave some slop. */ #define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB) @@ -140,6 +139,8 @@ extern uint64_t s390_facilities[3]; #define TCG_TARGET_HAS_muluh_i64 0 #define TCG_TARGET_HAS_mulsh_i64 0 +#define TCG_TARGET_HAS_qemu_ldst_i128 1 + #define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR) #define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR) #define TCG_TARGET_HAS_v256 0 @@ -172,8 +173,6 @@ extern uint64_t s390_facilities[3]; #define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF #define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF -#define TCG_TARGET_HAS_MEMORY_BSWAP 1 - #define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h index 31e6fea1fc..434bf25072 100644 --- a/tcg/sparc64/tcg-target-con-set.h +++ b/tcg/sparc64/tcg-target-con-set.h @@ -12,8 +12,6 @@ C_O0_I1(r) C_O0_I2(rZ, r) C_O0_I2(rZ, rJ) -C_O0_I2(sZ, s) -C_O1_I1(r, s) C_O1_I1(r, r) C_O1_I2(r, r, r) C_O1_I2(r, rZ, rJ) diff --git a/tcg/sparc64/tcg-target-con-str.h b/tcg/sparc64/tcg-target-con-str.h index 8f5c7aef97..0577ec4942 100644 --- a/tcg/sparc64/tcg-target-con-str.h +++ b/tcg/sparc64/tcg-target-con-str.h @@ -9,7 +9,6 @@ * REGS(letter, register_mask) */ REGS('r', ALL_GENERAL_REGS) -REGS('s', ALL_QLDST_REGS) /* * Define constraint letters for constants: diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index ccc4144f7c..d2d0f604c2 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -27,6 +27,7 @@ #error "unsupported code generation mode" #endif +#include "../tcg-ldst.c.inc" #include "../tcg-pool.c.inc" #ifdef CONFIG_DEBUG_TCG @@ -70,22 +71,12 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { #define TCG_CT_CONST_S13 0x200 #define TCG_CT_CONST_ZERO 0x400 -/* - * For softmmu, we need to avoid conflicts with the first 3 - * argument registers to perform the tlb lookup, and to 
call - * the helper function. - */ -#ifdef CONFIG_SOFTMMU -#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3) -#else -#define SOFTMMU_RESERVE_REGS 0 -#endif -#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) -#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) +#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) -/* Define some temporary registers. T2 is used for constant generation. */ +/* Define some temporary registers. T3 is used for constant generation. */ #define TCG_REG_T1 TCG_REG_G1 -#define TCG_REG_T2 TCG_REG_O7 +#define TCG_REG_T2 TCG_REG_G2 +#define TCG_REG_T3 TCG_REG_O7 #ifndef CONFIG_SOFTMMU # define TCG_GUEST_BASE_REG TCG_REG_I5 @@ -110,7 +101,6 @@ static const int tcg_target_reg_alloc_order[] = { TCG_REG_I4, TCG_REG_I5, - TCG_REG_G2, TCG_REG_G3, TCG_REG_G4, TCG_REG_G5, @@ -399,22 +389,25 @@ static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg) tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10)); } -static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg) +/* A 13-bit constant sign-extended to 64 bits. */ +static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg) { tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR); } -static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg) +/* A 32-bit constant sign-extended to 64 bits. */ +static void tcg_out_movi_s32(TCGContext *s, TCGReg ret, int32_t arg) { - if (check_fit_i32(arg, 13)) { - /* A 13-bit constant sign-extended to 64-bits. */ - tcg_out_movi_imm13(s, ret, arg); - } else { - /* A 32-bit constant zero-extended to 64 bits. */ - tcg_out_sethi(s, ret, arg); - if (arg & 0x3ff) { - tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR); - } + tcg_out_sethi(s, ret, ~arg); + tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR); +} + +/* A 32-bit constant zero-extended to 64 bits. */ +static void tcg_out_movi_u32(TCGContext *s, TCGReg ret, uint32_t arg) +{ + tcg_out_sethi(s, ret, arg); + if (arg & 0x3ff) { + tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR); } } @@ -425,15 +418,15 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long hi, lo = (int32_t)arg; tcg_target_long test, lsb; - /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ - if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { - tcg_out_movi_imm32(s, ret, arg); + /* A 13-bit constant sign-extended to 64-bits. */ + if (check_fit_tl(arg, 13)) { + tcg_out_movi_s13(s, ret, arg); return; } - /* A 13-bit constant sign-extended to 64-bits. */ - if (check_fit_tl(arg, 13)) { - tcg_out_movi_imm13(s, ret, arg); + /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ + if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { + tcg_out_movi_u32(s, ret, arg); return; } @@ -448,8 +441,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, /* A 32-bit constant sign-extended to 64-bits. */ if (arg == lo) { - tcg_out_sethi(s, ret, ~arg); - tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR); + tcg_out_movi_s32(s, ret, arg); return; } @@ -477,13 +469,13 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, /* A 64-bit constant decomposed into 2 32-bit pieces. 
*/ if (check_fit_i32(lo, 13)) { hi = (arg - lo) >> 32; - tcg_out_movi_imm32(s, ret, hi); + tcg_out_movi_u32(s, ret, hi); tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); tcg_out_arithi(s, ret, ret, lo, ARITH_ADD); } else { hi = arg >> 32; - tcg_out_movi_imm32(s, ret, hi); - tcg_out_movi_imm32(s, scratch, lo); + tcg_out_movi_u32(s, ret, hi); + tcg_out_movi_u32(s, scratch, lo); tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); tcg_out_arith(s, ret, ret, scratch, ARITH_OR); } @@ -492,8 +484,59 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg) { - tcg_debug_assert(ret != TCG_REG_T2); - tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2); + tcg_debug_assert(ret != TCG_REG_T3); + tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T3); +} + +static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) +{ + g_assert_not_reached(); +} + +static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) +{ + g_assert_not_reached(); +} + +static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND); +} + +static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL); + tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL); +} + +static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA); +} + +static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL); +} + +static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_ext32s(s, rd, rs); +} + +static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_ext32u(s, rd, rs); +} + +static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_mov(s, TCG_TYPE_I32, rd, rs); +} + +static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) +{ + return false; } static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, @@ -716,7 +759,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, default: tcg_out_cmp(s, c1, c2, c2const); - tcg_out_movi_imm13(s, ret, 0); + tcg_out_movi_s13(s, ret, 0); tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1); return; } @@ -752,11 +795,11 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, /* For 64-bit signed comparisons vs zero, we can avoid the compare if the input does not overlap the output. */ if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) { - tcg_out_movi_imm13(s, ret, 0); + tcg_out_movi_s13(s, ret, 0); tcg_out_movr(s, cond, ret, c1, 1, 1); } else { tcg_out_cmp(s, c1, c2, c2const); - tcg_out_movi_imm13(s, ret, 0); + tcg_out_movi_s13(s, ret, 0); tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1); } } @@ -793,7 +836,7 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, if (use_vis3_instructions && !is_sub) { /* Note that ADDXC doesn't accept immediates. */ if (bhconst && bh != 0) { - tcg_out_movi_imm13(s, TCG_REG_T2, bh); + tcg_out_movi_s13(s, TCG_REG_T2, bh); bh = TCG_REG_T2; } tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC); @@ -815,7 +858,7 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, * so the adjustment fits 12 bits. */ if (bhconst) { - tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1)); + tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1)); } else { tcg_out_arithi(s, TCG_REG_T2, bh, 1, is_sub ? 
ARITH_SUB : ARITH_ADD); @@ -834,10 +877,8 @@ static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest, { uintptr_t desti = (uintptr_t)dest; - /* Be careful not to clobber %o7 for a tail call. */ tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1, - desti & ~0xfff, in_prologue, - tail_call ? TCG_REG_G2 : TCG_REG_O7); + desti & ~0xfff, in_prologue, TCG_REG_T2); tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL); } @@ -867,127 +908,6 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL)); } -#ifdef CONFIG_SOFTMMU -static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1]; -static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1]; - -static void emit_extend(TCGContext *s, TCGReg r, int op) -{ - /* Emit zero extend of 8, 16 or 32 bit data as - * required by the MO_* value op; do nothing for 64 bit. - */ - switch (op & MO_SIZE) { - case MO_8: - tcg_out_arithi(s, r, r, 0xff, ARITH_AND); - break; - case MO_16: - tcg_out_arithi(s, r, r, 16, SHIFT_SLL); - tcg_out_arithi(s, r, r, 16, SHIFT_SRL); - break; - case MO_32: - tcg_out_arith(s, r, r, 0, SHIFT_SRL); - break; - case MO_64: - break; - } -} - -static void build_trampolines(TCGContext *s) -{ - static void * const qemu_ld_helpers[] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_SB] = helper_ret_ldsb_mmu, - [MO_LEUW] = helper_le_lduw_mmu, - [MO_LESW] = helper_le_ldsw_mmu, - [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEUQ] = helper_le_ldq_mmu, - [MO_BEUW] = helper_be_lduw_mmu, - [MO_BESW] = helper_be_ldsw_mmu, - [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEUQ] = helper_be_ldq_mmu, - }; - static void * const qemu_st_helpers[] = { - [MO_UB] = helper_ret_stb_mmu, - [MO_LEUW] = helper_le_stw_mmu, - [MO_LEUL] = helper_le_stl_mmu, - [MO_LEUQ] = helper_le_stq_mmu, - [MO_BEUW] = helper_be_stw_mmu, - [MO_BEUL] = helper_be_stl_mmu, - [MO_BEUQ] = helper_be_stq_mmu, - }; - - int i; - - for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) { - if (qemu_ld_helpers[i] == NULL) { - continue; - } - - /* May as well align the trampoline. */ - while ((uintptr_t)s->code_ptr & 15) { - tcg_out_nop(s); - } - qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr); - - /* Set the retaddr operand. */ - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7); - /* Tail call. */ - tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true); - /* delay slot -- set the env argument */ - tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); - } - - for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) { - if (qemu_st_helpers[i] == NULL) { - continue; - } - - /* May as well align the trampoline. */ - while ((uintptr_t)s->code_ptr & 15) { - tcg_out_nop(s); - } - qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr); - - emit_extend(s, TCG_REG_O2, i); - - /* Set the retaddr operand. */ - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7); - - /* Tail call. 
*/ - tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true); - /* delay slot -- set the env argument */ - tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); - } -} -#else -static const tcg_insn_unit *qemu_unalign_ld_trampoline; -static const tcg_insn_unit *qemu_unalign_st_trampoline; - -static void build_trampolines(TCGContext *s) -{ - for (int ld = 0; ld < 2; ++ld) { - void *helper; - - while ((uintptr_t)s->code_ptr & 15) { - tcg_out_nop(s); - } - - if (ld) { - helper = helper_unaligned_ld; - qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr); - } else { - helper = helper_unaligned_st; - qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr); - } - - /* Tail call. */ - tcg_out_jmpl_const(s, helper, true, true); - /* delay slot -- set the env argument */ - tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); - } -} -#endif - /* Generate global QEMU prologue and epilogue code */ static void tcg_target_qemu_prologue(TCGContext *s) { @@ -1032,9 +952,7 @@ static void tcg_target_qemu_prologue(TCGContext *s) tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); /* delay slot */ - tcg_out_movi_imm13(s, TCG_REG_O0, 0); - - build_trampolines(s); + tcg_out_movi_s13(s, TCG_REG_O0, 0); } static void tcg_out_nop_fill(tcg_insn_unit *p, int count) @@ -1045,387 +963,239 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count) } } -#if defined(CONFIG_SOFTMMU) +static const TCGLdstHelperParam ldst_helper_param = { + .ntmp = 1, .tmp = { TCG_REG_T1 } +}; -/* We expect to use a 13-bit negative offset from ENV. */ -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12)); - -/* Perform the TLB load and compare. - - Inputs: - ADDRLO and ADDRHI contain the possible two parts of the address. - - MEM_INDEX and S_BITS are the memory context and log2 size of the load. - - WHICH is the offset into the CPUTLBEntry structure of the slot to read. - This should be offsetof addr_read or addr_write. - - The result of the TLB comparison is in %[ix]cc. The sanitized address - is in the returned register, maybe %o0. The TLB addend is in %o1. */ - -static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, - MemOp opc, int which) +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { + MemOp opc = get_memop(lb->oi); + MemOp sgn; + + if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19, + (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) { + return false; + } + + /* Use inline tcg_out_ext32s; otherwise let the helper sign-extend. */ + sgn = (opc & MO_SIZE) < MO_32 ? 
MO_SIGN : 0; + + tcg_out_ld_helper_args(s, lb, &ldst_helper_param); + tcg_out_call(s, qemu_ld_helpers[opc & (MO_SIZE | sgn)], NULL); + tcg_out_ld_helper_ret(s, lb, sgn, &ldst_helper_param); + + tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0); + return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19, + (intptr_t)lb->raddr, 0); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + MemOp opc = get_memop(lb->oi); + + if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19, + (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) { + return false; + } + + tcg_out_st_helper_args(s, lb, &ldst_helper_param); + tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE], NULL); + + tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0); + return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19, + (intptr_t)lb->raddr, 0); +} + +typedef struct { + TCGReg base; + TCGReg index; + TCGAtomAlign aa; +} HostAddress; + +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return true; +} + +/* + * For softmmu, perform the TLB load and compare. + * For useronly, perform any required alignment tests. + * In both cases, return a TCGLabelQemuLdst structure if the slow path + * is required and fill in @h with the host address for the fast path. + */ +static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, + TCGReg addr_reg, MemOpIdx oi, + bool is_ld) +{ + TCGLabelQemuLdst *ldst = NULL; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; + unsigned a_mask; + + /* We don't support unaligned accesses. */ + h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); + h->aa.align = MAX(h->aa.align, s_bits); + a_mask = (1u << h->aa.align) - 1; + +#ifdef CONFIG_SOFTMMU + int mem_index = get_mmuidx(oi); int fast_off = TLB_MASK_TABLE_OFS(mem_index); int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); int table_off = fast_off + offsetof(CPUTLBDescFast, table); - const TCGReg r0 = TCG_REG_O0; - const TCGReg r1 = TCG_REG_O1; - const TCGReg r2 = TCG_REG_O2; - unsigned s_bits = opc & MO_SIZE; - unsigned a_bits = get_alignment_bits(opc); - tcg_target_long compare_mask; + int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write); + int add_off = offsetof(CPUTLBEntry, addend); + int compare_mask; + int cc; /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ - tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off); - tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off); + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12)); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T2, TCG_AREG0, mask_off); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T3, TCG_AREG0, table_off); /* Extract the page index, shifted into place for tlb index. */ - tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, - SHIFT_SRL); - tcg_out_arith(s, r2, r2, r0, ARITH_AND); + tcg_out_arithi(s, TCG_REG_T1, addr_reg, + s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL); + tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND); /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */ - tcg_out_arith(s, r2, r2, r1, ARITH_ADD); + tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T3, ARITH_ADD); - /* Load the tlb comparator and the addend. */ - tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which); - tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend)); + /* Load the tlb comparator and the addend. 
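The srl/and/add just emitted computes the CPUTLBEntry address. Roughly, in host C (the mask read from CPUTLBDescFast is already scaled by the entry size, so the AND yields a byte offset; parameter names are illustrative):

    #include <stdint.h>

    static uintptr_t tlb_entry_ptr(uintptr_t table, uintptr_t mask,
                                   uint64_t addr, unsigned page_bits,
                                   unsigned entry_bits /* CPU_TLB_ENTRY_BITS */)
    {
        /* srl + and: page index, pre-shifted so it indexes whole entries */
        uintptr_t ofs = (addr >> (page_bits - entry_bits)) & mask;
        return table + ofs;    /* add: address of the CPUTLBEntry */
    }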
*/ + tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_T2, TCG_REG_T1, cmp_off); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T1, TCG_REG_T1, add_off); + h->base = TCG_REG_T1; - /* Mask out the page offset, except for the required alignment. We don't support unaligned accesses. */ - if (a_bits < s_bits) { - a_bits = s_bits; - } - compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1); + /* Mask out the page offset, except for the required alignment. */ + compare_mask = s->page_mask | a_mask; if (check_fit_tl(compare_mask, 13)) { - tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND); + tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND); } else { - tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask); - tcg_out_arith(s, r2, addr, r2, ARITH_AND); + tcg_out_movi_s32(s, TCG_REG_T3, compare_mask); + tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND); } - tcg_out_cmp(s, r0, r2, 0); + tcg_out_cmp(s, TCG_REG_T2, TCG_REG_T3, 0); - /* If the guest address must be zero-extended, do so now. */ + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addr_reg; + ldst->label_ptr[0] = s->code_ptr; + + /* bne,pn %[xi]cc, label0 */ + cc = TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC; + tcg_out_bpcc0(s, COND_NE, BPCC_PN | cc, 0); +#else + /* + * If the size equals the required alignment, we can skip the test + * and allow host SIGBUS to deliver SIGBUS to the guest. + * Otherwise, test for at least natural alignment and defer + * everything else to the helper functions. + */ + if (s_bits != get_alignment_bits(opc)) { + tcg_debug_assert(check_fit_tl(a_mask, 13)); + tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC); + + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addr_reg; + ldst->label_ptr[0] = s->code_ptr; + + /* bne,pn %icc, label0 */ + tcg_out_bpcc0(s, COND_NE, BPCC_PN | BPCC_ICC, 0); + } + h->base = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0; +#endif + + /* If the guest address must be zero-extended, do so in the delay slot.
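In the user-only branch above, the slow path is entered purely on misalignment. The ANDCC against a_mask implements this predicate, shown here as plain C for clarity (a_mask is (1 << alignment_bits) - 1, as computed earlier in the function):

    #include <stdbool.h>
    #include <stdint.h>

    static bool needs_slow_path(uint64_t addr, unsigned a_bits)
    {
        uint64_t a_mask = (1ull << a_bits) - 1;
        return (addr & a_mask) != 0;    /* the bne,pn %icc is taken */
    }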
*/ if (TARGET_LONG_BITS == 32) { - tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL); - return r0; + tcg_out_ext32u(s, TCG_REG_T2, addr_reg); + h->index = TCG_REG_T2; + } else { + if (ldst) { + tcg_out_nop(s); + } + h->index = addr_reg; } - return addr; + return ldst; } -#endif /* CONFIG_SOFTMMU */ - -static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = { - [MO_UB] = LDUB, - [MO_SB] = LDSB, - [MO_UB | MO_LE] = LDUB, - [MO_SB | MO_LE] = LDSB, - - [MO_BEUW] = LDUH, - [MO_BESW] = LDSH, - [MO_BEUL] = LDUW, - [MO_BESL] = LDSW, - [MO_BEUQ] = LDX, - [MO_BESQ] = LDX, - - [MO_LEUW] = LDUH_LE, - [MO_LESW] = LDSH_LE, - [MO_LEUL] = LDUW_LE, - [MO_LESL] = LDSW_LE, - [MO_LEUQ] = LDX_LE, - [MO_LESQ] = LDX_LE, -}; - -static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_UB] = STB, - - [MO_BEUW] = STH, - [MO_BEUL] = STW, - [MO_BEUQ] = STX, - - [MO_LEUW] = STH_LE, - [MO_LEUL] = STW_LE, - [MO_LEUQ] = STX_LE, -}; static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, - MemOpIdx oi, bool is_64) + MemOpIdx oi, TCGType data_type) { - MemOp memop = get_memop(oi); - tcg_insn_unit *label_ptr; + static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = { + [MO_UB] = LDUB, + [MO_SB] = LDSB, + [MO_UB | MO_LE] = LDUB, + [MO_SB | MO_LE] = LDSB, -#ifdef CONFIG_SOFTMMU - unsigned memi = get_mmuidx(oi); - TCGReg addrz; - const tcg_insn_unit *func; + [MO_BEUW] = LDUH, + [MO_BESW] = LDSH, + [MO_BEUL] = LDUW, + [MO_BESL] = LDSW, + [MO_BEUQ] = LDX, + [MO_BESQ] = LDX, - addrz = tcg_out_tlb_load(s, addr, memi, memop, - offsetof(CPUTLBEntry, addr_read)); + [MO_LEUW] = LDUH_LE, + [MO_LESW] = LDSH_LE, + [MO_LEUL] = LDUW_LE, + [MO_LESL] = LDSW_LE, + [MO_LEUQ] = LDX_LE, + [MO_LESQ] = LDX_LE, + }; - /* The fast path is exactly one insn. Thus we can perform the - entire TLB Hit in the (annulled) delay slot of the branch - over the TLB Miss case. */ + TCGLabelQemuLdst *ldst; + HostAddress h; - /* beq,a,pt %[xi]cc, label0 */ - label_ptr = s->code_ptr; - tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT - | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); - /* delay slot */ - tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, - qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); + ldst = prepare_host_addr(s, &h, addr, oi, true); - /* TLB Miss. */ + tcg_out_ldst_rr(s, data, h.base, h.index, + ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]); - tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz); - - /* We use the helpers to extend SB and SW data, leaving the case - of SL needing explicit extending below. */ - if ((memop & MO_SSIZE) == MO_SL) { - func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)]; - } else { - func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)]; + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = data; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - tcg_debug_assert(func != NULL); - tcg_out_call_nodelay(s, func, false); - /* delay slot */ - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi); - - /* We let the helper sign-extend SB and SW, but leave SL for here. */ - if (is_64 && (memop & MO_SSIZE) == MO_SL) { - tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA); - } else { - tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0); - } - - *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); -#else - TCGReg index = (guest_base ? 
TCG_GUEST_BASE_REG : TCG_REG_G0); - unsigned a_bits = get_alignment_bits(memop); - unsigned s_bits = memop & MO_SIZE; - unsigned t_bits; - - if (TARGET_LONG_BITS == 32) { - tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); - addr = TCG_REG_T1; - } - - /* - * Normal case: alignment equal to access size. - */ - if (a_bits == s_bits) { - tcg_out_ldst_rr(s, data, addr, index, - qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); - return; - } - - /* - * Test for at least natural alignment, and assume most accesses - * will be aligned -- perform a straight load in the delay slot. - * This is required to preserve atomicity for aligned accesses. - */ - t_bits = MAX(a_bits, s_bits); - tcg_debug_assert(t_bits < 13); - tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC); - - /* beq,a,pt %icc, label */ - label_ptr = s->code_ptr; - tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0); - /* delay slot */ - tcg_out_ldst_rr(s, data, addr, index, - qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); - - if (a_bits >= s_bits) { - /* - * Overalignment: A successful alignment test will perform the memory - * operation in the delay slot, and failure need only invoke the - * handler for SIGBUS. - */ - tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false); - /* delay slot -- move to low part of argument reg */ - tcg_out_mov_delay(s, TCG_REG_O1, addr); - } else { - /* Underalignment: load by pieces of minimum alignment. */ - int ld_opc, a_size, s_size, i; - - /* - * Force full address into T1 early; avoids problems with - * overlap between @addr and @data. - */ - tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD); - - a_size = 1 << a_bits; - s_size = 1 << s_bits; - if ((memop & MO_BSWAP) == MO_BE) { - ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)]; - tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc); - ld_opc = qemu_ld_opc[a_bits | MO_BE]; - for (i = a_size; i < s_size; i += a_size) { - tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc); - tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX); - tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); - } - } else if (a_bits == 0) { - ld_opc = LDUB; - tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc); - for (i = a_size; i < s_size; i += a_size) { - if ((memop & MO_SIGN) && i == s_size - a_size) { - ld_opc = LDSB; - } - tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc); - tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX); - tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); - } - } else { - ld_opc = qemu_ld_opc[a_bits | MO_LE]; - tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc); - for (i = a_size; i < s_size; i += a_size) { - tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD); - if ((memop & MO_SIGN) && i == s_size - a_size) { - ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN]; - } - tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc); - tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX); - tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); - } - } - } - - *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); -#endif /* CONFIG_SOFTMMU */ } static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, - MemOpIdx oi) + MemOpIdx oi, TCGType data_type) { - MemOp memop = get_memop(oi); - tcg_insn_unit *label_ptr; + static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = { + [MO_UB] = STB, -#ifdef CONFIG_SOFTMMU - unsigned memi = get_mmuidx(oi); - TCGReg addrz; - const tcg_insn_unit *func; + [MO_BEUW] = STH, + [MO_BEUL] = STW, + [MO_BEUQ] = STX, - addrz = tcg_out_tlb_load(s, addr, memi, memop, - 
offsetof(CPUTLBEntry, addr_write)); + [MO_LEUW] = STH_LE, + [MO_LEUL] = STW_LE, + [MO_LEUQ] = STX_LE, + }; - /* The fast path is exactly one insn. Thus we can perform the entire - TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */ - /* beq,a,pt %[xi]cc, label0 */ - label_ptr = s->code_ptr; - tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT - | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); - /* delay slot */ - tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, - qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); + TCGLabelQemuLdst *ldst; + HostAddress h; - /* TLB Miss. */ + ldst = prepare_host_addr(s, &h, addr, oi, false); - tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz); - tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data); + tcg_out_ldst_rr(s, data, h.base, h.index, + st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]); - func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)]; - tcg_debug_assert(func != NULL); - tcg_out_call_nodelay(s, func, false); - /* delay slot */ - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi); - - *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); -#else - TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0); - unsigned a_bits = get_alignment_bits(memop); - unsigned s_bits = memop & MO_SIZE; - unsigned t_bits; - - if (TARGET_LONG_BITS == 32) { - tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); - addr = TCG_REG_T1; + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = data; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - - /* - * Normal case: alignment equal to access size. - */ - if (a_bits == s_bits) { - tcg_out_ldst_rr(s, data, addr, index, - qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); - return; - } - - /* - * Test for at least natural alignment, and assume most accesses - * will be aligned -- perform a straight store in the delay slot. - * This is required to preserve atomicity for aligned accesses. - */ - t_bits = MAX(a_bits, s_bits); - tcg_debug_assert(t_bits < 13); - tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC); - - /* beq,a,pt %icc, label */ - label_ptr = s->code_ptr; - tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0); - /* delay slot */ - tcg_out_ldst_rr(s, data, addr, index, - qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); - - if (a_bits >= s_bits) { - /* - * Overalignment: A successful alignment test will perform the memory - * operation in the delay slot, and failure need only invoke the - * handler for SIGBUS. - */ - tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false); - /* delay slot -- move to low part of argument reg */ - tcg_out_mov_delay(s, TCG_REG_O1, addr); - } else { - /* Underalignment: store by pieces of minimum alignment. */ - int st_opc, a_size, s_size, i; - - /* - * Force full address into T1 early; avoids problems with - * overlap between @addr and @data. - */ - tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD); - - a_size = 1 << a_bits; - s_size = 1 << s_bits; - if ((memop & MO_BSWAP) == MO_BE) { - st_opc = qemu_st_opc[a_bits | MO_BE]; - for (i = 0; i < s_size; i += a_size) { - TCGReg d = data; - int shift = (s_size - a_size - i) * 8; - if (shift) { - d = TCG_REG_T2; - tcg_out_arithi(s, d, data, shift, SHIFT_SRLX); - } - tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc); - } - } else if (a_bits == 0) { - tcg_out_ldst(s, data, TCG_REG_T1, 0, STB); - for (i = 1; i < s_size; i++) { - tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX); - tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB); - } - } else { - /* Note that ST*A with immediate asi must use indexed address. 
*/ - st_opc = qemu_st_opc[a_bits + MO_LE]; - tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc); - for (i = a_size; i < s_size; i += a_size) { - tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX); - tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD); - tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc); - } - } - } - - *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); -#endif /* CONFIG_SOFTMMU */ } static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) { if (check_fit_ptr(a0, 13)) { tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); - tcg_out_movi_imm13(s, TCG_REG_O0, a0); + tcg_out_movi_s13(s, TCG_REG_O0, a0); return; } else { intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0); @@ -1445,12 +1215,12 @@ static void tcg_out_goto_tb(TCGContext *s, int which) { ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which)); - /* Direct branch will be patched by tb_target_set_jmp_target. */ + /* Load link and indirect branch. */ set_jmp_insn_offset(s, which); - tcg_out32(s, CALL); - /* delay slot */ - tcg_debug_assert(check_fit_ptr(off, 13)); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off); + tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL); + /* delay slot */ + tcg_out_nop(s); set_jmp_reset_offset(s, which); /* @@ -1469,28 +1239,6 @@ static void tcg_out_goto_tb(TCGContext *s, int which) void tb_target_set_jmp_target(const TranslationBlock *tb, int n, uintptr_t jmp_rx, uintptr_t jmp_rw) { - uintptr_t addr = tb->jmp_target_addr[n]; - intptr_t br_disp = (intptr_t)(addr - jmp_rx) >> 2; - tcg_insn_unit insn; - - br_disp >>= 2; - if (check_fit_ptr(br_disp, 19)) { - /* ba,pt %icc, addr */ - insn = deposit32(INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A) - | BPCC_ICC | BPCC_PT, 0, 19, br_disp); - } else if (check_fit_ptr(br_disp, 22)) { - /* ba addr */ - insn = deposit32(INSN_OP(0) | INSN_OP2(2) | INSN_COND(COND_A), - 0, 22, br_disp); - } else { - /* The code_gen_buffer can't be larger than 2GB. 
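Context for the goto_tb change above and the tb_target_set_jmp_target body being deleted around this point: once the direct CALL is replaced by a load through TCG_REG_TB plus JMPL, retargeting a chained TB no longer rewrites instructions, so the per-target hook can become empty and no icache flush is required. A sketch, assuming the slot below is the word the emitted ld reads:

    /* Retargeting becomes a single atomic pointer store. */
    static void retarget_tb(uintptr_t *jmp_target_slot, uintptr_t new_dest)
    {
        qatomic_set(jmp_target_slot, new_dest);   /* no flush_idcache_range */
    }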
*/ - tcg_debug_assert(check_fit_ptr(br_disp, 30)); - /* call addr */ - insn = deposit32(CALL, 0, 30, br_disp); - } - - qatomic_set((uint32_t *)jmp_rw, insn); - flush_idcache_range(jmp_rx, jmp_rw, 4); } static void tcg_out_op(TCGContext *s, TCGOpcode opc, @@ -1628,15 +1376,21 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX); break; - case INDEX_op_qemu_ld_i32: - tcg_out_qemu_ld(s, a0, a1, a2, false); + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, a0, a1, a2, true); + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: + tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); break; - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, a0, a1, a2); + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); + break; + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: + tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); break; case INDEX_op_ld32s_i64: @@ -1669,17 +1423,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_divu_i64: c = ARITH_UDIVX; goto gen_arith; - case INDEX_op_ext_i32_i64: - case INDEX_op_ext32s_i64: - tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA); - break; - case INDEX_op_extu_i32_i64: - case INDEX_op_ext32u_i64: - tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL); - break; - case INDEX_op_extrl_i64_i32: - tcg_out_mov(s, TCG_TYPE_I32, a0, a1); - break; case INDEX_op_extrh_i64_i32: tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX); break; @@ -1722,8 +1465,21 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_call: /* Always emitted via tcg_out_call. */ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ + case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. 
*/ + case INDEX_op_ext8s_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: default: - tcg_abort(); + g_assert_not_reached(); } } @@ -1755,6 +1511,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_extu_i32_i64: case INDEX_op_extrl_i64_i32: case INDEX_op_extrh_i64_i32: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: return C_O1_I1(r, r); case INDEX_op_st8_i32: @@ -1764,6 +1524,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_st_i32: case INDEX_op_st32_i64: case INDEX_op_st_i64: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: return C_O0_I2(rZ, r); case INDEX_op_add_i32: @@ -1813,13 +1577,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_muluh_i64: return C_O1_I2(r, r, r); - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: - return C_O1_I1(r, s); - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: - return C_O0_I2(sZ, s); - default: g_assert_not_reached(); } @@ -1867,6 +1624,7 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_T3); /* for internal use */ } #define ELF_HOST_MACHINE EM_SPARCV9 diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h index ffe22b1d21..d454278811 100644 --- a/tcg/sparc64/tcg-target.h +++ b/tcg/sparc64/tcg-target.h @@ -26,7 +26,6 @@ #define SPARC_TCG_TARGET_H #define TCG_TARGET_INSN_UNIT_SIZE 4 -#define TCG_TARGET_TLB_DISPLACEMENT_BITS 32 #define TCG_TARGET_NB_REGS 32 #define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) @@ -151,10 +150,12 @@ extern bool use_vis3_instructions; #define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions #define TCG_TARGET_HAS_mulsh_i64 0 +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + #define TCG_AREG0 TCG_REG_I0 #define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_HAS_MEMORY_BSWAP 1 +#define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS #endif diff --git a/tcg/tcg-common.c b/tcg/tcg-common.c index aa0c4f60c9..35e7616ae9 100644 --- a/tcg/tcg-common.c +++ b/tcg/tcg-common.c @@ -27,7 +27,7 @@ TCGOpDef tcg_op_defs[] = { #define DEF(s, oargs, iargs, cargs, flags) \ - { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags }, + { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags, NULL }, #include "tcg/tcg-opc.h" #undef DEF }; diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h index e542a4e9b7..67b698bd5c 100644 --- a/tcg/tcg-internal.h +++ b/tcg/tcg-internal.h @@ -58,10 +58,6 @@ typedef struct TCGCallArgumentLoc { unsigned tmp_subindex : 2; } TCGCallArgumentLoc; -/* Avoid "unsigned < 0 is always false" Werror, when iarg_regs is empty. 
*/ -#define REG_P(L) \ - ((int)(L)->arg_slot < (int)ARRAY_SIZE(tcg_target_call_iarg_regs)) - typedef struct TCGHelperInfo { void *func; const char *name; @@ -130,4 +126,6 @@ static inline TCGv_i64 TCGV128_HIGH(TCGv_i128 t) return temp_tcgv_i64(tcgv_i128_temp(t) + o); } +bool tcg_target_has_memory_bswap(MemOp memop); + #endif /* TCG_INTERNAL_H */ diff --git a/tcg/tcg-ldst.c.inc b/tcg/tcg-ldst.c.inc index 6c6848d034..ffada04af0 100644 --- a/tcg/tcg-ldst.c.inc +++ b/tcg/tcg-ldst.c.inc @@ -20,20 +20,6 @@ * THE SOFTWARE. */ -typedef struct TCGLabelQemuLdst { - bool is_ld; /* qemu_ld: true, qemu_st: false */ - MemOpIdx oi; - TCGType type; /* result type of a load */ - TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */ - TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */ - TCGReg datalo_reg; /* reg index for low word to be loaded or stored */ - TCGReg datahi_reg; /* reg index for high word to be loaded or stored */ - const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */ - tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */ - QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next; -} TCGLabelQemuLdst; - - /* * Generate TB finalization at the end of block */ @@ -72,6 +58,7 @@ static inline TCGLabelQemuLdst *new_ldst_label(TCGContext *s) { TCGLabelQemuLdst *l = tcg_malloc(sizeof(*l)); + memset(l, 0, sizeof(*l)); QSIMPLEQ_INSERT_TAIL(&s->ldst_labels, l, next); return l; diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c index 079a761b04..f51bcaa87b 100644 --- a/tcg/tcg-op-gvec.c +++ b/tcg/tcg-op-gvec.c @@ -19,9 +19,9 @@ #include "qemu/osdep.h" #include "tcg/tcg.h" +#include "tcg/tcg-temp-internal.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" -#include "qemu/main-loop.h" #include "tcg/tcg-gvec-desc.h" #define MAX_UNROLL 4 @@ -117,8 +117,8 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs, TCGv_ptr a0, a1; TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); - a0 = tcg_temp_new_ptr(); - a1 = tcg_temp_new_ptr(); + a0 = tcg_temp_ebb_new_ptr(); + a1 = tcg_temp_ebb_new_ptr(); tcg_gen_addi_ptr(a0, cpu_env, dofs); tcg_gen_addi_ptr(a1, cpu_env, aofs); @@ -138,8 +138,8 @@ void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c, TCGv_ptr a0, a1; TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); - a0 = tcg_temp_new_ptr(); - a1 = tcg_temp_new_ptr(); + a0 = tcg_temp_ebb_new_ptr(); + a1 = tcg_temp_ebb_new_ptr(); tcg_gen_addi_ptr(a0, cpu_env, dofs); tcg_gen_addi_ptr(a1, cpu_env, aofs); @@ -158,9 +158,9 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, TCGv_ptr a0, a1, a2; TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); - a0 = tcg_temp_new_ptr(); - a1 = tcg_temp_new_ptr(); - a2 = tcg_temp_new_ptr(); + a0 = tcg_temp_ebb_new_ptr(); + a1 = tcg_temp_ebb_new_ptr(); + a2 = tcg_temp_ebb_new_ptr(); tcg_gen_addi_ptr(a0, cpu_env, dofs); tcg_gen_addi_ptr(a1, cpu_env, aofs); @@ -181,10 +181,10 @@ void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, TCGv_ptr a0, a1, a2, a3; TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); - a0 = tcg_temp_new_ptr(); - a1 = tcg_temp_new_ptr(); - a2 = tcg_temp_new_ptr(); - a3 = tcg_temp_new_ptr(); + a0 = tcg_temp_ebb_new_ptr(); + a1 = tcg_temp_ebb_new_ptr(); + a2 = tcg_temp_ebb_new_ptr(); + a3 = tcg_temp_ebb_new_ptr(); tcg_gen_addi_ptr(a0, cpu_env, dofs); tcg_gen_addi_ptr(a1, cpu_env, aofs); @@ -207,11 +207,11 @@ void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, TCGv_ptr a0, a1, a2, a3, a4; TCGv_i32 desc = 
tcg_constant_i32(simd_desc(oprsz, maxsz, data)); - a0 = tcg_temp_new_ptr(); - a1 = tcg_temp_new_ptr(); - a2 = tcg_temp_new_ptr(); - a3 = tcg_temp_new_ptr(); - a4 = tcg_temp_new_ptr(); + a0 = tcg_temp_ebb_new_ptr(); + a1 = tcg_temp_ebb_new_ptr(); + a2 = tcg_temp_ebb_new_ptr(); + a3 = tcg_temp_ebb_new_ptr(); + a4 = tcg_temp_ebb_new_ptr(); tcg_gen_addi_ptr(a0, cpu_env, dofs); tcg_gen_addi_ptr(a1, cpu_env, aofs); @@ -237,8 +237,8 @@ void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs, TCGv_ptr a0, a1; TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); - a0 = tcg_temp_new_ptr(); - a1 = tcg_temp_new_ptr(); + a0 = tcg_temp_ebb_new_ptr(); + a1 = tcg_temp_ebb_new_ptr(); tcg_gen_addi_ptr(a0, cpu_env, dofs); tcg_gen_addi_ptr(a1, cpu_env, aofs); @@ -258,9 +258,9 @@ void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs, TCGv_ptr a0, a1, a2; TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); - a0 = tcg_temp_new_ptr(); - a1 = tcg_temp_new_ptr(); - a2 = tcg_temp_new_ptr(); + a0 = tcg_temp_ebb_new_ptr(); + a1 = tcg_temp_ebb_new_ptr(); + a2 = tcg_temp_ebb_new_ptr(); tcg_gen_addi_ptr(a0, cpu_env, dofs); tcg_gen_addi_ptr(a1, cpu_env, aofs); @@ -283,10 +283,10 @@ void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs, TCGv_ptr a0, a1, a2, a3; TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); - a0 = tcg_temp_new_ptr(); - a1 = tcg_temp_new_ptr(); - a2 = tcg_temp_new_ptr(); - a3 = tcg_temp_new_ptr(); + a0 = tcg_temp_ebb_new_ptr(); + a1 = tcg_temp_ebb_new_ptr(); + a2 = tcg_temp_ebb_new_ptr(); + a3 = tcg_temp_ebb_new_ptr(); tcg_gen_addi_ptr(a0, cpu_env, dofs); tcg_gen_addi_ptr(a1, cpu_env, aofs); @@ -311,11 +311,11 @@ void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs, TCGv_ptr a0, a1, a2, a3, a4; TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); - a0 = tcg_temp_new_ptr(); - a1 = tcg_temp_new_ptr(); - a2 = tcg_temp_new_ptr(); - a3 = tcg_temp_new_ptr(); - a4 = tcg_temp_new_ptr(); + a0 = tcg_temp_ebb_new_ptr(); + a1 = tcg_temp_ebb_new_ptr(); + a2 = tcg_temp_ebb_new_ptr(); + a3 = tcg_temp_ebb_new_ptr(); + a4 = tcg_temp_ebb_new_ptr(); tcg_gen_addi_ptr(a0, cpu_env, dofs); tcg_gen_addi_ptr(a1, cpu_env, aofs); @@ -576,16 +576,16 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, be simple enough. */ if (TCG_TARGET_REG_BITS == 64 && (vece != MO_32 || !check_size_impl(oprsz, 4))) { - t_64 = tcg_temp_new_i64(); + t_64 = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(t_64, in_32); tcg_gen_dup_i64(vece, t_64, t_64); } else { - t_32 = tcg_temp_new_i32(); + t_32 = tcg_temp_ebb_new_i32(); tcg_gen_dup_i32(vece, t_32, in_32); } } else if (in_64) { /* We are given a 64-bit variable input. */ - t_64 = tcg_temp_new_i64(); + t_64 = tcg_temp_ebb_new_i64(); tcg_gen_dup_i64(vece, t_64, in_64); } else { /* We are given a constant input. */ @@ -620,7 +620,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, } /* Otherwise implement out of line. */ - t_ptr = tcg_temp_new_ptr(); + t_ptr = tcg_temp_ebb_new_ptr(); tcg_gen_addi_ptr(t_ptr, cpu_env, dofs); /* @@ -630,13 +630,13 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, * stores through to memset. 
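On the tcg_temp_new_* to tcg_temp_ebb_new_* conversions running through these gvec hunks: an EBB temporary is valid only within one extended basic block, i.e. it does not survive a label or conditional branch. These expansion helpers are straight-line code, so the cheaper temp kind suffices. Typical shape, as a sketch against the real tcg-op API:

    static void example_ebb_scratch(TCGv_i64 dst, TCGv_i32 src)
    {
        TCGv_i64 t = tcg_temp_ebb_new_i64();   /* dead at the next label */
        tcg_gen_extu_i32_i64(t, src);
        tcg_gen_add_i64(dst, dst, t);
        tcg_temp_free_i64(t);                  /* freed inside the same EBB */
    }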
*/ if (oprsz == maxsz && vece == MO_8) { - TCGv_ptr t_size = tcg_const_ptr(oprsz); + TCGv_ptr t_size = tcg_constant_ptr(oprsz); TCGv_i32 t_val; if (in_32) { t_val = in_32; } else if (in_64) { - t_val = tcg_temp_new_i32(); + t_val = tcg_temp_ebb_new_i32(); tcg_gen_extrl_i64_i32(t_val, in_64); } else { t_val = tcg_constant_i32(in_c); @@ -646,7 +646,6 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, if (in_64) { tcg_temp_free_i32(t_val); } - tcg_temp_free_ptr(t_size); tcg_temp_free_ptr(t_ptr); return; } @@ -671,7 +670,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, if (in_32) { fns[vece](t_ptr, t_desc, in_32); } else if (in_64) { - t_32 = tcg_temp_new_i32(); + t_32 = tcg_temp_ebb_new_i32(); tcg_gen_extrl_i64_i32(t_32, in_64); fns[vece](t_ptr, t_desc, t_32); tcg_temp_free_i32(t_32); @@ -1735,7 +1734,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs, do_dup_store(type, dofs, oprsz, maxsz, t_vec); tcg_temp_free_vec(t_vec); } else if (vece <= MO_32) { - TCGv_i32 in = tcg_temp_new_i32(); + TCGv_i32 in = tcg_temp_ebb_new_i32(); switch (vece) { case MO_8: tcg_gen_ld8u_i32(in, cpu_env, aofs); @@ -1750,7 +1749,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs, do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0); tcg_temp_free_i32(in); } else { - TCGv_i64 in = tcg_temp_new_i64(); + TCGv_i64 in = tcg_temp_ebb_new_i64(); tcg_gen_ld_i64(in, cpu_env, aofs); do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0); tcg_temp_free_i64(in); @@ -1769,8 +1768,8 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs, } tcg_temp_free_vec(in); } else { - TCGv_i64 in0 = tcg_temp_new_i64(); - TCGv_i64 in1 = tcg_temp_new_i64(); + TCGv_i64 in0 = tcg_temp_ebb_new_i64(); + TCGv_i64 in1 = tcg_temp_ebb_new_i64(); tcg_gen_ld_i64(in0, cpu_env, aofs); tcg_gen_ld_i64(in1, cpu_env, aofs + 8); @@ -1815,7 +1814,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs, int j; for (j = 0; j < 4; ++j) { - in[j] = tcg_temp_new_i64(); + in[j] = tcg_temp_ebb_new_i64(); tcg_gen_ld_i64(in[j], cpu_env, aofs + j * 8); } for (i = (aofs == dofs) * 32; i < oprsz; i += 32) { @@ -1860,9 +1859,9 @@ void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs, the 64-bit operation. 
*/ static void gen_addv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m) { - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 t2 = tcg_temp_new_i64(); - TCGv_i64 t3 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t2 = tcg_temp_ebb_new_i64(); + TCGv_i64 t3 = tcg_temp_ebb_new_i64(); tcg_gen_andc_i64(t1, a, m); tcg_gen_andc_i64(t2, b, m); @@ -1885,9 +1884,9 @@ void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80)); - TCGv_i32 t1 = tcg_temp_new_i32(); - TCGv_i32 t2 = tcg_temp_new_i32(); - TCGv_i32 t3 = tcg_temp_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); + TCGv_i32 t2 = tcg_temp_ebb_new_i32(); + TCGv_i32 t3 = tcg_temp_ebb_new_i32(); tcg_gen_andc_i32(t1, a, m); tcg_gen_andc_i32(t2, b, m); @@ -1909,8 +1908,8 @@ void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { - TCGv_i32 t1 = tcg_temp_new_i32(); - TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); + TCGv_i32 t2 = tcg_temp_ebb_new_i32(); tcg_gen_andi_i32(t1, a, ~0xffff); tcg_gen_add_i32(t2, a, b); @@ -1923,8 +1922,8 @@ void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t2 = tcg_temp_ebb_new_i64(); tcg_gen_andi_i64(t1, a, ~0xffffffffull); tcg_gen_add_i64(t2, a, b); @@ -2043,9 +2042,9 @@ void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs, Compare gen_addv_mask above. */ static void gen_subv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m) { - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 t2 = tcg_temp_new_i64(); - TCGv_i64 t3 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t2 = tcg_temp_ebb_new_i64(); + TCGv_i64 t3 = tcg_temp_ebb_new_i64(); tcg_gen_or_i64(t1, a, m); tcg_gen_andc_i64(t2, b, m); @@ -2068,9 +2067,9 @@ void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) void tcg_gen_vec_sub8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80)); - TCGv_i32 t1 = tcg_temp_new_i32(); - TCGv_i32 t2 = tcg_temp_new_i32(); - TCGv_i32 t3 = tcg_temp_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); + TCGv_i32 t2 = tcg_temp_ebb_new_i32(); + TCGv_i32 t3 = tcg_temp_ebb_new_i32(); tcg_gen_or_i32(t1, a, m); tcg_gen_andc_i32(t2, b, m); @@ -2092,8 +2091,8 @@ void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { - TCGv_i32 t1 = tcg_temp_new_i32(); - TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); + TCGv_i32 t2 = tcg_temp_ebb_new_i32(); tcg_gen_andi_i32(t1, b, ~0xffff); tcg_gen_sub_i32(t2, a, b); @@ -2106,8 +2105,8 @@ void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t2 = tcg_temp_ebb_new_i64(); tcg_gen_andi_i64(t1, b, ~0xffffffffull); tcg_gen_sub_i64(t2, a, b); @@ -2468,8 +2467,8 @@ void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs, Compare gen_subv_mask above. 
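gen_addv_mask above (and gen_subv_mask, its twin) does a lane-wise add inside a single 64-bit register: clearing each lane's sign bit first means no carry can propagate across a lane boundary, and the final xor restores the true top bit of each lane. A host-side self-check for byte lanes (m = dup_const(MO_8, 0x80)):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t addv_mask(uint64_t a, uint64_t b, uint64_t m)
    {
        uint64_t t1 = a & ~m;           /* lanes minus their sign bits */
        uint64_t t2 = b & ~m;
        uint64_t t3 = (a ^ b) & m;      /* carry-less sum of the sign bits */
        return (t1 + t2) ^ t3;          /* per-lane sums cannot carry out */
    }

    int main(void)
    {
        const uint64_t m = 0x8080808080808080ull;
        uint64_t a = 0x00ff7f8001234567ull, b = 0x01019080ffeeddccull;
        uint64_t d = addv_mask(a, b, m);
        for (int i = 0; i < 8; i++) {
            uint8_t ea = a >> (i * 8), eb = b >> (i * 8);
            assert((uint8_t)(d >> (i * 8)) == (uint8_t)(ea + eb));
        }
        return 0;
    }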
*/ static void gen_negv_mask(TCGv_i64 d, TCGv_i64 b, TCGv_i64 m) { - TCGv_i64 t2 = tcg_temp_new_i64(); - TCGv_i64 t3 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_ebb_new_i64(); + TCGv_i64 t3 = tcg_temp_ebb_new_i64(); tcg_gen_andc_i64(t3, m, b); tcg_gen_andc_i64(t2, b, m); @@ -2494,8 +2493,8 @@ void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 b) void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 b) { - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t2 = tcg_temp_ebb_new_i64(); tcg_gen_andi_i64(t1, b, ~0xffffffffull); tcg_gen_neg_i64(t2, b); @@ -2540,7 +2539,7 @@ void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs, static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece) { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); int nbit = 8 << vece; /* Create -1 for each negative element. */ @@ -2749,7 +2748,7 @@ static const GVecGen2s gop_ands = { void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) { - TCGv_i64 tmp = tcg_temp_new_i64(); + TCGv_i64 tmp = tcg_temp_ebb_new_i64(); tcg_gen_dup_i64(vece, tmp, c); tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands); tcg_temp_free_i64(tmp); @@ -2762,6 +2761,23 @@ void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs, tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands); } +void tcg_gen_gvec_andcs(unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) +{ + static GVecGen2s g = { + .fni8 = tcg_gen_andc_i64, + .fniv = tcg_gen_andc_vec, + .fno = gen_helper_gvec_andcs, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 + }; + + TCGv_i64 tmp = tcg_temp_ebb_new_i64(); + tcg_gen_dup_i64(vece, tmp, c); + tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &g); + tcg_temp_free_i64(tmp); +} + static const GVecGen2s gop_xors = { .fni8 = tcg_gen_xor_i64, .fniv = tcg_gen_xor_vec, @@ -2773,7 +2789,7 @@ static const GVecGen2s gop_xors = { void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) { - TCGv_i64 tmp = tcg_temp_new_i64(); + TCGv_i64 tmp = tcg_temp_ebb_new_i64(); tcg_gen_dup_i64(vece, tmp, c); tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors); tcg_temp_free_i64(tmp); @@ -2797,7 +2813,7 @@ static const GVecGen2s gop_ors = { void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) { - TCGv_i64 tmp = tcg_temp_new_i64(); + TCGv_i64 tmp = tcg_temp_ebb_new_i64(); tcg_gen_dup_i64(vece, tmp, c); tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors); tcg_temp_free_i64(tmp); @@ -2944,7 +2960,7 @@ void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c) { uint64_t s_mask = dup_const(MO_8, 0x80 >> c); uint64_t c_mask = dup_const(MO_8, 0xff >> c); - TCGv_i64 s = tcg_temp_new_i64(); + TCGv_i64 s = tcg_temp_ebb_new_i64(); tcg_gen_shri_i64(d, a, c); tcg_gen_andi_i64(s, d, s_mask); /* isolate (shifted) sign bit */ @@ -2958,7 +2974,7 @@ void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c) { uint64_t s_mask = dup_const(MO_16, 0x8000 >> c); uint64_t c_mask = dup_const(MO_16, 0xffff >> c); - TCGv_i64 s = tcg_temp_new_i64(); + TCGv_i64 s = tcg_temp_ebb_new_i64(); tcg_gen_shri_i64(d, a, c); tcg_gen_andi_i64(s, d, s_mask); /* isolate (shifted) sign bit */ @@ -2972,7 +2988,7 @@ void tcg_gen_vec_sar8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c) { uint32_t s_mask = dup_const(MO_8, 0x80 >> c); uint32_t c_mask =
dup_const(MO_8, 0xff >> c); - TCGv_i32 s = tcg_temp_new_i32(); + TCGv_i32 s = tcg_temp_ebb_new_i32(); tcg_gen_shri_i32(d, a, c); tcg_gen_andi_i32(s, d, s_mask); /* isolate (shifted) sign bit */ @@ -2986,7 +3002,7 @@ void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c) { uint32_t s_mask = dup_const(MO_16, 0x8000 >> c); uint32_t c_mask = dup_const(MO_16, 0xffff >> c); - TCGv_i32 s = tcg_temp_new_i32(); + TCGv_i32 s = tcg_temp_ebb_new_i32(); tcg_gen_shri_i32(d, a, c); tcg_gen_andi_i32(s, d, s_mask); /* isolate (shifted) sign bit */ @@ -3180,7 +3196,7 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift, TCGv_vec v_shift = tcg_temp_new_vec(type); if (vece == MO_64) { - TCGv_i64 sh64 = tcg_temp_new_i64(); + TCGv_i64 sh64 = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(sh64, shift); tcg_gen_dup_i64_vec(MO_64, v_shift, sh64); tcg_temp_free_i64(sh64); @@ -3221,14 +3237,14 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift, if (vece == MO_32 && check_size_impl(oprsz, 4)) { expand_2s_i32(dofs, aofs, oprsz, shift, false, g->fni4); } else if (vece == MO_64 && check_size_impl(oprsz, 8)) { - TCGv_i64 sh64 = tcg_temp_new_i64(); + TCGv_i64 sh64 = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(sh64, shift); expand_2s_i64(dofs, aofs, oprsz, sh64, false, g->fni8); tcg_temp_free_i64(sh64); } else { - TCGv_ptr a0 = tcg_temp_new_ptr(); - TCGv_ptr a1 = tcg_temp_new_ptr(); - TCGv_i32 desc = tcg_temp_new_i32(); + TCGv_ptr a0 = tcg_temp_ebb_new_ptr(); + TCGv_ptr a1 = tcg_temp_ebb_new_ptr(); + TCGv_i32 desc = tcg_temp_ebb_new_i32(); tcg_gen_shli_i32(desc, shift, SIMD_DATA_SHIFT); tcg_gen_ori_i32(desc, desc, simd_desc(oprsz, maxsz, 0)); @@ -3337,6 +3353,17 @@ void tcg_gen_gvec_rotls(unsigned vece, uint32_t dofs, uint32_t aofs, do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g); } +void tcg_gen_gvec_rotrs(unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz) +{ + TCGv_i32 tmp = tcg_temp_ebb_new_i32(); + + tcg_gen_neg_i32(tmp, shift); + tcg_gen_andi_i32(tmp, tmp, (8 << vece) - 1); + tcg_gen_gvec_rotls(vece, dofs, aofs, tmp, oprsz, maxsz); + tcg_temp_free_i32(tmp); +} + /* * Expand D = A << (B % element bits) * @@ -3360,7 +3387,7 @@ static void tcg_gen_shlv_mod_vec(unsigned vece, TCGv_vec d, static void tcg_gen_shl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { - TCGv_i32 t = tcg_temp_new_i32(); + TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_andi_i32(t, b, 31); tcg_gen_shl_i32(d, a, t); @@ -3369,7 +3396,7 @@ static void tcg_gen_shl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) static void tcg_gen_shl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_andi_i64(t, b, 63); tcg_gen_shl_i64(d, a, t); @@ -3423,7 +3450,7 @@ static void tcg_gen_shrv_mod_vec(unsigned vece, TCGv_vec d, static void tcg_gen_shr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { - TCGv_i32 t = tcg_temp_new_i32(); + TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_andi_i32(t, b, 31); tcg_gen_shr_i32(d, a, t); @@ -3432,7 +3459,7 @@ static void tcg_gen_shr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) static void tcg_gen_shr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_andi_i64(t, b, 63); tcg_gen_shr_i64(d, a, t); @@ -3486,7 +3513,7 @@ static void tcg_gen_sarv_mod_vec(unsigned vece, TCGv_vec d, static void tcg_gen_sar_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { - TCGv_i32 t = tcg_temp_new_i32(); + 
TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_andi_i32(t, b, 31); tcg_gen_sar_i32(d, a, t); @@ -3495,7 +3522,7 @@ static void tcg_gen_sar_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) static void tcg_gen_sar_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_andi_i64(t, b, 63); tcg_gen_sar_i64(d, a, t); @@ -3549,7 +3576,7 @@ static void tcg_gen_rotlv_mod_vec(unsigned vece, TCGv_vec d, static void tcg_gen_rotl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { - TCGv_i32 t = tcg_temp_new_i32(); + TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_andi_i32(t, b, 31); tcg_gen_rotl_i32(d, a, t); @@ -3558,7 +3585,7 @@ static void tcg_gen_rotl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) static void tcg_gen_rotl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_andi_i64(t, b, 63); tcg_gen_rotl_i64(d, a, t); @@ -3608,7 +3635,7 @@ static void tcg_gen_rotrv_mod_vec(unsigned vece, TCGv_vec d, static void tcg_gen_rotr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) { - TCGv_i32 t = tcg_temp_new_i32(); + TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_andi_i32(t, b, 31); tcg_gen_rotr_i32(d, a, t); @@ -3617,7 +3644,7 @@ static void tcg_gen_rotr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) static void tcg_gen_rotr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_andi_i64(t, b, 63); tcg_gen_rotr_i64(d, a, t); @@ -3658,8 +3685,8 @@ void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs, static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, TCGCond cond) { - TCGv_i32 t0 = tcg_temp_new_i32(); - TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); uint32_t i; for (i = 0; i < oprsz; i += 4) { @@ -3676,8 +3703,8 @@ static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs, static void expand_cmp_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, TCGCond cond) { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); uint32_t i; for (i = 0; i < oprsz; i += 8) { @@ -3823,7 +3850,7 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs, static void tcg_gen_bitsel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c) { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_and_i64(t, b, a); tcg_gen_andc_i64(d, c, a); diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c new file mode 100644 index 0000000000..fce21da564 --- /dev/null +++ b/tcg/tcg-op-ldst.c @@ -0,0 +1,1261 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "exec/exec-all.h" +#include "tcg/tcg.h" +#include "tcg/tcg-temp-internal.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-mo.h" +#include "exec/plugin-gen.h" +#include "tcg-internal.h" + + +static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) +{ + /* Trigger the asserts within as early as possible. */ + unsigned a_bits = get_alignment_bits(op); + + /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */ + if (a_bits == (op & MO_SIZE)) { + op = (op & ~MO_AMASK) | MO_ALIGN; + } + + switch (op & MO_SIZE) { + case MO_8: + op &= ~MO_BSWAP; + break; + case MO_16: + break; + case MO_32: + if (!is64) { + op &= ~MO_SIGN; + } + break; + case MO_64: + if (is64) { + op &= ~MO_SIGN; + break; + } + /* fall through */ + default: + g_assert_not_reached(); + } + if (st) { + op &= ~MO_SIGN; + } + return op; +} + +static void gen_ldst(TCGOpcode opc, TCGTemp *vl, TCGTemp *vh, + TCGTemp *addr, MemOpIdx oi) +{ + if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) { + if (vh) { + tcg_gen_op4(opc, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi); + } else { + tcg_gen_op3(opc, temp_arg(vl), temp_arg(addr), oi); + } + } else { + /* See TCGV_LOW/HIGH. */ + TCGTemp *al = addr + HOST_BIG_ENDIAN; + TCGTemp *ah = addr + !HOST_BIG_ENDIAN; + + if (vh) { + tcg_gen_op5(opc, temp_arg(vl), temp_arg(vh), + temp_arg(al), temp_arg(ah), oi); + } else { + tcg_gen_op4(opc, temp_arg(vl), temp_arg(al), temp_arg(ah), oi); + } + } +} + +static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi) +{ + if (TCG_TARGET_REG_BITS == 32) { + TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v)); + TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v)); + gen_ldst(opc, vl, vh, addr, oi); + } else { + gen_ldst(opc, tcgv_i64_temp(v), NULL, addr, oi); + } +} + +static void tcg_gen_req_mo(TCGBar type) +{ +#ifdef TCG_GUEST_DEFAULT_MO + type &= TCG_GUEST_DEFAULT_MO; +#endif + type &= ~TCG_TARGET_DEFAULT_MO; + if (type) { + tcg_gen_mb(type | TCG_BAR_SC); + } +} + +/* Only required for loads, where value might overlap addr. */ +static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr) +{ +#ifdef CONFIG_PLUGIN + if (tcg_ctx->plugin_insn != NULL) { + /* Save a copy of the vaddr for use after a load. 
*/ + TCGv_i64 temp = tcg_temp_ebb_new_i64(); + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr)); + } else { + tcg_gen_mov_i64(temp, temp_tcgv_i64(addr)); + } + return temp; + } +#endif + return NULL; +} + +static void +plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi, + enum qemu_plugin_mem_rw rw) +{ +#ifdef CONFIG_PLUGIN + if (tcg_ctx->plugin_insn != NULL) { + qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw); + + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + if (!copy_addr) { + copy_addr = tcg_temp_ebb_new_i64(); + tcg_gen_extu_i32_i64(copy_addr, temp_tcgv_i32(orig_addr)); + } + plugin_gen_empty_mem_callback(copy_addr, info); + tcg_temp_free_i64(copy_addr); + } else { + if (copy_addr) { + plugin_gen_empty_mem_callback(copy_addr, info); + tcg_temp_free_i64(copy_addr); + } else { + plugin_gen_empty_mem_callback(temp_tcgv_i64(orig_addr), info); + } + } + } +#endif +} + +//// --- Begin LibAFL code --- + +void libafl_gen_read(TCGTemp *addr, MemOpIdx oi); +void libafl_gen_write(TCGTemp *addr, MemOpIdx oi); + +//// --- End LibAFL code --- + +static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr, + TCGArg idx, MemOp memop) +{ + MemOp orig_memop; + MemOpIdx orig_oi, oi; + TCGv_i64 copy_addr; + TCGOpcode opc; + + tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0); + orig_oi = oi = make_memop_idx(memop, idx); + + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { + memop &= ~MO_BSWAP; + /* The bswap primitive benefits from zero-extended input. */ + if ((memop & MO_SSIZE) == MO_SW) { + memop &= ~MO_SIGN; + } + oi = make_memop_idx(memop, idx); + } + + copy_addr = plugin_maybe_preserve_addr(addr); + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_ld_a32_i32; + } else { + opc = INDEX_op_qemu_ld_a64_i32; + } + gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi); + plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R); + +//// --- Begin LibAFL code --- + + libafl_gen_read(addr, oi); + +//// --- End LibAFL code --- + + if ((orig_memop ^ memop) & MO_BSWAP) { + switch (orig_memop & MO_SIZE) { + case MO_16: + tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN + ? 
TCG_BSWAP_IZ | TCG_BSWAP_OS + : TCG_BSWAP_IZ | TCG_BSWAP_OZ)); + break; + case MO_32: + tcg_gen_bswap32_i32(val, val); + break; + default: + g_assert_not_reached(); + } + } +} + +void tcg_gen_qemu_ld_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx, + MemOp memop, TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) <= MO_32); + tcg_gen_qemu_ld_i32_int(val, addr, idx, memop); +} + +static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr, + TCGArg idx, MemOp memop) +{ + TCGv_i32 swap = NULL; + MemOpIdx orig_oi, oi; + TCGOpcode opc; + + tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + memop = tcg_canonicalize_memop(memop, 0, 1); + orig_oi = oi = make_memop_idx(memop, idx); + + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { + swap = tcg_temp_ebb_new_i32(); + switch (memop & MO_SIZE) { + case MO_16: + tcg_gen_bswap16_i32(swap, val, 0); + break; + case MO_32: + tcg_gen_bswap32_i32(swap, val); + break; + default: + g_assert_not_reached(); + } + val = swap; + memop &= ~MO_BSWAP; + oi = make_memop_idx(memop, idx); + } + + if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) { + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_st8_a32_i32; + } else { + opc = INDEX_op_qemu_st8_a64_i32; + } + } else { + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_st_a32_i32; + } else { + opc = INDEX_op_qemu_st_a64_i32; + } + } + gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi); + plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W); + +//// --- Begin LibAFL code --- + + libafl_gen_write(addr, oi); + +//// --- End LibAFL code --- + + if (swap) { + tcg_temp_free_i32(swap); + } +} + +void tcg_gen_qemu_st_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx, + MemOp memop, TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) <= MO_32); + tcg_gen_qemu_st_i32_int(val, addr, idx, memop); +} + +static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr, + TCGArg idx, MemOp memop) +{ + MemOp orig_memop; + MemOpIdx orig_oi, oi; + TCGv_i64 copy_addr; + TCGOpcode opc; + + if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { + tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop); + if (memop & MO_SIGN) { + tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31); + } else { + tcg_gen_movi_i32(TCGV_HIGH(val), 0); + } + return; + } + + tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + orig_memop = memop = tcg_canonicalize_memop(memop, 1, 0); + orig_oi = oi = make_memop_idx(memop, idx); + + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { + memop &= ~MO_BSWAP; + /* The bswap primitive benefits from zero-extended input. */ + if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) { + memop &= ~MO_SIGN; + } + oi = make_memop_idx(memop, idx); + } + + copy_addr = plugin_maybe_preserve_addr(addr); + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_ld_a32_i64; + } else { + opc = INDEX_op_qemu_ld_a64_i64; + } + gen_ldst_i64(opc, val, addr, oi); + plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R); + +//// --- Begin LibAFL code --- + + libafl_gen_read(addr, oi); + +//// --- End LibAFL code --- + + if ((orig_memop ^ memop) & MO_BSWAP) { + int flags = (orig_memop & MO_SIGN + ? 
TCG_BSWAP_IZ | TCG_BSWAP_OS
+                     : TCG_BSWAP_IZ | TCG_BSWAP_OZ);
+        switch (orig_memop & MO_SIZE) {
+        case MO_16:
+            tcg_gen_bswap16_i64(val, val, flags);
+            break;
+        case MO_32:
+            tcg_gen_bswap32_i64(val, val, flags);
+            break;
+        case MO_64:
+            tcg_gen_bswap64_i64(val, val);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    }
+}
+
+void tcg_gen_qemu_ld_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
+                             MemOp memop, TCGType addr_type)
+{
+    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
+    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
+    tcg_gen_qemu_ld_i64_int(val, addr, idx, memop);
+}
+
+static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
+                                    TCGArg idx, MemOp memop)
+{
+    TCGv_i64 swap = NULL;
+    MemOpIdx orig_oi, oi;
+    TCGOpcode opc;
+
+    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
+        tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
+        return;
+    }
+
+    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    memop = tcg_canonicalize_memop(memop, 1, 1);
+    orig_oi = oi = make_memop_idx(memop, idx);
+
+    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
+        swap = tcg_temp_ebb_new_i64();
+        switch (memop & MO_SIZE) {
+        case MO_16:
+            tcg_gen_bswap16_i64(swap, val, 0);
+            break;
+        case MO_32:
+            tcg_gen_bswap32_i64(swap, val, 0);
+            break;
+        case MO_64:
+            tcg_gen_bswap64_i64(swap, val);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        val = swap;
+        memop &= ~MO_BSWAP;
+        oi = make_memop_idx(memop, idx);
+    }
+
+    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+        opc = INDEX_op_qemu_st_a32_i64;
+    } else {
+        opc = INDEX_op_qemu_st_a64_i64;
+    }
+    gen_ldst_i64(opc, val, addr, oi);
+    plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
+
+//// --- Begin LibAFL code ---
+
+    libafl_gen_write(addr, oi);
+
+//// --- End LibAFL code ---
+
+    if (swap) {
+        tcg_temp_free_i64(swap);
+    }
+}
+
+void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
+                             MemOp memop, TCGType addr_type)
+{
+    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
+    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
+    tcg_gen_qemu_st_i64_int(val, addr, idx, memop);
+}
+
+/*
+ * Return true if @mop, without knowledge of the pointer alignment,
+ * does not require 16-byte atomicity, and it would be advantageous
+ * to avoid a call to a helper function.
+ */
+static bool use_two_i64_for_i128(MemOp mop)
+{
+#ifdef CONFIG_SOFTMMU
+    /* Two softmmu tlb lookups are larger than one function call. */
+    return false;
+#else
+    /*
+     * For user-only, two 64-bit operations may well be smaller than a call.
+     * Determine if that would be legal for the requested atomicity.
+     */
+    switch (mop & MO_ATOM_MASK) {
+    case MO_ATOM_NONE:
+    case MO_ATOM_IFALIGN_PAIR:
+        return true;
+    case MO_ATOM_IFALIGN:
+    case MO_ATOM_SUBALIGN:
+    case MO_ATOM_WITHIN16:
+    case MO_ATOM_WITHIN16_PAIR:
+        /* In a serialized context, no atomicity is required. */
+        return !(tcg_ctx->gen_tb->cflags & CF_PARALLEL);
+    default:
+        g_assert_not_reached();
+    }
+#endif
+}
+
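use_two_i64_for_i128() above is a pure predicate on the requested atomicity:
splitting an i128 access into two i64 accesses is legal only when the access
asks for no 16-byte atomicity (MO_ATOM_NONE, or MO_ATOM_IFALIGN_PAIR, which
already applies to the two halves), or when the translation block runs
serialized, so no other vCPU can observe a torn access.  A standalone sketch
of that decision in plain C, using mock enumerators rather than the real
MemOp encoding:

    #include <stdbool.h>
    #include <stdio.h>

    enum atom { ATOM_NONE, ATOM_IFALIGN, ATOM_IFALIGN_PAIR,
                ATOM_WITHIN16, ATOM_WITHIN16_PAIR, ATOM_SUBALIGN };

    static bool two_i64_ok(enum atom a, bool parallel)
    {
        switch (a) {
        case ATOM_NONE:              /* no atomicity requested */
        case ATOM_IFALIGN_PAIR:      /* atomicity of the two halves only */
            return true;
        default:                     /* whole-16-byte atomicity requested */
            return !parallel;        /* safe only in a serialized context */
        }
    }

    int main(void)
    {
        printf("%d %d %d\n",
               two_i64_ok(ATOM_NONE, true),       /* 1: always splittable */
               two_i64_ok(ATOM_WITHIN16, true),   /* 0: helper required   */
               two_i64_ok(ATOM_WITHIN16, false)); /* 1: serialized        */
        return 0;
    }

The canonicalize_memop_i128_as_i64() function that follows then derives the
two 64-bit MemOps for the split; for example, MO_128 | MO_ALIGN becomes a
first op with 16-byte alignment and a second with 8-byte alignment.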
+static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
+{
+    MemOp mop_1 = orig, mop_2;
+
+    /* Reduce the size to 64-bit. */
+    mop_1 = (mop_1 & ~MO_SIZE) | MO_64;
+
+    /* Retain the alignment constraints of the original. */
+    switch (orig & MO_AMASK) {
+    case MO_UNALN:
+    case MO_ALIGN_2:
+    case MO_ALIGN_4:
+        mop_2 = mop_1;
+        break;
+    case MO_ALIGN_8:
+        /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */
+        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
+        mop_2 = mop_1;
+        break;
+    case MO_ALIGN:
+        /* Second has 8-byte alignment; first has 16-byte alignment. */
+        mop_2 = mop_1;
+        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16;
+        break;
+    case MO_ALIGN_16:
+    case MO_ALIGN_32:
+    case MO_ALIGN_64:
+        /* Second has 8-byte alignment; first retains original. */
+        mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    /* Use a memory ordering implemented by the host. */
+    if ((orig & MO_BSWAP) && !tcg_target_has_memory_bswap(mop_1)) {
+        mop_1 &= ~MO_BSWAP;
+        mop_2 &= ~MO_BSWAP;
+    }
+
+    ret[0] = mop_1;
+    ret[1] = mop_2;
+}
+
+static TCGv_i64 maybe_extend_addr64(TCGTemp *addr)
+{
+    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+        TCGv_i64 a64 = tcg_temp_ebb_new_i64();
+        tcg_gen_extu_i32_i64(a64, temp_tcgv_i32(addr));
+        return a64;
+    }
+    return temp_tcgv_i64(addr);
+}
+
+static void maybe_free_addr64(TCGv_i64 a64)
+{
+    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+        tcg_temp_free_i64(a64);
+    }
+}
+
+static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
+                                     TCGArg idx, MemOp memop)
+{
+    const MemOpIdx orig_oi = make_memop_idx(memop, idx);
+    TCGv_i64 ext_addr = NULL;
+    TCGOpcode opc;
+
+    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+
+    /* TODO: For now, force 32-bit hosts to use the helper. */
+    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
+        TCGv_i64 lo, hi;
+        bool need_bswap = false;
+        MemOpIdx oi = orig_oi;
+
+        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
+            lo = TCGV128_HIGH(val);
+            hi = TCGV128_LOW(val);
+            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
+            need_bswap = true;
+        } else {
+            lo = TCGV128_LOW(val);
+            hi = TCGV128_HIGH(val);
+        }
+
+        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+            opc = INDEX_op_qemu_ld_a32_i128;
+        } else {
+            opc = INDEX_op_qemu_ld_a64_i128;
+        }
+        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
+
+        if (need_bswap) {
+            tcg_gen_bswap64_i64(lo, lo);
+            tcg_gen_bswap64_i64(hi, hi);
+        }
+    } else if (use_two_i64_for_i128(memop)) {
+        MemOp mop[2];
+        TCGTemp *addr_p8;
+        TCGv_i64 x, y;
+        bool need_bswap;
+
+        canonicalize_memop_i128_as_i64(mop, memop);
+        need_bswap = (mop[0] ^ memop) & MO_BSWAP;
+
+        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+            opc = INDEX_op_qemu_ld_a32_i64;
+        } else {
+            opc = INDEX_op_qemu_ld_a64_i64;
+        }
+
+        /*
+         * Since there are no global TCGv_i128, there is no visible state
+         * changed if the second load faults.  Load directly into the two
+         * subwords.
+ */ + if ((memop & MO_BSWAP) == MO_LE) { + x = TCGV128_LOW(val); + y = TCGV128_HIGH(val); + } else { + x = TCGV128_HIGH(val); + y = TCGV128_LOW(val); + } + + gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx)); + + if (need_bswap) { + tcg_gen_bswap64_i64(x, x); + } + + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + TCGv_i32 t = tcg_temp_ebb_new_i32(); + tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8); + addr_p8 = tcgv_i32_temp(t); + } else { + TCGv_i64 t = tcg_temp_ebb_new_i64(); + tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8); + addr_p8 = tcgv_i64_temp(t); + } + + gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx)); + tcg_temp_free_internal(addr_p8); + + if (need_bswap) { + tcg_gen_bswap64_i64(y, y); + } + } else { + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + ext_addr = tcg_temp_ebb_new_i64(); + tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr)); + addr = tcgv_i64_temp(ext_addr); + } + gen_helper_ld_i128(val, cpu_env, temp_tcgv_i64(addr), + tcg_constant_i32(orig_oi)); + } + + plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R); +} + +void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx, + MemOp memop, TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) == MO_128); + tcg_debug_assert((memop & MO_SIGN) == 0); + tcg_gen_qemu_ld_i128_int(val, addr, idx, memop); +} + +static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, + TCGArg idx, MemOp memop) +{ + const MemOpIdx orig_oi = make_memop_idx(memop, idx); + TCGv_i64 ext_addr = NULL; + TCGOpcode opc; + + tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST); + + /* TODO: For now, force 32-bit hosts to use the helper. */ + + if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) { + TCGv_i64 lo, hi; + MemOpIdx oi = orig_oi; + bool need_bswap = false; + + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { + lo = tcg_temp_ebb_new_i64(); + hi = tcg_temp_ebb_new_i64(); + tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val)); + tcg_gen_bswap64_i64(hi, TCGV128_LOW(val)); + oi = make_memop_idx(memop & ~MO_BSWAP, idx); + need_bswap = true; + } else { + lo = TCGV128_LOW(val); + hi = TCGV128_HIGH(val); + } + + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_st_a32_i128; + } else { + opc = INDEX_op_qemu_st_a64_i128; + } + gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi); + + if (need_bswap) { + tcg_temp_free_i64(lo); + tcg_temp_free_i64(hi); + } + } else if (use_two_i64_for_i128(memop)) { + MemOp mop[2]; + TCGTemp *addr_p8; + TCGv_i64 x, y, b = NULL; + + canonicalize_memop_i128_as_i64(mop, memop); + + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_st_a32_i64; + } else { + opc = INDEX_op_qemu_st_a64_i64; + } + + if ((memop & MO_BSWAP) == MO_LE) { + x = TCGV128_LOW(val); + y = TCGV128_HIGH(val); + } else { + x = TCGV128_HIGH(val); + y = TCGV128_LOW(val); + } + + if ((mop[0] ^ memop) & MO_BSWAP) { + b = tcg_temp_ebb_new_i64(); + tcg_gen_bswap64_i64(b, x); + x = b; + } + + gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx)); + + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + TCGv_i32 t = tcg_temp_ebb_new_i32(); + tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8); + addr_p8 = tcgv_i32_temp(t); + } else { + TCGv_i64 t = tcg_temp_ebb_new_i64(); + tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8); + addr_p8 = tcgv_i64_temp(t); + } + + if (b) { + tcg_gen_bswap64_i64(b, y); + gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx)); + tcg_temp_free_i64(b); + } else { + gen_ldst_i64(opc, y, addr_p8, 
make_memop_idx(mop[1], idx));
+        }
+        tcg_temp_free_internal(addr_p8);
+    } else {
+        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+            ext_addr = tcg_temp_ebb_new_i64();
+            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
+            addr = tcgv_i64_temp(ext_addr);
+        }
+        gen_helper_st_i128(cpu_env, temp_tcgv_i64(addr), val,
+                           tcg_constant_i32(orig_oi));
+    }
+
+    plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_W);
+}
+
+void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
+                              MemOp memop, TCGType addr_type)
+{
+    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
+    tcg_debug_assert((memop & MO_SIZE) == MO_128);
+    tcg_debug_assert((memop & MO_SIGN) == 0);
+    tcg_gen_qemu_st_i128_int(val, addr, idx, memop);
+}
+
+static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
+{
+    switch (opc & MO_SSIZE) {
+    case MO_SB:
+        tcg_gen_ext8s_i32(ret, val);
+        break;
+    case MO_UB:
+        tcg_gen_ext8u_i32(ret, val);
+        break;
+    case MO_SW:
+        tcg_gen_ext16s_i32(ret, val);
+        break;
+    case MO_UW:
+        tcg_gen_ext16u_i32(ret, val);
+        break;
+    default:
+        tcg_gen_mov_i32(ret, val);
+        break;
+    }
+}
+
+static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
+{
+    switch (opc & MO_SSIZE) {
+    case MO_SB:
+        tcg_gen_ext8s_i64(ret, val);
+        break;
+    case MO_UB:
+        tcg_gen_ext8u_i64(ret, val);
+        break;
+    case MO_SW:
+        tcg_gen_ext16s_i64(ret, val);
+        break;
+    case MO_UW:
+        tcg_gen_ext16u_i64(ret, val);
+        break;
+    case MO_SL:
+        tcg_gen_ext32s_i64(ret, val);
+        break;
+    case MO_UL:
+        tcg_gen_ext32u_i64(ret, val);
+        break;
+    default:
+        tcg_gen_mov_i64(ret, val);
+        break;
+    }
+}
+
+typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv_i64,
+                                  TCGv_i32, TCGv_i32, TCGv_i32);
+typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv_i64,
+                                  TCGv_i64, TCGv_i64, TCGv_i32);
+typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv_i64,
+                                   TCGv_i128, TCGv_i128, TCGv_i32);
+typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64,
+                                  TCGv_i32, TCGv_i32);
+typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64,
+                                  TCGv_i64, TCGv_i32);
+
+#ifdef CONFIG_ATOMIC64
+# define WITH_ATOMIC64(X) X,
+#else
+# define WITH_ATOMIC64(X)
+#endif
+#ifdef CONFIG_CMPXCHG128
+# define WITH_ATOMIC128(X) X,
+#else
+# define WITH_ATOMIC128(X)
+#endif
+
+static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
+    [MO_8] = gen_helper_atomic_cmpxchgb,
+    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
+    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
+    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
+    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
+    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
+    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
+    WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le)
+    WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be)
+};
+
+static void tcg_gen_nonatomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
+                                              TCGv_i32 cmpv, TCGv_i32 newv,
+                                              TCGArg idx, MemOp memop)
+{
+    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+    TCGv_i32 t2 = tcg_temp_ebb_new_i32();
+
+    tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);
+
+    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop & ~MO_SIGN);
+    tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
+    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);
+    tcg_temp_free_i32(t2);
+
+    if (memop & MO_SIGN) {
+        tcg_gen_ext_i32(retv, t1, memop);
+    } else {
+        tcg_gen_mov_i32(retv, t1);
+    }
+    tcg_temp_free_i32(t1);
+}
+
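tcg_gen_nonatomic_cmpxchg_i32_int() above lowers compare-and-swap with no
atomicity guarantee: load the old value, pick between new and old with a
branchless movcond, store the winner unconditionally, and return the old
value.  The same dataflow in plain C; cmpxchg_model() is an illustrative
name, and the size canonicalization and sign-extension steps are elided:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t cmpxchg_model(uint32_t *addr, uint32_t cmp, uint32_t newv)
    {
        uint32_t old = *addr;                     /* qemu_ld               */
        uint32_t st = (old == cmp) ? newv : old;  /* movcond TCG_COND_EQ   */
        *addr = st;                               /* unconditional qemu_st */
        return old;
    }

    int main(void)
    {
        uint32_t mem = 41;
        uint32_t prev = cmpxchg_model(&mem, 41, 42);
        printf("prev=%u mem=%u\n", prev, mem);   /* prev=41 mem=42: success */
        prev = cmpxchg_model(&mem, 41, 43);
        printf("prev=%u mem=%u\n", prev, mem);   /* prev=42 mem=42: failure */
        return 0;
    }

The unconditional store keeps the opcode stream branch-free: when the
comparison fails, the old value is simply written back.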
+void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
+                                       TCGv_i32 cmpv, TCGv_i32 newv,
+                                       TCGArg idx, MemOp memop,
+                                       TCGType addr_type)
+{
+    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
+    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
+    tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
+}
+
+static void tcg_gen_atomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
+                                           TCGv_i32 cmpv, TCGv_i32 newv,
+                                           TCGArg idx, MemOp memop)
+{
+    gen_atomic_cx_i32 gen;
+    TCGv_i64 a64;
+    MemOpIdx oi;
+
+    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
+        tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
+        return;
+    }
+
+    memop = tcg_canonicalize_memop(memop, 0, 0);
+    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
+    tcg_debug_assert(gen != NULL);
+
+    oi = make_memop_idx(memop & ~MO_SIGN, idx);
+    a64 = maybe_extend_addr64(addr);
+    gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
+    maybe_free_addr64(a64);
+
+    if (memop & MO_SIGN) {
+        tcg_gen_ext_i32(retv, retv, memop);
+    }
+}
+
+void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
+                                    TCGv_i32 cmpv, TCGv_i32 newv,
+                                    TCGArg idx, MemOp memop,
+                                    TCGType addr_type)
+{
+    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
+    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
+    tcg_gen_atomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
+}
+
+static void tcg_gen_nonatomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
+                                              TCGv_i64 cmpv, TCGv_i64 newv,
+                                              TCGArg idx, MemOp memop)
+{
+    TCGv_i64 t1, t2;
+
+    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
+        tcg_gen_nonatomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
+                                          TCGV_LOW(newv), idx, memop);
+        if (memop & MO_SIGN) {
+            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
+        } else {
+            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
+        }
+        return;
+    }
+
+    t1 = tcg_temp_ebb_new_i64();
+    t2 = tcg_temp_ebb_new_i64();
+
+    tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);
+
+    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop & ~MO_SIGN);
+    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
+    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);
+    tcg_temp_free_i64(t2);
+
+    if (memop & MO_SIGN) {
+        tcg_gen_ext_i64(retv, t1, memop);
+    } else {
+        tcg_gen_mov_i64(retv, t1);
+    }
+    tcg_temp_free_i64(t1);
+}
+
+void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
+                                       TCGv_i64 cmpv, TCGv_i64 newv,
+                                       TCGArg idx, MemOp memop,
+                                       TCGType addr_type)
+{
+    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
+    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
+    tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
+}
+
+static void tcg_gen_atomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
+                                           TCGv_i64 cmpv, TCGv_i64 newv,
+                                           TCGArg idx, MemOp memop)
+{
+    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
+        tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
+        return;
+    }
+
+    if ((memop & MO_SIZE) == MO_64) {
+        gen_atomic_cx_i64 gen;
+
+        memop = tcg_canonicalize_memop(memop, 1, 0);
+        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
+        if (gen) {
+            MemOpIdx oi = make_memop_idx(memop, idx);
+            TCGv_i64 a64 = maybe_extend_addr64(addr);
+            gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
+            maybe_free_addr64(a64);
+            return;
+        }
+
+        gen_helper_exit_atomic(cpu_env);
+
+        /*
+         * Produce a result for a well-formed opcode stream.  This satisfies
+         * liveness for set before used, which happens before this dead code
+         * is removed.
+ */ + tcg_gen_movi_i64(retv, 0); + return; + } + + if (TCG_TARGET_REG_BITS == 32) { + tcg_gen_atomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), + TCGV_LOW(newv), idx, memop); + if (memop & MO_SIGN) { + tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31); + } else { + tcg_gen_movi_i32(TCGV_HIGH(retv), 0); + } + } else { + TCGv_i32 c32 = tcg_temp_ebb_new_i32(); + TCGv_i32 n32 = tcg_temp_ebb_new_i32(); + TCGv_i32 r32 = tcg_temp_ebb_new_i32(); + + tcg_gen_extrl_i64_i32(c32, cmpv); + tcg_gen_extrl_i64_i32(n32, newv); + tcg_gen_atomic_cmpxchg_i32_int(r32, addr, c32, n32, + idx, memop & ~MO_SIGN); + tcg_temp_free_i32(c32); + tcg_temp_free_i32(n32); + + tcg_gen_extu_i32_i64(retv, r32); + tcg_temp_free_i32(r32); + + if (memop & MO_SIGN) { + tcg_gen_ext_i64(retv, retv, memop); + } + } +} + +void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr, + TCGv_i64 cmpv, TCGv_i64 newv, + TCGArg idx, MemOp memop, TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) <= MO_64); + tcg_gen_atomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop); +} + +static void tcg_gen_nonatomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr, + TCGv_i128 cmpv, TCGv_i128 newv, + TCGArg idx, MemOp memop) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* Inline expansion below is simply too large for 32-bit hosts. */ + MemOpIdx oi = make_memop_idx(memop, idx); + TCGv_i64 a64 = maybe_extend_addr64(addr); + + gen_helper_nonatomic_cmpxchgo(retv, cpu_env, a64, cmpv, newv, + tcg_constant_i32(oi)); + maybe_free_addr64(a64); + } else { + TCGv_i128 oldv = tcg_temp_ebb_new_i128(); + TCGv_i128 tmpv = tcg_temp_ebb_new_i128(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 z = tcg_constant_i64(0); + + tcg_gen_qemu_ld_i128_int(oldv, addr, idx, memop); + + /* Compare i128 */ + tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv)); + tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv)); + tcg_gen_or_i64(t0, t0, t1); + + /* tmpv = equal ? newv : oldv */ + tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z, + TCGV128_LOW(newv), TCGV128_LOW(oldv)); + tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z, + TCGV128_HIGH(newv), TCGV128_HIGH(oldv)); + + /* Unconditional writeback. */ + tcg_gen_qemu_st_i128_int(tmpv, addr, idx, memop); + tcg_gen_mov_i128(retv, oldv); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + tcg_temp_free_i128(tmpv); + tcg_temp_free_i128(oldv); + } +} + +void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr, + TCGv_i128 cmpv, TCGv_i128 newv, + TCGArg idx, MemOp memop, + TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128); + tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop); +} + +static void tcg_gen_atomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr, + TCGv_i128 cmpv, TCGv_i128 newv, + TCGArg idx, MemOp memop) +{ + gen_atomic_cx_i128 gen; + + if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { + tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop); + return; + } + + gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; + if (gen) { + MemOpIdx oi = make_memop_idx(memop, idx); + TCGv_i64 a64 = maybe_extend_addr64(addr); + gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi)); + maybe_free_addr64(a64); + return; + } + + gen_helper_exit_atomic(cpu_env); + + /* + * Produce a result for a well-formed opcode stream. 
This satisfies + * liveness for set before used, which happens before this dead code + * is removed. + */ + tcg_gen_movi_i64(TCGV128_LOW(retv), 0); + tcg_gen_movi_i64(TCGV128_HIGH(retv), 0); +} + +void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr, + TCGv_i128 cmpv, TCGv_i128 newv, + TCGArg idx, MemOp memop, + TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128); + tcg_gen_atomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop); +} + +static void do_nonatomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val, + TCGArg idx, MemOp memop, bool new_val, + void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); + TCGv_i32 t2 = tcg_temp_ebb_new_i32(); + + memop = tcg_canonicalize_memop(memop, 0, 0); + + tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop); + tcg_gen_ext_i32(t2, val, memop); + gen(t2, t1, t2); + tcg_gen_qemu_st_i32_int(t2, addr, idx, memop); + + tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop); + tcg_temp_free_i32(t1); + tcg_temp_free_i32(t2); +} + +static void do_atomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val, + TCGArg idx, MemOp memop, void * const table[]) +{ + gen_atomic_op_i32 gen; + TCGv_i64 a64; + MemOpIdx oi; + + memop = tcg_canonicalize_memop(memop, 0, 0); + + gen = table[memop & (MO_SIZE | MO_BSWAP)]; + tcg_debug_assert(gen != NULL); + + oi = make_memop_idx(memop & ~MO_SIGN, idx); + a64 = maybe_extend_addr64(addr); + gen(ret, cpu_env, a64, val, tcg_constant_i32(oi)); + maybe_free_addr64(a64); + + if (memop & MO_SIGN) { + tcg_gen_ext_i32(ret, ret, memop); + } +} + +static void do_nonatomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val, + TCGArg idx, MemOp memop, bool new_val, + void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t2 = tcg_temp_ebb_new_i64(); + + memop = tcg_canonicalize_memop(memop, 1, 0); + + tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop); + tcg_gen_ext_i64(t2, val, memop); + gen(t2, t1, t2); + tcg_gen_qemu_st_i64_int(t2, addr, idx, memop); + + tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); +} + +static void do_atomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val, + TCGArg idx, MemOp memop, void * const table[]) +{ + memop = tcg_canonicalize_memop(memop, 1, 0); + + if ((memop & MO_SIZE) == MO_64) { + gen_atomic_op_i64 gen = table[memop & (MO_SIZE | MO_BSWAP)]; + + if (gen) { + MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx); + TCGv_i64 a64 = maybe_extend_addr64(addr); + gen(ret, cpu_env, a64, val, tcg_constant_i32(oi)); + maybe_free_addr64(a64); + return; + } + + gen_helper_exit_atomic(cpu_env); + /* Produce a result, so that we have a well-formed opcode stream + with respect to uses of the result in the (dead) code following. 
*/ + tcg_gen_movi_i64(ret, 0); + } else { + TCGv_i32 v32 = tcg_temp_ebb_new_i32(); + TCGv_i32 r32 = tcg_temp_ebb_new_i32(); + + tcg_gen_extrl_i64_i32(v32, val); + do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table); + tcg_temp_free_i32(v32); + + tcg_gen_extu_i32_i64(ret, r32); + tcg_temp_free_i32(r32); + + if (memop & MO_SIGN) { + tcg_gen_ext_i64(ret, ret, memop); + } + } +} + +#define GEN_ATOMIC_HELPER(NAME, OP, NEW) \ +static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \ + [MO_8] = gen_helper_atomic_##NAME##b, \ + [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \ + [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \ + [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \ + [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \ + WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \ + WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ +}; \ +void tcg_gen_atomic_##NAME##_i32_chk(TCGv_i32 ret, TCGTemp *addr, \ + TCGv_i32 val, TCGArg idx, \ + MemOp memop, TCGType addr_type) \ +{ \ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); \ + tcg_debug_assert((memop & MO_SIZE) <= MO_32); \ + if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ + do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \ + } else { \ + do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \ + tcg_gen_##OP##_i32); \ + } \ +} \ +void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr, \ + TCGv_i64 val, TCGArg idx, \ + MemOp memop, TCGType addr_type) \ +{ \ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); \ + tcg_debug_assert((memop & MO_SIZE) <= MO_64); \ + if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ + do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \ + } else { \ + do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \ + tcg_gen_##OP##_i64); \ + } \ +} + +GEN_ATOMIC_HELPER(fetch_add, add, 0) +GEN_ATOMIC_HELPER(fetch_and, and, 0) +GEN_ATOMIC_HELPER(fetch_or, or, 0) +GEN_ATOMIC_HELPER(fetch_xor, xor, 0) +GEN_ATOMIC_HELPER(fetch_smin, smin, 0) +GEN_ATOMIC_HELPER(fetch_umin, umin, 0) +GEN_ATOMIC_HELPER(fetch_smax, smax, 0) +GEN_ATOMIC_HELPER(fetch_umax, umax, 0) + +GEN_ATOMIC_HELPER(add_fetch, add, 1) +GEN_ATOMIC_HELPER(and_fetch, and, 1) +GEN_ATOMIC_HELPER(or_fetch, or, 1) +GEN_ATOMIC_HELPER(xor_fetch, xor, 1) +GEN_ATOMIC_HELPER(smin_fetch, smin, 1) +GEN_ATOMIC_HELPER(umin_fetch, umin, 1) +GEN_ATOMIC_HELPER(smax_fetch, smax, 1) +GEN_ATOMIC_HELPER(umax_fetch, umax, 1) + +static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_mov_i32(r, b); +} + +static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_mov_i64(r, b); +} + +GEN_ATOMIC_HELPER(xchg, mov2, 0) + +#undef GEN_ATOMIC_HELPER diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c index 966d41d65a..aeeb2435cb 100644 --- a/tcg/tcg-op-vec.c +++ b/tcg/tcg-op-vec.c @@ -19,6 +19,7 @@ #include "qemu/osdep.h" #include "tcg/tcg.h" +#include "tcg/tcg-temp-internal.h" #include "tcg/tcg-op.h" #include "tcg/tcg-mo.h" #include "tcg-internal.h" @@ -228,32 +229,6 @@ void tcg_gen_mov_vec(TCGv_vec r, TCGv_vec a) } } -TCGv_vec tcg_const_zeros_vec(TCGType type) -{ - TCGv_vec ret = tcg_temp_new_vec(type); - tcg_gen_dupi_vec(MO_64, ret, 0); - return ret; -} - -TCGv_vec tcg_const_ones_vec(TCGType type) -{ - TCGv_vec ret = tcg_temp_new_vec(type); - tcg_gen_dupi_vec(MO_64, ret, -1); - return ret; -} - -TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec m) -{ - TCGTemp *t = tcgv_vec_temp(m); - return tcg_const_zeros_vec(t->base_type); -} - -TCGv_vec tcg_const_ones_vec_matching(TCGv_vec 
m) -{ - TCGTemp *t = tcgv_vec_temp(m); - return tcg_const_ones_vec(t->base_type); -} - void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a) { TCGTemp *rt = tcgv_vec_temp(r); @@ -430,9 +405,7 @@ void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a) const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) { - TCGv_vec t = tcg_const_ones_vec_matching(r); - tcg_gen_xor_vec(0, r, a, t); - tcg_temp_free_vec(t); + tcg_gen_xor_vec(0, r, a, tcg_constant_vec_matching(r, 0, -1)); } tcg_swap_vecop_list(hold_list); } @@ -445,9 +418,7 @@ void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a) hold_list = tcg_swap_vecop_list(NULL); if (!TCG_TARGET_HAS_neg_vec || !do_op2(vece, r, a, INDEX_op_neg_vec)) { - TCGv_vec t = tcg_const_zeros_vec_matching(r); - tcg_gen_sub_vec(vece, r, t, a); - tcg_temp_free_vec(t); + tcg_gen_sub_vec(vece, r, tcg_constant_vec_matching(r, vece, 0), a); } tcg_swap_vecop_list(hold_list); } diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index 6a274392f8..edbd1c61d7 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -25,8 +25,8 @@ #include "qemu/osdep.h" #include "exec/exec-all.h" #include "tcg/tcg.h" +#include "tcg/tcg-temp-internal.h" #include "tcg/tcg-op.h" -#include "tcg/tcg-mo.h" #include "exec/plugin-gen.h" #include "tcg-internal.h" @@ -84,6 +84,22 @@ void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, op->args[5] = a6; } +/* Generic ops. */ + +static void add_last_as_label_use(TCGLabel *l) +{ + TCGLabelUse *u = tcg_malloc(sizeof(TCGLabelUse)); + + u->op = tcg_last_op(); + QSIMPLEQ_INSERT_TAIL(&l->branches, u, next); +} + +void tcg_gen_br(TCGLabel *l) +{ + tcg_gen_op1(INDEX_op_br, label_arg(l)); + add_last_as_label_use(l); +} + void tcg_gen_mb(TCGBar mb_type) { if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { @@ -216,8 +232,8 @@ void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l) if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { - l->refs++; tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l)); + add_last_as_label_use(l); } } @@ -264,7 +280,7 @@ void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) if (TCG_TARGET_HAS_div_i32) { tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div2_i32) { - TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_sari_i32(t0, arg1, 31); tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2); tcg_temp_free_i32(t0); @@ -278,13 +294,13 @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) if (TCG_TARGET_HAS_rem_i32) { tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div_i32) { - TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2); tcg_gen_mul_i32(t0, t0, arg2); tcg_gen_sub_i32(ret, arg1, t0); tcg_temp_free_i32(t0); } else if (TCG_TARGET_HAS_div2_i32) { - TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_sari_i32(t0, arg1, 31); tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2); tcg_temp_free_i32(t0); @@ -298,7 +314,7 @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) if (TCG_TARGET_HAS_div_i32) { tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div2_i32) { - TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_movi_i32(t0, 0); tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2); 
tcg_temp_free_i32(t0); @@ -312,13 +328,13 @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) if (TCG_TARGET_HAS_rem_i32) { tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div_i32) { - TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2); tcg_gen_mul_i32(t0, t0, arg2); tcg_gen_sub_i32(ret, arg1, t0); tcg_temp_free_i32(t0); } else if (TCG_TARGET_HAS_div2_i32) { - TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_movi_i32(t0, 0); tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2); tcg_temp_free_i32(t0); @@ -332,7 +348,7 @@ void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) if (TCG_TARGET_HAS_andc_i32) { tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2); } else { - TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_not_i32(t0, arg2); tcg_gen_and_i32(ret, arg1, t0); tcg_temp_free_i32(t0); @@ -374,7 +390,7 @@ void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) if (TCG_TARGET_HAS_orc_i32) { tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2); } else { - TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_not_i32(t0, arg2); tcg_gen_or_i32(ret, arg1, t0); tcg_temp_free_i32(t0); @@ -386,8 +402,8 @@ void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) if (TCG_TARGET_HAS_clz_i32) { tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_clz_i64) { - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t2 = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(t1, arg1); tcg_gen_extu_i32_i64(t2, arg2); tcg_gen_addi_i64(t2, t2, 32); @@ -411,8 +427,8 @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) if (TCG_TARGET_HAS_ctz_i32) { tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_ctz_i64) { - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t2 = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(t1, arg1); tcg_gen_extu_i32_i64(t2, arg2); tcg_gen_ctz_i64(t1, t1, t2); @@ -423,7 +439,7 @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) || TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i32 || TCG_TARGET_HAS_clz_i64) { - TCGv_i32 z, t = tcg_temp_new_i32(); + TCGv_i32 z, t = tcg_temp_ebb_new_i32(); if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) { tcg_gen_subi_i32(t, arg1, 1); @@ -448,7 +464,7 @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) { if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) { /* This equivalence has the advantage of not requiring a fixup. 
*/ - TCGv_i32 t = tcg_temp_new_i32(); + TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_subi_i32(t, arg1, 1); tcg_gen_andc_i32(t, t, arg1); tcg_gen_ctpop_i32(ret, t); @@ -461,7 +477,7 @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_clz_i32) { - TCGv_i32 t = tcg_temp_new_i32(); + TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_sari_i32(t, arg, 31); tcg_gen_xor_i32(t, t, arg); tcg_gen_clzi_i32(t, t, 32); @@ -477,7 +493,7 @@ void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1) if (TCG_TARGET_HAS_ctpop_i32) { tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1); } else if (TCG_TARGET_HAS_ctpop_i64) { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(t, arg1); tcg_gen_ctpop_i64(t, t); tcg_gen_extrl_i64_i32(ret, t); @@ -494,8 +510,8 @@ void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) } else { TCGv_i32 t0, t1; - t0 = tcg_temp_new_i32(); - t1 = tcg_temp_new_i32(); + t0 = tcg_temp_ebb_new_i32(); + t1 = tcg_temp_ebb_new_i32(); tcg_gen_shl_i32(t0, arg1, arg2); tcg_gen_subfi_i32(t1, 32, arg2); tcg_gen_shr_i32(t1, arg1, t1); @@ -515,8 +531,8 @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) tcg_gen_rotl_i32(ret, arg1, tcg_constant_i32(arg2)); } else { TCGv_i32 t0, t1; - t0 = tcg_temp_new_i32(); - t1 = tcg_temp_new_i32(); + t0 = tcg_temp_ebb_new_i32(); + t1 = tcg_temp_ebb_new_i32(); tcg_gen_shli_i32(t0, arg1, arg2); tcg_gen_shri_i32(t1, arg1, 32 - arg2); tcg_gen_or_i32(ret, t0, t1); @@ -532,8 +548,8 @@ void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) } else { TCGv_i32 t0, t1; - t0 = tcg_temp_new_i32(); - t1 = tcg_temp_new_i32(); + t0 = tcg_temp_ebb_new_i32(); + t1 = tcg_temp_ebb_new_i32(); tcg_gen_shr_i32(t0, arg1, arg2); tcg_gen_subfi_i32(t1, 32, arg2); tcg_gen_shl_i32(t1, arg1, t1); @@ -574,7 +590,7 @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, return; } - t1 = tcg_temp_new_i32(); + t1 = tcg_temp_ebb_new_i32(); if (TCG_TARGET_HAS_extract2_i32) { if (ofs + len == 32) { @@ -801,7 +817,7 @@ void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah, } else if (TCG_TARGET_HAS_extract2_i32) { tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs); } else { - TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_shri_i32(t0, al, ofs); tcg_gen_deposit_i32(ret, t0, ah, 32 - ofs, ofs); tcg_temp_free_i32(t0); @@ -818,8 +834,8 @@ void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1, } else if (TCG_TARGET_HAS_movcond_i32) { tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond); } else { - TCGv_i32 t0 = tcg_temp_new_i32(); - TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); tcg_gen_setcond_i32(cond, t0, c1, c2); tcg_gen_neg_i32(t0, t0); tcg_gen_and_i32(t1, v1, t0); @@ -836,8 +852,8 @@ void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, if (TCG_TARGET_HAS_add2_i32) { tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_concat_i32_i64(t0, al, ah); tcg_gen_concat_i32_i64(t1, bl, bh); tcg_gen_add_i64(t0, t0, t1); @@ -853,8 +869,8 @@ void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, if (TCG_TARGET_HAS_sub2_i32) { tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh); } else { - TCGv_i64 t0 = 
tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_concat_i32_i64(t0, al, ah); tcg_gen_concat_i32_i64(t1, bl, bh); tcg_gen_sub_i64(t0, t0, t1); @@ -869,14 +885,14 @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) if (TCG_TARGET_HAS_mulu2_i32) { tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2); } else if (TCG_TARGET_HAS_muluh_i32) { - TCGv_i32 t = tcg_temp_new_i32(); + TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2); tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2); tcg_gen_mov_i32(rl, t); tcg_temp_free_i32(t); } else if (TCG_TARGET_REG_BITS == 64) { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(t0, arg1); tcg_gen_extu_i32_i64(t1, arg2); tcg_gen_mul_i64(t0, t0, t1); @@ -893,16 +909,16 @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) if (TCG_TARGET_HAS_muls2_i32) { tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2); } else if (TCG_TARGET_HAS_mulsh_i32) { - TCGv_i32 t = tcg_temp_new_i32(); + TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2); tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2); tcg_gen_mov_i32(rl, t); tcg_temp_free_i32(t); } else if (TCG_TARGET_REG_BITS == 32) { - TCGv_i32 t0 = tcg_temp_new_i32(); - TCGv_i32 t1 = tcg_temp_new_i32(); - TCGv_i32 t2 = tcg_temp_new_i32(); - TCGv_i32 t3 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); + TCGv_i32 t2 = tcg_temp_ebb_new_i32(); + TCGv_i32 t3 = tcg_temp_ebb_new_i32(); tcg_gen_mulu2_i32(t0, t1, arg1, arg2); /* Adjust for negative inputs. */ tcg_gen_sari_i32(t2, arg1, 31); @@ -917,8 +933,8 @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) tcg_temp_free_i32(t2); tcg_temp_free_i32(t3); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_ext_i32_i64(t0, arg1); tcg_gen_ext_i32_i64(t1, arg2); tcg_gen_mul_i64(t0, t0, t1); @@ -931,9 +947,9 @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_REG_BITS == 32) { - TCGv_i32 t0 = tcg_temp_new_i32(); - TCGv_i32 t1 = tcg_temp_new_i32(); - TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); + TCGv_i32 t2 = tcg_temp_ebb_new_i32(); tcg_gen_mulu2_i32(t0, t1, arg1, arg2); /* Adjust for negative input for the signed arg1. 
*/ tcg_gen_sari_i32(t2, arg1, 31); @@ -944,8 +960,8 @@ void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_ext_i32_i64(t0, arg1); tcg_gen_extu_i32_i64(t1, arg2); tcg_gen_mul_i64(t0, t0, t1); @@ -1001,8 +1017,8 @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags) if (TCG_TARGET_HAS_bswap16_i32) { tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg, flags); } else { - TCGv_i32 t0 = tcg_temp_new_i32(); - TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); tcg_gen_shri_i32(t0, arg, 8); if (!(flags & TCG_BSWAP_IZ)) { @@ -1030,8 +1046,8 @@ void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg) if (TCG_TARGET_HAS_bswap32_i32) { tcg_gen_op3i_i32(INDEX_op_bswap32_i32, ret, arg, 0); } else { - TCGv_i32 t0 = tcg_temp_new_i32(); - TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); TCGv_i32 t2 = tcg_constant_i32(0x00ff00ff); /* arg = abcd */ @@ -1078,7 +1094,7 @@ void tcg_gen_umax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) void tcg_gen_abs_i32(TCGv_i32 ret, TCGv_i32 a) { - TCGv_i32 t = tcg_temp_new_i32(); + TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_sari_i32(t, a, 31); tcg_gen_xor_i32(ret, a, t); @@ -1241,8 +1257,8 @@ void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) TCGv_i64 t0; TCGv_i32 t1; - t0 = tcg_temp_new_i64(); - t1 = tcg_temp_new_i32(); + t0 = tcg_temp_ebb_new_i64(); + t1 = tcg_temp_ebb_new_i32(); tcg_gen_mulu2_i32(TCGV_LOW(t0), TCGV_HIGH(t0), TCGV_LOW(arg1), TCGV_LOW(arg2)); @@ -1423,7 +1439,7 @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1, tcg_gen_extract2_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c); } else { - TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c); tcg_gen_deposit_i32(TCGV_HIGH(ret), t0, TCGV_HIGH(arg1), c, 32 - c); @@ -1474,7 +1490,6 @@ void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l) if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { - l->refs++; if (TCG_TARGET_REG_BITS == 32) { tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), @@ -1483,6 +1498,7 @@ void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l) tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond, label_arg(l)); } + add_last_as_label_use(l); } } @@ -1493,12 +1509,12 @@ void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l) } else if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { - l->refs++; tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1), TCGV_HIGH(arg1), tcg_constant_i32(arg2), tcg_constant_i32(arg2 >> 32), cond, label_arg(l)); + add_last_as_label_use(l); } } @@ -1546,9 +1562,7 @@ void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) } else if (is_power_of_2(arg2)) { tcg_gen_shli_i64(ret, arg1, ctz64(arg2)); } else { - TCGv_i64 t0 = tcg_const_i64(arg2); - tcg_gen_mul_i64(ret, arg1, t0); - tcg_temp_free_i64(t0); + tcg_gen_mul_i64(ret, arg1, tcg_constant_i64(arg2)); } } @@ -1557,7 +1571,7 @@ void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) if (TCG_TARGET_HAS_div_i64) { tcg_gen_op3_i64(INDEX_op_div_i64, ret, 
arg1, arg2); } else if (TCG_TARGET_HAS_div2_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_sari_i64(t0, arg1, 63); tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2); tcg_temp_free_i64(t0); @@ -1571,13 +1585,13 @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) if (TCG_TARGET_HAS_rem_i64) { tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2); tcg_gen_mul_i64(t0, t0, arg2); tcg_gen_sub_i64(ret, arg1, t0); tcg_temp_free_i64(t0); } else if (TCG_TARGET_HAS_div2_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_sari_i64(t0, arg1, 63); tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2); tcg_temp_free_i64(t0); @@ -1591,7 +1605,7 @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) if (TCG_TARGET_HAS_div_i64) { tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div2_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_movi_i64(t0, 0); tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2); tcg_temp_free_i64(t0); @@ -1605,13 +1619,13 @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) if (TCG_TARGET_HAS_rem_i64) { tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2); tcg_gen_mul_i64(t0, t0, arg2); tcg_gen_sub_i64(ret, arg1, t0); tcg_temp_free_i64(t0); } else if (TCG_TARGET_HAS_div2_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_movi_i64(t0, 0); tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2); tcg_temp_free_i64(t0); @@ -1710,8 +1724,8 @@ void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags) } else if (TCG_TARGET_HAS_bswap16_i64) { tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg, flags); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_shri_i64(t0, arg, 8); if (!(flags & TCG_BSWAP_IZ)) { @@ -1749,8 +1763,8 @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags) } else if (TCG_TARGET_HAS_bswap32_i64) { tcg_gen_op3i_i64(INDEX_op_bswap32_i64, ret, arg, flags); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 t2 = tcg_constant_i64(0x00ff00ff); /* arg = xxxxabcd */ @@ -1778,8 +1792,8 @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { TCGv_i32 t0, t1; - t0 = tcg_temp_new_i32(); - t1 = tcg_temp_new_i32(); + t0 = tcg_temp_ebb_new_i32(); + t1 = tcg_temp_ebb_new_i32(); tcg_gen_bswap32_i32(t0, TCGV_LOW(arg)); tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg)); @@ -1790,9 +1804,9 @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg) } else if (TCG_TARGET_HAS_bswap64_i64) { tcg_gen_op3i_i64(INDEX_op_bswap64_i64, ret, arg, 0); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t2 = tcg_temp_ebb_new_i64(); /* arg = abcdefgh */ tcg_gen_movi_i64(t2, 0x00ff00ff00ff00ffull); @@ 
-1822,8 +1836,8 @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg) void tcg_gen_hswap_i64(TCGv_i64 ret, TCGv_i64 arg) { uint64_t m = 0x0000ffff0000ffffull; - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); /* See include/qemu/bitops.h, hswap64. */ tcg_gen_rotli_i64(t1, arg, 32); @@ -1863,7 +1877,7 @@ void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) } else if (TCG_TARGET_HAS_andc_i64) { tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_not_i64(t0, arg2); tcg_gen_and_i64(ret, arg1, t0); tcg_temp_free_i64(t0); @@ -1917,7 +1931,7 @@ void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) } else if (TCG_TARGET_HAS_orc_i64) { tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_not_i64(t0, arg2); tcg_gen_or_i64(ret, arg1, t0); tcg_temp_free_i64(t0); @@ -1938,16 +1952,14 @@ void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_clz_i32 && arg2 <= 0xffffffffu) { - TCGv_i32 t = tcg_temp_new_i32(); + TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_clzi_i32(t, TCGV_LOW(arg1), arg2 - 32); tcg_gen_addi_i32(t, t, 32); tcg_gen_clz_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), t); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); tcg_temp_free_i32(t); } else { - TCGv_i64 t0 = tcg_const_i64(arg2); - tcg_gen_clz_i64(ret, arg1, t0); - tcg_temp_free_i64(t0); + tcg_gen_clz_i64(ret, arg1, tcg_constant_i64(arg2)); } } @@ -1956,7 +1968,7 @@ void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) if (TCG_TARGET_HAS_ctz_i64) { tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2); } else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) { - TCGv_i64 z, t = tcg_temp_new_i64(); + TCGv_i64 z, t = tcg_temp_ebb_new_i64(); if (TCG_TARGET_HAS_ctpop_i64) { tcg_gen_subi_i64(t, arg1, 1); @@ -1983,7 +1995,7 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctz_i32 && arg2 <= 0xffffffffu) { - TCGv_i32 t32 = tcg_temp_new_i32(); + TCGv_i32 t32 = tcg_temp_ebb_new_i32(); tcg_gen_ctzi_i32(t32, TCGV_HIGH(arg1), arg2 - 32); tcg_gen_addi_i32(t32, t32, 32); tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32); @@ -1993,22 +2005,20 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) && TCG_TARGET_HAS_ctpop_i64 && arg2 == 64) { /* This equivalence has the advantage of not requiring a fixup. 
*/ - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_subi_i64(t, arg1, 1); tcg_gen_andc_i64(t, t, arg1); tcg_gen_ctpop_i64(ret, t); tcg_temp_free_i64(t); } else { - TCGv_i64 t0 = tcg_const_i64(arg2); - tcg_gen_ctz_i64(ret, arg1, t0); - tcg_temp_free_i64(t0); + tcg_gen_ctz_i64(ret, arg1, tcg_constant_i64(arg2)); } } void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_sari_i64(t, arg, 63); tcg_gen_xor_i64(t, t, arg); tcg_gen_clzi_i64(t, t, 64); @@ -2039,8 +2049,8 @@ void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2); } else { TCGv_i64 t0, t1; - t0 = tcg_temp_new_i64(); - t1 = tcg_temp_new_i64(); + t0 = tcg_temp_ebb_new_i64(); + t1 = tcg_temp_ebb_new_i64(); tcg_gen_shl_i64(t0, arg1, arg2); tcg_gen_subfi_i64(t1, 64, arg2); tcg_gen_shr_i64(t1, arg1, t1); @@ -2060,8 +2070,8 @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) tcg_gen_rotl_i64(ret, arg1, tcg_constant_i64(arg2)); } else { TCGv_i64 t0, t1; - t0 = tcg_temp_new_i64(); - t1 = tcg_temp_new_i64(); + t0 = tcg_temp_ebb_new_i64(); + t1 = tcg_temp_ebb_new_i64(); tcg_gen_shli_i64(t0, arg1, arg2); tcg_gen_shri_i64(t1, arg1, 64 - arg2); tcg_gen_or_i64(ret, t0, t1); @@ -2076,8 +2086,8 @@ void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2); } else { TCGv_i64 t0, t1; - t0 = tcg_temp_new_i64(); - t1 = tcg_temp_new_i64(); + t0 = tcg_temp_ebb_new_i64(); + t1 = tcg_temp_ebb_new_i64(); tcg_gen_shr_i64(t0, arg1, arg2); tcg_gen_subfi_i64(t1, 64, arg2); tcg_gen_shl_i64(t1, arg1, t1); @@ -2133,7 +2143,7 @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2, } } - t1 = tcg_temp_new_i64(); + t1 = tcg_temp_ebb_new_i64(); if (TCG_TARGET_HAS_extract2_i64) { if (ofs + len == 64) { @@ -2365,7 +2375,7 @@ void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg, tcg_gen_sextract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg), 0, len - 32); return; } else if (len > 32) { - TCGv_i32 t = tcg_temp_new_i32(); + TCGv_i32 t = tcg_temp_ebb_new_i32(); /* Extract the bits for the high word normally. */ tcg_gen_sextract_i32(t, TCGV_HIGH(arg), ofs + 32, len - 32); /* Shift the field down for the low part. 
*/ @@ -2460,7 +2470,7 @@ void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah, } else if (TCG_TARGET_HAS_extract2_i64) { tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_shri_i64(t0, al, ofs); tcg_gen_deposit_i64(ret, t0, ah, 64 - ofs, ofs); tcg_temp_free_i64(t0); @@ -2475,8 +2485,8 @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1, } else if (cond == TCG_COND_NEVER) { tcg_gen_mov_i64(ret, v2); } else if (TCG_TARGET_REG_BITS == 32) { - TCGv_i32 t0 = tcg_temp_new_i32(); - TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); tcg_gen_op6i_i32(INDEX_op_setcond2_i32, t0, TCGV_LOW(c1), TCGV_HIGH(c1), TCGV_LOW(c2), TCGV_HIGH(c2), cond); @@ -2503,8 +2513,8 @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1, } else if (TCG_TARGET_HAS_movcond_i64) { tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_setcond_i64(cond, t0, c1, c2); tcg_gen_neg_i64(t0, t0); tcg_gen_and_i64(t1, v1, t0); @@ -2521,8 +2531,8 @@ void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, if (TCG_TARGET_HAS_add2_i64) { tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_add_i64(t0, al, bl); tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, al); tcg_gen_add_i64(rh, ah, bh); @@ -2539,8 +2549,8 @@ void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, if (TCG_TARGET_HAS_sub2_i64) { tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_sub_i64(t0, al, bl); tcg_gen_setcond_i64(TCG_COND_LTU, t1, al, bl); tcg_gen_sub_i64(rh, ah, bh); @@ -2556,13 +2566,13 @@ void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) if (TCG_TARGET_HAS_mulu2_i64) { tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2); } else if (TCG_TARGET_HAS_muluh_i64) { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2); tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2); tcg_gen_mov_i64(rl, t); tcg_temp_free_i64(t); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_mul_i64(t0, arg1, arg2); gen_helper_muluh_i64(rh, arg1, arg2); tcg_gen_mov_i64(rl, t0); @@ -2575,16 +2585,16 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) if (TCG_TARGET_HAS_muls2_i64) { tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2); } else if (TCG_TARGET_HAS_mulsh_i64) { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2); tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2); tcg_gen_mov_i64(rl, t); tcg_temp_free_i64(t); } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 t2 = tcg_temp_new_i64(); - TCGv_i64 t3 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 
t2 = tcg_temp_ebb_new_i64(); + TCGv_i64 t3 = tcg_temp_ebb_new_i64(); tcg_gen_mulu2_i64(t0, t1, arg1, arg2); /* Adjust for negative inputs. */ tcg_gen_sari_i64(t2, arg1, 63); @@ -2599,7 +2609,7 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) tcg_temp_free_i64(t2); tcg_temp_free_i64(t3); } else { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_mul_i64(t0, arg1, arg2); gen_helper_mulsh_i64(rh, arg1, arg2); tcg_gen_mov_i64(rl, t0); @@ -2609,9 +2619,9 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t2 = tcg_temp_ebb_new_i64(); tcg_gen_mulu2_i64(t0, t1, arg1, arg2); /* Adjust for negative input for the signed arg1. */ tcg_gen_sari_i64(t2, arg1, 63); @@ -2645,7 +2655,7 @@ void tcg_gen_umax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) void tcg_gen_abs_i64(TCGv_i64 ret, TCGv_i64 a) { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_sari_i64(t, a, 63); tcg_gen_xor_i64(ret, a, t); @@ -2675,7 +2685,7 @@ void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg) tcg_gen_op2(INDEX_op_extrh_i64_i32, tcgv_i32_arg(ret), tcgv_i64_arg(arg)); } else { - TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_shri_i64(t, arg, 32); tcg_gen_mov_i32(ret, (TCGv_i32)t); tcg_temp_free_i64(t); @@ -2714,7 +2724,7 @@ void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high) return; } - tmp = tcg_temp_new_i64(); + tmp = tcg_temp_ebb_new_i64(); /* These extensions are only needed for type correctness. We may be able to do better given target specific information. */ tcg_gen_extu_i32_i64(tmp, high); @@ -2797,7 +2807,6 @@ void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx) tcg_debug_assert(idx == TB_EXIT_REQUESTED); } - plugin_gen_disable_mem_helpers(); tcg_gen_op1i(INDEX_op_exit_tb, val); } @@ -2826,906 +2835,8 @@ void tcg_gen_lookup_and_goto_ptr(void) } plugin_gen_disable_mem_helpers(); - ptr = tcg_temp_new_ptr(); + ptr = tcg_temp_ebb_new_ptr(); gen_helper_lookup_tb_ptr(ptr, cpu_env); tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr)); tcg_temp_free_ptr(ptr); } - -static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) -{ - /* Trigger the asserts within as early as possible. 
*/ - unsigned a_bits = get_alignment_bits(op); - - /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */ - if (a_bits == (op & MO_SIZE)) { - op = (op & ~MO_AMASK) | MO_ALIGN; - } - - switch (op & MO_SIZE) { - case MO_8: - op &= ~MO_BSWAP; - break; - case MO_16: - break; - case MO_32: - if (!is64) { - op &= ~MO_SIGN; - } - break; - case MO_64: - if (is64) { - op &= ~MO_SIGN; - break; - } - /* fall through */ - default: - g_assert_not_reached(); - } - if (st) { - op &= ~MO_SIGN; - } - return op; -} - -static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, - MemOp memop, TCGArg idx) -{ - MemOpIdx oi = make_memop_idx(memop, idx); -#if TARGET_LONG_BITS == 32 - tcg_gen_op3i_i32(opc, val, addr, oi); -#else - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi); - } else { - tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_i64_arg(addr), oi); - } -#endif -} - -static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr, - MemOp memop, TCGArg idx) -{ - MemOpIdx oi = make_memop_idx(memop, idx); -#if TARGET_LONG_BITS == 32 - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi); - } else { - tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_i32_arg(addr), oi); - } -#else - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_op5i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), - TCGV_LOW(addr), TCGV_HIGH(addr), oi); - } else { - tcg_gen_op3i_i64(opc, val, addr, oi); - } -#endif -} - -static void tcg_gen_req_mo(TCGBar type) -{ -#ifdef TCG_GUEST_DEFAULT_MO - type &= TCG_GUEST_DEFAULT_MO; -#endif - type &= ~TCG_TARGET_DEFAULT_MO; - if (type) { - tcg_gen_mb(type | TCG_BAR_SC); - } -} - -static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr) -{ -#ifdef CONFIG_PLUGIN - if (tcg_ctx->plugin_insn != NULL) { - /* Save a copy of the vaddr for use after a load. */ - TCGv temp = tcg_temp_new(); - tcg_gen_mov_tl(temp, vaddr); - return temp; - } -#endif - return vaddr; -} - -static void plugin_gen_mem_callbacks(TCGv vaddr, MemOpIdx oi, - enum qemu_plugin_mem_rw rw) -{ -#ifdef CONFIG_PLUGIN - if (tcg_ctx->plugin_insn != NULL) { - qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw); - plugin_gen_empty_mem_callback(vaddr, info); - tcg_temp_free(vaddr); - } -#endif -} - -//// --- Begin LibAFL code --- - -void libafl_gen_read(TCGv addr, MemOpIdx oi); -void libafl_gen_write(TCGv addr, MemOpIdx oi); - -//// --- End LibAFL code --- - -void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) -{ - MemOp orig_memop; - MemOpIdx oi; - - tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); - memop = tcg_canonicalize_memop(memop, 0, 0); - oi = make_memop_idx(memop, idx); - - orig_memop = memop; - if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { - memop &= ~MO_BSWAP; - /* The bswap primitive benefits from zero-extended input. */ - if ((memop & MO_SSIZE) == MO_SW) { - memop &= ~MO_SIGN; - } - } - - addr = plugin_prep_mem_callbacks(addr); - - gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); - -//// --- Begin LibAFL code --- - - libafl_gen_read(addr, oi); - -//// --- End LibAFL code --- - - if ((orig_memop ^ memop) & MO_BSWAP) { - switch (orig_memop & MO_SIZE) { - case MO_16: - tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN - ? 
TCG_BSWAP_IZ | TCG_BSWAP_OS - : TCG_BSWAP_IZ | TCG_BSWAP_OZ)); - break; - case MO_32: - tcg_gen_bswap32_i32(val, val); - break; - default: - g_assert_not_reached(); - } - } -} - -void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) -{ - TCGv_i32 swap = NULL; - MemOpIdx oi; - - tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); - memop = tcg_canonicalize_memop(memop, 0, 1); - oi = make_memop_idx(memop, idx); - - if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { - swap = tcg_temp_new_i32(); - switch (memop & MO_SIZE) { - case MO_16: - tcg_gen_bswap16_i32(swap, val, 0); - break; - case MO_32: - tcg_gen_bswap32_i32(swap, val); - break; - default: - g_assert_not_reached(); - } - val = swap; - memop &= ~MO_BSWAP; - } - - addr = plugin_prep_mem_callbacks(addr); - - if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) { - gen_ldst_i32(INDEX_op_qemu_st8_i32, val, addr, memop, idx); - } else { - gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); - } - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); - -//// --- Begin LibAFL code --- - - libafl_gen_write(addr, oi); - -//// --- End LibAFL code --- - - if (swap) { - tcg_temp_free_i32(swap); - } -} - -void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) -{ - MemOp orig_memop; - MemOpIdx oi; - - if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { - tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); - if (memop & MO_SIGN) { - tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31); - } else { - tcg_gen_movi_i32(TCGV_HIGH(val), 0); - } - return; - } - - tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); - memop = tcg_canonicalize_memop(memop, 1, 0); - oi = make_memop_idx(memop, idx); - - orig_memop = memop; - if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { - memop &= ~MO_BSWAP; - /* The bswap primitive benefits from zero-extended input. */ - if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) { - memop &= ~MO_SIGN; - } - } - - addr = plugin_prep_mem_callbacks(addr); - - gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); - -//// --- Begin LibAFL code --- - - libafl_gen_read(addr, oi); - -//// --- End LibAFL code --- - - if ((orig_memop ^ memop) & MO_BSWAP) { - int flags = (orig_memop & MO_SIGN - ? 
TCG_BSWAP_IZ | TCG_BSWAP_OS - : TCG_BSWAP_IZ | TCG_BSWAP_OZ); - switch (orig_memop & MO_SIZE) { - case MO_16: - tcg_gen_bswap16_i64(val, val, flags); - break; - case MO_32: - tcg_gen_bswap32_i64(val, val, flags); - break; - case MO_64: - tcg_gen_bswap64_i64(val, val); - break; - default: - g_assert_not_reached(); - } - } -} - -void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) -{ - TCGv_i64 swap = NULL; - MemOpIdx oi; - - if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { - tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop); - return; - } - - tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); - memop = tcg_canonicalize_memop(memop, 1, 1); - oi = make_memop_idx(memop, idx); - - if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { - swap = tcg_temp_new_i64(); - switch (memop & MO_SIZE) { - case MO_16: - tcg_gen_bswap16_i64(swap, val, 0); - break; - case MO_32: - tcg_gen_bswap32_i64(swap, val, 0); - break; - case MO_64: - tcg_gen_bswap64_i64(swap, val); - break; - default: - g_assert_not_reached(); - } - val = swap; - memop &= ~MO_BSWAP; - } - - addr = plugin_prep_mem_callbacks(addr); - - gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); - -//// --- Begin LibAFL code --- - - libafl_gen_write(addr, oi); - -//// --- End LibAFL code --- - - if (swap) { - tcg_temp_free_i64(swap); - } -} - -static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) -{ - MemOp mop_1 = orig, mop_2; - - tcg_debug_assert((orig & MO_SIZE) == MO_128); - tcg_debug_assert((orig & MO_SIGN) == 0); - - /* Use a memory ordering implemented by the host. */ - if (!TCG_TARGET_HAS_MEMORY_BSWAP && (orig & MO_BSWAP)) { - mop_1 &= ~MO_BSWAP; - } - - /* Reduce the size to 64-bit. */ - mop_1 = (mop_1 & ~MO_SIZE) | MO_64; - - /* Retain the alignment constraints of the original. */ - switch (orig & MO_AMASK) { - case MO_UNALN: - case MO_ALIGN_2: - case MO_ALIGN_4: - mop_2 = mop_1; - break; - case MO_ALIGN_8: - /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */ - mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN; - mop_2 = mop_1; - break; - case MO_ALIGN: - /* Second has 8-byte alignment; first has 16-byte alignment. */ - mop_2 = mop_1; - mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16; - break; - case MO_ALIGN_16: - case MO_ALIGN_32: - case MO_ALIGN_64: - /* Second has 8-byte alignment; first retains original. */ - mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN; - break; - default: - g_assert_not_reached(); - } - ret[0] = mop_1; - ret[1] = mop_2; -} - -void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) -{ - MemOp mop[2]; - TCGv addr_p8; - TCGv_i64 x, y; - - canonicalize_memop_i128_as_i64(mop, memop); - - tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); - addr = plugin_prep_mem_callbacks(addr); - - /* TODO: respect atomicity of the operation. */ - /* TODO: allow the tcg backend to see the whole operation. */ - - /* - * Since there are no global TCGv_i128, there is no visible state - * changed if the second load faults. Load directly into the two - * subwords. 
- */ - if ((memop & MO_BSWAP) == MO_LE) { - x = TCGV128_LOW(val); - y = TCGV128_HIGH(val); - } else { - x = TCGV128_HIGH(val); - y = TCGV128_LOW(val); - } - - gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr, mop[0], idx); - - if ((mop[0] ^ memop) & MO_BSWAP) { - tcg_gen_bswap64_i64(x, x); - } - - addr_p8 = tcg_temp_new(); - tcg_gen_addi_tl(addr_p8, addr, 8); - gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8, mop[1], idx); - tcg_temp_free(addr_p8); - - if ((mop[0] ^ memop) & MO_BSWAP) { - tcg_gen_bswap64_i64(y, y); - } - - plugin_gen_mem_callbacks(addr, make_memop_idx(memop, idx), - QEMU_PLUGIN_MEM_R); -} - -void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) -{ - MemOp mop[2]; - TCGv addr_p8; - TCGv_i64 x, y; - - canonicalize_memop_i128_as_i64(mop, memop); - - tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST); - addr = plugin_prep_mem_callbacks(addr); - - /* TODO: respect atomicity of the operation. */ - /* TODO: allow the tcg backend to see the whole operation. */ - - if ((memop & MO_BSWAP) == MO_LE) { - x = TCGV128_LOW(val); - y = TCGV128_HIGH(val); - } else { - x = TCGV128_HIGH(val); - y = TCGV128_LOW(val); - } - - addr_p8 = tcg_temp_new(); - if ((mop[0] ^ memop) & MO_BSWAP) { - TCGv_i64 t = tcg_temp_new_i64(); - - tcg_gen_bswap64_i64(t, x); - gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr, mop[0], idx); - tcg_gen_bswap64_i64(t, y); - tcg_gen_addi_tl(addr_p8, addr, 8); - gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr_p8, mop[1], idx); - tcg_temp_free_i64(t); - } else { - gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr, mop[0], idx); - tcg_gen_addi_tl(addr_p8, addr, 8); - gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8, mop[1], idx); - } - tcg_temp_free(addr_p8); - - plugin_gen_mem_callbacks(addr, make_memop_idx(memop, idx), - QEMU_PLUGIN_MEM_W); -} - -static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) -{ - switch (opc & MO_SSIZE) { - case MO_SB: - tcg_gen_ext8s_i32(ret, val); - break; - case MO_UB: - tcg_gen_ext8u_i32(ret, val); - break; - case MO_SW: - tcg_gen_ext16s_i32(ret, val); - break; - case MO_UW: - tcg_gen_ext16u_i32(ret, val); - break; - default: - tcg_gen_mov_i32(ret, val); - break; - } -} - -static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc) -{ - switch (opc & MO_SSIZE) { - case MO_SB: - tcg_gen_ext8s_i64(ret, val); - break; - case MO_UB: - tcg_gen_ext8u_i64(ret, val); - break; - case MO_SW: - tcg_gen_ext16s_i64(ret, val); - break; - case MO_UW: - tcg_gen_ext16u_i64(ret, val); - break; - case MO_SL: - tcg_gen_ext32s_i64(ret, val); - break; - case MO_UL: - tcg_gen_ext32u_i64(ret, val); - break; - default: - tcg_gen_mov_i64(ret, val); - break; - } -} - -typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, - TCGv_i32, TCGv_i32, TCGv_i32); -typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, - TCGv_i64, TCGv_i64, TCGv_i32); -typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv, - TCGv_i128, TCGv_i128, TCGv_i32); -typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, - TCGv_i32, TCGv_i32); -typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, - TCGv_i64, TCGv_i32); - -#ifdef CONFIG_ATOMIC64 -# define WITH_ATOMIC64(X) X, -#else -# define WITH_ATOMIC64(X) -#endif -#ifdef CONFIG_CMPXCHG128 -# define WITH_ATOMIC128(X) X, -#else -# define WITH_ATOMIC128(X) -#endif - -static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_8] = gen_helper_atomic_cmpxchgb, - [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le, - [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be, - [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le, - 
[MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be, - WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le) - WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be) - WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le) - WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be) -}; - -void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, - TCGv_i32 newv, TCGArg idx, MemOp memop) -{ - TCGv_i32 t1 = tcg_temp_new_i32(); - TCGv_i32 t2 = tcg_temp_new_i32(); - - tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE); - - tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN); - tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1); - tcg_gen_qemu_st_i32(t2, addr, idx, memop); - tcg_temp_free_i32(t2); - - if (memop & MO_SIGN) { - tcg_gen_ext_i32(retv, t1, memop); - } else { - tcg_gen_mov_i32(retv, t1); - } - tcg_temp_free_i32(t1); -} - -void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, - TCGv_i32 newv, TCGArg idx, MemOp memop) -{ - gen_atomic_cx_i32 gen; - MemOpIdx oi; - - if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { - tcg_gen_nonatomic_cmpxchg_i32(retv, addr, cmpv, newv, idx, memop); - return; - } - - memop = tcg_canonicalize_memop(memop, 0, 0); - gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; - tcg_debug_assert(gen != NULL); - - oi = make_memop_idx(memop & ~MO_SIGN, idx); - gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); - - if (memop & MO_SIGN) { - tcg_gen_ext_i32(retv, retv, memop); - } -} - -void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, - TCGv_i64 newv, TCGArg idx, MemOp memop) -{ - TCGv_i64 t1, t2; - - if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { - tcg_gen_nonatomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), - TCGV_LOW(newv), idx, memop); - if (memop & MO_SIGN) { - tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31); - } else { - tcg_gen_movi_i32(TCGV_HIGH(retv), 0); - } - return; - } - - t1 = tcg_temp_new_i64(); - t2 = tcg_temp_new_i64(); - - tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE); - - tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN); - tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1); - tcg_gen_qemu_st_i64(t2, addr, idx, memop); - tcg_temp_free_i64(t2); - - if (memop & MO_SIGN) { - tcg_gen_ext_i64(retv, t1, memop); - } else { - tcg_gen_mov_i64(retv, t1); - } - tcg_temp_free_i64(t1); -} - -void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, - TCGv_i64 newv, TCGArg idx, MemOp memop) -{ - if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { - tcg_gen_nonatomic_cmpxchg_i64(retv, addr, cmpv, newv, idx, memop); - return; - } - - if ((memop & MO_SIZE) == MO_64) { - gen_atomic_cx_i64 gen; - - memop = tcg_canonicalize_memop(memop, 1, 0); - gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; - if (gen) { - MemOpIdx oi = make_memop_idx(memop, idx); - gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); - return; - } - - gen_helper_exit_atomic(cpu_env); - - /* - * Produce a result for a well-formed opcode stream. This satisfies - * liveness for set before used, which happens before this dead code - * is removed. 
- */ - tcg_gen_movi_i64(retv, 0); - return; - } - - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_atomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), - TCGV_LOW(newv), idx, memop); - if (memop & MO_SIGN) { - tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31); - } else { - tcg_gen_movi_i32(TCGV_HIGH(retv), 0); - } - } else { - TCGv_i32 c32 = tcg_temp_new_i32(); - TCGv_i32 n32 = tcg_temp_new_i32(); - TCGv_i32 r32 = tcg_temp_new_i32(); - - tcg_gen_extrl_i64_i32(c32, cmpv); - tcg_gen_extrl_i64_i32(n32, newv); - tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN); - tcg_temp_free_i32(c32); - tcg_temp_free_i32(n32); - - tcg_gen_extu_i32_i64(retv, r32); - tcg_temp_free_i32(r32); - - if (memop & MO_SIGN) { - tcg_gen_ext_i64(retv, retv, memop); - } - } -} - -void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, - TCGv_i128 newv, TCGArg idx, MemOp memop) -{ - if (TCG_TARGET_REG_BITS == 32) { - /* Inline expansion below is simply too large for 32-bit hosts. */ - gen_atomic_cx_i128 gen = ((memop & MO_BSWAP) == MO_LE - ? gen_helper_nonatomic_cmpxchgo_le - : gen_helper_nonatomic_cmpxchgo_be); - MemOpIdx oi = make_memop_idx(memop, idx); - - tcg_debug_assert((memop & MO_SIZE) == MO_128); - tcg_debug_assert((memop & MO_SIGN) == 0); - - gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); - } else { - TCGv_i128 oldv = tcg_temp_new_i128(); - TCGv_i128 tmpv = tcg_temp_new_i128(); - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 z = tcg_constant_i64(0); - - tcg_gen_qemu_ld_i128(oldv, addr, idx, memop); - - /* Compare i128 */ - tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv)); - tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv)); - tcg_gen_or_i64(t0, t0, t1); - - /* tmpv = equal ? newv : oldv */ - tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z, - TCGV128_LOW(newv), TCGV128_LOW(oldv)); - tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z, - TCGV128_HIGH(newv), TCGV128_HIGH(oldv)); - - /* Unconditional writeback. */ - tcg_gen_qemu_st_i128(tmpv, addr, idx, memop); - tcg_gen_mov_i128(retv, oldv); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i128(tmpv); - tcg_temp_free_i128(oldv); - } -} - -void tcg_gen_atomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, - TCGv_i128 newv, TCGArg idx, MemOp memop) -{ - gen_atomic_cx_i128 gen; - - if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { - tcg_gen_nonatomic_cmpxchg_i128(retv, addr, cmpv, newv, idx, memop); - return; - } - - tcg_debug_assert((memop & MO_SIZE) == MO_128); - tcg_debug_assert((memop & MO_SIGN) == 0); - gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; - - if (gen) { - MemOpIdx oi = make_memop_idx(memop, idx); - gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); - return; - } - - gen_helper_exit_atomic(cpu_env); - - /* - * Produce a result for a well-formed opcode stream. This satisfies - * liveness for set before used, which happens before this dead code - * is removed. 
- */ - tcg_gen_movi_i64(TCGV128_LOW(retv), 0); - tcg_gen_movi_i64(TCGV128_HIGH(retv), 0); -} - -static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, - TCGArg idx, MemOp memop, bool new_val, - void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) -{ - TCGv_i32 t1 = tcg_temp_new_i32(); - TCGv_i32 t2 = tcg_temp_new_i32(); - - memop = tcg_canonicalize_memop(memop, 0, 0); - - tcg_gen_qemu_ld_i32(t1, addr, idx, memop); - tcg_gen_ext_i32(t2, val, memop); - gen(t2, t1, t2); - tcg_gen_qemu_st_i32(t2, addr, idx, memop); - - tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); -} - -static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, - TCGArg idx, MemOp memop, void * const table[]) -{ - gen_atomic_op_i32 gen; - MemOpIdx oi; - - memop = tcg_canonicalize_memop(memop, 0, 0); - - gen = table[memop & (MO_SIZE | MO_BSWAP)]; - tcg_debug_assert(gen != NULL); - - oi = make_memop_idx(memop & ~MO_SIGN, idx); - gen(ret, cpu_env, addr, val, tcg_constant_i32(oi)); - - if (memop & MO_SIGN) { - tcg_gen_ext_i32(ret, ret, memop); - } -} - -static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, - TCGArg idx, MemOp memop, bool new_val, - void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) -{ - TCGv_i64 t1 = tcg_temp_new_i64(); - TCGv_i64 t2 = tcg_temp_new_i64(); - - memop = tcg_canonicalize_memop(memop, 1, 0); - - tcg_gen_qemu_ld_i64(t1, addr, idx, memop); - tcg_gen_ext_i64(t2, val, memop); - gen(t2, t1, t2); - tcg_gen_qemu_st_i64(t2, addr, idx, memop); - - tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); -} - -static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, - TCGArg idx, MemOp memop, void * const table[]) -{ - memop = tcg_canonicalize_memop(memop, 1, 0); - - if ((memop & MO_SIZE) == MO_64) { -#ifdef CONFIG_ATOMIC64 - gen_atomic_op_i64 gen; - MemOpIdx oi; - - gen = table[memop & (MO_SIZE | MO_BSWAP)]; - tcg_debug_assert(gen != NULL); - - oi = make_memop_idx(memop & ~MO_SIGN, idx); - gen(ret, cpu_env, addr, val, tcg_constant_i32(oi)); -#else - gen_helper_exit_atomic(cpu_env); - /* Produce a result, so that we have a well-formed opcode stream - with respect to uses of the result in the (dead) code following. 
*/ - tcg_gen_movi_i64(ret, 0); -#endif /* CONFIG_ATOMIC64 */ - } else { - TCGv_i32 v32 = tcg_temp_new_i32(); - TCGv_i32 r32 = tcg_temp_new_i32(); - - tcg_gen_extrl_i64_i32(v32, val); - do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table); - tcg_temp_free_i32(v32); - - tcg_gen_extu_i32_i64(ret, r32); - tcg_temp_free_i32(r32); - - if (memop & MO_SIGN) { - tcg_gen_ext_i64(ret, ret, memop); - } - } -} - -#define GEN_ATOMIC_HELPER(NAME, OP, NEW) \ -static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \ - [MO_8] = gen_helper_atomic_##NAME##b, \ - [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \ - [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \ - [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \ - [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \ - WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \ - WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ -}; \ -void tcg_gen_atomic_##NAME##_i32 \ - (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \ -{ \ - if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ - do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \ - } else { \ - do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \ - tcg_gen_##OP##_i32); \ - } \ -} \ -void tcg_gen_atomic_##NAME##_i64 \ - (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \ -{ \ - if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ - do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \ - } else { \ - do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \ - tcg_gen_##OP##_i64); \ - } \ -} - -GEN_ATOMIC_HELPER(fetch_add, add, 0) -GEN_ATOMIC_HELPER(fetch_and, and, 0) -GEN_ATOMIC_HELPER(fetch_or, or, 0) -GEN_ATOMIC_HELPER(fetch_xor, xor, 0) -GEN_ATOMIC_HELPER(fetch_smin, smin, 0) -GEN_ATOMIC_HELPER(fetch_umin, umin, 0) -GEN_ATOMIC_HELPER(fetch_smax, smax, 0) -GEN_ATOMIC_HELPER(fetch_umax, umax, 0) - -GEN_ATOMIC_HELPER(add_fetch, add, 1) -GEN_ATOMIC_HELPER(and_fetch, and, 1) -GEN_ATOMIC_HELPER(or_fetch, or, 1) -GEN_ATOMIC_HELPER(xor_fetch, xor, 1) -GEN_ATOMIC_HELPER(smin_fetch, smin, 1) -GEN_ATOMIC_HELPER(umin_fetch, umin, 1) -GEN_ATOMIC_HELPER(smax_fetch, smax, 1) -GEN_ATOMIC_HELPER(umax_fetch, umax, 1) - -static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b) -{ - tcg_gen_mov_i32(r, b); -} - -static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b) -{ - tcg_gen_mov_i64(r, b); -} - -GEN_ATOMIC_HELPER(xchg, mov2, 0) - -#undef GEN_ATOMIC_HELPER diff --git a/tcg/tcg.c b/tcg/tcg.c index f50bbdf530..f4035a00b1 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -22,9 +22,6 @@ * THE SOFTWARE. */ -/* define it to use liveness analysis (better code) */ -#define USE_TCG_OPTIMIZATIONS - #include "qemu/osdep.h" /* Define to jump the ELF file used to communicate with GDB. */ @@ -34,9 +31,9 @@ #include "qemu/cutils.h" #include "qemu/host-utils.h" #include "qemu/qemu-print.h" -#include "qemu/timer.h" #include "qemu/cacheflush.h" #include "qemu/cacheinfo.h" +#include "qemu/timer.h" /* Note: the long term plan is to reduce the dependencies on the QEMU CPU definitions. Currently they are used for qemu_ld/st @@ -60,8 +57,12 @@ #include "elf.h" #include "exec/log.h" #include "tcg/tcg-ldst.h" +#include "tcg/tcg-temp-internal.h" #include "tcg-internal.h" #include "accel/tcg/perf.h" +#ifdef CONFIG_USER_ONLY +#include "exec/user/guest-base.h" +#endif /* Forward declarations for functions declared in tcg-target.c.inc and used here. 
*/ @@ -93,6 +94,19 @@ typedef struct QEMU_PACKED { DebugFrameFDEHeader fde; } DebugFrameHeader; +typedef struct TCGLabelQemuLdst { + bool is_ld; /* qemu_ld: true, qemu_st: false */ + MemOpIdx oi; + TCGType type; /* result type of a load */ + TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */ + TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */ + TCGReg datalo_reg; /* reg index for low word to be loaded or stored */ + TCGReg datahi_reg; /* reg index for high word to be loaded or stored */ + const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */ + tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */ + QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next; +} TCGLabelQemuLdst; + static void tcg_register_jit_int(const void *buf, size_t size, const void *debug_frame, size_t debug_frame_size) @@ -104,7 +118,17 @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1, static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg); static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg); +static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg); +static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg); +static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg); +static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg); +static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg); +static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg); +static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg); +static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg); +static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg); static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long); +static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2); static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg); static void tcg_out_goto_tb(TCGContext *s, int which); static void tcg_out_op(TCGContext *s, TCGOpcode opc, @@ -157,6 +181,54 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct); static int tcg_out_ldst_finalize(TCGContext *s); #endif +typedef struct TCGLdstHelperParam { + TCGReg (*ra_gen)(TCGContext *s, const TCGLabelQemuLdst *l, int arg_reg); + unsigned ntmp; + int tmp[3]; +} TCGLdstHelperParam; + +static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *l, + const TCGLdstHelperParam *p) + __attribute__((unused)); +static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *l, + bool load_sign, const TCGLdstHelperParam *p) + __attribute__((unused)); +static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *l, + const TCGLdstHelperParam *p) + __attribute__((unused)); + +static void * const qemu_ld_helpers[MO_SSIZE + 1] __attribute__((unused)) = { + [MO_UB] = helper_ldub_mmu, + [MO_SB] = helper_ldsb_mmu, + [MO_UW] = helper_lduw_mmu, + [MO_SW] = helper_ldsw_mmu, + [MO_UL] = helper_ldul_mmu, + [MO_UQ] = helper_ldq_mmu, +#if TCG_TARGET_REG_BITS == 64 + [MO_SL] = helper_ldsl_mmu, + [MO_128] = helper_ld16_mmu, +#endif +}; + +static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = { + [MO_8] = helper_stb_mmu, + [MO_16] = helper_stw_mmu, + [MO_32] = helper_stl_mmu, + [MO_64] = helper_stq_mmu, +#if TCG_TARGET_REG_BITS == 64 + [MO_128] = helper_st16_mmu, +#endif +}; + +typedef struct { + MemOp atom; /* lg2 bits of atomicity required */ + MemOp align; /* lg2 bits of 
alignment to use */ +} TCGAtomAlign; + +static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc, + MemOp host_atom, bool allow_two_ops) + __attribute__((unused)); + TCGContext tcg_init_ctx; __thread TCGContext *tcg_ctx; @@ -283,6 +355,7 @@ TCGLabel *gen_new_label(void) memset(l, 0, sizeof(TCGLabel)); l->id = s->nb_labels++; + QSIMPLEQ_INIT(&l->branches); QSIMPLEQ_INIT(&l->relocs); QSIMPLEQ_INSERT_TAIL(&s->labels, l, next); @@ -341,6 +414,213 @@ void tcg_raise_tb_overflow(TCGContext *s) siglongjmp(s->jmp_trans, -2); } +/* + * Used by tcg_out_movext{1,2} to hold the arguments for tcg_out_movext. + * By the time we arrive at tcg_out_movext1, @dst is always a TCGReg. + * + * However, tcg_out_helper_load_slots reuses this field to hold an + * argument slot number (which may designate an argument register or an + * argument stack slot), converting to TCGReg once all arguments that + * are destined for the stack are processed. + */ +typedef struct TCGMovExtend { + unsigned dst; + TCGReg src; + TCGType dst_type; + TCGType src_type; + MemOp src_ext; +} TCGMovExtend; + +/** + * tcg_out_movext -- move and extend + * @s: tcg context + * @dst_type: integral type for destination + * @dst: destination register + * @src_type: integral type for source + * @src_ext: extension to apply to source + * @src: source register + * + * Move or extend @src into @dst, depending on @src_ext and the types. + */ +static void tcg_out_movext(TCGContext *s, TCGType dst_type, TCGReg dst, + TCGType src_type, MemOp src_ext, TCGReg src) +{ + switch (src_ext) { + case MO_UB: + tcg_out_ext8u(s, dst, src); + break; + case MO_SB: + tcg_out_ext8s(s, dst_type, dst, src); + break; + case MO_UW: + tcg_out_ext16u(s, dst, src); + break; + case MO_SW: + tcg_out_ext16s(s, dst_type, dst, src); + break; + case MO_UL: + case MO_SL: + if (dst_type == TCG_TYPE_I32) { + if (src_type == TCG_TYPE_I32) { + tcg_out_mov(s, TCG_TYPE_I32, dst, src); + } else { + tcg_out_extrl_i64_i32(s, dst, src); + } + } else if (src_type == TCG_TYPE_I32) { + if (src_ext & MO_SIGN) { + tcg_out_exts_i32_i64(s, dst, src); + } else { + tcg_out_extu_i32_i64(s, dst, src); + } + } else { + if (src_ext & MO_SIGN) { + tcg_out_ext32s(s, dst, src); + } else { + tcg_out_ext32u(s, dst, src); + } + } + break; + case MO_UQ: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + if (dst_type == TCG_TYPE_I32) { + tcg_out_extrl_i64_i32(s, dst, src); + } else { + tcg_out_mov(s, TCG_TYPE_I64, dst, src); + } + break; + default: + g_assert_not_reached(); + } +} + +/* Minor variations on a theme, using a structure. */ +static void tcg_out_movext1_new_src(TCGContext *s, const TCGMovExtend *i, + TCGReg src) +{ + tcg_out_movext(s, i->dst_type, i->dst, i->src_type, i->src_ext, src); +} + +static void tcg_out_movext1(TCGContext *s, const TCGMovExtend *i) +{ + tcg_out_movext1_new_src(s, i, i->src); +} + +/** + * tcg_out_movext2 -- move and extend two pairs + * @s: tcg context + * @i1: first move description + * @i2: second move description + * @scratch: temporary register, or -1 for none + * + * As tcg_out_movext, for both @i1 and @i2, caring for overlap + * between the sources and destinations.
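+ * + * For example (purely illustrative, not a case from any particular + * target): with i1 = {dst = R0, src = R1} and i2 = {dst = R1, src = R0}, + * each destination overlaps the other source, so the pair is resolved + * either with a host xchg of R0 and R1 (when tcg_out_xchg succeeds) or + * by bouncing one source through @scratch before extending.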
+ */ + +static void tcg_out_movext2(TCGContext *s, const TCGMovExtend *i1, + const TCGMovExtend *i2, int scratch) +{ + TCGReg src1 = i1->src; + TCGReg src2 = i2->src; + + if (i1->dst != src2) { + tcg_out_movext1(s, i1); + tcg_out_movext1(s, i2); + return; + } + if (i2->dst == src1) { + TCGType src1_type = i1->src_type; + TCGType src2_type = i2->src_type; + + if (tcg_out_xchg(s, MAX(src1_type, src2_type), src1, src2)) { + /* The data is now in the correct registers; now extend. */ + src1 = i2->src; + src2 = i1->src; + } else { + tcg_debug_assert(scratch >= 0); + tcg_out_mov(s, src1_type, scratch, src1); + src1 = scratch; + } + } + tcg_out_movext1_new_src(s, i2, src2); + tcg_out_movext1_new_src(s, i1, src1); +} + +/** + * tcg_out_movext3 -- move and extend three pairs + * @s: tcg context + * @i1: first move description + * @i2: second move description + * @i3: third move description + * @scratch: temporary register, or -1 for none + * + * As tcg_out_movext, for all of @i1, @i2 and @i3, caring for overlap + * between the sources and destinations. + */ + +static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1, + const TCGMovExtend *i2, const TCGMovExtend *i3, + int scratch) +{ + TCGReg src1 = i1->src; + TCGReg src2 = i2->src; + TCGReg src3 = i3->src; + + if (i1->dst != src2 && i1->dst != src3) { + tcg_out_movext1(s, i1); + tcg_out_movext2(s, i2, i3, scratch); + return; + } + if (i2->dst != src1 && i2->dst != src3) { + tcg_out_movext1(s, i2); + tcg_out_movext2(s, i1, i3, scratch); + return; + } + if (i3->dst != src1 && i3->dst != src2) { + tcg_out_movext1(s, i3); + tcg_out_movext2(s, i1, i2, scratch); + return; + } + + /* + * There is a cycle. Since there are only 3 nodes, the cycle is + * either "clockwise" or "anti-clockwise", and can be solved with + * a single scratch or two xchgs. + */ + if (i1->dst == src2 && i2->dst == src3 && i3->dst == src1) { + /* "Clockwise" */ + if (tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2)) { + tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3); + /* The data is now in the correct registers; now extend. */ + tcg_out_movext1_new_src(s, i1, i1->dst); + tcg_out_movext1_new_src(s, i2, i2->dst); + tcg_out_movext1_new_src(s, i3, i3->dst); + } else { + tcg_debug_assert(scratch >= 0); + tcg_out_mov(s, i1->src_type, scratch, src1); + tcg_out_movext1(s, i3); + tcg_out_movext1(s, i2); + tcg_out_movext1_new_src(s, i1, scratch); + } + } else if (i1->dst == src3 && i2->dst == src1 && i3->dst == src2) { + /* "Anti-clockwise" */ + if (tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3)) { + tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2); + /* The data is now in the correct registers; now extend. */ + tcg_out_movext1_new_src(s, i1, i1->dst); + tcg_out_movext1_new_src(s, i2, i2->dst); + tcg_out_movext1_new_src(s, i3, i3->dst); + } else { + tcg_debug_assert(scratch >= 0); + tcg_out_mov(s, i1->src_type, scratch, src1); + tcg_out_movext1(s, i2); + tcg_out_movext1(s, i3); + tcg_out_movext1_new_src(s, i1, scratch); + } + } else { + g_assert_not_reached(); + } +} + #define C_PFX1(P, A) P##A #define C_PFX2(P, A, B) P##A##_##B #define C_PFX3(P, A, B, C) P##A##_##B##_##C @@ -567,6 +847,77 @@ static TCGHelperInfo all_helpers[] = { }; static GHashTable *helper_table; +/* + * Create TCGHelperInfo structures for "tcg/tcg-ldst.h" functions, + * akin to what "exec/helper-tcg.h" does with DEF_HELPER_FLAGS_N.
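+ * Each dh_typemask(t, n) below records the type code for position n, + * with position 0 being the return value, so the bitwise ORs give the + * complete signature of each helper in a single word.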
+ * We only use these for layout in tcg_out_ld_helper_ret and + * tcg_out_st_helper_args, and share them between several of + * the helpers, with the end result that it's easier to build manually. + */ + +#if TCG_TARGET_REG_BITS == 32 +# define dh_typecode_ttl dh_typecode_i32 +#else +# define dh_typecode_ttl dh_typecode_i64 +#endif + +static TCGHelperInfo info_helper_ld32_mmu = { + .flags = TCG_CALL_NO_WG, + .typemask = dh_typemask(ttl, 0) /* return tcg_target_ulong */ + | dh_typemask(env, 1) + | dh_typemask(i64, 2) /* uint64_t addr */ + | dh_typemask(i32, 3) /* unsigned oi */ + | dh_typemask(ptr, 4) /* uintptr_t ra */ +}; + +static TCGHelperInfo info_helper_ld64_mmu = { + .flags = TCG_CALL_NO_WG, + .typemask = dh_typemask(i64, 0) /* return uint64_t */ + | dh_typemask(env, 1) + | dh_typemask(i64, 2) /* uint64_t addr */ + | dh_typemask(i32, 3) /* unsigned oi */ + | dh_typemask(ptr, 4) /* uintptr_t ra */ +}; + +static TCGHelperInfo info_helper_ld128_mmu = { + .flags = TCG_CALL_NO_WG, + .typemask = dh_typemask(i128, 0) /* return Int128 */ + | dh_typemask(env, 1) + | dh_typemask(i64, 2) /* uint64_t addr */ + | dh_typemask(i32, 3) /* unsigned oi */ + | dh_typemask(ptr, 4) /* uintptr_t ra */ +}; + +static TCGHelperInfo info_helper_st32_mmu = { + .flags = TCG_CALL_NO_WG, + .typemask = dh_typemask(void, 0) + | dh_typemask(env, 1) + | dh_typemask(i64, 2) /* uint64_t addr */ + | dh_typemask(i32, 3) /* uint32_t data */ + | dh_typemask(i32, 4) /* unsigned oi */ + | dh_typemask(ptr, 5) /* uintptr_t ra */ +}; + +static TCGHelperInfo info_helper_st64_mmu = { + .flags = TCG_CALL_NO_WG, + .typemask = dh_typemask(void, 0) + | dh_typemask(env, 1) + | dh_typemask(i64, 2) /* uint64_t addr */ + | dh_typemask(i64, 3) /* uint64_t data */ + | dh_typemask(i32, 4) /* unsigned oi */ + | dh_typemask(ptr, 5) /* uintptr_t ra */ +}; + +static TCGHelperInfo info_helper_st128_mmu = { + .flags = TCG_CALL_NO_WG, + .typemask = dh_typemask(void, 0) + | dh_typemask(env, 1) + | dh_typemask(i64, 2) /* uint64_t addr */ + | dh_typemask(i128, 3) /* Int128 data */ + | dh_typemask(i32, 4) /* unsigned oi */ + | dh_typemask(ptr, 5) /* uintptr_t ra */ +}; + #ifdef CONFIG_TCG_INTERPRETER static ffi_type *typecode_to_ffi(int argmask) { @@ -658,6 +1009,25 @@ static void init_ffi_layouts(void) } #endif /* CONFIG_TCG_INTERPRETER */ +static inline bool arg_slot_reg_p(unsigned arg_slot) +{ + /* + * Split the sizeof away from the comparison to avoid Werror from + * "unsigned < 0 is always false", when iarg_regs is empty. 
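+ * + * As an illustration only (the counts depend on the host ABI): with + * six integer argument registers, slots 0-5 are register slots and + * slot 6 is the first stack slot, which arg_slot_stk_ofs() below + * places at TCG_TARGET_CALL_STACK_OFFSET.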
+ */ + unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs); + return arg_slot < nreg; +} + +static inline int arg_slot_stk_ofs(unsigned arg_slot) +{ + unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long); + unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs); + + tcg_debug_assert(stk_slot < max); + return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long); +} + typedef struct TCGCumulativeArgs { int arg_idx; /* tcg_gen_callN args[] */ int info_in_idx; /* TCGHelperInfo in[] */ @@ -897,6 +1267,7 @@ static void init_call_layout(TCGHelperInfo *info) } } assert(ref_base + cum.ref_slot <= max_stk_slots); + ref_base += max_reg_slots; if (ref_base != 0) { for (int i = cum.info_in_idx - 1; i >= 0; --i) { @@ -969,6 +1340,13 @@ static void tcg_context_init(unsigned max_cpus) (gpointer)&all_helpers[i]); } + init_call_layout(&info_helper_ld32_mmu); + init_call_layout(&info_helper_ld64_mmu); + init_call_layout(&info_helper_ld128_mmu); + init_call_layout(&info_helper_st32_mmu); + init_call_layout(&info_helper_st64_mmu); + init_call_layout(&info_helper_st128_mmu); + #ifdef CONFIG_TCG_INTERPRETER init_ffi_layouts(); #endif @@ -1081,7 +1459,6 @@ void tcg_prologue_init(TCGContext *s) (uintptr_t)s->code_buf, prologue_size); #endif -#ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) { FILE *logfile = qemu_log_trylock(); if (logfile) { @@ -1113,7 +1490,6 @@ void tcg_prologue_init(TCGContext *s) qemu_log_unlock(logfile); } } -#endif #ifndef CONFIG_TCG_INTERPRETER /* @@ -1153,6 +1529,9 @@ void tcg_func_start(TCGContext *s) QTAILQ_INIT(&s->ops); QTAILQ_INIT(&s->free_ops); QSIMPLEQ_INIT(&s->labels); + + tcg_debug_assert(s->addr_type == TCG_TYPE_I32 || + s->addr_type == TCG_TYPE_I64); } static TCGTemp *tcg_temp_alloc(TCGContext *s) @@ -1183,9 +1562,7 @@ static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type, { TCGTemp *ts; - if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) { - tcg_abort(); - } + tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); ts = tcg_global_alloc(s); ts->base_type = type; @@ -1266,69 +1643,67 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base, return ts; } -TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local) +TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind) { TCGContext *s = tcg_ctx; - TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL; TCGTemp *ts; - int idx, k; + int n; - k = type + (temp_local ? TCG_TYPE_COUNT : 0); - idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS); - if (idx < TCG_MAX_TEMPS) { - /* There is already an available temp with the right type. */ - clear_bit(idx, s->free_temps[k].l); + if (kind == TEMP_EBB) { + int idx = find_first_bit(s->free_temps[type].l, TCG_MAX_TEMPS); - ts = &s->temps[idx]; - ts->temp_allocated = 1; - tcg_debug_assert(ts->base_type == type); - tcg_debug_assert(ts->kind == kind); + if (idx < TCG_MAX_TEMPS) { + /* There is already an available temp with the right type. 
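+ * Note that only TEMP_EBB temps are recycled through this bitmap; + * tcg_temp_free_internal below silently ignores frees of TEMP_TB + * and TEMP_CONST temps.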
*/ + clear_bit(idx, s->free_temps[type].l); + + ts = &s->temps[idx]; + ts->temp_allocated = 1; + tcg_debug_assert(ts->base_type == type); + tcg_debug_assert(ts->kind == kind); + return ts; + } } else { - int i, n; - - switch (type) { - case TCG_TYPE_I32: - case TCG_TYPE_V64: - case TCG_TYPE_V128: - case TCG_TYPE_V256: - n = 1; - break; - case TCG_TYPE_I64: - n = 64 / TCG_TARGET_REG_BITS; - break; - case TCG_TYPE_I128: - n = 128 / TCG_TARGET_REG_BITS; - break; - default: - g_assert_not_reached(); - } - - ts = tcg_temp_alloc(s); - ts->base_type = type; - ts->temp_allocated = 1; - ts->kind = kind; - - if (n == 1) { - ts->type = type; - } else { - ts->type = TCG_TYPE_REG; - - for (i = 1; i < n; ++i) { - TCGTemp *ts2 = tcg_temp_alloc(s); - - tcg_debug_assert(ts2 == ts + i); - ts2->base_type = type; - ts2->type = TCG_TYPE_REG; - ts2->temp_allocated = 1; - ts2->temp_subindex = i; - ts2->kind = kind; - } - } + tcg_debug_assert(kind == TEMP_TB); } -#if defined(CONFIG_DEBUG_TCG) - s->temps_in_use++; -#endif + switch (type) { + case TCG_TYPE_I32: + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + n = 1; + break; + case TCG_TYPE_I64: + n = 64 / TCG_TARGET_REG_BITS; + break; + case TCG_TYPE_I128: + n = 128 / TCG_TARGET_REG_BITS; + break; + default: + g_assert_not_reached(); + } + + ts = tcg_temp_alloc(s); + ts->base_type = type; + ts->temp_allocated = 1; + ts->kind = kind; + + if (n == 1) { + ts->type = type; + } else { + ts->type = TCG_TYPE_REG; + + for (int i = 1; i < n; ++i) { + TCGTemp *ts2 = tcg_temp_alloc(s); + + tcg_debug_assert(ts2 == ts + i); + ts2->base_type = type; + ts2->type = TCG_TYPE_REG; + ts2->temp_allocated = 1; + ts2->temp_subindex = i; + ts2->kind = kind; + } + } return ts; } @@ -1352,7 +1727,7 @@ TCGv_vec tcg_temp_new_vec(TCGType type) } #endif - t = tcg_temp_new_internal(type, 0); + t = tcg_temp_new_internal(type, TEMP_EBB); return temp_tcgv_vec(t); } @@ -1363,42 +1738,28 @@ TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match) tcg_debug_assert(t->temp_allocated != 0); - t = tcg_temp_new_internal(t->base_type, 0); + t = tcg_temp_new_internal(t->base_type, TEMP_EBB); return temp_tcgv_vec(t); } void tcg_temp_free_internal(TCGTemp *ts) { TCGContext *s = tcg_ctx; - int k, idx; switch (ts->kind) { case TEMP_CONST: - /* - * In order to simplify users of tcg_constant_*, - * silently ignore free. - */ - return; - case TEMP_NORMAL: - case TEMP_LOCAL: + case TEMP_TB: + /* Silently ignore free. */ + break; + case TEMP_EBB: + tcg_debug_assert(ts->temp_allocated != 0); + ts->temp_allocated = 0; + set_bit(temp_idx(ts), s->free_temps[ts->base_type].l); break; default: + /* It never made sense to free TEMP_FIXED or TEMP_GLOBAL. */ g_assert_not_reached(); } - -#if defined(CONFIG_DEBUG_TCG) - s->temps_in_use--; - if (s->temps_in_use < 0) { - fprintf(stderr, "More temporaries freed than allocated!\n"); - } -#endif - - tcg_debug_assert(ts->temp_allocated != 0); - ts->temp_allocated = 0; - - idx = temp_idx(ts); - k = ts->base_type + (ts->kind == TEMP_NORMAL ? 
0 : TCG_TYPE_COUNT); - set_bit(idx, s->free_temps[k].l); } TCGTemp *tcg_constant_internal(TCGType type, int64_t val) @@ -1470,59 +1831,6 @@ TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val) return tcg_constant_vec(t->base_type, vece, val); } -TCGv_i32 tcg_const_i32(int32_t val) -{ - TCGv_i32 t0; - t0 = tcg_temp_new_i32(); - tcg_gen_movi_i32(t0, val); - return t0; -} - -TCGv_i64 tcg_const_i64(int64_t val) -{ - TCGv_i64 t0; - t0 = tcg_temp_new_i64(); - tcg_gen_movi_i64(t0, val); - return t0; -} - -TCGv_i32 tcg_const_local_i32(int32_t val) -{ - TCGv_i32 t0; - t0 = tcg_temp_local_new_i32(); - tcg_gen_movi_i32(t0, val); - return t0; -} - -TCGv_i64 tcg_const_local_i64(int64_t val) -{ - TCGv_i64 t0; - t0 = tcg_temp_local_new_i64(); - tcg_gen_movi_i64(t0, val); - return t0; -} - -#if defined(CONFIG_DEBUG_TCG) -void tcg_clear_temp_count(void) -{ - TCGContext *s = tcg_ctx; - s->temps_in_use = 0; -} - -int tcg_check_temp_count(void) -{ - TCGContext *s = tcg_ctx; - if (s->temps_in_use) { - /* Clear the count so that we don't give another - * warning immediately next time around. - */ - s->temps_in_use = 0; - return 1; - } - return 0; -} -#endif - /* Return true if OP may appear in the opcode stream. Test the runtime variable that controls each opcode. */ bool tcg_op_supported(TCGOpcode op) @@ -1540,15 +1848,26 @@ bool tcg_op_supported(TCGOpcode op) case INDEX_op_exit_tb: case INDEX_op_goto_tb: case INDEX_op_goto_ptr: - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_ld_i64: - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: return true; - case INDEX_op_qemu_st8_i32: + case INDEX_op_qemu_st8_a32_i32: + case INDEX_op_qemu_st8_a64_i32: return TCG_TARGET_HAS_qemu_st8_i32; + case INDEX_op_qemu_ld_a32_i128: + case INDEX_op_qemu_ld_a64_i128: + case INDEX_op_qemu_st_a32_i128: + case INDEX_op_qemu_st_a64_i128: + return TCG_TARGET_HAS_qemu_ldst_i128; + case INDEX_op_mov_i32: case INDEX_op_setcond_i32: case INDEX_op_brcond_i32: @@ -1878,7 +2197,7 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) case TCG_CALL_ARG_EXTEND_U: case TCG_CALL_ARG_EXTEND_S: { - TCGv_i64 temp = tcg_temp_new_i64(); + TCGv_i64 temp = tcg_temp_ebb_new_i64(); TCGv_i32 orig = temp_tcgv_i32(ts); if (loc->kind == TCG_CALL_ARG_EXTEND_S) { @@ -1924,11 +2243,10 @@ static void tcg_reg_alloc_start(TCGContext *s) break; case TEMP_GLOBAL: break; - case TEMP_NORMAL: case TEMP_EBB: val = TEMP_VAL_DEAD; /* fall through */ - case TEMP_LOCAL: + case TEMP_TB: ts->mem_allocated = 0; break; default: @@ -1950,13 +2268,10 @@ static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size, case TEMP_GLOBAL: pstrcpy(buf, buf_size, ts->name); break; - case TEMP_LOCAL: + case TEMP_TB: snprintf(buf, buf_size, "loc%d", idx - s->nb_globals); break; case TEMP_EBB: - snprintf(buf, buf_size, "ebb%d", idx - s->nb_globals); - break; - case TEMP_NORMAL: snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals); break; case TEMP_CONST: @@ -2005,7 +2320,7 @@ static const char * const cond_name[] = [TCG_COND_GTU] = "gtu" }; -static const char * const ldst_name[] = +static const char * const ldst_name[(MO_BSWAP | MO_SSIZE) + 1] = { [MO_UB] = "ub", [MO_SB] = "sb", @@ -2019,16 +2334,13 @@ static const char * const ldst_name[] = [MO_BEUL] = "beul", 
[MO_BESL] = "besl", [MO_BEUQ] = "beq", + [MO_128 + MO_BE] = "beo", + [MO_128 + MO_LE] = "leo", }; static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = { -#ifdef TARGET_ALIGNED_ONLY [MO_UNALN >> MO_ASHIFT] = "un+", - [MO_ALIGN >> MO_ASHIFT] = "", -#else - [MO_UNALN >> MO_ASHIFT] = "", [MO_ALIGN >> MO_ASHIFT] = "al+", -#endif [MO_ALIGN_2 >> MO_ASHIFT] = "al2+", [MO_ALIGN_4 >> MO_ASHIFT] = "al4+", [MO_ALIGN_8 >> MO_ASHIFT] = "al8+", @@ -2037,6 +2349,15 @@ static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = { [MO_ALIGN_64 >> MO_ASHIFT] = "al64+", }; +static const char * const atom_name[(MO_ATOM_MASK >> MO_ATOM_SHIFT) + 1] = { + [MO_ATOM_IFALIGN >> MO_ATOM_SHIFT] = "", + [MO_ATOM_IFALIGN_PAIR >> MO_ATOM_SHIFT] = "pair+", + [MO_ATOM_WITHIN16 >> MO_ATOM_SHIFT] = "w16+", + [MO_ATOM_WITHIN16_PAIR >> MO_ATOM_SHIFT] = "w16p+", + [MO_ATOM_SUBALIGN >> MO_ATOM_SHIFT] = "sub+", + [MO_ATOM_NONE >> MO_ATOM_SHIFT] = "noat+", +}; + static const char bswap_flag_name[][6] = { [TCG_BSWAP_IZ] = "iz", [TCG_BSWAP_OZ] = "oz", @@ -2082,13 +2403,8 @@ static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) col += ne_fprintf(f, "\n ----"); for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { - target_ulong a; -#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS - a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]); -#else - a = op->args[i]; -#endif - col += ne_fprintf(f, " " TARGET_FMT_lx, a); + col += ne_fprintf(f, " %016" PRIx64, + tcg_get_insn_start_param(op, i)); } } else if (c == INDEX_op_call) { const TCGHelperInfo *info = tcg_call_info(op); @@ -2166,23 +2482,38 @@ static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) } i = 1; break; - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st8_i32: - case INDEX_op_qemu_ld_i64: - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st8_a32_i32: + case INDEX_op_qemu_st8_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: + case INDEX_op_qemu_ld_a32_i128: + case INDEX_op_qemu_ld_a64_i128: + case INDEX_op_qemu_st_a32_i128: + case INDEX_op_qemu_st_a64_i128: { + const char *s_al, *s_op, *s_at; MemOpIdx oi = op->args[k++]; MemOp op = get_memop(oi); unsigned ix = get_mmuidx(oi); - if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) { - col += ne_fprintf(f, ",$0x%x,%u", op, ix); + s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT]; + s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)]; + s_at = atom_name[(op & MO_ATOM_MASK) >> MO_ATOM_SHIFT]; + op &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK); + + /* If all fields are accounted for, print symbolically. 
*/ + if (!op && s_al && s_op && s_at) { + col += ne_fprintf(f, ",%s%s%s,%u", + s_at, s_al, s_op, ix); } else { - const char *s_al, *s_op; - s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT]; - s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)]; - col += ne_fprintf(f, ",%s%s,%u", s_al, s_op, ix); + op = get_memop(oi); + col += ne_fprintf(f, ",$0x%x,%u", op, ix); } i = 1; } @@ -2221,6 +2552,85 @@ static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) arg_label(op->args[k])->id); i++, k++; break; + case INDEX_op_mb: + { + TCGBar membar = op->args[k]; + const char *b_op, *m_op; + + switch (membar & TCG_BAR_SC) { + case 0: + b_op = "none"; + break; + case TCG_BAR_LDAQ: + b_op = "acq"; + break; + case TCG_BAR_STRL: + b_op = "rel"; + break; + case TCG_BAR_SC: + b_op = "seq"; + break; + default: + g_assert_not_reached(); + } + + switch (membar & TCG_MO_ALL) { + case 0: + m_op = "none"; + break; + case TCG_MO_LD_LD: + m_op = "rr"; + break; + case TCG_MO_LD_ST: + m_op = "rw"; + break; + case TCG_MO_ST_LD: + m_op = "wr"; + break; + case TCG_MO_ST_ST: + m_op = "ww"; + break; + case TCG_MO_LD_LD | TCG_MO_LD_ST: + m_op = "rr+rw"; + break; + case TCG_MO_LD_LD | TCG_MO_ST_LD: + m_op = "rr+wr"; + break; + case TCG_MO_LD_LD | TCG_MO_ST_ST: + m_op = "rr+ww"; + break; + case TCG_MO_LD_ST | TCG_MO_ST_LD: + m_op = "rw+wr"; + break; + case TCG_MO_LD_ST | TCG_MO_ST_ST: + m_op = "rw+ww"; + break; + case TCG_MO_ST_LD | TCG_MO_ST_ST: + m_op = "wr+ww"; + break; + case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_LD: + m_op = "rr+rw+wr"; + break; + case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST: + m_op = "rr+rw+ww"; + break; + case TCG_MO_LD_LD | TCG_MO_ST_LD | TCG_MO_ST_ST: + m_op = "rr+wr+ww"; + break; + case TCG_MO_LD_ST | TCG_MO_ST_LD | TCG_MO_ST_ST: + m_op = "rw+wr+ww"; + break; + case TCG_MO_ALL: + m_op = "all"; + break; + default: + g_assert_not_reached(); + } + + col += ne_fprintf(f, "%s%s:%s", (k ? "," : ""), b_op, m_op); + i++, k++; + } + break; default: break; } @@ -2550,23 +2960,32 @@ static void process_op_defs(TCGContext *s) } } +static void remove_label_use(TCGOp *op, int idx) +{ + TCGLabel *label = arg_label(op->args[idx]); + TCGLabelUse *use; + + QSIMPLEQ_FOREACH(use, &label->branches, next) { + if (use->op == op) { + QSIMPLEQ_REMOVE(&label->branches, use, TCGLabelUse, next); + return; + } + } + g_assert_not_reached(); +} + void tcg_op_remove(TCGContext *s, TCGOp *op) { - TCGLabel *label; - switch (op->opc) { case INDEX_op_br: - label = arg_label(op->args[0]); - label->refs--; + remove_label_use(op, 0); break; case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: - label = arg_label(op->args[3]); - label->refs--; + remove_label_use(op, 3); break; case INDEX_op_brcond2_i32: - label = arg_label(op->args[5]); - label->refs--; + remove_label_use(op, 5); break; default: break; @@ -2648,10 +3067,36 @@ TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, return new_op; } -/* Reachable analysis : remove unreachable code. 
*/ -static void reachable_code_pass(TCGContext *s) +static void move_label_uses(TCGLabel *to, TCGLabel *from) { - TCGOp *op, *op_next; + TCGLabelUse *u; + + QSIMPLEQ_FOREACH(u, &from->branches, next) { + TCGOp *op = u->op; + switch (op->opc) { + case INDEX_op_br: + op->args[0] = label_arg(to); + break; + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + op->args[3] = label_arg(to); + break; + case INDEX_op_brcond2_i32: + op->args[5] = label_arg(to); + break; + default: + g_assert_not_reached(); + } + } + + QSIMPLEQ_CONCAT(&to->branches, &from->branches); +} + +/* Reachable analysis : remove unreachable code. */ +static void __attribute__((noinline)) +reachable_code_pass(TCGContext *s) +{ + TCGOp *op, *op_next, *op_prev; bool dead = false; QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { @@ -2661,7 +3106,40 @@ static void reachable_code_pass(TCGContext *s) switch (op->opc) { case INDEX_op_set_label: label = arg_label(op->args[0]); - if (label->refs == 0) { + + /* + * Note that the first op in the TB is always a load, + * so there is always something before a label. + */ + op_prev = QTAILQ_PREV(op, link); + + /* + * If we find two sequential labels, move all branches to + * reference the second label and remove the first label. + * Do this before branch to next optimization, so that the + * middle label is out of the way. + */ + if (op_prev->opc == INDEX_op_set_label) { + move_label_uses(label, arg_label(op_prev->args[0])); + tcg_op_remove(s, op_prev); + op_prev = QTAILQ_PREV(op, link); + } + + /* + * Optimization can fold conditional branches to unconditional. + * If we find a label which is preceded by an unconditional + * branch to next, remove the branch. We couldn't do this when + * processing the branch because any dead code between the branch + * and label had not yet been removed. + */ + if (op_prev->opc == INDEX_op_br && + label == arg_label(op_prev->args[0])) { + tcg_op_remove(s, op_prev); + /* Fall through means insns become live again. */ + dead = false; + } + + if (QSIMPLEQ_EMPTY(&label->branches)) { /* * While there is an occasional backward branch, virtually * all branches generated by the translators are forward. @@ -2674,21 +3152,6 @@ static void reachable_code_pass(TCGContext *s) /* Once we see a label, insns become live again. */ dead = false; remove = false; - - /* - * Optimization can fold conditional branches to unconditional. - * If we find a label with one reference which is preceded by - * an unconditional branch to it, remove both. This needed to - * wait until the dead code in between them was removed. 
- */ - if (label->refs == 1) { - TCGOp *op_prev = QTAILQ_PREV(op, link); - if (op_prev->opc == INDEX_op_br && - label == arg_label(op_prev->args[0])) { - tcg_op_remove(s, op_prev); - remove = true; - } - } } break; @@ -2771,10 +3234,9 @@ static void la_bb_end(TCGContext *s, int ng, int nt) switch (ts->kind) { case TEMP_FIXED: case TEMP_GLOBAL: - case TEMP_LOCAL: + case TEMP_TB: state = TS_DEAD | TS_MEM; break; - case TEMP_NORMAL: case TEMP_EBB: case TEMP_CONST: state = TS_DEAD; @@ -2816,16 +3278,13 @@ static void la_bb_sync(TCGContext *s, int ng, int nt) int state; switch (ts->kind) { - case TEMP_LOCAL: + case TEMP_TB: state = ts->state; ts->state = state | TS_MEM; if (state != TS_DEAD) { continue; } break; - case TEMP_NORMAL: - s->temps[i].state = TS_DEAD; - break; case TEMP_EBB: case TEMP_CONST: continue; @@ -2869,10 +3328,80 @@ static void la_cross_call(TCGContext *s, int nt) } } +/* + * Liveness analysis: Verify the lifetime of TEMP_TB, and reduce + * to TEMP_EBB, if possible. + */ +static void __attribute__((noinline)) +liveness_pass_0(TCGContext *s) +{ + void * const multiple_ebb = (void *)(uintptr_t)-1; + int nb_temps = s->nb_temps; + TCGOp *op, *ebb; + + for (int i = s->nb_globals; i < nb_temps; ++i) { + s->temps[i].state_ptr = NULL; + } + + /* + * Represent each EBB by the op at which it begins. In the case of + * the first EBB, this is the first op, otherwise it is a label. + * Collect the uses of each TEMP_TB: NULL for unused, EBB for use + * within a single EBB, else MULTIPLE_EBB. + */ + ebb = QTAILQ_FIRST(&s->ops); + QTAILQ_FOREACH(op, &s->ops, link) { + const TCGOpDef *def; + int nb_oargs, nb_iargs; + + switch (op->opc) { + case INDEX_op_set_label: + ebb = op; + continue; + case INDEX_op_discard: + continue; + case INDEX_op_call: + nb_oargs = TCGOP_CALLO(op); + nb_iargs = TCGOP_CALLI(op); + break; + default: + def = &tcg_op_defs[op->opc]; + nb_oargs = def->nb_oargs; + nb_iargs = def->nb_iargs; + break; + } + + for (int i = 0; i < nb_oargs + nb_iargs; ++i) { + TCGTemp *ts = arg_temp(op->args[i]); + + if (ts->kind != TEMP_TB) { + continue; + } + if (ts->state_ptr == NULL) { + ts->state_ptr = ebb; + } else if (ts->state_ptr != ebb) { + ts->state_ptr = multiple_ebb; + } + } + } + + /* + * For TEMP_TB that turned out not to be used beyond one EBB, + * reduce the liveness to TEMP_EBB. + */ + for (int i = s->nb_globals; i < nb_temps; ++i) { + TCGTemp *ts = &s->temps[i]; + if (ts->kind == TEMP_TB && ts->state_ptr != multiple_ebb) { + ts->kind = TEMP_EBB; + } + } +} + /* Liveness analysis : update the opc_arg_life array to tell if a given input arguments is dead. Instructions updating dead temporaries are removed. */ -static void liveness_pass_1(TCGContext *s) +static void __attribute__((noinline)) +liveness_pass_1(TCGContext *s) { int nb_globals = s->nb_globals; int nb_temps = s->nb_temps; @@ -2971,7 +3500,7 @@ static void liveness_pass_1(TCGContext *s) case TCG_CALL_ARG_NORMAL: case TCG_CALL_ARG_EXTEND_U: case TCG_CALL_ARG_EXTEND_S: - if (REG_P(loc)) { + if (arg_slot_reg_p(loc->arg_slot)) { *la_temp_pref(ts) = 0; break; } @@ -2998,7 +3527,7 @@ static void liveness_pass_1(TCGContext *s) case TCG_CALL_ARG_NORMAL: case TCG_CALL_ARG_EXTEND_U: case TCG_CALL_ARG_EXTEND_S: - if (REG_P(loc)) { + if (arg_slot_reg_p(loc->arg_slot)) { tcg_regset_set_reg(*la_temp_pref(ts), tcg_target_call_iarg_regs[loc->arg_slot]); } @@ -3212,7 +3741,8 @@ static void liveness_pass_1(TCGContext *s) } /* Liveness analysis: Convert indirect regs to direct temporaries. 
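* A global is "indirect" when it sits in memory addressed through another * temp rather than through a fixed base register; roughly, this pass gives * each such use a direct temp plus explicit loads and stores, so that the * register allocator never has to chase the base pointer itself.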
*/ -static bool liveness_pass_2(TCGContext *s) +static bool __attribute__((noinline)) +liveness_pass_2(TCGContext *s) { int nb_globals = s->nb_globals; int nb_temps, i; @@ -3509,10 +4039,9 @@ static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead) case TEMP_FIXED: return; case TEMP_GLOBAL: - case TEMP_LOCAL: + case TEMP_TB: new_type = TEMP_VAL_MEM; break; - case TEMP_NORMAL: case TEMP_EBB: new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD; break; @@ -3566,7 +4095,7 @@ static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs, case TEMP_VAL_DEAD: default: - tcg_abort(); + g_assert_not_reached(); } ts->mem_coherent = 1; } @@ -3653,7 +4182,7 @@ static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs, } } - tcg_abort(); + g_assert_not_reached(); } static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs, @@ -3699,7 +4228,7 @@ static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs, } } } - tcg_abort(); + g_assert_not_reached(); } /* Make sure the temporary is in a register. If needed, allocate the register @@ -3746,7 +4275,7 @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs, break; case TEMP_VAL_DEAD: default: - tcg_abort(); + g_assert_not_reached(); } set_temp_val_reg(s, ts, reg); } @@ -3797,10 +4326,9 @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) TCGTemp *ts = &s->temps[i]; switch (ts->kind) { - case TEMP_LOCAL: + case TEMP_TB: temp_save(s, ts, allocated_regs); break; - case TEMP_NORMAL: case TEMP_EBB: /* The liveness analysis already ensures that temps are dead. Keep an tcg_debug_assert for safety. */ @@ -3834,12 +4362,9 @@ static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs) * Keep tcg_debug_asserts for safety. 
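* A conditional branch exits the extended basic block only on the taken * path, so a TEMP_TB value must remain findable at the branch target via * its memory slot -- hence the mem_coherent assertion below -- while * TEMP_EBB and TEMP_CONST values need no sync: the former cannot be live * at the target (which starts a new EBB) and the latter can always be * rematerialized.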
*/ switch (ts->kind) { - case TEMP_LOCAL: + case TEMP_TB: tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent); break; - case TEMP_NORMAL: - tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD); - break; case TEMP_EBB: case TEMP_CONST: break; @@ -4386,11 +4911,50 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) } /* emit instruction */ - if (def->flags & TCG_OPF_VECTOR) { - tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op), - new_args, const_args); - } else { - tcg_out_op(s, op->opc, new_args, const_args); + switch (op->opc) { + case INDEX_op_ext8s_i32: + tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]); + break; + case INDEX_op_ext8s_i64: + tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]); + break; + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + tcg_out_ext8u(s, new_args[0], new_args[1]); + break; + case INDEX_op_ext16s_i32: + tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]); + break; + case INDEX_op_ext16s_i64: + tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]); + break; + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + tcg_out_ext16u(s, new_args[0], new_args[1]); + break; + case INDEX_op_ext32s_i64: + tcg_out_ext32s(s, new_args[0], new_args[1]); + break; + case INDEX_op_ext32u_i64: + tcg_out_ext32u(s, new_args[0], new_args[1]); + break; + case INDEX_op_ext_i32_i64: + tcg_out_exts_i32_i64(s, new_args[0], new_args[1]); + break; + case INDEX_op_extu_i32_i64: + tcg_out_extu_i32_i64(s, new_args[0], new_args[1]); + break; + case INDEX_op_extrl_i64_i32: + tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]); + break; + default: + if (def->flags & TCG_OPF_VECTOR) { + tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op), + new_args, const_args); + } else { + tcg_out_op(s, op->opc, new_args, const_args); + } + break; } /* move the outputs in the correct register if needed */ @@ -4521,7 +5085,7 @@ static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts, } } -static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts, +static void load_arg_stk(TCGContext *s, unsigned arg_slot, TCGTemp *ts, TCGRegSet allocated_regs) { /* @@ -4531,30 +5095,27 @@ static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts, */ temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0); tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, - TCG_TARGET_CALL_STACK_OFFSET + - stk_slot * sizeof(tcg_target_long)); + arg_slot_stk_ofs(arg_slot)); } static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l, TCGTemp *ts, TCGRegSet *allocated_regs) { - if (REG_P(l)) { + if (arg_slot_reg_p(l->arg_slot)) { TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot]; load_arg_reg(s, reg, ts, *allocated_regs); tcg_regset_set_reg(*allocated_regs, reg); } else { - load_arg_stk(s, l->arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs), - ts, *allocated_regs); + load_arg_stk(s, l->arg_slot, ts, *allocated_regs); } } -static void load_arg_ref(TCGContext *s, int arg_slot, TCGReg ref_base, +static void load_arg_ref(TCGContext *s, unsigned arg_slot, TCGReg ref_base, intptr_t ref_off, TCGRegSet *allocated_regs) { TCGReg reg; - int stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs); - if (stk_slot < 0) { + if (arg_slot_reg_p(arg_slot)) { reg = tcg_target_call_iarg_regs[arg_slot]; tcg_reg_free(s, reg, *allocated_regs); tcg_out_addi_ptr(s, reg, ref_base, ref_off); @@ -4564,8 +5125,7 @@ static void load_arg_ref(TCGContext *s, int arg_slot, TCGReg ref_base, *allocated_regs, 0, false); tcg_out_addi_ptr(s, reg, ref_base, 
ref_off); tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK, - TCG_TARGET_CALL_STACK_OFFSET - + stk_slot * sizeof(tcg_target_long)); + arg_slot_stk_ofs(arg_slot)); } } @@ -4595,8 +5155,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) case TCG_CALL_ARG_BY_REF: load_arg_stk(s, loc->ref_slot, ts, allocated_regs); load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK, - TCG_TARGET_CALL_STACK_OFFSET - + loc->ref_slot * sizeof(tcg_target_long), + arg_slot_stk_ofs(loc->ref_slot), &allocated_regs); break; case TCG_CALL_ARG_BY_REF_N: @@ -4701,6 +5260,613 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) } } +/** + * atom_and_align_for_opc: + * @s: tcg context + * @opc: memory operation code + * @host_atom: MO_ATOM_{IFALIGN,WITHIN16,SUBALIGN} for host operations + * @allow_two_ops: true if we are prepared to issue two operations + * + * Return the alignment and atomicity to use for the inline fast path + * for the given memory operation. The alignment may be larger than + * that specified in @opc, and the correct alignment will be diagnosed + * by the slow path helper. + * + * If @allow_two_ops, the host is prepared to test for 2x alignment, + * and issue two loads or stores for subalignment. + */ +static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc, + MemOp host_atom, bool allow_two_ops) +{ + MemOp align = get_alignment_bits(opc); + MemOp size = opc & MO_SIZE; + MemOp half = size ? size - 1 : 0; + MemOp atmax; + MemOp atom; + + /* When serialized, no further atomicity required. */ + if (s->gen_tb->cflags & CF_PARALLEL) { + atom = opc & MO_ATOM_MASK; + } else { + atom = MO_ATOM_NONE; + } + + switch (atom) { + case MO_ATOM_NONE: + /* The operation requires no specific atomicity. */ + atmax = MO_8; + break; + + case MO_ATOM_IFALIGN: + atmax = size; + break; + + case MO_ATOM_IFALIGN_PAIR: + atmax = half; + break; + + case MO_ATOM_WITHIN16: + atmax = size; + if (size == MO_128) { + /* Misalignment implies !within16, and therefore no atomicity. */ + } else if (host_atom != MO_ATOM_WITHIN16) { + /* The host does not implement within16, so require alignment. */ + align = MAX(align, size); + } + break; + + case MO_ATOM_WITHIN16_PAIR: + atmax = size; + /* + * Misalignment implies !within16, and therefore half atomicity. + * Any host prepared for two operations can implement this with + * half alignment. + */ + if (host_atom != MO_ATOM_WITHIN16 && allow_two_ops) { + align = MAX(align, half); + } + break; + + case MO_ATOM_SUBALIGN: + atmax = size; + if (host_atom != MO_ATOM_SUBALIGN) { + /* If unaligned but not odd, there are subobjects up to half. */ + if (allow_two_ops) { + align = MAX(align, half); + } else { + align = MAX(align, size); + } + } + break; + + default: + g_assert_not_reached(); + } + + return (TCGAtomAlign){ .atom = atmax, .align = align }; +} + +/* + * Similarly for qemu_ld/st slow path helpers. + * We must re-implement tcg_gen_callN and tcg_reg_alloc_call simultaneously, + * using only the provided backend tcg_out_* functions. + */ + +static int tcg_out_helper_stk_ofs(TCGType type, unsigned slot) +{ + int ofs = arg_slot_stk_ofs(slot); + + /* + * Each stack slot is TCG_TARGET_LONG_BITS. If the host does not + * require extension to uint64_t, adjust the address for uint32_t. 
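+ * Worked example (assuming a big-endian 64-bit host): an unextended + * uint32_t argument occupies bytes 4..7 of its 8-byte stack slot, so the + * returned offset is advanced by 4; on a little-endian host the value + * already sits in bytes 0..3 and no adjustment is needed.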
+ */ + if (HOST_BIG_ENDIAN && + TCG_TARGET_REG_BITS == 64 && + type == TCG_TYPE_I32) { + ofs += 4; + } + return ofs; +} + +static void tcg_out_helper_load_slots(TCGContext *s, + unsigned nmov, TCGMovExtend *mov, + const TCGLdstHelperParam *parm) +{ + unsigned i; + TCGReg dst3; + + /* + * Start from the end, storing to the stack first. + * This frees those registers, so we need not consider overlap. + */ + for (i = nmov; i-- > 0; ) { + unsigned slot = mov[i].dst; + + if (arg_slot_reg_p(slot)) { + goto found_reg; + } + + TCGReg src = mov[i].src; + TCGType dst_type = mov[i].dst_type; + MemOp dst_mo = dst_type == TCG_TYPE_I32 ? MO_32 : MO_64; + + /* The argument is going onto the stack; extend into scratch. */ + if ((mov[i].src_ext & MO_SIZE) != dst_mo) { + tcg_debug_assert(parm->ntmp != 0); + mov[i].dst = src = parm->tmp[0]; + tcg_out_movext1(s, &mov[i]); + } + + tcg_out_st(s, dst_type, src, TCG_REG_CALL_STACK, + tcg_out_helper_stk_ofs(dst_type, slot)); + } + return; + + found_reg: + /* + * The remaining arguments are in registers. + * Convert slot numbers to argument registers. + */ + nmov = i + 1; + for (i = 0; i < nmov; ++i) { + mov[i].dst = tcg_target_call_iarg_regs[mov[i].dst]; + } + + switch (nmov) { + case 4: + /* The backend must have provided enough temps for the worst case. */ + tcg_debug_assert(parm->ntmp >= 2); + + dst3 = mov[3].dst; + for (unsigned j = 0; j < 3; ++j) { + if (dst3 == mov[j].src) { + /* + * Conflict. Copy the source to a temporary, perform the + * remaining moves, then the extension from our scratch + * on the way out. + */ + TCGReg scratch = parm->tmp[1]; + + tcg_out_mov(s, mov[3].src_type, scratch, mov[3].src); + tcg_out_movext3(s, mov, mov + 1, mov + 2, parm->tmp[0]); + tcg_out_movext1_new_src(s, &mov[3], scratch); + break; + } + } + + /* No conflicts: perform this move and continue. */ + tcg_out_movext1(s, &mov[3]); + /* fall through */ + + case 3: + tcg_out_movext3(s, mov, mov + 1, mov + 2, + parm->ntmp ? parm->tmp[0] : -1); + break; + case 2: + tcg_out_movext2(s, mov, mov + 1, + parm->ntmp ? parm->tmp[0] : -1); + break; + case 1: + tcg_out_movext1(s, mov); + break; + default: + g_assert_not_reached(); + } +} + +static void tcg_out_helper_load_imm(TCGContext *s, unsigned slot, + TCGType type, tcg_target_long imm, + const TCGLdstHelperParam *parm) +{ + if (arg_slot_reg_p(slot)) { + tcg_out_movi(s, type, tcg_target_call_iarg_regs[slot], imm); + } else { + int ofs = tcg_out_helper_stk_ofs(type, slot); + if (!tcg_out_sti(s, type, imm, TCG_REG_CALL_STACK, ofs)) { + tcg_debug_assert(parm->ntmp != 0); + tcg_out_movi(s, type, parm->tmp[0], imm); + tcg_out_st(s, type, parm->tmp[0], TCG_REG_CALL_STACK, ofs); + } + } +} + +static void tcg_out_helper_load_common_args(TCGContext *s, + const TCGLabelQemuLdst *ldst, + const TCGLdstHelperParam *parm, + const TCGHelperInfo *info, + unsigned next_arg) +{ + TCGMovExtend ptr_mov = { + .dst_type = TCG_TYPE_PTR, + .src_type = TCG_TYPE_PTR, + .src_ext = sizeof(void *) == 4 ? MO_32 : MO_64 + }; + const TCGCallArgumentLoc *loc = &info->in[0]; + TCGType type; + unsigned slot; + tcg_target_ulong imm; + + /* + * Handle env, which is always first. + */ + ptr_mov.dst = loc->arg_slot; + ptr_mov.src = TCG_AREG0; + tcg_out_helper_load_slots(s, 1, &ptr_mov, parm); + + /* + * Handle oi. + */ + imm = ldst->oi; + loc = &info->in[next_arg]; + type = TCG_TYPE_I32; + switch (loc->kind) { + case TCG_CALL_ARG_NORMAL: + break; + case TCG_CALL_ARG_EXTEND_U: + case TCG_CALL_ARG_EXTEND_S: + /* No extension required for MemOpIdx. 
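+ * A MemOpIdx is (op << 4) | mmu_idx -- see make_memop_idx() -- which + * occupies far fewer than 31 bits, so sign- and zero-extension yield the + * same value and the immediate can be stored at full register width; the + * assert below documents that assumption.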
*/ + tcg_debug_assert(imm <= INT32_MAX); + type = TCG_TYPE_REG; + break; + default: + g_assert_not_reached(); + } + tcg_out_helper_load_imm(s, loc->arg_slot, type, imm, parm); + next_arg++; + + /* + * Handle ra. + */ + loc = &info->in[next_arg]; + slot = loc->arg_slot; + if (parm->ra_gen) { + int arg_reg = -1; + TCGReg ra_reg; + + if (arg_slot_reg_p(slot)) { + arg_reg = tcg_target_call_iarg_regs[slot]; + } + ra_reg = parm->ra_gen(s, ldst, arg_reg); + + ptr_mov.dst = slot; + ptr_mov.src = ra_reg; + tcg_out_helper_load_slots(s, 1, &ptr_mov, parm); + } else { + imm = (uintptr_t)ldst->raddr; + tcg_out_helper_load_imm(s, slot, TCG_TYPE_PTR, imm, parm); + } +} + +static unsigned tcg_out_helper_add_mov(TCGMovExtend *mov, + const TCGCallArgumentLoc *loc, + TCGType dst_type, TCGType src_type, + TCGReg lo, TCGReg hi) +{ + MemOp reg_mo; + + if (dst_type <= TCG_TYPE_REG) { + MemOp src_ext; + + switch (loc->kind) { + case TCG_CALL_ARG_NORMAL: + src_ext = src_type == TCG_TYPE_I32 ? MO_32 : MO_64; + break; + case TCG_CALL_ARG_EXTEND_U: + dst_type = TCG_TYPE_REG; + src_ext = MO_UL; + break; + case TCG_CALL_ARG_EXTEND_S: + dst_type = TCG_TYPE_REG; + src_ext = MO_SL; + break; + default: + g_assert_not_reached(); + } + + mov[0].dst = loc->arg_slot; + mov[0].dst_type = dst_type; + mov[0].src = lo; + mov[0].src_type = src_type; + mov[0].src_ext = src_ext; + return 1; + } + + if (TCG_TARGET_REG_BITS == 32) { + assert(dst_type == TCG_TYPE_I64); + reg_mo = MO_32; + } else { + assert(dst_type == TCG_TYPE_I128); + reg_mo = MO_64; + } + + mov[0].dst = loc[HOST_BIG_ENDIAN].arg_slot; + mov[0].src = lo; + mov[0].dst_type = TCG_TYPE_REG; + mov[0].src_type = TCG_TYPE_REG; + mov[0].src_ext = reg_mo; + + mov[1].dst = loc[!HOST_BIG_ENDIAN].arg_slot; + mov[1].src = hi; + mov[1].dst_type = TCG_TYPE_REG; + mov[1].src_type = TCG_TYPE_REG; + mov[1].src_ext = reg_mo; + + return 2; +} + +static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, + const TCGLdstHelperParam *parm) +{ + const TCGHelperInfo *info; + const TCGCallArgumentLoc *loc; + TCGMovExtend mov[2]; + unsigned next_arg, nmov; + MemOp mop = get_memop(ldst->oi); + + switch (mop & MO_SIZE) { + case MO_8: + case MO_16: + case MO_32: + info = &info_helper_ld32_mmu; + break; + case MO_64: + info = &info_helper_ld64_mmu; + break; + case MO_128: + info = &info_helper_ld128_mmu; + break; + default: + g_assert_not_reached(); + } + + /* Defer env argument. */ + next_arg = 1; + + loc = &info->in[next_arg]; + if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) { + /* + * 32-bit host with 32-bit guest: zero-extend the guest address + * to 64-bits for the helper by storing the low part, then + * load a zero for the high part. + */ + tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN, + TCG_TYPE_I32, TCG_TYPE_I32, + ldst->addrlo_reg, -1); + tcg_out_helper_load_slots(s, 1, mov, parm); + + tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot, + TCG_TYPE_I32, 0, parm); + next_arg += 2; + } else { + nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type, + ldst->addrlo_reg, ldst->addrhi_reg); + tcg_out_helper_load_slots(s, nmov, mov, parm); + next_arg += nmov; + } + + switch (info->out_kind) { + case TCG_CALL_RET_NORMAL: + case TCG_CALL_RET_BY_VEC: + break; + case TCG_CALL_RET_BY_REF: + /* + * The return reference is in the first argument slot. + * We need memory in which to return: re-use the top of stack. 
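+ * (Sketch of why re-use is safe: the i128 load helper's real arguments + * -- env, addr, oi, ra -- are expected to fit in register slots on hosts + * using TCG_CALL_RET_BY_REF, leaving the bottom of the static call-args + * area free for the 16-byte result, which is read back right after the + * call.)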
+ */ + { + int ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET; + + if (arg_slot_reg_p(0)) { + tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[0], + TCG_REG_CALL_STACK, ofs_slot0); + } else { + tcg_debug_assert(parm->ntmp != 0); + tcg_out_addi_ptr(s, parm->tmp[0], + TCG_REG_CALL_STACK, ofs_slot0); + tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0], + TCG_REG_CALL_STACK, ofs_slot0); + } + } + break; + default: + g_assert_not_reached(); + } + + tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg); +} + +static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *ldst, + bool load_sign, + const TCGLdstHelperParam *parm) +{ + MemOp mop = get_memop(ldst->oi); + TCGMovExtend mov[2]; + int ofs_slot0; + + switch (ldst->type) { + case TCG_TYPE_I64: + if (TCG_TARGET_REG_BITS == 32) { + break; + } + /* fall through */ + + case TCG_TYPE_I32: + mov[0].dst = ldst->datalo_reg; + mov[0].src = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, 0); + mov[0].dst_type = ldst->type; + mov[0].src_type = TCG_TYPE_REG; + + /* + * If load_sign, then we allowed the helper to perform the + * appropriate sign extension to tcg_target_ulong, and all + * we need now is a plain move. + * + * If they do not, then we expect the relevant extension + * instruction to be no more expensive than a move, and + * we thus save the icache etc by only using one of two + * helper functions. + */ + if (load_sign || !(mop & MO_SIGN)) { + if (TCG_TARGET_REG_BITS == 32 || ldst->type == TCG_TYPE_I32) { + mov[0].src_ext = MO_32; + } else { + mov[0].src_ext = MO_64; + } + } else { + mov[0].src_ext = mop & MO_SSIZE; + } + tcg_out_movext1(s, mov); + return; + + case TCG_TYPE_I128: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET; + switch (TCG_TARGET_CALL_RET_I128) { + case TCG_CALL_RET_NORMAL: + break; + case TCG_CALL_RET_BY_VEC: + tcg_out_st(s, TCG_TYPE_V128, + tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0), + TCG_REG_CALL_STACK, ofs_slot0); + /* fall through */ + case TCG_CALL_RET_BY_REF: + tcg_out_ld(s, TCG_TYPE_I64, ldst->datalo_reg, + TCG_REG_CALL_STACK, ofs_slot0 + 8 * HOST_BIG_ENDIAN); + tcg_out_ld(s, TCG_TYPE_I64, ldst->datahi_reg, + TCG_REG_CALL_STACK, ofs_slot0 + 8 * !HOST_BIG_ENDIAN); + return; + default: + g_assert_not_reached(); + } + break; + + default: + g_assert_not_reached(); + } + + mov[0].dst = ldst->datalo_reg; + mov[0].src = + tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN); + mov[0].dst_type = TCG_TYPE_REG; + mov[0].src_type = TCG_TYPE_REG; + mov[0].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64; + + mov[1].dst = ldst->datahi_reg; + mov[1].src = + tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN); + mov[1].dst_type = TCG_TYPE_REG; + mov[1].src_type = TCG_TYPE_REG; + mov[1].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64; + + tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? 
parm->tmp[0] : -1); +} + +static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, + const TCGLdstHelperParam *parm) +{ + const TCGHelperInfo *info; + const TCGCallArgumentLoc *loc; + TCGMovExtend mov[4]; + TCGType data_type; + unsigned next_arg, nmov, n; + MemOp mop = get_memop(ldst->oi); + + switch (mop & MO_SIZE) { + case MO_8: + case MO_16: + case MO_32: + info = &info_helper_st32_mmu; + data_type = TCG_TYPE_I32; + break; + case MO_64: + info = &info_helper_st64_mmu; + data_type = TCG_TYPE_I64; + break; + case MO_128: + info = &info_helper_st128_mmu; + data_type = TCG_TYPE_I128; + break; + default: + g_assert_not_reached(); + } + + /* Defer env argument. */ + next_arg = 1; + nmov = 0; + + /* Handle addr argument. */ + loc = &info->in[next_arg]; + if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) { + /* + * 32-bit host with 32-bit guest: zero-extend the guest address + * to 64-bits for the helper by storing the low part. Later, + * after we have processed the register inputs, we will load a + * zero for the high part. + */ + tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN, + TCG_TYPE_I32, TCG_TYPE_I32, + ldst->addrlo_reg, -1); + next_arg += 2; + nmov += 1; + } else { + n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type, + ldst->addrlo_reg, ldst->addrhi_reg); + next_arg += n; + nmov += n; + } + + /* Handle data argument. */ + loc = &info->in[next_arg]; + switch (loc->kind) { + case TCG_CALL_ARG_NORMAL: + case TCG_CALL_ARG_EXTEND_U: + case TCG_CALL_ARG_EXTEND_S: + n = tcg_out_helper_add_mov(mov + nmov, loc, data_type, ldst->type, + ldst->datalo_reg, ldst->datahi_reg); + next_arg += n; + nmov += n; + tcg_out_helper_load_slots(s, nmov, mov, parm); + break; + + case TCG_CALL_ARG_BY_REF: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + tcg_debug_assert(data_type == TCG_TYPE_I128); + tcg_out_st(s, TCG_TYPE_I64, + HOST_BIG_ENDIAN ? ldst->datahi_reg : ldst->datalo_reg, + TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[0].ref_slot)); + tcg_out_st(s, TCG_TYPE_I64, + HOST_BIG_ENDIAN ? ldst->datalo_reg : ldst->datahi_reg, + TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[1].ref_slot)); + + tcg_out_helper_load_slots(s, nmov, mov, parm); + + if (arg_slot_reg_p(loc->arg_slot)) { + tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[loc->arg_slot], + TCG_REG_CALL_STACK, + arg_slot_stk_ofs(loc->ref_slot)); + } else { + tcg_debug_assert(parm->ntmp != 0); + tcg_out_addi_ptr(s, parm->tmp[0], TCG_REG_CALL_STACK, + arg_slot_stk_ofs(loc->ref_slot)); + tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0], + TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc->arg_slot)); + } + next_arg += 2; + break; + + default: + g_assert_not_reached(); + } + + if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) { + /* Zero extend the address by loading a zero for the high part. 
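+ * On a 32-bit host the helper's uint64_t address spans two 32-bit slots, + * info->in[1] and info->in[2]; the low half was moved from addrlo_reg + * together with the other register inputs above, and only now is the + * high half -- in[1 + !HOST_BIG_ENDIAN] -- filled with constant zero, so + * that loading the immediate cannot clobber an argument register that + * still holds a pending input.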
*/ + loc = &info->in[1 + !HOST_BIG_ENDIAN]; + tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm); + } + + tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg); +} + #ifdef CONFIG_PROFILER /* avoid copy/paste errors */ @@ -4810,7 +5976,7 @@ int64_t tcg_cpu_exec_time(void) #endif -int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) +int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start) { #ifdef CONFIG_PROFILER TCGProfile *prof = &s->prof; @@ -4838,7 +6004,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) } #endif -#ifdef DEBUG_DISAS if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP) && qemu_log_in_addr_range(pc_start))) { FILE *logfile = qemu_log_trylock(); @@ -4849,7 +6014,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) qemu_log_unlock(logfile); } } -#endif #ifdef CONFIG_DEBUG_TCG /* Ensure all labels referenced have been emitted. */ @@ -4858,7 +6022,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) bool error = false; QSIMPLEQ_FOREACH(l, &s->labels, next) { - if (unlikely(!l->present) && l->refs) { + if (unlikely(!l->present) && !QSIMPLEQ_EMPTY(&l->branches)) { qemu_log_mask(CPU_LOG_TB_OP, "$L%d referenced but not present.\n", l->id); error = true; @@ -4872,9 +6036,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock()); #endif -#ifdef USE_TCG_OPTIMIZATIONS tcg_optimize(s); -#endif #ifdef CONFIG_PROFILER qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock()); @@ -4882,10 +6044,10 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) #endif reachable_code_pass(s); + liveness_pass_0(s); liveness_pass_1(s); if (s->nb_indirects > 0) { -#ifdef DEBUG_DISAS if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND) && qemu_log_in_addr_range(pc_start))) { FILE *logfile = qemu_log_trylock(); @@ -4896,7 +6058,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) qemu_log_unlock(logfile); } } -#endif + /* Replace indirect temps with direct temps. */ if (liveness_pass_2(s)) { /* If changes were made, re-run liveness. */ @@ -4908,7 +6070,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) qatomic_set(&prof->la_time, prof->la_time + profile_getclock()); #endif -#ifdef DEBUG_DISAS if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT) && qemu_log_in_addr_range(pc_start))) { FILE *logfile = qemu_log_trylock(); @@ -4919,7 +6080,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) qemu_log_unlock(logfile); } } -#endif /* Initialize goto_tb jump offsets. 
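* TB_JMP_OFFSET_INVALID marks a slot for which no goto_tb op has been * emitted; when one is emitted, the offset records where the jump sits in * the generated code so that it can be patched for TB chaining and later * reset.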
*/ tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID; @@ -4970,13 +6130,8 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) } num_insns++; for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { - target_ulong a; -#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS - a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]); -#else - a = op->args[i]; -#endif - s->gen_insn_data[num_insns][i] = a; + s->gen_insn_data[num_insns][i] = + tcg_get_insn_start_param(op, i); } break; case INDEX_op_discard: diff --git a/tcg/tci.c b/tcg/tci.c index fc67e7e767..bab4397bc5 100644 --- a/tcg/tci.c +++ b/tcg/tci.c @@ -286,162 +286,54 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) return result; } -static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, +static uint64_t tci_qemu_ld(CPUArchState *env, uint64_t taddr, MemOpIdx oi, const void *tb_ptr) { MemOp mop = get_memop(oi); uintptr_t ra = (uintptr_t)tb_ptr; -#ifdef CONFIG_SOFTMMU - switch (mop & (MO_BSWAP | MO_SSIZE)) { + switch (mop & MO_SSIZE) { case MO_UB: - return helper_ret_ldub_mmu(env, taddr, oi, ra); + return helper_ldub_mmu(env, taddr, oi, ra); case MO_SB: - return helper_ret_ldsb_mmu(env, taddr, oi, ra); - case MO_LEUW: - return helper_le_lduw_mmu(env, taddr, oi, ra); - case MO_LESW: - return helper_le_ldsw_mmu(env, taddr, oi, ra); - case MO_LEUL: - return helper_le_ldul_mmu(env, taddr, oi, ra); - case MO_LESL: - return helper_le_ldsl_mmu(env, taddr, oi, ra); - case MO_LEUQ: - return helper_le_ldq_mmu(env, taddr, oi, ra); - case MO_BEUW: - return helper_be_lduw_mmu(env, taddr, oi, ra); - case MO_BESW: - return helper_be_ldsw_mmu(env, taddr, oi, ra); - case MO_BEUL: - return helper_be_ldul_mmu(env, taddr, oi, ra); - case MO_BESL: - return helper_be_ldsl_mmu(env, taddr, oi, ra); - case MO_BEUQ: - return helper_be_ldq_mmu(env, taddr, oi, ra); + return helper_ldsb_mmu(env, taddr, oi, ra); + case MO_UW: + return helper_lduw_mmu(env, taddr, oi, ra); + case MO_SW: + return helper_ldsw_mmu(env, taddr, oi, ra); + case MO_UL: + return helper_ldul_mmu(env, taddr, oi, ra); + case MO_SL: + return helper_ldsl_mmu(env, taddr, oi, ra); + case MO_UQ: + return helper_ldq_mmu(env, taddr, oi, ra); default: g_assert_not_reached(); } -#else - void *haddr = g2h(env_cpu(env), taddr); - unsigned a_mask = (1u << get_alignment_bits(mop)) - 1; - uint64_t ret; - - set_helper_retaddr(ra); - if (taddr & a_mask) { - helper_unaligned_ld(env, taddr); - } - switch (mop & (MO_BSWAP | MO_SSIZE)) { - case MO_UB: - ret = ldub_p(haddr); - break; - case MO_SB: - ret = ldsb_p(haddr); - break; - case MO_LEUW: - ret = lduw_le_p(haddr); - break; - case MO_LESW: - ret = ldsw_le_p(haddr); - break; - case MO_LEUL: - ret = (uint32_t)ldl_le_p(haddr); - break; - case MO_LESL: - ret = (int32_t)ldl_le_p(haddr); - break; - case MO_LEUQ: - ret = ldq_le_p(haddr); - break; - case MO_BEUW: - ret = lduw_be_p(haddr); - break; - case MO_BESW: - ret = ldsw_be_p(haddr); - break; - case MO_BEUL: - ret = (uint32_t)ldl_be_p(haddr); - break; - case MO_BESL: - ret = (int32_t)ldl_be_p(haddr); - break; - case MO_BEUQ: - ret = ldq_be_p(haddr); - break; - default: - g_assert_not_reached(); - } - clear_helper_retaddr(); - return ret; -#endif } -static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, +static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val, MemOpIdx oi, const void *tb_ptr) { MemOp mop = get_memop(oi); uintptr_t ra = (uintptr_t)tb_ptr; -#ifdef CONFIG_SOFTMMU - switch (mop & (MO_BSWAP | MO_SIZE)) { + switch (mop & 
MO_SIZE) { case MO_UB: - helper_ret_stb_mmu(env, taddr, val, oi, ra); + helper_stb_mmu(env, taddr, val, oi, ra); break; - case MO_LEUW: - helper_le_stw_mmu(env, taddr, val, oi, ra); + case MO_UW: + helper_stw_mmu(env, taddr, val, oi, ra); break; - case MO_LEUL: - helper_le_stl_mmu(env, taddr, val, oi, ra); + case MO_UL: + helper_stl_mmu(env, taddr, val, oi, ra); break; - case MO_LEUQ: - helper_le_stq_mmu(env, taddr, val, oi, ra); - break; - case MO_BEUW: - helper_be_stw_mmu(env, taddr, val, oi, ra); - break; - case MO_BEUL: - helper_be_stl_mmu(env, taddr, val, oi, ra); - break; - case MO_BEUQ: - helper_be_stq_mmu(env, taddr, val, oi, ra); + case MO_UQ: + helper_stq_mmu(env, taddr, val, oi, ra); break; default: g_assert_not_reached(); } -#else - void *haddr = g2h(env_cpu(env), taddr); - unsigned a_mask = (1u << get_alignment_bits(mop)) - 1; - - set_helper_retaddr(ra); - if (taddr & a_mask) { - helper_unaligned_st(env, taddr); - } - switch (mop & (MO_BSWAP | MO_SIZE)) { - case MO_UB: - stb_p(haddr, val); - break; - case MO_LEUW: - stw_le_p(haddr, val); - break; - case MO_LEUL: - stl_le_p(haddr, val); - break; - case MO_LEUQ: - stq_le_p(haddr, val); - break; - case MO_BEUW: - stw_be_p(haddr, val); - break; - case MO_BEUL: - stl_be_p(haddr, val); - break; - case MO_BEUQ: - stq_be_p(haddr, val); - break; - default: - g_assert_not_reached(); - } - clear_helper_retaddr(); -#endif } #if TCG_TARGET_REG_BITS == 64 @@ -480,10 +372,9 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, TCGReg r0, r1, r2, r3, r4, r5; tcg_target_ulong t1; TCGCond condition; - target_ulong taddr; uint8_t pos, len; uint32_t tmp32; - uint64_t tmp64; + uint64_t tmp64, taddr; uint64_t T1, T2; MemOpIdx oi; int32_t ofs; @@ -1030,30 +921,41 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, tb_ptr = ptr; break; - case INDEX_op_qemu_ld_i32: - if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { + case INDEX_op_qemu_ld_a32_i32: + tci_args_rrm(insn, &r0, &r1, &oi); + taddr = (uint32_t)regs[r1]; + goto do_ld_i32; + case INDEX_op_qemu_ld_a64_i32: + if (TCG_TARGET_REG_BITS == 64) { tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; } else { tci_args_rrrm(insn, &r0, &r1, &r2, &oi); taddr = tci_uint64(regs[r2], regs[r1]); } - tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr); - regs[r0] = tmp32; + do_ld_i32: + regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr); break; - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i64: + if (TCG_TARGET_REG_BITS == 64) { + tci_args_rrm(insn, &r0, &r1, &oi); + taddr = (uint32_t)regs[r1]; + } else { + tci_args_rrrm(insn, &r0, &r1, &r2, &oi); + taddr = (uint32_t)regs[r2]; + } + goto do_ld_i64; + case INDEX_op_qemu_ld_a64_i64: if (TCG_TARGET_REG_BITS == 64) { tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; - } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { - tci_args_rrrm(insn, &r0, &r1, &r2, &oi); - taddr = regs[r2]; } else { tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); taddr = tci_uint64(regs[r3], regs[r2]); oi = regs[r4]; } + do_ld_i64: tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr); if (TCG_TARGET_REG_BITS == 32) { tci_write_reg64(regs, r1, r0, tmp64); @@ -1062,34 +964,45 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, } break; - case INDEX_op_qemu_st_i32: - if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { + case INDEX_op_qemu_st_a32_i32: + tci_args_rrm(insn, &r0, &r1, &oi); + taddr = (uint32_t)regs[r1]; + goto do_st_i32; + case INDEX_op_qemu_st_a64_i32: + if (TCG_TARGET_REG_BITS == 64) { tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; } else { 
tci_args_rrrm(insn, &r0, &r1, &r2, &oi); taddr = tci_uint64(regs[r2], regs[r1]); } - tmp32 = regs[r0]; - tci_qemu_st(env, taddr, tmp32, oi, tb_ptr); + do_st_i32: + tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr); break; - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i64: if (TCG_TARGET_REG_BITS == 64) { tci_args_rrm(insn, &r0, &r1, &oi); - taddr = regs[r1]; tmp64 = regs[r0]; + taddr = (uint32_t)regs[r1]; } else { - if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { - tci_args_rrrm(insn, &r0, &r1, &r2, &oi); - taddr = regs[r2]; - } else { - tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); - taddr = tci_uint64(regs[r3], regs[r2]); - oi = regs[r4]; - } + tci_args_rrrm(insn, &r0, &r1, &r2, &oi); tmp64 = tci_uint64(regs[r1], regs[r0]); + taddr = (uint32_t)regs[r2]; } + goto do_st_i64; + case INDEX_op_qemu_st_a64_i64: + if (TCG_TARGET_REG_BITS == 64) { + tci_args_rrm(insn, &r0, &r1, &oi); + tmp64 = regs[r0]; + taddr = regs[r1]; + } else { + tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); + tmp64 = tci_uint64(regs[r1], regs[r0]); + taddr = tci_uint64(regs[r3], regs[r2]); + oi = regs[r4]; + } + do_st_i64: tci_qemu_st(env, taddr, tmp64, oi, tb_ptr); break; @@ -1359,15 +1272,21 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) str_r(r3), str_r(r4), str_r(r5)); break; - case INDEX_op_qemu_ld_i64: - case INDEX_op_qemu_st_i64: - len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_st_a32_i32: + len = 1 + 1; + goto do_qemu_ldst; + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_st_a64_i32: + len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); + goto do_qemu_ldst; + case INDEX_op_qemu_ld_a64_i64: + case INDEX_op_qemu_st_a64_i64: + len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); goto do_qemu_ldst; - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_st_i32: - len = 1; do_qemu_ldst: - len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS); switch (len) { case 2: tci_args_rrm(insn, &r0, &r1, &oi); diff --git a/tcg/tci/README b/tcg/tci/README index f72a40a395..4a8b5b5401 100644 --- a/tcg/tci/README +++ b/tcg/tci/README @@ -49,7 +49,7 @@ The only difference from running QEMU with TCI to running without TCI should be speed. Especially during development of TCI, it was very useful to compare runs with and without TCI. Create /tmp/qemu.log by - qemu-system-i386 -d in_asm,op_opt,cpu -D /tmp/qemu.log -singlestep + qemu-system-i386 -d in_asm,op_opt,cpu -D /tmp/qemu.log -accel tcg,one-insn-per-tb=on once with interpreter and once without interpreter and compare the resulting qemu.log files. This is also useful to see the effects of additional diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc index c1d34d7bd1..c9516a5e8b 100644 --- a/tcg/tci/tcg-target.c.inc +++ b/tcg/tci/tcg-target.c.inc @@ -156,22 +156,22 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_setcond2_i32: return C_O1_I4(r, r, r, r, r); - case INDEX_op_qemu_ld_i32: - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - ? C_O1_I1(r, r) - : C_O1_I2(r, r, r)); - case INDEX_op_qemu_ld_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, r) - : C_O2_I2(r, r, r, r)); - case INDEX_op_qemu_st_i32: - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - ? C_O0_I2(r, r) - : C_O0_I3(r, r, r)); - case INDEX_op_qemu_st_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 
C_O0_I3(r, r, r) - : C_O0_I4(r, r, r, r)); + case INDEX_op_qemu_ld_a32_i32: + return C_O1_I1(r, r); + case INDEX_op_qemu_ld_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r); + case INDEX_op_qemu_ld_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r); + case INDEX_op_qemu_ld_a64_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r); + case INDEX_op_qemu_st_a32_i32: + return C_O0_I2(r, r); + case INDEX_op_qemu_st_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); + case INDEX_op_qemu_st_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); + case INDEX_op_qemu_st_a64_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r); default: g_assert_not_reached(); @@ -243,7 +243,7 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, return false; } -static void stack_bounds_check(TCGReg base, target_long offset) +static void stack_bounds_check(TCGReg base, intptr_t offset) { if (base == TCG_REG_CALL_STACK) { tcg_debug_assert(offset >= 0); @@ -557,6 +557,99 @@ static void tcg_out_movi(TCGContext *s, TCGType type, } } +static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) +{ + switch (type) { + case TCG_TYPE_I32: + tcg_debug_assert(TCG_TARGET_HAS_ext8s_i32); + tcg_out_op_rr(s, INDEX_op_ext8s_i32, rd, rs); + break; +#if TCG_TARGET_REG_BITS == 64 + case TCG_TYPE_I64: + tcg_debug_assert(TCG_TARGET_HAS_ext8s_i64); + tcg_out_op_rr(s, INDEX_op_ext8s_i64, rd, rs); + break; +#endif + default: + g_assert_not_reached(); + } +} + +static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs) +{ + if (TCG_TARGET_REG_BITS == 64) { + tcg_debug_assert(TCG_TARGET_HAS_ext8u_i64); + tcg_out_op_rr(s, INDEX_op_ext8u_i64, rd, rs); + } else { + tcg_debug_assert(TCG_TARGET_HAS_ext8u_i32); + tcg_out_op_rr(s, INDEX_op_ext8u_i32, rd, rs); + } +} + +static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) +{ + switch (type) { + case TCG_TYPE_I32: + tcg_debug_assert(TCG_TARGET_HAS_ext16s_i32); + tcg_out_op_rr(s, INDEX_op_ext16s_i32, rd, rs); + break; +#if TCG_TARGET_REG_BITS == 64 + case TCG_TYPE_I64: + tcg_debug_assert(TCG_TARGET_HAS_ext16s_i64); + tcg_out_op_rr(s, INDEX_op_ext16s_i64, rd, rs); + break; +#endif + default: + g_assert_not_reached(); + } +} + +static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs) +{ + if (TCG_TARGET_REG_BITS == 64) { + tcg_debug_assert(TCG_TARGET_HAS_ext16u_i64); + tcg_out_op_rr(s, INDEX_op_ext16u_i64, rd, rs); + } else { + tcg_debug_assert(TCG_TARGET_HAS_ext16u_i32); + tcg_out_op_rr(s, INDEX_op_ext16u_i32, rd, rs); + } +} + +static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + tcg_debug_assert(TCG_TARGET_HAS_ext32s_i64); + tcg_out_op_rr(s, INDEX_op_ext32s_i64, rd, rs); +} + +static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + tcg_debug_assert(TCG_TARGET_HAS_ext32u_i64); + tcg_out_op_rr(s, INDEX_op_ext32u_i64, rd, rs); +} + +static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_ext32s(s, rd, rs); +} + +static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_out_ext32u(s, rd, rs); +} + +static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs) +{ + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + tcg_out_mov(s, TCG_TYPE_I32, rd, rs); +} + +static bool tcg_out_xchg(TCGContext *s, TCGType type, 
TCGReg r1, TCGReg r2) +{ + return false; +} + static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, tcg_target_long imm) { @@ -715,14 +808,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */ CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */ - CASE_32_64(ext8s) /* Optional (TCG_TARGET_HAS_ext8s_*). */ - CASE_32_64(ext8u) /* Optional (TCG_TARGET_HAS_ext8u_*). */ - CASE_32_64(ext16s) /* Optional (TCG_TARGET_HAS_ext16s_*). */ - CASE_32_64(ext16u) /* Optional (TCG_TARGET_HAS_ext16u_*). */ - CASE_64(ext32s) /* Optional (TCG_TARGET_HAS_ext32s_i64). */ - CASE_64(ext32u) /* Optional (TCG_TARGET_HAS_ext32u_i64). */ - CASE_64(ext_i32) - CASE_64(extu_i32) CASE_32_64(ctpop) /* Optional (TCG_TARGET_HAS_ctpop_*). */ case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */ case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */ @@ -764,21 +849,24 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]); break; - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_st_i32: - if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_st_a32_i32: + tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); + break; + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_st_a32_i64: + if (TCG_TARGET_REG_BITS == 64) { tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); } else { tcg_out_op_rrrm(s, opc, args[0], args[1], args[2], args[3]); } break; - - case INDEX_op_qemu_ld_i64: - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_ld_a64_i64: + case INDEX_op_qemu_st_a64_i64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); - } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { - tcg_out_op_rrrm(s, opc, args[0], args[1], args[2], args[3]); } else { tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[4]); tcg_out_op_rrrrr(s, opc, args[0], args[1], @@ -795,8 +883,21 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_call: /* Always emitted via tcg_out_call. */ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ + case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ + case INDEX_op_ext8s_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: default: - tcg_abort(); + g_assert_not_reached(); } } @@ -865,3 +966,8 @@ static void tcg_target_init(TCGContext *s) static inline void tcg_target_qemu_prologue(TCGContext *s) { } + +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return true; +} diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h index 7140a76a73..60a6ed65ce 100644 --- a/tcg/tci/tcg-target.h +++ b/tcg/tci/tcg-target.h @@ -42,7 +42,6 @@ #define TCG_TARGET_INTERPRETER 1 #define TCG_TARGET_INSN_UNIT_SIZE 4 -#define TCG_TARGET_TLB_DISPLACEMENT_BITS 32 #define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) #if UINTPTR_MAX == UINT32_MAX @@ -127,6 +126,8 @@ #define TCG_TARGET_HAS_mulu2_i32 1 #endif /* TCG_TARGET_REG_BITS == 64 */ +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + /* Number of registers available. 
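* The interpreter encodes register operands in 4-bit fields of its 32-bit * instruction words (see the extract32(insn, ..., 4) decoders in tci.c), * which is what fixes the count at 16 independent of the host.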
*/ #define TCG_TARGET_NB_REGS 16 @@ -176,6 +177,4 @@ typedef enum { We prefer consistency across hosts on this. */ #define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_HAS_MEMORY_BSWAP 1 - #endif /* TCG_TARGET_H */ diff --git a/tests/Makefile.include b/tests/Makefile.include index 9422ddaece..0184ef2237 100644 --- a/tests/Makefile.include +++ b/tests/Makefile.include @@ -89,10 +89,9 @@ distclean-tcg: $(DISTCLEAN_TCG_TARGET_RULES) # Build up our target list from the filtered list of ninja targets TARGETS=$(patsubst libqemu-%.fa, %, $(filter libqemu-%.fa, $(ninja-targets))) -TESTS_VENV_DIR=$(BUILD_DIR)/tests/venv +TESTS_VENV_TOKEN=$(BUILD_DIR)/pyvenv/tests.group TESTS_VENV_REQ=$(SRC_PATH)/tests/requirements.txt TESTS_RESULTS_DIR=$(BUILD_DIR)/tests/results -TESTS_PYTHON=$(TESTS_VENV_DIR)/bin/python3 ifndef AVOCADO_TESTS AVOCADO_TESTS=tests/avocado endif @@ -108,11 +107,10 @@ else endif quiet-venv-pip = $(quiet-@)$(call quiet-command-run, \ - $(TESTS_PYTHON) -m pip -q --disable-pip-version-check $1, \ + $(PYTHON) -m pip -q --disable-pip-version-check $1, \ "VENVPIP","$1") -$(TESTS_VENV_DIR): $(TESTS_VENV_REQ) - $(call quiet-command, $(PYTHON) -m venv $@, VENV, $@) +$(TESTS_VENV_TOKEN): $(TESTS_VENV_REQ) $(call quiet-venv-pip,install -e "$(SRC_PATH)/python/") $(call quiet-venv-pip,install -r $(TESTS_VENV_REQ)) $(call quiet-command, touch $@) @@ -121,7 +119,7 @@ $(TESTS_RESULTS_DIR): $(call quiet-command, mkdir -p $@, \ MKDIR, $@) -check-venv: $(TESTS_VENV_DIR) +check-venv: $(TESTS_VENV_TOKEN) FEDORA_31_ARCHES_TARGETS=$(patsubst %-softmmu,%, $(filter %-softmmu,$(TARGETS))) FEDORA_31_ARCHES_CANDIDATES=$(patsubst ppc64,ppc64le,$(FEDORA_31_ARCHES_TARGETS)) @@ -131,21 +129,25 @@ FEDORA_31_DOWNLOAD=$(filter $(FEDORA_31_ARCHES),$(FEDORA_31_ARCHES_CANDIDATES)) # download one specific Fedora 31 image get-vm-image-fedora-31-%: check-venv $(call quiet-command, \ - $(TESTS_PYTHON) -m avocado vmimage get \ + $(PYTHON) -m avocado vmimage get \ --distro=fedora --distro-version=31 --arch=$*, \ "AVOCADO", "Downloading avocado tests VM image for $*") # download all vm images, according to defined targets get-vm-images: check-venv $(patsubst %,get-vm-image-fedora-31-%, $(FEDORA_31_DOWNLOAD)) +JOBS_OPTION=$(lastword -j1 $(filter-out -j, $(filter -j%,$(MAKEFLAGS)))) + check-avocado: check-venv $(TESTS_RESULTS_DIR) get-vm-images - $(call quiet-command, \ - $(TESTS_PYTHON) -m avocado \ - --show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR) \ - $(if $(AVOCADO_TAGS),, --filter-by-tags-include-empty \ - --filter-by-tags-include-empty-key) \ - $(AVOCADO_CMDLINE_TAGS) \ - $(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), \ + $(call quiet-command, \ + $(PYTHON) -m avocado \ + --show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR) \ + $(if $(AVOCADO_TAGS),, \ + --filter-by-tags-include-empty \ + --filter-by-tags-include-empty-key) \ + --max-parallel-tasks $(JOBS_OPTION:-j%=%) \ + $(AVOCADO_CMDLINE_TAGS) \ + $(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), \ "AVOCADO", "tests/avocado") check-acceptance-deprecated-warning: @@ -163,7 +165,7 @@ check: check-build: run-ninja check-clean: - rm -rf $(TESTS_VENV_DIR) $(TESTS_RESULTS_DIR) + rm -rf $(TESTS_RESULTS_DIR) clean: check-clean clean-tcg distclean: distclean-tcg diff --git a/tests/avocado/acpi-bits.py b/tests/avocado/acpi-bits.py index 14038fa3c4..3ed286dcbd 100644 --- a/tests/avocado/acpi-bits.py +++ b/tests/avocado/acpi-bits.py @@ -123,9 +123,9 @@ class QEMUBitsMachine(QEMUMachine): # pylint: disable=too-few-public-methods """return the base 
argument to QEMU binary""" return self._base_args -@skipIf(not supported_platform() or missing_deps() or os.getenv('GITLAB_CI'), - 'incorrect platform or dependencies (%s) not installed ' \ - 'or running on GitLab' % ','.join(deps)) +@skipIf(not supported_platform() or missing_deps(), + 'unsupported platform or dependencies (%s) not installed' \ + % ','.join(deps)) class AcpiBitsTest(QemuBaseTest): #pylint: disable=too-many-instance-attributes """ ACPI and SMBIOS tests using biosbits. @@ -356,7 +356,7 @@ class AcpiBitsTest(QemuBaseTest): #pylint: disable=too-many-instance-attributes """ if self._vm: self.assertFalse(not self._vm.is_running) - if not os.getenv('BITS_DEBUG'): + if not os.getenv('BITS_DEBUG') and self._workDir: self.logger.info('removing the work directory %s', self._workDir) shutil.rmtree(self._workDir) else: diff --git a/tests/avocado/avocado_qemu/__init__.py b/tests/avocado/avocado_qemu/__init__.py index a313e88c07..33090903f1 100644 --- a/tests/avocado/avocado_qemu/__init__.py +++ b/tests/avocado/avocado_qemu/__init__.py @@ -309,6 +309,16 @@ class QemuSystemTest(QemuBaseTest): if netdevhelp.find('\n' + netdevname + '\n') < 0: self.cancel('no support for user networking') + def require_multiprocess(self): + """ + Test for the presence of the x-pci-proxy-dev which is required + to support multiprocess. + """ + devhelp = run_cmd([self.qemu_bin, + '-M', 'none', '-device', 'help'])[0]; + if devhelp.find('x-pci-proxy-dev') < 0: + self.cancel('no support for multiprocess device emulation') + def _new_vm(self, name, *args): self._sd = tempfile.TemporaryDirectory(prefix="qemu_") vm = QEMUMachine(self.qemu_bin, base_temp_dir=self.workdir, @@ -320,6 +330,19 @@ class QemuSystemTest(QemuBaseTest): vm.add_args(*args) return vm + def get_qemu_img(self): + self.log.debug('Looking for and selecting a qemu-img binary') + + # If qemu-img has been built, use it, otherwise the system wide one + # will be used. + qemu_img = os.path.join(BUILD_DIR, 'qemu-img') + if not os.path.exists(qemu_img): + qemu_img = find_command('qemu-img', False) + if qemu_img is False: + self.cancel('Could not find "qemu-img"') + + return qemu_img + @property def vm(self): return self.get_vm(name='default') @@ -421,6 +444,14 @@ class LinuxSSHMixIn: f'Guest command failed: {command}') return stdout_lines, stderr_lines + def ssh_command_output_contains(self, cmd, exp): + stdout, _ = self.ssh_command(cmd) + for line in stdout: + if exp in line: + break + else: + self.fail('"%s" output does not contain "%s"' % (cmd, exp)) + class LinuxDistro: """Represents a Linux distribution @@ -584,17 +615,9 @@ class LinuxTest(LinuxSSHMixIn, QemuSystemTest): return (ssh_public_key, ssh_private_key) def download_boot(self): - self.log.debug('Looking for and selecting a qemu-img binary to be ' - 'used to create the bootable snapshot image') - # If qemu-img has been built, use it, otherwise the system wide one - # will be used. If none is available, the test will cancel. - qemu_img = os.path.join(BUILD_DIR, 'qemu-img') - if not os.path.exists(qemu_img): - qemu_img = find_command('qemu-img', False) - if qemu_img is False: - self.cancel('Could not find "qemu-img", which is required to ' - 'create the bootable image') - vmimage.QEMU_IMG = qemu_img + # Set the qemu-img binary. + # If none is available, the test will cancel. 
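
The get_qemu_img() helper added above encodes a simple preference order: use the qemu-img from the build tree if it exists, otherwise fall back to a system-wide binary, and cancel the test when neither can be found. A minimal standalone sketch of that lookup, assuming shutil.which as a stand-in for avocado's find_command() and a hypothetical build_dir path:

import os
import shutil

def select_qemu_img(build_dir):
    """Prefer the build-tree qemu-img, else fall back to the system one."""
    candidate = os.path.join(build_dir, 'qemu-img')
    if os.path.exists(candidate):
        return candidate
    system_wide = shutil.which('qemu-img')   # find_command() in the real helper
    if system_wide is None:
        # the test helper calls self.cancel() here rather than raising
        raise RuntimeError('Could not find "qemu-img"')
    return system_wide
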
+ vmimage.QEMU_IMG = super().get_qemu_img() self.log.info('Downloading/preparing boot image') # Fedora 31 only provides ppc64le images diff --git a/tests/avocado/boot_linux.py b/tests/avocado/boot_linux.py index fe0bb180d9..be30dcbd58 100644 --- a/tests/avocado/boot_linux.py +++ b/tests/avocado/boot_linux.py @@ -84,6 +84,8 @@ class BootLinuxAarch64(LinuxTest): self.launch_and_wait(set_up_ssh_connection=False) +# See the tuxrun_baselines.py tests for almost the same coverage in a lot +# less time. class BootLinuxPPC64(LinuxTest): """ :avocado: tags=arch:ppc64 @@ -91,6 +93,7 @@ class BootLinuxPPC64(LinuxTest): timeout = 360 + @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_pseries_tcg(self): """ :avocado: tags=machine:pseries diff --git a/tests/avocado/boot_linux_console.py b/tests/avocado/boot_linux_console.py index 574609bf43..c0675809e6 100644 --- a/tests/avocado/boot_linux_console.py +++ b/tests/avocado/boot_linux_console.py @@ -581,7 +581,10 @@ class BootLinuxConsole(LinuxKernelTest): 'Allwinner sun4i/sun5i') exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', 'system-control@1c00000') - # cubieboard's reboot is not functioning; omit reboot test. + exec_command_and_wait_for_pattern(self, 'reboot', + 'reboot: Restarting system') + # Wait for VM to shut down gracefully + self.vm.wait() def test_arm_cubieboard_sata(self): """ @@ -625,7 +628,10 @@ class BootLinuxConsole(LinuxKernelTest): 'Allwinner sun4i/sun5i') exec_command_and_wait_for_pattern(self, 'cat /proc/partitions', 'sda') - # cubieboard's reboot is not functioning; omit reboot test. + exec_command_and_wait_for_pattern(self, 'reboot', + 'reboot: Restarting system') + # Wait for VM to shut down gracefully + self.vm.wait() @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited') def test_arm_cubieboard_openwrt_22_03_2(self): @@ -672,7 +678,10 @@ class BootLinuxConsole(LinuxKernelTest): exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', 'Allwinner sun4i/sun5i') - # cubieboard's reboot is not functioning; omit reboot test. + exec_command_and_wait_for_pattern(self, 'reboot', + 'reboot: Restarting system') + # Wait for VM to shut down gracefully + self.vm.wait() @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') def test_arm_quanta_gsj(self): diff --git a/tests/avocado/kvm_xen_guest.py b/tests/avocado/kvm_xen_guest.py new file mode 100644 index 0000000000..5391283113 --- /dev/null +++ b/tests/avocado/kvm_xen_guest.py @@ -0,0 +1,171 @@ +# KVM Xen guest functional tests +# +# Copyright © 2021 Red Hat, Inc. +# Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Author: +# David Woodhouse +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from qemu.machine import machine + +from avocado_qemu import LinuxSSHMixIn +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern + +class KVMXenGuest(QemuSystemTest, LinuxSSHMixIn): + """ + :avocado: tags=arch:x86_64 + :avocado: tags=machine:q35 + :avocado: tags=accel:kvm + :avocado: tags=kvm_xen_guest + """ + + KERNEL_DEFAULT = 'printk.time=0 root=/dev/xvda console=ttyS0' + + kernel_path = None + kernel_params = None + + # Fetch assets from the kvm-xen-guest subdir of my shared test + # images directory on fileserver.linaro.org where you can find + # build instructions for how they were assembled. + def get_asset(self, name, sha1): + base_url = ('https://fileserver.linaro.org/s/' + 'kE4nCFLdQcoBF9t/download?'
+ 'path=%2Fkvm-xen-guest&files=' ) + url = base_url + name + # use explicit name rather than failing to neatly parse the + # URL into a unique one + return self.fetch_asset(name=name, locations=(url), asset_hash=sha1) + + def common_vm_setup(self): + # We also catch lack of KVM_XEN support if we fail to launch + self.require_accelerator("kvm") + + self.vm.set_console() + + self.vm.add_args("-accel", "kvm,xen-version=0x4000a,kernel-irqchip=split") + self.vm.add_args("-smp", "2") + + self.kernel_path = self.get_asset("bzImage", + "367962983d0d32109998a70b45dcee4672d0b045") + self.rootfs = self.get_asset("rootfs.ext4", + "f1478401ea4b3fa2ea196396be44315bab2bb5e4") + + def run_and_check(self): + self.vm.add_args('-kernel', self.kernel_path, + '-append', self.kernel_params, + '-drive', f"file={self.rootfs},if=none,format=raw,id=drv0", + '-device', 'xen-disk,drive=drv0,vdev=xvda', + '-device', 'virtio-net-pci,netdev=unet', + '-netdev', 'user,id=unet,hostfwd=:127.0.0.1:0-:22') + + try: + self.vm.launch() + except machine.VMLaunchFailure as e: + if "Xen HVM guest support not present" in e.output: + self.cancel("KVM Xen support is not present " + "(need v5.12+ kernel with CONFIG_KVM_XEN)") + elif "Property 'kvm-accel.xen-version' not found" in e.output: + self.cancel("QEMU not built with CONFIG_XEN_EMU support") + else: + raise e + + self.log.info('VM launched, waiting for sshd') + console_pattern = 'Starting dropbear sshd: OK' + wait_for_console_pattern(self, console_pattern, 'Oops') + self.log.info('sshd ready') + self.ssh_connect('root', '', False) + + self.ssh_command('cat /proc/cmdline') + self.ssh_command('dmesg | grep -e "Grant table initialized"') + + def test_kvm_xen_guest(self): + """ + :avocado: tags=kvm_xen_guest + """ + + self.common_vm_setup() + + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks') + self.run_and_check() + self.ssh_command('grep xen-pirq.*msi /proc/interrupts') + + def test_kvm_xen_guest_nomsi(self): + """ + :avocado: tags=kvm_xen_guest_nomsi + """ + + self.common_vm_setup() + + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks pci=nomsi') + self.run_and_check() + self.ssh_command('grep xen-pirq.* /proc/interrupts') + + def test_kvm_xen_guest_noapic_nomsi(self): + """ + :avocado: tags=kvm_xen_guest_noapic_nomsi + """ + + self.common_vm_setup() + + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks noapic pci=nomsi') + self.run_and_check() + self.ssh_command('grep xen-pirq /proc/interrupts') + + def test_kvm_xen_guest_vapic(self): + """ + :avocado: tags=kvm_xen_guest_vapic + """ + + self.common_vm_setup() + self.vm.add_args('-cpu', 'host,+xen-vapic') + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks') + self.run_and_check() + self.ssh_command('grep xen-pirq /proc/interrupts') + self.ssh_command('grep PCI-MSI /proc/interrupts') + + def test_kvm_xen_guest_novector(self): + """ + :avocado: tags=kvm_xen_guest_novector + """ + + self.common_vm_setup() + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks' + + ' xen_no_vector_callback') + self.run_and_check() + self.ssh_command('grep xen-platform-pci /proc/interrupts') + + def test_kvm_xen_guest_novector_nomsi(self): + """ + :avocado: tags=kvm_xen_guest_novector_nomsi + """ + + self.common_vm_setup() + + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks pci=nomsi' + + ' xen_no_vector_callback') + self.run_and_check() + self.ssh_command('grep xen-platform-pci /proc/interrupts') + + def 
test_kvm_xen_guest_novector_noapic(self): + """ + :avocado: tags=kvm_xen_guest_novector_noapic + """ + + self.common_vm_setup() + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks' + + ' xen_no_vector_callback noapic') + self.run_and_check() + self.ssh_command('grep xen-platform-pci /proc/interrupts') diff --git a/tests/avocado/linux_ssh_mips_malta.py b/tests/avocado/linux_ssh_mips_malta.py index 0179d8a6ca..d9bb525ad9 100644 --- a/tests/avocado/linux_ssh_mips_malta.py +++ b/tests/avocado/linux_ssh_mips_malta.py @@ -101,14 +101,6 @@ class LinuxSSH(QemuSystemTest, LinuxSSHMixIn): self.ssh_disconnect_vm() wait_for_console_pattern(self, 'Power down', 'Oops') - def ssh_command_output_contains(self, cmd, exp): - stdout, _ = self.ssh_command(cmd) - for line in stdout: - if exp in line: - break - else: - self.fail('"%s" output does not contain "%s"' % (cmd, exp)) - def run_common_commands(self, wordsize): self.ssh_command_output_contains( 'cat /proc/cpuinfo', diff --git a/tests/avocado/machine_aarch64_sbsaref.py b/tests/avocado/machine_aarch64_sbsaref.py new file mode 100644 index 0000000000..0a79fa7ab6 --- /dev/null +++ b/tests/avocado/machine_aarch64_sbsaref.py @@ -0,0 +1,158 @@ +# Functional test that boots a Linux kernel and checks the console +# +# SPDX-FileCopyrightText: 2023 Linaro Ltd. +# SPDX-FileContributor: Philippe Mathieu-Daudé +# SPDX-FileContributor: Marcin Juszkiewicz +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from avocado import skip +from avocado import skipUnless +from avocado.utils import archive + +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern +from avocado_qemu import interrupt_interactive_console_until_pattern + + +class Aarch64SbsarefMachine(QemuSystemTest): + """ + :avocado: tags=arch:aarch64 + :avocado: tags=machine:sbsa-ref + """ + + timeout = 180 + + def fetch_firmware(self): + """ + Flash volumes generated using: + + - Fedora GNU Toolchain version 12.2.1 20220819 (Red Hat Cross 12.2.1-2) + + - Trusted Firmware-A + https://github.com/ARM-software/arm-trusted-firmware/tree/5fdb2e54 + + - Tianocore EDK II + https://github.com/tianocore/edk2/tree/494127613b + https://github.com/tianocore/edk2-non-osi/tree/41876073 + https://github.com/tianocore/edk2-platforms/tree/8efa4f42 + """ + + # Secure BootRom (TF-A code) + fs0_xz_url = ( + "https://fileserver.linaro.org/s/ATnSmq6k8SoXgbH/" + "download/SBSA_FLASH0.fd.xz" + ) + fs0_xz_hash = "a210a09692bcbe0a3743ffd0df44e80e0c7ad8ab" + tar_xz_path = self.fetch_asset(fs0_xz_url, asset_hash=fs0_xz_hash) + archive.extract(tar_xz_path, self.workdir) + fs0_path = os.path.join(self.workdir, "SBSA_FLASH0.fd") + + # Non-secure rom (UEFI and EFI variables) + fs1_xz_url = ( + "https://fileserver.linaro.org/s/t8foNnMPz74DZZy/" + "download/SBSA_FLASH1.fd.xz" + ) + fs1_xz_hash = "13a9a262953787c7fc5a9155dfaa26e703631e02" + tar_xz_path = self.fetch_asset(fs1_xz_url, asset_hash=fs1_xz_hash) + archive.extract(tar_xz_path, self.workdir) + fs1_path = os.path.join(self.workdir, "SBSA_FLASH1.fd") + + for path in [fs0_path, fs1_path]: + with open(path, "ab+") as fd: + fd.truncate(256 << 20) # Expand volumes to 256MiB + + self.vm.set_console() + self.vm.add_args( + "-drive", + f"if=pflash,file={fs0_path},format=raw", + "-drive", + f"if=pflash,file={fs1_path},format=raw", + "-smp", + "1", + "-machine", + "sbsa-ref", + ) + + def test_sbsaref_edk2_firmware(self): + """ + :avocado: tags=cpu:cortex-a57 + """ + + self.fetch_firmware() + self.vm.launch() + + # TF-A boot 
sequence: + # + # https://github.com/ARM-software/arm-trusted-firmware/blob/v2.8.0/\ + # docs/design/trusted-board-boot.rst#trusted-board-boot-sequence + # https://trustedfirmware-a.readthedocs.io/en/v2.8/\ + # design/firmware-design.html#cold-boot + + # AP Trusted ROM + wait_for_console_pattern(self, "Booting Trusted Firmware") + wait_for_console_pattern(self, "BL1: v2.8(release):v2.8") + wait_for_console_pattern(self, "BL1: Booting BL2") + + # Trusted Boot Firmware + wait_for_console_pattern(self, "BL2: v2.8(release)") + wait_for_console_pattern(self, "Booting BL31") + + # EL3 Runtime Software + wait_for_console_pattern(self, "BL31: v2.8(release)") + + # Non-trusted Firmware + wait_for_console_pattern(self, "UEFI firmware (version 1.0") + interrupt_interactive_console_until_pattern(self, "QEMU SBSA-REF Machine") + + # This tests the whole boot chain from EFI to Userspace + # We only boot a whole OS for the current top level CPU and GIC + # Other test profiles should use more minimal boots + def boot_alpine_linux(self, cpu): + self.fetch_firmware() + + iso_url = ( + "https://dl-cdn.alpinelinux.org/" + "alpine/v3.17/releases/aarch64/alpine-standard-3.17.2-aarch64.iso" + ) + + iso_hash = "5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027" + iso_path = self.fetch_asset(iso_url, algorithm="sha256", asset_hash=iso_hash) + + self.vm.set_console() + self.vm.add_args( + "-cpu", + cpu, + "-drive", + f"file={iso_path},format=raw", + "-device", + "virtio-rng-pci,rng=rng0", + "-object", + "rng-random,id=rng0,filename=/dev/urandom", + ) + + self.vm.launch() + wait_for_console_pattern(self, "Welcome to Alpine Linux 3.17") + + @skipUnless(os.getenv("AVOCADO_TIMEOUT_EXPECTED"), "Test might timeout") + def test_sbsaref_alpine_linux_cortex_a57(self): + """ + :avocado: tags=cpu:cortex-a57 + """ + self.boot_alpine_linux("cortex-a57") + + @skipUnless(os.getenv("AVOCADO_TIMEOUT_EXPECTED"), "Test might timeout") + def test_sbsaref_alpine_linux_neoverse_n1(self): + """ + :avocado: tags=cpu:max + """ + self.boot_alpine_linux("neoverse-n1") + + @skip("requires TF-A update to handle FEAT_FGT") + def test_sbsaref_alpine_linux_max(self): + """ + :avocado: tags=cpu:max + """ + self.boot_alpine_linux("max,pauth-impdef=on") diff --git a/tests/avocado/machine_aarch64_virt.py b/tests/avocado/machine_aarch64_virt.py index 25dab8dc00..a90dc6ff4b 100644 --- a/tests/avocado/machine_aarch64_virt.py +++ b/tests/avocado/machine_aarch64_virt.py @@ -38,11 +38,11 @@ class Aarch64VirtMachine(QemuSystemTest): :avocado: tags=accel:tcg """ iso_url = ('https://dl-cdn.alpinelinux.org/' - 'alpine/v3.16/releases/aarch64/' - 'alpine-virt-3.16.3-aarch64.iso') + 'alpine/v3.17/releases/aarch64/' + 'alpine-standard-3.17.2-aarch64.iso') # Alpine uses sha256, so I recalculated this myself - iso_sha1 = '0683bc089486d55c91bf6607d5ecb93925769bc0' + iso_sha1 = '76284fcd7b41fe899b0c2375ceb8470803eea839' iso_path = self.fetch_asset(iso_url, asset_hash=iso_sha1) self.vm.set_console() @@ -65,7 +65,7 @@ class Aarch64VirtMachine(QemuSystemTest): self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom') self.vm.launch() - self.wait_for_console_pattern('Welcome to Alpine Linux 3.16') + self.wait_for_console_pattern('Welcome to Alpine Linux 3.17') def common_aarch64_virt(self, machine): diff --git a/tests/avocado/machine_aspeed.py b/tests/avocado/machine_aspeed.py index ddf05b3617..724ee72c02 100644 --- a/tests/avocado/machine_aspeed.py +++ b/tests/avocado/machine_aspeed.py @@ -7,14 +7,19 @@ import time import os +import tempfile
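
The machine_aarch64_virt.py hunk above bumps the Alpine image to 3.17 and, as its comment notes, the sha1 had to be recalculated by hand: Alpine only publishes sha256 sums, while the test feeds fetch_asset() a sha1. A short sketch of recomputing such a digest locally (the filename is illustrative):

import hashlib

def file_digest(path, algorithm='sha1', chunk_size=1 << 20):
    """Stream the file through hashlib so a large ISO need not fit in RAM."""
    digest = hashlib.new(algorithm)
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

# e.g. file_digest('alpine-standard-3.17.2-aarch64.iso') should return
# 76284fcd7b41fe899b0c2375ceb8470803eea839 for the image used above
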
+import subprocess +from avocado_qemu import LinuxSSHMixIn from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado_qemu import exec_command from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import interrupt_interactive_console_until_pattern +from avocado_qemu import has_cmd from avocado.utils import archive from avocado import skipIf +from avocado import skipUnless class AST1030Machine(QemuSystemTest): @@ -132,7 +137,7 @@ class AST2x00Machine(QemuSystemTest): self.do_test_arm_aspeed(image_path) - def do_test_arm_aspeed_buildroot_start(self, image, cpu_id): + def do_test_arm_aspeed_buildroot_start(self, image, cpu_id, pattern='Aspeed EVB'): self.require_netdev('user') self.vm.set_console() @@ -146,7 +151,7 @@ class AST2x00Machine(QemuSystemTest): self.wait_for_console_pattern('Booting Linux on physical CPU ' + cpu_id) self.wait_for_console_pattern('lease of 10.0.2.15') # the line before login: - self.wait_for_console_pattern('Aspeed EVB') + self.wait_for_console_pattern(pattern) time.sleep(0.1) exec_command(self, 'root') time.sleep(0.1) @@ -199,6 +204,8 @@ class AST2x00Machine(QemuSystemTest): 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test'); self.vm.add_args('-device', 'ds1338,bus=aspeed.i2c.bus.3,address=0x32'); + self.vm.add_args('-device', + 'i2c-echo,bus=aspeed.i2c.bus.3,address=0x42'); self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00') exec_command_and_wait_for_pattern(self, @@ -217,10 +224,52 @@ class AST2x00Machine(QemuSystemTest): year = time.strftime("%Y") exec_command_and_wait_for_pattern(self, 'hwclock -f /dev/rtc1', year); + exec_command_and_wait_for_pattern(self, + 'echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-3/new_device', + 'i2c i2c-3: new_device: Instantiated device slave-24c02 at 0x64'); + exec_command(self, 'i2cset -y 3 0x42 0x64 0x00 0xaa i'); + time.sleep(0.1) + exec_command_and_wait_for_pattern(self, + 'hexdump /sys/bus/i2c/devices/3-1064/slave-eeprom', + '0000000 ffaa ffff ffff ffff ffff ffff ffff ffff'); self.do_test_arm_aspeed_buildroot_poweroff() + @skipUnless(*has_cmd('swtpm')) + def test_arm_ast2600_evb_buildroot_tpm(self): + """ + :avocado: tags=arch:arm + :avocado: tags=machine:ast2600-evb + """ -class AST2x00MachineSDK(QemuSystemTest): + image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' + 'images/ast2600-evb/buildroot-2023.02-tpm/flash.img') + image_hash = ('a46009ae8a5403a0826d607215e731a8c68d27c14c41e55331706b8f9c7bd997') + image_path = self.fetch_asset(image_url, asset_hash=image_hash, + algorithm='sha256') + + socket = os.path.join(self.vm.sock_dir, 'swtpm-socket') + + subprocess.run(['swtpm', 'socket', '-d', '--tpm2', + '--tpmstate', f'dir={self.vm.temp_dir}', + '--ctrl', f'type=unixio,path={socket}']) + + self.vm.add_args('-chardev', f'socket,id=chrtpm,path={socket}') + self.vm.add_args('-tpmdev', 'emulator,id=tpm0,chardev=chrtpm') + self.vm.add_args('-device', + 'tpm-tis-i2c,tpmdev=tpm0,bus=aspeed.i2c.bus.12,address=0x2e') + self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00', 'Aspeed AST2600 EVB') + exec_command(self, "passw0rd") + + exec_command_and_wait_for_pattern(self, + 'echo tpm_tis_i2c 0x2e > /sys/bus/i2c/devices/i2c-12/new_device', + 'tpm_tis_i2c 12-002e: 2.0 TPM (device-id 0x1, rev-id 1)'); + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/tpm/tpm0/pcr-sha256/0', + 'B804724EA13F52A9072BA87FE8FDCC497DFC9DF9AA15B9088694639C431688E0'); + + self.do_test_arm_aspeed_buildroot_poweroff() + +class 
AST2x00MachineSDK(QemuSystemTest, LinuxSSHMixIn): EXTRA_BOOTARGS = ( 'quiet ' @@ -247,7 +296,7 @@ class AST2x00MachineSDK(QemuSystemTest): self.require_netdev('user') self.vm.set_console() self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', - '-net', 'nic', '-net', 'user') + '-net', 'nic', '-net', 'user,hostfwd=:127.0.0.1:0-:22') self.vm.launch() self.wait_for_console_pattern('U-Boot 2019.04') @@ -275,7 +324,7 @@ class AST2x00MachineSDK(QemuSystemTest): self.do_test_arm_aspeed_sdk_start( self.workdir + '/ast2500-default/image-bmc') - self.wait_for_console_pattern('ast2500-default login:') + self.wait_for_console_pattern('nodistro.0 ast2500-default ttyS4') @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_arm_ast2600_evb_sdk(self): @@ -297,22 +346,25 @@ class AST2x00MachineSDK(QemuSystemTest): 'ds1338,bus=aspeed.i2c.bus.5,address=0x32'); self.do_test_arm_aspeed_sdk_start( self.workdir + '/ast2600-default/image-bmc') - self.wait_for_console_pattern('ast2600-default login:') - exec_command_and_wait_for_pattern(self, 'root', 'Password:') - exec_command_and_wait_for_pattern(self, '0penBmc', 'root@ast2600-default:~#') + self.wait_for_console_pattern('nodistro.0 ast2600-default ttyS4') - exec_command_and_wait_for_pattern(self, - 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-5/device/new_device', + self.ssh_connect('root', '0penBmc', False) + self.ssh_command('dmesg -c > /dev/null') + + self.ssh_command_output_contains( + 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-5/device/new_device ; ' + 'dmesg -c', 'i2c i2c-5: new_device: Instantiated device lm75 at 0x4d'); - exec_command_and_wait_for_pattern(self, + self.ssh_command_output_contains( 'cat /sys/class/hwmon/hwmon19/temp1_input', '0') self.vm.command('qom-set', path='/machine/peripheral/tmp-test', property='temperature', value=18000); - exec_command_and_wait_for_pattern(self, + self.ssh_command_output_contains( 'cat /sys/class/hwmon/hwmon19/temp1_input', '18000') - exec_command_and_wait_for_pattern(self, - 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-5/device/new_device', + self.ssh_command_output_contains( + 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-5/device/new_device ; ' + 'dmesg -c', 'i2c i2c-5: new_device: Instantiated device ds1307 at 0x32'); year = time.strftime("%Y") - exec_command_and_wait_for_pattern(self, 'hwclock -f /dev/rtc1', year); + self.ssh_command_output_contains('/sbin/hwclock -f /dev/rtc1', year); diff --git a/tests/avocado/machine_loongarch.py b/tests/avocado/machine_loongarch.py new file mode 100644 index 0000000000..7d8a3c1fa5 --- /dev/null +++ b/tests/avocado/machine_loongarch.py @@ -0,0 +1,58 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# LoongArch virt test. 
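
The SDK tests above switch from scraping the console to asserting over SSH with ssh_command_output_contains(), whose implementation was added to LinuxSSHMixIn earlier in this series. Its body relies on Python's for/else: the else arm runs only when the loop finishes without hitting break, i.e. when no line matched. Restated outside the test class:

def output_contains(lines, expected):
    """Fail unless some line contains the expected substring."""
    for line in lines:
        if expected in line:
            break      # match found; the else arm below is skipped
    else:
        raise AssertionError('output does not contain "%s"' % expected)

# output_contains(['temp1_input: 18000'], '18000') returns quietly;
# an empty list raises AssertionError
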
+# +# Copyright (c) 2023 Loongson Technology Corporation Limited +# + +from avocado_qemu import QemuSystemTest +from avocado_qemu import exec_command_and_wait_for_pattern +from avocado_qemu import wait_for_console_pattern + +class LoongArchMachine(QemuSystemTest): + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + + timeout = 120 + + def wait_for_console_pattern(self, success_message, vm=None): + wait_for_console_pattern(self, success_message, + failure_message='Kernel panic - not syncing', + vm=vm) + + def test_loongarch64_devices(self): + + """ + :avocado: tags=arch:loongarch64 + :avocado: tags=machine:virt + """ + + kernel_url = ('https://github.com/yangxiaojuan-loongson/qemu-binary/' + 'releases/download/binary-files/vmlinuz.efi') + kernel_hash = '951b485b16e3788b6db03a3e1793c067009e31a2' + kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) + + initrd_url = ('https://github.com/yangxiaojuan-loongson/qemu-binary/' + 'releases/download/binary-files/ramdisk') + initrd_hash = 'c67658d9b2a447ce7db2f73ba3d373c9b2b90ab2' + initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash) + + bios_url = ('https://github.com/yangxiaojuan-loongson/qemu-binary/' + 'releases/download/binary-files/QEMU_EFI.fd') + bios_hash = ('dfc1bfba4853cd763b9d392d0031827e8addbca8') + bios_path = self.fetch_asset(bios_url, asset_hash=bios_hash) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'root=/dev/ram rdinit=/sbin/init console=ttyS0,115200') + self.vm.add_args('-nographic', + '-smp', '4', + '-m', '1024', + '-cpu', 'la464', + '-kernel', kernel_path, + '-initrd', initrd_path, + '-bios', bios_path, + '-append', kernel_command_line) + self.vm.launch() + self.wait_for_console_pattern('Run /sbin/init as init process') + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'processor : 3') diff --git a/tests/avocado/migration.py b/tests/avocado/migration.py index 4b25680c50..8b2ec0e3c4 100644 --- a/tests/avocado/migration.py +++ b/tests/avocado/migration.py @@ -11,6 +11,8 @@ import tempfile +import os + from avocado_qemu import QemuSystemTest from avocado import skipUnless @@ -19,7 +21,7 @@ from avocado.utils import wait from avocado.utils.path import find_command -class Migration(QemuSystemTest): +class MigrationTest(QemuSystemTest): """ :avocado: tags=migration """ @@ -62,20 +64,91 @@ class Migration(QemuSystemTest): self.cancel('Failed to find a free port') return port - - def test_migration_with_tcp_localhost(self): + def migration_with_tcp_localhost(self): dest_uri = 'tcp:localhost:%u' % self._get_free_port() self.do_migrate(dest_uri) - def test_migration_with_unix(self): + def migration_with_unix(self): with tempfile.TemporaryDirectory(prefix='socket_') as socket_path: dest_uri = 'unix:%s/qemu-test.sock' % socket_path self.do_migrate(dest_uri) @skipUnless(find_command('nc', default=False), "'nc' command not found") - def test_migration_with_exec(self): + def migration_with_exec(self): """The test works for both netcat-traditional and netcat-openbsd packages.""" free_port = self._get_free_port() dest_uri = 'exec:nc -l localhost %u' % free_port src_uri = 'exec:nc localhost %u' % free_port self.do_migrate(dest_uri, src_uri) + + +@skipUnless('aarch64' in os.uname()[4], "host != target") +class Aarch64(MigrationTest): + """ + :avocado: tags=arch:aarch64 + :avocado: tags=machine:virt + :avocado: tags=cpu:max + """ + + def test_migration_with_tcp_localhost(self): + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + 
self.migration_with_unix() + + def test_migration_with_exec(self): + self.migration_with_exec() + + +@skipUnless('x86_64' in os.uname()[4], "host != target") +class X86_64(MigrationTest): + """ + :avocado: tags=arch:x86_64 + :avocado: tags=machine:pc + :avocado: tags=cpu:qemu64 + """ + + def test_migration_with_tcp_localhost(self): + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.migration_with_unix() + + def test_migration_with_exec(self): + self.migration_with_exec() + + +@skipUnless('ppc64le' in os.uname()[4], "host != target") +class PPC64(MigrationTest): + """ + :avocado: tags=arch:ppc64 + :avocado: tags=machine:pseries + :avocado: tags=cpu:power9_v2.0 + """ + + def test_migration_with_tcp_localhost(self): + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.migration_with_unix() + + def test_migration_with_exec(self): + self.migration_with_exec() + + +@skipUnless('s390x' in os.uname()[4], "host != target") +class S390X(MigrationTest): + """ + :avocado: tags=arch:s390x + :avocado: tags=machine:s390-ccw-virtio + :avocado: tags=cpu:qemu + """ + + def test_migration_with_tcp_localhost(self): + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.migration_with_unix() + + def test_migration_with_exec(self): + self.migration_with_exec() diff --git a/tests/avocado/multiprocess.py b/tests/avocado/multiprocess.py index 80a3b8f442..9112a4cacc 100644 --- a/tests/avocado/multiprocess.py +++ b/tests/avocado/multiprocess.py @@ -22,6 +22,7 @@ class Multiprocess(QemuSystemTest): machine_type): """Main test method""" self.require_accelerator('kvm') + self.require_multiprocess() # Create socketpair to connect proxy and remote processes proxy_sock, remote_sock = socket.socketpair(socket.AF_UNIX, diff --git a/tests/avocado/netdev-ethtool.py b/tests/avocado/netdev-ethtool.py new file mode 100644 index 0000000000..5f33288f81 --- /dev/null +++ b/tests/avocado/netdev-ethtool.py @@ -0,0 +1,101 @@ +# ethtool tests for emulated network devices +# +# This test leverages ethtool's --test sequence to validate network +# device behaviour. +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from avocado import skip +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern + +class NetDevEthtool(QemuSystemTest): + """ + :avocado: tags=arch:x86_64 + :avocado: tags=machine:q35 + """ + + # Runs in about 17s under KVM, 19s under TCG, 25s under GCOV + timeout = 45 + + # Fetch assets from the netdev-ethtool subdir of my shared test + # images directory on fileserver.linaro.org. + def get_asset(self, name, sha1): + base_url = ('https://fileserver.linaro.org/s/' + 'kE4nCFLdQcoBF9t/download?'
+ 'path=%2Fnetdev-ethtool&files=' ) + url = base_url + name + # use explicit name rather than failing to neatly parse the + # URL into a unique one + return self.fetch_asset(name=name, locations=(url), asset_hash=sha1) + + def common_test_code(self, netdev, extra_args=None): + + # This custom kernel has drivers for all the supported network + # devices we can emulate in QEMU + kernel = self.get_asset("bzImage", + "33469d7802732d5815226166581442395cb289e2") + + rootfs = self.get_asset("rootfs.squashfs", + "9793cea7021414ae844bda51f558bd6565b50cdc") + + append = 'printk.time=0 console=ttyS0 ' + append += 'root=/dev/sr0 rootfstype=squashfs ' + + # any additional kernel tweaks for the test + if extra_args: + append += extra_args + + # finally invoke ethtool directly + append += ' init=/usr/sbin/ethtool -- -t eth1 offline' + + # add the rootfs via a readonly cdrom image + drive = f"file={rootfs},if=ide,index=0,media=cdrom" + + self.vm.add_args('-kernel', kernel, + '-append', append, + '-drive', drive, + '-device', netdev) + + self.vm.set_console(console_index=0) + self.vm.launch() + + wait_for_console_pattern(self, + "The test result is PASS", + "The test result is FAIL", + vm=None) + # no need to gracefully shutdown, just finish + self.vm.kill() + + def test_igb(self): + """ + :avocado: tags=device:igb + """ + self.common_test_code("igb") + + def test_igb_nomsi(self): + """ + :avocado: tags=device:igb + """ + self.common_test_code("igb", "pci=nomsi") + + # It seems the other popular cards we model in QEMU currently fail + # the pattern test with: + # + # pattern test failed (reg 0x00178): got 0x00000000 expected 0x00005A5A + # + # So for now we skip them. + + @skip("Incomplete reg 0x00178 support") + def test_e1000(self): + """ + :avocado: tags=device:e1000 + """ + self.common_test_code("e1000") + + @skip("Incomplete reg 0x00178 support") + def test_i82550(self): + """ + :avocado: tags=device:i82550 + """ + self.common_test_code("i82550") diff --git a/tests/avocado/replay_kernel.py b/tests/avocado/replay_kernel.py index 00a26e4a0c..fe1e901f4b 100644 --- a/tests/avocado/replay_kernel.py +++ b/tests/avocado/replay_kernel.py @@ -349,6 +349,7 @@ class ReplayKernelNormal(ReplayKernelBase): file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'vmlinux') + @skip("nios2 emulation is buggy under record/replay") def test_nios2_10m50(self): """ :avocado: tags=arch:nios2 @@ -492,7 +493,7 @@ class ReplayKernelSlow(ReplayKernelBase): :avocado: tags=endian:little :avocado: tags=cpu:I7200 """ - kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/' + kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' 'generic_nano32r6el_page4k.xz') kernel_hash = '477456aafd2a0f1ddc9482727f20fe9575565dd6' @@ -506,7 +507,7 @@ class ReplayKernelSlow(ReplayKernelBase): :avocado: tags=endian:little :avocado: tags=cpu:I7200 """ - kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/' + kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' 'generic_nano32r6el_page16k_up.xz') kernel_hash = 'e882868f944c71c816e832e2303b7874d044a7bc' @@ -520,7 +521,7 @@ class ReplayKernelSlow(ReplayKernelBase): :avocado: tags=endian:little :avocado: tags=cpu:I7200 """ - kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/' + kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' 
'generic_nano32r6el_page64k_dbg.xz') kernel_hash = '18d1c68f2e23429e266ca39ba5349ccd0aeb7180' diff --git a/tests/avocado/tuxrun_baselines.py b/tests/avocado/tuxrun_baselines.py new file mode 100644 index 0000000000..3a46e7a745 --- /dev/null +++ b/tests/avocado/tuxrun_baselines.py @@ -0,0 +1,586 @@ +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import time +import tempfile + +from avocado import skip, skipIf +from avocado_qemu import QemuSystemTest +from avocado_qemu import exec_command, exec_command_and_wait_for_pattern +from avocado_qemu import wait_for_console_pattern +from avocado.utils import process +from avocado.utils.path import find_command + +class TuxRunBaselineTest(QemuSystemTest): + """ + :avocado: tags=accel:tcg + """ + + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0' + # Tests are ~10-40s, allow for --debug/--enable-gcov overhead + timeout = 100 + + def get_tag(self, tagname, default=None): + """ + Get the metadata tag or return the default. + """ + utag = self._get_unique_tag_val(tagname) + print(f"{tagname}/{default} -> {utag}") + if utag: + return utag + + return default + + def setUp(self): + super().setUp() + + # We need zstd for all the tuxrun tests + # See https://github.com/avocado-framework/avocado/issues/5609 + zstd = find_command('zstd', False) + if zstd is False: + self.cancel('Could not find "zstd", which is required to ' + 'decompress rootfs') + self.zstd = zstd + + # Process the TuxRun specific tags, most machines work with + # reasonable defaults but we sometimes need to tweak the + # config. To avoid open coding everything we store all these + # details in the metadata for each test. + + # The tuxboot tag matches the root directory + self.tuxboot = self.get_tag('tuxboot') + + # Most Linux's use ttyS0 for their serial port + self.console = self.get_tag('console', "ttyS0") + + # Does the machine shutdown QEMU nicely on "halt" + self.shutdown = self.get_tag('shutdown') + + # The name of the kernel Image file + self.image = self.get_tag('image', "Image") + + self.root = self.get_tag('root', "vda") + + # Occasionally we need extra devices to hook things up + self.extradev = self.get_tag('extradev') + + self.qemu_img = super().get_qemu_img() + + def wait_for_console_pattern(self, success_message, vm=None): + wait_for_console_pattern(self, success_message, + failure_message='Kernel panic - not syncing', + vm=vm) + + def fetch_tuxrun_assets(self, csums=None, dt=None): + """ + Fetch the TuxBoot assets. They are stored in a standard way so we + use the per-test tags to fetch details. 
+ """ + base_url = f"https://storage.tuxboot.com/20230331/{self.tuxboot}/" + + # empty hash if we weren't passed one + csums = {} if csums is None else csums + ksum = csums.get(self.image, None) + isum = csums.get("rootfs.ext4.zst", None) + + kernel_image = self.fetch_asset(base_url + self.image, + asset_hash = ksum, + algorithm = "sha256") + disk_image_zst = self.fetch_asset(base_url + "rootfs.ext4.zst", + asset_hash = isum, + algorithm = "sha256") + + cmd = f"{self.zstd} -d {disk_image_zst} -o {self.workdir}/rootfs.ext4" + process.run(cmd) + + if dt: + dsum = csums.get(dt, None) + dtb = self.fetch_asset(base_url + dt, + asset_hash = dsum, + algorithm = "sha256") + else: + dtb = None + + return (kernel_image, self.workdir + "/rootfs.ext4", dtb) + + def prepare_run(self, kernel, disk, drive, dtb=None, console_index=0): + """ + Setup to run and add the common parameters to the system + """ + self.vm.set_console(console_index=console_index) + + # all block devices are raw ext4's + blockdev = "driver=raw,file.driver=file," \ + + f"file.filename={disk},node-name=hd0" + + kcmd_line = self.KERNEL_COMMON_COMMAND_LINE + kcmd_line += f" root=/dev/{self.root}" + kcmd_line += f" console={self.console}" + + self.vm.add_args('-kernel', kernel, + '-append', kcmd_line, + '-blockdev', blockdev) + + # Sometimes we need extra devices attached + if self.extradev: + self.vm.add_args('-device', self.extradev) + + self.vm.add_args('-device', + f"{drive},drive=hd0") + + # Some machines need an explicit DTB + if dtb: + self.vm.add_args('-dtb', dtb) + + def run_tuxtest_tests(self, haltmsg): + """ + Wait for the system to boot up, wait for the login prompt and + then do a few things on the console. Trigger a shutdown and + wait to exit cleanly. + """ + self.wait_for_console_pattern("Welcome to TuxTest") + time.sleep(0.2) + exec_command(self, 'root') + time.sleep(0.2) + exec_command(self, 'cat /proc/interrupts') + time.sleep(0.1) + exec_command(self, 'cat /proc/self/maps') + time.sleep(0.1) + exec_command(self, 'uname -a') + time.sleep(0.1) + exec_command_and_wait_for_pattern(self, 'halt', haltmsg) + + # Wait for VM to shut down gracefully if it can + if self.shutdown == "nowait": + self.vm.shutdown() + else: + self.vm.wait() + + def common_tuxrun(self, + csums=None, + dt=None, + drive="virtio-blk-device", + haltmsg="reboot: System halted", + console_index=0): + """ + Common path for LKFT tests. Unless we need to do something + special with the command line we can process most things using + the tag metadata. + """ + (kernel, disk, dtb) = self.fetch_tuxrun_assets(csums, dt) + + self.prepare_run(kernel, disk, drive, dtb, console_index) + self.vm.launch() + self.run_tuxtest_tests(haltmsg) + + def ppc64_common_tuxrun(self, sums, prefix): + # add device args to command line. 
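# A note on the argument style used below: QEMU accepts both the
# traditional key=value spelling and a JSON form for -device/-object,
# so e.g.
#   -device '{"driver":"virtio-net-pci","netdev":"hostnet0"}'
# is equivalent to
#   -device virtio-net-pci,netdev=hostnet0
# The JSON spellings here can be pasted straight from the tuxrun-style
# command lines this configuration is derived from.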
+ self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22', + '-device', 'virtio-net,netdev=vnet') + self.vm.add_args('-netdev', '{"type":"user","id":"hostnet0"}', + '-device', '{"driver":"virtio-net-pci","netdev":' + '"hostnet0","id":"net0","mac":"52:54:00:4c:e3:86",' + '"bus":"pci.0","addr":"0x9"}') + self.vm.add_args('-device', '{"driver":"qemu-xhci","p2":15,"p3":15,' + '"id":"usb","bus":"pci.0","addr":"0x2"}') + self.vm.add_args('-device', '{"driver":"virtio-scsi-pci","id":"scsi0"' + ',"bus":"pci.0","addr":"0x3"}') + self.vm.add_args('-device', '{"driver":"virtio-serial-pci","id":' + '"virtio-serial0","bus":"pci.0","addr":"0x4"}') + self.vm.add_args('-device', '{"driver":"scsi-cd","bus":"scsi0.0"' + ',"channel":0,"scsi-id":0,"lun":0,"device_id":' + '"drive-scsi0-0-0-0","id":"scsi0-0-0-0"}') + self.vm.add_args('-device', '{"driver":"virtio-balloon-pci",' + '"id":"balloon0","bus":"pci.0","addr":"0x6"}') + self.vm.add_args('-audiodev', '{"id":"audio1","driver":"none"}') + self.vm.add_args('-device', '{"driver":"usb-tablet","id":"input0"' + ',"bus":"usb.0","port":"1"}') + self.vm.add_args('-device', '{"driver":"usb-kbd","id":"input1"' + ',"bus":"usb.0","port":"2"}') + self.vm.add_args('-device', '{"driver":"VGA","id":"video0",' + '"vgamem_mb":16,"bus":"pci.0","addr":"0x7"}') + self.vm.add_args('-object', '{"qom-type":"rng-random","id":"objrng0"' + ',"filename":"/dev/urandom"}', + '-device', '{"driver":"virtio-rng-pci","rng":"objrng0"' + ',"id":"rng0","bus":"pci.0","addr":"0x8"}') + self.vm.add_args('-object', '{"qom-type":"cryptodev-backend-builtin",' + '"id":"objcrypto0","queues":1}', + '-device', '{"driver":"virtio-crypto-pci",' + '"cryptodev":"objcrypto0","id":"crypto0","bus"' + ':"pci.0","addr":"0xa"}') + self.vm.add_args('-device', '{"driver":"spapr-pci-host-bridge"' + ',"index":1,"id":"pci.1"}') + self.vm.add_args('-device', '{"driver":"spapr-vscsi","id":"scsi1"' + ',"reg":12288}') + self.vm.add_args('-m', '2G,slots=32,maxmem=4G', + '-object', 'memory-backend-ram,id=ram1,size=1G', + '-device', 'pc-dimm,id=dimm1,memdev=ram1') + + # Create a temporary qcow2 and launch the test-case + with tempfile.NamedTemporaryFile(prefix=prefix, + suffix='.qcow2') as qcow2: + process.run(self.qemu_img + ' create -f qcow2 ' + + qcow2.name + ' 1G') + + self.vm.add_args('-drive', 'file=' + qcow2.name + + ',format=qcow2,if=none,id=' + 'drive-virtio-disk1', + '-device', 'virtio-blk-pci,scsi=off,bus=pci.0,' + 'addr=0xb,drive=drive-virtio-disk1,id=virtio-disk1' + ',bootindex=2') + self.common_tuxrun(csums=sums, drive="scsi-hd") + + # + # The tests themselves. The configuration is derived from how + # tuxrun invokes qemu (with minor tweaks like using -blockdev + # consistently). 
The tuxrun equivalent is something like: + # + # tuxrun --device qemu-{ARCH} \ + # --kernel https://storage.tuxboot.com/{TUXBOOT}/{IMAGE} + # + + def test_arm64(self): + """ + :avocado: tags=arch:aarch64 + :avocado: tags=cpu:cortex-a57 + :avocado: tags=machine:virt + :avocado: tags=tuxboot:arm64 + :avocado: tags=console:ttyAMA0 + :avocado: tags=shutdown:nowait + """ + sums = {"Image" : + "ce95a7101a5fecebe0fe630deee6bd97b32ba41bc8754090e9ad8961ea8674c7", + "rootfs.ext4.zst" : + "bbd5ed4b9c7d3f4ca19ba71a323a843c6b585e880115df3b7765769dbd9dd061"} + self.common_tuxrun(csums=sums) + + def test_arm64be(self): + """ + :avocado: tags=arch:aarch64 + :avocado: tags=cpu:cortex-a57 + :avocado: tags=endian:big + :avocado: tags=machine:virt + :avocado: tags=tuxboot:arm64be + :avocado: tags=console:ttyAMA0 + :avocado: tags=shutdown:nowait + """ + sums = { "Image" : + "e0df4425eb2cd9ea9a283e808037f805641c65d8fcecc8f6407d8f4f339561b4", + "rootfs.ext4.zst" : + "e6ffd8813c8a335bc15728f2835f90539c84be7f8f5f691a8b01451b47fb4bd7"} + self.common_tuxrun(csums=sums) + + def test_armv5(self): + """ + :avocado: tags=arch:arm + :avocado: tags=cpu:arm926 + :avocado: tags=machine:versatilepb + :avocado: tags=tuxboot:armv5 + :avocado: tags=image:zImage + :avocado: tags=console:ttyAMA0 + :avocado: tags=shutdown:nowait + """ + sums = { "rootfs.ext4.zst" : + "17177afa74e7294da0642861f08c88ca3c836764299a54bf6d1ce276cb9712a5", + "versatile-pb.dtb" : + "0bc0c0b0858cefd3c32b385c0d66d97142ded29472a496f4f490e42fc7615b25", + "zImage" : + "c95af2f27647c12265d75e9df44c22ff5228c59855f54aaa70f41ec2842e3a4d" } + + self.common_tuxrun(csums=sums, + drive="virtio-blk-pci", + dt="versatile-pb.dtb") + + def test_armv7(self): + """ + :avocado: tags=arch:arm + :avocado: tags=cpu:cortex-a15 + :avocado: tags=machine:virt + :avocado: tags=tuxboot:armv7 + :avocado: tags=image:zImage + :avocado: tags=console:ttyAMA0 + :avocado: tags=shutdown:nowait + """ + sums = { "rootfs.ext4.zst" : + "ab1fbbeaddda1ffdd45c9405a28cd5370c20f23a7cbc809cc90dc9f243a8eb5a", + "zImage" : + "4c7a22e9f15875bec06bd2a29d822496571eb297d4f22694099ffcdb19077572" } + + self.common_tuxrun(csums=sums) + + def test_armv7be(self): + """ + :avocado: tags=arch:arm + :avocado: tags=cpu:cortex-a15 + :avocado: tags=endian:big + :avocado: tags=machine:virt + :avocado: tags=tuxboot:armv7be + :avocado: tags=image:zImage + :avocado: tags=console:ttyAMA0 + :avocado: tags=shutdown:nowait + """ + sums = {"rootfs.ext4.zst" : + "42ed46dd2d59986206c5b1f6cf35eab58fe3fd20c96b41aaa16b32f3f90a9835", + "zImage" : + "7facc62082b57af12015b08f7fdbaf2f123ba07a478367853ae12b219afc9f2f" } + + self.common_tuxrun(csums=sums) + + def test_i386(self): + """ + :avocado: tags=arch:i386 + :avocado: tags=cpu:coreduo + :avocado: tags=machine:q35 + :avocado: tags=tuxboot:i386 + :avocado: tags=image:bzImage + :avocado: tags=shutdown:nowait + """ + sums = {"bzImage" : + "a3e5b32a354729e65910f5a1ffcda7c14a6c12a55e8213fb86e277f1b76ed956", + "rootfs.ext4.zst" : + "f15e66b2bf673a210ec2a4b2e744a80530b36289e04f5388aab812b97f69754a" } + + self.common_tuxrun(csums=sums, drive="virtio-blk-pci") + + def test_mips32(self): + """ + :avocado: tags=arch:mips + :avocado: tags=machine:malta + :avocado: tags=cpu:mips32r6-generic + :avocado: tags=endian:big + :avocado: tags=tuxboot:mips32 + :avocado: tags=image:vmlinux + :avocado: tags=root:sda + :avocado: tags=shutdown:nowait + """ + sums = { "rootfs.ext4.zst" : + "fc3da0b4c2f38d74c6d705123bb0f633c76ed953128f9d0859378c328a6d11a0", + "vmlinux" : + 
"bfd2172f8b17fb32970ca0c8c58f59c5a4ca38aa5855d920be3a69b5d16e52f0" } + + self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0") + + def test_mips32el(self): + """ + :avocado: tags=arch:mipsel + :avocado: tags=machine:malta + :avocado: tags=cpu:mips32r6-generic + :avocado: tags=tuxboot:mips32el + :avocado: tags=image:vmlinux + :avocado: tags=root:sda + :avocado: tags=shutdown:nowait + """ + sums = { "rootfs.ext4.zst" : + "e799768e289fd69209c21f4dacffa11baea7543d5db101e8ce27e3bc2c41d90e", + "vmlinux" : + "8573867c68a8443db8de6d08bb33fb291c189ca2ca671471d3973a3e712096a3" } + + self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0") + + def test_mips64(self): + """ + :avocado: tags=arch:mips64 + :avocado: tags=machine:malta + :avocado: tags=tuxboot:mips64 + :avocado: tags=endian:big + :avocado: tags=image:vmlinux + :avocado: tags=root:sda + :avocado: tags=shutdown:nowait + """ + sums = { "rootfs.ext4.zst" : + "69d91eeb04df3d8d172922c6993bb37d4deeb6496def75d8580f6f9de3e431da", + "vmlinux" : + "09010e51e4b8bcbbd2494786ffb48eca78f228e96e5c5438344b0eac4029dc61" } + + self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0") + + def test_mips64el(self): + """ + :avocado: tags=arch:mips64el + :avocado: tags=machine:malta + :avocado: tags=tuxboot:mips64el + :avocado: tags=image:vmlinux + :avocado: tags=root:sda + :avocado: tags=shutdown:nowait + """ + sums = { "rootfs.ext4.zst" : + "fba585368f5915b1498ed081863474b2d7ec4e97cdd46d21bdcb2f9698f83de4", + "vmlinux" : + "d4e08965e2155c4cccce7c5f34d18fe34c636cda2f2c9844387d614950155266" } + + self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0") + + def test_ppc32(self): + """ + :avocado: tags=arch:ppc + :avocado: tags=machine:ppce500 + :avocado: tags=cpu:e500mc + :avocado: tags=tuxboot:ppc32 + :avocado: tags=image:uImage + :avocado: tags=shutdown:nowait + """ + sums = { "rootfs.ext4.zst" : + "8885b9d999cc24d679542a02e9b6aaf48f718f2050ece6b8347074b6ee41dd09", + "uImage" : + "1a68f74b860fda022fb12e03c5efece8c2b8b590d96cca37a8481a3ae0b3f81f" } + + self.common_tuxrun(csums=sums, drive="virtio-blk-pci") + + def test_ppc64(self): + """ + :avocado: tags=arch:ppc64 + :avocado: tags=machine:pseries + :avocado: tags=cpu:POWER10 + :avocado: tags=endian:big + :avocado: tags=console:hvc0 + :avocado: tags=tuxboot:ppc64 + :avocado: tags=image:vmlinux + :avocado: tags=extradev:driver=spapr-vscsi + :avocado: tags=root:sda + """ + sums = { "rootfs.ext4.zst" : + "1d953e81a4379e537fc8e41e05a0a59d9b453eef97aa03d47866c6c45b00bdff", + "vmlinux" : + "f22a9b9e924174a4c199f4c7e5d91a2339fcfe51c6eafd0907dc3e09b64ab728" } + self.ppc64_common_tuxrun(sums, prefix='tuxrun_ppc64_') + + def test_ppc64le(self): + """ + :avocado: tags=arch:ppc64 + :avocado: tags=machine:pseries + :avocado: tags=cpu:POWER10 + :avocado: tags=console:hvc0 + :avocado: tags=tuxboot:ppc64le + :avocado: tags=image:vmlinux + :avocado: tags=extradev:driver=spapr-vscsi + :avocado: tags=root:sda + """ + sums = { "rootfs.ext4.zst" : + "b442678c93fb8abe1f7d3bfa20556488de6b475c22c8fed363f42cf81a0a3906", + "vmlinux" : + "979eb61b445a010fb13e2b927126991f8ceef9c590fa2be0996c00e293e80cf2" } + self.ppc64_common_tuxrun(sums, prefix='tuxrun_ppc64le_') + + def test_riscv32(self): + """ + :avocado: tags=arch:riscv32 + :avocado: tags=machine:virt + :avocado: tags=tuxboot:riscv32 + """ + sums = { "Image" : + "89599407d7334de629a40e7ad6503c73670359eb5f5ae9d686353a3d6deccbd5", + "fw_jump.elf" : + "f2ef28a0b77826f79d085d3e4aa686f1159b315eff9099a37046b18936676985", + 
"rootfs.ext4.zst" : + "7168d296d0283238ea73cd5a775b3dd608e55e04c7b92b76ecce31bb13108cba" } + + self.common_tuxrun(csums=sums) + + def test_riscv64(self): + """ + :avocado: tags=arch:riscv64 + :avocado: tags=machine:virt + :avocado: tags=tuxboot:riscv64 + """ + sums = { "Image" : + "cd634badc65e52fb63465ec99e309c0de0369f0841b7d9486f9729e119bac25e", + "fw_jump.elf" : + "6e3373abcab4305fe151b564a4c71110d833c21f2c0a1753b7935459e36aedcf", + "rootfs.ext4.zst" : + "b18e3a3bdf27be03da0b285e84cb71bf09eca071c3a087b42884b6982ed679eb" } + + self.common_tuxrun(csums=sums) + + def test_s390(self): + """ + :avocado: tags=arch:s390x + :avocado: tags=endian:big + :avocado: tags=tuxboot:s390 + :avocado: tags=image:bzImage + :avocado: tags=shutdown:nowait + """ + sums = { "bzImage" : + "0414e98dd1c3dafff8496c9cd9c28a5f8d04553bb5ba37e906a812b48d442ef0", + "rootfs.ext4.zst" : + "88c37c32276677f873a25ab9ec6247895b8e3e6f8259134de2a616080b8ab3fc" } + + self.common_tuxrun(csums=sums, + drive="virtio-blk-ccw", + haltmsg="Requesting system halt") + + # Note: some segfaults caused by unaligned userspace access + @skipIf(os.getenv('GITLAB_CI'), 'Skipping unstable test on GitLab') + def test_sh4(self): + """ + :avocado: tags=arch:sh4 + :avocado: tags=machine:r2d + :avocado: tags=cpu:sh7785 + :avocado: tags=tuxboot:sh4 + :avocado: tags=image:zImage + :avocado: tags=root:sda + :avocado: tags=console:ttySC1 + """ + sums = { "rootfs.ext4.zst" : + "3592a7a3d5a641e8b9821449e77bc43c9904a56c30d45da0694349cfd86743fd", + "zImage" : + "29d9b2aba604a0f53a5dc3b5d0f2b8e35d497de1129f8ee5139eb6fdf0db692f" } + + # The test is currently too unstable to do much in userspace + # so we skip common_tuxrun and do a minimal boot and shutdown. + (kernel, disk, dtb) = self.fetch_tuxrun_assets(csums=sums) + + # the console comes on the second serial port + self.prepare_run(kernel, disk, + "driver=ide-hd,bus=ide.0,unit=0", + console_index=1) + self.vm.launch() + + self.wait_for_console_pattern("Welcome to TuxTest") + time.sleep(0.1) + exec_command(self, 'root') + time.sleep(0.1) + exec_command_and_wait_for_pattern(self, 'halt', + "reboot: System halted") + + def test_sparc64(self): + """ + :avocado: tags=arch:sparc64 + :avocado: tags=tuxboot:sparc64 + :avocado: tags=image:vmlinux + :avocado: tags=root:sda + :avocado: tags=shutdown:nowait + """ + + sums = { "rootfs.ext4.zst" : + "ad2f1dc436ab51583543d25d2c210cab478645d47078d30d129a66ab0e281d76", + "vmlinux" : + "e34313e4325ff21deaa3d38a502aa09a373ef62b9bd4d7f8f29388b688225c55" } + + self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0") + + def test_x86_64(self): + """ + :avocado: tags=arch:x86_64 + :avocado: tags=machine:q35 + :avocado: tags=cpu:Nehalem + :avocado: tags=tuxboot:x86_64 + :avocado: tags=image:bzImage + :avocado: tags=root:sda + :avocado: tags=shutdown:nowait + """ + sums = { "bzImage" : + "2bc7480a669ee9b6b82500a236aba0c54233debe98cb968268fa230f52f03461", + "rootfs.ext4.zst" : + "b72ac729769b8f51c6dffb221113c9a063c774dbe1d66af30eb593c4e9999b4b" } + + self.common_tuxrun(csums=sums, + drive="driver=ide-hd,bus=ide.0,unit=0") diff --git a/tests/avocado/virtio-gpu.py b/tests/avocado/virtio-gpu.py index 2a249a3a2c..89bfecc715 100644 --- a/tests/avocado/virtio-gpu.py +++ b/tests/avocado/virtio-gpu.py @@ -36,13 +36,13 @@ class VirtioGPUx86(QemuSystemTest): KERNEL_COMMAND_LINE = "printk.time=0 console=ttyS0 rdinit=/bin/bash" KERNEL_URL = ( - "https://archives.fedoraproject.org/pub/fedora" + "https://archives.fedoraproject.org/pub/archive/fedora" 
"/linux/releases/33/Everything/x86_64/os/images" "/pxeboot/vmlinuz" ) KERNEL_HASH = '1433cfe3f2ffaa44de4ecfb57ec25dc2399cdecf' INITRD_URL = ( - "https://archives.fedoraproject.org/pub/fedora" + "https://archives.fedoraproject.org/pub/archive/fedora" "/linux/releases/33/Everything/x86_64/os/images" "/pxeboot/initrd.img" ) @@ -143,7 +143,11 @@ class VirtioGPUx86(QemuSystemTest): "-append", self.KERNEL_COMMAND_LINE, ) - self.vm.launch() + try: + self.vm.launch() + except: + # TODO: probably fails because we are missing the VirGL features + self.cancel("VirGL not enabled?") self.wait_for_console_pattern("as init process") exec_command_and_wait_for_pattern( self, "/usr/sbin/modprobe virtio_gpu", "" diff --git a/tests/bench/meson.build b/tests/bench/meson.build index 7477a1f401..3c799dbd98 100644 --- a/tests/bench/meson.build +++ b/tests/bench/meson.build @@ -3,11 +3,9 @@ qht_bench = executable('qht-bench', sources: 'qht-bench.c', dependencies: [qemuutil]) -if have_system -xbzrle_bench = executable('xbzrle-bench', - sources: 'xbzrle-bench.c', - dependencies: [qemuutil,migration]) -endif +qtree_bench = executable('qtree-bench', + sources: 'qtree-bench.c', + dependencies: [qemuutil]) executable('atomic_add-bench', sources: files('atomic_add-bench.c'), diff --git a/tests/bench/qtree-bench.c b/tests/bench/qtree-bench.c new file mode 100644 index 0000000000..f3d7edc76d --- /dev/null +++ b/tests/bench/qtree-bench.c @@ -0,0 +1,286 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#include "qemu/osdep.h" +#include "qemu/qtree.h" +#include "qemu/timer.h" + +enum tree_op { + OP_LOOKUP, + OP_INSERT, + OP_REMOVE, + OP_REMOVE_ALL, + OP_TRAVERSE, +}; + +struct benchmark { + const char * const name; + enum tree_op op; + bool fill_on_init; +}; + +enum impl_type { + IMPL_GTREE, + IMPL_QTREE, +}; + +struct tree_implementation { + const char * const name; + enum impl_type type; +}; + +static const struct benchmark benchmarks[] = { + { + .name = "Lookup", + .op = OP_LOOKUP, + .fill_on_init = true, + }, + { + .name = "Insert", + .op = OP_INSERT, + .fill_on_init = false, + }, + { + .name = "Remove", + .op = OP_REMOVE, + .fill_on_init = true, + }, + { + .name = "RemoveAll", + .op = OP_REMOVE_ALL, + .fill_on_init = true, + }, + { + .name = "Traverse", + .op = OP_TRAVERSE, + .fill_on_init = true, + }, +}; + +static const struct tree_implementation impls[] = { + { + .name = "GTree", + .type = IMPL_GTREE, + }, + { + .name = "QTree", + .type = IMPL_QTREE, + }, +}; + +static int compare_func(const void *ap, const void *bp) +{ + const size_t *a = ap; + const size_t *b = bp; + + return *a - *b; +} + +static void init_empty_tree_and_keys(enum impl_type impl, + void **ret_tree, size_t **ret_keys, + size_t n_elems) +{ + size_t *keys = g_malloc_n(n_elems, sizeof(*keys)); + for (size_t i = 0; i < n_elems; i++) { + keys[i] = i; + } + + void *tree; + switch (impl) { + case IMPL_GTREE: + tree = g_tree_new(compare_func); + break; + case IMPL_QTREE: + tree = q_tree_new(compare_func); + break; + default: + g_assert_not_reached(); + } + + *ret_tree = tree; + *ret_keys = keys; +} + +static gboolean traverse_func(gpointer key, gpointer value, gpointer data) +{ + return FALSE; +} + +static inline void remove_all(void *tree, enum impl_type impl) +{ + switch (impl) { + case IMPL_GTREE: + g_tree_destroy(tree); + break; + case IMPL_QTREE: + q_tree_destroy(tree); + break; + default: + g_assert_not_reached(); + } +} + +static int64_t run_benchmark(const struct benchmark *bench, + enum impl_type impl, + size_t n_elems) +{ + void *tree; + size_t 
*keys; + + init_empty_tree_and_keys(impl, &tree, &keys, n_elems); + if (bench->fill_on_init) { + for (size_t i = 0; i < n_elems; i++) { + switch (impl) { + case IMPL_GTREE: + g_tree_insert(tree, &keys[i], &keys[i]); + break; + case IMPL_QTREE: + q_tree_insert(tree, &keys[i], &keys[i]); + break; + default: + g_assert_not_reached(); + } + } + } + + int64_t start_ns = get_clock(); + switch (bench->op) { + case OP_LOOKUP: + for (size_t i = 0; i < n_elems; i++) { + void *value; + switch (impl) { + case IMPL_GTREE: + value = g_tree_lookup(tree, &keys[i]); + break; + case IMPL_QTREE: + value = q_tree_lookup(tree, &keys[i]); + break; + default: + g_assert_not_reached(); + } + (void)value; + } + break; + case OP_INSERT: + for (size_t i = 0; i < n_elems; i++) { + switch (impl) { + case IMPL_GTREE: + g_tree_insert(tree, &keys[i], &keys[i]); + break; + case IMPL_QTREE: + q_tree_insert(tree, &keys[i], &keys[i]); + break; + default: + g_assert_not_reached(); + } + } + break; + case OP_REMOVE: + for (size_t i = 0; i < n_elems; i++) { + switch (impl) { + case IMPL_GTREE: + g_tree_remove(tree, &keys[i]); + break; + case IMPL_QTREE: + q_tree_remove(tree, &keys[i]); + break; + default: + g_assert_not_reached(); + } + } + break; + case OP_REMOVE_ALL: + remove_all(tree, impl); + break; + case OP_TRAVERSE: + switch (impl) { + case IMPL_GTREE: + g_tree_foreach(tree, traverse_func, NULL); + break; + case IMPL_QTREE: + q_tree_foreach(tree, traverse_func, NULL); + break; + default: + g_assert_not_reached(); + } + break; + default: + g_assert_not_reached(); + } + int64_t ns = get_clock() - start_ns; + + if (bench->op != OP_REMOVE_ALL) { + remove_all(tree, impl); + } + g_free(keys); + + return ns; +} + +int main(int argc, char *argv[]) +{ + size_t sizes[] = { + 32, + 1024, + 1024 * 4, + 1024 * 128, + 1024 * 1024, + }; + + double res[ARRAY_SIZE(benchmarks)][ARRAY_SIZE(impls)][ARRAY_SIZE(sizes)]; + for (int i = 0; i < ARRAY_SIZE(sizes); i++) { + size_t size = sizes[i]; + for (int j = 0; j < ARRAY_SIZE(impls); j++) { + const struct tree_implementation *impl = &impls[j]; + for (int k = 0; k < ARRAY_SIZE(benchmarks); k++) { + const struct benchmark *bench = &benchmarks[k]; + + /* warm-up run */ + run_benchmark(bench, impl->type, size); + + int64_t total_ns = 0; + int64_t n_runs = 0; + while (total_ns < 2e8 || n_runs < 5) { + total_ns += run_benchmark(bench, impl->type, size); + n_runs++; + } + double ns_per_run = (double)total_ns / n_runs; + + /* Throughput, in Mops/s */ + res[k][j][i] = size / ns_per_run * 1e3; + } + } + } + + printf("# Results' breakdown: Tree, Op and #Elements. 
Units: Mops/s\n"); + printf("%5s %10s ", "Tree", "Op"); + for (int i = 0; i < ARRAY_SIZE(sizes); i++) { + printf("%7zu ", sizes[i]); + } + printf("\n"); + char separator[97]; + for (int i = 0; i < ARRAY_SIZE(separator) - 1; i++) { + separator[i] = '-'; + } + separator[ARRAY_SIZE(separator) - 1] = '\0'; + printf("%s\n", separator); + for (int i = 0; i < ARRAY_SIZE(benchmarks); i++) { + for (int j = 0; j < ARRAY_SIZE(impls); j++) { + printf("%5s %10s ", impls[j].name, benchmarks[i].name); + for (int k = 0; k < ARRAY_SIZE(sizes); k++) { + printf("%7.2f ", res[i][j][k]); + if (j == 0) { + printf(" "); + } else { + if (res[i][0][k] != 0) { + double speedup = res[i][j][k] / res[i][0][k]; + printf("(%4.2fx) ", speedup); + } else { + printf("( ) "); + } + } + } + printf("\n"); + } + } + printf("%s\n", separator); + return 0; +} diff --git a/tests/bench/xbzrle-bench.c b/tests/bench/xbzrle-bench.c deleted file mode 100644 index 8848a3a32d..0000000000 --- a/tests/bench/xbzrle-bench.c +++ /dev/null @@ -1,469 +0,0 @@ -/* - * Xor Based Zero Run Length Encoding unit tests. - * - * Copyright 2013 Red Hat, Inc. and/or its affiliates - * - * Authors: - * Orit Wasserman - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ -#include "qemu/osdep.h" -#include "qemu/cutils.h" -#include "../migration/xbzrle.h" - -#if defined(CONFIG_AVX512BW_OPT) -#define XBZRLE_PAGE_SIZE 4096 -static bool is_cpu_support_avx512bw; -#include "qemu/cpuid.h" -static void __attribute__((constructor)) init_cpu_flag(void) -{ - unsigned max = __get_cpuid_max(0, NULL); - int a, b, c, d; - is_cpu_support_avx512bw = false; - if (max >= 1) { - __cpuid(1, a, b, c, d); - /* We must check that AVX is not just available, but usable. 
*/ - if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) { - int bv; - __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0)); - __cpuid_count(7, 0, a, b, c, d); - /* 0xe6: - * XCR0[7:5] = 111b (OPMASK state, upper 256-bit of ZMM0-ZMM15 - * and ZMM16-ZMM31 state are enabled by OS) - * XCR0[2:1] = 11b (XMM state and YMM state are enabled by OS) - */ - if ((bv & 0xe6) == 0xe6 && (b & bit_AVX512BW)) { - is_cpu_support_avx512bw = true; - } - } - } - return ; -} - -struct ResTime { - float t_raw; - float t_512; -}; - - -/* Function prototypes -int xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen, - uint8_t *dst, int dlen); -*/ -static void encode_decode_zero(struct ResTime *res) -{ - uint8_t *buffer = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *compressed = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *buffer512 = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *compressed512 = g_malloc0(XBZRLE_PAGE_SIZE); - int i = 0; - int dlen = 0, dlen512 = 0; - int diff_len = g_test_rand_int_range(0, XBZRLE_PAGE_SIZE - 1006); - - for (i = diff_len; i > 0; i--) { - buffer[1000 + i] = i; - buffer512[1000 + i] = i; - } - - buffer[1000 + diff_len + 3] = 103; - buffer[1000 + diff_len + 5] = 105; - - buffer512[1000 + diff_len + 3] = 103; - buffer512[1000 + diff_len + 5] = 105; - - /* encode zero page */ - time_t t_start, t_end, t_start512, t_end512; - t_start = clock(); - dlen = xbzrle_encode_buffer(buffer, buffer, XBZRLE_PAGE_SIZE, compressed, - XBZRLE_PAGE_SIZE); - t_end = clock(); - float time_val = difftime(t_end, t_start); - g_assert(dlen == 0); - - t_start512 = clock(); - dlen512 = xbzrle_encode_buffer_avx512(buffer512, buffer512, XBZRLE_PAGE_SIZE, - compressed512, XBZRLE_PAGE_SIZE); - t_end512 = clock(); - float time_val512 = difftime(t_end512, t_start512); - g_assert(dlen512 == 0); - - res->t_raw = time_val; - res->t_512 = time_val512; - - g_free(buffer); - g_free(compressed); - g_free(buffer512); - g_free(compressed512); - -} - -static void test_encode_decode_zero_avx512(void) -{ - int i; - float time_raw = 0.0, time_512 = 0.0; - struct ResTime res; - for (i = 0; i < 10000; i++) { - encode_decode_zero(&res); - time_raw += res.t_raw; - time_512 += res.t_512; - } - printf("Zero test:\n"); - printf("Raw xbzrle_encode time is %f ms\n", time_raw); - printf("512 xbzrle_encode time is %f ms\n", time_512); -} - -static void encode_decode_unchanged(struct ResTime *res) -{ - uint8_t *compressed = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *test = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *compressed512 = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *test512 = g_malloc0(XBZRLE_PAGE_SIZE); - int i = 0; - int dlen = 0, dlen512 = 0; - int diff_len = g_test_rand_int_range(0, XBZRLE_PAGE_SIZE - 1006); - - for (i = diff_len; i > 0; i--) { - test[1000 + i] = i + 4; - test512[1000 + i] = i + 4; - } - - test[1000 + diff_len + 3] = 107; - test[1000 + diff_len + 5] = 109; - - test512[1000 + diff_len + 3] = 107; - test512[1000 + diff_len + 5] = 109; - - /* test unchanged buffer */ - time_t t_start, t_end, t_start512, t_end512; - t_start = clock(); - dlen = xbzrle_encode_buffer(test, test, XBZRLE_PAGE_SIZE, compressed, - XBZRLE_PAGE_SIZE); - t_end = clock(); - float time_val = difftime(t_end, t_start); - g_assert(dlen == 0); - - t_start512 = clock(); - dlen512 = xbzrle_encode_buffer_avx512(test512, test512, XBZRLE_PAGE_SIZE, - compressed512, XBZRLE_PAGE_SIZE); - t_end512 = clock(); - float time_val512 = difftime(t_end512, t_start512); - g_assert(dlen512 == 0); - - res->t_raw = time_val; - res->t_512 = time_val512; - - g_free(test); - 
g_free(compressed); - g_free(test512); - g_free(compressed512); - -} - -static void test_encode_decode_unchanged_avx512(void) -{ - int i; - float time_raw = 0.0, time_512 = 0.0; - struct ResTime res; - for (i = 0; i < 10000; i++) { - encode_decode_unchanged(&res); - time_raw += res.t_raw; - time_512 += res.t_512; - } - printf("Unchanged test:\n"); - printf("Raw xbzrle_encode time is %f ms\n", time_raw); - printf("512 xbzrle_encode time is %f ms\n", time_512); -} - -static void encode_decode_1_byte(struct ResTime *res) -{ - uint8_t *buffer = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *test = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *compressed = g_malloc(XBZRLE_PAGE_SIZE); - uint8_t *buffer512 = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *test512 = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *compressed512 = g_malloc(XBZRLE_PAGE_SIZE); - int dlen = 0, rc = 0, dlen512 = 0, rc512 = 0; - uint8_t buf[2]; - uint8_t buf512[2]; - - test[XBZRLE_PAGE_SIZE - 1] = 1; - test512[XBZRLE_PAGE_SIZE - 1] = 1; - - time_t t_start, t_end, t_start512, t_end512; - t_start = clock(); - dlen = xbzrle_encode_buffer(buffer, test, XBZRLE_PAGE_SIZE, compressed, - XBZRLE_PAGE_SIZE); - t_end = clock(); - float time_val = difftime(t_end, t_start); - g_assert(dlen == (uleb128_encode_small(&buf[0], 4095) + 2)); - - rc = xbzrle_decode_buffer(compressed, dlen, buffer, XBZRLE_PAGE_SIZE); - g_assert(rc == XBZRLE_PAGE_SIZE); - g_assert(memcmp(test, buffer, XBZRLE_PAGE_SIZE) == 0); - - t_start512 = clock(); - dlen512 = xbzrle_encode_buffer_avx512(buffer512, test512, XBZRLE_PAGE_SIZE, - compressed512, XBZRLE_PAGE_SIZE); - t_end512 = clock(); - float time_val512 = difftime(t_end512, t_start512); - g_assert(dlen512 == (uleb128_encode_small(&buf512[0], 4095) + 2)); - - rc512 = xbzrle_decode_buffer(compressed512, dlen512, buffer512, - XBZRLE_PAGE_SIZE); - g_assert(rc512 == XBZRLE_PAGE_SIZE); - g_assert(memcmp(test512, buffer512, XBZRLE_PAGE_SIZE) == 0); - - res->t_raw = time_val; - res->t_512 = time_val512; - - g_free(buffer); - g_free(compressed); - g_free(test); - g_free(buffer512); - g_free(compressed512); - g_free(test512); - -} - -static void test_encode_decode_1_byte_avx512(void) -{ - int i; - float time_raw = 0.0, time_512 = 0.0; - struct ResTime res; - for (i = 0; i < 10000; i++) { - encode_decode_1_byte(&res); - time_raw += res.t_raw; - time_512 += res.t_512; - } - printf("1 byte test:\n"); - printf("Raw xbzrle_encode time is %f ms\n", time_raw); - printf("512 xbzrle_encode time is %f ms\n", time_512); -} - -static void encode_decode_overflow(struct ResTime *res) -{ - uint8_t *compressed = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *test = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *buffer = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *compressed512 = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *test512 = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *buffer512 = g_malloc0(XBZRLE_PAGE_SIZE); - int i = 0, rc = 0, rc512 = 0; - - for (i = 0; i < XBZRLE_PAGE_SIZE / 2 - 1; i++) { - test[i * 2] = 1; - test512[i * 2] = 1; - } - - /* encode overflow */ - time_t t_start, t_end, t_start512, t_end512; - t_start = clock(); - rc = xbzrle_encode_buffer(buffer, test, XBZRLE_PAGE_SIZE, compressed, - XBZRLE_PAGE_SIZE); - t_end = clock(); - float time_val = difftime(t_end, t_start); - g_assert(rc == -1); - - t_start512 = clock(); - rc512 = xbzrle_encode_buffer_avx512(buffer512, test512, XBZRLE_PAGE_SIZE, - compressed512, XBZRLE_PAGE_SIZE); - t_end512 = clock(); - float time_val512 = difftime(t_end512, t_start512); - g_assert(rc512 == -1); - - res->t_raw = time_val; - res->t_512 = 
time_val512; - - g_free(buffer); - g_free(compressed); - g_free(test); - g_free(buffer512); - g_free(compressed512); - g_free(test512); - -} - -static void test_encode_decode_overflow_avx512(void) -{ - int i; - float time_raw = 0.0, time_512 = 0.0; - struct ResTime res; - for (i = 0; i < 10000; i++) { - encode_decode_overflow(&res); - time_raw += res.t_raw; - time_512 += res.t_512; - } - printf("Overflow test:\n"); - printf("Raw xbzrle_encode time is %f ms\n", time_raw); - printf("512 xbzrle_encode time is %f ms\n", time_512); -} - -static void encode_decode_range_avx512(struct ResTime *res) -{ - uint8_t *buffer = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *compressed = g_malloc(XBZRLE_PAGE_SIZE); - uint8_t *test = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *buffer512 = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *compressed512 = g_malloc(XBZRLE_PAGE_SIZE); - uint8_t *test512 = g_malloc0(XBZRLE_PAGE_SIZE); - int i = 0, rc = 0, rc512 = 0; - int dlen = 0, dlen512 = 0; - - int diff_len = g_test_rand_int_range(0, XBZRLE_PAGE_SIZE - 1006); - - for (i = diff_len; i > 0; i--) { - buffer[1000 + i] = i; - test[1000 + i] = i + 4; - buffer512[1000 + i] = i; - test512[1000 + i] = i + 4; - } - - buffer[1000 + diff_len + 3] = 103; - test[1000 + diff_len + 3] = 107; - - buffer[1000 + diff_len + 5] = 105; - test[1000 + diff_len + 5] = 109; - - buffer512[1000 + diff_len + 3] = 103; - test512[1000 + diff_len + 3] = 107; - - buffer512[1000 + diff_len + 5] = 105; - test512[1000 + diff_len + 5] = 109; - - /* test encode/decode */ - time_t t_start, t_end, t_start512, t_end512; - t_start = clock(); - dlen = xbzrle_encode_buffer(test, buffer, XBZRLE_PAGE_SIZE, compressed, - XBZRLE_PAGE_SIZE); - t_end = clock(); - float time_val = difftime(t_end, t_start); - rc = xbzrle_decode_buffer(compressed, dlen, test, XBZRLE_PAGE_SIZE); - g_assert(rc < XBZRLE_PAGE_SIZE); - g_assert(memcmp(test, buffer, XBZRLE_PAGE_SIZE) == 0); - - t_start512 = clock(); - dlen512 = xbzrle_encode_buffer_avx512(test512, buffer512, XBZRLE_PAGE_SIZE, - compressed512, XBZRLE_PAGE_SIZE); - t_end512 = clock(); - float time_val512 = difftime(t_end512, t_start512); - rc512 = xbzrle_decode_buffer(compressed512, dlen512, test512, XBZRLE_PAGE_SIZE); - g_assert(rc512 < XBZRLE_PAGE_SIZE); - g_assert(memcmp(test512, buffer512, XBZRLE_PAGE_SIZE) == 0); - - res->t_raw = time_val; - res->t_512 = time_val512; - - g_free(buffer); - g_free(compressed); - g_free(test); - g_free(buffer512); - g_free(compressed512); - g_free(test512); - -} - -static void test_encode_decode_avx512(void) -{ - int i; - float time_raw = 0.0, time_512 = 0.0; - struct ResTime res; - for (i = 0; i < 10000; i++) { - encode_decode_range_avx512(&res); - time_raw += res.t_raw; - time_512 += res.t_512; - } - printf("Encode decode test:\n"); - printf("Raw xbzrle_encode time is %f ms\n", time_raw); - printf("512 xbzrle_encode time is %f ms\n", time_512); -} - -static void encode_decode_random(struct ResTime *res) -{ - uint8_t *buffer = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *compressed = g_malloc(XBZRLE_PAGE_SIZE); - uint8_t *test = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *buffer512 = g_malloc0(XBZRLE_PAGE_SIZE); - uint8_t *compressed512 = g_malloc(XBZRLE_PAGE_SIZE); - uint8_t *test512 = g_malloc0(XBZRLE_PAGE_SIZE); - int i = 0, rc = 0, rc512 = 0; - int dlen = 0, dlen512 = 0; - - int diff_len = g_test_rand_int_range(0, XBZRLE_PAGE_SIZE - 1); - /* store the index of diff */ - int dirty_index[diff_len]; - for (int j = 0; j < diff_len; j++) { - dirty_index[j] = g_test_rand_int_range(0, XBZRLE_PAGE_SIZE - 1); - } 
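/*
 * Dirty the same random offsets in both the scalar and the AVX-512
 * buffers so the two encoders are timed on identical input pages.
 */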
- for (i = diff_len - 1; i >= 0; i--) { - buffer[dirty_index[i]] = i; - test[dirty_index[i]] = i + 4; - buffer512[dirty_index[i]] = i; - test512[dirty_index[i]] = i + 4; - } - - time_t t_start, t_end, t_start512, t_end512; - t_start = clock(); - dlen = xbzrle_encode_buffer(test, buffer, XBZRLE_PAGE_SIZE, compressed, - XBZRLE_PAGE_SIZE); - t_end = clock(); - float time_val = difftime(t_end, t_start); - rc = xbzrle_decode_buffer(compressed, dlen, test, XBZRLE_PAGE_SIZE); - g_assert(rc < XBZRLE_PAGE_SIZE); - - t_start512 = clock(); - dlen512 = xbzrle_encode_buffer_avx512(test512, buffer512, XBZRLE_PAGE_SIZE, - compressed512, XBZRLE_PAGE_SIZE); - t_end512 = clock(); - float time_val512 = difftime(t_end512, t_start512); - rc512 = xbzrle_decode_buffer(compressed512, dlen512, test512, XBZRLE_PAGE_SIZE); - g_assert(rc512 < XBZRLE_PAGE_SIZE); - - res->t_raw = time_val; - res->t_512 = time_val512; - - g_free(buffer); - g_free(compressed); - g_free(test); - g_free(buffer512); - g_free(compressed512); - g_free(test512); - -} - -static void test_encode_decode_random_avx512(void) -{ - int i; - float time_raw = 0.0, time_512 = 0.0; - struct ResTime res; - for (i = 0; i < 10000; i++) { - encode_decode_random(&res); - time_raw += res.t_raw; - time_512 += res.t_512; - } - printf("Random test:\n"); - printf("Raw xbzrle_encode time is %f ms\n", time_raw); - printf("512 xbzrle_encode time is %f ms\n", time_512); -} -#endif - -int main(int argc, char **argv) -{ - g_test_init(&argc, &argv, NULL); - g_test_rand_int(); - #if defined(CONFIG_AVX512BW_OPT) - if (likely(is_cpu_support_avx512bw)) { - g_test_add_func("/xbzrle/encode_decode_zero", test_encode_decode_zero_avx512); - g_test_add_func("/xbzrle/encode_decode_unchanged", - test_encode_decode_unchanged_avx512); - g_test_add_func("/xbzrle/encode_decode_1_byte", test_encode_decode_1_byte_avx512); - g_test_add_func("/xbzrle/encode_decode_overflow", - test_encode_decode_overflow_avx512); - g_test_add_func("/xbzrle/encode_decode", test_encode_decode_avx512); - g_test_add_func("/xbzrle/encode_decode_random", test_encode_decode_random_avx512); - } - #endif - return g_test_run(); -} diff --git a/tests/check-block.sh b/tests/check-block.sh deleted file mode 100755 index 5de2c1ba0b..0000000000 --- a/tests/check-block.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/sh - -if [ "$#" -eq 0 ]; then - echo "Usage: $0 fmt..." >&2 - exit 99 -fi - -# Honor the SPEED environment variable, just like we do it for "meson test" -format_list="$@" -if [ "$SPEED" = "slow" ] || [ "$SPEED" = "thorough" ]; then - group= -else - group="-g auto" -fi - -skip() { - echo "1..0 #SKIP $*" - exit 0 -} - -if [ -z "$(find . -name 'qemu-system-*' -print)" ]; then - skip "No qemu-system binary available ==> Not running the qemu-iotests." -fi - -cd tests/qemu-iotests - -# QEMU_CHECK_BLOCK_AUTO is used to disable some unstable sub-tests -export QEMU_CHECK_BLOCK_AUTO=1 -export PYTHONUTF8=1 -# If make was called with -jN we want to call ./check with -j N. Extract the -# flag from MAKEFLAGS, so that if it absent (or MAKEFLAGS is not defined), JOBS -# would be an empty line otherwise JOBS is prepared string of flag with value: -# "-j N" -# Note, that the following works even if make was called with "-j N" or even -# "--jobs N", as all these variants becomes simply "-jN" in MAKEFLAGS variable. 
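# For example, MAKEFLAGS containing "-j8" yields JOBS="-j 8" below;
# if no -jN flag is present, JOBS stays empty and ./check runs unparallelized.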
-JOBS=$(echo "$MAKEFLAGS" | sed -n 's/\(^\|.* \)-j\([0-9]\+\)\( .*\|$\)/-j \2/p') - -ret=0 -for fmt in $format_list ; do - ${PYTHON} ./check $JOBS -tap -$fmt $group || ret=1 -done - -exit $ret diff --git a/tests/data/acpi/microvm/APIC b/tests/data/acpi/microvm/APIC index 68dbd44a7e..672764e711 100644 Binary files a/tests/data/acpi/microvm/APIC and b/tests/data/acpi/microvm/APIC differ diff --git a/tests/data/acpi/microvm/APIC.ioapic2 b/tests/data/acpi/microvm/APIC.ioapic2 index 3063c52cd3..6f24fdb12c 100644 Binary files a/tests/data/acpi/microvm/APIC.ioapic2 and b/tests/data/acpi/microvm/APIC.ioapic2 differ diff --git a/tests/data/acpi/microvm/APIC.pcie b/tests/data/acpi/microvm/APIC.pcie index 4e8f6ed8d6..2239ca76a6 100644 Binary files a/tests/data/acpi/microvm/APIC.pcie and b/tests/data/acpi/microvm/APIC.pcie differ diff --git a/tests/data/acpi/pc/APIC b/tests/data/acpi/pc/APIC index 208331db53..868a3432f0 100644 Binary files a/tests/data/acpi/pc/APIC and b/tests/data/acpi/pc/APIC differ diff --git a/tests/data/acpi/pc/APIC.acpihmat b/tests/data/acpi/pc/APIC.acpihmat index 812c4603f2..125d1ff087 100644 Binary files a/tests/data/acpi/pc/APIC.acpihmat and b/tests/data/acpi/pc/APIC.acpihmat differ diff --git a/tests/data/acpi/pc/APIC.cphp b/tests/data/acpi/pc/APIC.cphp index 65cc4f4a9a..a2c2a24e5e 100644 Binary files a/tests/data/acpi/pc/APIC.cphp and b/tests/data/acpi/pc/APIC.cphp differ diff --git a/tests/data/acpi/pc/APIC.dimmpxm b/tests/data/acpi/pc/APIC.dimmpxm index d904d4a70d..9b5922bc72 100644 Binary files a/tests/data/acpi/pc/APIC.dimmpxm and b/tests/data/acpi/pc/APIC.dimmpxm differ diff --git a/tests/data/acpi/pc/DSDT b/tests/data/acpi/pc/DSDT index 0b475fb5a9..32d255cfc0 100644 Binary files a/tests/data/acpi/pc/DSDT and b/tests/data/acpi/pc/DSDT differ diff --git a/tests/data/acpi/pc/DSDT.acpierst b/tests/data/acpi/pc/DSDT.acpierst index 17ef7caeb6..33e872b2fa 100644 Binary files a/tests/data/acpi/pc/DSDT.acpierst and b/tests/data/acpi/pc/DSDT.acpierst differ diff --git a/tests/data/acpi/pc/DSDT.acpihmat b/tests/data/acpi/pc/DSDT.acpihmat index 675b674eaa..cd84abc1b1 100644 Binary files a/tests/data/acpi/pc/DSDT.acpihmat and b/tests/data/acpi/pc/DSDT.acpihmat differ diff --git a/tests/data/acpi/pc/DSDT.bridge b/tests/data/acpi/pc/DSDT.bridge index c1ce061366..69a73ea2a6 100644 Binary files a/tests/data/acpi/pc/DSDT.bridge and b/tests/data/acpi/pc/DSDT.bridge differ diff --git a/tests/data/acpi/pc/DSDT.cphp b/tests/data/acpi/pc/DSDT.cphp index 754ab854dc..20379056b3 100644 Binary files a/tests/data/acpi/pc/DSDT.cphp and b/tests/data/acpi/pc/DSDT.cphp differ diff --git a/tests/data/acpi/pc/DSDT.dimmpxm b/tests/data/acpi/pc/DSDT.dimmpxm index 170503336b..435496e836 100644 Binary files a/tests/data/acpi/pc/DSDT.dimmpxm and b/tests/data/acpi/pc/DSDT.dimmpxm differ diff --git a/tests/data/acpi/pc/DSDT.hpbridge b/tests/data/acpi/pc/DSDT.hpbridge index 834c27002e..b6eafab250 100644 Binary files a/tests/data/acpi/pc/DSDT.hpbridge and b/tests/data/acpi/pc/DSDT.hpbridge differ diff --git a/tests/data/acpi/pc/DSDT.hpbrroot b/tests/data/acpi/pc/DSDT.hpbrroot index a71ed4fbaa..a4073f36d6 100644 Binary files a/tests/data/acpi/pc/DSDT.hpbrroot and b/tests/data/acpi/pc/DSDT.hpbrroot differ diff --git a/tests/data/acpi/pc/DSDT.ipmikcs b/tests/data/acpi/pc/DSDT.ipmikcs index dd71356027..06aa7bfdec 100644 Binary files a/tests/data/acpi/pc/DSDT.ipmikcs and b/tests/data/acpi/pc/DSDT.ipmikcs differ diff --git a/tests/data/acpi/pc/DSDT.memhp b/tests/data/acpi/pc/DSDT.memhp index 2f895e9b38..10a0e44d61 
100644 Binary files a/tests/data/acpi/pc/DSDT.memhp and b/tests/data/acpi/pc/DSDT.memhp differ diff --git a/tests/data/acpi/pc/DSDT.nohpet b/tests/data/acpi/pc/DSDT.nohpet index c012b63ace..6905312d82 100644 Binary files a/tests/data/acpi/pc/DSDT.nohpet and b/tests/data/acpi/pc/DSDT.nohpet differ diff --git a/tests/data/acpi/pc/DSDT.numamem b/tests/data/acpi/pc/DSDT.numamem index f2ef4b9729..59e31338ee 100644 Binary files a/tests/data/acpi/pc/DSDT.numamem and b/tests/data/acpi/pc/DSDT.numamem differ diff --git a/tests/data/acpi/pc/DSDT.roothp b/tests/data/acpi/pc/DSDT.roothp index 657c8263f0..448d596cf4 100644 Binary files a/tests/data/acpi/pc/DSDT.roothp and b/tests/data/acpi/pc/DSDT.roothp differ diff --git a/tests/data/acpi/q35/APIC b/tests/data/acpi/q35/APIC index 208331db53..868a3432f0 100644 Binary files a/tests/data/acpi/q35/APIC and b/tests/data/acpi/q35/APIC differ diff --git a/tests/data/acpi/q35/APIC.acpihmat b/tests/data/acpi/q35/APIC.acpihmat index 812c4603f2..125d1ff087 100644 Binary files a/tests/data/acpi/q35/APIC.acpihmat and b/tests/data/acpi/q35/APIC.acpihmat differ diff --git a/tests/data/acpi/q35/APIC.acpihmat-noinitiator b/tests/data/acpi/q35/APIC.acpihmat-noinitiator index d904d4a70d..9b5922bc72 100644 Binary files a/tests/data/acpi/q35/APIC.acpihmat-noinitiator and b/tests/data/acpi/q35/APIC.acpihmat-noinitiator differ diff --git a/tests/data/acpi/q35/APIC.core-count2 b/tests/data/acpi/q35/APIC.core-count2 index a255082ef5..f5da2eb1e8 100644 Binary files a/tests/data/acpi/q35/APIC.core-count2 and b/tests/data/acpi/q35/APIC.core-count2 differ diff --git a/tests/data/acpi/q35/APIC.cphp b/tests/data/acpi/q35/APIC.cphp index 65cc4f4a9a..a2c2a24e5e 100644 Binary files a/tests/data/acpi/q35/APIC.cphp and b/tests/data/acpi/q35/APIC.cphp differ diff --git a/tests/data/acpi/q35/APIC.dimmpxm b/tests/data/acpi/q35/APIC.dimmpxm index d904d4a70d..9b5922bc72 100644 Binary files a/tests/data/acpi/q35/APIC.dimmpxm and b/tests/data/acpi/q35/APIC.dimmpxm differ diff --git a/tests/data/acpi/q35/APIC.xapic b/tests/data/acpi/q35/APIC.xapic index c1969c35aa..83bd28325a 100644 Binary files a/tests/data/acpi/q35/APIC.xapic and b/tests/data/acpi/q35/APIC.xapic differ diff --git a/tests/data/acpi/q35/DSDT b/tests/data/acpi/q35/DSDT index d68c472b46..720e8cbbbb 100644 Binary files a/tests/data/acpi/q35/DSDT and b/tests/data/acpi/q35/DSDT differ diff --git a/tests/data/acpi/q35/DSDT.acpierst b/tests/data/acpi/q35/DSDT.acpierst index de7ae27125..f26b1f2a19 100644 Binary files a/tests/data/acpi/q35/DSDT.acpierst and b/tests/data/acpi/q35/DSDT.acpierst differ diff --git a/tests/data/acpi/q35/DSDT.acpihmat b/tests/data/acpi/q35/DSDT.acpihmat index 48e2862257..86771f1746 100644 Binary files a/tests/data/acpi/q35/DSDT.acpihmat and b/tests/data/acpi/q35/DSDT.acpihmat differ diff --git a/tests/data/acpi/q35/DSDT.acpihmat-noinitiator b/tests/data/acpi/q35/DSDT.acpihmat-noinitiator index 30a4aa2ec8..a894a2d16c 100644 Binary files a/tests/data/acpi/q35/DSDT.acpihmat-noinitiator and b/tests/data/acpi/q35/DSDT.acpihmat-noinitiator differ diff --git a/tests/data/acpi/q35/DSDT.applesmc b/tests/data/acpi/q35/DSDT.applesmc index 84e2b5cbc4..276ae1df51 100644 Binary files a/tests/data/acpi/q35/DSDT.applesmc and b/tests/data/acpi/q35/DSDT.applesmc differ diff --git a/tests/data/acpi/q35/DSDT.bridge b/tests/data/acpi/q35/DSDT.bridge index e411d40fd1..9f8a208aaa 100644 Binary files a/tests/data/acpi/q35/DSDT.bridge and b/tests/data/acpi/q35/DSDT.bridge differ diff --git a/tests/data/acpi/q35/DSDT.core-count2 
b/tests/data/acpi/q35/DSDT.core-count2 index 0603db8cc6..2ec11fe3c3 100644 Binary files a/tests/data/acpi/q35/DSDT.core-count2 and b/tests/data/acpi/q35/DSDT.core-count2 differ diff --git a/tests/data/acpi/q35/DSDT.cphp b/tests/data/acpi/q35/DSDT.cphp index beeb83c33b..612c85b1b4 100644 Binary files a/tests/data/acpi/q35/DSDT.cphp and b/tests/data/acpi/q35/DSDT.cphp differ diff --git a/tests/data/acpi/q35/DSDT.cxl b/tests/data/acpi/q35/DSDT.cxl index 3d18b9672d..f049f414f0 100644 Binary files a/tests/data/acpi/q35/DSDT.cxl and b/tests/data/acpi/q35/DSDT.cxl differ diff --git a/tests/data/acpi/q35/DSDT.dimmpxm b/tests/data/acpi/q35/DSDT.dimmpxm index 99a93e12a7..23dabeacb0 100644 Binary files a/tests/data/acpi/q35/DSDT.dimmpxm and b/tests/data/acpi/q35/DSDT.dimmpxm differ diff --git a/tests/data/acpi/q35/DSDT.ipmibt b/tests/data/acpi/q35/DSDT.ipmibt index 7f7601dbff..541bb70522 100644 Binary files a/tests/data/acpi/q35/DSDT.ipmibt and b/tests/data/acpi/q35/DSDT.ipmibt differ diff --git a/tests/data/acpi/q35/DSDT.ipmismbus b/tests/data/acpi/q35/DSDT.ipmismbus index 6c5d1afe44..e2d57a3318 100644 Binary files a/tests/data/acpi/q35/DSDT.ipmismbus and b/tests/data/acpi/q35/DSDT.ipmismbus differ diff --git a/tests/data/acpi/q35/DSDT.ivrs b/tests/data/acpi/q35/DSDT.ivrs index de7ae27125..f26b1f2a19 100644 Binary files a/tests/data/acpi/q35/DSDT.ivrs and b/tests/data/acpi/q35/DSDT.ivrs differ diff --git a/tests/data/acpi/q35/DSDT.memhp b/tests/data/acpi/q35/DSDT.memhp index 79bce5c8f0..809d7e2f0f 100644 Binary files a/tests/data/acpi/q35/DSDT.memhp and b/tests/data/acpi/q35/DSDT.memhp differ diff --git a/tests/data/acpi/q35/DSDT.mmio64 b/tests/data/acpi/q35/DSDT.mmio64 index c249929add..ab3fe3c1b5 100644 Binary files a/tests/data/acpi/q35/DSDT.mmio64 and b/tests/data/acpi/q35/DSDT.mmio64 differ diff --git a/tests/data/acpi/q35/DSDT.multi-bridge b/tests/data/acpi/q35/DSDT.multi-bridge index 66b39be294..9ae8ee0b41 100644 Binary files a/tests/data/acpi/q35/DSDT.multi-bridge and b/tests/data/acpi/q35/DSDT.multi-bridge differ diff --git a/tests/data/acpi/q35/DSDT.noacpihp b/tests/data/acpi/q35/DSDT.noacpihp new file mode 100644 index 0000000000..6ab1f0e525 Binary files /dev/null and b/tests/data/acpi/q35/DSDT.noacpihp differ diff --git a/tests/data/acpi/q35/DSDT.nohpet b/tests/data/acpi/q35/DSDT.nohpet index 9ff9983a80..becb5f7cad 100644 Binary files a/tests/data/acpi/q35/DSDT.nohpet and b/tests/data/acpi/q35/DSDT.nohpet differ diff --git a/tests/data/acpi/q35/DSDT.numamem b/tests/data/acpi/q35/DSDT.numamem index 1e7c45ef3c..0cdec0b4c5 100644 Binary files a/tests/data/acpi/q35/DSDT.numamem and b/tests/data/acpi/q35/DSDT.numamem differ diff --git a/tests/data/acpi/q35/DSDT.pvpanic-isa b/tests/data/acpi/q35/DSDT.pvpanic-isa index ed47451c44..6a9904ec94 100644 Binary files a/tests/data/acpi/q35/DSDT.pvpanic-isa and b/tests/data/acpi/q35/DSDT.pvpanic-isa differ diff --git a/tests/data/acpi/q35/DSDT.tis.tpm12 b/tests/data/acpi/q35/DSDT.tis.tpm12 index efc2efc19f..628bf628f6 100644 Binary files a/tests/data/acpi/q35/DSDT.tis.tpm12 and b/tests/data/acpi/q35/DSDT.tis.tpm12 differ diff --git a/tests/data/acpi/q35/DSDT.tis.tpm2 b/tests/data/acpi/q35/DSDT.tis.tpm2 index 675339715f..35c6b08068 100644 Binary files a/tests/data/acpi/q35/DSDT.tis.tpm2 and b/tests/data/acpi/q35/DSDT.tis.tpm2 differ diff --git a/tests/data/acpi/q35/DSDT.viot b/tests/data/acpi/q35/DSDT.viot index eeb40b360f..3ad4d26b7f 100644 Binary files a/tests/data/acpi/q35/DSDT.viot and b/tests/data/acpi/q35/DSDT.viot differ diff --git 
a/tests/data/acpi/q35/DSDT.xapic b/tests/data/acpi/q35/DSDT.xapic index 3aa86f0724..d4a34e2351 100644 Binary files a/tests/data/acpi/q35/DSDT.xapic and b/tests/data/acpi/q35/DSDT.xapic differ diff --git a/tests/data/acpi/virt/APIC.memhp b/tests/data/acpi/virt/APIC.memhp deleted file mode 100644 index 179d274770..0000000000 Binary files a/tests/data/acpi/virt/APIC.memhp and /dev/null differ diff --git a/tests/data/acpi/virt/APIC.numamem b/tests/data/acpi/virt/APIC.numamem deleted file mode 100644 index 179d274770..0000000000 Binary files a/tests/data/acpi/virt/APIC.numamem and /dev/null differ diff --git a/tests/data/acpi/virt/DSDT.numamem b/tests/data/acpi/virt/DSDT.numamem deleted file mode 100644 index c475039907..0000000000 Binary files a/tests/data/acpi/virt/DSDT.numamem and /dev/null differ diff --git a/tests/data/acpi/virt/FACP.memhp b/tests/data/acpi/virt/FACP.memhp deleted file mode 100644 index ac05c35a69..0000000000 Binary files a/tests/data/acpi/virt/FACP.memhp and /dev/null differ diff --git a/tests/data/acpi/virt/FACP.numamem b/tests/data/acpi/virt/FACP.numamem deleted file mode 100644 index ac05c35a69..0000000000 Binary files a/tests/data/acpi/virt/FACP.numamem and /dev/null differ diff --git a/tests/data/acpi/virt/GTDT.memhp b/tests/data/acpi/virt/GTDT.memhp deleted file mode 100644 index 6f8cb9b8f3..0000000000 Binary files a/tests/data/acpi/virt/GTDT.memhp and /dev/null differ diff --git a/tests/data/acpi/virt/GTDT.numamem b/tests/data/acpi/virt/GTDT.numamem deleted file mode 100644 index 6f8cb9b8f3..0000000000 Binary files a/tests/data/acpi/virt/GTDT.numamem and /dev/null differ diff --git a/tests/data/acpi/virt/IORT.memhp b/tests/data/acpi/virt/IORT.memhp deleted file mode 100644 index 7efd0ce8a6..0000000000 Binary files a/tests/data/acpi/virt/IORT.memhp and /dev/null differ diff --git a/tests/data/acpi/virt/IORT.numamem b/tests/data/acpi/virt/IORT.numamem deleted file mode 100644 index 7efd0ce8a6..0000000000 Binary files a/tests/data/acpi/virt/IORT.numamem and /dev/null differ diff --git a/tests/data/acpi/virt/IORT.pxb b/tests/data/acpi/virt/IORT.pxb deleted file mode 100644 index 7efd0ce8a6..0000000000 Binary files a/tests/data/acpi/virt/IORT.pxb and /dev/null differ diff --git a/tests/data/acpi/virt/MCFG.memhp b/tests/data/acpi/virt/MCFG.memhp deleted file mode 100644 index f4ae3203a4..0000000000 Binary files a/tests/data/acpi/virt/MCFG.memhp and /dev/null differ diff --git a/tests/data/acpi/virt/MCFG.numamem b/tests/data/acpi/virt/MCFG.numamem deleted file mode 100644 index f4ae3203a4..0000000000 Binary files a/tests/data/acpi/virt/MCFG.numamem and /dev/null differ diff --git a/tests/data/acpi/virt/SPCR.memhp b/tests/data/acpi/virt/SPCR.memhp deleted file mode 100644 index 24e0a579e7..0000000000 Binary files a/tests/data/acpi/virt/SPCR.memhp and /dev/null differ diff --git a/tests/data/acpi/virt/SPCR.numamem b/tests/data/acpi/virt/SPCR.numamem deleted file mode 100644 index 24e0a579e7..0000000000 Binary files a/tests/data/acpi/virt/SPCR.numamem and /dev/null differ diff --git a/tests/data/acpi/virt/SRAT.acpihmatvirt b/tests/data/acpi/virt/SRAT.acpihmatvirt index 691ef56e34..6fe55dd7d0 100644 Binary files a/tests/data/acpi/virt/SRAT.acpihmatvirt and b/tests/data/acpi/virt/SRAT.acpihmatvirt differ diff --git a/tests/data/acpi/virt/SSDT.memhp b/tests/data/acpi/virt/SSDT.memhp index 2fcfc5fda9..ef93c44464 100644 Binary files a/tests/data/acpi/virt/SSDT.memhp and b/tests/data/acpi/virt/SSDT.memhp differ diff --git a/tests/decode/check.sh b/tests/decode/check.sh deleted 
file mode 100755 index 95445a0115..0000000000 --- a/tests/decode/check.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/sh -# This work is licensed under the terms of the GNU LGPL, version 2 or later. -# See the COPYING.LIB file in the top-level directory. - -PYTHON=$1 -DECODETREE=$2 -E=0 - -# All of these tests should produce errors -for i in err_*.decode; do - if $PYTHON $DECODETREE $i > /dev/null 2> /dev/null; then - # Pass, aka failed to fail. - echo FAIL: $i 1>&2 - E=1 - fi -done - -for i in succ_*.decode; do - if ! $PYTHON $DECODETREE $i > /dev/null 2> /dev/null; then - echo FAIL:$i 1>&2 - fi -done - -exit $E diff --git a/tests/decode/err_field10.decode b/tests/decode/err_field10.decode new file mode 100644 index 0000000000..3e672b7459 --- /dev/null +++ b/tests/decode/err_field10.decode @@ -0,0 +1,7 @@ +# This work is licensed under the terms of the GNU LGPL, version 2 or later. +# See the COPYING.LIB file in the top-level directory. + +# Diagnose formats which refer to undefined fields +%field1 field2:3 +@fmt ........ ........ ........ ........ %field1 +insn 00000000 00000000 00000000 00000000 @fmt diff --git a/tests/decode/err_field7.decode b/tests/decode/err_field7.decode new file mode 100644 index 0000000000..51fad7ccea --- /dev/null +++ b/tests/decode/err_field7.decode @@ -0,0 +1,7 @@ +# This work is licensed under the terms of the GNU LGPL, version 2 or later. +# See the COPYING.LIB file in the top-level directory. + +# Diagnose fields whose definitions form a loop +%field1 field2:3 +%field2 field1:4 +insn 00000000 00000000 00000000 00000000 %field1 %field2 diff --git a/tests/decode/err_field8.decode b/tests/decode/err_field8.decode new file mode 100644 index 0000000000..cc47c08a45 --- /dev/null +++ b/tests/decode/err_field8.decode @@ -0,0 +1,8 @@ +# This work is licensed under the terms of the GNU LGPL, version 2 or later. +# See the COPYING.LIB file in the top-level directory. + +# Diagnose patterns which refer to undefined fields +&f1 f1 a +%field1 field2:3 +@fmt ........ ........ ........ .... a:4 &f1 +insn 00000000 00000000 00000000 0000 .... @fmt f1=%field1 diff --git a/tests/decode/err_field9.decode b/tests/decode/err_field9.decode new file mode 100644 index 0000000000..e7361d521b --- /dev/null +++ b/tests/decode/err_field9.decode @@ -0,0 +1,14 @@ +# This work is licensed under the terms of the GNU LGPL, version 2 or later. +# See the COPYING.LIB file in the top-level directory. + +# Diagnose fields where the format refers to a field defined in the +# pattern and the pattern refers to a field defined in the format. +# This is theoretically not impossible to implement, but is not +# supported by the script at this time. +&abcd a b c d +%refa a:3 +%refc c:4 +# Format defines 'c' and sets 'b' to an indirect ref to 'a' +@fmt ........ ........ ........ c:8 &abcd b=%refa +# Pattern defines 'a' and sets 'd' to an indirect ref to 'c' +insn 00000000 00000000 00000000 ........ 
@fmt d=%refc a=6 diff --git a/tests/decode/meson.build b/tests/decode/meson.build new file mode 100644 index 0000000000..b13fada980 --- /dev/null +++ b/tests/decode/meson.build @@ -0,0 +1,64 @@ +err_tests = [ + 'err_argset1.decode', + 'err_argset2.decode', + 'err_field1.decode', + 'err_field2.decode', + 'err_field3.decode', + 'err_field4.decode', + 'err_field5.decode', + 'err_field6.decode', + 'err_field7.decode', + 'err_field8.decode', + 'err_field9.decode', + 'err_field10.decode', + 'err_init1.decode', + 'err_init2.decode', + 'err_init3.decode', + 'err_init4.decode', + 'err_overlap1.decode', + 'err_overlap2.decode', + 'err_overlap3.decode', + 'err_overlap4.decode', + 'err_overlap5.decode', + 'err_overlap6.decode', + 'err_overlap7.decode', + 'err_overlap8.decode', + 'err_overlap9.decode', + 'err_pattern_group_empty.decode', + 'err_pattern_group_ident1.decode', + 'err_pattern_group_ident2.decode', + 'err_pattern_group_nest1.decode', + 'err_pattern_group_nest2.decode', + 'err_pattern_group_nest3.decode', + 'err_pattern_group_overlap1.decode', + 'err_width1.decode', + 'err_width2.decode', + 'err_width3.decode', + 'err_width4.decode', +] + +succ_tests = [ + 'succ_argset_type1.decode', + 'succ_function.decode', + 'succ_ident1.decode', + 'succ_named_field.decode', + 'succ_pattern_group_nest1.decode', + 'succ_pattern_group_nest2.decode', + 'succ_pattern_group_nest3.decode', + 'succ_pattern_group_nest4.decode', +] + +suite = 'decodetree' +decodetree = find_program(meson.project_source_root() / 'scripts/decodetree.py') + +foreach t: err_tests + test(fs.replace_suffix(t, ''), + decodetree, args: ['--output-null', '--test-for-error', files(t)], + suite: suite) +endforeach + +foreach t: succ_tests + test(fs.replace_suffix(t, ''), + decodetree, args: ['--output-null', files(t)], + suite: suite) +endforeach diff --git a/tests/decode/succ_named_field.decode b/tests/decode/succ_named_field.decode new file mode 100644 index 0000000000..e64b3f9356 --- /dev/null +++ b/tests/decode/succ_named_field.decode @@ -0,0 +1,19 @@ +# This work is licensed under the terms of the GNU LGPL, version 2 or later. +# See the COPYING.LIB file in the top-level directory. + +# field using a named_field +%imm_sz 8:8 sz:3 +insn 00000000 00000000 ........ 00000000 imm_sz=%imm_sz sz=1 + +# Ditto, via a format. Here a field in the format +# references a named field defined in the insn pattern: +&imm_a imm alpha +%foo 0:16 alpha:4 +@foo 00000001 ........ ........ ........ &imm_a imm=%foo +i1 ........ 00000000 ........ ........ @foo alpha=1 +i2 ........ 00000001 ........ ........ @foo alpha=2 + +# Here the named field is defined in the format and referenced +# from the insn pattern: +@bar 00000010 ........ ........ ........ &imm_a alpha=4 +i3 ........ 00000000 ........ ........ 
@bar imm=%foo diff --git a/tests/docker/Makefile.include b/tests/docker/Makefile.include index bfb0dcac21..142e8605ee 100644 --- a/tests/docker/Makefile.include +++ b/tests/docker/Makefile.include @@ -6,7 +6,9 @@ NULL := SPACE := $(NULL) # COMMA := , -HOST_ARCH = $(if $(ARCH),$(ARCH),$(shell uname -m)) +HOST_ARCH = $(shell uname -m) +USER = $(if $(NOUSER),,$(shell id -un)) +UID = $(if $(NOUSER),,$(shell id -u)) DOCKER_FILES_DIR := $(SRC_PATH)/tests/docker/dockerfiles ifeq ($(HOST_ARCH),x86_64) @@ -14,6 +16,7 @@ DOCKER_DEFAULT_REGISTRY := registry.gitlab.com/qemu-project/qemu endif DOCKER_REGISTRY := $(if $(REGISTRY),$(REGISTRY),$(DOCKER_DEFAULT_REGISTRY)) +RUNC ?= docker ENGINE ?= auto DOCKER_SCRIPT=$(SRC_PATH)/tests/docker/docker.py --engine $(ENGINE) @@ -35,15 +38,17 @@ docker-qemu-src: $(DOCKER_SRC_COPY) # General rule for building docker images. docker-image-%: $(DOCKER_FILES_DIR)/%.docker - $(call quiet-command,\ - $(DOCKER_SCRIPT) build -t qemu/$* -f $< \ - $(if $V,,--quiet) \ - $(if $(NOCACHE),--no-cache, \ - $(if $(DOCKER_REGISTRY),--registry $(DOCKER_REGISTRY))) \ - $(if $(NOUSER),,--add-current-user) \ - $(if $(EXTRA_FILES),--extra-files $(EXTRA_FILES))\ - $(if $(EXECUTABLE),--include-executable=$(EXECUTABLE)),\ - "BUILD","$*") + $(call quiet-command, \ + DOCKER_BUILDKIT=1 $(RUNC) build \ + $(if $V,,--quiet) \ + $(if $(NOCACHE),--no-cache, \ + $(if $(DOCKER_REGISTRY),--cache-from $(DOCKER_REGISTRY)/qemu/$*)) \ + --build-arg BUILDKIT_INLINE_CACHE=1 \ + $(if $(NOUSER),, \ + --build-arg USER=$(USER) \ + --build-arg UID=$(UID)) \ + -t qemu/$* - < $<, \ + "BUILD", $1) # Special rule for debootstraped binfmt linux-user images docker-binfmt-image-debian-%: $(DOCKER_FILES_DIR)/debian-bootstrap.docker @@ -219,8 +224,9 @@ docker-run: docker-qemu-src $(IMAGE) --executable $(EXECUTABLE), \ " COPYING $(EXECUTABLE) to $(IMAGE)")) $(call quiet-command, \ - $(DOCKER_SCRIPT) run \ - $(if $(NOUSER),,--run-as-current-user) \ + $(RUNC) run \ + --rm \ + $(if $(NOUSER),,-u $(UID)) \ --security-opt seccomp=unconfined \ $(if $(DEBUG),-ti,) \ $(if $(NETWORK),$(if $(subst $(NETWORK),,1),--net=$(NETWORK)),--net=none) \ diff --git a/tests/docker/docker.py b/tests/docker/docker.py index 3a1ed7cb18..688ef62989 100755 --- a/tests/docker/docker.py +++ b/tests/docker/docker.py @@ -23,10 +23,10 @@ import enum import tempfile import re import signal +import getpass from tarfile import TarFile, TarInfo from io import StringIO, BytesIO from shutil import copy, rmtree -from pwd import getpwuid from datetime import datetime, timedelta @@ -316,7 +316,7 @@ class Docker(object): if user: uid = os.getuid() - uname = getpwuid(uid).pw_name + uname = getpass.getuser() tmp_df.write("\n") tmp_df.write("RUN id %s 2>/dev/null || useradd -u %d -U %s" % (uname, uid, uname)) @@ -570,7 +570,7 @@ class UpdateCommand(SubCommand): if args.user: uid = os.getuid() - uname = getpwuid(uid).pw_name + uname = getpass.getuser() df.write("\n") df.write("RUN id %s 2>/dev/null || useradd -u %d -U %s" % (uname, uid, uname)) diff --git a/tests/docker/dockerfiles/alpine.docker b/tests/docker/dockerfiles/alpine.docker index 4a569d82f6..0097637dca 100644 --- a/tests/docker/dockerfiles/alpine.docker +++ b/tests/docker/dockerfiles/alpine.docker @@ -19,7 +19,6 @@ RUN apk update && \ ca-certificates \ capstone-dev \ ccache \ - cdrkit \ ceph-dev \ clang \ cmocka-dev \ @@ -61,12 +60,13 @@ RUN apk update && \ liburing-dev \ libusb-dev \ linux-pam-dev \ - llvm11 \ + llvm \ lttng-ust-dev \ lzo-dev \ make \ mesa-dev \ meson \ + mtools \ multipath-tools \ 
musl-dev \ ncurses-dev \ @@ -94,6 +94,7 @@ RUN apk update && \ sed \ snappy-dev \ sndio-dev \ + socat \ sparse \ spice-dev \ spice-protocol \ @@ -107,8 +108,10 @@ RUN apk update && \ which \ xen-dev \ xfsprogs-dev \ + xorriso \ zlib-dev \ zlib-static \ + zstd \ zstd-dev && \ apk list | sort > /packages.txt && \ mkdir -p /usr/libexec/ccache-wrappers && \ @@ -123,3 +126,8 @@ ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" ENV NINJA "/usr/bin/ninja" ENV PYTHON "/usr/bin/python3" +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/centos8.docker b/tests/docker/dockerfiles/centos8.docker index fbc953c6dc..78f454b782 100644 --- a/tests/docker/dockerfiles/centos8.docker +++ b/tests/docker/dockerfiles/centos8.docker @@ -36,7 +36,6 @@ RUN dnf distro-sync -y && \ fuse3-devel \ gcc \ gcc-c++ \ - genisoimage \ gettext \ git \ glib2-devel \ @@ -82,7 +81,7 @@ RUN dnf distro-sync -y && \ lzo-devel \ make \ mesa-libgbm-devel \ - meson \ + mtools \ ncurses-devel \ nettle-devel \ ninja-build \ @@ -94,17 +93,16 @@ RUN dnf distro-sync -y && \ pixman-devel \ pkgconfig \ pulseaudio-libs-devel \ - python3 \ - python3-PyYAML \ - python3-numpy \ - python3-pillow \ - python3-pip \ - python3-sphinx \ - python3-sphinx_rtd_theme \ + python38 \ + python38-PyYAML \ + python38-numpy \ + python38-pip \ + python38-setuptools \ + python38-wheel \ rdma-core-devel \ - rpm \ sed \ snappy-devel \ + socat \ spice-protocol \ spice-server-devel \ systemd-devel \ @@ -116,8 +114,10 @@ RUN dnf distro-sync -y && \ vte291-devel \ which \ xfsprogs-devel \ + xorriso \ zlib-devel \ - zlib-static && \ + zlib-static \ + zstd && \ dnf autoremove -y && \ dnf clean all -y && \ rpm -qa | sort > /packages.txt && \ @@ -128,8 +128,19 @@ RUN dnf distro-sync -y && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc +RUN /usr/bin/pip3.8 install \ + meson==0.63.2 \ + pillow \ + sphinx \ + sphinx-rtd-theme + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" ENV NINJA "/usr/bin/ninja" -ENV PYTHON "/usr/bin/python3" +ENV PYTHON "/usr/bin/python3.8" +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-all-test-cross.docker b/tests/docker/dockerfiles/debian-all-test-cross.docker index 8dc5e1b5de..f9f401544a 100644 --- a/tests/docker/dockerfiles/debian-all-test-cross.docker +++ b/tests/docker/dockerfiles/debian-all-test-cross.docker @@ -57,7 +57,13 @@ RUN DEBIAN_FRONTEND=noninteractive eatmydata \ gcc-sh4-linux-gnu \ libc6-dev-sh4-cross \ gcc-sparc64-linux-gnu \ - libc6-dev-sparc64-cross + libc6-dev-sparc64-cross \ + python3-venv ENV QEMU_CONFIGURE_OPTS --disable-system --disable-docs --disable-tools ENV DEF_TARGET_LIST aarch64-linux-user,alpha-linux-user,arm-linux-user,hppa-linux-user,i386-linux-user,m68k-linux-user,mips-linux-user,mips64-linux-user,mips64el-linux-user,mipsel-linux-user,ppc-linux-user,ppc64-linux-user,ppc64le-linux-user,riscv64-linux-user,s390x-linux-user,sh4-linux-user,sparc64-linux-user +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-alpha-cross.docker 
b/tests/docker/dockerfiles/debian-alpha-cross.docker index 4eeb43c78a..7fa7bf1bde 100644 --- a/tests/docker/dockerfiles/debian-alpha-cross.docker +++ b/tests/docker/dockerfiles/debian-alpha-cross.docker @@ -12,3 +12,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-alpha-linux-gnu \ libc6.1-dev-alpha-cross +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-amd64-cross.docker b/tests/docker/dockerfiles/debian-amd64-cross.docker index 5175095a85..40a2b6acc4 100644 --- a/tests/docker/dockerfiles/debian-amd64-cross.docker +++ b/tests/docker/dockerfiles/debian-amd64-cross.docker @@ -25,7 +25,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ findutils \ flex \ gcovr \ - genisoimage \ gettext \ git \ hostname \ @@ -37,6 +36,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ locales \ make \ meson \ + mtools \ ncat \ ninja-build \ openssh-client \ @@ -52,10 +52,13 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-yaml \ rpm2cpio \ sed \ + socat \ sparse \ tar \ tesseract-ocr \ - tesseract-ocr-eng && \ + tesseract-ocr-eng \ + xorriso \ + zstd && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ @@ -146,7 +149,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ mkdir -p /usr/local/share/meson/cross && \ - echo "[binaries]\n\ + printf "[binaries]\n\ c = '/usr/bin/x86_64-linux-gnu-gcc'\n\ ar = '/usr/bin/x86_64-linux-gnu-gcc-ar'\n\ strip = '/usr/bin/x86_64-linux-gnu-strip'\n\ @@ -156,7 +159,7 @@ pkgconfig = '/usr/bin/x86_64-linux-gnu-pkg-config'\n\ system = 'linux'\n\ cpu_family = 'x86_64'\n\ cpu = 'x86_64'\n\ -endian = 'little'" > /usr/local/share/meson/cross/x86_64-linux-gnu && \ +endian = 'little'\n" > /usr/local/share/meson/cross/x86_64-linux-gnu && \ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ mkdir -p /usr/libexec/ccache-wrappers && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/x86_64-linux-gnu-c++ && \ @@ -168,3 +171,8 @@ ENV ABI "x86_64-linux-gnu" ENV MESON_OPTS "--cross-file=x86_64-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=x86_64-linux-gnu- ENV DEF_TARGET_LIST x86_64-softmmu,x86_64-linux-user,i386-softmmu,i386-linux-user +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-amd64.docker b/tests/docker/dockerfiles/debian-amd64.docker index b61f664ea2..e39871c7bb 100644 --- a/tests/docker/dockerfiles/debian-amd64.docker +++ b/tests/docker/dockerfiles/debian-amd64.docker @@ -28,7 +28,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ g++ \ gcc \ gcovr \ - genisoimage \ gettext \ git \ hostname \ @@ -103,6 +102,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ locales \ make \ meson \ + mtools \ multipath-tools \ ncat \ nettle-dev \ @@ -120,13 +120,16 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-yaml \ rpm2cpio \ sed \ + socat \ sparse \ systemtap-sdt-dev \ tar \ tesseract-ocr \ tesseract-ocr-eng \ xfslibs-dev \ - zlib1g-dev && \ + xorriso \ + zlib1g-dev \ + zstd && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' 
/etc/locale.gen && \ @@ -154,3 +157,8 @@ RUN git clone https://github.com/luigirizzo/netmap.git /usr/src/netmap RUN cd /usr/src/netmap && git checkout v11.3 RUN cd /usr/src/netmap/LINUX && ./configure --no-drivers --no-apps --kernel-dir=$(ls -d /usr/src/linux-headers-*-amd64) && make install ENV QEMU_CONFIGURE_OPTS --enable-netmap +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-arm64-cross.docker b/tests/docker/dockerfiles/debian-arm64-cross.docker index b69958c69f..c99300bbfa 100644 --- a/tests/docker/dockerfiles/debian-arm64-cross.docker +++ b/tests/docker/dockerfiles/debian-arm64-cross.docker @@ -25,7 +25,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ findutils \ flex \ gcovr \ - genisoimage \ gettext \ git \ hostname \ @@ -37,6 +36,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ locales \ make \ meson \ + mtools \ ncat \ ninja-build \ openssh-client \ @@ -52,10 +52,13 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-yaml \ rpm2cpio \ sed \ + socat \ sparse \ tar \ tesseract-ocr \ - tesseract-ocr-eng && \ + tesseract-ocr-eng \ + xorriso \ + zstd && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ @@ -145,7 +148,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ mkdir -p /usr/local/share/meson/cross && \ - echo "[binaries]\n\ + printf "[binaries]\n\ c = '/usr/bin/aarch64-linux-gnu-gcc'\n\ ar = '/usr/bin/aarch64-linux-gnu-gcc-ar'\n\ strip = '/usr/bin/aarch64-linux-gnu-strip'\n\ @@ -155,7 +158,7 @@ pkgconfig = '/usr/bin/aarch64-linux-gnu-pkg-config'\n\ system = 'linux'\n\ cpu_family = 'aarch64'\n\ cpu = 'aarch64'\n\ -endian = 'little'" > /usr/local/share/meson/cross/aarch64-linux-gnu && \ +endian = 'little'\n" > /usr/local/share/meson/cross/aarch64-linux-gnu && \ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ mkdir -p /usr/libexec/ccache-wrappers && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/aarch64-linux-gnu-c++ && \ @@ -167,3 +170,8 @@ ENV ABI "aarch64-linux-gnu" ENV MESON_OPTS "--cross-file=aarch64-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=aarch64-linux-gnu- ENV DEF_TARGET_LIST aarch64-softmmu,aarch64-linux-user +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-armel-cross.docker b/tests/docker/dockerfiles/debian-armel-cross.docker index 96b524fab6..5db5c78b31 100644 --- a/tests/docker/dockerfiles/debian-armel-cross.docker +++ b/tests/docker/dockerfiles/debian-armel-cross.docker @@ -25,7 +25,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ findutils \ flex \ gcovr \ - genisoimage \ gettext \ git \ hostname \ @@ -37,6 +36,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ locales \ make \ meson \ + mtools \ ncat \ ninja-build \ openssh-client \ @@ -52,10 +52,13 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-yaml \ rpm2cpio \ sed \ + socat \ sparse \ tar \ tesseract-ocr \ - tesseract-ocr-eng && \ + tesseract-ocr-eng \ + xorriso \ + zstd && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ @@ -144,7 +147,7 @@ RUN export 
DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ mkdir -p /usr/local/share/meson/cross && \ - echo "[binaries]\n\ + printf "[binaries]\n\ c = '/usr/bin/arm-linux-gnueabi-gcc'\n\ ar = '/usr/bin/arm-linux-gnueabi-gcc-ar'\n\ strip = '/usr/bin/arm-linux-gnueabi-strip'\n\ @@ -154,7 +157,7 @@ pkgconfig = '/usr/bin/arm-linux-gnueabi-pkg-config'\n\ system = 'linux'\n\ cpu_family = 'arm'\n\ cpu = 'arm'\n\ -endian = 'little'" > /usr/local/share/meson/cross/arm-linux-gnueabi && \ +endian = 'little'\n" > /usr/local/share/meson/cross/arm-linux-gnueabi && \ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ mkdir -p /usr/libexec/ccache-wrappers && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabi-c++ && \ @@ -166,3 +169,8 @@ ENV ABI "arm-linux-gnueabi" ENV MESON_OPTS "--cross-file=arm-linux-gnueabi" ENV QEMU_CONFIGURE_OPTS --cross-prefix=arm-linux-gnueabi- ENV DEF_TARGET_LIST arm-softmmu,arm-linux-user,armeb-linux-user +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-armhf-cross.docker b/tests/docker/dockerfiles/debian-armhf-cross.docker index 08a75cebdb..ae6600b25f 100644 --- a/tests/docker/dockerfiles/debian-armhf-cross.docker +++ b/tests/docker/dockerfiles/debian-armhf-cross.docker @@ -25,7 +25,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ findutils \ flex \ gcovr \ - genisoimage \ gettext \ git \ hostname \ @@ -37,6 +36,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ locales \ make \ meson \ + mtools \ ncat \ ninja-build \ openssh-client \ @@ -52,10 +52,13 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-yaml \ rpm2cpio \ sed \ + socat \ sparse \ tar \ tesseract-ocr \ - tesseract-ocr-eng && \ + tesseract-ocr-eng \ + xorriso \ + zstd && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ @@ -145,7 +148,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ mkdir -p /usr/local/share/meson/cross && \ - echo "[binaries]\n\ + printf "[binaries]\n\ c = '/usr/bin/arm-linux-gnueabihf-gcc'\n\ ar = '/usr/bin/arm-linux-gnueabihf-gcc-ar'\n\ strip = '/usr/bin/arm-linux-gnueabihf-strip'\n\ @@ -155,7 +158,7 @@ pkgconfig = '/usr/bin/arm-linux-gnueabihf-pkg-config'\n\ system = 'linux'\n\ cpu_family = 'arm'\n\ cpu = 'armhf'\n\ -endian = 'little'" > /usr/local/share/meson/cross/arm-linux-gnueabihf && \ +endian = 'little'\n" > /usr/local/share/meson/cross/arm-linux-gnueabihf && \ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ mkdir -p /usr/libexec/ccache-wrappers && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabihf-c++ && \ @@ -167,3 +170,8 @@ ENV ABI "arm-linux-gnueabihf" ENV MESON_OPTS "--cross-file=arm-linux-gnueabihf" ENV QEMU_CONFIGURE_OPTS --cross-prefix=arm-linux-gnueabihf- ENV DEF_TARGET_LIST arm-softmmu,arm-linux-user +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-hexagon-cross.docker b/tests/docker/dockerfiles/debian-hexagon-cross.docker index 8a0d748343..c2cfb6a5d0 100644 --- 
a/tests/docker/dockerfiles/debian-hexagon-cross.docker +++ b/tests/docker/dockerfiles/debian-hexagon-cross.docker @@ -20,16 +20,22 @@ RUN apt-get update && \ bison \ flex \ git \ - ninja-build && \ + ninja-build \ + python3-venv && \ # Install QEMU build deps for use in CI DEBIAN_FRONTEND=noninteractive eatmydata \ apt build-dep -yy --arch-only qemu ENV TOOLCHAIN_INSTALL /opt -ENV TOOLCHAIN_RELEASE 15.0.3 +ENV TOOLCHAIN_RELEASE 16.0.0 ENV TOOLCHAIN_BASENAME "clang+llvm-${TOOLCHAIN_RELEASE}-cross-hexagon-unknown-linux-musl" ENV TOOLCHAIN_URL https://codelinaro.jfrog.io/artifactory/codelinaro-toolchain-for-hexagon/v${TOOLCHAIN_RELEASE}/${TOOLCHAIN_BASENAME}.tar.xz RUN curl -#SL "$TOOLCHAIN_URL" | tar -xJC "$TOOLCHAIN_INSTALL" ENV PATH $PATH:${TOOLCHAIN_INSTALL}/${TOOLCHAIN_BASENAME}/x86_64-linux-gnu/bin +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-hppa-cross.docker b/tests/docker/dockerfiles/debian-hppa-cross.docker index af1c8403d8..dd47ffdfa4 100644 --- a/tests/docker/dockerfiles/debian-hppa-cross.docker +++ b/tests/docker/dockerfiles/debian-hppa-cross.docker @@ -12,3 +12,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-hppa-linux-gnu \ libc6-dev-hppa-cross +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-loongarch-cross.docker b/tests/docker/dockerfiles/debian-loongarch-cross.docker index a8e8e98909..9d957547b5 100644 --- a/tests/docker/dockerfiles/debian-loongarch-cross.docker +++ b/tests/docker/dockerfiles/debian-loongarch-cross.docker @@ -25,3 +25,8 @@ RUN curl -#SL https://github.com/loongson/build-tools/releases/download/2022.05. 
ENV PATH $PATH:/opt/cross-tools/bin ENV LD_LIBRARY_PATH /opt/cross-tools/lib:/opt/cross-tools/loongarch64-unknown-linux-gnu/lib:$LD_LIBRARY_PATH +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-m68k-cross.docker b/tests/docker/dockerfiles/debian-m68k-cross.docker index dded71c5d2..25dd1c1e68 100644 --- a/tests/docker/dockerfiles/debian-m68k-cross.docker +++ b/tests/docker/dockerfiles/debian-m68k-cross.docker @@ -12,3 +12,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-m68k-linux-gnu \ libc6-dev-m68k-cross +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-mips-cross.docker b/tests/docker/dockerfiles/debian-mips-cross.docker index 7b55f0f3b2..2cbc568ed1 100644 --- a/tests/docker/dockerfiles/debian-mips-cross.docker +++ b/tests/docker/dockerfiles/debian-mips-cross.docker @@ -12,3 +12,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-mips-linux-gnu \ libc6-dev-mips-cross +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-mips64-cross.docker b/tests/docker/dockerfiles/debian-mips64-cross.docker index afcff9726f..ba965cf564 100644 --- a/tests/docker/dockerfiles/debian-mips64-cross.docker +++ b/tests/docker/dockerfiles/debian-mips64-cross.docker @@ -12,3 +12,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-mips64-linux-gnuabi64 \ libc6-dev-mips64-cross +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-mips64el-cross.docker b/tests/docker/dockerfiles/debian-mips64el-cross.docker index 5930e6fa5d..daa2d48e36 100644 --- a/tests/docker/dockerfiles/debian-mips64el-cross.docker +++ b/tests/docker/dockerfiles/debian-mips64el-cross.docker @@ -25,7 +25,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ findutils \ flex \ gcovr \ - genisoimage \ gettext \ git \ hostname \ @@ -37,6 +36,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ locales \ make \ meson \ + mtools \ ncat \ ninja-build \ openssh-client \ @@ -52,10 +52,13 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-yaml \ rpm2cpio \ sed \ + socat \ sparse \ tar \ tesseract-ocr \ - tesseract-ocr-eng && \ + tesseract-ocr-eng \ + xorriso \ + zstd && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ @@ -142,7 +145,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ mkdir -p /usr/local/share/meson/cross && \ - echo "[binaries]\n\ + printf "[binaries]\n\ c = '/usr/bin/mips64el-linux-gnuabi64-gcc'\n\ ar = '/usr/bin/mips64el-linux-gnuabi64-gcc-ar'\n\ strip = '/usr/bin/mips64el-linux-gnuabi64-strip'\n\ @@ -152,7 +155,7 @@ pkgconfig = '/usr/bin/mips64el-linux-gnuabi64-pkg-config'\n\ system = 'linux'\n\ cpu_family = 'mips64'\n\ cpu = 'mips64el'\n\ -endian = 'little'" > 
/usr/local/share/meson/cross/mips64el-linux-gnuabi64 && \ +endian = 'little'\n" > /usr/local/share/meson/cross/mips64el-linux-gnuabi64 && \ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ mkdir -p /usr/libexec/ccache-wrappers && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/mips64el-linux-gnuabi64-c++ && \ @@ -164,3 +167,8 @@ ENV ABI "mips64el-linux-gnuabi64" ENV MESON_OPTS "--cross-file=mips64el-linux-gnuabi64" ENV QEMU_CONFIGURE_OPTS --cross-prefix=mips64el-linux-gnuabi64- ENV DEF_TARGET_LIST mips64el-softmmu,mips64el-linux-user +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-mipsel-cross.docker b/tests/docker/dockerfiles/debian-mipsel-cross.docker index c65d9830e7..5af04e2054 100644 --- a/tests/docker/dockerfiles/debian-mipsel-cross.docker +++ b/tests/docker/dockerfiles/debian-mipsel-cross.docker @@ -25,7 +25,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ findutils \ flex \ gcovr \ - genisoimage \ gettext \ git \ hostname \ @@ -37,6 +36,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ locales \ make \ meson \ + mtools \ ncat \ ninja-build \ openssh-client \ @@ -52,10 +52,13 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-yaml \ rpm2cpio \ sed \ + socat \ sparse \ tar \ tesseract-ocr \ - tesseract-ocr-eng && \ + tesseract-ocr-eng \ + xorriso \ + zstd && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ @@ -142,7 +145,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ mkdir -p /usr/local/share/meson/cross && \ - echo "[binaries]\n\ + printf "[binaries]\n\ c = '/usr/bin/mipsel-linux-gnu-gcc'\n\ ar = '/usr/bin/mipsel-linux-gnu-gcc-ar'\n\ strip = '/usr/bin/mipsel-linux-gnu-strip'\n\ @@ -152,7 +155,7 @@ pkgconfig = '/usr/bin/mipsel-linux-gnu-pkg-config'\n\ system = 'linux'\n\ cpu_family = 'mips'\n\ cpu = 'mipsel'\n\ -endian = 'little'" > /usr/local/share/meson/cross/mipsel-linux-gnu && \ +endian = 'little'\n" > /usr/local/share/meson/cross/mipsel-linux-gnu && \ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ mkdir -p /usr/libexec/ccache-wrappers && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/mipsel-linux-gnu-c++ && \ @@ -164,3 +167,8 @@ ENV ABI "mipsel-linux-gnu" ENV MESON_OPTS "--cross-file=mipsel-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=mipsel-linux-gnu- ENV DEF_TARGET_LIST mipsel-softmmu,mipsel-linux-user +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-native.docker b/tests/docker/dockerfiles/debian-native.docker index 8dd033097c..abac7d7cd7 100644 --- a/tests/docker/dockerfiles/debian-native.docker +++ b/tests/docker/dockerfiles/debian-native.docker @@ -47,3 +47,8 @@ RUN apt update && \ ENV QEMU_CONFIGURE_OPTS $QEMU_CONFIGURE_OPTS ENV DEF_TARGET_LIST "none" +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-powerpc-test-cross.docker b/tests/docker/dockerfiles/debian-powerpc-test-cross.docker index 
d6b2909cc4..23779413d3 100644 --- a/tests/docker/dockerfiles/debian-powerpc-test-cross.docker +++ b/tests/docker/dockerfiles/debian-powerpc-test-cross.docker @@ -16,4 +16,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libc6-dev-ppc64-cross \ gcc-10-powerpc64le-linux-gnu \ libc6-dev-ppc64el-cross - +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-ppc64el-cross.docker b/tests/docker/dockerfiles/debian-ppc64el-cross.docker index 2ae56c978e..1eeba7fcab 100644 --- a/tests/docker/dockerfiles/debian-ppc64el-cross.docker +++ b/tests/docker/dockerfiles/debian-ppc64el-cross.docker @@ -25,7 +25,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ findutils \ flex \ gcovr \ - genisoimage \ gettext \ git \ hostname \ @@ -37,6 +36,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ locales \ make \ meson \ + mtools \ ncat \ ninja-build \ openssh-client \ @@ -52,10 +52,13 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-yaml \ rpm2cpio \ sed \ + socat \ sparse \ tar \ tesseract-ocr \ - tesseract-ocr-eng && \ + tesseract-ocr-eng \ + xorriso \ + zstd && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ @@ -144,7 +147,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ mkdir -p /usr/local/share/meson/cross && \ - echo "[binaries]\n\ + printf "[binaries]\n\ c = '/usr/bin/powerpc64le-linux-gnu-gcc'\n\ ar = '/usr/bin/powerpc64le-linux-gnu-gcc-ar'\n\ strip = '/usr/bin/powerpc64le-linux-gnu-strip'\n\ @@ -154,7 +157,7 @@ pkgconfig = '/usr/bin/powerpc64le-linux-gnu-pkg-config'\n\ system = 'linux'\n\ cpu_family = 'ppc64'\n\ cpu = 'powerpc64le'\n\ -endian = 'little'" > /usr/local/share/meson/cross/powerpc64le-linux-gnu && \ +endian = 'little'\n" > /usr/local/share/meson/cross/powerpc64le-linux-gnu && \ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ mkdir -p /usr/libexec/ccache-wrappers && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/powerpc64le-linux-gnu-c++ && \ @@ -166,3 +169,8 @@ ENV ABI "powerpc64le-linux-gnu" ENV MESON_OPTS "--cross-file=powerpc64le-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=powerpc64le-linux-gnu- ENV DEF_TARGET_LIST ppc64-softmmu,ppc64-linux-user +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-riscv64-cross.docker b/tests/docker/dockerfiles/debian-riscv64-cross.docker index 3daf93968a..081404e014 100644 --- a/tests/docker/dockerfiles/debian-riscv64-cross.docker +++ b/tests/docker/dockerfiles/debian-riscv64-cross.docker @@ -28,7 +28,8 @@ RUN DEBIAN_FRONTEND=noninteractive eatmydata apt install -yy \ libglib2.0-dev \ ninja-build \ pkg-config \ - python3 + python3 \ + python3-venv # Add ports and riscv64 architecture RUN echo "deb http://ftp.ports.debian.org/debian-ports/ sid main" >> /etc/apt/sources.list @@ -50,3 +51,8 @@ RUN apt update && \ # Specify the cross prefix for this image (see tests/docker/common.rc) ENV QEMU_CONFIGURE_OPTS --cross-prefix=riscv64-linux-gnu- ENV DEF_TARGET_LIST riscv64-softmmu,riscv64-linux-user +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 
2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-riscv64-test-cross.docker b/tests/docker/dockerfiles/debian-riscv64-test-cross.docker index e5f83a5aeb..6e631295bc 100644 --- a/tests/docker/dockerfiles/debian-riscv64-test-cross.docker +++ b/tests/docker/dockerfiles/debian-riscv64-test-cross.docker @@ -12,3 +12,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-riscv64-linux-gnu \ libc6-dev-riscv64-cross +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-s390x-cross.docker b/tests/docker/dockerfiles/debian-s390x-cross.docker index 0db86a0fcd..52e89a6dab 100644 --- a/tests/docker/dockerfiles/debian-s390x-cross.docker +++ b/tests/docker/dockerfiles/debian-s390x-cross.docker @@ -25,7 +25,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ findutils \ flex \ gcovr \ - genisoimage \ gettext \ git \ hostname \ @@ -37,6 +36,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ locales \ make \ meson \ + mtools \ ncat \ ninja-build \ openssh-client \ @@ -52,10 +52,13 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-yaml \ rpm2cpio \ sed \ + socat \ sparse \ tar \ tesseract-ocr \ - tesseract-ocr-eng && \ + tesseract-ocr-eng \ + xorriso \ + zstd && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ @@ -143,7 +146,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ mkdir -p /usr/local/share/meson/cross && \ - echo "[binaries]\n\ + printf "[binaries]\n\ c = '/usr/bin/s390x-linux-gnu-gcc'\n\ ar = '/usr/bin/s390x-linux-gnu-gcc-ar'\n\ strip = '/usr/bin/s390x-linux-gnu-strip'\n\ @@ -153,7 +156,7 @@ pkgconfig = '/usr/bin/s390x-linux-gnu-pkg-config'\n\ system = 'linux'\n\ cpu_family = 's390x'\n\ cpu = 's390x'\n\ -endian = 'big'" > /usr/local/share/meson/cross/s390x-linux-gnu && \ +endian = 'big'\n" > /usr/local/share/meson/cross/s390x-linux-gnu && \ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ mkdir -p /usr/libexec/ccache-wrappers && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/s390x-linux-gnu-c++ && \ @@ -165,3 +168,8 @@ ENV ABI "s390x-linux-gnu" ENV MESON_OPTS "--cross-file=s390x-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=s390x-linux-gnu- ENV DEF_TARGET_LIST s390x-softmmu,s390x-linux-user +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-sh4-cross.docker b/tests/docker/dockerfiles/debian-sh4-cross.docker index d48ed9065f..6bd8171d33 100644 --- a/tests/docker/dockerfiles/debian-sh4-cross.docker +++ b/tests/docker/dockerfiles/debian-sh4-cross.docker @@ -12,3 +12,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-sh4-linux-gnu \ libc6-dev-sh4-cross +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-sparc64-cross.docker b/tests/docker/dockerfiles/debian-sparc64-cross.docker index 8d3d306bc1..1ef735f223 100644 --- 
a/tests/docker/dockerfiles/debian-sparc64-cross.docker +++ b/tests/docker/dockerfiles/debian-sparc64-cross.docker @@ -12,3 +12,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-sparc64-linux-gnu \ libc6-dev-sparc64-cross +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-toolchain.docker b/tests/docker/dockerfiles/debian-toolchain.docker index 6c73408b34..687a97fec4 100644 --- a/tests/docker/dockerfiles/debian-toolchain.docker +++ b/tests/docker/dockerfiles/debian-toolchain.docker @@ -30,7 +30,12 @@ ADD build-toolchain.sh /root/build-toolchain.sh RUN cd /root && ./build-toolchain.sh # Throw away the extra toolchain build deps, the downloaded source, -# and the build trees by restoring the original debian10 image, +# and the build trees by restoring the original image, # then copying the built toolchain from stage 0. -FROM docker.io/library/debian:bullseye-slim +FROM docker.io/library/debian:11-slim COPY --from=0 /usr/local /usr/local +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-tricore-cross.docker b/tests/docker/dockerfiles/debian-tricore-cross.docker index 5ae58efa09..269bfa8d42 100644 --- a/tests/docker/dockerfiles/debian-tricore-cross.docker +++ b/tests/docker/dockerfiles/debian-tricore-cross.docker @@ -20,6 +20,7 @@ RUN apt update && \ bzip2 \ ca-certificates \ ccache \ + curl \ flex \ g++ \ gcc \ @@ -32,16 +33,17 @@ RUN apt update && \ pkgconf \ python3-pip \ python3-setuptools \ - python3-wheel + python3-wheel \ + python3-venv -RUN git clone --single-branch \ - https://github.com/bkoppelmann/tricore-binutils.git \ - /usr/src/binutils && \ - cd /usr/src/binutils && chmod +x missing && \ - CFLAGS=-w ./configure --prefix=/usr/local --disable-nls --target=tricore && \ - make && make install && \ - rm -rf /usr/src/binutils +RUN curl -#SL https://github.com/bkoppelmann/package_940/releases/download/tricore-toolchain-9.40/tricore-toolchain-9.4.0.tar.gz \ | tar -xzC /usr/local/ # This image can only build a very minimal QEMU as well as the tests ENV DEF_TARGET_LIST tricore-softmmu ENV QEMU_CONFIGURE_OPTS --disable-user --disable-tools --disable-fdt +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-xtensa-cross.docker b/tests/docker/dockerfiles/debian-xtensa-cross.docker index 2f11b3b7bc..72c25d63d9 100644 --- a/tests/docker/dockerfiles/debian-xtensa-cross.docker +++ b/tests/docker/dockerfiles/debian-xtensa-cross.docker @@ -5,7 +5,7 @@ # using prebuilt toolchains for Xtensa cores from: # https://github.com/foss-xtensa/toolchain/releases # -FROM docker.io/library/debian:stretch-slim +FROM docker.io/library/debian:11-slim RUN apt-get update && \ DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ @@ -27,3 +27,8 @@ RUN for cpu in $CPU_LIST; do \ done ENV PATH $PATH:/opt/$TOOLCHAIN_RELEASE/xtensa-dc232b-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-dc233c-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-de233_fpu-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-dsp3400-elf/bin +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [
"${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/fedora-cris-cross.docker b/tests/docker/dockerfiles/fedora-cris-cross.docker index 91c373fdd3..f2899af410 100644 --- a/tests/docker/dockerfiles/fedora-cris-cross.docker +++ b/tests/docker/dockerfiles/fedora-cris-cross.docker @@ -6,3 +6,8 @@ FROM registry.fedoraproject.org/fedora:33 ENV PACKAGES gcc-cris-linux-gnu RUN dnf install -y $PACKAGES RUN rpm -q $PACKAGES | sort > /packages.txt +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/fedora-i386-cross.docker b/tests/docker/dockerfiles/fedora-i386-cross.docker index f58b64dc3e..14c1fb2c93 100644 --- a/tests/docker/dockerfiles/fedora-i386-cross.docker +++ b/tests/docker/dockerfiles/fedora-i386-cross.docker @@ -32,3 +32,8 @@ ENV PKG_CONFIG_LIBDIR /usr/lib/pkgconfig RUN dnf update -y && dnf install -y $PACKAGES RUN rpm -q $PACKAGES | sort > /packages.txt +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/fedora-win32-cross.docker b/tests/docker/dockerfiles/fedora-win32-cross.docker index b659c0b8a8..dc72ae9cc9 100644 --- a/tests/docker/dockerfiles/fedora-win32-cross.docker +++ b/tests/docker/dockerfiles/fedora-win32-cross.docker @@ -7,14 +7,14 @@ FROM registry.fedoraproject.org/fedora:37 RUN dnf install -y nosync && \ - echo -e '#!/bin/sh\n\ + printf '#!/bin/sh\n\ if test -d /usr/lib64\n\ then\n\ export LD_PRELOAD=/usr/lib64/nosync/nosync.so\n\ else\n\ export LD_PRELOAD=/usr/lib/nosync/nosync.so\n\ fi\n\ -exec "$@"' > /usr/bin/nosync && \ +exec "$@"\n' > /usr/bin/nosync && \ chmod +x /usr/bin/nosync && \ nosync dnf update -y && \ nosync dnf install -y \ @@ -30,7 +30,6 @@ exec "$@"' > /usr/bin/nosync && \ findutils \ flex \ gcovr \ - genisoimage \ git \ glib2-devel \ glibc-langpack-en \ @@ -38,6 +37,7 @@ exec "$@"' > /usr/bin/nosync && \ llvm \ make \ meson \ + mtools \ ninja-build \ nmap-ncat \ openssh-clients \ @@ -50,15 +50,17 @@ exec "$@"' > /usr/bin/nosync && \ python3-pip \ python3-sphinx \ python3-sphinx_rtd_theme \ - rpm \ sed \ + socat \ sparse \ spice-protocol \ tar \ tesseract \ tesseract-langpack-eng \ util-linux \ - which && \ + which \ + xorriso \ + zstd && \ nosync dnf autoremove -y && \ nosync dnf clean all -y @@ -79,6 +81,7 @@ RUN nosync dnf install -y \ mingw32-glib2 \ mingw32-gnutls \ mingw32-gtk3 \ + mingw32-libepoxy \ mingw32-libgcrypt \ mingw32-libjpeg-turbo \ mingw32-libpng \ @@ -99,3 +102,8 @@ ENV ABI "i686-w64-mingw32" ENV MESON_OPTS "--cross-file=/usr/share/mingw/toolchain-mingw32.meson" ENV QEMU_CONFIGURE_OPTS --cross-prefix=i686-w64-mingw32- ENV DEF_TARGET_LIST i386-softmmu +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/fedora-win64-cross.docker b/tests/docker/dockerfiles/fedora-win64-cross.docker index 0a404c15bf..7eb4a5dba2 100644 --- a/tests/docker/dockerfiles/fedora-win64-cross.docker +++ b/tests/docker/dockerfiles/fedora-win64-cross.docker @@ -7,14 +7,14 @@ FROM registry.fedoraproject.org/fedora:37 RUN dnf install -y nosync && \ - echo -e '#!/bin/sh\n\ + printf '#!/bin/sh\n\ if test -d /usr/lib64\n\ then\n\ export 
LD_PRELOAD=/usr/lib64/nosync/nosync.so\n\ else\n\ export LD_PRELOAD=/usr/lib/nosync/nosync.so\n\ fi\n\ -exec "$@"' > /usr/bin/nosync && \ +exec "$@"\n' > /usr/bin/nosync && \ chmod +x /usr/bin/nosync && \ nosync dnf update -y && \ nosync dnf install -y \ @@ -30,7 +30,6 @@ exec "$@"' > /usr/bin/nosync && \ findutils \ flex \ gcovr \ - genisoimage \ git \ glib2-devel \ glibc-langpack-en \ @@ -38,6 +37,7 @@ exec "$@"' > /usr/bin/nosync && \ llvm \ make \ meson \ + mtools \ ninja-build \ nmap-ncat \ openssh-clients \ @@ -50,15 +50,17 @@ exec "$@"' > /usr/bin/nosync && \ python3-pip \ python3-sphinx \ python3-sphinx_rtd_theme \ - rpm \ sed \ + socat \ sparse \ spice-protocol \ tar \ tesseract \ tesseract-langpack-eng \ util-linux \ - which && \ + which \ + xorriso \ + zstd && \ nosync dnf autoremove -y && \ nosync dnf clean all -y @@ -80,6 +82,7 @@ RUN nosync dnf install -y \ mingw64-glib2 \ mingw64-gnutls \ mingw64-gtk3 \ + mingw64-libepoxy \ mingw64-libgcrypt \ mingw64-libjpeg-turbo \ mingw64-libpng \ @@ -99,3 +102,8 @@ ENV ABI "x86_64-w64-mingw32" ENV MESON_OPTS "--cross-file=/usr/share/mingw/toolchain-mingw64.meson" ENV QEMU_CONFIGURE_OPTS --cross-prefix=x86_64-w64-mingw32- ENV DEF_TARGET_LIST x86_64-softmmu +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/fedora.docker b/tests/docker/dockerfiles/fedora.docker index 5d60a96141..3a69eefdda 100644 --- a/tests/docker/dockerfiles/fedora.docker +++ b/tests/docker/dockerfiles/fedora.docker @@ -7,14 +7,14 @@ FROM registry.fedoraproject.org/fedora:37 RUN dnf install -y nosync && \ - echo -e '#!/bin/sh\n\ + printf '#!/bin/sh\n\ if test -d /usr/lib64\n\ then\n\ export LD_PRELOAD=/usr/lib64/nosync/nosync.so\n\ else\n\ export LD_PRELOAD=/usr/lib/nosync/nosync.so\n\ fi\n\ -exec "$@"' > /usr/bin/nosync && \ +exec "$@"\n' > /usr/bin/nosync && \ chmod +x /usr/bin/nosync && \ nosync dnf update -y && \ nosync dnf install -y \ @@ -43,7 +43,6 @@ exec "$@"' > /usr/bin/nosync && \ gcc \ gcc-c++ \ gcovr \ - genisoimage \ gettext \ git \ glib2-devel \ @@ -90,6 +89,7 @@ exec "$@"' > /usr/bin/nosync && \ make \ mesa-libgbm-devel \ meson \ + mtools \ ncurses-devel \ nettle-devel \ ninja-build \ @@ -110,9 +110,9 @@ exec "$@"' > /usr/bin/nosync && \ python3-sphinx \ python3-sphinx_rtd_theme \ rdma-core-devel \ - rpm \ sed \ snappy-devel \ + socat \ sparse \ spice-protocol \ spice-server-devel \ @@ -128,8 +128,10 @@ exec "$@"' > /usr/bin/nosync && \ which \ xen-devel \ xfsprogs-devel \ + xorriso \ zlib-devel \ - zlib-static && \ + zlib-static \ + zstd && \ nosync dnf autoremove -y && \ nosync dnf clean all -y && \ rpm -qa | sort > /packages.txt && \ @@ -145,3 +147,8 @@ ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" ENV NINJA "/usr/bin/ninja" ENV PYTHON "/usr/bin/python3" +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/opensuse-leap.docker b/tests/docker/dockerfiles/opensuse-leap.docker index 4b2c02d6ab..185abe57d8 100644 --- a/tests/docker/dockerfiles/opensuse-leap.docker +++ b/tests/docker/dockerfiles/opensuse-leap.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all opensuse-leap-153 qemu +# $ lcitool dockerfile --layers all opensuse-leap-15 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM 
registry.opensuse.org/opensuse/leap:15.3 +FROM registry.opensuse.org/opensuse/leap:15.4 RUN zypper update -y && \ zypper install -y \ @@ -81,7 +81,7 @@ RUN zypper update -y && \ lttng-ust-devel \ lzo-devel \ make \ - mkisofs \ + mtools \ ncat \ ncurses-devel \ ninja \ @@ -89,21 +89,14 @@ RUN zypper update -y && \ pam-devel \ pcre-devel-static \ pkgconfig \ - python3-Pillow \ - python3-PyYAML \ - python3-Sphinx \ - python3-base \ - python3-numpy \ - python3-opencv \ - python3-pip \ - python3-setuptools \ - python3-sphinx_rtd_theme \ - python3-wheel \ + python39-base \ + python39-pip \ + python39-setuptools \ rdma-core-devel \ - rpm \ sed \ snappy-devel \ sndio-devel \ + socat \ sparse \ spice-protocol-devel \ systemd-devel \ @@ -118,8 +111,10 @@ RUN zypper update -y && \ which \ xen-devel \ xfsprogs-devel \ + xorriso \ zlib-devel \ - zlib-devel-static && \ + zlib-devel-static \ + zstd && \ zypper clean --all && \ rpm -qa | sort > /packages.txt && \ mkdir -p /usr/libexec/ccache-wrappers && \ @@ -129,10 +124,20 @@ RUN zypper update -y && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc -RUN /usr/bin/pip3 install meson==0.56.0 +RUN /usr/bin/pip3.9 install \ + PyYAML \ + meson==0.63.2 \ + pillow \ + sphinx \ + sphinx-rtd-theme ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" ENV NINJA "/usr/bin/ninja" -ENV PYTHON "/usr/bin/python3" +ENV PYTHON "/usr/bin/python3.9" +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/python.docker b/tests/docker/dockerfiles/python.docker index 175c10a34e..383ccbdc3a 100644 --- a/tests/docker/dockerfiles/python.docker +++ b/tests/docker/dockerfiles/python.docker @@ -15,3 +15,8 @@ ENV PACKAGES \ RUN dnf install -y $PACKAGES RUN rpm -q $PACKAGES | sort > /packages.txt +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/ubuntu2004.docker b/tests/docker/dockerfiles/ubuntu2004.docker index 13ab0b6887..8f864d19e6 100644 --- a/tests/docker/dockerfiles/ubuntu2004.docker +++ b/tests/docker/dockerfiles/ubuntu2004.docker @@ -28,7 +28,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ g++ \ gcc \ gcovr \ - genisoimage \ gettext \ git \ hostname \ @@ -100,6 +99,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ llvm \ locales \ make \ + mtools \ multipath-tools \ ncat \ nettle-dev \ @@ -119,13 +119,16 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-yaml \ rpm2cpio \ sed \ + socat \ sparse \ systemtap-sdt-dev \ tar \ tesseract-ocr \ tesseract-ocr-eng \ xfslibs-dev \ - zlib1g-dev && \ + xorriso \ + zlib1g-dev \ + zstd && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ @@ -138,13 +141,15 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc -RUN /usr/bin/pip3 install meson==0.56.0 +RUN /usr/bin/pip3 install meson==0.63.2 ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" ENV NINJA "/usr/bin/ninja" ENV PYTHON "/usr/bin/python3" -# Apply patch https://reviews.llvm.org/D75820 -# This is required for TSan in 
clang-10 to compile with QEMU. -RUN sed -i 's/^const/static const/g' /usr/lib/llvm-10/lib/clang/10.0.0/include/sanitizer/tsan_interface.h +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/ubuntu2204.docker b/tests/docker/dockerfiles/ubuntu2204.docker new file mode 100644 index 0000000000..1d442cdfe6 --- /dev/null +++ b/tests/docker/dockerfiles/ubuntu2204.docker @@ -0,0 +1,154 @@ +# THIS FILE WAS AUTO-GENERATED +# +# $ lcitool dockerfile --layers all ubuntu-2204 qemu +# +# https://gitlab.com/libvirt/libvirt-ci + +FROM docker.io/library/ubuntu:22.04 + +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y eatmydata && \ + eatmydata apt-get dist-upgrade -y && \ + eatmydata apt-get install --no-install-recommends -y \ + bash \ + bc \ + bison \ + bsdextrautils \ + bzip2 \ + ca-certificates \ + ccache \ + clang \ + dbus \ + debianutils \ + diffutils \ + exuberant-ctags \ + findutils \ + flex \ + g++ \ + gcc \ + gcovr \ + gettext \ + git \ + hostname \ + libaio-dev \ + libasan5 \ + libasound2-dev \ + libattr1-dev \ + libbpf-dev \ + libbrlapi-dev \ + libbz2-dev \ + libc6-dev \ + libcacard-dev \ + libcap-ng-dev \ + libcapstone-dev \ + libcmocka-dev \ + libcurl4-gnutls-dev \ + libdaxctl-dev \ + libdrm-dev \ + libepoxy-dev \ + libfdt-dev \ + libffi-dev \ + libfuse3-dev \ + libgbm-dev \ + libgcrypt20-dev \ + libglib2.0-dev \ + libglusterfs-dev \ + libgnutls28-dev \ + libgtk-3-dev \ + libibumad-dev \ + libibverbs-dev \ + libiscsi-dev \ + libjemalloc-dev \ + libjpeg-turbo8-dev \ + libjson-c-dev \ + liblttng-ust-dev \ + liblzo2-dev \ + libncursesw5-dev \ + libnfs-dev \ + libnuma-dev \ + libpam0g-dev \ + libpcre2-dev \ + libpixman-1-dev \ + libpmem-dev \ + libpng-dev \ + libpulse-dev \ + librbd-dev \ + librdmacm-dev \ + libsasl2-dev \ + libsdl2-dev \ + libsdl2-image-dev \ + libseccomp-dev \ + libselinux1-dev \ + libslirp-dev \ + libsnappy-dev \ + libsndio-dev \ + libspice-protocol-dev \ + libspice-server-dev \ + libssh-dev \ + libsystemd-dev \ + libtasn1-6-dev \ + libubsan1 \ + libudev-dev \ + liburing-dev \ + libusb-1.0-0-dev \ + libusbredirhost-dev \ + libvdeplug-dev \ + libvirglrenderer-dev \ + libvte-2.91-dev \ + libxen-dev \ + libzstd-dev \ + llvm \ + locales \ + make \ + meson \ + mtools \ + multipath-tools \ + ncat \ + nettle-dev \ + ninja-build \ + openssh-client \ + pkgconf \ + python3 \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + python3-venv \ + python3-yaml \ + rpm2cpio \ + sed \ + socat \ + sparse \ + systemtap-sdt-dev \ + tar \ + tesseract-ocr \ + tesseract-ocr-eng \ + xfslibs-dev \ + xorriso \ + zlib1g-dev \ + zstd && \ + eatmydata apt-get autoremove -y && \ + eatmydata apt-get autoclean -y && \ + sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ + dpkg-reconfigure locales && \ + dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/c++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc + +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE 
"/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/test-tsan b/tests/docker/test-tsan index 53d90d2f79..f6d6590e39 100755 --- a/tests/docker/test-tsan +++ b/tests/docker/test-tsan @@ -21,7 +21,7 @@ setup_tsan() tsan_log_dir="/tmp/qemu-test/build/tsan" mkdir -p $tsan_log_dir > /dev/null || true EXTRA_CONFIGURE_OPTS="${EXTRA_CONFIGURE_OPTS} --enable-tsan \ - --cc=clang-10 --cxx=clang++-10 \ + --cc=clang --cxx=clang++ \ --disable-werror --extra-cflags=-O0" # detect deadlocks is false currently simply because # TSan crashes immediately with deadlock detector enabled. diff --git a/tests/fp/berkeley-testfloat-3 b/tests/fp/berkeley-testfloat-3 index 5a59dcec19..40619cbb3b 160000 --- a/tests/fp/berkeley-testfloat-3 +++ b/tests/fp/berkeley-testfloat-3 @@ -1 +1 @@ -Subproject commit 5a59dcec19327396a011a17fd924aed4fec416b3 +Subproject commit 40619cbb3bf32872df8c53cc457039229428a263 diff --git a/tests/fp/fp-test.c b/tests/fp/fp-test.c index 35829ad5f7..36b5712cda 100644 --- a/tests/fp/fp-test.c +++ b/tests/fp/fp-test.c @@ -106,7 +106,8 @@ static const char commands_string[] = " -l = thoroughness level (1 (default), 2)\n" " -r = rounding mode (even (default), zero, down, up, tieaway, odd)\n" " Set to 'all' to test all rounding modes, if applicable\n" - " -s = stop when a test fails"; + " -s = stop when a test fails\n" + " -q = minimise noise when testing, just show each function being tested"; static void usage_complete(int argc, char *argv[]) { @@ -190,9 +191,11 @@ static void do_testfloat(int op, int rmode, bool exact) ab_f128M_z_bool true_ab_f128M_z_bool; ab_f128M_z_bool subj_ab_f128M_z_bool; - fputs(">> Testing ", stderr); - verCases_writeFunctionName(stderr); - fputs("\n", stderr); + if (verCases_verbosity) { + fputs(">> Testing ", stderr); + verCases_writeFunctionName(stderr); + fputs("\n", stderr); + } if (!is_allowed(op, rmode)) { not_implemented(); @@ -837,7 +840,7 @@ static void parse_args(int argc, char *argv[]) int c; for (;;) { - c = getopt(argc, argv, "he:f:l:r:s"); + c = getopt(argc, argv, "he:f:l:r:sq"); if (c < 0) { break; } @@ -874,9 +877,15 @@ static void parse_args(int argc, char *argv[]) } } break; + /* + * The following flags are declared in testfloat/source/verCases_common.c + */ case 's': verCases_errorStop = true; break; + case 'q': + verCases_verbosity = 0; + break; case '?': /* invalid option or missing argument; getopt prints error info */ exit(EXIT_FAILURE); diff --git a/tests/fp/meson.build b/tests/fp/meson.build index 312a4d301f..f9ca6a93b4 100644 --- a/tests/fp/meson.build +++ b/tests/fp/meson.build @@ -609,7 +609,7 @@ softfloat_tests = { # The full test suite can take a bit of time, default to a quick run # "-l 2 -r all" can take more than a day for some operations and is best # run manually -fptest_args = ['-s', '-l', '1'] +fptest_args = ['-q', '-s', '-l', '1'] fptest_rounding_args = ['-r', 'all'] # Conversion Routines: diff --git a/tests/guest-debug/run-test.py b/tests/guest-debug/run-test.py index d865e46ecd..de6106a5e5 100755 --- a/tests/guest-debug/run-test.py +++ b/tests/guest-debug/run-test.py @@ -26,11 +26,12 @@ def get_args(): parser.add_argument("--qargs", help="Qemu arguments for test") parser.add_argument("--binary", help="Binary to debug", required=True) - parser.add_argument("--test", help="GDB test script", - required=True) + 
parser.add_argument("--test", help="GDB test script") parser.add_argument("--gdb", help="The gdb binary to use", default=None) + parser.add_argument("--gdb-args", help="Additional gdb arguments") parser.add_argument("--output", help="A file to redirect output to") + parser.add_argument("--stderr", help="A file to redirect stderr to") return parser.parse_args() @@ -58,6 +59,10 @@ if __name__ == '__main__': output = open(args.output, "w") else: output = None + if args.stderr: + stderr = open(args.stderr, "w") + else: + stderr = None socket_dir = TemporaryDirectory("qemu-gdbstub") socket_name = os.path.join(socket_dir.name, "gdbstub.socket") @@ -77,6 +82,8 @@ if __name__ == '__main__': # Now launch gdb with our test and collect the result gdb_cmd = "%s %s" % (args.gdb, args.binary) + if args.gdb_args: + gdb_cmd += " %s" % (args.gdb_args) # run quietly and ignore .gdbinit gdb_cmd += " -q -n -batch" # disable prompts in case of crash @@ -84,13 +91,14 @@ if __name__ == '__main__': # connect to remote gdb_cmd += " -ex 'target remote %s'" % (socket_name) # finally the test script itself - gdb_cmd += " -x %s" % (args.test) + if args.test: + gdb_cmd += " -x %s" % (args.test) sleep(1) log(output, "GDB CMD: %s" % (gdb_cmd)) - result = subprocess.call(gdb_cmd, shell=True, stdout=output) + result = subprocess.call(gdb_cmd, shell=True, stdout=output, stderr=stderr) # A result of greater than 128 indicates a fatal signal (likely a # crash due to gdb internal failure). That's a problem for GDB and diff --git a/tests/lcitool/libvirt-ci b/tests/lcitool/libvirt-ci index 319a534c22..c8971e90ac 160000 --- a/tests/lcitool/libvirt-ci +++ b/tests/lcitool/libvirt-ci @@ -1 +1 @@ -Subproject commit 319a534c220f53fc8670254cac25d6f662c82112 +Subproject commit c8971e90ac169ee2b539c747f74d96c876debdf9 diff --git a/tests/lcitool/mappings.yml b/tests/lcitool/mappings.yml new file mode 100644 index 0000000000..454963f07b --- /dev/null +++ b/tests/lcitool/mappings.yml @@ -0,0 +1,77 @@ +mappings: + flake8: + CentOSStream8: + OpenSUSELeap15: + + meson: + CentOSStream8: + OpenSUSELeap15: + + python3: + CentOSStream8: python38 + OpenSUSELeap15: python39-base + + python3-PyYAML: + CentOSStream8: python38-PyYAML + OpenSUSELeap15: + + python3-devel: + CentOSStream8: python38-devel + OpenSUSELeap15: python39-devel + + python3-docutils: + CentOSStream8: + OpenSUSELeap15: + + python3-numpy: + CentOSStream8: python38-numpy + OpenSUSELeap15: + + python3-opencv: + CentOSStream8: + OpenSUSELeap15: + + python3-pillow: + CentOSStream8: + OpenSUSELeap15: + + python3-pip: + CentOSStream8: python38-pip + OpenSUSELeap15: python39-pip + + python3-pillow: + CentOSStream8: + OpenSUSELeap15: + + python3-selinux: + CentOSStream8: + OpenSUSELeap15: + + python3-setuptools: + CentOSStream8: python38-setuptools + OpenSUSELeap15: python39-setuptools + + python3-sphinx: + CentOSStream8: + OpenSUSELeap15: + + python3-sphinx-rtd-theme: + CentOSStream8: + OpenSUSELeap15: + + python3-venv: + CentOSStream8: python38 + OpenSUSELeap15: python39-base + + python3-wheel: + CentOSStream8: python38-wheel + OpenSUSELeap15: python39-pip + +pypi_mappings: + # Request more recent version + meson: + default: meson==0.63.2 + + # Drop packages that need devel headers + python3-numpy: + OpenSUSELeap15: diff --git a/tests/lcitool/projects/qemu.yml b/tests/lcitool/projects/qemu.yml index 6467bcf08a..566db8313b 100644 --- a/tests/lcitool/projects/qemu.yml +++ b/tests/lcitool/projects/qemu.yml @@ -26,7 +26,6 @@ packages: - gcc - gcovr - gettext - - genisoimage - glib2 - 
glib2-native - glib2-static @@ -73,6 +72,7 @@ packages: - llvm - lttng-ust - lzo + - mtools - netcat - nettle - ninja @@ -101,6 +101,7 @@ - sed - snappy - sndio + - socat - sparse - spice-protocol - spice-server @@ -115,5 +116,7 @@ - which - xen - xfsprogs + - xorriso + - zstdtools - zlib - zlib-static diff --git a/tests/lcitool/refresh b/tests/lcitool/refresh index a5ea0efc3b..f1570b54df 100755 --- a/tests/lcitool/refresh +++ b/tests/lcitool/refresh @@ -53,6 +53,15 @@ def generate(filename, cmd, trailer): content += trailer atomic_write(filename, content) +# Optional user setting; this will always be the last thing added, +# to maximise the number of layers that are cached +add_user_mapping = [ + "# As a final step configure the user (if env is defined)", + "ARG USER", + "ARG UID", + "RUN if [ \"${USER}\" ]; then \\", + " id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi\n" +] def generate_dockerfile(host, target, cross=None, trailer=None): filename = Path(src_dir, "tests", "docker", "dockerfiles", host + ".docker") @@ -60,6 +69,12 @@ if cross is not None: cmd.extend(["--cross", cross]) cmd.extend([target, "qemu"]) + + if trailer is not None: + trailer += "\n".join(add_user_mapping) + else: + trailer = "\n".join(add_user_mapping) + generate(filename, cmd, trailer) @@ -69,13 +84,6 @@ def generate_cirrus(target, trailer=None): generate(filename, cmd, trailer) -ubuntu2004_tsanhack = [ - "# Apply patch https://reviews.llvm.org/D75820\n", - "# This is required for TSan in clang-10 to compile with QEMU.\n", - "RUN sed -i 's/^const/static const/g' /usr/lib/llvm-10/lib/clang/10.0.0/include/sanitizer/tsan_interface.h\n" -] - - # Netmap still needs to be manually built as it is yet to be packaged # into a distro.
We also add cscope and gtags which are used in the CI # test @@ -112,9 +120,9 @@ try: generate_dockerfile("debian-amd64", "debian-11", trailer="".join(debian11_extras)) generate_dockerfile("fedora", "fedora-37") - generate_dockerfile("opensuse-leap", "opensuse-leap-153") - generate_dockerfile("ubuntu2004", "ubuntu-2004", - trailer="".join(ubuntu2004_tsanhack)) + generate_dockerfile("opensuse-leap", "opensuse-leap-15") + generate_dockerfile("ubuntu2004", "ubuntu-2004") + generate_dockerfile("ubuntu2204", "ubuntu-2204") # # Cross compiling builds @@ -174,7 +182,6 @@ try: # # Cirrus packages lists for GitLab # - generate_cirrus("freebsd-12") generate_cirrus("freebsd-13") generate_cirrus("macos-12") diff --git a/tests/lcitool/targets/centos-stream-8.yml b/tests/lcitool/targets/centos-stream-8.yml new file mode 100644 index 0000000000..6b11160fd1 --- /dev/null +++ b/tests/lcitool/targets/centos-stream-8.yml @@ -0,0 +1,3 @@ +paths: + pip3: /usr/bin/pip3.8 + python: /usr/bin/python3.8 diff --git a/tests/lcitool/targets/opensuse-leap-15.yml b/tests/lcitool/targets/opensuse-leap-15.yml new file mode 100644 index 0000000000..683016e007 --- /dev/null +++ b/tests/lcitool/targets/opensuse-leap-15.yml @@ -0,0 +1,3 @@ +paths: + pip3: /usr/bin/pip3.9 + python: /usr/bin/python3.9 diff --git a/tests/meson.build b/tests/meson.build index 8e318ec513..083f2990bd 100644 --- a/tests/meson.build +++ b/tests/meson.build @@ -74,10 +74,7 @@ if have_tools and have_vhost_user and 'CONFIG_LINUX' in config_host dependencies: [qemuutil, vhost_user]) endif -test('decodetree', sh, - args: [ files('decode/check.sh'), config_host['PYTHON'], files('../scripts/decodetree.py') ], - workdir: meson.current_source_dir() / 'decode', - suite: 'decodetree') +subdir('decode') if 'CONFIG_TCG' in config_all subdir('fp') diff --git a/tests/migration/meson.build b/tests/migration/meson.build index dd562355a1..ac71f13290 100644 --- a/tests/migration/meson.build +++ b/tests/migration/meson.build @@ -1,9 +1,11 @@ sysprof = dependency('sysprof-capture-4', required: false) +glib_static = dependency('glib-2.0', version: glib_req_ver, required: false, + method: 'pkg-config', static: true) stress = executable( 'stress', files('stress.c'), - dependencies: [glib, sysprof], + dependencies: [glib_static, sysprof], link_args: ['-static'], build_by_default: false, ) diff --git a/tests/qapi-schema/args-if-implicit.err b/tests/qapi-schema/args-if-implicit.err new file mode 100644 index 0000000000..da2447d397 --- /dev/null +++ b/tests/qapi-schema/args-if-implicit.err @@ -0,0 +1,2 @@ +args-if-implicit.json: In command 'test-if-cmd': +args-if-implicit.json:1: conditional command arguments require 'boxed': true diff --git a/tests/qapi-schema/args-if-implicit.json b/tests/qapi-schema/args-if-implicit.json new file mode 100644 index 0000000000..1eda39cb1e --- /dev/null +++ b/tests/qapi-schema/args-if-implicit.json @@ -0,0 +1,4 @@ +{ 'command': 'test-if-cmd', + 'data': { + 'foo': 'int', + 'bar': { 'type': 'str', 'if': 'TEST_IF_CMD_ARG' } } } diff --git a/tests/qapi-schema/args-if-implicit.out b/tests/qapi-schema/args-if-implicit.out new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/qapi-schema/args-if-unboxed.err b/tests/qapi-schema/args-if-unboxed.err new file mode 100644 index 0000000000..3d2fc836ef --- /dev/null +++ b/tests/qapi-schema/args-if-unboxed.err @@ -0,0 +1,2 @@ +args-if-unboxed.json: In command 'test-if-cmd': +args-if-unboxed.json:5: conditional command arguments require 'boxed': true diff --git 
a/tests/qapi-schema/args-if-unboxed.json b/tests/qapi-schema/args-if-unboxed.json new file mode 100644 index 0000000000..6e04c13e72 --- /dev/null +++ b/tests/qapi-schema/args-if-unboxed.json @@ -0,0 +1,6 @@ +{ 'struct': 'TestIfCmdArgs', + 'data': { + 'foo': 'int', + 'bar': { 'type': 'str', 'if': 'TEST_IF_CMD_ARG' } } } +{ 'command': 'test-if-cmd', + 'data': 'TestIfCmdArgs' } diff --git a/tests/qapi-schema/args-if-unboxed.out b/tests/qapi-schema/args-if-unboxed.out new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/qapi-schema/bad-data.err b/tests/qapi-schema/bad-data.err index 7991c8898d..a987df4108 100644 --- a/tests/qapi-schema/bad-data.err +++ b/tests/qapi-schema/bad-data.err @@ -1,2 +1,2 @@ bad-data.json: In command 'oops': -bad-data.json:2: 'data' cannot be an array +bad-data.json:2: 'data' should be an object or type name diff --git a/tests/qapi-schema/doc-bad-indent.err b/tests/qapi-schema/doc-bad-indent.err index 67844539bd..3c9699a8e0 100644 --- a/tests/qapi-schema/doc-bad-indent.err +++ b/tests/qapi-schema/doc-bad-indent.err @@ -1 +1 @@ -doc-bad-indent.json:6:1: unexpected de-indent (expected at least 4 spaces) +doc-bad-indent.json:7:1: unexpected de-indent (expected at least 2 spaces) diff --git a/tests/qapi-schema/doc-bad-indent.json b/tests/qapi-schema/doc-bad-indent.json index edde8f21dc..3f22a27717 100644 --- a/tests/qapi-schema/doc-bad-indent.json +++ b/tests/qapi-schema/doc-bad-indent.json @@ -3,6 +3,7 @@ ## # @foo: # @a: line one -# line two is wrongly indented +# line two +# line three is wrongly indented ## { 'command': 'foo', 'data': { 'a': 'int' } } diff --git a/tests/qapi-schema/doc-good.json b/tests/qapi-schema/doc-good.json index 74745fb405..354dfdf461 100644 --- a/tests/qapi-schema/doc-good.json +++ b/tests/qapi-schema/doc-good.json @@ -54,7 +54,7 @@ ## # @Enum: # -# @one: The _one_ {and only} +# @one: The _one_ {and only}, description on the same line # # Features: # @enum-feat: Also _one_ {and only} @@ -73,7 +73,8 @@ # @Base: # # @base1: -# the first member +# description starts on a new line, +# not indented ## { 'struct': 'Base', 'data': { 'base1': 'Enum' }, 'if': { 'all': ['IFALL1', 'IFALL2'] } } @@ -83,7 +84,9 @@ # # A paragraph # -# Another paragraph (but no @var: line) +# Another paragraph +# +# @var1 is undocumented # # Features: # @variant1-feat: a feature @@ -118,7 +121,8 @@ ## # @Alternate: # -# @i: an integer +# @i: description starts on the same line +# remainder indented the same # @b is undocumented # # Features: @@ -136,10 +140,12 @@ ## # @cmd: # -# @arg1: the first argument +# @arg1: +# description starts on a new line, +# indented # -# @arg2: the second -# argument +# @arg2: description starts on the same line +# remainder indented differently # # Features: # @cmd-feat1: a feature diff --git a/tests/qapi-schema/doc-good.out b/tests/qapi-schema/doc-good.out index 9dd65b9d92..24d9ea954d 100644 --- a/tests/qapi-schema/doc-good.out +++ b/tests/qapi-schema/doc-good.out @@ -104,7 +104,7 @@ doc symbol=Enum body= arg=one -The _one_ {and only} +The _one_ {and only}, description on the same line arg=two feature=enum-feat @@ -117,12 +117,15 @@ doc symbol=Base body= arg=base1 -the first member +description starts on a new line, +not indented doc symbol=Variant1 body= A paragraph -Another paragraph (but no @var: line) +Another paragraph + +@var1 is undocumented arg=var1 feature=variant1-feat @@ -141,7 +144,8 @@ doc symbol=Alternate body= arg=i -an integer +description starts on the same line +remainder indented the same @b is undocumented 
arg=b @@ -154,10 +158,11 @@ doc symbol=cmd body= arg=arg1 -the first argument +description starts on a new line, +indented arg=arg2 -the second -argument +description starts on the same line +remainder indented differently arg=arg3 feature=cmd-feat1 diff --git a/tests/qapi-schema/doc-interleaved-section.err b/tests/qapi-schema/doc-interleaved-section.err index 715d58cd31..e5d1ef54c1 100644 --- a/tests/qapi-schema/doc-interleaved-section.err +++ b/tests/qapi-schema/doc-interleaved-section.err @@ -1 +1 @@ -doc-interleaved-section.json:15:1: '@foobar:' can't follow 'Note' section +doc-interleaved-section.json:15:1: description of '@foobar:' follows a section diff --git a/tests/qapi-schema/event-args-if-unboxed.err b/tests/qapi-schema/event-args-if-unboxed.err new file mode 100644 index 0000000000..41ac64c6f3 --- /dev/null +++ b/tests/qapi-schema/event-args-if-unboxed.err @@ -0,0 +1,2 @@ +tests/qapi-schema/event-args-if-unboxed.json: In event 'TEST_IF_EVENT': +tests/qapi-schema/event-args-if-unboxed.json:1: event's 'data' members may have 'if' conditions only with 'boxed': true diff --git a/tests/qapi-schema/event-args-if-unboxed.json b/tests/qapi-schema/event-args-if-unboxed.json new file mode 100644 index 0000000000..ca42a74e3a --- /dev/null +++ b/tests/qapi-schema/event-args-if-unboxed.json @@ -0,0 +1,4 @@ + { 'event': 'TEST_IF_EVENT', + 'data': { + 'foo': 'int', + 'bar': { 'type': 'str', 'if': 'TEST_IF_CMD_ARG' } } } diff --git a/tests/qapi-schema/event-args-if-unboxed.out b/tests/qapi-schema/event-args-if-unboxed.out new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/qapi-schema/event-nest-struct.err b/tests/qapi-schema/event-nest-struct.err index 8c5f6ed311..15fc1406f8 100644 --- a/tests/qapi-schema/event-nest-struct.err +++ b/tests/qapi-schema/event-nest-struct.err @@ -1,2 +1,2 @@ event-nest-struct.json: In event 'EVENT_A': -event-nest-struct.json:1: 'data' member 'a' should be a type name +event-nest-struct.json:1: 'data' member 'a' should be a type name or array diff --git a/tests/qapi-schema/meson.build b/tests/qapi-schema/meson.build index d85b14f28c..af085f745d 100644 --- a/tests/qapi-schema/meson.build +++ b/tests/qapi-schema/meson.build @@ -27,6 +27,8 @@ schemas = [ 'args-bad-boxed.json', 'args-boxed-anon.json', 'args-boxed-string.json', + 'args-if-implicit.json', + 'args-if-unboxed.json', 'args-int.json', 'args-invalid.json', 'args-member-array-bad.json', @@ -164,6 +166,7 @@ schemas = [ 'struct-base-clash-deep.json', 'struct-base-clash.json', 'struct-data-invalid.json', + 'struct-data-typename.json', 'struct-member-if-invalid.json', 'struct-member-invalid-dict.json', 'struct-member-invalid.json', @@ -194,6 +197,8 @@ schemas = [ 'union-invalid-data.json', 'union-invalid-discriminator.json', 'union-invalid-if-discriminator.json', + 'union-invalid-union-subfield.json', + 'union-invalid-union-subtype.json', 'union-no-base.json', 'union-optional-discriminator.json', 'union-string-discriminator.json', diff --git a/tests/qapi-schema/nested-struct-data.err b/tests/qapi-schema/nested-struct-data.err index c7258a0182..7dc5c7cb2d 100644 --- a/tests/qapi-schema/nested-struct-data.err +++ b/tests/qapi-schema/nested-struct-data.err @@ -1,2 +1,2 @@ nested-struct-data.json: In command 'foo': -nested-struct-data.json:2: 'data' member 'a' should be a type name +nested-struct-data.json:2: 'data' member 'a' should be a type name or array diff --git a/tests/qapi-schema/qapi-schema-test.json b/tests/qapi-schema/qapi-schema-test.json index ba7302f42b..8ca977c49d 100644 --- 
a/tests/qapi-schema/qapi-schema-test.json +++ b/tests/qapi-schema/qapi-schema-test.json @@ -114,6 +114,38 @@ { 'struct': 'UserDefC', 'data': { 'string1': 'str', 'string2': 'str' } } +# this tests that unions can contain other unions in their branches +{ 'enum': 'TestUnionEnum', + 'data': [ 'value-a', 'value-b' ] } + +{ 'enum': 'TestUnionEnumA', + 'data': [ 'value-a1', 'value-a2' ] } + +{ 'struct': 'TestUnionTypeA1', + 'data': { 'integer': 'int', + 'name': 'str'} } + +{ 'struct': 'TestUnionTypeA2', + 'data': { 'integer': 'int', + 'size': 'int' } } + +{ 'union': 'TestUnionTypeA', + 'base': { 'type-a': 'TestUnionEnumA' }, + 'discriminator': 'type-a', + 'data': { 'value-a1': 'TestUnionTypeA1', + 'value-a2': 'TestUnionTypeA2' } } + +{ 'struct': 'TestUnionTypeB', + 'data': { 'integer': 'int', + 'onoff': 'bool' } } + +{ 'union': 'TestUnionInUnion', + 'base': { 'type': 'TestUnionEnum' }, + 'discriminator': 'type', + 'data': { 'value-a': 'TestUnionTypeA', + 'value-b': 'TestUnionTypeB' } } + + # for testing use of 'number' within alternates { 'alternate': 'AltEnumBool', 'data': { 'e': 'EnumOne', 'b': 'bool' } } { 'alternate': 'AltEnumNum', 'data': { 'e': 'EnumOne', 'n': 'number' } } @@ -220,18 +252,19 @@ { 'struct': 'TestIfStruct', 'data': { 'foo': 'int', - 'bar': { 'type': 'int', 'if': 'TEST_IF_STRUCT_BAR'} }, + 'bar': { 'type': 'int', 'if': 'TEST_IF_STRUCT_MEMBER'}, + '*baz': { 'type': 'str', 'if': 'TEST_IF_STRUCT_MEMBER'} }, 'if': 'TEST_IF_STRUCT' } { 'enum': 'TestIfEnum', - 'data': [ 'foo', { 'name' : 'bar', 'if': 'TEST_IF_ENUM_BAR' } ], - 'if': 'TEST_IF_ENUM' } + 'data': [ 'foo', { 'name' : 'bar', 'if': 'TEST_IF_ENUM_MEMBER' } ], + 'if': 'TEST_IF_UNION' } { 'union': 'TestIfUnion', 'base': { 'type': 'TestIfEnum' }, 'discriminator': 'type', 'data': { 'foo': 'TestStruct', - 'bar': { 'type': 'UserDefZero', 'if': 'TEST_IF_ENUM_BAR'} }, + 'bar': { 'type': 'UserDefZero', 'if': 'TEST_IF_ENUM_MEMBER'} }, 'if': { 'all': ['TEST_IF_UNION', 'TEST_IF_STRUCT'] } } { 'command': 'test-if-union-cmd', @@ -240,7 +273,7 @@ { 'alternate': 'TestIfAlternate', 'data': { 'foo': 'int', - 'bar': { 'type': 'TestStruct', 'if': 'TEST_IF_ALT_BAR'} }, + 'bar': { 'type': 'TestStruct', 'if': 'TEST_IF_ALT_MEMBER'} }, 'if': { 'all': ['TEST_IF_ALT', 'TEST_IF_STRUCT'] } } { 'command': 'test-if-alternate-cmd', @@ -248,17 +281,16 @@ 'if': { 'all': ['TEST_IF_ALT', 'TEST_IF_STRUCT'] } } { 'command': 'test-if-cmd', - 'data': { - 'foo': 'TestIfStruct', - 'bar': { 'type': 'TestIfEnum', 'if': 'TEST_IF_CMD_BAR' } }, + 'boxed': true, + 'data': 'TestIfStruct', 'returns': 'UserDefThree', 'if': { 'all': ['TEST_IF_CMD', 'TEST_IF_STRUCT'] } } { 'command': 'test-cmd-return-def-three', 'returns': 'UserDefThree' } { 'event': 'TEST_IF_EVENT', - 'data': { 'foo': 'TestIfStruct', - 'bar': { 'type': ['TestIfEnum'], 'if': 'TEST_IF_EVT_BAR' } }, + 'boxed': true, + 'data': 'TestIfStruct', 'if': { 'all': ['TEST_IF_EVT', 'TEST_IF_STRUCT'] } } { 'event': 'TEST_IF_EVENT2', 'data': {}, diff --git a/tests/qapi-schema/qapi-schema-test.out b/tests/qapi-schema/qapi-schema-test.out index 043d75c655..e2f0981348 100644 --- a/tests/qapi-schema/qapi-schema-test.out +++ b/tests/qapi-schema/qapi-schema-test.out @@ -105,6 +105,35 @@ alternate UserDefAlternate object UserDefC member string1: str optional=False member string2: str optional=False +enum TestUnionEnum + member value-a + member value-b +enum TestUnionEnumA + member value-a1 + member value-a2 +object TestUnionTypeA1 + member integer: int optional=False + member name: str optional=False +object TestUnionTypeA2 + 
member integer: int optional=False + member size: int optional=False +object q_obj_TestUnionTypeA-base + member type-a: TestUnionEnumA optional=False +object TestUnionTypeA + base q_obj_TestUnionTypeA-base + tag type-a + case value-a1: TestUnionTypeA1 + case value-a2: TestUnionTypeA2 +object TestUnionTypeB + member integer: int optional=False + member onoff: bool optional=False +object q_obj_TestUnionInUnion-base + member type: TestUnionEnum optional=False +object TestUnionInUnion + base q_obj_TestUnionInUnion-base + tag type + case value-a: TestUnionTypeA + case value-b: TestUnionTypeB alternate AltEnumBool tag type case e: EnumOne @@ -246,13 +275,15 @@ command __org.qemu_x-command q_obj___org.qemu_x-command-arg -> None object TestIfStruct member foo: int optional=False member bar: int optional=False - if TEST_IF_STRUCT_BAR + if TEST_IF_STRUCT_MEMBER + member baz: str optional=True + if TEST_IF_STRUCT_MEMBER if TEST_IF_STRUCT enum TestIfEnum member foo member bar - if TEST_IF_ENUM_BAR - if TEST_IF_ENUM + if TEST_IF_ENUM_MEMBER + if TEST_IF_UNION object q_obj_TestIfUnion-base member type: TestIfEnum optional=False if {'all': ['TEST_IF_UNION', 'TEST_IF_STRUCT']} @@ -261,7 +292,7 @@ object TestIfUnion tag type case foo: TestStruct case bar: UserDefZero - if TEST_IF_ENUM_BAR + if TEST_IF_ENUM_MEMBER if {'all': ['TEST_IF_UNION', 'TEST_IF_STRUCT']} object q_obj_test-if-union-cmd-arg member union-cmd-arg: TestIfUnion optional=False @@ -273,7 +304,7 @@ alternate TestIfAlternate tag type case foo: int case bar: TestStruct - if TEST_IF_ALT_BAR + if TEST_IF_ALT_MEMBER if {'all': ['TEST_IF_ALT', 'TEST_IF_STRUCT']} object q_obj_test-if-alternate-cmd-arg member alt-cmd-arg: TestIfAlternate optional=False @@ -281,25 +312,13 @@ object q_obj_test-if-alternate-cmd-arg command test-if-alternate-cmd q_obj_test-if-alternate-cmd-arg -> None gen=True success_response=True boxed=False oob=False preconfig=False if {'all': ['TEST_IF_ALT', 'TEST_IF_STRUCT']} -object q_obj_test-if-cmd-arg - member foo: TestIfStruct optional=False - member bar: TestIfEnum optional=False - if TEST_IF_CMD_BAR - if {'all': ['TEST_IF_CMD', 'TEST_IF_STRUCT']} -command test-if-cmd q_obj_test-if-cmd-arg -> UserDefThree - gen=True success_response=True boxed=False oob=False preconfig=False +command test-if-cmd TestIfStruct -> UserDefThree + gen=True success_response=True boxed=True oob=False preconfig=False if {'all': ['TEST_IF_CMD', 'TEST_IF_STRUCT']} command test-cmd-return-def-three None -> UserDefThree gen=True success_response=True boxed=False oob=False preconfig=False -array TestIfEnumList TestIfEnum - if TEST_IF_ENUM -object q_obj_TEST_IF_EVENT-arg - member foo: TestIfStruct optional=False - member bar: TestIfEnumList optional=False - if TEST_IF_EVT_BAR - if {'all': ['TEST_IF_EVT', 'TEST_IF_STRUCT']} -event TEST_IF_EVENT q_obj_TEST_IF_EVENT-arg - boxed=False +event TEST_IF_EVENT TestIfStruct + boxed=True if {'all': ['TEST_IF_EVT', 'TEST_IF_STRUCT']} event TEST_IF_EVENT2 None boxed=False diff --git a/tests/qapi-schema/returns-dict.err b/tests/qapi-schema/returns-dict.err index 9b2d90c010..bf160e754b 100644 --- a/tests/qapi-schema/returns-dict.err +++ b/tests/qapi-schema/returns-dict.err @@ -1,2 +1,2 @@ returns-dict.json: In command 'oops': -returns-dict.json:2: 'returns' should be a type name +returns-dict.json:2: 'returns' should be a type name or array diff --git a/tests/qapi-schema/struct-data-typename.err b/tests/qapi-schema/struct-data-typename.err new file mode 100644 index 0000000000..8fbfe99a42 --- /dev/null +++ 
b/tests/qapi-schema/struct-data-typename.err @@ -0,0 +1,2 @@ +struct-data-typename.json: In struct 'Stru2': +struct-data-typename.json:2: 'data' should be an object or type name diff --git a/tests/qapi-schema/struct-data-typename.json b/tests/qapi-schema/struct-data-typename.json new file mode 100644 index 0000000000..70fbad0ee4 --- /dev/null +++ b/tests/qapi-schema/struct-data-typename.json @@ -0,0 +1,2 @@ +{ 'struct': 'Stru1', 'data': {} } +{ 'struct': 'Stru2', 'data': 'Stru1' } diff --git a/tests/qapi-schema/struct-data-typename.out b/tests/qapi-schema/struct-data-typename.out new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/qapi-schema/struct-member-invalid.err b/tests/qapi-schema/struct-member-invalid.err index 7e01a41d7c..3130d69d9f 100644 --- a/tests/qapi-schema/struct-member-invalid.err +++ b/tests/qapi-schema/struct-member-invalid.err @@ -1,2 +1,2 @@ struct-member-invalid.json: In struct 'Foo': -struct-member-invalid.json:1: 'data' member 'a' should be a type name +struct-member-invalid.json:1: 'data' member 'a' should be a type name or array diff --git a/tests/qapi-schema/test-qapi.py b/tests/qapi-schema/test-qapi.py index 2160cef082..d58c31f539 100755 --- a/tests/qapi-schema/test-qapi.py +++ b/tests/qapi-schema/test-qapi.py @@ -206,6 +206,7 @@ def main(argv): parser.add_argument('-d', '--dir', action='store', default='', help="directory containing tests") parser.add_argument('-u', '--update', action='store_true', + default='QAPI_TEST_UPDATE' in os.environ, help="update expected test results") parser.add_argument('tests', nargs='*', metavar='TEST', action='store') args = parser.parse_args() diff --git a/tests/qapi-schema/union-array-branch.err b/tests/qapi-schema/union-array-branch.err index 5db9c17481..2aa146261a 100644 --- a/tests/qapi-schema/union-array-branch.err +++ b/tests/qapi-schema/union-array-branch.err @@ -1,2 +1,2 @@ union-array-branch.json: In union 'TestUnion': -union-array-branch.json:8: 'data' member 'value1' cannot be an array +union-array-branch.json:8: 'data' member 'value1' should be a type name diff --git a/tests/qapi-schema/union-invalid-discriminator.err b/tests/qapi-schema/union-invalid-discriminator.err index 38efb24b98..6bd774c156 100644 --- a/tests/qapi-schema/union-invalid-discriminator.err +++ b/tests/qapi-schema/union-invalid-discriminator.err @@ -1,2 +1,2 @@ union-invalid-discriminator.json: In union 'TestUnion': -union-invalid-discriminator.json:10: discriminator 'enum_wrong' is not a member of 'base' +union-invalid-discriminator.json:10: discriminator 'type_tag' is not a member of 'base' diff --git a/tests/qapi-schema/union-invalid-discriminator.json b/tests/qapi-schema/union-invalid-discriminator.json index c4fce97362..f315f36e37 100644 --- a/tests/qapi-schema/union-invalid-discriminator.json +++ b/tests/qapi-schema/union-invalid-discriminator.json @@ -8,7 +8,7 @@ 'data': { 'integer': 'int' } } { 'union': 'TestUnion', - 'base': { 'enum1': 'TestEnum' }, - 'discriminator': 'enum_wrong', + 'base': { 'type-tag': 'TestEnum' }, + 'discriminator': 'type_tag', 'data': { 'value1': 'TestTypeA', 'value2': 'TestTypeB' } } diff --git a/tests/qapi-schema/union-invalid-union-subfield.err b/tests/qapi-schema/union-invalid-union-subfield.err new file mode 100644 index 0000000000..91aa87bcd8 --- /dev/null +++ b/tests/qapi-schema/union-invalid-union-subfield.err @@ -0,0 +1,2 @@ +union-invalid-union-subfield.json: In union 'TestUnion': +union-invalid-union-subfield.json:25: member 'teeth' of type 'TestTypeFish' collides with base member 'teeth' 
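Each negative schema test added here follows the tests/qapi-schema triplet convention: the .json file is the input fed to test-qapi.py, the .err file holds the expected diagnostics, and the empty .out file records that nothing is expected on stdout. The test-qapi.py hunk above additionally makes --update default to on whenever QAPI_TEST_UPDATE is set in the environment, so the .err/.out baselines can be regenerated without passing -u by hand. A minimal sketch of that workflow, assuming an in-tree run and that bare test names are accepted as the positional TEST arguments shown in the argparse hunk (both assumptions, not spelled out by this patch):

    # hypothetical invocation: seed or refresh the baselines for one new test
    cd tests/qapi-schema
    QAPI_TEST_UPDATE=1 python3 ./test-qapi.py union-invalid-union-subfield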
diff --git a/tests/qapi-schema/union-invalid-union-subfield.json b/tests/qapi-schema/union-invalid-union-subfield.json new file mode 100644 index 0000000000..e1639d3a96 --- /dev/null +++ b/tests/qapi-schema/union-invalid-union-subfield.json @@ -0,0 +1,30 @@ +# Clash between common member and union variant's variant member +# Base's member 'teeth' clashes with TestTypeFish's + +{ 'enum': 'TestEnum', + 'data': [ 'animals', 'plants' ] } + +{ 'enum': 'TestAnimals', + 'data': [ 'fish', 'birds'] } + +{ 'struct': 'TestTypeFish', + 'data': { 'scales': 'int', 'teeth': 'int' } } + +{ 'struct': 'TestTypeBirds', + 'data': { 'feathers': 'int' } } + +{ 'union': 'TestTypeAnimals', + 'base': { 'atype': 'TestAnimals' }, + 'discriminator': 'atype', + 'data': { 'fish': 'TestTypeFish', + 'birds': 'TestTypeBirds' } } + +{ 'struct': 'TestTypePlants', + 'data': { 'integer': 'int' } } + +{ 'union': 'TestUnion', + 'base': { 'type': 'TestEnum', + 'teeth': 'int' }, + 'discriminator': 'type', + 'data': { 'animals': 'TestTypeAnimals', + 'plants': 'TestTypePlants' } } diff --git a/tests/qapi-schema/union-invalid-union-subfield.out b/tests/qapi-schema/union-invalid-union-subfield.out new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/qapi-schema/union-invalid-union-subtype.err b/tests/qapi-schema/union-invalid-union-subtype.err new file mode 100644 index 0000000000..3538dc2e70 --- /dev/null +++ b/tests/qapi-schema/union-invalid-union-subtype.err @@ -0,0 +1,2 @@ +union-invalid-union-subtype.json: In union 'TestUnion': +union-invalid-union-subtype.json:25: base member 'type' of type 'TestTypeA' collides with base member 'type' diff --git a/tests/qapi-schema/union-invalid-union-subtype.json b/tests/qapi-schema/union-invalid-union-subtype.json new file mode 100644 index 0000000000..ce1de51d8d --- /dev/null +++ b/tests/qapi-schema/union-invalid-union-subtype.json @@ -0,0 +1,29 @@ +# Clash between common member and union variant's common member +# Base's member 'type' clashes with TestTypeA's + +{ 'enum': 'TestEnum', + 'data': [ 'value-a', 'value-b' ] } + +{ 'enum': 'TestEnumA', + 'data': [ 'value-a1', 'value-a2' ] } + +{ 'struct': 'TestTypeA1', + 'data': { 'integer': 'int' } } + +{ 'struct': 'TestTypeA2', + 'data': { 'integer': 'int' } } + +{ 'union': 'TestTypeA', + 'base': { 'type': 'TestEnumA' }, + 'discriminator': 'type', + 'data': { 'value-a1': 'TestTypeA1', + 'value-a2': 'TestTypeA2' } } + +{ 'struct': 'TestTypeB', + 'data': { 'integer': 'int' } } + +{ 'union': 'TestUnion', + 'base': { 'type': 'TestEnum' }, + 'discriminator': 'type', + 'data': { 'value-a': 'TestTypeA', + 'value-b': 'TestTypeB' } } diff --git a/tests/qapi-schema/union-invalid-union-subtype.out b/tests/qapi-schema/union-invalid-union-subtype.out new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/qemu-iotests/051.out b/tests/qemu-iotests/051.out index e5ddb03bda..d462156405 100644 --- a/tests/qemu-iotests/051.out +++ b/tests/qemu-iotests/051.out @@ -74,7 +74,7 @@ QEMU_PROG: -drive file=TEST_DIR/t.qcow2,node-name=foo#12: Invalid node-name: 'fo Testing: -device virtio-scsi -device scsi-hd QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -device scsi-hd: drive property not set +QEMU_PROG: -device scsi-hd: drive property not set === Overriding backing file === @@ -134,7 +134,7 @@ QEMU X.Y.Z monitor - type 'help' for more information Testing: -drive if=virtio QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -drive if=virtio: Device needs media, but drive is empty +QEMU_PROG: 
-drive if=virtio: Device needs media, but drive is empty === Attach to node in non-default iothread === diff --git a/tests/qemu-iotests/051.pc.out b/tests/qemu-iotests/051.pc.out index bade1ff3b9..4d4af5a486 100644 --- a/tests/qemu-iotests/051.pc.out +++ b/tests/qemu-iotests/051.pc.out @@ -74,7 +74,7 @@ QEMU_PROG: -drive file=TEST_DIR/t.qcow2,node-name=foo#12: Invalid node-name: 'fo Testing: -device virtio-scsi -device scsi-hd QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -device scsi-hd: drive property not set +QEMU_PROG: -device scsi-hd: drive property not set === Overriding backing file === @@ -142,11 +142,11 @@ QEMU X.Y.Z monitor - type 'help' for more information Testing: -drive if=ide QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: Device needs media, but drive is empty +QEMU_PROG: Device needs media, but drive is empty Testing: -drive if=virtio QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -drive if=virtio: Device needs media, but drive is empty +QEMU_PROG: -drive if=virtio: Device needs media, but drive is empty Testing: -drive if=none,id=disk -device ide-cd,drive=disk QEMU X.Y.Z monitor - type 'help' for more information @@ -158,22 +158,22 @@ QEMU X.Y.Z monitor - type 'help' for more information Testing: -drive if=none,id=disk -device ide-hd,drive=disk QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -device ide-hd,drive=disk: Device needs media, but drive is empty +QEMU_PROG: -device ide-hd,drive=disk: Device needs media, but drive is empty Testing: -drive if=none,id=disk -device lsi53c895a -device scsi-hd,drive=disk QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -device scsi-hd,drive=disk: Device needs media, but drive is empty +QEMU_PROG: -device scsi-hd,drive=disk: Device needs media, but drive is empty === Attach to node in non-default iothread === Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device ide-hd,drive=disk,share-rw=on QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -device ide-hd,drive=disk,share-rw=on: Cannot change iothread of active block backend +QEMU_PROG: -device ide-hd,drive=disk,share-rw=on: Cannot change iothread of active block backend Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device virtio-blk-pci,drive=disk,share-rw=on QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -device virtio-blk-pci,drive=disk,share-rw=on: Cannot change iothread of active block backend +QEMU_PROG: -device virtio-blk-pci,drive=disk,share-rw=on: Cannot change iothread of active block backend Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device lsi53c895a,id=lsi0 -device scsi-hd,bus=lsi0.0,drive=disk,share-rw=on QEMU X.Y.Z monitor - type 'help' for more information @@ -185,7 +185,7 @@ QEMU X.Y.Z monitor - type 'help' for more information Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device 
virtio-blk-pci,drive=disk,iothread=thread0,share-rw=on QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -device virtio-blk-pci,drive=disk,iothread=thread0,share-rw=on: Cannot change iothread of active block backend +QEMU_PROG: -device virtio-blk-pci,drive=disk,iothread=thread0,share-rw=on: Cannot change iothread of active block backend Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device virtio-scsi,id=virtio-scsi1,iothread=thread0 -device scsi-hd,bus=virtio-scsi1.0,drive=disk,share-rw=on QEMU X.Y.Z monitor - type 'help' for more information @@ -204,7 +204,7 @@ QEMU X.Y.Z monitor - type 'help' for more information Testing: -drive file=TEST_DIR/t.qcow2,if=ide,readonly=on QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: Block node is read-only +QEMU_PROG: Block node is read-only Testing: -drive file=TEST_DIR/t.qcow2,if=virtio,readonly=on QEMU X.Y.Z monitor - type 'help' for more information @@ -220,7 +220,7 @@ QEMU X.Y.Z monitor - type 'help' for more information Testing: -drive file=TEST_DIR/t.qcow2,if=none,id=disk,readonly=on -device ide-hd,drive=disk QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -device ide-hd,drive=disk: Block node is read-only +QEMU_PROG: -device ide-hd,drive=disk: Block node is read-only Testing: -drive file=TEST_DIR/t.qcow2,if=none,id=disk,readonly=on -device lsi53c895a -device scsi-hd,drive=disk QEMU X.Y.Z monitor - type 'help' for more information diff --git a/tests/qemu-iotests/172 b/tests/qemu-iotests/172 index ff269ca7b5..4da0e0f2e2 100755 --- a/tests/qemu-iotests/172 +++ b/tests/qemu-iotests/172 @@ -56,7 +56,7 @@ do_run_qemu() done fi echo quit - ) | $QEMU -accel qtest -nographic -monitor stdio -serial none "$@" + ) | $QEMU -accel qtest -nographic -monitor stdio -serial none -vga none -nic none "$@" echo } diff --git a/tests/qemu-iotests/227.out b/tests/qemu-iotests/227.out index 378c1b8fb1..a947b1a87d 100644 --- a/tests/qemu-iotests/227.out +++ b/tests/qemu-iotests/227.out @@ -17,6 +17,7 @@ Testing: -drive driver=null-co,read-zeroes=on,if=virtio "stats": { "unmap_operations": 0, "unmap_merged": 0, + "failed_zone_append_operations": 0, "flush_total_time_ns": 0, "wr_highest_offset": 0, "wr_total_time_ns": 0, @@ -27,6 +28,7 @@ Testing: -drive driver=null-co,read-zeroes=on,if=virtio "timed_stats": [ ], "failed_unmap_operations": 0, + "zone_append_merged": 0, "failed_flush_operations": 0, "account_invalid": true, "rd_total_time_ns": 0, @@ -39,7 +41,11 @@ Testing: -drive driver=null-co,read-zeroes=on,if=virtio "unmap_total_time_ns": 0, "invalid_flush_operations": 0, "account_failed": true, + "zone_append_total_time_ns": 0, + "zone_append_operations": 0, "rd_operations": 0, + "zone_append_bytes": 0, + "invalid_zone_append_operations": 0, "invalid_wr_operations": 0, "invalid_rd_operations": 0 }, @@ -82,6 +88,7 @@ Testing: -drive driver=null-co,if=none "stats": { "unmap_operations": 0, "unmap_merged": 0, + "failed_zone_append_operations": 0, "flush_total_time_ns": 0, "wr_highest_offset": 0, "wr_total_time_ns": 0, @@ -92,6 +99,7 @@ Testing: -drive driver=null-co,if=none "timed_stats": [ ], "failed_unmap_operations": 0, + "zone_append_merged": 0, "failed_flush_operations": 0, "account_invalid": true, "rd_total_time_ns": 0, @@ -104,7 +112,11 @@ Testing: -drive driver=null-co,if=none "unmap_total_time_ns": 0, "invalid_flush_operations": 0, 
"account_failed": true, + "zone_append_total_time_ns": 0, + "zone_append_operations": 0, "rd_operations": 0, + "zone_append_bytes": 0, + "invalid_zone_append_operations": 0, "invalid_wr_operations": 0, "invalid_rd_operations": 0 }, @@ -177,6 +189,7 @@ Testing: -blockdev driver=null-co,read-zeroes=on,node-name=null -device virtio-b "stats": { "unmap_operations": 0, "unmap_merged": 0, + "failed_zone_append_operations": 0, "flush_total_time_ns": 0, "wr_highest_offset": 0, "wr_total_time_ns": 0, @@ -187,6 +200,7 @@ Testing: -blockdev driver=null-co,read-zeroes=on,node-name=null -device virtio-b "timed_stats": [ ], "failed_unmap_operations": 0, + "zone_append_merged": 0, "failed_flush_operations": 0, "account_invalid": true, "rd_total_time_ns": 0, @@ -199,7 +213,11 @@ Testing: -blockdev driver=null-co,read-zeroes=on,node-name=null -device virtio-b "unmap_total_time_ns": 0, "invalid_flush_operations": 0, "account_failed": true, + "zone_append_total_time_ns": 0, + "zone_append_operations": 0, "rd_operations": 0, + "zone_append_bytes": 0, + "invalid_zone_append_operations": 0, "invalid_wr_operations": 0, "invalid_rd_operations": 0 }, diff --git a/tests/qemu-iotests/245 b/tests/qemu-iotests/245 index edaf29094b..92b28c79be 100755 --- a/tests/qemu-iotests/245 +++ b/tests/qemu-iotests/245 @@ -611,6 +611,7 @@ class TestBlockdevReopen(iotests.QMPTestCase): self.reopen(hd0_opts, {'file': 'hd0-file'}) # Insert (and remove) a compress filter + @iotests.skip_if_unsupported(['compress']) def test_insert_compress_filter(self): # Add an image to the VM: hd (raw) -> hd0 (qcow2) -> hd0-file (file) opts = {'driver': 'raw', 'node-name': 'hd', 'file': hd_opts(0)} @@ -650,9 +651,9 @@ class TestBlockdevReopen(iotests.QMPTestCase): # Check the first byte of the first three L2 entries and verify that # the second one is compressed (0x40) while the others are not (0x80) - iotests.qemu_io_log('-f', 'raw', '-c', 'read -P 0x80 0x40000 1', - '-c', 'read -P 0x40 0x40008 1', - '-c', 'read -P 0x80 0x40010 1', hd_path[0]) + iotests.qemu_io('-f', 'raw', '-c', 'read -P 0x80 0x40000 1', + '-c', 'read -P 0x40 0x40008 1', + '-c', 'read -P 0x80 0x40010 1', hd_path[0]) # Swap the disk images of two active block devices def test_swap_files(self): diff --git a/tests/qemu-iotests/245.out b/tests/qemu-iotests/245.out index a4e04a3266..0970ced62a 100644 --- a/tests/qemu-iotests/245.out +++ b/tests/qemu-iotests/245.out @@ -10,14 +10,7 @@ {"return": {}} {"data": {"id": "stream0", "type": "stream"}, "event": "BLOCK_JOB_PENDING", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"device": "stream0", "len": 3145728, "offset": 3145728, "speed": 0, "type": "stream"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} -....read 1/1 bytes at offset 262144 -1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -read 1/1 bytes at offset 262152 -1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -read 1/1 bytes at offset 262160 -1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) - -................ +.................... 
---------------------------------------------------------------------- Ran 26 tests diff --git a/tests/qemu-iotests/256 b/tests/qemu-iotests/256 index 13666813bd..d7e67f4a05 100755 --- a/tests/qemu-iotests/256 +++ b/tests/qemu-iotests/256 @@ -24,7 +24,7 @@ import os import iotests from iotests import log -iotests._verify_virtio_scsi_pci_or_ccw() +iotests.verify_virtio_scsi_pci_or_ccw() iotests.script_initialize(supported_fmts=['qcow2']) size = 64 * 1024 * 1024 diff --git a/tests/qemu-iotests/308 b/tests/qemu-iotests/308 index 09275e9a10..de12b2b1b9 100755 --- a/tests/qemu-iotests/308 +++ b/tests/qemu-iotests/308 @@ -370,6 +370,49 @@ echo echo '=== Compare copy with original ===' $QEMU_IMG compare -f raw -F $IMGFMT "$COPIED_IMG" "$TEST_IMG" +_cleanup_test_img + +echo +echo '=== Writing zeroes while unmapping ===' +# Regression test for https://gitlab.com/qemu-project/qemu/-/issues/1507 +_make_test_img 64M +$QEMU_IO -c 'write -s /dev/urandom 0 64M' "$TEST_IMG" | _filter_qemu_io + +_launch_qemu +_send_qemu_cmd $QEMU_HANDLE \ + "{'execute': 'qmp_capabilities'}" \ + 'return' + +_send_qemu_cmd $QEMU_HANDLE \ + "{'execute': 'blockdev-add', + 'arguments': { + 'driver': '$IMGFMT', + 'node-name': 'node-format', + 'file': { + 'driver': 'file', + 'filename': '$TEST_IMG' + } + } }" \ + 'return' + +fuse_export_add 'export' "'mountpoint': '$EXT_MP', 'writable': true" + +# Try writing zeroes by unmapping +$QEMU_IO -f raw -c 'write -zu 0 64M' "$EXT_MP" | _filter_qemu_io + +# Check the result +$QEMU_IO -f raw -c 'read -P 0 0 64M' "$EXT_MP" | _filter_qemu_io + +_send_qemu_cmd $QEMU_HANDLE \ + "{'execute': 'quit'}" \ + 'return' + +wait=yes _cleanup_qemu + +# Check the original image +$QEMU_IO -c 'read -P 0 0 64M' "$TEST_IMG" | _filter_qemu_io + +_cleanup_test_img # success, all done echo "*** done" diff --git a/tests/qemu-iotests/308.out b/tests/qemu-iotests/308.out index e4467a10cf..d5767133b1 100644 --- a/tests/qemu-iotests/308.out +++ b/tests/qemu-iotests/308.out @@ -171,4 +171,39 @@ OK: Post-truncate image size is as expected === Compare copy with original === Images are identical. 
+ +=== Writing zeroes while unmapping === +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 +wrote 67108864/67108864 bytes at offset 0 +64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +{'execute': 'qmp_capabilities'} +{"return": {}} +{'execute': 'blockdev-add', + 'arguments': { + 'driver': 'IMGFMT', + 'node-name': 'node-format', + 'file': { + 'driver': 'file', + 'filename': 'TEST_DIR/t.IMGFMT' + } + } } +{"return": {}} +{'execute': 'block-export-add', + 'arguments': { + 'type': 'fuse', + 'id': 'export', + 'node-name': 'node-format', + 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true + } } +{"return": {}} +wrote 67108864/67108864 bytes at offset 0 +64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 67108864/67108864 bytes at offset 0 +64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +{'execute': 'quit'} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "export"}} +read 67108864/67108864 bytes at offset 0 +64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) *** done diff --git a/tests/qemu-iotests/check b/tests/qemu-iotests/check index 9bdda1394e..f2e9d27dcf 100755 --- a/tests/qemu-iotests/check +++ b/tests/qemu-iotests/check @@ -26,9 +26,23 @@ from findtests import TestFinder from testenv import TestEnv from testrunner import TestRunner +def get_default_path(follow_link=False): + """ + Try to automagically figure out the path we are running from. + """ + # called from the build tree? + if os.path.islink(sys.argv[0]): + if follow_link: + return os.path.dirname(os.readlink(sys.argv[0])) + else: + return os.path.dirname(os.path.abspath(sys.argv[0])) + else: # or source tree? 
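+ # No symlink to follow: check is running in place from the source + # tree, where the current directory doubles as the build directory.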
+ return os.getcwd() def make_argparser() -> argparse.ArgumentParser: - p = argparse.ArgumentParser(description="Test run options") + p = argparse.ArgumentParser( + description="Test run options", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) p.add_argument('-n', '--dry-run', action='store_true', help='show me, do not run tests') @@ -113,6 +127,11 @@ def make_argparser() -> argparse.ArgumentParser: 'middle of the process.') g_sel.add_argument('tests', metavar='TEST_FILES', nargs='*', help='tests to run, or "--" followed by a command') + g_sel.add_argument('--build-dir', default=get_default_path(), + help='Path to iotests build directory') + g_sel.add_argument('--source-dir', + default=get_default_path(follow_link=True), + help='Path to iotests source directory') return p @@ -120,11 +139,14 @@ def make_argparser() -> argparse.ArgumentParser: if __name__ == '__main__': args = make_argparser().parse_args() - env = TestEnv(imgfmt=args.imgfmt, imgproto=args.imgproto, + env = TestEnv(source_dir=args.source_dir, + build_dir=args.build_dir, + imgfmt=args.imgfmt, imgproto=args.imgproto, aiomode=args.aiomode, cachemode=args.cachemode, imgopts=args.imgopts, misalign=args.misalign, debug=args.debug, valgrind=args.valgrind, - gdb=args.gdb, qprint=args.print) + gdb=args.gdb, qprint=args.print, + dry_run=args.dry_run) if len(sys.argv) > 1 and sys.argv[-len(args.tests)-1] == '--': if not args.tests: @@ -162,7 +184,7 @@ if __name__ == '__main__': sys.exit(str(e)) if args.dry_run: - print('\n'.join(tests)) + print('\n'.join([os.path.basename(t) for t in tests])) else: with TestRunner(env, tap=args.tap, color=args.color) as tr: diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py index 3e82c634cf..ef66fbd62b 100644 --- a/tests/qemu-iotests/iotests.py +++ b/tests/qemu-iotests/iotests.py @@ -462,6 +462,10 @@ class QemuStorageDaemon: assert self._qmp is not None return self._qmp.cmd(cmd, args) + def get_qmp(self) -> QEMUMonitorProtocol: + assert self._qmp is not None + return self._qmp + def stop(self, kill_signal=15): self._p.send_signal(kill_signal) self._p.wait() @@ -1417,7 +1421,7 @@ def _verify_virtio_blk() -> None: if 'virtio-blk' not in out: notrun('Missing virtio-blk in QEMU binary') -def _verify_virtio_scsi_pci_or_ccw() -> None: +def verify_virtio_scsi_pci_or_ccw() -> None: out = qemu_pipe('-M', 'none', '-device', 'help') if 'virtio-scsi-pci' not in out and 'virtio-scsi-ccw' not in out: notrun('Missing virtio-scsi-pci or virtio-scsi-ccw in QEMU binary') diff --git a/tests/qemu-iotests/meson.build b/tests/qemu-iotests/meson.build index 323a4acb6a..44761e1e4d 100644 --- a/tests/qemu-iotests/meson.build +++ b/tests/qemu-iotests/meson.build @@ -2,7 +2,7 @@ if not have_tools or targetos == 'windows' or get_option('gprof') subdir_done() endif -foreach cflag: config_host['QEMU_CFLAGS'].split() +foreach cflag: qemu_ldflags if cflag.startswith('-fsanitize') and \ not cflag.contains('safe-stack') and not cflag.contains('cfi-icall') message('Sanitizers are enabled ==> Disabled the qemu-iotests.') @@ -32,16 +32,40 @@ foreach k, v : emulators endif endforeach +qemu_iotests_check_cmd = files('check') + foreach format, speed: qemu_iotests_formats if speed == 'quick' suites = 'block' else suites = ['block-' + speed, speed] endif - test('qemu-iotests ' + format, sh, args: [files('../check-block.sh'), format], - depends: qemu_iotests_binaries, env: qemu_iotests_env, - protocol: 'tap', - suite: suites, - timeout: 0, - is_parallel: false) + + args = ['-tap', '-' + format] + if speed ==
'quick' + args += ['-g', 'auto'] + endif + + rc = run_command( + [python, qemu_iotests_check_cmd] + args + ['-n'], + check: true, + ) + + foreach item: rc.stdout().strip().split() + args = [qemu_iotests_check_cmd, + '-tap', '-' + format, item, + '--source-dir', meson.current_source_dir(), + '--build-dir', meson.current_build_dir()] + # Some individual tests take as long as 45 seconds + # Bump the timeout to 3 minutes for some headroom + # on slow machines to minimize spurious failures + test('io-' + format + '-' + item, + python, + args: args, + depends: qemu_iotests_binaries, + env: qemu_iotests_env, + protocol: 'tap', + timeout: 180, + suite: suites) + endforeach endforeach diff --git a/tests/qemu-iotests/testenv.py b/tests/qemu-iotests/testenv.py index a864c74b12..9a37ad9152 100644 --- a/tests/qemu-iotests/testenv.py +++ b/tests/qemu-iotests/testenv.py @@ -170,14 +170,16 @@ class TestEnv(ContextManager['TestEnv']): if not isxfile(b): sys.exit('Not executable: ' + b) - def __init__(self, imgfmt: str, imgproto: str, aiomode: str, + def __init__(self, source_dir: str, build_dir: str, + imgfmt: str, imgproto: str, aiomode: str, cachemode: Optional[str] = None, imgopts: Optional[str] = None, misalign: bool = False, debug: bool = False, valgrind: bool = False, gdb: bool = False, - qprint: bool = False) -> None: + qprint: bool = False, + dry_run: bool = False) -> None: self.imgfmt = imgfmt self.imgproto = imgproto self.aiomode = aiomode @@ -211,18 +213,16 @@ class TestEnv(ContextManager['TestEnv']): # which are needed to initialize some environment variables. They are # used by init_*() functions as well. - if os.path.islink(sys.argv[0]): - # called from the build tree - self.source_iotests = os.path.dirname(os.readlink(sys.argv[0])) - self.build_iotests = os.path.dirname(os.path.abspath(sys.argv[0])) - else: - # called from the source tree - self.source_iotests = os.getcwd() - self.build_iotests = self.source_iotests + self.source_iotests = source_dir + self.build_iotests = build_dir self.build_root = os.path.join(self.build_iotests, '..', '..') self.init_directories() + + if dry_run: + return + self.init_binaries() self.malloc_perturb_ = os.getenv('MALLOC_PERTURB_', diff --git a/tests/qemu-iotests/testrunner.py b/tests/qemu-iotests/testrunner.py index 5a771da86e..7b322272e9 100644 --- a/tests/qemu-iotests/testrunner.py +++ b/tests/qemu-iotests/testrunner.py @@ -24,12 +24,10 @@ import difflib import subprocess import contextlib import json -import termios import shutil import sys from multiprocessing import Pool -from contextlib import contextmanager -from typing import List, Optional, Iterator, Any, Sequence, Dict, \ +from typing import List, Optional, Any, Sequence, Dict, \ ContextManager from testenv import TestEnv @@ -56,22 +54,6 @@ def file_diff(file1: str, file2: str) -> List[str]: return res -# We want to save current tty settings during test run, -# since an aborting qemu call may leave things screwed up. 
-@contextmanager -def savetty() -> Iterator[None]: - isterm = sys.stdin.isatty() - if isterm: - fd = sys.stdin.fileno() - attr = termios.tcgetattr(fd) - - try: - yield - finally: - if isterm: - termios.tcsetattr(fd, termios.TCSADRAIN, attr) - - class LastElapsedTime(ContextManager['LastElapsedTime']): """ Cache for elapsed time for tests, to show it during new test run @@ -169,7 +151,6 @@ class TestRunner(ContextManager['TestRunner']): self._stack = contextlib.ExitStack() self._stack.enter_context(self.env) self._stack.enter_context(self.last_elapsed) - self._stack.enter_context(savetty()) return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: @@ -247,13 +228,11 @@ class TestRunner(ContextManager['TestRunner']): return f'{test}.out' - def do_run_test(self, test: str, mp: bool) -> TestResult: + def do_run_test(self, test: str) -> TestResult: """ Run one test :param test: test file path - :param mp: if true, we are in a multiprocessing environment, use - personal subdirectories for test run Note: this method may be called from subprocess, so it does not change ``self`` object in any way! @@ -276,12 +255,14 @@ class TestRunner(ContextManager['TestRunner']): args = [str(f_test.resolve())] env = self.env.prepare_subprocess(args) - if mp: - # Split test directories, so that tests running in parallel don't - # break each other. - for d in ['TEST_DIR', 'SOCK_DIR']: - env[d] = os.path.join(env[d], f_test.name) - Path(env[d]).mkdir(parents=True, exist_ok=True) + + # Split test directories, so that tests running in parallel don't + # break each other. + for d in ['TEST_DIR', 'SOCK_DIR']: + env[d] = os.path.join( + env[d], + f"{self.env.imgfmt}-{self.env.imgproto}-{f_test.name}") + Path(env[d]).mkdir(parents=True, exist_ok=True) test_dir = env['TEST_DIR'] f_bad = Path(test_dir, f_test.name + '.out.bad') @@ -294,6 +275,7 @@ class TestRunner(ContextManager['TestRunner']): t0 = time.time() with f_bad.open('w', encoding="utf-8") as f: with subprocess.Popen(args, cwd=str(f_test.parent), env=env, + stdin=subprocess.DEVNULL, stdout=f, stderr=subprocess.STDOUT) as proc: try: proc.wait() @@ -365,7 +347,7 @@ class TestRunner(ContextManager['TestRunner']): testname = os.path.basename(test) print(f'# running {self.env.imgfmt} {testname}') - res = self.do_run_test(test, mp) + res = self.do_run_test(test) end = datetime.datetime.now().strftime('%H:%M:%S') self.test_print_one_line(test=test, @@ -391,6 +373,7 @@ class TestRunner(ContextManager['TestRunner']): casenotrun = [] if self.tap: + print('TAP version 13') self.env.print_env('# ') print('1..%d' % len(tests)) else: diff --git a/tests/qemu-iotests/tests/graph-changes-while-io b/tests/qemu-iotests/tests/graph-changes-while-io index 7664f33689..750e7d4d38 100755 --- a/tests/qemu-iotests/tests/graph-changes-while-io +++ b/tests/qemu-iotests/tests/graph-changes-while-io @@ -22,19 +22,19 @@ import os from threading import Thread import iotests -from iotests import imgfmt, qemu_img, qemu_img_create, QMPTestCase, \ - QemuStorageDaemon +from iotests import imgfmt, qemu_img, qemu_img_create, qemu_io, \ + QMPTestCase, QemuStorageDaemon top = os.path.join(iotests.test_dir, 'top.img') nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock') -def do_qemu_img_bench() -> None: +def do_qemu_img_bench(count: int = 2000000) -> None: """ Do some I/O requests on `nbd_sock`. 
""" - qemu_img('bench', '-f', 'raw', '-c', '2000000', + qemu_img('bench', '-f', 'raw', '-c', str(count), f'nbd+unix:///node0?socket={nbd_sock}') @@ -84,6 +84,54 @@ class TestGraphChangesWhileIO(QMPTestCase): bench_thr.join() + def test_commit_while_io(self) -> None: + # Run qemu-img bench in the background + bench_thr = Thread(target=do_qemu_img_bench, args=(200000, )) + bench_thr.start() + + qemu_io('-c', 'write 0 64k', top) + qemu_io('-c', 'write 128k 64k', top) + + result = self.qsd.qmp('blockdev-add', { + 'driver': imgfmt, + 'node-name': 'overlay', + 'backing': None, + 'file': { + 'driver': 'file', + 'filename': top + } + }) + self.assert_qmp(result, 'return', {}) + + result = self.qsd.qmp('blockdev-snapshot', { + 'node': 'node0', + 'overlay': 'overlay', + }) + self.assert_qmp(result, 'return', {}) + + # While qemu-img bench is running, repeatedly commit overlay to node0 + while bench_thr.is_alive(): + result = self.qsd.qmp('block-commit', { + 'job-id': 'job0', + 'device': 'overlay', + }) + self.assert_qmp(result, 'return', {}) + + result = self.qsd.qmp('block-job-cancel', { + 'device': 'job0', + }) + self.assert_qmp(result, 'return', {}) + + cancelled = False + while not cancelled: + for event in self.qsd.get_qmp().get_events(wait=10.0): + if event['event'] != 'JOB_STATUS_CHANGE': + continue + if event['data']['status'] == 'null': + cancelled = True + + bench_thr.join() + if __name__ == '__main__': # Format must support raw backing files iotests.main(supported_fmts=['qcow', 'qcow2', 'qed'], diff --git a/tests/qemu-iotests/tests/graph-changes-while-io.out b/tests/qemu-iotests/tests/graph-changes-while-io.out index ae1213e6f8..fbc63e62f8 100644 --- a/tests/qemu-iotests/tests/graph-changes-while-io.out +++ b/tests/qemu-iotests/tests/graph-changes-while-io.out @@ -1,5 +1,5 @@ -. +.. ---------------------------------------------------------------------- -Ran 1 tests +Ran 2 tests OK diff --git a/tests/qemu-iotests/tests/iothreads-create b/tests/qemu-iotests/tests/iothreads-create new file mode 100755 index 0000000000..0c862d73f2 --- /dev/null +++ b/tests/qemu-iotests/tests/iothreads-create @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +# group: rw quick +# +# Copyright (C) 2023 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +# Creator/Owner: Kevin Wolf <kwolf@redhat.com> + +import asyncio +import iotests + +iotests.script_initialize(supported_fmts=['qcow2', 'qcow', 'qed', 'vdi', + 'vmdk', 'parallels']) +iotests.verify_virtio_scsi_pci_or_ccw() + +with iotests.FilePath('disk.img') as img_path, \ + iotests.VM() as vm: + + iotests.qemu_img_create('-f', 'raw', img_path, '0') + + vm.add_object('iothread,id=iothread0') + vm.add_blockdev(f'file,node-name=img-file,read-only=on,' + f'filename={img_path}') + vm.add_device('virtio-scsi,iothread=iothread0') + vm.add_device('scsi-hd,drive=img-file,share-rw=on') + + vm.launch() + + iotests.log(vm.qmp( + 'blockdev-reopen', + options=[{ + 'driver': 'file', + 'filename': img_path, + 'node-name': 'img-file', + 'read-only': False, + }], + )) + iotests.log(vm.qmp( + 'blockdev-create', + job_id='job0', + options={ + 'driver': iotests.imgfmt, + 'file': 'img-file', + 'size': 1024 * 1024, + }, + )) + + # Should succeed and not time out + try: + vm.run_job('job0', wait=5.0) + vm.shutdown() + except asyncio.TimeoutError: + # VM may be stuck, kill it + vm.kill() + raise diff --git a/tests/qemu-iotests/tests/iothreads-create.out b/tests/qemu-iotests/tests/iothreads-create.out new file mode 100644 index 0000000000..5c974ff77e --- /dev/null +++ b/tests/qemu-iotests/tests/iothreads-create.out @@ -0,0 +1,4 @@ +{"return": {}} +{"return": {}} +{"execute": "job-dismiss", "arguments": {"id": "job0"}} +{"return": {}} diff --git a/tests/qemu-iotests/tests/iothreads-resize b/tests/qemu-iotests/tests/iothreads-resize new file mode 100755 index 0000000000..36e4598c62 --- /dev/null +++ b/tests/qemu-iotests/tests/iothreads-resize @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# group: rw auto quick +# +# Test resizing an image that is attached to a separate iothread +# +# Copyright (C) 2023 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>. +# + +# creator +owner=kwolf@redhat.com + +seq=`basename $0` +echo "QA output created by $seq" + +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +cd .. +. ./common.rc +. ./common.filter + +# Resizing images is only supported by a few block drivers +_supported_fmt raw qcow2 qed +_supported_proto file +_require_devices virtio-scsi-pci + +size=64M +_make_test_img $size + +qmp() { +cat <. +# + +# creator +owner=kwolf@redhat.com + +seq=`basename $0` +echo "QA output created by $seq" + +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +cd .. +. ./common.rc +. 
./common.filter + +_supported_fmt generic +_supported_proto generic +_unsupported_imgopts "subformat=streamOptimized" + +size=64M +_make_test_img $size + +echo +echo "creating pattern" +$QEMU_IO -c "write -P 1 32M 4k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "write -P 2 0 4k" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "read -P 1 32M 4k" "$TEST_IMG" | _filter_qemu_io + +echo +echo "checking image for errors" +_check_test_img + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/tests/regression-vhdx-log.out b/tests/qemu-iotests/tests/regression-vhdx-log.out new file mode 100644 index 0000000000..350c257354 --- /dev/null +++ b/tests/qemu-iotests/tests/regression-vhdx-log.out @@ -0,0 +1,14 @@ +QA output created by regression-vhdx-log +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 + +creating pattern +wrote 4096/4096 bytes at offset 33554432 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 33554432 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +checking image for errors +No errors were found on the image. +*** done diff --git a/tests/qemu-iotests/tests/zoned b/tests/qemu-iotests/tests/zoned new file mode 100755 index 0000000000..3d23ce9cc1 --- /dev/null +++ b/tests/qemu-iotests/tests/zoned @@ -0,0 +1,105 @@ +#!/usr/bin/env bash +# +# Test zone management operations. +# + +seq="$(basename $0)" +echo "QA output created by $seq" +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img + sudo -n rmmod null_blk +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +. ../common.rc +. ../common.filter +. ../common.qemu + +# This test only runs on Linux hosts with raw image files. 
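+# It also needs password-less sudo and the null_blk kernel module, which +# it loads and removes itself.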
+_supported_fmt raw +_supported_proto file +_supported_os Linux + +sudo -n true || \ + _notrun 'Password-less sudo required' + +IMG="--image-opts -n driver=host_device,filename=/dev/nullb0" +QEMU_IO_OPTIONS=$QEMU_IO_OPTIONS_NO_FMT + +echo "Testing a null_blk device:" +echo "case 1: if the operations work" +sudo -n modprobe null_blk nr_devices=1 zoned=1 +sudo -n chmod 0666 /dev/nullb0 + +echo "(1) report the first zone:" +$QEMU_IO $IMG -c "zrp 0 1" +echo +echo "report the first 10 zones" +$QEMU_IO $IMG -c "zrp 0 10" +echo +echo "report the last zone:" +$QEMU_IO $IMG -c "zrp 0x3e70000000 2" # 0x3e70000000 / 512 = 0x1f380000 +echo +echo +echo "(2) opening the first zone" +$QEMU_IO $IMG -c "zo 0 268435456" # 268435456 / 512 = 524288 +echo "report after:" +$QEMU_IO $IMG -c "zrp 0 1" +echo +echo "opening the second zone" +$QEMU_IO $IMG -c "zo 268435456 268435456" # +echo "report after:" +$QEMU_IO $IMG -c "zrp 268435456 1" +echo +echo "opening the last zone" +$QEMU_IO $IMG -c "zo 0x3e70000000 268435456" +echo "report after:" +$QEMU_IO $IMG -c "zrp 0x3e70000000 2" +echo +echo +echo "(3) closing the first zone" +$QEMU_IO $IMG -c "zc 0 268435456" +echo "report after:" +$QEMU_IO $IMG -c "zrp 0 1" +echo +echo "closing the last zone" +$QEMU_IO $IMG -c "zc 0x3e70000000 268435456" +echo "report after:" +$QEMU_IO $IMG -c "zrp 0x3e70000000 2" +echo +echo +echo "(4) finishing the second zone" +$QEMU_IO $IMG -c "zf 268435456 268435456" +echo "After finishing a zone:" +$QEMU_IO $IMG -c "zrp 268435456 1" +echo +echo +echo "(5) resetting the second zone" +$QEMU_IO $IMG -c "zrs 268435456 268435456" +echo "After resetting a zone:" +$QEMU_IO $IMG -c "zrp 268435456 1" +echo +echo +echo "(6) append write" # the physical block size of the device is 4096 +$QEMU_IO $IMG -c "zrp 0 1" +$QEMU_IO $IMG -c "zap -p 0 0x1000 0x2000" +echo "After appending the first zone firstly:" +$QEMU_IO $IMG -c "zrp 0 1" +$QEMU_IO $IMG -c "zap -p 0 0x1000 0x2000" +echo "After appending the first zone secondly:" +$QEMU_IO $IMG -c "zrp 0 1" +$QEMU_IO $IMG -c "zap -p 268435456 0x1000 0x2000" +echo "After appending the second zone firstly:" +$QEMU_IO $IMG -c "zrp 268435456 1" +$QEMU_IO $IMG -c "zap -p 268435456 0x1000 0x2000" +echo "After appending the second zone secondly:" +$QEMU_IO $IMG -c "zrp 268435456 1" + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/tests/zoned.out b/tests/qemu-iotests/tests/zoned.out new file mode 100644 index 0000000000..fe53ba4744 --- /dev/null +++ b/tests/qemu-iotests/tests/zoned.out @@ -0,0 +1,69 @@ +QA output created by zoned +Testing a null_blk device: +case 1: if the operations work +(1) report the first zone: +start: 0x0, len 0x80000, cap 0x80000, wptr 0x0, zcond:1, [type: 2] + +report the first 10 zones +start: 0x0, len 0x80000, cap 0x80000, wptr 0x0, zcond:1, [type: 2] +start: 0x80000, len 0x80000, cap 0x80000, wptr 0x80000, zcond:1, [type: 2] +start: 0x100000, len 0x80000, cap 0x80000, wptr 0x100000, zcond:1, [type: 2] +start: 0x180000, len 0x80000, cap 0x80000, wptr 0x180000, zcond:1, [type: 2] +start: 0x200000, len 0x80000, cap 0x80000, wptr 0x200000, zcond:1, [type: 2] +start: 0x280000, len 0x80000, cap 0x80000, wptr 0x280000, zcond:1, [type: 2] +start: 0x300000, len 0x80000, cap 0x80000, wptr 0x300000, zcond:1, [type: 2] +start: 0x380000, len 0x80000, cap 0x80000, wptr 0x380000, zcond:1, [type: 2] +start: 0x400000, len 0x80000, cap 0x80000, wptr 0x400000, zcond:1, [type: 2] +start: 0x480000, len 0x80000, cap 0x80000, wptr 0x480000, zcond:1, [type: 2] + 
+report the last zone: +start: 0x1f380000, len 0x80000, cap 0x80000, wptr 0x1f380000, zcond:1, [type: 2] + + +(2) opening the first zone +report after: +start: 0x0, len 0x80000, cap 0x80000, wptr 0x0, zcond:3, [type: 2] + +opening the second zone +report after: +start: 0x80000, len 0x80000, cap 0x80000, wptr 0x80000, zcond:3, [type: 2] + +opening the last zone +report after: +start: 0x1f380000, len 0x80000, cap 0x80000, wptr 0x1f380000, zcond:3, [type: 2] + + +(3) closing the first zone +report after: +start: 0x0, len 0x80000, cap 0x80000, wptr 0x0, zcond:1, [type: 2] + +closing the last zone +report after: +start: 0x1f380000, len 0x80000, cap 0x80000, wptr 0x1f380000, zcond:1, [type: 2] + + +(4) finishing the second zone +After finishing a zone: +start: 0x80000, len 0x80000, cap 0x80000, wptr 0x100000, zcond:14, [type: 2] + + +(5) resetting the second zone +After resetting a zone: +start: 0x80000, len 0x80000, cap 0x80000, wptr 0x80000, zcond:1, [type: 2] + + +(6) append write +start: 0x0, len 0x80000, cap 0x80000, wptr 0x0, zcond:1, [type: 2] +After zap done, the append sector is 0x0 +After appending the first zone firstly: +start: 0x0, len 0x80000, cap 0x80000, wptr 0x18, zcond:2, [type: 2] +After zap done, the append sector is 0x18 +After appending the first zone secondly: +start: 0x0, len 0x80000, cap 0x80000, wptr 0x30, zcond:2, [type: 2] +After zap done, the append sector is 0x80000 +After appending the second zone firstly: +start: 0x80000, len 0x80000, cap 0x80000, wptr 0x80018, zcond:2, [type: 2] +After zap done, the append sector is 0x80018 +After appending the second zone secondly: +start: 0x80000, len 0x80000, cap 0x80000, wptr 0x80030, zcond:2, [type: 2] +*** done diff --git a/tests/qtest/ac97-test.c b/tests/qtest/ac97-test.c index 74103efdfa..b71bd60a8a 100644 --- a/tests/qtest/ac97-test.c +++ b/tests/qtest/ac97-test.c @@ -42,16 +42,54 @@ static void *ac97_create(void *pci_bus, QGuestAllocator *alloc, void *addr) return &ac97->obj; } +/* + * This is rather a test of the audio subsystem and not an AC97 test. Test if + * the audio subsystem can handle a 44100/1 upsample ratio. For some time this + * used to trigger QEMU aborts. + */ +static void ac97_playback_upsample(void *obj, void *data, QGuestAllocator *alloc) +{ + QAC97 *ac97 = obj; + QPCIDevice *dev = &ac97->dev; + QPCIBar bar0; + + qpci_device_enable(dev); + bar0 = qpci_iomap(dev, 0, NULL); + /* IOBAR0 offset 0x2c: PCM Front DAC Rate */ + qpci_io_writew(dev, bar0, 0x2c, 0x1); +} + +/* + * This test is similar to the playback upsample test. QEMU shouldn't abort if + * asked for a 1/44100 downsample ratio. 
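+ * The ADC rate register write below requests 1 Hz capture against the + * 44100 Hz backend set up in ac97_register_nodes().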
+ */ +static void ac97_record_downsample(void *obj, void *data, QGuestAllocator *alloc) +{ + QAC97 *ac97 = obj; + QPCIDevice *dev = &ac97->dev; + QPCIBar bar0; + + qpci_device_enable(dev); + bar0 = qpci_iomap(dev, 0, NULL); + /* IOBAR0 offset 0x32: PCM L/R ADC Rate */ + qpci_io_writew(dev, bar0, 0x32, 0x1); +} + static void ac97_register_nodes(void) { QOSGraphEdgeOptions opts = { - .extra_device_opts = "addr=04.0", + .extra_device_opts = "addr=04.0,audiodev=snd0", + .after_cmd_line = "-audiodev none,id=snd0" + ",out.frequency=44100,in.frequency=44100", }; add_qpci_address(&opts, &(QPCIAddress) { .devfn = QPCI_DEVFN(4, 0) }); qos_node_create_driver("AC97", ac97_create); qos_node_produces("AC97", "pci-device"); qos_node_consumes("AC97", "pci-bus", &opts); + + qos_add_test("playback_upsample", "AC97", ac97_playback_upsample, NULL); + qos_add_test("record_downsample", "AC97", ac97_record_downsample, NULL); } libqos_init(ac97_register_nodes); diff --git a/tests/qtest/ahci-test.c b/tests/qtest/ahci-test.c index 1967cd5898..abab761c26 100644 --- a/tests/qtest/ahci-test.c +++ b/tests/qtest/ahci-test.c @@ -36,9 +36,6 @@ #include "hw/pci/pci_ids.h" #include "hw/pci/pci_regs.h" -/* TODO actually test the results and get rid of this */ -#define qmp_discard_response(s, ...) qobject_unref(qtest_qmp(s, __VA_ARGS__)) - /* Test images sizes in MB */ #define TEST_IMAGE_SIZE_MB_LARGE (200 * 1024) #define TEST_IMAGE_SIZE_MB_SMALL 64 @@ -1595,9 +1592,9 @@ static void test_atapi_tray(void) rsp = qtest_qmp_receive(ahci->parent->qts); qobject_unref(rsp); - qmp_discard_response(ahci->parent->qts, - "{'execute': 'blockdev-remove-medium', " - "'arguments': {'id': 'cd0'}}"); + qtest_qmp_assert_success(ahci->parent->qts, + "{'execute': 'blockdev-remove-medium', " + "'arguments': {'id': 'cd0'}}"); /* Test the tray without a medium */ ahci_atapi_load(ahci, port); @@ -1607,16 +1604,18 @@ static void test_atapi_tray(void) atapi_wait_tray(ahci, true); /* Re-insert media */ - qmp_discard_response(ahci->parent->qts, - "{'execute': 'blockdev-add', " - "'arguments': {'node-name': 'node0', " - "'driver': 'raw', " - "'file': { 'driver': 'file', " - "'filename': %s }}}", iso); - qmp_discard_response(ahci->parent->qts, - "{'execute': 'blockdev-insert-medium'," - "'arguments': { 'id': 'cd0', " - "'node-name': 'node0' }}"); + qtest_qmp_assert_success( + ahci->parent->qts, + "{'execute': 'blockdev-add', " + "'arguments': {'node-name': 'node0', " + "'driver': 'raw', " + "'file': { 'driver': 'file', " + "'filename': %s }}}", iso); + qtest_qmp_assert_success( + ahci->parent->qts, + "{'execute': 'blockdev-insert-medium'," + "'arguments': { 'id': 'cd0', " + "'node-name': 'node0' }}"); /* Again, the event shows up first */ qtest_qmp_send(ahci->parent->qts, "{'execute': 'blockdev-close-tray', " diff --git a/tests/qtest/arm-cpu-features.c b/tests/qtest/arm-cpu-features.c index 1cb08138ad..3fc33fc24d 100644 --- a/tests/qtest/arm-cpu-features.c +++ b/tests/qtest/arm-cpu-features.c @@ -506,9 +506,23 @@ static void test_query_cpu_model_expansion_kvm(const void *data) QDict *resp; char *error; - assert_error(qts, "cortex-a15", - "We cannot guarantee the CPU type 'cortex-a15' works " - "with KVM on this host", NULL); + /* + * When using KVM, only the 'host' and 'max' CPU models are + * supported. Test that we're emitting a suitable error for + * unsupported CPU models. 
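+ * Which message we get depends on the build: with TCG compiled in, the + * model exists but cannot be used with KVM; a KVM-only build does not + * register the 32-bit models at all.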
+ */ + if (qtest_has_accel("tcg")) { + assert_error(qts, "cortex-a7", + "We cannot guarantee the CPU type 'cortex-a7' works " + "with KVM on this host", NULL); + } else { + /* + * With a KVM-only build the 32-bit CPUs are not present. + */ + assert_error(qts, "cortex-a7", + "The CPU type 'cortex-a7' is not a " + "recognized ARM CPU type", NULL); + } assert_has_feature_enabled(qts, "host", "aarch64"); diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c index d29a4e47af..ed1c69cf01 100644 --- a/tests/qtest/bios-tables-test.c +++ b/tests/qtest/bios-tables-test.c @@ -438,10 +438,9 @@ static void test_acpi_asl(test_data *data) { int i; AcpiSdtTable *sdt, *exp_sdt; - test_data exp_data; + test_data exp_data = {}; gboolean exp_err, err, all_tables_match = true; - memset(&exp_data, 0, sizeof(exp_data)); exp_data.tables = load_expected_aml(data); dump_aml_files(data, false); for (i = 0; i < data->tables->len; ++i) { @@ -853,12 +852,11 @@ static uint8_t base_required_struct_types[] = { static void test_acpi_piix4_tcg(void) { - test_data data; + test_data data = {}; /* Supplying -machine accel argument overrides the default (qtest). * This is to make guest actually run. */ - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.required_struct_types = base_required_struct_types; data.required_struct_types_len = ARRAY_SIZE(base_required_struct_types); @@ -868,9 +866,8 @@ static void test_acpi_piix4_tcg(void) static void test_acpi_piix4_tcg_bridge(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".bridge"; data.required_struct_types = base_required_struct_types; @@ -906,9 +903,8 @@ static void test_acpi_piix4_tcg_bridge(void) static void test_acpi_piix4_no_root_hotplug(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".roothp"; data.required_struct_types = base_required_struct_types; @@ -923,9 +919,8 @@ static void test_acpi_piix4_no_root_hotplug(void) static void test_acpi_piix4_no_bridge_hotplug(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".hpbridge"; data.required_struct_types = base_required_struct_types; @@ -940,26 +935,29 @@ static void test_acpi_piix4_no_bridge_hotplug(void) static void test_acpi_piix4_no_acpi_pci_hotplug(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".hpbrroot"; data.required_struct_types = base_required_struct_types; data.required_struct_types_len = ARRAY_SIZE(base_required_struct_types); test_acpi_one("-global PIIX4_PM.acpi-root-pci-hotplug=off " "-global PIIX4_PM.acpi-pci-hotplug-with-bridge-support=off " - "-device pci-bridge,chassis_nr=1 " - "-device pci-testdev,bus=pci.0 " - "-device pci-testdev,bus=pci.1", &data); + "-device pci-bridge,chassis_nr=1,addr=4.0 " + "-device pci-testdev,bus=pci.0,addr=5.0 " + "-device pci-testdev,bus=pci.0,addr=6.0,acpi-index=101 " + "-device pci-testdev,bus=pci.1,addr=1.0 " + "-device pci-testdev,bus=pci.1,addr=2.0,acpi-index=201 " + "-device pci-bridge,id=nhpbr,chassis_nr=2,shpc=off,addr=7.0 " + "-device pci-testdev,bus=nhpbr,addr=1.0,acpi-index=301 " + , &data); free_test_data(&data); } static void test_acpi_q35_tcg(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.required_struct_types = base_required_struct_types; 
data.required_struct_types_len = ARRAY_SIZE(base_required_struct_types); @@ -989,9 +987,8 @@ static void test_acpi_q35_tcg_core_count2(void) static void test_acpi_q35_tcg_bridge(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".bridge"; data.required_struct_types = base_required_struct_types; @@ -1002,18 +999,41 @@ static void test_acpi_q35_tcg_bridge(void) free_test_data(&data); } +static void test_acpi_q35_tcg_no_acpi_hotplug(void) +{ + test_data data = {}; + + data.machine = MACHINE_Q35; + data.variant = ".noacpihp"; + data.required_struct_types = base_required_struct_types; + data.required_struct_types_len = ARRAY_SIZE(base_required_struct_types); + test_acpi_one("-global ICH9-LPC.acpi-pci-hotplug-with-bridge-support=off" + " -device pci-testdev,bus=pcie.0,acpi-index=101,addr=3.0" + " -device pci-bridge,chassis_nr=1,id=shpcbr,addr=4.0" + " -device pci-testdev,bus=shpcbr,addr=1.0,acpi-index=201" + " -device pci-bridge,chassis_nr=2,shpc=off,id=noshpcbr,addr=5.0" + " -device pci-testdev,bus=noshpcbr,addr=1.0,acpi-index=301" + " -device pcie-root-port,id=hprp,port=0x0,chassis=1,addr=6.0" + " -device pci-testdev,bus=hprp,acpi-index=401" + " -device pcie-root-port,id=nohprp,port=0x0,chassis=2,hotplug=off," + "addr=7.0" + " -device pci-testdev,bus=nohprp,acpi-index=501" + " -device pcie-root-port,id=nohprpint,port=0x0,chassis=3,hotplug=off," + "multifunction=on,addr=8.0" + " -device pci-testdev,bus=nohprpint,acpi-index=601,addr=8.1" + " -device pcie-root-port,id=hprp2,port=0x0,chassis=4,bus=nohprpint," + "addr=9.0" + " -device pci-testdev,bus=hprp2,acpi-index=602" + , &data); + free_test_data(&data); +} + static void test_acpi_q35_multif_bridge(void) { test_data data = { .machine = MACHINE_Q35, .variant = ".multi-bridge", }; - - if (!qtest_has_device("pcie-root-port")) { - g_test_skip("Device pcie-root-port is not available"); - goto out; - } - test_vm_prepare("-S" " -device virtio-balloon,id=balloon0,addr=0x4.0x2" " -device pcie-root-port,id=rp0,multifunction=on," @@ -1025,9 +1045,14 @@ static void test_acpi_q35_multif_bridge(void) " -device pcie-root-port,id=rphptgt2,port=0x0,chassis=6,addr=2.2" " -device pcie-root-port,id=rphptgt3,port=0x0,chassis=7,addr=2.3" " -device pci-testdev,bus=pcie.0,addr=2.4" + " -device pci-testdev,bus=pcie.0,addr=2.5,acpi-index=102" " -device pci-testdev,bus=pcie.0,addr=5.0" + " -device pci-testdev,bus=pcie.0,addr=0xf.0,acpi-index=101" " -device pci-testdev,bus=rp0,addr=0.0" - " -device pci-testdev,bus=br1", &data); + " -device pci-testdev,bus=br1" + " -device pcie-root-port,id=rpnohp,chassis=8,addr=0xA.0,hotplug=off" + " -device pcie-root-port,id=rp3,chassis=9,bus=rpnohp" + , &data); /* hotplugged bridges section */ qtest_qmp_device_add(data.qts, "pci-bridge", "hpbr1", @@ -1049,7 +1074,6 @@ static void test_acpi_q35_multif_bridge(void) /* check that reboot/reset doesn't change any ACPI tables */ qtest_qmp_send(data.qts, "{'execute':'system_reset' }"); process_acpi_tables(&data); -out: free_test_data(&data); } @@ -1072,9 +1096,8 @@ static void test_acpi_q35_tcg_mmio64(void) static void test_acpi_piix4_tcg_cphp(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".cphp"; test_acpi_one("-smp 2,cores=3,sockets=2,maxcpus=6" @@ -1088,9 +1111,8 @@ static void test_acpi_piix4_tcg_cphp(void) static void test_acpi_q35_tcg_cphp(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = 
MACHINE_Q35; data.variant = ".cphp"; test_acpi_one(" -smp 2,cores=3,sockets=2,maxcpus=6" @@ -1108,9 +1130,8 @@ static uint8_t ipmi_required_struct_types[] = { static void test_acpi_q35_tcg_ipmi(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".ipmibt"; data.required_struct_types = ipmi_required_struct_types; @@ -1123,9 +1144,8 @@ static void test_acpi_q35_tcg_ipmi(void) static void test_acpi_q35_tcg_smbus_ipmi(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".ipmismbus"; data.required_struct_types = ipmi_required_struct_types; @@ -1138,12 +1158,11 @@ static void test_acpi_q35_tcg_smbus_ipmi(void) static void test_acpi_piix4_tcg_ipmi(void) { - test_data data; + test_data data = {}; /* Supplying -machine accel argument overrides the default (qtest). * This is to make guest actually run. */ - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".ipmikcs"; data.required_struct_types = ipmi_required_struct_types; @@ -1156,9 +1175,8 @@ static void test_acpi_piix4_tcg_ipmi(void) static void test_acpi_q35_tcg_memhp(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".memhp"; test_acpi_one(" -m 128,slots=3,maxmem=1G" @@ -1172,9 +1190,8 @@ static void test_acpi_q35_tcg_memhp(void) static void test_acpi_piix4_tcg_memhp(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".memhp"; test_acpi_one(" -m 128,slots=3,maxmem=1G" @@ -1188,9 +1205,8 @@ static void test_acpi_piix4_tcg_memhp(void) static void test_acpi_piix4_tcg_nosmm(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".nosmm"; test_acpi_one("-machine smm=off", &data); @@ -1199,9 +1215,8 @@ static void test_acpi_piix4_tcg_nosmm(void) static void test_acpi_piix4_tcg_smm_compat(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".smm-compat"; test_acpi_one("-global PIIX4_PM.smm-compat=on", &data); @@ -1210,9 +1225,8 @@ static void test_acpi_piix4_tcg_smm_compat(void) static void test_acpi_piix4_tcg_smm_compat_nosmm(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".smm-compat-nosmm"; test_acpi_one("-global PIIX4_PM.smm-compat=on -machine smm=off", &data); @@ -1221,9 +1235,8 @@ static void test_acpi_piix4_tcg_smm_compat_nosmm(void) static void test_acpi_piix4_tcg_nohpet(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.machine_param = ",hpet=off"; data.variant = ".nohpet"; @@ -1233,9 +1246,8 @@ static void test_acpi_piix4_tcg_nohpet(void) static void test_acpi_q35_tcg_numamem(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".numamem"; test_acpi_one(" -object memory-backend-ram,id=ram0,size=128M" @@ -1245,9 +1257,8 @@ static void test_acpi_q35_tcg_numamem(void) static void test_acpi_q35_kvm_xapic(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".xapic"; test_acpi_one(" -object memory-backend-ram,id=ram0,size=128M" @@ -1258,9 +1269,8 @@ static void test_acpi_q35_kvm_xapic(void) static void 
test_acpi_q35_tcg_nosmm(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".nosmm"; test_acpi_one("-machine smm=off", &data); @@ -1269,9 +1279,8 @@ static void test_acpi_q35_tcg_nosmm(void) static void test_acpi_q35_tcg_smm_compat(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".smm-compat"; test_acpi_one("-global ICH9-LPC.smm-compat=on", &data); @@ -1280,9 +1289,8 @@ static void test_acpi_q35_tcg_smm_compat(void) static void test_acpi_q35_tcg_smm_compat_nosmm(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".smm-compat-nosmm"; test_acpi_one("-global ICH9-LPC.smm-compat=on -machine smm=off", &data); @@ -1291,9 +1299,8 @@ static void test_acpi_q35_tcg_smm_compat_nosmm(void) static void test_acpi_q35_tcg_nohpet(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.machine_param = ",hpet=off"; data.variant = ".nohpet"; @@ -1303,9 +1310,8 @@ static void test_acpi_q35_tcg_nohpet(void) static void test_acpi_q35_kvm_dmar(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".dmar"; test_acpi_one("-machine kernel-irqchip=split -accel kvm" @@ -1315,9 +1321,8 @@ static void test_acpi_q35_kvm_dmar(void) static void test_acpi_q35_tcg_ivrs(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".ivrs"; data.tcg_only = true, @@ -1327,9 +1332,8 @@ static void test_acpi_q35_tcg_ivrs(void) static void test_acpi_piix4_tcg_numamem(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".numamem"; test_acpi_one(" -object memory-backend-ram,id=ram0,size=128M" @@ -1346,7 +1350,7 @@ static void test_acpi_tcg_tpm(const char *machine, const char *tpm_if, machine, tpm_if); char *tmp_path = g_dir_make_tmp(tmp_dir_name, NULL); TPMTestState test; - test_data data; + test_data data = {}; GThread *thread; const char *suffix = tpm_version == TPM_VERSION_2_0 ? 
"tpm2" : "tpm12"; char *args, *variant = g_strdup_printf(".%s.%s", tpm_if, suffix); @@ -1366,7 +1370,6 @@ static void test_acpi_tcg_tpm(const char *machine, const char *tpm_if, thread = g_thread_new(NULL, tpm_emu_ctrl_thread, &test); tpm_emu_test_wait_cond(&test); - memset(&data, 0, sizeof(data)); data.machine = machine; data.variant = variant; @@ -1401,14 +1404,8 @@ static void test_acpi_q35_tcg_tpm12_tis(void) static void test_acpi_tcg_dimm_pxm(const char *machine) { - test_data data; + test_data data = {}; - if (!qtest_has_device("nvdimm")) { - g_test_skip("Device nvdimm is not available"); - return; - } - - memset(&data, 0, sizeof(data)); data.machine = machine; data.variant = ".dimmpxm"; test_acpi_one(" -machine nvdimm=on,nvdimm-persistence=cpu" @@ -1456,11 +1453,6 @@ static void test_acpi_virt_tcg_memhp(void) .scan_len = 256ULL * 1024 * 1024, }; - if (!qtest_has_device("nvdimm")) { - g_test_skip("Device nvdimm is not available"); - goto out; - } - data.variant = ".memhp"; test_acpi_one(" -machine nvdimm=on" " -cpu cortex-a57" @@ -1474,14 +1466,13 @@ static void test_acpi_virt_tcg_memhp(void) " -device pc-dimm,id=dimm0,memdev=ram2,node=0" " -device nvdimm,id=dimm1,memdev=nvm0,node=1", &data); -out: + free_test_data(&data); } static void test_acpi_microvm_prepare(test_data *data) { - memset(data, 0, sizeof(*data)); data->machine = "microvm"; data->required_struct_types = NULL; /* no smbios */ data->required_struct_types_len = 0; @@ -1490,12 +1481,7 @@ static void test_acpi_microvm_prepare(test_data *data) static void test_acpi_microvm_tcg(void) { - test_data data; - - if (!qtest_has_device("virtio-blk-device")) { - g_test_skip("Device virtio-blk-device is not available"); - return; - } + test_data data = {}; test_acpi_microvm_prepare(&data); test_acpi_one(" -machine microvm,acpi=on,ioapic2=off,rtc=off", @@ -1505,12 +1491,7 @@ static void test_acpi_microvm_tcg(void) static void test_acpi_microvm_usb_tcg(void) { - test_data data; - - if (!qtest_has_device("virtio-blk-device")) { - g_test_skip("Device virtio-blk-device is not available"); - return; - } + test_data data = {}; test_acpi_microvm_prepare(&data); data.variant = ".usb"; @@ -1521,12 +1502,7 @@ static void test_acpi_microvm_usb_tcg(void) static void test_acpi_microvm_rtc_tcg(void) { - test_data data; - - if (!qtest_has_device("virtio-blk-device")) { - g_test_skip("Device virtio-blk-device is not available"); - return; - } + test_data data = {}; test_acpi_microvm_prepare(&data); data.variant = ".rtc"; @@ -1537,12 +1513,7 @@ static void test_acpi_microvm_rtc_tcg(void) static void test_acpi_microvm_pcie_tcg(void) { - test_data data; - - if (!qtest_has_device("virtio-blk-device")) { - g_test_skip("Device virtio-blk-device is not available"); - return; - } + test_data data = {}; test_acpi_microvm_prepare(&data); data.variant = ".pcie"; @@ -1554,12 +1525,7 @@ static void test_acpi_microvm_pcie_tcg(void) static void test_acpi_microvm_ioapic2_tcg(void) { - test_data data; - - if (!qtest_has_device("virtio-blk-device")) { - g_test_skip("Device virtio-blk-device is not available"); - return; - } + test_data data = {}; test_acpi_microvm_prepare(&data); data.variant = ".ioapic2"; @@ -1600,12 +1566,6 @@ static void test_acpi_virt_tcg_pxb(void) .ram_start = 0x40000000ULL, .scan_len = 128ULL * 1024 * 1024, }; - - if (!qtest_has_device("pcie-root-port")) { - g_test_skip("Device pcie-root-port is not available"); - goto out; - } - /* * While using -cdrom, the cdrom would auto plugged into pxb-pcie, * the reason is the bus of pxb-pcie is also root 
bus, it would lead @@ -1624,15 +1584,14 @@ static void test_acpi_virt_tcg_pxb(void) " -cpu cortex-a57" " -device pxb-pcie,bus_nr=128", &data); -out: + free_test_data(&data); } static void test_acpi_tcg_acpi_hmat(const char *machine) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = machine; data.variant = ".acpihmat"; test_acpi_one(" -machine hmat=on" @@ -1687,9 +1646,9 @@ static void test_acpi_virt_tcg_acpi_hmat(void) test_acpi_one(" -machine hmat=on" " -cpu cortex-a57" " -smp 4,sockets=2" - " -m 256M" - " -object memory-backend-ram,size=64M,id=ram0" - " -object memory-backend-ram,size=64M,id=ram1" + " -m 384M" + " -object memory-backend-ram,size=128M,id=ram0" + " -object memory-backend-ram,size=128M,id=ram1" " -object memory-backend-ram,size=128M,id=ram2" " -numa node,nodeid=0,memdev=ram0" " -numa node,nodeid=1,memdev=ram1" @@ -1729,9 +1688,8 @@ static void test_acpi_virt_tcg_acpi_hmat(void) static void test_acpi_q35_tcg_acpi_hmat_noinitiator(void) { - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.variant = ".acpihmat-noinitiator"; test_acpi_one(" -machine hmat=on" @@ -1780,9 +1738,8 @@ static void test_acpi_erst(const char *machine) { gchar *tmp_path = g_dir_make_tmp("qemu-test-erst.XXXXXX", NULL); gchar *params; - test_data data; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = machine; data.variant = ".acpierst"; params = g_strdup_printf( @@ -1810,13 +1767,7 @@ static void test_acpi_microvm_acpi_erst(void) { gchar *tmp_path = g_dir_make_tmp("qemu-test-erst.XXXXXX", NULL); gchar *params; - test_data data; - - if (!qtest_has_device("virtio-blk-device")) { - g_test_skip("Device virtio-blk-device is not available"); - g_free(tmp_path); - return; - } + test_data data = {}; test_acpi_microvm_prepare(&data); data.variant = ".pcie"; @@ -1878,11 +1829,6 @@ static void test_acpi_q35_viot(void) .variant = ".viot", }; - if (!qtest_has_device("virtio-iommu")) { - g_test_skip("Device virtio-iommu is not available"); - goto out; - } - /* * To keep things interesting, two buses bypass the IOMMU. * VIOT should only describe the other two buses.
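The hunks above and below all make the same mechanical change: the memset(&data, 0, sizeof(data)) call is dropped in favour of declaring the variable as test_data data = {};. A minimal self-contained sketch of why the two spellings are interchangeable — demo_data here is a hypothetical stand-in for test_data, not part of the patch, and the empty initializer list is a GNU C extension (standardized in C23) that these hunks themselves rely on:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    typedef struct {
        const char *machine;           /* stands in for test_data's pointer members */
        int required_struct_types_len; /* stands in for its integer members */
    } demo_data;

    int main(void)
    {
        demo_data a = {};          /* the form the patch switches to */
        demo_data b;
        memset(&b, 0, sizeof(b));  /* the form the patch removes */

        /* Both forms leave every member zeroed, so the tests can rely on
         * NULL pointers and zero lengths either way. */
        assert(a.machine == NULL && b.machine == NULL);
        assert(a.required_struct_types_len == 0 && b.required_struct_types_len == 0);
        return 0;
    }

Initializing at the point of declaration also closes the window in which an early exit path could read the struct before the memset had run, which pairs naturally with this patch's removal of the per-test device-availability checks.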
@@ -1893,7 +1839,6 @@ static void test_acpi_q35_viot(void) "-device pxb-pcie,bus_nr=0x20,id=pcie.200,bus=pcie.0,bypass_iommu=on " "-device pxb-pcie,bus_nr=0x30,id=pcie.300,bus=pcie.0", &data); -out: free_test_data(&data); } @@ -1922,13 +1867,13 @@ static void test_acpi_q35_cxl(void) " -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1" " -device pxb-cxl,bus_nr=222,bus=pcie.0,id=cxl.2" " -device cxl-rp,port=0,bus=cxl.1,id=rp1,chassis=0,slot=2" - " -device cxl-type3,bus=rp1,memdev=cxl-mem1,lsa=lsa1" + " -device cxl-type3,bus=rp1,persistent-memdev=cxl-mem1,lsa=lsa1" " -device cxl-rp,port=1,bus=cxl.1,id=rp2,chassis=0,slot=3" - " -device cxl-type3,bus=rp2,memdev=cxl-mem2,lsa=lsa2" + " -device cxl-type3,bus=rp2,persistent-memdev=cxl-mem2,lsa=lsa2" " -device cxl-rp,port=0,bus=cxl.2,id=rp3,chassis=0,slot=5" - " -device cxl-type3,bus=rp3,memdev=cxl-mem3,lsa=lsa3" + " -device cxl-type3,bus=rp3,persistent-memdev=cxl-mem3,lsa=lsa3" " -device cxl-rp,port=1,bus=cxl.2,id=rp4,chassis=0,slot=6" - " -device cxl-type3,bus=rp4,memdev=cxl-mem4,lsa=lsa4" + " -device cxl-type3,bus=rp4,persistent-memdev=cxl-mem4,lsa=lsa4" " -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=8k," "cxl-fmw.1.targets.0=cxl.1,cxl-fmw.1.targets.1=cxl.2,cxl-fmw.1.size=4G,cxl-fmw.1.interleave-granularity=8k", tmp_path, tmp_path, tmp_path, tmp_path, @@ -1954,10 +1899,8 @@ static void test_acpi_virt_viot(void) .scan_len = 128ULL * 1024 * 1024, }; - if (qtest_has_device("virtio-iommu")) { - test_acpi_one("-cpu cortex-a57 " - "-device virtio-iommu-pci", &data); - } + test_acpi_one("-cpu cortex-a57 " + "-device virtio-iommu-pci", &data); free_test_data(&data); } @@ -2025,10 +1968,9 @@ static void test_oem_fields(test_data *data) static void test_acpi_piix4_oem_fields(void) { - test_data data; char *args; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.required_struct_types = base_required_struct_types; data.required_struct_types_len = ARRAY_SIZE(base_required_struct_types); @@ -2044,10 +1986,9 @@ static void test_acpi_piix4_oem_fields(void) static void test_acpi_q35_oem_fields(void) { - test_data data; char *args; + test_data data = {}; - memset(&data, 0, sizeof(data)); data.machine = MACHINE_Q35; data.required_struct_types = base_required_struct_types; data.required_struct_types_len = ARRAY_SIZE(base_required_struct_types); @@ -2063,14 +2004,9 @@ static void test_acpi_q35_oem_fields(void) static void test_acpi_microvm_oem_fields(void) { - test_data data; + test_data data = {}; char *args; - if (!qtest_has_device("virtio-blk-device")) { - g_test_skip("Device virtio-blk-device is not available"); - return; - } - test_acpi_microvm_prepare(&data); args = test_acpi_create_args(&data, @@ -2109,8 +2045,7 @@ static void test_acpi_virt_oem_fields(void) int main(int argc, char *argv[]) { const char *arch = qtest_get_arch(); - const bool has_kvm = qtest_has_accel("kvm"); - const bool has_tcg = qtest_has_accel("tcg"); + bool has_kvm, has_tcg; char *v_env = getenv("V"); int ret; @@ -2120,6 +2055,14 @@ int main(int argc, char *argv[]) g_test_init(&argc, &argv, NULL); + has_kvm = qtest_has_accel("kvm"); + has_tcg = qtest_has_accel("tcg"); + + if (!has_tcg && !has_kvm) { + g_test_skip("No KVM or TCG accelerator available"); + return 0; + } + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { ret = boot_sector_init(disk); if (ret) { @@ -2161,6 +2104,8 @@ int main(int argc, char *argv[]) test_acpi_q35_tcg_tpm12_tis); } qtest_add_func("acpi/q35/bridge", test_acpi_q35_tcg_bridge); + 
qtest_add_func("acpi/q35/no-acpi-hotplug", + test_acpi_q35_tcg_no_acpi_hotplug); qtest_add_func("acpi/q35/multif-bridge", test_acpi_q35_multif_bridge); qtest_add_func("acpi/q35/mmio64", test_acpi_q35_tcg_mmio64); @@ -2219,7 +2164,7 @@ int main(int argc, char *argv[]) } } } else if (strcmp(arch, "aarch64") == 0) { - if (has_tcg) { + if (has_tcg && qtest_has_device("virtio-blk-pci")) { qtest_add_func("acpi/virt", test_acpi_virt_tcg); qtest_add_func("acpi/virt/acpihmatvirt", test_acpi_virt_tcg_acpi_hmat); diff --git a/tests/qtest/boot-order-test.c b/tests/qtest/boot-order-test.c index 0680d79d6d..8f2b6ef05a 100644 --- a/tests/qtest/boot-order-test.c +++ b/tests/qtest/boot-order-test.c @@ -16,9 +16,6 @@ #include "qapi/qmp/qdict.h" #include "standard-headers/linux/qemu_fw_cfg.h" -/* TODO actually test the results and get rid of this */ -#define qmp_discard_response(qs, ...) qobject_unref(qtest_qmp(qs, __VA_ARGS__)) - typedef struct { const char *args; uint64_t expected_boot; @@ -43,7 +40,7 @@ static void test_a_boot_order(const char *machine, machine ?: "", test_args); actual = read_boot_order(qts); g_assert_cmphex(actual, ==, expected_boot); - qmp_discard_response(qts, "{ 'execute': 'system_reset' }"); + qtest_qmp_assert_success(qts, "{ 'execute': 'system_reset' }"); /* * system_reset only requests reset. We get a RESET event after * the actual reset completes. Need to wait for that. diff --git a/tests/qtest/boot-serial-test.c b/tests/qtest/boot-serial-test.c index 3aef3a97a9..6dd06aeaf4 100644 --- a/tests/qtest/boot-serial-test.c +++ b/tests/qtest/boot-serial-test.c @@ -287,6 +287,11 @@ int main(int argc, char *argv[]) g_test_init(&argc, &argv, NULL); + if (!qtest_has_accel("tcg") && !qtest_has_accel("kvm")) { + g_test_skip("No KVM or TCG accelerator available"); + return 0; + } + for (i = 0; tests[i].arch != NULL; i++) { if (g_str_equal(arch, tests[i].arch) && qtest_has_machine(tests[i].machine)) { diff --git a/tests/qtest/cdrom-test.c b/tests/qtest/cdrom-test.c index 26a2400181..f2a8d91929 100644 --- a/tests/qtest/cdrom-test.c +++ b/tests/qtest/cdrom-test.c @@ -17,7 +17,7 @@ static char isoimage[] = "cdrom-boot-iso-XXXXXX"; -static int exec_genisoimg(const char **args) +static int exec_xorrisofs(const char **args) { gchar *out_err = NULL; gint exit_status = -1; @@ -43,7 +43,7 @@ static int prepare_image(const char *arch, char *isoimage) char *codefile = NULL; int ifh, ret = -1; const char *args[] = { - "genisoimage", "-quiet", "-l", "-no-emul-boot", + "xorrisofs", "-quiet", "-l", "-no-emul-boot", "-b", NULL, "-o", isoimage, srcdir, NULL }; @@ -75,9 +75,9 @@ static int prepare_image(const char *arch, char *isoimage) } args[5] = strchr(codefile, '/') + 1; - ret = exec_genisoimg(args); + ret = exec_xorrisofs(args); if (ret) { - fprintf(stderr, "genisoimage failed: %i\n", ret); + fprintf(stderr, "xorrisofs failed: %i\n", ret); } unlink(codefile); @@ -130,10 +130,18 @@ static void test_cdboot(gconstpointer data) static void add_x86_tests(void) { + if (!qtest_has_accel("tcg") && !qtest_has_accel("kvm")) { + g_test_skip("No KVM or TCG accelerator available, skipping boot tests"); + return; + } + qtest_add_data_func("cdrom/boot/default", "-cdrom ", test_cdboot); - qtest_add_data_func("cdrom/boot/virtio-scsi", - "-device virtio-scsi -device scsi-cd,drive=cdr " - "-blockdev file,node-name=cdr,filename=", test_cdboot); + if (qtest_has_device("virtio-scsi-ccw")) { + qtest_add_data_func("cdrom/boot/virtio-scsi", + "-device virtio-scsi -device scsi-cd,drive=cdr " + "-blockdev 
file,node-name=cdr,filename=", + test_cdboot); + } /* * Unstable CI test under load * See https://lists.gnu.org/archive/html/qemu-devel/2019-02/msg05509.html @@ -176,7 +184,19 @@ static void add_x86_tests(void) static void add_s390x_tests(void) { + if (!qtest_has_accel("tcg") && !qtest_has_accel("kvm")) { + g_test_skip("No KVM or TCG accelerator available, skipping boot tests"); + } + if (!qtest_has_device("virtio-blk-ccw")) { + return; + } + qtest_add_data_func("cdrom/boot/default", "-cdrom ", test_cdboot); + + if (!qtest_has_device("virtio-scsi-ccw")) { + return; + } + qtest_add_data_func("cdrom/boot/virtio-scsi", "-device virtio-scsi -device scsi-cd,drive=cdr " "-blockdev file,node-name=cdr,filename=", test_cdboot); @@ -201,12 +221,12 @@ int main(int argc, char **argv) { int ret; const char *arch = qtest_get_arch(); - const char *genisocheck[] = { "genisoimage", "-version", NULL }; + const char *xorrisocheck[] = { "xorrisofs", "-version", NULL }; g_test_init(&argc, &argv, NULL); - if (exec_genisoimg(genisocheck)) { - /* genisoimage not available - so can't run tests */ + if (exec_xorrisofs(xorrisocheck)) { + /* xorrisofs not available - so can't run tests */ return g_test_run(); } @@ -244,9 +264,13 @@ int main(int argc, char **argv) const char *armmachines[] = { "realview-eb", "realview-eb-mpcore", "realview-pb-a8", "realview-pbx-a9", "versatileab", "versatilepb", "vexpress-a15", - "vexpress-a9", "virt", NULL + "vexpress-a9", NULL }; add_cdrom_param_tests(armmachines); + if (qtest_has_device("virtio-blk-pci")) { + const char *virtmachine[] = { "virt", NULL }; + add_cdrom_param_tests(virtmachine); + } } else { const char *nonemachine[] = { "none", NULL }; add_cdrom_param_tests(nonemachine); diff --git a/tests/qtest/cxl-test.c b/tests/qtest/cxl-test.c index 61f25a72b6..edcad4a0ce 100644 --- a/tests/qtest/cxl-test.c +++ b/tests/qtest/cxl-test.c @@ -8,50 +8,72 @@ #include "qemu/osdep.h" #include "libqtest-single.h" -#define QEMU_PXB_CMD "-machine q35,cxl=on " \ - "-device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 " \ - "-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.size=4G " +#define QEMU_PXB_CMD \ + "-machine q35,cxl=on " \ + "-device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 " \ + "-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.size=4G " -#define QEMU_2PXB_CMD "-machine q35,cxl=on " \ - "-device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 " \ - "-device pxb-cxl,id=cxl.1,bus=pcie.0,bus_nr=53 " \ - "-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=4G " +#define QEMU_2PXB_CMD \ + "-machine q35,cxl=on " \ + "-device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 " \ + "-device pxb-cxl,id=cxl.1,bus=pcie.0,bus_nr=53 " \ + "-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=4G " -#define QEMU_RP "-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 " +#define QEMU_RP \ + "-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 " /* Dual ports on first pxb */ -#define QEMU_2RP "-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 " \ - "-device cxl-rp,id=rp1,bus=cxl.0,chassis=0,slot=1 " +#define QEMU_2RP \ + "-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 " \ + "-device cxl-rp,id=rp1,bus=cxl.0,chassis=0,slot=1 " /* Dual ports on each of the pxb instances */ -#define QEMU_4RP "-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 " \ - "-device cxl-rp,id=rp1,bus=cxl.0,chassis=0,slot=1 " \ - "-device cxl-rp,id=rp2,bus=cxl.1,chassis=0,slot=2 " \ - "-device cxl-rp,id=rp3,bus=cxl.1,chassis=0,slot=3 " +#define QEMU_4RP \ + "-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 " \ + "-device 
cxl-rp,id=rp1,bus=cxl.0,chassis=0,slot=1 " \ + "-device cxl-rp,id=rp2,bus=cxl.1,chassis=0,slot=2 " \ + "-device cxl-rp,id=rp3,bus=cxl.1,chassis=0,slot=3 " -#define QEMU_T3D "-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \ - "-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \ - "-device cxl-type3,bus=rp0,memdev=cxl-mem0,lsa=lsa0,id=cxl-pmem0 " +#define QEMU_T3D_DEPRECATED \ + "-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp0,memdev=cxl-mem0,lsa=lsa0,id=cxl-pmem0 " -#define QEMU_2T3D "-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \ - "-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \ - "-device cxl-type3,bus=rp0,memdev=cxl-mem0,lsa=lsa0,id=cxl-pmem0 " \ - "-object memory-backend-file,id=cxl-mem1,mem-path=%s,size=256M " \ - "-object memory-backend-file,id=lsa1,mem-path=%s,size=256M " \ - "-device cxl-type3,bus=rp1,memdev=cxl-mem1,lsa=lsa1,id=cxl-pmem1 " +#define QEMU_T3D_PMEM \ + "-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp0,persistent-memdev=cxl-mem0,lsa=lsa0,id=pmem0 " -#define QEMU_4T3D "-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \ - "-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \ - "-device cxl-type3,bus=rp0,memdev=cxl-mem0,lsa=lsa0,id=cxl-pmem0 " \ - "-object memory-backend-file,id=cxl-mem1,mem-path=%s,size=256M " \ - "-object memory-backend-file,id=lsa1,mem-path=%s,size=256M " \ - "-device cxl-type3,bus=rp1,memdev=cxl-mem1,lsa=lsa1,id=cxl-pmem1 " \ - "-object memory-backend-file,id=cxl-mem2,mem-path=%s,size=256M " \ - "-object memory-backend-file,id=lsa2,mem-path=%s,size=256M " \ - "-device cxl-type3,bus=rp2,memdev=cxl-mem2,lsa=lsa2,id=cxl-pmem2 " \ - "-object memory-backend-file,id=cxl-mem3,mem-path=%s,size=256M " \ - "-object memory-backend-file,id=lsa3,mem-path=%s,size=256M " \ - "-device cxl-type3,bus=rp3,memdev=cxl-mem3,lsa=lsa3,id=cxl-pmem3 " +#define QEMU_T3D_VMEM \ + "-object memory-backend-ram,id=cxl-mem0,size=256M " \ + "-device cxl-type3,bus=rp0,volatile-memdev=cxl-mem0,id=mem0 " + +#define QEMU_T3D_VMEM_LSA \ + "-object memory-backend-ram,id=cxl-mem0,size=256M " \ + "-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp0,volatile-memdev=cxl-mem0,lsa=lsa0,id=mem0 " + +#define QEMU_2T3D \ + "-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp0,persistent-memdev=cxl-mem0,lsa=lsa0,id=pmem0 " \ + "-object memory-backend-file,id=cxl-mem1,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa1,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp1,persistent-memdev=cxl-mem1,lsa=lsa1,id=pmem1 " + +#define QEMU_4T3D \ + "-object memory-backend-file,id=cxl-mem0,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa0,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp0,persistent-memdev=cxl-mem0,lsa=lsa0,id=pmem0 " \ + "-object memory-backend-file,id=cxl-mem1,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa1,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp1,persistent-memdev=cxl-mem1,lsa=lsa1,id=pmem1 " \ + "-object memory-backend-file,id=cxl-mem2,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa2,mem-path=%s,size=256M " \ + "-device 
cxl-type3,bus=rp2,persistent-memdev=cxl-mem2,lsa=lsa2,id=pmem2 " \ + "-object memory-backend-file,id=cxl-mem3,mem-path=%s,size=256M " \ + "-object memory-backend-file,id=lsa3,mem-path=%s,size=256M " \ + "-device cxl-type3,bus=rp3,persistent-memdev=cxl-mem3,lsa=lsa3,id=pmem3 " static void cxl_basic_hb(void) { @@ -90,14 +112,53 @@ static void cxl_2root_port(void) } #ifdef CONFIG_POSIX -static void cxl_t3d(void) +static void cxl_t3d_deprecated(void) { g_autoptr(GString) cmdline = g_string_new(NULL); g_autofree const char *tmpfs = NULL; tmpfs = g_dir_make_tmp("cxl-test-XXXXXX", NULL); - g_string_printf(cmdline, QEMU_PXB_CMD QEMU_RP QEMU_T3D, tmpfs, tmpfs); + g_string_printf(cmdline, QEMU_PXB_CMD QEMU_RP QEMU_T3D_DEPRECATED, + tmpfs, tmpfs); + + qtest_start(cmdline->str); + qtest_end(); +} + +static void cxl_t3d_persistent(void) +{ + g_autoptr(GString) cmdline = g_string_new(NULL); + g_autofree const char *tmpfs = NULL; + + tmpfs = g_dir_make_tmp("cxl-test-XXXXXX", NULL); + + g_string_printf(cmdline, QEMU_PXB_CMD QEMU_RP QEMU_T3D_PMEM, + tmpfs, tmpfs); + + qtest_start(cmdline->str); + qtest_end(); +} + +static void cxl_t3d_volatile(void) +{ + g_autoptr(GString) cmdline = g_string_new(NULL); + + g_string_printf(cmdline, QEMU_PXB_CMD QEMU_RP QEMU_T3D_VMEM); + + qtest_start(cmdline->str); + qtest_end(); +} + +static void cxl_t3d_volatile_lsa(void) +{ + g_autoptr(GString) cmdline = g_string_new(NULL); + g_autofree const char *tmpfs = NULL; + + tmpfs = g_dir_make_tmp("cxl-test-XXXXXX", NULL); + + g_string_printf(cmdline, QEMU_PXB_CMD QEMU_RP QEMU_T3D_VMEM_LSA, + tmpfs); qtest_start(cmdline->str); qtest_end(); @@ -147,7 +208,10 @@ int main(int argc, char **argv) qtest_add_func("/pci/cxl/rp", cxl_root_port); qtest_add_func("/pci/cxl/rp_x2", cxl_2root_port); #ifdef CONFIG_POSIX - qtest_add_func("/pci/cxl/type3_device", cxl_t3d); + qtest_add_func("/pci/cxl/type3_device", cxl_t3d_deprecated); + qtest_add_func("/pci/cxl/type3_device_pmem", cxl_t3d_persistent); + qtest_add_func("/pci/cxl/type3_device_vmem", cxl_t3d_volatile); + qtest_add_func("/pci/cxl/type3_device_vmem_lsa", cxl_t3d_volatile_lsa); qtest_add_func("/pci/cxl/rp_x2_type3_x2", cxl_1pxb_2rp_2t3d); qtest_add_func("/pci/cxl/pxb_x2_root_port_x4_type3_x4", cxl_2pxb_4rp_4t3d); #endif diff --git a/tests/qtest/device-plug-test.c b/tests/qtest/device-plug-test.c index 01cecd6e20..c6f33153eb 100644 --- a/tests/qtest/device-plug-test.c +++ b/tests/qtest/device-plug-test.c @@ -156,7 +156,14 @@ static void test_q35_pci_unplug_json_request(void) static void test_ccw_unplug(void) { - QTestState *qtest = qtest_initf("-device virtio-balloon-ccw,id=dev0"); + QTestState *qtest; + + if (!qtest_has_device("virtio-balloon-ccw")) { + g_test_skip("Device virtio-balloon-ccw not available"); + return; + } + + qtest = qtest_initf("-device virtio-balloon-ccw,id=dev0"); qtest_qmp_device_del_send(qtest, "dev0"); wait_device_deleted_event(qtest, "dev0"); @@ -168,8 +175,8 @@ static void test_spapr_cpu_unplug_request(void) { QTestState *qtest; - qtest = qtest_initf("-cpu power9_v2.0 -smp 1,maxcpus=2 " - "-device power9_v2.0-spapr-cpu-core,core-id=1,id=dev0"); + qtest = qtest_initf("-cpu power9_v2.2 -smp 1,maxcpus=2 " + "-device power9_v2.2-spapr-cpu-core,core-id=1,id=dev0"); /* similar to test_pci_unplug_request */ process_device_remove(qtest, "dev0"); diff --git a/tests/qtest/e1000e-test.c b/tests/qtest/e1000e-test.c index b63a4d3c91..de9738fdb7 100644 --- a/tests/qtest/e1000e-test.c +++ b/tests/qtest/e1000e-test.c @@ -27,6 +27,7 @@ #include "qemu/osdep.h" #include 
"libqtest-single.h" #include "libqos/pci-pc.h" +#include "net/eth.h" #include "qemu/sockets.h" #include "qemu/iov.h" #include "qemu/module.h" @@ -35,9 +36,13 @@ #include "libqos/e1000e.h" #include "hw/net/e1000_regs.h" +static const struct eth_header packet = { + .h_dest = E1000E_ADDRESS, + .h_source = E1000E_ADDRESS, +}; + static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *alloc) { - static const char test[] = "TEST"; struct e1000_tx_desc descr; char buffer[64]; int ret; @@ -45,7 +50,7 @@ static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *a /* Prepare test data buffer */ uint64_t data = guest_alloc(alloc, sizeof(buffer)); - memwrite(data, test, sizeof(test)); + memwrite(data, &packet, sizeof(packet)); /* Prepare TX descriptor */ memset(&descr, 0, sizeof(descr)); @@ -71,7 +76,7 @@ static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *a g_assert_cmpint(ret, == , sizeof(recv_len)); ret = recv(test_sockets[0], buffer, sizeof(buffer), 0); g_assert_cmpint(ret, ==, sizeof(buffer)); - g_assert_cmpstr(buffer, == , test); + g_assert_false(memcmp(buffer, &packet, sizeof(packet))); /* Free test data buffer */ guest_free(alloc, data); @@ -81,15 +86,15 @@ static void e1000e_receive_verify(QE1000E *d, int *test_sockets, QGuestAllocator { union e1000_rx_desc_extended descr; - char test[] = "TEST"; - int len = htonl(sizeof(test)); + struct eth_header test_iov = packet; + int len = htonl(sizeof(packet)); struct iovec iov[] = { { .iov_base = &len, .iov_len = sizeof(len), },{ - .iov_base = test, - .iov_len = sizeof(test), + .iov_base = &test_iov, + .iov_len = sizeof(packet), }, }; @@ -97,8 +102,8 @@ static void e1000e_receive_verify(QE1000E *d, int *test_sockets, QGuestAllocator int ret; /* Send a dummy packet to device's socket*/ - ret = iov_send(test_sockets[0], iov, 2, 0, sizeof(len) + sizeof(test)); - g_assert_cmpint(ret, == , sizeof(test) + sizeof(len)); + ret = iov_send(test_sockets[0], iov, 2, 0, sizeof(len) + sizeof(packet)); + g_assert_cmpint(ret, == , sizeof(packet) + sizeof(len)); /* Prepare test data buffer */ uint64_t data = guest_alloc(alloc, sizeof(buffer)); @@ -119,7 +124,7 @@ static void e1000e_receive_verify(QE1000E *d, int *test_sockets, QGuestAllocator /* Check data sent to the backend */ memread(data, buffer, sizeof(buffer)); - g_assert_cmpstr(buffer, == , test); + g_assert_false(memcmp(buffer, &packet, sizeof(packet))); /* Free test data buffer */ guest_free(alloc, data); diff --git a/tests/qtest/fdc-test.c b/tests/qtest/fdc-test.c index 1f9b99ad6d..5e8fbda9df 100644 --- a/tests/qtest/fdc-test.c +++ b/tests/qtest/fdc-test.c @@ -28,9 +28,6 @@ #include "libqtest-single.h" #include "qapi/qmp/qdict.h" -/* TODO actually test the results and get rid of this */ -#define qmp_discard_response(...) qobject_unref(qmp(__VA_ARGS__)) - #define DRIVE_FLOPPY_BLANK \ "-drive if=floppy,file=null-co://,file.read-zeroes=on,format=raw,size=1440k" @@ -304,9 +301,10 @@ static void test_media_insert(void) /* Insert media in drive. DSKCHK should not be reset until a step pulse * is sent. 
*/ - qmp_discard_response("{'execute':'blockdev-change-medium', 'arguments':{" - " 'id':'floppy0', 'filename': %s, 'format': 'raw' }}", - test_image); + qtest_qmp_assert_success(global_qtest, + "{'execute':'blockdev-change-medium', 'arguments':{" + " 'id':'floppy0', 'filename': %s, 'format': 'raw' }}", + test_image); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_set(dir, DSKCHG); @@ -335,8 +333,9 @@ static void test_media_change(void) /* Eject the floppy and check that DSKCHG is set. Reading it out doesn't * reset the bit. */ - qmp_discard_response("{'execute':'eject', 'arguments':{" - " 'id':'floppy0' }}"); + qtest_qmp_assert_success(global_qtest, + "{'execute':'eject', 'arguments':{" + " 'id':'floppy0' }}"); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_set(dir, DSKCHG); diff --git a/tests/qtest/fuzz-lsi53c895a-test.c b/tests/qtest/fuzz-lsi53c895a-test.c index a9254b455d..1b55928b9f 100644 --- a/tests/qtest/fuzz-lsi53c895a-test.c +++ b/tests/qtest/fuzz-lsi53c895a-test.c @@ -8,6 +8,36 @@ #include "qemu/osdep.h" #include "libqtest.h" +/* + * This used to trigger a DMA reentrancy issue + * leading to memory corruption bugs like stack + * overflow or use-after-free + * https://gitlab.com/qemu-project/qemu/-/issues/1563 + */ +static void test_lsi_dma_reentrancy(void) +{ + QTestState *s; + + s = qtest_init("-M q35 -m 512M -nodefaults " + "-blockdev driver=null-co,node-name=null0 " + "-device lsi53c810 -device scsi-cd,drive=null0"); + + qtest_outl(s, 0xcf8, 0x80000804); /* PCI Command Register */ + qtest_outw(s, 0xcfc, 0x7); /* Enables accesses */ + qtest_outl(s, 0xcf8, 0x80000814); /* Memory Bar 1 */ + qtest_outl(s, 0xcfc, 0xff100000); /* Set MMIO Address*/ + qtest_outl(s, 0xcf8, 0x80000818); /* Memory Bar 2 */ + qtest_outl(s, 0xcfc, 0xff000000); /* Set RAM Address*/ + qtest_writel(s, 0xff000000, 0xc0000024); + qtest_writel(s, 0xff000114, 0x00000080); + qtest_writel(s, 0xff00012c, 0xff000000); + qtest_writel(s, 0xff000004, 0xff000114); + qtest_writel(s, 0xff000008, 0xff100014); + qtest_writel(s, 0xff10002f, 0x000000ff); + + qtest_quit(s); +} + /* * This used to trigger a UAF in lsi_do_msgout() * https://gitlab.com/qemu-project/qemu/-/issues/972 @@ -112,17 +142,20 @@ static void test_lsi_do_dma_empty_queue(void) int main(int argc, char **argv) { + g_test_init(&argc, &argv, NULL); + if (!qtest_has_device("lsi53c895a")) { return 0; } - g_test_init(&argc, &argv, NULL); - qtest_add_func("fuzz/lsi53c895a/lsi_do_dma_empty_queue", test_lsi_do_dma_empty_queue); qtest_add_func("fuzz/lsi53c895a/lsi_do_msgout_cancel_req", test_lsi_do_msgout_cancel_req); + qtest_add_func("fuzz/lsi53c895a/lsi_dma_reentrancy", + test_lsi_dma_reentrancy); + return g_test_run(); } diff --git a/tests/qtest/fuzz/generic_fuzz_configs.h b/tests/qtest/fuzz/generic_fuzz_configs.h index a825b78c14..50689da653 100644 --- a/tests/qtest/fuzz/generic_fuzz_configs.h +++ b/tests/qtest/fuzz/generic_fuzz_configs.h @@ -90,6 +90,11 @@ const generic_fuzz_config predefined_configs[] = { .args = "-M q35 -nodefaults " "-device e1000e,netdev=net0 -netdev user,id=net0", .objects = "e1000e", + },{ + .name = "igb", + .args = "-M q35 -nodefaults " + "-device igb,netdev=net0 -netdev user,id=net0", + .objects = "igb", },{ .name = "cirrus-vga", .args = "-machine q35 -nodefaults -device cirrus-vga", diff --git a/tests/qtest/ide-test.c b/tests/qtest/ide-test.c index dcb050bf9b..d6b4f6e36a 100644 --- a/tests/qtest/ide-test.c +++ b/tests/qtest/ide-test.c @@ -34,9 +34,6 @@ #include "hw/pci/pci_ids.h" #include "hw/pci/pci_regs.h" -/* TODO actually test the 
results and get rid of this */ -#define qmp_discard_response(q, ...) qobject_unref(qtest_qmp(q, __VA_ARGS__)) - #define TEST_IMAGE_SIZE 64 * 1024 * 1024 #define IDE_PCI_DEV 1 @@ -766,7 +763,7 @@ static void test_pci_retry_flush(void) qtest_qmp_eventwait(qts, "STOP"); /* Complete the command */ - qmp_discard_response(qts, "{'execute':'cont' }"); + qtest_qmp_assert_success(qts, "{'execute':'cont' }"); /* Check registers */ data = qpci_io_readb(dev, ide_bar, reg_device); diff --git a/tests/qtest/igb-test.c b/tests/qtest/igb-test.c new file mode 100644 index 0000000000..3d397ea697 --- /dev/null +++ b/tests/qtest/igb-test.c @@ -0,0 +1,256 @@ +/* + * QTest testcase for igb NIC + * + * Copyright (c) 2022-2023 Red Hat, Inc. + * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Akihiko Odaki + * Dmitry Fleytman + * Leonid Bloch + * Yan Vugenfirer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + + +#include "qemu/osdep.h" +#include "libqtest-single.h" +#include "libqos/pci-pc.h" +#include "net/eth.h" +#include "qemu/sockets.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "qemu/bitops.h" +#include "libqos/libqos-malloc.h" +#include "libqos/e1000e.h" +#include "hw/net/igb_regs.h" + +#ifndef _WIN32 + +static const struct eth_header packet = { + .h_dest = E1000E_ADDRESS, + .h_source = E1000E_ADDRESS, +}; + +static void igb_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *alloc) +{ + union e1000_adv_tx_desc descr; + char buffer[64]; + int ret; + uint32_t recv_len; + + /* Prepare test data buffer */ + uint64_t data = guest_alloc(alloc, sizeof(buffer)); + memwrite(data, &packet, sizeof(packet)); + + /* Prepare TX descriptor */ + memset(&descr, 0, sizeof(descr)); + descr.read.buffer_addr = cpu_to_le64(data); + descr.read.cmd_type_len = cpu_to_le32(E1000_TXD_CMD_RS | + E1000_TXD_CMD_EOP | + E1000_TXD_DTYP_D | + sizeof(buffer)); + + /* Put descriptor to the ring */ + e1000e_tx_ring_push(d, &descr); + + /* Wait for TX WB interrupt */ + e1000e_wait_isr(d, E1000E_TX0_MSG_ID); + + /* Check DD bit */ + g_assert_cmphex(le32_to_cpu(descr.wb.status) & E1000_TXD_STAT_DD, ==, + E1000_TXD_STAT_DD); + + /* Check data sent to the backend */ + ret = recv(test_sockets[0], &recv_len, sizeof(recv_len), 0); + g_assert_cmpint(ret, == , sizeof(recv_len)); + ret = recv(test_sockets[0], buffer, sizeof(buffer), 0); + g_assert_cmpint(ret, ==, sizeof(buffer)); + g_assert_false(memcmp(buffer, &packet, sizeof(packet))); + + /* Free test data buffer */ + guest_free(alloc, data); +} + +static void igb_receive_verify(QE1000E *d, int *test_sockets, QGuestAllocator *alloc) +{ + union e1000_adv_rx_desc descr; + + struct eth_header test_iov = packet; + int len = htonl(sizeof(packet)); + struct iovec iov[] = { + { + .iov_base = &len, + .iov_len = sizeof(len), + },{ + .iov_base = &test_iov, + .iov_len = sizeof(packet), + 
}, + }; + + char buffer[64]; + int ret; + + /* Send a dummy packet to the device's socket */ + ret = iov_send(test_sockets[0], iov, 2, 0, sizeof(len) + sizeof(packet)); + g_assert_cmpint(ret, == , sizeof(packet) + sizeof(len)); + + /* Prepare test data buffer */ + uint64_t data = guest_alloc(alloc, sizeof(buffer)); + + /* Prepare RX descriptor */ + memset(&descr, 0, sizeof(descr)); + descr.read.pkt_addr = cpu_to_le64(data); + + /* Put descriptor to the ring */ + e1000e_rx_ring_push(d, &descr); + + /* Wait for RX WB interrupt */ + e1000e_wait_isr(d, E1000E_RX0_MSG_ID); + + /* Check DD bit */ + g_assert_cmphex(le32_to_cpu(descr.wb.upper.status_error) & + E1000_RXD_STAT_DD, ==, E1000_RXD_STAT_DD); + + /* Check data received from the backend */ + memread(data, buffer, sizeof(buffer)); + g_assert_false(memcmp(buffer, &packet, sizeof(packet))); + + /* Free test data buffer */ + guest_free(alloc, data); +} + +static void test_e1000e_init(void *obj, void *data, QGuestAllocator * alloc) +{ + /* init does nothing */ +} + +static void test_igb_tx(void *obj, void *data, QGuestAllocator * alloc) +{ + QE1000E_PCI *e1000e = obj; + QE1000E *d = &e1000e->e1000e; + QOSGraphObject *e_object = obj; + QPCIDevice *dev = e_object->get_driver(e_object, "pci-device"); + + /* FIXME: add spapr support */ + if (qpci_check_buggy_msi(dev)) { + return; + } + + igb_send_verify(d, data, alloc); +} + +static void test_igb_rx(void *obj, void *data, QGuestAllocator * alloc) +{ + QE1000E_PCI *e1000e = obj; + QE1000E *d = &e1000e->e1000e; + QOSGraphObject *e_object = obj; + QPCIDevice *dev = e_object->get_driver(e_object, "pci-device"); + + /* FIXME: add spapr support */ + if (qpci_check_buggy_msi(dev)) { + return; + } + + igb_receive_verify(d, data, alloc); +} + +static void test_igb_multiple_transfers(void *obj, void *data, + QGuestAllocator *alloc) +{ + static const long iterations = 4 * 1024; + long i; + + QE1000E_PCI *e1000e = obj; + QE1000E *d = &e1000e->e1000e; + QOSGraphObject *e_object = obj; + QPCIDevice *dev = e_object->get_driver(e_object, "pci-device"); + + /* FIXME: add spapr support */ + if (qpci_check_buggy_msi(dev)) { + return; + } + + for (i = 0; i < iterations; i++) { + igb_send_verify(d, data, alloc); + igb_receive_verify(d, data, alloc); + } + +} + +static void data_test_clear(void *sockets) +{ + int *test_sockets = sockets; + + close(test_sockets[0]); + qos_invalidate_command_line(); + close(test_sockets[1]); + g_free(test_sockets); +} + +static void *data_test_init(GString *cmd_line, void *arg) +{ + int *test_sockets = g_new(int, 2); + int ret = socketpair(PF_UNIX, SOCK_STREAM, 0, test_sockets); + g_assert_cmpint(ret, != , -1); + + g_string_append_printf(cmd_line, " -netdev socket,fd=%d,id=hs0 ", + test_sockets[1]); + + g_test_queue_destroy(data_test_clear, test_sockets); + return test_sockets; +} + +#endif + +static void *data_test_init_no_socket(GString *cmd_line, void *arg) +{ + g_string_append(cmd_line, " -netdev hubport,hubid=0,id=hs0 "); + return arg; +} + +static void test_igb_hotplug(void *obj, void *data, QGuestAllocator * alloc) +{ + QTestState *qts = global_qtest; /* TODO: get rid of global_qtest here */ + QE1000E_PCI *dev = obj; + + if (dev->pci_dev.bus->not_hotpluggable) { + g_test_skip("pci bus does not support hotplug"); + return; + } + + qtest_qmp_device_add(qts, "igb", "igb_net", "{'addr': '0x06'}"); + qpci_unplug_acpi_device_test(qts, "igb_net", 0x06); +} + +static void register_igb_test(void) +{ + QOSGraphTestOptions opts = { 0 }; + +#ifndef _WIN32 + opts.before = data_test_init, +
qos_add_test("init", "igb", test_e1000e_init, &opts); + qos_add_test("tx", "igb", test_igb_tx, &opts); + qos_add_test("rx", "igb", test_igb_rx, &opts); + qos_add_test("multiple_transfers", "igb", + test_igb_multiple_transfers, &opts); +#endif + + opts.before = data_test_init_no_socket; + qos_add_test("hotplug", "igb", test_igb_hotplug, &opts); +} + +libqos_init(register_igb_test); diff --git a/tests/qtest/libqos/e1000e.c b/tests/qtest/libqos/e1000e.c index 28fb3052aa..925654c7fd 100644 --- a/tests/qtest/libqos/e1000e.c +++ b/tests/qtest/libqos/e1000e.c @@ -36,18 +36,6 @@ #define E1000E_RING_LEN (0x1000) -static void e1000e_macreg_write(QE1000E *d, uint32_t reg, uint32_t val) -{ - QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e); - qpci_io_writel(&d_pci->pci_dev, d_pci->mac_regs, reg, val); -} - -static uint32_t e1000e_macreg_read(QE1000E *d, uint32_t reg) -{ - QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e); - return qpci_io_readl(&d_pci->pci_dev, d_pci->mac_regs, reg); -} - void e1000e_tx_ring_push(QE1000E *d, void *descr) { QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e); diff --git a/tests/qtest/libqos/e1000e.h b/tests/qtest/libqos/e1000e.h index 091ce139da..30643c8094 100644 --- a/tests/qtest/libqos/e1000e.h +++ b/tests/qtest/libqos/e1000e.h @@ -25,6 +25,8 @@ #define E1000E_RX0_MSG_ID (0) #define E1000E_TX0_MSG_ID (1) +#define E1000E_ADDRESS { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 } + typedef struct QE1000E QE1000E; typedef struct QE1000E_PCI QE1000E_PCI; @@ -40,6 +42,18 @@ struct QE1000E_PCI { QE1000E e1000e; }; +static inline void e1000e_macreg_write(QE1000E *d, uint32_t reg, uint32_t val) +{ + QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e); + qpci_io_writel(&d_pci->pci_dev, d_pci->mac_regs, reg, val); +} + +static inline uint32_t e1000e_macreg_read(QE1000E *d, uint32_t reg) +{ + QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e); + return qpci_io_readl(&d_pci->pci_dev, d_pci->mac_regs, reg); +} + void e1000e_wait_isr(QE1000E *d, uint16_t msg_id); void e1000e_tx_ring_push(QE1000E *d, void *descr); void e1000e_rx_ring_push(QE1000E *d, void *descr); diff --git a/tests/qtest/libqos/igb.c b/tests/qtest/libqos/igb.c new file mode 100644 index 0000000000..a603468beb --- /dev/null +++ b/tests/qtest/libqos/igb.c @@ -0,0 +1,186 @@ +/* + * libqos driver framework + * + * Copyright (c) 2022-2023 Red Hat, Inc. + * Copyright (c) 2018 Emanuele Giuseppe Esposito + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1 as published by the Free Software Foundation. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ + +#include "qemu/osdep.h" +#include "hw/net/igb_regs.h" +#include "hw/net/mii.h" +#include "hw/pci/pci_ids.h" +#include "../libqtest.h" +#include "pci-pc.h" +#include "qemu/sockets.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "qemu/bitops.h" +#include "libqos-malloc.h" +#include "qgraph.h" +#include "e1000e.h" + +#define IGB_IVAR_TEST_CFG \ + ((E1000E_RX0_MSG_ID | E1000_IVAR_VALID) << (igb_ivar_entry_rx(0) * 8) | \ + ((E1000E_TX0_MSG_ID | E1000_IVAR_VALID) << (igb_ivar_entry_tx(0) * 8))) + +#define E1000E_RING_LEN (0x1000) + +static void e1000e_foreach_callback(QPCIDevice *dev, int devfn, void *data) +{ + QPCIDevice *res = data; + memcpy(res, dev, sizeof(QPCIDevice)); + g_free(dev); +} + +static void e1000e_pci_destructor(QOSGraphObject *obj) +{ + QE1000E_PCI *epci = (QE1000E_PCI *) obj; + qpci_iounmap(&epci->pci_dev, epci->mac_regs); + qpci_msix_disable(&epci->pci_dev); +} + +static void igb_pci_start_hw(QOSGraphObject *obj) +{ + static const uint8_t address[] = E1000E_ADDRESS; + QE1000E_PCI *d = (QE1000E_PCI *) obj; + uint32_t val; + + /* Enable the device */ + qpci_device_enable(&d->pci_dev); + + /* Reset the device */ + val = e1000e_macreg_read(&d->e1000e, E1000_CTRL); + e1000e_macreg_write(&d->e1000e, E1000_CTRL, val | E1000_CTRL_RST | E1000_CTRL_SLU); + + /* Setup link */ + e1000e_macreg_write(&d->e1000e, E1000_MDIC, + MII_BMCR_AUTOEN | MII_BMCR_ANRESTART | + (MII_BMCR << E1000_MDIC_REG_SHIFT) | + (1 << E1000_MDIC_PHY_SHIFT) | + E1000_MDIC_OP_WRITE); + + qtest_clock_step(d->pci_dev.bus->qts, 900000000); + + /* Enable and configure MSI-X */ + qpci_msix_enable(&d->pci_dev); + e1000e_macreg_write(&d->e1000e, E1000_IVAR0, IGB_IVAR_TEST_CFG); + + /* Check the device link status */ + val = e1000e_macreg_read(&d->e1000e, E1000_STATUS); + g_assert_cmphex(val & E1000_STATUS_LU, ==, E1000_STATUS_LU); + + /* Initialize TX/RX logic */ + e1000e_macreg_write(&d->e1000e, E1000_RCTL, 0); + e1000e_macreg_write(&d->e1000e, E1000_TCTL, 0); + + e1000e_macreg_write(&d->e1000e, E1000_TDBAL(0), + (uint32_t) d->e1000e.tx_ring); + e1000e_macreg_write(&d->e1000e, E1000_TDBAH(0), + (uint32_t) (d->e1000e.tx_ring >> 32)); + e1000e_macreg_write(&d->e1000e, E1000_TDLEN(0), E1000E_RING_LEN); + e1000e_macreg_write(&d->e1000e, E1000_TDT(0), 0); + e1000e_macreg_write(&d->e1000e, E1000_TDH(0), 0); + + /* Enable transmit */ + e1000e_macreg_write(&d->e1000e, E1000_TCTL, E1000_TCTL_EN); + + e1000e_macreg_write(&d->e1000e, E1000_RDBAL(0), + (uint32_t)d->e1000e.rx_ring); + e1000e_macreg_write(&d->e1000e, E1000_RDBAH(0), + (uint32_t)(d->e1000e.rx_ring >> 32)); + e1000e_macreg_write(&d->e1000e, E1000_RDLEN(0), E1000E_RING_LEN); + e1000e_macreg_write(&d->e1000e, E1000_RDT(0), 0); + e1000e_macreg_write(&d->e1000e, E1000_RDH(0), 0); + e1000e_macreg_write(&d->e1000e, E1000_RA, + le32_to_cpu(*(uint32_t *)address)); + e1000e_macreg_write(&d->e1000e, E1000_RA + 4, + E1000_RAH_AV | E1000_RAH_POOL_1 | + le16_to_cpu(*(uint16_t *)(address + 4))); + + /* Enable receive */ + e1000e_macreg_write(&d->e1000e, E1000_RFCTL, E1000_RFCTL_EXTEN); + e1000e_macreg_write(&d->e1000e, E1000_RCTL, E1000_RCTL_EN); + + /* Enable all interrupts */ + e1000e_macreg_write(&d->e1000e, E1000_GPIE, E1000_GPIE_MSIX_MODE); + e1000e_macreg_write(&d->e1000e, E1000_IMS, 0xFFFFFFFF); + e1000e_macreg_write(&d->e1000e, E1000_EIMS, 0xFFFFFFFF); + +} + +static void *igb_pci_get_driver(void *obj, const char *interface) +{ + 
QE1000E_PCI *epci = obj; + if (!g_strcmp0(interface, "igb-if")) { + return &epci->e1000e; + } + + /* implicit contains */ + if (!g_strcmp0(interface, "pci-device")) { + return &epci->pci_dev; + } + + fprintf(stderr, "%s not present in igb\n", interface); + g_assert_not_reached(); +} + +static void *igb_pci_create(void *pci_bus, QGuestAllocator *alloc, void *addr) +{ + QE1000E_PCI *d = g_new0(QE1000E_PCI, 1); + QPCIBus *bus = pci_bus; + QPCIAddress *address = addr; + + qpci_device_foreach(bus, address->vendor_id, address->device_id, + e1000e_foreach_callback, &d->pci_dev); + + /* Map BAR0 (mac registers) */ + d->mac_regs = qpci_iomap(&d->pci_dev, 0, NULL); + + /* Allocate and setup TX ring */ + d->e1000e.tx_ring = guest_alloc(alloc, E1000E_RING_LEN); + g_assert(d->e1000e.tx_ring != 0); + + /* Allocate and setup RX ring */ + d->e1000e.rx_ring = guest_alloc(alloc, E1000E_RING_LEN); + g_assert(d->e1000e.rx_ring != 0); + + d->obj.get_driver = igb_pci_get_driver; + d->obj.start_hw = igb_pci_start_hw; + d->obj.destructor = e1000e_pci_destructor; + + return &d->obj; +} + +static void igb_register_nodes(void) +{ + QPCIAddress addr = { + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = E1000_DEV_ID_82576, + }; + + /* + * FIXME: every test using this node needs to setup a -netdev socket,id=hs0 + * otherwise QEMU is not going to start + */ + QOSGraphEdgeOptions opts = { + .extra_device_opts = "netdev=hs0", + }; + add_qpci_address(&opts, &addr); + + qos_node_create_driver("igb", igb_pci_create); + qos_node_consumes("igb", "pci-bus", &opts); +} + +libqos_init(igb_register_nodes); diff --git a/tests/qtest/libqos/meson.build b/tests/qtest/libqos/meson.build index 32f028872c..cc209a8de5 100644 --- a/tests/qtest/libqos/meson.build +++ b/tests/qtest/libqos/meson.build @@ -30,6 +30,7 @@ libqos_srcs = files( 'i2c.c', 'i2c-imx.c', 'i2c-omap.c', + 'igb.c', 'sdhci.c', 'tpci200.c', 'virtio.c', diff --git a/tests/qtest/libqos/virtio-9p-client.c b/tests/qtest/libqos/virtio-9p-client.c index e4a368e036..b8adc8d4b9 100644 --- a/tests/qtest/libqos/virtio-9p-client.c +++ b/tests/qtest/libqos/virtio-9p-client.c @@ -594,6 +594,8 @@ void v9fs_rreaddir(P9Req *req, uint32_t *count, uint32_t *nentries, { uint32_t local_count; struct V9fsDirent *e = NULL; + /* only used to avoid a leak if entries was NULL */ + struct V9fsDirent *unused_entries = NULL; uint16_t slen; uint32_t n = 0; @@ -612,6 +614,8 @@ void v9fs_rreaddir(P9Req *req, uint32_t *count, uint32_t *nentries, e = g_new(struct V9fsDirent, 1); if (entries) { *entries = e; + } else { + unused_entries = e; } } else { e = e->next = g_new(struct V9fsDirent, 1); @@ -628,6 +632,7 @@ void v9fs_rreaddir(P9Req *req, uint32_t *count, uint32_t *nentries, *nentries = n; } + v9fs_free_dirents(unused_entries); v9fs_req_free(req); } diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c index 2bfd460531..c3a0ef5bb4 100644 --- a/tests/qtest/libqtest.c +++ b/tests/qtest/libqtest.c @@ -124,7 +124,7 @@ static int socket_accept(int sock) (void *)&timeout, sizeof(timeout))) { fprintf(stderr, "%s failed to set SO_RCVTIMEO: %s\n", __func__, strerror(errno)); - closesocket(sock); + close(sock); return -1; } @@ -135,7 +135,7 @@ static int socket_accept(int sock) if (ret == -1) { fprintf(stderr, "%s failed: %s\n", __func__, strerror(errno)); } - closesocket(sock); + close(sock); return ret; } @@ -564,8 +564,8 @@ void qtest_quit(QTestState *s) qtest_remove_abrt_handler(s); qtest_kill_qemu(s); - closesocket(s->fd); - closesocket(s->qmp_fd); + close(s->fd); + close(s->qmp_fd); 
g_string_free(s->rx, true); for (GList *it = s->pending_events; it != NULL; it = it->next) { @@ -1478,13 +1478,28 @@ void qtest_qmp_device_add(QTestState *qts, const char *driver, const char *id, qobject_unref(args); } -#ifndef _WIN32 void qtest_qmp_add_client(QTestState *qts, const char *protocol, int fd) { QDict *resp; +#ifdef WIN32 + WSAPROTOCOL_INFOW info; + g_autofree char *info64 = NULL; + SOCKET s; + + assert(fd_is_socket(fd)); + s = _get_osfhandle(fd); + if (WSADuplicateSocketW(s, GetProcessId((HANDLE)qts->qemu_pid), &info) == SOCKET_ERROR) { + g_autofree char *emsg = g_win32_error_message(WSAGetLastError()); + g_error("WSADuplicateSocketW failed: %s", emsg); + } + info64 = g_base64_encode((guchar *)&info, sizeof(info)); + resp = qtest_qmp(qts, "{'execute': 'get-win32-socket'," + "'arguments': {'fdname': 'fdname', 'info': %s}}", info64); +#else resp = qtest_qmp_fds(qts, &fd, 1, "{'execute': 'getfd'," "'arguments': {'fdname': 'fdname'}}"); +#endif g_assert(resp); g_assert(!qdict_haskey(resp, "event")); /* We don't expect any events */ g_assert(!qdict_haskey(resp, "error")); @@ -1498,7 +1513,6 @@ void qtest_qmp_add_client(QTestState *qts, const char *protocol, int fd) g_assert(!qdict_haskey(resp, "error")); qobject_unref(resp); } -#endif /* * Generic hot-unplugging test via the device_del QMP command. diff --git a/tests/qtest/libqtest.h b/tests/qtest/libqtest.h index fcf1c3c3b3..8d7d450963 100644 --- a/tests/qtest/libqtest.h +++ b/tests/qtest/libqtest.h @@ -758,17 +758,16 @@ void qtest_qmp_device_add_qdict(QTestState *qts, const char *drv, void qtest_qmp_device_add(QTestState *qts, const char *driver, const char *id, const char *fmt, ...) G_GNUC_PRINTF(4, 5); -#ifndef _WIN32 /** * qtest_qmp_add_client: * @qts: QTestState instance to operate on * @protocol: the protocol to add to * @fd: the client file-descriptor * - * Call QMP ``getfd`` followed by ``add_client`` with the given @fd. + * Call QMP ``getfd`` (on Windows ``get-win32-socket``) followed by + * ``add_client`` with the given @fd. */ void qtest_qmp_add_client(QTestState *qts, const char *protocol, int fd); -#endif /* _WIN32 */ /** * qtest_qmp_device_del_send: diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build index 29a4efb4c2..087f2dc9d7 100644 --- a/tests/qtest/meson.build +++ b/tests/qtest/meson.build @@ -23,7 +23,7 @@ qtests_generic = [ 'readconfig-test', 'netdev-socket', ] -if config_host.has_key('CONFIG_MODULES') +if enable_modules qtests_generic += [ 'modules-test' ] endif @@ -34,10 +34,12 @@ qtests_pci = \ qtests_cxl = \ (config_all_devices.has_key('CONFIG_CXL') ? ['cxl-test'] : []) +# FIXME: Get rid of get_option('default_devices') here and check +# for the availability of the default NICs in the tests qtests_filter = \ - (slirp.found() ? ['test-netfilter'] : []) + \ - (config_host.has_key('CONFIG_POSIX') ? ['test-filter-mirror'] : []) + \ - (config_host.has_key('CONFIG_POSIX') ? ['test-filter-redirector'] : []) + (get_option('default_devices') and slirp.found() ? ['test-netfilter'] : []) + \ + (get_option('default_devices') and config_host.has_key('CONFIG_POSIX') ? ['test-filter-mirror'] : []) + \ + (get_option('default_devices') and config_host.has_key('CONFIG_POSIX') ? ['test-filter-redirector'] : []) qtests_i386 = \ (slirp.found() ? ['pxe-test'] : []) + \ @@ -101,7 +103,7 @@ qtests_i386 = \ 'numa-test' ] -if dbus_display +if dbus_display and targetos != 'windows' qtests_i386 += ['dbus-display-test'] endif @@ -196,13 +198,15 @@ qtests_arm = \ (config_all_devices.has_key('CONFIG_CMSDK_APB_DUALTIMER') ? 
['cmsdk-apb-dualtimer-test'] : []) + \ (config_all_devices.has_key('CONFIG_CMSDK_APB_TIMER') ? ['cmsdk-apb-timer-test'] : []) + \ (config_all_devices.has_key('CONFIG_CMSDK_APB_WATCHDOG') ? ['cmsdk-apb-watchdog-test'] : []) + \ - (config_all_devices.has_key('CONFIG_PFLASH_CFI02') ? ['pflash-cfi02-test'] : []) + \ + (config_all_devices.has_key('CONFIG_PFLASH_CFI02') and + config_all_devices.has_key('CONFIG_MUSICPAL') ? ['pflash-cfi02-test'] : []) + \ (config_all_devices.has_key('CONFIG_ASPEED_SOC') ? qtests_aspeed : []) + \ (config_all_devices.has_key('CONFIG_NPCM7XX') ? qtests_npcm7xx : []) + \ (config_all_devices.has_key('CONFIG_GENERIC_LOADER') ? ['hexloader-test'] : []) + \ + (config_all_devices.has_key('CONFIG_TPM_TIS_I2C') ? ['tpm-tis-i2c-test'] : []) + \ + (config_all_devices.has_key('CONFIG_VEXPRESS') ? ['test-arm-mptimer'] : []) + \ + (config_all_devices.has_key('CONFIG_MICROBIT') ? ['microbit-test'] : []) + \ ['arm-cpu-features', - 'microbit-test', - 'test-arm-mptimer', 'boot-serial-test'] # TODO: once aarch64 TCG is fixed on ARM 32 bit host, make bios-tables-test unconditional @@ -212,15 +216,15 @@ qtests_aarch64 = \ ['tpm-tis-device-test', 'tpm-tis-device-swtpm-test'] : []) + \ (config_all_devices.has_key('CONFIG_XLNX_ZYNQMP_ARM') ? ['xlnx-can-test', 'fuzz-xlnx-dp-test'] : []) + \ (config_all_devices.has_key('CONFIG_RASPI') ? ['bcm2835-dma-test'] : []) + \ + (config_all.has_key('CONFIG_TCG') and \ + config_all_devices.has_key('CONFIG_TPM_TIS_I2C') ? ['tpm-tis-i2c-test'] : []) + \ ['arm-cpu-features', 'numa-test', 'boot-serial-test', 'migration-test'] qtests_s390x = \ - (slirp.found() ? ['pxe-test', 'test-netfilter'] : []) + \ - (config_host.has_key('CONFIG_POSIX') ? ['test-filter-mirror'] : []) + \ - (config_host.has_key('CONFIG_POSIX') ? 
['test-filter-redirector'] : []) + \ + qtests_filter + \ ['boot-serial-test', 'drive_del-test', 'device-plug-test', @@ -259,6 +263,7 @@ qos_test_ss.add( 'virtio-scsi-test.c', 'virtio-iommu-test.c', 'vmxnet3-test.c', + 'igb-test.c', ) if config_all_devices.has_key('CONFIG_VIRTIO_SERIAL') @@ -303,16 +308,19 @@ qtests = { 'tpm-crb-test': [io, tpmemu_files], 'tpm-tis-swtpm-test': [io, tpmemu_files, 'tpm-tis-util.c'], 'tpm-tis-test': [io, tpmemu_files, 'tpm-tis-util.c'], + 'tpm-tis-i2c-test': [io, tpmemu_files, 'qtest_aspeed.c'], 'tpm-tis-device-swtpm-test': [io, tpmemu_files, 'tpm-tis-util.c'], 'tpm-tis-device-test': [io, tpmemu_files, 'tpm-tis-util.c'], 'vmgenid-test': files('boot-sector.c', 'acpi-utils.c'), 'netdev-socket': files('netdev-socket.c', '../unit/socket-helpers.c'), } -gvnc = dependency('gvnc-1.0', required: false) -if gvnc.found() - qtests += {'vnc-display-test': [gvnc]} - qtests_generic += [ 'vnc-display-test' ] +if vnc.found() + gvnc = dependency('gvnc-1.0', required: false) + if gvnc.found() + qtests += {'vnc-display-test': [gvnc]} + qtests_generic += [ 'vnc-display-test' ] + endif endif if dbus_display diff --git a/tests/qtest/microbit-test.c b/tests/qtest/microbit-test.c index 4bc267020b..6022a92b6a 100644 --- a/tests/qtest/microbit-test.c +++ b/tests/qtest/microbit-test.c @@ -107,7 +107,7 @@ static void test_nrf51_uart(void) g_assert_true(recv(sock_fd, s, 10, 0) == 5); g_assert_true(memcmp(s, "world", 5) == 0); - closesocket(sock_fd); + close(sock_fd); qtest_quit(qts); } diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c index 109bc8e7b1..b99b49a314 100644 --- a/tests/qtest/migration-test.c +++ b/tests/qtest/migration-test.c @@ -40,9 +40,6 @@ #include "linux/kvm.h" #endif -/* TODO actually test the results and get rid of this */ -#define qtest_qmp_discard_response(...) 
qobject_unref(qtest_qmp(__VA_ARGS__)) - unsigned start_address; unsigned end_address; static bool uffd_feature_thread_id; @@ -406,10 +403,45 @@ static void migrate_set_parameter_str(QTestState *who, const char *parameter, migrate_check_parameter_str(who, parameter, value); } +static long long migrate_get_parameter_bool(QTestState *who, + const char *parameter) +{ + QDict *rsp; + int result; + + rsp = wait_command(who, "{ 'execute': 'query-migrate-parameters' }"); + result = qdict_get_bool(rsp, parameter); + qobject_unref(rsp); + return !!result; +} + +static void migrate_check_parameter_bool(QTestState *who, const char *parameter, + int value) +{ + int result; + + result = migrate_get_parameter_bool(who, parameter); + g_assert_cmpint(result, ==, value); +} + +static void migrate_set_parameter_bool(QTestState *who, const char *parameter, + int value) +{ + QDict *rsp; + + rsp = qtest_qmp(who, + "{ 'execute': 'migrate-set-parameters'," + "'arguments': { %s: %i } }", + parameter, value); + g_assert(qdict_haskey(rsp, "return")); + qobject_unref(rsp); + migrate_check_parameter_bool(who, parameter, value); +} + static void migrate_ensure_non_converge(QTestState *who) { - /* Can't converge with 1ms downtime + 30 mbs bandwidth limit */ - migrate_set_parameter_int(who, "max-bandwidth", 30 * 1000 * 1000); + /* Can't converge with 1ms downtime + 3 mbs bandwidth limit */ + migrate_set_parameter_int(who, "max-bandwidth", 3 * 1000 * 1000); migrate_set_parameter_int(who, "downtime-limit", 1); } @@ -731,7 +763,7 @@ static void test_migrate_end(QTestState *from, QTestState *to, bool test_dest) usleep(1000 * 10); } while (dest_byte_a == dest_byte_b); - qtest_qmp_discard_response(to, "{ 'execute' : 'stop'}"); + qtest_qmp_assert_success(to, "{ 'execute' : 'stop'}"); /* With it stopped, check nothing changes */ qtest_memread(to, start_address, &dest_byte_c, 1); @@ -1092,6 +1124,36 @@ test_migrate_tls_x509_finish(QTestState *from, #endif /* CONFIG_TASN1 */ #endif /* CONFIG_GNUTLS */ +static void * +test_migrate_compress_start(QTestState *from, + QTestState *to) +{ + migrate_set_parameter_int(from, "compress-level", 1); + migrate_set_parameter_int(from, "compress-threads", 4); + migrate_set_parameter_bool(from, "compress-wait-thread", true); + migrate_set_parameter_int(to, "decompress-threads", 4); + + migrate_set_capability(from, "compress", true); + migrate_set_capability(to, "compress", true); + + return NULL; +} + +static void * +test_migrate_compress_nowait_start(QTestState *from, + QTestState *to) +{ + migrate_set_parameter_int(from, "compress-level", 9); + migrate_set_parameter_int(from, "compress-threads", 1); + migrate_set_parameter_bool(from, "compress-wait-thread", false); + migrate_set_parameter_int(to, "decompress-threads", 1); + + migrate_set_capability(from, "compress", true); + migrate_set_capability(to, "compress", true); + + return NULL; +} + static int migrate_postcopy_prepare(QTestState **from_ptr, QTestState **to_ptr, MigrateCommon *args) @@ -1169,6 +1231,15 @@ static void test_postcopy(void) test_postcopy_common(&args); } +static void test_postcopy_compress(void) +{ + MigrateCommon args = { + .start_hook = test_migrate_compress_start + }; + + test_postcopy_common(&args); +} + static void test_postcopy_preempt(void) { MigrateCommon args = { @@ -1270,6 +1341,15 @@ static void test_postcopy_recovery(void) test_postcopy_recovery_common(&args); } +static void test_postcopy_recovery_compress(void) +{ + MigrateCommon args = { + .start_hook = test_migrate_compress_start + }; + + 
test_postcopy_recovery_common(&args); +} + #ifdef CONFIG_GNUTLS static void test_postcopy_recovery_tls_psk(void) { @@ -1303,6 +1383,7 @@ static void test_postcopy_preempt_all(void) test_postcopy_recovery_common(&args); } + #endif static void test_baddest(void) @@ -1524,6 +1605,40 @@ static void test_precopy_unix_xbzrle(void) test_precopy_common(&args); } +static void test_precopy_unix_compress(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = uri, + .start_hook = test_migrate_compress_start, + /* + * Test that no invalid thread state is left over from + * the previous iteration. + */ + .iterations = 2, + }; + + test_precopy_common(&args); +} + +static void test_precopy_unix_compress_nowait(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = uri, + .start_hook = test_migrate_compress_nowait_start, + /* + * Test that no invalid thread state is left over from + * the previous iteration. + */ + .iterations = 2, + }; + + test_precopy_common(&args); +} + static void test_precopy_tcp_plain(void) { MigrateCommon args = { @@ -1796,6 +1911,21 @@ static void test_validate_uuid_dst_not_set(void) do_test_validate_uuid(&args, false); } +/* + * The way auto_converge works, we need to do too many passes to + * run this test. Auto_converge logic is only run once every + * three iterations, so: + * + * - 3 iterations without auto_converge enabled + * - 3 iterations with pct = 5 + * - 3 iterations with pct = 30 + * - 3 iterations with pct = 55 + * - 3 iterations with pct = 80 + * - 3 iterations with pct = 95 (max(95, 80 + 25)) + * + * To make things even worse, we need to run the initial stage at + * 3MB/s so we enter autoconverge even when host is (over)loaded. + */ static void test_migrate_auto_converge(void) { g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); @@ -1808,7 +1938,7 @@ static void test_migrate_auto_converge(void) * E.g., with 1Gb/s bandwith migration may pass without throttling, * so we need to decrease a bandwidth. 
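* As a sanity check on the numbers (derived from the values in this patch): at the 3 MB/s cap set by * migrate_ensure_non_converge(), a 1 ms downtime budget only covers * 3e6 B/s * 1e-3 s ~= 3 KB of remaining dirty memory, which the * constantly dirtying guest workload always exceeds, so the throttle * is guaranteed to engage.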
*/ - const int64_t init_pct = 5, inc_pct = 50, max_pct = 95; + const int64_t init_pct = 5, inc_pct = 25, max_pct = 95; if (test_migrate_start(&from, &to, uri, &args)) { return; @@ -1835,13 +1965,16 @@ static void test_migrate_auto_converge(void) /* Wait for throttling begins */ percentage = 0; - while (percentage == 0) { + do { percentage = read_migrate_property_int(from, "cpu-throttle-percentage"); - usleep(100); + if (percentage != 0) { + break; + } + usleep(20); g_assert_false(got_stop); - } - /* The first percentage of throttling should be equal to init_pct */ - g_assert_cmpint(percentage, ==, init_pct); + } while (true); + /* The first percentage of throttling should be at least init_pct */ + g_assert_cmpint(percentage, >=, init_pct); /* Now, when we tested that throttling works, let it converge */ migrate_ensure_converge(from); @@ -2459,14 +2592,25 @@ static bool kvm_dirty_ring_supported(void) int main(int argc, char **argv) { - const bool has_kvm = qtest_has_accel("kvm"); - const bool has_uffd = ufd_version_check(); - const char *arch = qtest_get_arch(); + bool has_kvm, has_tcg; + bool has_uffd; + const char *arch; g_autoptr(GError) err = NULL; int ret; g_test_init(&argc, &argv, NULL); + has_kvm = qtest_has_accel("kvm"); + has_tcg = qtest_has_accel("tcg"); + + if (!has_tcg && !has_kvm) { + g_test_skip("No KVM or TCG accelerator available"); + return 0; + } + + has_uffd = ufd_version_check(); + arch = qtest_get_arch(); + /* * On ppc64, the test only works with kvm-hv, but not with kvm-pr and TCG * is touchy due to race conditions on dirty bits (especially on PPC for @@ -2503,11 +2647,27 @@ int main(int argc, char **argv) qtest_add_func("/migration/postcopy/preempt/plain", test_postcopy_preempt); qtest_add_func("/migration/postcopy/preempt/recovery/plain", test_postcopy_preempt_recovery); + if (getenv("QEMU_TEST_FLAKY_TESTS")) { + qtest_add_func("/migration/postcopy/compress/plain", + test_postcopy_compress); + qtest_add_func("/migration/postcopy/recovery/compress/plain", + test_postcopy_recovery_compress); + } } qtest_add_func("/migration/bad_dest", test_baddest); qtest_add_func("/migration/precopy/unix/plain", test_precopy_unix_plain); qtest_add_func("/migration/precopy/unix/xbzrle", test_precopy_unix_xbzrle); + /* + * Compression fails from time to time. + * Put test here but don't enable it until everything is fixed. + */ + if (getenv("QEMU_TEST_FLAKY_TESTS")) { + qtest_add_func("/migration/precopy/unix/compress/wait", + test_precopy_unix_compress); + qtest_add_func("/migration/precopy/unix/compress/nowait", + test_precopy_unix_compress_nowait); + } #ifdef CONFIG_GNUTLS qtest_add_func("/migration/precopy/unix/tls/psk", test_precopy_unix_tls_psk); @@ -2568,12 +2728,22 @@ int main(int argc, char **argv) test_validate_uuid_src_not_set); qtest_add_func("/migration/validate_uuid_dst_not_set", test_validate_uuid_dst_not_set); - - qtest_add_func("/migration/auto_converge", test_migrate_auto_converge); + /* + * See explanation why this test is slow on function definition + */ + if (g_test_slow()) { + qtest_add_func("/migration/auto_converge", test_migrate_auto_converge); + } qtest_add_func("/migration/multifd/tcp/plain/none", test_multifd_tcp_none); - qtest_add_func("/migration/multifd/tcp/plain/cancel", - test_multifd_tcp_cancel); + /* + * This test is flaky and sometimes fails in CI and otherwise: + * don't run unless user opts in via environment variable. 
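+ * For example (only the variable's presence is checked, any value + * will do; QTEST_QEMU_BINARY is the usual qtest way of pointing at + * the emulator under test): + * + * QEMU_TEST_FLAKY_TESTS=1 QTEST_QEMU_BINARY=./qemu-system-x86_64 \ + * ./tests/qtest/migration-test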
+ */ + if (getenv("QEMU_TEST_FLAKY_TESTS")) { + qtest_add_func("/migration/multifd/tcp/plain/cancel", + test_multifd_tcp_cancel); + } qtest_add_func("/migration/multifd/tcp/plain/zlib", test_multifd_tcp_zlib); #ifdef CONFIG_ZSTD diff --git a/tests/qtest/netdev-socket.c b/tests/qtest/netdev-socket.c index 270e424bee..097abc0230 100644 --- a/tests/qtest/netdev-socket.c +++ b/tests/qtest/netdev-socket.c @@ -99,7 +99,7 @@ static int inet_get_free_port_multiple(int nb, int *port, bool ipv6) nb = i; for (i = 0; i < nb; i++) { - closesocket(sock[i]); + close(sock[i]); } return nb; @@ -189,28 +189,26 @@ static void wait_stream_disconnected(QTestState *qts, const char *id) qobject_unref(resp); } -static void test_stream_inet_reconnect(void) +static void test_stream_unix_reconnect(void) { QTestState *qts0, *qts1; - int port; SocketAddress *addr; + gchar *path; - port = inet_get_free_port(false); + path = g_strconcat(tmpdir, "/stream_unix_reconnect", NULL); qts0 = qtest_initf("-nodefaults -M none " - "-netdev stream,id=st0,server=true,addr.type=inet," - "addr.ipv4=on,addr.ipv6=off," - "addr.host=127.0.0.1,addr.port=%d", port); + "-netdev stream,id=st0,server=true,addr.type=unix," + "addr.path=%s", path); EXPECT_STATE(qts0, "st0: index=0,type=stream,\r\n", 0); qts1 = qtest_initf("-nodefaults -M none " - "-netdev stream,server=false,id=st0,addr.type=inet," - "addr.ipv4=on,addr.ipv6=off,reconnect=1," - "addr.host=127.0.0.1,addr.port=%d", port); + "-netdev stream,server=false,id=st0,addr.type=unix," + "addr.path=%s,reconnect=1", path); wait_stream_connected(qts0, "st0", &addr); - g_assert_cmpint(addr->type, ==, SOCKET_ADDRESS_TYPE_INET); - g_assert_cmpstr(addr->u.inet.host, ==, "127.0.0.1"); + g_assert_cmpint(addr->type, ==, SOCKET_ADDRESS_TYPE_UNIX); + g_assert_cmpstr(addr->u.q_unix.path, ==, path); qapi_free_SocketAddress(addr); /* kill server */ @@ -221,24 +219,23 @@ static void test_stream_inet_reconnect(void) /* restart server */ qts0 = qtest_initf("-nodefaults -M none " - "-netdev stream,id=st0,server=true,addr.type=inet," - "addr.ipv4=on,addr.ipv6=off," - "addr.host=127.0.0.1,addr.port=%d", port); + "-netdev stream,id=st0,server=true,addr.type=unix," + "addr.path=%s", path); /* wait connection events*/ wait_stream_connected(qts0, "st0", &addr); - g_assert_cmpint(addr->type, ==, SOCKET_ADDRESS_TYPE_INET); - g_assert_cmpstr(addr->u.inet.host, ==, "127.0.0.1"); + g_assert_cmpint(addr->type, ==, SOCKET_ADDRESS_TYPE_UNIX); + g_assert_cmpstr(addr->u.q_unix.path, ==, path); qapi_free_SocketAddress(addr); wait_stream_connected(qts1, "st0", &addr); - g_assert_cmpint(addr->type, ==, SOCKET_ADDRESS_TYPE_INET); - g_assert_cmpstr(addr->u.inet.host, ==, "127.0.0.1"); - g_assert_cmpint(atoi(addr->u.inet.port), ==, port); + g_assert_cmpint(addr->type, ==, SOCKET_ADDRESS_TYPE_UNIX); + g_assert_cmpstr(addr->u.q_unix.path, ==, path); qapi_free_SocketAddress(addr); qtest_quit(qts1); qtest_quit(qts0); + g_free(path); } static void test_stream_inet_ipv6(void) @@ -361,8 +358,8 @@ static void test_stream_fd(void) qtest_quit(qts1); qtest_quit(qts0); - closesocket(sock[0]); - closesocket(sock[1]); + close(sock[0]); + close(sock[1]); } #endif @@ -487,8 +484,8 @@ static void test_dgram_fd(void) qtest_quit(qts1); qtest_quit(qts0); - closesocket(sv[0]); - closesocket(sv[1]); + close(sv[0]); + close(sv[1]); } #endif @@ -517,8 +514,6 @@ int main(int argc, char **argv) #ifndef _WIN32 qtest_add_func("/netdev/dgram/mcast", test_dgram_mcast); #endif - qtest_add_func("/netdev/stream/inet/reconnect", - test_stream_inet_reconnect); } 
if (has_ipv6) { qtest_add_func("/netdev/stream/inet/ipv6", test_stream_inet_ipv6); @@ -530,6 +525,8 @@ int main(int argc, char **argv) qtest_add_func("/netdev/dgram/unix", test_dgram_unix); #endif qtest_add_func("/netdev/stream/unix", test_stream_unix); + qtest_add_func("/netdev/stream/unix/reconnect", + test_stream_unix_reconnect); #ifdef CONFIG_LINUX qtest_add_func("/netdev/stream/unix/abstract", test_stream_unix_abstract); diff --git a/tests/qtest/pxe-test.c b/tests/qtest/pxe-test.c index 62b6eef464..e4b48225a5 100644 --- a/tests/qtest/pxe-test.c +++ b/tests/qtest/pxe-test.c @@ -131,11 +131,17 @@ int main(int argc, char *argv[]) int ret; const char *arch = qtest_get_arch(); + g_test_init(&argc, &argv, NULL); + + if (!qtest_has_accel("tcg") && !qtest_has_accel("kvm")) { + g_test_skip("No KVM or TCG accelerator available"); + return 0; + } + ret = boot_sector_init(disk); if(ret) return ret; - g_test_init(&argc, &argv, NULL); if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { test_batch(x86_tests, false); diff --git a/tests/qtest/qmp-cmd-test.c b/tests/qtest/qmp-cmd-test.c index 98caf6fef6..a58de48d2a 100644 --- a/tests/qtest/qmp-cmd-test.c +++ b/tests/qtest/qmp-cmd-test.c @@ -54,6 +54,7 @@ static int query_error_class(const char *cmd) /* Only valid with accel=tcg */ { "x-query-jit", ERROR_CLASS_GENERIC_ERROR }, { "x-query-opcount", ERROR_CLASS_GENERIC_ERROR }, + { "xen-event-list", ERROR_CLASS_GENERIC_ERROR }, { NULL, -1 } }; int i; diff --git a/tests/qtest/qtest_aspeed.c b/tests/qtest/qtest_aspeed.c new file mode 100644 index 0000000000..f6da9adea9 --- /dev/null +++ b/tests/qtest/qtest_aspeed.c @@ -0,0 +1,117 @@ +/* + * Aspeed i2c bus interface for reading from and writing to i2c device registers + * + * Copyright (c) 2023 IBM Corporation + * + * Authors: + * Stefan Berger + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" + +#include "qtest_aspeed.h" +#include "hw/i2c/aspeed_i2c.h" + +static void aspeed_i2c_startup(QTestState *s, uint32_t baseaddr, + uint8_t slave_addr, uint8_t reg) +{ + uint32_t v; + static int once; + + if (!once) { + /* one time: enable master */ + qtest_writel(s, baseaddr + A_I2CC_FUN_CTRL, 0); + v = qtest_readl(s, baseaddr + A_I2CC_FUN_CTRL) | A_I2CD_MASTER_EN; + qtest_writel(s, baseaddr + A_I2CC_FUN_CTRL, v); + once = 1; + } + + /* select device */ + qtest_writel(s, baseaddr + A_I2CD_BYTE_BUF, slave_addr << 1); + qtest_writel(s, baseaddr + A_I2CD_CMD, + A_I2CD_M_START_CMD | A_I2CD_M_RX_CMD); + + /* select the register to write to */ + qtest_writel(s, baseaddr + A_I2CD_BYTE_BUF, reg); + qtest_writel(s, baseaddr + A_I2CD_CMD, A_I2CD_M_TX_CMD); +} + +static uint32_t aspeed_i2c_read_n(QTestState *s, + uint32_t baseaddr, uint8_t slave_addr, + uint8_t reg, size_t nbytes) +{ + uint32_t res = 0; + uint32_t v; + size_t i; + + aspeed_i2c_startup(s, baseaddr, slave_addr, reg); + + for (i = 0; i < nbytes; i++) { + qtest_writel(s, baseaddr + A_I2CD_CMD, A_I2CD_M_RX_CMD); + v = qtest_readl(s, baseaddr + A_I2CD_BYTE_BUF) >> 8; + res |= (v & 0xff) << (i * 8); + } + + qtest_writel(s, baseaddr + A_I2CD_CMD, A_I2CD_M_STOP_CMD); + + return res; +} + +uint32_t aspeed_i2c_readl(QTestState *s, + uint32_t baseaddr, uint8_t slave_addr, uint8_t reg) +{ + return aspeed_i2c_read_n(s, baseaddr, slave_addr, reg, sizeof(uint32_t)); +} + +uint16_t aspeed_i2c_readw(QTestState *s, + uint32_t baseaddr, uint8_t slave_addr, uint8_t reg) +{ + return aspeed_i2c_read_n(s, baseaddr, slave_addr, reg, sizeof(uint16_t)); +} + +uint8_t aspeed_i2c_readb(QTestState *s, + uint32_t baseaddr, uint8_t slave_addr, uint8_t reg) +{ + return aspeed_i2c_read_n(s, baseaddr, slave_addr, reg, sizeof(uint8_t)); +} + +static void aspeed_i2c_write_n(QTestState *s, + uint32_t baseaddr, uint8_t slave_addr, + uint8_t reg, uint32_t v, size_t nbytes) +{ + size_t i; + + aspeed_i2c_startup(s, baseaddr, slave_addr, reg); + + for (i = 0; i < nbytes; i++) { + qtest_writel(s, baseaddr + A_I2CD_BYTE_BUF, v & 0xff); + v >>= 8; + qtest_writel(s, baseaddr + A_I2CD_CMD, A_I2CD_M_TX_CMD); + } + + qtest_writel(s, baseaddr + A_I2CD_CMD, A_I2CD_M_STOP_CMD); +} + +void aspeed_i2c_writel(QTestState *s, + uint32_t baseaddr, uint8_t slave_addr, + uint8_t reg, uint32_t v) +{ + aspeed_i2c_write_n(s, baseaddr, slave_addr, reg, v, sizeof(v)); +} + +void aspeed_i2c_writew(QTestState *s, + uint32_t baseaddr, uint8_t slave_addr, + uint8_t reg, uint16_t v) +{ + aspeed_i2c_write_n(s, baseaddr, slave_addr, reg, v, sizeof(v)); +} + +void aspeed_i2c_writeb(QTestState *s, + uint32_t baseaddr, uint8_t slave_addr, + uint8_t reg, uint8_t v) +{ + aspeed_i2c_write_n(s, baseaddr, slave_addr, reg, v, sizeof(v)); +} diff --git a/tests/qtest/qtest_aspeed.h b/tests/qtest/qtest_aspeed.h new file mode 100644 index 0000000000..235dfaa186 --- /dev/null +++ b/tests/qtest/qtest_aspeed.h @@ -0,0 +1,41 @@ +/* + * Aspeed i2c bus interface to reading and writing to i2c device registers + * + * Copyright (c) 2023 IBM Corporation + * + * Authors: + * Stefan Berger + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ +#ifndef QTEST_ASPEED_H +#define QTEST_ASPEED_H + +#include <stdint.h> + +#include "libqtest.h" + +#define AST2600_ASPEED_I2C_BASE_ADDR 0x1e78a000 + +/* Implements only AST2600 I2C controller */ + +static inline uint32_t ast2600_i2c_calc_bus_addr(uint8_t bus_num) +{ + return AST2600_ASPEED_I2C_BASE_ADDR + 0x80 + bus_num * 0x80; +} + +uint8_t aspeed_i2c_readb(QTestState *s, + uint32_t baseaddr, uint8_t slave_addr, uint8_t reg); +uint16_t aspeed_i2c_readw(QTestState *s, + uint32_t baseaddr, uint8_t slave_addr, uint8_t reg); +uint32_t aspeed_i2c_readl(QTestState *s, + uint32_t baseaddr, uint8_t slave_addr, uint8_t reg); +void aspeed_i2c_writeb(QTestState *s, uint32_t baseaddr, uint8_t slave_addr, + uint8_t reg, uint8_t v); +void aspeed_i2c_writew(QTestState *s, uint32_t baseaddr, uint8_t slave_addr, + uint8_t reg, uint16_t v); +void aspeed_i2c_writel(QTestState *s, uint32_t baseaddr, uint8_t slave_addr, + uint8_t reg, uint32_t v); + +#endif diff --git a/tests/qtest/readconfig-test.c b/tests/qtest/readconfig-test.c index 9ef870643d..ac7242451b 100644 --- a/tests/qtest/readconfig-test.c +++ b/tests/qtest/readconfig-test.c @@ -86,8 +86,8 @@ static void test_x86_memdev(void) qtest_quit(qts); } - -#ifdef CONFIG_SPICE +/* FIXME: The test is currently broken on FreeBSD */ +#if defined(CONFIG_SPICE) && !defined(__FreeBSD__) static void test_spice_resp(QObject *res) { Visitor *v; @@ -124,13 +124,15 @@ static void test_spice(void) } #endif -static void test_object_rng_resp(QObject *res) +static void test_object_available(QObject *res, const char *name, + const char *type) { Visitor *v; g_autoptr(ObjectPropertyInfoList) objs = NULL; ObjectPropertyInfoList *tmp; ObjectPropertyInfo *obj; - bool seen_rng = false; + bool object_available = false; + g_autofree char *childtype = g_strdup_printf("child<%s>", type); g_assert(res); v = qobject_input_visitor_new(res); @@ -142,16 +144,15 @@ g_assert(tmp->value); obj = tmp->value; - if (g_str_equal(obj->name, "rng0") && - g_str_equal(obj->type, "child<rng-builtin>")) { - seen_rng = true; + if (g_str_equal(obj->name, name) && g_str_equal(obj->type, childtype)) { + object_available = true; break; } tmp = tmp->next; } - g_assert(seen_rng); + g_assert(object_available); visit_free(v); } @@ -170,7 +171,27 @@ static void test_object_rng(void) resp = qtest_qmp(qts, "{ 'execute': 'qom-list'," " 'arguments': {'path': '/objects' }}"); - test_object_rng_resp(qdict_get(resp, "return")); + test_object_available(qdict_get(resp, "return"), "rng0", "rng-builtin"); + qobject_unref(resp); + + qtest_quit(qts); +} + +static void test_docs_config_ich9(void) +{ + QTestState *qts; + QDict *resp; + QObject *qobj; + + qts = qtest_initf("-nodefaults -readconfig docs/config/ich9-ehci-uhci.cfg"); + + resp = qtest_qmp(qts, "{ 'execute': 'qom-list'," + " 'arguments': {'path': '/machine/peripheral' }}"); + qobj = qdict_get(resp, "return"); + test_object_available(qobj, "ehci", "ich9-usb-ehci1"); + test_object_available(qobj, "uhci-1", "ich9-usb-uhci1"); + test_object_available(qobj, "uhci-2", "ich9-usb-uhci2"); + test_object_available(qobj, "uhci-3", "ich9-usb-uhci3"); qobject_unref(resp); qtest_quit(qts); @@ -186,8 +207,12 @@ int main(int argc, char *argv[]) if (g_str_equal(arch, "i386") || g_str_equal(arch, "x86_64")) { qtest_add_func("readconfig/x86/memdev", test_x86_memdev); + if (qtest_has_device("ich9-usb-ehci1") && + qtest_has_device("ich9-usb-uhci1")) { + qtest_add_func("readconfig/x86/ich9-ehci-uhci", test_docs_config_ich9); + } } -#ifdef CONFIG_SPICE +#if 
defined(CONFIG_SPICE) && !defined(__FreeBSD__) qtest_add_func("readconfig/spice", test_spice); #endif diff --git a/tests/qtest/rtl8139-test.c b/tests/qtest/rtl8139-test.c index 1beb83805c..4dc0a0d22e 100644 --- a/tests/qtest/rtl8139-test.c +++ b/tests/qtest/rtl8139-test.c @@ -207,9 +207,14 @@ int main(int argc, char **argv) verbosity_level = atoi(v_env); } + g_test_init(&argc, &argv, NULL); + + if (!qtest_has_device("rtl8139")) { + return 0; + } + qtest_start("-device rtl8139"); - g_test_init(&argc, &argv, NULL); qtest_add_func("/rtl8139/nop", nop); qtest_add_func("/rtl8139/timer", test_init); diff --git a/tests/qtest/tco-test.c b/tests/qtest/tco-test.c index d865e95dfc..0547d41173 100644 --- a/tests/qtest/tco-test.c +++ b/tests/qtest/tco-test.c @@ -14,7 +14,7 @@ #include "libqos/pci-pc.h" #include "qapi/qmp/qdict.h" #include "hw/pci/pci_regs.h" -#include "hw/i386/ich9.h" +#include "hw/southbridge/ich9.h" #include "hw/acpi/ich9.h" #include "hw/acpi/ich9_tco.h" diff --git a/tests/qtest/test-filter-mirror.c b/tests/qtest/test-filter-mirror.c index 248fc88699..adeada3eb8 100644 --- a/tests/qtest/test-filter-mirror.c +++ b/tests/qtest/test-filter-mirror.c @@ -16,9 +16,6 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" -/* TODO actually test the results and get rid of this */ -#define qmp_discard_response(qs, ...) qobject_unref(qtest_qmp(qs, __VA_ARGS__)) - static void test_mirror(void) { int send_sock[2], recv_sock[2]; @@ -52,7 +49,7 @@ static void test_mirror(void) }; /* send a qmp command to guarantee that 'connected' is setting to true. */ - qmp_discard_response(qts, "{ 'execute' : 'query-status'}"); + qtest_qmp_assert_success(qts, "{ 'execute' : 'query-status'}"); ret = iov_send(send_sock[0], iov, 2, 0, sizeof(size) + sizeof(send_buf)); g_assert_cmpint(ret, ==, sizeof(send_buf) + sizeof(size)); close(send_sock[0]); diff --git a/tests/qtest/test-filter-redirector.c b/tests/qtest/test-filter-redirector.c index 24ca9280f8..e72e3b7873 100644 --- a/tests/qtest/test-filter-redirector.c +++ b/tests/qtest/test-filter-redirector.c @@ -58,9 +58,6 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" -/* TODO actually test the results and get rid of this */ -#define qmp_discard_response(qs, ...) qobject_unref(qtest_qmp(qs, __VA_ARGS__)) - static void test_redirector_tx(void) { int backend_sock[2], recv_sock; @@ -98,7 +95,7 @@ static void test_redirector_tx(void) g_assert_cmpint(recv_sock, !=, -1); /* send a qmp command to guarantee that 'connected' is setting to true. */ - qmp_discard_response(qts, "{ 'execute' : 'query-status'}"); + qtest_qmp_assert_success(qts, "{ 'execute' : 'query-status'}"); struct iovec iov[] = { { @@ -176,7 +173,7 @@ static void test_redirector_rx(void) send_sock = unix_connect(sock_path1, NULL); g_assert_cmpint(send_sock, !=, -1); /* send a qmp command to guarantee that 'connected' is setting to true. 
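* (The synchronous QMP round trip only completes after the main loop * has run, so the chardev connect event is guaranteed to have been * processed by the time the response arrives.)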
*/ - qmp_discard_response(qts, "{ 'execute' : 'query-status'}"); + qtest_qmp_assert_success(qts, "{ 'execute' : 'query-status'}"); ret = iov_send(send_sock, iov, 2, 0, sizeof(size) + sizeof(send_buf)); g_assert_cmpint(ret, ==, sizeof(send_buf) + sizeof(size)); diff --git a/tests/qtest/test-hmp.c b/tests/qtest/test-hmp.c index b4a920df89..6704be239b 100644 --- a/tests/qtest/test-hmp.c +++ b/tests/qtest/test-hmp.c @@ -56,6 +56,7 @@ static const char *hmp_cmds[] = { "o /w 0 0x1234", "object_add memory-backend-ram,id=mem1,size=256M", "object_del mem1", + "one-insn-per-tb on", "pmemsave 0 4096 \"/dev/null\"", "p $pc + 8", "qom-list /", diff --git a/tests/qtest/tpm-crb-swtpm-test.c b/tests/qtest/tpm-crb-swtpm-test.c index 40254f762f..ffeb1c396b 100644 --- a/tests/qtest/tpm-crb-swtpm-test.c +++ b/tests/qtest/tpm-crb-swtpm-test.c @@ -19,9 +19,6 @@ #include "tpm-tests.h" #include "hw/acpi/tpm.h" -/* Not used but needed for linking */ -uint64_t tpm_tis_base_addr = TPM_TIS_ADDR_BASE; - typedef struct TestState { char *src_tpm_path; char *dst_tpm_path; diff --git a/tests/qtest/tpm-crb-test.c b/tests/qtest/tpm-crb-test.c index 7b94453390..396ae3f91c 100644 --- a/tests/qtest/tpm-crb-test.c +++ b/tests/qtest/tpm-crb-test.c @@ -19,9 +19,6 @@ #include "qemu/module.h" #include "tpm-emu.h" -/* Not used but needed for linking */ -uint64_t tpm_tis_base_addr = TPM_TIS_ADDR_BASE; - #define TPM_CMD "\x80\x01\x00\x00\x00\x0c\x00\x00\x01\x44\x00\x00" static void tpm_crb_test(const void *data) diff --git a/tests/qtest/tpm-tis-device-swtpm-test.c b/tests/qtest/tpm-tis-device-swtpm-test.c index 8c067fddd4..517a077005 100644 --- a/tests/qtest/tpm-tis-device-swtpm-test.c +++ b/tests/qtest/tpm-tis-device-swtpm-test.c @@ -18,6 +18,7 @@ #include "libqtest.h" #include "qemu/module.h" #include "tpm-tests.h" +#include "tpm-tis-util.h" #include "hw/acpi/tpm.h" uint64_t tpm_tis_base_addr = 0xc000000; @@ -33,7 +34,7 @@ static void tpm_tis_swtpm_test(const void *data) { const TestState *ts = data; - tpm_test_swtpm_test(ts->src_tpm_path, tpm_util_tis_transfer, + tpm_test_swtpm_test(ts->src_tpm_path, tpm_tis_transfer, "tpm-tis-device", MACHINE_OPTIONS); } @@ -42,7 +43,7 @@ static void tpm_tis_swtpm_migration_test(const void *data) const TestState *ts = data; tpm_test_swtpm_migration_test(ts->src_tpm_path, ts->dst_tpm_path, ts->uri, - tpm_util_tis_transfer, "tpm-tis-device", + tpm_tis_transfer, "tpm-tis-device", MACHINE_OPTIONS); } diff --git a/tests/qtest/tpm-tis-i2c-test.c b/tests/qtest/tpm-tis-i2c-test.c new file mode 100644 index 0000000000..7a590ac551 --- /dev/null +++ b/tests/qtest/tpm-tis-i2c-test.c @@ -0,0 +1,663 @@ +/* + * QTest testcases for TPM TIS on I2C (derived from TPM TIS test) + * + * Copyright (c) 2023 IBM Corporation + * Copyright (c) 2023 Red Hat, Inc. + * + * Authors: + * Stefan Berger + * Marc-André Lureau + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include <glib/gstdio.h> + +#include "libqtest-single.h" +#include "hw/acpi/tpm.h" +#include "hw/pci/pci_ids.h" +#include "qtest_aspeed.h" +#include "tpm-emu.h" + +#define DEBUG_TIS_TEST 0 + +#define DPRINTF(fmt, ...) 
do { \ + if (DEBUG_TIS_TEST) { \ + printf(fmt, ## __VA_ARGS__); \ + } \ +} while (0) + +#define DPRINTF_ACCESS \ + DPRINTF("%s: %d: locty=%d l=%d access=0x%02x pending_request_flag=0x%x\n", \ + __func__, __LINE__, locty, l, access, pending_request_flag) + +#define DPRINTF_STS \ + DPRINTF("%s: %d: sts = 0x%08x\n", __func__, __LINE__, sts) + +#define I2C_SLAVE_ADDR 0x2e +#define I2C_DEV_BUS_NUM 10 + +static const uint8_t TPM_CMD[12] = + "\x80\x01\x00\x00\x00\x0c\x00\x00\x01\x44\x00\x00"; + +static uint32_t aspeed_bus_addr; + +static uint8_t cur_locty = 0xff; + +static void tpm_tis_i2c_set_locty(uint8_t locty) +{ + if (cur_locty != locty) { + cur_locty = locty; + aspeed_i2c_writeb(global_qtest, aspeed_bus_addr, I2C_SLAVE_ADDR, + TPM_I2C_REG_LOC_SEL, locty); + } +} + +static uint8_t tpm_tis_i2c_readb(uint8_t locty, uint8_t reg) +{ + tpm_tis_i2c_set_locty(locty); + return aspeed_i2c_readb(global_qtest, aspeed_bus_addr, I2C_SLAVE_ADDR, reg); +} + +static uint16_t tpm_tis_i2c_readw(uint8_t locty, uint8_t reg) +{ + tpm_tis_i2c_set_locty(locty); + return aspeed_i2c_readw(global_qtest, aspeed_bus_addr, I2C_SLAVE_ADDR, reg); +} + +static uint32_t tpm_tis_i2c_readl(uint8_t locty, uint8_t reg) +{ + tpm_tis_i2c_set_locty(locty); + return aspeed_i2c_readl(global_qtest, aspeed_bus_addr, I2C_SLAVE_ADDR, reg); +} + +static void tpm_tis_i2c_writeb(uint8_t locty, uint8_t reg, uint8_t v) +{ + if (reg != TPM_I2C_REG_LOC_SEL) { + tpm_tis_i2c_set_locty(locty); + } + aspeed_i2c_writeb(global_qtest, aspeed_bus_addr, I2C_SLAVE_ADDR, reg, v); +} + +static void tpm_tis_i2c_writel(uint8_t locty, uint8_t reg, uint32_t v) +{ + if (reg != TPM_I2C_REG_LOC_SEL) { + tpm_tis_i2c_set_locty(locty); + } + aspeed_i2c_writel(global_qtest, aspeed_bus_addr, I2C_SLAVE_ADDR, reg, v); +} + +static void tpm_tis_i2c_test_basic(const void *data) +{ + uint8_t access; + uint32_t v, v2; + + /* + * All register accesses below must work without locality 0 being the + * active locality. Therefore, ensure access is released. 
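+ * (In TIS terms, writing the ACTIVE_LOCALITY bit back to the access + * register, as done just below, is what relinquishes a locality.)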
+ */ + tpm_tis_i2c_writeb(0, TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_ACTIVE_LOCALITY); + access = tpm_tis_i2c_readb(0, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* read interrupt capability -- none are supported */ + v = tpm_tis_i2c_readl(0, TPM_I2C_REG_INT_CAPABILITY); + g_assert_cmpint(v, ==, 0); + + /* try to enable all interrupts */ + tpm_tis_i2c_writel(0, TPM_I2C_REG_INT_ENABLE, 0xffffffff); + v = tpm_tis_i2c_readl(0, TPM_I2C_REG_INT_ENABLE); + /* none could be enabled */ + g_assert_cmpint(v, ==, 0); + + /* enable csum */ + tpm_tis_i2c_writeb(0, TPM_I2C_REG_DATA_CSUM_ENABLE, TPM_DATA_CSUM_ENABLED); + /* check csum enable register has bit 0 set */ + v = tpm_tis_i2c_readb(0, TPM_I2C_REG_DATA_CSUM_ENABLE); + g_assert_cmpint(v, ==, TPM_DATA_CSUM_ENABLED); + /* reading it as 32bit register returns same result */ + v = tpm_tis_i2c_readl(0, TPM_I2C_REG_DATA_CSUM_ENABLE); + g_assert_cmpint(v, ==, TPM_DATA_CSUM_ENABLED); + + /* disable csum */ + tpm_tis_i2c_writeb(0, TPM_I2C_REG_DATA_CSUM_ENABLE, 0); + /* check csum enable register has bit 0 clear */ + v = tpm_tis_i2c_readb(0, TPM_I2C_REG_DATA_CSUM_ENABLE); + g_assert_cmpint(v, ==, 0); + + /* write to unsupported register '1' */ + tpm_tis_i2c_writel(0, 1, 0x12345678); + v = tpm_tis_i2c_readl(0, 1); + g_assert_cmpint(v, ==, 0xffffffff); + + /* request use of locality */ + tpm_tis_i2c_writeb(0, TPM_I2C_REG_ACCESS, TPM_TIS_ACCESS_REQUEST_USE); + + /* read byte from STS + 3 */ + v = tpm_tis_i2c_readb(0, TPM_I2C_REG_STS + 3); + g_assert_cmpint(v, ==, 0); + + /* check STS after writing to STS + 3 */ + v = tpm_tis_i2c_readl(0, TPM_I2C_REG_STS); + tpm_tis_i2c_writeb(0, TPM_I2C_REG_STS + 3, 0xf); + v2 = tpm_tis_i2c_readl(0, TPM_I2C_REG_STS); + g_assert_cmpint(v, ==, v2); + + /* release access */ + tpm_tis_i2c_writeb(0, TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_ACTIVE_LOCALITY); + + /* select locality 5 -- must not be possible */ + tpm_tis_i2c_writeb(0, TPM_I2C_REG_LOC_SEL, 5); + v = tpm_tis_i2c_readb(0, TPM_I2C_REG_LOC_SEL); + g_assert_cmpint(v, ==, 0); +} + +static void tpm_tis_i2c_test_check_localities(const void *data) +{ + uint8_t locty, l; + uint8_t access; + uint32_t capability, i2c_cap; + uint32_t didvid; + uint32_t rid; + + for (locty = 0; locty < TPM_TIS_NUM_LOCALITIES; locty++) { + access = tpm_tis_i2c_readb(locty, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + capability = tpm_tis_i2c_readl(locty, TPM_I2C_REG_INTF_CAPABILITY); + i2c_cap = (TPM_I2C_CAP_INTERFACE_TYPE | + TPM_I2C_CAP_INTERFACE_VER | + TPM_I2C_CAP_TPM2_FAMILY | + TPM_I2C_CAP_LOCALITY_CAP | + TPM_I2C_CAP_BUS_SPEED | + TPM_I2C_CAP_DEV_ADDR_CHANGE); + g_assert_cmpint(capability, ==, i2c_cap); + + didvid = tpm_tis_i2c_readl(locty, TPM_I2C_REG_DID_VID); + g_assert_cmpint(didvid, ==, (1 << 16) | PCI_VENDOR_ID_IBM); + + rid = tpm_tis_i2c_readl(locty, TPM_I2C_REG_RID); + g_assert_cmpint(rid, !=, 0); + g_assert_cmpint(rid, !=, 0xffffffff); + + /* locality selection must be at locty */ + l = tpm_tis_i2c_readb(locty, TPM_I2C_REG_LOC_SEL); + g_assert_cmpint(l, ==, locty); + } +} + +static void tpm_tis_i2c_test_check_access_reg(const void *data) +{ + uint8_t locty; + uint8_t access; + + /* do not test locality 4 (hw only) */ + for (locty = 0; locty < TPM_TIS_NUM_LOCALITIES - 1; locty++) { + access = tpm_tis_i2c_readb(locty, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + 
TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* request use of locality */ + tpm_tis_i2c_writeb(locty, TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_REQUEST_USE); + + access = tpm_tis_i2c_readb(locty, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_ACTIVE_LOCALITY | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* release access */ + tpm_tis_i2c_writeb(locty, TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_ACTIVE_LOCALITY); + access = tpm_tis_i2c_readb(locty, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + } +} + +/* + * Test case for seizing access by a higher number locality + */ +static void tpm_tis_i2c_test_check_access_reg_seize(const void *data) +{ + int locty, l; + uint8_t access; + uint8_t pending_request_flag; + + /* do not test locality 4 (hw only) */ + for (locty = 0; locty < TPM_TIS_NUM_LOCALITIES - 1; locty++) { + pending_request_flag = 0; + + access = tpm_tis_i2c_readb(locty, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* request use of locality */ + tpm_tis_i2c_writeb(locty, + TPM_I2C_REG_ACCESS, TPM_TIS_ACCESS_REQUEST_USE); + access = tpm_tis_i2c_readb(locty, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_ACTIVE_LOCALITY | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* lower localities cannot seize access */ + for (l = 0; l < locty; l++) { + /* lower locality is not active */ + access = tpm_tis_i2c_readb(l, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + pending_request_flag | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* try to request use from 'l' */ + tpm_tis_i2c_writeb(l, + TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_REQUEST_USE); + + /* + * requesting use from 'l' was not possible; + * we must see REQUEST_USE and possibly PENDING_REQUEST + */ + access = tpm_tis_i2c_readb(l, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_REQUEST_USE | + pending_request_flag | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* + * locality 'locty' must be unchanged; + * we must see PENDING_REQUEST + */ + access = tpm_tis_i2c_readb(locty, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_ACTIVE_LOCALITY | + TPM_TIS_ACCESS_PENDING_REQUEST | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* try to seize from 'l' */ + tpm_tis_i2c_writeb(l, + TPM_I2C_REG_ACCESS, TPM_TIS_ACCESS_SEIZE); + /* seize from 'l' was not possible */ + access = tpm_tis_i2c_readb(l, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_REQUEST_USE | + pending_request_flag | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* locality 'locty' must be unchanged */ + access = tpm_tis_i2c_readb(locty, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_ACTIVE_LOCALITY | + TPM_TIS_ACCESS_PENDING_REQUEST | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* + * on the next loop we will have a PENDING_REQUEST flag + * set for locality 'l' + */ + pending_request_flag = TPM_TIS_ACCESS_PENDING_REQUEST; + } + + /* + * higher localities can 'seize' access but not 'request use'; + * note: this will activate first l+1, then l+2 etc. 
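+ * e.g. with locty == 0 and the usual five localities, the loop below + * seizes with l = 1, then 2, then 3, and locality 3 is the active one + * when it exits (locality 4 is hardware-only and not tested here).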
+ */ + for (l = locty + 1; l < TPM_TIS_NUM_LOCALITIES - 1; l++) { + /* try to 'request use' from 'l' */ + tpm_tis_i2c_writeb(l, TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_REQUEST_USE); + + /* + * requesting use from 'l' was not possible; we should see + * REQUEST_USE and may see PENDING_REQUEST + */ + access = tpm_tis_i2c_readb(l, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_REQUEST_USE | + pending_request_flag | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* + * locality 'l-1' must be unchanged; we should always + * see PENDING_REQUEST from 'l' requesting access + */ + access = tpm_tis_i2c_readb(l - 1, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_ACTIVE_LOCALITY | + TPM_TIS_ACCESS_PENDING_REQUEST | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* try to seize from 'l' */ + tpm_tis_i2c_writeb(l, TPM_I2C_REG_ACCESS, TPM_TIS_ACCESS_SEIZE); + + /* seize from 'l' was possible */ + access = tpm_tis_i2c_readb(l, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_ACTIVE_LOCALITY | + pending_request_flag | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* l - 1 should show that it has BEEN_SEIZED */ + access = tpm_tis_i2c_readb(l - 1, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_BEEN_SEIZED | + pending_request_flag | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* clear the BEEN_SEIZED flag and make sure it's gone */ + tpm_tis_i2c_writeb(l - 1, TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_BEEN_SEIZED); + + access = tpm_tis_i2c_readb(l - 1, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + pending_request_flag | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + } + + /* + * PENDING_REQUEST will not be set if locty = 0 since all localities + * were active; in case of locty = 1, locality 0 will be active + * but no PENDING_REQUEST anywhere + */ + if (locty <= 1) { + pending_request_flag = 0; + } + + /* release access from l - 1; this activates locty - 1 */ + l--; + + access = tpm_tis_i2c_readb(l, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + + DPRINTF("%s: %d: relinquishing control on l = %d\n", + __func__, __LINE__, l); + tpm_tis_i2c_writeb(l, TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_ACTIVE_LOCALITY); + + access = tpm_tis_i2c_readb(l, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + pending_request_flag | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + for (l = locty - 1; l >= 0; l--) { + access = tpm_tis_i2c_readb(l, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_ACTIVE_LOCALITY | + pending_request_flag | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* release this locality */ + tpm_tis_i2c_writeb(l, TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_ACTIVE_LOCALITY); + + if (l == 1) { + pending_request_flag = 0; + } + } + + /* no locality may be active now */ + for (l = 0; l < TPM_TIS_NUM_LOCALITIES - 1; l++) { + access = tpm_tis_i2c_readb(l, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + } + } +} + +/* + * Test case for getting access when higher number locality relinquishes access + */ +static void tpm_tis_i2c_test_check_access_reg_release(const void *data) +{ + int locty, l; + uint8_t access; + uint8_t 
pending_request_flag; + + /* do not test locality 4 (hw only) */ + for (locty = TPM_TIS_NUM_LOCALITIES - 2; locty >= 0; locty--) { + pending_request_flag = 0; + + access = tpm_tis_i2c_readb(locty, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* request use of locality */ + tpm_tis_i2c_writeb(locty, TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_REQUEST_USE); + access = tpm_tis_i2c_readb(locty, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_ACTIVE_LOCALITY | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + /* request use of all other localities */ + for (l = 0; l < TPM_TIS_NUM_LOCALITIES - 1; l++) { + if (l == locty) { + continue; + } + /* + * request use of locality 'l' -- we MUST see REQUEST USE and + * may see PENDING_REQUEST + */ + tpm_tis_i2c_writeb(l, TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_REQUEST_USE); + access = tpm_tis_i2c_readb(l, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_REQUEST_USE | + pending_request_flag | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + pending_request_flag = TPM_TIS_ACCESS_PENDING_REQUEST; + } + /* release locality 'locty' */ + tpm_tis_i2c_writeb(locty, TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_ACTIVE_LOCALITY); + /* + * highest locality should now be active; release it and make sure the + * next highest locality is active afterwards + */ + for (l = TPM_TIS_NUM_LOCALITIES - 2; l >= 0; l--) { + if (l == locty) { + continue; + } + /* 'l' should be active now */ + access = tpm_tis_i2c_readb(l, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_ACTIVE_LOCALITY | + pending_request_flag | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + /* 'l' relinquishes access */ + tpm_tis_i2c_writeb(l, TPM_I2C_REG_ACCESS, + TPM_TIS_ACCESS_ACTIVE_LOCALITY); + access = tpm_tis_i2c_readb(l, TPM_I2C_REG_ACCESS); + DPRINTF_ACCESS; + if (l == 1 || (locty <= 1 && l == 2)) { + pending_request_flag = 0; + } + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + pending_request_flag | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + } + } +} + +/* + * Test case for transmitting packets + */ +static void tpm_tis_i2c_test_check_transmit(const void *data) +{ + const TPMTestState *s = data; + uint8_t access; + uint32_t sts, v; + uint16_t bcount, csum, bcount2; + size_t i; + + /* enable csum */ + tpm_tis_i2c_writeb(0, TPM_I2C_REG_DATA_CSUM_ENABLE, TPM_DATA_CSUM_ENABLED); + /* check csum enable register has bit 0 set */ + v = tpm_tis_i2c_readb(0, TPM_I2C_REG_DATA_CSUM_ENABLE); + g_assert_cmpint(v, ==, TPM_DATA_CSUM_ENABLED); + /* reading it as 32bit register returns same result */ + v = tpm_tis_i2c_readl(0, TPM_I2C_REG_DATA_CSUM_ENABLE); + g_assert_cmpint(v, ==, TPM_DATA_CSUM_ENABLED); + + /* request use of locality 0 */ + tpm_tis_i2c_writeb(0, TPM_I2C_REG_ACCESS, TPM_TIS_ACCESS_REQUEST_USE); + access = tpm_tis_i2c_readb(0, TPM_I2C_REG_ACCESS); + g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | + TPM_TIS_ACCESS_ACTIVE_LOCALITY | + TPM_TIS_ACCESS_TPM_ESTABLISHMENT); + + sts = tpm_tis_i2c_readl(0, TPM_I2C_REG_STS); + DPRINTF_STS; + + g_assert_cmpint(sts & 0xff, ==, 0); + + bcount = (sts >> 8) & 0xffff; + g_assert_cmpint(bcount, >=, 128); + + /* read bcount from STS + 1 must work also */ + bcount2 = tpm_tis_i2c_readw(0, TPM_I2C_REG_STS + 1); + g_assert_cmpint(bcount, ==, bcount2); + + /* i2c must have bits 26-31 zero */ + g_assert_cmpint(sts & 
(0x1f << 26), ==, 0); + + tpm_tis_i2c_writel(0, TPM_I2C_REG_STS, TPM_TIS_STS_COMMAND_READY); + sts = tpm_tis_i2c_readl(0, TPM_I2C_REG_STS); + DPRINTF_STS; + g_assert_cmpint(sts & 0xff, ==, TPM_TIS_STS_COMMAND_READY); + + /* transmit command */ + for (i = 0; i < sizeof(TPM_CMD); i++) { + tpm_tis_i2c_writeb(0, TPM_I2C_REG_DATA_FIFO, TPM_CMD[i]); + sts = tpm_tis_i2c_readl(0, TPM_I2C_REG_STS); + DPRINTF_STS; + if (i < sizeof(TPM_CMD) - 1) { + g_assert_cmpint(sts & 0xff, ==, + TPM_TIS_STS_EXPECT | TPM_TIS_STS_VALID); + } else { + g_assert_cmpint(sts & 0xff, ==, TPM_TIS_STS_VALID); + } + g_assert_cmpint((sts >> 8) & 0xffff, ==, --bcount); + } + /* read the checksum */ + csum = tpm_tis_i2c_readw(0, TPM_I2C_REG_DATA_CSUM_GET); + g_assert_cmpint(csum, ==, 0x6733); + + /* start processing */ + tpm_tis_i2c_writeb(0, TPM_I2C_REG_STS, TPM_TIS_STS_TPM_GO); + + uint64_t end_time = g_get_monotonic_time() + 50 * G_TIME_SPAN_SECOND; + do { + sts = tpm_tis_i2c_readl(0, TPM_I2C_REG_STS); + if ((sts & TPM_TIS_STS_DATA_AVAILABLE) != 0) { + break; + } + } while (g_get_monotonic_time() < end_time); + + sts = tpm_tis_i2c_readl(0, TPM_I2C_REG_STS); + DPRINTF_STS; + g_assert_cmpint(sts & 0xff, == , + TPM_TIS_STS_VALID | TPM_TIS_STS_DATA_AVAILABLE); + bcount = (sts >> 8) & 0xffff; + + /* read response */ + uint8_t tpm_msg[sizeof(struct tpm_hdr)]; + g_assert_cmpint(sizeof(tpm_msg), ==, bcount); + + for (i = 0; i < sizeof(tpm_msg); i++) { + tpm_msg[i] = tpm_tis_i2c_readb(0, TPM_I2C_REG_DATA_FIFO); + sts = tpm_tis_i2c_readl(0, TPM_I2C_REG_STS); + DPRINTF_STS; + if (sts & TPM_TIS_STS_DATA_AVAILABLE) { + g_assert_cmpint((sts >> 8) & 0xffff, ==, --bcount); + } + } + g_assert_cmpmem(tpm_msg, sizeof(tpm_msg), s->tpm_msg, sizeof(*s->tpm_msg)); + + /* relinquish use of locality 0 */ + tpm_tis_i2c_writeb(0, + TPM_I2C_REG_ACCESS, TPM_TIS_ACCESS_ACTIVE_LOCALITY); + access = tpm_tis_i2c_readb(0, TPM_I2C_REG_ACCESS); +} + +int main(int argc, char **argv) +{ + int ret; + char *args; + char *tmp_path = g_dir_make_tmp("qemu-tpm-tis-i2c-test.XXXXXX", NULL); + GThread *thread; + TPMTestState test; + + module_call_init(MODULE_INIT_QOM); + g_test_init(&argc, &argv, NULL); + + test.addr = g_new0(SocketAddress, 1); + test.addr->type = SOCKET_ADDRESS_TYPE_UNIX; + test.addr->u.q_unix.path = g_build_filename(tmp_path, "sock", NULL); + g_mutex_init(&test.data_mutex); + g_cond_init(&test.data_cond); + test.data_cond_signal = false; + test.tpm_version = TPM_VERSION_2_0; + + thread = g_thread_new(NULL, tpm_emu_ctrl_thread, &test); + tpm_emu_test_wait_cond(&test); + + aspeed_bus_addr = ast2600_i2c_calc_bus_addr(I2C_DEV_BUS_NUM); + + args = g_strdup_printf( + "-machine rainier-bmc -accel tcg " + "-chardev socket,id=chr,path=%s " + "-tpmdev emulator,id=tpm0,chardev=chr " + "-device tpm-tis-i2c,tpmdev=tpm0,bus=aspeed.i2c.bus.%d,address=0x%x", + test.addr->u.q_unix.path, + I2C_DEV_BUS_NUM, + I2C_SLAVE_ADDR); + qtest_start(args); + + qtest_add_data_func("/tpm-tis-i2c/test_basic", &test, + tpm_tis_i2c_test_basic); + + qtest_add_data_func("/tpm-tis-i2c/test_check_localities", &test, + tpm_tis_i2c_test_check_localities); + + qtest_add_data_func("/tpm-tis-i2c/check_access_reg", &test, + tpm_tis_i2c_test_check_access_reg); + + qtest_add_data_func("/tpm-tis-i2c/check_access_reg_seize", &test, + tpm_tis_i2c_test_check_access_reg_seize); + + qtest_add_data_func("/tpm-tis-i2c/check_access_reg_release", &test, + tpm_tis_i2c_test_check_access_reg_release); + + qtest_add_data_func("/tpm-tis-i2c/test_check_transmit", &test, + tpm_tis_i2c_test_check_transmit); + + 
ret = g_test_run(); + + qtest_end(); + + g_thread_join(thread); + g_unlink(test.addr->u.q_unix.path); + qapi_free_SocketAddress(test.addr); + g_rmdir(tmp_path); + g_free(tmp_path); + g_free(args); + return ret; +} diff --git a/tests/qtest/tpm-tis-swtpm-test.c b/tests/qtest/tpm-tis-swtpm-test.c index 11539c0a52..105e42e21d 100644 --- a/tests/qtest/tpm-tis-swtpm-test.c +++ b/tests/qtest/tpm-tis-swtpm-test.c @@ -17,6 +17,7 @@ #include "libqtest.h" #include "qemu/module.h" #include "tpm-tests.h" +#include "tpm-tis-util.h" #include "hw/acpi/tpm.h" uint64_t tpm_tis_base_addr = TPM_TIS_ADDR_BASE; @@ -31,7 +32,7 @@ static void tpm_tis_swtpm_test(const void *data) { const TestState *ts = data; - tpm_test_swtpm_test(ts->src_tpm_path, tpm_util_tis_transfer, + tpm_test_swtpm_test(ts->src_tpm_path, tpm_tis_transfer, "tpm-tis", NULL); } @@ -40,7 +41,7 @@ static void tpm_tis_swtpm_migration_test(const void *data) const TestState *ts = data; tpm_test_swtpm_migration_test(ts->src_tpm_path, ts->dst_tpm_path, ts->uri, - tpm_util_tis_transfer, "tpm-tis", NULL); + tpm_tis_transfer, "tpm-tis", NULL); } int main(int argc, char **argv) diff --git a/tests/qtest/tpm-tis-util.c b/tests/qtest/tpm-tis-util.c index 939893bf01..728cd3e065 100644 --- a/tests/qtest/tpm-tis-util.c +++ b/tests/qtest/tpm-tis-util.c @@ -52,7 +52,7 @@ void tpm_tis_test_check_localities(const void *data) uint32_t rid; for (locty = 0; locty < TPM_TIS_NUM_LOCALITIES; locty++) { - access = readb(TIS_REG(0, TPM_TIS_REG_ACCESS)); + access = readb(TIS_REG(locty, TPM_TIS_REG_ACCESS)); g_assert_cmpint(access, ==, TPM_TIS_ACCESS_TPM_REG_VALID_STS | TPM_TIS_ACCESS_TPM_ESTABLISHMENT); @@ -449,3 +449,48 @@ void tpm_tis_test_check_transmit(const void *data) writeb(TIS_REG(0, TPM_TIS_REG_ACCESS), TPM_TIS_ACCESS_ACTIVE_LOCALITY); access = readb(TIS_REG(0, TPM_TIS_REG_ACCESS)); } + +void tpm_tis_transfer(QTestState *s, + const unsigned char *req, size_t req_size, + unsigned char *rsp, size_t rsp_size) +{ + uint32_t sts; + uint16_t bcount; + size_t i; + + /* request use of locality 0 */ + qtest_writeb(s, TIS_REG(0, TPM_TIS_REG_ACCESS), TPM_TIS_ACCESS_REQUEST_USE); + qtest_writel(s, TIS_REG(0, TPM_TIS_REG_STS), TPM_TIS_STS_COMMAND_READY); + + sts = qtest_readl(s, TIS_REG(0, TPM_TIS_REG_STS)); + bcount = (sts >> 8) & 0xffff; + g_assert_cmpint(bcount, >=, req_size); + + /* transmit command */ + for (i = 0; i < req_size; i++) { + qtest_writeb(s, TIS_REG(0, TPM_TIS_REG_DATA_FIFO), req[i]); + } + + /* start processing */ + qtest_writeb(s, TIS_REG(0, TPM_TIS_REG_STS), TPM_TIS_STS_TPM_GO); + + uint64_t end_time = g_get_monotonic_time() + 50 * G_TIME_SPAN_SECOND; + do { + sts = qtest_readl(s, TIS_REG(0, TPM_TIS_REG_STS)); + if ((sts & TPM_TIS_STS_DATA_AVAILABLE) != 0) { + break; + } + } while (g_get_monotonic_time() < end_time); + + sts = qtest_readl(s, TIS_REG(0, TPM_TIS_REG_STS)); + bcount = (sts >> 8) & 0xffff; + + memset(rsp, 0, rsp_size); + for (i = 0; i < bcount; i++) { + rsp[i] = qtest_readb(s, TIS_REG(0, TPM_TIS_REG_DATA_FIFO)); + } + + /* relinquish use of locality 0 */ + qtest_writeb(s, TIS_REG(0, TPM_TIS_REG_ACCESS), + TPM_TIS_ACCESS_ACTIVE_LOCALITY); +} diff --git a/tests/qtest/tpm-tis-util.h b/tests/qtest/tpm-tis-util.h index d10efe86ae..03910a7ba7 100644 --- a/tests/qtest/tpm-tis-util.h +++ b/tests/qtest/tpm-tis-util.h @@ -20,4 +20,8 @@ void tpm_tis_test_check_access_reg_seize(const void *data); void tpm_tis_test_check_access_reg_release(const void *data); void tpm_tis_test_check_transmit(const void *data); +void tpm_tis_transfer(QTestState *s, + const 
unsigned char *req, size_t req_size, + unsigned char *rsp, size_t rsp_size); + #endif /* TESTS_TPM_TIS_UTIL_H */ diff --git a/tests/qtest/tpm-util.c b/tests/qtest/tpm-util.c index a7efe2d0d2..1c0319e6e7 100644 --- a/tests/qtest/tpm-util.c +++ b/tests/qtest/tpm-util.c @@ -51,51 +51,6 @@ void tpm_util_crb_transfer(QTestState *s, qtest_memread(s, raddr, rsp, rsp_size); } -void tpm_util_tis_transfer(QTestState *s, - const unsigned char *req, size_t req_size, - unsigned char *rsp, size_t rsp_size) -{ - uint32_t sts; - uint16_t bcount; - size_t i; - - /* request use of locality 0 */ - qtest_writeb(s, TIS_REG(0, TPM_TIS_REG_ACCESS), TPM_TIS_ACCESS_REQUEST_USE); - qtest_writel(s, TIS_REG(0, TPM_TIS_REG_STS), TPM_TIS_STS_COMMAND_READY); - - sts = qtest_readl(s, TIS_REG(0, TPM_TIS_REG_STS)); - bcount = (sts >> 8) & 0xffff; - g_assert_cmpint(bcount, >=, req_size); - - /* transmit command */ - for (i = 0; i < req_size; i++) { - qtest_writeb(s, TIS_REG(0, TPM_TIS_REG_DATA_FIFO), req[i]); - } - - /* start processing */ - qtest_writeb(s, TIS_REG(0, TPM_TIS_REG_STS), TPM_TIS_STS_TPM_GO); - - uint64_t end_time = g_get_monotonic_time() + 50 * G_TIME_SPAN_SECOND; - do { - sts = qtest_readl(s, TIS_REG(0, TPM_TIS_REG_STS)); - if ((sts & TPM_TIS_STS_DATA_AVAILABLE) != 0) { - break; - } - } while (g_get_monotonic_time() < end_time); - - sts = qtest_readl(s, TIS_REG(0, TPM_TIS_REG_STS)); - bcount = (sts >> 8) & 0xffff; - - memset(rsp, 0, rsp_size); - for (i = 0; i < bcount; i++) { - rsp[i] = qtest_readb(s, TIS_REG(0, TPM_TIS_REG_DATA_FIFO)); - } - - /* relinquish use of locality 0 */ - qtest_writeb(s, TIS_REG(0, TPM_TIS_REG_ACCESS), - TPM_TIS_ACCESS_ACTIVE_LOCALITY); -} - void tpm_util_startup(QTestState *s, tx_func *tx) { unsigned char buffer[1024]; diff --git a/tests/qtest/tpm-util.h b/tests/qtest/tpm-util.h index 80720afac0..0cb28dd6e5 100644 --- a/tests/qtest/tpm-util.h +++ b/tests/qtest/tpm-util.h @@ -27,9 +27,6 @@ typedef void (tx_func)(QTestState *s, void tpm_util_crb_transfer(QTestState *s, const unsigned char *req, size_t req_size, unsigned char *rsp, size_t rsp_size); -void tpm_util_tis_transfer(QTestState *s, - const unsigned char *req, size_t req_size, - unsigned char *rsp, size_t rsp_size); void tpm_util_startup(QTestState *s, tx_func *tx); void tpm_util_pcrextend(QTestState *s, tx_func *tx); diff --git a/tests/qtest/usb-hcd-ehci-test.c b/tests/qtest/usb-hcd-ehci-test.c index c51e8bb223..87e37cdd7c 100644 --- a/tests/qtest/usb-hcd-ehci-test.c +++ b/tests/qtest/usb-hcd-ehci-test.c @@ -149,6 +149,11 @@ int main(int argc, char **argv) g_test_init(&argc, &argv, NULL); + if (!qtest_has_device("ich9-usb-ehci1") || + !qtest_has_device("ich9-usb-uhci1")) { + return 0; + } + qtest_add_func("/ehci/pci/uhci-port-1", pci_uhci_port_1); qtest_add_func("/ehci/pci/ehci-port-1", pci_ehci_port_1); qtest_add_func("/ehci/pci/ehci-config", pci_ehci_config); diff --git a/tests/qtest/usb-hcd-uhci-test.c b/tests/qtest/usb-hcd-uhci-test.c index f264d2bf73..28751f53da 100644 --- a/tests/qtest/usb-hcd-uhci-test.c +++ b/tests/qtest/usb-hcd-uhci-test.c @@ -66,10 +66,17 @@ int main(int argc, char **argv) g_test_init(&argc, &argv, NULL); + if (!qtest_has_device("piix3-usb-uhci")) { + g_debug("piix3-usb-uhci not available"); + return 0; + } + qtest_add_func("/uhci/pci/init", test_uhci_init); qtest_add_func("/uhci/pci/port1", test_port_1); qtest_add_func("/uhci/pci/hotplug", test_uhci_hotplug); - qtest_add_func("/uhci/pci/hotplug/usb-storage", test_usb_storage_hotplug); + if (qtest_has_device("usb-storage")) { + 
qtest_add_func("/uhci/pci/hotplug/usb-storage", test_usb_storage_hotplug); + } if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { qs = qtest_pc_boot("%s", cmd); diff --git a/tests/qtest/vhost-user-test.c b/tests/qtest/vhost-user-test.c index bf9f7c4248..e4f95b2858 100644 --- a/tests/qtest/vhost-user-test.c +++ b/tests/qtest/vhost-user-test.c @@ -351,7 +351,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) if (size != msg.size) { qos_printf("%s: Wrong message size received %d != %d\n", __func__, size, msg.size); - return; + goto out; } } @@ -509,6 +509,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) break; } +out: g_mutex_unlock(&s->data_mutex); } diff --git a/tests/qtest/virtio-blk-test.c b/tests/qtest/virtio-blk-test.c index 19c01f808b..98c906ebb4 100644 --- a/tests/qtest/virtio-blk-test.c +++ b/tests/qtest/virtio-blk-test.c @@ -17,9 +17,6 @@ #include "libqos/qgraph.h" #include "libqos/virtio-blk.h" -/* TODO actually test the results and get rid of this */ -#define qmp_discard_response(...) qobject_unref(qmp(__VA_ARGS__)) - #define TEST_IMAGE_SIZE (64 * 1024 * 1024) #define QVIRTIO_BLK_TIMEOUT_US (30 * 1000 * 1000) #define PCI_SLOT_HP 0x06 @@ -453,9 +450,10 @@ static void config(void *obj, void *data, QGuestAllocator *t_alloc) qvirtio_set_driver_ok(dev); - qmp_discard_response("{ 'execute': 'block_resize', " - " 'arguments': { 'device': 'drive0', " - " 'size': %d } }", n_size); + qtest_qmp_assert_success(global_qtest, + "{ 'execute': 'block_resize', " + " 'arguments': { 'device': 'drive0', " + " 'size': %d } }", n_size); qvirtio_wait_config_isr(dev, QVIRTIO_BLK_TIMEOUT_US); capacity = qvirtio_config_readq(dev, 0); @@ -502,9 +500,10 @@ static void msix(void *obj, void *u_data, QGuestAllocator *t_alloc) qvirtio_set_driver_ok(dev); - qmp_discard_response("{ 'execute': 'block_resize', " - " 'arguments': { 'device': 'drive0', " - " 'size': %d } }", n_size); + qtest_qmp_assert_success(global_qtest, + "{ 'execute': 'block_resize', " + " 'arguments': { 'device': 'drive0', " + " 'size': %d } }", n_size); qvirtio_wait_config_isr(dev, QVIRTIO_BLK_TIMEOUT_US); @@ -758,9 +757,10 @@ static void resize(void *obj, void *data, QGuestAllocator *t_alloc) vq = test_basic(dev, t_alloc); - qmp_discard_response("{ 'execute': 'block_resize', " - " 'arguments': { 'device': 'drive0', " - " 'size': %d } }", n_size); + qtest_qmp_assert_success(global_qtest, + "{ 'execute': 'block_resize', " + " 'arguments': { 'device': 'drive0', " + " 'size': %d } }", n_size); qvirtio_wait_queue_isr(qts, dev, vq, QVIRTIO_BLK_TIMEOUT_US); diff --git a/tests/qtest/virtio-ccw-test.c b/tests/qtest/virtio-ccw-test.c index 2de77bb6fe..f4f5858b84 100644 --- a/tests/qtest/virtio-ccw-test.c +++ b/tests/qtest/virtio-ccw-test.c @@ -17,12 +17,6 @@ #include "libqtest-single.h" #include "libqos/virtio.h" -static void virtio_balloon_nop(void) -{ - global_qtest = qtest_initf("-device virtio-balloon-ccw"); - qtest_end(); -} - static void virtconsole_nop(void) { global_qtest = qtest_initf("-device virtio-serial-ccw,id=vser0 " @@ -53,20 +47,6 @@ static void virtio_serial_hotplug(void) qtest_quit(qts); } -static void virtio_blk_nop(void) -{ - global_qtest = qtest_initf("-drive if=none,id=drv0,file=null-co://," - "file.read-zeroes=on,format=raw " - "-device virtio-blk-ccw,drive=drv0"); - qtest_end(); -} - -static void virtio_net_nop(void) -{ - global_qtest = qtest_initf("-device virtio-net-ccw"); - qtest_end(); -} - static void virtio_rng_nop(void) { global_qtest = qtest_initf("-device 
virtio-rng-ccw"); @@ -96,16 +76,19 @@ static void virtio_scsi_hotplug(void) int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); - qtest_add_func("/virtio/balloon/nop", virtio_balloon_nop); - qtest_add_func("/virtio/console/nop", virtconsole_nop); - qtest_add_func("/virtio/serialport/nop", virtserialport_nop); - qtest_add_func("/virtio/serial/nop", virtio_serial_nop); - qtest_add_func("/virtio/serial/hotplug", virtio_serial_hotplug); - qtest_add_func("/virtio/block/nop", virtio_blk_nop); - qtest_add_func("/virtio/net/nop", virtio_net_nop); - qtest_add_func("/virtio/rng/nop", virtio_rng_nop); - qtest_add_func("/virtio/scsi/nop", virtio_scsi_nop); - qtest_add_func("/virtio/scsi/hotplug", virtio_scsi_hotplug); + if (qtest_has_device("virtio-serial-ccw")) { + qtest_add_func("/virtio/console/nop", virtconsole_nop); + qtest_add_func("/virtio/serialport/nop", virtserialport_nop); + qtest_add_func("/virtio/serial/nop", virtio_serial_nop); + qtest_add_func("/virtio/serial/hotplug", virtio_serial_hotplug); + } + if (qtest_has_device("virtio-rng-ccw")) { + qtest_add_func("/virtio/rng/nop", virtio_rng_nop); + } + if (qtest_has_device("virtio-rng-ccw")) { + qtest_add_func("/virtio/scsi/nop", virtio_scsi_nop); + qtest_add_func("/virtio/scsi/hotplug", virtio_scsi_hotplug); + } return g_test_run(); } diff --git a/tests/qtest/vmgenid-test.c b/tests/qtest/vmgenid-test.c index efba76e716..324db08c7a 100644 --- a/tests/qtest/vmgenid-test.c +++ b/tests/qtest/vmgenid-test.c @@ -165,13 +165,18 @@ int main(int argc, char **argv) { int ret; + g_test_init(&argc, &argv, NULL); + + if (!qtest_has_accel("tcg") && !qtest_has_accel("kvm")) { + g_test_skip("No KVM or TCG accelerator available"); + return 0; + } + ret = boot_sector_init(disk); if (ret) { return ret; } - g_test_init(&argc, &argv, NULL); - qtest_add_func("/vmgenid/vmgenid/set-guid", vmgenid_set_guid_test); qtest_add_func("/vmgenid/vmgenid/set-guid-auto", diff --git a/tests/qtest/vnc-display-test.c b/tests/qtest/vnc-display-test.c index e52a4326ec..f8933b0761 100644 --- a/tests/qtest/vnc-display-test.c +++ b/tests/qtest/vnc-display-test.c @@ -19,7 +19,7 @@ typedef struct Test { GMainLoop *loop; } Test; -#if !defined(WIN32) && !defined(CONFIG_DARWIN) +#if !defined(CONFIG_DARWIN) static void on_vnc_error(VncConnection* self, const char* msg) @@ -38,10 +38,7 @@ static void on_vnc_auth_failure(VncConnection *self, static bool test_setup(Test *test) { -#ifdef WIN32 - g_test_skip("Not supported on Windows yet"); - return false; -#elif defined(CONFIG_DARWIN) +#if defined(CONFIG_DARWIN) g_test_skip("Broken on Darwin"); return false; #else @@ -59,7 +56,12 @@ test_setup(Test *test) g_signal_connect(test->conn, "vnc-auth-failure", G_CALLBACK(on_vnc_auth_failure), NULL); vnc_connection_set_auth_type(test->conn, VNC_CONNECTION_AUTH_NONE); + +#ifdef WIN32 + vnc_connection_open_fd(test->conn, _get_osfhandle(pair[0])); +#else vnc_connection_open_fd(test->conn, pair[0]); +#endif test->loop = g_main_loop_new(NULL, FALSE); return true; diff --git a/tests/requirements.txt b/tests/requirements.txt index 0ba561b6bd..0e008b9aec 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -1,6 +1,9 @@ # Add Python module requirements, one per line, to be installed -# in the tests/venv Python virtual environment. For more info, +# in the qemu build_dir/pyvenv Python virtual environment. For more info, # refer to: https://pip.pypa.io/en/stable/user_guide/#id1 -# Note that qemu.git/python/ is always implicitly installed. 
-avocado-framework==88.1 +# +# Note that qemu.git/python/ is implicitly installed to this venv when +# 'make check-venv' is run, and will persist until configure is run +# again. +avocado-framework==101.0 pycdlib==1.11.0 diff --git a/tests/tcg/Makefile.target b/tests/tcg/Makefile.target index a3b0aaf8af..72876cc84e 100644 --- a/tests/tcg/Makefile.target +++ b/tests/tcg/Makefile.target @@ -152,13 +152,17 @@ PLUGINS=$(patsubst %.c, lib%.so, $(notdir $(wildcard $(PLUGIN_SRC)/*.c))) # We need to expand the run-plugin-TEST-with-PLUGIN # pre-requisites manually here as we can't use stems to handle it. We -# also add some special helpers the run-plugin- rules can use bellow. +# only expand MULTIARCH_TESTS which are common on most of our targets +# to avoid an exponential explosion as new tests are added. We also +# add some special helpers the run-plugin- rules can use below. +ifneq ($(MULTIARCH_TESTS),) $(foreach p,$(PLUGINS), \ - $(foreach t,$(TESTS),\ + $(foreach t,$(MULTIARCH_TESTS),\ $(eval run-plugin-$(t)-with-$(p): $t $p) \ $(eval RUN_TESTS+=run-plugin-$(t)-with-$(p)))) -endif +endif # MULTIARCH_TESTS +endif # CONFIG_PLUGIN strip-plugin = $(wordlist 1, 1, $(subst -with-, ,$1)) extract-plugin = $(wordlist 2, 2, $(subst -with-, ,$1)) @@ -201,3 +205,10 @@ clean: distclean: rm -f config-cc.mak config-target.mak ../config-$(TARGET).mak + +.PHONY: help +help: + @echo "TCG tests help $(TARGET_NAME)" + @echo "Built with $(CC)" + @echo "Available tests:" + @$(foreach t,$(RUN_TESTS),echo " $t";) diff --git a/tests/tcg/aarch64/Makefile.softmmu-target b/tests/tcg/aarch64/Makefile.softmmu-target index df9747bae8..b74a2534e3 100644 --- a/tests/tcg/aarch64/Makefile.softmmu-target +++ b/tests/tcg/aarch64/Makefile.softmmu-target @@ -81,6 +81,4 @@ pauth-3: $(call skip-test, "BUILD of $@", "missing compiler support") run-pauth-3: $(call skip-test, "RUN of pauth-3", "not built") -run-plugin-pauth-3-with-%: - $(call skip-test, "RUN of pauth-3 ($*)", "not built") endif diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target index db122ab4ff..0315795487 100644 --- a/tests/tcg/aarch64/Makefile.target +++ b/tests/tcg/aarch64/Makefile.target @@ -32,7 +32,6 @@ ifneq ($(CROSS_CC_HAS_ARMV8_3),) AARCH64_TESTS += pauth-1 pauth-2 pauth-4 pauth-5 pauth-%: CFLAGS += -march=armv8.3-a run-pauth-%: QEMU_OPTS += -cpu max -run-plugin-pauth-%: QEMU_OPTS += -cpu max endif # BTI Tests @@ -81,7 +80,7 @@ sha512-vector: sha512.c TESTS += sha512-vector -ifneq ($(HAVE_GDB_BIN),) +ifeq ($(HOST_GDB_SUPPORTS_ARCH),y) GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py run-gdbstub-sysregs: sysregs diff --git a/tests/tcg/arm/Makefile.softmmu-target b/tests/tcg/arm/Makefile.softmmu-target index 7df88ddea8..8b546e2aa3 100644 --- a/tests/tcg/arm/Makefile.softmmu-target +++ b/tests/tcg/arm/Makefile.softmmu-target @@ -23,4 +23,6 @@ LDFLAGS+=-nostdlib -N -static test-armv6m-undef: EXTRA_CFLAGS+=-mcpu=cortex-m0 -mfloat-abi=soft run-test-armv6m-undef: QEMU_OPTS+=-semihosting -M microbit -kernel -run-plugin-test-armv6m-undef-%: QEMU_OPTS+=-semihosting -M microbit -kernel + +# We don't currently support the multiarch system tests +undefine MULTIARCH_TESTS diff --git a/tests/tcg/arm/Makefile.target b/tests/tcg/arm/Makefile.target index b3b1504a1c..0038cef02c 100644 --- a/tests/tcg/arm/Makefile.target +++ b/tests/tcg/arm/Makefile.target @@ -46,11 +46,6 @@ semihosting-arm: semihosting.c run-semihosting-arm: semihosting-arm $(call run-test,$<,$(QEMU) $< 2> $<.err) -run-plugin-semihosting-arm-with-%: - $(call run-test, 
$@, $(QEMU) $(QEMU_OPTS) \ - -plugin $(PLUGIN_LIB)/$(call extract-plugin,$@) \ - $(call strip-plugin,$<) 2> $<.err) - ARM_TESTS += semiconsole-arm semiconsole: CFLAGS += -mthumb @@ -62,9 +57,6 @@ semiconsole-arm: semihosting.c run-semiconsole-arm: semiconsole-arm $(call skip-test, $<, "MANUAL ONLY") -run-plugin-semiconsole-arm-with-%: - $(call skip-test, $<, "MANUAL ONLY") - endif ARM_TESTS += commpage diff --git a/tests/tcg/cris/Makefile.target b/tests/tcg/cris/Makefile.target index 372287bd03..43587d2769 100644 --- a/tests/tcg/cris/Makefile.target +++ b/tests/tcg/cris/Makefile.target @@ -57,3 +57,6 @@ SIMG:=cris-axis-linux-gnu-run # e.g.: make -f ../../tests/tcg/Makefile run-check_orm-on-sim run-%-on-sim: $(call run-test, $<, $(SIMG) $<) + +# We don't currently support the multiarch tests +undefine MULTIARCH_TESTS diff --git a/tests/tcg/hexagon/Makefile.target b/tests/tcg/hexagon/Makefile.target index 18e6a5969e..87ed2c90b9 100644 --- a/tests/tcg/hexagon/Makefile.target +++ b/tests/tcg/hexagon/Makefile.target @@ -1,5 +1,5 @@ ## -## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. +## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by @@ -45,6 +45,18 @@ HEX_TESTS += fpstuff HEX_TESTS += overflow HEX_TESTS += signal_context HEX_TESTS += reg_mut +HEX_TESTS += read_write_overlap +HEX_TESTS += vector_add_int +HEX_TESTS += scatter_gather +HEX_TESTS += hvx_misc +HEX_TESTS += hvx_histogram +HEX_TESTS += invalid-slots + +run-and-check-exception = $(call run-test,$2,$3 2>$2.stderr; \ + test $$? -eq 1 && grep -q "exception $(strip $1)" $2.stderr) + +run-invalid-slots: invalid-slots + $(call run-and-check-exception, 0x15, $@, $(QEMU) $(QEMU_OPTS) $<) HEX_TESTS += test_abs HEX_TESTS += test_bitcnt @@ -72,9 +84,47 @@ HEX_TESTS += test_vminh HEX_TESTS += test_vpmpyh HEX_TESTS += test_vspliceb +HEX_TESTS += v68_scalar +HEX_TESTS += v68_hvx +HEX_TESTS += v69_hvx +HEX_TESTS += v73_scalar + TESTS += $(HEX_TESTS) +atomics: atomics.c hex_test.h +brev: brev.c hex_test.h +circ: circ.c hex_test.h +dual_stores: dual_stores.c hex_test.h +fpstuff: fpstuff.c hex_test.h +hex_sigsegv: hex_sigsegv.c hex_test.h +load_align: load_align.c hex_test.h +load_unpack: load_unpack.c hex_test.h +mem_noshuf_exception: mem_noshuf_exception.c hex_test.h +mem_noshuf: mem_noshuf.c hex_test.h +misc: misc.c hex_test.h +multi_result: multi_result.c hex_test.h +overflow: overflow.c hex_test.h +preg_alias: preg_alias.c hex_test.h +read_write_overlap: read_write_overlap.c hex_test.h +reg_mut: reg_mut.c hex_test.h + # This test has to be compiled for the -mv67t target -usr: usr.c +usr: usr.c hex_test.h $(CC) $(CFLAGS) -mv67t -O2 -Wno-inline-asm -Wno-expansion-to-defined $< -o $@ $(LDFLAGS) +# Build this test with -mv71 to exercise the CABAC instruction +misc: misc.c + $(CC) $(CFLAGS) -mv71 -O2 $< -o $@ $(LDFLAGS) +scatter_gather: CFLAGS += -mhvx +vector_add_int: CFLAGS += -mhvx -fvectorize +hvx_misc: hvx_misc.c hvx_misc.h +hvx_misc: CFLAGS += -mhvx +hvx_histogram: CFLAGS += -mhvx -Wno-gnu-folding-constant +v68_hvx: v68_hvx.c hvx_misc.h v6mpy_ref.c.inc +v68_hvx: CFLAGS += -mhvx -Wno-unused-function +v69_hvx: v69_hvx.c hvx_misc.h +v69_hvx: CFLAGS += -mhvx -Wno-unused-function +v73_scalar: CFLAGS += -Wno-unused-function + +hvx_histogram: hvx_histogram.c hvx_histogram_row.S + $(CC) $(CFLAGS) $(CROSS_CC_GUEST_CFLAGS) $^ -o $@ $(LDFLAGS) diff --git 
a/tests/tcg/hexagon/atomics.c b/tests/tcg/hexagon/atomics.c index ff1ceee241..1c2169b28b 100644 --- a/tests/tcg/hexagon/atomics.c +++ b/tests/tcg/hexagon/atomics.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -17,15 +17,19 @@ #include #include +#include <stdint.h> #include #include #include #include -/* Using volatile because we are testing atomics */ -static inline int atomic_inc32(volatile int *x) +int err; + +#include "hex_test.h" + +static inline int32_t atomic_inc32(int32_t *x) { - int old, dummy; + int32_t old, dummy; __asm__ __volatile__( "1: %0 = memw_locked(%2)\n\t" " %1 = add(%0, #1)\n\t" @@ -37,10 +41,9 @@ static inline int atomic_inc32(volatile int *x) return old; } -/* Using volatile because we are testing atomics */ -static inline long long atomic_inc64(volatile long long *x) +static inline int64_t atomic_inc64(int64_t *x) { - long long old, dummy; + int64_t old, dummy; __asm__ __volatile__( "1: %0 = memd_locked(%2)\n\t" " %1 = #1\n\t" @@ -53,10 +56,9 @@ static inline long long atomic_inc64(volatile long long *x) return old; } -/* Using volatile because we are testing atomics */ -static inline int atomic_dec32(volatile int *x) +static inline int32_t atomic_dec32(int32_t *x) { - int old, dummy; + int32_t old, dummy; __asm__ __volatile__( "1: %0 = memw_locked(%2)\n\t" " %1 = add(%0, #-1)\n\t" @@ -68,10 +70,9 @@ static inline int atomic_dec32(volatile int *x) return old; } -/* Using volatile because we are testing atomics */ -static inline long long atomic_dec64(volatile long long *x) +static inline int64_t atomic_dec64(int64_t *x) { - long long old, dummy; + int64_t old, dummy; __asm__ __volatile__( "1: %0 = memd_locked(%2)\n\t" " %1 = #-1\n\t" @@ -85,17 +86,12 @@ static inline long long atomic_dec64(volatile long long *x) } #define LOOP_CNT 1000 -/* Using volatile because we are testing atomics */ -volatile int tick32 = 1; -/* Using volatile because we are testing atomics */ -volatile long long tick64 = 1; -int err; +volatile int32_t tick32 = 1; /* Using volatile because we are testing atomics */ +volatile int64_t tick64 = 1; /* Using volatile because we are testing atomics */ void *thread1_func(void *arg) { - int i; - - for (i = 0; i < LOOP_CNT; i++) { + for (int i = 0; i < LOOP_CNT; i++) { atomic_inc32(&tick32); atomic_dec64(&tick64); } @@ -104,8 +100,7 @@ void *thread1_func(void *arg) void *thread2_func(void *arg) { - int i; - for (i = 0; i < LOOP_CNT; i++) { + for (int i = 0; i < LOOP_CNT; i++) { atomic_dec32(&tick32); atomic_inc64(&tick64); } @@ -121,14 +116,8 @@ void test_pthread(void) pthread_join(tid1, NULL); pthread_join(tid2, NULL); - if (tick32 != 1) { - printf("ERROR: tick32 %d != 1\n", tick32); - err++; - } - if (tick64 != 1) { - printf("ERROR: tick64 %lld != 1\n", tick64); - err++; - } + check32(tick32, 1); + check64(tick64, 1); } int main(int argc, char **argv) diff --git a/tests/tcg/hexagon/brev.c b/tests/tcg/hexagon/brev.c index 9736a2405d..6c7b134084 100644 --- a/tests/tcg/hexagon/brev.c +++ b/tests/tcg/hexagon/brev.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -17,16 +17,19 @@ #include #include +#include <stdint.h> int err; +#include "hex_test.h" + #define NBITS 8 #define SIZE (1 << NBITS) -long long dbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; -int wbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; -short hbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; -unsigned char bbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; +int64_t dbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; +int32_t wbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; +int16_t hbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; +uint8_t bbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; /* * We use the C preprocessor to deal with the combinations of types @@ -90,11 +93,10 @@ unsigned char bbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; #define BREV_STORE_wnew(ADDR, VAL, INC) \ BREV_STORE_NEW(w, ADDR, VAL, INC) -int bitreverse(int x) +uint32_t bitreverse(uint32_t x) { - int result = 0; - int i; - for (i = 0; i < NBITS; i++) { + uint32_t result = 0; + for (int i = 0; i < NBITS; i++) { result <<= 1; result |= x & 1; x >>= 1; @@ -102,26 +104,18 @@ int bitreverse(int x) return result; } -int sext8(int x) +int32_t sext8(int32_t x) { return (x << 24) >> 24; } -void check(int i, long long result, long long expect) -{ - if (result != expect) { - printf("ERROR(%d): 0x%04llx != 0x%04llx\n", i, result, expect); - err++; - } -} - #define TEST_BREV_LOAD(SZ, TYPE, BUF, SHIFT, EXP) \ do { \ p = BUF; \ - for (i = 0; i < SIZE; i++) { \ + for (int i = 0; i < SIZE; i++) { \ TYPE result; \ BREV_LOAD_##SZ(result, p, 1 << (SHIFT - NBITS)); \ - check(i, result, EXP); \ + check32(result, EXP); \ } \ } while (0) @@ -129,11 +123,11 @@ void check(int i, long long result, long long expect) do { \ p = BUF; \ memset(BUF, 0xff, sizeof(BUF)); \ - for (i = 0; i < SIZE; i++) { \ + for (int i = 0; i < SIZE; i++) { \ BREV_STORE_##SZ(p, (TYPE)(VAL), 1 << (SHIFT - NBITS)); \ } \ - for (i = 0; i < SIZE; i++) { \ - check(i, BUF[i], bitreverse(i)); \ + for (int i = 0; i < SIZE; i++) { \ + check32(BUF[i], bitreverse(i)); \ } \ } while (0) @@ -141,11 +135,11 @@ void check(int i, long long result, long long expect) do { \ p = BUF; \ memset(BUF, 0xff, sizeof(BUF)); \ - for (i = 0; i < SIZE; i++) { \ + for (int i = 0; i < SIZE; i++) { \ BREV_STORE_##SZ(p, i, 1 << (SHIFT - NBITS)); \ } \ - for (i = 0; i < SIZE; i++) { \ - check(i, BUF[i], bitreverse(i)); \ + for (int i = 0; i < SIZE; i++) { \ + check32(BUF[i], bitreverse(i)); \ } \ } while (0) @@ -158,9 +152,8 @@ int high_half[SIZE]; int main() { void *p; - int i; - for (i = 0; i < SIZE; i++) { + for (int i = 0; i < SIZE; i++) { bbuf[i] = bitreverse(i); hbuf[i] = bitreverse(i); wbuf[i] = bitreverse(i); @@ -168,18 +161,18 @@ int main() high_half[i] = i << 16; } - TEST_BREV_LOAD(b, int, bbuf, 16, sext8(i)); - TEST_BREV_LOAD(ub, int, bbuf, 16, i); - TEST_BREV_LOAD(h, int, hbuf, 15, i); - TEST_BREV_LOAD(uh, int, hbuf, 15, i); - TEST_BREV_LOAD(w, int, wbuf, 14, i); - TEST_BREV_LOAD(d, long long, dbuf, 13, i); + TEST_BREV_LOAD(b, int32_t, bbuf, 16, sext8(i)); + TEST_BREV_LOAD(ub, int32_t, bbuf, 16, i); + TEST_BREV_LOAD(h, int32_t, hbuf, 15, i); + TEST_BREV_LOAD(uh, int32_t, hbuf, 15, i); + TEST_BREV_LOAD(w, int32_t, wbuf, 14, i); + TEST_BREV_LOAD(d, int64_t, dbuf, 13, i); - TEST_BREV_STORE(b, int, bbuf, i, 16); - TEST_BREV_STORE(h, int, hbuf, i, 15); - TEST_BREV_STORE(f, int, hbuf, high_half[i], 15); - TEST_BREV_STORE(w, int, 
wbuf, i, 14); - TEST_BREV_STORE(d, long long, dbuf, i, 13); + TEST_BREV_STORE(b, int32_t, bbuf, i, 16); + TEST_BREV_STORE(h, int32_t, hbuf, i, 15); + TEST_BREV_STORE(f, int32_t, hbuf, high_half[i], 15); + TEST_BREV_STORE(w, int32_t, wbuf, i, 14); + TEST_BREV_STORE(d, int64_t, dbuf, i, 13); TEST_BREV_STORE_NEW(bnew, bbuf, 16); TEST_BREV_STORE_NEW(hnew, hbuf, 15); diff --git a/tests/tcg/hexagon/circ.c b/tests/tcg/hexagon/circ.c index 354416eb6d..ab949ebef1 100644 --- a/tests/tcg/hexagon/circ.c +++ b/tests/tcg/hexagon/circ.c @@ -16,6 +16,11 @@ */ #include +#include <stdint.h> + +int err; + +#include "hex_test.h" #define DEBUG 0 #define DEBUG_PRINTF(...) \ @@ -31,10 +36,10 @@ #define NWORDS (NBYTES / sizeof(int)) #define NDOBLS (NBYTES / sizeof(long long)) -long long dbuf[NDOBLS] __attribute__((aligned(1 << 12))) = {0}; -int wbuf[NWORDS] __attribute__((aligned(1 << 12))) = {0}; -short hbuf[NHALFS] __attribute__((aligned(1 << 12))) = {0}; -unsigned char bbuf[NBYTES] __attribute__((aligned(1 << 12))) = {0}; +int64_t dbuf[NDOBLS] __attribute__((aligned(1 << 12))) = {0}; +int32_t wbuf[NWORDS] __attribute__((aligned(1 << 12))) = {0}; +int16_t hbuf[NHALFS] __attribute__((aligned(1 << 12))) = {0}; +uint8_t bbuf[NBYTES] __attribute__((aligned(1 << 12))) = {0}; /* * We use the C preprocessor to deal with the combinations of types @@ -43,8 +48,7 @@ unsigned char bbuf[NBYTES] __attribute__((aligned(1 << 12))) = {0}; #define INIT(BUF, N) \ void init_##BUF(void) \ { \ - int i; \ - for (i = 0; i < N; i++) { \ + for (int i = 0; i < N; i++) { \ BUF[i] = i; \ } \ } \ @@ -91,7 +95,7 @@ INIT(dbuf, NDOBLS) * mreg[23:17] increment[6:0] * mreg[16:0] circular buffer length */ -static int build_mreg(int inc, int K, int len) +static int32_t build_mreg(int32_t inc, int32_t K, int32_t len) { return ((inc & 0x780) << 21) | ((K & 0xf) << 24) | @@ -213,31 +217,27 @@ static int build_mreg(int inc, int K, int len) CIRC_STORE_NEW_REG(w, VAL, ADDR, START, LEN, INC) -int err; - /* We'll test increments +1 and -1 */ -void check_load(int i, long long result, int inc, int size) +void __check_load(int line, int32_t i, int64_t res, int32_t inc, int32_t size) { - int expect = (i * inc); + int32_t expect = (i * inc); while (expect >= size) { expect -= size; } while (expect < 0) { expect += size; } - if (result != expect) { - printf("ERROR(%d): %lld != %d\n", i, result, expect); - err++; - } + __check32(line, res, expect); } +#define check_load(I, RES, INC, SZ) __check_load(__LINE__, I, RES, INC, SZ) + #define TEST_LOAD_IMM(SZ, TYPE, BUF, BUFSIZE, INC, FMT) \ void circ_test_load_imm_##SZ(void) \ { \ TYPE *p = (TYPE *)BUF; \ - int size = 10; \ - int i; \ - for (i = 0; i < BUFSIZE; i++) { \ + int32_t size = 10; \ + for (int i = 0; i < BUFSIZE; i++) { \ TYPE element; \ CIRC_LOAD_IMM_##SZ(element, p, BUF, size * sizeof(TYPE), (INC)); \ DEBUG_PRINTF("i = %2d, p = 0x%p, element = %2" #FMT "\n", \ @@ -245,7 +245,7 @@ void circ_test_load_imm_##SZ(void) \ check_load(i, element, ((INC) / (int)sizeof(TYPE)), size); \ } \ p = (TYPE *)BUF; \ - for (i = 0; i < BUFSIZE; i++) { \ + for (int i = 0; i < BUFSIZE; i++) { \ TYPE element; \ CIRC_LOAD_IMM_##SZ(element, p, BUF, size * sizeof(TYPE), -(INC)); \ DEBUG_PRINTF("i = %2d, p = 0x%p, element = %2" #FMT "\n", \ @@ -254,20 +254,19 @@ void circ_test_load_imm_##SZ(void) \ } \ } -TEST_LOAD_IMM(b, char, bbuf, NBYTES, 1, d) -TEST_LOAD_IMM(ub, unsigned char, bbuf, NBYTES, 1, d) -TEST_LOAD_IMM(h, short, hbuf, NHALFS, 2, d) -TEST_LOAD_IMM(uh, unsigned short, hbuf, NHALFS, 2, d) -TEST_LOAD_IMM(w, int, wbuf, NWORDS, 4, d) 
-TEST_LOAD_IMM(d, long long, dbuf, NDOBLS, 8, lld) +TEST_LOAD_IMM(b, int8_t, bbuf, NBYTES, 1, d) +TEST_LOAD_IMM(ub, uint8_t, bbuf, NBYTES, 1, d) +TEST_LOAD_IMM(h, int16_t, hbuf, NHALFS, 2, d) +TEST_LOAD_IMM(uh, uint16_t, hbuf, NHALFS, 2, d) +TEST_LOAD_IMM(w, int32_t, wbuf, NWORDS, 4, d) +TEST_LOAD_IMM(d, int64_t, dbuf, NDOBLS, 8, lld) #define TEST_LOAD_REG(SZ, TYPE, BUF, BUFSIZE, FMT) \ void circ_test_load_reg_##SZ(void) \ { \ TYPE *p = (TYPE *)BUF; \ - int size = 13; \ - int i; \ - for (i = 0; i < BUFSIZE; i++) { \ + int32_t size = 13; \ + for (int i = 0; i < BUFSIZE; i++) { \ TYPE element; \ CIRC_LOAD_REG_##SZ(element, p, BUF, size * sizeof(TYPE), 1); \ DEBUG_PRINTF("i = %2d, p = 0x%p, element = %2" #FMT "\n", \ @@ -275,7 +274,7 @@ void circ_test_load_reg_##SZ(void) \ check_load(i, element, 1, size); \ } \ p = (TYPE *)BUF; \ - for (i = 0; i < BUFSIZE; i++) { \ + for (int i = 0; i < BUFSIZE; i++) { \ TYPE element; \ CIRC_LOAD_REG_##SZ(element, p, BUF, size * sizeof(TYPE), -1); \ DEBUG_PRINTF("i = %2d, p = 0x%p, element = %2" #FMT "\n", \ @@ -284,16 +283,16 @@ void circ_test_load_reg_##SZ(void) \ } \ } -TEST_LOAD_REG(b, char, bbuf, NBYTES, d) -TEST_LOAD_REG(ub, unsigned char, bbuf, NBYTES, d) -TEST_LOAD_REG(h, short, hbuf, NHALFS, d) -TEST_LOAD_REG(uh, unsigned short, hbuf, NHALFS, d) -TEST_LOAD_REG(w, int, wbuf, NWORDS, d) -TEST_LOAD_REG(d, long long, dbuf, NDOBLS, lld) +TEST_LOAD_REG(b, int8_t, bbuf, NBYTES, d) +TEST_LOAD_REG(ub, uint8_t, bbuf, NBYTES, d) +TEST_LOAD_REG(h, int16_t, hbuf, NHALFS, d) +TEST_LOAD_REG(uh, uint16_t, hbuf, NHALFS, d) +TEST_LOAD_REG(w, int32_t, wbuf, NWORDS, d) +TEST_LOAD_REG(d, int64_t, dbuf, NDOBLS, lld) /* The circular stores will wrap around somewhere inside the buffer */ #define CIRC_VAL(SZ, TYPE, BUFSIZE) \ -TYPE circ_val_##SZ(int i, int inc, int size) \ +TYPE circ_val_##SZ(int i, int32_t inc, int32_t size) \ { \ int mod = BUFSIZE % size; \ int elem = i * inc; \ @@ -310,33 +309,25 @@ TYPE circ_val_##SZ(int i, int inc, int size) \ } \ } -CIRC_VAL(b, unsigned char, NBYTES) -CIRC_VAL(h, short, NHALFS) -CIRC_VAL(w, int, NWORDS) -CIRC_VAL(d, long long, NDOBLS) +CIRC_VAL(b, uint8_t, NBYTES) +CIRC_VAL(h, int16_t, NHALFS) +CIRC_VAL(w, int32_t, NWORDS) +CIRC_VAL(d, int64_t, NDOBLS) /* * Circular stores should only write to the first "size" elements of the buffer * the remainder of the elements should have BUF[i] == i */ #define CHECK_STORE(SZ, BUF, BUFSIZE, FMT) \ -void check_store_##SZ(int inc, int size) \ +void check_store_##SZ(int32_t inc, int32_t size) \ { \ - int i; \ - for (i = 0; i < size; i++) { \ + for (int i = 0; i < size; i++) { \ DEBUG_PRINTF(#BUF "[%3d] = 0x%02" #FMT ", guess = 0x%02" #FMT "\n", \ i, BUF[i], circ_val_##SZ(i, inc, size)); \ - if (BUF[i] != circ_val_##SZ(i, inc, size)) { \ - printf("ERROR(%3d): 0x%02" #FMT " != 0x%02" #FMT "\n", \ - i, BUF[i], circ_val_##SZ(i, inc, size)); \ - err++; \ - } \ + check64(BUF[i], circ_val_##SZ(i, inc, size)); \ } \ - for (i = size; i < BUFSIZE; i++) { \ - if (BUF[i] != i) { \ - printf("ERROR(%3d): 0x%02" #FMT " != 0x%02x\n", i, BUF[i], i); \ - err++; \ - } \ + for (int i = size; i < BUFSIZE; i++) { \ + check64(BUF[i], i); \ } \ } @@ -348,12 +339,11 @@ CHECK_STORE(d, dbuf, NDOBLS, llx) #define CIRC_TEST_STORE_IMM(SZ, CHK, TYPE, BUF, BUFSIZE, SHIFT, INC) \ void circ_test_store_imm_##SZ(void) \ { \ - unsigned int size = 27; \ + uint32_t size = 27; \ TYPE *p = BUF; \ TYPE val = 0; \ - int i; \ init_##BUF(); \ - for (i = 0; i < BUFSIZE; i++) { \ + for (int i = 0; i < BUFSIZE; i++) { \ CIRC_STORE_IMM_##SZ(val << 
SHIFT, p, BUF, size * sizeof(TYPE), INC); \ val++; \ } \ @@ -361,7 +351,7 @@ void circ_test_store_imm_##SZ(void) \ p = BUF; \ val = 0; \ init_##BUF(); \ - for (i = 0; i < BUFSIZE; i++) { \ + for (int i = 0; i < BUFSIZE; i++) { \ CIRC_STORE_IMM_##SZ(val << SHIFT, p, BUF, size * sizeof(TYPE), \ -(INC)); \ val++; \ @@ -369,24 +359,23 @@ void circ_test_store_imm_##SZ(void) \ check_store_##CHK((-(INC) / (int)sizeof(TYPE)), size); \ } -CIRC_TEST_STORE_IMM(b, b, unsigned char, bbuf, NBYTES, 0, 1) -CIRC_TEST_STORE_IMM(h, h, short, hbuf, NHALFS, 0, 2) -CIRC_TEST_STORE_IMM(f, h, short, hbuf, NHALFS, 16, 2) -CIRC_TEST_STORE_IMM(w, w, int, wbuf, NWORDS, 0, 4) -CIRC_TEST_STORE_IMM(d, d, long long, dbuf, NDOBLS, 0, 8) -CIRC_TEST_STORE_IMM(bnew, b, unsigned char, bbuf, NBYTES, 0, 1) -CIRC_TEST_STORE_IMM(hnew, h, short, hbuf, NHALFS, 0, 2) -CIRC_TEST_STORE_IMM(wnew, w, int, wbuf, NWORDS, 0, 4) +CIRC_TEST_STORE_IMM(b, b, uint8_t, bbuf, NBYTES, 0, 1) +CIRC_TEST_STORE_IMM(h, h, int16_t, hbuf, NHALFS, 0, 2) +CIRC_TEST_STORE_IMM(f, h, int16_t, hbuf, NHALFS, 16, 2) +CIRC_TEST_STORE_IMM(w, w, int32_t, wbuf, NWORDS, 0, 4) +CIRC_TEST_STORE_IMM(d, d, int64_t, dbuf, NDOBLS, 0, 8) +CIRC_TEST_STORE_IMM(bnew, b, uint8_t, bbuf, NBYTES, 0, 1) +CIRC_TEST_STORE_IMM(hnew, h, int16_t, hbuf, NHALFS, 0, 2) +CIRC_TEST_STORE_IMM(wnew, w, int32_t, wbuf, NWORDS, 0, 4) #define CIRC_TEST_STORE_REG(SZ, CHK, TYPE, BUF, BUFSIZE, SHIFT) \ void circ_test_store_reg_##SZ(void) \ { \ TYPE *p = BUF; \ - unsigned int size = 19; \ + uint32_t size = 19; \ TYPE val = 0; \ - int i; \ init_##BUF(); \ - for (i = 0; i < BUFSIZE; i++) { \ + for (int i = 0; i < BUFSIZE; i++) { \ CIRC_STORE_REG_##SZ(val << SHIFT, p, BUF, size * sizeof(TYPE), 1); \ val++; \ } \ @@ -394,35 +383,34 @@ void circ_test_store_reg_##SZ(void) \ p = BUF; \ val = 0; \ init_##BUF(); \ - for (i = 0; i < BUFSIZE; i++) { \ + for (int i = 0; i < BUFSIZE; i++) { \ CIRC_STORE_REG_##SZ(val << SHIFT, p, BUF, size * sizeof(TYPE), -1); \ val++; \ } \ check_store_##CHK(-1, size); \ } -CIRC_TEST_STORE_REG(b, b, unsigned char, bbuf, NBYTES, 0) -CIRC_TEST_STORE_REG(h, h, short, hbuf, NHALFS, 0) -CIRC_TEST_STORE_REG(f, h, short, hbuf, NHALFS, 16) -CIRC_TEST_STORE_REG(w, w, int, wbuf, NWORDS, 0) -CIRC_TEST_STORE_REG(d, d, long long, dbuf, NDOBLS, 0) -CIRC_TEST_STORE_REG(bnew, b, unsigned char, bbuf, NBYTES, 0) -CIRC_TEST_STORE_REG(hnew, h, short, hbuf, NHALFS, 0) -CIRC_TEST_STORE_REG(wnew, w, int, wbuf, NWORDS, 0) +CIRC_TEST_STORE_REG(b, b, uint8_t, bbuf, NBYTES, 0) +CIRC_TEST_STORE_REG(h, h, int16_t, hbuf, NHALFS, 0) +CIRC_TEST_STORE_REG(f, h, int16_t, hbuf, NHALFS, 16) +CIRC_TEST_STORE_REG(w, w, int32_t, wbuf, NWORDS, 0) +CIRC_TEST_STORE_REG(d, d, int64_t, dbuf, NDOBLS, 0) +CIRC_TEST_STORE_REG(bnew, b, uint8_t, bbuf, NBYTES, 0) +CIRC_TEST_STORE_REG(hnew, h, int16_t, hbuf, NHALFS, 0) +CIRC_TEST_STORE_REG(wnew, w, int32_t, wbuf, NWORDS, 0) /* Test the old scheme used in Hexagon V3 */ static void circ_test_v3(void) { int *p = wbuf; - int size = 15; + int32_t size = 15; /* set high bit in K to test unsigned extract in fcirc */ - int K = 8; /* 1024 bytes */ - int element; - int i; + int32_t K = 8; /* 1024 bytes */ + int32_t element; init_wbuf(); - for (i = 0; i < NWORDS; i++) { + for (int i = 0; i < NWORDS; i++) { __asm__( "r4 = %2\n\t" "m1 = r4\n\t" diff --git a/tests/tcg/hexagon/dual_stores.c b/tests/tcg/hexagon/dual_stores.c index a86a381ab9..775458e0fc 100644 --- a/tests/tcg/hexagon/dual_stores.c +++ b/tests/tcg/hexagon/dual_stores.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm 
Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -16,13 +16,18 @@ */ #include +#include <stdint.h> + +int err; + +#include "hex_test.h" /* * Make sure that two stores in the same packet honor proper * semantics: slot 1 executes first, then slot 0. * This is important when the addresses overlap. */ -static inline void dual_stores(int *p, char *q, int x, char y) +static inline void dual_stores(int32_t *p, int8_t *q, int32_t x, int8_t y) { asm volatile("{\n\t" " memw(%0) = %2\n\t" @@ -33,27 +38,17 @@ static inline void dual_stores(int *p, char *q, int x, char y) } typedef union { - int word; - char byte; + int32_t word; + int8_t byte; } Dual; -int err; - -static void check(Dual d, int expect) -{ - if (d.word != expect) { - printf("ERROR: 0x%08x != 0x%08x\n", d.word, expect); - err++; - } -} - int main() { Dual d; d.word = ~0; dual_stores(&d.word, &d.byte, 0x12345678, 0xff); - check(d, 0x123456ff); + check32(d.word, 0x123456ff); puts(err ? "FAIL" : "PASS"); return err; diff --git a/tests/tcg/hexagon/fpstuff.c b/tests/tcg/hexagon/fpstuff.c index 56bf562a40..344b9f7772 100644 --- a/tests/tcg/hexagon/fpstuff.c +++ b/tests/tcg/hexagon/fpstuff.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2020-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2020-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -20,89 +20,44 @@ */ #include - -const int FPINVF_BIT = 1; /* Invalid */ -const int FPINVF = 1 << FPINVF_BIT; -const int FPDBZF_BIT = 2; /* Divide by zero */ -const int FPDBZF = 1 << FPDBZF_BIT; -const int FPOVFF_BIT = 3; /* Overflow */ -const int FPOVFF = 1 << FPOVFF_BIT; -const int FPUNFF_BIT = 4; /* Underflow */ -const int FPUNFF = 1 << FPUNFF_BIT; -const int FPINPF_BIT = 5; /* Inexact */ -const int FPINPF = 1 << FPINPF_BIT; - -const int SF_ZERO = 0x00000000; -const int SF_NaN = 0x7fc00000; -const int SF_NaN_special = 0x7f800001; -const int SF_ANY = 0x3f800000; -const int SF_HEX_NAN = 0xffffffff; -const int SF_small_neg = 0xab98fba8; -const int SF_denorm = 0x00000001; -const int SF_random = 0x346001d6; - -const long long DF_QNaN = 0x7ff8000000000000ULL; -const long long DF_SNaN = 0x7ff7000000000000ULL; -const long long DF_ANY = 0x3f80000000000000ULL; -const long long DF_HEX_NAN = 0xffffffffffffffffULL; -const long long DF_small_neg = 0xbd731f7500000000ULL; +#include <stdint.h> +#include <float.h> int err; -#define CLEAR_FPSTATUS \ - "r2 = usr\n\t" \ - "r2 = clrbit(r2, #1)\n\t" \ - "r2 = clrbit(r2, #2)\n\t" \ - "r2 = clrbit(r2, #3)\n\t" \ - "r2 = clrbit(r2, #4)\n\t" \ - "r2 = clrbit(r2, #5)\n\t" \ - "usr = r2\n\t" +#include "hex_test.h" -static void check_fpstatus_bit(int usr, int expect, int flag, const char *n) +static void check_fpstatus_bit(uint32_t usr, uint32_t expect, uint32_t flag, + const char *name) { - int bit = 1 << flag; + uint32_t bit = 1 << flag; if ((usr & bit) != (expect & bit)) { - printf("ERROR %s: usr = %d, expect = %d\n", n, + printf("ERROR %s: usr = %d, expect = %d\n", name, (usr >> flag) & 1, (expect >> flag) & 1); err++; } } -static void check_fpstatus(int usr, int expect) +static void check_fpstatus(uint32_t usr, uint32_t expect) { - check_fpstatus_bit(usr, expect, FPINVF_BIT, "Invalid"); - check_fpstatus_bit(usr, expect, 
FPDBZF_BIT, "Div by zero"); - check_fpstatus_bit(usr, expect, FPOVFF_BIT, "Overflow"); - check_fpstatus_bit(usr, expect, FPUNFF_BIT, "Underflow"); - check_fpstatus_bit(usr, expect, FPINPF_BIT, "Inexact"); -} - -static void check32(int val, int expect) -{ - if (val != expect) { - printf("ERROR: 0x%x != 0x%x\n", val, expect); - err++; - } -} -static void check64(unsigned long long val, unsigned long long expect) -{ - if (val != expect) { - printf("ERROR: 0x%llx != 0x%llx\n", val, expect); - err++; - } + check_fpstatus_bit(usr, expect, USR_FPINVF_BIT, "Invalid"); + check_fpstatus_bit(usr, expect, USR_FPDBZF_BIT, "Div by zero"); + check_fpstatus_bit(usr, expect, USR_FPOVFF_BIT, "Overflow"); + check_fpstatus_bit(usr, expect, USR_FPUNFF_BIT, "Underflow"); + check_fpstatus_bit(usr, expect, USR_FPINPF_BIT, "Inexact"); } static void check_compare_exception(void) { - int cmp; - int usr; + uint32_t cmp; + uint32_t usr; /* Check that FP compares are quiet (don't raise any execptions) */ asm (CLEAR_FPSTATUS "p0 = sfcmp.eq(%2, %3)\n\t" "%0 = p0\n\t" "%1 = usr\n\t" - : "=r"(cmp), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "=r"(cmp), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "p0", "usr"); check32(cmp, 0); check_fpstatus(usr, 0); @@ -111,7 +66,7 @@ static void check_compare_exception(void) "p0 = sfcmp.gt(%2, %3)\n\t" "%0 = p0\n\t" "%1 = usr\n\t" - : "=r"(cmp), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "=r"(cmp), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "p0", "usr"); check32(cmp, 0); check_fpstatus(usr, 0); @@ -120,7 +75,7 @@ static void check_compare_exception(void) "p0 = sfcmp.ge(%2, %3)\n\t" "%0 = p0\n\t" "%1 = usr\n\t" - : "=r"(cmp), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "=r"(cmp), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "p0", "usr"); check32(cmp, 0); check_fpstatus(usr, 0); @@ -129,7 +84,7 @@ static void check_compare_exception(void) "p0 = dfcmp.eq(%2, %3)\n\t" "%0 = p0\n\t" "%1 = usr\n\t" - : "=r"(cmp), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) + : "=r"(cmp), "=r"(usr) : "r"(DF_QNaN), "r"(DF_any) : "r2", "p0", "usr"); check32(cmp, 0); check_fpstatus(usr, 0); @@ -138,7 +93,7 @@ static void check_compare_exception(void) "p0 = dfcmp.gt(%2, %3)\n\t" "%0 = p0\n\t" "%1 = usr\n\t" - : "=r"(cmp), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) + : "=r"(cmp), "=r"(usr) : "r"(DF_QNaN), "r"(DF_any) : "r2", "p0", "usr"); check32(cmp, 0); check_fpstatus(usr, 0); @@ -147,7 +102,7 @@ static void check_compare_exception(void) "p0 = dfcmp.ge(%2, %3)\n\t" "%0 = p0\n\t" "%1 = usr\n\t" - : "=r"(cmp), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) + : "=r"(cmp), "=r"(usr) : "r"(DF_QNaN), "r"(DF_any) : "r2", "p0", "usr"); check32(cmp, 0); check_fpstatus(usr, 0); @@ -155,8 +110,8 @@ static void check_compare_exception(void) static void check_sfminmax(void) { - int minmax; - int usr; + uint32_t minmax; + uint32_t usr; /* * Execute sfmin/sfmax instructions with one operand as NaN @@ -167,46 +122,46 @@ static void check_sfminmax(void) asm (CLEAR_FPSTATUS "%0 = sfmin(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(minmax), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "=r"(minmax), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "usr"); - check64(minmax, SF_ANY); + check32(minmax, SF_any); check_fpstatus(usr, 0); asm (CLEAR_FPSTATUS "%0 = sfmax(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(minmax), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "=r"(minmax), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "usr"); - check64(minmax, SF_ANY); + check32(minmax, SF_any); check_fpstatus(usr, 0); /* * Execute sfmin/sfmax instructions with both operands NaN * Check that - * 
Result is SF_HEX_NAN + * Result is SF_HEX_NaN * Invalid bit in USR is set */ asm (CLEAR_FPSTATUS "%0 = sfmin(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(minmax), "=r"(usr) : "r"(SF_NaN), "r"(SF_NaN) + : "=r"(minmax), "=r"(usr) : "r"(SF_QNaN), "r"(SF_QNaN) : "r2", "usr"); - check64(minmax, SF_HEX_NAN); + check32(minmax, SF_HEX_NaN); check_fpstatus(usr, 0); asm (CLEAR_FPSTATUS "%0 = sfmax(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(minmax), "=r"(usr) : "r"(SF_NaN), "r"(SF_NaN) + : "=r"(minmax), "=r"(usr) : "r"(SF_QNaN), "r"(SF_QNaN) : "r2", "usr"); - check64(minmax, SF_HEX_NAN); + check32(minmax, SF_HEX_NaN); check_fpstatus(usr, 0); } static void check_dfminmax(void) { - unsigned long long minmax; - int usr; + uint64_t minmax; + uint32_t usr; /* * Execute dfmin/dfmax instructions with one operand as SNaN @@ -217,18 +172,18 @@ static void check_dfminmax(void) asm (CLEAR_FPSTATUS "%0 = dfmin(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(minmax), "=r"(usr) : "r"(DF_SNaN), "r"(DF_ANY) + : "=r"(minmax), "=r"(usr) : "r"(DF_SNaN), "r"(DF_any) : "r2", "usr"); - check64(minmax, DF_ANY); - check_fpstatus(usr, FPINVF); + check64(minmax, DF_any); + check_fpstatus(usr, USR_FPINVF); asm (CLEAR_FPSTATUS "%0 = dfmax(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(minmax), "=r"(usr) : "r"(DF_SNaN), "r"(DF_ANY) + : "=r"(minmax), "=r"(usr) : "r"(DF_SNaN), "r"(DF_any) : "r2", "usr"); - check64(minmax, DF_ANY); - check_fpstatus(usr, FPINVF); + check64(minmax, DF_any); + check_fpstatus(usr, USR_FPINVF); /* * Execute dfmin/dfmax instructions with one operand as QNaN @@ -239,23 +194,23 @@ static void check_dfminmax(void) asm (CLEAR_FPSTATUS "%0 = dfmin(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(minmax), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) + : "=r"(minmax), "=r"(usr) : "r"(DF_QNaN), "r"(DF_any) : "r2", "usr"); - check64(minmax, DF_ANY); + check64(minmax, DF_any); check_fpstatus(usr, 0); asm (CLEAR_FPSTATUS "%0 = dfmax(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(minmax), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) + : "=r"(minmax), "=r"(usr) : "r"(DF_QNaN), "r"(DF_any) : "r2", "usr"); - check64(minmax, DF_ANY); + check64(minmax, DF_any); check_fpstatus(usr, 0); /* * Execute dfmin/dfmax instructions with both operands SNaN * Check that - * Result is DF_HEX_NAN + * Result is DF_HEX_NaN * Invalid bit in USR is set */ asm (CLEAR_FPSTATUS @@ -263,21 +218,21 @@ static void check_dfminmax(void) "%1 = usr\n\t" : "=r"(minmax), "=r"(usr) : "r"(DF_SNaN), "r"(DF_SNaN) : "r2", "usr"); - check64(minmax, DF_HEX_NAN); - check_fpstatus(usr, FPINVF); + check64(minmax, DF_HEX_NaN); + check_fpstatus(usr, USR_FPINVF); asm (CLEAR_FPSTATUS "%0 = dfmax(%2, %3)\n\t" "%1 = usr\n\t" : "=r"(minmax), "=r"(usr) : "r"(DF_SNaN), "r"(DF_SNaN) : "r2", "usr"); - check64(minmax, DF_HEX_NAN); - check_fpstatus(usr, FPINVF); + check64(minmax, DF_HEX_NaN); + check_fpstatus(usr, USR_FPINVF); /* * Execute dfmin/dfmax instructions with both operands QNaN * Check that - * Result is DF_HEX_NAN + * Result is DF_HEX_NaN * No bit in USR is set */ asm (CLEAR_FPSTATUS @@ -285,7 +240,7 @@ static void check_dfminmax(void) "%1 = usr\n\t" : "=r"(minmax), "=r"(usr) : "r"(DF_QNaN), "r"(DF_QNaN) : "r2", "usr"); - check64(minmax, DF_HEX_NAN); + check64(minmax, DF_HEX_NaN); check_fpstatus(usr, 0); asm (CLEAR_FPSTATUS @@ -293,15 +248,15 @@ static void check_dfminmax(void) "%1 = usr\n\t" : "=r"(minmax), "=r"(usr) : "r"(DF_QNaN), "r"(DF_QNaN) : "r2", "usr"); - check64(minmax, DF_HEX_NAN); + check64(minmax, DF_HEX_NaN); check_fpstatus(usr, 0); } static void check_sfrecipa(void) { - int result; - int usr; - int pred; + uint32_t 
result; + uint32_t usr; + uint32_t pred; /* * Check that sfrecipa doesn't set status bits when @@ -310,25 +265,25 @@ asm (CLEAR_FPSTATUS "%0,p0 = sfrecipa(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(result), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "=r"(result), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "p0", "usr"); - check32(result, SF_HEX_NAN); + check32(result, SF_HEX_NaN); check_fpstatus(usr, 0); asm (CLEAR_FPSTATUS "%0,p0 = sfrecipa(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(result), "=r"(usr) : "r"(SF_ANY), "r"(SF_NaN) + : "=r"(result), "=r"(usr) : "r"(SF_any), "r"(SF_QNaN) : "r2", "p0", "usr"); - check32(result, SF_HEX_NAN); + check32(result, SF_HEX_NaN); check_fpstatus(usr, 0); asm (CLEAR_FPSTATUS "%0,p0 = sfrecipa(%2, %2)\n\t" "%1 = usr\n\t" - : "=r"(result), "=r"(usr) : "r"(SF_NaN) + : "=r"(result), "=r"(usr) : "r"(SF_QNaN) : "r2", "p0", "usr"); - check32(result, SF_HEX_NAN); + check32(result, SF_HEX_NaN); check_fpstatus(usr, 0); /* @@ -338,26 +293,26 @@ asm (CLEAR_FPSTATUS "%0,p0 = sfrecipa(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(result), "=r"(usr) : "r"(SF_NaN_special), "r"(SF_ANY) + : "=r"(result), "=r"(usr) : "r"(SF_QNaN_special), "r"(SF_any) : "r2", "p0", "usr"); - check32(result, SF_HEX_NAN); - check_fpstatus(usr, FPINVF); + check32(result, SF_HEX_NaN); + check_fpstatus(usr, USR_FPINVF); asm (CLEAR_FPSTATUS "%0,p0 = sfrecipa(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(result), "=r"(usr) : "r"(SF_ANY), "r"(SF_NaN_special) + : "=r"(result), "=r"(usr) : "r"(SF_any), "r"(SF_QNaN_special) : "r2", "p0", "usr"); - check32(result, SF_HEX_NAN); - check_fpstatus(usr, FPINVF); + check32(result, SF_HEX_NaN); + check_fpstatus(usr, USR_FPINVF); asm (CLEAR_FPSTATUS "%0,p0 = sfrecipa(%2, %2)\n\t" "%1 = usr\n\t" - : "=r"(result), "=r"(usr) : "r"(SF_NaN_special) + : "=r"(result), "=r"(usr) : "r"(SF_QNaN_special) : "r2", "p0", "usr"); - check32(result, SF_HEX_NAN); - check_fpstatus(usr, FPINVF); + check32(result, SF_HEX_NaN); + check_fpstatus(usr, USR_FPINVF); /* * Check that sfrecipa properly sets divide-by-zero @@ -368,12 +323,12 @@ : "=r"(result), "=r"(usr) : "r"(0x885dc960), "r"(0x80000000) : "r2", "p0", "usr"); check32(result, 0x3f800000); - check_fpstatus(usr, FPDBZF); + check_fpstatus(usr, USR_FPDBZF); asm (CLEAR_FPSTATUS "%0,p0 = sfrecipa(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(result), "=r"(usr) : "r"(0x7f800000), "r"(SF_ZERO) + : "=r"(result), "=r"(usr) : "r"(0x7f800000), "r"(SF_zero) : "r2", "p0", "usr"); check32(result, 0x3f800000); check_fpstatus(usr, 0); @@ -392,79 +347,79 @@ static void check_canonical_NaN(void) { - int sf_result; - unsigned long long df_result; - int usr; + uint32_t sf_result; + uint64_t df_result; + uint32_t usr; - /* Check that each FP instruction properly returns SF_HEX_NAN/DF_HEX_NAN */ + /* Check that each FP instruction properly returns SF_HEX_NaN/DF_HEX_NaN */ asm(CLEAR_FPSTATUS "%0 = sfadd(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(sf_result), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "=r"(sf_result), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "usr"); - check32(sf_result, SF_HEX_NAN); + check32(sf_result, SF_HEX_NaN); check_fpstatus(usr, 0); asm(CLEAR_FPSTATUS "%0 = sfsub(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(sf_result), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "=r"(sf_result), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "usr"); - check32(sf_result, SF_HEX_NAN); + check32(sf_result, SF_HEX_NaN); check_fpstatus(usr, 0); asm(CLEAR_FPSTATUS "%0 = 
sfmpy(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(sf_result), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "=r"(sf_result), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "usr"); - check32(sf_result, SF_HEX_NAN); + check32(sf_result, SF_HEX_NaN); check_fpstatus(usr, 0); - sf_result = SF_ZERO; + sf_result = SF_zero; asm(CLEAR_FPSTATUS "%0 += sfmpy(%2, %3)\n\t" "%1 = usr\n\t" - : "+r"(sf_result), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "+r"(sf_result), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "usr"); - check32(sf_result, SF_HEX_NAN); + check32(sf_result, SF_HEX_NaN); check_fpstatus(usr, 0); - sf_result = SF_ZERO; + sf_result = SF_zero; asm(CLEAR_FPSTATUS "p0 = !cmp.eq(r0, r0)\n\t" "%0 += sfmpy(%2, %3, p0):scale\n\t" "%1 = usr\n\t" - : "+r"(sf_result), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "+r"(sf_result), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "usr", "p0"); - check32(sf_result, SF_HEX_NAN); + check32(sf_result, SF_HEX_NaN); check_fpstatus(usr, 0); - sf_result = SF_ZERO; + sf_result = SF_zero; asm(CLEAR_FPSTATUS "%0 -= sfmpy(%2, %3)\n\t" "%1 = usr\n\t" - : "+r"(sf_result), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "+r"(sf_result), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "usr"); - check32(sf_result, SF_HEX_NAN); + check32(sf_result, SF_HEX_NaN); check_fpstatus(usr, 0); - sf_result = SF_ZERO; + sf_result = SF_zero; asm(CLEAR_FPSTATUS "%0 += sfmpy(%2, %3):lib\n\t" "%1 = usr\n\t" - : "+r"(sf_result), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "+r"(sf_result), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "usr"); - check32(sf_result, SF_HEX_NAN); + check32(sf_result, SF_HEX_NaN); check_fpstatus(usr, 0); - sf_result = SF_ZERO; + sf_result = SF_zero; asm(CLEAR_FPSTATUS "%0 -= sfmpy(%2, %3):lib\n\t" "%1 = usr\n\t" - : "+r"(sf_result), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "+r"(sf_result), "=r"(usr) : "r"(SF_QNaN), "r"(SF_any) : "r2", "usr"); - check32(sf_result, SF_HEX_NAN); + check32(sf_result, SF_HEX_NaN); check_fpstatus(usr, 0); asm(CLEAR_FPSTATUS @@ -472,38 +427,38 @@ static void check_canonical_NaN(void) "%1 = usr\n\t" : "=r"(sf_result), "=r"(usr) : "r"(DF_QNaN) : "r2", "usr"); - check32(sf_result, SF_HEX_NAN); + check32(sf_result, SF_HEX_NaN); check_fpstatus(usr, 0); asm(CLEAR_FPSTATUS "%0 = dfadd(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(df_result), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) + : "=r"(df_result), "=r"(usr) : "r"(DF_QNaN), "r"(DF_any) : "r2", "usr"); - check64(df_result, DF_HEX_NAN); + check64(df_result, DF_HEX_NaN); check_fpstatus(usr, 0); asm(CLEAR_FPSTATUS "%0 = dfsub(%2, %3)\n\t" "%1 = usr\n\t" - : "=r"(df_result), "=r"(usr) : "r"(DF_QNaN), "r"(DF_ANY) + : "=r"(df_result), "=r"(usr) : "r"(DF_QNaN), "r"(DF_any) : "r2", "usr"); - check64(df_result, DF_HEX_NAN); + check64(df_result, DF_HEX_NaN); check_fpstatus(usr, 0); asm(CLEAR_FPSTATUS "%0 = convert_sf2df(%2)\n\t" "%1 = usr\n\t" - : "=r"(df_result), "=r"(usr) : "r"(SF_NaN) + : "=r"(df_result), "=r"(usr) : "r"(SF_QNaN) : "r2", "usr"); - check64(df_result, DF_HEX_NAN); + check64(df_result, DF_HEX_NaN); check_fpstatus(usr, 0); } static void check_invsqrta(void) { - int result; - int predval; + uint32_t result; + uint32_t predval; asm volatile("%0,p0 = sfinvsqrta(%2)\n\t" "%1 = p0\n\t" @@ -516,7 +471,7 @@ static void check_invsqrta(void) static void check_sffixupn(void) { - int result; + uint32_t result; /* Check that sffixupn properly deals with denorm */ asm volatile("%0 = sffixupn(%1, %2)\n\t" @@ -527,7 +482,7 @@ static void check_sffixupn(void) static void check_sffixupd(void) { - int result; + uint32_t result; /* Check that 
sffixupd properly deals with denorm */ asm volatile("%0 = sffixupd(%1, %2)\n\t" @@ -536,11 +491,38 @@ static void check_sffixupd(void) check32(result, 0x146001d6); } +static void check_sffms(void) +{ + uint32_t result; + + /* Check that sffms properly deals with -0 */ + result = SF_zero_neg; + asm ("%0 -= sfmpy(%1 , %2)\n\t" + : "+r"(result) + : "r"(SF_zero), "r"(SF_zero) + : "r12", "r8"); + check32(result, SF_zero_neg); + + result = SF_zero; + asm ("%0 -= sfmpy(%1 , %2)\n\t" + : "+r"(result) + : "r"(SF_zero_neg), "r"(SF_zero) + : "r12", "r8"); + check32(result, SF_zero); + + result = SF_zero; + asm ("%0 -= sfmpy(%1 , %2)\n\t" + : "+r"(result) + : "r"(SF_zero), "r"(SF_zero_neg) + : "r12", "r8"); + check32(result, SF_zero); +} + static void check_float2int_convs() { - int res32; - long long res64; - int usr; + uint32_t res32; + uint64_t res64; + uint32_t usr; /* * Check that the various forms of float-to-unsigned @@ -552,7 +534,7 @@ static void check_float2int_convs() : "=r"(res32), "=r"(usr) : "r"(SF_small_neg) : "r2", "usr"); check32(res32, 0); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_sf2uw(%2):chop\n\t" @@ -560,7 +542,7 @@ static void check_float2int_convs() : "=r"(res32), "=r"(usr) : "r"(SF_small_neg) : "r2", "usr"); check32(res32, 0); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_sf2ud(%2)\n\t" @@ -568,7 +550,7 @@ static void check_float2int_convs() : "=r"(res64), "=r"(usr) : "r"(SF_small_neg) : "r2", "usr"); check64(res64, 0); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_sf2ud(%2):chop\n\t" @@ -576,7 +558,7 @@ static void check_float2int_convs() : "=r"(res64), "=r"(usr) : "r"(SF_small_neg) : "r2", "usr"); check64(res64, 0); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_df2uw(%2)\n\t" @@ -584,7 +566,7 @@ static void check_float2int_convs() : "=r"(res32), "=r"(usr) : "r"(DF_small_neg) : "r2", "usr"); check32(res32, 0); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_df2uw(%2):chop\n\t" @@ -592,7 +574,7 @@ static void check_float2int_convs() : "=r"(res32), "=r"(usr) : "r"(DF_small_neg) : "r2", "usr"); check32(res32, 0); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_df2ud(%2)\n\t" @@ -600,7 +582,7 @@ static void check_float2int_convs() : "=r"(res64), "=r"(usr) : "r"(DF_small_neg) : "r2", "usr"); check64(res64, 0); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_df2ud(%2):chop\n\t" @@ -608,7 +590,7 @@ static void check_float2int_convs() : "=r"(res64), "=r"(usr) : "r"(DF_small_neg) : "r2", "usr"); check64(res64, 0); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); /* * Check that the various forms of float-to-signed return -1 for NaN @@ -616,34 +598,34 @@ static void check_float2int_convs() asm(CLEAR_FPSTATUS "%0 = convert_sf2w(%2)\n\t" "%1 = usr\n\t" - : "=r"(res32), "=r"(usr) : "r"(SF_NaN) + : "=r"(res32), "=r"(usr) : "r"(SF_QNaN) : "r2", "usr"); check32(res32, -1); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_sf2w(%2):chop\n\t" "%1 = usr\n\t" - : "=r"(res32), "=r"(usr) : "r"(SF_NaN) + : "=r"(res32), "=r"(usr) : "r"(SF_QNaN) : "r2", "usr"); check32(res32, -1); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = 
convert_sf2d(%2)\n\t" "%1 = usr\n\t" - : "=r"(res64), "=r"(usr) : "r"(SF_NaN) + : "=r"(res64), "=r"(usr) : "r"(SF_QNaN) : "r2", "usr"); check64(res64, -1); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_sf2d(%2):chop\n\t" "%1 = usr\n\t" - : "=r"(res64), "=r"(usr) : "r"(SF_NaN) + : "=r"(res64), "=r"(usr) : "r"(SF_QNaN) : "r2", "usr"); check64(res64, -1); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_df2w(%2)\n\t" @@ -651,7 +633,7 @@ static void check_float2int_convs() : "=r"(res32), "=r"(usr) : "r"(DF_QNaN) : "r2", "usr"); check32(res32, -1); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_df2w(%2):chop\n\t" @@ -659,7 +641,7 @@ static void check_float2int_convs() : "=r"(res32), "=r"(usr) : "r"(DF_QNaN) : "r2", "usr"); check32(res32, -1); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_df2d(%2)\n\t" @@ -667,7 +649,7 @@ static void check_float2int_convs() : "=r"(res64), "=r"(usr) : "r"(DF_QNaN) : "r2", "usr"); check64(res64, -1); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); asm(CLEAR_FPSTATUS "%0 = convert_df2d(%2):chop\n\t" @@ -675,7 +657,58 @@ static void check_float2int_convs() : "=r"(res64), "=r"(usr) : "r"(DF_QNaN) : "r2", "usr"); check64(res64, -1); - check_fpstatus(usr, FPINVF); + check_fpstatus(usr, USR_FPINVF); +} + +static void check_float_consts(void) +{ + uint32_t res32; + uint64_t res64; + + asm("%0 = sfmake(#%1):neg\n\t" : "=r"(res32) : "i"(0xf)); + check32(res32, 0xbc9e0000); + + asm("%0 = sfmake(#%1):pos\n\t" : "=r"(res32) : "i"(0xf)); + check32(res32, 0x3c9e0000); + + asm("%0 = dfmake(#%1):neg\n\t" : "=r"(res64) : "i"(0xf)); + check64(res64, 0xbf93c00000000000ULL); + + asm("%0 = dfmake(#%1):pos\n\t" : "=r"(res64) : "i"(0xf)); + check64(res64, 0x3f93c00000000000ULL); +} + +static inline uint64_t dfmpyll(double x, double y) +{ + uint64_t res64; + asm("%0 = dfmpyll(%1, %2)" : "=r"(res64) : "r"(x), "r"(y)); + return res64; +} + +static inline uint64_t dfmpylh(double acc, double x, double y) +{ + uint64_t res64 = *(uint64_t *)&acc; + asm("%0 += dfmpylh(%1, %2)" : "+r"(res64) : "r"(x), "r"(y)); + return res64; +} + +static void check_dfmpyxx(void) +{ + uint64_t res64; + + res64 = dfmpyll(DBL_MIN, DBL_MIN); + check64(res64, 0ULL); + res64 = dfmpyll(-1.0, DBL_MIN); + check64(res64, 0ULL); + res64 = dfmpyll(DBL_MAX, DBL_MAX); + check64(res64, 0x1fffffffdULL); + + res64 = dfmpylh(DBL_MIN, DBL_MIN, DBL_MIN); + check64(res64, 0x10000000000000ULL); + res64 = dfmpylh(-1.0, DBL_MAX, DBL_MIN); + check64(res64, 0xc00fffffffe00000ULL); + res64 = dfmpylh(DBL_MAX, 0.0, -1.0); + check64(res64, 0x7fefffffffffffffULL); } int main() @@ -688,7 +721,10 @@ int main() check_invsqrta(); check_sffixupn(); check_sffixupd(); + check_sffms(); check_float2int_convs(); + check_float_consts(); + check_dfmpyxx(); puts(err ? "FAIL" : "PASS"); return err ? 1 : 0; diff --git a/tests/tcg/hexagon/hex_sigsegv.c b/tests/tcg/hexagon/hex_sigsegv.c index dc2b349257..f43e0308f9 100644 --- a/tests/tcg/hexagon/hex_sigsegv.c +++ b/tests/tcg/hexagon/hex_sigsegv.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2021-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,6 +25,8 @@ */ #include +#include <stdint.h> +#include <stdbool.h> #include #include #include @@ -32,45 +34,23 @@ #include #include -typedef unsigned char uint8_t; - int err; -int segv_caught; + +#include "hex_test.h" + +bool segv_caught; #define SHOULD_NOT_CHANGE_VAL 5 -int should_not_change = SHOULD_NOT_CHANGE_VAL; +int32_t should_not_change = SHOULD_NOT_CHANGE_VAL; #define BUF_SIZE 300 -unsigned char buf[BUF_SIZE]; - - -static void __check(const char *filename, int line, int x, int expect) -{ - if (x != expect) { - printf("ERROR %s:%d - %d != %d\n", - filename, line, x, expect); - err++; - } -} - -#define check(x, expect) __check(__FILE__, __LINE__, (x), (expect)) - -static void __chk_error(const char *filename, int line, int ret) -{ - if (ret < 0) { - printf("ERROR %s:%d - %d\n", filename, line, ret); - err++; - } -} - -#define chk_error(ret) __chk_error(__FILE__, __LINE__, (ret)) - +uint8_t buf[BUF_SIZE]; jmp_buf jmp_env; static void sig_segv(int sig, siginfo_t *info, void *puc) { - check(sig, SIGSEGV); - segv_caught = 1; + check32(sig, SIGSEGV); + segv_caught = true; longjmp(jmp_env, 1); } @@ -98,8 +78,8 @@ int main() act.sa_flags = 0; chk_error(sigaction(SIGSEGV, &act, NULL)); - check(segv_caught, 1); - check(should_not_change, SHOULD_NOT_CHANGE_VAL); + check32(segv_caught, true); + check32(should_not_change, SHOULD_NOT_CHANGE_VAL); puts(err ? "FAIL" : "PASS"); return err ? EXIT_FAILURE : EXIT_SUCCESS; diff --git a/tests/tcg/hexagon/hex_test.h b/tests/tcg/hexagon/hex_test.h new file mode 100644 index 0000000000..cfed06a58b --- /dev/null +++ b/tests/tcg/hexagon/hex_test.h @@ -0,0 +1,145 @@ +/* + * Copyright(c) 2022-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + + +#ifndef HEX_TEST_H +#define HEX_TEST_H + +static inline void __check32(int line, uint32_t val, uint32_t expect) +{ + if (val != expect) { + printf("ERROR at line %d: 0x%08x != 0x%08x\n", line, val, expect); + err++; + } +} + +#define check32(RES, EXP) __check32(__LINE__, RES, EXP) + +static inline void __check64(int line, uint64_t val, uint64_t expect) +{ + if (val != expect) { + printf("ERROR at line %d: 0x%016llx != 0x%016llx\n", line, val, expect); + err++; + } +} + +#define check64(RES, EXP) __check64(__LINE__, RES, EXP) + +static inline void __chk_error(const char *filename, int line, int ret) +{ + if (ret < 0) { + printf("ERROR %s:%d - %d\n", filename, line, ret); + err++; + } +} + +#define chk_error(ret) __chk_error(__FILE__, __LINE__, (ret)) + +static inline void __checkp(int line, void *p, void *expect) +{ + if (p != expect) { + printf("ERROR at line %d: 0x%p != 0x%p\n", line, p, expect); + err++; + } +} + +#define checkp(RES, EXP) __checkp(__LINE__, RES, EXP) + +static inline void __check32_ne(int line, uint32_t val, uint32_t expect) +{ + if (val == expect) { + printf("ERROR at line %d: 0x%08x == 0x%08x\n", line, val, expect); + err++; + } +} + +#define check32_ne(RES, EXP) __check32_ne(__LINE__, RES, EXP) + +static inline void __check64_ne(int line, uint64_t val, uint64_t expect) +{ + if (val == expect) { + printf("ERROR at line %d: 0x%016llx == 0x%016llx\n", line, val, expect); + err++; + } +} + +#define check64_ne(RES, EXP) __check64_ne(__LINE__, RES, EXP) + +/* Define the bits in Hexagon USR register */ +#define USR_OVF_BIT 0 /* Sticky saturation overflow */ +#define USR_FPINVF_BIT 1 /* IEEE FP invalid sticky flag */ +#define USR_FPDBZF_BIT 2 /* IEEE FP divide-by-zero sticky flag */ +#define USR_FPOVFF_BIT 3 /* IEEE FP overflow sticky flag */ +#define USR_FPUNFF_BIT 4 /* IEEE FP underflow sticky flag */ +#define USR_FPINPF_BIT 5 /* IEEE FP inexact sticky flag */ + +/* Corresponding values in USR */ +#define USR_CLEAR 0 +#define USR_OVF (1 << USR_OVF_BIT) +#define USR_FPINVF (1 << USR_FPINVF_BIT) +#define USR_FPDBZF (1 << USR_FPDBZF_BIT) +#define USR_FPOVFF (1 << USR_FPOVFF_BIT) +#define USR_FPUNFF (1 << USR_FPUNFF_BIT) +#define USR_FPINPF (1 << USR_FPINPF_BIT) + +/* Clear bits 0-5 in USR */ +#define CLEAR_USRBITS \ + "r2 = usr\n\t" \ + "r2 = and(r2, #0xffffffc0)\n\t" \ + "usr = r2\n\t" + +/* Clear bits 1-5 in USR */ +#define CLEAR_FPSTATUS \ + "r2 = usr\n\t" \ + "r2 = and(r2, #0xffffffc1)\n\t" \ + "usr = r2\n\t" + +/* Some useful floating point values */ +const uint32_t SF_INF = 0x7f800000; +const uint32_t SF_QNaN = 0x7fc00000; +const uint32_t SF_QNaN_special = 0x7f800001; +const uint32_t SF_SNaN = 0x7fb00000; +const uint32_t SF_QNaN_neg = 0xffc00000; +const uint32_t SF_SNaN_neg = 0xffb00000; +const uint32_t SF_HEX_NaN = 0xffffffff; +const uint32_t SF_zero = 0x00000000; +const uint32_t SF_zero_neg = 0x80000000; +const uint32_t SF_one = 0x3f800000; +const uint32_t SF_one_recip = 0x3f7f0001; /* 0.9960... 
*/ +const uint32_t SF_one_invsqrta = 0x3f7f0000; /* 0.99609375 */ +const uint32_t SF_two = 0x40000000; +const uint32_t SF_four = 0x40800000; +const uint32_t SF_small_neg = 0xab98fba8; +const uint32_t SF_large_pos = 0x5afa572e; +const uint32_t SF_any = 0x3f800000; +const uint32_t SF_denorm = 0x00000001; +const uint32_t SF_random = 0x346001d6; + +const uint64_t DF_QNaN = 0x7ff8000000000000ULL; +const uint64_t DF_SNaN = 0x7ff7000000000000ULL; +const uint64_t DF_QNaN_neg = 0xfff8000000000000ULL; +const uint64_t DF_SNaN_neg = 0xfff7000000000000ULL; +const uint64_t DF_HEX_NaN = 0xffffffffffffffffULL; +const uint64_t DF_zero = 0x0000000000000000ULL; +const uint64_t DF_zero_neg = 0x8000000000000000ULL; +const uint64_t DF_any = 0x3f80000000000000ULL; +const uint64_t DF_one = 0x3ff0000000000000ULL; +const uint64_t DF_one_hh = 0x3ff001ff80000000ULL; /* 1.00048... */ +const uint64_t DF_small_neg = 0xbd731f7500000000ULL; +const uint64_t DF_large_pos = 0x7f80000000000001ULL; + +#endif diff --git a/tests/tcg/hexagon/hvx_misc.c b/tests/tcg/hexagon/hvx_misc.c index 53d5c9b44f..b45170acd1 100644 --- a/tests/tcg/hexagon/hvx_misc.c +++ b/tests/tcg/hexagon/hvx_misc.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2021-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2021-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -23,69 +23,7 @@ int err; -static void __check(int line, int i, int j, uint64_t result, uint64_t expect) -{ - if (result != expect) { - printf("ERROR at line %d: [%d][%d] 0x%016llx != 0x%016llx\n", - line, i, j, result, expect); - err++; - } -} - -#define check(RES, EXP) __check(__LINE__, RES, EXP) - -#define MAX_VEC_SIZE_BYTES 128 - -typedef union { - uint64_t ud[MAX_VEC_SIZE_BYTES / 8]; - int64_t d[MAX_VEC_SIZE_BYTES / 8]; - uint32_t uw[MAX_VEC_SIZE_BYTES / 4]; - int32_t w[MAX_VEC_SIZE_BYTES / 4]; - uint16_t uh[MAX_VEC_SIZE_BYTES / 2]; - int16_t h[MAX_VEC_SIZE_BYTES / 2]; - uint8_t ub[MAX_VEC_SIZE_BYTES / 1]; - int8_t b[MAX_VEC_SIZE_BYTES / 1]; -} MMVector; - -#define BUFSIZE 16 -#define OUTSIZE 16 -#define MASKMOD 3 - -MMVector buffer0[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); -MMVector buffer1[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); -MMVector mask[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); -MMVector output[OUTSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); -MMVector expect[OUTSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); - -#define CHECK_OUTPUT_FUNC(FIELD, FIELDSZ) \ -static void check_output_##FIELD(int line, size_t num_vectors) \ -{ \ - for (int i = 0; i < num_vectors; i++) { \ - for (int j = 0; j < MAX_VEC_SIZE_BYTES / FIELDSZ; j++) { \ - __check(line, i, j, output[i].FIELD[j], expect[i].FIELD[j]); \ - } \ - } \ -} - -CHECK_OUTPUT_FUNC(d, 8) -CHECK_OUTPUT_FUNC(w, 4) -CHECK_OUTPUT_FUNC(h, 2) -CHECK_OUTPUT_FUNC(b, 1) - -static void init_buffers(void) -{ - int counter0 = 0; - int counter1 = 17; - for (int i = 0; i < BUFSIZE; i++) { - for (int j = 0; j < MAX_VEC_SIZE_BYTES; j++) { - buffer0[i].b[j] = counter0++; - buffer1[i].b[j] = counter1++; - } - for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { - mask[i].w[j] = (i + j % MASKMOD == 0) ? 
0 : 1; - } - } -} +#include "hvx_misc.h" static void test_load_tmp(void) { @@ -122,6 +60,36 @@ static void test_load_tmp(void) check_output_w(__LINE__, BUFSIZE); } +static void test_load_tmp2(void) +{ + void *pout0 = &output[0]; + void *pout1 = &output[1]; + + asm volatile( + "r0 = #0x03030303\n\t" + "v16 = vsplat(r0)\n\t" + "r0 = #0x04040404\n\t" + "v18 = vsplat(r0)\n\t" + "r0 = #0x05050505\n\t" + "v21 = vsplat(r0)\n\t" + "{\n\t" + " v25:24 += vmpyo(v18.w, v14.h)\n\t" + " v15:14.tmp = vcombine(v21, v16)\n\t" + "}\n\t" + "vmem(%0 + #0) = v24\n\t" + "vmem(%1 + #0) = v25\n\t" + : : "r"(pout0), "r"(pout1) + : "r0", "v16", "v18", "v21", "v24", "v25", "memory" + ); + + for (int i = 0; i < MAX_VEC_SIZE_BYTES / 4; ++i) { + expect[0].w[i] = 0x180c0000; + expect[1].w[i] = 0x000c1818; + } + + check_output_w(__LINE__, 2); +} + static void test_load_cur(void) { void *p0 = buffer0; @@ -322,100 +290,6 @@ static void test_max_temps() check_output_b(__LINE__, 5); } -#define VEC_OP1(ASM, EL, IN, OUT) \ - asm("v2 = vmem(%0 + #0)\n\t" \ - "v2" #EL " = " #ASM "(v2" #EL ")\n\t" \ - "vmem(%1 + #0) = v2\n\t" \ - : : "r"(IN), "r"(OUT) : "v2", "memory") - -#define VEC_OP2(ASM, EL, IN0, IN1, OUT) \ - asm("v2 = vmem(%0 + #0)\n\t" \ - "v3 = vmem(%1 + #0)\n\t" \ - "v2" #EL " = " #ASM "(v2" #EL ", v3" #EL ")\n\t" \ - "vmem(%2 + #0) = v2\n\t" \ - : : "r"(IN0), "r"(IN1), "r"(OUT) : "v2", "v3", "memory") - -#define TEST_VEC_OP1(NAME, ASM, EL, FIELD, FIELDSZ, OP) \ -static void test_##NAME(void) \ -{ \ - void *pin = buffer0; \ - void *pout = output; \ - for (int i = 0; i < BUFSIZE; i++) { \ - VEC_OP1(ASM, EL, pin, pout); \ - pin += sizeof(MMVector); \ - pout += sizeof(MMVector); \ - } \ - for (int i = 0; i < BUFSIZE; i++) { \ - for (int j = 0; j < MAX_VEC_SIZE_BYTES / FIELDSZ; j++) { \ - expect[i].FIELD[j] = OP buffer0[i].FIELD[j]; \ - } \ - } \ - check_output_##FIELD(__LINE__, BUFSIZE); \ -} - -#define TEST_VEC_OP2(NAME, ASM, EL, FIELD, FIELDSZ, OP) \ -static void test_##NAME(void) \ -{ \ - void *p0 = buffer0; \ - void *p1 = buffer1; \ - void *pout = output; \ - for (int i = 0; i < BUFSIZE; i++) { \ - VEC_OP2(ASM, EL, p0, p1, pout); \ - p0 += sizeof(MMVector); \ - p1 += sizeof(MMVector); \ - pout += sizeof(MMVector); \ - } \ - for (int i = 0; i < BUFSIZE; i++) { \ - for (int j = 0; j < MAX_VEC_SIZE_BYTES / FIELDSZ; j++) { \ - expect[i].FIELD[j] = buffer0[i].FIELD[j] OP buffer1[i].FIELD[j]; \ - } \ - } \ - check_output_##FIELD(__LINE__, BUFSIZE); \ -} - -#define THRESHOLD 31 - -#define PRED_OP2(ASM, IN0, IN1, OUT, INV) \ - asm("r4 = #%3\n\t" \ - "v1.b = vsplat(r4)\n\t" \ - "v2 = vmem(%0 + #0)\n\t" \ - "q0 = vcmp.gt(v2.b, v1.b)\n\t" \ - "v3 = vmem(%1 + #0)\n\t" \ - "q1 = vcmp.gt(v3.b, v1.b)\n\t" \ - "q2 = " #ASM "(q0, " INV "q1)\n\t" \ - "r4 = #0xff\n\t" \ - "v1.b = vsplat(r4)\n\t" \ - "if (q2) vmem(%2 + #0) = v1\n\t" \ - : : "r"(IN0), "r"(IN1), "r"(OUT), "i"(THRESHOLD) \ - : "r4", "v1", "v2", "v3", "q0", "q1", "q2", "memory") - -#define TEST_PRED_OP2(NAME, ASM, OP, INV) \ -static void test_##NAME(bool invert) \ -{ \ - void *p0 = buffer0; \ - void *p1 = buffer1; \ - void *pout = output; \ - memset(output, 0, sizeof(expect)); \ - for (int i = 0; i < BUFSIZE; i++) { \ - PRED_OP2(ASM, p0, p1, pout, INV); \ - p0 += sizeof(MMVector); \ - p1 += sizeof(MMVector); \ - pout += sizeof(MMVector); \ - } \ - for (int i = 0; i < BUFSIZE; i++) { \ - for (int j = 0; j < MAX_VEC_SIZE_BYTES; j++) { \ - bool p0 = (buffer0[i].b[j] > THRESHOLD); \ - bool p1 = (buffer1[i].b[j] > THRESHOLD); \ - if (invert) { \ - expect[i].b[j] = (p0 OP !p1) ? 
0xff : 0x00; \ - } else { \ - expect[i].b[j] = (p0 OP p1) ? 0xff : 0x00; \ - } \ - } \ - } \ - check_output_b(__LINE__, BUFSIZE); \ -} - TEST_VEC_OP2(vadd_w, vadd, .w, w, 4, +) TEST_VEC_OP2(vadd_h, vadd, .h, h, 2, +) TEST_VEC_OP2(vadd_b, vadd, .b, b, 1, +) @@ -498,49 +372,6 @@ static void test_vsubuwsat_dv(void) check_output_w(__LINE__, 2); } -static void test_vshuff(void) -{ - /* Test that vshuff works when the two operands are the same register */ - const uint32_t splat = 0x089be55c; - const uint32_t shuff = 0x454fa926; - MMVector v0, v1; - - memset(expect, 0x12, sizeof(MMVector)); - memset(output, 0x34, sizeof(MMVector)); - - asm volatile("v25 = vsplat(%0)\n\t" - "vshuff(v25, v25, %1)\n\t" - "vmem(%2 + #0) = v25\n\t" - : /* no outputs */ - : "r"(splat), "r"(shuff), "r"(output) - : "v25", "memory"); - - /* - * The semantics of Hexagon are the operands are pass-by-value, so create - * two copies of the vsplat result. - */ - for (int i = 0; i < MAX_VEC_SIZE_BYTES / 4; i++) { - v0.uw[i] = splat; - v1.uw[i] = splat; - } - /* Do the vshuff operation */ - for (int offset = 1; offset < MAX_VEC_SIZE_BYTES; offset <<= 1) { - if (shuff & offset) { - for (int k = 0; k < MAX_VEC_SIZE_BYTES; k++) { - if (!(k & offset)) { - uint8_t tmp = v0.ub[k]; - v0.ub[k] = v1.ub[k + offset]; - v1.ub[k + offset] = tmp; - } - } - } - } - /* Put the result in the expect buffer for verification */ - expect[0] = v1; - - check_output_b(__LINE__, 1); -} - static void test_load_tmp_predicated(void) { void *p0 = buffer0; @@ -610,11 +441,31 @@ static void test_load_cur_predicated(void) check_output_w(__LINE__, BUFSIZE); } +static void test_vcombine(void) +{ + for (int i = 0; i < BUFSIZE / 2; i++) { + asm volatile("v2 = vsplat(%0)\n\t" + "v3 = vsplat(%1)\n\t" + "v3:2 = vcombine(v2, v3)\n\t" + "vmem(%2+#0) = v2\n\t" + "vmem(%2+#1) = v3\n\t" + : + : "r"(2 * i), "r"(2 * i + 1), "r"(&output[2 * i]) + : "v2", "v3", "memory"); + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + expect[2 * i].w[j] = 2 * i + 1; + expect[2 * i + 1].w[j] = 2 * i; + } + } + check_output_w(__LINE__, BUFSIZE); +} + int main() { init_buffers(); test_load_tmp(); + test_load_tmp2(); test_load_cur(); test_load_aligned(); test_load_unaligned(); @@ -645,11 +496,11 @@ int main() test_vadduwsat(); test_vsubuwsat_dv(); - test_vshuff(); - test_load_tmp_predicated(); test_load_cur_predicated(); + test_vcombine(); + puts(err ? "FAIL" : "PASS"); return err ? 1 : 0; } diff --git a/tests/tcg/hexagon/hvx_misc.h b/tests/tcg/hexagon/hvx_misc.h new file mode 100644 index 0000000000..2e868340fd --- /dev/null +++ b/tests/tcg/hexagon/hvx_misc.h @@ -0,0 +1,178 @@ +/* + * Copyright(c) 2021-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef HVX_MISC_H +#define HVX_MISC_H + +static inline void check(int line, int i, int j, + uint64_t result, uint64_t expect) +{ + if (result != expect) { + printf("ERROR at line %d: [%d][%d] 0x%016llx != 0x%016llx\n", + line, i, j, result, expect); + err++; + } +} + +#define MAX_VEC_SIZE_BYTES 128 + +typedef union { + uint64_t ud[MAX_VEC_SIZE_BYTES / 8]; + int64_t d[MAX_VEC_SIZE_BYTES / 8]; + uint32_t uw[MAX_VEC_SIZE_BYTES / 4]; + int32_t w[MAX_VEC_SIZE_BYTES / 4]; + uint16_t uh[MAX_VEC_SIZE_BYTES / 2]; + int16_t h[MAX_VEC_SIZE_BYTES / 2]; + uint8_t ub[MAX_VEC_SIZE_BYTES / 1]; + int8_t b[MAX_VEC_SIZE_BYTES / 1]; +} MMVector; + +#define BUFSIZE 16 +#define OUTSIZE 16 +#define MASKMOD 3 + +MMVector buffer0[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector buffer1[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector mask[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector output[OUTSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector expect[OUTSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); + +#define CHECK_OUTPUT_FUNC(FIELD, FIELDSZ) \ +static inline void check_output_##FIELD(int line, size_t num_vectors) \ +{ \ + for (int i = 0; i < num_vectors; i++) { \ + for (int j = 0; j < MAX_VEC_SIZE_BYTES / FIELDSZ; j++) { \ + check(line, i, j, output[i].FIELD[j], expect[i].FIELD[j]); \ + } \ + } \ +} + +CHECK_OUTPUT_FUNC(d, 8) +CHECK_OUTPUT_FUNC(w, 4) +CHECK_OUTPUT_FUNC(h, 2) +CHECK_OUTPUT_FUNC(b, 1) + +static inline void init_buffers(void) +{ + int counter0 = 0; + int counter1 = 17; + for (int i = 0; i < BUFSIZE; i++) { + for (int j = 0; j < MAX_VEC_SIZE_BYTES; j++) { + buffer0[i].b[j] = counter0++; + buffer1[i].b[j] = counter1++; + } + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + mask[i].w[j] = (i + j % MASKMOD == 0) ? 
0 : 1; + } + } +} + +#define VEC_OP1(ASM, EL, IN, OUT) \ + asm("v2 = vmem(%0 + #0)\n\t" \ + "v2" #EL " = " #ASM "(v2" #EL ")\n\t" \ + "vmem(%1 + #0) = v2\n\t" \ + : : "r"(IN), "r"(OUT) : "v2", "memory") + +#define VEC_OP2(ASM, EL, IN0, IN1, OUT) \ + asm("v2 = vmem(%0 + #0)\n\t" \ + "v3 = vmem(%1 + #0)\n\t" \ + "v2" #EL " = " #ASM "(v2" #EL ", v3" #EL ")\n\t" \ + "vmem(%2 + #0) = v2\n\t" \ + : : "r"(IN0), "r"(IN1), "r"(OUT) : "v2", "v3", "memory") + +#define TEST_VEC_OP1(NAME, ASM, EL, FIELD, FIELDSZ, OP) \ +static inline void test_##NAME(void) \ +{ \ + void *pin = buffer0; \ + void *pout = output; \ + for (int i = 0; i < BUFSIZE; i++) { \ + VEC_OP1(ASM, EL, pin, pout); \ + pin += sizeof(MMVector); \ + pout += sizeof(MMVector); \ + } \ + for (int i = 0; i < BUFSIZE; i++) { \ + for (int j = 0; j < MAX_VEC_SIZE_BYTES / FIELDSZ; j++) { \ + expect[i].FIELD[j] = OP buffer0[i].FIELD[j]; \ + } \ + } \ + check_output_##FIELD(__LINE__, BUFSIZE); \ +} + +#define TEST_VEC_OP2(NAME, ASM, EL, FIELD, FIELDSZ, OP) \ +static inline void test_##NAME(void) \ +{ \ + void *p0 = buffer0; \ + void *p1 = buffer1; \ + void *pout = output; \ + for (int i = 0; i < BUFSIZE; i++) { \ + VEC_OP2(ASM, EL, p0, p1, pout); \ + p0 += sizeof(MMVector); \ + p1 += sizeof(MMVector); \ + pout += sizeof(MMVector); \ + } \ + for (int i = 0; i < BUFSIZE; i++) { \ + for (int j = 0; j < MAX_VEC_SIZE_BYTES / FIELDSZ; j++) { \ + expect[i].FIELD[j] = buffer0[i].FIELD[j] OP buffer1[i].FIELD[j]; \ + } \ + } \ + check_output_##FIELD(__LINE__, BUFSIZE); \ +} + +#define THRESHOLD 31 + +#define PRED_OP2(ASM, IN0, IN1, OUT, INV) \ + asm("r4 = #%3\n\t" \ + "v1.b = vsplat(r4)\n\t" \ + "v2 = vmem(%0 + #0)\n\t" \ + "q0 = vcmp.gt(v2.b, v1.b)\n\t" \ + "v3 = vmem(%1 + #0)\n\t" \ + "q1 = vcmp.gt(v3.b, v1.b)\n\t" \ + "q2 = " #ASM "(q0, " INV "q1)\n\t" \ + "r4 = #0xff\n\t" \ + "v1.b = vsplat(r4)\n\t" \ + "if (q2) vmem(%2 + #0) = v1\n\t" \ + : : "r"(IN0), "r"(IN1), "r"(OUT), "i"(THRESHOLD) \ + : "r4", "v1", "v2", "v3", "q0", "q1", "q2", "memory") + +#define TEST_PRED_OP2(NAME, ASM, OP, INV) \ +static inline void test_##NAME(bool invert) \ +{ \ + void *p0 = buffer0; \ + void *p1 = buffer1; \ + void *pout = output; \ + memset(output, 0, sizeof(expect)); \ + for (int i = 0; i < BUFSIZE; i++) { \ + PRED_OP2(ASM, p0, p1, pout, INV); \ + p0 += sizeof(MMVector); \ + p1 += sizeof(MMVector); \ + pout += sizeof(MMVector); \ + } \ + for (int i = 0; i < BUFSIZE; i++) { \ + for (int j = 0; j < MAX_VEC_SIZE_BYTES; j++) { \ + bool p0 = (buffer0[i].b[j] > THRESHOLD); \ + bool p1 = (buffer1[i].b[j] > THRESHOLD); \ + if (invert) { \ + expect[i].b[j] = (p0 OP !p1) ? 0xff : 0x00; \ + } else { \ + expect[i].b[j] = (p0 OP p1) ? 0xff : 0x00; \ + } \ + } \ + } \ + check_output_b(__LINE__, BUFSIZE); \ +} + +#endif diff --git a/tests/tcg/hexagon/invalid-slots.c b/tests/tcg/hexagon/invalid-slots.c new file mode 100644 index 0000000000..366ce4f42f --- /dev/null +++ b/tests/tcg/hexagon/invalid-slots.c @@ -0,0 +1,29 @@ +/* + * Copyright(c) 2023 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+char mem[8] __attribute__((aligned(8)));
+
+int main()
+{
+ asm volatile(
+ "r0 = #mem\n"
+ /* Invalid packet (2 instructions at slot 0): */
+ ".word 0xa1804100\n" /* { memw(r0) = r1; */
+ ".word 0x28032804\n" /* r3 = #0; r4 = #0 } */
+ : : : "r0", "r3", "r4", "memory");
+ return 0;
+}
diff --git a/tests/tcg/hexagon/load_align.c b/tests/tcg/hexagon/load_align.c
index 12fc9cbd8f..f5948fd539 100644
--- a/tests/tcg/hexagon/load_align.c
+++ b/tests/tcg/hexagon/load_align.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -29,41 +29,22 @@
*/
#include <stdio.h>
+#include <stdint.h>
#include <string.h>
int err;
-char buf[16] __attribute__((aligned(1 << 16)));
+#include "hex_test.h"
+
+int8_t buf[16] __attribute__((aligned(1 << 16)));
void init_buf(void)
{
- int i;
- for (i = 0; i < 16; i++) {
+ for (int i = 0; i < 16; i++) {
buf[i] = i + 1;
}
}
-void __check(int line, long long result, long long expect)
-{
- if (result != expect) {
- printf("ERROR at line %d: 0x%016llx != 0x%016llx\n",
- line, result, expect);
- err++;
- }
-}
-
-#define check(RES, EXP) __check(__LINE__, RES, EXP)
-
-void __checkp(int line, void *p, void *expect)
-{
- if (p != expect) {
- printf("ERROR at line %d: 0x%p != 0x%p\n", line, p, expect);
- err++;
- }
-}
-
-#define checkp(RES, EXP) __checkp(__LINE__, RES, EXP)
-
/*
****************************************************************************
* _io addressing mode (addr + offset)
@@ -81,15 +62,15 @@ void __checkp(int line, void *p, void *expect)
#define TEST_io(NAME, SZ, SIZE, EXP1, EXP2, EXP3, EXP4) \
void test_##NAME(void) \
{ \
- long long result = ~0LL; \
+ int64_t result = ~0LL; \
LOAD_io_##SZ(result, buf, 0 * (SIZE)); \
- check(result, (EXP1)); \
+ check64(result, (EXP1)); \
LOAD_io_##SZ(result, buf, 1 * (SIZE)); \
- check(result, (EXP2)); \
+ check64(result, (EXP2)); \
LOAD_io_##SZ(result, buf, 2 * (SIZE)); \
- check(result, (EXP3)); \
+ check64(result, (EXP3)); \
LOAD_io_##SZ(result, buf, 3 * (SIZE)); \
- check(result, (EXP4)); \
+ check64(result, (EXP4)); \
}
TEST_io(loadalignb_io, b, 1,
@@ -116,15 +97,15 @@ TEST_io(loadalignh_io, h, 2,
#define TEST_ur(NAME, SZ, SHIFT, RES1, RES2, RES3, RES4) \
void test_##NAME(void) \
{ \
- long long result = ~0LL; \
+ uint64_t result = ~0LL; \
LOAD_ur_##SZ(result, (SHIFT), 0); \
- check(result, (RES1)); \
+ check64(result, (RES1)); \
LOAD_ur_##SZ(result, (SHIFT), 1); \
- check(result, (RES2)); \
+ check64(result, (RES2)); \
LOAD_ur_##SZ(result, (SHIFT), 2); \
- check(result, (RES3)); \
+ check64(result, (RES3)); \
LOAD_ur_##SZ(result, (SHIFT), 3); \
- check(result, (RES4)); \
+ check64(result, (RES4)); \
}
TEST_ur(loadalignb_ur, b, 1,
@@ -150,19 +131,19 @@ TEST_ur(loadalignh_ur, h, 1,
#define TEST_ap(NAME, SZ, SIZE, RES1, RES2, RES3, RES4) \
void test_##NAME(void) \
{ \
- long long result = ~0LL; \
+ int64_t result = ~0LL; \
void *ptr; \
LOAD_ap_##SZ(result, ptr, (buf + 0 * (SIZE))); \
- check(result, (RES1)); \
+ check64(result, (RES1)); \
checkp(ptr, &buf[0 * (SIZE)]); \
LOAD_ap_##SZ(result, ptr, (buf + 1 * (SIZE))); \
- check(result, (RES2)); \
+ check64(result, (RES2)); \
checkp(ptr, &buf[1 * (SIZE)]); \
LOAD_ap_##SZ(result, ptr, (buf + 2 * (SIZE))); \ - check(result, (RES3)); \ + check64(result, (RES3)); \ checkp(ptr, &buf[2 * (SIZE)]); \ LOAD_ap_##SZ(result, ptr, (buf + 3 * (SIZE))); \ - check(result, (RES4)); \ + check64(result, (RES4)); \ checkp(ptr, &buf[3 * (SIZE)]); \ } @@ -192,19 +173,19 @@ TEST_ap(loadalignh_ap, h, 2, #define TEST_pr(NAME, SZ, SIZE, RES1, RES2, RES3, RES4) \ void test_##NAME(void) \ { \ - long long result = ~0LL; \ + int64_t result = ~0LL; \ void *ptr = buf; \ LOAD_pr_##SZ(result, ptr, (SIZE)); \ - check(result, (RES1)); \ + check64(result, (RES1)); \ checkp(ptr, &buf[1 * (SIZE)]); \ LOAD_pr_##SZ(result, ptr, (SIZE)); \ - check(result, (RES2)); \ + check64(result, (RES2)); \ checkp(ptr, &buf[2 * (SIZE)]); \ LOAD_pr_##SZ(result, ptr, (SIZE)); \ - check(result, (RES3)); \ + check64(result, (RES3)); \ checkp(ptr, &buf[3 * (SIZE)]); \ LOAD_pr_##SZ(result, ptr, (SIZE)); \ - check(result, (RES4)); \ + check64(result, (RES4)); \ checkp(ptr, &buf[4 * (SIZE)]); \ } @@ -235,16 +216,16 @@ TEST_pr(loadalignh_pr, h, 2, #define TEST_pbr(NAME, SZ, RES1, RES2, RES3, RES4) \ void test_##NAME(void) \ { \ - long long result = ~0LL; \ + int64_t result = ~0LL; \ void *ptr = buf; \ LOAD_pbr_##SZ(result, ptr); \ - check(result, (RES1)); \ + check64(result, (RES1)); \ LOAD_pbr_##SZ(result, ptr); \ - check(result, (RES2)); \ + check64(result, (RES2)); \ LOAD_pbr_##SZ(result, ptr); \ - check(result, (RES3)); \ + check64(result, (RES3)); \ LOAD_pbr_##SZ(result, ptr); \ - check(result, (RES4)); \ + check64(result, (RES4)); \ } TEST_pbr(loadalignb_pbr, b, @@ -270,19 +251,19 @@ TEST_pbr(loadalignh_pbr, h, #define TEST_pi(NAME, SZ, INC, RES1, RES2, RES3, RES4) \ void test_##NAME(void) \ { \ - long long result = ~0LL; \ + int64_t result = ~0LL; \ void *ptr = buf; \ LOAD_pi_##SZ(result, ptr, (INC)); \ - check(result, (RES1)); \ + check64(result, (RES1)); \ checkp(ptr, &buf[1 * (INC)]); \ LOAD_pi_##SZ(result, ptr, (INC)); \ - check(result, (RES2)); \ + check64(result, (RES2)); \ checkp(ptr, &buf[2 * (INC)]); \ LOAD_pi_##SZ(result, ptr, (INC)); \ - check(result, (RES3)); \ + check64(result, (RES3)); \ checkp(ptr, &buf[3 * (INC)]); \ LOAD_pi_##SZ(result, ptr, (INC)); \ - check(result, (RES4)); \ + check64(result, (RES4)); \ checkp(ptr, &buf[4 * (INC)]); \ } @@ -314,19 +295,19 @@ TEST_pi(loadalignh_pi, h, 2, #define TEST_pci(NAME, SZ, LEN, INC, RES1, RES2, RES3, RES4) \ void test_##NAME(void) \ { \ - long long result = ~0LL; \ + int64_t result = ~0LL; \ void *ptr = buf; \ LOAD_pci_##SZ(result, ptr, buf, (LEN), (INC)); \ - check(result, (RES1)); \ + check64(result, (RES1)); \ checkp(ptr, &buf[(1 * (INC)) % (LEN)]); \ LOAD_pci_##SZ(result, ptr, buf, (LEN), (INC)); \ - check(result, (RES2)); \ + check64(result, (RES2)); \ checkp(ptr, &buf[(2 * (INC)) % (LEN)]); \ LOAD_pci_##SZ(result, ptr, buf, (LEN), (INC)); \ - check(result, (RES3)); \ + check64(result, (RES3)); \ checkp(ptr, &buf[(3 * (INC)) % (LEN)]); \ LOAD_pci_##SZ(result, ptr, buf, (LEN), (INC)); \ - check(result, (RES4)); \ + check64(result, (RES4)); \ checkp(ptr, &buf[(4 * (INC)) % (LEN)]); \ } @@ -359,19 +340,19 @@ TEST_pci(loadalignh_pci, h, 4, 2, #define TEST_pcr(NAME, SZ, SIZE, LEN, INC, RES1, RES2, RES3, RES4) \ void test_##NAME(void) \ { \ - long long result = ~0LL; \ + int64_t result = ~0LL; \ void *ptr = buf; \ LOAD_pcr_##SZ(result, ptr, buf, (LEN), (INC)); \ - check(result, (RES1)); \ + check64(result, (RES1)); \ checkp(ptr, &buf[(1 * (INC) * (SIZE)) % (LEN)]); \ LOAD_pcr_##SZ(result, ptr, buf, (LEN), (INC)); \ - check(result, (RES2)); 
\
+ check64(result, (RES2)); \
checkp(ptr, &buf[(2 * (INC) * (SIZE)) % (LEN)]); \
LOAD_pcr_##SZ(result, ptr, buf, (LEN), (INC)); \
- check(result, (RES3)); \
+ check64(result, (RES3)); \
checkp(ptr, &buf[(3 * (INC) * (SIZE)) % (LEN)]); \
LOAD_pcr_##SZ(result, ptr, buf, (LEN), (INC)); \
- check(result, (RES4)); \
+ check64(result, (RES4)); \
checkp(ptr, &buf[(4 * (INC) * (SIZE)) % (LEN)]); \
}
diff --git a/tests/tcg/hexagon/load_unpack.c b/tests/tcg/hexagon/load_unpack.c
index 4aa26fc388..c30f4d80aa 100644
--- a/tests/tcg/hexagon/load_unpack.c
+++ b/tests/tcg/hexagon/load_unpack.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,42 +31,23 @@
*/
#include <stdio.h>
+#include <stdint.h>
#include <string.h>
int err;
-char buf[16] __attribute__((aligned(1 << 16)));
+#include "hex_test.h"
+
+int8_t buf[16] __attribute__((aligned(1 << 16)));
void init_buf(void)
{
- int i;
- for (i = 0; i < 16; i++) {
+ for (int i = 0; i < 16; i++) {
int sign = i % 2 == 0 ? 0x80 : 0;
buf[i] = sign | (i + 1);
}
}
-void __check(int line, long long result, long long expect)
-{
- if (result != expect) {
- printf("ERROR at line %d: 0x%08llx != 0x%08llx\n",
- line, result, expect);
- err++;
- }
-}
-
-#define check(RES, EXP) __check(__LINE__, RES, EXP)
-
-void __checkp(int line, void *p, void *expect)
-{
- if (p != expect) {
- printf("ERROR at line %d: 0x%p != 0x%p\n", line, p, expect);
- err++;
- }
-}
-
-#define checkp(RES, EXP) __checkp(__LINE__, RES, EXP)
-
/*
****************************************************************************
* _io addressing mode (addr + offset)
@@ -87,24 +68,24 @@ void test_##NAME(void) \
TYPE result; \
init_buf(); \
BxW_LOAD_io_##SIGN(result, buf, 0 * (SIZE)); \
- check(result, (EXP1) | (EXT)); \
+ check64(result, (EXP1) | (EXT)); \
BxW_LOAD_io_##SIGN(result, buf, 1 * (SIZE)); \
- check(result, (EXP2) | (EXT)); \
+ check64(result, (EXP2) | (EXT)); \
BxW_LOAD_io_##SIGN(result, buf, 2 * (SIZE)); \
- check(result, (EXP3) | (EXT)); \
+ check64(result, (EXP3) | (EXT)); \
BxW_LOAD_io_##SIGN(result, buf, 3 * (SIZE)); \
- check(result, (EXP4) | (EXT)); \
+ check64(result, (EXP4) | (EXT)); \
}
-TEST_io(loadbzw2_io, int, Z, 2, 0x00000000,
+TEST_io(loadbzw2_io, int32_t, Z, 2, 0x00000000,
0x00020081, 0x00040083, 0x00060085, 0x00080087)
-TEST_io(loadbsw2_io, int, S, 2, 0x0000ff00,
+TEST_io(loadbsw2_io, int32_t, S, 2, 0x0000ff00,
0x00020081, 0x00040083, 0x00060085, 0x00080087)
-TEST_io(loadbzw4_io, long long, Z, 4, 0x0000000000000000LL,
+TEST_io(loadbzw4_io, int64_t, Z, 4, 0x0000000000000000LL,
0x0004008300020081LL, 0x0008008700060085LL,
0x000c008b000a0089LL, 0x0010008f000e008dLL)
-TEST_io(loadbsw4_io, long long, S, 4, 0x0000ff000000ff00LL,
+TEST_io(loadbsw4_io, int64_t, S, 4, 0x0000ff000000ff00LL,
0x0004008300020081LL, 0x0008008700060085LL,
0x000c008b000a0089LL, 0x0010008f000e008dLL)
@@ -128,23 +109,23 @@ void test_##NAME(void) \
TYPE result; \
init_buf(); \
BxW_LOAD_ur_##SIGN(result, (SHIFT), 0); \
- check(result, (RES1) | (EXT)); \
+ check64(result, (RES1) | (EXT)); \
BxW_LOAD_ur_##SIGN(result, (SHIFT), 1); \
- check(result, (RES2) | (EXT)); \
+ check64(result, (RES2) | (EXT)); \
BxW_LOAD_ur_##SIGN(result, (SHIFT), 2); \
- check(result, (RES3) | (EXT)); \
+ check64(result, (RES3) | (EXT)); \
BxW_LOAD_ur_##SIGN(result, (SHIFT), 3); \
- check(result,
(RES4) | (EXT)); \ + check64(result, (RES4) | (EXT)); \ } \ -TEST_ur(loadbzw2_ur, int, Z, 1, 0x00000000, +TEST_ur(loadbzw2_ur, int32_t, Z, 1, 0x00000000, 0x00020081, 0x00040083, 0x00060085, 0x00080087) -TEST_ur(loadbsw2_ur, int, S, 1, 0x0000ff00, +TEST_ur(loadbsw2_ur, int32_t, S, 1, 0x0000ff00, 0x00020081, 0x00040083, 0x00060085, 0x00080087) -TEST_ur(loadbzw4_ur, long long, Z, 2, 0x0000000000000000LL, +TEST_ur(loadbzw4_ur, int64_t, Z, 2, 0x0000000000000000LL, 0x0004008300020081LL, 0x0008008700060085LL, 0x000c008b000a0089LL, 0x0010008f000e008dLL) -TEST_ur(loadbsw4_ur, long long, S, 2, 0x0000ff000000ff00LL, +TEST_ur(loadbsw4_ur, int64_t, S, 2, 0x0000ff000000ff00LL, 0x0004008300020081LL, 0x0008008700060085LL, 0x000c008b000a0089LL, 0x0010008f000e008dLL) @@ -168,27 +149,27 @@ void test_##NAME(void) \ void *ptr; \ init_buf(); \ BxW_LOAD_ap_##SIGN(result, ptr, (buf + 0 * (SIZE))); \ - check(result, (RES1) | (EXT)); \ + check64(result, (RES1) | (EXT)); \ checkp(ptr, &buf[0 * (SIZE)]); \ BxW_LOAD_ap_##SIGN(result, ptr, (buf + 1 * (SIZE))); \ - check(result, (RES2) | (EXT)); \ + check64(result, (RES2) | (EXT)); \ checkp(ptr, &buf[1 * (SIZE)]); \ BxW_LOAD_ap_##SIGN(result, ptr, (buf + 2 * (SIZE))); \ - check(result, (RES3) | (EXT)); \ + check64(result, (RES3) | (EXT)); \ checkp(ptr, &buf[2 * (SIZE)]); \ BxW_LOAD_ap_##SIGN(result, ptr, (buf + 3 * (SIZE))); \ - check(result, (RES4) | (EXT)); \ + check64(result, (RES4) | (EXT)); \ checkp(ptr, &buf[3 * (SIZE)]); \ } -TEST_ap(loadbzw2_ap, int, Z, 2, 0x00000000, +TEST_ap(loadbzw2_ap, int32_t, Z, 2, 0x00000000, 0x00020081, 0x00040083, 0x00060085, 0x00080087) -TEST_ap(loadbsw2_ap, int, S, 2, 0x0000ff00, +TEST_ap(loadbsw2_ap, int32_t, S, 2, 0x0000ff00, 0x00020081, 0x00040083, 0x00060085, 0x00080087) -TEST_ap(loadbzw4_ap, long long, Z, 4, 0x0000000000000000LL, +TEST_ap(loadbzw4_ap, int64_t, Z, 4, 0x0000000000000000LL, 0x0004008300020081LL, 0x0008008700060085LL, 0x000c008b000a0089LL, 0x0010008f000e008dLL) -TEST_ap(loadbsw4_ap, long long, S, 4, 0x0000ff000000ff00LL, +TEST_ap(loadbsw4_ap, int64_t, S, 4, 0x0000ff000000ff00LL, 0x0004008300020081LL, 0x0008008700060085LL, 0x000c008b000a0089LL, 0x0010008f000e008dLL) @@ -215,27 +196,27 @@ void test_##NAME(void) \ void *ptr = buf; \ init_buf(); \ BxW_LOAD_pr_##SIGN(result, ptr, (SIZE)); \ - check(result, (RES1) | (EXT)); \ + check64(result, (RES1) | (EXT)); \ checkp(ptr, &buf[1 * (SIZE)]); \ BxW_LOAD_pr_##SIGN(result, ptr, (SIZE)); \ - check(result, (RES2) | (EXT)); \ + check64(result, (RES2) | (EXT)); \ checkp(ptr, &buf[2 * (SIZE)]); \ BxW_LOAD_pr_##SIGN(result, ptr, (SIZE)); \ - check(result, (RES3) | (EXT)); \ + check64(result, (RES3) | (EXT)); \ checkp(ptr, &buf[3 * (SIZE)]); \ BxW_LOAD_pr_##SIGN(result, ptr, (SIZE)); \ - check(result, (RES4) | (EXT)); \ + check64(result, (RES4) | (EXT)); \ checkp(ptr, &buf[4 * (SIZE)]); \ } -TEST_pr(loadbzw2_pr, int, Z, 2, 0x00000000, +TEST_pr(loadbzw2_pr, int32_t, Z, 2, 0x00000000, 0x00020081, 0x0040083, 0x00060085, 0x00080087) -TEST_pr(loadbsw2_pr, int, S, 2, 0x0000ff00, +TEST_pr(loadbsw2_pr, int32_t, S, 2, 0x0000ff00, 0x00020081, 0x0040083, 0x00060085, 0x00080087) -TEST_pr(loadbzw4_pr, long long, Z, 4, 0x0000000000000000LL, +TEST_pr(loadbzw4_pr, int64_t, Z, 4, 0x0000000000000000LL, 0x0004008300020081LL, 0x0008008700060085LL, 0x000c008b000a0089LL, 0x0010008f000e008dLL) -TEST_pr(loadbsw4_pr, long long, S, 4, 0x0000ff000000ff00LL, +TEST_pr(loadbsw4_pr, int64_t, S, 4, 0x0000ff000000ff00LL, 0x0004008300020081LL, 0x0008008700060085LL, 0x000c008b000a0089LL, 0x0010008f000e008dLL) @@ 
-263,23 +244,23 @@ void test_##NAME(void) \ void *ptr = buf; \ init_buf(); \ BxW_LOAD_pbr_##SIGN(result, ptr); \ - check(result, (RES1) | (EXT)); \ + check64(result, (RES1) | (EXT)); \ BxW_LOAD_pbr_##SIGN(result, ptr); \ - check(result, (RES2) | (EXT)); \ + check64(result, (RES2) | (EXT)); \ BxW_LOAD_pbr_##SIGN(result, ptr); \ - check(result, (RES3) | (EXT)); \ + check64(result, (RES3) | (EXT)); \ BxW_LOAD_pbr_##SIGN(result, ptr); \ - check(result, (RES4) | (EXT)); \ + check64(result, (RES4) | (EXT)); \ } -TEST_pbr(loadbzw2_pbr, int, Z, 0x00000000, +TEST_pbr(loadbzw2_pbr, int32_t, Z, 0x00000000, 0x00020081, 0x000a0089, 0x00060085, 0x000e008d) -TEST_pbr(loadbsw2_pbr, int, S, 0x0000ff00, +TEST_pbr(loadbsw2_pbr, int32_t, S, 0x0000ff00, 0x00020081, 0x000aff89, 0x0006ff85, 0x000eff8d) -TEST_pbr(loadbzw4_pbr, long long, Z, 0x0000000000000000LL, +TEST_pbr(loadbzw4_pbr, int64_t, Z, 0x0000000000000000LL, 0x0004008300020081LL, 0x000c008b000a0089LL, 0x0008008700060085LL, 0x0010008f000e008dLL) -TEST_pbr(loadbsw4_pbr, long long, S, 0x0000ff000000ff00LL, +TEST_pbr(loadbsw4_pbr, int64_t, S, 0x0000ff000000ff00LL, 0x0004008300020081LL, 0x000cff8b000aff89LL, 0x0008ff870006ff85LL, 0x0010ff8f000eff8dLL) @@ -303,27 +284,27 @@ void test_##NAME(void) \ void *ptr = buf; \ init_buf(); \ BxW_LOAD_pi_##SIGN(result, ptr, (INC)); \ - check(result, (RES1) | (EXT)); \ + check64(result, (RES1) | (EXT)); \ checkp(ptr, &buf[1 * (INC)]); \ BxW_LOAD_pi_##SIGN(result, ptr, (INC)); \ - check(result, (RES2) | (EXT)); \ + check64(result, (RES2) | (EXT)); \ checkp(ptr, &buf[2 * (INC)]); \ BxW_LOAD_pi_##SIGN(result, ptr, (INC)); \ - check(result, (RES3) | (EXT)); \ + check64(result, (RES3) | (EXT)); \ checkp(ptr, &buf[3 * (INC)]); \ BxW_LOAD_pi_##SIGN(result, ptr, (INC)); \ - check(result, (RES4) | (EXT)); \ + check64(result, (RES4) | (EXT)); \ checkp(ptr, &buf[4 * (INC)]); \ } -TEST_pi(loadbzw2_pi, int, Z, 2, 0x00000000, +TEST_pi(loadbzw2_pi, int32_t, Z, 2, 0x00000000, 0x00020081, 0x00040083, 0x00060085, 0x00080087) -TEST_pi(loadbsw2_pi, int, S, 2, 0x0000ff00, +TEST_pi(loadbsw2_pi, int32_t, S, 2, 0x0000ff00, 0x00020081, 0x00040083, 0x00060085, 0x00080087) -TEST_pi(loadbzw4_pi, long long, Z, 4, 0x0000000000000000LL, +TEST_pi(loadbzw4_pi, int64_t, Z, 4, 0x0000000000000000LL, 0x0004008300020081LL, 0x0008008700060085LL, 0x000c008b000a0089LL, 0x0010008f000e008dLL) -TEST_pi(loadbsw4_pi, long long, S, 4, 0x0000ff000000ff00LL, +TEST_pi(loadbsw4_pi, int64_t, S, 4, 0x0000ff000000ff00LL, 0x0004008300020081LL, 0x0008008700060085LL, 0x000c008b000a0089LL, 0x0010008f000e008dLL) @@ -352,27 +333,27 @@ void test_##NAME(void) \ void *ptr = buf; \ init_buf(); \ BxW_LOAD_pci_##SIGN(result, ptr, buf, (LEN), (INC)); \ - check(result, (RES1) | (EXT)); \ + check64(result, (RES1) | (EXT)); \ checkp(ptr, &buf[(1 * (INC)) % (LEN)]); \ BxW_LOAD_pci_##SIGN(result, ptr, buf, (LEN), (INC)); \ - check(result, (RES2) | (EXT)); \ + check64(result, (RES2) | (EXT)); \ checkp(ptr, &buf[(2 * (INC)) % (LEN)]); \ BxW_LOAD_pci_##SIGN(result, ptr, buf, (LEN), (INC)); \ - check(result, (RES3) | (EXT)); \ + check64(result, (RES3) | (EXT)); \ checkp(ptr, &buf[(3 * (INC)) % (LEN)]); \ BxW_LOAD_pci_##SIGN(result, ptr, buf, (LEN), (INC)); \ - check(result, (RES4) | (EXT)); \ + check64(result, (RES4) | (EXT)); \ checkp(ptr, &buf[(4 * (INC)) % (LEN)]); \ } -TEST_pci(loadbzw2_pci, int, Z, 6, 2, 0x00000000, +TEST_pci(loadbzw2_pci, int32_t, Z, 6, 2, 0x00000000, 0x00020081, 0x00040083, 0x00060085, 0x00020081) -TEST_pci(loadbsw2_pci, int, S, 6, 2, 0x0000ff00, +TEST_pci(loadbsw2_pci, 
int32_t, S, 6, 2, 0x0000ff00,
0x00020081, 0x00040083, 0x00060085, 0x00020081)
-TEST_pci(loadbzw4_pci, long long, Z, 8, 4, 0x0000000000000000LL,
+TEST_pci(loadbzw4_pci, int64_t, Z, 8, 4, 0x0000000000000000LL,
0x0004008300020081LL, 0x0008008700060085LL,
0x0004008300020081LL, 0x0008008700060085LL)
-TEST_pci(loadbsw4_pci, long long, S, 8, 4, 0x0000ff000000ff00LL,
+TEST_pci(loadbsw4_pci, int64_t, S, 8, 4, 0x0000ff000000ff00LL,
0x0004008300020081LL, 0x0008008700060085LL,
0x0004008300020081LL, 0x0008008700060085LL)
@@ -403,27 +384,27 @@ void test_##NAME(void) \
void *ptr = buf; \
init_buf(); \
BxW_LOAD_pcr_##SIGN(result, ptr, buf, (LEN), (INC)); \
- check(result, (RES1) | (EXT)); \
+ check64(result, (RES1) | (EXT)); \
checkp(ptr, &buf[(1 * (INC) * (SIZE)) % (LEN)]); \
BxW_LOAD_pcr_##SIGN(result, ptr, buf, (LEN), (INC)); \
- check(result, (RES2) | (EXT)); \
+ check64(result, (RES2) | (EXT)); \
checkp(ptr, &buf[(2 * (INC) * (SIZE)) % (LEN)]); \
BxW_LOAD_pcr_##SIGN(result, ptr, buf, (LEN), (INC)); \
- check(result, (RES3) | (EXT)); \
+ check64(result, (RES3) | (EXT)); \
checkp(ptr, &buf[(3 * (INC) * (SIZE)) % (LEN)]); \
BxW_LOAD_pcr_##SIGN(result, ptr, buf, (LEN), (INC)); \
- check(result, (RES4) | (EXT)); \
+ check64(result, (RES4) | (EXT)); \
checkp(ptr, &buf[(4 * (INC) * (SIZE)) % (LEN)]); \
}
-TEST_pcr(loadbzw2_pcr, int, Z, 2, 8, 2, 0x00000000,
+TEST_pcr(loadbzw2_pcr, int32_t, Z, 2, 8, 2, 0x00000000,
0x00020081, 0x00060085, 0x00020081, 0x00060085)
-TEST_pcr(loadbsw2_pcr, int, S, 2, 8, 2, 0x0000ff00,
+TEST_pcr(loadbsw2_pcr, int32_t, S, 2, 8, 2, 0x0000ff00,
0x00020081, 0x00060085, 0x00020081, 0x00060085)
-TEST_pcr(loadbzw4_pcr, long long, Z, 4, 8, 1, 0x0000000000000000LL,
+TEST_pcr(loadbzw4_pcr, int64_t, Z, 4, 8, 1, 0x0000000000000000LL,
0x0004008300020081LL, 0x0008008700060085LL,
0x0004008300020081LL, 0x0008008700060085LL)
-TEST_pcr(loadbsw4_pcr, long long, S, 4, 8, 1, 0x0000ff000000ff00LL,
+TEST_pcr(loadbsw4_pcr, int64_t, S, 4, 8, 1, 0x0000ff000000ff00LL,
0x0004008300020081LL, 0x0008008700060085LL,
0x0004008300020081LL, 0x0008008700060085LL)
diff --git a/tests/tcg/hexagon/mem_noshuf.c b/tests/tcg/hexagon/mem_noshuf.c
index 210b2f1208..6263d5ef8e 100644
--- a/tests/tcg/hexagon/mem_noshuf.c
+++ b/tests/tcg/hexagon/mem_noshuf.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -16,6 +16,12 @@
*/
#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+int err;
+
+#include "hex_test.h"
/*
* Make sure that the :mem_noshuf packet attribute is honored.
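/*
 * A minimal standalone sketch (not part of this patch) of the pattern the
 * MEM_NOSHUF macros below exercise: with the :mem_noshuf attribute, a store
 * and a load may share one packet and the load must observe the stored data.
 * The helper name here is illustrative only; it mirrors what MEM_NOSHUF32
 * expands to for a byte store followed by a byte load.
 */
#include <stdint.h>

static inline int32_t noshuf_store_load(int8_t *p, int8_t *q, int8_t x)
{
    int32_t ret;
    asm volatile("{\n\t"
                 "    memb(%1) = %3\n\t"    /* store slot */
                 "    %0 = memb(%2)\n\t"    /* load slot, sees the store */
                 "}:mem_noshuf\n\t"
                 : "=r"(ret)
                 : "r"(p), "r"(q), "r"(x)
                 : "memory");
    return ret;
}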
@@ -25,9 +31,9 @@ */ #define MEM_NOSHUF32(NAME, ST_TYPE, LD_TYPE, ST_OP, LD_OP) \ -static inline unsigned int NAME(ST_TYPE * p, LD_TYPE * q, ST_TYPE x) \ +static inline uint32_t NAME(ST_TYPE * p, LD_TYPE * q, ST_TYPE x) \ { \ - unsigned int ret; \ + uint32_t ret; \ asm volatile("{\n\t" \ " " #ST_OP "(%1) = %3\n\t" \ " %0 = " #LD_OP "(%2)\n\t" \ @@ -39,9 +45,9 @@ static inline unsigned int NAME(ST_TYPE * p, LD_TYPE * q, ST_TYPE x) \ } #define MEM_NOSHUF64(NAME, ST_TYPE, LD_TYPE, ST_OP, LD_OP) \ -static inline unsigned long long NAME(ST_TYPE * p, LD_TYPE * q, ST_TYPE x) \ +static inline uint64_t NAME(ST_TYPE * p, LD_TYPE * q, ST_TYPE x) \ { \ - unsigned long long ret; \ + uint64_t ret; \ asm volatile("{\n\t" \ " " #ST_OP "(%1) = %3\n\t" \ " %0 = " #LD_OP "(%2)\n\t" \ @@ -53,38 +59,39 @@ static inline unsigned long long NAME(ST_TYPE * p, LD_TYPE * q, ST_TYPE x) \ } /* Store byte combinations */ -MEM_NOSHUF32(mem_noshuf_sb_lb, signed char, signed char, memb, memb) -MEM_NOSHUF32(mem_noshuf_sb_lub, signed char, unsigned char, memb, memub) -MEM_NOSHUF32(mem_noshuf_sb_lh, signed char, signed short, memb, memh) -MEM_NOSHUF32(mem_noshuf_sb_luh, signed char, unsigned short, memb, memuh) -MEM_NOSHUF32(mem_noshuf_sb_lw, signed char, signed int, memb, memw) -MEM_NOSHUF64(mem_noshuf_sb_ld, signed char, signed long long, memb, memd) +MEM_NOSHUF32(mem_noshuf_sb_lb, int8_t, int8_t, memb, memb) +MEM_NOSHUF32(mem_noshuf_sb_lub, int8_t, uint8_t, memb, memub) +MEM_NOSHUF32(mem_noshuf_sb_lh, int8_t, int16_t, memb, memh) +MEM_NOSHUF32(mem_noshuf_sb_luh, int8_t, uint16_t, memb, memuh) +MEM_NOSHUF32(mem_noshuf_sb_lw, int8_t, int32_t, memb, memw) +MEM_NOSHUF64(mem_noshuf_sb_ld, int8_t, int64_t, memb, memd) /* Store half combinations */ -MEM_NOSHUF32(mem_noshuf_sh_lb, signed short, signed char, memh, memb) -MEM_NOSHUF32(mem_noshuf_sh_lub, signed short, unsigned char, memh, memub) -MEM_NOSHUF32(mem_noshuf_sh_lh, signed short, signed short, memh, memh) -MEM_NOSHUF32(mem_noshuf_sh_luh, signed short, unsigned short, memh, memuh) -MEM_NOSHUF32(mem_noshuf_sh_lw, signed short, signed int, memh, memw) -MEM_NOSHUF64(mem_noshuf_sh_ld, signed short, signed long long, memh, memd) +MEM_NOSHUF32(mem_noshuf_sh_lb, int16_t, int8_t, memh, memb) +MEM_NOSHUF32(mem_noshuf_sh_lub, int16_t, uint8_t, memh, memub) +MEM_NOSHUF32(mem_noshuf_sh_lh, int16_t, int16_t, memh, memh) +MEM_NOSHUF32(mem_noshuf_sh_luh, int16_t, uint16_t, memh, memuh) +MEM_NOSHUF32(mem_noshuf_sh_lw, int16_t, int32_t, memh, memw) +MEM_NOSHUF64(mem_noshuf_sh_ld, int16_t, int64_t, memh, memd) /* Store word combinations */ -MEM_NOSHUF32(mem_noshuf_sw_lb, signed int, signed char, memw, memb) -MEM_NOSHUF32(mem_noshuf_sw_lub, signed int, unsigned char, memw, memub) -MEM_NOSHUF32(mem_noshuf_sw_lh, signed int, signed short, memw, memh) -MEM_NOSHUF32(mem_noshuf_sw_luh, signed int, unsigned short, memw, memuh) -MEM_NOSHUF32(mem_noshuf_sw_lw, signed int, signed int, memw, memw) -MEM_NOSHUF64(mem_noshuf_sw_ld, signed int, signed long long, memw, memd) +MEM_NOSHUF32(mem_noshuf_sw_lb, int32_t, int8_t, memw, memb) +MEM_NOSHUF32(mem_noshuf_sw_lub, int32_t, uint8_t, memw, memub) +MEM_NOSHUF32(mem_noshuf_sw_lh, int32_t, int16_t, memw, memh) +MEM_NOSHUF32(mem_noshuf_sw_luh, int32_t, uint16_t, memw, memuh) +MEM_NOSHUF32(mem_noshuf_sw_lw, int32_t, int32_t, memw, memw) +MEM_NOSHUF64(mem_noshuf_sw_ld, int32_t, int64_t, memw, memd) /* Store double combinations */ -MEM_NOSHUF32(mem_noshuf_sd_lb, long long, signed char, memd, memb) -MEM_NOSHUF32(mem_noshuf_sd_lub, long long, unsigned char, memd, 
memub) -MEM_NOSHUF32(mem_noshuf_sd_lh, long long, signed short, memd, memh) -MEM_NOSHUF32(mem_noshuf_sd_luh, long long, unsigned short, memd, memuh) -MEM_NOSHUF32(mem_noshuf_sd_lw, long long, signed int, memd, memw) -MEM_NOSHUF64(mem_noshuf_sd_ld, long long, signed long long, memd, memd) +MEM_NOSHUF32(mem_noshuf_sd_lb, int64_t, int8_t, memd, memb) +MEM_NOSHUF32(mem_noshuf_sd_lub, int64_t, uint8_t, memd, memub) +MEM_NOSHUF32(mem_noshuf_sd_lh, int64_t, int16_t, memd, memh) +MEM_NOSHUF32(mem_noshuf_sd_luh, int64_t, uint16_t, memd, memuh) +MEM_NOSHUF32(mem_noshuf_sd_lw, int64_t, int32_t, memd, memw) +MEM_NOSHUF64(mem_noshuf_sd_ld, int64_t, int64_t, memd, memd) -static inline int pred_lw_sw(int pred, int *p, int *q, int x, int y) +static inline int pred_lw_sw(bool pred, int32_t *p, int32_t *q, + int32_t x, int32_t y) { int ret; asm volatile("p0 = cmp.eq(%5, #0)\n\t" @@ -99,7 +106,8 @@ static inline int pred_lw_sw(int pred, int *p, int *q, int x, int y) return ret; } -static inline int pred_lw_sw_pi(int pred, int *p, int *q, int x, int y) +static inline int pred_lw_sw_pi(bool pred, int32_t *p, int32_t *q, + int32_t x, int32_t y) { int ret; asm volatile("p0 = cmp.eq(%5, #0)\n\t" @@ -115,10 +123,10 @@ static inline int pred_lw_sw_pi(int pred, int *p, int *q, int x, int y) return ret; } -static inline long long pred_ld_sd(int pred, long long *p, long long *q, - long long x, long long y) +static inline int64_t pred_ld_sd(bool pred, int64_t *p, int64_t *q, + int64_t x, int64_t y) { - unsigned long long ret; + int64_t ret; asm volatile("p0 = cmp.eq(%5, #0)\n\t" "%0 = %3\n\t" "{\n\t" @@ -131,10 +139,10 @@ static inline long long pred_ld_sd(int pred, long long *p, long long *q, return ret; } -static inline long long pred_ld_sd_pi(int pred, long long *p, long long *q, - long long x, long long y) +static inline int64_t pred_ld_sd_pi(bool pred, int64_t *p, int64_t *q, + int64_t x, int64_t y) { - long long ret; + int64_t ret; asm volatile("p0 = cmp.eq(%5, #0)\n\t" "%0 = %3\n\t" "r7 = %2\n\t" @@ -148,9 +156,9 @@ static inline long long pred_ld_sd_pi(int pred, long long *p, long long *q, return ret; } -static inline unsigned int cancel_sw_lb(int pred, int *p, signed char *q, int x) +static inline int32_t cancel_sw_lb(bool pred, int32_t *p, int8_t *q, int32_t x) { - unsigned int ret; + int32_t ret; asm volatile("p0 = cmp.eq(%4, #0)\n\t" "{\n\t" " if (!p0) memw(%1) = %3\n\t" @@ -162,10 +170,9 @@ static inline unsigned int cancel_sw_lb(int pred, int *p, signed char *q, int x) return ret; } -static inline -unsigned long long cancel_sw_ld(int pred, int *p, long long *q, int x) +static inline int64_t cancel_sw_ld(bool pred, int32_t *p, int64_t *q, int32_t x) { - long long ret; + int64_t ret; asm volatile("p0 = cmp.eq(%4, #0)\n\t" "{\n\t" " if (!p0) memw(%1) = %3\n\t" @@ -178,43 +185,21 @@ unsigned long long cancel_sw_ld(int pred, int *p, long long *q, int x) } typedef union { - signed long long d[2]; - unsigned long long ud[2]; - signed int w[4]; - unsigned int uw[4]; - signed short h[8]; - unsigned short uh[8]; - signed char b[16]; - unsigned char ub[16]; + int64_t d[2]; + uint64_t ud[2]; + int32_t w[4]; + uint32_t uw[4]; + int16_t h[8]; + uint16_t uh[8]; + int8_t b[16]; + uint8_t ub[16]; } Memory; -int err; - -#define check32(n, expect) check32_(n, expect, __LINE__) - -static void check32_(int n, int expect, int line) -{ - if (n != expect) { - printf("ERROR: 0x%08x != 0x%08x, line %d\n", n, expect, line); - err++; - } -} - -#define check64(n, expect) check64_(n, expect, __LINE__) - -static void check64_(long long 
n, long long expect, int line) -{ - if (n != expect) { - printf("ERROR: 0x%08llx != 0x%08llx, line %d\n", n, expect, line); - err++; - } -} - int main() { Memory n; - unsigned int res32; - unsigned long long res64; + uint32_t res32; + uint64_t res64; /* * Store byte combinations @@ -328,30 +313,30 @@ int main() * Predicated word stores */ n.w[0] = ~0; - res32 = cancel_sw_lb(0, &n.w[0], &n.b[0], 0x12345678); + res32 = cancel_sw_lb(false, &n.w[0], &n.b[0], 0x12345678); check32(res32, 0xffffffff); n.w[0] = ~0; - res32 = cancel_sw_lb(1, &n.w[0], &n.b[0], 0x12345687); + res32 = cancel_sw_lb(true, &n.w[0], &n.b[0], 0x12345687); check32(res32, 0xffffff87); /* * Predicated double stores */ n.d[0] = ~0LL; - res64 = cancel_sw_ld(0, &n.w[0], &n.d[0], 0x12345678); + res64 = cancel_sw_ld(false, &n.w[0], &n.d[0], 0x12345678); check64(res64, 0xffffffffffffffffLL); n.d[0] = ~0LL; - res64 = cancel_sw_ld(1, &n.w[0], &n.d[0], 0x12345678); + res64 = cancel_sw_ld(true, &n.w[0], &n.d[0], 0x12345678); check64(res64, 0xffffffff12345678LL); n.d[0] = ~0LL; - res64 = cancel_sw_ld(0, &n.w[1], &n.d[0], 0x12345678); + res64 = cancel_sw_ld(false, &n.w[1], &n.d[0], 0x12345678); check64(res64, 0xffffffffffffffffLL); n.d[0] = ~0LL; - res64 = cancel_sw_ld(1, &n.w[1], &n.d[0], 0x12345678); + res64 = cancel_sw_ld(true, &n.w[1], &n.d[0], 0x12345678); check64(res64, 0x12345678ffffffffLL); /* @@ -392,45 +377,45 @@ int main() check64(res64, 0xffffffffffffffffLL); n.w[0] = ~0; - res32 = pred_lw_sw(0, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); + res32 = pred_lw_sw(false, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); check32(res32, 0x12345678); check32(n.w[0], 0xc0ffeeda); n.w[0] = ~0; - res32 = pred_lw_sw(1, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); + res32 = pred_lw_sw(true, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); check32(res32, 0xc0ffeeda); check32(n.w[0], 0xc0ffeeda); n.w[0] = ~0; - res32 = pred_lw_sw_pi(0, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); + res32 = pred_lw_sw_pi(false, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); check32(res32, 0x12345678); check32(n.w[0], 0xc0ffeeda); n.w[0] = ~0; - res32 = pred_lw_sw_pi(1, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); + res32 = pred_lw_sw_pi(true, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); check32(res32, 0xc0ffeeda); check32(n.w[0], 0xc0ffeeda); n.d[0] = ~0LL; - res64 = pred_ld_sd(0, &n.d[0], &n.d[0], + res64 = pred_ld_sd(false, &n.d[0], &n.d[0], 0x1234567812345678LL, 0xc0ffeedac0ffeedaLL); check64(res64, 0x1234567812345678LL); check64(n.d[0], 0xc0ffeedac0ffeedaLL); n.d[0] = ~0LL; - res64 = pred_ld_sd(1, &n.d[0], &n.d[0], + res64 = pred_ld_sd(true, &n.d[0], &n.d[0], 0x1234567812345678LL, 0xc0ffeedac0ffeedaLL); check64(res64, 0xc0ffeedac0ffeedaLL); check64(n.d[0], 0xc0ffeedac0ffeedaLL); n.d[0] = ~0LL; - res64 = pred_ld_sd_pi(0, &n.d[0], &n.d[0], + res64 = pred_ld_sd_pi(false, &n.d[0], &n.d[0], 0x1234567812345678LL, 0xc0ffeedac0ffeedaLL); check64(res64, 0x1234567812345678LL); check64(n.d[0], 0xc0ffeedac0ffeedaLL); n.d[0] = ~0LL; - res64 = pred_ld_sd_pi(1, &n.d[0], &n.d[0], + res64 = pred_ld_sd_pi(true, &n.d[0], &n.d[0], 0x1234567812345678LL, 0xc0ffeedac0ffeedaLL); check64(res64, 0xc0ffeedac0ffeedaLL); check64(n.d[0], 0xc0ffeedac0ffeedaLL); diff --git a/tests/tcg/hexagon/mem_noshuf_exception.c b/tests/tcg/hexagon/mem_noshuf_exception.c index 08660ea3e1..61108a99be 100644 --- a/tests/tcg/hexagon/mem_noshuf_exception.c +++ b/tests/tcg/hexagon/mem_noshuf_exception.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2022 Qualcomm Innovation Center, Inc. All Rights Reserved. 
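/*
 * For reference, a minimal usage sketch of the shared hex_test.h macros that
 * replace the per-file check helpers removed above (a sketch only, not part
 * of the patch): declare err, then include the header; each failed check
 * prints the offending source line via __LINE__ and bumps err.
 */
#include <stdio.h>
#include <stdint.h>

int err;

#include "hex_test.h"

int main()
{
    check32(0x1234u + 1, 0x1235);            /* passes; err stays 0 */
    check64(0x0000000100000000ULL >> 32, 1); /* passes */
    puts(err ? "FAIL" : "PASS");
    return err ? 1 : 0;
}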
+ * Copyright(c) 2022-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -34,6 +34,8 @@ #include #include +#include +#include #include #include #include @@ -41,49 +43,31 @@ #include int err; -int segv_caught; + +#include "hex_test.h" + +bool segv_caught; #define SHOULD_NOT_CHANGE_VAL 5 -int should_not_change = SHOULD_NOT_CHANGE_VAL; +int32_t should_not_change = SHOULD_NOT_CHANGE_VAL; #define OK_TO_CHANGE_VAL 13 -int ok_to_change = OK_TO_CHANGE_VAL; - -static void __check(const char *filename, int line, int x, int expect) -{ - if (x != expect) { - printf("ERROR %s:%d - %d != %d\n", - filename, line, x, expect); - err++; - } -} - -#define check(x, expect) __check(__FILE__, __LINE__, (x), (expect)) - -static void __chk_error(const char *filename, int line, int ret) -{ - if (ret < 0) { - printf("ERROR %s:%d - %d\n", filename, line, ret); - err++; - } -} - -#define chk_error(ret) __chk_error(__FILE__, __LINE__, (ret)) +int32_t ok_to_change = OK_TO_CHANGE_VAL; jmp_buf jmp_env; static void sig_segv(int sig, siginfo_t *info, void *puc) { - check(sig, SIGSEGV); - segv_caught = 1; + check32(sig, SIGSEGV); + segv_caught = true; longjmp(jmp_env, 1); } int main() { struct sigaction act; - int dummy32; - long long dummy64; + int32_t dummy32; + int64_t dummy64; void *p; /* SIGSEGV test */ @@ -106,8 +90,8 @@ int main() act.sa_flags = 0; chk_error(sigaction(SIGSEGV, &act, NULL)); - check(segv_caught, 1); - check(should_not_change, SHOULD_NOT_CHANGE_VAL); + check32(segv_caught, true); + check32(should_not_change, SHOULD_NOT_CHANGE_VAL); /* * Check that a predicated load where the predicate is false doesn't @@ -122,7 +106,7 @@ int main() "}:mem_noshuf\n\t" : "=r"(dummy32) : : "r18", "r19", "p0", "memory"); - check(ok_to_change, 7); + check32(ok_to_change, 7); /* * Also check that the post-increment doesn't happen when the @@ -138,8 +122,8 @@ int main() "}:mem_noshuf\n\t" : "+r"(p), "=r"(dummy64) : : "r18", "p0", "memory"); - check(ok_to_change, 9); - check((int)p, (int)NULL); + check32(ok_to_change, 9); + check32((int)p, (int)NULL); puts(err ? "FAIL" : "PASS"); return err ? EXIT_FAILURE : EXIT_SUCCESS; diff --git a/tests/tcg/hexagon/misc.c b/tests/tcg/hexagon/misc.c index e73ab57334..ca22bb79f7 100644 --- a/tests/tcg/hexagon/misc.c +++ b/tests/tcg/hexagon/misc.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -16,12 +16,15 @@ */ #include +#include +#include #include -typedef unsigned char uint8_t; -typedef unsigned short uint16_t; -typedef unsigned int uint32_t; +int err; +#include "hex_test.h" + +#define CORE_HAS_CABAC (__HEXAGON_ARCH__ <= 71) static inline void S4_storerhnew_rr(void *p, int index, uint16_t v) { @@ -73,7 +76,7 @@ static inline void *S4_storerinew_ap(uint32_t v) return ret; } -static inline void S4_storeirbt_io(void *p, int pred) +static inline void S4_storeirbt_io(void *p, bool pred) { asm volatile("p0 = cmp.eq(%0, #1)\n\t" "if (p0) memb(%1+#4)=#27\n\t" @@ -81,7 +84,7 @@ static inline void S4_storeirbt_io(void *p, int pred) : "p0", "memory"); } -static inline void S4_storeirbf_io(void *p, int pred) +static inline void S4_storeirbf_io(void *p, bool pred) { asm volatile("p0 = cmp.eq(%0, #1)\n\t" "if (!p0) memb(%1+#4)=#27\n\t" @@ -89,7 +92,7 @@ static inline void S4_storeirbf_io(void *p, int pred) : "p0", "memory"); } -static inline void S4_storeirbtnew_io(void *p, int pred) +static inline void S4_storeirbtnew_io(void *p, bool pred) { asm volatile("{\n\t" " p0 = cmp.eq(%0, #1)\n\t" @@ -99,7 +102,7 @@ static inline void S4_storeirbtnew_io(void *p, int pred) : "p0", "memory"); } -static inline void S4_storeirbfnew_io(void *p, int pred) +static inline void S4_storeirbfnew_io(void *p, bool pred) { asm volatile("{\n\t" " p0 = cmp.eq(%0, #1)\n\t" @@ -109,7 +112,7 @@ static inline void S4_storeirbfnew_io(void *p, int pred) : "p0", "memory"); } -static inline void S4_storeirht_io(void *p, int pred) +static inline void S4_storeirht_io(void *p, bool pred) { asm volatile("p0 = cmp.eq(%0, #1)\n\t" "if (p0) memh(%1+#4)=#27\n\t" @@ -117,7 +120,7 @@ static inline void S4_storeirht_io(void *p, int pred) : "p0", "memory"); } -static inline void S4_storeirhf_io(void *p, int pred) +static inline void S4_storeirhf_io(void *p, bool pred) { asm volatile("p0 = cmp.eq(%0, #1)\n\t" "if (!p0) memh(%1+#4)=#27\n\t" @@ -125,7 +128,7 @@ static inline void S4_storeirhf_io(void *p, int pred) : "p0", "memory"); } -static inline void S4_storeirhtnew_io(void *p, int pred) +static inline void S4_storeirhtnew_io(void *p, bool pred) { asm volatile("{\n\t" " p0 = cmp.eq(%0, #1)\n\t" @@ -135,7 +138,7 @@ static inline void S4_storeirhtnew_io(void *p, int pred) : "p0", "memory"); } -static inline void S4_storeirhfnew_io(void *p, int pred) +static inline void S4_storeirhfnew_io(void *p, bool pred) { asm volatile("{\n\t" " p0 = cmp.eq(%0, #1)\n\t" @@ -145,7 +148,7 @@ static inline void S4_storeirhfnew_io(void *p, int pred) : "p0", "memory"); } -static inline void S4_storeirit_io(void *p, int pred) +static inline void S4_storeirit_io(void *p, bool pred) { asm volatile("p0 = cmp.eq(%0, #1)\n\t" "if (p0) memw(%1+#4)=#27\n\t" @@ -153,7 +156,7 @@ static inline void S4_storeirit_io(void *p, int pred) : "p0", "memory"); } -static inline void S4_storeirif_io(void *p, int pred) +static inline void S4_storeirif_io(void *p, bool pred) { asm volatile("p0 = cmp.eq(%0, #1)\n\t" "if (!p0) memw(%1+#4)=#27\n\t" @@ -161,7 +164,7 @@ static inline void S4_storeirif_io(void *p, int pred) : "p0", "memory"); } -static inline void S4_storeiritnew_io(void *p, int pred) +static inline void S4_storeiritnew_io(void *p, bool pred) { asm volatile("{\n\t" " p0 = cmp.eq(%0, #1)\n\t" @@ -171,7 +174,7 @@ static inline void S4_storeiritnew_io(void *p, int pred) : "p0", "memory"); } -static inline void 
S4_storeirifnew_io(void *p, int pred) +static inline void S4_storeirifnew_io(void *p, bool pred) { asm volatile("{\n\t" " p0 = cmp.eq(%0, #1)\n\t" @@ -181,9 +184,9 @@ static inline void S4_storeirifnew_io(void *p, int pred) : "p0", "memory"); } -static int L2_ploadrifnew_pi(void *p, int pred) +static int32_t L2_ploadrifnew_pi(void *p, bool pred) { - int result; + int32_t result; asm volatile("%0 = #31\n\t" "{\n\t" " p0 = cmp.eq(%2, #1)\n\t" @@ -200,9 +203,9 @@ static int L2_ploadrifnew_pi(void *p, int pred) * account for auto-anding. Then, we can do the predicated * jump. */ -static inline int cmpnd_cmp_jump(void) +static inline int32_t cmpnd_cmp_jump(void) { - int retval; + int32_t retval; asm ("r5 = #7\n\t" "r6 = #9\n\t" "{\n\t" @@ -219,9 +222,9 @@ static inline int cmpnd_cmp_jump(void) return retval; } -static inline int test_clrtnew(int arg1, int old_val) +static inline int32_t test_clrtnew(int32_t arg1, int32_t old_val) { - int ret; + int32_t ret; asm volatile("r5 = %2\n\t" "{\n\t" "p0 = cmp.eq(%1, #1)\n\t" @@ -234,34 +237,16 @@ static inline int test_clrtnew(int arg1, int old_val) return ret; } -int err; - -static void check(int val, int expect) -{ - if (val != expect) { - printf("ERROR: 0x%04x != 0x%04x\n", val, expect); - err++; - } -} - -static void check64(long long val, long long expect) -{ - if (val != expect) { - printf("ERROR: 0x%016llx != 0x%016llx\n", val, expect); - err++; - } -} - uint32_t init[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; uint32_t array[10]; -uint32_t early_exit; +bool early_exit; /* * Write this as a function because we can't guarantee the compiler will * allocate a frame with just the SL2_return_tnew packet. */ -static void SL2_return_tnew(int x); +static void SL2_return_tnew(bool pred); asm ("SL2_return_tnew:\n\t" " allocframe(#0)\n\t" " r1 = #1\n\t" @@ -275,9 +260,9 @@ asm ("SL2_return_tnew:\n\t" " dealloc_return\n\t" ); -static long long creg_pair(int x, int y) +static int64_t creg_pair(int32_t x, int32_t y) { - long long retval; + int64_t retval; asm ("m0 = %1\n\t" "m1 = %2\n\t" "%0 = c7:6\n\t" @@ -285,20 +270,22 @@ static long long creg_pair(int x, int y) return retval; } -static long long decbin(long long x, long long y, int *pred) +#if CORE_HAS_CABAC +static int64_t decbin(int64_t x, int64_t y, bool *pred) { - long long retval; + int64_t retval; asm ("%0 = decbin(%2, %3)\n\t" "%1 = p0\n\t" : "=r"(retval), "=r"(*pred) : "r"(x), "r"(y)); return retval; } +#endif /* Check that predicates are auto-and'ed in a packet */ -static int auto_and(void) +static bool auto_and(void) { - int retval; + bool retval; asm ("r5 = #1\n\t" "{\n\t" " p0 = cmp.eq(r1, #1)\n\t" @@ -313,7 +300,7 @@ static int auto_and(void) void test_lsbnew(void) { - int result; + int32_t result; asm("r0 = #2\n\t" "r1 = #5\n\t" @@ -323,7 +310,7 @@ void test_lsbnew(void) "}\n\t" "%0 = r1\n\t" : "=r"(result) :: "r0", "r1", "p0"); - check(result, 5); + check32(result, 5); } void test_l2fetch(void) @@ -333,141 +320,233 @@ void test_l2fetch(void) "l2fetch(r0, r3:2)\n\t"); } +static inline int32_t ct0(uint32_t x) +{ + int32_t res; + asm("%0 = ct0(%1)\n\t" : "=r"(res) : "r"(x)); + return res; +} + +static inline int32_t ct1(uint32_t x) +{ + int32_t res; + asm("%0 = ct1(%1)\n\t" : "=r"(res) : "r"(x)); + return res; +} + +static inline int32_t ct0p(uint64_t x) +{ + int32_t res; + asm("%0 = ct0(%1)\n\t" : "=r"(res) : "r"(x)); + return res; +} + +static inline int32_t ct1p(uint64_t x) +{ + int32_t res; + asm("%0 = ct1(%1)\n\t" : "=r"(res) : "r"(x)); + return res; +} + +void 
test_count_trailing_zeros_ones(void) +{ + check32(ct0(0x0000000f), 0); + check32(ct0(0x00000000), 32); + check32(ct0(0x000000f0), 4); + + check32(ct1(0x000000f0), 0); + check32(ct1(0x0000000f), 4); + check32(ct1(0x00000000), 0); + check32(ct1(0xffffffff), 32); + + check32(ct0p(0x000000000000000fULL), 0); + check32(ct0p(0x0000000000000000ULL), 64); + check32(ct0p(0x00000000000000f0ULL), 4); + + check32(ct1p(0x00000000000000f0ULL), 0); + check32(ct1p(0x000000000000000fULL), 4); + check32(ct1p(0x0000000000000000ULL), 0); + check32(ct1p(0xffffffffffffffffULL), 64); + check32(ct1p(0xffffffffff0fffffULL), 20); + check32(ct1p(0xffffff0fffffffffULL), 36); +} + +static inline int32_t dpmpyss_rnd_s0(int32_t x, int32_t y) +{ + int32_t res; + asm("%0 = mpy(%1, %2):rnd\n\t" : "=r"(res) : "r"(x), "r"(y)); + return res; +} + +void test_dpmpyss_rnd_s0(void) +{ + check32(dpmpyss_rnd_s0(-1, 0x80000000), 1); + check32(dpmpyss_rnd_s0(0, 0x80000000), 0); + check32(dpmpyss_rnd_s0(1, 0x80000000), 0); + check32(dpmpyss_rnd_s0(0x7fffffff, 0x80000000), 0xc0000001); + check32(dpmpyss_rnd_s0(0x80000000, -1), 1); + check32(dpmpyss_rnd_s0(-1, -1), 0); + check32(dpmpyss_rnd_s0(0, -1), 0); + check32(dpmpyss_rnd_s0(1, -1), 0); + check32(dpmpyss_rnd_s0(0x7fffffff, -1), 0); + check32(dpmpyss_rnd_s0(0x80000000, 0), 0); + check32(dpmpyss_rnd_s0(-1, 0), 0); + check32(dpmpyss_rnd_s0(0, 0), 0); + check32(dpmpyss_rnd_s0(1, 0), 0); + check32(dpmpyss_rnd_s0(-1, -1), 0); + check32(dpmpyss_rnd_s0(0, -1), 0); + check32(dpmpyss_rnd_s0(1, -1), 0); + check32(dpmpyss_rnd_s0(0x7fffffff, 1), 0); + check32(dpmpyss_rnd_s0(0x80000000, 0x7fffffff), 0xc0000001); + check32(dpmpyss_rnd_s0(-1, 0x7fffffff), 0); + check32(dpmpyss_rnd_s0(0, 0x7fffffff), 0); + check32(dpmpyss_rnd_s0(1, 0x7fffffff), 0); + check32(dpmpyss_rnd_s0(0x7fffffff, 0x7fffffff), 0x3fffffff); +} + int main() { - int res; - long long res64; - int pred; + int32_t res; + int64_t res64; + bool pred; memcpy(array, init, sizeof(array)); S4_storerhnew_rr(array, 4, 0xffff); - check(array[4], 0xffff); + check32(array[4], 0xffff); data = ~0; - check((uint32_t)S4_storerbnew_ap(0x12), (uint32_t)&data); - check(data, 0xffffff12); + checkp(S4_storerbnew_ap(0x12), &data); + check32(data, 0xffffff12); data = ~0; - check((uint32_t)S4_storerhnew_ap(0x1234), (uint32_t)&data); - check(data, 0xffff1234); + checkp(S4_storerhnew_ap(0x1234), &data); + check32(data, 0xffff1234); data = ~0; - check((uint32_t)S4_storerinew_ap(0x12345678), (uint32_t)&data); - check(data, 0x12345678); + checkp(S4_storerinew_ap(0x12345678), &data); + check32(data, 0x12345678); /* Byte */ memcpy(array, init, sizeof(array)); - S4_storeirbt_io(&array[1], 1); - check(array[2], 27); - S4_storeirbt_io(&array[2], 0); - check(array[3], 3); + S4_storeirbt_io(&array[1], true); + check32(array[2], 27); + S4_storeirbt_io(&array[2], false); + check32(array[3], 3); memcpy(array, init, sizeof(array)); - S4_storeirbf_io(&array[3], 0); - check(array[4], 27); - S4_storeirbf_io(&array[4], 1); - check(array[5], 5); + S4_storeirbf_io(&array[3], false); + check32(array[4], 27); + S4_storeirbf_io(&array[4], true); + check32(array[5], 5); memcpy(array, init, sizeof(array)); - S4_storeirbtnew_io(&array[5], 1); - check(array[6], 27); - S4_storeirbtnew_io(&array[6], 0); - check(array[7], 7); + S4_storeirbtnew_io(&array[5], true); + check32(array[6], 27); + S4_storeirbtnew_io(&array[6], false); + check32(array[7], 7); memcpy(array, init, sizeof(array)); - S4_storeirbfnew_io(&array[7], 0); - check(array[8], 27); - S4_storeirbfnew_io(&array[8], 1); - 
check(array[9], 9); + S4_storeirbfnew_io(&array[7], false); + check32(array[8], 27); + S4_storeirbfnew_io(&array[8], true); + check32(array[9], 9); /* Half word */ memcpy(array, init, sizeof(array)); - S4_storeirht_io(&array[1], 1); - check(array[2], 27); - S4_storeirht_io(&array[2], 0); - check(array[3], 3); + S4_storeirht_io(&array[1], true); + check32(array[2], 27); + S4_storeirht_io(&array[2], false); + check32(array[3], 3); memcpy(array, init, sizeof(array)); - S4_storeirhf_io(&array[3], 0); - check(array[4], 27); - S4_storeirhf_io(&array[4], 1); - check(array[5], 5); + S4_storeirhf_io(&array[3], false); + check32(array[4], 27); + S4_storeirhf_io(&array[4], true); + check32(array[5], 5); memcpy(array, init, sizeof(array)); - S4_storeirhtnew_io(&array[5], 1); - check(array[6], 27); - S4_storeirhtnew_io(&array[6], 0); - check(array[7], 7); + S4_storeirhtnew_io(&array[5], true); + check32(array[6], 27); + S4_storeirhtnew_io(&array[6], false); + check32(array[7], 7); memcpy(array, init, sizeof(array)); - S4_storeirhfnew_io(&array[7], 0); - check(array[8], 27); - S4_storeirhfnew_io(&array[8], 1); - check(array[9], 9); + S4_storeirhfnew_io(&array[7], false); + check32(array[8], 27); + S4_storeirhfnew_io(&array[8], true); + check32(array[9], 9); /* Word */ memcpy(array, init, sizeof(array)); - S4_storeirit_io(&array[1], 1); - check(array[2], 27); - S4_storeirit_io(&array[2], 0); - check(array[3], 3); + S4_storeirit_io(&array[1], true); + check32(array[2], 27); + S4_storeirit_io(&array[2], false); + check32(array[3], 3); memcpy(array, init, sizeof(array)); - S4_storeirif_io(&array[3], 0); - check(array[4], 27); - S4_storeirif_io(&array[4], 1); - check(array[5], 5); + S4_storeirif_io(&array[3], false); + check32(array[4], 27); + S4_storeirif_io(&array[4], true); + check32(array[5], 5); memcpy(array, init, sizeof(array)); - S4_storeiritnew_io(&array[5], 1); - check(array[6], 27); - S4_storeiritnew_io(&array[6], 0); - check(array[7], 7); + S4_storeiritnew_io(&array[5], true); + check32(array[6], 27); + S4_storeiritnew_io(&array[6], false); + check32(array[7], 7); memcpy(array, init, sizeof(array)); - S4_storeirifnew_io(&array[7], 0); - check(array[8], 27); - S4_storeirifnew_io(&array[8], 1); - check(array[9], 9); + S4_storeirifnew_io(&array[7], false); + check32(array[8], 27); + S4_storeirifnew_io(&array[8], true); + check32(array[9], 9); memcpy(array, init, sizeof(array)); - res = L2_ploadrifnew_pi(&array[6], 0); - check(res, 6); - res = L2_ploadrifnew_pi(&array[7], 1); - check(res, 31); + res = L2_ploadrifnew_pi(&array[6], false); + check32(res, 6); + res = L2_ploadrifnew_pi(&array[7], true); + check32(res, 31); - int x = cmpnd_cmp_jump(); - check(x, 12); + res = cmpnd_cmp_jump(); + check32(res, 12); - SL2_return_tnew(0); - check(early_exit, 0); - SL2_return_tnew(1); - check(early_exit, 1); + SL2_return_tnew(false); + check32(early_exit, false); + SL2_return_tnew(true); + check32(early_exit, true); - long long pair = creg_pair(5, 7); - check((int)pair, 5); - check((int)(pair >> 32), 7); + res64 = creg_pair(5, 7); + check32((int32_t)res64, 5); + check32((int32_t)(res64 >> 32), 7); res = test_clrtnew(1, 7); - check(res, 0); + check32(res, 0); res = test_clrtnew(2, 7); - check(res, 7); + check32(res, 7); +#if CORE_HAS_CABAC res64 = decbin(0xf0f1f2f3f4f5f6f7LL, 0x7f6f5f4f3f2f1f0fLL, &pred); check64(res64, 0x357980003700010cLL); - check(pred, 0); + check32(pred, false); res64 = decbin(0xfLL, 0x1bLL, &pred); check64(res64, 0x78000100LL); - check(pred, 1); + check32(pred, true); +#else + 
puts("Skipping cabac tests"); +#endif - res = auto_and(); - check(res, 0); + pred = auto_and(); + check32(pred, false); test_lsbnew(); test_l2fetch(); + test_count_trailing_zeros_ones(); + + test_dpmpyss_rnd_s0(); + puts(err ? "FAIL" : "PASS"); return err; } diff --git a/tests/tcg/hexagon/multi_result.c b/tests/tcg/hexagon/multi_result.c index 52997b3128..38ee369e76 100644 --- a/tests/tcg/hexagon/multi_result.c +++ b/tests/tcg/hexagon/multi_result.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -16,11 +16,17 @@ */ #include +#include +#include -static int sfrecipa(int Rs, int Rt, int *pred_result) +int err; + +#include "hex_test.h" + +static int32_t sfrecipa(int32_t Rs, int32_t Rt, bool *pred_result) { - int result; - int predval; + int32_t result; + bool predval; asm volatile("%0,p0 = sfrecipa(%2, %3)\n\t" "%1 = p0\n\t" @@ -31,10 +37,10 @@ static int sfrecipa(int Rs, int Rt, int *pred_result) return result; } -static int sfinvsqrta(int Rs, int *pred_result) +static int32_t sfinvsqrta(int32_t Rs, int32_t *pred_result) { - int result; - int predval; + int32_t result; + int32_t predval; asm volatile("%0,p0 = sfinvsqrta(%2)\n\t" "%1 = p0\n\t" @@ -45,12 +51,12 @@ static int sfinvsqrta(int Rs, int *pred_result) return result; } -static long long vacsh(long long Rxx, long long Rss, long long Rtt, - int *pred_result, int *ovf_result) +static int64_t vacsh(int64_t Rxx, int64_t Rss, int64_t Rtt, + int *pred_result, bool *ovf_result) { - long long result = Rxx; + int64_t result = Rxx; int predval; - int usr; + uint32_t usr; /* * This instruction can set bit 0 (OVF/overflow) in usr @@ -70,11 +76,10 @@ static long long vacsh(long long Rxx, long long Rss, long long Rtt, return result; } -static long long vminub(long long Rtt, long long Rss, - int *pred_result) +static int64_t vminub(int64_t Rtt, int64_t Rss, int32_t *pred_result) { - long long result; - int predval; + int64_t result; + int32_t predval; asm volatile("%0,p0 = vminub(%2, %3)\n\t" "%1 = p0\n\t" @@ -85,11 +90,11 @@ static long long vminub(long long Rtt, long long Rss, return result; } -static long long add_carry(long long Rss, long long Rtt, - int pred_in, int *pred_result) +static int64_t add_carry(int64_t Rss, int64_t Rtt, + int32_t pred_in, int32_t *pred_result) { - long long result; - int predval = pred_in; + int64_t result; + int32_t predval = pred_in; asm volatile("p0 = %1\n\t" "%0 = add(%2, %3, p0):carry\n\t" @@ -101,11 +106,11 @@ static long long add_carry(long long Rss, long long Rtt, return result; } -static long long sub_carry(long long Rss, long long Rtt, - int pred_in, int *pred_result) +static int64_t sub_carry(int64_t Rss, int64_t Rtt, + int32_t pred_in, int32_t *pred_result) { - long long result; - int predval = pred_in; + int64_t result; + int32_t predval = pred_in; asm volatile("p0 = !cmp.eq(%1, #0)\n\t" "%0 = sub(%2, %3, p0):carry\n\t" @@ -117,155 +122,129 @@ static long long sub_carry(long long Rss, long long Rtt, return result; } -int err; - -static void check_ll(long long val, long long expect) -{ - if (val != expect) { - printf("ERROR: 0x%016llx != 0x%016llx\n", val, expect); - err++; - } -} - -static void check(int val, int expect) -{ - if (val != expect) { - printf("ERROR: 0x%08x != 0x%08x\n", val, expect); - err++; - } -} - -static void 
check_p(int val, int expect) -{ - if (val != expect) { - printf("ERROR: 0x%02x != 0x%02x\n", val, expect); - err++; - } -} - static void test_sfrecipa() { - int res; - int pred_result; + int32_t res; + bool pred_result; res = sfrecipa(0x04030201, 0x05060708, &pred_result); - check(res, 0x59f38001); - check_p(pred_result, 0x00); + check32(res, 0x59f38001); + check32(pred_result, false); } static void test_sfinvsqrta() { - int res; - int pred_result; + int32_t res; + int32_t pred_result; res = sfinvsqrta(0x04030201, &pred_result); - check(res, 0x4d330000); - check_p(pred_result, 0xe0); + check32(res, 0x4d330000); + check32(pred_result, 0xe0); res = sfinvsqrta(0x0, &pred_result); - check(res, 0x3f800000); - check_p(pred_result, 0x0); + check32(res, 0x3f800000); + check32(pred_result, 0x0); } static void test_vacsh() { - long long res64; - int pred_result; - int ovf_result; + int64_t res64; + int32_t pred_result; + bool ovf_result; res64 = vacsh(0x0004000300020001LL, 0x0001000200030004LL, 0x0000000000000000LL, &pred_result, &ovf_result); - check_ll(res64, 0x0004000300030004LL); - check_p(pred_result, 0xf0); - check(ovf_result, 0); + check64(res64, 0x0004000300030004LL); + check32(pred_result, 0xf0); + check32(ovf_result, false); res64 = vacsh(0x0004000300020001LL, 0x0001000200030004LL, 0x000affff000d0000LL, &pred_result, &ovf_result); - check_ll(res64, 0x000e0003000f0004LL); - check_p(pred_result, 0xcc); - check(ovf_result, 0); + check64(res64, 0x000e0003000f0004LL); + check32(pred_result, 0xcc); + check32(ovf_result, false); res64 = vacsh(0x00047fff00020001LL, 0x00017fff00030004LL, 0x000a0fff000d0000LL, &pred_result, &ovf_result); - check_ll(res64, 0x000e7fff000f0004LL); - check_p(pred_result, 0xfc); - check(ovf_result, 1); + check64(res64, 0x000e7fff000f0004LL); + check32(pred_result, 0xfc); + check32(ovf_result, true); res64 = vacsh(0x0004000300020001LL, 0x0001000200030009LL, 0x000affff000d0001LL, &pred_result, &ovf_result); - check_ll(res64, 0x000e0003000f0008LL); - check_p(pred_result, 0xcc); - check(ovf_result, 0); + check64(res64, 0x000e0003000f0008LL); + check32(pred_result, 0xcc); + check32(ovf_result, false); } static void test_vminub() { - long long res64; - int pred_result; + int64_t res64; + int32_t pred_result; res64 = vminub(0x0807060504030201LL, 0x0102030405060708LL, &pred_result); - check_ll(res64, 0x0102030404030201LL); - check_p(pred_result, 0xf0); + check64(res64, 0x0102030404030201LL); + check32(pred_result, 0xf0); res64 = vminub(0x0802060405030701LL, 0x0107030504060208LL, &pred_result); - check_ll(res64, 0x0102030404030201LL); - check_p(pred_result, 0xaa); + check64(res64, 0x0102030404030201LL); + check32(pred_result, 0xaa); } static void test_add_carry() { - long long res64; - int pred_result; + int64_t res64; + int32_t pred_result; res64 = add_carry(0x0000000000000000LL, 0xffffffffffffffffLL, 1, &pred_result); - check_ll(res64, 0x0000000000000000LL); - check_p(pred_result, 0xff); + check64(res64, 0x0000000000000000LL); + check32(pred_result, 0xff); res64 = add_carry(0x0000000100000000LL, 0xffffffffffffffffLL, 0, &pred_result); - check_ll(res64, 0x00000000ffffffffLL); - check_p(pred_result, 0xff); + check64(res64, 0x00000000ffffffffLL); + check32(pred_result, 0xff); res64 = add_carry(0x0000000100000000LL, 0xffffffffffffffffLL, 0, &pred_result); - check_ll(res64, 0x00000000ffffffffLL); - check_p(pred_result, 0xff); + check64(res64, 0x00000000ffffffffLL); + check32(pred_result, 0xff); } static void test_sub_carry() { - long long res64; - int pred_result; + int64_t res64; + 
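/*
 * A scalar sketch of the :carry semantics exercised by test_add_carry above
 * and test_sub_carry just below, assuming the carry-out is replicated across
 * all eight predicate bits (0x00 or 0xff) and the subtract form is
 * add-with-carry of the complemented second operand.  add_carry_ref is a
 * hypothetical helper.
 */
static uint64_t add_carry_ref(uint64_t a, uint64_t b, bool c_in,
                              int32_t *pred)
{
    uint64_t ab = a + b;
    uint64_t sum = ab + c_in;
    bool c_out = (ab < a) || (sum < ab);    /* unsigned wrap detection */
    *pred = c_out ? 0xff : 0x00;
    return sum;
}
/*
 * sub(Rss, Rtt, p0):carry then behaves like add_carry_ref(Rss, ~Rtt, c_in),
 * e.g. add_carry_ref(0, ~0ULL, true) gives 0 with pred 0xff, matching
 * sub_carry(0, 0, 1) below.
 */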
int32_t pred_result; res64 = sub_carry(0x0000000000000000LL, 0x0000000000000000LL, 1, &pred_result); - check_ll(res64, 0x0000000000000000LL); - check_p(pred_result, 0xff); + check64(res64, 0x0000000000000000LL); + check32(pred_result, 0xff); res64 = sub_carry(0x0000000100000000LL, 0x0000000000000000LL, 0, &pred_result); - check_ll(res64, 0x00000000ffffffffLL); - check_p(pred_result, 0xff); + check64(res64, 0x00000000ffffffffLL); + check32(pred_result, 0xff); res64 = sub_carry(0x0000000100000000LL, 0x0000000000000000LL, 0, &pred_result); - check_ll(res64, 0x00000000ffffffffLL); - check_p(pred_result, 0xff); + check64(res64, 0x00000000ffffffffLL); + check32(pred_result, 0xff); } int main() diff --git a/tests/tcg/hexagon/overflow.c b/tests/tcg/hexagon/overflow.c index 94087851b0..7b5b9ebdde 100644 --- a/tests/tcg/hexagon/overflow.c +++ b/tests/tcg/hexagon/overflow.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2021-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2021-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -17,30 +17,21 @@ #include #include +#include #include #include #include #include #include - int err; -static void __check(const char *filename, int line, int x, int expect) -{ - if (x != expect) { - printf("ERROR %s:%d - %d != %d\n", - filename, line, x, expect); - err++; - } -} +#include "hex_test.h" -#define check(x, expect) __check(__FILE__, __LINE__, (x), (expect)) - -static int satub(int src, int *p, int *ovf_result) +static int32_t satub(int32_t src, int32_t *p, bool *ovf_result) { - int result; - int usr; + int32_t result; + uint32_t usr; /* * This instruction can set bit 0 (OVF/overflow) in usr @@ -65,30 +56,30 @@ static int satub(int src, int *p, int *ovf_result) return result; } -int read_usr_overflow(void) +bool read_usr_overflow(void) { - int result; - asm volatile("%0 = usr\n\t" : "=r"(result)); - return result & 1; + uint32_t usr; + asm volatile("%0 = usr\n\t" : "=r"(usr)); + return usr & 1; } -int get_usr_overflow(int usr) +bool get_usr_overflow(uint32_t usr) { return usr & 1; } -int get_usr_fp_invalid(int usr) +bool get_usr_fp_invalid(uint32_t usr) { return (usr >> 1) & 1; } -int get_usr_lpcfg(int usr) +int32_t get_usr_lpcfg(uint32_t usr) { return (usr >> 8) & 0x3; } jmp_buf jmp_env; -int usr_overflow; +bool usr_overflow; static void sig_segv(int sig, siginfo_t *info, void *puc) { @@ -98,9 +89,9 @@ static void sig_segv(int sig, siginfo_t *info, void *puc) static void test_packet(void) { - int convres; - int satres; - int usr; + int32_t convres; + int32_t satres; + uint32_t usr; asm("r2 = usr\n\t" "r2 = clrbit(r2, #0)\n\t" /* clear overflow bit */ @@ -115,10 +106,10 @@ static void test_packet(void) : "r"(0x6a051b86), "r"(0x0410eec0) : "r2", "usr"); - check(convres, 0xffffffff); - check(satres, 0x7f); - check(get_usr_overflow(usr), 1); - check(get_usr_fp_invalid(usr), 1); + check32(convres, 0xffffffff); + check32(satres, 0x7f); + check32(get_usr_overflow(usr), true); + check32(get_usr_fp_invalid(usr), true); asm("r2 = usr\n\t" "r2 = clrbit(r2, #0)\n\t" /* clear overflow bit */ @@ -134,15 +125,15 @@ static void test_packet(void) : "r"(0x0410eec0) : "r2", "usr", "p3", "sa0", "lc0"); - check(satres, 0x7f); - check(get_usr_overflow(usr), 1); - check(get_usr_lpcfg(usr), 2); + check32(satres, 0x7f); + check32(get_usr_overflow(usr), true); + check32(get_usr_lpcfg(usr), 2); } int main() { struct 
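/*
 * What the satub test above drives, as a scalar sketch: saturate a signed
 * word to an unsigned byte, and make the sticky USR.OVF bit (bit 0) true
 * only when clamping occurred.  This assumes the usual Hexagon saturation
 * contract; satub_ref is a hypothetical helper.
 */
static int32_t satub_ref(int32_t src, bool *ovf)
{
    if (src < 0) { *ovf = true; return 0; }
    if (src > 0xff) { *ovf = true; return 0xff; }
    return src;    /* in range: OVF is sticky, so it is left alone */
}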
sigaction act; - int ovf; + bool ovf; /* SIGSEGV test */ act.sa_sigaction = sig_segv; @@ -157,7 +148,7 @@ int main() sigemptyset(&act.sa_mask); act.sa_flags = 0; - check(usr_overflow, 0); + check32(usr_overflow, false); test_packet(); diff --git a/tests/tcg/hexagon/preg_alias.c b/tests/tcg/hexagon/preg_alias.c index b44a8112b4..892ecbbdbf 100644 --- a/tests/tcg/hexagon/preg_alias.c +++ b/tests/tcg/hexagon/preg_alias.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -16,10 +16,15 @@ */ #include +#include -static inline int preg_alias(int v0, int v1, int v2, int v3) +int err; + +#include "hex_test.h" + +static uint32_t preg_alias(uint8_t v0, uint8_t v1, uint8_t v2, uint8_t v3) { - int ret; + uint32_t ret; asm volatile("p0 = %1\n\t" "p1 = %2\n\t" "p2 = %3\n\t" @@ -31,9 +36,9 @@ static inline int preg_alias(int v0, int v1, int v2, int v3) return ret; } -static inline int preg_alias_pair(int v0, int v1, int v2, int v3) +static uint32_t preg_alias_pair(uint8_t v0, uint8_t v1, uint8_t v2, uint8_t v3) { - long long c54; + uint64_t c54; asm volatile("p0 = %1\n\t" "p1 = %2\n\t" "p2 = %3\n\t" @@ -42,20 +47,20 @@ static inline int preg_alias_pair(int v0, int v1, int v2, int v3) : "=r"(c54) : "r"(v0), "r"(v1), "r"(v2), "r"(v3) : "p0", "p1", "p2", "p3"); - return (int)c54; + return (uint32_t)c54; } typedef union { - int creg; + uint32_t creg; struct { - unsigned char p0; - unsigned char p1; - unsigned char p2; - unsigned char p3; + uint8_t p0; + uint8_t p1; + uint8_t p2; + uint8_t p3; } pregs; } PRegs; -static inline void creg_alias(int cval, PRegs *pregs) +static inline void creg_alias(uint32_t cval, PRegs *pregs) { asm("c4 = %4\n\t" "%0 = p0\n\t" @@ -65,23 +70,13 @@ static inline void creg_alias(int cval, PRegs *pregs) : "=r"(pregs->pregs.p0), "=r"(pregs->pregs.p1), "=r"(pregs->pregs.p2), "=r"(pregs->pregs.p3) : "r"(cval) - : "p0", "p1", "p2", "p3"); + : "c4", "p0", "p1", "p2", "p3"); } -int err; - -static void check(int val, int expect) +static inline void creg_alias_pair(uint32_t cval, PRegs *pregs) { - if (val != expect) { - printf("ERROR: 0x%08x != 0x%08x\n", val, expect); - err++; - } -} - -static inline void creg_alias_pair(unsigned int cval, PRegs *pregs) -{ - unsigned long long cval_pair = (0xdeadbeefULL << 32) | cval; - int c5; + uint64_t cval_pair = (0xdeadbeefULL << 32) | cval; + uint32_t c5; asm ("c5:4 = %5\n\t" "%0 = p0\n\t" @@ -92,9 +87,9 @@ static inline void creg_alias_pair(unsigned int cval, PRegs *pregs) : "=r"(pregs->pregs.p0), "=r"(pregs->pregs.p1), "=r"(pregs->pregs.p2), "=r"(pregs->pregs.p3), "=r"(c5) : "r"(cval_pair) - : "p0", "p1", "p2", "p3"); + : "c4", "c5", "p0", "p1", "p2", "p3"); - check(c5, 0xdeadbeef); + check32(c5, 0xdeadbeef); } static void test_packet(void) @@ -104,8 +99,8 @@ static void test_packet(void) * that are read during the packet. 
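/*
 * The packing that the c4 checks in main() below rely on: c4 aliases the
 * four predicate registers, one byte per predicate, with p0 in the low
 * byte.  pack_c4 is a hypothetical helper mirroring the PRegs union above.
 */
static uint32_t pack_c4(uint8_t p0, uint8_t p1, uint8_t p2, uint8_t p3)
{
    return (uint32_t)p0 | (uint32_t)p1 << 8 |
           (uint32_t)p2 << 16 | (uint32_t)p3 << 24;
}
/* e.g. pack_c4(0xff, 0x00, 0xff, 0x00) == 0x00ff00ff, as preg_alias checks */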
*/ - int result; - int old_val = 0x0000001c; + uint32_t result; + uint32_t old_val = 0x0000001c; /* Test a predicated register transfer */ result = old_val; @@ -117,8 +112,8 @@ static void test_packet(void) "}\n\t" : "+r"(result) : "r"(0xffffffff), "r"(0xff00ffff), "r"(0x837ed653) - : "p0", "p1", "p2", "p3"); - check(result, old_val); + : "c4", "p0", "p1", "p2", "p3"); + check32(result, old_val); /* Test a predicated store */ result = 0xffffffff; @@ -129,74 +124,74 @@ static void test_packet(void) "}\n\t" : : "r"(0), "r"(0xffffffff), "r"(&result) - : "p0", "p1", "p2", "p3", "memory"); - check(result, 0x0); + : "c4", "p0", "p1", "p2", "p3", "memory"); + check32(result, 0x0); } int main() { - int c4; + uint32_t c4; PRegs pregs; c4 = preg_alias(0xff, 0x00, 0xff, 0x00); - check(c4, 0x00ff00ff); + check32(c4, 0x00ff00ff); c4 = preg_alias(0xff, 0x00, 0x00, 0x00); - check(c4, 0x000000ff); + check32(c4, 0x000000ff); c4 = preg_alias(0x00, 0xff, 0x00, 0x00); - check(c4, 0x0000ff00); + check32(c4, 0x0000ff00); c4 = preg_alias(0x00, 0x00, 0xff, 0x00); - check(c4, 0x00ff0000); + check32(c4, 0x00ff0000); c4 = preg_alias(0x00, 0x00, 0x00, 0xff); - check(c4, 0xff000000); + check32(c4, 0xff000000); c4 = preg_alias(0xff, 0xff, 0xff, 0xff); - check(c4, 0xffffffff); + check32(c4, 0xffffffff); c4 = preg_alias_pair(0xff, 0x00, 0xff, 0x00); - check(c4, 0x00ff00ff); + check32(c4, 0x00ff00ff); c4 = preg_alias_pair(0xff, 0x00, 0x00, 0x00); - check(c4, 0x000000ff); + check32(c4, 0x000000ff); c4 = preg_alias_pair(0x00, 0xff, 0x00, 0x00); - check(c4, 0x0000ff00); + check32(c4, 0x0000ff00); c4 = preg_alias_pair(0x00, 0x00, 0xff, 0x00); - check(c4, 0x00ff0000); + check32(c4, 0x00ff0000); c4 = preg_alias_pair(0x00, 0x00, 0x00, 0xff); - check(c4, 0xff000000); + check32(c4, 0xff000000); c4 = preg_alias_pair(0xff, 0xff, 0xff, 0xff); - check(c4, 0xffffffff); + check32(c4, 0xffffffff); creg_alias(0x00ff00ff, &pregs); - check(pregs.creg, 0x00ff00ff); + check32(pregs.creg, 0x00ff00ff); creg_alias(0x00ffff00, &pregs); - check(pregs.creg, 0x00ffff00); + check32(pregs.creg, 0x00ffff00); creg_alias(0x00000000, &pregs); - check(pregs.creg, 0x00000000); + check32(pregs.creg, 0x00000000); creg_alias(0xff000000, &pregs); - check(pregs.creg, 0xff000000); + check32(pregs.creg, 0xff000000); creg_alias(0x00ff0000, &pregs); - check(pregs.creg, 0x00ff0000); + check32(pregs.creg, 0x00ff0000); creg_alias(0x0000ff00, &pregs); - check(pregs.creg, 0x0000ff00); + check32(pregs.creg, 0x0000ff00); creg_alias(0x000000ff, &pregs); - check(pregs.creg, 0x000000ff); + check32(pregs.creg, 0x000000ff); creg_alias(0xffffffff, &pregs); - check(pregs.creg, 0xffffffff); + check32(pregs.creg, 0xffffffff); creg_alias_pair(0x00ff00ff, &pregs); - check(pregs.creg, 0x00ff00ff); + check32(pregs.creg, 0x00ff00ff); creg_alias_pair(0x00ffff00, &pregs); - check(pregs.creg, 0x00ffff00); + check32(pregs.creg, 0x00ffff00); creg_alias_pair(0x00000000, &pregs); - check(pregs.creg, 0x00000000); + check32(pregs.creg, 0x00000000); creg_alias_pair(0xff000000, &pregs); - check(pregs.creg, 0xff000000); + check32(pregs.creg, 0xff000000); creg_alias_pair(0x00ff0000, &pregs); - check(pregs.creg, 0x00ff0000); + check32(pregs.creg, 0x00ff0000); creg_alias_pair(0x0000ff00, &pregs); - check(pregs.creg, 0x0000ff00); + check32(pregs.creg, 0x0000ff00); creg_alias_pair(0x000000ff, &pregs); - check(pregs.creg, 0x000000ff); + check32(pregs.creg, 0x000000ff); creg_alias_pair(0xffffffff, &pregs); - check(pregs.creg, 0xffffffff); + check32(pregs.creg, 0xffffffff); test_packet(); diff --git 
a/tests/tcg/hexagon/read_write_overlap.c b/tests/tcg/hexagon/read_write_overlap.c new file mode 100644 index 0000000000..95c54ccd63 --- /dev/null +++ b/tests/tcg/hexagon/read_write_overlap.c @@ -0,0 +1,127 @@ +/* + * Copyright(c) 2023 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * Test instructions where the semantics write to the destination + * before all the operand reads have been completed. + * + * These instructions are problematic when we short-circuit the + * register writes because the destination and source operands could + * be the same TCGv. + * + * We test by forcing the read and write to be register r7. + */ + +#include +#include +#include + +int err; + +#include "hex_test.h" + +#define insert(RES, X, WIDTH, OFFSET) \ + asm("r7 = %1\n\t" \ + "r7 = insert(r7, #" #WIDTH ", #" #OFFSET ")\n\t" \ + "%0 = r7\n\t" \ + : "=r"(RES) : "r"(X) : "r7") + +static void test_insert(void) +{ + uint32_t res; + + insert(res, 0x12345678, 8, 1); + check32(res, 0x123456f0); + insert(res, 0x12345678, 0, 1); + check32(res, 0x12345678); + insert(res, 0x12345678, 20, 16); + check32(res, 0x56785678); +} + +static inline uint32_t insert_rp(uint32_t x, uint32_t width, uint32_t offset) +{ + uint64_t width_offset = (uint64_t)width << 32 | offset; + uint32_t res; + asm("r7 = %1\n\t" + "r7 = insert(r7, %2)\n\t" + "%0 = r7\n\t" + : "=r"(res) : "r"(x), "r"(width_offset) : "r7"); + return res; + +} + +static void test_insert_rp(void) +{ + check32(insert_rp(0x12345678, 8, 1), 0x123456f0); + check32(insert_rp(0x12345678, 63, 8), 0x34567878); + check32(insert_rp(0x12345678, 127, 8), 0x34567878); + check32(insert_rp(0x12345678, 8, 24), 0x78345678); + check32(insert_rp(0x12345678, 8, 63), 0x12345678); + check32(insert_rp(0x12345678, 8, 64), 0x00000000); +} + +static inline uint32_t asr_r_svw_trun(uint64_t x, uint32_t y) +{ + uint32_t res; + asm("r7 = %2\n\t" + "r7 = vasrw(%1, r7)\n\t" + "%0 = r7\n\t" + : "=r"(res) : "r"(x), "r"(y) : "r7"); + return res; +} + +static void test_asr_r_svw_trun(void) +{ + check32(asr_r_svw_trun(0x1111111122222222ULL, 5), + 0x88881111); + check32(asr_r_svw_trun(0x1111111122222222ULL, 63), + 0x00000000); + check32(asr_r_svw_trun(0x1111111122222222ULL, 64), + 0x00000000); + check32(asr_r_svw_trun(0x1111111122222222ULL, 127), + 0x22224444); + check32(asr_r_svw_trun(0x1111111122222222ULL, 128), + 0x11112222); + check32(asr_r_svw_trun(0xffffffff22222222ULL, 128), + 0xffff2222); +} + +static inline uint32_t swiz(uint32_t x) +{ + uint32_t res; + asm("r7 = %1\n\t" + "r7 = swiz(r7)\n\t" + "%0 = r7\n\t" + : "=r"(res) : "r"(x) : "r7"); + return res; +} + +static void test_swiz(void) +{ + check32(swiz(0x11223344), 0x44332211); +} + +int main() +{ + test_insert(); + test_insert_rp(); + test_asr_r_svw_trun(); + test_swiz(); + + puts(err ? "FAIL" : "PASS"); + return err ? 
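/*
 * A scalar model of the bit-field deposit tested above, assuming insert
 * places the low WIDTH bits of the source at OFFSET, with the masks
 * computed at 64-bit width before truncation to 32 bits -- which is how the
 * oversized width/offset probes in test_insert_rp come out.  The
 * offset >= 64 branch simply reproduces the single boundary check above;
 * insert_ref is a hypothetical helper.
 */
static uint32_t insert_ref(uint32_t dst, uint32_t src, uint32_t width,
                           uint32_t offset)
{
    if (offset >= 64) {
        return 0;                       /* matches the offset == 64 probe */
    }
    uint64_t field = width >= 64 ? ~0ULL : (1ULL << width) - 1;
    uint64_t mask = field << offset;
    uint64_t val = ((uint64_t)src & field) << offset;
    return (uint32_t)(((uint64_t)dst & ~mask) | val);
}
/* insert_ref(0x12345678, 0x12345678, 8, 1) == 0x123456f0, as checked above */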
EXIT_FAILURE : EXIT_SUCCESS; +} diff --git a/tests/tcg/hexagon/reg_mut.c b/tests/tcg/hexagon/reg_mut.c index 910e663ace..c5a39e5510 100644 --- a/tests/tcg/hexagon/reg_mut.c +++ b/tests/tcg/hexagon/reg_mut.c @@ -1,6 +1,6 @@ /* - * Copyright(c) 2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2022-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,27 +21,7 @@ static int err; -#define check(N, EXPECT) \ - do { \ - uint64_t value = N; \ - uint64_t expect = EXPECT; \ - if (value != EXPECT) { \ - printf("ERROR: \"%s\" 0x%04llx != 0x%04llx at %s:%d\n", #N, value, \ - expect, __FILE__, __LINE__); \ - err++; \ - } \ - } while (0) - -#define check_ne(N, EXPECT) \ - do { \ - uint64_t value = N; \ - uint64_t expect = EXPECT; \ - if (value == EXPECT) { \ - printf("ERROR: \"%s\" 0x%04llx == 0x%04llx at %s:%d\n", #N, value, \ - expect, __FILE__, __LINE__); \ - err++; \ - } \ - } while (0) +#include "hex_test.h" #define WRITE_REG_NOCLOBBER(output, reg_name, input) \ asm volatile(reg_name " = %1\n\t" \ @@ -85,35 +65,35 @@ static inline void write_control_registers(void) uint32_t result = 0; WRITE_REG_NOCLOBBER(result, "usr", 0xffffffff); - check(result, 0x3ecfff3f); + check32(result, 0x3ecfff3f); WRITE_REG_NOCLOBBER(result, "gp", 0xffffffff); - check(result, 0xffffffc0); + check32(result, 0xffffffc0); WRITE_REG_NOCLOBBER(result, "upcyclelo", 0xffffffff); - check(result, 0x00000000); + check32(result, 0x00000000); WRITE_REG_NOCLOBBER(result, "upcyclehi", 0xffffffff); - check(result, 0x00000000); + check32(result, 0x00000000); WRITE_REG_NOCLOBBER(result, "utimerlo", 0xffffffff); - check(result, 0x00000000); + check32(result, 0x00000000); WRITE_REG_NOCLOBBER(result, "utimerhi", 0xffffffff); - check(result, 0x00000000); + check32(result, 0x00000000); /* * PC is special. Setting it to these values * should cause a catastrophic failure. 
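/*
 * The pattern the write_control_registers checks above encode: writing
 * all-ones to a user-mode control register reads back only the
 * architecturally writable bits.  A sketch of that filter, with the masks
 * taken from the expected values above (readback_ref is hypothetical):
 */
static uint32_t readback_ref(uint32_t wr, uint32_t writable_mask)
{
    return wr & writable_mask;
}
/*
 *   usr:             readback_ref(0xffffffff, 0x3ecfff3f)
 *   gp:              readback_ref(0xffffffff, 0xffffffc0)  (low 6 bits zero)
 *   upcycle/utimer:  readback_ref(0xffffffff, 0x00000000)  (read-only)
 */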
*/ WRITE_REG_ENCODED(result, "pc", 0x00000000, PC_EQ_R0); - check_ne(result, 0x00000000); + check32_ne(result, 0x00000000); WRITE_REG_ENCODED(result, "pc", 0x00000001, PC_EQ_R0); - check_ne(result, 0x00000001); + check32_ne(result, 0x00000001); WRITE_REG_ENCODED(result, "pc", 0xffffffff, PC_EQ_R0); - check_ne(result, 0xffffffff); + check32_ne(result, 0xffffffff); } static inline void write_control_register_pairs(void) @@ -121,23 +101,23 @@ static inline void write_control_register_pairs(void) uint64_t result = 0; WRITE_REG_NOCLOBBER(result, "c11:10", 0xffffffffffffffff); - check(result, 0xffffffc0ffffffff); + check64(result, 0xffffffc0ffffffff); WRITE_REG_NOCLOBBER(result, "c15:14", 0xffffffffffffffff); - check(result, 0x0000000000000000); + check64(result, 0x0000000000000000); WRITE_REG_NOCLOBBER(result, "c31:30", 0xffffffffffffffff); - check(result, 0x0000000000000000); + check64(result, 0x0000000000000000); WRITE_REG_PAIR_ENCODED(result, "c9:8", (uint64_t) 0x0000000000000000, C9_8_EQ_R1_0); - check_ne(result, 0x000000000000000); + check64_ne(result, 0x000000000000000); WRITE_REG_PAIR_ENCODED(result, "c9:8", 0x0000000100000000, C9_8_EQ_R1_0); - check_ne(result, 0x0000000100000000); + check64_ne(result, 0x0000000100000000); WRITE_REG_PAIR_ENCODED(result, "c9:8", 0xffffffffffffffff, C9_8_EQ_R1_0); - check_ne(result, 0xffffffffffffffff); + check64_ne(result, 0xffffffffffffffff); } int main() diff --git a/tests/tcg/hexagon/scatter_gather.c b/tests/tcg/hexagon/scatter_gather.c index b93eb18133..bf8b5e0317 100644 --- a/tests/tcg/hexagon/scatter_gather.c +++ b/tests/tcg/hexagon/scatter_gather.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -40,47 +40,6 @@ typedef long HVX_VectorPair __attribute__((__vector_size__(256))) typedef long HVX_VectorPred __attribute__((__vector_size__(128))) __attribute__((aligned(128))); -#define VSCATTER_16(BASE, RGN, OFF, VALS) \ - __builtin_HEXAGON_V6_vscattermh_128B((int)BASE, RGN, OFF, VALS) -#define VSCATTER_16_MASKED(MASK, BASE, RGN, OFF, VALS) \ - __builtin_HEXAGON_V6_vscattermhq_128B(MASK, (int)BASE, RGN, OFF, VALS) -#define VSCATTER_32(BASE, RGN, OFF, VALS) \ - __builtin_HEXAGON_V6_vscattermw_128B((int)BASE, RGN, OFF, VALS) -#define VSCATTER_32_MASKED(MASK, BASE, RGN, OFF, VALS) \ - __builtin_HEXAGON_V6_vscattermwq_128B(MASK, (int)BASE, RGN, OFF, VALS) -#define VSCATTER_16_32(BASE, RGN, OFF, VALS) \ - __builtin_HEXAGON_V6_vscattermhw_128B((int)BASE, RGN, OFF, VALS) -#define VSCATTER_16_32_MASKED(MASK, BASE, RGN, OFF, VALS) \ - __builtin_HEXAGON_V6_vscattermhwq_128B(MASK, (int)BASE, RGN, OFF, VALS) -#define VSCATTER_16_ACC(BASE, RGN, OFF, VALS) \ - __builtin_HEXAGON_V6_vscattermh_add_128B((int)BASE, RGN, OFF, VALS) -#define VSCATTER_32_ACC(BASE, RGN, OFF, VALS) \ - __builtin_HEXAGON_V6_vscattermw_add_128B((int)BASE, RGN, OFF, VALS) -#define VSCATTER_16_32_ACC(BASE, RGN, OFF, VALS) \ - __builtin_HEXAGON_V6_vscattermhw_add_128B((int)BASE, RGN, OFF, VALS) - -#define VGATHER_16(DSTADDR, BASE, RGN, OFF) \ - __builtin_HEXAGON_V6_vgathermh_128B(DSTADDR, (int)BASE, RGN, OFF) -#define VGATHER_16_MASKED(DSTADDR, MASK, BASE, RGN, OFF) \ - __builtin_HEXAGON_V6_vgathermhq_128B(DSTADDR, MASK, (int)BASE, RGN, OFF) -#define VGATHER_32(DSTADDR, BASE, RGN, OFF) \ - __builtin_HEXAGON_V6_vgathermw_128B(DSTADDR, (int)BASE, RGN, OFF) -#define VGATHER_32_MASKED(DSTADDR, MASK, BASE, RGN, OFF) \ - __builtin_HEXAGON_V6_vgathermwq_128B(DSTADDR, MASK, (int)BASE, RGN, OFF) -#define VGATHER_16_32(DSTADDR, BASE, RGN, OFF) \ - __builtin_HEXAGON_V6_vgathermhw_128B(DSTADDR, (int)BASE, RGN, OFF) -#define VGATHER_16_32_MASKED(DSTADDR, MASK, BASE, RGN, OFF) \ - __builtin_HEXAGON_V6_vgathermhwq_128B(DSTADDR, MASK, (int)BASE, RGN, OFF) - -#define VSHUFF_H(V) \ - __builtin_HEXAGON_V6_vshuffh_128B(V) -#define VSPLAT_H(X) \ - __builtin_HEXAGON_V6_lvsplath_128B(X) -#define VAND_VAL(PRED, VAL) \ - __builtin_HEXAGON_V6_vandvrt_128B(PRED, VAL) -#define VDEAL_H(V) \ - __builtin_HEXAGON_V6_vdealh_128B(V) - int err; /* define the number of rows/cols in a square matrix */ @@ -108,22 +67,22 @@ unsigned short vscatter16_32_ref[SCATTER_BUFFER_SIZE]; unsigned short vgather16_32_ref[MATRIX_SIZE]; /* declare the arrays of offsets */ -unsigned short half_offsets[MATRIX_SIZE]; -unsigned int word_offsets[MATRIX_SIZE]; +unsigned short half_offsets[MATRIX_SIZE] __attribute__((aligned(128))); +unsigned int word_offsets[MATRIX_SIZE] __attribute__((aligned(128))); /* declare the arrays of values */ -unsigned short half_values[MATRIX_SIZE]; -unsigned short half_values_acc[MATRIX_SIZE]; -unsigned short half_values_masked[MATRIX_SIZE]; -unsigned int word_values[MATRIX_SIZE]; -unsigned int word_values_acc[MATRIX_SIZE]; -unsigned int word_values_masked[MATRIX_SIZE]; +unsigned short half_values[MATRIX_SIZE] __attribute__((aligned(128))); +unsigned short half_values_acc[MATRIX_SIZE] __attribute__((aligned(128))); +unsigned short half_values_masked[MATRIX_SIZE] __attribute__((aligned(128))); +unsigned int word_values[MATRIX_SIZE] __attribute__((aligned(128))); +unsigned int word_values_acc[MATRIX_SIZE] 
__attribute__((aligned(128))); +unsigned int word_values_masked[MATRIX_SIZE] __attribute__((aligned(128))); /* declare the arrays of predicates */ -unsigned short half_predicates[MATRIX_SIZE]; -unsigned int word_predicates[MATRIX_SIZE]; +unsigned short half_predicates[MATRIX_SIZE] __attribute__((aligned(128))); +unsigned int word_predicates[MATRIX_SIZE] __attribute__((aligned(128))); -/* make this big enough for all the intrinsics */ +/* make this big enough for all the operations */ const size_t region_len = sizeof(vtcm); /* optionally add sync instructions */ @@ -261,164 +220,201 @@ void create_offsets_values_preds_16_32(void) } } -/* scatter the 16 bit elements using intrinsics */ +/* scatter the 16 bit elements using HVX */ void vector_scatter_16(void) { - /* copy the offsets and values to vectors */ - HVX_Vector offsets = *(HVX_Vector *)half_offsets; - HVX_Vector values = *(HVX_Vector *)half_values; - - VSCATTER_16(&vtcm.vscatter16, region_len, offsets, values); + asm ("m0 = %1\n\t" + "v0 = vmem(%2 + #0)\n\t" + "v1 = vmem(%3 + #0)\n\t" + "vscatter(%0, m0, v0.h).h = v1\n\t" + : : "r"(vtcm.vscatter16), "r"(region_len), + "r"(half_offsets), "r"(half_values) + : "m0", "v0", "v1", "memory"); sync_scatter(vtcm.vscatter16); } -/* scatter-accumulate the 16 bit elements using intrinsics */ +/* scatter-accumulate the 16 bit elements using HVX */ void vector_scatter_16_acc(void) { - /* copy the offsets and values to vectors */ - HVX_Vector offsets = *(HVX_Vector *)half_offsets; - HVX_Vector values = *(HVX_Vector *)half_values_acc; - - VSCATTER_16_ACC(&vtcm.vscatter16, region_len, offsets, values); + asm ("m0 = %1\n\t" + "v0 = vmem(%2 + #0)\n\t" + "v1 = vmem(%3 + #0)\n\t" + "vscatter(%0, m0, v0.h).h += v1\n\t" + : : "r"(vtcm.vscatter16), "r"(region_len), + "r"(half_offsets), "r"(half_values_acc) + : "m0", "v0", "v1", "memory"); sync_scatter(vtcm.vscatter16); } -/* scatter the 16 bit elements using intrinsics */ +/* masked scatter the 16 bit elements using HVX */ void vector_scatter_16_masked(void) { - /* copy the offsets and values to vectors */ - HVX_Vector offsets = *(HVX_Vector *)half_offsets; - HVX_Vector values = *(HVX_Vector *)half_values_masked; - HVX_Vector pred_reg = *(HVX_Vector *)half_predicates; - HVX_VectorPred preds = VAND_VAL(pred_reg, ~0); - - VSCATTER_16_MASKED(preds, &vtcm.vscatter16, region_len, offsets, values); + asm ("r1 = #-1\n\t" + "v0 = vmem(%0 + #0)\n\t" + "q0 = vand(v0, r1)\n\t" + "m0 = %2\n\t" + "v0 = vmem(%3 + #0)\n\t" + "v1 = vmem(%4 + #0)\n\t" + "if (q0) vscatter(%1, m0, v0.h).h = v1\n\t" + : : "r"(half_predicates), "r"(vtcm.vscatter16), "r"(region_len), + "r"(half_offsets), "r"(half_values_masked) + : "r1", "q0", "m0", "q0", "v0", "v1", "memory"); sync_scatter(vtcm.vscatter16); } -/* scatter the 32 bit elements using intrinsics */ +/* scatter the 32 bit elements using HVX */ void vector_scatter_32(void) { - /* copy the offsets and values to vectors */ - HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; - HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; - HVX_Vector valueslo = *(HVX_Vector *)word_values; - HVX_Vector valueshi = *(HVX_Vector *)&word_values[MATRIX_SIZE / 2]; + HVX_Vector *offsetslo = (HVX_Vector *)word_offsets; + HVX_Vector *offsetshi = (HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector *valueslo = (HVX_Vector *)word_values; + HVX_Vector *valueshi = (HVX_Vector *)&word_values[MATRIX_SIZE / 2]; - VSCATTER_32(&vtcm.vscatter32, region_len, offsetslo, valueslo); - VSCATTER_32(&vtcm.vscatter32, region_len, offsetshi, 
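/*
 * What the masked HVX scatter above does, written as scalar C: each lane
 * stores only when its predicate element is non-zero, and the offsets are
 * byte offsets into the VTCM region.  This mirrors the scalar_scatter_*
 * reference functions used later for checking; masked_scatter_16_ref is a
 * hypothetical name.
 */
static void masked_scatter_16_ref(unsigned short *base,
                                  unsigned short *offsets,
                                  unsigned short *values,
                                  unsigned short *preds)
{
    for (int i = 0; i < MATRIX_SIZE; i++) {
        if (preds[i]) {
            *(unsigned short *)((char *)base + offsets[i]) = values[i];
        }
    }
}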
valueshi); + asm ("m0 = %1\n\t" + "v0 = vmem(%2 + #0)\n\t" + "v1 = vmem(%3 + #0)\n\t" + "vscatter(%0, m0, v0.w).w = v1\n\t" + : : "r"(vtcm.vscatter32), "r"(region_len), + "r"(offsetslo), "r"(valueslo) + : "m0", "v0", "v1", "memory"); + asm ("m0 = %1\n\t" + "v0 = vmem(%2 + #0)\n\t" + "v1 = vmem(%3 + #0)\n\t" + "vscatter(%0, m0, v0.w).w = v1\n\t" + : : "r"(vtcm.vscatter32), "r"(region_len), + "r"(offsetshi), "r"(valueshi) + : "m0", "v0", "v1", "memory"); sync_scatter(vtcm.vscatter32); } -/* scatter-acc the 32 bit elements using intrinsics */ +/* scatter-accumulate the 32 bit elements using HVX */ void vector_scatter_32_acc(void) { - /* copy the offsets and values to vectors */ - HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; - HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; - HVX_Vector valueslo = *(HVX_Vector *)word_values_acc; - HVX_Vector valueshi = *(HVX_Vector *)&word_values_acc[MATRIX_SIZE / 2]; + HVX_Vector *offsetslo = (HVX_Vector *)word_offsets; + HVX_Vector *offsetshi = (HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector *valueslo = (HVX_Vector *)word_values_acc; + HVX_Vector *valueshi = (HVX_Vector *)&word_values_acc[MATRIX_SIZE / 2]; - VSCATTER_32_ACC(&vtcm.vscatter32, region_len, offsetslo, valueslo); - VSCATTER_32_ACC(&vtcm.vscatter32, region_len, offsetshi, valueshi); + asm ("m0 = %1\n\t" + "v0 = vmem(%2 + #0)\n\t" + "v1 = vmem(%3 + #0)\n\t" + "vscatter(%0, m0, v0.w).w += v1\n\t" + : : "r"(vtcm.vscatter32), "r"(region_len), + "r"(offsetslo), "r"(valueslo) + : "m0", "v0", "v1", "memory"); + asm ("m0 = %1\n\t" + "v0 = vmem(%2 + #0)\n\t" + "v1 = vmem(%3 + #0)\n\t" + "vscatter(%0, m0, v0.w).w += v1\n\t" + : : "r"(vtcm.vscatter32), "r"(region_len), + "r"(offsetshi), "r"(valueshi) + : "m0", "v0", "v1", "memory"); sync_scatter(vtcm.vscatter32); } -/* scatter the 32 bit elements using intrinsics */ +/* masked scatter the 32 bit elements using HVX */ void vector_scatter_32_masked(void) { - /* copy the offsets and values to vectors */ - HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; - HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; - HVX_Vector valueslo = *(HVX_Vector *)word_values_masked; - HVX_Vector valueshi = *(HVX_Vector *)&word_values_masked[MATRIX_SIZE / 2]; - HVX_Vector pred_reglo = *(HVX_Vector *)word_predicates; - HVX_Vector pred_reghi = *(HVX_Vector *)&word_predicates[MATRIX_SIZE / 2]; - HVX_VectorPred predslo = VAND_VAL(pred_reglo, ~0); - HVX_VectorPred predshi = VAND_VAL(pred_reghi, ~0); + HVX_Vector *offsetslo = (HVX_Vector *)word_offsets; + HVX_Vector *offsetshi = (HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector *valueslo = (HVX_Vector *)word_values_masked; + HVX_Vector *valueshi = (HVX_Vector *)&word_values_masked[MATRIX_SIZE / 2]; + HVX_Vector *predslo = (HVX_Vector *)word_predicates; + HVX_Vector *predshi = (HVX_Vector *)&word_predicates[MATRIX_SIZE / 2]; - VSCATTER_32_MASKED(predslo, &vtcm.vscatter32, region_len, offsetslo, - valueslo); - VSCATTER_32_MASKED(predshi, &vtcm.vscatter32, region_len, offsetshi, - valueshi); + asm ("r1 = #-1\n\t" + "v0 = vmem(%0 + #0)\n\t" + "q0 = vand(v0, r1)\n\t" + "m0 = %2\n\t" + "v0 = vmem(%3 + #0)\n\t" + "v1 = vmem(%4 + #0)\n\t" + "if (q0) vscatter(%1, m0, v0.w).w = v1\n\t" + : : "r"(predslo), "r"(vtcm.vscatter32), "r"(region_len), + "r"(offsetslo), "r"(valueslo) + : "r1", "q0", "m0", "q0", "v0", "v1", "memory"); + asm ("r1 = #-1\n\t" + "v0 = vmem(%0 + #0)\n\t" + "q0 = vand(v0, r1)\n\t" + "m0 = %2\n\t" + "v0 = vmem(%3 + #0)\n\t" + "v1 = vmem(%4 + #0)\n\t" + "if (q0) 
vscatter(%1, m0, v0.w).w = v1\n\t" + : : "r"(predshi), "r"(vtcm.vscatter32), "r"(region_len), + "r"(offsetshi), "r"(valueshi) + : "r1", "q0", "m0", "q0", "v0", "v1", "memory"); - sync_scatter(vtcm.vscatter16); + sync_scatter(vtcm.vscatter32); } -/* scatter the 16 bit elements with 32 bit offsets using intrinsics */ +/* scatter the 16 bit elements with 32 bit offsets using HVX */ void vector_scatter_16_32(void) { - HVX_VectorPair offsets; - HVX_Vector values; - - /* get the word offsets in a vector pair */ - offsets = *(HVX_VectorPair *)word_offsets; - - /* these values need to be shuffled for the scatter */ - values = *(HVX_Vector *)half_values; - values = VSHUFF_H(values); - - VSCATTER_16_32(&vtcm.vscatter16_32, region_len, offsets, values); + asm ("m0 = %1\n\t" + "v0 = vmem(%2 + #0)\n\t" + "v1 = vmem(%2 + #1)\n\t" + "v2 = vmem(%3 + #0)\n\t" + "v2.h = vshuff(v2.h)\n\t" /* shuffle the values for the scatter */ + "vscatter(%0, m0, v1:0.w).h = v2\n\t" + : : "r"(vtcm.vscatter16_32), "r"(region_len), + "r"(word_offsets), "r"(half_values) + : "m0", "v0", "v1", "v2", "memory"); sync_scatter(vtcm.vscatter16_32); } -/* scatter-acc the 16 bit elements with 32 bit offsets using intrinsics */ +/* scatter-accumulate the 16 bit elements with 32 bit offsets using HVX */ void vector_scatter_16_32_acc(void) { - HVX_VectorPair offsets; - HVX_Vector values; - - /* get the word offsets in a vector pair */ - offsets = *(HVX_VectorPair *)word_offsets; - - /* these values need to be shuffled for the scatter */ - values = *(HVX_Vector *)half_values_acc; - values = VSHUFF_H(values); - - VSCATTER_16_32_ACC(&vtcm.vscatter16_32, region_len, offsets, values); + asm ("m0 = %1\n\t" + "v0 = vmem(%2 + #0)\n\t" + "v1 = vmem(%2 + #1)\n\t" + "v2 = vmem(%3 + #0)\n\t" \ + "v2.h = vshuff(v2.h)\n\t" /* shuffle the values for the scatter */ + "vscatter(%0, m0, v1:0.w).h += v2\n\t" + : : "r"(vtcm.vscatter16_32), "r"(region_len), + "r"(word_offsets), "r"(half_values_acc) + : "m0", "v0", "v1", "v2", "memory"); sync_scatter(vtcm.vscatter16_32); } -/* masked scatter the 16 bit elements with 32 bit offsets using intrinsics */ +/* masked scatter the 16 bit elements with 32 bit offsets using HVX */ void vector_scatter_16_32_masked(void) { - HVX_VectorPair offsets; - HVX_Vector values; - HVX_Vector pred_reg; - - /* get the word offsets in a vector pair */ - offsets = *(HVX_VectorPair *)word_offsets; - - /* these values need to be shuffled for the scatter */ - values = *(HVX_Vector *)half_values_masked; - values = VSHUFF_H(values); - - pred_reg = *(HVX_Vector *)half_predicates; - pred_reg = VSHUFF_H(pred_reg); - HVX_VectorPred preds = VAND_VAL(pred_reg, ~0); - - VSCATTER_16_32_MASKED(preds, &vtcm.vscatter16_32, region_len, offsets, - values); + asm ("r1 = #-1\n\t" + "v0 = vmem(%0 + #0)\n\t" + "v0.h = vshuff(v0.h)\n\t" /* shuffle the predicates */ + "q0 = vand(v0, r1)\n\t" + "m0 = %2\n\t" + "v0 = vmem(%3 + #0)\n\t" + "v1 = vmem(%3 + #1)\n\t" + "v2 = vmem(%4 + #0)\n\t" \ + "v2.h = vshuff(v2.h)\n\t" /* shuffle the values for the scatter */ + "if (q0) vscatter(%1, m0, v1:0.w).h = v2\n\t" + : : "r"(half_predicates), "r"(vtcm.vscatter16_32), "r"(region_len), + "r"(word_offsets), "r"(half_values_masked) + : "r1", "q0", "m0", "v0", "v1", "v2", "memory"); sync_scatter(vtcm.vscatter16_32); } -/* gather the elements from the scatter16 buffer */ +/* gather the elements from the scatter16 buffer using HVX */ void vector_gather_16(void) { - HVX_Vector *vgather = (HVX_Vector *)&vtcm.vgather16; - HVX_Vector offsets = *(HVX_Vector *)half_offsets; + asm 
("m0 = %1\n\t" + "v0 = vmem(%2 + #0)\n\t" + "{ vtmp.h = vgather(%0, m0, v0.h).h\n\t" + " vmem(%3 + #0) = vtmp.new }\n\t" + : : "r"(vtcm.vscatter16), "r"(region_len), + "r"(half_offsets), "r"(vtcm.vgather16) + : "m0", "v0", "memory"); - VGATHER_16(vgather, &vtcm.vscatter16, region_len, offsets); - - sync_gather(vgather); + sync_gather(vtcm.vgather16); } static unsigned short gather_16_masked_init(void) @@ -427,31 +423,51 @@ static unsigned short gather_16_masked_init(void) return letter | (letter << 8); } +/* masked gather the elements from the scatter16 buffer using HVX */ void vector_gather_16_masked(void) { - HVX_Vector *vgather = (HVX_Vector *)&vtcm.vgather16; - HVX_Vector offsets = *(HVX_Vector *)half_offsets; - HVX_Vector pred_reg = *(HVX_Vector *)half_predicates; - HVX_VectorPred preds = VAND_VAL(pred_reg, ~0); + unsigned short init = gather_16_masked_init(); - *vgather = VSPLAT_H(gather_16_masked_init()); - VGATHER_16_MASKED(vgather, preds, &vtcm.vscatter16, region_len, offsets); + asm ("v0.h = vsplat(%5)\n\t" + "vmem(%4 + #0) = v0\n\t" /* initialize the write area */ + "r1 = #-1\n\t" + "v0 = vmem(%0 + #0)\n\t" + "q0 = vand(v0, r1)\n\t" + "m0 = %2\n\t" + "v0 = vmem(%3 + #0)\n\t" + "{ if (q0) vtmp.h = vgather(%1, m0, v0.h).h\n\t" + " vmem(%4 + #0) = vtmp.new }\n\t" + : : "r"(half_predicates), "r"(vtcm.vscatter16), "r"(region_len), + "r"(half_offsets), "r"(vtcm.vgather16), "r"(init) + : "r1", "q0", "m0", "v0", "memory"); - sync_gather(vgather); + sync_gather(vtcm.vgather16); } -/* gather the elements from the scatter32 buffer */ +/* gather the elements from the scatter32 buffer using HVX */ void vector_gather_32(void) { - HVX_Vector *vgatherlo = (HVX_Vector *)&vtcm.vgather32; - HVX_Vector *vgatherhi = - (HVX_Vector *)((int)&vtcm.vgather32 + (MATRIX_SIZE * 2)); - HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; - HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector *vgatherlo = (HVX_Vector *)vtcm.vgather32; + HVX_Vector *vgatherhi = (HVX_Vector *)&vtcm.vgather32[MATRIX_SIZE / 2]; + HVX_Vector *offsetslo = (HVX_Vector *)word_offsets; + HVX_Vector *offsetshi = (HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; - VGATHER_32(vgatherlo, &vtcm.vscatter32, region_len, offsetslo); - VGATHER_32(vgatherhi, &vtcm.vscatter32, region_len, offsetshi); + asm ("m0 = %1\n\t" + "v0 = vmem(%2 + #0)\n\t" + "{ vtmp.w = vgather(%0, m0, v0.w).w\n\t" + " vmem(%3 + #0) = vtmp.new }\n\t" + : : "r"(vtcm.vscatter32), "r"(region_len), + "r"(offsetslo), "r"(vgatherlo) + : "m0", "v0", "memory"); + asm ("m0 = %1\n\t" + "v0 = vmem(%2 + #0)\n\t" + "{ vtmp.w = vgather(%0, m0, v0.w).w\n\t" + " vmem(%3 + #0) = vtmp.new }\n\t" + : : "r"(vtcm.vscatter32), "r"(region_len), + "r"(offsetshi), "r"(vgatherhi) + : "m0", "v0", "memory"); + sync_gather(vgatherlo); sync_gather(vgatherhi); } @@ -461,79 +477,88 @@ static unsigned int gather_32_masked_init(void) return letter | (letter << 8) | (letter << 16) | (letter << 24); } +/* masked gather the elements from the scatter32 buffer using HVX */ void vector_gather_32_masked(void) { - HVX_Vector *vgatherlo = (HVX_Vector *)&vtcm.vgather32; - HVX_Vector *vgatherhi = - (HVX_Vector *)((int)&vtcm.vgather32 + (MATRIX_SIZE * 2)); - HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; - HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; - HVX_Vector pred_reglo = *(HVX_Vector *)word_predicates; - HVX_VectorPred predslo = VAND_VAL(pred_reglo, ~0); - HVX_Vector pred_reghi = *(HVX_Vector *)&word_predicates[MATRIX_SIZE / 2]; - HVX_VectorPred predshi = 
VAND_VAL(pred_reghi, ~0); + unsigned int init = gather_32_masked_init(); + HVX_Vector *vgatherlo = (HVX_Vector *)vtcm.vgather32; + HVX_Vector *vgatherhi = (HVX_Vector *)&vtcm.vgather32[MATRIX_SIZE / 2]; + HVX_Vector *offsetslo = (HVX_Vector *)word_offsets; + HVX_Vector *offsetshi = (HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector *predslo = (HVX_Vector *)word_predicates; + HVX_Vector *predshi = (HVX_Vector *)&word_predicates[MATRIX_SIZE / 2]; - *vgatherlo = VSPLAT_H(gather_32_masked_init()); - *vgatherhi = VSPLAT_H(gather_32_masked_init()); - VGATHER_32_MASKED(vgatherlo, predslo, &vtcm.vscatter32, region_len, - offsetslo); - VGATHER_32_MASKED(vgatherhi, predshi, &vtcm.vscatter32, region_len, - offsetshi); + asm ("v0.h = vsplat(%5)\n\t" + "vmem(%4 + #0) = v0\n\t" /* initialize the write area */ + "r1 = #-1\n\t" + "v0 = vmem(%0 + #0)\n\t" + "q0 = vand(v0, r1)\n\t" + "m0 = %2\n\t" + "v0 = vmem(%3 + #0)\n\t" + "{ if (q0) vtmp.w = vgather(%1, m0, v0.w).w\n\t" + " vmem(%4 + #0) = vtmp.new }\n\t" + : : "r"(predslo), "r"(vtcm.vscatter32), "r"(region_len), + "r"(offsetslo), "r"(vgatherlo), "r"(init) + : "r1", "q0", "m0", "v0", "memory"); + asm ("v0.h = vsplat(%5)\n\t" + "vmem(%4 + #0) = v0\n\t" /* initialize the write area */ + "r1 = #-1\n\t" + "v0 = vmem(%0 + #0)\n\t" + "q0 = vand(v0, r1)\n\t" + "m0 = %2\n\t" + "v0 = vmem(%3 + #0)\n\t" + "{ if (q0) vtmp.w = vgather(%1, m0, v0.w).w\n\t" + " vmem(%4 + #0) = vtmp.new }\n\t" + : : "r"(predshi), "r"(vtcm.vscatter32), "r"(region_len), + "r"(offsetshi), "r"(vgatherhi), "r"(init) + : "r1", "q0", "m0", "v0", "memory"); sync_gather(vgatherlo); sync_gather(vgatherhi); } -/* gather the elements from the scatter16_32 buffer */ +/* gather the elements from the scatter16_32 buffer using HVX */ void vector_gather_16_32(void) { - HVX_Vector *vgather; - HVX_VectorPair offsets; - HVX_Vector values; + asm ("m0 = %1\n\t" + "v0 = vmem(%2 + #0)\n\t" + "v1 = vmem(%2 + #1)\n\t" + "{ vtmp.h = vgather(%0, m0, v1:0.w).h\n\t" + " vmem(%3 + #0) = vtmp.new }\n\t" + "v0 = vmem(%3 + #0)\n\t" + "v0.h = vdeal(v0.h)\n\t" /* deal the elements to get the order back */ + "vmem(%3 + #0) = v0\n\t" + : : "r"(vtcm.vscatter16_32), "r"(region_len), + "r"(word_offsets), "r"(vtcm.vgather16_32) + : "m0", "v0", "v1", "memory"); - /* get the vtcm address to gather from */ - vgather = (HVX_Vector *)&vtcm.vgather16_32; - - /* get the word offsets in a vector pair */ - offsets = *(HVX_VectorPair *)word_offsets; - - VGATHER_16_32(vgather, &vtcm.vscatter16_32, region_len, offsets); - - /* deal the elements to get the order back */ - values = *(HVX_Vector *)vgather; - values = VDEAL_H(values); - - /* write it back to vtcm address */ - *(HVX_Vector *)vgather = values; + sync_gather(vtcm.vgather16_32); } +/* masked gather the elements from the scatter16_32 buffer using HVX */ void vector_gather_16_32_masked(void) { - HVX_Vector *vgather; - HVX_VectorPair offsets; - HVX_Vector pred_reg; - HVX_VectorPred preds; - HVX_Vector values; + unsigned short init = gather_16_masked_init(); - /* get the vtcm address to gather from */ - vgather = (HVX_Vector *)&vtcm.vgather16_32; + asm ("v0.h = vsplat(%5)\n\t" + "vmem(%4 + #0) = v0\n\t" /* initialize the write area */ + "r1 = #-1\n\t" + "v0 = vmem(%0 + #0)\n\t" + "v0.h = vshuff(v0.h)\n\t" /* shuffle the predicates */ + "q0 = vand(v0, r1)\n\t" + "m0 = %2\n\t" + "v0 = vmem(%3 + #0)\n\t" + "v1 = vmem(%3 + #1)\n\t" + "{ if (q0) vtmp.h = vgather(%1, m0, v1:0.w).h\n\t" + " vmem(%4 + #0) = vtmp.new }\n\t" + "v0 = vmem(%4 + #0)\n\t" + "v0.h = vdeal(v0.h)\n\t" /* 
deal the elements to get the order back */ + "vmem(%4 + #0) = v0\n\t" + : : "r"(half_predicates), "r"(vtcm.vscatter16_32), "r"(region_len), + "r"(word_offsets), "r"(vtcm.vgather16_32), "r"(init) + : "r1", "q0", "m0", "v0", "v1", "memory"); - /* get the word offsets in a vector pair */ - offsets = *(HVX_VectorPair *)word_offsets; - pred_reg = *(HVX_Vector *)half_predicates; - pred_reg = VSHUFF_H(pred_reg); - preds = VAND_VAL(pred_reg, ~0); - - *vgather = VSPLAT_H(gather_16_masked_init()); - VGATHER_16_32_MASKED(vgather, preds, &vtcm.vscatter16_32, region_len, - offsets); - - /* deal the elements to get the order back */ - values = *(HVX_Vector *)vgather; - values = VDEAL_H(values); - - /* write it back to vtcm address */ - *(HVX_Vector *)vgather = values; + sync_gather(vtcm.vgather16_32); } static void check_buffer(const char *name, void *c, void *r, size_t size) @@ -579,6 +604,7 @@ void scalar_scatter_16_acc(unsigned short *vscatter16) } } +/* scatter-accumulate the 16 bit elements using C */ void check_scatter_16_acc() { memset(vscatter16_ref, FILL_CHAR, @@ -589,7 +615,7 @@ void check_scatter_16_acc() SCATTER_BUFFER_SIZE * sizeof(unsigned short)); } -/* scatter the 16 bit elements using C */ +/* masked scatter the 16 bit elements using C */ void scalar_scatter_16_masked(unsigned short *vscatter16) { for (int i = 0; i < MATRIX_SIZE; i++) { @@ -628,7 +654,7 @@ void check_scatter_32() SCATTER_BUFFER_SIZE * sizeof(unsigned int)); } -/* scatter the 32 bit elements using C */ +/* scatter-accumulate the 32 bit elements using C */ void scalar_scatter_32_acc(unsigned int *vscatter32) { for (int i = 0; i < MATRIX_SIZE; ++i) { @@ -646,7 +672,7 @@ void check_scatter_32_acc() SCATTER_BUFFER_SIZE * sizeof(unsigned int)); } -/* scatter the 32 bit elements using C */ +/* masked scatter the 32 bit elements using C */ void scalar_scatter_32_masked(unsigned int *vscatter32) { for (int i = 0; i < MATRIX_SIZE; i++) { @@ -667,7 +693,7 @@ void check_scatter_32_masked() SCATTER_BUFFER_SIZE * sizeof(unsigned int)); } -/* scatter the 32 bit elements using C */ +/* scatter the 16 bit elements with 32 bit offsets using C */ void scalar_scatter_16_32(unsigned short *vscatter16_32) { for (int i = 0; i < MATRIX_SIZE; ++i) { @@ -684,7 +710,7 @@ void check_scatter_16_32() SCATTER_BUFFER_SIZE * sizeof(unsigned short)); } -/* scatter the 32 bit elements using C */ +/* scatter-accumulate the 16 bit elements with 32 bit offsets using C */ void scalar_scatter_16_32_acc(unsigned short *vscatter16_32) { for (int i = 0; i < MATRIX_SIZE; ++i) { @@ -702,6 +728,7 @@ void check_scatter_16_32_acc() SCATTER_BUFFER_SIZE * sizeof(unsigned short)); } +/* masked scatter the 16 bit elements with 32 bit offsets using C */ void scalar_scatter_16_32_masked(unsigned short *vscatter16_32) { for (int i = 0; i < MATRIX_SIZE; i++) { @@ -738,6 +765,7 @@ void check_gather_16() MATRIX_SIZE * sizeof(unsigned short)); } +/* masked gather the elements from the scatter buffer using C */ void scalar_gather_16_masked(unsigned short *vgather16) { for (int i = 0; i < MATRIX_SIZE; ++i) { @@ -756,7 +784,7 @@ void check_gather_16_masked() MATRIX_SIZE * sizeof(unsigned short)); } -/* gather the elements from the scatter buffer using C */ +/* gather the elements from the scatter32 buffer using C */ void scalar_gather_32(unsigned int *vgather32) { for (int i = 0; i < MATRIX_SIZE; ++i) { @@ -772,6 +800,7 @@ void check_gather_32(void) MATRIX_SIZE * sizeof(unsigned int)); } +/* masked gather the elements from the scatter32 buffer using C */ void 
scalar_gather_32_masked(unsigned int *vgather32) { for (int i = 0; i < MATRIX_SIZE; ++i) { @@ -781,7 +810,6 @@ void scalar_gather_32_masked(unsigned int *vgather32) } } - void check_gather_32_masked(void) { memset(vgather32_ref, gather_32_masked_init(), @@ -791,7 +819,7 @@ void check_gather_32_masked(void) vgather32_ref, MATRIX_SIZE * sizeof(unsigned int)); } -/* gather the elements from the scatter buffer using C */ +/* gather the elements from the scatter16_32 buffer using C */ void scalar_gather_16_32(unsigned short *vgather16_32) { for (int i = 0; i < MATRIX_SIZE; ++i) { @@ -807,6 +835,7 @@ void check_gather_16_32(void) MATRIX_SIZE * sizeof(unsigned short)); } +/* masked gather the elements from the scatter16_32 buffer using C */ void scalar_gather_16_32_masked(unsigned short *vgather16_32) { for (int i = 0; i < MATRIX_SIZE; ++i) { diff --git a/tests/tcg/hexagon/usr.c b/tests/tcg/hexagon/usr.c index 5f68c539dd..92bc86a213 100644 --- a/tests/tcg/hexagon/usr.c +++ b/tests/tcg/hexagon/usr.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2022-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -24,35 +24,7 @@ int err; -static void __check(int line, uint32_t val, uint32_t expect) -{ - if (val != expect) { - printf("ERROR at line %d: %d != %d\n", line, val, expect); - err++; - } -} - -#define check(RES, EXP) __check(__LINE__, RES, EXP) - -static void __check32(int line, uint32_t val, uint32_t expect) -{ - if (val != expect) { - printf("ERROR at line %d: 0x%08x != 0x%08x\n", line, val, expect); - err++; - } -} - -#define check32(RES, EXP) __check32(__LINE__, RES, EXP) - -static void __check64(int line, uint64_t val, uint64_t expect) -{ - if (val != expect) { - printf("ERROR at line %d: 0x%016llx != 0x%016llx\n", line, val, expect); - err++; - } -} - -#define check64(RES, EXP) __check64(__LINE__, RES, EXP) +#include "hex_test.h" /* * Some of the instructions tested are only available on certain versions @@ -61,53 +33,6 @@ static void __check64(int line, uint64_t val, uint64_t expect) #define CORE_HAS_AUDIO (__HEXAGON_ARCH__ >= 67 && defined(__HEXAGON_AUDIO__)) #define CORE_IS_V67 (__HEXAGON_ARCH__ >= 67) -/* Define the bits in Hexagon USR register */ -#define USR_OVF_BIT 0 /* Sticky saturation overflow */ -#define USR_FPINVF_BIT 1 /* IEEE FP invalid sticky flag */ -#define USR_FPDBZF_BIT 2 /* IEEE FP divide-by-zero sticky flag */ -#define USR_FPOVFF_BIT 3 /* IEEE FP overflow sticky flag */ -#define USR_FPUNFF_BIT 4 /* IEEE FP underflow sticky flag */ -#define USR_FPINPF_BIT 5 /* IEEE FP inexact sticky flag */ - -/* Corresponding values in USR */ -#define USR_CLEAR 0 -#define USR_OVF (1 << USR_OVF_BIT) -#define USR_FPINVF (1 << USR_FPINVF_BIT) -#define USR_FPDBZF (1 << USR_FPDBZF_BIT) -#define USR_FPOVFF (1 << USR_FPOVFF_BIT) -#define USR_FPUNFF (1 << USR_FPUNFF_BIT) -#define USR_FPINPF (1 << USR_FPINPF_BIT) - -/* Some useful floating point values */ -const uint32_t SF_INF = 0x7f800000; -const uint32_t SF_QNaN = 0x7fc00000; -const uint32_t SF_SNaN = 0x7fb00000; -const uint32_t SF_QNaN_neg = 0xffc00000; -const uint32_t SF_SNaN_neg = 0xffb00000; -const uint32_t SF_HEX_NaN = 0xffffffff; -const uint32_t SF_zero = 0x00000000; -const uint32_t SF_zero_neg = 0x80000000; -const uint32_t SF_one = 0x3f800000; -const uint32_t SF_one_recip = 0x3f7f0001; /* 0.9960... 
*/ -const uint32_t SF_one_invsqrta = 0x3f7f0000; /* 0.99609375 */ -const uint32_t SF_two = 0x40000000; -const uint32_t SF_four = 0x40800000; -const uint32_t SF_small_neg = 0xab98fba8; -const uint32_t SF_large_pos = 0x5afa572e; - -const uint64_t DF_QNaN = 0x7ff8000000000000ULL; -const uint64_t DF_SNaN = 0x7ff7000000000000ULL; -const uint64_t DF_QNaN_neg = 0xfff8000000000000ULL; -const uint64_t DF_SNaN_neg = 0xfff7000000000000ULL; -const uint64_t DF_HEX_NaN = 0xffffffffffffffffULL; -const uint64_t DF_zero = 0x0000000000000000ULL; -const uint64_t DF_zero_neg = 0x8000000000000000ULL; -const uint64_t DF_any = 0x3f80000000000000ULL; -const uint64_t DF_one = 0x3ff0000000000000ULL; -const uint64_t DF_one_hh = 0x3ff001ff80000000ULL; /* 1.00048... */ -const uint64_t DF_small_neg = 0xbd731f7500000000ULL; -const uint64_t DF_large_pos = 0x7f80000000000001ULL; - /* * Templates for functions to execute an instruction * @@ -122,12 +47,6 @@ const uint64_t DF_large_pos = 0x7f80000000000001ULL; * Xx read/write */ -/* Clear bits 0-5 in USR */ -#define CLEAR_USRBITS \ - "r2 = usr\n\t" \ - "r2 = and(r2, #0xffffffc0)\n\t" \ - "usr = r2\n\t" - /* Template for instructions with one register operand */ #define FUNC_x_OP_x(RESTYPE, SRCTYPE, NAME, INSN) \ static RESTYPE NAME(SRCTYPE src, uint32_t *usr_result) \ @@ -508,7 +427,7 @@ FUNC_Rp_OP_R(sfinvsqrta, "%0, p2 = sfinvsqrta(%3)") uint32_t usr_result; \ result = FUNC(src, &usr_result); \ CHECKFN(result, RES); \ - check(usr_result, USR_RES); \ + check32(usr_result, USR_RES); \ } while (0) #define TEST_R_OP_R(FUNC, SRC, RES, USR_RES) \ @@ -532,8 +451,8 @@ TEST_x_OP_x(uint64_t, check64, uint32_t, FUNC, SRC, RES, USR_RES) uint32_t usr_result; \ result = FUNC(src, &pred_result, &usr_result); \ CHECKFN(result, RES); \ - check(pred_result, PRED_RES); \ - check(usr_result, USR_RES); \ + check32(pred_result, PRED_RES); \ + check32(usr_result, USR_RES); \ } while (0) #define TEST_Rp_OP_R(FUNC, SRC, RES, PRED_RES, USR_RES) \ @@ -548,7 +467,7 @@ TEST_xp_OP_x(uint32_t, check32, uint32_t, FUNC, SRC, RES, PRED_RES, USR_RES) uint32_t usr_result; \ result = FUNC(src1, src2, &usr_result); \ CHECKFN(result, RES); \ - check(usr_result, USR_RES); \ + check32(usr_result, USR_RES); \ } while (0) #define TEST_P_OP_PP(FUNC, SRC1, SRC2, RES, USR_RES) \ @@ -585,8 +504,8 @@ TEST_x_OP_xx(uint64_t, check64, uint64_t, uint32_t, \ uint32_t usr_result; \ result = FUNC(src1, src2, &pred_result, &usr_result); \ CHECKFN(result, RES); \ - check(pred_result, PRED_RES); \ - check(usr_result, USR_RES); \ + check32(pred_result, PRED_RES); \ + check32(usr_result, USR_RES); \ } while (0) #define TEST_Rp_OP_RR(FUNC, SRC1, SRC2, RES, PRED_RES, USR_RES) \ @@ -602,7 +521,7 @@ TEST_xp_OP_xx(uint32_t, check32, uint32_t, uint32_t, FUNC, SRC1, SRC2, \ uint32_t usr_result; \ result = FUNC(src1, src2, &usr_result); \ CHECKFN(result, RES); \ - check(usr_result, USR_RES); \ + check32(usr_result, USR_RES); \ } while (0) #define TEST_R_OP_RI(FUNC, SRC1, SRC2, RES, USR_RES) \ @@ -622,7 +541,7 @@ TEST_x_OP_xI(uint32_t, check64, uint64_t, \ uint32_t usr_result; \ result = FUNC(result, src1, src2, &usr_result); \ CHECKFN(result, RES); \ - check(usr_result, USR_RES); \ + check32(usr_result, USR_RES); \ } while (0) #define TEST_XR_OP_RR(FUNC, RESIN, SRC1, SRC2, RES, USR_RES) \ @@ -647,7 +566,7 @@ TEST_Xx_OP_xx(uint64_t, check64, uint32_t, uint32_t, \ uint32_t usr_result; \ result = FUNC(result, src1, src2, &pred_res, &usr_result); \ CHECKFN(result, RES); \ - check(usr_result, USR_RES); \ + check32(usr_result, USR_RES); \ } 
while (0) #define TEST_XPp_OP_PP(FUNC, RESIN, SRC1, SRC2, RES, PRED_RES, USR_RES) \ @@ -664,7 +583,7 @@ TEST_Xxp_OP_xx(uint64_t, check64, uint64_t, uint64_t, FUNC, RESIN, SRC1, SRC2, \ uint32_t usr_result; \ result = FUNC(result, src1, src2, pred, &usr_result); \ CHECKFN(result, RES); \ - check(usr_result, USR_RES); \ + check32(usr_result, USR_RES); \ } while (0) #define TEST_XR_OP_RRp(FUNC, RESIN, SRC1, SRC2, PRED, RES, USR_RES) \ @@ -679,8 +598,8 @@ TEST_Xx_OP_xxp(uint32_t, check32, uint32_t, uint32_t, \ SRC2TYPE src2 = SRC2; \ uint32_t usr_result; \ result = FUNC(src1, src2, &usr_result); \ - check(result, RES); \ - check(usr_result, USR_RES); \ + check32(result, RES); \ + check32(usr_result, USR_RES); \ } while (0) #define TEST_CMP_RR(FUNC, SRC1, SRC2, RES, USR_RES) \ diff --git a/tests/tcg/hexagon/v68_hvx.c b/tests/tcg/hexagon/v68_hvx.c new file mode 100644 index 0000000000..02718722a3 --- /dev/null +++ b/tests/tcg/hexagon/v68_hvx.c @@ -0,0 +1,90 @@ +/* + * Copyright(c) 2022-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include +#include +#include +#include +#include + +int err; + +#include "hvx_misc.h" + +MMVector v6mpy_buffer0[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector v6mpy_buffer1[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); + +static void init_v6mpy_buffers(void) +{ + int counter0 = 0; + int counter1 = 17; + for (int i = 0; i < BUFSIZE; i++) { + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + v6mpy_buffer0[i].w[j] = counter0++; + v6mpy_buffer1[i].w[j] = counter1++; + } + } +} + +int v6mpy_ref[BUFSIZE][MAX_VEC_SIZE_BYTES / 4] = { +#include "v6mpy_ref.c.inc" +}; + +static void test_v6mpy(void) +{ + void *p00 = buffer0; + void *p01 = v6mpy_buffer0; + void *p10 = buffer1; + void *p11 = v6mpy_buffer1; + void *pout = output; + + memset(expect, 0xff, sizeof(expect)); + memset(output, 0xff, sizeof(expect)); + + for (int i = 0; i < BUFSIZE; i++) { + asm("v2 = vmem(%0 + #0)\n\t" + "v3 = vmem(%1 + #0)\n\t" + "v4 = vmem(%2 + #0)\n\t" + "v5 = vmem(%3 + #0)\n\t" + "v5:4.w = v6mpy(v5:4.ub, v3:2.b, #1):v\n\t" + "vmem(%4 + #0) = v4\n\t" + : : "r"(p00), "r"(p01), "r"(p10), "r"(p11), "r"(pout) + : "v2", "v3", "v4", "v5", "memory"); + p00 += sizeof(MMVector); + p01 += sizeof(MMVector); + p10 += sizeof(MMVector); + p11 += sizeof(MMVector); + pout += sizeof(MMVector); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + expect[i].w[j] = v6mpy_ref[i][j]; + } + } + + check_output_w(__LINE__, BUFSIZE); +} + +int main() +{ + init_buffers(); + init_v6mpy_buffers(); + + test_v6mpy(); + + puts(err ? "FAIL" : "PASS"); + return err ? 1 : 0; +} diff --git a/tests/tcg/hexagon/v68_scalar.c b/tests/tcg/hexagon/v68_scalar.c new file mode 100644 index 0000000000..7a8adb1130 --- /dev/null +++ b/tests/tcg/hexagon/v68_scalar.c @@ -0,0 +1,186 @@ +/* + * Copyright(c) 2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+/*
+ * Test the scalar core instructions that are new in v68
+ */
+
+int err;
+
+static int buffer32[] = { 1, 2, 3, 4 };
+static long long buffer64[] = { 5, 6, 7, 8 };
+
+static void __check32(int line, uint32_t result, uint32_t expect)
+{
+    if (result != expect) {
+        printf("ERROR at line %d: 0x%08x != 0x%08x\n",
+               line, result, expect);
+        err++;
+    }
+}
+
+#define check32(RES, EXP) __check32(__LINE__, RES, EXP)
+
+static void __check64(int line, uint64_t result, uint64_t expect)
+{
+    if (result != expect) {
+        printf("ERROR at line %d: 0x%016llx != 0x%016llx\n",
+               line, result, expect);
+        err++;
+    }
+}
+
+#define check64(RES, EXP) __check64(__LINE__, RES, EXP)
+
+static inline int loadw_aq(int *p)
+{
+    int res;
+    asm volatile("%0 = memw_aq(%1)\n\t"
+                 : "=r"(res) : "r"(p));
+    return res;
+}
+
+static void test_loadw_aq(void)
+{
+    int res;
+
+    res = loadw_aq(&buffer32[0]);
+    check32(res, 1);
+    res = loadw_aq(&buffer32[1]);
+    check32(res, 2);
+}
+
+static inline long long loadd_aq(long long *p)
+{
+    long long res;
+    asm volatile("%0 = memd_aq(%1)\n\t"
+                 : "=r"(res) : "r"(p));
+    return res;
+}
+
+static void test_loadd_aq(void)
+{
+    long long res;
+
+    res = loadd_aq(&buffer64[2]);
+    check64(res, 7);
+    res = loadd_aq(&buffer64[3]);
+    check64(res, 8);
+}
+
+static inline void release_at(int *p)
+{
+    asm volatile("release(%0):at\n\t"
+                 : : "r"(p));
+}
+
+static void test_release_at(void)
+{
+    release_at(&buffer32[2]);
+    check64(buffer32[2], 3);
+    release_at(&buffer32[3]);
+    check64(buffer32[3], 4);
+}
+
+static inline void release_st(int *p)
+{
+    asm volatile("release(%0):st\n\t"
+                 : : "r"(p));
+}
+
+static void test_release_st(void)
+{
+    release_st(&buffer32[2]);
+    check64(buffer32[2], 3);
+    release_st(&buffer32[3]);
+    check64(buffer32[3], 4);
+}
+
+static inline void storew_rl_at(int *p, int val)
+{
+    asm volatile("memw_rl(%0):at = %1\n\t"
+                 : : "r"(p), "r"(val) : "memory");
+}
+
+static void test_storew_rl_at(void)
+{
+    storew_rl_at(&buffer32[2], 9);
+    check64(buffer32[2], 9);
+    storew_rl_at(&buffer32[3], 10);
+    check64(buffer32[3], 10);
+}
+
+static inline void stored_rl_at(long long *p, long long val)
+{
+    asm volatile("memd_rl(%0):at = %1\n\t"
+                 : : "r"(p), "r"(val) : "memory");
+}
+
+static void test_stored_rl_at(void)
+{
+    stored_rl_at(&buffer64[2], 11);
+    check64(buffer64[2], 11);
+    stored_rl_at(&buffer64[3], 12);
+    check64(buffer64[3], 12);
+}
+
+static inline void storew_rl_st(int *p, int val)
+{
+    asm volatile("memw_rl(%0):st = %1\n\t"
+                 : : "r"(p), "r"(val) : "memory");
+}
+
+static void test_storew_rl_st(void)
+{
+    storew_rl_st(&buffer32[0], 13);
+    check64(buffer32[0], 13);
+    storew_rl_st(&buffer32[1], 14);
+    check64(buffer32[1], 14);
+}
+
+static inline void stored_rl_st(long long *p, long long val)
+{
+    asm volatile("memd_rl(%0):st = %1\n\t"
+                 : : "r"(p), "r"(val) : "memory");
+}
+
+static void test_stored_rl_st(void)
+{
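+    /* The released doubleword must be immediately visible in memory. */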
+    stored_rl_st(&buffer64[0], 15);
+    check64(buffer64[0], 15);
+    stored_rl_st(&buffer64[1], 15);
+    check64(buffer64[1], 15);
+}
+
+int main()
+{
+    test_loadw_aq();
+    test_loadd_aq();
+    test_release_at();
+    test_release_st();
+    test_storew_rl_at();
+    test_stored_rl_at();
+    test_storew_rl_st();
+    test_stored_rl_st();
+
+    puts(err ? "FAIL" : "PASS");
+    return err ? 1 : 0;
+}
diff --git a/tests/tcg/hexagon/v69_hvx.c b/tests/tcg/hexagon/v69_hvx.c
new file mode 100644
index 0000000000..a0d567d142
--- /dev/null
+++ b/tests/tcg/hexagon/v69_hvx.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright(c) 2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <limits.h>
+
+int err;
+
+#include "hvx_misc.h"
+
+#define fVROUND(VAL, SHAMT) \
+    ((VAL) + (((SHAMT) > 0) ? (1LL << ((SHAMT) - 1)) : 0))
+
+#define fVSATUB(VAL) \
+    ((((VAL) & 0xffLL) == (VAL)) ? \
+        (VAL) : \
+        ((((int32_t)(VAL)) < 0) ? 0 : 0xff))
+
+#define fVSATUH(VAL) \
+    ((((VAL) & 0xffffLL) == (VAL)) ? \
+        (VAL) : \
+        ((((int32_t)(VAL)) < 0) ? 0 : 0xffff))
+
+static void test_vasrvuhubrndsat(void)
+{
+    void *p0 = buffer0;
+    void *p1 = buffer1;
+    void *pout = output;
+
+    memset(expect, 0xaa, sizeof(expect));
+    memset(output, 0xbb, sizeof(output));
+
+    for (int i = 0; i < BUFSIZE / 2; i++) {
+        asm("v4 = vmem(%0 + #0)\n\t"
+            "v5 = vmem(%0 + #1)\n\t"
+            "v6 = vmem(%1 + #0)\n\t"
+            "v5.ub = vasr(v5:4.uh, v6.ub):rnd:sat\n\t"
+            "vmem(%2) = v5\n\t"
+            : : "r"(p0), "r"(p1), "r"(pout)
+            : "v4", "v5", "v6", "memory");
+        p0 += sizeof(MMVector) * 2;
+        p1 += sizeof(MMVector);
+        pout += sizeof(MMVector);
+
+        for (int j = 0; j < MAX_VEC_SIZE_BYTES / 2; j++) {
+            int shamt;
+            uint8_t byte0;
+            uint8_t byte1;
+
+            shamt = buffer1[i].ub[2 * j + 0] & 0x7;
+            byte0 = fVSATUB(fVROUND(buffer0[2 * i + 0].uh[j], shamt) >> shamt);
+            shamt = buffer1[i].ub[2 * j + 1] & 0x7;
+            byte1 = fVSATUB(fVROUND(buffer0[2 * i + 1].uh[j], shamt) >> shamt);
+            expect[i].uh[j] = (byte1 << 8) | (byte0 & 0xff);
+        }
+    }
+
+    check_output_h(__LINE__, BUFSIZE / 2);
+}
+
+static void test_vasrvuhubsat(void)
+{
+    void *p0 = buffer0;
+    void *p1 = buffer1;
+    void *pout = output;
+
+    memset(expect, 0xaa, sizeof(expect));
+    memset(output, 0xbb, sizeof(output));
+
+    for (int i = 0; i < BUFSIZE / 2; i++) {
+        asm("v4 = vmem(%0 + #0)\n\t"
+            "v5 = vmem(%0 + #1)\n\t"
+            "v6 = vmem(%1 + #0)\n\t"
+            "v5.ub = vasr(v5:4.uh, v6.ub):sat\n\t"
+            "vmem(%2) = v5\n\t"
+            : : "r"(p0), "r"(p1), "r"(pout)
+            : "v4", "v5", "v6", "memory");
+        p0 += sizeof(MMVector) * 2;
+        p1 += sizeof(MMVector);
+        pout += sizeof(MMVector);
+
+        for (int j = 0; j < MAX_VEC_SIZE_BYTES / 2; j++) {
+            int shamt;
+            uint8_t byte0;
+            uint8_t byte1;
+
+            shamt = buffer1[i].ub[2 * j + 0] & 0x7;
+            byte0 = fVSATUB(buffer0[2 * i + 0].uh[j] >> shamt);
+            shamt = buffer1[i].ub[2 * j + 1] & 0x7;
+            byte1 = fVSATUB(buffer0[2 * i + 1].uh[j] >> shamt);
+            expect[i].uh[j] = (byte1 << 8) | (byte0 & 0xff);
+        }
+    }
+
+
check_output_h(__LINE__, BUFSIZE / 2); +} + +static void test_vasrvwuhrndsat(void) +{ + void *p0 = buffer0; + void *p1 = buffer1; + void *pout = output; + + memset(expect, 0xaa, sizeof(expect)); + memset(output, 0xbb, sizeof(output)); + + for (int i = 0; i < BUFSIZE / 2; i++) { + asm("v4 = vmem(%0 + #0)\n\t" + "v5 = vmem(%0 + #1)\n\t" + "v6 = vmem(%1 + #0)\n\t" + "v5.uh = vasr(v5:4.w, v6.uh):rnd:sat\n\t" + "vmem(%2) = v5\n\t" + : : "r"(p0), "r"(p1), "r"(pout) + : "v4", "v5", "v6", "memory"); + p0 += sizeof(MMVector) * 2; + p1 += sizeof(MMVector); + pout += sizeof(MMVector); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + int shamt; + uint16_t half0; + uint16_t half1; + + shamt = buffer1[i].uh[2 * j + 0] & 0xf; + half0 = fVSATUH(fVROUND(buffer0[2 * i + 0].w[j], shamt) >> shamt); + shamt = buffer1[i].uh[2 * j + 1] & 0xf; + half1 = fVSATUH(fVROUND(buffer0[2 * i + 1].w[j], shamt) >> shamt); + expect[i].w[j] = (half1 << 16) | (half0 & 0xffff); + } + } + + check_output_w(__LINE__, BUFSIZE / 2); +} + +static void test_vasrvwuhsat(void) +{ + void *p0 = buffer0; + void *p1 = buffer1; + void *pout = output; + + memset(expect, 0xaa, sizeof(expect)); + memset(output, 0xbb, sizeof(output)); + + for (int i = 0; i < BUFSIZE / 2; i++) { + asm("v4 = vmem(%0 + #0)\n\t" + "v5 = vmem(%0 + #1)\n\t" + "v6 = vmem(%1 + #0)\n\t" + "v5.uh = vasr(v5:4.w, v6.uh):sat\n\t" + "vmem(%2) = v5\n\t" + : : "r"(p0), "r"(p1), "r"(pout) + : "v4", "v5", "v6", "memory"); + p0 += sizeof(MMVector) * 2; + p1 += sizeof(MMVector); + pout += sizeof(MMVector); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + int shamt; + uint16_t half0; + uint16_t half1; + + shamt = buffer1[i].uh[2 * j + 0] & 0xf; + half0 = fVSATUH(buffer0[2 * i + 0].w[j] >> shamt); + shamt = buffer1[i].uh[2 * j + 1] & 0xf; + half1 = fVSATUH(buffer0[2 * i + 1].w[j] >> shamt); + expect[i].w[j] = (half1 << 16) | (half0 & 0xffff); + } + } + + check_output_w(__LINE__, BUFSIZE / 2); +} + +static void test_vassign_tmp(void) +{ + void *p0 = buffer0; + void *pout = output; + + memset(expect, 0xaa, sizeof(expect)); + memset(output, 0xbb, sizeof(output)); + + for (int i = 0; i < BUFSIZE; i++) { + /* + * Assign into v12 as .tmp, then use it in the next packet + * Should get the new value within the same packet and + * the old value in the next packet + */ + asm("v3 = vmem(%0 + #0)\n\t" + "r1 = #1\n\t" + "v12 = vsplat(r1)\n\t" + "r1 = #2\n\t" + "v13 = vsplat(r1)\n\t" + "{\n\t" + " v12.tmp = v13\n\t" + " v4.w = vadd(v12.w, v3.w)\n\t" + "}\n\t" + "v4.w = vadd(v4.w, v12.w)\n\t" + "vmem(%1 + #0) = v4\n\t" + : : "r"(p0), "r"(pout) + : "r1", "v3", "v4", "v12", "v13", "memory"); + p0 += sizeof(MMVector); + pout += sizeof(MMVector); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + expect[i].w[j] = buffer0[i].w[j] + 3; + } + } + + check_output_w(__LINE__, BUFSIZE); +} + +static void test_vcombine_tmp(void) +{ + void *p0 = buffer0; + void *p1 = buffer1; + void *pout = output; + + memset(expect, 0xaa, sizeof(expect)); + memset(output, 0xbb, sizeof(output)); + + for (int i = 0; i < BUFSIZE; i++) { + /* + * Combine into v13:12 as .tmp, then use it in the next packet + * Should get the new value within the same packet and + * the old value in the next packet + */ + asm("v3 = vmem(%0 + #0)\n\t" + "r1 = #1\n\t" + "v12 = vsplat(r1)\n\t" + "r1 = #2\n\t" + "v13 = vsplat(r1)\n\t" + "r1 = #3\n\t" + "v14 = vsplat(r1)\n\t" + "r1 = #4\n\t" + "v15 = vsplat(r1)\n\t" + "{\n\t" + " v13:12.tmp = vcombine(v15, v14)\n\t" + " v4.w = vadd(v12.w, v3.w)\n\t" + " v16 = v13\n\t" + "}\n\t" + 
"v4.w = vadd(v4.w, v12.w)\n\t" + "v4.w = vadd(v4.w, v13.w)\n\t" + "v4.w = vadd(v4.w, v16.w)\n\t" + "vmem(%2 + #0) = v4\n\t" + : : "r"(p0), "r"(p1), "r"(pout) + : "r1", "v3", "v4", "v12", "v13", "v14", "v15", "v16", "memory"); + p0 += sizeof(MMVector); + p1 += sizeof(MMVector); + pout += sizeof(MMVector); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + expect[i].w[j] = buffer0[i].w[j] + 10; + } + } + + check_output_w(__LINE__, BUFSIZE); +} + +static void test_vmpyuhvs(void) +{ + void *p0 = buffer0; + void *p1 = buffer1; + void *pout = output; + + memset(expect, 0xaa, sizeof(expect)); + memset(output, 0xbb, sizeof(output)); + + for (int i = 0; i < BUFSIZE; i++) { + asm("v4 = vmem(%0 + #0)\n\t" + "v5 = vmem(%1 + #0)\n\t" + "v4.uh = vmpy(V4.uh, v5.uh):>>16\n\t" + "vmem(%2) = v4\n\t" + : : "r"(p0), "r"(p1), "r"(pout) + : "v4", "v5", "memory"); + p0 += sizeof(MMVector); + p1 += sizeof(MMVector); + pout += sizeof(MMVector); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 2; j++) { + expect[i].uh[j] = (buffer0[i].uh[j] * buffer1[i].uh[j]) >> 16; + } + } + + check_output_h(__LINE__, BUFSIZE); +} + +int main() +{ + init_buffers(); + + test_vasrvuhubrndsat(); + test_vasrvuhubsat(); + test_vasrvwuhrndsat(); + test_vasrvwuhsat(); + + test_vassign_tmp(); + test_vcombine_tmp(); + + test_vmpyuhvs(); + + puts(err ? "FAIL" : "PASS"); + return err ? 1 : 0; +} diff --git a/tests/tcg/hexagon/v6mpy_ref.c.inc b/tests/tcg/hexagon/v6mpy_ref.c.inc new file mode 100644 index 0000000000..8258cddcb1 --- /dev/null +++ b/tests/tcg/hexagon/v6mpy_ref.c.inc @@ -0,0 +1,161 @@ +/* + * Copyright(c) 2021-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +{ 0xffffee11, 0xfffffcca, 0xffffc1b3, 0xffffd0cc, + 0xffffe215, 0xfffff58e, 0xffffaf37, 0xffffc310, + 0xffffd919, 0xfffff152, 0xffff9fbb, 0xffffb854, + 0xffffd31d, 0xfffff016, 0xffff933f, 0xffffb098, + 0xffffd021, 0xfffff1da, 0xffff89c3, 0xffffabdc, + 0xffffd025, 0xfffff69e, 0xffff8347, 0xffffaa20, + 0xffffd329, 0xfffffe62, 0xffff7fcb, 0xffffab64, + 0xffffd92d, 0x00000926, 0xffff7f4f, 0xffffafa8, + }, +{ 0xffffe231, 0x000016ea, 0xffff81d3, 0xffffb6ec, + 0xffffee35, 0x000027ae, 0xffff8757, 0xffffc130, + 0xfffffd39, 0x00003b72, 0xffff8fdb, 0xffffce74, + 0x00000f3d, 0x00005236, 0xffff9b5f, 0xffffdeb8, + 0x00002441, 0x00006bfa, 0xffffa9e3, 0xfffff1fc, + 0x00003c45, 0x000088be, 0xffffbb67, 0x00000840, + 0x00005749, 0x0000a882, 0xffffcfeb, 0xffffe684, + 0x0000494d, 0x00009a46, 0xffffb16f, 0x000002c8, + }, +{ 0xfffff351, 0x0000440a, 0xffff4af3, 0xffff9c0c, + 0xffffef55, 0x000044ce, 0xffff4077, 0xffff9650, + 0xffffee59, 0x00004892, 0xffff38fb, 0xffff9394, + 0xfffff05d, 0x00004f56, 0xffff347f, 0xffff93d8, + 0xfffff561, 0x0000591a, 0xffff3303, 0xffff971c, + 0xfffffd65, 0x000065de, 0xffff3487, 0xffff9d60, + 0x00000869, 0x000075a2, 0xffff390b, 0xffffa6a4, + 0x0000166d, 0x00008866, 0xffff408f, 0xffffb2e8, + }, +{ 0x00002771, 0x00009e2a, 0xffff4b13, 0xffffc22c, + 0x00003b75, 0x0000b6ee, 0xffff5897, 0xffffd470, + 0x00005279, 0x0000d2b2, 0xffff691b, 0xffffe9b4, + 0x00006c7d, 0x0000f176, 0xffff7c9f, 0x000001f8, + 0x00008981, 0x0001133a, 0xffff9323, 0x00001d3c, + 0x0000a985, 0x000137fe, 0xffffaca7, 0x00003b80, + 0x0000cc89, 0x00015fc2, 0xffffc92b, 0xffffe1c4, + 0x0000868d, 0x00011986, 0xffff72af, 0x00000608, + }, +{ 0xfffff891, 0x00008b4a, 0xfffed433, 0xffff674c, + 0xfffffc95, 0x0000940e, 0xfffed1b7, 0xffff6990, + 0x00000399, 0x00009fd2, 0xfffed23b, 0xffff6ed4, + 0x00000d9d, 0x0000ae96, 0xfffed5bf, 0xffff7718, + 0x00001aa1, 0x0000c05a, 0xfffedc43, 0xffff825c, + 0x00002aa5, 0x0000d51e, 0xfffee5c7, 0xffff90a0, + 0x00003da9, 0x0000ece2, 0xfffef24b, 0xffffa1e4, + 0x000053ad, 0x000107a6, 0xffff01cf, 0xffffb628, + }, +{ 0x00006cb1, 0x0001256a, 0xffff1453, 0xffffcd6c, + 0x000088b5, 0x0001462e, 0xffff29d7, 0xffffe7b0, + 0x0000a7b9, 0x000169f2, 0xffff425b, 0x000004f4, + 0x0000c9bd, 0x000190b6, 0xffff5ddf, 0x00002538, + 0x0000eec1, 0x0001ba7a, 0xffff7c63, 0x0000487c, + 0x000116c5, 0x0001e73e, 0xffff9de7, 0x00006ec0, + 0x000141c9, 0x00021702, 0xffffc26b, 0xffffdd04, + 0x0000c3cd, 0x000198c6, 0xffff33ef, 0x00000948, + }, +{ 0xfffffdd1, 0x0000d28a, 0xfffe5d73, 0xffff328c, + 0x000009d5, 0x0000e34e, 0xfffe62f7, 0xffff3cd0, + 0x000018d9, 0x0000f712, 0xfffe6b7b, 0xffff4a14, + 0x00002add, 0x00010dd6, 0xfffe76ff, 0xffff5a58, + 0x00003fe1, 0x0001279a, 0xfffe8583, 0xffff6d9c, + 0x000057e5, 0x0001445e, 0xfffe9707, 0xffff83e0, + 0x000072e9, 0x00016422, 0xfffeab8b, 0xffff9d24, + 0x000090ed, 0x000186e6, 0xfffec30f, 0xffffb968, + }, +{ 0x0000b1f1, 0x0001acaa, 0xfffedd93, 0xffffd8ac, + 0x0000d5f5, 0x0001d56e, 0xfffefb17, 0xfffffaf0, + 0x0000fcf9, 0x00020132, 0xffff1b9b, 0x00002034, + 0x000126fd, 0x00022ff6, 0xffff3f1f, 0x00008b36, + 0x000093c3, 0x00009d80, 0x00009d6d, 0x0000a78a, + 0x0000b4d7, 0x0000c354, 0x0000b801, 0x0000c6de, + 0x0000d4eb, 0x0000e828, 0x0000d195, 0xffffea32, + 0x00000fff, 0x000022fc, 0xfffffc29, 0x00000f86, + }, +{ 0xffffee13, 0xfffffcd0, 0xffffc1bd, 0xffffd0da, + 0xffffe327, 0xfffff6a4, 0xffffb051, 0xffffc42e, + 0xffffd73b, 0xffffef78, 0xffff9de5, 0xffffb682, + 0xffffd24f, 0xffffef4c, 0xffff9279, 0xffffafd6, + 0xffffd063, 0xfffff220, 0xffff8a0d, 0xffffac2a, + 0xffffd177, 0xfffff7f4, 0xffff84a1, 
0xffffab7e, + 0xffffd18b, 0xfffffcc8, 0xffff7e35, 0xffffa9d2, + 0xffffd89f, 0x0000089c, 0xffff7ec9, 0xffffaf26, + }, +{ 0xffffe2b3, 0x00001770, 0xffff825d, 0xffffb77a, + 0xffffefc7, 0x00002944, 0xffff88f1, 0xffffc2ce, + 0xfffffbdb, 0x00003a18, 0xffff8e85, 0xffffcd22, + 0x00000eef, 0x000051ec, 0xffff9b19, 0xffffde76, + 0x00002503, 0x00006cc0, 0xffffaaad, 0xfffff2ca, + 0x00003e17, 0x00008a94, 0xffffbd41, 0x00000a1e, + 0x0000562b, 0x0000a768, 0xffffced5, 0xffffe572, + 0x0000493f, 0x00009a3c, 0xffffb169, 0x000002c6, + }, +{ 0xfffff353, 0x00004410, 0xffff4afd, 0xffff9c1a, + 0xfffff067, 0x000045e4, 0xffff4191, 0xffff976e, + 0xffffec7b, 0x000046b8, 0xffff3725, 0xffff91c2, + 0xffffef8f, 0x00004e8c, 0xffff33b9, 0xffff9316, + 0xfffff5a3, 0x00005960, 0xffff334d, 0xffff976a, + 0xfffffeb7, 0x00006734, 0xffff35e1, 0xffff9ebe, + 0x000006cb, 0x00007408, 0xffff3775, 0xffffa512, + 0x000015df, 0x000087dc, 0xffff4009, 0xffffb266, + }, +{ 0x000027f3, 0x00009eb0, 0xffff4b9d, 0xffffc2ba, + 0x00003d07, 0x0000b884, 0xffff5a31, 0xffffd60e, + 0x0000511b, 0x0000d158, 0xffff67c5, 0xffffe862, + 0x00006c2f, 0x0000f12c, 0xffff7c59, 0x000001b6, + 0x00008a43, 0x00011400, 0xffff93ed, 0x00001e0a, + 0x0000ab57, 0x000139d4, 0xffffae81, 0x00003d5e, + 0x0000cb6b, 0x00015ea8, 0xffffc815, 0xffffe0b2, + 0x0000867f, 0x0001197c, 0xffff72a9, 0x00000606, + }, +{ 0xfffff893, 0x00008b50, 0xfffed43d, 0xffff675a, + 0xfffffda7, 0x00009524, 0xfffed2d1, 0xffff6aae, + 0x000001bb, 0x00009df8, 0xfffed065, 0xffff6d02, + 0x00000ccf, 0x0000adcc, 0xfffed4f9, 0xffff7656, + 0x00001ae3, 0x0000c0a0, 0xfffedc8d, 0xffff82aa, + 0x00002bf7, 0x0000d674, 0xfffee721, 0xffff91fe, + 0x00003c0b, 0x0000eb48, 0xfffef0b5, 0xffffa052, + 0x0000531f, 0x0001071c, 0xffff0149, 0xffffb5a6, + }, +{ 0x00006d33, 0x000125f0, 0xffff14dd, 0xffffcdfa, + 0x00008a47, 0x000147c4, 0xffff2b71, 0xffffe94e, + 0x0000a65b, 0x00016898, 0xffff4105, 0x000003a2, + 0x0000c96f, 0x0001906c, 0xffff5d99, 0x000024f6, + 0x0000ef83, 0x0001bb40, 0xffff7d2d, 0x0000494a, + 0x00011897, 0x0001e914, 0xffff9fc1, 0x0000709e, + 0x000140ab, 0x000215e8, 0xffffc155, 0xffffdbf2, + 0x0000c3bf, 0x000198bc, 0xffff33e9, 0x00000946, + }, +{ 0xfffffdd3, 0x0000d290, 0xfffe5d7d, 0xffff329a, + 0x00000ae7, 0x0000e464, 0xfffe6411, 0xffff3dee, + 0x000016fb, 0x0000f538, 0xfffe69a5, 0xffff4842, + 0x00002a0f, 0x00010d0c, 0xfffe7639, 0xffff5996, + 0x00004023, 0x000127e0, 0xfffe85cd, 0xffff6dea, + 0x00005937, 0x000145b4, 0xfffe9861, 0xffff853e, + 0x0000714b, 0x00016288, 0xfffea9f5, 0xffff9b92, + 0x0000905f, 0x0001865c, 0xfffec289, 0xffffb8e6, + }, +{ 0x0000b273, 0x0001ad30, 0xfffede1d, 0xffffd93a, + 0x0000d787, 0x0001d704, 0xfffefcb1, 0xfffffc8e, + 0x0000fb9b, 0x0001ffd8, 0xffff1a45, 0x00001ee2, + 0x000126af, 0x00022fac, 0xffff3ed9, 0x00008af4, + 0x00009485, 0x00009e46, 0x00009e37, 0x0000a858, + 0x0000b6a9, 0x0000c52a, 0x0000b9db, 0x0000c8bc, + 0x0000d3cd, 0x0000e70e, 0x0000d07f, 0xffffe920, + 0x00000ff1, 0x000022f2, 0xfffffc23, 0x00000f84, + }, diff --git a/tests/tcg/hexagon/v73_scalar.c b/tests/tcg/hexagon/v73_scalar.c new file mode 100644 index 0000000000..fee67fc531 --- /dev/null +++ b/tests/tcg/hexagon/v73_scalar.c @@ -0,0 +1,96 @@ +/* + * Copyright(c) 2023 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+/*
+ * Test the scalar core instructions that are new in v73
+ */
+
+int err;
+
+static void __check32(int line, uint32_t result, uint32_t expect)
+{
+    if (result != expect) {
+        printf("ERROR at line %d: 0x%08x != 0x%08x\n",
+               line, result, expect);
+        err++;
+    }
+}
+
+#define check32(RES, EXP) __check32(__LINE__, RES, EXP)
+
+static void __check64(int line, uint64_t result, uint64_t expect)
+{
+    if (result != expect) {
+        printf("ERROR at line %d: 0x%016llx != 0x%016llx\n",
+               line, result, expect);
+        err++;
+    }
+}
+
+#define check64(RES, EXP) __check64(__LINE__, RES, EXP)
+
+static bool my_func_called;
+
+static void my_func(void)
+{
+    my_func_called = true;
+}
+
+static inline void callrh(void *func)
+{
+    asm volatile("callrh %0\n\t"
+                 : : "r"(func)
+                 /* Mark the caller-save registers as clobbered */
+                 : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
+                   "r10", "r11", "r12", "r13", "r14", "r15", "r28",
+                   "p0", "p1", "p2", "p3");
+}
+
+static void test_callrh(void)
+{
+    my_func_called = false;
+    callrh(&my_func);
+    check32(my_func_called, true);
+}
+
+static void test_jumprh(void)
+{
+    uint32_t res;
+    asm ("%0 = #5\n\t"
+         "r0 = ##1f\n\t"
+         "jumprh r0\n\t"
+         "%0 = #3\n\t"
+         "jump 2f\n\t"
+         "1:\n\t"
+         "%0 = #1\n\t"
+         "2:\n\t"
+         : "=r"(res) : : "r0");
+    check32(res, 1);
+}
+
+int main()
+{
+    test_callrh();
+    test_jumprh();
+
+    puts(err ? "FAIL" : "PASS");
+    return err ? 1 : 0;
+}
diff --git a/tests/tcg/hppa/Makefile.target b/tests/tcg/hppa/Makefile.target
index b78e6b4849..cdd0d572a7 100644
--- a/tests/tcg/hppa/Makefile.target
+++ b/tests/tcg/hppa/Makefile.target
@@ -10,8 +10,6 @@ EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-16384 run-test-mmap-65536
 # it requires the full vdso with dwarf2 unwind info.
run-signals: signals $(call skip-test, $<, "BROKEN awaiting vdso support") -run-plugin-signals-with-%: - $(call skip-test, $<, "BROKEN awaiting vdso support") VPATH += $(SRC_PATH)/tests/tcg/hppa TESTS += stby diff --git a/tests/tcg/i386/Makefile.target b/tests/tcg/i386/Makefile.target index bafd8c2180..821822ed0c 100644 --- a/tests/tcg/i386/Makefile.target +++ b/tests/tcg/i386/Makefile.target @@ -18,19 +18,15 @@ X86_64_TESTS:=$(filter test-i386-adcox test-i386-bmi2 $(SKIP_I386_TESTS), $(ALL_ test-i386-sse-exceptions: CFLAGS += -msse4.1 -mfpmath=sse run-test-i386-sse-exceptions: QEMU_OPTS += -cpu max -run-plugin-test-i386-sse-exceptions-%: QEMU_OPTS += -cpu max test-i386-pcmpistri: CFLAGS += -msse4.2 run-test-i386-pcmpistri: QEMU_OPTS += -cpu max -run-plugin-test-i386-pcmpistri-%: QEMU_OPTS += -cpu max test-i386-bmi2: CFLAGS=-O2 run-test-i386-bmi2: QEMU_OPTS += -cpu max -run-plugin-test-i386-bmi2-%: QEMU_OPTS += -cpu max test-i386-adcox: CFLAGS=-O2 run-test-i386-adcox: QEMU_OPTS += -cpu max -run-plugin-test-i386-adcox-%: QEMU_OPTS += -cpu max # # hello-i386 is a barebones app @@ -52,8 +48,6 @@ test-i386: $(call skip-test, "BUILD of $@", "missing -no-pie compiler support") run-test-i386: $(call skip-test, "RUN of test-i386", "not built") -run-plugin-test-i386-with-%: - $(call skip-test, "RUN of test-i386 ($*)", "not built") endif ifeq ($(SPEED), slow) @@ -87,7 +81,6 @@ sha512-sse: sha512.c $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) run-sha512-sse: QEMU_OPTS+=-cpu max -run-plugin-sha512-sse-with-%: QEMU_OPTS+=-cpu max TESTS+=sha512-sse @@ -103,15 +96,12 @@ test-avx.h: test-avx.py x86.csv test-3dnow: CFLAGS += -masm=intel -O -I. run-test-3dnow: QEMU_OPTS += -cpu max -run-plugin-test-3dnow: QEMU_OPTS += -cpu max test-3dnow: test-3dnow.h test-mmx: CFLAGS += -masm=intel -O -I. run-test-mmx: QEMU_OPTS += -cpu max -run-plugin-test-mmx: QEMU_OPTS += -cpu max test-mmx: test-mmx.h test-avx: CFLAGS += -mavx -masm=intel -O -I. 
run-test-avx: QEMU_OPTS += -cpu max -run-plugin-test-avx: QEMU_OPTS += -cpu max test-avx: test-avx.h diff --git a/tests/tcg/i386/test-avx.py b/tests/tcg/i386/test-avx.py index d9ca00a49e..641a2ef69e 100755 --- a/tests/tcg/i386/test-avx.py +++ b/tests/tcg/i386/test-avx.py @@ -49,7 +49,7 @@ imask = { 'VEXTRACT[FI]128': 0x01, 'VINSERT[FI]128': 0x01, 'VPBLENDD': 0xff, - 'VPERM2[FI]128': 0x33, + 'VPERM2[FI]128': 0xbb, 'VPERMPD': 0xff, 'VPERMQ': 0xff, 'VPERMILPS': 0xff, diff --git a/tests/tcg/i386/test-i386-bmi2.c b/tests/tcg/i386/test-i386-bmi2.c index 982d4abda4..0244df7987 100644 --- a/tests/tcg/i386/test-i386-bmi2.c +++ b/tests/tcg/i386/test-i386-bmi2.c @@ -123,6 +123,9 @@ int main(int argc, char *argv[]) { result = bzhiq(mask, 0x1f); assert(result == (mask & ~(-1 << 30))); + result = bzhiq(mask, 0x40); + assert(result == mask); + result = rorxq(0x2132435465768798, 8); assert(result == 0x9821324354657687); diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target index ae8b3d7268..373db69648 100644 --- a/tests/tcg/multiarch/Makefile.target +++ b/tests/tcg/multiarch/Makefile.target @@ -64,6 +64,7 @@ run-test-mmap-%: test-mmap $(call run-test, test-mmap-$*, $(QEMU) -p $* $<, $< ($* byte pages)) ifneq ($(HAVE_GDB_BIN),) +ifeq ($(HOST_GDB_SUPPORTS_ARCH),y) GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py run-gdbstub-sha1: sha1 @@ -87,6 +88,10 @@ run-gdbstub-thread-breakpoint: testthread --bin $< --test $(MULTIARCH_SRC)/gdbstub/test-thread-breakpoint.py, \ hitting a breakpoint on non-main thread) +else +run-gdbstub-%: + $(call skip-test, "gdbstub test $*", "no guest arch support") +endif else run-gdbstub-%: $(call skip-test, "gdbstub test $*", "need working gdb") diff --git a/tests/tcg/multiarch/system/Makefile.softmmu-target b/tests/tcg/multiarch/system/Makefile.softmmu-target index 368b64d531..fe40195d39 100644 --- a/tests/tcg/multiarch/system/Makefile.softmmu-target +++ b/tests/tcg/multiarch/system/Makefile.softmmu-target @@ -15,6 +15,7 @@ MULTIARCH_TEST_SRCS=$(wildcard $(MULTIARCH_SYSTEM_SRC)/*.c) MULTIARCH_TESTS = $(patsubst $(MULTIARCH_SYSTEM_SRC)/%.c, %, $(MULTIARCH_TEST_SRCS)) ifneq ($(HAVE_GDB_BIN),) +ifeq ($(HOST_GDB_SUPPORTS_ARCH),y) GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py run-gdbstub-memory: memory @@ -27,9 +28,26 @@ run-gdbstub-memory: memory --bin $< --test $(MULTIARCH_SRC)/gdbstub/memory.py, \ softmmu gdbstub support) +run-gdbstub-untimely-packet: hello + $(call run-test, $@, $(GDB_SCRIPT) \ + --gdb $(HAVE_GDB_BIN) \ + --gdb-args "-ex 'set debug remote 1'" \ + --output untimely-packet.gdb.out \ + --stderr untimely-packet.gdb.err \ + --qemu $(QEMU) \ + --bin $< --qargs \ + "-monitor none -display none -chardev file$(COMMA)path=untimely-packet.out$(COMMA)id=output $(QEMU_OPTS)", \ + "softmmu gdbstub untimely packets") + $(call quiet-command, \ + (! 
grep -Fq 'Packet instead of Ack, ignoring it' untimely-packet.gdb.err), \ + "GREP", "file untimely-packet.gdb.err") +else +run-gdbstub-%: + $(call skip-test, "gdbstub test $*", "no guest arch support") +endif else run-gdbstub-%: $(call skip-test, "gdbstub test $*", "need working gdb") endif -MULTIARCH_RUNS += run-gdbstub-memory +MULTIARCH_RUNS += run-gdbstub-memory run-gdbstub-untimely-packet diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c index 214f7d4f54..e29786ae55 100644 --- a/tests/tcg/multiarch/system/memory.c +++ b/tests/tcg/multiarch/system/memory.c @@ -40,18 +40,21 @@ static void pdot(int count) } /* - * Helper macros for shift/extract so we can keep our endian handling - * in one place. + * Helper macros for endian handling. */ -#define BYTE_SHIFT(b, pos) ((uint64_t)b << (pos * 8)) -#define BYTE_EXTRACT(b, pos) ((b >> (pos * 8)) & 0xff) +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define BYTE_SHIFT(b, pos) (b << (pos * 8)) +#define BYTE_NEXT(b) ((b)++) +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define BYTE_SHIFT(b, pos) (b << ((sizeof(b) - 1 - (pos)) * 8)) +#define BYTE_NEXT(b) (--(b)) +#else +#error Unsupported __BYTE_ORDER__ +#endif /* - * Fill the data with ascending value bytes. - * - * Currently we only support Little Endian machines so write in - * ascending address order. When we read higher address bytes should - * either be zero or higher than the lower bytes. + * Fill the data with ascending (for little-endian) or descending (for + * big-endian) value bytes. */ static void init_test_data_u8(int unused_offset) @@ -62,14 +65,14 @@ static void init_test_data_u8(int unused_offset) ml_printf("Filling test area with u8:"); for (i = 0; i < TEST_SIZE; i++) { - *ptr++ = count++; + *ptr++ = BYTE_NEXT(count); pdot(i); } ml_printf("done\n"); } /* - * Full the data with alternating positive and negative bytes. This + * Fill the data with alternating positive and negative bytes. This * should mean for reads larger than a byte all subsequent reads will * stay either negative or positive. We never write 0. 
*/ @@ -119,7 +122,7 @@ static void init_test_data_u16(int offset) reset_start_data(offset); for (i = 0; i < max; i++) { - uint8_t low = count++, high = count++; + uint16_t low = BYTE_NEXT(count), high = BYTE_NEXT(count); word = BYTE_SHIFT(high, 1) | BYTE_SHIFT(low, 0); *ptr++ = word; pdot(i); @@ -139,9 +142,10 @@ static void init_test_data_u32(int offset) reset_start_data(offset); for (i = 0; i < max; i++) { - uint8_t b4 = count++, b3 = count++; - uint8_t b2 = count++, b1 = count++; - word = BYTE_SHIFT(b1, 3) | BYTE_SHIFT(b2, 2) | BYTE_SHIFT(b3, 1) | b4; + uint32_t b4 = BYTE_NEXT(count), b3 = BYTE_NEXT(count); + uint32_t b2 = BYTE_NEXT(count), b1 = BYTE_NEXT(count); + word = BYTE_SHIFT(b1, 3) | BYTE_SHIFT(b2, 2) | BYTE_SHIFT(b3, 1) | + BYTE_SHIFT(b4, 0); *ptr++ = word; pdot(i); } @@ -160,13 +164,13 @@ static void init_test_data_u64(int offset) reset_start_data(offset); for (i = 0; i < max; i++) { - uint8_t b8 = count++, b7 = count++; - uint8_t b6 = count++, b5 = count++; - uint8_t b4 = count++, b3 = count++; - uint8_t b2 = count++, b1 = count++; + uint64_t b8 = BYTE_NEXT(count), b7 = BYTE_NEXT(count); + uint64_t b6 = BYTE_NEXT(count), b5 = BYTE_NEXT(count); + uint64_t b4 = BYTE_NEXT(count), b3 = BYTE_NEXT(count); + uint64_t b2 = BYTE_NEXT(count), b1 = BYTE_NEXT(count); word = BYTE_SHIFT(b1, 7) | BYTE_SHIFT(b2, 6) | BYTE_SHIFT(b3, 5) | BYTE_SHIFT(b4, 4) | BYTE_SHIFT(b5, 3) | BYTE_SHIFT(b6, 2) | - BYTE_SHIFT(b7, 1) | b8; + BYTE_SHIFT(b7, 1) | BYTE_SHIFT(b8, 0); *ptr++ = word; pdot(i); } @@ -374,12 +378,20 @@ static bool read_test_data_s16(int offset, bool neg_first) ml_printf("Reading s16 from %#lx (offset %d, %s):", ptr, offset, neg_first ? "neg" : "pos"); + /* + * If the first byte is negative, then the last byte is positive. + * Therefore the logic below must be flipped for big-endian. + */ +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + neg_first = !neg_first; +#endif + for (i = 0; i < max; i++) { int32_t data = *ptr++; if (neg_first && data < 0) { pdot(i); - } else if (data > 0) { + } else if (!neg_first && data > 0) { pdot(i); } else { ml_printf("Error %d %c 0\n", data, neg_first ? '<' : '>'); @@ -399,12 +411,20 @@ static bool read_test_data_s32(int offset, bool neg_first) ml_printf("Reading s32 from %#lx (offset %d, %s):", ptr, offset, neg_first ? "neg" : "pos"); + /* + * If the first byte is negative, then the last byte is positive. + * Therefore the logic below must be flipped for big-endian. + */ +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + neg_first = !neg_first; +#endif + for (i = 0; i < max; i++) { int64_t data = *ptr++; if (neg_first && data < 0) { pdot(i); - } else if (data > 0) { + } else if (!neg_first && data > 0) { pdot(i); } else { ml_printf("Error %d %c 0\n", data, neg_first ? '<' : '>'); @@ -419,8 +439,7 @@ static bool read_test_data_s32(int offset, bool neg_first) * Read the test data and verify at various offsets * * For everything except bytes all our reads should be either positive - * or negative depending on what offset we are reading from. Currently - * we only handle LE systems. + * or negative depending on what offset we are reading from. 
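+ * For big-endian targets, the s16/s32 readers above flip neg_first to match.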
 */
 read_sfn read_sfns[] = { read_test_data_s8,
                          read_test_data_s16,
diff --git a/tests/tcg/ppc64/Makefile.target b/tests/tcg/ppc64/Makefile.target
index f081f1c683..b084963b9a 100644
--- a/tests/tcg/ppc64/Makefile.target
+++ b/tests/tcg/ppc64/Makefile.target
@@ -20,18 +20,19 @@ PPC64_TESTS += mtfsf
 PPC64_TESTS += mffsce
 
 ifneq ($(CROSS_CC_HAS_POWER10),)
-PPC64_TESTS += byte_reverse sha512-vector
+PPC64_TESTS += byte_reverse sha512-vector vector
 endif
 byte_reverse: CFLAGS += -mcpu=power10
 run-byte_reverse: QEMU_OPTS+=-cpu POWER10
-run-plugin-byte_reverse-with-%: QEMU_OPTS+=-cpu POWER10
 
 sha512-vector: CFLAGS +=-mcpu=power10 -O3
 sha512-vector: sha512.c
	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
 
 run-sha512-vector: QEMU_OPTS+=-cpu POWER10
-run-plugin-sha512-vector-with-%: QEMU_OPTS+=-cpu POWER10
+
+vector: CFLAGS += -mcpu=power10 -I$(SRC_PATH)/include
+run-vector: QEMU_OPTS += -cpu POWER10
 
 PPC64_TESTS += signal_save_restore_xer
 PPC64_TESTS += xxspltw
diff --git a/tests/tcg/ppc64/vector.c b/tests/tcg/ppc64/vector.c
new file mode 100644
index 0000000000..cbf4ae9332
--- /dev/null
+++ b/tests/tcg/ppc64/vector.c
@@ -0,0 +1,51 @@
+#include <assert.h>
+#include <altivec.h>
+#include "qemu/compiler.h"
+
+int main(void)
+{
+    unsigned int result_wi;
+    vector unsigned char vbc_bi_src = { 0xFF, 0xFF, 0, 0xFF, 0xFF, 0xFF,
+                                        0xFF, 0xFF, 0xFF, 0xFF, 0, 0, 0,
+                                        0, 0xFF, 0xFF};
+    vector unsigned short vbc_hi_src = { 0xFFFF, 0, 0, 0xFFFF,
+                                         0, 0, 0xFFFF, 0xFFFF};
+    vector unsigned int vbc_wi_src = {0, 0, 0xFFFFFFFF, 0xFFFFFFFF};
+    vector unsigned long long vbc_di_src = {0xFFFFFFFFFFFFFFFF, 0};
+    vector __uint128_t vbc_qi_src;
+
+    asm("vextractbm %0, %1" : "=r" (result_wi) : "v" (vbc_bi_src));
+#if HOST_BIG_ENDIAN
+    assert(result_wi == 0b1101111111000011);
+#else
+    assert(result_wi == 0b1100001111111011);
+#endif
+
+    asm("vextracthm %0, %1" : "=r" (result_wi) : "v" (vbc_hi_src));
+#if HOST_BIG_ENDIAN
+    assert(result_wi == 0b10010011);
+#else
+    assert(result_wi == 0b11001001);
+#endif
+
+    asm("vextractwm %0, %1" : "=r" (result_wi) : "v" (vbc_wi_src));
+#if HOST_BIG_ENDIAN
+    assert(result_wi == 0b0011);
+#else
+    assert(result_wi == 0b1100);
+#endif
+
+    asm("vextractdm %0, %1" : "=r" (result_wi) : "v" (vbc_di_src));
+#if HOST_BIG_ENDIAN
+    assert(result_wi == 0b10);
+#else
+    assert(result_wi == 0b01);
+#endif
+
+    vbc_qi_src[0] = 0x1;
+    vbc_qi_src[0] = vbc_qi_src[0] << 127;
+    asm("vextractqm %0, %1" : "=r" (result_wi) : "v" (vbc_qi_src));
+    assert(result_wi == 0b1);
+
+    return 0;
+}
diff --git a/tests/tcg/riscv64/Makefile.softmmu-target b/tests/tcg/riscv64/Makefile.softmmu-target
index e22cdb34c5..d5b126e5f1 100644
--- a/tests/tcg/riscv64/Makefile.softmmu-target
+++ b/tests/tcg/riscv64/Makefile.softmmu-target
@@ -19,3 +19,6 @@ QEMU_OPTS += -M virt -display none -semihosting -device loader,file=
 EXTRA_RUNS += run-issue1060
 run-issue1060: issue1060
	$(call run-test, $<, $(QEMU) $(QEMU_OPTS)$<)
+
+# We don't currently support the multiarch system tests
+undefine MULTIARCH_TESTS
diff --git a/tests/tcg/riscv64/Makefile.target b/tests/tcg/riscv64/Makefile.target
index cc3ed65ffd..9973ba3b5f 100644
--- a/tests/tcg/riscv64/Makefile.target
+++ b/tests/tcg/riscv64/Makefile.target
@@ -9,4 +9,3 @@ TESTS += noexec
 TESTS += test-noc
 test-noc: LDFLAGS = -nostdlib -static
 run-test-noc: QEMU_OPTS += -cpu rv64,c=false
-run-plugin-test-noc-%: QEMU_OPTS += -cpu rv64,c=false
diff --git a/tests/tcg/s390x/Makefile.softmmu-target b/tests/tcg/s390x/Makefile.softmmu-target
index 725b6c598d..44dfd71629 100644
--- a/tests/tcg/s390x/Makefile.softmmu-target
+++
b/tests/tcg/s390x/Makefile.softmmu-target @@ -1,11 +1,40 @@ S390X_SRC=$(SRC_PATH)/tests/tcg/s390x VPATH+=$(S390X_SRC) -QEMU_OPTS=-action panic=exit-failure -kernel +QEMU_OPTS=-action panic=exit-failure -nographic -kernel +LINK_SCRIPT=$(S390X_SRC)/softmmu.ld +CFLAGS+=-ggdb -O0 +LDFLAGS=-nostdlib -static -%: %.S - $(CC) -march=z13 -m64 -nostdlib -static -Wl,-Ttext=0 \ - -Wl,--build-id=none $< -o $@ +%.o: %.S + $(CC) -march=z13 -m64 -c $< -o $@ -TESTS += unaligned-lowcore -TESTS += bal -TESTS += sam +%.o: %.c + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -march=z13 -m64 -c $< -o $@ + +%: %.o + $(CC) $< -o $@ $(LDFLAGS) + +ASM_TESTS = \ + bal \ + exrl-ssm-early \ + sam \ + lpsw \ + lpswe-early \ + ssm-early \ + stosm-early \ + unaligned-lowcore + +include $(S390X_SRC)/pgm-specification.mak +$(PGM_SPECIFICATION_TESTS): pgm-specification-softmmu.o +$(PGM_SPECIFICATION_TESTS): LDFLAGS+=pgm-specification-softmmu.o +ASM_TESTS += $(PGM_SPECIFICATION_TESTS) + +$(ASM_TESTS): LDFLAGS += -Wl,-T$(LINK_SCRIPT) -Wl,--build-id=none +$(ASM_TESTS): $(LINK_SCRIPT) +TESTS += $(ASM_TESTS) + +S390X_MULTIARCH_RUNTIME_OBJS = head64.o console.o $(MINILIB_OBJS) +$(MULTIARCH_TESTS): $(S390X_MULTIARCH_RUNTIME_OBJS) +$(MULTIARCH_TESTS): LDFLAGS += $(S390X_MULTIARCH_RUNTIME_OBJS) +$(MULTIARCH_TESTS): CFLAGS += $(MINILIB_INC) +memory: CFLAGS += -DCHECK_UNALIGNED=0 diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target index 72ad309b27..23dc8b6a63 100644 --- a/tests/tcg/s390x/Makefile.target +++ b/tests/tcg/s390x/Makefile.target @@ -2,6 +2,9 @@ S390X_SRC=$(SRC_PATH)/tests/tcg/s390x VPATH+=$(S390X_SRC) CFLAGS+=-march=zEC12 -m64 +%.o: %.c + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -c $< -o $@ + config-cc.mak: Makefile $(quiet-@)( \ $(call cc-option,-march=z14, CROSS_CC_HAS_Z14); \ @@ -28,10 +31,21 @@ TESTS+=div TESTS+=clst TESTS+=long-double TESTS+=cdsg +TESTS+=chrl +TESTS+=rxsbg +TESTS+=ex-relative-long +TESTS+=ex-branch cdsg: CFLAGS+=-pthread cdsg: LDFLAGS+=-pthread +rxsbg: CFLAGS+=-O2 + +include $(S390X_SRC)/pgm-specification.mak +$(PGM_SPECIFICATION_TESTS): pgm-specification-user.o +$(PGM_SPECIFICATION_TESTS): LDFLAGS+=pgm-specification-user.o +TESTS += $(PGM_SPECIFICATION_TESTS) + Z13_TESTS=vistr $(Z13_TESTS): CFLAGS+=-march=z13 -O2 TESTS+=$(Z13_TESTS) @@ -51,7 +65,7 @@ $(Z15_TESTS): CFLAGS+=-march=z15 -O2 TESTS+=$(Z15_TESTS) endif -ifneq ($(HAVE_GDB_BIN),) +ifeq ($(HOST_GDB_SUPPORTS_ARCH),y) GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py run-gdbstub-signals-s390x: signals-s390x diff --git a/tests/tcg/s390x/br-odd.S b/tests/tcg/s390x/br-odd.S new file mode 100644 index 0000000000..2fae47a9e3 --- /dev/null +++ b/tests/tcg/s390x/br-odd.S @@ -0,0 +1,16 @@ +/* + * Test BRanching to a non-mapped odd address. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + .globl test +test: + lgrl %r1,odd_addr + br %r1 + + .align 8 +odd_addr: + .quad 0xDDDDDDDDDDDDDDDD + .globl expected_old_psw +expected_old_psw: + .quad 0x180000000,0xDDDDDDDDDDDDDDDD diff --git a/tests/tcg/s390x/cgrl-unaligned.S b/tests/tcg/s390x/cgrl-unaligned.S new file mode 100644 index 0000000000..164d68f2e6 --- /dev/null +++ b/tests/tcg/s390x/cgrl-unaligned.S @@ -0,0 +1,16 @@ +/* + * Test CGRL with a non-doubleword aligned address. 
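+ * CGRL compares against a doubleword in storage, so a non-doubleword-aligned
+ * second operand must raise a specification exception.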
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+    .globl test
+test:
+    cgrl %r1,unaligned
+
+    .align 8
+    .globl expected_old_psw
+expected_old_psw:
+    .quad 0x180000000,test
+    .long 0
+unaligned:
+    .quad 0
diff --git a/tests/tcg/s390x/chrl.c b/tests/tcg/s390x/chrl.c
new file mode 100644
index 0000000000..b1c3a1c561
--- /dev/null
+++ b/tests/tcg/s390x/chrl.c
@@ -0,0 +1,80 @@
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+static void test_chrl(void)
+{
+    uint32_t program_mask, cc;
+
+    asm volatile (
+        ".pushsection .rodata\n"
+        "0:\n\t"
+        ".short 1, 0x8000\n\t"
+        ".popsection\n\t"
+
+        "chrl %[r], 0b\n\t"
+        "ipm %[program_mask]\n"
+        : [program_mask] "=r" (program_mask)
+        : [r] "r" (1)
+    );
+
+    cc = program_mask >> 28;
+    assert(!cc);
+
+    asm volatile (
+        ".pushsection .rodata\n"
+        "0:\n\t"
+        ".short -1, 0x8000\n\t"
+        ".popsection\n\t"
+
+        "chrl %[r], 0b\n\t"
+        "ipm %[program_mask]\n"
+        : [program_mask] "=r" (program_mask)
+        : [r] "r" (-1)
+    );
+
+    cc = program_mask >> 28;
+    assert(!cc);
+}
+
+static void test_cghrl(void)
+{
+    uint32_t program_mask, cc;
+
+    asm volatile (
+        ".pushsection .rodata\n"
+        "0:\n\t"
+        ".short 1, 0x8000, 0, 0\n\t"
+        ".popsection\n\t"
+
+        "cghrl %[r], 0b\n\t"
+        "ipm %[program_mask]\n"
+        : [program_mask] "=r" (program_mask)
+        : [r] "r" (1L)
+    );
+
+    cc = program_mask >> 28;
+    assert(!cc);
+
+    asm volatile (
+        ".pushsection .rodata\n"
+        "0:\n\t"
+        ".short -1, 0x8000, 0, 0\n\t"
+        ".popsection\n\t"
+
+        "cghrl %[r], 0b\n\t"
+        "ipm %[program_mask]\n"
+        : [program_mask] "=r" (program_mask)
+        : [r] "r" (-1L)
+    );
+
+    cc = program_mask >> 28;
+    assert(!cc);
+}
+
+int main(void)
+{
+    test_chrl();
+    test_cghrl();
+    return EXIT_SUCCESS;
+}
diff --git a/tests/tcg/s390x/clrl-unaligned.S b/tests/tcg/s390x/clrl-unaligned.S
new file mode 100644
index 0000000000..182b1b6462
--- /dev/null
+++ b/tests/tcg/s390x/clrl-unaligned.S
@@ -0,0 +1,16 @@
+/*
+ * Test CLRL with a non-word aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+    .globl test
+test:
+    clrl %r1,unaligned
+
+    .align 8
+    .globl expected_old_psw
+expected_old_psw:
+    .quad 0x180000000,test
+    .short 0
+unaligned:
+    .long 0
diff --git a/tests/tcg/s390x/console.c b/tests/tcg/s390x/console.c
new file mode 100644
index 0000000000..d43ce3f44b
--- /dev/null
+++ b/tests/tcg/s390x/console.c
@@ -0,0 +1,12 @@
+/*
+ * Console code for multiarch tests.
+ * Reuses the pc-bios/s390-ccw implementation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "../../../pc-bios/s390-ccw/sclp.c"
+
+void __sys_outc(char c)
+{
+    write(1, &c, sizeof(c));
+}
diff --git a/tests/tcg/s390x/crl-unaligned.S b/tests/tcg/s390x/crl-unaligned.S
new file mode 100644
index 0000000000..b86fbe0ef3
--- /dev/null
+++ b/tests/tcg/s390x/crl-unaligned.S
@@ -0,0 +1,16 @@
+/*
+ * Test CRL with a non-word aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+    .globl test
+test:
+    crl %r1,unaligned
+
+    .align 8
+    .globl expected_old_psw
+expected_old_psw:
+    .quad 0x180000000,test
+    .short 0
+unaligned:
+    .long 0
diff --git a/tests/tcg/s390x/ex-branch.c b/tests/tcg/s390x/ex-branch.c
new file mode 100644
index 0000000000..c606719152
--- /dev/null
+++ b/tests/tcg/s390x/ex-branch.c
@@ -0,0 +1,158 @@
+/* Check EXECUTE with relative branch instructions as targets. */
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct test {
+    const char *name;
+    void (*func)(long *link, long *magic);
+    long exp_link;
+};
+
+/* Branch instructions and their expected effects.
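+   Each instruction is exercised directly, as an EX target, and as an EXRL
+   target; only bras and brasl record a link value.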
*/ +#define LINK_64(test) ((long)test ## _exp_link) +#define LINK_NONE(test) -1L +#define FOR_EACH_INSN(F) \ + F(bras, "%[link]", LINK_64) \ + F(brasl, "%[link]", LINK_64) \ + F(brc, "0x8", LINK_NONE) \ + F(brcl, "0x8", LINK_NONE) \ + F(brct, "%%r0", LINK_NONE) \ + F(brctg, "%%r0", LINK_NONE) \ + F(brxh, "%%r2,%%r0", LINK_NONE) \ + F(brxhg, "%%r2,%%r0", LINK_NONE) \ + F(brxle, "%%r0,%%r1", LINK_NONE) \ + F(brxlg, "%%r0,%%r1", LINK_NONE) \ + F(crj, "%%r0,%%r0,8", LINK_NONE) \ + F(cgrj, "%%r0,%%r0,8", LINK_NONE) \ + F(cij, "%%r0,0,8", LINK_NONE) \ + F(cgij, "%%r0,0,8", LINK_NONE) \ + F(clrj, "%%r0,%%r0,8", LINK_NONE) \ + F(clgrj, "%%r0,%%r0,8", LINK_NONE) \ + F(clij, "%%r0,0,8", LINK_NONE) \ + F(clgij, "%%r0,0,8", LINK_NONE) + +#define INIT_TEST \ + "xgr %%r0,%%r0\n" /* %r0 = 0; %cc = 0 */ \ + "lghi %%r1,1\n" /* %r1 = 1 */ \ + "lghi %%r2,2\n" /* %r2 = 2 */ + +#define CLOBBERS_TEST "cc", "0", "1", "2" + +#define DEFINE_TEST(insn, args, exp_link) \ + extern char insn ## _exp_link[]; \ + static void test_ ## insn(long *link, long *magic) \ + { \ + asm(INIT_TEST \ + #insn " " args ",0f\n" \ + ".globl " #insn "_exp_link\n" \ + #insn "_exp_link:\n" \ + ".org . + 90\n" \ + "0: lgfi %[magic],0x12345678\n" \ + : [link] "+r" (*link) \ + , [magic] "+r" (*magic) \ + : : CLOBBERS_TEST); \ + } \ + extern char ex_ ## insn ## _exp_link[]; \ + static void test_ex_ ## insn(long *link, long *magic) \ + { \ + unsigned long target; \ + \ + asm(INIT_TEST \ + "larl %[target],0f\n" \ + "ex %%r0,0(%[target])\n" \ + ".globl ex_" #insn "_exp_link\n" \ + "ex_" #insn "_exp_link:\n" \ + ".org . + 60\n" \ + "0: " #insn " " args ",1f\n" \ + ".org . + 120\n" \ + "1: lgfi %[magic],0x12345678\n" \ + : [target] "=r" (target) \ + , [link] "+r" (*link) \ + , [magic] "+r" (*magic) \ + : : CLOBBERS_TEST); \ + } \ + extern char exrl_ ## insn ## _exp_link[]; \ + static void test_exrl_ ## insn(long *link, long *magic) \ + { \ + asm(INIT_TEST \ + "exrl %%r0,0f\n" \ + ".globl exrl_" #insn "_exp_link\n" \ + "exrl_" #insn "_exp_link:\n" \ + ".org . + 60\n" \ + "0: " #insn " " args ",1f\n" \ + ".org . + 120\n" \ + "1: lgfi %[magic],0x12345678\n" \ + : [link] "+r" (*link) \ + , [magic] "+r" (*magic) \ + : : CLOBBERS_TEST); \ + } + +/* Test functions. */ +FOR_EACH_INSN(DEFINE_TEST) + +/* Test definitions. 
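+   REGISTER_TEST emits three table entries per instruction: direct, via EX,
+   and via EXRL.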
+ */
+#define REGISTER_TEST(insn, args, _exp_link) \
+    { \
+        .name = #insn, \
+        .func = test_ ## insn, \
+        .exp_link = (_exp_link(insn)), \
+    }, \
+    { \
+        .name = "ex " #insn, \
+        .func = test_ex_ ## insn, \
+        .exp_link = (_exp_link(ex_ ## insn)), \
+    }, \
+    { \
+        .name = "exrl " #insn, \
+        .func = test_exrl_ ## insn, \
+        .exp_link = (_exp_link(exrl_ ## insn)), \
+    },
+
+static const struct test tests[] = {
+    FOR_EACH_INSN(REGISTER_TEST)
+};
+
+int main(int argc, char **argv)
+{
+    const struct test *test;
+    int ret = EXIT_SUCCESS;
+    bool verbose = false;
+    long link, magic;
+    size_t i;
+
+    for (i = 1; i < argc; i++) {
+        if (strcmp(argv[i], "-v") == 0) {
+            verbose = true;
+        }
+    }
+
+    for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+        test = &tests[i];
+        if (verbose) {
+            fprintf(stderr, "[ RUN      ] %s\n", test->name);
+        }
+        link = -1;
+        magic = -1;
+        test->func(&link, &magic);
+#define ASSERT_EQ(expected, actual) do { \
+    if (expected != actual) { \
+        fprintf(stderr, "%s: " #expected " (0x%lx) != " #actual " (0x%lx)\n", \
+                test->name, expected, actual); \
+        ret = EXIT_FAILURE; \
+    } \
+} while (0)
+        ASSERT_EQ(test->exp_link, link);
+        ASSERT_EQ(0x12345678L, magic);
+#undef ASSERT_EQ
+    }
+
+    if (verbose) {
+        fprintf(stderr, ret == EXIT_SUCCESS ? "[  PASSED  ]\n" :
+                                              "[  FAILED  ]\n");
+    }
+
+    return ret;
+}
diff --git a/tests/tcg/s390x/ex-odd.S b/tests/tcg/s390x/ex-odd.S
new file mode 100644
index 0000000000..4e42a47df3
--- /dev/null
+++ b/tests/tcg/s390x/ex-odd.S
@@ -0,0 +1,17 @@
+/*
+ * Test EXECUTing a non-mapped odd address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+    .globl test
+test:
+    lgrl %r1,odd_addr
+fail:
+    ex 0,0(%r1)
+
+    .align 8
+odd_addr:
+    .quad 0xDDDDDDDDDDDDDDDD
+    .globl expected_old_psw
+expected_old_psw:
+    .quad 0x180000000,fail
diff --git a/tests/tcg/s390x/ex-relative-long.c b/tests/tcg/s390x/ex-relative-long.c
new file mode 100644
index 0000000000..21fbef6258
--- /dev/null
+++ b/tests/tcg/s390x/ex-relative-long.c
@@ -0,0 +1,156 @@
+/* Check EXECUTE with relative long instructions as targets. */
+#include <stdio.h>
+#include <stdlib.h>
+
+struct test {
+    const char *name;
+    long (*func)(long reg, long *cc);
+    long exp_reg;
+    long exp_mem;
+    long exp_cc;
+};
+
+/*
+ * Each test sets the MEM_IDXth element of the mem array to MEM and uses a
+ * single relative long instruction on it. The other elements remain zero.
+ * This is in order to prevent stumbling upon MEM in random memory in case
+ * there is an off-by-a-small-value bug.
+ *
+ * Note that while gcc supports the ZL constraint for relative long operands,
+ * clang doesn't, so the assembly code accesses mem[MEM_IDX] using MEM_ASM.
+ */
+static long mem[0x1000];
+#define MEM_IDX 0x800
+#define MEM_ASM "mem+0x800*8"
+
+/* Initial %r2 value. */
+#define REG 0x1234567887654321
+
+/* Initial mem[MEM_IDX] value. */
+#define MEM 0xfedcba9889abcdef
+
+/* Initial cc value. */
+#define CC 0
+
+/* Relative long instructions and their expected effects.
*/ +#define FOR_EACH_INSN(F) \ + F(cgfrl, REG, MEM, 2) \ + F(cghrl, REG, MEM, 2) \ + F(cgrl, REG, MEM, 2) \ + F(chrl, REG, MEM, 1) \ + F(clgfrl, REG, MEM, 2) \ + F(clghrl, REG, MEM, 2) \ + F(clgrl, REG, MEM, 1) \ + F(clhrl, REG, MEM, 2) \ + F(clrl, REG, MEM, 1) \ + F(crl, REG, MEM, 1) \ + F(larl, (long)&mem[MEM_IDX], MEM, CC) \ + F(lgfrl, 0xfffffffffedcba98, MEM, CC) \ + F(lghrl, 0xfffffffffffffedc, MEM, CC) \ + F(lgrl, MEM, MEM, CC) \ + F(lhrl, 0x12345678fffffedc, MEM, CC) \ + F(llghrl, 0x000000000000fedc, MEM, CC) \ + F(llhrl, 0x123456780000fedc, MEM, CC) \ + F(lrl, 0x12345678fedcba98, MEM, CC) \ + F(stgrl, REG, REG, CC) \ + F(sthrl, REG, 0x4321ba9889abcdef, CC) \ + F(strl, REG, 0x8765432189abcdef, CC) + +/* Test functions. */ +#define DEFINE_EX_TEST(insn, exp_reg, exp_mem, exp_cc) \ + static long test_ex_ ## insn(long reg, long *cc) \ + { \ + register long r2 asm("r2"); \ + char mask = 0x20; /* make target use %r2 */ \ + long pm, target; \ + \ + r2 = reg; \ + asm("larl %[target],0f\n" \ + "cr %%r0,%%r0\n" /* initial cc */ \ + "ex %[mask],0(%[target])\n" \ + "jg 1f\n" \ + "0: " #insn " %%r0," MEM_ASM "\n" \ + "1: ipm %[pm]\n" \ + : [target] "=&a" (target), [r2] "+r" (r2), [pm] "=r" (pm) \ + : [mask] "a" (mask) \ + : "cc", "memory"); \ + reg = r2; \ + *cc = (pm >> 28) & 3; \ + \ + return reg; \ + } + +#define DEFINE_EXRL_TEST(insn, exp_reg, exp_mem, exp_cc) \ + static long test_exrl_ ## insn(long reg, long *cc) \ + { \ + register long r2 asm("r2"); \ + char mask = 0x20; /* make target use %r2 */ \ + long pm; \ + \ + r2 = reg; \ + asm("cr %%r0,%%r0\n" /* initial cc */ \ + "exrl %[mask],0f\n" \ + "jg 1f\n" \ + "0: " #insn " %%r0," MEM_ASM "\n" \ + "1: ipm %[pm]\n" \ + : [r2] "+r" (r2), [pm] "=r" (pm) \ + : [mask] "a" (mask) \ + : "cc", "memory"); \ + reg = r2; \ + *cc = (pm >> 28) & 3; \ + \ + return reg; \ + } + +FOR_EACH_INSN(DEFINE_EX_TEST) +FOR_EACH_INSN(DEFINE_EXRL_TEST) + +/* Test definitions. */ +#define REGISTER_EX_EXRL_TEST(ex_insn, insn, _exp_reg, _exp_mem, _exp_cc) \ + { \ + .name = #ex_insn " " #insn, \ + .func = test_ ## ex_insn ## _ ## insn, \ + .exp_reg = (_exp_reg), \ + .exp_mem = (_exp_mem), \ + .exp_cc = (_exp_cc), \ + }, + +#define REGISTER_EX_TEST(insn, exp_reg, exp_mem, exp_cc) \ + REGISTER_EX_EXRL_TEST(ex, insn, exp_reg, exp_mem, exp_cc) + +#define REGISTER_EXRL_TEST(insn, exp_reg, exp_mem, exp_cc) \ + REGISTER_EX_EXRL_TEST(exrl, insn, exp_reg, exp_mem, exp_cc) + +static const struct test tests[] = { + FOR_EACH_INSN(REGISTER_EX_TEST) + FOR_EACH_INSN(REGISTER_EXRL_TEST) +}; + +/* Loop over all tests and run them. */ +int main(void) +{ + const struct test *test; + int ret = EXIT_SUCCESS; + long reg, cc; + size_t i; + + for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + test = &tests[i]; + mem[MEM_IDX] = MEM; + cc = -1; + reg = test->func(REG, &cc); +#define ASSERT_EQ(expected, actual) do { \ + if (expected != actual) { \ + fprintf(stderr, "%s: " #expected " (0x%lx) != " #actual " (0x%lx)\n", \ + test->name, expected, actual); \ + ret = EXIT_FAILURE; \ + } \ +} while (0) + ASSERT_EQ(test->exp_reg, reg); + ASSERT_EQ(test->exp_mem, mem[MEM_IDX]); + ASSERT_EQ(test->exp_cc, cc); +#undef ASSERT_EQ + } + + return ret; +} diff --git a/tests/tcg/s390x/exrl-ssm-early.S b/tests/tcg/s390x/exrl-ssm-early.S new file mode 100644 index 0000000000..68fbd87b3a --- /dev/null +++ b/tests/tcg/s390x/exrl-ssm-early.S @@ -0,0 +1,43 @@ +/* + * Test early exception recognition using EXRL + SSM. 
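+ * The executed SSM sets a reserved PSW bit, so the exception must be
+ * recognized early, with the EXRL's ilc and an old PSW pointing past the EXRL.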
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+    .org 0x8d
+ilc:
+    .org 0x8e
+program_interruption_code:
+    .org 0x150
+program_old_psw:
+    .org 0x1D0                         /* program new PSW */
+    .quad 0,pgm
+    .org 0x200                         /* lowcore padding */
+
+    .globl _start
+_start:
+    exrl %r0,ssm
+expected_pswa:
+    j failure
+ssm:
+    ssm ssm_op
+
+pgm:
+    chhsi program_interruption_code,0x6 /* specification exception? */
+    jne failure
+    cli ilc,6                          /* ilc for EXRL? */
+    jne failure
+    clc program_old_psw(16),expected_old_psw /* correct old PSW? */
+    jne failure
+    lpswe success_psw
+failure:
+    lpswe failure_psw
+
+ssm_op:
+    .byte 0x08                         /* bit 4 set */
+    .align 8
+expected_old_psw:
+    .quad 0x0800000180000000,expected_pswa /* bit 4 set */
+success_psw:
+    .quad 0x2000000000000,0xfff        /* see is_special_wait_psw() */
+failure_psw:
+    .quad 0x2000000000000,0            /* disabled wait */
diff --git a/tests/tcg/s390x/head64.S b/tests/tcg/s390x/head64.S
new file mode 100644
index 0000000000..c6f36dfea4
--- /dev/null
+++ b/tests/tcg/s390x/head64.S
@@ -0,0 +1,31 @@
+/*
+ * Startup code for multiarch tests.
+ * Reuses the pc-bios/s390-ccw implementation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#define main main_pre
+#include "../../../pc-bios/s390-ccw/start.S"
+#undef main
+
+main_pre:
+    aghi %r15,-160                     /* reserve stack for C code */
+    brasl %r14,sclp_setup
+    brasl %r14,main
+    larl %r1,success_psw               /* check main() return code */
+    ltgr %r2,%r2
+    je 0f
+    larl %r1,failure_psw
+0:
+    lpswe 0(%r1)
+
+    .align 8
+success_psw:
+    .quad 0x2000180000000,0xfff        /* see is_special_wait_psw() */
+failure_psw:
+    .quad 0x2000180000000,0            /* disabled wait */
+
+    .section .bss
+    .align 0x1000
+stack:
+    .skip 0x8000
diff --git a/tests/tcg/s390x/lgrl-unaligned.S b/tests/tcg/s390x/lgrl-unaligned.S
new file mode 100644
index 0000000000..ef8d51d47c
--- /dev/null
+++ b/tests/tcg/s390x/lgrl-unaligned.S
@@ -0,0 +1,16 @@
+/*
+ * Test LGRL from a non-doubleword aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+    .globl test
+test:
+    lgrl %r1,unaligned
+
+    .align 8
+    .globl expected_old_psw
+expected_old_psw:
+    .quad 0x180000000,test
+    .long 0
+unaligned:
+    .quad 0
diff --git a/tests/tcg/s390x/llgfrl-unaligned.S b/tests/tcg/s390x/llgfrl-unaligned.S
new file mode 100644
index 0000000000..c9b4eeaecf
--- /dev/null
+++ b/tests/tcg/s390x/llgfrl-unaligned.S
@@ -0,0 +1,16 @@
+/*
+ * Test LLGFRL from a non-word aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+    .globl test
+test:
+    llgfrl %r1,unaligned
+
+    .align 8
+    .globl expected_old_psw
+expected_old_psw:
+    .quad 0x180000000,test
+    .short 0
+unaligned:
+    .long 0
diff --git a/tests/tcg/s390x/lpsw.S b/tests/tcg/s390x/lpsw.S
new file mode 100644
index 0000000000..b37dec59b7
--- /dev/null
+++ b/tests/tcg/s390x/lpsw.S
@@ -0,0 +1,36 @@
+/*
+ * Test the LPSW instruction.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+    .org 0x140
+svc_old_psw:
+    .org 0x1c0                         /* supervisor call new PSW */
+    .quad 0x80000000,svc               /* 31-bit mode */
+    .org 0x200                         /* lowcore padding */
+
+    .globl _start
+_start:
+    lpsw short_psw
+lpsw_target:
+    svc 0
+expected_pswa:
+    j failure
+
+svc:
+    clc svc_old_psw(16),expected_psw   /* correct full PSW?
*/ + jne failure + lpswe success_psw +failure: + lpswe failure_psw + + .align 8 +short_psw: + .long 0x90001,0x80000000+lpsw_target /* problem state, + 64-bit mode */ +expected_psw: + .quad 0x1000180000000,expected_pswa /* corresponds to short_psw */ +success_psw: + .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */ +failure_psw: + .quad 0x2000000000000,0 /* disabled wait */ diff --git a/tests/tcg/s390x/lpswe-early.S b/tests/tcg/s390x/lpswe-early.S new file mode 100644 index 0000000000..90a7f213df --- /dev/null +++ b/tests/tcg/s390x/lpswe-early.S @@ -0,0 +1,38 @@ +/* + * Test early exception recognition using LPSWE. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + .org 0x8d +ilc: + .org 0x8e +program_interruption_code: + .org 0x150 +program_old_psw: + .org 0x1D0 /* program new PSW */ + .quad 0,pgm + .org 0x200 /* lowcore padding */ + + .globl _start +_start: + lpswe bad_psw + j failure + +pgm: + chhsi program_interruption_code,0x6 /* specification exception? */ + jne failure + cli ilc,0 /* ilc zero? */ + jne failure + clc program_old_psw(16),bad_psw /* correct old PSW? */ + jne failure + lpswe success_psw +failure: + lpswe failure_psw + + .align 8 +bad_psw: + .quad 0x8000000000000000,0xfedcba9876543210 /* bit 0 set */ +success_psw: + .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */ +failure_psw: + .quad 0x2000000000000,0 /* disabled wait */ diff --git a/tests/tcg/s390x/lpswe-unaligned.S b/tests/tcg/s390x/lpswe-unaligned.S new file mode 100644 index 0000000000..989f249a6a --- /dev/null +++ b/tests/tcg/s390x/lpswe-unaligned.S @@ -0,0 +1,18 @@ +/* + * Test LPSWE from a non-doubleword aligned address. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + .globl test +test: + larl %r1,unaligned +fail: + lpswe 0(%r1) + + .align 8 + .globl expected_old_psw +expected_old_psw: + .quad 0x180000000,fail + .long 0 +unaligned: + .quad 0 diff --git a/tests/tcg/s390x/lrl-unaligned.S b/tests/tcg/s390x/lrl-unaligned.S new file mode 100644 index 0000000000..11eb07f93a --- /dev/null +++ b/tests/tcg/s390x/lrl-unaligned.S @@ -0,0 +1,16 @@ +/* + * Test LRL from a non-word aligned address. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + .globl test +test: + lrl %r1,unaligned + + .align 8 + .globl expected_old_psw +expected_old_psw: + .quad 0x180000000,test + .short 0 +unaligned: + .long 0 diff --git a/tests/tcg/s390x/pgm-specification-softmmu.S b/tests/tcg/s390x/pgm-specification-softmmu.S new file mode 100644 index 0000000000..d534f4e505 --- /dev/null +++ b/tests/tcg/s390x/pgm-specification-softmmu.S @@ -0,0 +1,40 @@ +/* + * Common softmmu code for specification exception testing. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + .section .head + .org 0x8d +ilc: + .org 0x8e +program_interruption_code: + .org 0x150 +program_old_psw: + .org 0x1D0 /* program new PSW */ + .quad 0x180000000,pgm /* 64-bit mode */ + .org 0x200 /* lowcore padding */ + + .globl _start +_start: + lpswe test_psw + +pgm: + chhsi program_interruption_code,0x6 /* PGM_SPECIFICATION? */ + jne failure + lg %r0,expected_old_psw+8 /* ilc adjustment */ + llgc %r1,ilc + agr %r0,%r1 + stg %r0,expected_old_psw+8 + clc expected_old_psw(16),program_old_psw /* correct location? 
*/ + jne failure + lpswe success_psw +failure: + lpswe failure_psw + + .align 8 +test_psw: + .quad 0x180000000,test /* 64-bit mode */ +success_psw: + .quad 0x2000180000000,0xfff /* see is_special_wait_psw() */ +failure_psw: + .quad 0x2000180000000,0 /* disabled wait */ diff --git a/tests/tcg/s390x/pgm-specification-user.c b/tests/tcg/s390x/pgm-specification-user.c new file mode 100644 index 0000000000..9ee6907b7c --- /dev/null +++ b/tests/tcg/s390x/pgm-specification-user.c @@ -0,0 +1,37 @@ +/* + * Common user code for specification exception testing. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include +#include +#include +#include + +extern void test(void); +extern long expected_old_psw[2]; + +static void handle_sigill(int sig, siginfo_t *info, void *ucontext) +{ + if ((long)info->si_addr != expected_old_psw[1]) { + _exit(EXIT_FAILURE); + } + _exit(EXIT_SUCCESS); +} + +int main(void) +{ + struct sigaction act; + int err; + + memset(&act, 0, sizeof(act)); + act.sa_sigaction = handle_sigill; + act.sa_flags = SA_SIGINFO; + err = sigaction(SIGILL, &act, NULL); + assert(err == 0); + + test(); + + return EXIT_FAILURE; +} diff --git a/tests/tcg/s390x/pgm-specification.mak b/tests/tcg/s390x/pgm-specification.mak new file mode 100644 index 0000000000..2999aee26e --- /dev/null +++ b/tests/tcg/s390x/pgm-specification.mak @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# List of specification exception tests. +# Shared between the softmmu and the user makefiles. +PGM_SPECIFICATION_TESTS = \ + br-odd \ + cgrl-unaligned \ + clrl-unaligned \ + crl-unaligned \ + ex-odd \ + lgrl-unaligned \ + llgfrl-unaligned \ + lpswe-unaligned \ + lrl-unaligned \ + stgrl-unaligned \ + strl-unaligned diff --git a/tests/tcg/s390x/rxsbg.c b/tests/tcg/s390x/rxsbg.c new file mode 100644 index 0000000000..4b155db304 --- /dev/null +++ b/tests/tcg/s390x/rxsbg.c @@ -0,0 +1,46 @@ +/* + * Test the RXSBG instruction. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include + +static inline __attribute__((__always_inline__)) void +rxsbg(unsigned long *r1, unsigned long r2, int i3, int i4, int i5, int *cc) +{ + asm("rxsbg %[r1],%[r2],%[i3],%[i4],%[i5]\n" + "ipm %[cc]" + : [r1] "+r" (*r1), [cc] "=r" (*cc) + : [r2] "r" (r2) , [i3] "i" (i3) , [i4] "i" (i4) , [i5] "i" (i5) + : "cc"); + *cc = (*cc >> 28) & 3; +} + +void test_cc0(void) +{ + unsigned long r1 = 6; + int cc; + + rxsbg(&r1, 3, 61 | 0x80, 62, 1, &cc); + assert(r1 == 6); + assert(cc == 0); +} + +void test_cc1(void) +{ + unsigned long r1 = 2; + int cc; + + rxsbg(&r1, 3, 61 | 0x80, 62, 1, &cc); + assert(r1 == 2); + assert(cc == 1); +} + +int main(void) +{ + test_cc0(); + test_cc1(); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/s390x/softmmu.ld b/tests/tcg/s390x/softmmu.ld new file mode 100644 index 0000000000..ea944eaa3c --- /dev/null +++ b/tests/tcg/s390x/softmmu.ld @@ -0,0 +1,20 @@ +/* + * Linker script for the softmmu test kernels. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +ENTRY(_start) + +SECTIONS { + . = 0; + + .text : { + *(.head) + *(.text) + } + + /DISCARD/ : { + *(*) + } +} diff --git a/tests/tcg/s390x/ssm-early.S b/tests/tcg/s390x/ssm-early.S new file mode 100644 index 0000000000..6dfe40c597 --- /dev/null +++ b/tests/tcg/s390x/ssm-early.S @@ -0,0 +1,41 @@ +/* + * Test early exception recognition using SSM. 
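rxsbg.c above exercises ROTATE THEN EXCLUSIVE OR SELECTED BITS. A plain C model of what the two test cases expect, with the operand semantics summarized from them and kept as an assumption: the second operand is rotated left by i5, bits i3..i4 of the rotated value (bit 0 being the most significant) are XORed into r1, the 0x80 bit of i3 is the test bit that suppresses the write-back, and the condition code is 0 when the selected result bits are all zero, 1 otherwise:

    /* C model of the RXSBG cases tested above; assumes 0 < i5 < 64. */
    #include <assert.h>
    #include <stdint.h>

    static int rxsbg_model(uint64_t *r1, uint64_t r2, int i3, int i4, int i5)
    {
        int test_only = i3 & 0x80;                 /* T bit: no write-back */
        i3 &= 0x3f;
        uint64_t rot = (r2 << i5) | (r2 >> (64 - i5));
        /* mask covering bits i3..i4, bit 0 = most significant bit */
        uint64_t mask = (~0ULL >> i3) & (~0ULL << (63 - i4));
        uint64_t res = (*r1 ^ rot) & mask;
        if (!test_only) {
            *r1 = (*r1 & ~mask) | res;
        }
        return res != 0;                           /* cc is 0 or 1 */
    }

    int main(void)
    {
        uint64_t r1 = 6;
        assert(rxsbg_model(&r1, 3, 61 | 0x80, 62, 1) == 0 && r1 == 6);
        r1 = 2;
        assert(rxsbg_model(&r1, 3, 61 | 0x80, 62, 1) == 1 && r1 == 2);
        return 0;
    }
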
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + .org 0x8d +ilc: + .org 0x8e +program_interruption_code: + .org 0x150 +program_old_psw: + .org 0x1D0 /* program new PSW */ + .quad 0,pgm + .org 0x200 /* lowcore padding */ + + .globl _start +_start: + ssm ssm_op +expected_pswa: + j failure + +pgm: + chhsi program_interruption_code,0x6 /* specification exception? */ + jne failure + cli ilc,4 /* ilc for SSM? */ + jne failure + clc program_old_psw(16),expected_old_psw /* correct old PSW? */ + jne failure + lpswe success_psw +failure: + lpswe failure_psw + +ssm_op: + .byte 0x20 /* bit 2 set */ + .align 8 +expected_old_psw: + .quad 0x2000000180000000,expected_pswa /* bit 2 set */ +success_psw: + .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */ +failure_psw: + .quad 0x2000000000000,0 /* disabled wait */ diff --git a/tests/tcg/s390x/stgrl-unaligned.S b/tests/tcg/s390x/stgrl-unaligned.S new file mode 100644 index 0000000000..32df37780a --- /dev/null +++ b/tests/tcg/s390x/stgrl-unaligned.S @@ -0,0 +1,16 @@ +/* + * Test STGRL to a non-doubleword aligned address. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + .globl test +test: + stgrl %r1,unaligned + + .align 8 + .globl expected_old_psw +expected_old_psw: + .quad 0x180000000,test + .long 0 +unaligned: + .quad 0 diff --git a/tests/tcg/s390x/stosm-early.S b/tests/tcg/s390x/stosm-early.S new file mode 100644 index 0000000000..0689924f3a --- /dev/null +++ b/tests/tcg/s390x/stosm-early.S @@ -0,0 +1,41 @@ +/* + * Test early exception recognition using STOSM. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + .org 0x8d +ilc: + .org 0x8e +program_interruption_code: + .org 0x150 +program_old_psw: + .org 0x1D0 /* program new PSW */ + .quad 0,pgm + .org 0x200 /* lowcore padding */ + + .globl _start +_start: + stosm ssm_op,0x10 /* bit 3 set */ +expected_pswa: + j failure + +pgm: + chhsi program_interruption_code,0x6 /* specification exception? */ + jne failure + cli ilc,4 /* ilc for STOSM? */ + jne failure + clc program_old_psw(16),expected_old_psw /* correct old PSW? */ + jne failure + lpswe success_psw +failure: + lpswe failure_psw + +ssm_op: + .byte 0 + .align 8 +expected_old_psw: + .quad 0x1000000180000000,expected_pswa /* bit 3 set */ +success_psw: + .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */ +failure_psw: + .quad 0x2000000000000,0 /* disabled wait */ diff --git a/tests/tcg/s390x/strl-unaligned.S b/tests/tcg/s390x/strl-unaligned.S new file mode 100644 index 0000000000..1d248819f0 --- /dev/null +++ b/tests/tcg/s390x/strl-unaligned.S @@ -0,0 +1,16 @@ +/* + * Test STRL to a non-word aligned address. 
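The lpswe-early, ssm-early and stosm-early tests all probe early exception recognition: introducing a PSW with an invalid mask bit raises a specification exception immediately. What differs is the recorded state, and the tests assert exactly that. When the bad PSW comes from LPSWE, the exception is recognized during PSW loading, the ilc is 0, and the old PSW is the rejected PSW itself; for SSM/STOSM the 4-byte instruction completes first, so the ilc is 4 and the old PSW points past it (the exrl variant reports the 6-byte ilc of EXRL). Summarized as a C table for reference:

    /* The assertions made by the early-exception tests, as data. */
    struct expectation {
        const char *insn;
        int ilc;                 /* lowcore byte 0x8d after the exception */
        int old_psw_is_bad_psw;  /* old PSW equals the rejected PSW? */
    };

    static const struct expectation expectations[] = {
        { "lpswe", 0, 1 },   /* recognized while loading the PSW */
        { "ssm",   4, 0 },   /* old PSW points past the SSM */
        { "stosm", 4, 0 },
        { "exrl",  6, 0 },   /* exrl-ssm: ilc of EXRL, not of the SSM */
    };
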
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + .globl test +test: + strl %r1,unaligned + + .align 8 + .globl expected_old_psw +expected_old_psw: + .quad 0x180000000,test + .short 0 +unaligned: + .long 0 diff --git a/tests/tcg/tricore/Makefile.softmmu-target b/tests/tcg/tricore/Makefile.softmmu-target index d2446af8b4..49e573bc3b 100644 --- a/tests/tcg/tricore/Makefile.softmmu-target +++ b/tests/tcg/tricore/Makefile.softmmu-target @@ -1,7 +1,7 @@ TESTS_PATH = $(SRC_PATH)/tests/tcg/tricore -LDFLAGS = -T$(TESTS_PATH)/link.ld -ASFLAGS = +LDFLAGS = -T$(TESTS_PATH)/link.ld --mcpu=tc162 +ASFLAGS = -mtc162 TESTS += test_abs.tst TESTS += test_bmerge.tst @@ -19,7 +19,7 @@ TESTS += test_madd.tst TESTS += test_msub.tst TESTS += test_muls.tst -QEMU_OPTS += -M tricore_testboard -nographic -kernel +QEMU_OPTS += -M tricore_testboard -cpu tc27x -nographic -kernel %.pS: $(TESTS_PATH)/%.S $(HOST_CC) -E -o $@ $< @@ -29,3 +29,6 @@ QEMU_OPTS += -M tricore_testboard -nographic -kernel %.tst: %.o $(LD) $(LDFLAGS) $< -o $@ + +# We don't currently support the multiarch system tests +undefine MULTIARCH_TESTS diff --git a/tests/tcg/tricore/macros.h b/tests/tcg/tricore/macros.h index ec4f5bff52..3df2e0de82 100644 --- a/tests/tcg/tricore/macros.h +++ b/tests/tcg/tricore/macros.h @@ -174,7 +174,7 @@ test_ ## num: \ TEST_CASE_E(num, res_lo, res_hi, \ LI(DREG_RS1, rs1); \ rstv; \ - insn EREG_CALC_RESULT, imm1, DREG_RS1, imm2); \ + insn EREG_CALC_RESULT, imm1, DREG_RS1, imm2; \ ) diff --git a/tests/tcg/xtensa/Makefile.softmmu-target b/tests/tcg/xtensa/Makefile.softmmu-target index 973e55298e..78bf72dfaa 100644 --- a/tests/tcg/xtensa/Makefile.softmmu-target +++ b/tests/tcg/xtensa/Makefile.softmmu-target @@ -2,7 +2,8 @@ # Xtensa softmmu tests # -ifneq ($(TARGET_BIG_ENDIAN),y) +CORE=dc232b +ifneq ($(shell $(QEMU) -cpu help | grep -w $(CORE)),) XTENSA_SRC = $(SRC_PATH)/tests/tcg/xtensa XTENSA_ALL = $(filter-out $(XTENSA_SRC)/linker.ld.S,$(wildcard $(XTENSA_SRC)/*.S)) @@ -15,7 +16,6 @@ XTENSA_USABLE_TESTS = $(filter-out $(XTENSA_BROKEN_TESTS), $(XTENSA_TESTS)) TESTS += $(XTENSA_USABLE_TESTS) VPATH += $(XTENSA_SRC) -CORE=dc232b QEMU_OPTS+=-M sim -cpu $(CORE) -nographic -semihosting -icount 6 $(EXTFLAGS) -kernel INCLUDE_DIRS = $(SRC_PATH)/target/xtensa/core-$(CORE) @@ -26,6 +26,7 @@ ASFLAGS = -Wa,--no-absolute-literals LDFLAGS = -Tlinker.ld -nostartfiles -nostdlib CRT = crt.o vectors.o +CLEANFILES += linker.ld linker.ld: linker.ld.S $(CC) $(XTENSA_INC) -E -P $< -o $@ @@ -40,3 +41,6 @@ $(XTENSA_USABLE_TESTS): linker.ld macros.inc $(CRT) Makefile.softmmu-target $(CC) $(XTENSA_INC) $(ASFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) $(NOSTDFLAGS) $(CRT) endif + +# We don't currently support the multiarch system tests +undefine MULTIARCH_TESTS diff --git a/tests/tcg/xtensaeb/Makefile.softmmu-target b/tests/tcg/xtensaeb/Makefile.softmmu-target new file mode 100644 index 0000000000..4204a96d53 --- /dev/null +++ b/tests/tcg/xtensaeb/Makefile.softmmu-target @@ -0,0 +1,5 @@ +# +# Xtensa softmmu tests +# + +include $(SRC_PATH)/tests/tcg/xtensa/Makefile.softmmu-target diff --git a/tests/unit/meson.build b/tests/unit/meson.build index ffa444f432..93977cc32d 100644 --- a/tests/unit/meson.build +++ b/tests/unit/meson.build @@ -11,6 +11,7 @@ tests = { 'check-qobject': [], 'check-qjson': [], 'check-qlit': [], + 'test-error-report': [], 'test-qobject-output-visitor': [testqapi], 'test-clone-visitor': [testqapi], 'test-qobject-input-visitor': [testqapi], @@ -35,6 +36,7 @@ tests = { 'test-rcu-slist': [], 'test-qdist': [], 'test-qht': [], + 
'test-qtree': [], 'test-bitops': [], 'test-bitcnt': [], 'test-qgraph': ['../qtest/libqos/qgraph.c'], @@ -46,8 +48,8 @@ tests = { 'test-uuid': [], 'ptimer-test': ['ptimer-test-stubs.c', meson.project_source_root() / 'hw/core/ptimer.c'], 'test-qapi-util': [], - 'test-smp-parse': [qom, meson.project_source_root() / 'hw/core/machine-smp.c'], 'test-interval-tree': [], + 'test-xs-node': [qom], } if have_system or have_tools @@ -112,7 +114,10 @@ if have_block tests += {'test-crypto-xts': [crypto, io]} endif if 'CONFIG_POSIX' in config_host - tests += {'test-image-locking': [testblock]} + tests += { + 'test-image-locking': [testblock], + 'test-nested-aio-poll': [testblock], + } endif if config_host_data.get('CONFIG_REPLICATION') tests += {'test-replication': [testblock]} @@ -120,9 +125,6 @@ if have_block if nettle.found() or gcrypt.found() tests += {'test-crypto-pbkdf': [io]} endif - if config_host_data.get('CONFIG_EPOLL_CREATE1') - tests += {'test-fdmon-epoll': [testblock]} - endif endif if have_system @@ -134,6 +136,7 @@ if have_system 'test-util-sockets': ['socket-helpers.c'], 'test-base64': [], 'test-bufferiszero': [], + 'test-smp-parse': [qom, meson.project_source_root() / 'hw/core/machine-smp.c'], 'test-vmstate': [migration, io], 'test-yank': ['socket-helpers.c', qom, io, chardev] } @@ -144,7 +147,7 @@ if have_system # Some tests: test-char, test-qdev-global-props, and test-qga, # are not runnable under TSan due to a known issue. # https://github.com/google/sanitizers/issues/1116 - if 'CONFIG_TSAN' not in config_host + if not get_option('tsan') if 'CONFIG_POSIX' in config_host tests += { 'test-char': ['socket-helpers.c', qom, io, chardev] diff --git a/tests/unit/ptimer-test-stubs.c b/tests/unit/ptimer-test-stubs.c index f5e75a96b6..8c9407c560 100644 --- a/tests/unit/ptimer-test-stubs.c +++ b/tests/unit/ptimer-test-stubs.c @@ -10,7 +10,7 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" -#include "sysemu/replay.h" +#include "exec/replay-core.h" #include "migration/vmstate.h" #include "ptimer-test.h" @@ -107,7 +107,8 @@ int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask) return deadline; } -QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name) +QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name, + MemReentrancyGuard *reentrancy_guard) { QEMUBH *bh = g_new(QEMUBH, 1); diff --git a/tests/unit/socket-helpers.c b/tests/unit/socket-helpers.c index eecadf3a3c..6de27baee2 100644 --- a/tests/unit/socket-helpers.c +++ b/tests/unit/socket-helpers.c @@ -160,7 +160,7 @@ void socket_check_afunix_support(bool *has_afunix) int fd; fd = socket(PF_UNIX, SOCK_STREAM, 0); - closesocket(fd); + close(fd); #ifdef _WIN32 *has_afunix = (fd != (int)INVALID_SOCKET); diff --git a/tests/unit/test-aio-multithread.c b/tests/unit/test-aio-multithread.c index a555cc8835..80c5d4e2e6 100644 --- a/tests/unit/test-aio-multithread.c +++ b/tests/unit/test-aio-multithread.c @@ -107,8 +107,7 @@ static void test_lifecycle(void) /* aio_co_schedule test. 
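The rework of this test below replaces the global now_stopping flag with a per-context stop[] array because of a lost-wakeup hazard: once finish_cb has been scheduled there is no guarantee the coroutine will ever be woken again, so it must see the stop request before its next yield. The hazard in miniature, as a C sketch (illustrative pseudo-driver, not the test itself):

    /* The lost-wakeup window the stop[] rework below closes. */
    static bool stop_requested;       /* old scheme: set after the fact */

    static void coroutine_fn worker(void)
    {
        while (!stop_requested) {
            /* window: stop_requested set here, last wakeup consumed... */
            qemu_coroutine_yield();   /* ...then nobody wakes us again */
        }
    }

    /*
     * New scheme: finish_cb() sets stop[id] *before* scheduling the
     * coroutine one last time, so the loop condition is already false
     * when the coroutine runs.
     */
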
*/ static Coroutine *to_schedule[NUM_CONTEXTS]; - -static bool now_stopping; +static bool stop[NUM_CONTEXTS]; static int count_retry; static int count_here; @@ -136,6 +135,7 @@ static bool schedule_next(int n) static void finish_cb(void *opaque) { + stop[id] = true; schedule_next(id); } @@ -143,13 +143,19 @@ static coroutine_fn void test_multi_co_schedule_entry(void *opaque) { g_assert(to_schedule[id] == NULL); - while (!qatomic_mb_read(&now_stopping)) { + /* + * The next iteration will set to_schedule[id] again, but once finish_cb + * is scheduled there is no guarantee that it will actually be woken up, + * so at that point it must not go to sleep. + */ + while (!stop[id]) { int n; n = g_test_rand_int_range(0, NUM_CONTEXTS); schedule_next(n); qatomic_mb_set(&to_schedule[id], qemu_coroutine_self()); + /* finish_cb can run here. */ qemu_coroutine_yield(); g_assert(to_schedule[id] == NULL); } @@ -161,7 +167,6 @@ static void test_multi_co_schedule(int seconds) int i; count_here = count_other = count_retry = 0; - now_stopping = false; create_aio_contexts(); for (i = 0; i < NUM_CONTEXTS; i++) { @@ -171,10 +176,10 @@ static void test_multi_co_schedule(int seconds) g_usleep(seconds * 1000000); - qatomic_mb_set(&now_stopping, true); + /* Guarantee that each AioContext is woken up from its last wait. */ for (i = 0; i < NUM_CONTEXTS; i++) { ctx_run(i, finish_cb, NULL); - to_schedule[i] = NULL; + g_assert(to_schedule[i] == NULL); } join_aio_contexts(); @@ -199,10 +204,11 @@ static uint32_t atomic_counter; static uint32_t running; static uint32_t counter; static CoMutex comutex; +static bool now_stopping; static void coroutine_fn test_multi_co_mutex_entry(void *opaque) { - while (!qatomic_mb_read(&now_stopping)) { + while (!qatomic_read(&now_stopping)) { qemu_co_mutex_lock(&comutex); counter++; qemu_co_mutex_unlock(&comutex); @@ -236,7 +242,7 @@ static void test_multi_co_mutex(int threads, int seconds) g_usleep(seconds * 1000000); - qatomic_mb_set(&now_stopping, true); + qatomic_set(&now_stopping, true); while (running > 0) { g_usleep(100000); } @@ -327,7 +333,7 @@ static void mcs_mutex_unlock(void) static void test_multi_fair_mutex_entry(void *opaque) { - while (!qatomic_mb_read(&now_stopping)) { + while (!qatomic_read(&now_stopping)) { mcs_mutex_lock(); counter++; mcs_mutex_unlock(); @@ -355,7 +361,7 @@ static void test_multi_fair_mutex(int threads, int seconds) g_usleep(seconds * 1000000); - qatomic_mb_set(&now_stopping, true); + qatomic_set(&now_stopping, true); while (running > 0) { g_usleep(100000); } @@ -383,7 +389,7 @@ static QemuMutex mutex; static void test_multi_mutex_entry(void *opaque) { - while (!qatomic_mb_read(&now_stopping)) { + while (!qatomic_read(&now_stopping)) { qemu_mutex_lock(&mutex); counter++; qemu_mutex_unlock(&mutex); @@ -411,7 +417,7 @@ static void test_multi_mutex(int threads, int seconds) g_usleep(seconds * 1000000); - qatomic_mb_set(&now_stopping, true); + qatomic_set(&now_stopping, true); while (running > 0) { g_usleep(100000); } diff --git a/tests/unit/test-aio.c b/tests/unit/test-aio.c index 321d7ab01a..519440eed3 100644 --- a/tests/unit/test-aio.c +++ b/tests/unit/test-aio.c @@ -130,7 +130,7 @@ static void *test_acquire_thread(void *opaque) static void set_event_notifier(AioContext *ctx, EventNotifier *notifier, EventNotifierHandler *handler) { - aio_set_event_notifier(ctx, notifier, false, handler, NULL, NULL); + aio_set_event_notifier(ctx, notifier, handler, NULL, NULL); } static void dummy_notifier_read(EventNotifier *n) @@ -383,30 +383,6 @@ static void 
test_flush_event_notifier(void) event_notifier_cleanup(&data.e); } -static void test_aio_external_client(void) -{ - int i, j; - - for (i = 1; i < 3; i++) { - EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true }; - event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL, NULL); - event_notifier_set(&data.e); - for (j = 0; j < i; j++) { - aio_disable_external(ctx); - } - for (j = 0; j < i; j++) { - assert(!aio_poll(ctx, false)); - assert(event_notifier_test_and_clear(&data.e)); - event_notifier_set(&data.e); - aio_enable_external(ctx); - } - assert(aio_poll(ctx, false)); - set_event_notifier(ctx, &data.e, NULL); - event_notifier_cleanup(&data.e); - } -} - static void test_wait_event_notifier_noflush(void) { EventNotifierTestData data = { .n = 0 }; @@ -935,7 +911,6 @@ int main(int argc, char **argv) g_test_add_func("/aio/event/wait", test_wait_event_notifier); g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush); g_test_add_func("/aio/event/flush", test_flush_event_notifier); - g_test_add_func("/aio/external-client", test_aio_external_client); g_test_add_func("/aio/timer/schedule", test_timer_schedule); g_test_add_func("/aio/coroutine/queue-chaining", test_queue_chaining); diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c index d9d3807062..ccc453c29e 100644 --- a/tests/unit/test-bdrv-drain.c +++ b/tests/unit/test-bdrv-drain.c @@ -188,6 +188,25 @@ static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState } } +static BlockBackend * no_coroutine_fn test_setup(void) +{ + BlockBackend *blk; + BlockDriverState *bs, *backing; + + blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); + bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR, + &error_abort); + blk_insert_bs(blk, bs, &error_abort); + + backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort); + bdrv_set_backing_hd(bs, backing, &error_abort); + + bdrv_unref(backing); + bdrv_unref(bs); + + return blk; +} + static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs) { if (drain_type != BDRV_DRAIN_ALL) { @@ -199,25 +218,19 @@ static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState * } } -static void test_drv_cb_common(enum drain_type drain_type, bool recursive) +static void test_drv_cb_common(BlockBackend *blk, enum drain_type drain_type, + bool recursive) { - BlockBackend *blk; - BlockDriverState *bs, *backing; + BlockDriverState *bs = blk_bs(blk); + BlockDriverState *backing = bs->backing->bs; BDRVTestState *s, *backing_s; BlockAIOCB *acb; int aio_ret; QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0); - blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); - bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR, - &error_abort); s = bs->opaque; - blk_insert_bs(blk, bs, &error_abort); - - backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort); backing_s = backing->opaque; - bdrv_set_backing_hd(bs, backing, &error_abort); /* Simple bdrv_drain_all_begin/end pair, check that CBs are called */ g_assert_cmpint(s->drain_count, ==, 0); @@ -252,44 +265,53 @@ static void test_drv_cb_common(enum drain_type drain_type, bool recursive) g_assert_cmpint(s->drain_count, ==, 0); g_assert_cmpint(backing_s->drain_count, ==, 0); - - bdrv_unref(backing); - bdrv_unref(bs); - blk_unref(blk); } static void test_drv_cb_drain_all(void) { - test_drv_cb_common(BDRV_DRAIN_ALL, true); + BlockBackend *blk = 
test_setup(); + test_drv_cb_common(blk, BDRV_DRAIN_ALL, true); + blk_unref(blk); } static void test_drv_cb_drain(void) { - test_drv_cb_common(BDRV_DRAIN, false); + BlockBackend *blk = test_setup(); + test_drv_cb_common(blk, BDRV_DRAIN, false); + blk_unref(blk); +} + +static void coroutine_fn test_drv_cb_co_drain_all_entry(void) +{ + BlockBackend *blk = blk_all_next(NULL); + test_drv_cb_common(blk, BDRV_DRAIN_ALL, true); } static void test_drv_cb_co_drain_all(void) { - call_in_coroutine(test_drv_cb_drain_all); + BlockBackend *blk = test_setup(); + call_in_coroutine(test_drv_cb_co_drain_all_entry); + blk_unref(blk); +} + +static void coroutine_fn test_drv_cb_co_drain_entry(void) +{ + BlockBackend *blk = blk_all_next(NULL); + test_drv_cb_common(blk, BDRV_DRAIN, false); } static void test_drv_cb_co_drain(void) { - call_in_coroutine(test_drv_cb_drain); + BlockBackend *blk = test_setup(); + call_in_coroutine(test_drv_cb_co_drain_entry); + blk_unref(blk); } -static void test_quiesce_common(enum drain_type drain_type, bool recursive) +static void test_quiesce_common(BlockBackend *blk, enum drain_type drain_type, + bool recursive) { - BlockBackend *blk; - BlockDriverState *bs, *backing; - - blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); - bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR, - &error_abort); - blk_insert_bs(blk, bs, &error_abort); - - backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort); - bdrv_set_backing_hd(bs, backing, &error_abort); + BlockDriverState *bs = blk_bs(blk); + BlockDriverState *backing = bs->backing->bs; g_assert_cmpint(bs->quiesce_counter, ==, 0); g_assert_cmpint(backing->quiesce_counter, ==, 0); @@ -307,30 +329,46 @@ static void test_quiesce_common(enum drain_type drain_type, bool recursive) g_assert_cmpint(bs->quiesce_counter, ==, 0); g_assert_cmpint(backing->quiesce_counter, ==, 0); - - bdrv_unref(backing); - bdrv_unref(bs); - blk_unref(blk); } static void test_quiesce_drain_all(void) { - test_quiesce_common(BDRV_DRAIN_ALL, true); + BlockBackend *blk = test_setup(); + test_quiesce_common(blk, BDRV_DRAIN_ALL, true); + blk_unref(blk); } static void test_quiesce_drain(void) { - test_quiesce_common(BDRV_DRAIN, false); + BlockBackend *blk = test_setup(); + test_quiesce_common(blk, BDRV_DRAIN, false); + blk_unref(blk); +} + +static void coroutine_fn test_quiesce_co_drain_all_entry(void) +{ + BlockBackend *blk = blk_all_next(NULL); + test_quiesce_common(blk, BDRV_DRAIN_ALL, true); } static void test_quiesce_co_drain_all(void) { - call_in_coroutine(test_quiesce_drain_all); + BlockBackend *blk = test_setup(); + call_in_coroutine(test_quiesce_co_drain_all_entry); + blk_unref(blk); +} + +static void coroutine_fn test_quiesce_co_drain_entry(void) +{ + BlockBackend *blk = blk_all_next(NULL); + test_quiesce_common(blk, BDRV_DRAIN, false); } static void test_quiesce_co_drain(void) { - call_in_coroutine(test_quiesce_drain); + BlockBackend *blk = test_setup(); + call_in_coroutine(test_quiesce_co_drain_entry); + blk_unref(blk); } static void test_nested(void) @@ -435,7 +473,6 @@ static void test_graph_change_drain_all(void) g_assert_cmpint(bs_b->quiesce_counter, ==, 0); g_assert_cmpint(b_s->drain_count, ==, 0); - g_assert_cmpint(qemu_get_aio_context()->external_disable_cnt, ==, 0); bdrv_unref(bs_b); blk_unref(blk_b); @@ -445,19 +482,19 @@ struct test_iothread_data { BlockDriverState *bs; enum drain_type drain_type; int *aio_ret; + bool co_done; }; -static void test_iothread_drain_entry(void *opaque) +static void coroutine_fn 
test_iothread_drain_co_entry(void *opaque) { struct test_iothread_data *data = opaque; - aio_context_acquire(bdrv_get_aio_context(data->bs)); do_drain_begin(data->drain_type, data->bs); g_assert_cmpint(*data->aio_ret, ==, 0); do_drain_end(data->drain_type, data->bs); - aio_context_release(bdrv_get_aio_context(data->bs)); - qemu_event_set(&done_event); + data->co_done = true; + aio_wait_kick(); } static void test_iothread_aio_cb(void *opaque, int ret) @@ -493,6 +530,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) BlockDriverState *bs; BDRVTestState *s; BlockAIOCB *acb; + Coroutine *co; int aio_ret; struct test_iothread_data data; @@ -571,8 +609,9 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) } break; case 1: - aio_bh_schedule_oneshot(ctx_a, test_iothread_drain_entry, &data); - qemu_event_wait(&done_event); + co = qemu_coroutine_create(test_iothread_drain_co_entry, &data); + aio_co_enter(ctx_a, co); + AIO_WAIT_WHILE_UNLOCKED(NULL, !data.co_done); break; default: g_assert_not_reached(); @@ -966,8 +1005,6 @@ static void coroutine_fn test_co_delete_by_drain(void *opaque) void *buffer = g_malloc(65536); QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536); - GRAPH_RDLOCK_GUARD(); - /* Pretend some internal write operation from parent to child. * Important: We have to read from the child, not from the parent! * Draining works by first propagating it all up the tree to the @@ -976,12 +1013,14 @@ static void coroutine_fn test_co_delete_by_drain(void *opaque) * everything will be drained before we go back down the tree, but * we do not want that. We want to be in the middle of draining * when this following requests returns. */ + bdrv_graph_co_rdlock(); bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0); + bdrv_graph_co_rdunlock(); g_assert_cmpint(bs->refcnt, ==, 1); if (!dbdd->detach_instead_of_delete) { - blk_unref(blk); + blk_co_unref(blk); } else { BdrvChild *c, *next_c; QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) { diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c index 3a5e1eb2c4..f081c09729 100644 --- a/tests/unit/test-block-iothread.c +++ b/tests/unit/test-block-iothread.c @@ -833,9 +833,9 @@ static void test_attach_second_node(void) qdict_put_str(options, "driver", "raw"); qdict_put_str(options, "file", "base"); - aio_context_acquire(ctx); + aio_context_acquire(main_ctx); filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort); - aio_context_release(ctx); + aio_context_release(main_ctx); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(bs) == ctx); diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c index c0426bd10c..a130f6fefb 100644 --- a/tests/unit/test-blockjob.c +++ b/tests/unit/test-blockjob.c @@ -531,6 +531,13 @@ int main(int argc, char **argv) g_test_add_func("/blockjob/cancel/standby", test_cancel_standby); g_test_add_func("/blockjob/cancel/pending", test_cancel_pending); g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded); - g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby); + + /* + * This test is flaky and sometimes fails in CI and otherwise: + * don't run unless user opts in via environment variable. 
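The conversion of test_iothread_drain_entry above, from a oneshot BH plus QemuEvent to a coroutine, uses the standard AIO_WAIT_WHILE handshake: the coroutine sets a completion flag and calls aio_wait_kick(), while the submitting thread spins in AIO_WAIT_WHILE_UNLOCKED. The minimal shape of that pattern, as a sketch with assumed names (the QEMU calls are the ones used in the hunks above):

    /* Completion handshake used by test_iothread_drain_co_entry above. */
    typedef struct {
        bool co_done;
    } Work;

    static void coroutine_fn work_co(void *opaque)
    {
        Work *w = opaque;
        /* ...perform the drain begin/end in the IOThread's context... */
        w->co_done = true;
        aio_wait_kick();                    /* wake AIO_WAIT_WHILE below */
    }

    static void run_in_iothread(AioContext *ctx)
    {
        Work w = { .co_done = false };
        Coroutine *co = qemu_coroutine_create(work_co, &w);
        aio_co_enter(ctx, co);
        AIO_WAIT_WHILE_UNLOCKED(NULL, !w.co_done);
    }
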
+ */ + if (getenv("QEMU_TEST_FLAKY_TESTS")) { + g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby); + } return g_test_run(); } diff --git a/tests/unit/test-crypto-secret.c b/tests/unit/test-crypto-secret.c index 34a4aecc12..147b4af828 100644 --- a/tests/unit/test-crypto-secret.c +++ b/tests/unit/test-crypto-secret.c @@ -24,7 +24,7 @@ #include "crypto/secret.h" #include "qapi/error.h" #include "qemu/module.h" -#ifdef CONFIG_KEYUTILS +#if defined(CONFIG_KEYUTILS) && defined(CONFIG_SECRET_KEYRING) #include "crypto/secret_keyring.h" #include #endif @@ -128,7 +128,7 @@ static void test_secret_indirect_emptyfile(void) g_free(fname); } -#ifdef CONFIG_KEYUTILS +#if defined(CONFIG_KEYUTILS) && defined(CONFIG_SECRET_KEYRING) #define DESCRIPTION "qemu_test_secret" #define PAYLOAD "Test Payload" @@ -268,7 +268,7 @@ static void test_secret_keyring_bad_key_access_right(void) keyctl_unlink(key, KEY_SPEC_PROCESS_KEYRING); } -#endif /* CONFIG_KEYUTILS */ +#endif /* CONFIG_KEYUTILS && CONFIG_SECRET_KEYRING */ static void test_secret_noconv_base64_good(void) { @@ -571,7 +571,7 @@ int main(int argc, char **argv) g_test_add_func("/crypto/secret/indirect/emptyfile", test_secret_indirect_emptyfile); -#ifdef CONFIG_KEYUTILS +#if defined(CONFIG_KEYUTILS) && defined(CONFIG_SECRET_KEYRING) g_test_add_func("/crypto/secret/keyring/good", test_secret_keyring_good); g_test_add_func("/crypto/secret/keyring/revoked_key", @@ -582,7 +582,7 @@ int main(int argc, char **argv) test_secret_keyring_bad_serial_key); g_test_add_func("/crypto/secret/keyring/bad_key_access_right", test_secret_keyring_bad_key_access_right); -#endif /* CONFIG_KEYUTILS */ +#endif /* CONFIG_KEYUTILS && CONFIG_SECRET_KEYRING */ g_test_add_func("/crypto/secret/noconv/base64/good", test_secret_noconv_base64_good); diff --git a/tests/unit/test-error-report.c b/tests/unit/test-error-report.c new file mode 100644 index 0000000000..54319c86c9 --- /dev/null +++ b/tests/unit/test-error-report.c @@ -0,0 +1,139 @@ +/* + * Error reporting test + * + * Copyright (C) 2022 Red Hat Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
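Every case in test-error-report.c below relies on GLib's subprocess traps: the test re-executes itself, the child produces the stderr output under test, and the parent matches the captured stderr against a glob pattern. The skeleton of that pattern, reduced to a standalone sketch:

    /* GLib subprocess-trap skeleton used by each case in this file. */
    #include <glib.h>

    static void test_case(void)
    {
        if (g_test_subprocess()) {
            /* child: produce the output under test, then return */
            g_printerr("prog: some message\n");
            return;
        }

        /* parent: rerun this test in a child and match its stderr */
        g_test_trap_subprocess(NULL, 0, 0);
        g_test_trap_assert_passed();
        g_test_trap_assert_stderr("prog: some message*");
    }
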
+ */ + +#include "qemu/osdep.h" +#include "glib-compat.h" +#include + +#include "qemu/error-report.h" +#include "qapi/error.h" + +static void +test_error_report_simple(void) +{ + if (g_test_subprocess()) { + error_report("%s", "test error"); + warn_report("%s", "test warn"); + info_report("%s", "test info"); + return; + } + + g_test_trap_subprocess(NULL, 0, 0); + g_test_trap_assert_passed(); + g_test_trap_assert_stderr("\ +test-error-report: test error*\ +test-error-report: warning: test warn*\ +test-error-report: info: test info*\ +"); +} + +static void +test_error_report_loc(void) +{ + if (g_test_subprocess()) { + loc_set_file("some-file.c", 7717); + error_report("%s", "test error1"); + loc_set_none(); + error_report("%s", "test error2"); + return; + } + + g_test_trap_subprocess(NULL, 0, 0); + g_test_trap_assert_passed(); + g_test_trap_assert_stderr("\ +test-error-report:some-file.c:7717: test error1*\ +test-error-report: test error2*\ +"); +} + +static void +test_error_report_glog(void) +{ + if (g_test_subprocess()) { + g_message("gmessage"); + return; + } + + g_test_trap_subprocess(NULL, 0, 0); + g_test_trap_assert_passed(); + g_test_trap_assert_stderr("test-error-report: info: gmessage*"); +} + +static void +test_error_report_once(void) +{ + int i; + + if (g_test_subprocess()) { + for (i = 0; i < 3; i++) { + warn_report_once("warn"); + error_report_once("err"); + } + return; + } + + g_test_trap_subprocess(NULL, 0, 0); + g_test_trap_assert_passed(); + g_test_trap_assert_stderr("\ +test-error-report: warning: warn*\ +test-error-report: err*\ +"); +} + +static void +test_error_report_timestamp(void) +{ + if (g_test_subprocess()) { + message_with_timestamp = true; + warn_report("warn"); + error_report("err"); + return; + } + + g_test_trap_subprocess(NULL, 0, 0); + g_test_trap_assert_passed(); + g_test_trap_assert_stderr("\ +*-*-*:*:* test-error-report: warning: warn*\ +*-*-*:*:* test-error-report: err*\ +"); +} + +static void +test_error_warn(void) +{ + if (g_test_subprocess()) { + error_setg(&error_warn, "Testing &error_warn"); + return; + } + + g_test_trap_subprocess(NULL, 0, 0); + g_test_trap_assert_passed(); + g_test_trap_assert_stderr("\ +test-error-report: warning: Testing &error_warn*\ +"); +} + + +int +main(int argc, char *argv[]) +{ + setlocale(LC_ALL, ""); + + g_test_init(&argc, &argv, NULL); + error_init("test-error-report"); + + g_test_add_func("/error-report/simple", test_error_report_simple); + g_test_add_func("/error-report/loc", test_error_report_loc); + g_test_add_func("/error-report/glog", test_error_report_glog); + g_test_add_func("/error-report/once", test_error_report_once); + g_test_add_func("/error-report/timestamp", test_error_report_timestamp); + g_test_add_func("/error-report/warn", test_error_warn); + + return g_test_run(); +} diff --git a/tests/unit/test-fdmon-epoll.c b/tests/unit/test-fdmon-epoll.c deleted file mode 100644 index ef5a856d09..0000000000 --- a/tests/unit/test-fdmon-epoll.c +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * fdmon-epoll tests - * - * Copyright (c) 2020 Red Hat, Inc. 
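test_error_report_once() above loops three times yet expects exactly one warning and one error line: the *_report_once() helpers latch after the first call. A plausible sketch of that latch as a statement-expression macro; the real macros live in include/qemu/error-report.h, this only illustrates the idea:

    /* The once-only latch that error_report_once() and friends imply. */
    #define report_once(report_fn, ...)            \
        ({                                         \
            static bool printed;                   \
            bool first = !printed;                 \
            if (first) {                           \
                printed = true;                    \
                report_fn(__VA_ARGS__);            \
            }                                      \
            first;                                 \
        })
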
- */ - -#include "qemu/osdep.h" -#include "block/aio.h" -#include "qapi/error.h" -#include "qemu/main-loop.h" - -static AioContext *ctx; - -static void dummy_fd_handler(EventNotifier *notifier) -{ - event_notifier_test_and_clear(notifier); -} - -static void add_event_notifiers(EventNotifier *notifiers, size_t n) -{ - for (size_t i = 0; i < n; i++) { - event_notifier_init(¬ifiers[i], false); - aio_set_event_notifier(ctx, ¬ifiers[i], false, - dummy_fd_handler, NULL, NULL); - } -} - -static void remove_event_notifiers(EventNotifier *notifiers, size_t n) -{ - for (size_t i = 0; i < n; i++) { - aio_set_event_notifier(ctx, ¬ifiers[i], false, NULL, NULL, NULL); - event_notifier_cleanup(¬ifiers[i]); - } -} - -/* Check that fd handlers work when external clients are disabled */ -static void test_external_disabled(void) -{ - EventNotifier notifiers[100]; - - /* fdmon-epoll is only enabled when many fd handlers are registered */ - add_event_notifiers(notifiers, G_N_ELEMENTS(notifiers)); - - event_notifier_set(¬ifiers[0]); - assert(aio_poll(ctx, true)); - - aio_disable_external(ctx); - event_notifier_set(¬ifiers[0]); - assert(aio_poll(ctx, true)); - aio_enable_external(ctx); - - remove_event_notifiers(notifiers, G_N_ELEMENTS(notifiers)); -} - -int main(int argc, char **argv) -{ - /* - * This code relies on the fact that fdmon-io_uring disables itself when - * the glib main loop is in use. The main loop uses fdmon-poll and upgrades - * to fdmon-epoll when the number of fds exceeds a threshold. - */ - qemu_init_main_loop(&error_fatal); - ctx = qemu_get_aio_context(); - - while (g_main_context_iteration(NULL, false)) { - /* Do nothing */ - } - - g_test_init(&argc, &argv, NULL); - g_test_add_func("/fdmon-epoll/external-disabled", test_external_disabled); - return g_test_run(); -} diff --git a/tests/unit/test-io-channel-command.c b/tests/unit/test-io-channel-command.c index 425e2f5594..4f022617df 100644 --- a/tests/unit/test-io-channel-command.c +++ b/tests/unit/test-io-channel-command.c @@ -31,17 +31,18 @@ static char *socat = NULL; -#ifndef _WIN32 +#if !defined(_WIN32) && !defined(CONFIG_DARWIN) static void test_io_channel_command_fifo(bool async) { g_autofree gchar *tmpdir = g_dir_make_tmp("qemu-test-io-channel.XXXXXX", NULL); - g_autofree gchar *fifo = g_strdup_printf("%s/%s", tmpdir, TEST_FIFO); + g_autofree gchar *fifo = g_build_filename(tmpdir, TEST_FIFO, NULL); g_autofree gchar *srcargs = g_strdup_printf("%s - PIPE:%s,wronly", socat, fifo); g_autofree gchar *dstargs = g_strdup_printf("%s PIPE:%s,rdonly -", socat, fifo); g_auto(GStrv) srcargv = g_strsplit(srcargs, " ", -1); g_auto(GStrv) dstargv = g_strsplit(dstargs, " ", -1); QIOChannel *src, *dst; QIOChannelTest *test; + int err; if (mkfifo(fifo, 0600)) { g_error("mkfifo: %s", strerror(errno)); @@ -61,7 +62,10 @@ static void test_io_channel_command_fifo(bool async) object_unref(OBJECT(src)); object_unref(OBJECT(dst)); - g_rmdir(tmpdir); + err = g_unlink(fifo); + g_assert(err == 0); + err = g_rmdir(tmpdir); + g_assert(err == 0); } static void test_io_channel_command_fifo_async(void) @@ -128,7 +132,7 @@ int main(int argc, char **argv) socat = g_find_program_in_path("socat"); -#ifndef _WIN32 +#if !defined(_WIN32) && !defined(CONFIG_DARWIN) g_test_add_func("/io/channel/command/fifo/sync", test_io_channel_command_fifo_sync); g_test_add_func("/io/channel/command/fifo/async", diff --git a/tests/unit/test-nested-aio-poll.c b/tests/unit/test-nested-aio-poll.c new file mode 100644 index 0000000000..db33742af3 --- /dev/null +++ 
b/tests/unit/test-nested-aio-poll.c @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Test that poll handlers are not re-entrant in nested aio_poll() + * + * Copyright Red Hat + * + * Poll handlers are usually level-triggered. That means they continue firing + * until the condition is reset (e.g. a virtqueue becomes empty). If a poll + * handler calls nested aio_poll() before the condition is reset, then infinite + * recursion occurs. + * + * aio_poll() is supposed to prevent this by disabling poll handlers in nested + * aio_poll() calls. This test case checks that this is indeed what happens. + */ +#include "qemu/osdep.h" +#include "block/aio.h" +#include "qapi/error.h" + +typedef struct { + AioContext *ctx; + + /* This is the EventNotifier that drives the test */ + EventNotifier poll_notifier; + + /* This EventNotifier is only used to wake aio_poll() */ + EventNotifier dummy_notifier; + + bool nested; +} TestData; + +static void io_read(EventNotifier *notifier) +{ + fprintf(stderr, "%s %p\n", __func__, notifier); + event_notifier_test_and_clear(notifier); +} + +static bool io_poll_true(void *opaque) +{ + fprintf(stderr, "%s %p\n", __func__, opaque); + return true; +} + +static bool io_poll_false(void *opaque) +{ + fprintf(stderr, "%s %p\n", __func__, opaque); + return false; +} + +static void io_poll_ready(EventNotifier *notifier) +{ + TestData *td = container_of(notifier, TestData, poll_notifier); + + fprintf(stderr, "> %s\n", __func__); + + g_assert(!td->nested); + td->nested = true; + + /* Wake the following nested aio_poll() call */ + event_notifier_set(&td->dummy_notifier); + + /* This nested event loop must not call io_poll()/io_poll_ready() */ + g_assert(aio_poll(td->ctx, true)); + + td->nested = false; + + fprintf(stderr, "< %s\n", __func__); +} + +/* dummy_notifier never triggers */ +static void io_poll_never_ready(EventNotifier *notifier) +{ + g_assert_not_reached(); +} + +static void test(void) +{ + TestData td = { + .ctx = aio_context_new(&error_abort), + }; + + qemu_set_current_aio_context(td.ctx); + + /* Enable polling */ + aio_context_set_poll_params(td.ctx, 1000000, 2, 2, &error_abort); + + /* + * The GSource is unused but this has the side-effect of changing the fdmon + * that AioContext uses. + */ + aio_get_g_source(td.ctx); + + /* Make the event notifier active (set) right away */ + event_notifier_init(&td.poll_notifier, 1); + aio_set_event_notifier(td.ctx, &td.poll_notifier, + io_read, io_poll_true, io_poll_ready); + + /* This event notifier will be used later */ + event_notifier_init(&td.dummy_notifier, 0); + aio_set_event_notifier(td.ctx, &td.dummy_notifier, + io_read, io_poll_false, io_poll_never_ready); + + /* Consume aio_notify() */ + g_assert(!aio_poll(td.ctx, false)); + + /* + * Run the io_read() handler. This has the side-effect of activating + * polling in future aio_poll() calls. 
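The scenario this test guards against, reduced to its core: poll handlers are level-triggered, so a ready callback that itself runs aio_poll() before resetting the condition would fire again immediately. A sketch of the failure mode with hypothetical handlers (virtqueue_not_empty() and drain_virtqueue() are invented for illustration):

    /* Recursion hazard: the poll condition is still true when nesting. */
    static bool io_poll(void *opaque)
    {
        return virtqueue_not_empty(opaque);   /* level-triggered, assumed */
    }

    static void io_poll_ready(void *opaque)
    {
        /*
         * If the nested aio_poll() re-ran io_poll()/io_poll_ready() here,
         * it would recurse until the stack overflowed, because the queue
         * has not been drained yet. aio_poll() therefore disables poll
         * handlers during nested calls, which is what this test asserts.
         */
        aio_poll(qemu_get_current_aio_context(), true);
        drain_virtqueue(opaque);              /* resets the condition */
    }
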
+ */ + g_assert(aio_poll(td.ctx, true)); + + /* The second time around the io_poll()/io_poll_ready() handler runs */ + g_assert(aio_poll(td.ctx, true)); + + /* Run io_poll()/io_poll_ready() one more time to show it keeps working */ + g_assert(aio_poll(td.ctx, true)); + + aio_set_event_notifier(td.ctx, &td.dummy_notifier, NULL, NULL, NULL); + aio_set_event_notifier(td.ctx, &td.poll_notifier, NULL, NULL, NULL); + event_notifier_cleanup(&td.dummy_notifier); + event_notifier_cleanup(&td.poll_notifier); + aio_context_unref(td.ctx); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + g_test_add_func("/nested-aio-poll", test); + return g_test_run(); +} diff --git a/tests/unit/test-qga.c b/tests/unit/test-qga.c index b4e0a14573..360b4cab23 100644 --- a/tests/unit/test-qga.c +++ b/tests/unit/test-qga.c @@ -755,32 +755,16 @@ static void test_qga_fsfreeze_status(gconstpointer fix) g_assert_cmpstr(status, ==, "thawed"); } -static void test_qga_guest_exec(gconstpointer fix) +static QDict *wait_for_guest_exec_completion(int fd, int64_t pid) { - const TestFixture *fixture = fix; - g_autoptr(QDict) ret = NULL; - QDict *val; - const gchar *out; - g_autofree guchar *decoded = NULL; - int64_t pid, now, exitcode; - gsize len; + QDict *ret = NULL; + int64_t now; bool exited; + QDict *val; - /* exec 'echo foo bar' */ - ret = qmp_fd(fixture->fd, "{'execute': 'guest-exec', 'arguments': {" - " 'path': '/bin/echo', 'arg': [ '-n', '\" test_str \"' ]," - " 'capture-output': true } }"); - g_assert_nonnull(ret); - qmp_assert_no_error(ret); - val = qdict_get_qdict(ret, "return"); - pid = qdict_get_int(val, "pid"); - g_assert_cmpint(pid, >, 0); - qobject_unref(ret); - - /* wait for completion */ now = g_get_monotonic_time(); do { - ret = qmp_fd(fixture->fd, + ret = qmp_fd(fd, "{'execute': 'guest-exec-status'," " 'arguments': { 'pid': %" PRId64 " } }", pid); g_assert_nonnull(ret); @@ -793,7 +777,34 @@ static void test_qga_guest_exec(gconstpointer fix) g_get_monotonic_time() < now + 5 * G_TIME_SPAN_SECOND); g_assert(exited); + return ret; +} + +static void test_qga_guest_exec(gconstpointer fix) +{ + const TestFixture *fixture = fix; + g_autoptr(QDict) ret = NULL; + QDict *val; + const gchar *out; + g_autofree guchar *decoded = NULL; + int64_t pid, exitcode; + gsize len; + + /* exec 'echo foo bar' */ + ret = qmp_fd(fixture->fd, "{'execute': 'guest-exec', 'arguments': {" + " 'path': '/bin/echo', 'arg': [ '-n', '\" test_str \"' ]," + " 'capture-output': true } }"); + g_assert_nonnull(ret); + qmp_assert_no_error(ret); + val = qdict_get_qdict(ret, "return"); + pid = qdict_get_int(val, "pid"); + g_assert_cmpint(pid, >, 0); + qobject_unref(ret); + + ret = wait_for_guest_exec_completion(fixture->fd, pid); + /* check stdout */ + val = qdict_get_qdict(ret, "return"); exitcode = qdict_get_int(val, "exitcode"); g_assert_cmpint(exitcode, ==, 0); out = qdict_get_str(val, "out-data"); @@ -802,6 +813,115 @@ static void test_qga_guest_exec(gconstpointer fix) g_assert_cmpstr((char *)decoded, ==, "\" test_str \""); } +#if defined(G_OS_WIN32) +static void test_qga_guest_exec_separated(gconstpointer fix) +{ +} +static void test_qga_guest_exec_merged(gconstpointer fix) +{ + const TestFixture *fixture = fix; + g_autoptr(QDict) ret = NULL; + QDict *val; + const gchar *class, *desc; + g_autofree guchar *decoded = NULL; + + /* exec 'echo foo bar' */ + ret = qmp_fd(fixture->fd, "{'execute': 'guest-exec', 'arguments': {" + " 'path': 'echo'," + " 'arg': [ 'execution never reaches here' ]," + " 'capture-output': 'merged' } 
}"); + + g_assert_nonnull(ret); + val = qdict_get_qdict(ret, "error"); + g_assert_nonnull(val); + class = qdict_get_str(val, "class"); + desc = qdict_get_str(val, "desc"); + g_assert_cmpstr(class, ==, "GenericError"); + g_assert_cmpint(strlen(desc), >, 0); +} +#else +static void test_qga_guest_exec_separated(gconstpointer fix) +{ + const TestFixture *fixture = fix; + g_autoptr(QDict) ret = NULL; + QDict *val; + const gchar *out, *err; + g_autofree guchar *out_decoded = NULL; + g_autofree guchar *err_decoded = NULL; + int64_t pid, exitcode; + gsize len; + + /* exec 'echo foo bar' */ + ret = qmp_fd(fixture->fd, "{'execute': 'guest-exec', 'arguments': {" + " 'path': '/bin/bash'," + " 'arg': [ '-c', 'for i in $(seq 4); do if (( $i %% 2 )); then echo stdout; else echo stderr 1>&2; fi; done;' ]," + " 'capture-output': 'separated' } }"); + g_assert_nonnull(ret); + qmp_assert_no_error(ret); + val = qdict_get_qdict(ret, "return"); + pid = qdict_get_int(val, "pid"); + g_assert_cmpint(pid, >, 0); + qobject_unref(ret); + + ret = wait_for_guest_exec_completion(fixture->fd, pid); + + val = qdict_get_qdict(ret, "return"); + exitcode = qdict_get_int(val, "exitcode"); + g_assert_cmpint(exitcode, ==, 0); + + /* check stdout */ + out = qdict_get_str(val, "out-data"); + out_decoded = g_base64_decode(out, &len); + g_assert_cmpint(len, ==, 14); + g_assert_cmpstr((char *)out_decoded, ==, "stdout\nstdout\n"); + + /* check stderr */ + err = qdict_get_try_str(val, "err-data"); + err_decoded = g_base64_decode(err, &len); + g_assert_cmpint(len, ==, 14); + g_assert_cmpstr((char *)err_decoded, ==, "stderr\nstderr\n"); +} + +static void test_qga_guest_exec_merged(gconstpointer fix) +{ + const TestFixture *fixture = fix; + g_autoptr(QDict) ret = NULL; + QDict *val; + const gchar *out, *err; + g_autofree guchar *decoded = NULL; + int64_t pid, exitcode; + gsize len; + + /* exec 'echo foo bar' */ + ret = qmp_fd(fixture->fd, "{'execute': 'guest-exec', 'arguments': {" + " 'path': '/bin/bash'," + " 'arg': [ '-c', 'for i in $(seq 4); do if (( $i %% 2 )); then echo stdout; else echo stderr 1>&2; fi; done;' ]," + " 'capture-output': 'merged' } }"); + g_assert_nonnull(ret); + qmp_assert_no_error(ret); + val = qdict_get_qdict(ret, "return"); + pid = qdict_get_int(val, "pid"); + g_assert_cmpint(pid, >, 0); + qobject_unref(ret); + + ret = wait_for_guest_exec_completion(fixture->fd, pid); + + val = qdict_get_qdict(ret, "return"); + exitcode = qdict_get_int(val, "exitcode"); + g_assert_cmpint(exitcode, ==, 0); + + /* check stdout */ + out = qdict_get_str(val, "out-data"); + decoded = g_base64_decode(out, &len); + g_assert_cmpint(len, ==, 28); + g_assert_cmpstr((char *)decoded, ==, "stdout\nstderr\nstdout\nstderr\n"); + + /* check stderr */ + err = qdict_get_try_str(val, "err-data"); + g_assert_null(err); +} +#endif + static void test_qga_guest_exec_invalid(gconstpointer fix) { const TestFixture *fixture = fix; @@ -972,6 +1092,10 @@ int main(int argc, char **argv) g_test_add_data_func("/qga/blockedrpcs", NULL, test_qga_blockedrpcs); g_test_add_data_func("/qga/config", NULL, test_qga_config); g_test_add_data_func("/qga/guest-exec", &fix, test_qga_guest_exec); + g_test_add_data_func("/qga/guest-exec-separated", &fix, + test_qga_guest_exec_separated); + g_test_add_data_func("/qga/guest-exec-merged", &fix, + test_qga_guest_exec_merged); g_test_add_data_func("/qga/guest-exec-invalid", &fix, test_qga_guest_exec_invalid); g_test_add_data_func("/qga/guest-get-osinfo", &fix, diff --git a/tests/unit/test-qobject-input-visitor.c 
b/tests/unit/test-qobject-input-visitor.c index 77fbf985be..9b3e2dbe14 100644 --- a/tests/unit/test-qobject-input-visitor.c +++ b/tests/unit/test-qobject-input-visitor.c @@ -706,6 +706,51 @@ static void test_visitor_in_union_flat(TestInputVisitorData *data, g_assert(&base->enum1 == &tmp->enum1); } +static void test_visitor_in_union_in_union(TestInputVisitorData *data, + const void *unused) +{ + Visitor *v; + g_autoptr(TestUnionInUnion) tmp = NULL; + + v = visitor_input_test_init(data, + "{ 'type': 'value-a', " + " 'type-a': 'value-a1', " + " 'integer': 2, " + " 'name': 'fish' }"); + + visit_type_TestUnionInUnion(v, NULL, &tmp, &error_abort); + g_assert_cmpint(tmp->type, ==, TEST_UNION_ENUM_VALUE_A); + g_assert_cmpint(tmp->u.value_a.type_a, ==, TEST_UNION_ENUMA_VALUE_A1); + g_assert_cmpint(tmp->u.value_a.u.value_a1.integer, ==, 2); + g_assert_cmpint(strcmp(tmp->u.value_a.u.value_a1.name, "fish"), ==, 0); + + qapi_free_TestUnionInUnion(tmp); + + v = visitor_input_test_init(data, + "{ 'type': 'value-a', " + " 'type-a': 'value-a2', " + " 'integer': 1729, " + " 'size': 87539319 }"); + + visit_type_TestUnionInUnion(v, NULL, &tmp, &error_abort); + g_assert_cmpint(tmp->type, ==, TEST_UNION_ENUM_VALUE_A); + g_assert_cmpint(tmp->u.value_a.type_a, ==, TEST_UNION_ENUMA_VALUE_A2); + g_assert_cmpint(tmp->u.value_a.u.value_a2.integer, ==, 1729); + g_assert_cmpint(tmp->u.value_a.u.value_a2.size, ==, 87539319); + + qapi_free_TestUnionInUnion(tmp); + + v = visitor_input_test_init(data, + "{ 'type': 'value-b', " + " 'integer': 1729, " + " 'onoff': true }"); + + visit_type_TestUnionInUnion(v, NULL, &tmp, &error_abort); + g_assert_cmpint(tmp->type, ==, TEST_UNION_ENUM_VALUE_B); + g_assert_cmpint(tmp->u.value_b.integer, ==, 1729); + g_assert_cmpint(tmp->u.value_b.onoff, ==, true); +} + static void test_visitor_in_alternate(TestInputVisitorData *data, const void *unused) { @@ -1216,6 +1261,8 @@ int main(int argc, char **argv) NULL, test_visitor_in_null); input_visitor_test_add("/visitor/input/union-flat", NULL, test_visitor_in_union_flat); + input_visitor_test_add("/visitor/input/union-in-union", + NULL, test_visitor_in_union_in_union); input_visitor_test_add("/visitor/input/alternate", NULL, test_visitor_in_alternate); input_visitor_test_add("/visitor/input/errors", diff --git a/tests/unit/test-qobject-output-visitor.c b/tests/unit/test-qobject-output-visitor.c index 7f054289fe..1535b3ad17 100644 --- a/tests/unit/test-qobject-output-visitor.c +++ b/tests/unit/test-qobject-output-visitor.c @@ -352,6 +352,62 @@ static void test_visitor_out_union_flat(TestOutputVisitorData *data, qapi_free_UserDefFlatUnion(tmp); } +static void test_visitor_out_union_in_union(TestOutputVisitorData *data, + const void *unused) +{ + QDict *qdict; + + TestUnionInUnion *tmp = g_new0(TestUnionInUnion, 1); + tmp->type = TEST_UNION_ENUM_VALUE_A; + tmp->u.value_a.type_a = TEST_UNION_ENUMA_VALUE_A1; + tmp->u.value_a.u.value_a1.integer = 42; + tmp->u.value_a.u.value_a1.name = g_strdup("fish"); + + visit_type_TestUnionInUnion(data->ov, NULL, &tmp, &error_abort); + qdict = qobject_to(QDict, visitor_get(data)); + g_assert(qdict); + g_assert_cmpstr(qdict_get_str(qdict, "type"), ==, "value-a"); + g_assert_cmpstr(qdict_get_str(qdict, "type-a"), ==, "value-a1"); + g_assert_cmpint(qdict_get_int(qdict, "integer"), ==, 42); + g_assert_cmpstr(qdict_get_str(qdict, "name"), ==, "fish"); + + qapi_free_TestUnionInUnion(tmp); + + + visitor_reset(data); + tmp = g_new0(TestUnionInUnion, 1); + tmp->type = TEST_UNION_ENUM_VALUE_A; + tmp->u.value_a.type_a = 
TEST_UNION_ENUMA_VALUE_A2; + tmp->u.value_a.u.value_a2.integer = 1729; + tmp->u.value_a.u.value_a2.size = 87539319; + + visit_type_TestUnionInUnion(data->ov, NULL, &tmp, &error_abort); + qdict = qobject_to(QDict, visitor_get(data)); + g_assert(qdict); + g_assert_cmpstr(qdict_get_str(qdict, "type"), ==, "value-a"); + g_assert_cmpstr(qdict_get_str(qdict, "type-a"), ==, "value-a2"); + g_assert_cmpint(qdict_get_int(qdict, "integer"), ==, 1729); + g_assert_cmpint(qdict_get_int(qdict, "size"), ==, 87539319); + + qapi_free_TestUnionInUnion(tmp); + + + visitor_reset(data); + tmp = g_new0(TestUnionInUnion, 1); + tmp->type = TEST_UNION_ENUM_VALUE_B; + tmp->u.value_b.integer = 1729; + tmp->u.value_b.onoff = true; + + visit_type_TestUnionInUnion(data->ov, NULL, &tmp, &error_abort); + qdict = qobject_to(QDict, visitor_get(data)); + g_assert(qdict); + g_assert_cmpstr(qdict_get_str(qdict, "type"), ==, "value-b"); + g_assert_cmpint(qdict_get_int(qdict, "integer"), ==, 1729); + g_assert_cmpint(qdict_get_bool(qdict, "onoff"), ==, true); + + qapi_free_TestUnionInUnion(tmp); +} + static void test_visitor_out_alternate(TestOutputVisitorData *data, const void *unused) { @@ -586,6 +642,8 @@ int main(int argc, char **argv) &out_visitor_data, test_visitor_out_list_qapi_free); output_visitor_test_add("/visitor/output/union-flat", &out_visitor_data, test_visitor_out_union_flat); + output_visitor_test_add("/visitor/output/union-in-union", + &out_visitor_data, test_visitor_out_union_in_union); output_visitor_test_add("/visitor/output/alternate", &out_visitor_data, test_visitor_out_alternate); output_visitor_test_add("/visitor/output/null", diff --git a/tests/unit/test-qtree.c b/tests/unit/test-qtree.c new file mode 100644 index 0000000000..4d836d22c7 --- /dev/null +++ b/tests/unit/test-qtree.c @@ -0,0 +1,333 @@ +/* + * SPDX-License-Identifier: LGPL-2.1-or-later + * + * Tests for QTree. + * Original source: glib + * https://gitlab.gnome.org/GNOME/glib/-/blob/main/glib/tests/tree.c + * LGPL license. 
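QTree deliberately mirrors GLib's GTree API, which is why the tests below are a near-verbatim port of glib's tree.c. The minimal lifecycle, condensed from the test into a self-contained sketch:

    /* Minimal QTree round trip, using the calls exercised below. */
    #include "qemu/osdep.h"
    #include "qemu/qtree.h"

    static gint cmp(gconstpointer a, gconstpointer b)
    {
        return *(const char *)a - *(const char *)b;
    }

    static void example(void)
    {
        static char k = 'x';
        QTree *t = q_tree_new(cmp);

        q_tree_insert(t, &k, &k);
        g_assert(q_tree_lookup(t, &k) == &k);
        g_assert(q_tree_nnodes(t) == 1);
        q_tree_remove(t, &k);
        q_tree_destroy(t);
    }
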
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald + */ + +#include "qemu/osdep.h" +#include "qemu/qtree.h" + +static gint my_compare(gconstpointer a, gconstpointer b) +{ + const char *cha = a; + const char *chb = b; + + return *cha - *chb; +} + +static gint my_compare_with_data(gconstpointer a, + gconstpointer b, + gpointer user_data) +{ + const char *cha = a; + const char *chb = b; + + /* just check that we got the right data */ + g_assert(GPOINTER_TO_INT(user_data) == 123); + + return *cha - *chb; +} + +static gint my_search(gconstpointer a, gconstpointer b) +{ + return my_compare(b, a); +} + +static gpointer destroyed_key; +static gpointer destroyed_value; +static guint destroyed_key_count; +static guint destroyed_value_count; + +static void my_key_destroy(gpointer key) +{ + destroyed_key = key; + destroyed_key_count++; +} + +static void my_value_destroy(gpointer value) +{ + destroyed_value = value; + destroyed_value_count++; +} + +static gint my_traverse(gpointer key, gpointer value, gpointer data) +{ + char *ch = key; + + g_assert((*ch) > 0); + + if (*ch == 'd') { + return TRUE; + } + + return FALSE; +} + +char chars[] = + "0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz"; + +char chars2[] = + "0123456789" + "abcdefghijklmnopqrstuvwxyz"; + +static gint check_order(gpointer key, gpointer value, gpointer data) +{ + char **p = data; + char *ch = key; + + g_assert(**p == *ch); + + (*p)++; + + return FALSE; +} + +static void test_tree_search(void) +{ + gint i; + QTree *tree; + gboolean removed; + gchar c; + gchar *p, *d; + + tree = q_tree_new_with_data(my_compare_with_data, GINT_TO_POINTER(123)); + + for (i = 0; chars[i]; i++) { + q_tree_insert(tree, &chars[i], &chars[i]); + } + + q_tree_foreach(tree, my_traverse, NULL); + + g_assert(q_tree_nnodes(tree) == strlen(chars)); + g_assert(q_tree_height(tree) == 6); + + p = chars; + q_tree_foreach(tree, check_order, &p); + + for (i = 0; i < 26; i++) { + removed = q_tree_remove(tree, &chars[i + 10]); + g_assert(removed); + } + + c = '\0'; + removed = q_tree_remove(tree, &c); + g_assert(!removed); + + q_tree_foreach(tree, my_traverse, NULL); + + g_assert(q_tree_nnodes(tree) == strlen(chars2)); + g_assert(q_tree_height(tree) == 6); + + p = chars2; + q_tree_foreach(tree, check_order, &p); + + for (i = 25; i >= 0; i--) { + q_tree_insert(tree, &chars[i + 10], &chars[i + 10]); + } + + p = chars; + q_tree_foreach(tree, check_order, &p); + + c = '0'; + p = q_tree_lookup(tree, &c); + g_assert(p && *p == c); + g_assert(q_tree_lookup_extended(tree, &c, (gpointer *)&d, (gpointer *)&p)); + g_assert(c == *d && c == *p); + + c = 'A'; + p = q_tree_lookup(tree, &c); + g_assert(p && *p == c); + + c = 'a'; + p = q_tree_lookup(tree, &c); + g_assert(p && *p == c); + + c = 'z'; + p = q_tree_lookup(tree, &c); + g_assert(p && *p == c); + + c = '!'; + p = q_tree_lookup(tree, &c); + g_assert(p == NULL); + + c = '='; + p = q_tree_lookup(tree, &c); + g_assert(p == NULL); + + c = '|'; + p = q_tree_lookup(tree, &c); + g_assert(p == NULL); + + c = '0'; + p = q_tree_search(tree, my_search, &c); + g_assert(p && *p == c); + + c = 'A'; + p = q_tree_search(tree, my_search, &c); + g_assert(p && *p == c); + + c = 'a'; + p = q_tree_search(tree, my_search, &c); + g_assert(p && *p == c); + + c = 'z'; + p = q_tree_search(tree, my_search, &c); + g_assert(p && *p == c); + + c = '!'; + p = q_tree_search(tree, my_search, &c); + g_assert(p == NULL); + + c = '='; + p = q_tree_search(tree, my_search, &c); + g_assert(p == NULL); + + c = '|'; + p 
= q_tree_search(tree, my_search, &c); + g_assert(p == NULL); + + q_tree_destroy(tree); +} + +static void test_tree_remove(void) +{ + QTree *tree; + char c, d; + gint i; + gboolean removed; + + tree = q_tree_new_full((GCompareDataFunc)my_compare, NULL, + my_key_destroy, + my_value_destroy); + + for (i = 0; chars[i]; i++) { + q_tree_insert(tree, &chars[i], &chars[i]); + } + + c = '0'; + q_tree_insert(tree, &c, &c); + g_assert(destroyed_key == &c); + g_assert(destroyed_value == &chars[0]); + destroyed_key = NULL; + destroyed_value = NULL; + + d = '1'; + q_tree_replace(tree, &d, &d); + g_assert(destroyed_key == &chars[1]); + g_assert(destroyed_value == &chars[1]); + destroyed_key = NULL; + destroyed_value = NULL; + + c = '2'; + removed = q_tree_remove(tree, &c); + g_assert(removed); + g_assert(destroyed_key == &chars[2]); + g_assert(destroyed_value == &chars[2]); + destroyed_key = NULL; + destroyed_value = NULL; + + c = '3'; + removed = q_tree_steal(tree, &c); + g_assert(removed); + g_assert(destroyed_key == NULL); + g_assert(destroyed_value == NULL); + + const gchar *remove = "omkjigfedba"; + for (i = 0; remove[i]; i++) { + removed = q_tree_remove(tree, &remove[i]); + g_assert(removed); + } + + q_tree_destroy(tree); +} + +static void test_tree_destroy(void) +{ + QTree *tree; + gint i; + + tree = q_tree_new(my_compare); + + for (i = 0; chars[i]; i++) { + q_tree_insert(tree, &chars[i], &chars[i]); + } + + g_assert(q_tree_nnodes(tree) == strlen(chars)); + + g_test_message("nnodes: %d", q_tree_nnodes(tree)); + q_tree_ref(tree); + q_tree_destroy(tree); + + g_test_message("nnodes: %d", q_tree_nnodes(tree)); + g_assert(q_tree_nnodes(tree) == 0); + + q_tree_unref(tree); +} + +static void test_tree_insert(void) +{ + QTree *tree; + gchar *p; + gint i; + gchar *scrambled; + + tree = q_tree_new(my_compare); + + for (i = 0; chars[i]; i++) { + q_tree_insert(tree, &chars[i], &chars[i]); + } + p = chars; + q_tree_foreach(tree, check_order, &p); + + q_tree_unref(tree); + tree = q_tree_new(my_compare); + + for (i = strlen(chars) - 1; i >= 0; i--) { + q_tree_insert(tree, &chars[i], &chars[i]); + } + p = chars; + q_tree_foreach(tree, check_order, &p); + + q_tree_unref(tree); + tree = q_tree_new(my_compare); + + scrambled = g_strdup(chars); + + for (i = 0; i < 30; i++) { + gchar tmp; + gint a, b; + + a = g_random_int_range(0, strlen(scrambled)); + b = g_random_int_range(0, strlen(scrambled)); + tmp = scrambled[a]; + scrambled[a] = scrambled[b]; + scrambled[b] = tmp; + } + + for (i = 0; scrambled[i]; i++) { + q_tree_insert(tree, &scrambled[i], &scrambled[i]); + } + p = chars; + q_tree_foreach(tree, check_order, &p); + + g_free(scrambled); + q_tree_unref(tree); +} + +int main(int argc, char *argv[]) +{ + g_test_init(&argc, &argv, NULL); + + g_test_add_func("/qtree/search", test_tree_search); + g_test_add_func("/qtree/remove", test_tree_remove); + g_test_add_func("/qtree/destroy", test_tree_destroy); + g_test_add_func("/qtree/insert", test_tree_insert); + + return g_test_run(); +} diff --git a/tests/unit/test-rcu-list.c b/tests/unit/test-rcu-list.c index 9964171da4..8f0adb8b00 100644 --- a/tests/unit/test-rcu-list.c +++ b/tests/unit/test-rcu-list.c @@ -151,7 +151,7 @@ static QSLIST_HEAD(, list_element) Q_list_head; #define TEST_NAME "qslist" #define TEST_LIST_REMOVE_RCU(el, f) \ - QSLIST_REMOVE_RCU(&Q_list_head, el, list_element, f) + QSLIST_REMOVE_RCU(&Q_list_head, el, list_element, f) #define TEST_LIST_INSERT_AFTER_RCU(list_el, el, f) \ QSLIST_INSERT_AFTER_RCU(&Q_list_head, list_el, el, f) diff --git 
a/tests/unit/test-thread-pool.c b/tests/unit/test-thread-pool.c index 6020e65d69..1483e53473 100644 --- a/tests/unit/test-thread-pool.c +++ b/tests/unit/test-thread-pool.c @@ -8,7 +8,6 @@ #include "qemu/main-loop.h" static AioContext *ctx; -static ThreadPool *pool; static int active; typedef struct { @@ -47,7 +46,7 @@ static void done_cb(void *opaque, int ret) static void test_submit(void) { WorkerTestData data = { .n = 0 }; - thread_pool_submit(pool, worker_cb, &data); + thread_pool_submit(worker_cb, &data); while (data.n == 0) { aio_poll(ctx, true); } @@ -57,7 +56,7 @@ static void test_submit(void) static void test_submit_aio(void) { WorkerTestData data = { .n = 0, .ret = -EINPROGRESS }; - data.aiocb = thread_pool_submit_aio(pool, worker_cb, &data, + data.aiocb = thread_pool_submit_aio(worker_cb, &data, done_cb, &data); /* The callbacks are not called until after the first wait. */ @@ -71,14 +70,14 @@ static void test_submit_aio(void) g_assert_cmpint(data.ret, ==, 0); } -static void co_test_cb(void *opaque) +static void coroutine_fn co_test_cb(void *opaque) { WorkerTestData *data = opaque; active = 1; data->n = 0; data->ret = -EINPROGRESS; - thread_pool_submit_co(pool, worker_cb, data); + thread_pool_submit_co(worker_cb, data); /* The test continues in test_submit_co, after qemu_coroutine_enter... */ @@ -122,7 +121,7 @@ static void test_submit_many(void) for (i = 0; i < 100; i++) { data[i].n = 0; data[i].ret = -EINPROGRESS; - thread_pool_submit_aio(pool, worker_cb, &data[i], done_cb, &data[i]); + thread_pool_submit_aio(worker_cb, &data[i], done_cb, &data[i]); } active = 100; @@ -150,7 +149,7 @@ static void do_test_cancel(bool sync) for (i = 0; i < 100; i++) { data[i].n = 0; data[i].ret = -EINPROGRESS; - data[i].aiocb = thread_pool_submit_aio(pool, long_cb, &data[i], + data[i].aiocb = thread_pool_submit_aio(long_cb, &data[i], done_cb, &data[i]); } @@ -235,7 +234,6 @@ int main(int argc, char **argv) { qemu_init_main_loop(&error_abort); ctx = qemu_get_current_aio_context(); - pool = aio_get_thread_pool(ctx); g_test_init(&argc, &argv, NULL); g_test_add_func("/thread-pool/submit", test_submit); diff --git a/tests/unit/test-vmstate.c b/tests/unit/test-vmstate.c index 79357b29ca..0b7d5ecd68 100644 --- a/tests/unit/test-vmstate.c +++ b/tests/unit/test-vmstate.c @@ -1073,7 +1073,6 @@ static gboolean diff_tree(gpointer key, gpointer value, gpointer data) struct match_node_data d = {tp->tree2, key, value}; g_tree_foreach(tp->tree2, tp->match_node, &d); - g_tree_remove(tp->tree1, key); return false; } @@ -1082,9 +1081,9 @@ static void compare_trees(GTree *tree1, GTree *tree2, { struct tree_cmp_data tp = {tree1, tree2, function}; + assert(g_tree_nnodes(tree1) == g_tree_nnodes(tree2)); g_tree_foreach(tree1, diff_tree, &tp); - assert(g_tree_nnodes(tree1) == 0); - assert(g_tree_nnodes(tree2) == 0); + g_tree_destroy(g_tree_ref(tree1)); } static void diff_domain(TestGTreeDomain *d1, TestGTreeDomain *d2) diff --git a/tests/unit/test-xbzrle.c b/tests/unit/test-xbzrle.c index 547046d093..b6996de69a 100644 --- a/tests/unit/test-xbzrle.c +++ b/tests/unit/test-xbzrle.c @@ -16,35 +16,6 @@ #define XBZRLE_PAGE_SIZE 4096 -int (*xbzrle_encode_buffer_func)(uint8_t *, uint8_t *, int, - uint8_t *, int) = xbzrle_encode_buffer; -#if defined(CONFIG_AVX512BW_OPT) -#include "qemu/cpuid.h" -static void __attribute__((constructor)) init_cpu_flag(void) -{ - unsigned max = __get_cpuid_max(0, NULL); - int a, b, c, d; - if (max >= 1) { - __cpuid(1, a, b, c, d); - /* We must check that AVX is not just available, but usable. 
*/ - if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) { - int bv; - __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0)); - __cpuid_count(7, 0, a, b, c, d); - /* 0xe6: - * XCR0[7:5] = 111b (OPMASK state, upper 256-bit of ZMM0-ZMM15 - * and ZMM16-ZMM31 state are enabled by OS) - * XCR0[2:1] = 11b (XMM state and YMM state are enabled by OS) - */ - if ((bv & 0xe6) == 0xe6 && (b & bit_AVX512BW)) { - xbzrle_encode_buffer_func = xbzrle_encode_buffer_avx512; - } - } - } - return ; -} -#endif - static void test_uleb(void) { uint32_t i, val; @@ -83,8 +54,8 @@ static void test_encode_decode_zero(void) buffer[1000 + diff_len + 5] = 105; /* encode zero page */ - dlen = xbzrle_encode_buffer_func(buffer, buffer, XBZRLE_PAGE_SIZE, compressed, - XBZRLE_PAGE_SIZE); + dlen = xbzrle_encode_buffer(buffer, buffer, XBZRLE_PAGE_SIZE, + compressed, XBZRLE_PAGE_SIZE); g_assert(dlen == 0); g_free(buffer); @@ -107,8 +78,8 @@ static void test_encode_decode_unchanged(void) test[1000 + diff_len + 5] = 109; /* test unchanged buffer */ - dlen = xbzrle_encode_buffer_func(test, test, XBZRLE_PAGE_SIZE, compressed, - XBZRLE_PAGE_SIZE); + dlen = xbzrle_encode_buffer(test, test, XBZRLE_PAGE_SIZE, + compressed, XBZRLE_PAGE_SIZE); g_assert(dlen == 0); g_free(test); @@ -125,8 +96,8 @@ static void test_encode_decode_1_byte(void) test[XBZRLE_PAGE_SIZE - 1] = 1; - dlen = xbzrle_encode_buffer_func(buffer, test, XBZRLE_PAGE_SIZE, compressed, - XBZRLE_PAGE_SIZE); + dlen = xbzrle_encode_buffer(buffer, test, XBZRLE_PAGE_SIZE, + compressed, XBZRLE_PAGE_SIZE); g_assert(dlen == (uleb128_encode_small(&buf[0], 4095) + 2)); rc = xbzrle_decode_buffer(compressed, dlen, buffer, XBZRLE_PAGE_SIZE); @@ -150,8 +121,8 @@ static void test_encode_decode_overflow(void) } /* encode overflow */ - rc = xbzrle_encode_buffer_func(buffer, test, XBZRLE_PAGE_SIZE, compressed, - XBZRLE_PAGE_SIZE); + rc = xbzrle_encode_buffer(buffer, test, XBZRLE_PAGE_SIZE, + compressed, XBZRLE_PAGE_SIZE); g_assert(rc == -1); g_free(buffer); @@ -181,8 +152,8 @@ static void encode_decode_range(void) test[1000 + diff_len + 5] = 109; /* test encode/decode */ - dlen = xbzrle_encode_buffer_func(test, buffer, XBZRLE_PAGE_SIZE, compressed, - XBZRLE_PAGE_SIZE); + dlen = xbzrle_encode_buffer(test, buffer, XBZRLE_PAGE_SIZE, + compressed, XBZRLE_PAGE_SIZE); rc = xbzrle_decode_buffer(compressed, dlen, test, XBZRLE_PAGE_SIZE); g_assert(rc < XBZRLE_PAGE_SIZE); diff --git a/tests/unit/test-xs-node.c b/tests/unit/test-xs-node.c new file mode 100644 index 0000000000..b80d10ff98 --- /dev/null +++ b/tests/unit/test-xs-node.c @@ -0,0 +1,871 @@ +/* + * QEMU XenStore XsNode testing + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" + +static int nr_xs_nodes; +static GList *xs_node_list; + +#define XS_NODE_UNIT_TEST + +/* + * We don't need the core Xen definitions. And we *do* want to be able + * to run the unit tests even on architectures that Xen doesn't support + * (because life's too short to bother doing otherwise, and test coverage + * doesn't hurt). 
+ */ +#define __XEN_PUBLIC_XEN_H__ +typedef unsigned long xen_pfn_t; + +#include "hw/i386/kvm/xenstore_impl.c" + +#define DOMID_QEMU 0 +#define DOMID_GUEST 1 + +static void dump_ref(const char *name, XsNode *n, int indent) +{ + int i; + + if (!indent && name) { + printf("%s:\n", name); + } + + for (i = 0; i < indent; i++) { + printf(" "); + } + + printf("->%p(%d, '%s'): '%.*s'%s%s\n", n, n->ref, n->name, + (int)(n->content ? n->content->len : strlen("")), + n->content ? (char *)n->content->data : "", + n->modified_in_tx ? " MODIFIED" : "", + n->deleted_in_tx ? " DELETED" : ""); + + if (n->children) { + g_hash_table_foreach(n->children, (void *)dump_ref, + GINT_TO_POINTER(indent + 2)); + } +} + +/* This doesn't happen in qemu but we want to make valgrind happy */ +static void xs_impl_delete(XenstoreImplState *s, bool last) +{ + int err; + + xs_impl_reset_watches(s, DOMID_GUEST); + g_assert(!s->nr_domu_watches); + + err = xs_impl_rm(s, DOMID_QEMU, XBT_NULL, "/local"); + g_assert(!err); + g_assert(s->nr_nodes == 1); + + g_hash_table_unref(s->watches); + g_hash_table_unref(s->transactions); + xs_node_unref(s->root); + g_free(s); + + if (!last) { + return; + } + + if (xs_node_list) { + GList *l; + for (l = xs_node_list; l; l = l->next) { + XsNode *n = l->data; + printf("Remaining node at %p name %s ref %u\n", n, n->name, + n->ref); + } + } + g_assert(!nr_xs_nodes); +} + +struct compare_walk { + char path[XENSTORE_ABS_PATH_MAX + 1]; + XsNode *parent_2; + bool compare_ok; +}; + + +static bool compare_perms(GList *p1, GList *p2) +{ + while (p1) { + if (!p2 || g_strcmp0(p1->data, p2->data)) { + return false; + } + p1 = p1->next; + p2 = p2->next; + } + return (p2 == NULL); +} + +static bool compare_content(GByteArray *c1, GByteArray *c2) +{ + size_t len1 = 0, len2 = 0; + + if (c1) { + len1 = c1->len; + } + if (c2) { + len2 = c2->len; + } + if (len1 != len2) { + return false; + } + + if (!len1) { + return true; + } + + return !memcmp(c1->data, c2->data, len1); +} + +static void compare_child(gpointer, gpointer, gpointer); + +static void compare_nodes(struct compare_walk *cw, XsNode *n1, XsNode *n2) +{ + int nr_children1 = 0, nr_children2 = 0; + + if (n1->children) { + nr_children1 = g_hash_table_size(n1->children); + } + if (n2->children) { + nr_children2 = g_hash_table_size(n2->children); + } + + if (n1->ref != n2->ref || + n1->deleted_in_tx != n2->deleted_in_tx || + n1->modified_in_tx != n2->modified_in_tx || + !compare_perms(n1->perms, n2->perms) || + !compare_content(n1->content, n2->content) || + nr_children1 != nr_children2) { + cw->compare_ok = false; + printf("Compare failure on '%s'\n", cw->path); + } + + if (nr_children1) { + XsNode *oldparent = cw->parent_2; + cw->parent_2 = n2; + g_hash_table_foreach(n1->children, compare_child, cw); + + cw->parent_2 = oldparent; + } +} + +static void compare_child(gpointer key, gpointer val, gpointer opaque) +{ + struct compare_walk *cw = opaque; + char *childname = key; + XsNode *child1 = val; + XsNode *child2 = g_hash_table_lookup(cw->parent_2->children, childname); + int pathlen = strlen(cw->path); + + if (!child2) { + cw->compare_ok = false; + printf("Child '%s' does not exist under '%s'\n", childname, cw->path); + return; + } + + strncat(cw->path, "/", sizeof(cw->path) - 1); + strncat(cw->path, childname, sizeof(cw->path) - 1); + + compare_nodes(cw, child1, child2); + cw->path[pathlen] = '\0'; +} + +static bool compare_trees(XsNode *n1, XsNode *n2) +{ + struct compare_walk cw; + + cw.path[0] = '\0'; + cw.parent_2 = n2; + cw.compare_ok = true; + + if 
(!n1 || !n2) { + return false; + } + + compare_nodes(&cw, n1, n2); + return cw.compare_ok; +} + +static void compare_tx(gpointer key, gpointer val, gpointer opaque) +{ + XenstoreImplState *s2 = opaque; + XsTransaction *t1 = val, *t2; + unsigned int tx_id = GPOINTER_TO_INT(key); + + t2 = g_hash_table_lookup(s2->transactions, key); + g_assert(t2); + + g_assert(t1->tx_id == tx_id); + g_assert(t2->tx_id == tx_id); + g_assert(t1->base_tx == t2->base_tx); + g_assert(t1->dom_id == t2->dom_id); + if (!compare_trees(t1->root, t2->root)) { + printf("Comparison failure in TX %u after serdes:\n", tx_id); + dump_ref("Original", t1->root, 0); + dump_ref("Deserialised", t2->root, 0); + g_assert(0); + } + g_assert(t1->nr_nodes == t2->nr_nodes); +} + +static int write_str(XenstoreImplState *s, unsigned int dom_id, + unsigned int tx_id, const char *path, + const char *content) +{ + GByteArray *d = g_byte_array_new(); + int err; + + g_byte_array_append(d, (void *)content, strlen(content)); + err = xs_impl_write(s, dom_id, tx_id, path, d); + g_byte_array_unref(d); + return err; +} + +static void watch_cb(void *_str, const char *path, const char *token) +{ + GString *str = _str; + + g_string_append(str, path); + g_string_append(str, token); +} + +static void check_serdes(XenstoreImplState *s) +{ + XenstoreImplState *s2 = xs_impl_create(DOMID_GUEST); + GByteArray *bytes = xs_impl_serialize(s); + int nr_transactions1, nr_transactions2; + int ret; + + ret = xs_impl_deserialize(s2, bytes, DOMID_GUEST, watch_cb, NULL); + g_assert(!ret); + + g_byte_array_unref(bytes); + + g_assert(s->last_tx == s2->last_tx); + g_assert(s->root_tx == s2->root_tx); + + if (!compare_trees(s->root, s2->root)) { + printf("Comparison failure in main tree after serdes:\n"); + dump_ref("Original", s->root, 0); + dump_ref("Deserialised", s2->root, 0); + g_assert(0); + } + + nr_transactions1 = g_hash_table_size(s->transactions); + nr_transactions2 = g_hash_table_size(s2->transactions); + g_assert(nr_transactions1 == nr_transactions2); + + g_hash_table_foreach(s->transactions, compare_tx, s2); + + g_assert(s->nr_domu_watches == s2->nr_domu_watches); + g_assert(s->nr_domu_transactions == s2->nr_domu_transactions); + g_assert(s->nr_nodes == s2->nr_nodes); + xs_impl_delete(s2, false); +} + +static XenstoreImplState *setup(void) +{ + XenstoreImplState *s = xs_impl_create(DOMID_GUEST); + char *abspath; + GList *perms; + int err; + + abspath = g_strdup_printf("/local/domain/%u", DOMID_GUEST); + + err = write_str(s, DOMID_QEMU, XBT_NULL, abspath, ""); + g_assert(!err); + g_assert(s->nr_nodes == 4); + + perms = g_list_append(NULL, g_strdup_printf("n%u", DOMID_QEMU)); + perms = g_list_append(perms, g_strdup_printf("r%u", DOMID_GUEST)); + + err = xs_impl_set_perms(s, DOMID_QEMU, XBT_NULL, abspath, perms); + g_assert(!err); + + g_list_free_full(perms, g_free); + g_free(abspath); + + abspath = g_strdup_printf("/local/domain/%u/some", DOMID_GUEST); + + err = write_str(s, DOMID_QEMU, XBT_NULL, abspath, ""); + g_assert(!err); + g_assert(s->nr_nodes == 5); + + perms = g_list_append(NULL, g_strdup_printf("n%u", DOMID_GUEST)); + + err = xs_impl_set_perms(s, DOMID_QEMU, XBT_NULL, abspath, perms); + g_assert(!err); + + g_list_free_full(perms, g_free); + g_free(abspath); + + return s; +} + +static void test_xs_node_simple(void) +{ + GByteArray *data = g_byte_array_new(); + XenstoreImplState *s = setup(); + GString *guest_watches = g_string_new(NULL); + GString *qemu_watches = g_string_new(NULL); + GList *items = NULL; + XsNode *old_root; + uint64_t gencnt; + int 
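+/*
+ * The setup() fixture above uses XenStore-style permission strings: the
+ * leading letter encodes the access type (n = none, r = read, w = write,
+ * b = both) and the digits the domain ID, with the first list entry
+ * naming the owner and the default for everyone else. So "n0" plus "r1"
+ * reads as: owned by dom0, no access for other domains by default,
+ * except dom1 which may read. This is the conventional xenstore ACL
+ * encoding, assuming xenstore_impl.c follows it.
+ */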
err; + + g_assert(s); + + err = xs_impl_watch(s, DOMID_GUEST, "some", "guestwatch", + watch_cb, guest_watches); + g_assert(!err); + g_assert(guest_watches->len == strlen("someguestwatch")); + g_assert(!strcmp(guest_watches->str, "someguestwatch")); + g_string_truncate(guest_watches, 0); + + err = xs_impl_watch(s, 0, "/local/domain/1/some", "qemuwatch", + watch_cb, qemu_watches); + g_assert(!err); + g_assert(qemu_watches->len == strlen("/local/domain/1/someqemuwatch")); + g_assert(!strcmp(qemu_watches->str, "/local/domain/1/someqemuwatch")); + g_string_truncate(qemu_watches, 0); + + /* Read gives ENOENT when it should */ + err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "foo", data); + g_assert(err == ENOENT); + + /* Write works */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/relative/path", + "something"); + g_assert(s->nr_nodes == 7); + g_assert(!err); + g_assert(!strcmp(guest_watches->str, + "some/relative/pathguestwatch")); + g_assert(!strcmp(qemu_watches->str, + "/local/domain/1/some/relative/pathqemuwatch")); + + g_string_truncate(qemu_watches, 0); + g_string_truncate(guest_watches, 0); + xs_impl_reset_watches(s, 0); + + /* Read gives back what we wrote */ + err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/relative/path", data); + g_assert(!err); + g_assert(data->len == strlen("something")); + g_assert(!memcmp(data->data, "something", data->len)); + + /* Even if we use an absolute path */ + g_byte_array_set_size(data, 0); + err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, + "/local/domain/1/some/relative/path", data); + g_assert(!err); + g_assert(data->len == strlen("something")); + + g_assert(!qemu_watches->len); + g_assert(!guest_watches->len); + /* Keep a copy, to force COW mode */ + old_root = xs_node_ref(s->root); + + /* Write somewhere we aren't allowed, in COW mode */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "/local/domain/badplace", + "moredata"); + g_assert(err == EACCES); + g_assert(s->nr_nodes == 7); + + /* Write works again */ + err = write_str(s, DOMID_GUEST, XBT_NULL, + "/local/domain/1/some/relative/path2", + "something else"); + g_assert(!err); + g_assert(s->nr_nodes == 8); + g_assert(!qemu_watches->len); + g_assert(!strcmp(guest_watches->str, "some/relative/path2guestwatch")); + g_string_truncate(guest_watches, 0); + + /* Overwrite an existing node */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/relative/path", + "another thing"); + g_assert(!err); + g_assert(s->nr_nodes == 8); + g_assert(!qemu_watches->len); + g_assert(!strcmp(guest_watches->str, "some/relative/pathguestwatch")); + g_string_truncate(guest_watches, 0); + + /* We can list the two files we wrote */ + err = xs_impl_directory(s, DOMID_GUEST, XBT_NULL, "some/relative", &gencnt, + &items); + g_assert(!err); + g_assert(items); + g_assert(gencnt == 2); + g_assert(!strcmp(items->data, "path")); + g_assert(items->next); + g_assert(!strcmp(items->next->data, "path2")); + g_assert(!items->next->next); + g_list_free_full(items, g_free); + + err = xs_impl_unwatch(s, DOMID_GUEST, "some", "guestwatch", + watch_cb, guest_watches); + g_assert(!err); + + err = xs_impl_unwatch(s, DOMID_GUEST, "some", "guestwatch", + watch_cb, guest_watches); + g_assert(err == ENOENT); + + err = xs_impl_watch(s, DOMID_GUEST, "some/relative/path2", "watchp2", + watch_cb, guest_watches); + g_assert(!err); + g_assert(guest_watches->len == strlen("some/relative/path2watchp2")); + g_assert(!strcmp(guest_watches->str, "some/relative/path2watchp2")); + g_string_truncate(guest_watches, 0); + + err = xs_impl_watch(s, DOMID_GUEST, 
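+/*
+ * Registering a watch fires its callback once, immediately, with the
+ * watched path and token; watch_cb() above simply concatenates the two
+ * onto the GString. That is why each registration is followed by an
+ * assert on the fused form, "/local/domain/1/some/relativewatchrel"
+ * here, rather than on two separate strings.
+ */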
"/local/domain/1/some/relative", + "watchrel", watch_cb, guest_watches); + g_assert(!err); + g_assert(guest_watches->len == + strlen("/local/domain/1/some/relativewatchrel")); + g_assert(!strcmp(guest_watches->str, + "/local/domain/1/some/relativewatchrel")); + g_string_truncate(guest_watches, 0); + + /* Write somewhere else which already existed */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/relative", "moredata"); + g_assert(!err); + g_assert(s->nr_nodes == 8); + + /* Write somewhere we aren't allowed */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "/local/domain/badplace", + "moredata"); + g_assert(err == EACCES); + + g_assert(!strcmp(guest_watches->str, + "/local/domain/1/some/relativewatchrel")); + g_string_truncate(guest_watches, 0); + + g_byte_array_set_size(data, 0); + err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/relative", data); + g_assert(!err); + g_assert(data->len == strlen("moredata")); + g_assert(!memcmp(data->data, "moredata", data->len)); + + /* Overwrite existing data */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/relative", "otherdata"); + g_assert(!err); + g_string_truncate(guest_watches, 0); + + g_byte_array_set_size(data, 0); + err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/relative", data); + g_assert(!err); + g_assert(data->len == strlen("otherdata")); + g_assert(!memcmp(data->data, "otherdata", data->len)); + + /* Remove the subtree */ + err = xs_impl_rm(s, DOMID_GUEST, XBT_NULL, "some/relative"); + g_assert(!err); + g_assert(s->nr_nodes == 5); + + /* Each watch fires with the least specific relevant path */ + g_assert(strstr(guest_watches->str, + "some/relative/path2watchp2")); + g_assert(strstr(guest_watches->str, + "/local/domain/1/some/relativewatchrel")); + g_string_truncate(guest_watches, 0); + + g_byte_array_set_size(data, 0); + err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/relative", data); + g_assert(err == ENOENT); + g_byte_array_unref(data); + + xs_impl_reset_watches(s, DOMID_GUEST); + g_string_free(qemu_watches, true); + g_string_free(guest_watches, true); + xs_node_unref(old_root); + xs_impl_delete(s, true); +} + + +static void do_test_xs_node_tx(bool fail, bool commit) +{ + XenstoreImplState *s = setup(); + GString *watches = g_string_new(NULL); + GByteArray *data = g_byte_array_new(); + unsigned int tx_id = XBT_NULL; + int err; + + g_assert(s); + + /* Set a watch */ + err = xs_impl_watch(s, DOMID_GUEST, "some", "watch", + watch_cb, watches); + g_assert(!err); + g_assert(watches->len == strlen("somewatch")); + g_assert(!strcmp(watches->str, "somewatch")); + g_string_truncate(watches, 0); + + /* Write something */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/relative/path", + "something"); + g_assert(s->nr_nodes == 7); + g_assert(!err); + g_assert(!strcmp(watches->str, + "some/relative/pathwatch")); + g_string_truncate(watches, 0); + + /* Create a transaction */ + err = xs_impl_transaction_start(s, DOMID_GUEST, &tx_id); + g_assert(!err); + + if (fail) { + /* Write something else in the root */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/relative/path", + "another thing"); + g_assert(!err); + g_assert(s->nr_nodes == 7); + g_assert(!strcmp(watches->str, + "some/relative/pathwatch")); + g_string_truncate(watches, 0); + } + + g_assert(!watches->len); + + /* Perform a write in the transaction */ + err = write_str(s, DOMID_GUEST, tx_id, "some/relative/path", + "something else"); + g_assert(!err); + g_assert(s->nr_nodes == 7); + g_assert(!watches->len); + + err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/relative/path", 
data); + g_assert(!err); + if (fail) { + g_assert(data->len == strlen("another thing")); + g_assert(!memcmp(data->data, "another thing", data->len)); + } else { + g_assert(data->len == strlen("something")); + g_assert(!memcmp(data->data, "something", data->len)); + } + g_byte_array_set_size(data, 0); + + err = xs_impl_read(s, DOMID_GUEST, tx_id, "some/relative/path", data); + g_assert(!err); + g_assert(data->len == strlen("something else")); + g_assert(!memcmp(data->data, "something else", data->len)); + g_byte_array_set_size(data, 0); + + check_serdes(s); + + /* Attempt to commit the transaction */ + err = xs_impl_transaction_end(s, DOMID_GUEST, tx_id, commit); + if (commit && fail) { + g_assert(err == EAGAIN); + } else { + g_assert(!err); + } + if (commit && !fail) { + g_assert(!strcmp(watches->str, + "some/relative/pathwatch")); + g_string_truncate(watches, 0); + } else { + g_assert(!watches->len); + } + g_assert(s->nr_nodes == 7); + + check_serdes(s); + + err = xs_impl_unwatch(s, DOMID_GUEST, "some", "watch", + watch_cb, watches); + g_assert(!err); + + err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/relative/path", data); + g_assert(!err); + if (fail) { + g_assert(data->len == strlen("another thing")); + g_assert(!memcmp(data->data, "another thing", data->len)); + } else if (commit) { + g_assert(data->len == strlen("something else")); + g_assert(!memcmp(data->data, "something else", data->len)); + } else { + g_assert(data->len == strlen("something")); + g_assert(!memcmp(data->data, "something", data->len)); + } + g_byte_array_unref(data); + g_string_free(watches, true); + xs_impl_delete(s, true); +} + +static void test_xs_node_tx_fail(void) +{ + do_test_xs_node_tx(true, true); +} + +static void test_xs_node_tx_abort(void) +{ + do_test_xs_node_tx(false, false); + do_test_xs_node_tx(true, false); +} +static void test_xs_node_tx_succeed(void) +{ + do_test_xs_node_tx(false, true); +} + +static void test_xs_node_tx_rm(void) +{ + XenstoreImplState *s = setup(); + GString *watches = g_string_new(NULL); + GByteArray *data = g_byte_array_new(); + unsigned int tx_id = XBT_NULL; + int err; + + g_assert(s); + + /* Set a watch */ + err = xs_impl_watch(s, DOMID_GUEST, "some", "watch", + watch_cb, watches); + g_assert(!err); + g_assert(watches->len == strlen("somewatch")); + g_assert(!strcmp(watches->str, "somewatch")); + g_string_truncate(watches, 0); + + /* Write something */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path", + "something"); + g_assert(!err); + g_assert(s->nr_nodes == 9); + g_assert(!strcmp(watches->str, + "some/deep/dark/relative/pathwatch")); + g_string_truncate(watches, 0); + + /* Create a transaction */ + err = xs_impl_transaction_start(s, DOMID_GUEST, &tx_id); + g_assert(!err); + + /* Delete the tree in the transaction */ + err = xs_impl_rm(s, DOMID_GUEST, tx_id, "some/deep/dark"); + g_assert(!err); + g_assert(s->nr_nodes == 9); + g_assert(!watches->len); + + err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path", + data); + g_assert(!err); + g_assert(data->len == strlen("something")); + g_assert(!memcmp(data->data, "something", data->len)); + g_byte_array_set_size(data, 0); + + check_serdes(s); + + /* Commit the transaction */ + err = xs_impl_transaction_end(s, DOMID_GUEST, tx_id, true); + g_assert(!err); + g_assert(s->nr_nodes == 6); + + g_assert(!strcmp(watches->str, "some/deep/darkwatch")); + g_string_truncate(watches, 0); + + /* Now the node is gone */ + err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, 
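+/*
+ * Two transactional properties are pinned down around this point.
+ * First, isolation: reads passing XBT_NULL see the committed root tree
+ * while reads passing tx_id see the transaction's private snapshot,
+ * hence the paired xs_impl_read() calls with different expected data.
+ * Second, copy-on-write accounting: removing a subtree inside a
+ * transaction leaves s->nr_nodes unchanged, and only the commit drops
+ * the count (9 down to 6 in test_xs_node_tx_rm) and fires the watch
+ * for the topmost deleted path.
+ */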
"some/deep/dark/relative/path", + data); + g_assert(err == ENOENT); + g_byte_array_unref(data); + + err = xs_impl_unwatch(s, DOMID_GUEST, "some", "watch", + watch_cb, watches); + g_assert(!err); + + g_string_free(watches, true); + xs_impl_delete(s, true); +} + +static void test_xs_node_tx_resurrect(void) +{ + XenstoreImplState *s = setup(); + GString *watches = g_string_new(NULL); + GByteArray *data = g_byte_array_new(); + unsigned int tx_id = XBT_NULL; + int err; + + g_assert(s); + + /* Write something */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path", + "something"); + g_assert(!err); + g_assert(s->nr_nodes == 9); + + /* Another node to remain shared */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/place/safe", "keepme"); + g_assert(!err); + g_assert(s->nr_nodes == 11); + + /* This node will be wiped and resurrected */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/deep/dark", + "foo"); + g_assert(!err); + g_assert(s->nr_nodes == 11); + + /* Set a watch */ + err = xs_impl_watch(s, DOMID_GUEST, "some", "watch", + watch_cb, watches); + g_assert(!err); + g_assert(watches->len == strlen("somewatch")); + g_assert(!strcmp(watches->str, "somewatch")); + g_string_truncate(watches, 0); + + /* Create a transaction */ + err = xs_impl_transaction_start(s, DOMID_GUEST, &tx_id); + g_assert(!err); + + /* Delete the tree in the transaction */ + err = xs_impl_rm(s, DOMID_GUEST, tx_id, "some/deep"); + g_assert(!err); + g_assert(s->nr_nodes == 11); + g_assert(!watches->len); + + /* Resurrect part of it */ + err = write_str(s, DOMID_GUEST, tx_id, "some/deep/dark/different/path", + "something"); + g_assert(!err); + g_assert(s->nr_nodes == 11); + + check_serdes(s); + + /* Commit the transaction */ + err = xs_impl_transaction_end(s, DOMID_GUEST, tx_id, true); + g_assert(!err); + g_assert(s->nr_nodes == 11); + + check_serdes(s); + + /* lost data */ + g_assert(strstr(watches->str, "some/deep/dark/different/pathwatch")); + /* topmost deleted */ + g_assert(strstr(watches->str, "some/deep/dark/relativewatch")); + /* lost data */ + g_assert(strstr(watches->str, "some/deep/darkwatch")); + + g_string_truncate(watches, 0); + + /* Now the node is gone */ + err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path", + data); + g_assert(err == ENOENT); + g_byte_array_unref(data); + + check_serdes(s); + + err = xs_impl_unwatch(s, DOMID_GUEST, "some", "watch", + watch_cb, watches); + g_assert(!err); + + g_string_free(watches, true); + xs_impl_delete(s, true); +} + +static void test_xs_node_tx_resurrect2(void) +{ + XenstoreImplState *s = setup(); + GString *watches = g_string_new(NULL); + GByteArray *data = g_byte_array_new(); + unsigned int tx_id = XBT_NULL; + int err; + + g_assert(s); + + /* Write something */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path", + "something"); + g_assert(!err); + g_assert(s->nr_nodes == 9); + + /* Another node to remain shared */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/place/safe", "keepme"); + g_assert(!err); + g_assert(s->nr_nodes == 11); + + /* This node will be wiped and resurrected */ + err = write_str(s, DOMID_GUEST, XBT_NULL, "some/deep/dark", + "foo"); + g_assert(!err); + g_assert(s->nr_nodes == 11); + + /* Set a watch */ + err = xs_impl_watch(s, DOMID_GUEST, "some", "watch", + watch_cb, watches); + g_assert(!err); + g_assert(watches->len == strlen("somewatch")); + g_assert(!strcmp(watches->str, "somewatch")); + g_string_truncate(watches, 0); + + /* Create a transaction */ + err = 
xs_impl_transaction_start(s, DOMID_GUEST, &tx_id); + g_assert(!err); + + /* Delete the tree in the transaction */ + err = xs_impl_rm(s, DOMID_GUEST, tx_id, "some/deep"); + g_assert(!err); + g_assert(s->nr_nodes == 11); + g_assert(!watches->len); + + /* Resurrect part of it */ + err = write_str(s, DOMID_GUEST, tx_id, "some/deep/dark/relative/path", + "something"); + g_assert(!err); + g_assert(s->nr_nodes == 11); + + check_serdes(s); + + /* Commit the transaction */ + err = xs_impl_transaction_end(s, DOMID_GUEST, tx_id, true); + g_assert(!err); + g_assert(s->nr_nodes == 11); + + check_serdes(s); + + /* lost data */ + g_assert(strstr(watches->str, "some/deep/dark/relative/pathwatch")); + /* lost data */ + g_assert(strstr(watches->str, "some/deep/darkwatch")); + + g_string_truncate(watches, 0); + + /* Now the node is gone */ + err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path", + data); + g_assert(!err); + g_assert(data->len == strlen("something")); + g_assert(!memcmp(data->data, "something", data->len)); + + g_byte_array_unref(data); + + check_serdes(s); + + err = xs_impl_unwatch(s, DOMID_GUEST, "some", "watch", + watch_cb, watches); + g_assert(!err); + + g_string_free(watches, true); + xs_impl_delete(s, true); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + module_call_init(MODULE_INIT_QOM); + + g_test_add_func("/xs_node/simple", test_xs_node_simple); + g_test_add_func("/xs_node/tx_abort", test_xs_node_tx_abort); + g_test_add_func("/xs_node/tx_fail", test_xs_node_tx_fail); + g_test_add_func("/xs_node/tx_succeed", test_xs_node_tx_succeed); + g_test_add_func("/xs_node/tx_rm", test_xs_node_tx_rm); + g_test_add_func("/xs_node/tx_resurrect", test_xs_node_tx_resurrect); + g_test_add_func("/xs_node/tx_resurrect2", test_xs_node_tx_resurrect2); + + return g_test_run(); +} diff --git a/tests/vm/Makefile.include b/tests/vm/Makefile.include index 2cc2203d09..f0f5d32fb0 100644 --- a/tests/vm/Makefile.include +++ b/tests/vm/Makefile.include @@ -1,14 +1,12 @@ # Makefile for VM tests # Hack to allow running in an unconfigured build tree -ifeq ($(wildcard $(SRC_PATH)/config-host.mak),) +ifeq ($(realpath $(SRC_PATH)),$(realpath .)) VM_PYTHON = PYTHONPATH=$(SRC_PATH)/python /usr/bin/env python3 VM_VENV = -HOST_ARCH := $(shell uname -m) else -VM_PYTHON = $(TESTS_PYTHON) +VM_PYTHON = $(PYTHON) VM_VENV = check-venv -HOST_ARCH = $(ARCH) endif .PHONY: vm-build-all vm-clean-all @@ -23,6 +21,7 @@ ARM64_IMAGES += ubuntu.aarch64 centos.aarch64 endif endif +HOST_ARCH = $(shell uname -m) ifeq ($(HOST_ARCH),x86_64) IMAGES=$(X86_IMAGES) $(if $(USE_TCG),$(ARM64_IMAGES)) else ifeq ($(HOST_ARCH),aarch64) diff --git a/tests/vm/freebsd b/tests/vm/freebsd index ba2ba23d24..11de6473f4 100755 --- a/tests/vm/freebsd +++ b/tests/vm/freebsd @@ -28,15 +28,15 @@ class FreeBSDVM(basevm.BaseVM): name = "freebsd" arch = "x86_64" - link = "https://download.freebsd.org/ftp/releases/ISO-IMAGES/12.4/FreeBSD-12.4-RELEASE-amd64-disc1.iso.xz" - csum = "1dcf6446e31bf3f81b582e9aba3319a258c29a937a2af6138ee4b181ed719a87" + link = "https://download.freebsd.org/releases/CI-IMAGES/13.2-RELEASE/amd64/Latest/FreeBSD-13.2-RELEASE-amd64-BASIC-CI.raw.xz" + csum = "a4fb3b6c7b75dd4d58fb0d75e4caf72844bffe0ca00e66459c028b198ffb3c0e" size = "20G" pkgs = [ # build tools "git", "pkgconf", "bzip2", - "python37", + "python39", "ninja", # gnu tools @@ -78,72 +78,42 @@ class FreeBSDVM(basevm.BaseVM): mkdir src build; cd src; tar -xf /dev/vtbd1; cd ../build - ../src/configure --python=python3.7 {configure_opts}; + 
../src/configure --python=python3.9 {configure_opts}; gmake --output-sync -j{jobs} {target} {verbose}; """ - def console_boot_serial(self): - self.console_wait_send("Autoboot", "3") - self.console_wait_send("OK", "set console=comconsole\n") - self.console_wait_send("OK", "boot\n") - def build_image(self, img): - self.print_step("Downloading install iso") + self.print_step("Downloading disk image") cimg = self._download_with_cache(self.link, sha256sum=self.csum) - img_tmp = img + ".tmp" - iso = img + ".install.iso" - iso_xz = iso + ".xz" + tmp_raw = img + ".tmp.raw" + tmp_raw_xz = tmp_raw + ".xz" + img_tmp = img + ".tmp.qcow2" - self.print_step("Preparing iso and disk image") - subprocess.check_call(["cp", "-f", cimg, iso_xz]) - subprocess.check_call(["xz", "-dvf", iso_xz]) - self.exec_qemu_img("create", "-f", "qcow2", img_tmp, self.size) + self.print_step("Preparing disk image") + subprocess.check_call(["cp", "-f", cimg, tmp_raw_xz]) + subprocess.check_call(["xz", "-dvf", tmp_raw_xz]) + self.exec_qemu_img("convert", "-O", "qcow2", tmp_raw, img_tmp) + self.exec_qemu_img("resize", img_tmp, self.size) + os.remove(tmp_raw) - self.print_step("Booting installer") + self.print_step("Preparing disk image") self.boot(img_tmp, extra_args = [ "-machine", "graphics=off", - "-device", "VGA", - "-cdrom", iso + "-vga", "none" ]) self.console_init() - self.console_boot_serial() - self.console_wait_send("Console type", "xterm\n") + self.console_wait_send("login:", "root\n") + self.console_wait_send("~ #", "service growfs onestart\n") - # pre-install configuration - self.console_wait_send("Welcome", "\n") - self.console_wait_send("Keymap Selection", "\n") - self.console_wait_send("Set Hostname", "freebsd\n") - self.console_wait_send("Distribution Select", "\n") - self.console_wait_send("Partitioning", "\n") - self.console_wait_send("Partition", "\n") - self.console_wait_send("Scheme", "\n") - self.console_wait_send("Editor", "f") - self.console_wait_send("Confirmation", "c") - - self.print_step("Installation started now, this will take a while") - - # post-install configuration + # root user + self.console_wait_send("~ #", "passwd\n") self.console_wait("New Password:") self.console_send("%s\n" % self._config["root_pass"]) self.console_wait("Retype New Password:") self.console_send("%s\n" % self._config["root_pass"]) - self.console_wait_send("Network Configuration", "\n") - self.console_wait_send("IPv4", "y") - self.console_wait_send("DHCP", "y") - self.console_wait_send("IPv6", "n") - self.console_wait_send("Resolver", "\n") - - self.console_wait_send("Time Zone Selector", "0\n") - self.console_wait_send("Confirmation", "y") - self.console_wait_send("Time & Date", "\n") - self.console_wait_send("Time & Date", "\n") - - self.console_wait_send("System Configuration", "\n") - self.console_wait_send("System Hardening", "\n") - # qemu user - self.console_wait_send("Add User Accounts", "y") + self.console_wait_send("~ #", "adduser\n") self.console_wait("Username") self.console_send("%s\n" % self._config["guest_user"]) self.console_wait("Full name") @@ -165,13 +135,7 @@ class FreeBSDVM(basevm.BaseVM): self.console_wait_send("Lock out", "\n") self.console_wait_send("OK", "yes\n") self.console_wait_send("Add another user", "no\n") - - self.console_wait_send("Final Configuration", "\n") - self.console_wait_send("Manual Configuration", "\n") - self.console_wait_send("Complete", "\n") - - self.print_step("Installation finished, rebooting") - self.console_boot_serial() + self.console_wait_send("~ #", "exit\n") # setup 
qemu user prompt = "$" @@ -183,35 +147,20 @@ class FreeBSDVM(basevm.BaseVM): self.console_ssh_init(prompt, "root", self._config["root_pass"]) self.console_sshd_config(prompt) - # setup serial console - self.console_wait(prompt) - self.console_send("echo 'console=comconsole' >> /boot/loader.conf\n") - - # setup boot delay - self.console_wait(prompt) - self.console_send("echo 'autoboot_delay=1' >> /boot/loader.conf\n") - # setup virtio-blk #1 (tarfile) self.console_wait(prompt) self.console_send("echo 'chmod 666 /dev/vtbd1' >> /etc/rc.local\n") - self.print_step("Configuration finished, rebooting") - self.console_wait_send(prompt, "reboot\n") - self.console_wait("login:") - self.wait_ssh() - self.print_step("Installing packages") self.ssh_root_check("pkg install -y %s\n" % " ".join(self.pkgs)) # shutdown self.ssh_root(self.poweroff) - self.console_wait("Uptime:") self.wait() if os.path.exists(img): os.remove(img) os.rename(img_tmp, img) - os.remove(iso) self.print_step("All done") if __name__ == "__main__": diff --git a/tests/vm/netbsd b/tests/vm/netbsd index aa54338dfa..c7e3f1e735 100755 --- a/tests/vm/netbsd +++ b/tests/vm/netbsd @@ -30,7 +30,8 @@ class NetBSDVM(basevm.BaseVM): "git-base", "pkgconf", "xz", - "python37", + "python310", + "py310-expat", "ninja-build", # gnu tools @@ -66,7 +67,7 @@ class NetBSDVM(basevm.BaseVM): mkdir src build; cd src; tar -xf /dev/rld1a; cd ../build - ../src/configure --python=python3.7 --disable-opengl {configure_opts}; + ../src/configure --disable-opengl {configure_opts}; gmake --output-sync -j{jobs} {target} {verbose}; """ poweroff = "/sbin/poweroff" diff --git a/tests/vm/openbsd b/tests/vm/openbsd index eaeb201e91..6b4fc29793 100755 --- a/tests/vm/openbsd +++ b/tests/vm/openbsd @@ -106,8 +106,7 @@ class OpenBSDVM(basevm.BaseVM): self.console_wait("Password for root account") self.console_send("%s\n" % self._config["root_pass"]) self.console_wait_send("Start sshd(8)", "yes\n") - self.console_wait_send("X Window System", "\n") - self.console_wait_send("xenodm", "\n") + self.console_wait_send("X Window System", "no\n") self.console_wait_send("console to com0", "\n") self.console_wait_send("Which speed", "\n") @@ -124,7 +123,32 @@ class OpenBSDVM(basevm.BaseVM): self.console_wait_send("timezone", "UTC\n") self.console_wait_send("root disk", "\n") self.console_wait_send("(W)hole disk", "\n") - self.console_wait_send("(A)uto layout", "\n") + self.console_wait_send("(A)uto layout", "c\n") + + # 4000 MB / as /dev/sd0a, at start of disk + self.console_wait_send("sd0>", "a a\n") + self.console_wait_send("offset:", "\n") + self.console_wait_send("size:", "4000M\n") + self.console_wait_send("FS type", "4.2BSD\n") + self.console_wait_send("mount point:", "/\n") + + # 256 MB swap as /dev/sd0b + self.console_wait_send("sd0*>", "a b\n") + self.console_wait_send("offset:", "\n") + self.console_wait_send("size:", "256M\n") + self.console_wait_send("FS type", "swap\n") + + # All remaining space for /home as /dev/sd0d + # NB, 'c' isn't allowed to be used. 
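+    # ('c' is rejected because BSD disklabels conventionally reserve
+    # that letter for the partition covering the whole disk.)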
+ self.console_wait_send("sd0*>", "a d\n") + self.console_wait_send("offset:", "\n") + self.console_wait_send("size:", "\n") + self.console_wait_send("FS type", "4.2BSD\n") + self.console_wait_send("mount point:", "/home\n") + + self.console_wait_send("sd0*>", "q\n") + self.console_wait_send("Write new label?:", "y\n") + self.console_wait_send("Location of sets", "cd0\n") self.console_wait_send("Pathname to the sets", "\n") self.console_wait_send("Set name(s)", "\n") diff --git a/tools/ebpf/Makefile.ebpf b/tools/ebpf/Makefile.ebpf index 8f327ae3b8..3391e7ce08 100755 --- a/tools/ebpf/Makefile.ebpf +++ b/tools/ebpf/Makefile.ebpf @@ -1,9 +1,9 @@ OBJS = rss.bpf.o -LLC ?= llc +LLVM_STRIP ?= llvm-strip CLANG ?= clang INC_FLAGS = `$(CLANG) -print-file-name=include` -EXTRA_CFLAGS ?= -O2 -emit-llvm -fno-stack-protector +EXTRA_CFLAGS ?= -O2 -g -target bpf all: $(OBJS) @@ -11,11 +11,13 @@ all: $(OBJS) clean: rm -f $(OBJS) + rm -f rss.bpf.skeleton.h $(OBJS): %.o:%.c $(CLANG) $(INC_FLAGS) \ -D__KERNEL__ -D__ASM_SYSREG_H \ -I../include $(LINUXINCLUDE) \ - $(EXTRA_CFLAGS) -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ + $(EXTRA_CFLAGS) -c $< -o $@ + $(LLVM_STRIP) -g $@ bpftool gen skeleton rss.bpf.o > rss.bpf.skeleton.h cp rss.bpf.skeleton.h ../../ebpf/ diff --git a/tools/ebpf/rss.bpf.c b/tools/ebpf/rss.bpf.c index e85ec55f9b..20f227e2ac 100644 --- a/tools/ebpf/rss.bpf.c +++ b/tools/ebpf/rss.bpf.c @@ -76,29 +76,26 @@ struct packet_hash_info_t { }; }; -struct bpf_map_def SEC("maps") -tap_rss_map_configurations = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct rss_config_t), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct rss_config_t)); + __uint(max_entries, 1); +} tap_rss_map_configurations SEC(".maps"); -struct bpf_map_def SEC("maps") -tap_rss_map_toeplitz_key = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct toeplitz_key_data_t), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct toeplitz_key_data_t)); + __uint(max_entries, 1); +} tap_rss_map_toeplitz_key SEC(".maps"); -struct bpf_map_def SEC("maps") -tap_rss_map_indirection_table = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u16), - .max_entries = INDIRECTION_TABLE_SIZE, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u16)); + __uint(max_entries, INDIRECTION_TABLE_SIZE); +} tap_rss_map_indirection_table SEC(".maps"); static inline void net_rx_rss_add_chunk(__u8 *rss_input, size_t *bytes_written, const void *ptr, size_t size) { diff --git a/trace/control-target.c b/trace/control-target.c index 232c97a4a1..c0c1e2310a 100644 --- a/trace/control-target.c +++ b/trace/control-target.c @@ -8,6 +8,7 @@ */ #include "qemu/osdep.h" +#include "qemu/lockable.h" #include "cpu.h" #include "trace/trace-root.h" #include "trace/control.h" @@ -116,11 +117,9 @@ static bool adding_first_cpu1(void) static bool adding_first_cpu(void) { - bool res; - cpu_list_lock(); - res = adding_first_cpu1(); - cpu_list_unlock(); - return res; + QEMU_LOCK_GUARD(&qemu_cpu_list_lock); + + return adding_first_cpu1(); } void trace_init_vcpu(CPUState *vcpu) diff --git a/trace/meson.build b/trace/meson.build index d565948b09..8e80be895c 100644 --- a/trace/meson.build +++ b/trace/meson.build @@ -89,4 +89,6 @@ if 'ftrace' in 
get_option('trace_backends') trace_ss.add(files('ftrace.c')) endif trace_ss.add(files('control.c')) -trace_ss.add(files('qmp.c')) +if have_system or have_tools or have_ga + trace_ss.add(files('qmp.c')) +endif diff --git a/ui/cocoa.m b/ui/cocoa.m index e915c344a8..168170a8a6 100644 --- a/ui/cocoa.m +++ b/ui/cocoa.m @@ -46,6 +46,7 @@ #include "qemu/cutils.h" #include "qemu/main-loop.h" #include "qemu/module.h" +#include "qemu/error-report.h" #include <Carbon/Carbon.h> #include "hw/core/cpu.h" @@ -72,6 +73,9 @@ #define cgrect(nsrect) (*(CGRect *)&(nsrect)) +#define UC_CTRL_KEY "\xe2\x8c\x83" +#define UC_ALT_KEY "\xe2\x8c\xa5" + typedef struct { int width; int height; @@ -1135,9 +1139,9 @@ static CGEventRef handleTapEvent(CGEventTapProxy proxy, CGEventType type, CGEven if (!isFullscreen) { if (qemu_name) - [normalWindow setTitle:[NSString stringWithFormat:@"QEMU %s - (Press ctrl + alt + g to release Mouse)", qemu_name]]; + [normalWindow setTitle:[NSString stringWithFormat:@"QEMU %s - (Press " UC_CTRL_KEY " " UC_ALT_KEY " G to release Mouse)", qemu_name]]; else - [normalWindow setTitle:@"QEMU - (Press ctrl + alt + g to release Mouse)"]; + [normalWindow setTitle:@"QEMU - (Press " UC_CTRL_KEY " " UC_ALT_KEY " G to release Mouse)"]; } [self hideCursor]; CGAssociateMouseAndMouseCursorPosition(isAbsoluteEnabled); @@ -1327,10 +1331,15 @@ static CGEventRef handleTapEvent(CGEventTapProxy proxy, CGEventType type, CGEven return NO; } -/* Called when QEMU goes into the background */ -- (void) applicationWillResignActive: (NSNotification *)aNotification +/* + * Called when QEMU goes into the background. Note that + * [-NSWindowDelegate windowDidResignKey:] is used here instead of + * [-NSApplicationDelegate applicationWillResignActive:] because it cannot + * detect that the window loses focus when the dock is clicked on macOS 13.2.1. + */ +- (void) windowDidResignKey: (NSNotification *)aNotification { - COCOA_DEBUG("QemuCocoaAppController: applicationWillResignActive\n"); + COCOA_DEBUG("%s\n", __func__); [cocoaView ungrabMouse]; [cocoaView raiseAllKeys]; } diff --git a/ui/console.c b/ui/console.c index 98b701f5a3..e173731e20 100644 --- a/ui/console.c +++ b/ui/console.c @@ -94,6 +94,8 @@ struct QemuConsole { uint32_t head; QemuUIInfo ui_info; QEMUTimer *ui_timer; + QEMUCursor *cursor; + int cursor_x, cursor_y, cursor_on; const GraphicHwOps *hw_ops; void *hw; @@ -309,7 +311,7 @@ static bool png_save(int fd, pixman_image_t *image, Error **errp) png_struct *png_ptr; png_info *info_ptr; g_autoptr(pixman_image_t) linebuf = - qemu_pixman_linebuf_create(PIXMAN_a8r8g8b8, width); + qemu_pixman_linebuf_create(PIXMAN_BE_r8g8b8, width); uint8_t *buf = (uint8_t *)pixman_image_get_data(linebuf); FILE *f = fdopen(fd, "wb"); int y; @@ -339,7 +341,7 @@ static bool png_save(int fd, pixman_image_t *image, Error **errp) png_init_io(png_ptr, f); png_set_IHDR(png_ptr, info_ptr, width, height, 8, - PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE, + PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE); png_write_info(png_ptr, info_ptr); @@ -1661,6 +1663,12 @@ void register_displaychangelistener(DisplayChangeListener *dcl) con = active_console; } displaychangelistener_display_console(dcl, con, dcl->con ?
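+    /*
+     * The additions below replay the console's cached cursor state, so a
+     * listener that attaches late, such as a remote display reconnecting,
+     * receives the current cursor image and position instead of waiting
+     * for the guest's next dpy_cursor_define() or dpy_mouse_set() call.
+     */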
&error_fatal : NULL); + if (con && con->cursor && dcl->ops->dpy_cursor_define) { + dcl->ops->dpy_cursor_define(dcl, con->cursor); + } + if (con && dcl->ops->dpy_mouse_set) { + dcl->ops->dpy_mouse_set(dcl, con->cursor_x, con->cursor_y, con->cursor_on); + } text_console_update_cursor(NULL); } @@ -1905,6 +1913,9 @@ void dpy_mouse_set(QemuConsole *con, int x, int y, int on) DisplayState *s = con->ds; DisplayChangeListener *dcl; + con->cursor_x = x; + con->cursor_y = y; + con->cursor_on = on; if (!qemu_console_is_visible(con)) { return; } @@ -1923,6 +1934,8 @@ void dpy_cursor_define(QemuConsole *con, QEMUCursor *cursor) DisplayState *s = con->ds; DisplayChangeListener *dcl; + cursor_unref(con->cursor); + con->cursor = cursor_ref(cursor); if (!qemu_console_is_visible(con)) { return; } @@ -2288,6 +2301,14 @@ QemuConsole *qemu_console_lookup_unused(void) return NULL; } +QEMUCursor *qemu_console_get_cursor(QemuConsole *con) +{ + if (con == NULL) { + con = active_console; + } + return con->cursor; +} + bool qemu_console_is_visible(QemuConsole *con) { return (con == active_console) || (con->dcls > 0); diff --git a/ui/cursor.c b/ui/cursor.c index 835f0802f9..29717b3ecb 100644 --- a/ui/cursor.c +++ b/ui/cursor.c @@ -90,11 +90,12 @@ QEMUCursor *cursor_builtin_left_ptr(void) return cursor_parse_xpm(cursor_left_ptr_xpm); } -QEMUCursor *cursor_alloc(int width, int height) +QEMUCursor *cursor_alloc(uint16_t width, uint16_t height) { QEMUCursor *c; size_t datasize = width * height * sizeof(uint32_t); + /* Modern physical hardware typically uses 512x512 sprites */ if (width > 512 || height > 512) { return NULL; } @@ -106,12 +107,13 @@ QEMUCursor *cursor_alloc(int width, int height) return c; } -void cursor_get(QEMUCursor *c) +QEMUCursor *cursor_ref(QEMUCursor *c) { c->refcount++; + return c; } -void cursor_put(QEMUCursor *c) +void cursor_unref(QEMUCursor *c) { if (c == NULL) return; diff --git a/ui/dbus-clipboard.c b/ui/dbus-clipboard.c index df9a754a8d..fe7fcdecb6 100644 --- a/ui/dbus-clipboard.c +++ b/ui/dbus-clipboard.c @@ -204,15 +204,6 @@ dbus_clipboard_unregister_proxy(DBusDisplay *dpy) g_clear_object(&dpy->clipboard_proxy); } -static void -dbus_on_clipboard_proxy_name_owner_changed( - DBusDisplay *dpy, - GObject *object, - GParamSpec *pspec) -{ - dbus_clipboard_unregister_proxy(dpy); -} - static gboolean dbus_clipboard_register( DBusDisplay *dpy, @@ -220,6 +211,7 @@ dbus_clipboard_register( { g_autoptr(GError) err = NULL; const char *name = NULL; + GDBusConnection *connection = g_dbus_method_invocation_get_connection(invocation); if (dpy->clipboard_proxy) { g_dbus_method_invocation_return_error( @@ -232,7 +224,7 @@ dbus_clipboard_register( dpy->clipboard_proxy = qemu_dbus_display1_clipboard_proxy_new_sync( - g_dbus_method_invocation_get_connection(invocation), + connection, G_DBUS_PROXY_FLAGS_DO_NOT_AUTO_START, g_dbus_method_invocation_get_sender(invocation), "/org/qemu/Display1/Clipboard", @@ -252,7 +244,11 @@ dbus_clipboard_register( g_object_connect(dpy->clipboard_proxy, "swapped-signal::notify::g-name-owner", - dbus_on_clipboard_proxy_name_owner_changed, dpy, + dbus_clipboard_unregister_proxy, dpy, + NULL); + g_object_connect(connection, + "swapped-signal::closed", + dbus_clipboard_unregister_proxy, dpy, NULL); qemu_clipboard_reset_serial(); diff --git a/ui/dbus-console.c b/ui/dbus-console.c index 0bfaa2298d..f77bc49d2e 100644 --- a/ui/dbus-console.c +++ b/ui/dbus-console.c @@ -411,15 +411,21 @@ dbus_mouse_release(DBusDisplayConsole *ddc, return DBUS_METHOD_INVOCATION_HANDLED; } +static void 
+dbus_mouse_update_is_absolute(DBusDisplayConsole *ddc) +{ + g_object_set(ddc->iface_mouse, + "is-absolute", qemu_input_is_absolute(), + NULL); +} + static void dbus_mouse_mode_change(Notifier *notify, void *data) { DBusDisplayConsole *ddc = container_of(notify, DBusDisplayConsole, mouse_mode_notifier); - g_object_set(ddc->iface_mouse, - "is-absolute", qemu_input_is_absolute(), - NULL); + dbus_mouse_update_is_absolute(ddc); } int dbus_display_console_get_index(DBusDisplayConsole *ddc) @@ -492,6 +498,7 @@ dbus_display_console_new(DBusDisplay *display, QemuConsole *con) register_displaychangelistener(&ddc->dcl); ddc->mouse_mode_notifier.notify = dbus_mouse_mode_change; qemu_add_mouse_mode_change_notifier(&ddc->mouse_mode_notifier); + dbus_mouse_update_is_absolute(ddc); return ddc; } diff --git a/ui/dbus-listener.c b/ui/dbus-listener.c index 57d4e401db..23034eebf9 100644 --- a/ui/dbus-listener.c +++ b/ui/dbus-listener.c @@ -27,9 +27,11 @@ #include "dbus.h" #include <gio/gunixfdlist.h> +#ifdef CONFIG_OPENGL #include "ui/shader.h" #include "ui/egl-helpers.h" #include "ui/egl-context.h" +#endif #include "trace.h" struct _DBusDisplayListener { @@ -48,6 +50,7 @@ struct _DBusDisplayListener { G_DEFINE_TYPE(DBusDisplayListener, dbus_display_listener, G_TYPE_OBJECT) +#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM) static void dbus_update_gl_cb(GObject *source_object, GAsyncResult *res, gpointer user_data) @@ -99,6 +102,7 @@ static void dbus_scanout_dmabuf(DisplayChangeListener *dcl, return; } + /* FIXME: add missing x/y/w/h support */ qemu_dbus_display1_listener_call_scanout_dmabuf( ddl->proxy, g_variant_new_handle(0), @@ -126,6 +130,10 @@ static void dbus_scanout_texture(DisplayChangeListener *dcl, .width = backing_width, .height = backing_height, .y0_top = backing_y_0_top, + .x = x, + .y = y, + .scanout_width = w, + .scanout_height = h, }; assert(tex_id); @@ -149,7 +157,7 @@ static void dbus_cursor_dmabuf(DisplayChangeListener *dcl, DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); DisplaySurface *ds; GVariant *v_data = NULL; - egl_fb cursor_fb; + egl_fb cursor_fb = EGL_FB_INIT; if (!dmabuf) { qemu_dbus_display1_listener_call_mouse_set( @@ -229,12 +237,14 @@ static void dbus_gl_refresh(DisplayChangeListener *dcl) ddl->gl_updates = 0; } } +#endif static void dbus_refresh(DisplayChangeListener *dcl) { graphic_hw_update(dcl->con); } +#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM) static void dbus_gl_gfx_update(DisplayChangeListener *dcl, int x, int y, int w, int h) { @@ -242,6 +252,7 @@ static void dbus_gl_gfx_update(DisplayChangeListener *dcl, ddl->gl_updates++; } +#endif static void dbus_gfx_update(DisplayChangeListener *dcl, int x, int y, int w, int h) @@ -296,6 +307,7 @@ static void dbus_gfx_update(DisplayChangeListener *dcl, DBUS_DEFAULT_TIMEOUT, NULL, NULL, NULL); } +#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM) static void dbus_gl_gfx_switch(DisplayChangeListener *dcl, struct DisplaySurface *new_surface) { @@ -311,6 +323,7 @@ static void dbus_gl_gfx_switch(DisplayChangeListener *dcl, width, height, 0, 0, width, height); } } +#endif static void dbus_gfx_switch(DisplayChangeListener *dcl, struct DisplaySurface *new_surface) @@ -339,14 +352,13 @@ static void dbus_cursor_define(DisplayChangeListener *dcl, DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); GVariant *v_data = NULL; - cursor_get(c); v_data = g_variant_new_from_data( G_VARIANT_TYPE("ay"), c->data, c->width * c->height * 4, TRUE, - (GDestroyNotify)cursor_put, - c); + (GDestroyNotify)cursor_unref, + 
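+    /*
+     * Ownership note: g_variant_new_from_data() keeps the buffer alive
+     * until the variant is finalized and then invokes the GDestroyNotify,
+     * so taking a reference inline pins the cursor for the variant's
+     * lifetime. This is the reason cursor_ref() was changed above to
+     * return the QEMUCursor pointer rather than void.
+     */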
cursor_ref(c)); qemu_dbus_display1_listener_call_cursor_define( ddl->proxy, @@ -362,6 +374,7 @@ static void dbus_cursor_define(DisplayChangeListener *dcl, NULL); } +#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM) const DisplayChangeListenerOps dbus_gl_dcl_ops = { .dpy_name = "dbus-gl", .dpy_gfx_update = dbus_gl_gfx_update, @@ -379,6 +392,7 @@ const DisplayChangeListenerOps dbus_gl_dcl_ops = { .dpy_gl_release_dmabuf = dbus_release_dmabuf, .dpy_gl_update = dbus_scanout_update, }; +#endif const DisplayChangeListenerOps dbus_dcl_ops = { .dpy_name = "dbus", @@ -407,11 +421,12 @@ dbus_display_listener_constructed(GObject *object) { DBusDisplayListener *ddl = DBUS_DISPLAY_LISTENER(object); + ddl->dcl.ops = &dbus_dcl_ops; +#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM) if (display_opengl) { ddl->dcl.ops = &dbus_gl_dcl_ops; - } else { - ddl->dcl.ops = &dbus_dcl_ops; } +#endif G_OBJECT_CLASS(dbus_display_listener_parent_class)->constructed(object); } diff --git a/ui/dbus.c b/ui/dbus.c index f2dcba03d0..b9e9698503 100644 --- a/ui/dbus.c +++ b/ui/dbus.c @@ -30,8 +30,10 @@ #include "qom/object_interfaces.h" #include "sysemu/sysemu.h" #include "ui/dbus-module.h" +#ifdef CONFIG_OPENGL #include "ui/egl-helpers.h" #include "ui/egl-context.h" +#endif #include "audio/audio.h" #include "audio/audio_int.h" #include "qapi/error.h" @@ -41,11 +43,14 @@ static DBusDisplay *dbus_display; +#ifdef CONFIG_OPENGL static QEMUGLContext dbus_create_context(DisplayGLCtx *dgc, QEMUGLParams *params) { +#ifdef CONFIG_GBM eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE, qemu_egl_rn_ctx); +#endif return qemu_egl_create_context(dgc, params); } @@ -53,7 +58,11 @@ static bool dbus_is_compatible_dcl(DisplayGLCtx *dgc, DisplayChangeListener *dcl) { - return dcl->ops == &dbus_gl_dcl_ops || dcl->ops == &dbus_console_dcl_ops; + return +#ifdef CONFIG_GBM + dcl->ops == &dbus_gl_dcl_ops || +#endif + dcl->ops == &dbus_console_dcl_ops; } static void @@ -84,6 +93,7 @@ static const DisplayGLCtxOps dbus_gl_ops = { .dpy_gl_ctx_destroy_texture = dbus_destroy_texture, .dpy_gl_ctx_update_texture = dbus_update_texture, }; +#endif static NotifierList dbus_display_notifiers = NOTIFIER_LIST_INITIALIZER(dbus_display_notifiers); @@ -112,10 +122,12 @@ dbus_display_init(Object *o) DBusDisplay *dd = DBUS_DISPLAY(o); g_autoptr(GDBusObjectSkeleton) vm = NULL; +#ifdef CONFIG_OPENGL dd->glctx.ops = &dbus_gl_ops; if (display_opengl) { dd->glctx.gls = qemu_gl_init_shader(); } +#endif dd->iface = qemu_dbus_display1_vm_skeleton_new(); dd->consoles = g_ptr_array_new_with_free_func(g_object_unref); @@ -152,7 +164,9 @@ dbus_display_finalize(Object *o) g_clear_object(&dd->iface); g_free(dd->dbus_addr); g_free(dd->audiodev); +#ifdef CONFIG_OPENGL g_clear_pointer(&dd->glctx.gls, qemu_gl_fini_shader); +#endif dbus_display = NULL; } @@ -220,7 +234,7 @@ dbus_display_complete(UserCreatable *uc, Error **errp) dd->audiodev); return; } - audio_state->drv->set_dbus_server(audio_state, dd->server); + audio_state->drv->set_dbus_server(audio_state, dd->server, dd->p2p); } consoles = g_array_new(FALSE, FALSE, sizeof(guint32)); @@ -290,11 +304,20 @@ dbus_display_add_client(int csock, Error **errp) g_cancellable_cancel(dbus_display->add_client_cancellable); } +#ifdef WIN32 + socket = g_socket_new_from_fd(_get_osfhandle(csock), &err); +#else socket = g_socket_new_from_fd(csock, &err); +#endif if (!socket) { error_setg(errp, "Failed to setup D-Bus socket: %s", err->message); + close(csock); return false; } +#ifdef WIN32 + /* socket owns the SOCKET handle now, so 
release our osf handle */ + qemu_close_socket_osfhandle(csock); +#endif conn = g_socket_connection_factory_create_connection(socket); @@ -451,12 +474,11 @@ early_dbus_init(DisplayOptions *opts) DisplayGLMode mode = opts->has_gl ? opts->gl : DISPLAYGL_MODE_OFF; if (mode != DISPLAYGL_MODE_OFF) { - if (egl_rendernode_init(opts->u.dbus.rendernode, mode) < 0) { - error_report("dbus: render node init failed"); - exit(1); - } - - display_opengl = 1; +#ifdef CONFIG_OPENGL + egl_init(opts->u.dbus.rendernode, mode, &error_fatal); +#else + error_report("dbus: GL rendering is not supported"); +#endif } type_register(&dbus_vc_type_info); diff --git a/ui/egl-headless.c b/ui/egl-headless.c index ae07e91302..ef70e6a18e 100644 --- a/ui/egl-headless.c +++ b/ui/egl-headless.c @@ -1,7 +1,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "qemu/module.h" -#include "sysemu/sysemu.h" +#include "qapi/error.h" #include "ui/console.h" #include "ui/egl-helpers.h" #include "ui/egl-context.h" @@ -191,21 +191,21 @@ static const DisplayGLCtxOps eglctx_ops = { static void early_egl_headless_init(DisplayOptions *opts) { - display_opengl = 1; + DisplayGLMode mode = DISPLAYGL_MODE_ON; + + if (opts->has_gl) { + mode = opts->gl; + } + + egl_init(opts->u.egl_headless.rendernode, mode, &error_fatal); } static void egl_headless_init(DisplayState *ds, DisplayOptions *opts) { - DisplayGLMode mode = opts->has_gl ? opts->gl : DISPLAYGL_MODE_ON; QemuConsole *con; egl_dpy *edpy; int idx; - if (egl_rendernode_init(opts->u.egl_headless.rendernode, mode) < 0) { - error_report("egl: render node init failed"); - exit(1); - } - for (idx = 0;; idx++) { DisplayGLCtx *ctx; diff --git a/ui/egl-helpers.c b/ui/egl-helpers.c index 3a88245b67..4203163ace 100644 --- a/ui/egl-helpers.c +++ b/ui/egl-helpers.c @@ -19,6 +19,8 @@ #include "qemu/error-report.h" #include "ui/console.h" #include "ui/egl-helpers.h" +#include "sysemu/sysemu.h" +#include "qapi/error.h" EGLDisplay *qemu_egl_display; EGLConfig qemu_egl_config; @@ -26,6 +28,48 @@ DisplayGLMode qemu_egl_mode; /* ------------------------------------------------------------------ */ +#if defined(CONFIG_X11) || defined(CONFIG_GBM) +static const char *egl_get_error_string(void) +{ + EGLint error = eglGetError(); + + switch (error) { + case EGL_SUCCESS: + return "EGL_SUCCESS"; + case EGL_NOT_INITIALIZED: + return "EGL_NOT_INITIALIZED"; + case EGL_BAD_ACCESS: + return "EGL_BAD_ACCESS"; + case EGL_BAD_ALLOC: + return "EGL_BAD_ALLOC"; + case EGL_BAD_ATTRIBUTE: + return "EGL_BAD_ATTRIBUTE"; + case EGL_BAD_CONTEXT: + return "EGL_BAD_CONTEXT"; + case EGL_BAD_CONFIG: + return "EGL_BAD_CONFIG"; + case EGL_BAD_CURRENT_SURFACE: + return "EGL_BAD_CURRENT_SURFACE"; + case EGL_BAD_DISPLAY: + return "EGL_BAD_DISPLAY"; + case EGL_BAD_SURFACE: + return "EGL_BAD_SURFACE"; + case EGL_BAD_MATCH: + return "EGL_BAD_MATCH"; + case EGL_BAD_PARAMETER: + return "EGL_BAD_PARAMETER"; + case EGL_BAD_NATIVE_PIXMAP: + return "EGL_BAD_NATIVE_PIXMAP"; + case EGL_BAD_NATIVE_WINDOW: + return "EGL_BAD_NATIVE_WINDOW"; + case EGL_CONTEXT_LOST: + return "EGL_CONTEXT_LOST"; + default: + return "Unknown EGL error"; + } +} +#endif + static void egl_fb_delete_texture(egl_fb *fb) { if (!fb->delete_texture) { @@ -438,20 +482,20 @@ static int qemu_egl_init_dpy(EGLNativeDisplayType dpy, qemu_egl_display = qemu_egl_get_display(dpy, platform); if (qemu_egl_display == EGL_NO_DISPLAY) { - error_report("egl: eglGetDisplay failed"); + error_report("egl: eglGetDisplay failed: %s", egl_get_error_string()); return -1; } b = 
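+    /*
+     * eglGetError() reports the most recent error raised on the calling
+     * thread, so egl_get_error_string() only identifies the right failure
+     * when it is read immediately after the call that failed, which is
+     * how each of the reports in this function uses it.
+     */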
eglInitialize(qemu_egl_display, &major, &minor); if (b == EGL_FALSE) { - error_report("egl: eglInitialize failed"); + error_report("egl: eglInitialize failed: %s", egl_get_error_string()); return -1; } b = eglBindAPI(gles ? EGL_OPENGL_ES_API : EGL_OPENGL_API); if (b == EGL_FALSE) { - error_report("egl: eglBindAPI failed (%s mode)", - gles ? "gles" : "core"); + error_report("egl: eglBindAPI failed (%s mode): %s", + gles ? "gles" : "core", egl_get_error_string()); return -1; } @@ -459,8 +503,8 @@ static int qemu_egl_init_dpy(EGLNativeDisplayType dpy, gles ? conf_att_gles : conf_att_core, &qemu_egl_config, 1, &n); if (b == EGL_FALSE || n != 1) { - error_report("egl: eglChooseConfig failed (%s mode)", - gles ? "gles" : "core"); + error_report("egl: eglChooseConfig failed (%s mode): %s", + gles ? "gles" : "core", egl_get_error_string()); return -1; } @@ -527,3 +571,25 @@ EGLContext qemu_egl_init_ctx(void) return ectx; } + +bool egl_init(const char *rendernode, DisplayGLMode mode, Error **errp) +{ + ERRP_GUARD(); + + if (mode == DISPLAYGL_MODE_OFF) { + error_setg(errp, "egl: turning off GL doesn't make sense"); + return false; + } + +#ifdef CONFIG_GBM + if (egl_rendernode_init(rendernode, mode) < 0) { + error_setg(errp, "egl: render node init failed"); + return false; + } + display_opengl = 1; + return true; +#else + error_setg(errp, "egl: not available on this platform"); + return false; +#endif +} diff --git a/ui/gtk-egl.c b/ui/gtk-egl.c index e84431790c..19130041bc 100644 --- a/ui/gtk-egl.c +++ b/ui/gtk-egl.c @@ -88,8 +88,8 @@ void gd_egl_draw(VirtualConsole *vc) #endif gd_egl_scanout_flush(&vc->gfx.dcl, 0, 0, vc->gfx.w, vc->gfx.h); - vc->gfx.scale_x = (double)ww / vc->gfx.w; - vc->gfx.scale_y = (double)wh / vc->gfx.h; + vc->gfx.scale_x = (double)ww / surface_width(vc->gfx.ds); + vc->gfx.scale_y = (double)wh / surface_height(vc->gfx.ds); glFlush(); #ifdef CONFIG_GBM @@ -256,7 +256,7 @@ void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl, } gd_egl_scanout_texture(dcl, dmabuf->texture, - false, dmabuf->width, dmabuf->height, + dmabuf->y0_top, dmabuf->width, dmabuf->height, 0, 0, dmabuf->width, dmabuf->height); if (dmabuf->allow_fences) { diff --git a/ui/gtk-gl-area.c b/ui/gtk-gl-area.c index 7696df1f6b..c384a1516b 100644 --- a/ui/gtk-gl-area.c +++ b/ui/gtk-gl-area.c @@ -298,7 +298,7 @@ void gd_gl_area_scanout_dmabuf(DisplayChangeListener *dcl, } gd_gl_area_scanout_texture(dcl, dmabuf->texture, - false, dmabuf->width, dmabuf->height, + dmabuf->y0_top, dmabuf->width, dmabuf->height, 0, 0, dmabuf->width, dmabuf->height); if (dmabuf->allow_fences) { diff --git a/ui/gtk.c b/ui/gtk.c index fd82e9b1ca..e50f950f2b 100644 --- a/ui/gtk.c +++ b/ui/gtk.c @@ -130,6 +130,13 @@ typedef struct VCChardev VCChardev; DECLARE_INSTANCE_CHECKER(VCChardev, VC_CHARDEV, TYPE_CHARDEV_VC) +struct touch_slot { + int x; + int y; + int tracking_id; +}; +static struct touch_slot touch_slots[INPUT_EVENT_SLOTS_MAX]; + bool gtk_use_gl_area; static void gd_grab_pointer(VirtualConsole *vc, const char *reason); @@ -450,7 +457,8 @@ static void gd_mouse_set(DisplayChangeListener *dcl, GdkDisplay *dpy; gint x_root, y_root; - if (qemu_input_is_absolute()) { + if (!gtk_widget_get_realized(vc->gfx.drawing_area) || + qemu_input_is_absolute()) { return; } @@ -868,7 +876,6 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion, { VirtualConsole *vc = opaque; GtkDisplayState *s = vc->s; - GdkWindow *window; int x, y; int mx, my; int fbh, fbw; @@ -881,10 +888,9 @@ static gboolean gd_motion_event(GtkWidget *widget, 
GdkEventMotion *motion, fbw = surface_width(vc->gfx.ds) * vc->gfx.scale_x; fbh = surface_height(vc->gfx.ds) * vc->gfx.scale_y; - window = gtk_widget_get_window(vc->gfx.drawing_area); - ww = gdk_window_get_width(window); - wh = gdk_window_get_height(window); - ws = gdk_window_get_scale_factor(window); + ww = gtk_widget_get_allocated_width(widget); + wh = gtk_widget_get_allocated_height(widget); + ws = gtk_widget_get_scale_factor(widget); mx = my = 0; if (ww > fbw) { @@ -1058,6 +1064,82 @@ static gboolean gd_scroll_event(GtkWidget *widget, GdkEventScroll *scroll, } +static gboolean gd_touch_event(GtkWidget *widget, GdkEventTouch *touch, + void *opaque) +{ + VirtualConsole *vc = opaque; + struct touch_slot *slot; + uint64_t num_slot = GPOINTER_TO_UINT(touch->sequence); + bool needs_sync = false; + int update; + int type = -1; + int i; + + if (num_slot >= INPUT_EVENT_SLOTS_MAX) { + warn_report("gtk: unexpected touch slot number: %" PRIu64 " >= %d", + num_slot, INPUT_EVENT_SLOTS_MAX); + return FALSE; + } + + slot = &touch_slots[num_slot]; + slot->x = touch->x; + slot->y = touch->y; + + switch (touch->type) { + case GDK_TOUCH_BEGIN: + type = INPUT_MULTI_TOUCH_TYPE_BEGIN; + slot->tracking_id = num_slot; + break; + case GDK_TOUCH_UPDATE: + type = INPUT_MULTI_TOUCH_TYPE_UPDATE; + break; + case GDK_TOUCH_END: + case GDK_TOUCH_CANCEL: + type = INPUT_MULTI_TOUCH_TYPE_END; + break; + default: + warn_report("gtk: unexpected touch event type"); + } + + for (i = 0; i < INPUT_EVENT_SLOTS_MAX; ++i) { + if (i == num_slot) { + update = type; + } else { + update = INPUT_MULTI_TOUCH_TYPE_UPDATE; + } + + slot = &touch_slots[i]; + + if (slot->tracking_id == -1) { + continue; + } + + if (update == INPUT_MULTI_TOUCH_TYPE_END) { + slot->tracking_id = -1; + qemu_input_queue_mtt(vc->gfx.dcl.con, update, i, slot->tracking_id); + needs_sync = true; + } else { + qemu_input_queue_mtt(vc->gfx.dcl.con, update, i, slot->tracking_id); + qemu_input_queue_btn(vc->gfx.dcl.con, INPUT_BUTTON_TOUCH, true); + qemu_input_queue_mtt_abs(vc->gfx.dcl.con, + INPUT_AXIS_X, (int) slot->x, + 0, surface_width(vc->gfx.ds), + i, slot->tracking_id); + qemu_input_queue_mtt_abs(vc->gfx.dcl.con, + INPUT_AXIS_Y, (int) slot->y, + 0, surface_height(vc->gfx.ds), + i, slot->tracking_id); + needs_sync = true; + } + } + + if (needs_sync) { + qemu_input_event_sync(); + } + + return TRUE; +} + static const guint16 *gd_get_keymap(size_t *maplen) { GdkDisplay *dpy = gdk_display_get_default(); @@ -1783,7 +1865,9 @@ static void gd_vc_chr_accept_input(Chardev *chr) VCChardev *vcd = VC_CHARDEV(chr); VirtualConsole *vc = vcd->console; - gd_vc_send_chars(vc); + if (vc) { + gd_vc_send_chars(vc); + } } static void gd_vc_chr_set_echo(Chardev *chr, bool echo) @@ -1977,6 +2061,8 @@ static void gd_connect_vc_gfx_signals(VirtualConsole *vc) G_CALLBACK(gd_key_event), vc); g_signal_connect(vc->gfx.drawing_area, "key-release-event", G_CALLBACK(gd_key_event), vc); + g_signal_connect(vc->gfx.drawing_area, "touch-event", + G_CALLBACK(gd_touch_event), vc); g_signal_connect(vc->gfx.drawing_area, "enter-notify-event", G_CALLBACK(gd_enter_event), vc); @@ -2086,6 +2172,7 @@ static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc, GSList *group, GtkWidget *view_menu) { bool zoom_to_fit = false; + int i; vc->label = qemu_console_get_label(con); vc->s = s; @@ -2133,6 +2220,7 @@ static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc, GDK_BUTTON_PRESS_MASK | GDK_BUTTON_RELEASE_MASK | GDK_BUTTON_MOTION_MASK | + GDK_TOUCH_MASK | GDK_ENTER_NOTIFY_MASK | 
GDK_LEAVE_NOTIFY_MASK | GDK_SCROLL_MASK | @@ -2168,6 +2256,11 @@ static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc, s->free_scale = true; } + for (i = 0; i < INPUT_EVENT_SLOTS_MAX; i++) { + struct touch_slot *slot = &touch_slots[i]; + slot->tracking_id = -1; + } + return group; } @@ -2442,6 +2535,12 @@ static void early_gtk_display_init(DisplayOptions *opts) gtk_use_gl_area = true; gtk_gl_area_init(); } else +#endif +#if defined(GDK_WINDOWING_WIN32) + if (GDK_IS_WIN32_DISPLAY(gdk_display_get_default())) { + gtk_use_gl_area = true; + gtk_gl_area_init(); + } else #endif { #ifdef CONFIG_X11 diff --git a/ui/input.c b/ui/input.c index f2d1e7a3a7..1aad64b07c 100644 --- a/ui/input.c +++ b/ui/input.c @@ -212,6 +212,7 @@ static void qemu_input_event_trace(QemuConsole *src, InputEvent *evt) InputKeyEvent *key; InputBtnEvent *btn; InputMoveEvent *move; + InputMultiTouchEvent *mtt; if (src) { idx = qemu_console_get_index(src); @@ -250,6 +251,11 @@ static void qemu_input_event_trace(QemuConsole *src, InputEvent *evt) name = InputAxis_str(move->axis); trace_input_event_abs(idx, name, move->value); break; + case INPUT_EVENT_KIND_MTT: + mtt = evt->u.mtt.data; + name = InputAxis_str(mtt->axis); + trace_input_event_mtt(idx, name, mtt->value); + break; case INPUT_EVENT_KIND__MAX: /* keep gcc happy */ break; @@ -541,6 +547,42 @@ void qemu_input_queue_abs(QemuConsole *src, InputAxis axis, int value, qemu_input_event_send(src, &evt); } +void qemu_input_queue_mtt(QemuConsole *src, InputMultiTouchType type, + int slot, int tracking_id) +{ + InputMultiTouchEvent mtt = { + .type = type, + .slot = slot, + .tracking_id = tracking_id, + }; + InputEvent evt = { + .type = INPUT_EVENT_KIND_MTT, + .u.mtt.data = &mtt, + }; + + qemu_input_event_send(src, &evt); +} + +void qemu_input_queue_mtt_abs(QemuConsole *src, InputAxis axis, int value, + int min_in, int max_in, int slot, int tracking_id) +{ + InputMultiTouchEvent mtt = { + .type = INPUT_MULTI_TOUCH_TYPE_DATA, + .slot = slot, + .tracking_id = tracking_id, + .axis = axis, + .value = qemu_input_scale_axis(value, min_in, max_in, + INPUT_EVENT_ABS_MIN, + INPUT_EVENT_ABS_MAX), + }; + InputEvent evt = { + .type = INPUT_EVENT_KIND_MTT, + .u.mtt.data = &mtt, + }; + + qemu_input_event_send(src, &evt); +} + void qemu_input_check_mode_change(void) { static int current_is_absolute; diff --git a/ui/meson.build b/ui/meson.build index 612ea2325b..e09b616a66 100644 --- a/ui/meson.build +++ b/ui/meson.build @@ -83,7 +83,9 @@ if dbus_display '--interface-prefix', 'org.qemu.', '--c-namespace', 'QemuDBus', '--generate-c-code', '@BASENAME@']) - dbus_ss.add(when: [gio, pixman, opengl, gbm], + dbus_display1_lib = static_library('dbus-display1', dbus_display1, dependencies: gio) + dbus_display1_dep = declare_dependency(link_with: dbus_display1_lib, include_directories: include_directories('.')) + dbus_ss.add(when: [gio, pixman, dbus_display1_dep], if_true: [files( 'dbus-chardev.c', 'dbus-clipboard.c', @@ -91,7 +93,7 @@ if dbus_display 'dbus-error.c', 'dbus-listener.c', 'dbus.c', - ), dbus_display1]) + ), opengl, gbm]) ui_modules += {'dbus' : dbus_ss} endif @@ -160,15 +162,15 @@ keymaps = [ ] if have_system or xkbcommon.found() + keycodemapdb_proj = subproject('keycodemapdb', required: true) foreach e : keymaps output = 'input-keymap-@0@-to-@1@.c.inc'.format(e[0], e[1]) genh += custom_target(output, output: output, capture: true, - input: files('keycodemapdb/data/keymaps.csv'), - command: [python, files('keycodemapdb/tools/keymap-gen'), - 'code-map', - '--lang', 'glib2', + 
input: keycodemapdb_proj.get_variable('keymaps_csv'), + command: [python, keycodemapdb_proj.get_variable('keymap_gen').full_path(), + 'code-map', '--lang', 'glib2', '--varname', 'qemu_input_map_@0@_to_@1@'.format(e[0], e[1]), '@INPUT0@', e[0], e[1]]) endforeach diff --git a/ui/sdl2-gl.c b/ui/sdl2-gl.c index 39cab8cde7..bbfa70eac3 100644 --- a/ui/sdl2-gl.c +++ b/ui/sdl2-gl.c @@ -67,6 +67,10 @@ void sdl2_gl_update(DisplayChangeListener *dcl, assert(scon->opengl); + if (!scon->real_window) { + return; + } + SDL_GL_MakeCurrent(scon->real_window, scon->winctx); surface_gl_update_texture(scon->gls, scon->surface, x, y, w, h); scon->updates++; diff --git a/ui/sdl2.c b/ui/sdl2.c index 8cb77416af..9d703200bf 100644 --- a/ui/sdl2.c +++ b/ui/sdl2.c @@ -58,6 +58,11 @@ static Notifier mouse_mode_notifier; #define SDL2_MAX_IDLE_COUNT (2 * GUI_REFRESH_INTERVAL_DEFAULT \ / SDL2_REFRESH_INTERVAL_BUSY + 1) +/* introduced in SDL 2.0.10 */ +#ifndef SDL_HINT_RENDER_BATCHING +#define SDL_HINT_RENDER_BATCHING "SDL_RENDER_BATCHING" +#endif + static void sdl_update_caption(struct sdl2_console *scon); static struct sdl2_console *get_scon_from_window(uint32_t window_id) @@ -99,9 +104,20 @@ void sdl2_window_create(struct sdl2_console *scon) surface_width(scon->surface), surface_height(scon->surface), flags); - scon->real_renderer = SDL_CreateRenderer(scon->real_window, -1, 0); if (scon->opengl) { - scon->winctx = SDL_GL_GetCurrentContext(); + const char *driver = "opengl"; + + if (scon->opts->gl == DISPLAYGL_MODE_ES) { + driver = "opengles2"; + } + + SDL_SetHint(SDL_HINT_RENDER_DRIVER, driver); + SDL_SetHint(SDL_HINT_RENDER_BATCHING, "1"); + } + scon->real_renderer = SDL_CreateRenderer(scon->real_window, -1, 0); + + if (scon->opengl) { + scon->winctx = SDL_GL_CreateContext(scon->real_window); } sdl_update_caption(scon); } @@ -112,6 +128,8 @@ void sdl2_window_destroy(struct sdl2_console *scon) return; } + SDL_GL_DeleteContext(scon->winctx); + scon->winctx = NULL; SDL_DestroyRenderer(scon->real_renderer); scon->real_renderer = NULL; SDL_DestroyWindow(scon->real_window); @@ -825,21 +843,9 @@ static void sdl2_display_init(DisplayState *ds, DisplayOptions *o) assert(o->type == DISPLAY_TYPE_SDL); -#ifdef __linux__ - /* on Linux, SDL may use fbcon|directfb|svgalib when run without - * accessible $DISPLAY to open X11 window. This is often the case - * when qemu is run using sudo. But in this case, and when actually - * run in X11 environment, SDL fights with X11 for the video card, - * making current display unavailable, often until reboot. - * So make x11 the default SDL video driver if this variable is unset. - * This is a bit hackish but saves us from bigger problem. - * Maybe it's a good idea to fix this in SDL instead. 
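
The sdl2.c hunks above show two idioms worth noting: SDL hints are plain strings, so a constant missing from older headers (SDL_HINT_RENDER_BATCHING predates SDL 2.0.10) can be supplied locally, and driver hints only take effect if set before SDL_CreateRenderer() runs. A minimal standalone sketch of that ordering; the window title and size are arbitrary:

    #include <SDL.h>

    /* Fall back to the raw hint string when building against pre-2.0.10 headers. */
    #ifndef SDL_HINT_RENDER_BATCHING
    #define SDL_HINT_RENDER_BATCHING "SDL_RENDER_BATCHING"
    #endif

    int main(void)
    {
        if (SDL_Init(SDL_INIT_VIDEO) != 0) {
            return 1;
        }

        /* Hints must be in place before the renderer is created. */
        SDL_SetHint(SDL_HINT_RENDER_DRIVER, "opengl");
        SDL_SetHint(SDL_HINT_RENDER_BATCHING, "1");

        SDL_Window *win = SDL_CreateWindow("hint demo", SDL_WINDOWPOS_UNDEFINED,
                                           SDL_WINDOWPOS_UNDEFINED, 640, 480,
                                           SDL_WINDOW_OPENGL);
        SDL_Renderer *ren = SDL_CreateRenderer(win, -1, 0);

        SDL_DestroyRenderer(ren);
        SDL_DestroyWindow(win);
        SDL_Quit();
        return 0;
    }
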
- */ - if (!g_setenv("SDL_VIDEODRIVER", "x11", 0)) { - fprintf(stderr, "Could not set SDL_VIDEODRIVER environment variable\n"); - exit(1); + if (SDL_GetHintBoolean("QEMU_ENABLE_SDL_LOGGING", SDL_FALSE)) { + SDL_LogSetAllPriority(SDL_LOG_PRIORITY_VERBOSE); } -#endif if (SDL_Init(SDL_INIT_VIDEO)) { fprintf(stderr, "Could not initialize SDL(%s) - exiting\n", @@ -849,7 +855,14 @@ static void sdl2_display_init(DisplayState *ds, DisplayOptions *o) #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR /* only available since SDL 2.0.8 */ SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0"); #endif +#ifndef CONFIG_WIN32 + /* QEMU uses its own low level keyboard hook procedure on Windows */ SDL_SetHint(SDL_HINT_GRAB_KEYBOARD, "1"); +#endif +#ifdef SDL_HINT_ALLOW_ALT_TAB_WHILE_GRABBED + SDL_SetHint(SDL_HINT_ALLOW_ALT_TAB_WHILE_GRABBED, "0"); +#endif + SDL_SetHint(SDL_HINT_WINDOWS_NO_CLOSE_ON_ALT_F4, "1"); memset(&info, 0, sizeof(info)); SDL_VERSION(&info.version); diff --git a/ui/shader/texture-blit-flip.vert b/ui/shader/texture-blit-flip.vert index ba081fa5a6..f7a448d229 100644 --- a/ui/shader/texture-blit-flip.vert +++ b/ui/shader/texture-blit-flip.vert @@ -1,4 +1,3 @@ - #version 300 es in vec2 in_position; diff --git a/ui/shader/texture-blit.frag b/ui/shader/texture-blit.frag index bfa202c22b..8ed95a46b6 100644 --- a/ui/shader/texture-blit.frag +++ b/ui/shader/texture-blit.frag @@ -1,4 +1,3 @@ - #version 300 es uniform sampler2D image; diff --git a/ui/shader/texture-blit.vert b/ui/shader/texture-blit.vert index 6fe2744d68..fb48d70665 100644 --- a/ui/shader/texture-blit.vert +++ b/ui/shader/texture-blit.vert @@ -1,4 +1,3 @@ - #version 300 es in vec2 in_position; diff --git a/ui/spice-core.c b/ui/spice-core.c index 76f7c2bc3d..52a59386d7 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -90,13 +90,23 @@ struct SpiceWatch { static void watch_read(void *opaque) { SpiceWatch *watch = opaque; - watch->func(watch->fd, SPICE_WATCH_EVENT_READ, watch->opaque); + int fd = watch->fd; + +#ifdef WIN32 + fd = _get_osfhandle(fd); +#endif + watch->func(fd, SPICE_WATCH_EVENT_READ, watch->opaque); } static void watch_write(void *opaque) { SpiceWatch *watch = opaque; - watch->func(watch->fd, SPICE_WATCH_EVENT_WRITE, watch->opaque); + int fd = watch->fd; + +#ifdef WIN32 + fd = _get_osfhandle(fd); +#endif + watch->func(fd, SPICE_WATCH_EVENT_WRITE, watch->opaque); } static void watch_update_mask(SpiceWatch *watch, int event_mask) @@ -117,6 +127,14 @@ static SpiceWatch *watch_add(int fd, int event_mask, SpiceWatchFunc func, void * { SpiceWatch *watch; +#ifdef WIN32 + fd = _open_osfhandle(fd, _O_BINARY); + if (fd < 0) { + error_setg_win32(&error_warn, WSAGetLastError(), "Couldn't associate a FD with the SOCKET"); + return NULL; + } +#endif + watch = g_malloc0(sizeof(*watch)); watch->fd = fd; watch->func = func; @@ -129,6 +147,10 @@ static SpiceWatch *watch_add(int fd, int event_mask, SpiceWatchFunc func, void * static void watch_remove(SpiceWatch *watch) { qemu_set_fd_handler(watch->fd, NULL, NULL, NULL); +#ifdef WIN32 + /* SOCKET is owned by spice */ + qemu_close_socket_osfhandle(watch->fd); +#endif g_free(watch); } @@ -820,12 +842,7 @@ static void qemu_spice_init(void) "incompatible with -spice port/tls-port"); exit(1); } - if (egl_rendernode_init(qemu_opt_get(opts, "rendernode"), - DISPLAYGL_MODE_ON) != 0) { - error_report("Failed to initialize EGL render node for SPICE GL"); - exit(1); - } - display_opengl = 1; + egl_init(qemu_opt_get(opts, "rendernode"), DISPLAYGL_MODE_ON, &error_fatal); spice_opengl = 1; 
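
With egl_rendernode_init() call sites converted to egl_init(rendernode, mode, errp), the exit-on-failure policy moves to the caller: passing &error_fatal reproduces the old report-and-exit behaviour (error.c prints the message and calls exit(1)), while a real errp lets the caller degrade gracefully. A sketch of that contract, assuming QEMU's Error API; the helper body here is illustrative, not the real egl_init():

    #include "qemu/osdep.h"
    #include "qapi/error.h"

    /* Illustrative helper following the bool-plus-errp convention of egl_init(). */
    static bool render_init(const char *rendernode, Error **errp)
    {
        if (!rendernode) {
            error_setg(errp, "render node init failed");
            return false;
        }
        return true;
    }

    static void init_or_die(void)
    {
        /* &error_fatal turns any failure into error_report() plus exit(1). */
        render_init(NULL, &error_fatal);
    }

    static bool init_soft(Error **errp)
    {
        /* Propagating callers simply hand their errp down. */
        return render_init("/dev/dri/renderD128", errp);
    }
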
} #endif @@ -913,6 +930,9 @@ static int qemu_spice_set_pw_expire(time_t expires) static int qemu_spice_display_add_client(int csock, int skipauth, int tls) { +#ifdef WIN32 + csock = qemu_close_socket_osfhandle(csock); +#endif if (tls) { return spice_server_add_ssl_client(spice_server, csock, skipauth); } else { diff --git a/ui/spice-display.c b/ui/spice-display.c index 16802f99cb..5bee19a7f9 100644 --- a/ui/spice-display.c +++ b/ui/spice-display.c @@ -460,11 +460,11 @@ void qemu_spice_cursor_refresh_bh(void *opaque) if (ssd->cursor) { QEMUCursor *c = ssd->cursor; assert(ssd->dcl.con); - cursor_get(c); + cursor_ref(c); qemu_mutex_unlock(&ssd->lock); dpy_cursor_define(ssd->dcl.con, c); qemu_mutex_lock(&ssd->lock); - cursor_put(c); + cursor_unref(c); } if (ssd->mouse_x != -1 && ssd->mouse_y != -1) { @@ -765,8 +765,8 @@ static void display_mouse_define(DisplayChangeListener *dcl, SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); qemu_mutex_lock(&ssd->lock); - cursor_get(c); - cursor_put(ssd->cursor); + cursor_ref(c); + cursor_unref(ssd->cursor); ssd->cursor = c; ssd->hot_x = c->hot_x; ssd->hot_y = c->hot_y; diff --git a/ui/trace-events b/ui/trace-events index 977577fbba..6747361745 100644 --- a/ui/trace-events +++ b/ui/trace-events @@ -90,6 +90,7 @@ input_event_key_qcode(int conidx, const char *qcode, bool down) "con %d, key qco input_event_btn(int conidx, const char *btn, bool down) "con %d, button %s, down %d" input_event_rel(int conidx, const char *axis, int value) "con %d, axis %s, value %d" input_event_abs(int conidx, const char *axis, int value) "con %d, axis %s, value 0x%x" +input_event_mtt(int conidx, const char *axis, int value) "con %d, axis %s, value 0x%x" input_event_sync(void) "" input_mouse_mode(int absolute) "absolute %d" diff --git a/ui/ui-hmp-cmds.c b/ui/ui-hmp-cmds.c index 5c456ecc02..c671389473 100644 --- a/ui/ui-hmp-cmds.c +++ b/ui/ui-hmp-cmds.c @@ -458,3 +458,20 @@ hmp_screendump(Monitor *mon, const QDict *qdict) end: hmp_handle_error(mon, err); } + +void hmp_client_migrate_info(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + const char *protocol = qdict_get_str(qdict, "protocol"); + const char *hostname = qdict_get_str(qdict, "hostname"); + bool has_port = qdict_haskey(qdict, "port"); + int port = qdict_get_try_int(qdict, "port", -1); + bool has_tls_port = qdict_haskey(qdict, "tls-port"); + int tls_port = qdict_get_try_int(qdict, "tls-port", -1); + const char *cert_subject = qdict_get_try_str(qdict, "cert-subject"); + + qmp_client_migrate_info(protocol, hostname, + has_port, port, has_tls_port, tls_port, + cert_subject, &err); + hmp_handle_error(mon, err); +} diff --git a/ui/ui-qmp-cmds.c b/ui/ui-qmp-cmds.c index dbc4afcd73..a37a7024f3 100644 --- a/ui/ui-qmp-cmds.c +++ b/ui/ui-qmp-cmds.c @@ -175,3 +175,32 @@ void qmp_display_update(DisplayUpdateOptions *arg, Error **errp) abort(); } } + +void qmp_client_migrate_info(const char *protocol, const char *hostname, + bool has_port, int64_t port, + bool has_tls_port, int64_t tls_port, + const char *cert_subject, + Error **errp) +{ + if (strcmp(protocol, "spice") == 0) { + if (!qemu_using_spice(errp)) { + return; + } + + if (!has_port && !has_tls_port) { + error_setg(errp, QERR_MISSING_PARAMETER, "port/tls-port"); + return; + } + + if (qemu_spice.migrate_info(hostname, + has_port ? port : -1, + has_tls_port ? 
tls_port : -1, + cert_subject)) { + error_setg(errp, "Could not set up display for migration"); + return; + } + return; + } + + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "protocol", "'spice'"); +} diff --git a/ui/vnc.c b/ui/vnc.c index d9eacad759..9d8a24dd8a 100644 --- a/ui/vnc.c +++ b/ui/vnc.c @@ -988,10 +988,10 @@ static void vnc_mouse_set(DisplayChangeListener *dcl, static int vnc_cursor_define(VncState *vs) { - QEMUCursor *c = vs->vd->cursor; + QEMUCursor *c = qemu_console_get_cursor(vs->vd->dcl.con); int isize; - if (!vs->vd->cursor) { + if (!c) { return -1; } @@ -1029,11 +1029,7 @@ static void vnc_dpy_cursor_define(DisplayChangeListener *dcl, VncDisplay *vd = container_of(dcl, VncDisplay, dcl); VncState *vs; - cursor_put(vd->cursor); g_free(vd->cursor_mask); - - vd->cursor = c; - cursor_get(vd->cursor); vd->cursor_msize = cursor_get_mono_bpl(c) * c->height; vd->cursor_mask = g_malloc0(vd->cursor_msize); cursor_get_mono_mask(c, 0, vd->cursor_mask); @@ -3755,7 +3751,7 @@ static int vnc_display_get_address(const char *addrstr, addr->type = SOCKET_ADDRESS_TYPE_INET; inet = &addr->u.inet; - if (addrstr[0] == '[' && addrstr[hostlen - 1] == ']') { + if (hostlen && addrstr[0] == '[' && addrstr[hostlen - 1] == ']') { inet->host = g_strndup(addrstr + 1, hostlen - 2); } else { inet->host = g_strndup(addrstr, hostlen); diff --git a/ui/vnc.h b/ui/vnc.h index a60fb13115..757fa83044 100644 --- a/ui/vnc.h +++ b/ui/vnc.h @@ -159,7 +159,6 @@ struct VncDisplay QKbdState *kbd; QemuMutex mutex; - QEMUCursor *cursor; int cursor_msize; uint8_t *cursor_mask; diff --git a/util/aio-posix.c b/util/aio-posix.c index 6cc6256d53..7f2c99729d 100644 --- a/util/aio-posix.c +++ b/util/aio-posix.c @@ -99,7 +99,6 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node) void aio_set_fd_handler(AioContext *ctx, int fd, - bool is_external, IOHandler *io_read, IOHandler *io_write, AioPollFn *io_poll, @@ -144,7 +143,6 @@ void aio_set_fd_handler(AioContext *ctx, new_node->io_poll = io_poll; new_node->io_poll_ready = io_poll_ready; new_node->opaque = opaque; - new_node->is_external = is_external; if (is_new) { new_node->pfd.fd = fd; @@ -180,9 +178,9 @@ void aio_set_fd_handler(AioContext *ctx, } } -void aio_set_fd_poll(AioContext *ctx, int fd, - IOHandler *io_poll_begin, - IOHandler *io_poll_end) +static void aio_set_fd_poll(AioContext *ctx, int fd, + IOHandler *io_poll_begin, + IOHandler *io_poll_end) { AioHandler *node = find_aio_handler(ctx, fd); @@ -196,12 +194,11 @@ void aio_set_fd_poll(AioContext *ctx, int fd, void aio_set_event_notifier(AioContext *ctx, EventNotifier *notifier, - bool is_external, EventNotifierHandler *io_read, AioPollFn *io_poll, EventNotifierHandler *io_poll_ready) { - aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external, + aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), (IOHandler *)io_read, NULL, io_poll, (IOHandler *)io_poll_ready, notifier); } @@ -285,13 +282,11 @@ bool aio_pending(AioContext *ctx) /* TODO should this check poll ready? 
*/ revents = node->pfd.revents & node->pfd.events; - if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read && - aio_node_check(ctx, node->is_external)) { + if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) { result = true; break; } - if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write && - aio_node_check(ctx, node->is_external)) { + if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) { result = true; break; } @@ -350,11 +345,20 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node) QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll); } if (!QLIST_IS_INSERTED(node, node_deleted) && - poll_ready && revents == 0 && - aio_node_check(ctx, node->is_external) && - node->io_poll_ready) { + poll_ready && revents == 0 && node->io_poll_ready) { + /* + * Remove temporarily to avoid infinite loops when ->io_poll_ready() + * calls aio_poll() before clearing the condition that made the poll + * handler become ready. + */ + QLIST_SAFE_REMOVE(node, node_poll); + node->io_poll_ready(node->opaque); + if (!QLIST_IS_INSERTED(node, node_poll)) { + QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll); + } + /* * Return early since revents was zero. aio_notify() does not count as * progress. @@ -364,7 +368,6 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node) if (!QLIST_IS_INSERTED(node, node_deleted) && (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) && - aio_node_check(ctx, node->is_external) && node->io_read) { node->io_read(node->opaque); @@ -375,7 +378,6 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node) } if (!QLIST_IS_INSERTED(node, node_deleted) && (revents & (G_IO_OUT | G_IO_ERR)) && - aio_node_check(ctx, node->is_external) && node->io_write) { node->io_write(node->opaque); progress = true; @@ -436,8 +438,7 @@ static bool run_poll_handlers_once(AioContext *ctx, AioHandler *tmp; QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) { - if (aio_node_check(ctx, node->is_external) && - node->io_poll(node->opaque)) { + if (node->io_poll(node->opaque)) { aio_add_poll_ready_handler(ready_list, node); node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS; diff --git a/util/aio-posix.h b/util/aio-posix.h index 80b927c7f4..4264c518be 100644 --- a/util/aio-posix.h +++ b/util/aio-posix.h @@ -38,7 +38,6 @@ struct AioHandler { #endif int64_t poll_idle_timeout; /* when to stop userspace polling */ bool poll_ready; /* has polling detected an event? 
*/ - bool is_external; }; /* Add a handler to a ready list */ diff --git a/util/aio-wait.c b/util/aio-wait.c index 98c5accd29..b5336cf5fd 100644 --- a/util/aio-wait.c +++ b/util/aio-wait.c @@ -82,5 +82,5 @@ void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque) assert(qemu_get_current_aio_context() == qemu_get_aio_context()); aio_bh_schedule_oneshot(ctx, aio_wait_bh, &data); - AIO_WAIT_WHILE(ctx, !data.done); + AIO_WAIT_WHILE_UNLOCKED(NULL, !data.done); } diff --git a/util/aio-win32.c b/util/aio-win32.c index 80cfe012ad..948ef47a4d 100644 --- a/util/aio-win32.c +++ b/util/aio-win32.c @@ -22,6 +22,7 @@ #include "qemu/sockets.h" #include "qapi/error.h" #include "qemu/rcu_queue.h" +#include "qemu/error-report.h" struct AioHandler { EventNotifier *e; @@ -31,7 +32,6 @@ struct AioHandler { GPollFD pfd; int deleted; void *opaque; - bool is_external; QLIST_ENTRY(AioHandler) node; }; @@ -63,20 +63,26 @@ static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node) void aio_set_fd_handler(AioContext *ctx, int fd, - bool is_external, IOHandler *io_read, IOHandler *io_write, AioPollFn *io_poll, IOHandler *io_poll_ready, void *opaque) { - /* fd is a SOCKET in our case */ AioHandler *old_node; AioHandler *node = NULL; + SOCKET s; + + if (!fd_is_socket(fd)) { + error_report("fd=%d is not a socket, AIO implementation is missing", fd); + return; + } + + s = _get_osfhandle(fd); qemu_lockcnt_lock(&ctx->list_lock); QLIST_FOREACH(old_node, &ctx->aio_handlers, node) { - if (old_node->pfd.fd == fd && !old_node->deleted) { + if (old_node->pfd.fd == s && !old_node->deleted) { break; } } @@ -87,7 +93,7 @@ void aio_set_fd_handler(AioContext *ctx, /* Alloc and insert if it's not already there */ node = g_new0(AioHandler, 1); - node->pfd.fd = fd; + node->pfd.fd = s; node->pfd.events = 0; if (node->io_read) { @@ -103,7 +109,6 @@ void aio_set_fd_handler(AioContext *ctx, node->opaque = opaque; node->io_read = io_read; node->io_write = io_write; - node->is_external = is_external; if (io_read) { bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE; @@ -115,7 +120,7 @@ void aio_set_fd_handler(AioContext *ctx, QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node); event = event_notifier_get_handle(&ctx->notifier); - WSAEventSelect(node->pfd.fd, event, bitmask); + qemu_socket_select(fd, event, bitmask, NULL); } if (old_node) { aio_remove_fd_handler(ctx, old_node); @@ -125,16 +130,8 @@ void aio_set_fd_handler(AioContext *ctx, aio_notify(ctx); } -void aio_set_fd_poll(AioContext *ctx, int fd, - IOHandler *io_poll_begin, - IOHandler *io_poll_end) -{ - /* Not implemented */ -} - void aio_set_event_notifier(AioContext *ctx, EventNotifier *e, - bool is_external, EventNotifierHandler *io_notify, AioPollFn *io_poll, EventNotifierHandler *io_poll_ready) @@ -160,7 +157,6 @@ void aio_set_event_notifier(AioContext *ctx, node->e = e; node->pfd.fd = (uintptr_t)event_notifier_get_handle(e); node->pfd.events = G_IO_IN; - node->is_external = is_external; QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node); g_source_add_poll(&ctx->source, &node->pfd); @@ -367,8 +363,7 @@ bool aio_poll(AioContext *ctx, bool blocking) /* fill fd sets */ count = 0; QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { - if (!node->deleted && node->io_notify - && aio_node_check(ctx, node->is_external)) { + if (!node->deleted && node->io_notify) { assert(count < MAXIMUM_WAIT_OBJECTS); events[count++] = event_notifier_get_handle(node->e); } diff --git a/util/async-teardown.c b/util/async-teardown.c index 62cdeb0f20..3ab19c8740 100644 --- 
a/util/async-teardown.c +++ b/util/async-teardown.c @@ -12,6 +12,9 @@ */ #include "qemu/osdep.h" +#include "qemu/config-file.h" +#include "qemu/option.h" +#include "qemu/module.h" #include #include #include @@ -144,3 +147,21 @@ void init_async_teardown(void) clone(async_teardown_fn, new_stack_for_clone(), CLONE_VM, NULL); sigprocmask(SIG_SETMASK, &old_signals, NULL); } + +static QemuOptsList qemu_run_with_opts = { + .name = "run-with", + .head = QTAILQ_HEAD_INITIALIZER(qemu_run_with_opts.head), + .desc = { + { + .name = "async-teardown", + .type = QEMU_OPT_BOOL, + }, + { /* end of list */ } + }, +}; + +static void register_teardown(void) +{ + qemu_add_opts(&qemu_run_with_opts); +} +opts_init(register_teardown); diff --git a/util/async.c b/util/async.c index 0657b75397..8f90ddc304 100644 --- a/util/async.c +++ b/util/async.c @@ -65,6 +65,7 @@ struct QEMUBH { void *opaque; QSLIST_ENTRY(QEMUBH) next; unsigned flags; + MemReentrancyGuard *reentrancy_guard; }; /* Called concurrently from any thread */ @@ -74,14 +75,21 @@ static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags) unsigned old_flags; /* - * The memory barrier implicit in qatomic_fetch_or makes sure that: - * 1. idle & any writes needed by the callback are done before the - * locations are read in the aio_bh_poll. - * 2. ctx is loaded before the callback has a chance to execute and bh - * could be freed. + * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that + * insertion starts after BH_PENDING is set. */ old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags); + if (!(old_flags & BH_PENDING)) { + /* + * At this point the bottom half becomes visible to aio_bh_poll(). + * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in + * aio_bh_poll(), ensuring that: + * 1. any writes needed by the callback are visible from the callback + * after aio_bh_dequeue() returns bh. + * 2. ctx is loaded before the callback has a chance to execute and bh + * could be freed. + */ QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next); } @@ -107,11 +115,8 @@ static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags) QSLIST_REMOVE_HEAD(head, next); /* - * The qatomic_and is paired with aio_bh_enqueue(). The implicit memory - * barrier ensures that the callback sees all writes done by the scheduling - * thread. It also ensures that the scheduling thread sees the cleared - * flag before bh->cb has run, and thus will call aio_notify again if - * necessary. + * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that + * the removal finishes before BH_PENDING is reset. 
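
The rewritten barrier comments in aio_bh_enqueue()/aio_bh_dequeue() describe a flag-guarded queue handoff: whichever thread flips BH_PENDING from 0 to 1 owns the list insertion, and the consumer clears the flags only after the node is off the list. A reduced C11 model of just that protocol; the type and function names are mine, not QEMU's:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define BH_PENDING   1u
    #define BH_SCHEDULED 2u

    typedef struct {
        _Atomic unsigned flags;
    } bh_t;

    /* Producer: returns true iff this caller must perform the list insertion. */
    static bool bh_mark_pending(bh_t *bh, unsigned new_flags)
    {
        unsigned old = atomic_fetch_or_explicit(&bh->flags,
                                                BH_PENDING | new_flags,
                                                memory_order_acq_rel);
        /* Only the 0 -> 1 transition wins, so insertion starts after
         * BH_PENDING is set, exactly as the comment above requires. */
        return !(old & BH_PENDING);
    }

    /* Consumer: called after the node has been removed from the list. */
    static unsigned bh_clear_pending(bh_t *bh)
    {
        /* Pairs with the fetch_or above: the removal finishes before
         * BH_PENDING is reset. */
        return atomic_fetch_and_explicit(&bh->flags,
                                         ~(BH_PENDING | BH_SCHEDULED),
                                         memory_order_acq_rel);
    }
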
*/ *flags = qatomic_fetch_and(&bh->flags, ~(BH_PENDING | BH_SCHEDULED | BH_IDLE)); @@ -133,7 +138,7 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, } QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque, - const char *name) + const char *name, MemReentrancyGuard *reentrancy_guard) { QEMUBH *bh; bh = g_new(QEMUBH, 1); @@ -142,13 +147,30 @@ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque, .cb = cb, .opaque = opaque, .name = name, + .reentrancy_guard = reentrancy_guard, }; return bh; } void aio_bh_call(QEMUBH *bh) { + bool last_engaged_in_io = false; + + /* Make a copy of the guard-pointer as cb may free the bh */ + MemReentrancyGuard *reentrancy_guard = bh->reentrancy_guard; + if (reentrancy_guard) { + last_engaged_in_io = reentrancy_guard->engaged_in_io; + if (reentrancy_guard->engaged_in_io) { + trace_reentrant_aio(bh->ctx, bh->name); + } + reentrancy_guard->engaged_in_io = true; + } + bh->cb(bh->opaque); + + if (reentrancy_guard) { + reentrancy_guard->engaged_in_io = last_engaged_in_io; + } } /* Multiple occurrences of aio_bh_poll cannot be called concurrently. */ @@ -158,8 +180,23 @@ int aio_bh_poll(AioContext *ctx) BHListSlice *s; int ret = 0; + /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */ QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list); + + /* + * GCC13 [-Werror=dangling-pointer=] complains that the local variable + * 'slice' is being stored in the global 'ctx->bh_slice_list' but the + * list is emptied before this function returns. + */ +#if !defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wdangling-pointer=" +#endif QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next); +#if !defined(__clang__) +#pragma GCC diagnostic pop +#endif while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) { QEMUBH *bh; @@ -372,7 +409,7 @@ aio_ctx_finalize(GSource *source) g_free(bh); } - aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL); + aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL, NULL); event_notifier_cleanup(&ctx->notifier); qemu_rec_mutex_destroy(&ctx->lock); qemu_lockcnt_destroy(&ctx->list_lock); @@ -448,15 +485,15 @@ LuringState *aio_get_linux_io_uring(AioContext *ctx) void aio_notify(AioContext *ctx) { /* - * Write e.g. bh->flags before writing ctx->notified. Pairs with smp_mb in - * aio_notify_accept. + * Write e.g. ctx->bh_list before writing ctx->notified. Pairs with + * smp_mb() in aio_notify_accept(). */ smp_wmb(); qatomic_set(&ctx->notified, true); /* - * Write ctx->notified before reading ctx->notify_me. Pairs - * with smp_mb in aio_ctx_prepare or aio_poll. + * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me. + * Pairs with smp_mb() in aio_ctx_prepare or aio_poll. */ smp_mb(); if (qatomic_read(&ctx->notify_me)) { @@ -469,8 +506,9 @@ void aio_notify_accept(AioContext *ctx) qatomic_set(&ctx->notified, false); /* - * Write ctx->notified before reading e.g. bh->flags. Pairs with smp_wmb - * in aio_notify. + * Order reads of ctx->notified (in aio_context_notifier_poll()) and the + * above clearing of ctx->notified before reads of e.g. bh->flags. Pairs + * with smp_wmb() in aio_notify. */ smp_mb(); } @@ -493,6 +531,11 @@ static bool aio_context_notifier_poll(void *opaque) EventNotifier *e = opaque; AioContext *ctx = container_of(e, AioContext, notifier); + /* + * No need for load-acquire because we just want to kick the + * event loop. 
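
aio_bh_call() above copies bh->reentrancy_guard into a local before running the callback because the callback may free the QEMUBH itself; the guard object, owned by the device, must outlive the call. Stripped of QEMU types, the pattern is a saved-and-restored flag around the invocation; a sketch with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        bool engaged_in_io;
    } ReentrancyGuard;

    static void call_guarded(ReentrancyGuard *guard, void (*cb)(void *), void *opaque)
    {
        bool last = false;

        if (guard) {
            last = guard->engaged_in_io;
            if (last) {
                /* Re-entered while a callback guarded by the same object runs. */
                fprintf(stderr, "warning: reentrant callback\n");
            }
            guard->engaged_in_io = true;
        }

        cb(opaque);   /* may free the object that carried the guard pointer */

        if (guard) {
            /* Restore rather than clear, so legitimate nested calls unwind. */
            guard->engaged_in_io = last;
        }
    }
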
aio_notify_accept() takes care of synchronizing + * the event loop with the producers. + */ return qatomic_read(&ctx->notified); } @@ -550,7 +593,6 @@ AioContext *aio_context_new(Error **errp) QSLIST_INIT(&ctx->scheduled_coroutines); aio_set_event_notifier(ctx, &ctx->notifier, - false, aio_context_notifier_cb, aio_context_notifier_poll, aio_context_notifier_poll_ready); diff --git a/util/bitops.c b/util/bitops.c index 3fe6b1c4f1..4b647b3eff 100644 --- a/util/bitops.c +++ b/util/bitops.c @@ -71,8 +71,8 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, found_first: tmp &= (~0UL >> (BITS_PER_LONG - size)); - if (tmp == 0UL) { /* Are any bits set? */ - return result + size; /* Nope. */ + if (tmp == 0UL) { /* Are any bits set? */ + return result + size; /* Nope. */ } found_middle: return result + ctzl(tmp); @@ -120,8 +120,8 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, found_first: tmp |= ~0UL << size; - if (tmp == ~0UL) { /* Are any bits zero? */ - return result + size; /* Nope. */ + if (tmp == ~0UL) { /* Are any bits zero? */ + return result + size; /* Nope. */ } found_middle: return result + ctzl(~tmp); diff --git a/util/bufferiszero.c b/util/bufferiszero.c index 1790ded7d4..3e6a5dfd63 100644 --- a/util/bufferiszero.c +++ b/util/bufferiszero.c @@ -24,6 +24,7 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" #include "qemu/bswap.h" +#include "host/cpuinfo.h" static bool buffer_zero_int(const void *buf, size_t len) @@ -184,112 +185,75 @@ buffer_zero_avx512(const void *buf, size_t len) } #endif /* CONFIG_AVX512F_OPT */ - -/* Note that for test_buffer_is_zero_next_accel, the most preferred - * ISA must have the least significant bit. - */ -#define CACHE_AVX512F 1 -#define CACHE_AVX2 2 -#define CACHE_SSE4 4 -#define CACHE_SSE2 8 - -/* Make sure that these variables are appropriately initialized when +/* + * Make sure that these variables are appropriately initialized when * SSE2 is enabled on the compiler command-line, but the compiler is * too old to support CONFIG_AVX2_OPT. */ #if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT) -# define INIT_CACHE 0 -# define INIT_ACCEL buffer_zero_int +# define INIT_USED 0 +# define INIT_LENGTH 0 +# define INIT_ACCEL buffer_zero_int #else # ifndef __SSE2__ # error "ISA selection confusion" # endif -# define INIT_CACHE CACHE_SSE2 -# define INIT_ACCEL buffer_zero_sse2 +# define INIT_USED CPUINFO_SSE2 +# define INIT_LENGTH 64 +# define INIT_ACCEL buffer_zero_sse2 #endif -static unsigned cpuid_cache = INIT_CACHE; +static unsigned used_accel = INIT_USED; +static unsigned length_to_accel = INIT_LENGTH; static bool (*buffer_accel)(const void *, size_t) = INIT_ACCEL; -static int length_to_accel = 64; -static void init_accel(unsigned cache) +static unsigned __attribute__((noinline)) +select_accel_cpuinfo(unsigned info) { - bool (*fn)(const void *, size_t) = buffer_zero_int; - if (cache & CACHE_SSE2) { - fn = buffer_zero_sse2; - length_to_accel = 64; - } -#ifdef CONFIG_AVX2_OPT - if (cache & CACHE_SSE4) { - fn = buffer_zero_sse4; - length_to_accel = 64; - } - if (cache & CACHE_AVX2) { - fn = buffer_zero_avx2; - length_to_accel = 128; - } -#endif + /* Array is sorted in order of algorithm preference. 
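
The aio_notify()/aio_notify_accept() comments above describe the classic store-load pairing between a producer publishing work and a poller announcing its intent to sleep: each side writes its own flag, issues a full barrier, then reads the other side's flag, so at least one of the two observes the other. A reduced C11 model; the names and the kick callback are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic bool notified;
    static _Atomic int  notify_me;

    /* Producer side, mirroring aio_notify(). */
    static void notify(void (*kick_event_loop)(void))
    {
        atomic_store_explicit(&notified, true, memory_order_relaxed);
        /* Order the notified store before the notify_me load. */
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load_explicit(&notify_me, memory_order_relaxed)) {
            kick_event_loop();
        }
    }

    /* Poller side: announce intent to sleep, then re-check for pending work. */
    static bool safe_to_sleep(void)
    {
        atomic_fetch_add_explicit(&notify_me, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return !atomic_load_explicit(&notified, memory_order_relaxed);
    }
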
*/ + static const struct { + unsigned bit; + unsigned len; + bool (*fn)(const void *, size_t); + } all[] = { #ifdef CONFIG_AVX512F_OPT - if (cache & CACHE_AVX512F) { - fn = buffer_zero_avx512; - length_to_accel = 256; - } + { CPUINFO_AVX512F, 256, buffer_zero_avx512 }, #endif - buffer_accel = fn; +#ifdef CONFIG_AVX2_OPT + { CPUINFO_AVX2, 128, buffer_zero_avx2 }, + { CPUINFO_SSE4, 64, buffer_zero_sse4 }, +#endif + { CPUINFO_SSE2, 64, buffer_zero_sse2 }, + { CPUINFO_ALWAYS, 0, buffer_zero_int }, + }; + + for (unsigned i = 0; i < ARRAY_SIZE(all); ++i) { + if (info & all[i].bit) { + length_to_accel = all[i].len; + buffer_accel = all[i].fn; + return all[i].bit; + } + } + return 0; } #if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT) -#include "qemu/cpuid.h" - -static void __attribute__((constructor)) init_cpuid_cache(void) +static void __attribute__((constructor)) init_accel(void) { - unsigned max = __get_cpuid_max(0, NULL); - int a, b, c, d; - unsigned cache = 0; - - if (max >= 1) { - __cpuid(1, a, b, c, d); - if (d & bit_SSE2) { - cache |= CACHE_SSE2; - } - if (c & bit_SSE4_1) { - cache |= CACHE_SSE4; - } - - /* We must check that AVX is not just available, but usable. */ - if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) { - int bv; - __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0)); - __cpuid_count(7, 0, a, b, c, d); - if ((bv & 0x6) == 0x6 && (b & bit_AVX2)) { - cache |= CACHE_AVX2; - } - /* 0xe6: - * XCR0[7:5] = 111b (OPMASK state, upper 256-bit of ZMM0-ZMM15 - * and ZMM16-ZMM31 state are enabled by OS) - * XCR0[2:1] = 11b (XMM state and YMM state are enabled by OS) - */ - if ((bv & 0xe6) == 0xe6 && (b & bit_AVX512F)) { - cache |= CACHE_AVX512F; - } - } - } - cpuid_cache = cache; - init_accel(cache); + used_accel = select_accel_cpuinfo(cpuinfo_init()); } #endif /* CONFIG_AVX2_OPT */ bool test_buffer_is_zero_next_accel(void) { - /* If no bits set, we just tested buffer_zero_int, and there - are no more acceleration options to test. */ - if (cpuid_cache == 0) { - return false; - } - /* Disable the accelerator we used before and select a new one. */ - cpuid_cache &= cpuid_cache - 1; - init_accel(cpuid_cache); - return true; + /* + * Accumulate the accelerators that we've already tested, and + * remove them from the set to test this round. We'll get back + * a zero from select_accel_cpuinfo when there are no more. + */ + unsigned used = select_accel_cpuinfo(cpuinfo & ~used_accel); + used_accel |= used; + return used; } static bool select_accel_fn(const void *buf, size_t len) diff --git a/util/coroutine-win32.c b/util/coroutine-windows.c similarity index 100% rename from util/coroutine-win32.c rename to util/coroutine-windows.c diff --git a/util/cpuinfo-aarch64.c b/util/cpuinfo-aarch64.c new file mode 100644 index 0000000000..f99acb7884 --- /dev/null +++ b/util/cpuinfo-aarch64.c @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Host specific cpu identification for AArch64. + */ + +#include "qemu/osdep.h" +#include "host/cpuinfo.h" + +#ifdef CONFIG_LINUX +# ifdef CONFIG_GETAUXVAL +# include <sys/auxv.h> +# else +# include <asm/hwcap.h> +# include "elf.h" +# endif +#endif +#ifdef CONFIG_DARWIN +# include <sys/sysctl.h> +#endif + +unsigned cpuinfo; + +#ifdef CONFIG_DARWIN +static bool sysctl_for_bool(const char *name) +{ + int val = 0; + size_t len = sizeof(val); + + if (sysctlbyname(name, &val, &len, NULL, 0) == 0) { + return val != 0; + } + + /* + * We might in the future ask for properties not present in older kernels, + * but we're only asking about static properties, all of which should be + * 'int'. 
So we shouldn't see ENOMEM (val too small), or any of the other + * more exotic errors. + */ + assert(errno == ENOENT); + return false; +} +#endif + +/* Called both as constructor and (possibly) via other constructors. */ +unsigned __attribute__((constructor)) cpuinfo_init(void) +{ + unsigned info = cpuinfo; + + if (info) { + return info; + } + + info = CPUINFO_ALWAYS; + +#ifdef CONFIG_LINUX + unsigned long hwcap = qemu_getauxval(AT_HWCAP); + info |= (hwcap & HWCAP_ATOMICS ? CPUINFO_LSE : 0); + info |= (hwcap & HWCAP_USCAT ? CPUINFO_LSE2 : 0); +#endif +#ifdef CONFIG_DARWIN + info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE") * CPUINFO_LSE; + info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE2") * CPUINFO_LSE2; +#endif + + cpuinfo = info; + return info; +} diff --git a/util/cpuinfo-i386.c b/util/cpuinfo-i386.c new file mode 100644 index 0000000000..ab6143d9e7 --- /dev/null +++ b/util/cpuinfo-i386.c @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Host specific cpu identification for x86. + */ + +#include "qemu/osdep.h" +#include "host/cpuinfo.h" +#ifdef CONFIG_CPUID_H +# include "qemu/cpuid.h" +#endif + +unsigned cpuinfo; + +/* Called both as constructor and (possibly) via other constructors. */ +unsigned __attribute__((constructor)) cpuinfo_init(void) +{ + unsigned info = cpuinfo; + + if (info) { + return info; + } + +#ifdef CONFIG_CPUID_H + unsigned max, a, b, c, d, b7 = 0, c7 = 0; + + max = __get_cpuid_max(0, 0); + + if (max >= 7) { + __cpuid_count(7, 0, a, b7, c7, d); + info |= (b7 & bit_BMI ? CPUINFO_BMI1 : 0); + info |= (b7 & bit_BMI2 ? CPUINFO_BMI2 : 0); + } + + if (max >= 1) { + __cpuid(1, a, b, c, d); + + info |= (d & bit_CMOV ? CPUINFO_CMOV : 0); + info |= (d & bit_SSE2 ? CPUINFO_SSE2 : 0); + info |= (c & bit_SSE4_1 ? CPUINFO_SSE4 : 0); + info |= (c & bit_MOVBE ? CPUINFO_MOVBE : 0); + info |= (c & bit_POPCNT ? CPUINFO_POPCNT : 0); + + /* For AVX features, we must check available and usable. */ + if ((c & bit_AVX) && (c & bit_OSXSAVE)) { + unsigned bv = xgetbv_low(0); + + if ((bv & 6) == 6) { + info |= CPUINFO_AVX1; + info |= (b7 & bit_AVX2 ? CPUINFO_AVX2 : 0); + + if ((bv & 0xe0) == 0xe0) { + info |= (b7 & bit_AVX512F ? CPUINFO_AVX512F : 0); + info |= (b7 & bit_AVX512VL ? CPUINFO_AVX512VL : 0); + info |= (b7 & bit_AVX512BW ? CPUINFO_AVX512BW : 0); + info |= (b7 & bit_AVX512DQ ? CPUINFO_AVX512DQ : 0); + info |= (c7 & bit_AVX512VBMI2 ? CPUINFO_AVX512VBMI2 : 0); + } + + /* + * The Intel SDM has added: + * Processors that enumerate support for Intel® AVX + * (by setting the feature flag CPUID.01H:ECX.AVX[bit 28]) + * guarantee that the 16-byte memory operations performed + * by the following instructions will always be carried + * out atomically: + * - MOVAPD, MOVAPS, and MOVDQA. + * - VMOVAPD, VMOVAPS, and VMOVDQA when encoded with VEX.128. + * - VMOVAPD, VMOVAPS, VMOVDQA32, and VMOVDQA64 when encoded + * with EVEX.128 and k0 (masking disabled). + * Note that these instructions require the linear addresses + * of their memory operands to be 16-byte aligned. + * + * AMD has provided an even stronger guarantee that processors + * with AVX provide 16-byte atomicity for all cachable, + * naturally aligned single loads and stores, e.g. MOVDQU. 
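
The AVX probing in cpuinfo-i386.c above checks three independent conditions: the CPU advertises AVX, the OS has enabled XSAVE (OSXSAVE), and XCR0 confirms that XMM and YMM state are actually context-switched. A standalone GCC/Clang sketch of just that check; QEMU's xgetbv_low() helper is replaced here by inline assembly:

    #include <cpuid.h>
    #include <stdbool.h>

    static unsigned xgetbv_low(unsigned c)
    {
        unsigned a, d;
        __asm__("xgetbv" : "=a"(a), "=d"(d) : "c"(c));
        return a;
    }

    /* True only if AVX exists *and* the OS saves/restores YMM state. */
    static bool avx_usable(void)
    {
        unsigned a, b, c, d;

        if (!__get_cpuid(1, &a, &b, &c, &d)) {
            return false;
        }
        if (!(c & bit_AVX) || !(c & bit_OSXSAVE)) {
            return false;
        }
        /* XCR0 bits 1 (XMM) and 2 (YMM) must both be enabled by the OS. */
        return (xgetbv_low(0) & 6) == 6;
    }
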
+ * + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104688 + */ + __cpuid(0, a, b, c, d); + if (c == signature_INTEL_ecx) { + info |= CPUINFO_ATOMIC_VMOVDQA; + } else if (c == signature_AMD_ecx) { + info |= CPUINFO_ATOMIC_VMOVDQA | CPUINFO_ATOMIC_VMOVDQU; + } + } + } + } + + max = __get_cpuid_max(0x8000000, 0); + if (max >= 1) { + __cpuid(0x80000001, a, b, c, d); + info |= (c & bit_LZCNT ? CPUINFO_LZCNT : 0); + } +#endif + + info |= CPUINFO_ALWAYS; + cpuinfo = info; + return info; +} diff --git a/util/crc32c.c b/util/crc32c.c index 762657d853..ea7f345de8 100644 --- a/util/crc32c.c +++ b/util/crc32c.c @@ -113,3 +113,11 @@ uint32_t crc32c(uint32_t crc, const uint8_t *data, unsigned int length) return crc^0xffffffff; } +uint32_t iov_crc32c(uint32_t crc, const struct iovec *iov, size_t iov_cnt) +{ + while (iov_cnt--) { + crc = crc32c(crc, iov->iov_base, iov->iov_len) ^ 0xffffffff; + iov++; + } + return crc ^ 0xffffffff; +} diff --git a/util/envlist.c b/util/envlist.c index ab5553498a..db937c0427 100644 --- a/util/envlist.c +++ b/util/envlist.c @@ -3,13 +3,13 @@ #include "qemu/envlist.h" struct envlist_entry { - const char *ev_var; /* actual env value */ - QLIST_ENTRY(envlist_entry) ev_link; + const char *ev_var; /* actual env value */ + QLIST_ENTRY(envlist_entry) ev_link; }; struct envlist { - QLIST_HEAD(, envlist_entry) el_entries; /* actual entries */ - size_t el_count; /* number of entries */ + QLIST_HEAD(, envlist_entry) el_entries; /* actual entries */ + size_t el_count; /* number of entries */ }; static int envlist_parse(envlist_t *envlist, @@ -21,14 +21,14 @@ static int envlist_parse(envlist_t *envlist, envlist_t * envlist_create(void) { - envlist_t *envlist; + envlist_t *envlist; - envlist = g_malloc(sizeof(*envlist)); + envlist = g_malloc(sizeof(*envlist)); - QLIST_INIT(&envlist->el_entries); - envlist->el_count = 0; + QLIST_INIT(&envlist->el_entries); + envlist->el_count = 0; - return (envlist); + return (envlist); } /* @@ -37,18 +37,18 @@ envlist_create(void) void envlist_free(envlist_t *envlist) { - struct envlist_entry *entry; + struct envlist_entry *entry; - assert(envlist != NULL); + assert(envlist != NULL); - while (envlist->el_entries.lh_first != NULL) { - entry = envlist->el_entries.lh_first; - QLIST_REMOVE(entry, ev_link); + while (envlist->el_entries.lh_first != NULL) { + entry = envlist->el_entries.lh_first; + QLIST_REMOVE(entry, ev_link); - g_free((char *)entry->ev_var); - g_free(entry); - } - g_free(envlist); + g_free((char *)entry->ev_var); + g_free(entry); + } + g_free(envlist); } /* @@ -65,7 +65,7 @@ envlist_free(envlist_t *envlist) int envlist_parse_set(envlist_t *envlist, const char *env) { - return (envlist_parse(envlist, env, &envlist_setenv)); + return (envlist_parse(envlist, env, &envlist_setenv)); } /* @@ -77,7 +77,7 @@ envlist_parse_set(envlist_t *envlist, const char *env) int envlist_parse_unset(envlist_t *envlist, const char *env) { - return (envlist_parse(envlist, env, &envlist_unsetenv)); + return (envlist_parse(envlist, env, &envlist_unsetenv)); } /* @@ -90,15 +90,15 @@ static int envlist_parse(envlist_t *envlist, const char *env, int (*callback)(envlist_t *, const char *)) { - char *tmpenv, *envvar; - char *envsave = NULL; + char *tmpenv, *envvar; + char *envsave = NULL; int ret = 0; assert(callback != NULL); - if ((envlist == NULL) || (env == NULL)) - return (EINVAL); + if ((envlist == NULL) || (env == NULL)) + return (EINVAL); - tmpenv = g_strdup(env); + tmpenv = g_strdup(env); envsave = tmpenv; do { @@ -109,7 +109,7 @@ envlist_parse(envlist_t 
*envlist, const char *env, if ((*callback)(envlist, tmpenv) != 0) { ret = errno; break; - } + } tmpenv = envvar + 1; } while (envvar != NULL); @@ -126,42 +126,42 @@ envlist_parse(envlist_t *envlist, const char *env, int envlist_setenv(envlist_t *envlist, const char *env) { - struct envlist_entry *entry = NULL; - const char *eq_sign; - size_t envname_len; + struct envlist_entry *entry = NULL; + const char *eq_sign; + size_t envname_len; - if ((envlist == NULL) || (env == NULL)) - return (EINVAL); + if ((envlist == NULL) || (env == NULL)) + return (EINVAL); - /* find out first equals sign in given env */ - if ((eq_sign = strchr(env, '=')) == NULL) - return (EINVAL); - envname_len = eq_sign - env + 1; + /* find out first equals sign in given env */ + if ((eq_sign = strchr(env, '=')) == NULL) + return (EINVAL); + envname_len = eq_sign - env + 1; - /* - * If there already exists variable with given name - * we remove and release it before allocating a whole - * new entry. - */ - for (entry = envlist->el_entries.lh_first; entry != NULL; - entry = entry->ev_link.le_next) { - if (strncmp(entry->ev_var, env, envname_len) == 0) - break; - } + /* + * If there already exists variable with given name + * we remove and release it before allocating a whole + * new entry. + */ + for (entry = envlist->el_entries.lh_first; entry != NULL; + entry = entry->ev_link.le_next) { + if (strncmp(entry->ev_var, env, envname_len) == 0) + break; + } - if (entry != NULL) { - QLIST_REMOVE(entry, ev_link); - g_free((char *)entry->ev_var); - g_free(entry); - } else { - envlist->el_count++; - } + if (entry != NULL) { + QLIST_REMOVE(entry, ev_link); + g_free((char *)entry->ev_var); + g_free(entry); + } else { + envlist->el_count++; + } - entry = g_malloc(sizeof(*entry)); - entry->ev_var = g_strdup(env); - QLIST_INSERT_HEAD(&envlist->el_entries, entry, ev_link); + entry = g_malloc(sizeof(*entry)); + entry->ev_var = g_strdup(env); + QLIST_INSERT_HEAD(&envlist->el_entries, entry, ev_link); - return (0); + return (0); } /* @@ -171,34 +171,34 @@ envlist_setenv(envlist_t *envlist, const char *env) int envlist_unsetenv(envlist_t *envlist, const char *env) { - struct envlist_entry *entry; - size_t envname_len; + struct envlist_entry *entry; + size_t envname_len; - if ((envlist == NULL) || (env == NULL)) - return (EINVAL); + if ((envlist == NULL) || (env == NULL)) + return (EINVAL); - /* env is not allowed to contain '=' */ - if (strchr(env, '=') != NULL) - return (EINVAL); + /* env is not allowed to contain '=' */ + if (strchr(env, '=') != NULL) + return (EINVAL); - /* - * Find out the requested entry and remove - * it from the list. - */ - envname_len = strlen(env); - for (entry = envlist->el_entries.lh_first; entry != NULL; - entry = entry->ev_link.le_next) { - if (strncmp(entry->ev_var, env, envname_len) == 0) - break; - } - if (entry != NULL) { - QLIST_REMOVE(entry, ev_link); - g_free((char *)entry->ev_var); - g_free(entry); + /* + * Find out the requested entry and remove + * it from the list. 
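
The envlist.c change here is whitespace-only (TABs to spaces), but since the whole API is on screen it is worth showing how it is meant to be driven: collect KEY=VALUE strings, then flatten them into a NULL-terminated envp array. A usage sketch against the functions visible in this hunk; the variable values are placeholders:

    #include "qemu/osdep.h"
    #include "qemu/envlist.h"

    static char **build_child_environ(size_t *count)
    {
        envlist_t *envlist = envlist_create();
        char **envp;

        envlist_setenv(envlist, "LANG=C");          /* add or replace LANG */
        envlist_setenv(envlist, "QEMU_DEMO=1");
        envlist_unsetenv(envlist, "QEMU_DEMO");     /* name only, no '=' */

        envp = envlist_to_environ(envlist, count);  /* NULL-terminated copy */
        envlist_free(envlist);
        return envp;
    }
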
+ */ + envname_len = strlen(env); + for (entry = envlist->el_entries.lh_first; entry != NULL; + entry = entry->ev_link.le_next) { + if (strncmp(entry->ev_var, env, envname_len) == 0) + break; + } + if (entry != NULL) { + QLIST_REMOVE(entry, ev_link); + g_free((char *)entry->ev_var); + g_free(entry); - envlist->el_count--; - } - return (0); + envlist->el_count--; + } + return (0); } /* @@ -214,19 +214,19 @@ envlist_unsetenv(envlist_t *envlist, const char *env) char ** envlist_to_environ(const envlist_t *envlist, size_t *count) { - struct envlist_entry *entry; - char **env, **penv; + struct envlist_entry *entry; + char **env, **penv; - penv = env = g_new(char *, envlist->el_count + 1); + penv = env = g_new(char *, envlist->el_count + 1); - for (entry = envlist->el_entries.lh_first; entry != NULL; - entry = entry->ev_link.le_next) { - *(penv++) = g_strdup(entry->ev_var); - } - *penv = NULL; /* NULL terminate the list */ + for (entry = envlist->el_entries.lh_first; entry != NULL; + entry = entry->ev_link.le_next) { + *(penv++) = g_strdup(entry->ev_var); + } + *penv = NULL; /* NULL terminate the list */ - if (count != NULL) - *count = envlist->el_count; + if (count != NULL) + *count = envlist->el_count; - return (env); + return (env); } diff --git a/util/error.c b/util/error.c index 1e7af665b8..e5e247209a 100644 --- a/util/error.c +++ b/util/error.c @@ -27,8 +27,9 @@ struct Error Error *error_abort; Error *error_fatal; +Error *error_warn; -static void error_handle_fatal(Error **errp, Error *err) +static void error_handle(Error **errp, Error *err) { if (errp == &error_abort) { fprintf(stderr, "Unexpected error in %s() at %s:%d:\n", @@ -43,6 +44,13 @@ static void error_handle_fatal(Error **errp, Error *err) error_report_err(err); exit(1); } + if (errp == &error_warn) { + warn_report_err(err); + } else if (errp && !*errp) { + *errp = err; + } else { + error_free(err); + } } G_GNUC_PRINTF(6, 0) @@ -71,8 +79,7 @@ static void error_setv(Error **errp, err->line = line; err->func = func; - error_handle_fatal(errp, err); - *errp = err; + error_handle(errp, err); errno = saved_errno; } @@ -284,12 +291,7 @@ void error_propagate(Error **dst_errp, Error *local_err) if (!local_err) { return; } - error_handle_fatal(dst_errp, local_err); - if (dst_errp && !*dst_errp) { - *dst_errp = local_err; - } else { - error_free(local_err); - } + error_handle(dst_errp, local_err); } void error_propagate_prepend(Error **dst_errp, Error *err, diff --git a/util/fdmon-epoll.c b/util/fdmon-epoll.c index e11a8a022e..c6413cb18f 100644 --- a/util/fdmon-epoll.c +++ b/util/fdmon-epoll.c @@ -64,11 +64,6 @@ static int fdmon_epoll_wait(AioContext *ctx, AioHandlerList *ready_list, int i, ret = 0; struct epoll_event events[128]; - /* Fall back while external clients are disabled */ - if (qatomic_read(&ctx->external_disable_cnt)) { - return fdmon_poll_ops.wait(ctx, ready_list, timeout); - } - if (timeout > 0) { ret = qemu_poll_ns(&pfd, 1, timeout); if (ret > 0) { @@ -127,23 +122,29 @@ static bool fdmon_epoll_try_enable(AioContext *ctx) bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd) { + bool ok; + if (ctx->epollfd < 0) { return false; } - /* Do not upgrade while external clients are disabled */ - if (qatomic_read(&ctx->external_disable_cnt)) { + if (npfd < EPOLL_ENABLE_THRESHOLD) { return false; } - if (npfd >= EPOLL_ENABLE_THRESHOLD) { - if (fdmon_epoll_try_enable(ctx)) { - return true; - } else { - fdmon_epoll_disable(ctx); - } + /* The list must not change while we add fds to epoll */ + if 
(!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) { + return false; } - return false; + + ok = fdmon_epoll_try_enable(ctx); + + qemu_lockcnt_inc_and_unlock(&ctx->list_lock); + + if (!ok) { + fdmon_epoll_disable(ctx); + } + return ok; } void fdmon_epoll_setup(AioContext *ctx) diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c index ab43052dd7..17ec18b7bd 100644 --- a/util/fdmon-io_uring.c +++ b/util/fdmon-io_uring.c @@ -276,11 +276,6 @@ static int fdmon_io_uring_wait(AioContext *ctx, AioHandlerList *ready_list, unsigned wait_nr = 1; /* block until at least one cqe is ready */ int ret; - /* Fall back while external clients are disabled */ - if (qatomic_read(&ctx->external_disable_cnt)) { - return fdmon_poll_ops.wait(ctx, ready_list, timeout); - } - if (timeout == 0) { wait_nr = 0; /* non-blocking */ } else if (timeout > 0) { @@ -315,8 +310,7 @@ static bool fdmon_io_uring_need_wait(AioContext *ctx) return true; } - /* Are we falling back to fdmon-poll? */ - return qatomic_read(&ctx->external_disable_cnt); + return false; } static const FDMonOps fdmon_io_uring_ops = { diff --git a/util/fdmon-poll.c b/util/fdmon-poll.c index 5fe3b47865..17df917cf9 100644 --- a/util/fdmon-poll.c +++ b/util/fdmon-poll.c @@ -65,8 +65,7 @@ static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list, assert(npfd == 0); QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { - if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events - && aio_node_check(ctx, node->is_external)) { + if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events) { add_pollfd(node); } } diff --git a/util/guest-random.c b/util/guest-random.c index 23643f86cc..a24d27624c 100644 --- a/util/guest-random.c +++ b/util/guest-random.c @@ -14,7 +14,7 @@ #include "qapi/error.h" #include "qemu/guest-random.h" #include "crypto/random.h" -#include "sysemu/replay.h" +#include "exec/replay-core.h" static __thread GRand *thread_rand; diff --git a/util/log.c b/util/log.c index 7837ff9917..53b4f6c58e 100644 --- a/util/log.c +++ b/util/log.c @@ -489,7 +489,7 @@ const QEMULogItem qemu_log_items[] = { "do not chain compiled TBs so that \"exec\" and \"cpu\" show\n" "complete traces" }, #ifdef CONFIG_PLUGIN - { CPU_LOG_PLUGIN, "plugin", "output from TCG plugins\n"}, + { CPU_LOG_PLUGIN, "plugin", "output from TCG plugins"}, #endif { LOG_STRACE, "strace", "log every user-mode syscall, its input, and its result" }, diff --git a/util/main-loop.c b/util/main-loop.c index 3c0f525192..014c795916 100644 --- a/util/main-loop.c +++ b/util/main-loop.c @@ -252,10 +252,6 @@ static int max_priority; static int glib_pollfds_idx; static int glib_n_poll_fds; -void qemu_fd_register(int fd) -{ -} - static void glib_pollfds_fill(int64_t *cur_timeout) { GMainContext *context = g_main_context_default(); @@ -414,13 +410,6 @@ void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque) } } -void qemu_fd_register(int fd) -{ - WSAEventSelect(fd, event_notifier_get_handle(&qemu_aio_context->notifier), - FD_READ | FD_ACCEPT | FD_CLOSE | - FD_CONNECT | FD_WRITE | FD_OOB); -} - static int pollfds_fill(GArray *pollfds, fd_set *rfds, fd_set *wfds, fd_set *xfds) { @@ -616,9 +605,11 @@ void main_loop_wait(int nonblocking) /* Functions to operate on the main QEMU AioContext. 
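
With is_external gone from the AioContext API, handler registration everywhere in the tree takes the callback/opaque arguments seen in the hunks above, and nothing else. For orientation, a caller sketch against the new signature; the fd and callbacks are placeholders:

    #include "qemu/osdep.h"
    #include "block/aio.h"

    static void my_read_ready(void *opaque)
    {
        /* drain the fd referenced through opaque ... */
    }

    static void watch_fd(AioContext *ctx, int fd, void *opaque)
    {
        /* New signature: no is_external flag; unused callbacks are NULL. */
        aio_set_fd_handler(ctx, fd,
                           my_read_ready, /* io_read */
                           NULL,          /* io_write */
                           NULL,          /* io_poll */
                           NULL,          /* io_poll_ready */
                           opaque);
    }

    static void unwatch_fd(AioContext *ctx, int fd)
    {
        /* Unregister by passing NULL for every callback. */
        aio_set_fd_handler(ctx, fd, NULL, NULL, NULL, NULL, NULL);
    }
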
*/ -QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name) +QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name, + MemReentrancyGuard *reentrancy_guard) { - return aio_bh_new_full(qemu_aio_context, cb, opaque, name); + return aio_bh_new_full(qemu_aio_context, cb, opaque, name, + reentrancy_guard); } /* @@ -653,14 +644,13 @@ void qemu_set_fd_handler(int fd, void *opaque) { iohandler_init(); - aio_set_fd_handler(iohandler_ctx, fd, false, - fd_read, fd_write, NULL, NULL, opaque); + aio_set_fd_handler(iohandler_ctx, fd, fd_read, fd_write, NULL, NULL, + opaque); } void event_notifier_set_handler(EventNotifier *e, EventNotifierHandler *handler) { iohandler_init(); - aio_set_event_notifier(iohandler_ctx, e, false, - handler, NULL, NULL); + aio_set_event_notifier(iohandler_ctx, e, handler, NULL, NULL); } diff --git a/util/meson.build b/util/meson.build index 26c73e586b..3a93071d27 100644 --- a/util/meson.build +++ b/util/meson.build @@ -26,6 +26,9 @@ util_ss.add(when: 'CONFIG_WIN32', if_true: files('oslib-win32.c')) util_ss.add(when: 'CONFIG_WIN32', if_true: files('qemu-thread-win32.c')) util_ss.add(when: 'CONFIG_WIN32', if_true: winmm) util_ss.add(when: 'CONFIG_WIN32', if_true: pathcch) +if glib_has_gslice + util_ss.add(files('qtree.c')) +endif util_ss.add(files('envlist.c', 'path.c', 'module.c')) util_ss.add(files('host-utils.c')) util_ss.add(files('bitmap.c', 'bitops.c')) @@ -75,7 +78,7 @@ if have_block or have_ga util_ss.add(files('base64.c')) util_ss.add(files('main-loop.c')) util_ss.add(files('qemu-coroutine.c', 'qemu-coroutine-lock.c', 'qemu-coroutine-io.c')) - util_ss.add(files('coroutine-@0@.c'.format(config_host['CONFIG_COROUTINE_BACKEND']))) + util_ss.add(files(f'coroutine-@coroutine_backend@.c')) util_ss.add(files('thread-pool.c', 'qemu-timer.c')) util_ss.add(files('qemu-sockets.c')) endif @@ -105,3 +108,9 @@ if have_block endif util_ss.add(when: 'CONFIG_LINUX', if_true: files('vfio-helpers.c')) endif + +if cpu == 'aarch64' + util_ss.add(files('cpuinfo-aarch64.c')) +elif cpu in ['x86', 'x86_64'] + util_ss.add(files('cpuinfo-i386.c')) +endif diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c index 5ed7d29183..ed14f9c64d 100644 --- a/util/mmap-alloc.c +++ b/util/mmap-alloc.c @@ -27,8 +27,36 @@ #ifdef CONFIG_LINUX #include +#include #endif +QemuFsType qemu_fd_getfs(int fd) +{ +#ifdef CONFIG_LINUX + struct statfs fs; + int ret; + + if (fd < 0) { + return QEMU_FS_TYPE_UNKNOWN; + } + + do { + ret = fstatfs(fd, &fs); + } while (ret != 0 && errno == EINTR); + + switch (fs.f_type) { + case TMPFS_MAGIC: + return QEMU_FS_TYPE_TMPFS; + case HUGETLBFS_MAGIC: + return QEMU_FS_TYPE_HUGETLBFS; + default: + return QEMU_FS_TYPE_UNKNOWN; + } +#else + return QEMU_FS_TYPE_UNKNOWN; +#endif +} + size_t qemu_fd_getpagesize(int fd) { #ifdef CONFIG_LINUX diff --git a/util/oslib-posix.c b/util/oslib-posix.c index 77d882e681..760390b31e 100644 --- a/util/oslib-posix.c +++ b/util/oslib-posix.c @@ -583,76 +583,6 @@ char *qemu_get_pid_name(pid_t pid) } -pid_t qemu_fork(Error **errp) -{ - sigset_t oldmask, newmask; - struct sigaction sig_action; - int saved_errno; - pid_t pid; - - /* - * Need to block signals now, so that child process can safely - * kill off caller's signal handlers without a race. 
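
The qemu_fd_getfs() helper introduced in the mmap-alloc.c hunk above classifies the filesystem backing a memory file descriptor, retrying fstatfs() on EINTR. A usage sketch (the path and the header location are assumptions for illustration):

    #include "qemu/osdep.h"
    #include "qemu/mmap-alloc.h"   /* assumed home of QemuFsType/qemu_fd_getfs */

    static bool fd_backing_is_tmpfs_or_hugetlbfs(const char *path)
    {
        int fd = open(path, O_RDWR);   /* e.g. "/dev/shm/mem" (illustrative) */
        QemuFsType type;

        if (fd < 0) {
            return false;
        }
        type = qemu_fd_getfs(fd);
        close(fd);

        /* Only these two filesystems are identified; anything else is
         * QEMU_FS_TYPE_UNKNOWN (including every non-Linux host). */
        return type == QEMU_FS_TYPE_TMPFS || type == QEMU_FS_TYPE_HUGETLBFS;
    }
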
- */ - sigfillset(&newmask); - if (pthread_sigmask(SIG_SETMASK, &newmask, &oldmask) != 0) { - error_setg_errno(errp, errno, - "cannot block signals"); - return -1; - } - - pid = fork(); - saved_errno = errno; - - if (pid < 0) { - /* attempt to restore signal mask, but ignore failure, to - * avoid obscuring the fork failure */ - (void)pthread_sigmask(SIG_SETMASK, &oldmask, NULL); - error_setg_errno(errp, saved_errno, - "cannot fork child process"); - errno = saved_errno; - return -1; - } else if (pid) { - /* parent process */ - - /* Restore our original signal mask now that the child is - * safely running. Only documented failures are EFAULT (not - * possible, since we are using just-grabbed mask) or EINVAL - * (not possible, since we are using correct arguments). */ - (void)pthread_sigmask(SIG_SETMASK, &oldmask, NULL); - } else { - /* child process */ - size_t i; - - /* Clear out all signal handlers from parent so nothing - * unexpected can happen in our child once we unblock - * signals */ - sig_action.sa_handler = SIG_DFL; - sig_action.sa_flags = 0; - sigemptyset(&sig_action.sa_mask); - - for (i = 1; i < NSIG; i++) { - /* Only possible errors are EFAULT or EINVAL The former - * won't happen, the latter we expect, so no need to check - * return value */ - (void)sigaction(i, &sig_action, NULL); - } - - /* Unmask all signals in child, since we've no idea what the - * caller's done with their signal mask and don't want to - * propagate that to children */ - sigemptyset(&newmask); - if (pthread_sigmask(SIG_SETMASK, &newmask, NULL) != 0) { - Error *local_err = NULL; - error_setg_errno(&local_err, errno, - "cannot unblock signals"); - error_report_err(local_err); - _exit(1); - } - } - return pid; -} - void *qemu_alloc_stack(size_t *sz) { void *ptr, *guardpage; diff --git a/util/oslib-win32.c b/util/oslib-win32.c index 07ade41800..fafbab80b4 100644 --- a/util/oslib-win32.c +++ b/util/oslib-win32.c @@ -180,7 +180,7 @@ static int socket_error(void) void qemu_socket_set_block(int fd) { unsigned long opt = 0; - WSAEventSelect(fd, NULL, 0); + qemu_socket_unselect(fd, NULL); ioctlsocket(fd, FIONBIO, &opt); } @@ -283,21 +283,155 @@ char *qemu_get_pid_name(pid_t pid) } -pid_t qemu_fork(Error **errp) +bool qemu_socket_select(int sockfd, WSAEVENT hEventObject, + long lNetworkEvents, Error **errp) { - errno = ENOSYS; - error_setg_errno(errp, errno, - "cannot fork child process"); - return -1; + SOCKET s = _get_osfhandle(sockfd); + + if (errp == NULL) { + errp = &error_warn; + } + + if (s == INVALID_SOCKET) { + error_setg(errp, "invalid socket fd=%d", sockfd); + return false; + } + + if (WSAEventSelect(s, hEventObject, lNetworkEvents) != 0) { + error_setg_win32(errp, WSAGetLastError(), "failed to WSAEventSelect()"); + return false; + } + + return true; } +bool qemu_socket_unselect(int sockfd, Error **errp) +{ + return qemu_socket_select(sockfd, NULL, 0, errp); +} + +int qemu_socketpair(int domain, int type, int protocol, int sv[2]) +{ + struct sockaddr_un addr = { + 0, + }; + socklen_t socklen; + int listener = -1; + int client = -1; + int server = -1; + g_autofree char *path = NULL; + int tmpfd; + u_long arg; + int ret = -1; + + g_return_val_if_fail(sv != NULL, -1); + + addr.sun_family = AF_UNIX; + socklen = sizeof(addr); + + tmpfd = g_file_open_tmp(NULL, &path, NULL); + if (tmpfd == -1 || !path) { + errno = EACCES; + goto out; + } + + close(tmpfd); + + if (strlen(path) >= sizeof(addr.sun_path)) { + errno = EINVAL; + goto out; + } + + strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1); + + listener = 
socket(domain, type, protocol); + if (listener == -1) { + goto out; + } + + if (DeleteFile(path) == 0 && GetLastError() != ERROR_FILE_NOT_FOUND) { + errno = EACCES; + goto out; + } + g_clear_pointer(&path, g_free); + + if (bind(listener, (struct sockaddr *)&addr, socklen) == -1) { + goto out; + } + + if (listen(listener, 1) == -1) { + goto out; + } + + client = socket(domain, type, protocol); + if (client == -1) { + goto out; + } + + arg = 1; + if (ioctlsocket(client, FIONBIO, &arg) != NO_ERROR) { + goto out; + } + + if (connect(client, (struct sockaddr *)&addr, socklen) == -1 && + WSAGetLastError() != WSAEWOULDBLOCK) { + goto out; + } + + server = accept(listener, NULL, NULL); + if (server == -1) { + goto out; + } + + arg = 0; + if (ioctlsocket(client, FIONBIO, &arg) != NO_ERROR) { + goto out; + } + + arg = 0; + if (ioctlsocket(client, SIO_AF_UNIX_GETPEERPID, &arg) != NO_ERROR) { + goto out; + } + + if (arg != GetCurrentProcessId()) { + errno = EPERM; + goto out; + } + + sv[0] = server; + server = -1; + sv[1] = client; + client = -1; + ret = 0; + +out: + if (listener != -1) { + close(listener); + } + if (client != -1) { + close(client); + } + if (server != -1) { + close(server); + } + if (path) { + DeleteFile(path); + } + return ret; +} #undef connect int qemu_connect_wrap(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { int ret; - ret = connect(sockfd, addr, addrlen); + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; + } + + ret = connect(s, addr, addrlen); if (ret < 0) { if (WSAGetLastError() == WSAEWOULDBLOCK) { errno = EINPROGRESS; @@ -313,7 +447,13 @@ int qemu_connect_wrap(int sockfd, const struct sockaddr *addr, int qemu_listen_wrap(int sockfd, int backlog) { int ret; - ret = listen(sockfd, backlog); + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; + } + + ret = listen(s, backlog); if (ret < 0) { errno = socket_error(); } @@ -326,23 +466,112 @@ int qemu_bind_wrap(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { int ret; - ret = bind(sockfd, addr, addrlen); + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; + } + + ret = bind(s, addr, addrlen); if (ret < 0) { errno = socket_error(); } return ret; } +EXCEPTION_DISPOSITION +win32_close_exception_handler(struct _EXCEPTION_RECORD*, + void*, struct _CONTEXT*, void*) +{ + return EXCEPTION_EXECUTE_HANDLER; +} + +#undef close +int qemu_close_socket_osfhandle(int fd) +{ + SOCKET s = _get_osfhandle(fd); + DWORD flags = 0; + + /* + * If we were to just call _close on the descriptor, it would close the + * HANDLE, but it wouldn't free any of the resources associated to the + * SOCKET, and we can't call _close after calling closesocket, because + * closesocket has already closed the HANDLE, and _close would attempt to + * close the HANDLE again, resulting in a double free. We can however + * protect the HANDLE from actually being closed long enough to close the + * file descriptor, then close the socket itself. 
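
The qemu_socketpair() emulation above gives Win32 callers the POSIX socketpair(2) contract: two connected AF_UNIX descriptors, with a SIO_AF_UNIX_GETPEERPID check that the accepted peer really is this process. A caller-side sketch (assuming the usual QEMU socket #define remappings are in effect, so close() and send() route through the wrappers below):

    #include "qemu/osdep.h"
    #include "qemu/sockets.h"
    #include "qemu/error-report.h"

    static void demo_socketpair(void)
    {
        int sv[2];

        if (qemu_socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) {
            error_report("socketpair failed: %s", strerror(errno));
            return;
        }

        /* Either end may send; the other receives. */
        send(sv[0], "ping", 4, 0);

        close(sv[1]);
        close(sv[0]);
    }
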
+ */ + if (!GetHandleInformation((HANDLE)s, &flags)) { + errno = EACCES; + return -1; + } + + if (!SetHandleInformation((HANDLE)s, HANDLE_FLAG_PROTECT_FROM_CLOSE, HANDLE_FLAG_PROTECT_FROM_CLOSE)) { + errno = EACCES; + return -1; + } + + __try1(win32_close_exception_handler) { + /* + * close() returns EBADF since we PROTECT_FROM_CLOSE the underlying + * handle, but the FD is actually freed + */ + if (close(fd) < 0 && errno != EBADF) { + return -1; + } + } + __except1 { + } + + if (!SetHandleInformation((HANDLE)s, flags, flags)) { + errno = EACCES; + return -1; + } + + return 0; +} + +int qemu_close_wrap(int fd) +{ + SOCKET s = INVALID_SOCKET; + int ret = -1; + + if (!fd_is_socket(fd)) { + return close(fd); + } + + s = _get_osfhandle(fd); + qemu_close_socket_osfhandle(fd); + + ret = closesocket(s); + if (ret < 0) { + errno = socket_error(); + } + + return ret; +} + #undef socket int qemu_socket_wrap(int domain, int type, int protocol) { - int ret; - ret = socket(domain, type, protocol); - if (ret < 0) { + SOCKET s; + int fd; + + s = socket(domain, type, protocol); + if (s == -1) { errno = socket_error(); + return -1; } - return ret; + + fd = _open_osfhandle(s, _O_BINARY); + if (fd < 0) { + closesocket(s); + /* _open_osfhandle may not set errno, and closesocket() may override it */ + errno = ENOMEM; + } + + return fd; } @@ -350,12 +579,27 @@ int qemu_socket_wrap(int domain, int type, int protocol) int qemu_accept_wrap(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { - int ret; - ret = accept(sockfd, addr, addrlen); - if (ret < 0) { - errno = socket_error(); + int fd; + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; } - return ret; + + s = accept(s, addr, addrlen); + if (s == -1) { + errno = socket_error(); + return -1; + } + + fd = _open_osfhandle(s, _O_BINARY); + if (fd < 0) { + closesocket(s); + /* _open_osfhandle may not set errno, and closesocket() may override it */ + errno = ENOMEM; + } + + return fd; } @@ -363,7 +607,13 @@ int qemu_accept_wrap(int sockfd, struct sockaddr *addr, int qemu_shutdown_wrap(int sockfd, int how) { int ret; - ret = shutdown(sockfd, how); + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; + } + + ret = shutdown(s, how); if (ret < 0) { errno = socket_error(); } @@ -375,19 +625,13 @@ int qemu_shutdown_wrap(int sockfd, int how) int qemu_ioctlsocket_wrap(int fd, int req, void *val) { int ret; - ret = ioctlsocket(fd, req, val); - if (ret < 0) { - errno = socket_error(); + SOCKET s = _get_osfhandle(fd); + + if (s == INVALID_SOCKET) { + return -1; } - return ret; -} - -#undef closesocket -int qemu_closesocket_wrap(int fd) -{ - int ret; - ret = closesocket(fd); + ret = ioctlsocket(s, req, val); if (ret < 0) { errno = socket_error(); } @@ -400,7 +644,13 @@ int qemu_getsockopt_wrap(int sockfd, int level, int optname, void *optval, socklen_t *optlen) { int ret; - ret = getsockopt(sockfd, level, optname, optval, optlen); + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; + } + + ret = getsockopt(s, level, optname, optval, optlen); if (ret < 0) { errno = socket_error(); } @@ -413,7 +663,13 @@ int qemu_setsockopt_wrap(int sockfd, int level, int optname, const void *optval, socklen_t optlen) { int ret; - ret = setsockopt(sockfd, level, optname, optval, optlen); + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; + } + + ret = setsockopt(s, level, optname, optval, optlen); if (ret < 0) { errno = socket_error(); } @@ -426,7 +682,13 @@ int 
qemu_getpeername_wrap(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { int ret; - ret = getpeername(sockfd, addr, addrlen); + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; + } + + ret = getpeername(s, addr, addrlen); if (ret < 0) { errno = socket_error(); } @@ -439,7 +701,13 @@ int qemu_getsockname_wrap(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { int ret; - ret = getsockname(sockfd, addr, addrlen); + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; + } + + ret = getsockname(s, addr, addrlen); if (ret < 0) { errno = socket_error(); } @@ -451,7 +719,13 @@ int qemu_getsockname_wrap(int sockfd, struct sockaddr *addr, ssize_t qemu_send_wrap(int sockfd, const void *buf, size_t len, int flags) { int ret; - ret = send(sockfd, buf, len, flags); + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; + } + + ret = send(s, buf, len, flags); if (ret < 0) { errno = socket_error(); } @@ -464,7 +738,13 @@ ssize_t qemu_sendto_wrap(int sockfd, const void *buf, size_t len, int flags, const struct sockaddr *addr, socklen_t addrlen) { int ret; - ret = sendto(sockfd, buf, len, flags, addr, addrlen); + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; + } + + ret = sendto(s, buf, len, flags, addr, addrlen); if (ret < 0) { errno = socket_error(); } @@ -476,7 +756,13 @@ ssize_t qemu_sendto_wrap(int sockfd, const void *buf, size_t len, int flags, ssize_t qemu_recv_wrap(int sockfd, void *buf, size_t len, int flags) { int ret; - ret = recv(sockfd, buf, len, flags); + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; + } + + ret = recv(s, buf, len, flags); if (ret < 0) { errno = socket_error(); } @@ -489,7 +775,13 @@ ssize_t qemu_recvfrom_wrap(int sockfd, void *buf, size_t len, int flags, struct sockaddr *addr, socklen_t *addrlen) { int ret; - ret = recvfrom(sockfd, buf, len, flags, addr, addrlen); + SOCKET s = _get_osfhandle(sockfd); + + if (s == INVALID_SOCKET) { + return -1; + } + + ret = recvfrom(s, buf, len, flags, addr, addrlen); if (ret < 0) { errno = socket_error(); } diff --git a/util/qemu-coroutine-io.c b/util/qemu-coroutine-io.c index d791932d63..364f4d5abf 100644 --- a/util/qemu-coroutine-io.c +++ b/util/qemu-coroutine-io.c @@ -74,8 +74,7 @@ typedef struct { static void fd_coroutine_enter(void *opaque) { FDYieldUntilData *data = opaque; - aio_set_fd_handler(data->ctx, data->fd, false, - NULL, NULL, NULL, NULL, NULL); + aio_set_fd_handler(data->ctx, data->fd, NULL, NULL, NULL, NULL, NULL); qemu_coroutine_enter(data->co); } @@ -87,7 +86,7 @@ void coroutine_fn yield_until_fd_readable(int fd) data.ctx = qemu_get_current_aio_context(); data.co = qemu_coroutine_self(); data.fd = fd; - aio_set_fd_handler( - data.ctx, fd, false, fd_coroutine_enter, NULL, NULL, NULL, &data); + aio_set_fd_handler(data.ctx, fd, fd_coroutine_enter, NULL, NULL, NULL, + &data); qemu_coroutine_yield(); } diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c index 58f3f77181..84a50a9e91 100644 --- a/util/qemu-coroutine-lock.c +++ b/util/qemu-coroutine-lock.c @@ -201,10 +201,16 @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx, trace_qemu_co_mutex_lock_entry(mutex, self); push_waiter(mutex, &w); + /* + * Add waiter before reading mutex->handoff. Pairs with qatomic_mb_set + * in qemu_co_mutex_unlock. 
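
The qemu-coroutine-lock.c hunk above replaces qatomic_mb_read(&mutex->handoff) with a plain read ordered by smp_mb__after_rmw(): the lock side must publish its waiter before reading handoff, while the unlock side publishes handoff (qatomic_mb_set) before checking for waiters. This is the classic store-buffering shape, which is only safe with full barriers on both sides. An illustrative stand-alone C11 analogue (not QEMU code; the names are invented for the sketch):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int waiters;   /* stands in for the waiter-queue push */
    static atomic_int handoff;   /* stands in for mutex->handoff */

    /* lock slow path: publish the waiter, then look for a handoff */
    static bool lock_side(void)
    {
        atomic_fetch_add(&waiters, 1);                 /* the RMW (push_waiter) */
        atomic_thread_fence(memory_order_seq_cst);     /* smp_mb__after_rmw() */
        return atomic_load_explicit(&handoff, memory_order_relaxed) != 0;
    }

    /* unlock: publish the handoff, then look for waiters */
    static bool unlock_side(int sequence)
    {
        atomic_store_explicit(&handoff, sequence, memory_order_seq_cst);
        atomic_thread_fence(memory_order_seq_cst);     /* qatomic_mb_set() */
        return atomic_load_explicit(&waiters, memory_order_relaxed) != 0;
    }

Run concurrently, the seq-cst fences forbid the store-buffering outcome in which both functions return false, so a handoff can never be lost.
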
+ */ + smp_mb__after_rmw(); + /* This is the "Responsibility Hand-Off" protocol; a lock() picks from * a concurrent unlock() the responsibility of waking somebody up. */ - old_handoff = qatomic_mb_read(&mutex->handoff); + old_handoff = qatomic_read(&mutex->handoff); if (old_handoff && has_waiters(mutex) && qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) { @@ -303,6 +309,7 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex) } our_handoff = mutex->sequence; + /* Set handoff before checking for waiters. */ qatomic_mb_set(&mutex->handoff, our_handoff); if (!has_waiters(mutex)) { /* The concurrent lock has not added itself yet, so it diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c index 8494523692..17a88f6505 100644 --- a/util/qemu-coroutine.c +++ b/util/qemu-coroutine.c @@ -127,9 +127,13 @@ void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co) Coroutine *to = QSIMPLEQ_FIRST(&pending); CoroutineAction ret; - /* Cannot rely on the read barrier for to in aio_co_wake(), as there are - * callers outside of aio_co_wake() */ - const char *scheduled = qatomic_mb_read(&to->scheduled); + /* + * Read to before to->scheduled; pairs with qatomic_cmpxchg in + * qemu_co_sleep(), aio_co_schedule() etc. + */ + smp_read_barrier_depends(); + + const char *scheduled = qatomic_read(&to->scheduled); QSIMPLEQ_REMOVE_HEAD(&pending, co_queue_next); diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c index 6538859b87..c06a4dce77 100644 --- a/util/qemu-sockets.c +++ b/util/qemu-sockets.c @@ -326,7 +326,7 @@ static int inet_listen_saddr(InetSocketAddress *saddr, * recover from this situation, so we need to recreate the * socket to allow bind attempts for subsequent ports: */ - closesocket(slisten); + close(slisten); slisten = -1; } } @@ -337,7 +337,7 @@ static int inet_listen_saddr(InetSocketAddress *saddr, listen_failed: saved_errno = errno; if (slisten >= 0) { - closesocket(slisten); + close(slisten); } freeaddrinfo(res); errno = saved_errno; @@ -380,7 +380,7 @@ static int inet_connect_addr(const InetSocketAddress *saddr, if (rc < 0) { error_setg_errno(errp, errno, "Failed to connect to '%s:%s'", saddr->host, saddr->port); - closesocket(sock); + close(sock); return -1; } @@ -483,7 +483,7 @@ int inet_connect_saddr(InetSocketAddress *saddr, Error **errp) if (ret < 0) { error_setg_errno(errp, errno, "Unable to set KEEPALIVE"); - closesocket(sock); + close(sock); return -1; } } @@ -580,7 +580,7 @@ static int inet_dgram_saddr(InetSocketAddress *sraddr, err: if (sock != -1) { - closesocket(sock); + close(sock); } if (local) { freeaddrinfo(local); @@ -777,7 +777,7 @@ static int vsock_connect_addr(const VsockSocketAddress *vaddr, if (rc < 0) { error_setg_errno(errp, errno, "Failed to connect to '%s:%s'", vaddr->cid, vaddr->port); - closesocket(sock); + close(sock); return -1; } @@ -814,13 +814,13 @@ static int vsock_listen_saddr(VsockSocketAddress *vaddr, if (bind(slisten, (const struct sockaddr *)&svm, sizeof(svm)) != 0) { error_setg_errno(errp, errno, "Failed to bind socket"); - closesocket(slisten); + close(slisten); return -1; } if (listen(slisten, num) != 0) { error_setg_errno(errp, errno, "Failed to listen on socket"); - closesocket(slisten); + close(slisten); return -1; } return slisten; @@ -978,7 +978,7 @@ static int unix_listen_saddr(UnixSocketAddress *saddr, err: g_free(pathbuf); - closesocket(sock); + close(sock); return -1; } @@ -1041,7 +1041,7 @@ static int unix_connect_saddr(UnixSocketAddress *saddr, Error **errp) return sock; err: - closesocket(sock); + close(sock); 
return -1; } @@ -1238,7 +1238,7 @@ int socket_listen(SocketAddress *addr, int num, Error **errp) */ if (listen(fd, num) != 0) { error_setg_errno(errp, errno, "Failed to listen on fd socket"); - closesocket(fd); + close(fd); return -1; } break; diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c index 93d2505797..b2e26e2120 100644 --- a/util/qemu-thread-posix.c +++ b/util/qemu-thread-posix.c @@ -384,13 +384,21 @@ void qemu_event_destroy(QemuEvent *ev) void qemu_event_set(QemuEvent *ev) { - /* qemu_event_set has release semantics, but because it *loads* + assert(ev->initialized); + + /* + * Pairs with both qemu_event_reset() and qemu_event_wait(). + * + * qemu_event_set has release semantics, but because it *loads* * ev->value we need a full memory barrier here. */ - assert(ev->initialized); smp_mb(); if (qatomic_read(&ev->value) != EV_SET) { - if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) { + int old = qatomic_xchg(&ev->value, EV_SET); + + /* Pairs with memory barrier in kernel futex_wait system call. */ + smp_mb__after_rmw(); + if (old == EV_BUSY) { /* There were waiters, wake them up. */ qemu_futex_wake(ev, INT_MAX); } @@ -399,18 +407,19 @@ void qemu_event_set(QemuEvent *ev) void qemu_event_reset(QemuEvent *ev) { - unsigned value; - assert(ev->initialized); - value = qatomic_read(&ev->value); - smp_mb_acquire(); - if (value == EV_SET) { - /* - * If there was a concurrent reset (or even reset+wait), - * do nothing. Otherwise change EV_SET->EV_FREE. - */ - qatomic_or(&ev->value, EV_FREE); - } + + /* + * If there was a concurrent reset (or even reset+wait), + * do nothing. Otherwise change EV_SET->EV_FREE. + */ + qatomic_or(&ev->value, EV_FREE); + + /* + * Order reset before checking the condition in the caller. + * Pairs with the first memory barrier in qemu_event_set(). + */ + smp_mb__after_rmw(); } void qemu_event_wait(QemuEvent *ev) @@ -418,20 +427,40 @@ void qemu_event_wait(QemuEvent *ev) unsigned value; assert(ev->initialized); - value = qatomic_read(&ev->value); - smp_mb_acquire(); + + /* + * qemu_event_wait must synchronize with qemu_event_set even if it does + * not go down the slow path, so this load-acquire is needed that + * synchronizes with the first memory barrier in qemu_event_set(). + * + * If we do go down the slow path, there is no requirement at all: we + * might miss a qemu_event_set() here but ultimately the memory barrier in + * qemu_futex_wait() will ensure the check is done correctly. + */ + value = qatomic_load_acquire(&ev->value); if (value != EV_SET) { if (value == EV_FREE) { /* - * Leave the event reset and tell qemu_event_set that there - * are waiters. No need to retry, because there cannot be - * a concurrent busy->free transition. After the CAS, the - * event will be either set or busy. + * Leave the event reset and tell qemu_event_set that there are + * waiters. No need to retry, because there cannot be a concurrent + * busy->free transition. After the CAS, the event will be either + * set or busy. + * + * This cmpxchg doesn't have particular ordering requirements if it + * succeeds (moving the store earlier can only cause qemu_event_set() + * to issue _more_ wakeups), the failing case needs acquire semantics + * like the load above. */ if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) { return; } } + + /* + * This is the final check for a concurrent set, so it does need + * a smp_mb() pairing with the second barrier of qemu_event_set(). + * The barrier is inside the FUTEX_WAIT system call. 
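
For reference, the whole rewritten POSIX QemuEvent protocol fits in a few lines. Below is an illustrative, self-contained analogue using Linux futexes and C11 atomics with the same barrier placement; the state values are illustrative rather than necessarily QEMU's, and error handling is omitted:

    #include <limits.h>
    #include <linux/futex.h>
    #include <stdatomic.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    enum { EV_SET = 0, EV_FREE = 1, EV_BUSY = 2 };      /* illustrative values */

    static _Atomic unsigned value = EV_SET;

    static void futex_op(int op, unsigned val)
    {
        syscall(SYS_futex, &value, op, val, NULL, NULL, 0);
    }

    static void event_set(void)
    {
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        if (atomic_load_explicit(&value, memory_order_relaxed) != EV_SET &&
            atomic_exchange(&value, EV_SET) == EV_BUSY) {
            /* the seq-cst xchg stands in for smp_mb__after_rmw() */
            futex_op(FUTEX_WAKE, INT_MAX);              /* wake all waiters */
        }
    }

    static void event_reset(void)   /* must not race with event_wait() */
    {
        atomic_fetch_or(&value, EV_FREE);               /* EV_SET -> EV_FREE */
    }

    static void event_wait(void)
    {
        unsigned v = atomic_load_explicit(&value, memory_order_acquire);

        if (v == EV_SET) {
            return;                                     /* fast path */
        }
        if (v == EV_FREE &&
            !atomic_compare_exchange_strong(&value, &v, EV_BUSY) &&
            v == EV_SET) {
            return;                 /* a concurrent set won the race */
        }
        /* the kernel re-checks value == EV_BUSY before sleeping */
        futex_op(FUTEX_WAIT, EV_BUSY);
    }
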
+ */ qemu_futex_wait(ev, EV_BUSY); } } diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c index 69db254ac7..a7fe3cc345 100644 --- a/util/qemu-thread-win32.c +++ b/util/qemu-thread-win32.c @@ -272,12 +272,20 @@ void qemu_event_destroy(QemuEvent *ev) void qemu_event_set(QemuEvent *ev) { assert(ev->initialized); - /* qemu_event_set has release semantics, but because it *loads* + + /* + * Pairs with both qemu_event_reset() and qemu_event_wait(). + * + * qemu_event_set has release semantics, but because it *loads* * ev->value we need a full memory barrier here. */ smp_mb(); if (qatomic_read(&ev->value) != EV_SET) { - if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) { + int old = qatomic_xchg(&ev->value, EV_SET); + + /* Pairs with memory barrier after ResetEvent. */ + smp_mb__after_rmw(); + if (old == EV_BUSY) { /* There were waiters, wake them up. */ SetEvent(ev->event); } @@ -286,17 +294,19 @@ void qemu_event_set(QemuEvent *ev) void qemu_event_reset(QemuEvent *ev) { - unsigned value; - assert(ev->initialized); - value = qatomic_read(&ev->value); - smp_mb_acquire(); - if (value == EV_SET) { - /* If there was a concurrent reset (or even reset+wait), - * do nothing. Otherwise change EV_SET->EV_FREE. - */ - qatomic_or(&ev->value, EV_FREE); - } + + /* + * If there was a concurrent reset (or even reset+wait), + * do nothing. Otherwise change EV_SET->EV_FREE. + */ + qatomic_or(&ev->value, EV_FREE); + + /* + * Order reset before checking the condition in the caller. + * Pairs with the first memory barrier in qemu_event_set(). + */ + smp_mb__after_rmw(); } void qemu_event_wait(QemuEvent *ev) @@ -304,29 +314,49 @@ void qemu_event_wait(QemuEvent *ev) unsigned value; assert(ev->initialized); - value = qatomic_read(&ev->value); - smp_mb_acquire(); + + /* + * qemu_event_wait must synchronize with qemu_event_set even if it does + * not go down the slow path, so this load-acquire is needed that + * synchronizes with the first memory barrier in qemu_event_set(). + * + * If we do go down the slow path, there is no requirement at all: we + * might miss a qemu_event_set() here but ultimately the memory barrier in + * qemu_futex_wait() will ensure the check is done correctly. + */ + value = qatomic_load_acquire(&ev->value); if (value != EV_SET) { if (value == EV_FREE) { - /* qemu_event_set is not yet going to call SetEvent, but we are - * going to do another check for EV_SET below when setting EV_BUSY. - * At that point it is safe to call WaitForSingleObject. + /* + * Here the underlying kernel event is reset, but qemu_event_set is + * not yet going to call SetEvent. However, there will be another + * check for EV_SET below when setting EV_BUSY. At that point it + * is safe to call WaitForSingleObject. */ ResetEvent(ev->event); - /* Tell qemu_event_set that there are waiters. No need to retry - * because there cannot be a concurrent busy->free transition. - * After the CAS, the event will be either set or busy. + /* + * It is not clear whether ResetEvent provides this barrier; kernel + * APIs (KeResetEvent/KeClearEvent) do not. Better safe than sorry! + */ + smp_mb(); + + /* + * Leave the event reset and tell qemu_event_set that there are + * waiters. No need to retry, because there cannot be a concurrent + * busy->free transition. After the CAS, the event will be either + * set or busy. 
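
Caller-visible semantics are unchanged on both POSIX and Win32: qemu_event_set() may race with qemu_event_wait(), but qemu_event_reset() must not. A usage sketch:

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    static QemuEvent done_ev;

    static void *worker(void *opaque)
    {
        /* ... do work ... */
        qemu_event_set(&done_ev);        /* may race with qemu_event_wait() */
        return NULL;
    }

    static void wait_for_worker(void)
    {
        qemu_event_init(&done_ev, false);    /* start unset */
        /* spawn worker with qemu_thread_create() ... */
        qemu_event_wait(&done_ev);           /* returns once set */
        qemu_event_reset(&done_ev);          /* must not race with wait */
        qemu_event_destroy(&done_ev);
    }
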
*/ if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) { - value = EV_SET; - } else { - value = EV_BUSY; + return; } } - if (value == EV_BUSY) { - WaitForSingleObject(ev->event, INFINITE); - } + + /* + * ev->value is now EV_BUSY. Since we didn't observe EV_SET, + * qemu_event_set() must observe EV_BUSY and call SetEvent(). + */ + WaitForSingleObject(ev->event, INFINITE); } } diff --git a/util/qtree.c b/util/qtree.c new file mode 100644 index 0000000000..31f0b46182 --- /dev/null +++ b/util/qtree.c @@ -0,0 +1,1390 @@ +/* + * GLIB - Library of useful routines for C programming + * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald + * + * SPDX-License-Identifier: LGPL-2.1-or-later + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +/* + * Modified by the GLib Team and others 1997-2000. See the AUTHORS + * file for a list of people on the GLib Team. See the ChangeLog + * files for a list of changes. These files are distributed with + * GLib at ftp://ftp.gtk.org/pub/gtk/. + */ + +/* + * MT safe + */ + +#include "qemu/osdep.h" +#include "qemu/qtree.h" + +/** + * SECTION:trees-binary + * @title: Balanced Binary Trees + * @short_description: a sorted collection of key/value pairs optimized + * for searching and traversing in order + * + * The #QTree structure and its associated functions provide a sorted + * collection of key/value pairs optimized for searching and traversing + * in order. This means that most of the operations (access, search, + * insertion, deletion, ...) on #QTree are O(log(n)) in average and O(n) + * in worst case for time complexity. But, note that maintaining a + * balanced sorted #QTree of n elements is done in time O(n log(n)). + * + * To create a new #QTree use q_tree_new(). + * + * To insert a key/value pair into a #QTree use q_tree_insert() + * (O(n log(n))). + * + * To remove a key/value pair use q_tree_remove() (O(n log(n))). + * + * To look up the value corresponding to a given key, use + * q_tree_lookup() and q_tree_lookup_extended(). + * + * To find out the number of nodes in a #QTree, use q_tree_nnodes(). To + * get the height of a #QTree, use q_tree_height(). + * + * To traverse a #QTree, calling a function for each node visited in + * the traversal, use q_tree_foreach(). + * + * To destroy a #QTree, use q_tree_destroy(). + **/ + +#define MAX_GTREE_HEIGHT 40 + +/** + * QTree: + * + * The QTree struct is an opaque data structure representing a + * [balanced binary tree][glib-Balanced-Binary-Trees]. It should be + * accessed only by using the following functions. 
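
The q_tree_* API deliberately mirrors GLib's GTree (qtree.c is a fork of it), so existing GTree call sites can switch by renaming; per the meson.build hunk earlier, it is only compiled in while GLib still backs GTree with GSlice. Basic usage:

    #include "qemu/osdep.h"
    #include "qemu/qtree.h"

    static gint cmp_int(gconstpointer a, gconstpointer b)
    {
        int ia = GPOINTER_TO_INT(a), ib = GPOINTER_TO_INT(b);

        return (ia > ib) - (ia < ib);
    }

    static void qtree_demo(void)
    {
        QTree *t = q_tree_new(cmp_int);

        q_tree_insert(t, GINT_TO_POINTER(42), (gpointer)"answer");
        g_assert(q_tree_nnodes(t) == 1);
        g_assert(g_str_equal(q_tree_lookup(t, GINT_TO_POINTER(42)), "answer"));

        q_tree_remove(t, GINT_TO_POINTER(42));
        q_tree_destroy(t);
    }
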
+ */ +struct _QTree { + QTreeNode *root; + GCompareDataFunc key_compare; + GDestroyNotify key_destroy_func; + GDestroyNotify value_destroy_func; + gpointer key_compare_data; + guint nnodes; + gint ref_count; +}; + +struct _QTreeNode { + gpointer key; /* key for this node */ + gpointer value; /* value stored at this node */ + QTreeNode *left; /* left subtree */ + QTreeNode *right; /* right subtree */ + gint8 balance; /* height (right) - height (left) */ + guint8 left_child; + guint8 right_child; +}; + + +static QTreeNode *q_tree_node_new(gpointer key, + gpointer value); +static QTreeNode *q_tree_insert_internal(QTree *tree, + gpointer key, + gpointer value, + gboolean replace); +static gboolean q_tree_remove_internal(QTree *tree, + gconstpointer key, + gboolean steal); +static QTreeNode *q_tree_node_balance(QTreeNode *node); +static QTreeNode *q_tree_find_node(QTree *tree, + gconstpointer key); +static QTreeNode *q_tree_node_search(QTreeNode *node, + GCompareFunc search_func, + gconstpointer data); +static QTreeNode *q_tree_node_rotate_left(QTreeNode *node); +static QTreeNode *q_tree_node_rotate_right(QTreeNode *node); +#ifdef Q_TREE_DEBUG +static void q_tree_node_check(QTreeNode *node); +#endif + +static QTreeNode* +q_tree_node_new(gpointer key, + gpointer value) +{ + QTreeNode *node = g_new(QTreeNode, 1); + + node->balance = 0; + node->left = NULL; + node->right = NULL; + node->left_child = FALSE; + node->right_child = FALSE; + node->key = key; + node->value = value; + + return node; +} + +/** + * q_tree_new: + * @key_compare_func: the function used to order the nodes in the #QTree. + * It should return values similar to the standard strcmp() function - + * 0 if the two arguments are equal, a negative value if the first argument + * comes before the second, or a positive value if the first argument comes + * after the second. + * + * Creates a new #QTree. + * + * Returns: a newly allocated #QTree + */ +QTree * +q_tree_new(GCompareFunc key_compare_func) +{ + g_return_val_if_fail(key_compare_func != NULL, NULL); + + return q_tree_new_full((GCompareDataFunc) key_compare_func, NULL, + NULL, NULL); +} + +/** + * q_tree_new_with_data: + * @key_compare_func: qsort()-style comparison function + * @key_compare_data: data to pass to comparison function + * + * Creates a new #QTree with a comparison function that accepts user data. + * See q_tree_new() for more details. + * + * Returns: a newly allocated #QTree + */ +QTree * +q_tree_new_with_data(GCompareDataFunc key_compare_func, + gpointer key_compare_data) +{ + g_return_val_if_fail(key_compare_func != NULL, NULL); + + return q_tree_new_full(key_compare_func, key_compare_data, + NULL, NULL); +} + +/** + * q_tree_new_full: + * @key_compare_func: qsort()-style comparison function + * @key_compare_data: data to pass to comparison function + * @key_destroy_func: a function to free the memory allocated for the key + * used when removing the entry from the #QTree or %NULL if you don't + * want to supply such a function + * @value_destroy_func: a function to free the memory allocated for the + * value used when removing the entry from the #QTree or %NULL if you + * don't want to supply such a function + * + * Creates a new #QTree like q_tree_new() and allows to specify functions + * to free the memory allocated for the key and value that get called when + * removing the entry from the #QTree. 
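
When keys and values are heap-allocated, q_tree_new_full() makes the tree own them, as described above; removal and destruction then invoke the destroy functions. Sketch:

    #include "qemu/osdep.h"
    #include "qemu/qtree.h"

    static gint cmp_str(gconstpointer a, gconstpointer b, gpointer data)
    {
        return g_strcmp0(a, b);   /* the extra data argument is unused here */
    }

    static void owned_tree_demo(void)
    {
        QTree *t = q_tree_new_full(cmp_str, NULL, g_free, g_free);

        q_tree_insert(t, g_strdup("cpu0"), g_strdup("running"));
        /* q_tree_destroy() calls g_free on every remaining key and value */
        q_tree_destroy(t);
    }
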
+ * + * Returns: a newly allocated #QTree + */ +QTree * +q_tree_new_full(GCompareDataFunc key_compare_func, + gpointer key_compare_data, + GDestroyNotify key_destroy_func, + GDestroyNotify value_destroy_func) +{ + QTree *tree; + + g_return_val_if_fail(key_compare_func != NULL, NULL); + + tree = g_new(QTree, 1); + tree->root = NULL; + tree->key_compare = key_compare_func; + tree->key_destroy_func = key_destroy_func; + tree->value_destroy_func = value_destroy_func; + tree->key_compare_data = key_compare_data; + tree->nnodes = 0; + tree->ref_count = 1; + + return tree; +} + +/** + * q_tree_node_first: + * @tree: a #QTree + * + * Returns the first in-order node of the tree, or %NULL + * for an empty tree. + * + * Returns: (nullable) (transfer none): the first node in the tree + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static QTreeNode * +q_tree_node_first(QTree *tree) +{ + QTreeNode *tmp; + + g_return_val_if_fail(tree != NULL, NULL); + + if (!tree->root) { + return NULL; + } + + tmp = tree->root; + + while (tmp->left_child) { + tmp = tmp->left; + } + + return tmp; +} + +/** + * q_tree_node_previous + * @node: a #QTree node + * + * Returns the previous in-order node of the tree, or %NULL + * if the passed node was already the first one. + * + * Returns: (nullable) (transfer none): the previous node in the tree + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static QTreeNode * +q_tree_node_previous(QTreeNode *node) +{ + QTreeNode *tmp; + + g_return_val_if_fail(node != NULL, NULL); + + tmp = node->left; + + if (node->left_child) { + while (tmp->right_child) { + tmp = tmp->right; + } + } + + return tmp; +} + +/** + * q_tree_node_next + * @node: a #QTree node + * + * Returns the next in-order node of the tree, or %NULL + * if the passed node was already the last one. + * + * Returns: (nullable) (transfer none): the next node in the tree + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static QTreeNode * +q_tree_node_next(QTreeNode *node) +{ + QTreeNode *tmp; + + g_return_val_if_fail(node != NULL, NULL); + + tmp = node->right; + + if (node->right_child) { + while (tmp->left_child) { + tmp = tmp->left; + } + } + + return tmp; +} + +/** + * q_tree_remove_all: + * @tree: a #QTree + * + * Removes all nodes from a #QTree and destroys their keys and values, + * then resets the #QTree’s root to %NULL. + * + * Since: 2.70 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static void QEMU_DISABLE_CFI +q_tree_remove_all(QTree *tree) +{ + QTreeNode *node; + QTreeNode *next; + + g_return_if_fail(tree != NULL); + + node = q_tree_node_first(tree); + + while (node) { + next = q_tree_node_next(node); + + if (tree->key_destroy_func) { + tree->key_destroy_func(node->key); + } + if (tree->value_destroy_func) { + tree->value_destroy_func(node->value); + } + g_free(node); + +#ifdef Q_TREE_DEBUG + g_assert(tree->nnodes > 0); + tree->nnodes--; +#endif + + node = next; + } + +#ifdef Q_TREE_DEBUG + g_assert(tree->nnodes == 0); +#endif + + tree->root = NULL; +#ifndef Q_TREE_DEBUG + tree->nnodes = 0; +#endif +} + +/** + * q_tree_ref: + * @tree: a #QTree + * + * Increments the reference count of @tree by one. + * + * It is safe to call this function from any thread. 
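
Only the reference count itself is atomic; the tree operations still need external synchronization. A short sketch of sharing a tree between two owners (t as created in the previous sketch):

    QTree *shared = q_tree_ref(t);   /* refcount 1 -> 2, safe from any thread */
    /* ... the second owner uses the tree, under the caller's locking ... */
    q_tree_unref(shared);            /* refcount 2 -> 1 */
    q_tree_unref(t);                 /* 1 -> 0: keys/values destroyed, tree freed */
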
+ * + * Returns: the passed in #QTree + * + * Since: 2.22 + */ +QTree * +q_tree_ref(QTree *tree) +{ + g_return_val_if_fail(tree != NULL, NULL); + + g_atomic_int_inc(&tree->ref_count); + + return tree; +} + +/** + * q_tree_unref: + * @tree: a #QTree + * + * Decrements the reference count of @tree by one. + * If the reference count drops to 0, all keys and values will + * be destroyed (if destroy functions were specified) and all + * memory allocated by @tree will be released. + * + * It is safe to call this function from any thread. + * + * Since: 2.22 + */ +void +q_tree_unref(QTree *tree) +{ + g_return_if_fail(tree != NULL); + + if (g_atomic_int_dec_and_test(&tree->ref_count)) { + q_tree_remove_all(tree); + g_free(tree); + } +} + +/** + * q_tree_destroy: + * @tree: a #QTree + * + * Removes all keys and values from the #QTree and decreases its + * reference count by one. If keys and/or values are dynamically + * allocated, you should either free them first or create the #QTree + * using q_tree_new_full(). In the latter case the destroy functions + * you supplied will be called on all keys and values before destroying + * the #QTree. + */ +void +q_tree_destroy(QTree *tree) +{ + g_return_if_fail(tree != NULL); + + q_tree_remove_all(tree); + q_tree_unref(tree); +} + +/** + * q_tree_insert_node: + * @tree: a #QTree + * @key: the key to insert + * @value: the value corresponding to the key + * + * Inserts a key/value pair into a #QTree. + * + * If the given key already exists in the #QTree its corresponding value + * is set to the new value. If you supplied a @value_destroy_func when + * creating the #QTree, the old value is freed using that function. If + * you supplied a @key_destroy_func when creating the #QTree, the passed + * key is freed using that function. + * + * The tree is automatically 'balanced' as new key/value pairs are added, + * so that the distance from the root to every leaf is as small as possible. + * The cost of maintaining a balanced tree while inserting new key/value + * result in a O(n log(n)) operation where most of the other operations + * are O(log(n)). + * + * Returns: (transfer none): the inserted (or set) node. + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static QTreeNode * +q_tree_insert_node(QTree *tree, + gpointer key, + gpointer value) +{ + QTreeNode *node; + + g_return_val_if_fail(tree != NULL, NULL); + + node = q_tree_insert_internal(tree, key, value, FALSE); + +#ifdef Q_TREE_DEBUG + q_tree_node_check(tree->root); +#endif + + return node; +} + +/** + * q_tree_insert: + * @tree: a #QTree + * @key: the key to insert + * @value: the value corresponding to the key + * + * Inserts a key/value pair into a #QTree. + * + * Inserts a new key and value into a #QTree as q_tree_insert_node() does, + * only this function does not return the inserted or set node. + */ +void +q_tree_insert(QTree *tree, + gpointer key, + gpointer value) +{ + q_tree_insert_node(tree, key, value); +} + +/** + * q_tree_replace_node: + * @tree: a #QTree + * @key: the key to insert + * @value: the value corresponding to the key + * + * Inserts a new key and value into a #QTree similar to q_tree_insert_node(). + * The difference is that if the key already exists in the #QTree, it gets + * replaced by the new key. If you supplied a @value_destroy_func when + * creating the #QTree, the old value is freed using that function. If you + * supplied a @key_destroy_func when creating the #QTree, the old key is + * freed using that function. 
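
As the documentation above notes, the only difference between q_tree_insert() and q_tree_replace() on a duplicate is which key gets freed. With the g_free/g_free tree from the earlier sketch:

    q_tree_insert(t, g_strdup("k"), g_strdup("v1"));
    q_tree_insert(t, g_strdup("k"), g_strdup("v2"));
    /* duplicate: old value "v1" freed, the *new* key freed, old key kept */

    q_tree_replace(t, g_strdup("k"), g_strdup("v3"));
    /* duplicate: old value "v2" freed, the *old* key freed, new key kept */
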
+ * + * The tree is automatically 'balanced' as new key/value pairs are added, + * so that the distance from the root to every leaf is as small as possible. + * + * Returns: (transfer none): the inserted (or set) node. + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static QTreeNode * +q_tree_replace_node(QTree *tree, + gpointer key, + gpointer value) +{ + QTreeNode *node; + + g_return_val_if_fail(tree != NULL, NULL); + + node = q_tree_insert_internal(tree, key, value, TRUE); + +#ifdef Q_TREE_DEBUG + q_tree_node_check(tree->root); +#endif + + return node; +} + +/** + * q_tree_replace: + * @tree: a #QTree + * @key: the key to insert + * @value: the value corresponding to the key + * + * Inserts a new key and value into a #QTree as q_tree_replace_node() does, + * only this function does not return the inserted or set node. + */ +void +q_tree_replace(QTree *tree, + gpointer key, + gpointer value) +{ + q_tree_replace_node(tree, key, value); +} + +/* internal insert routine */ +static QTreeNode * QEMU_DISABLE_CFI +q_tree_insert_internal(QTree *tree, + gpointer key, + gpointer value, + gboolean replace) +{ + QTreeNode *node, *retnode; + QTreeNode *path[MAX_GTREE_HEIGHT]; + int idx; + + g_return_val_if_fail(tree != NULL, NULL); + + if (!tree->root) { + tree->root = q_tree_node_new(key, value); + tree->nnodes++; + return tree->root; + } + + idx = 0; + path[idx++] = NULL; + node = tree->root; + + while (1) { + int cmp = tree->key_compare(key, node->key, tree->key_compare_data); + + if (cmp == 0) { + if (tree->value_destroy_func) { + tree->value_destroy_func(node->value); + } + + node->value = value; + + if (replace) { + if (tree->key_destroy_func) { + tree->key_destroy_func(node->key); + } + + node->key = key; + } else { + /* free the passed key */ + if (tree->key_destroy_func) { + tree->key_destroy_func(key); + } + } + + return node; + } else if (cmp < 0) { + if (node->left_child) { + path[idx++] = node; + node = node->left; + } else { + QTreeNode *child = q_tree_node_new(key, value); + + child->left = node->left; + child->right = node; + node->left = child; + node->left_child = TRUE; + node->balance -= 1; + + tree->nnodes++; + + retnode = child; + break; + } + } else { + if (node->right_child) { + path[idx++] = node; + node = node->right; + } else { + QTreeNode *child = q_tree_node_new(key, value); + + child->right = node->right; + child->left = node; + node->right = child; + node->right_child = TRUE; + node->balance += 1; + + tree->nnodes++; + + retnode = child; + break; + } + } + } + + /* + * Restore balance. This is the goodness of a non-recursive + * implementation, when we are done with balancing we 'break' + * the loop and we are done. + */ + while (1) { + QTreeNode *bparent = path[--idx]; + gboolean left_node = (bparent && node == bparent->left); + g_assert(!bparent || bparent->left == node || bparent->right == node); + + if (node->balance < -1 || node->balance > 1) { + node = q_tree_node_balance(node); + if (bparent == NULL) { + tree->root = node; + } else if (left_node) { + bparent->left = node; + } else { + bparent->right = node; + } + } + + if (node->balance == 0 || bparent == NULL) { + break; + } + + if (left_node) { + bparent->balance -= 1; + } else { + bparent->balance += 1; + } + + node = bparent; + } + + return retnode; +} + +/** + * q_tree_remove: + * @tree: a #QTree + * @key: the key to remove + * + * Removes a key/value pair from a #QTree. 
+ * + * If the #QTree was created using q_tree_new_full(), the key and value + * are freed using the supplied destroy functions, otherwise you have to + * make sure that any dynamically allocated values are freed yourself. + * If the key does not exist in the #QTree, the function does nothing. + * + * The cost of maintaining a balanced tree while removing a key/value + * result in a O(n log(n)) operation where most of the other operations + * are O(log(n)). + * + * Returns: %TRUE if the key was found (prior to 2.8, this function + * returned nothing) + */ +gboolean +q_tree_remove(QTree *tree, + gconstpointer key) +{ + gboolean removed; + + g_return_val_if_fail(tree != NULL, FALSE); + + removed = q_tree_remove_internal(tree, key, FALSE); + +#ifdef Q_TREE_DEBUG + q_tree_node_check(tree->root); +#endif + + return removed; +} + +/** + * q_tree_steal: + * @tree: a #QTree + * @key: the key to remove + * + * Removes a key and its associated value from a #QTree without calling + * the key and value destroy functions. + * + * If the key does not exist in the #QTree, the function does nothing. + * + * Returns: %TRUE if the key was found (prior to 2.8, this function + * returned nothing) + */ +gboolean +q_tree_steal(QTree *tree, + gconstpointer key) +{ + gboolean removed; + + g_return_val_if_fail(tree != NULL, FALSE); + + removed = q_tree_remove_internal(tree, key, TRUE); + +#ifdef Q_TREE_DEBUG + q_tree_node_check(tree->root); +#endif + + return removed; +} + +/* internal remove routine */ +static gboolean QEMU_DISABLE_CFI +q_tree_remove_internal(QTree *tree, + gconstpointer key, + gboolean steal) +{ + QTreeNode *node, *parent, *balance; + QTreeNode *path[MAX_GTREE_HEIGHT]; + int idx; + gboolean left_node; + + g_return_val_if_fail(tree != NULL, FALSE); + + if (!tree->root) { + return FALSE; + } + + idx = 0; + path[idx++] = NULL; + node = tree->root; + + while (1) { + int cmp = tree->key_compare(key, node->key, tree->key_compare_data); + + if (cmp == 0) { + break; + } else if (cmp < 0) { + if (!node->left_child) { + return FALSE; + } + + path[idx++] = node; + node = node->left; + } else { + if (!node->right_child) { + return FALSE; + } + + path[idx++] = node; + node = node->right; + } + } + + /* + * The following code is almost equal to q_tree_remove_node, + * except that we do not have to call q_tree_node_parent. + */ + balance = parent = path[--idx]; + g_assert(!parent || parent->left == node || parent->right == node); + left_node = (parent && node == parent->left); + + if (!node->left_child) { + if (!node->right_child) { + if (!parent) { + tree->root = NULL; + } else if (left_node) { + parent->left_child = FALSE; + parent->left = node->left; + parent->balance += 1; + } else { + parent->right_child = FALSE; + parent->right = node->right; + parent->balance -= 1; + } + } else { + /* node has a right child */ + QTreeNode *tmp = q_tree_node_next(node); + tmp->left = node->left; + + if (!parent) { + tree->root = node->right; + } else if (left_node) { + parent->left = node->right; + parent->balance += 1; + } else { + parent->right = node->right; + parent->balance -= 1; + } + } + } else { + /* node has a left child */ + if (!node->right_child) { + QTreeNode *tmp = q_tree_node_previous(node); + tmp->right = node->right; + + if (parent == NULL) { + tree->root = node->left; + } else if (left_node) { + parent->left = node->left; + parent->balance += 1; + } else { + parent->right = node->left; + parent->balance -= 1; + } + } else { + /* node has a both children (pant, pant!) 
*/ + QTreeNode *prev = node->left; + QTreeNode *next = node->right; + QTreeNode *nextp = node; + int old_idx = idx + 1; + idx++; + + /* path[idx] == parent */ + /* find the immediately next node (and its parent) */ + while (next->left_child) { + path[++idx] = nextp = next; + next = next->left; + } + + path[old_idx] = next; + balance = path[idx]; + + /* remove 'next' from the tree */ + if (nextp != node) { + if (next->right_child) { + nextp->left = next->right; + } else { + nextp->left_child = FALSE; + } + nextp->balance += 1; + + next->right_child = TRUE; + next->right = node->right; + } else { + node->balance -= 1; + } + + /* set the prev to point to the right place */ + while (prev->right_child) { + prev = prev->right; + } + prev->right = next; + + /* prepare 'next' to replace 'node' */ + next->left_child = TRUE; + next->left = node->left; + next->balance = node->balance; + + if (!parent) { + tree->root = next; + } else if (left_node) { + parent->left = next; + } else { + parent->right = next; + } + } + } + + /* restore balance */ + if (balance) { + while (1) { + QTreeNode *bparent = path[--idx]; + g_assert(!bparent || + bparent->left == balance || + bparent->right == balance); + left_node = (bparent && balance == bparent->left); + + if (balance->balance < -1 || balance->balance > 1) { + balance = q_tree_node_balance(balance); + if (!bparent) { + tree->root = balance; + } else if (left_node) { + bparent->left = balance; + } else { + bparent->right = balance; + } + } + + if (balance->balance != 0 || !bparent) { + break; + } + + if (left_node) { + bparent->balance += 1; + } else { + bparent->balance -= 1; + } + + balance = bparent; + } + } + + if (!steal) { + if (tree->key_destroy_func) { + tree->key_destroy_func(node->key); + } + if (tree->value_destroy_func) { + tree->value_destroy_func(node->value); + } + } + + g_free(node); + + tree->nnodes--; + + return TRUE; +} + +/** + * q_tree_lookup_node: + * @tree: a #QTree + * @key: the key to look up + * + * Gets the tree node corresponding to the given key. Since a #QTree is + * automatically balanced as key/value pairs are added, key lookup + * is O(log n) (where n is the number of key/value pairs in the tree). + * + * Returns: (nullable) (transfer none): the tree node corresponding to + * the key, or %NULL if the key was not found + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static QTreeNode * +q_tree_lookup_node(QTree *tree, + gconstpointer key) +{ + g_return_val_if_fail(tree != NULL, NULL); + + return q_tree_find_node(tree, key); +} + +/** + * q_tree_lookup: + * @tree: a #QTree + * @key: the key to look up + * + * Gets the value corresponding to the given key. Since a #QTree is + * automatically balanced as key/value pairs are added, key lookup + * is O(log n) (where n is the number of key/value pairs in the tree). + * + * Returns: the value corresponding to the key, or %NULL + * if the key was not found + */ +gpointer +q_tree_lookup(QTree *tree, + gconstpointer key) +{ + QTreeNode *node; + + node = q_tree_lookup_node(tree, key); + + return node ? node->value : NULL; +} + +/** + * q_tree_lookup_extended: + * @tree: a #QTree + * @lookup_key: the key to look up + * @orig_key: (out) (optional) (nullable): returns the original key + * @value: (out) (optional) (nullable): returns the value associated with + * the key + * + * Looks up a key in the #QTree, returning the original key and the + * associated value. 
This is useful if you need to free the memory + * allocated for the original key, for example before calling + * q_tree_remove(). + * + * Returns: %TRUE if the key was found in the #QTree + */ +gboolean +q_tree_lookup_extended(QTree *tree, + gconstpointer lookup_key, + gpointer *orig_key, + gpointer *value) +{ + QTreeNode *node; + + g_return_val_if_fail(tree != NULL, FALSE); + + node = q_tree_find_node(tree, lookup_key); + + if (node) { + if (orig_key) { + *orig_key = node->key; + } + if (value) { + *value = node->value; + } + return TRUE; + } else { + return FALSE; + } +} + +/** + * q_tree_foreach: + * @tree: a #QTree + * @func: the function to call for each node visited. + * If this function returns %TRUE, the traversal is stopped. + * @user_data: user data to pass to the function + * + * Calls the given function for each of the key/value pairs in the #QTree. + * The function is passed the key and value of each pair, and the given + * @data parameter. The tree is traversed in sorted order. + * + * The tree may not be modified while iterating over it (you can't + * add/remove items). To remove all items matching a predicate, you need + * to add each item to a list in your #GTraverseFunc as you walk over + * the tree, then walk the list and remove each item. + */ +void +q_tree_foreach(QTree *tree, + GTraverseFunc func, + gpointer user_data) +{ + QTreeNode *node; + + g_return_if_fail(tree != NULL); + + if (!tree->root) { + return; + } + + node = q_tree_node_first(tree); + + while (node) { + if ((*func)(node->key, node->value, user_data)) { + break; + } + + node = q_tree_node_next(node); + } +} + +/** + * q_tree_search_node: + * @tree: a #QTree + * @search_func: a function used to search the #QTree + * @user_data: the data passed as the second argument to @search_func + * + * Searches a #QTree using @search_func. + * + * The @search_func is called with a pointer to the key of a key/value + * pair in the tree, and the passed in @user_data. If @search_func returns + * 0 for a key/value pair, then the corresponding node is returned as + * the result of q_tree_search(). If @search_func returns -1, searching + * will proceed among the key/value pairs that have a smaller key; if + * @search_func returns 1, searching will proceed among the key/value + * pairs that have a larger key. + * + * Returns: (nullable) (transfer none): the node corresponding to the + * found key, or %NULL if the key was not found + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static QTreeNode * +q_tree_search_node(QTree *tree, + GCompareFunc search_func, + gconstpointer user_data) +{ + g_return_val_if_fail(tree != NULL, NULL); + + if (!tree->root) { + return NULL; + } + + return q_tree_node_search(tree->root, search_func, user_data); +} + +/** + * q_tree_search: + * @tree: a #QTree + * @search_func: a function used to search the #QTree + * @user_data: the data passed as the second argument to @search_func + * + * Searches a #QTree using @search_func. + * + * The @search_func is called with a pointer to the key of a key/value + * pair in the tree, and the passed in @user_data. If @search_func returns + * 0 for a key/value pair, then the corresponding value is returned as + * the result of q_tree_search(). If @search_func returns -1, searching + * will proceed among the key/value pairs that have a smaller key; if + * @search_func returns 1, searching will proceed among the key/value + * pairs that have a larger key. 
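
A detail that is easy to get backwards: @search_func receives the node's key as its first argument and @user_data as the second, and must return negative to continue among smaller keys. A lookup-style search therefore compares target-to-key, not key-to-target. Sketch for a tree ordered by g_strcmp0:

    static gint search_for(gconstpointer node_key, gconstpointer target)
    {
        /* negative -> descend toward smaller keys, positive -> larger */
        return g_strcmp0(target, node_key);
    }

    static gpointer find_cpu0(QTree *t)
    {
        /* equivalent to q_tree_lookup(t, "cpu0") for this ordering */
        return q_tree_search(t, search_for, "cpu0");
    }
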
+ * + * Returns: the value corresponding to the found key, or %NULL + * if the key was not found + */ +gpointer +q_tree_search(QTree *tree, + GCompareFunc search_func, + gconstpointer user_data) +{ + QTreeNode *node; + + node = q_tree_search_node(tree, search_func, user_data); + + return node ? node->value : NULL; +} + +/** + * q_tree_height: + * @tree: a #QTree + * + * Gets the height of a #QTree. + * + * If the #QTree contains no nodes, the height is 0. + * If the #QTree contains only one root node the height is 1. + * If the root node has children the height is 2, etc. + * + * Returns: the height of @tree + */ +gint +q_tree_height(QTree *tree) +{ + QTreeNode *node; + gint height; + + g_return_val_if_fail(tree != NULL, 0); + + if (!tree->root) { + return 0; + } + + height = 0; + node = tree->root; + + while (1) { + height += 1 + MAX(node->balance, 0); + + if (!node->left_child) { + return height; + } + + node = node->left; + } +} + +/** + * q_tree_nnodes: + * @tree: a #QTree + * + * Gets the number of nodes in a #QTree. + * + * Returns: the number of nodes in @tree + */ +gint +q_tree_nnodes(QTree *tree) +{ + g_return_val_if_fail(tree != NULL, 0); + + return tree->nnodes; +} + +static QTreeNode * +q_tree_node_balance(QTreeNode *node) +{ + if (node->balance < -1) { + if (node->left->balance > 0) { + node->left = q_tree_node_rotate_left(node->left); + } + node = q_tree_node_rotate_right(node); + } else if (node->balance > 1) { + if (node->right->balance < 0) { + node->right = q_tree_node_rotate_right(node->right); + } + node = q_tree_node_rotate_left(node); + } + + return node; +} + +static QTreeNode * QEMU_DISABLE_CFI +q_tree_find_node(QTree *tree, + gconstpointer key) +{ + QTreeNode *node; + gint cmp; + + node = tree->root; + if (!node) { + return NULL; + } + + while (1) { + cmp = tree->key_compare(key, node->key, tree->key_compare_data); + if (cmp == 0) { + return node; + } else if (cmp < 0) { + if (!node->left_child) { + return NULL; + } + + node = node->left; + } else { + if (!node->right_child) { + return NULL; + } + + node = node->right; + } + } +} + +static QTreeNode * +q_tree_node_search(QTreeNode *node, + GCompareFunc search_func, + gconstpointer data) +{ + gint dir; + + if (!node) { + return NULL; + } + + while (1) { + dir = (*search_func)(node->key, data); + if (dir == 0) { + return node; + } else if (dir < 0) { + if (!node->left_child) { + return NULL; + } + + node = node->left; + } else { + if (!node->right_child) { + return NULL; + } + + node = node->right; + } + } +} + +static QTreeNode * +q_tree_node_rotate_left(QTreeNode *node) +{ + QTreeNode *right; + gint a_bal; + gint b_bal; + + right = node->right; + + if (right->left_child) { + node->right = right->left; + } else { + node->right_child = FALSE; + right->left_child = TRUE; + } + right->left = node; + + a_bal = node->balance; + b_bal = right->balance; + + if (b_bal <= 0) { + if (a_bal >= 1) { + right->balance = b_bal - 1; + } else { + right->balance = a_bal + b_bal - 2; + } + node->balance = a_bal - 1; + } else { + if (a_bal <= b_bal) { + right->balance = a_bal - 2; + } else { + right->balance = b_bal - 1; + } + node->balance = a_bal - b_bal - 1; + } + + return right; +} + +static QTreeNode * +q_tree_node_rotate_right(QTreeNode *node) +{ + QTreeNode *left; + gint a_bal; + gint b_bal; + + left = node->left; + + if (left->right_child) { + node->left = left->right; + } else { + node->left_child = FALSE; + left->right_child = TRUE; + } + left->right = node; + + a_bal = node->balance; + b_bal = left->balance; + + if (b_bal <= 
diff --git a/util/rcu.c b/util/rcu.c
index b6d6c71cff..30a7e22026 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -83,12 +83,6 @@ static void wait_for_readers(void)
      */
     qemu_event_reset(&rcu_gp_event);
 
-    /* Instead of using qatomic_mb_set for index->waiting, and
-     * qatomic_mb_read for index->ctr, memory barriers are placed
-     * manually since writes to different threads are independent.
-     * qemu_event_reset has acquire semantics, so no memory barrier
-     * is needed here.
-     */
    QLIST_FOREACH(index, &registry, node) {
         qatomic_set(&index->waiting, true);
     }
@@ -96,6 +90,10 @@ static void wait_for_readers(void)
     /* Here, order the stores to index->waiting before the loads of
      * index->ctr.  Pairs with smp_mb_placeholder() in rcu_read_unlock(),
      * ensuring that the loads of index->ctr are sequentially consistent.
+     *
+     * If this is the last iteration, this barrier also prevents
+     * frees from seeping upwards, and orders the two wait phases
+     * on architectures with 32-bit longs; see synchronize_rcu().
      */
     smp_mb_global();
 
@@ -104,7 +102,7 @@ static void wait_for_readers(void)
                 QLIST_REMOVE(index, node);
                 QLIST_INSERT_HEAD(&qsreaders, index, node);
 
-                /* No need for mb_set here, worst of all we
+                /* No need for memory barriers here, worst of all we
                  * get some extra futex wakeups.
                  */
                 qatomic_set(&index->waiting, false);
@@ -149,26 +147,26 @@ void synchronize_rcu(void)
 
     /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
      * Pairs with smp_mb_placeholder() in rcu_read_lock().
+     *
+     * Also orders write to RCU-protected pointers before
+     * write to rcu_gp_ctr.
      */
     smp_mb_global();
 
     QEMU_LOCK_GUARD(&rcu_registry_lock);
     if (!QLIST_EMPTY(&registry)) {
-        /* In either case, the qatomic_mb_set below blocks stores that free
-         * old RCU-protected pointers.
-         */
         if (sizeof(rcu_gp_ctr) < 8) {
             /* For architectures with 32-bit longs, a two-subphases algorithm
              * ensures we do not encounter overflow bugs.
              *
             * Switch parity: 0 -> 1, 1 -> 0.
              */
-            qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+            qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
             wait_for_readers();
-            qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+            qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
         } else {
             /* Increment current grace period.  */
-            qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+            qatomic_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
         }
 
         wait_for_readers();
@@ -191,8 +189,22 @@ static void enqueue(struct rcu_head *node)
     struct rcu_head **old_tail;
 
     node->next = NULL;
+
+    /*
+     * Make this node the tail of the list.  The node will be
+     * used by further enqueue operations, but it will not
+     * be dequeued yet...
+     */
     old_tail = qatomic_xchg(&tail, &node->next);
-    qatomic_mb_set(old_tail, node);
+
+    /*
+     * ... until it is pointed to from another item in the list.
+     * In the meantime, try_dequeue() will find a NULL next pointer
+     * and loop.
+     *
+     * Synchronizes with qatomic_load_acquire() in try_dequeue().
+     */
+    qatomic_store_release(old_tail, node);
 }
 
 static struct rcu_head *try_dequeue(void)
@@ -200,26 +212,31 @@ static struct rcu_head *try_dequeue(void)
     struct rcu_head *node, *next;
 
 retry:
-    /* Test for an empty list, which we do not expect.  Note that for
+    /* Head is only written by this thread, so no need for barriers.  */
+    node = head;
+
+    /*
+     * If the head node has NULL in its next pointer, the value is
+     * wrong and we need to wait until its enqueuer finishes the update.
+     */
+    next = qatomic_load_acquire(&node->next);
+    if (!next) {
+        return NULL;
+    }
+
+    /*
+     * Test for an empty list, which we do not expect.  Note that for
      * the consumer head and tail are always consistent.  The head
      * is consistent because only the consumer reads/writes it.
      * The tail, because it is the first step in the enqueuing.
      * It is only the next pointers that might be inconsistent.
      */
-    if (head == &dummy && qatomic_mb_read(&tail) == &dummy.next) {
+    if (head == &dummy && qatomic_read(&tail) == &dummy.next) {
         abort();
     }
 
-    /* If the head node has NULL in its next pointer, the value is
-     * wrong and we need to wait until its enqueuer finishes the update.
-     */
-    node = head;
-    next = qatomic_mb_read(&head->next);
-    if (!next) {
-        return NULL;
-    }
-
-    /* Since we are the sole consumer, and we excluded the empty case
+    /*
+     * Since we are the sole consumer, and we excluded the empty case
      * above, the queue will always have at least two nodes: the
      * dummy node, and the one being removed.  So we do not need to update
      * the tail pointer.
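The enqueue()/try_dequeue() pair above is a dummy-node, single-consumer queue: the xchg claims the tail slot first, and only the release store makes the node reachable, which is why the consumer treats a NULL next pointer as "enqueuer still in flight". A reduced standalone sketch of the same release/acquire handoff in C11 atomics — names are illustrative, and the dummy-node recycling that rcu.c layers on top is omitted:

    #include <stdatomic.h>
    #include <stddef.h>

    struct qnode {
        struct qnode *_Atomic next;
    };

    static struct qnode dummy;
    static struct qnode *_Atomic *_Atomic tail = &dummy.next;
    static struct qnode *head = &dummy;       /* consumer-private */

    void push(struct qnode *n)                /* any thread */
    {
        atomic_store_explicit(&n->next, NULL, memory_order_relaxed);
        /* Claim the tail slot; n is not yet reachable from head. */
        struct qnode *_Atomic *prev = atomic_exchange(&tail, &n->next);
        /* Publish n; pairs with the acquire load in pop(). */
        atomic_store_explicit(prev, n, memory_order_release);
    }

    struct qnode *pop(void)                   /* single consumer only */
    {
        struct qnode *next =
            atomic_load_explicit(&head->next, memory_order_acquire);
        if (!next) {
            return NULL;      /* empty, or an enqueuer is mid-update */
        }
        struct qnode *node = head;
        head = next;
        return node;          /* old head; caller recycles it as a stub */
    }
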
diff --git a/util/stats64.c b/util/stats64.c
index 897613c949..09736014ec 100644
--- a/util/stats64.c
+++ b/util/stats64.c
@@ -57,6 +57,17 @@ uint64_t stat64_get(const Stat64 *s)
     return ((uint64_t)high << 32) | low;
 }
 
+void stat64_set(Stat64 *s, uint64_t val)
+{
+    while (!stat64_wrtrylock(s)) {
+        cpu_relax();
+    }
+
+    qatomic_set(&s->high, val >> 32);
+    qatomic_set(&s->low, val);
+    stat64_wrunlock(s);
+}
+
 bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
 {
     uint32_t old;
diff --git a/util/systemd.c b/util/systemd.c
index 5bcac9b401..ced518f771 100644
--- a/util/systemd.c
+++ b/util/systemd.c
@@ -51,6 +51,7 @@ unsigned int check_socket_activation(void)
     /* So these are not passed to any child processes we might start.  */
     unsetenv("LISTEN_FDS");
     unsetenv("LISTEN_PID");
+    unsetenv("LISTEN_FDNAMES");
 
     /* So the file descriptors don't leak into child processes.  */
     for (i = 0; i < nr_fds; ++i) {
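stat64_set() leans on the pre-existing 32-bit fallback, where the high and low halves live in separate words and a writer lock keeps readers from ever observing a torn pair. A toy model of that shape, assuming a cmpxchg-based lock — MiniStat64 and its fields are illustrative; the real protocol lives in stat64_wrtrylock()/stat64_wrunlock():

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"
    #include "qemu/processor.h"   /* cpu_relax() */

    /* Illustrative model only, not the real Stat64 layout. */
    typedef struct {
        uint32_t low, high;
        uint32_t lock;            /* 0 = unlocked */
    } MiniStat64;

    static void mini_stat64_set(MiniStat64 *s, uint64_t val)
    {
        /* Same shape as stat64_set(): spin for the writer lock... */
        while (qatomic_cmpxchg(&s->lock, 0, 1) != 0) {
            cpu_relax();
        }
        /*
         * ...then the two halves can be stored in either order, because
         * readers retry while the lock is held and never see a torn
         * high/low pair.
         */
        qatomic_set(&s->high, val >> 32);
        qatomic_set(&s->low, (uint32_t)val);
        /* Release so the half-stores cannot sink past the unlock. */
        qatomic_store_release(&s->lock, 0);
    }
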
diff --git a/util/thread-pool.c b/util/thread-pool.c
index 31113b5860..0d97888df0 100644
--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -48,7 +48,7 @@ struct ThreadPoolElement {
     /* Access to this list is protected by lock.  */
     QTAILQ_ENTRY(ThreadPoolElement) reqs;
 
-    /* Access to this list is protected by the global mutex.  */
+    /* This list is only written by the thread pool's mother thread.  */
     QLIST_ENTRY(ThreadPoolElement) all;
 };
 
@@ -175,7 +175,6 @@ static void thread_pool_completion_bh(void *opaque)
     ThreadPool *pool = opaque;
     ThreadPoolElement *elem, *next;
 
-    aio_context_acquire(pool->ctx);
 restart:
     QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
         if (elem->state != THREAD_DONE) {
@@ -195,9 +194,7 @@ restart:
              */
             qemu_bh_schedule(pool->completion_bh);
 
-            aio_context_release(pool->ctx);
             elem->common.cb(elem->common.opaque, elem->ret);
-            aio_context_acquire(pool->ctx);
 
             /* We can safely cancel the completion_bh here regardless of someone
              * else having scheduled it meanwhile because we reenter the
@@ -211,7 +208,6 @@ restart:
             qemu_aio_unref(elem);
         }
     }
-    aio_context_release(pool->ctx);
 }
 
 static void thread_pool_cancel(BlockAIOCB *acb)
@@ -245,11 +241,15 @@ static const AIOCBInfo thread_pool_aiocb_info = {
     .get_aio_context    = thread_pool_get_aio_context,
 };
 
-BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
-                                   ThreadPoolFunc *func, void *arg,
-                                   BlockCompletionFunc *cb, void *opaque)
+BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
+                                   BlockCompletionFunc *cb, void *opaque)
 {
     ThreadPoolElement *req;
+    AioContext *ctx = qemu_get_current_aio_context();
+    ThreadPool *pool = aio_get_thread_pool(ctx);
+
+    /* Assert that the thread submitting work is the same one running the pool */
+    assert(pool->ctx == qemu_get_current_aio_context());
 
     req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque);
     req->func = func;
@@ -284,19 +284,18 @@ static void thread_pool_co_cb(void *opaque, int ret)
     aio_co_wake(co->co);
 }
 
-int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func,
-                                       void *arg)
+int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg)
 {
     ThreadPoolCo tpc = { .co = qemu_coroutine_self(), .ret = -EINPROGRESS };
     assert(qemu_in_coroutine());
-    thread_pool_submit_aio(pool, func, arg, thread_pool_co_cb, &tpc);
+    thread_pool_submit_aio(func, arg, thread_pool_co_cb, &tpc);
     qemu_coroutine_yield();
     return tpc.ret;
 }
 
-void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg)
+void thread_pool_submit(ThreadPoolFunc *func, void *arg)
 {
-    thread_pool_submit_aio(pool, func, arg, NULL, NULL);
+    thread_pool_submit_aio(func, arg, NULL, NULL);
 }
 
 void thread_pool_update_params(ThreadPool *pool, AioContext *ctx)
diff --git a/util/trace-events b/util/trace-events
index 16f78d8fe5..3f7e766683 100644
--- a/util/trace-events
+++ b/util/trace-events
@@ -11,6 +11,7 @@ poll_remove(void *ctx, void *node, int fd) "ctx %p node %p fd %d"
 # async.c
 aio_co_schedule(void *ctx, void *co) "ctx %p co %p"
 aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p"
+reentrant_aio(void *ctx, const char *name) "ctx %p name %s"
 
 # thread-pool.c
 thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
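After this change the pool argument is implicit: work goes to the thread pool of the caller's current AioContext, and the new assertion rejects cross-context submission. A sketch of the resulting caller pattern — WorkItem, do_blocking_io() and submit_work() are hypothetical:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"
    #include "block/thread-pool.h"

    struct WorkItem;                            /* hypothetical request */
    int do_blocking_io(struct WorkItem *item);  /* hypothetical, blocking */

    /* Runs in a pool worker thread; must not touch AioContext state. */
    static int worker_fn(void *opaque)
    {
        return do_blocking_io(opaque);
    }

    /*
     * Must run in the AioContext that owns the pool, per the assertion
     * in thread_pool_submit_aio(); no ThreadPool handle is needed.
     */
    static int coroutine_fn submit_work(struct WorkItem *item)
    {
        return thread_pool_submit_co(worker_fn, item);
    }
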
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index 2d8af38f88..f8bab46c68 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -106,15 +106,17 @@ struct QEMUVFIOState {
  */
 static char *sysfs_find_group_file(const char *device, Error **errp)
 {
+    g_autoptr(GError) gerr = NULL;
     char *sysfs_link;
     char *sysfs_group;
     char *p;
     char *path = NULL;
 
     sysfs_link = g_strdup_printf("/sys/bus/pci/devices/%s/iommu_group", device);
-    sysfs_group = g_malloc0(PATH_MAX);
-    if (readlink(sysfs_link, sysfs_group, PATH_MAX - 1) == -1) {
-        error_setg_errno(errp, errno, "Failed to find iommu group sysfs path");
+    sysfs_group = g_file_read_link(sysfs_link, &gerr);
+    if (gerr) {
+        error_setg(errp, "Failed to find iommu group sysfs path: %s",
+                   gerr->message);
         goto out;
     }
     p = strrchr(sysfs_group, '/');
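The vfio-helpers change swaps a fixed PATH_MAX readlink() buffer for GLib's g_file_read_link(), with g_autoptr() freeing the GError on every exit path. The same idiom in isolation — read_link_example() and its path are illustrative:

    #include <glib.h>

    /*
     * Resolve a symlink via GLib; g_autoptr releases the GError on
     * every return path.  Returns a newly allocated target or NULL.
     */
    static char *read_link_example(const char *link_path)
    {
        g_autoptr(GError) gerr = NULL;
        char *target = g_file_read_link(link_path, &gerr);

        if (gerr) {
            g_printerr("readlink %s: %s\n", link_path, gerr->message);
            return NULL;
        }
        return target;   /* caller frees with g_free() */
    }
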
diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c
index 40f36ea214..cd17fb5326 100644
--- a/util/vhost-user-server.c
+++ b/util/vhost-user-server.c
@@ -75,20 +75,26 @@ static void panic_cb(VuDev *vu_dev, const char *buf)
     error_report("vu_panic: %s", buf);
 }
 
-void vhost_user_server_ref(VuServer *server)
+void vhost_user_server_inc_in_flight(VuServer *server)
 {
     assert(!server->wait_idle);
-    server->refcount++;
+    qatomic_inc(&server->in_flight);
 }
 
-void vhost_user_server_unref(VuServer *server)
+void vhost_user_server_dec_in_flight(VuServer *server)
 {
-    server->refcount--;
-    if (server->wait_idle && !server->refcount) {
-        aio_co_wake(server->co_trip);
+    if (qatomic_fetch_dec(&server->in_flight) == 1) {
+        if (server->wait_idle) {
+            aio_co_wake(server->co_trip);
+        }
     }
 }
 
+bool vhost_user_server_has_in_flight(VuServer *server)
+{
+    return qatomic_load_acquire(&server->in_flight) > 0;
+}
+
 static bool coroutine_fn
 vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg)
 {
@@ -192,13 +198,13 @@ static coroutine_fn void vu_client_trip(void *opaque)
         /* Keep running */
     }
 
-    if (server->refcount) {
+    if (vhost_user_server_has_in_flight(server)) {
         /* Wait for requests to complete before we can unmap the memory */
         server->wait_idle = true;
         qemu_coroutine_yield();
         server->wait_idle = false;
     }
-    assert(server->refcount == 0);
+    assert(!vhost_user_server_has_in_flight(server));
 
     vu_deinit(vu_dev);
 
@@ -272,7 +278,7 @@ set_watch(VuDev *vu_dev, int fd, int vu_evt,
         vu_fd_watch->fd = fd;
         vu_fd_watch->cb = cb;
         qemu_socket_set_nonblock(fd);
-        aio_set_fd_handler(server->ioc->ctx, fd, true, kick_handler,
+        aio_set_fd_handler(server->ioc->ctx, fd, kick_handler,
                            NULL, NULL, NULL, vu_fd_watch);
         vu_fd_watch->vu_dev = vu_dev;
         vu_fd_watch->pvt = pvt;
@@ -293,8 +299,7 @@ static void remove_watch(VuDev *vu_dev, int fd)
     if (!vu_fd_watch) {
         return;
     }
-    aio_set_fd_handler(server->ioc->ctx, fd, true,
-                       NULL, NULL, NULL, NULL, NULL);
+    aio_set_fd_handler(server->ioc->ctx, fd, NULL, NULL, NULL, NULL, NULL);
 
     QTAILQ_REMOVE(&server->vu_fd_watches, vu_fd_watch, next);
     g_free(vu_fd_watch);
@@ -346,10 +351,9 @@ static void vu_accept(QIONetListener *listener, QIOChannelSocket *sioc,
     aio_context_release(server->ctx);
 }
 
+/* server->ctx acquired by caller */
 void vhost_user_server_stop(VuServer *server)
 {
-    aio_context_acquire(server->ctx);
-
     qemu_bh_delete(server->restart_listener_bh);
     server->restart_listener_bh = NULL;
 
@@ -357,7 +361,7 @@ void vhost_user_server_stop(VuServer *server)
         VuFdWatch *vu_fd_watch;
 
         QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
-            aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
+            aio_set_fd_handler(server->ctx, vu_fd_watch->fd,
                                NULL, NULL, NULL, NULL, vu_fd_watch);
         }
 
@@ -366,8 +370,6 @@ void vhost_user_server_stop(VuServer *server)
         AIO_WAIT_WHILE(server->ctx, server->co_trip);
     }
 
-    aio_context_release(server->ctx);
-
     if (server->listener) {
         qio_net_listener_disconnect(server->listener);
         object_unref(OBJECT(server->listener));
@@ -400,7 +402,7 @@ void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx)
     qio_channel_attach_aio_context(server->ioc, ctx);
 
     QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
-        aio_set_fd_handler(ctx, vu_fd_watch->fd, true, kick_handler, NULL,
+        aio_set_fd_handler(ctx, vu_fd_watch->fd, kick_handler, NULL,
                            NULL, NULL, vu_fd_watch);
     }
 
@@ -414,7 +416,7 @@ void vhost_user_server_detach_aio_context(VuServer *server)
     VuFdWatch *vu_fd_watch;
 
     QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
-        aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
+        aio_set_fd_handler(server->ctx, vu_fd_watch->fd,
                            NULL, NULL, NULL, NULL, vu_fd_watch);
     }
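The refcount-to-in_flight conversion is a standard atomic completion gate: qatomic_fetch_dec() returning 1 selects exactly one "last completer" to do the wakeup, and the acquire load orders the drainer against the finished requests. A generic sketch of the same shape — Gate and its helpers are illustrative, not part of the patch:

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"

    /* Illustrative in-flight gate in the shape of the VuServer change. */
    typedef struct {
        unsigned in_flight;   /* accessed only via qatomic_* */
    } Gate;

    static void gate_enter(Gate *g)
    {
        qatomic_inc(&g->in_flight);
    }

    static void gate_leave(Gate *g)
    {
        /* fetch_dec returns the old value, so == 1 means last one out;
         * exactly one completer takes the wakeup path. */
        if (qatomic_fetch_dec(&g->in_flight) == 1) {
            /* e.g. wake a draining coroutine, as vu_client_trip() does */
        }
    }

    static bool gate_busy(Gate *g)
    {
        /* Acquire pairing: once this reads 0, the requests' writes are
         * visible to the caller. */
        return qatomic_load_acquire(&g->in_flight) > 0;
    }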